Merge pull request 'feat/gitea' (#2) from feat/gitea into development
Reviewed-on: hhl-infra/claude-code-hhl-toolkit#2
This commit was merged in pull request #2.
6
.vscfavoriterc
Normal file
@@ -0,0 +1,6 @@
{
"resources": [],
"groups": [
"Default"
]
}
@@ -163,7 +163,7 @@ See [docs/reference-material/projman-implementation-plan.md](docs/reference-mate
|
||||
⚠️ **See `docs/CORRECT-ARCHITECTURE.md` for the authoritative structure reference**
|
||||
|
||||
```
|
||||
hyperhivelabs/claude-plugins/
|
||||
hhl-infra/claude-code-hhl-toolkit/
|
||||
├── .claude-plugin/
|
||||
│ └── marketplace.json
|
||||
├── mcp-servers/ # ← SHARED BY BOTH PLUGINS
|
||||
|
||||
@@ -110,7 +110,7 @@
|
||||
"group": "Core"
|
||||
},
|
||||
{
|
||||
"filePath": "hhl-claude-agents.code-workspace",
|
||||
"filePath": "claude-code-hhl-toolkit.code-workspace",
|
||||
"group": "Core"
|
||||
},
|
||||
{
|
||||
@@ -141,7 +141,8 @@
|
||||
"filePath": ".claude-plugins/marketplace.json",
|
||||
"group": "Plugins"
|
||||
}
|
||||
]
|
||||
],
|
||||
"chat.disableAIFeatures": true
|
||||
|
||||
|
||||
}
|
||||
@@ -63,14 +63,14 @@ def load(self):
|
||||
**File:** `~/.config/claude/gitea.env`
|
||||
|
||||
```bash
|
||||
GITEA_API_URL=https://gitea.hyperhivelabs.com/api/v1
|
||||
GITEA_API_URL=https://gitea.hotserv.cloud/api/v1
|
||||
GITEA_API_TOKEN=your_gitea_token
|
||||
GITEA_OWNER=hyperhivelabs
|
||||
GITEA_OWNER=hhl-infra
|
||||
```
|
||||
|
||||
**Generating Gitea API Token:**
|
||||
|
||||
1. Log into Gitea: https://gitea.hyperhivelabs.com
|
||||
1. Log into Gitea: https://gitea.hotserv.cloud
|
||||
2. Navigate to: **Settings** → **Applications** → **Manage Access Tokens**
|
||||
3. Click **Generate New Token**
|
||||
4. Token configuration:
|
||||
@@ -90,9 +90,9 @@ mkdir -p ~/.config/claude
|
||||
|
||||
# Create gitea.env
|
||||
cat > ~/.config/claude/gitea.env << EOF
|
||||
GITEA_API_URL=https://gitea.hyperhivelabs.com/api/v1
|
||||
GITEA_API_URL=https://gitea.hotserv.cloud/api/v1
|
||||
GITEA_API_TOKEN=your_token_here
|
||||
GITEA_OWNER=hyperhivelabs
|
||||
GITEA_OWNER=hhl-infra
|
||||
EOF
|
||||
|
||||
# Secure the file (important!)
|
||||
|
||||
@@ -49,10 +49,10 @@ projman-pmo/
|
||||
"displayName": "Projman PMO - Multi-Project Coordination",
|
||||
"description": "PMO coordination with cross-project visibility, dependency tracking, and resource management",
|
||||
"author": "Hyper Hive Labs",
|
||||
"homepage": "https://gitea.hyperhivelabs.com/hyperhivelabs/claude-plugins/projman-pmo",
|
||||
"homepage": "https://gitea.hotserv.cloud/hhl-infra/claude-code-hhl-toolkit/projman-pmo",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://gitea.hyperhivelabs.com/hyperhivelabs/claude-plugins.git"
|
||||
"url": "https://gitea.hotserv.cloud/hhl-infra/claude-code-hhl-toolkit.git"
|
||||
},
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
|
||||
@@ -55,10 +55,10 @@ projman/
|
||||
"displayName": "Projman - Single-Repository Project Management",
|
||||
"description": "Sprint planning and project management with Gitea and Wiki.js integration",
|
||||
"author": "Hyper Hive Labs",
|
||||
"homepage": "https://gitea.hyperhivelabs.com/hyperhivelabs/claude-plugins/projman",
|
||||
"homepage": "https://gitea.hotserv.cloud/hhl-infra/claude-code-hhl-toolkit/projman",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://gitea.hyperhivelabs.com/hyperhivelabs/claude-plugins.git"
|
||||
"url": "https://gitea.hotserv.cloud/hhl-infra/claude-code-hhl-toolkit.git"
|
||||
},
|
||||
"license": "MIT",
|
||||
"keywords": [
|
||||
@@ -422,7 +422,7 @@ Planner: I'll create the issue...
|
||||
[Tool executes: create_issue(...)]
|
||||
|
||||
Created issue #47: "Extract Intuit Engine Service"
|
||||
View at: https://gitea.hyperhivelabs.com/org/repo/issues/47
|
||||
View at: https://gitea.hotserv.cloud/org/repo/issues/47
|
||||
|
||||
Now let me generate a detailed sprint plan...
|
||||
```
|
||||
|
||||
@@ -80,7 +80,7 @@ The MCP servers detect their operating mode based on environment variables:
|
||||
## Repository Structure
|
||||
|
||||
```
|
||||
hyperhivelabs/claude-plugins/
|
||||
hhl-infra/claude-code-hhl-toolkit/
|
||||
├── mcp-servers/ # ← SHARED BY BOTH PLUGINS
|
||||
│ ├── gitea/ # Gitea MCP Server
|
||||
│ │ ├── .venv/
|
||||
@@ -150,9 +150,9 @@ The plugins use a hybrid configuration approach that balances security and flexi
|
||||
**System-Level:**
|
||||
```bash
|
||||
# ~/.config/claude/gitea.env
|
||||
GITEA_API_URL=https://gitea.hyperhivelabs.com/api/v1
|
||||
GITEA_API_URL=https://gitea.hotserv.cloud/api/v1
|
||||
GITEA_API_TOKEN=your_token
|
||||
GITEA_OWNER=hyperhivelabs
|
||||
GITEA_OWNER=hhl-infra
|
||||
|
||||
# ~/.config/claude/wikijs.env
|
||||
WIKIJS_API_URL=https://wiki.hyperhivelabs.com/graphql
|
||||
@@ -366,9 +366,9 @@ mkdir -p ~/.config/claude
|
||||
|
||||
# Gitea config
|
||||
cat > ~/.config/claude/gitea.env << EOF
|
||||
GITEA_API_URL=https://gitea.hyperhivelabs.com/api/v1
|
||||
GITEA_API_URL=https://gitea.hotserv.cloud/api/v1
|
||||
GITEA_API_TOKEN=your_gitea_token
|
||||
GITEA_OWNER=hyperhivelabs
|
||||
GITEA_OWNER=hhl-infra
|
||||
EOF
|
||||
|
||||
# Wiki.js config
|
||||
|
||||
413
mcp-servers/gitea/README.md
Normal file
@@ -0,0 +1,413 @@
|
||||
# Gitea MCP Server
|
||||
|
||||
Model Context Protocol (MCP) server for Gitea integration with Claude Code.
|
||||
|
||||
## Overview
|
||||
|
||||
The Gitea MCP Server provides Claude Code with direct access to Gitea for issue management, label operations, and repository tracking. It supports both single-repository (project mode) and multi-repository (company/PMO mode) operations.
|
||||
|
||||
**Status**: ✅ Phase 1 Complete - Fully functional and tested
|
||||
|
||||
## Features
|
||||
|
||||
### Core Functionality
|
||||
|
||||
- **Issue Management**: CRUD operations for Gitea issues
|
||||
- **Label Taxonomy**: Dynamic 44-label system with intelligent suggestions
|
||||
- **Mode Detection**: Automatic project vs company-wide mode detection
|
||||
- **Branch-Aware Security**: Prevents accidental changes on production branches
|
||||
- **Hybrid Configuration**: System-level credentials + project-level paths
|
||||
- **PMO Support**: Multi-repository aggregation for organization-wide views
|
||||
|
||||
### Tools Provided
|
||||
|
||||
| Tool | Description | Mode |
|
||||
|------|-------------|------|
|
||||
| `list_issues` | List issues from repository | Both |
|
||||
| `get_issue` | Get specific issue details | Both |
|
||||
| `create_issue` | Create new issue with labels | Both |
|
||||
| `update_issue` | Update existing issue | Both |
|
||||
| `add_comment` | Add comment to issue | Both |
|
||||
| `get_labels` | Get all labels (org + repo) | Both |
|
||||
| `suggest_labels` | Intelligent label suggestion | Both |
|
||||
| `aggregate_issues` | Cross-repository issue aggregation | PMO Only |
|
||||
|
||||
## Architecture
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```
|
||||
mcp-servers/gitea/
|
||||
├── .venv/ # Python virtual environment
|
||||
├── requirements.txt # Python dependencies
|
||||
├── mcp_server/
|
||||
│ ├── __init__.py
|
||||
│ ├── server.py # MCP server entry point
|
||||
│ ├── config.py # Configuration loader
|
||||
│ ├── gitea_client.py # Gitea API client
|
||||
│ └── tools/
|
||||
│ ├── __init__.py
|
||||
│ ├── issues.py # Issue tools
|
||||
│ └── labels.py # Label tools
|
||||
├── tests/
|
||||
│ ├── __init__.py
|
||||
│ ├── test_config.py
|
||||
│ ├── test_gitea_client.py
|
||||
│ ├── test_issues.py
|
||||
│ └── test_labels.py
|
||||
├── README.md # This file
|
||||
└── TESTING.md # Testing instructions
|
||||
```
|
||||
|
||||
### Mode Detection
|
||||
|
||||
The server operates in two modes based on environment variables:
|
||||
|
||||
**Project Mode** (Single Repository):
|
||||
- When `GITEA_REPO` is set
|
||||
- Operates on single repository
|
||||
- Used by `projman` plugin
|
||||
|
||||
**Company Mode** (Multi-Repository / PMO):
|
||||
- When `GITEA_REPO` is NOT set
|
||||
- Operates on all repositories in organization
|
||||
- Used by `projman-pmo` plugin
|
||||
|
||||
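A minimal sketch of the detection rule, assuming only the environment variable described above (the authoritative loader is `mcp_server/config.py`, included later in this diff):

```python
import os

def detect_mode() -> str:
    """Sketch only: GITEA_REPO present -> project mode, otherwise company/PMO mode."""
    return "project" if os.getenv("GITEA_REPO") else "company"
```
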
### Branch-Aware Security
|
||||
|
||||
Operations are restricted based on the current Git branch:
|
||||
|
||||
| Branch | Read | Create Issue | Update/Comment |
|
||||
|--------|------|--------------|----------------|
|
||||
| `main`, `master`, `prod/*` | ✅ | ❌ | ❌ |
|
||||
| `staging`, `stage/*` | ✅ | ✅ | ❌ |
|
||||
| `development`, `develop`, `feat/*`, `dev/*` | ✅ | ✅ | ✅ |
|
||||
|
||||
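A minimal sketch of how the table above translates into a permission check; the real implementation lives in `mcp_server/tools/issues.py`, included later in this diff:

```python
def is_allowed(branch: str, operation: str) -> bool:
    """Sketch of the branch policy table: reads everywhere, writes only lower down."""
    read_ops = {"list_issues", "get_issue", "get_labels"}
    if branch in ("main", "master") or branch.startswith("prod/"):
        return operation in read_ops
    if branch == "staging" or branch.startswith("stage/"):
        return operation in read_ops | {"create_issue"}
    if branch in ("development", "develop") or branch.startswith(("feat/", "dev/")):
        return True
    return False  # unknown branches are treated restrictively
```
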
## Installation
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Python 3.10 or higher
|
||||
- Git repository (for branch detection)
|
||||
- Access to Gitea instance with API token
|
||||
|
||||
### Step 1: Install Dependencies
|
||||
|
||||
```bash
|
||||
cd mcp-servers/gitea
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate # Linux/Mac
|
||||
# or .venv\Scripts\activate # Windows
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### Step 2: Configure System-Level Settings
|
||||
|
||||
Create `~/.config/claude/gitea.env`:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.config/claude
|
||||
|
||||
cat > ~/.config/claude/gitea.env << EOF
|
||||
GITEA_API_URL=https://gitea.hotserv.cloud/api/v1
|
||||
GITEA_API_TOKEN=your_gitea_token_here
|
||||
GITEA_OWNER=hhl-infra
|
||||
EOF
|
||||
|
||||
chmod 600 ~/.config/claude/gitea.env
|
||||
```
|
||||
|
||||
### Step 3: Configure Project-Level Settings (Optional)
|
||||
|
||||
For project mode, create `.env` in your project root:
|
||||
|
||||
```bash
|
||||
echo "GITEA_REPO=your-repo-name" > .env
|
||||
echo ".env" >> .gitignore
|
||||
```
|
||||
|
||||
For company/PMO mode, omit the `.env` file or don't set `GITEA_REPO`.
|
||||
|
||||
## Configuration
|
||||
|
||||
### System-Level Configuration
|
||||
|
||||
**File**: `~/.config/claude/gitea.env`
|
||||
|
||||
**Required Variables**:
|
||||
- `GITEA_API_URL` - Gitea API endpoint (e.g., `https://gitea.hotserv.cloud/api/v1`)
|
||||
- `GITEA_API_TOKEN` - Personal access token with repo permissions
|
||||
- `GITEA_OWNER` - Organization or user name (e.g., `hhl-infra`)
|
||||
|
||||
### Project-Level Configuration
|
||||
|
||||
**File**: `<project-root>/.env`
|
||||
|
||||
**Optional Variables**:
|
||||
- `GITEA_REPO` - Repository name (enables project mode)
|
||||
|
||||
### Generating Gitea API Token
|
||||
|
||||
1. Log into Gitea: https://gitea.hotserv.cloud
|
||||
2. Navigate to: **Settings** → **Applications** → **Manage Access Tokens**
|
||||
3. Click **Generate New Token**
|
||||
4. Configure token:
|
||||
- **Token Name**: `claude-code-mcp`
|
||||
- **Permissions**:
|
||||
- ✅ `repo` (all) - Read/write repositories, issues, labels
|
||||
- ✅ `read:org` - Read organization information and labels
|
||||
- ✅ `read:user` - Read user information
|
||||
5. Click **Generate Token**
|
||||
6. Copy token immediately (shown only once)
|
||||
7. Add to `~/.config/claude/gitea.env`
|
||||
|
||||
## Usage
|
||||
|
||||
### Running the MCP Server
|
||||
|
||||
```bash
|
||||
cd mcp-servers/gitea
|
||||
source .venv/bin/activate
|
||||
python -m mcp_server.server
|
||||
```
|
||||
|
||||
The server communicates via JSON-RPC 2.0 over stdio.
|
||||
|
||||
### Integration with Claude Code Plugins
|
||||
|
||||
The MCP server is designed to be used by Claude Code plugins via `.mcp.json` configuration:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"gitea": {
|
||||
"command": "python",
|
||||
"args": ["-m", "mcp_server.server"],
|
||||
"cwd": "${CLAUDE_PLUGIN_ROOT}/../mcp-servers/gitea",
|
||||
"env": {
|
||||
"PYTHONPATH": "${CLAUDE_PLUGIN_ROOT}/../mcp-servers/gitea"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Example Tool Calls
|
||||
|
||||
**List Issues**:
|
||||
```python
|
||||
from mcp_server.tools.issues import IssueTools
|
||||
from mcp_server.gitea_client import GiteaClient
|
||||
|
||||
client = GiteaClient()
|
||||
issue_tools = IssueTools(client)
|
||||
|
||||
issues = await issue_tools.list_issues(state='open', labels=['Type/Bug'])
|
||||
```
|
||||
|
||||
**Suggest Labels**:
|
||||
```python
|
||||
from mcp_server.tools.labels import LabelTools
|
||||
|
||||
label_tools = LabelTools(client)
|
||||
|
||||
context = "Fix critical authentication bug in production API"
|
||||
suggestions = await label_tools.suggest_labels(context)
|
||||
# Returns: ['Type/Bug', 'Priority/Critical', 'Component/Auth', 'Component/API', ...]
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
### Unit Tests
|
||||
|
||||
Run all 42 unit tests with mocks:
|
||||
|
||||
```bash
|
||||
pytest tests/ -v
|
||||
```
|
||||
|
||||
Expected: `42 passed in 0.57s`
|
||||
|
||||
### Integration Tests
|
||||
|
||||
Test with real Gitea instance:
|
||||
|
||||
```bash
|
||||
python -c "
|
||||
from mcp_server.gitea_client import GiteaClient
|
||||
|
||||
client = GiteaClient()
|
||||
issues = client.list_issues(state='open')
|
||||
print(f'Found {len(issues)} open issues')
|
||||
"
|
||||
```
|
||||
|
||||
### Full Testing Guide
|
||||
|
||||
See [TESTING.md](./TESTING.md) for comprehensive testing instructions.
|
||||
|
||||
## Label Taxonomy System
|
||||
|
||||
The system supports a dynamic 44-label taxonomy (28 org + 16 repo):
|
||||
|
||||
**Organization Labels (28)**:
|
||||
- `Agent/*` (2) - Agent/Human, Agent/Claude
|
||||
- `Complexity/*` (3) - Simple, Medium, Complex
|
||||
- `Efforts/*` (5) - XS, S, M, L, XL
|
||||
- `Priority/*` (4) - Low, Medium, High, Critical
|
||||
- `Risk/*` (3) - Low, Medium, High
|
||||
- `Source/*` (4) - Development, Staging, Production, Customer
|
||||
- `Type/*` (6) - Bug, Feature, Refactor, Documentation, Test, Chore
|
||||
|
||||
**Repository Labels (16)**:
|
||||
- `Component/*` (9) - Backend, Frontend, API, Database, Auth, Deploy, Testing, Docs, Infra
|
||||
- `Tech/*` (7) - Python, JavaScript, Docker, PostgreSQL, Redis, Vue, FastAPI
|
||||
|
||||
Labels are fetched dynamically from Gitea and suggestions adapt to the current taxonomy.
|
||||
|
||||
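The suggestion logic itself lives in `mcp_server/tools/labels.py` and is not reproduced in this README. Purely as an illustration, a keyword match against the fetched taxonomy could look roughly like the sketch below; the function name and matching rule are assumptions, not the shipped algorithm:

```python
def suggest_labels_sketch(context: str, available_labels: list[str]) -> list[str]:
    """Hypothetical sketch: keep labels whose last path segment
    (e.g. 'Bug' in 'Type/Bug') appears in the issue text."""
    text = context.lower()
    return [
        label for label in available_labels
        if label.split("/")[-1].lower() in text
    ]
```

For the earlier example context ("Fix critical authentication bug in production API"), such a match would surface labels like `Type/Bug`, `Priority/Critical`, and `Component/API` from whatever taxonomy Gitea returns.
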
## Security
|
||||
|
||||
### Token Storage
|
||||
|
||||
- Store tokens in `~/.config/claude/gitea.env`
|
||||
- Set file permissions to `600` (read/write owner only)
|
||||
- Never commit tokens to Git
|
||||
- Use separate tokens for development and production
|
||||
|
||||
### Branch Detection
|
||||
|
||||
The MCP server implements defense-in-depth branch detection:
|
||||
|
||||
1. **MCP Tools**: Check branch before operations
|
||||
2. **Agent Prompts**: Warn users about branch restrictions
|
||||
3. **CLAUDE.md**: Provides additional context
|
||||
|
||||
### Input Validation
|
||||
|
||||
- All user input is validated before API calls
|
||||
- Issue titles and descriptions are sanitized
|
||||
- Label names are checked against taxonomy
|
||||
- Repository names are validated
|
||||
|
||||
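The validation code is not reproduced in this README; purely as a hypothetical illustration of the checks listed above (names and error messages below are assumptions):

```python
def validate_issue_input(title: str, labels: list[str], taxonomy: set[str]) -> None:
    """Hypothetical sketch: reject empty titles and labels outside the fetched taxonomy."""
    if not title.strip():
        raise ValueError("Issue title must not be empty")
    unknown = [label for label in labels if label not in taxonomy]
    if unknown:
        raise ValueError(f"Labels not in taxonomy: {', '.join(unknown)}")
```
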
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Module not found**:
|
||||
```bash
|
||||
cd mcp-servers/gitea
|
||||
source .venv/bin/activate
|
||||
```
|
||||
|
||||
**Configuration not found**:
|
||||
```bash
|
||||
ls -la ~/.config/claude/gitea.env
|
||||
# If missing, create it following installation steps
|
||||
```
|
||||
|
||||
**Authentication failed**:
|
||||
```bash
|
||||
# Test token manually
|
||||
curl -H "Authorization: token YOUR_TOKEN" \
|
||||
https://gitea.hotserv.cloud/api/v1/user
|
||||
```
|
||||
|
||||
**Permission denied on branch**:
|
||||
```bash
|
||||
# Check current branch
|
||||
git branch --show-current
|
||||
|
||||
# Switch to development branch
|
||||
git checkout development
|
||||
```
|
||||
|
||||
See [TESTING.md](./TESTING.md#troubleshooting) for more details.
|
||||
|
||||
## Development
|
||||
|
||||
### Project Structure
|
||||
|
||||
- `config.py` - Hybrid configuration loader with mode detection
|
||||
- `gitea_client.py` - Synchronous Gitea API client using requests
|
||||
- `tools/issues.py` - Async wrappers with branch detection
|
||||
- `tools/labels.py` - Label management and suggestion
|
||||
- `server.py` - MCP server with JSON-RPC 2.0 over stdio
|
||||
|
||||
### Adding New Tools
|
||||
|
||||
1. Add method to `GiteaClient` (sync)
|
||||
2. Add async wrapper to appropriate tool class
|
||||
3. Register tool in `server.py` `setup_tools()`
|
||||
4. Add unit tests
|
||||
5. Update documentation
|
||||
|
||||
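As an illustration of step 3, a hypothetical `close_issue` tool would be registered with the same `Tool(...)` pattern used in `server.py` later in this diff (the tool name and schema below are illustrative and not part of this PR):

```python
from mcp.types import Tool

# Added alongside the existing entries returned by list_tools() in setup_tools()
close_issue_tool = Tool(
    name="close_issue",  # hypothetical tool, not shipped in Phase 1
    description="Close an existing issue",
    inputSchema={
        "type": "object",
        "properties": {
            "issue_number": {"type": "integer", "description": "Issue number"},
            "repo": {"type": "string", "description": "Repository name (for PMO mode)"},
        },
        "required": ["issue_number"],
    },
)
```
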
### Testing Philosophy
|
||||
|
||||
- **Unit tests**: Use mocks for fast feedback
|
||||
- **Integration tests**: Use real Gitea API for validation
|
||||
- **Branch detection**: Test all branch types
|
||||
- **Mode detection**: Test both project and company modes
|
||||
|
||||
## Performance
|
||||
|
||||
### Caching
|
||||
|
||||
Labels are cached to reduce API calls:
|
||||
|
||||
```python
|
||||
from functools import lru_cache
|
||||
|
||||
@lru_cache(maxsize=128)
|
||||
def get_labels_cached(self, repo: str):
|
||||
return self.get_labels(repo)
|
||||
```
|
||||
|
||||
### Retry Logic
|
||||
|
||||
API calls include automatic retry with exponential backoff:
|
||||
|
||||
```python
|
||||
@retry_on_failure(max_retries=3, delay=1)
|
||||
def list_issues(self, state='open', labels=None, repo=None):
|
||||
# Implementation
|
||||
```
|
||||
|
||||
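The decorator itself is not shown in this README; a minimal sketch of an exponential-backoff wrapper with that signature might look like this (an assumption — the server's actual implementation may differ):

```python
import functools
import time

def retry_on_failure(max_retries: int = 3, delay: float = 1.0):
    """Sketch: retry the wrapped call, doubling the wait after each failure."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            wait = delay
            for attempt in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == max_retries - 1:
                        raise
                    time.sleep(wait)
                    wait *= 2  # exponential backoff
        return wrapper
    return decorator
```
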
## Changelog
|
||||
|
||||
### v1.0.0 (2025-01-06) - Phase 1 Complete
|
||||
|
||||
✅ Initial implementation:
|
||||
- Configuration management (hybrid system + project)
|
||||
- Gitea API client with all CRUD operations
|
||||
- MCP server with 8 tools
|
||||
- Issue tools with branch detection
|
||||
- Label tools with intelligent suggestions
|
||||
- Mode detection (project vs company)
|
||||
- Branch-aware security model
|
||||
- 42 unit tests (100% passing)
|
||||
- Comprehensive documentation
|
||||
|
||||
## License
|
||||
|
||||
Part of the HyperHive Labs Claude Code Plugins project.
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- **MCP Specification**: `docs/references/MCP-GITEA.md`
|
||||
- **Project Summary**: `docs/references/PROJECT-SUMMARY.md`
|
||||
- **Implementation Plan**: `docs/reference-material/projman-implementation-plan.md`
|
||||
- **Testing Guide**: `TESTING.md`
|
||||
|
||||
## Support
|
||||
|
||||
For issues or questions:
|
||||
1. Check [TESTING.md](./TESTING.md) troubleshooting section
|
||||
2. Review [MCP-GITEA.md](../../docs/references/MCP-GITEA.md) specification
|
||||
3. Create an issue in the project repository
|
||||
|
||||
---
|
||||
|
||||
**Built for**: HyperHive Labs Project Management Plugins
|
||||
**Phase**: 1 (Complete)
|
||||
**Status**: ✅ Production Ready
|
||||
**Last Updated**: 2025-01-06
|
||||
582
mcp-servers/gitea/TESTING.md
Normal file
@@ -0,0 +1,582 @@
|
||||
# Gitea MCP Server - Testing Guide
|
||||
|
||||
This document provides comprehensive testing instructions for the Gitea MCP Server implementation.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [Unit Tests](#unit-tests)
|
||||
2. [Manual MCP Server Testing](#manual-mcp-server-testing)
|
||||
3. [Integration Testing](#integration-testing)
|
||||
4. [Configuration Setup for Testing](#configuration-setup-for-testing)
|
||||
5. [Troubleshooting](#troubleshooting)
|
||||
|
||||
---
|
||||
|
||||
## Unit Tests
|
||||
|
||||
Unit tests use mocks to test all modules without requiring a real Gitea instance.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Ensure the virtual environment is activated and dependencies are installed:
|
||||
|
||||
```bash
|
||||
cd mcp-servers/gitea
|
||||
source .venv/bin/activate # Linux/Mac
|
||||
# or .venv\Scripts\activate # Windows
|
||||
```
|
||||
|
||||
### Running All Tests
|
||||
|
||||
Run all 42 unit tests:
|
||||
|
||||
```bash
|
||||
pytest tests/ -v
|
||||
```
|
||||
|
||||
Expected output:
|
||||
```
|
||||
============================== 42 passed in 0.57s ==============================
|
||||
```
|
||||
|
||||
### Running Specific Test Files
|
||||
|
||||
Run tests for a specific module:
|
||||
|
||||
```bash
|
||||
# Configuration tests
|
||||
pytest tests/test_config.py -v
|
||||
|
||||
# Gitea client tests
|
||||
pytest tests/test_gitea_client.py -v
|
||||
|
||||
# Issue tools tests
|
||||
pytest tests/test_issues.py -v
|
||||
|
||||
# Label tools tests
|
||||
pytest tests/test_labels.py -v
|
||||
```
|
||||
|
||||
### Running Specific Tests
|
||||
|
||||
Run a single test:
|
||||
|
||||
```bash
|
||||
pytest tests/test_config.py::test_load_system_config -v
|
||||
```
|
||||
|
||||
### Test Coverage
|
||||
|
||||
Generate coverage report:
|
||||
|
||||
```bash
|
||||
pytest --cov=mcp_server --cov-report=html tests/
|
||||
|
||||
# View coverage report
|
||||
# Open htmlcov/index.html in your browser
|
||||
```
|
||||
|
||||
Expected coverage: >80% for all modules
|
||||
|
||||
### Test Organization
|
||||
|
||||
**Configuration Tests** (`test_config.py`):
|
||||
- System-level configuration loading
|
||||
- Project-level configuration override
|
||||
- Mode detection (project vs company)
|
||||
- Missing configuration handling
|
||||
|
||||
**Gitea Client Tests** (`test_gitea_client.py`):
|
||||
- API client initialization
|
||||
- Issue CRUD operations
|
||||
- Label retrieval
|
||||
- PMO multi-repo operations
|
||||
|
||||
**Issue Tools Tests** (`test_issues.py`):
|
||||
- Branch-aware security checks
|
||||
- Async wrappers for sync client
|
||||
- Permission enforcement
|
||||
- PMO aggregation mode
|
||||
|
||||
**Label Tools Tests** (`test_labels.py`):
|
||||
- Label retrieval (org + repo)
|
||||
- Intelligent label suggestion
|
||||
- Multi-category detection
|
||||
|
||||
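The individual test cases are not reproduced here; as a sketch of the mock-based style described above (the test name and assertions are illustrative, assuming the `IssueTools` API added in this PR):

```python
import asyncio
from unittest.mock import MagicMock, patch

from mcp_server.tools.issues import IssueTools

def test_list_issues_delegates_to_client():
    """Sketch: IssueTools.list_issues should call GiteaClient.list_issues."""
    client = MagicMock()
    client.list_issues.return_value = [{"number": 1, "title": "demo"}]
    tools = IssueTools(client)

    # Pretend we are on a development branch so the permission check passes
    with patch.object(IssueTools, "_get_current_branch", return_value="development"):
        issues = asyncio.run(tools.list_issues(state="open"))

    assert issues[0]["number"] == 1
    client.list_issues.assert_called_once_with("open", None, None)
```
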
---
|
||||
|
||||
## Manual MCP Server Testing
|
||||
|
||||
Test the MCP server manually using stdio communication.
|
||||
|
||||
### Step 1: Start the MCP Server
|
||||
|
||||
```bash
|
||||
cd mcp-servers/gitea
|
||||
source .venv/bin/activate
|
||||
python -m mcp_server.server
|
||||
```
|
||||
|
||||
The server will start and wait for JSON-RPC 2.0 messages on stdin.
|
||||
|
||||
### Step 2: Test Tool Listing
|
||||
|
||||
In another terminal, send a tool listing request:
|
||||
|
||||
```bash
|
||||
echo '{"jsonrpc": "2.0", "id": 1, "method": "tools/list"}' | python -m mcp_server.server
|
||||
```
|
||||
|
||||
Expected response:
|
||||
```json
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 1,
|
||||
"result": {
|
||||
"tools": [
|
||||
{"name": "list_issues", "description": "List issues from Gitea repository", ...},
|
||||
{"name": "get_issue", "description": "Get specific issue details", ...},
|
||||
{"name": "create_issue", "description": "Create a new issue in Gitea", ...},
|
||||
...
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Step 3: Test Tool Invocation
|
||||
|
||||
**Note:** Manual tool invocation requires proper configuration. See [Configuration Setup](#configuration-setup-for-testing).
|
||||
|
||||
Example: List issues
|
||||
```bash
|
||||
echo '{
|
||||
"jsonrpc": "2.0",
|
||||
"id": 2,
|
||||
"method": "tools/call",
|
||||
"params": {
|
||||
"name": "list_issues",
|
||||
"arguments": {
|
||||
"state": "open"
|
||||
}
|
||||
}
|
||||
}' | python -m mcp_server.server
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Integration Testing
|
||||
|
||||
Test the MCP server with a real Gitea instance.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
1. **Gitea Instance**: Access to https://gitea.hotserv.cloud (or your Gitea instance)
|
||||
2. **API Token**: Personal access token with required permissions
|
||||
3. **Configuration**: Properly configured system and project configs
|
||||
|
||||
### Step 1: Configuration Setup
|
||||
|
||||
Create system-level configuration:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.config/claude
|
||||
|
||||
cat > ~/.config/claude/gitea.env << EOF
|
||||
GITEA_API_URL=https://gitea.hotserv.cloud/api/v1
|
||||
GITEA_API_TOKEN=your_gitea_token_here
|
||||
GITEA_OWNER=hhl-infra
|
||||
EOF
|
||||
|
||||
chmod 600 ~/.config/claude/gitea.env
|
||||
```
|
||||
|
||||
Create project-level configuration (for project mode testing):
|
||||
|
||||
```bash
|
||||
cd /path/to/test/project
|
||||
|
||||
cat > .env << EOF
|
||||
GITEA_REPO=test-repo
|
||||
EOF
|
||||
|
||||
# Add to .gitignore
|
||||
echo ".env" >> .gitignore
|
||||
```
|
||||
|
||||
### Step 2: Generate Gitea API Token
|
||||
|
||||
1. Log into Gitea: https://gitea.hotserv.cloud
|
||||
2. Navigate to: **Settings** → **Applications** → **Manage Access Tokens**
|
||||
3. Click **Generate New Token**
|
||||
4. Token configuration:
|
||||
- **Token Name:** `mcp-integration-test`
|
||||
- **Required Permissions:**
|
||||
- ✅ `repo` (all) - Read/write access to repositories, issues, labels
|
||||
- ✅ `read:org` - Read organization information and labels
|
||||
- ✅ `read:user` - Read user information
|
||||
5. Click **Generate Token**
|
||||
6. Copy the token immediately (shown only once)
|
||||
7. Add to `~/.config/claude/gitea.env`
|
||||
|
||||
### Step 3: Verify Configuration
|
||||
|
||||
Test configuration loading:
|
||||
|
||||
```bash
|
||||
cd mcp-servers/gitea
|
||||
source .venv/bin/activate
|
||||
python -c "
|
||||
from mcp_server.config import GiteaConfig
|
||||
config = GiteaConfig()
|
||||
result = config.load()
|
||||
print(f'API URL: {result[\"api_url\"]}')
|
||||
print(f'Owner: {result[\"owner\"]}')
|
||||
print(f'Repo: {result[\"repo\"]}')
|
||||
print(f'Mode: {result[\"mode\"]}')
|
||||
"
|
||||
```
|
||||
|
||||
Expected output:
|
||||
```
|
||||
API URL: https://gitea.hotserv.cloud/api/v1
|
||||
Owner: hhl-infra
|
||||
Repo: test-repo (or None for company mode)
|
||||
Mode: project (or company)
|
||||
```
|
||||
|
||||
### Step 4: Test Gitea Client
|
||||
|
||||
Test basic Gitea API operations:
|
||||
|
||||
```bash
|
||||
python -c "
|
||||
from mcp_server.gitea_client import GiteaClient
|
||||
|
||||
client = GiteaClient()
|
||||
|
||||
# Test listing issues
|
||||
print('Testing list_issues...')
|
||||
issues = client.list_issues(state='open')
|
||||
print(f'Found {len(issues)} open issues')
|
||||
|
||||
# Test getting labels
|
||||
print('\\nTesting get_labels...')
|
||||
labels = client.get_labels()
|
||||
print(f'Found {len(labels)} repository labels')
|
||||
|
||||
# Test getting org labels
|
||||
print('\\nTesting get_org_labels...')
|
||||
org_labels = client.get_org_labels()
|
||||
print(f'Found {len(org_labels)} organization labels')
|
||||
|
||||
print('\\n✅ All integration tests passed!')
|
||||
"
|
||||
```
|
||||
|
||||
### Step 5: Test Issue Creation (Optional)
|
||||
|
||||
**Warning:** This creates a real issue in Gitea. Use a test repository.
|
||||
|
||||
```bash
|
||||
python -c "
|
||||
from mcp_server.gitea_client import GiteaClient
|
||||
|
||||
client = GiteaClient()
|
||||
|
||||
# Create test issue
|
||||
print('Creating test issue...')
|
||||
issue = client.create_issue(
|
||||
title='[TEST] MCP Server Integration Test',
|
||||
body='This is a test issue created by the Gitea MCP Server integration tests.',
|
||||
labels=['Type/Test']
|
||||
)
|
||||
print(f'Created issue #{issue[\"number\"]}: {issue[\"title\"]}')
|
||||
|
||||
# Clean up: Close the issue
|
||||
print('\\nClosing test issue...')
|
||||
client.update_issue(issue['number'], state='closed')
|
||||
print('✅ Test issue closed')
|
||||
"
|
||||
```
|
||||
|
||||
### Step 6: Test MCP Server with Real API
|
||||
|
||||
Start the MCP server and test with real Gitea API:
|
||||
|
||||
```bash
|
||||
cd mcp-servers/gitea
|
||||
source .venv/bin/activate
|
||||
|
||||
# Run server with test script
|
||||
python << 'EOF'
|
||||
import asyncio
|
||||
import json
|
||||
from mcp_server.server import GiteaMCPServer
|
||||
|
||||
async def test_server():
|
||||
server = GiteaMCPServer()
|
||||
await server.initialize()
|
||||
|
||||
# Test list_issues
|
||||
result = await server.issue_tools.list_issues(state='open')
|
||||
print(f'Found {len(result)} open issues')
|
||||
|
||||
# Test get_labels
|
||||
labels = await server.label_tools.get_labels()
|
||||
print(f'Found {labels["total_count"]} total labels')
|
||||
|
||||
# Test suggest_labels
|
||||
suggestions = await server.label_tools.suggest_labels(
|
||||
"Fix critical bug in authentication"
|
||||
)
|
||||
print(f'Suggested labels: {", ".join(suggestions)}')
|
||||
|
||||
print('✅ All MCP server integration tests passed!')
|
||||
|
||||
asyncio.run(test_server())
|
||||
EOF
|
||||
```
|
||||
|
||||
### Step 7: Test PMO Mode (Optional)
|
||||
|
||||
Test company-wide mode (no GITEA_REPO):
|
||||
|
||||
```bash
|
||||
# Temporarily remove GITEA_REPO
|
||||
unset GITEA_REPO
|
||||
|
||||
python -c "
|
||||
from mcp_server.gitea_client import GiteaClient
|
||||
|
||||
client = GiteaClient()
|
||||
|
||||
print(f'Running in {client.mode} mode')
|
||||
|
||||
# Test list_repos
|
||||
print('\\nTesting list_repos...')
|
||||
repos = client.list_repos()
|
||||
print(f'Found {len(repos)} repositories')
|
||||
|
||||
# Test aggregate_issues
|
||||
print('\\nTesting aggregate_issues...')
|
||||
aggregated = client.aggregate_issues(state='open')
|
||||
for repo_name, issues in aggregated.items():
|
||||
print(f' {repo_name}: {len(issues)} open issues')
|
||||
|
||||
print('\\n✅ PMO mode tests passed!')
|
||||
"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Configuration Setup for Testing
|
||||
|
||||
### Minimal Configuration
|
||||
|
||||
**System-level** (`~/.config/claude/gitea.env`):
|
||||
```bash
|
||||
GITEA_API_URL=https://gitea.hotserv.cloud/api/v1
|
||||
GITEA_API_TOKEN=your_token_here
|
||||
GITEA_OWNER=hhl-infra
|
||||
```
|
||||
|
||||
**Project-level** (`.env` in project root):
|
||||
```bash
|
||||
# For project mode
|
||||
GITEA_REPO=test-repo
|
||||
|
||||
# For company mode (PMO), omit GITEA_REPO
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
||||
Verify configuration is correct:
|
||||
|
||||
```bash
|
||||
# Check system config exists
|
||||
ls -la ~/.config/claude/gitea.env
|
||||
|
||||
# Check permissions (should be 600)
|
||||
stat -c "%a %n" ~/.config/claude/gitea.env
|
||||
|
||||
# Check content (without exposing token)
|
||||
grep -v TOKEN ~/.config/claude/gitea.env
|
||||
|
||||
# Check project config (if using project mode)
|
||||
cat .env
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
#### 1. Import Errors
|
||||
|
||||
**Error:**
|
||||
```
|
||||
ModuleNotFoundError: No module named 'mcp_server'
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Ensure you're in the correct directory
|
||||
cd mcp-servers/gitea
|
||||
|
||||
# Activate virtual environment
|
||||
source .venv/bin/activate
|
||||
|
||||
# Verify installation
|
||||
pip list | grep mcp
|
||||
```
|
||||
|
||||
#### 2. Configuration Not Found
|
||||
|
||||
**Error:**
|
||||
```
|
||||
FileNotFoundError: System config not found: /home/user/.config/claude/gitea.env
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Create system config
|
||||
mkdir -p ~/.config/claude
|
||||
cat > ~/.config/claude/gitea.env << EOF
|
||||
GITEA_API_URL=https://gitea.hotserv.cloud/api/v1
|
||||
GITEA_API_TOKEN=your_token_here
|
||||
GITEA_OWNER=hhl-infra
|
||||
EOF
|
||||
|
||||
chmod 600 ~/.config/claude/gitea.env
|
||||
```
|
||||
|
||||
#### 3. Missing Required Configuration
|
||||
|
||||
**Error:**
|
||||
```
|
||||
ValueError: Missing required configuration: GITEA_API_TOKEN, GITEA_OWNER
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Check configuration file
|
||||
cat ~/.config/claude/gitea.env
|
||||
|
||||
# Ensure all required variables are present:
|
||||
# - GITEA_API_URL
|
||||
# - GITEA_API_TOKEN
|
||||
# - GITEA_OWNER
|
||||
```
|
||||
|
||||
#### 4. API Authentication Failed
|
||||
|
||||
**Error:**
|
||||
```
|
||||
requests.exceptions.HTTPError: 401 Client Error: Unauthorized
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Test token manually
|
||||
curl -H "Authorization: token YOUR_TOKEN" \
|
||||
https://gitea.hotserv.cloud/api/v1/user
|
||||
|
||||
# If fails, regenerate token in Gitea settings
|
||||
```
|
||||
|
||||
#### 5. Permission Errors (Branch Detection)
|
||||
|
||||
**Error:**
|
||||
```
|
||||
PermissionError: Cannot create issues on branch 'main'
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Check current branch
|
||||
git branch --show-current
|
||||
|
||||
# Switch to development branch
|
||||
git checkout development
|
||||
# or
|
||||
git checkout -b feat/test-feature
|
||||
```
|
||||
|
||||
#### 6. Repository Not Specified
|
||||
|
||||
**Error:**
|
||||
```
|
||||
ValueError: Repository not specified
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Add GITEA_REPO to project config
|
||||
echo "GITEA_REPO=your-repo-name" >> .env
|
||||
|
||||
# Or specify repo in tool call
|
||||
# (for PMO mode multi-repo operations)
|
||||
```
|
||||
|
||||
### Debug Mode
|
||||
|
||||
Enable debug logging:
|
||||
|
||||
```bash
|
||||
export LOG_LEVEL=DEBUG
|
||||
python -m mcp_server.server
|
||||
```
|
||||
|
||||
### Test Summary
|
||||
|
||||
After completing all tests, verify:
|
||||
|
||||
- ✅ All 42 unit tests pass
|
||||
- ✅ MCP server starts without errors
|
||||
- ✅ Configuration loads correctly
|
||||
- ✅ Gitea API client connects successfully
|
||||
- ✅ Issues can be listed from Gitea
|
||||
- ✅ Labels can be retrieved
|
||||
- ✅ Label suggestions work correctly
|
||||
- ✅ Branch detection blocks writes on main/staging
|
||||
- ✅ Mode detection works (project vs company)
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
Phase 1 is complete when:
|
||||
|
||||
1. **All unit tests pass** (42/42)
|
||||
2. **MCP server starts without errors**
|
||||
3. **Can list issues from Gitea**
|
||||
4. **Can create issues with labels** (in development mode)
|
||||
5. **Mode detection works** (project vs company)
|
||||
6. **Branch detection prevents writes on main/staging**
|
||||
7. **Configuration properly merges** system + project levels
|
||||
|
||||
---
|
||||
|
||||
## Next Steps
|
||||
|
||||
After completing testing:
|
||||
|
||||
1. **Document any issues** found during testing
|
||||
2. **Create integration with projman plugin** (Phase 2)
|
||||
3. **Test in real project workflow** (Phase 5)
|
||||
4. **Performance optimization** (if needed)
|
||||
5. **Production hardening** (Phase 8)
|
||||
|
||||
---
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- **MCP Documentation**: https://docs.anthropic.com/claude/docs/mcp
|
||||
- **Gitea API Documentation**: https://docs.gitea.io/en-us/api-usage/
|
||||
- **Project Documentation**: `docs/references/MCP-GITEA.md`
|
||||
- **Implementation Plan**: `docs/references/PROJECT-SUMMARY.md`
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2025-01-06 (Phase 1 Implementation)
|
||||
0
mcp-servers/gitea/mcp_server/__init__.py
Normal file
102
mcp-servers/gitea/mcp_server/config.py
Normal file
@@ -0,0 +1,102 @@
|
||||
"""
|
||||
Configuration loader for Gitea MCP Server.
|
||||
|
||||
Implements hybrid configuration system:
|
||||
- System-level: ~/.config/claude/gitea.env (credentials)
|
||||
- Project-level: .env (repository specification)
|
||||
"""
|
||||
from pathlib import Path
|
||||
from dotenv import load_dotenv
|
||||
import os
|
||||
import logging
|
||||
from typing import Dict, Optional
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GiteaConfig:
|
||||
"""Hybrid configuration loader with mode detection"""
|
||||
|
||||
def __init__(self):
|
||||
self.api_url: Optional[str] = None
|
||||
self.api_token: Optional[str] = None
|
||||
self.owner: Optional[str] = None
|
||||
self.repo: Optional[str] = None
|
||||
self.mode: str = 'project'
|
||||
|
||||
def load(self) -> Dict[str, Optional[str]]:
|
||||
"""
|
||||
Load configuration from system and project levels.
|
||||
Project-level configuration overrides system-level.
|
||||
|
||||
Returns:
|
||||
Dict containing api_url, api_token, owner, repo, mode
|
||||
|
||||
Raises:
|
||||
FileNotFoundError: If system config is missing
|
||||
ValueError: If required configuration is missing
|
||||
"""
|
||||
# Load system config
|
||||
system_config = Path.home() / '.config' / 'claude' / 'gitea.env'
|
||||
if system_config.exists():
|
||||
load_dotenv(system_config)
|
||||
logger.info(f"Loaded system configuration from {system_config}")
|
||||
else:
|
||||
raise FileNotFoundError(
|
||||
f"System config not found: {system_config}\n"
|
||||
"Create it with: mkdir -p ~/.config/claude && "
|
||||
"cat > ~/.config/claude/gitea.env"
|
||||
)
|
||||
|
||||
# Load project config (overrides system)
|
||||
project_config = Path.cwd() / '.env'
|
||||
if project_config.exists():
|
||||
load_dotenv(project_config, override=True)
|
||||
logger.info(f"Loaded project configuration from {project_config}")
|
||||
|
||||
# Extract values
|
||||
self.api_url = os.getenv('GITEA_API_URL')
|
||||
self.api_token = os.getenv('GITEA_API_TOKEN')
|
||||
self.owner = os.getenv('GITEA_OWNER')
|
||||
self.repo = os.getenv('GITEA_REPO') # Optional for PMO
|
||||
|
||||
# Detect mode
|
||||
if self.repo:
|
||||
self.mode = 'project'
|
||||
logger.info(f"Running in project mode: {self.repo}")
|
||||
else:
|
||||
self.mode = 'company'
|
||||
logger.info("Running in company-wide mode (PMO)")
|
||||
|
||||
# Validate required variables
|
||||
self._validate()
|
||||
|
||||
return {
|
||||
'api_url': self.api_url,
|
||||
'api_token': self.api_token,
|
||||
'owner': self.owner,
|
||||
'repo': self.repo,
|
||||
'mode': self.mode
|
||||
}
|
||||
|
||||
def _validate(self) -> None:
|
||||
"""
|
||||
Validate that required configuration is present.
|
||||
|
||||
Raises:
|
||||
ValueError: If required configuration is missing
|
||||
"""
|
||||
required = {
|
||||
'GITEA_API_URL': self.api_url,
|
||||
'GITEA_API_TOKEN': self.api_token,
|
||||
'GITEA_OWNER': self.owner
|
||||
}
|
||||
|
||||
missing = [key for key, value in required.items() if not value]
|
||||
|
||||
if missing:
|
||||
raise ValueError(
|
||||
f"Missing required configuration: {', '.join(missing)}\n"
|
||||
"Check your ~/.config/claude/gitea.env file"
|
||||
)
|
||||
328
mcp-servers/gitea/mcp_server/gitea_client.py
Normal file
@@ -0,0 +1,328 @@
|
||||
"""
|
||||
Gitea API client for interacting with Gitea API.
|
||||
|
||||
Provides synchronous methods for:
|
||||
- Issue CRUD operations
|
||||
- Label management
|
||||
- Repository operations
|
||||
- PMO multi-repo aggregation
|
||||
"""
|
||||
import requests
|
||||
import logging
|
||||
from typing import List, Dict, Optional
|
||||
from .config import GiteaConfig
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GiteaClient:
|
||||
"""Client for interacting with Gitea API"""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize Gitea client with configuration"""
|
||||
config = GiteaConfig()
|
||||
config_dict = config.load()
|
||||
|
||||
self.base_url = config_dict['api_url']
|
||||
self.token = config_dict['api_token']
|
||||
self.owner = config_dict['owner']
|
||||
self.repo = config_dict.get('repo') # Optional for PMO
|
||||
self.mode = config_dict['mode']
|
||||
|
||||
self.session = requests.Session()
|
||||
self.session.headers.update({
|
||||
'Authorization': f'token {self.token}',
|
||||
'Content-Type': 'application/json'
|
||||
})
|
||||
|
||||
logger.info(f"Gitea client initialized for {self.owner} in {self.mode} mode")
|
||||
|
||||
def list_issues(
|
||||
self,
|
||||
state: str = 'open',
|
||||
labels: Optional[List[str]] = None,
|
||||
repo: Optional[str] = None
|
||||
) -> List[Dict]:
|
||||
"""
|
||||
List issues from Gitea repository.
|
||||
|
||||
Args:
|
||||
state: Issue state (open, closed, all)
|
||||
labels: Filter by labels
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
List of issue dictionaries
|
||||
|
||||
Raises:
|
||||
ValueError: If repository not specified
|
||||
requests.HTTPError: If API request fails
|
||||
"""
|
||||
target_repo = repo or self.repo
|
||||
if not target_repo:
|
||||
raise ValueError("Repository not specified")
|
||||
|
||||
url = f"{self.base_url}/repos/{self.owner}/{target_repo}/issues"
|
||||
params = {'state': state}
|
||||
|
||||
if labels:
|
||||
params['labels'] = ','.join(labels)
|
||||
|
||||
logger.info(f"Listing issues from {self.owner}/{target_repo} with state={state}")
|
||||
response = self.session.get(url, params=params)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def get_issue(
|
||||
self,
|
||||
issue_number: int,
|
||||
repo: Optional[str] = None
|
||||
) -> Dict:
|
||||
"""
|
||||
Get specific issue details.
|
||||
|
||||
Args:
|
||||
issue_number: Issue number
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
Issue dictionary
|
||||
|
||||
Raises:
|
||||
ValueError: If repository not specified
|
||||
requests.HTTPError: If API request fails
|
||||
"""
|
||||
target_repo = repo or self.repo
|
||||
if not target_repo:
|
||||
raise ValueError("Repository not specified")
|
||||
|
||||
url = f"{self.base_url}/repos/{self.owner}/{target_repo}/issues/{issue_number}"
|
||||
logger.info(f"Getting issue #{issue_number} from {self.owner}/{target_repo}")
|
||||
response = self.session.get(url)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def create_issue(
|
||||
self,
|
||||
title: str,
|
||||
body: str,
|
||||
labels: Optional[List[str]] = None,
|
||||
repo: Optional[str] = None
|
||||
) -> Dict:
|
||||
"""
|
||||
Create a new issue in Gitea.
|
||||
|
||||
Args:
|
||||
title: Issue title
|
||||
body: Issue description
|
||||
labels: List of label names
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
Created issue dictionary
|
||||
|
||||
Raises:
|
||||
ValueError: If repository not specified
|
||||
requests.HTTPError: If API request fails
|
||||
"""
|
||||
target_repo = repo or self.repo
|
||||
if not target_repo:
|
||||
raise ValueError("Repository not specified")
|
||||
|
||||
url = f"{self.base_url}/repos/{self.owner}/{target_repo}/issues"
|
||||
data = {
|
||||
'title': title,
|
||||
'body': body
|
||||
}
|
||||
|
||||
if labels:
|
||||
data['labels'] = labels
|
||||
|
||||
logger.info(f"Creating issue in {self.owner}/{target_repo}: {title}")
|
||||
response = self.session.post(url, json=data)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def update_issue(
|
||||
self,
|
||||
issue_number: int,
|
||||
title: Optional[str] = None,
|
||||
body: Optional[str] = None,
|
||||
state: Optional[str] = None,
|
||||
labels: Optional[List[str]] = None,
|
||||
repo: Optional[str] = None
|
||||
) -> Dict:
|
||||
"""
|
||||
Update existing issue.
|
||||
|
||||
Args:
|
||||
issue_number: Issue number
|
||||
title: New title (optional)
|
||||
body: New body (optional)
|
||||
state: New state - 'open' or 'closed' (optional)
|
||||
labels: New labels (optional)
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
Updated issue dictionary
|
||||
|
||||
Raises:
|
||||
ValueError: If repository not specified
|
||||
requests.HTTPError: If API request fails
|
||||
"""
|
||||
target_repo = repo or self.repo
|
||||
if not target_repo:
|
||||
raise ValueError("Repository not specified")
|
||||
|
||||
url = f"{self.base_url}/repos/{self.owner}/{target_repo}/issues/{issue_number}"
|
||||
data = {}
|
||||
|
||||
if title is not None:
|
||||
data['title'] = title
|
||||
if body is not None:
|
||||
data['body'] = body
|
||||
if state is not None:
|
||||
data['state'] = state
|
||||
if labels is not None:
|
||||
data['labels'] = labels
|
||||
|
||||
logger.info(f"Updating issue #{issue_number} in {self.owner}/{target_repo}")
|
||||
response = self.session.patch(url, json=data)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def add_comment(
|
||||
self,
|
||||
issue_number: int,
|
||||
comment: str,
|
||||
repo: Optional[str] = None
|
||||
) -> Dict:
|
||||
"""
|
||||
Add comment to issue.
|
||||
|
||||
Args:
|
||||
issue_number: Issue number
|
||||
comment: Comment text
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
Created comment dictionary
|
||||
|
||||
Raises:
|
||||
ValueError: If repository not specified
|
||||
requests.HTTPError: If API request fails
|
||||
"""
|
||||
target_repo = repo or self.repo
|
||||
if not target_repo:
|
||||
raise ValueError("Repository not specified")
|
||||
|
||||
url = f"{self.base_url}/repos/{self.owner}/{target_repo}/issues/{issue_number}/comments"
|
||||
data = {'body': comment}
|
||||
|
||||
logger.info(f"Adding comment to issue #{issue_number} in {self.owner}/{target_repo}")
|
||||
response = self.session.post(url, json=data)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def get_labels(
|
||||
self,
|
||||
repo: Optional[str] = None
|
||||
) -> List[Dict]:
|
||||
"""
|
||||
Get all labels from repository.
|
||||
|
||||
Args:
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
List of label dictionaries
|
||||
|
||||
Raises:
|
||||
ValueError: If repository not specified
|
||||
requests.HTTPError: If API request fails
|
||||
"""
|
||||
target_repo = repo or self.repo
|
||||
if not target_repo:
|
||||
raise ValueError("Repository not specified")
|
||||
|
||||
url = f"{self.base_url}/repos/{self.owner}/{target_repo}/labels"
|
||||
logger.info(f"Getting labels from {self.owner}/{target_repo}")
|
||||
response = self.session.get(url)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def get_org_labels(self) -> List[Dict]:
|
||||
"""
|
||||
Get organization-level labels.
|
||||
|
||||
Returns:
|
||||
List of organization label dictionaries
|
||||
|
||||
Raises:
|
||||
requests.HTTPError: If API request fails
|
||||
"""
|
||||
url = f"{self.base_url}/orgs/{self.owner}/labels"
|
||||
logger.info(f"Getting organization labels for {self.owner}")
|
||||
response = self.session.get(url)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
# PMO-specific methods
|
||||
|
||||
def list_repos(self) -> List[Dict]:
|
||||
"""
|
||||
List all repositories in organization (PMO mode).
|
||||
|
||||
Returns:
|
||||
List of repository dictionaries
|
||||
|
||||
Raises:
|
||||
requests.HTTPError: If API request fails
|
||||
"""
|
||||
url = f"{self.base_url}/orgs/{self.owner}/repos"
|
||||
logger.info(f"Listing all repositories for organization {self.owner}")
|
||||
response = self.session.get(url)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
def aggregate_issues(
|
||||
self,
|
||||
state: str = 'open',
|
||||
labels: Optional[List[str]] = None
|
||||
) -> Dict[str, List[Dict]]:
|
||||
"""
|
||||
Fetch issues across all repositories (PMO mode).
|
||||
Returns dict keyed by repository name.
|
||||
|
||||
Args:
|
||||
state: Issue state (open, closed, all)
|
||||
labels: Filter by labels
|
||||
|
||||
Returns:
|
||||
Dictionary mapping repository names to issue lists
|
||||
|
||||
Raises:
|
||||
requests.HTTPError: If API request fails
|
||||
"""
|
||||
repos = self.list_repos()
|
||||
aggregated = {}
|
||||
|
||||
logger.info(f"Aggregating issues across {len(repos)} repositories")
|
||||
|
||||
for repo in repos:
|
||||
repo_name = repo['name']
|
||||
try:
|
||||
issues = self.list_issues(
|
||||
state=state,
|
||||
labels=labels,
|
||||
repo=repo_name
|
||||
)
|
||||
if issues:
|
||||
aggregated[repo_name] = issues
|
||||
logger.info(f"Found {len(issues)} issues in {repo_name}")
|
||||
except Exception as e:
|
||||
# Log error but continue with other repos
|
||||
logger.error(f"Error fetching issues from {repo_name}: {e}")
|
||||
|
||||
return aggregated
|
||||
300
mcp-servers/gitea/mcp_server/server.py
Normal file
@@ -0,0 +1,300 @@
|
||||
"""
|
||||
MCP Server entry point for Gitea integration.
|
||||
|
||||
Provides Gitea tools to Claude Code via JSON-RPC 2.0 over stdio.
|
||||
"""
|
||||
import asyncio
|
||||
import logging
|
||||
import json
|
||||
from mcp.server import Server
|
||||
from mcp.server.stdio import stdio_server
|
||||
from mcp.types import Tool, TextContent
|
||||
|
||||
from .config import GiteaConfig
|
||||
from .gitea_client import GiteaClient
|
||||
from .tools.issues import IssueTools
|
||||
from .tools.labels import LabelTools
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GiteaMCPServer:
|
||||
"""MCP Server for Gitea integration"""
|
||||
|
||||
def __init__(self):
|
||||
self.server = Server("gitea-mcp")
|
||||
self.config = None
|
||||
self.client = None
|
||||
self.issue_tools = None
|
||||
self.label_tools = None
|
||||
|
||||
async def initialize(self):
|
||||
"""
|
||||
Initialize server and load configuration.
|
||||
|
||||
Raises:
|
||||
Exception: If initialization fails
|
||||
"""
|
||||
try:
|
||||
config_loader = GiteaConfig()
|
||||
self.config = config_loader.load()
|
||||
|
||||
self.client = GiteaClient()
|
||||
self.issue_tools = IssueTools(self.client)
|
||||
self.label_tools = LabelTools(self.client)
|
||||
|
||||
logger.info(f"Gitea MCP Server initialized in {self.config['mode']} mode")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to initialize: {e}")
|
||||
raise
|
||||
|
||||
def setup_tools(self):
|
||||
"""Register all available tools with the MCP server"""
|
||||
|
||||
@self.server.list_tools()
|
||||
async def list_tools() -> list[Tool]:
|
||||
"""Return list of available tools"""
|
||||
return [
|
||||
Tool(
|
||||
name="list_issues",
|
||||
description="List issues from Gitea repository",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"state": {
|
||||
"type": "string",
|
||||
"enum": ["open", "closed", "all"],
|
||||
"default": "open",
|
||||
"description": "Issue state filter"
|
||||
},
|
||||
"labels": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "Filter by labels"
|
||||
},
|
||||
"repo": {
|
||||
"type": "string",
|
||||
"description": "Repository name (for PMO mode)"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="get_issue",
|
||||
description="Get specific issue details",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"issue_number": {
|
||||
"type": "integer",
|
||||
"description": "Issue number"
|
||||
},
|
||||
"repo": {
|
||||
"type": "string",
|
||||
"description": "Repository name (for PMO mode)"
|
||||
}
|
||||
},
|
||||
"required": ["issue_number"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="create_issue",
|
||||
description="Create a new issue in Gitea",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "Issue title"
|
||||
},
|
||||
"body": {
|
||||
"type": "string",
|
||||
"description": "Issue description"
|
||||
},
|
||||
"labels": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "List of label names"
|
||||
},
|
||||
"repo": {
|
||||
"type": "string",
|
||||
"description": "Repository name (for PMO mode)"
|
||||
}
|
||||
},
|
||||
"required": ["title", "body"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="update_issue",
|
||||
description="Update existing issue",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"issue_number": {
|
||||
"type": "integer",
|
||||
"description": "Issue number"
|
||||
},
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "New title"
|
||||
},
|
||||
"body": {
|
||||
"type": "string",
|
||||
"description": "New body"
|
||||
},
|
||||
"state": {
|
||||
"type": "string",
|
||||
"enum": ["open", "closed"],
|
||||
"description": "New state"
|
||||
},
|
||||
"labels": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "New labels"
|
||||
},
|
||||
"repo": {
|
||||
"type": "string",
|
||||
"description": "Repository name (for PMO mode)"
|
||||
}
|
||||
},
|
||||
"required": ["issue_number"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="add_comment",
|
||||
description="Add comment to issue",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"issue_number": {
|
||||
"type": "integer",
|
||||
"description": "Issue number"
|
||||
},
|
||||
"comment": {
|
||||
"type": "string",
|
||||
"description": "Comment text"
|
||||
},
|
||||
"repo": {
|
||||
"type": "string",
|
||||
"description": "Repository name (for PMO mode)"
|
||||
}
|
||||
},
|
||||
"required": ["issue_number", "comment"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="get_labels",
|
||||
description="Get all available labels (org + repo)",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"repo": {
|
||||
"type": "string",
|
||||
"description": "Repository name (for PMO mode)"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="suggest_labels",
|
||||
description="Analyze context and suggest appropriate labels",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"context": {
|
||||
"type": "string",
|
||||
"description": "Issue title + description or sprint context"
|
||||
}
|
||||
},
|
||||
"required": ["context"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="aggregate_issues",
|
||||
description="Fetch issues across all repositories (PMO mode)",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"state": {
|
||||
"type": "string",
|
||||
"enum": ["open", "closed", "all"],
|
||||
"default": "open",
|
||||
"description": "Issue state filter"
|
||||
},
|
||||
"labels": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "Filter by labels"
|
||||
}
|
||||
}
|
||||
}
|
||||
)
|
||||
]
|
||||
|
||||
@self.server.call_tool()
|
||||
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
|
||||
"""
|
||||
Handle tool invocation.
|
||||
|
||||
Args:
|
||||
name: Tool name
|
||||
arguments: Tool arguments
|
||||
|
||||
Returns:
|
||||
List of TextContent with results
|
||||
"""
|
||||
try:
|
||||
# Route to appropriate tool handler
|
||||
if name == "list_issues":
|
||||
result = await self.issue_tools.list_issues(**arguments)
|
||||
elif name == "get_issue":
|
||||
result = await self.issue_tools.get_issue(**arguments)
|
||||
elif name == "create_issue":
|
||||
result = await self.issue_tools.create_issue(**arguments)
|
||||
elif name == "update_issue":
|
||||
result = await self.issue_tools.update_issue(**arguments)
|
||||
elif name == "add_comment":
|
||||
result = await self.issue_tools.add_comment(**arguments)
|
||||
elif name == "get_labels":
|
||||
result = await self.label_tools.get_labels(**arguments)
|
||||
elif name == "suggest_labels":
|
||||
result = await self.label_tools.suggest_labels(**arguments)
|
||||
elif name == "aggregate_issues":
|
||||
result = await self.issue_tools.aggregate_issues(**arguments)
|
||||
else:
|
||||
raise ValueError(f"Unknown tool: {name}")
|
||||
|
||||
return [TextContent(
|
||||
type="text",
|
||||
text=json.dumps(result, indent=2)
|
||||
)]
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Tool {name} failed: {e}")
|
||||
return [TextContent(
|
||||
type="text",
|
||||
text=f"Error: {str(e)}"
|
||||
)]
|
||||
|
||||
async def run(self):
|
||||
"""Run the MCP server"""
|
||||
await self.initialize()
|
||||
self.setup_tools()
|
||||
|
||||
async with stdio_server() as (read_stream, write_stream):
|
||||
await self.server.run(
|
||||
read_stream,
|
||||
write_stream,
|
||||
self.server.create_initialization_options()
|
||||
)
|
||||
|
||||
|
||||
async def main():
|
||||
"""Main entry point"""
|
||||
server = GiteaMCPServer()
|
||||
await server.run()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
7
mcp-servers/gitea/mcp_server/tools/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
||||
"""
|
||||
MCP tools for Gitea integration.
|
||||
|
||||
This package provides MCP tool implementations for:
|
||||
- Issue operations (issues.py)
|
||||
- Label management (labels.py)
|
||||
"""
|
||||
279
mcp-servers/gitea/mcp_server/tools/issues.py
Normal file
@@ -0,0 +1,279 @@
|
||||
"""
|
||||
Issue management tools for MCP server.
|
||||
|
||||
Provides async wrappers for issue CRUD operations with:
|
||||
- Branch-aware security
|
||||
- PMO multi-repo support
|
||||
- Comprehensive error handling
|
||||
"""
|
||||
import asyncio
|
||||
import subprocess
|
||||
import logging
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class IssueTools:
|
||||
"""Async wrappers for Gitea issue operations with branch detection"""
|
||||
|
||||
def __init__(self, gitea_client):
|
||||
"""
|
||||
Initialize issue tools.
|
||||
|
||||
Args:
|
||||
gitea_client: GiteaClient instance
|
||||
"""
|
||||
self.gitea = gitea_client
|
||||
|
||||
def _get_current_branch(self) -> str:
|
||||
"""
|
||||
Get current git branch.
|
||||
|
||||
Returns:
|
||||
Current branch name or 'unknown' if not in a git repo
|
||||
"""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
check=True
|
||||
)
|
||||
return result.stdout.strip()
|
||||
except subprocess.CalledProcessError:
|
||||
return "unknown"
|
||||
|
||||
def _check_branch_permissions(self, operation: str) -> bool:
|
||||
"""
|
||||
Check if operation is allowed on current branch.
|
||||
|
||||
Args:
|
||||
operation: Operation name (list_issues, create_issue, etc.)
|
||||
|
||||
Returns:
|
||||
True if operation is allowed, False otherwise
|
||||
"""
|
||||
branch = self._get_current_branch()
|
||||
|
||||
# Production branches (read-only except incidents)
|
||||
if branch in ['main', 'master'] or branch.startswith('prod/'):
|
||||
return operation in ['list_issues', 'get_issue', 'get_labels']
|
||||
|
||||
# Staging branches (code is read-only; issue creation is still allowed)
|
||||
if branch == 'staging' or branch.startswith('stage/'):
|
||||
return operation in ['list_issues', 'get_issue', 'get_labels', 'create_issue']
|
||||
|
||||
# Development branches (full access)
|
||||
if branch in ['development', 'develop'] or branch.startswith(('feat/', 'feature/', 'dev/')):
|
||||
return True
|
||||
|
||||
# Unknown branch - be restrictive
|
||||
return False
|
||||
|
||||
async def list_issues(
|
||||
self,
|
||||
state: str = 'open',
|
||||
labels: Optional[List[str]] = None,
|
||||
repo: Optional[str] = None
|
||||
) -> List[Dict]:
|
||||
"""
|
||||
List issues from repository (async wrapper).
|
||||
|
||||
Args:
|
||||
state: Issue state (open, closed, all)
|
||||
labels: Filter by labels
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
List of issue dictionaries
|
||||
|
||||
Raises:
|
||||
PermissionError: If operation not allowed on current branch
|
||||
"""
|
||||
if not self._check_branch_permissions('list_issues'):
|
||||
branch = self._get_current_branch()
|
||||
raise PermissionError(
|
||||
f"Cannot list issues on branch '{branch}'. "
|
||||
f"Switch to a development branch."
|
||||
)
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
return await loop.run_in_executor(
|
||||
None,
|
||||
lambda: self.gitea.list_issues(state, labels, repo)
|
||||
)
|
||||
|
||||
async def get_issue(
|
||||
self,
|
||||
issue_number: int,
|
||||
repo: Optional[str] = None
|
||||
) -> Dict:
|
||||
"""
|
||||
Get specific issue details (async wrapper).
|
||||
|
||||
Args:
|
||||
issue_number: Issue number
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
Issue dictionary
|
||||
|
||||
Raises:
|
||||
PermissionError: If operation not allowed on current branch
|
||||
"""
|
||||
if not self._check_branch_permissions('get_issue'):
|
||||
branch = self._get_current_branch()
|
||||
raise PermissionError(
|
||||
f"Cannot get issue on branch '{branch}'. "
|
||||
f"Switch to a development branch."
|
||||
)
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
return await loop.run_in_executor(
|
||||
None,
|
||||
lambda: self.gitea.get_issue(issue_number, repo)
|
||||
)
|
||||
|
||||
async def create_issue(
|
||||
self,
|
||||
title: str,
|
||||
body: str,
|
||||
labels: Optional[List[str]] = None,
|
||||
repo: Optional[str] = None
|
||||
) -> Dict:
|
||||
"""
|
||||
Create new issue (async wrapper with branch check).
|
||||
|
||||
Args:
|
||||
title: Issue title
|
||||
body: Issue description
|
||||
labels: List of label names
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
Created issue dictionary
|
||||
|
||||
Raises:
|
||||
PermissionError: If operation not allowed on current branch
|
||||
"""
|
||||
if not self._check_branch_permissions('create_issue'):
|
||||
branch = self._get_current_branch()
|
||||
raise PermissionError(
|
||||
f"Cannot create issues on branch '{branch}'. "
|
||||
f"Switch to a development branch to create issues."
|
||||
)
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
return await loop.run_in_executor(
|
||||
None,
|
||||
lambda: self.gitea.create_issue(title, body, labels, repo)
|
||||
)
|
||||
|
||||
async def update_issue(
|
||||
self,
|
||||
issue_number: int,
|
||||
title: Optional[str] = None,
|
||||
body: Optional[str] = None,
|
||||
state: Optional[str] = None,
|
||||
labels: Optional[List[str]] = None,
|
||||
repo: Optional[str] = None
|
||||
) -> Dict:
|
||||
"""
|
||||
Update existing issue (async wrapper with branch check).
|
||||
|
||||
Args:
|
||||
issue_number: Issue number
|
||||
title: New title (optional)
|
||||
body: New body (optional)
|
||||
state: New state - 'open' or 'closed' (optional)
|
||||
labels: New labels (optional)
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
Updated issue dictionary
|
||||
|
||||
Raises:
|
||||
PermissionError: If operation not allowed on current branch
|
||||
"""
|
||||
if not self._check_branch_permissions('update_issue'):
|
||||
branch = self._get_current_branch()
|
||||
raise PermissionError(
|
||||
f"Cannot update issues on branch '{branch}'. "
|
||||
f"Switch to a development branch to update issues."
|
||||
)
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
return await loop.run_in_executor(
|
||||
None,
|
||||
lambda: self.gitea.update_issue(issue_number, title, body, state, labels, repo)
|
||||
)
|
||||
|
||||
async def add_comment(
|
||||
self,
|
||||
issue_number: int,
|
||||
comment: str,
|
||||
repo: Optional[str] = None
|
||||
) -> Dict:
|
||||
"""
|
||||
Add comment to issue (async wrapper with branch check).
|
||||
|
||||
Args:
|
||||
issue_number: Issue number
|
||||
comment: Comment text
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
Created comment dictionary
|
||||
|
||||
Raises:
|
||||
PermissionError: If operation not allowed on current branch
|
||||
"""
|
||||
if not self._check_branch_permissions('add_comment'):
|
||||
branch = self._get_current_branch()
|
||||
raise PermissionError(
|
||||
f"Cannot add comments on branch '{branch}'. "
|
||||
f"Switch to a development branch to add comments."
|
||||
)
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
return await loop.run_in_executor(
|
||||
None,
|
||||
lambda: self.gitea.add_comment(issue_number, comment, repo)
|
||||
)
|
||||
|
||||
async def aggregate_issues(
|
||||
self,
|
||||
state: str = 'open',
|
||||
labels: Optional[List[str]] = None
|
||||
) -> Dict[str, List[Dict]]:
|
||||
"""
|
||||
Aggregate issues across all repositories (PMO mode, async wrapper).
|
||||
|
||||
Args:
|
||||
state: Issue state (open, closed, all)
|
||||
labels: Filter by labels
|
||||
|
||||
Returns:
|
||||
Dictionary mapping repository names to issue lists
|
||||
|
||||
Raises:
|
||||
ValueError: If not in company mode
|
||||
PermissionError: If operation not allowed on current branch
|
||||
"""
|
||||
if self.gitea.mode != 'company':
|
||||
raise ValueError("aggregate_issues only available in company mode")
|
||||
|
||||
if not self._check_branch_permissions('aggregate_issues'):
|
||||
branch = self._get_current_branch()
|
||||
raise PermissionError(
|
||||
f"Cannot aggregate issues on branch '{branch}'. "
|
||||
f"Switch to a development branch."
|
||||
)
|
||||
|
||||
loop = asyncio.get_event_loop()
|
||||
return await loop.run_in_executor(
|
||||
None,
|
||||
lambda: self.gitea.aggregate_issues(state, labels)
|
||||
)
|
||||
165
mcp-servers/gitea/mcp_server/tools/labels.py
Normal file
@@ -0,0 +1,165 @@
|
||||
"""
|
||||
Label management tools for MCP server.
|
||||
|
||||
Provides async wrappers for label operations with:
|
||||
- Label taxonomy retrieval
|
||||
- Intelligent label suggestion
|
||||
- Dynamic label detection
|
||||
"""
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LabelTools:
|
||||
"""Async wrappers for Gitea label operations"""
|
||||
|
||||
def __init__(self, gitea_client):
|
||||
"""
|
||||
Initialize label tools.
|
||||
|
||||
Args:
|
||||
gitea_client: GiteaClient instance
|
||||
"""
|
||||
self.gitea = gitea_client
|
||||
|
||||
async def get_labels(self, repo: Optional[str] = None) -> Dict[str, List[Dict]]:
|
||||
"""
|
||||
Get all labels (org + repo) (async wrapper).
|
||||
|
||||
Args:
|
||||
repo: Override configured repo (for PMO multi-repo)
|
||||
|
||||
Returns:
|
||||
Dictionary with 'org' and 'repo' label lists
|
||||
"""
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
# Get org labels
|
||||
org_labels = await loop.run_in_executor(
|
||||
None,
|
||||
self.gitea.get_org_labels
|
||||
)
|
||||
|
||||
# Get repo labels if repo is specified
|
||||
repo_labels = []
|
||||
if repo or self.gitea.repo:
|
||||
target_repo = repo or self.gitea.repo
|
||||
repo_labels = await loop.run_in_executor(
|
||||
None,
|
||||
lambda: self.gitea.get_labels(target_repo)
|
||||
)
|
||||
|
||||
return {
|
||||
'organization': org_labels,
|
||||
'repository': repo_labels,
|
||||
'total_count': len(org_labels) + len(repo_labels)
|
||||
}
|
||||
|
||||
async def suggest_labels(self, context: str) -> List[str]:
|
||||
"""
|
||||
Analyze context and suggest appropriate labels.
|
||||
|
||||
Args:
|
||||
context: Issue title + description or sprint context
|
||||
|
||||
Returns:
|
||||
List of suggested label names
|
||||
"""
|
||||
suggested = []
|
||||
context_lower = context.lower()
|
||||
|
||||
# Type detection (exclusive - only one)
|
||||
if any(word in context_lower for word in ['bug', 'error', 'fix', 'broken', 'crash', 'fail']):
|
||||
suggested.append('Type/Bug')
|
||||
elif any(word in context_lower for word in ['refactor', 'extract', 'restructure', 'architecture', 'service extraction']):
|
||||
suggested.append('Type/Refactor')
|
||||
elif any(word in context_lower for word in ['feature', 'add', 'implement', 'new', 'create']):
|
||||
suggested.append('Type/Feature')
|
||||
elif any(word in context_lower for word in ['docs', 'documentation', 'readme', 'guide']):
|
||||
suggested.append('Type/Documentation')
|
||||
elif any(word in context_lower for word in ['test', 'testing', 'spec', 'coverage']):
|
||||
suggested.append('Type/Test')
|
||||
elif any(word in context_lower for word in ['chore', 'maintenance', 'update', 'upgrade']):
|
||||
suggested.append('Type/Chore')
|
||||
|
||||
# Priority detection
|
||||
if any(word in context_lower for word in ['critical', 'urgent', 'blocker', 'blocking', 'emergency']):
|
||||
suggested.append('Priority/Critical')
|
||||
elif any(word in context_lower for word in ['high', 'important', 'asap', 'soon']):
|
||||
suggested.append('Priority/High')
|
||||
elif any(word in context_lower for word in ['low', 'nice-to-have', 'optional', 'later']):
|
||||
suggested.append('Priority/Low')
|
||||
else:
|
||||
suggested.append('Priority/Medium')
|
||||
|
||||
# Complexity detection
|
||||
if any(word in context_lower for word in ['simple', 'trivial', 'easy', 'quick']):
|
||||
suggested.append('Complexity/Simple')
|
||||
elif any(word in context_lower for word in ['complex', 'difficult', 'challenging', 'intricate']):
|
||||
suggested.append('Complexity/Complex')
|
||||
else:
|
||||
suggested.append('Complexity/Medium')
|
||||
|
||||
# Efforts detection
|
||||
if any(word in context_lower for word in ['xs', 'tiny', '1 hour', '2 hours']):
|
||||
suggested.append('Efforts/XS')
|
||||
elif any(word in context_lower for word in ['small', 's ', '1 day', 'half day']):
|
||||
suggested.append('Efforts/S')
|
||||
elif any(word in context_lower for word in ['medium', 'm ', '2 days', '3 days']):
|
||||
suggested.append('Efforts/M')
|
||||
elif any(word in context_lower for word in ['large', 'l ', '1 week', '5 days']):
|
||||
suggested.append('Efforts/L')
|
||||
elif any(word in context_lower for word in ['xl', 'extra large', '2 weeks', 'sprint']):
|
||||
suggested.append('Efforts/XL')
|
||||
|
||||
# Component detection (based on keywords)
|
||||
component_keywords = {
|
||||
'Component/Backend': ['backend', 'server', 'api', 'database', 'service'],
|
||||
'Component/Frontend': ['frontend', 'ui', 'interface', 'react', 'vue', 'component'],
|
||||
'Component/API': ['api', 'endpoint', 'rest', 'graphql', 'route'],
|
||||
'Component/Database': ['database', 'db', 'sql', 'migration', 'schema', 'postgres'],
|
||||
'Component/Auth': ['auth', 'authentication', 'login', 'oauth', 'token', 'session'],
|
||||
'Component/Deploy': ['deploy', 'deployment', 'docker', 'kubernetes', 'ci/cd'],
|
||||
'Component/Testing': ['test', 'testing', 'spec', 'jest', 'pytest', 'coverage'],
|
||||
'Component/Docs': ['docs', 'documentation', 'readme', 'guide', 'wiki']
|
||||
}
|
||||
|
||||
for label, keywords in component_keywords.items():
|
||||
if any(keyword in context_lower for keyword in keywords):
|
||||
suggested.append(label)
|
||||
|
||||
# Tech stack detection
|
||||
tech_keywords = {
|
||||
'Tech/Python': ['python', 'fastapi', 'django', 'flask', 'pytest'],
|
||||
'Tech/JavaScript': ['javascript', 'js', 'node', 'npm', 'yarn'],
|
||||
'Tech/Docker': ['docker', 'dockerfile', 'container', 'compose'],
|
||||
'Tech/PostgreSQL': ['postgres', 'postgresql', 'psql', 'sql'],
|
||||
'Tech/Redis': ['redis', 'cache', 'session store'],
|
||||
'Tech/Vue': ['vue', 'vuejs', 'nuxt'],
|
||||
'Tech/FastAPI': ['fastapi', 'pydantic', 'starlette']
|
||||
}
|
||||
|
||||
for label, keywords in tech_keywords.items():
|
||||
if any(keyword in context_lower for keyword in keywords):
|
||||
suggested.append(label)
|
||||
|
||||
# Source detection (based on git branch or context)
|
||||
if 'development' in context_lower or 'dev/' in context_lower:
|
||||
suggested.append('Source/Development')
|
||||
elif 'staging' in context_lower or 'stage/' in context_lower:
|
||||
suggested.append('Source/Staging')
|
||||
elif 'production' in context_lower or 'prod' in context_lower:
|
||||
suggested.append('Source/Production')
|
||||
|
||||
# Risk detection
|
||||
if any(word in context_lower for word in ['breaking', 'breaking change', 'major', 'risky']):
|
||||
suggested.append('Risk/High')
|
||||
elif any(word in context_lower for word in ['safe', 'low risk', 'minor']):
|
||||
suggested.append('Risk/Low')
|
||||
|
||||
logger.info(f"Suggested {len(suggested)} labels based on context")
|
||||
return suggested
|
||||
6
mcp-servers/gitea/requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
mcp>=0.9.0 # MCP SDK from Anthropic
|
||||
python-dotenv>=1.0.0 # Environment variable loading
|
||||
requests>=2.31.0 # HTTP client for Gitea API
|
||||
pydantic>=2.5.0 # Data validation
|
||||
pytest>=7.4.3 # Testing framework
|
||||
pytest-asyncio>=0.23.0 # Async testing support
|
||||
0
mcp-servers/gitea/tests/__init__.py
Normal file
151
mcp-servers/gitea/tests/test_config.py
Normal file
@@ -0,0 +1,151 @@
|
||||
"""
|
||||
Unit tests for configuration loader.
|
||||
"""
|
||||
import pytest
|
||||
from pathlib import Path
|
||||
import os
|
||||
from mcp_server.config import GiteaConfig
|
||||
|
||||
|
||||
def test_load_system_config(tmp_path, monkeypatch):
|
||||
"""Test loading system-level configuration"""
|
||||
# Mock home directory
|
||||
config_dir = tmp_path / '.config' / 'claude'
|
||||
config_dir.mkdir(parents=True)
|
||||
|
||||
config_file = config_dir / 'gitea.env'
|
||||
config_file.write_text(
|
||||
"GITEA_API_URL=https://test.com/api/v1\n"
|
||||
"GITEA_API_TOKEN=test_token\n"
|
||||
"GITEA_OWNER=test_owner\n"
|
||||
)
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(tmp_path)
|
||||
|
||||
config = GiteaConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['api_url'] == 'https://test.com/api/v1'
|
||||
assert result['api_token'] == 'test_token'
|
||||
assert result['owner'] == 'test_owner'
|
||||
assert result['mode'] == 'company' # No repo specified
|
||||
assert result['repo'] is None
|
||||
|
||||
|
||||
def test_project_config_override(tmp_path, monkeypatch):
|
||||
"""Test that project config overrides system config"""
|
||||
# Set up system config
|
||||
system_config_dir = tmp_path / '.config' / 'claude'
|
||||
system_config_dir.mkdir(parents=True)
|
||||
|
||||
system_config = system_config_dir / 'gitea.env'
|
||||
system_config.write_text(
|
||||
"GITEA_API_URL=https://test.com/api/v1\n"
|
||||
"GITEA_API_TOKEN=test_token\n"
|
||||
"GITEA_OWNER=test_owner\n"
|
||||
)
|
||||
|
||||
# Set up project config
|
||||
project_dir = tmp_path / 'project'
|
||||
project_dir.mkdir()
|
||||
|
||||
project_config = project_dir / '.env'
|
||||
project_config.write_text("GITEA_REPO=test_repo\n")
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(project_dir)
|
||||
|
||||
config = GiteaConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['repo'] == 'test_repo'
|
||||
assert result['mode'] == 'project'
|
||||
|
||||
|
||||
def test_missing_system_config(tmp_path, monkeypatch):
|
||||
"""Test error handling for missing system configuration"""
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(tmp_path)
|
||||
|
||||
with pytest.raises(FileNotFoundError) as exc_info:
|
||||
config = GiteaConfig()
|
||||
config.load()
|
||||
|
||||
assert "System config not found" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_missing_required_config(tmp_path, monkeypatch):
|
||||
"""Test error handling for missing required variables"""
|
||||
# Clear environment variables
|
||||
for var in ['GITEA_API_URL', 'GITEA_API_TOKEN', 'GITEA_OWNER', 'GITEA_REPO']:
|
||||
monkeypatch.delenv(var, raising=False)
|
||||
|
||||
# Create incomplete config
|
||||
config_dir = tmp_path / '.config' / 'claude'
|
||||
config_dir.mkdir(parents=True)
|
||||
|
||||
config_file = config_dir / 'gitea.env'
|
||||
config_file.write_text(
|
||||
"GITEA_API_URL=https://test.com/api/v1\n"
|
||||
# Missing GITEA_API_TOKEN and GITEA_OWNER
|
||||
)
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(tmp_path)
|
||||
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
config = GiteaConfig()
|
||||
config.load()
|
||||
|
||||
assert "Missing required configuration" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_mode_detection_project(tmp_path, monkeypatch):
|
||||
"""Test mode detection for project mode"""
|
||||
config_dir = tmp_path / '.config' / 'claude'
|
||||
config_dir.mkdir(parents=True)
|
||||
|
||||
config_file = config_dir / 'gitea.env'
|
||||
config_file.write_text(
|
||||
"GITEA_API_URL=https://test.com/api/v1\n"
|
||||
"GITEA_API_TOKEN=test_token\n"
|
||||
"GITEA_OWNER=test_owner\n"
|
||||
"GITEA_REPO=test_repo\n"
|
||||
)
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(tmp_path)
|
||||
|
||||
config = GiteaConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['mode'] == 'project'
|
||||
assert result['repo'] == 'test_repo'
|
||||
|
||||
|
||||
def test_mode_detection_company(tmp_path, monkeypatch):
|
||||
"""Test mode detection for company mode (PMO)"""
|
||||
# Clear environment variables, especially GITEA_REPO
|
||||
for var in ['GITEA_API_URL', 'GITEA_API_TOKEN', 'GITEA_OWNER', 'GITEA_REPO']:
|
||||
monkeypatch.delenv(var, raising=False)
|
||||
|
||||
config_dir = tmp_path / '.config' / 'claude'
|
||||
config_dir.mkdir(parents=True)
|
||||
|
||||
config_file = config_dir / 'gitea.env'
|
||||
config_file.write_text(
|
||||
"GITEA_API_URL=https://test.com/api/v1\n"
|
||||
"GITEA_API_TOKEN=test_token\n"
|
||||
"GITEA_OWNER=test_owner\n"
|
||||
# No GITEA_REPO
|
||||
)
|
||||
|
||||
monkeypatch.setenv('HOME', str(tmp_path))
|
||||
monkeypatch.chdir(tmp_path)
|
||||
|
||||
config = GiteaConfig()
|
||||
result = config.load()
|
||||
|
||||
assert result['mode'] == 'company'
|
||||
assert result['repo'] is None
|
||||
224
mcp-servers/gitea/tests/test_gitea_client.py
Normal file
@@ -0,0 +1,224 @@
|
||||
"""
|
||||
Unit tests for Gitea API client.
|
||||
"""
|
||||
import pytest
|
||||
from unittest.mock import Mock, patch, MagicMock
|
||||
from mcp_server.gitea_client import GiteaClient
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_config():
|
||||
"""Fixture providing mocked configuration"""
|
||||
with patch('mcp_server.gitea_client.GiteaConfig') as mock_cfg:
|
||||
mock_instance = mock_cfg.return_value
|
||||
mock_instance.load.return_value = {
|
||||
'api_url': 'https://test.com/api/v1',
|
||||
'api_token': 'test_token',
|
||||
'owner': 'test_owner',
|
||||
'repo': 'test_repo',
|
||||
'mode': 'project'
|
||||
}
|
||||
yield mock_cfg
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def gitea_client(mock_config):
|
||||
"""Fixture providing GiteaClient instance with mocked config"""
|
||||
return GiteaClient()
|
||||
|
||||
|
||||
def test_client_initialization(gitea_client):
|
||||
"""Test client initializes with correct configuration"""
|
||||
assert gitea_client.base_url == 'https://test.com/api/v1'
|
||||
assert gitea_client.token == 'test_token'
|
||||
assert gitea_client.owner == 'test_owner'
|
||||
assert gitea_client.repo == 'test_repo'
|
||||
assert gitea_client.mode == 'project'
|
||||
assert 'Authorization' in gitea_client.session.headers
|
||||
assert gitea_client.session.headers['Authorization'] == 'token test_token'
|
||||
|
||||
|
||||
def test_list_issues(gitea_client):
|
||||
"""Test listing issues"""
|
||||
mock_response = Mock()
|
||||
mock_response.json.return_value = [
|
||||
{'number': 1, 'title': 'Test Issue 1'},
|
||||
{'number': 2, 'title': 'Test Issue 2'}
|
||||
]
|
||||
mock_response.raise_for_status = Mock()
|
||||
|
||||
with patch.object(gitea_client.session, 'get', return_value=mock_response):
|
||||
issues = gitea_client.list_issues(state='open')
|
||||
|
||||
assert len(issues) == 2
|
||||
assert issues[0]['title'] == 'Test Issue 1'
|
||||
gitea_client.session.get.assert_called_once()
|
||||
|
||||
|
||||
def test_list_issues_with_labels(gitea_client):
|
||||
"""Test listing issues with label filter"""
|
||||
mock_response = Mock()
|
||||
mock_response.json.return_value = [{'number': 1, 'title': 'Bug Issue'}]
|
||||
mock_response.raise_for_status = Mock()
|
||||
|
||||
with patch.object(gitea_client.session, 'get', return_value=mock_response):
|
||||
issues = gitea_client.list_issues(state='open', labels=['Type/Bug'])
|
||||
|
||||
gitea_client.session.get.assert_called_once()
|
||||
call_args = gitea_client.session.get.call_args
|
||||
assert call_args[1]['params']['labels'] == 'Type/Bug'
|
||||
|
||||
|
||||
def test_get_issue(gitea_client):
|
||||
"""Test getting specific issue"""
|
||||
mock_response = Mock()
|
||||
mock_response.json.return_value = {'number': 1, 'title': 'Test Issue'}
|
||||
mock_response.raise_for_status = Mock()
|
||||
|
||||
with patch.object(gitea_client.session, 'get', return_value=mock_response):
|
||||
issue = gitea_client.get_issue(1)
|
||||
|
||||
assert issue['number'] == 1
|
||||
assert issue['title'] == 'Test Issue'
|
||||
|
||||
|
||||
def test_create_issue(gitea_client):
|
||||
"""Test creating new issue"""
|
||||
mock_response = Mock()
|
||||
mock_response.json.return_value = {
|
||||
'number': 1,
|
||||
'title': 'New Issue',
|
||||
'body': 'Issue body'
|
||||
}
|
||||
mock_response.raise_for_status = Mock()
|
||||
|
||||
with patch.object(gitea_client.session, 'post', return_value=mock_response):
|
||||
issue = gitea_client.create_issue(
|
||||
title='New Issue',
|
||||
body='Issue body',
|
||||
labels=['Type/Bug']
|
||||
)
|
||||
|
||||
assert issue['title'] == 'New Issue'
|
||||
gitea_client.session.post.assert_called_once()
|
||||
|
||||
|
||||
def test_update_issue(gitea_client):
|
||||
"""Test updating existing issue"""
|
||||
mock_response = Mock()
|
||||
mock_response.json.return_value = {
|
||||
'number': 1,
|
||||
'title': 'Updated Issue'
|
||||
}
|
||||
mock_response.raise_for_status = Mock()
|
||||
|
||||
with patch.object(gitea_client.session, 'patch', return_value=mock_response):
|
||||
issue = gitea_client.update_issue(
|
||||
issue_number=1,
|
||||
title='Updated Issue'
|
||||
)
|
||||
|
||||
assert issue['title'] == 'Updated Issue'
|
||||
gitea_client.session.patch.assert_called_once()
|
||||
|
||||
|
||||
def test_add_comment(gitea_client):
|
||||
"""Test adding comment to issue"""
|
||||
mock_response = Mock()
|
||||
mock_response.json.return_value = {'body': 'Test comment'}
|
||||
mock_response.raise_for_status = Mock()
|
||||
|
||||
with patch.object(gitea_client.session, 'post', return_value=mock_response):
|
||||
comment = gitea_client.add_comment(1, 'Test comment')
|
||||
|
||||
assert comment['body'] == 'Test comment'
|
||||
gitea_client.session.post.assert_called_once()
|
||||
|
||||
|
||||
def test_get_labels(gitea_client):
|
||||
"""Test getting repository labels"""
|
||||
mock_response = Mock()
|
||||
mock_response.json.return_value = [
|
||||
{'name': 'Type/Bug'},
|
||||
{'name': 'Priority/High'}
|
||||
]
|
||||
mock_response.raise_for_status = Mock()
|
||||
|
||||
with patch.object(gitea_client.session, 'get', return_value=mock_response):
|
||||
labels = gitea_client.get_labels()
|
||||
|
||||
assert len(labels) == 2
|
||||
assert labels[0]['name'] == 'Type/Bug'
|
||||
|
||||
|
||||
def test_get_org_labels(gitea_client):
|
||||
"""Test getting organization labels"""
|
||||
mock_response = Mock()
|
||||
mock_response.json.return_value = [
|
||||
{'name': 'Type/Bug'},
|
||||
{'name': 'Type/Feature'}
|
||||
]
|
||||
mock_response.raise_for_status = Mock()
|
||||
|
||||
with patch.object(gitea_client.session, 'get', return_value=mock_response):
|
||||
labels = gitea_client.get_org_labels()
|
||||
|
||||
assert len(labels) == 2
|
||||
|
||||
|
||||
def test_list_repos(gitea_client):
|
||||
"""Test listing organization repositories (PMO mode)"""
|
||||
mock_response = Mock()
|
||||
mock_response.json.return_value = [
|
||||
{'name': 'repo1'},
|
||||
{'name': 'repo2'}
|
||||
]
|
||||
mock_response.raise_for_status = Mock()
|
||||
|
||||
with patch.object(gitea_client.session, 'get', return_value=mock_response):
|
||||
repos = gitea_client.list_repos()
|
||||
|
||||
assert len(repos) == 2
|
||||
assert repos[0]['name'] == 'repo1'
|
||||
|
||||
|
||||
def test_aggregate_issues(gitea_client):
|
||||
"""Test aggregating issues across repositories (PMO mode)"""
|
||||
# Mock list_repos
|
||||
gitea_client.list_repos = Mock(return_value=[
|
||||
{'name': 'repo1'},
|
||||
{'name': 'repo2'}
|
||||
])
|
||||
|
||||
# Mock list_issues
|
||||
gitea_client.list_issues = Mock(side_effect=[
|
||||
[{'number': 1, 'title': 'Issue 1'}], # repo1
|
||||
[{'number': 2, 'title': 'Issue 2'}] # repo2
|
||||
])
|
||||
|
||||
aggregated = gitea_client.aggregate_issues(state='open')
|
||||
|
||||
assert 'repo1' in aggregated
|
||||
assert 'repo2' in aggregated
|
||||
assert len(aggregated['repo1']) == 1
|
||||
assert len(aggregated['repo2']) == 1
|
||||
|
||||
|
||||
def test_no_repo_specified_error(gitea_client):
|
||||
"""Test error when repository not specified"""
|
||||
# Create client without repo
|
||||
with patch('mcp_server.gitea_client.GiteaConfig') as mock_cfg:
|
||||
mock_instance = mock_cfg.return_value
|
||||
mock_instance.load.return_value = {
|
||||
'api_url': 'https://test.com/api/v1',
|
||||
'api_token': 'test_token',
|
||||
'owner': 'test_owner',
|
||||
'repo': None, # No repo
|
||||
'mode': 'company'
|
||||
}
|
||||
client = GiteaClient()
|
||||
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
client.list_issues()
|
||||
|
||||
assert "Repository not specified" in str(exc_info.value)
|
||||
159
mcp-servers/gitea/tests/test_issues.py
Normal file
@@ -0,0 +1,159 @@
|
||||
"""
|
||||
Unit tests for issue tools with branch detection.
|
||||
"""
|
||||
import pytest
|
||||
from unittest.mock import Mock, patch, AsyncMock
|
||||
from mcp_server.tools.issues import IssueTools
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_gitea_client():
|
||||
"""Fixture providing mocked Gitea client"""
|
||||
client = Mock()
|
||||
client.mode = 'project'
|
||||
return client
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def issue_tools(mock_gitea_client):
|
||||
"""Fixture providing IssueTools instance"""
|
||||
return IssueTools(mock_gitea_client)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_issues_development_branch(issue_tools):
|
||||
"""Test listing issues on development branch (allowed)"""
|
||||
with patch.object(issue_tools, '_get_current_branch', return_value='feat/test-feature'):
|
||||
issue_tools.gitea.list_issues = Mock(return_value=[{'number': 1}])
|
||||
|
||||
issues = await issue_tools.list_issues(state='open')
|
||||
|
||||
assert len(issues) == 1
|
||||
issue_tools.gitea.list_issues.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_issue_development_branch(issue_tools):
|
||||
"""Test creating issue on development branch (allowed)"""
|
||||
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
|
||||
issue_tools.gitea.create_issue = Mock(return_value={'number': 1})
|
||||
|
||||
issue = await issue_tools.create_issue('Test', 'Body')
|
||||
|
||||
assert issue['number'] == 1
|
||||
issue_tools.gitea.create_issue.assert_called_once()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_issue_main_branch_blocked(issue_tools):
|
||||
"""Test creating issue on main branch (blocked)"""
|
||||
with patch.object(issue_tools, '_get_current_branch', return_value='main'):
|
||||
with pytest.raises(PermissionError) as exc_info:
|
||||
await issue_tools.create_issue('Test', 'Body')
|
||||
|
||||
assert "Cannot create issues on branch 'main'" in str(exc_info.value)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_create_issue_staging_branch_allowed(issue_tools):
|
||||
"""Test creating issue on staging branch (allowed for documentation)"""
|
||||
with patch.object(issue_tools, '_get_current_branch', return_value='staging'):
|
||||
issue_tools.gitea.create_issue = Mock(return_value={'number': 1})
|
||||
|
||||
issue = await issue_tools.create_issue('Test', 'Body')
|
||||
|
||||
assert issue['number'] == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_update_issue_main_branch_blocked(issue_tools):
|
||||
"""Test updating issue on main branch (blocked)"""
|
||||
with patch.object(issue_tools, '_get_current_branch', return_value='main'):
|
||||
with pytest.raises(PermissionError) as exc_info:
|
||||
await issue_tools.update_issue(1, title='Updated')
|
||||
|
||||
assert "Cannot update issues on branch 'main'" in str(exc_info.value)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_issues_main_branch_allowed(issue_tools):
|
||||
"""Test listing issues on main branch (allowed - read-only)"""
|
||||
with patch.object(issue_tools, '_get_current_branch', return_value='main'):
|
||||
issue_tools.gitea.list_issues = Mock(return_value=[{'number': 1}])
|
||||
|
||||
issues = await issue_tools.list_issues(state='open')
|
||||
|
||||
assert len(issues) == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_issue(issue_tools):
|
||||
"""Test getting specific issue"""
|
||||
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
|
||||
issue_tools.gitea.get_issue = Mock(return_value={'number': 1, 'title': 'Test'})
|
||||
|
||||
issue = await issue_tools.get_issue(1)
|
||||
|
||||
assert issue['number'] == 1
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_add_comment(issue_tools):
|
||||
"""Test adding comment to issue"""
|
||||
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
|
||||
issue_tools.gitea.add_comment = Mock(return_value={'body': 'Test comment'})
|
||||
|
||||
comment = await issue_tools.add_comment(1, 'Test comment')
|
||||
|
||||
assert comment['body'] == 'Test comment'
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_aggregate_issues_company_mode(issue_tools):
|
||||
"""Test aggregating issues in company mode"""
|
||||
issue_tools.gitea.mode = 'company'
|
||||
|
||||
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
|
||||
issue_tools.gitea.aggregate_issues = Mock(return_value={
|
||||
'repo1': [{'number': 1}],
|
||||
'repo2': [{'number': 2}]
|
||||
})
|
||||
|
||||
aggregated = await issue_tools.aggregate_issues()
|
||||
|
||||
assert 'repo1' in aggregated
|
||||
assert 'repo2' in aggregated
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_aggregate_issues_project_mode_error(issue_tools):
|
||||
"""Test that aggregate_issues fails in project mode"""
|
||||
issue_tools.gitea.mode = 'project'
|
||||
|
||||
with patch.object(issue_tools, '_get_current_branch', return_value='development'):
|
||||
with pytest.raises(ValueError) as exc_info:
|
||||
await issue_tools.aggregate_issues()
|
||||
|
||||
assert "only available in company mode" in str(exc_info.value)
|
||||
|
||||
|
||||
def test_branch_detection():
|
||||
"""Test branch detection logic"""
|
||||
tools = IssueTools(Mock())
|
||||
|
||||
# Test development branches
|
||||
with patch.object(tools, '_get_current_branch', return_value='development'):
|
||||
assert tools._check_branch_permissions('create_issue') is True
|
||||
|
||||
with patch.object(tools, '_get_current_branch', return_value='feat/new-feature'):
|
||||
assert tools._check_branch_permissions('create_issue') is True
|
||||
|
||||
# Test production branches
|
||||
with patch.object(tools, '_get_current_branch', return_value='main'):
|
||||
assert tools._check_branch_permissions('create_issue') is False
|
||||
assert tools._check_branch_permissions('list_issues') is True
|
||||
|
||||
# Test staging branches
|
||||
with patch.object(tools, '_get_current_branch', return_value='staging'):
|
||||
assert tools._check_branch_permissions('create_issue') is True
|
||||
assert tools._check_branch_permissions('update_issue') is False
|
||||
246
mcp-servers/gitea/tests/test_labels.py
Normal file
@@ -0,0 +1,246 @@
|
||||
"""
|
||||
Unit tests for label tools with suggestion logic.
|
||||
"""
|
||||
import pytest
|
||||
from unittest.mock import Mock, patch
|
||||
from mcp_server.tools.labels import LabelTools
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_gitea_client():
|
||||
"""Fixture providing mocked Gitea client"""
|
||||
client = Mock()
|
||||
client.repo = 'test_repo'
|
||||
return client
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def label_tools(mock_gitea_client):
|
||||
"""Fixture providing LabelTools instance"""
|
||||
return LabelTools(mock_gitea_client)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_labels(label_tools):
|
||||
"""Test getting all labels (org + repo)"""
|
||||
label_tools.gitea.get_org_labels = Mock(return_value=[
|
||||
{'name': 'Type/Bug'},
|
||||
{'name': 'Type/Feature'}
|
||||
])
|
||||
label_tools.gitea.get_labels = Mock(return_value=[
|
||||
{'name': 'Component/Backend'},
|
||||
{'name': 'Component/Frontend'}
|
||||
])
|
||||
|
||||
result = await label_tools.get_labels()
|
||||
|
||||
assert len(result['organization']) == 2
|
||||
assert len(result['repository']) == 2
|
||||
assert result['total_count'] == 4
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_bug():
|
||||
"""Test label suggestion for bug context"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
context = "Fix critical bug in login authentication"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
|
||||
assert 'Type/Bug' in suggestions
|
||||
assert 'Priority/Critical' in suggestions
|
||||
assert 'Component/Auth' in suggestions
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_feature():
|
||||
"""Test label suggestion for feature context"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
context = "Add new feature to implement user dashboard"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
|
||||
assert 'Type/Feature' in suggestions
|
||||
assert any('Priority' in label for label in suggestions)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_refactor():
|
||||
"""Test label suggestion for refactor context"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
context = "Refactor architecture to extract service layer"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
|
||||
assert 'Type/Refactor' in suggestions
|
||||
assert 'Component/Backend' in suggestions
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_documentation():
|
||||
"""Test label suggestion for documentation context"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
context = "Update documentation for API endpoints"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
|
||||
assert 'Type/Documentation' in suggestions
|
||||
assert 'Component/API' in suggestions or 'Component/Docs' in suggestions
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_priority():
|
||||
"""Test priority detection in suggestions"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
# Critical priority
|
||||
context = "Urgent blocker in production"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Priority/Critical' in suggestions
|
||||
|
||||
# High priority
|
||||
context = "Important feature needed asap"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Priority/High' in suggestions
|
||||
|
||||
# Low priority
|
||||
context = "Nice-to-have optional improvement"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Priority/Low' in suggestions
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_complexity():
|
||||
"""Test complexity detection in suggestions"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
# Simple complexity
|
||||
context = "Simple quick fix for typo"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Complexity/Simple' in suggestions
|
||||
|
||||
# Complex complexity
|
||||
context = "Complex challenging architecture redesign"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Complexity/Complex' in suggestions
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_efforts():
|
||||
"""Test efforts detection in suggestions"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
# XS effort
|
||||
context = "Tiny fix that takes 1 hour"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Efforts/XS' in suggestions
|
||||
|
||||
# L effort
|
||||
context = "Large feature taking 1 week"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Efforts/L' in suggestions
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_components():
|
||||
"""Test component detection in suggestions"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
# Backend component
|
||||
context = "Update backend API service"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Component/Backend' in suggestions
|
||||
assert 'Component/API' in suggestions
|
||||
|
||||
# Frontend component
|
||||
context = "Fix frontend UI component"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Component/Frontend' in suggestions
|
||||
|
||||
# Database component
|
||||
context = "Add database migration for schema"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Component/Database' in suggestions
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_tech_stack():
|
||||
"""Test tech stack detection in suggestions"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
# Python
|
||||
context = "Update Python FastAPI endpoint"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Tech/Python' in suggestions
|
||||
assert 'Tech/FastAPI' in suggestions
|
||||
|
||||
# Docker
|
||||
context = "Fix Dockerfile configuration"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Tech/Docker' in suggestions
|
||||
|
||||
# PostgreSQL
|
||||
context = "Optimize PostgreSQL query"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Tech/PostgreSQL' in suggestions
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_source():
|
||||
"""Test source detection in suggestions"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
# Development
|
||||
context = "Issue found in development environment"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Source/Development' in suggestions
|
||||
|
||||
# Production
|
||||
context = "Critical production issue"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Source/Production' in suggestions
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_risk():
|
||||
"""Test risk detection in suggestions"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
# High risk
|
||||
context = "Breaking change to major API"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Risk/High' in suggestions
|
||||
|
||||
# Low risk
|
||||
context = "Safe minor update with low risk"
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
assert 'Risk/Low' in suggestions
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_suggest_labels_multiple_categories():
|
||||
"""Test that suggestions span multiple categories"""
|
||||
tools = LabelTools(Mock())
|
||||
|
||||
context = """
|
||||
Urgent critical bug in production backend API service.
|
||||
Need to fix broken authentication endpoint.
|
||||
This is a complex issue requiring FastAPI and PostgreSQL expertise.
|
||||
"""
|
||||
|
||||
suggestions = await tools.suggest_labels(context)
|
||||
|
||||
# Should have Type
|
||||
assert any('Type/' in label for label in suggestions)
|
||||
|
||||
# Should have Priority
|
||||
assert any('Priority/' in label for label in suggestions)
|
||||
|
||||
# Should have Component
|
||||
assert any('Component/' in label for label in suggestions)
|
||||
|
||||
# Should have Tech
|
||||
assert any('Tech/' in label for label in suggestions)
|
||||
|
||||
# Should have Source
|
||||
assert any('Source/' in label for label in suggestions)
|
||||
414
mcp-servers/wikijs/README.md
Normal file
@@ -0,0 +1,414 @@
|
||||
# Wiki.js MCP Server
|
||||
|
||||
Model Context Protocol (MCP) server for Wiki.js integration with Claude Code.
|
||||
|
||||
## Overview
|
||||
|
||||
The Wiki.js MCP Server provides Claude Code with direct access to Wiki.js for documentation management, lessons learned capture, and knowledge base operations. It supports both single-project (project mode) and company-wide (PMO mode) operations.
|
||||
|
||||
**Status**: ✅ Phase 1.1b Complete - Fully functional and tested
|
||||
|
||||
## Features
|
||||
|
||||
### Core Functionality
|
||||
|
||||
- **Page Management**: CRUD operations for Wiki.js pages with markdown content
|
||||
- **Lessons Learned**: Systematic capture and searchable repository of sprint insights
|
||||
- **Mode Detection**: Automatic project vs company-wide mode detection
|
||||
- **Hybrid Configuration**: System-level credentials + project-level paths
|
||||
- **PMO Support**: Company-wide documentation and cross-project lesson search
|
||||
|
||||
### Tools Provided
|
||||
|
||||
| Tool | Description | Mode |
|
||||
|------|-------------|------|
|
||||
| `search_pages` | Search pages by keywords and tags | Both |
|
||||
| `get_page` | Get specific page content | Both |
|
||||
| `create_page` | Create new page with markdown content | Both |
|
||||
| `update_page` | Update existing page | Both |
|
||||
| `list_pages` | List pages under a path | Both |
|
||||
| `create_lesson` | Create lessons learned entry | Both |
|
||||
| `search_lessons` | Search lessons from previous sprints | Both |
|
||||
| `tag_lesson` | Add/update tags on lessons | Both |
|
||||
|
||||
## Architecture
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```
|
||||
mcp-servers/wikijs/
|
||||
├── .venv/ # Python virtual environment
|
||||
├── requirements.txt # Python dependencies
|
||||
├── mcp_server/
|
||||
│ ├── __init__.py
|
||||
│ ├── server.py # MCP server entry point
|
||||
│ ├── config.py # Configuration loader
|
||||
│ ├── wikijs_client.py # Wiki.js GraphQL client
|
||||
│ └── tools/
|
||||
│ ├── __init__.py
|
||||
│ ├── pages.py # Page management tools
|
||||
│ └── lessons_learned.py # Lessons learned tools
|
||||
├── tests/
|
||||
│ ├── __init__.py
|
||||
│ ├── test_config.py
|
||||
│ └── test_wikijs_client.py
|
||||
├── README.md # This file
|
||||
└── TESTING.md # Testing instructions
|
||||
```
|
||||
|
||||
### Mode Detection
|
||||
|
||||
The server operates in two modes based on environment variables:
|
||||
|
||||
**Project Mode** (Single Project):
|
||||
- When `WIKIJS_PROJECT` is set
|
||||
- Operates on single project path
|
||||
- Used by `projman` plugin
|
||||
- Pages scoped to `/base_path/project/`
|
||||
|
||||
**Company Mode** (Multi-Project / PMO):
|
||||
- When `WIKIJS_PROJECT` is NOT set
|
||||
- Operates on all projects in organization
|
||||
- Used by `projman-pmo` plugin
|
||||
- Pages scoped to `/base_path/`
|
||||
|
||||
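A minimal sketch of the detection logic described above, assuming only the `WIKIJS_PROJECT` and `WIKIJS_BASE_PATH` variables (the helper name and return shape are illustrative, not the server's actual `config.py` implementation):

```python
import os

def detect_mode() -> dict:
    """Illustrative mode detection based on WIKIJS_PROJECT (assumed to mirror config.py)."""
    base_path = os.environ.get("WIKIJS_BASE_PATH", "/hyper-hive-labs")
    project = os.environ.get("WIKIJS_PROJECT")  # e.g. "projects/cuisineflow"

    if project:
        # Project mode: pages scoped under /base_path/project/
        return {"mode": "project", "scope": f"{base_path}/{project}"}
    # Company mode (PMO): pages scoped to /base_path/
    return {"mode": "company", "scope": base_path}
```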
### GraphQL Integration
|
||||
|
||||
The server uses Wiki.js GraphQL API for all operations:
|
||||
- **Pages API**: Create, read, update, list, search pages
|
||||
- **Tags**: Categorize and filter content
|
||||
- **Search**: Full-text search with tag filtering
|
||||
- **Lessons Learned**: Specialized workflow for sprint insights
|
||||
|
||||
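As a rough illustration of what one of these GraphQL calls looks like from Python with `requests`, here is a hedged sketch of a page search; the query shape and field names are assumptions based on the Wiki.js documentation, so verify them against your instance's schema:

```python
import os
import requests

WIKIJS_API_URL = os.environ["WIKIJS_API_URL"]      # e.g. http://wiki.example.com/graphql
WIKIJS_API_TOKEN = os.environ["WIKIJS_API_TOKEN"]

# Illustrative search query; adjust fields to match your Wiki.js schema.
SEARCH_QUERY = """
query ($q: String!) {
  pages {
    search(query: $q) {
      results { id title path }
    }
  }
}
"""

def search_pages(term: str) -> list:
    """POST a GraphQL search and return the matching pages."""
    resp = requests.post(
        WIKIJS_API_URL,
        json={"query": SEARCH_QUERY, "variables": {"q": term}},
        headers={"Authorization": f"Bearer {WIKIJS_API_TOKEN}"},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()["data"]["pages"]["search"]["results"]
```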
## Installation
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Python 3.10 or higher
|
||||
- Access to Wiki.js instance with API token
|
||||
- GraphQL API enabled on Wiki.js
|
||||
|
||||
### Step 1: Install Dependencies
|
||||
|
||||
```bash
|
||||
cd mcp-servers/wikijs
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate # Linux/Mac
|
||||
# or .venv\Scripts\activate # Windows
|
||||
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### Step 2: System Configuration
|
||||
|
||||
Create system-level configuration with credentials:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.config/claude
|
||||
|
||||
cat > ~/.config/claude/wikijs.env << 'EOF'
|
||||
# Wiki.js API Configuration
|
||||
WIKIJS_API_URL=http://wikijs.hotport/graphql
|
||||
WIKIJS_API_TOKEN=your_api_token_here
|
||||
WIKIJS_BASE_PATH=/hyper-hive-labs
|
||||
EOF
|
||||
|
||||
chmod 600 ~/.config/claude/wikijs.env
|
||||
```
|
||||
|
||||
**Obtaining Wiki.js API Token:**
|
||||
1. Log in to Wiki.js as administrator
|
||||
2. Navigate to Administration → API Access
|
||||
3. Click "New API Key"
|
||||
4. Set permissions: Pages (read/write), Search (read)
|
||||
5. Copy the generated JWT token
|
||||
|
||||
### Step 3: Project Configuration (Optional)
|
||||
|
||||
For project-scoped operations, create `.env` in project root:
|
||||
|
||||
```bash
|
||||
# In your project directory
|
||||
cat > .env << 'EOF'
|
||||
# Wiki.js project path
|
||||
WIKIJS_PROJECT=projects/cuisineflow
|
||||
EOF
|
||||
|
||||
# Add to .gitignore
|
||||
echo ".env" >> .gitignore
|
||||
```
|
||||
|
||||
**Note:** Omit `.env` for company-wide (PMO) mode.
|
||||
|
||||
## Usage
|
||||
|
||||
### Running the MCP Server
|
||||
|
||||
```bash
|
||||
cd mcp-servers/wikijs
|
||||
source .venv/bin/activate
|
||||
python -m mcp_server.server
|
||||
```
|
||||
|
||||
The server runs as a stdio-based MCP server and communicates via JSON-RPC 2.0.
|
||||
|
||||
### Integration with Claude Code
|
||||
|
||||
The MCP server is referenced in plugin `.mcp.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"wikijs": {
|
||||
"command": "python",
|
||||
"args": ["-m", "mcp_server.server"],
|
||||
"cwd": "${CLAUDE_PLUGIN_ROOT}/../mcp-servers/wikijs",
|
||||
"env": {}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Example Tool Calls
|
||||
|
||||
**Search Pages:**
|
||||
```json
|
||||
{
|
||||
"name": "search_pages",
|
||||
"arguments": {
|
||||
"query": "API documentation",
|
||||
"tags": "backend,api",
|
||||
"limit": 10
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Create Lesson Learned:**
|
||||
```json
|
||||
{
|
||||
"name": "create_lesson",
|
||||
"arguments": {
|
||||
"title": "Sprint 16 - Prevent Claude Code Infinite Loops",
|
||||
"content": "## Problem\\n\\nClaude Code entered infinite loop...\\n\\n## Solution\\n\\n...",
|
||||
"tags": "claude-code,testing,validation",
|
||||
"category": "sprints"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Search Lessons:**
|
||||
```json
|
||||
{
|
||||
"name": "search_lessons",
|
||||
"arguments": {
|
||||
"query": "validation",
|
||||
"tags": "testing,claude-code",
|
||||
"limit": 20
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
### Required Variables
|
||||
|
||||
| Variable | Description | Example |
|
||||
|----------|-------------|---------|
|
||||
| `WIKIJS_API_URL` | Wiki.js GraphQL endpoint | `http://wiki.example.com/graphql` |
|
||||
| `WIKIJS_API_TOKEN` | API authentication token (JWT) | `eyJhbGciOiJSUzI1...` |
|
||||
| `WIKIJS_BASE_PATH` | Base path in Wiki.js | `/hyper-hive-labs` |
|
||||
|
||||
### Optional Variables
|
||||
|
||||
| Variable | Description | Mode |
|
||||
|----------|-------------|------|
|
||||
| `WIKIJS_PROJECT` | Project-specific path | Project mode only |
|
||||
|
||||
### Configuration Priority
|
||||
|
||||
1. Project-level `.env` (overrides system)
|
||||
2. System-level `~/.config/claude/wikijs.env`
|
||||
|
||||
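A minimal sketch of that precedence using `python-dotenv`, assuming the system file is read first and a project `.env` then overrides it (the real loader lives in `mcp_server/config.py`; this is only an illustration):

```python
from pathlib import Path
from dotenv import dotenv_values

def load_settings(project_dir: Path | None = None) -> dict:
    """Merge system credentials with an optional project .env; project values win."""
    project_dir = project_dir or Path.cwd()
    system_env = Path.home() / ".config" / "claude" / "wikijs.env"
    settings = dict(dotenv_values(system_env))       # system-level credentials
    project_env = project_dir / ".env"
    if project_env.exists():
        settings.update(dotenv_values(project_env))  # project-level overrides
    return settings
```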
## Wiki.js Structure
|
||||
|
||||
### Recommended Organization
|
||||
|
||||
```
|
||||
/hyper-hive-labs/ # Base path
|
||||
├── projects/ # Project-specific
|
||||
│ ├── cuisineflow/
|
||||
│ │ ├── lessons-learned/
|
||||
│ │ │ ├── sprints/
|
||||
│ │ │ ├── patterns/
|
||||
│ │ │ └── INDEX.md
|
||||
│ │ └── documentation/
|
||||
│ ├── cuisineflow-site/
|
||||
│ ├── intuit-engine/
|
||||
│ └── hhl-site/
|
||||
├── company/ # Company-wide
|
||||
│ ├── processes/
|
||||
│ ├── standards/
|
||||
│ └── tools/
|
||||
└── shared/ # Cross-project
|
||||
├── architecture-patterns/
|
||||
├── best-practices/
|
||||
└── tech-stack/
|
||||
```
|
||||
|
||||
### Lessons Learned Categories
|
||||
|
||||
- **sprints/**: Sprint-specific lessons and retrospectives
|
||||
- **patterns/**: Recurring patterns and solutions
|
||||
- **architecture/**: Architectural decisions and outcomes
|
||||
- **tools/**: Tool-specific tips and gotchas
|
||||
|
||||
## Testing
|
||||
|
||||
See [TESTING.md](./TESTING.md) for comprehensive testing instructions.
|
||||
|
||||
**Quick Test:**
|
||||
```bash
|
||||
source .venv/bin/activate
|
||||
pytest -v
|
||||
```
|
||||
|
||||
**Test Coverage:**
|
||||
- 18 tests covering all major functionality
|
||||
- Mock-based unit tests (fast)
|
||||
- Integration tests with real Wiki.js instance
|
||||
- Configuration validation
|
||||
- Mode detection
|
||||
- Error handling
|
||||
|
||||
## Lessons Learned System
|
||||
|
||||
### Why This Matters
|
||||
|
||||
After 15 sprints without systematic lesson capture, the same mistakes kept recurring:
|
||||
- Claude Code infinite loops on similar issues: 2-3 times
|
||||
- Same architectural mistakes: Multiple occurrences
|
||||
- Forgotten optimizations: Re-discovered each time
|
||||
|
||||
**Solution:** Mandatory lessons learned capture at sprint close, searchable at sprint start.
|
||||
|
||||
### Workflow
|
||||
|
||||
**Sprint Close (Orchestrator):**
|
||||
1. Capture what went wrong
|
||||
2. Document what went right
|
||||
3. Note preventable repetitions
|
||||
4. Tag for discoverability
|
||||
|
||||
**Sprint Start (Planner):**
|
||||
1. Search relevant lessons by tags/keywords
|
||||
2. Review applicable patterns
|
||||
3. Apply preventive measures
|
||||
4. Avoid known pitfalls
|
||||
|
||||
### Lesson Structure
|
||||
|
||||
```markdown
|
||||
# Sprint X - [Lesson Title]
|
||||
|
||||
## Context
|
||||
[What were you trying to do?]
|
||||
|
||||
## Problem
|
||||
[What went wrong or what insight emerged?]
|
||||
|
||||
## Solution
|
||||
[How did you solve it?]
|
||||
|
||||
## Prevention
|
||||
[How can this be avoided or optimized in the future?]
|
||||
|
||||
## Tags
|
||||
[Comma-separated tags for search]
|
||||
```
|
||||
|
||||
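As a small illustration (not part of the server code), a helper along these lines could assemble a lesson body in the structure above before handing it to the `create_lesson` tool; the function and its parameters are hypothetical:

```python
def format_lesson(sprint: int, title: str, context: str, problem: str,
                  solution: str, prevention: str, tags: list) -> str:
    """Render a lessons-learned entry in the structure shown above (hypothetical helper)."""
    return (
        f"# Sprint {sprint} - {title}\n\n"
        f"## Context\n{context}\n\n"
        f"## Problem\n{problem}\n\n"
        f"## Solution\n{solution}\n\n"
        f"## Prevention\n{prevention}\n\n"
        f"## Tags\n{', '.join(tags)}\n"
    )
```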
## Troubleshooting
|
||||
|
||||
### Connection Errors
|
||||
|
||||
**Error:** `Failed to connect to Wiki.js GraphQL endpoint`
|
||||
|
||||
**Solutions:**
|
||||
- Verify `WIKIJS_API_URL` is correct and includes `/graphql`
|
||||
- Check Wiki.js is running and accessible
|
||||
- Ensure GraphQL API is enabled in Wiki.js admin settings
|
||||
|
||||
### Authentication Errors
|
||||
|
||||
**Error:** `Unauthorized` or `Invalid token`
|
||||
|
||||
**Solutions:**
|
||||
- Verify API token is correct and not expired
|
||||
- Check token has required permissions (Pages: read/write, Search: read)
|
||||
- Regenerate token in Wiki.js admin if needed
|
||||
|
||||
### Permission Errors
|
||||
|
||||
**Error:** `Page creation failed: Permission denied`
|
||||
|
||||
**Solutions:**
|
||||
- Verify API key has write permissions
|
||||
- Check user/group permissions in Wiki.js
|
||||
- Ensure base path exists and is accessible
|
||||
|
||||
### Mode Detection Issues
|
||||
|
||||
**Error:** Operating in wrong mode
|
||||
|
||||
**Solutions:**
|
||||
- Check `WIKIJS_PROJECT` environment variable
|
||||
- Clear project `.env` for company mode
|
||||
- Verify configuration loading order (project overrides system)
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. **Never commit tokens**: Keep `~/.config/claude/wikijs.env` and `.env` out of git
|
||||
2. **Token scope**: Use minimum required permissions (Pages + Search)
|
||||
3. **Token rotation**: Regenerate tokens periodically
|
||||
4. **Access control**: Use Wiki.js groups/permissions for sensitive docs
|
||||
5. **Audit logs**: Review Wiki.js audit logs for unexpected operations
|
||||
|
||||
## Performance
|
||||
|
||||
- **GraphQL queries**: Optimized for minimal data transfer
|
||||
- **Search**: Indexed by Wiki.js for fast results
|
||||
- **Pagination**: Configurable result limits (default: 20)
|
||||
- **Caching**: Wiki.js handles internal caching
|
||||
|
||||
## Development
|
||||
|
||||
### Running Tests
|
||||
|
||||
```bash
|
||||
# All tests
|
||||
pytest -v
|
||||
|
||||
# Specific test file
|
||||
pytest tests/test_config.py -v
|
||||
|
||||
# Integration tests only
|
||||
pytest tests/test_wikijs_client.py -v -k integration
|
||||
```
|
||||
|
||||
### Code Structure
|
||||
|
||||
- `config.py`: Configuration loading and validation
|
||||
- `wikijs_client.py`: GraphQL client implementation
|
||||
- `server.py`: MCP server setup and tool routing
|
||||
- `tools/pages.py`: Page management MCP tools
|
||||
- `tools/lessons_learned.py`: Lessons learned MCP tools
|
||||
|
||||
## License
|
||||
|
||||
MIT License - See repository root for details
|
||||
|
||||
## Support
|
||||
|
||||
For issues and questions:
|
||||
- **Repository**: https://gitea.hotserv.cloud/hhl-infra/claude-code-hhl-toolkit
|
||||
- **Issues**: https://gitea.hotserv.cloud/hhl-infra/claude-code-hhl-toolkit/issues
|
||||
- **Documentation**: `/docs/references/MCP-WIKIJS.md`
|
||||
503
mcp-servers/wikijs/TESTING.md
Normal file
@@ -0,0 +1,503 @@
# Testing Guide - Wiki.js MCP Server

This document provides comprehensive testing instructions for the Wiki.js MCP Server.

## Test Suite Overview

The test suite includes:
- **18 unit tests** with mocks (fast, no external dependencies)
- **Integration tests** against a real Wiki.js instance (requires a live Wiki.js)
- **Configuration validation** tests
- **Mode detection** tests
- **GraphQL client** tests
- **Error handling** tests

## Prerequisites

### For Unit Tests (Mocked)
- Python 3.10+
- Virtual environment with dependencies installed
- No external services required

### For Integration Tests
- Everything from unit tests, plus:
- Running Wiki.js instance
- Valid API token with permissions
- System configuration file (`~/.config/claude/wikijs.env`)

## Quick Start

### Run All Unit Tests

```bash
cd mcp-servers/wikijs
source .venv/bin/activate
pytest -v
```

**Expected Output:**
```
==================== test session starts ====================
tests/test_config.py::test_load_system_config PASSED [ 5%]
tests/test_config.py::test_project_config_override PASSED [ 11%]
...
==================== 18 passed in 0.40s ====================
```

### Run Integration Tests

```bash
# Set up system configuration first
mkdir -p ~/.config/claude
cat > ~/.config/claude/wikijs.env << 'EOF'
WIKIJS_API_URL=http://wikijs.hotport/graphql
WIKIJS_API_TOKEN=your_real_token_here
WIKIJS_BASE_PATH=/hyper-hive-labs
EOF

# Run integration tests
pytest -v -m integration
```

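Because the integration tests are selected with `-m integration`, the marker should be registered so pytest does not warn about an unknown mark. A minimal `tests/conftest.py` sketch (the marker description is the only assumption here):

```python
# tests/conftest.py
def pytest_configure(config):
    # Register the custom marker used to select integration tests (-m integration)
    config.addinivalue_line(
        "markers", "integration: tests that require a live Wiki.js instance"
    )
```
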
## Test Categories

### 1. Configuration Tests (`test_config.py`)

Tests the hybrid configuration system and mode detection.

**Tests:**
- `test_load_system_config`: System-level config loading
- `test_project_config_override`: Project overrides system
- `test_missing_system_config`: Error when config missing
- `test_missing_required_config`: Validation of required vars
- `test_mode_detection_project`: Project mode detection
- `test_mode_detection_company`: Company mode detection

**Run:**
```bash
pytest tests/test_config.py -v
```

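As an illustration of this category, here is a sketch of the `test_missing_required_config` case, exercising the `ValueError` raised by the loader when the system config is incomplete (the test body is illustrative, not the repository's exact test):

```python
import pytest
from unittest.mock import patch

from mcp_server.config import WikiJSConfig


def test_missing_required_config(tmp_path, monkeypatch):
    monkeypatch.delenv("WIKIJS_BASE_PATH", raising=False)  # keep the real env out of the test
    monkeypatch.setattr("pathlib.Path.cwd", lambda: tmp_path)  # no project .env here

    system_config = tmp_path / ".config" / "claude" / "wikijs.env"
    system_config.parent.mkdir(parents=True)
    # WIKIJS_BASE_PATH is deliberately missing
    system_config.write_text(
        "WIKIJS_API_URL=http://wiki.test.com/graphql\n"
        "WIKIJS_API_TOKEN=test_token_123\n"
    )

    with patch("pathlib.Path.home", return_value=tmp_path):
        with pytest.raises(ValueError, match="WIKIJS_BASE_PATH"):
            WikiJSConfig().load()
```
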
### 2. Wiki.js Client Tests (`test_wikijs_client.py`)

Tests the GraphQL client and all Wiki.js operations.

**Tests:**
- `test_client_initialization`: Client setup
- `test_company_mode_initialization`: Company mode setup
- `test_get_full_path_project_mode`: Path construction (project)
- `test_get_full_path_company_mode`: Path construction (company)
- `test_search_pages`: Page search
- `test_get_page`: Single page retrieval
- `test_create_page`: Page creation
- `test_update_page`: Page updates
- `test_list_pages`: List pages with filtering
- `test_create_lesson`: Lessons learned creation
- `test_search_lessons`: Lesson search
- `test_graphql_error_handling`: Error handling

**Run:**
```bash
pytest tests/test_wikijs_client.py -v
```

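These tests run without a live Wiki.js by mocking the GraphQL layer. A sketch of the idea, patching the client's `_execute_query` with a canned response shaped like the queries in `wikijs_client.py` (requires `pytest-asyncio`, already in `requirements.txt`):

```python
import pytest
from unittest.mock import AsyncMock, patch

from mcp_server.wikijs_client import WikiJSClient


@pytest.mark.asyncio
async def test_search_pages_mocked():
    client = WikiJSClient(
        api_url="http://wiki.test.com/graphql",
        api_token="test_token",
        base_path="/test-company",
        project="projects/test-project",
    )

    # Canned GraphQL response matching the SearchPages query shape
    fake_response = {
        "pages": {"search": {"results": [
            {"id": 1, "path": "/test-company/projects/test-project/documentation/api",
             "title": "API", "description": "API docs"},
        ]}}
    }

    with patch.object(WikiJSClient, "_execute_query", new=AsyncMock(return_value=fake_response)):
        results = await client.search_pages("api")

    assert len(results) == 1
    assert results[0]["title"] == "API"
```
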
## Integration Testing

### Setup Integration Environment

**Step 1: Configure Wiki.js**

Create a test namespace in Wiki.js:
```
/test-integration/
├── projects/
│   └── test-project/
│       ├── documentation/
│       └── lessons-learned/
└── shared/
```

**Step 2: Configure System**

```bash
cat > ~/.config/claude/wikijs.env << 'EOF'
WIKIJS_API_URL=http://wikijs.hotport/graphql
WIKIJS_API_TOKEN=your_token_here
WIKIJS_BASE_PATH=/test-integration
EOF
```

**Step 3: Configure Project**

```bash
# In the test directory
cat > .env << 'EOF'
WIKIJS_PROJECT=projects/test-project
EOF
```

### Run Integration Tests

```bash
# Run all tests marked as integration
pytest -v -m integration

# Run a specific integration test
pytest tests/test_wikijs_client.py::test_create_page -v -m integration
```

### Integration Test Scenarios

**Scenario 1: Page Lifecycle** (a pytest sketch follows after this list)
1. Create page with `create_page`
2. Retrieve with `get_page`
3. Update with `update_page`
4. Search for page with `search_pages`
5. Cleanup (manual via Wiki.js UI)

**Scenario 2: Lessons Learned Workflow**
1. Create lesson with `create_lesson`
2. Search lessons with `search_lessons`
3. Add tags with `tag_lesson`
4. Verify searchability

**Scenario 3: Mode Detection**
1. Test in project mode (with `WIKIJS_PROJECT`)
2. Test in company mode (without `WIKIJS_PROJECT`)
3. Verify path scoping

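A sketch of Scenario 1 as a marked integration test; it assumes the system/project configuration above is in place and leaves cleanup manual, as in step 5:

```python
import time

import pytest

from mcp_server.config import WikiJSConfig
from mcp_server.wikijs_client import WikiJSClient


@pytest.mark.integration
@pytest.mark.asyncio
async def test_page_lifecycle():
    config = WikiJSConfig().load()
    client = WikiJSClient(
        api_url=config["api_url"],
        api_token=config["api_token"],
        base_path=config["base_path"],
        project=config.get("project"),
    )

    # Unique path so repeated runs do not collide
    path = f"documentation/lifecycle-{int(time.time())}"

    created = await client.create_page(path=path, title="Lifecycle Test", content="# v1")
    fetched = await client.get_page(path)
    assert fetched is not None and fetched["id"] == created["id"]

    updated = await client.update_page(page_id=created["id"], content="# v2")
    assert updated["id"] == created["id"]

    results = await client.search_pages("Lifecycle")
    assert any("Lifecycle" in r.get("title", "") for r in results)
    # Step 5: delete the page manually in the Wiki.js UI afterwards
```
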
## Manual Testing

### Test 1: Create and Retrieve Page

```bash
# Start MCP server
python -m mcp_server.server

# In another terminal, send an MCP request (the pipe starts its own server instance over stdio)
echo '{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "tools/call",
  "params": {
    "name": "create_page",
    "arguments": {
      "path": "documentation/test-api",
      "title": "Test API Documentation",
      "content": "# Test API\n\nThis is a test page.",
      "tags": "api,testing",
      "publish": true
    }
  }
}' | python -m mcp_server.server
```

**Expected Result:**
```json
{
  "success": true,
  "page": {
    "id": 123,
    "path": "/hyper-hive-labs/projects/test-project/documentation/test-api",
    "title": "Test API Documentation"
  }
}
```

### Test 2: Search Lessons

```bash
echo '{
  "jsonrpc": "2.0",
  "id": 2,
  "method": "tools/call",
  "params": {
    "name": "search_lessons",
    "arguments": {
      "query": "validation",
      "tags": "testing,claude-code",
      "limit": 10
    }
  }
}' | python -m mcp_server.server
```

**Expected Result:**
```json
{
  "success": true,
  "count": 2,
  "lessons": [...]
}
```

### Test 3: Mode Detection

**Project Mode:**
```bash
# Create .env with WIKIJS_PROJECT
echo "WIKIJS_PROJECT=projects/test-project" > .env

# Start server and check logs
python -m mcp_server.server 2>&1 | grep "mode"
```

**Expected Log:**
```
INFO:Running in project mode: projects/test-project
```

**Company Mode:**
```bash
# Remove .env
rm .env

# Start server and check logs
python -m mcp_server.server 2>&1 | grep "mode"
```

**Expected Log:**
```
INFO:Running in company-wide mode (PMO)
```

## Test Data Management

### Cleanup Test Data

After integration tests, clean up test pages in Wiki.js.

**Via the Wiki.js UI:**

1. Navigate to `/test-integration/`
2. Select the test pages
3. Delete them

**Or via GraphQL (advanced):**

```bash
curl -X POST http://wikijs.hotport/graphql \
  -H "Authorization: Bearer $WIKIJS_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "query": "mutation { pages { delete(id: 123) { responseResult { succeeded } } } }"
  }'
```

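For larger test runs, the same two operations can be scripted. A sketch that lists pages (the same query `wikijs_client.py` uses) and deletes everything under the test prefix via the mutation above; the prefix value, and whether paths carry a leading slash, depend on your Wiki.js setup:

```python
import asyncio
import os

import httpx

API_URL = os.environ["WIKIJS_API_URL"]
HEADERS = {"Authorization": f"Bearer {os.environ['WIKIJS_API_TOKEN']}"}

LIST_QUERY = "query { pages { list { id path } } }"
DELETE_MUTATION = """
mutation ($id: Int!) {
  pages { delete(id: $id) { responseResult { succeeded } } }
}
"""


async def cleanup(prefix: str = "test-integration/") -> None:
    async with httpx.AsyncClient() as client:
        resp = await client.post(API_URL, headers=HEADERS, json={"query": LIST_QUERY})
        for page in resp.json()["data"]["pages"]["list"]:
            if page["path"].startswith(prefix):
                await client.post(
                    API_URL,
                    headers=HEADERS,
                    json={"query": DELETE_MUTATION, "variables": {"id": page["id"]}},
                )
                print(f"Deleted {page['path']}")


asyncio.run(cleanup())
```
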
### Test Data Fixtures

For repeatable testing, create fixtures:

```python
# tests/conftest.py
import pytest

from mcp_server.wikijs_client import WikiJSClient


@pytest.fixture
async def test_page():
    """Create a test page and clean up after (requires pytest-asyncio)"""
    client = WikiJSClient(...)
    page = await client.create_page(
        path="test/fixture-page",
        title="Test Fixture",
        content="# Test"
    )
    yield page
    # Cleanup after test
    # Note: a delete helper is not implemented in wikijs_client.py yet
    await client.delete_page(page['id'])
```

## Continuous Integration

### GitHub Actions / Gitea Actions

```yaml
name: Test Wiki.js MCP Server

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install dependencies
        working-directory: mcp-servers/wikijs
        run: |
          python -m venv .venv
          source .venv/bin/activate
          pip install -r requirements.txt

      - name: Run unit tests
        working-directory: mcp-servers/wikijs
        run: |
          source .venv/bin/activate
          pytest -v

      # Integration tests (optional, requires Wiki.js instance)
      - name: Run integration tests
        if: ${{ secrets.WIKIJS_API_TOKEN != '' }}
        working-directory: mcp-servers/wikijs
        env:
          WIKIJS_API_URL: ${{ secrets.WIKIJS_API_URL }}
          WIKIJS_API_TOKEN: ${{ secrets.WIKIJS_API_TOKEN }}
          WIKIJS_BASE_PATH: /test-integration
        run: |
          source .venv/bin/activate
          pytest -v -m integration
```

## Debugging Tests

### Enable Verbose Logging

```bash
# Show DEBUG-level logs while the tests run (pytest live logging)
pytest -v -s --log-cli-level=DEBUG
```

### Run Single Test with Debugging

```bash
# Run specific test with print statements visible
pytest tests/test_config.py::test_load_system_config -v -s

# Use pytest debugger
pytest tests/test_config.py::test_load_system_config --pdb
```

### Inspect GraphQL Queries

Add logging to see the actual GraphQL queries:

```python
# In wikijs_client.py
async def _execute_query(self, query: str, variables: Optional[Dict[str, Any]] = None):
    logger.info(f"GraphQL Query: {query}")
    logger.info(f"Variables: {variables}")
    # ... rest of method
```

## Test Coverage

### Generate Coverage Report

```bash
pip install pytest-cov

# Run with coverage
pytest --cov=mcp_server --cov-report=html

# Open report
open htmlcov/index.html
```

**Target Coverage:** 90%+ for all modules

## Performance Testing

### Benchmark GraphQL Operations

```python
import asyncio
import time

from mcp_server.wikijs_client import WikiJSClient


async def benchmark_search():
    client = WikiJSClient(...)
    start = time.time()
    results = await client.search_pages("test")
    elapsed = time.time() - start
    print(f"Search took {elapsed:.3f}s")


asyncio.run(benchmark_search())
```

**Expected Performance:**
- Search: < 500ms
- Get page: < 200ms
- Create page: < 1s
- Update page: < 500ms

## Common Test Failures

### 1. Configuration Not Found

**Error:**
```
FileNotFoundError: System config not found: ~/.config/claude/wikijs.env
```

**Solution:**
```bash
mkdir -p ~/.config/claude
cat > ~/.config/claude/wikijs.env << 'EOF'
WIKIJS_API_URL=http://wikijs.hotport/graphql
WIKIJS_API_TOKEN=test_token
WIKIJS_BASE_PATH=/test
EOF
```

### 2. GraphQL Connection Error

**Error:**
```
httpx.ConnectError: Connection refused
```

**Solution:**
- Verify Wiki.js is running
- Check `WIKIJS_API_URL` is correct
- Ensure `/graphql` endpoint is accessible

### 3. Permission Denied

**Error:**
```
ValueError: Failed to create page: Permission denied
```

**Solution:**
- Regenerate API token with write permissions
- Check Wiki.js user/group permissions
- Verify base path exists and is accessible

### 4. Environment Variable Pollution

**Error:**
```
AssertionError: assert 'project' == 'company'
```

**Solution:**
```python
# In test, clear environment
monkeypatch.delenv('WIKIJS_PROJECT', raising=False)
```

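In context, that one-liner belongs at the top of any test that asserts company mode, alongside pointing `Path.cwd()` at a directory without a project `.env`. A fuller sketch of the fix (test name and values are illustrative):

```python
from unittest.mock import patch

from mcp_server.config import WikiJSConfig


def test_company_mode_is_isolated(tmp_path, monkeypatch):
    # A leaked WIKIJS_PROJECT from the developer's shell or an earlier test
    # would otherwise flip the detected mode to 'project'.
    monkeypatch.delenv("WIKIJS_PROJECT", raising=False)
    monkeypatch.setattr("pathlib.Path.cwd", lambda: tmp_path)  # no project .env here

    system_config = tmp_path / ".config" / "claude" / "wikijs.env"
    system_config.parent.mkdir(parents=True)
    system_config.write_text(
        "WIKIJS_API_URL=http://wiki.test.com/graphql\n"
        "WIKIJS_API_TOKEN=test_token_123\n"
        "WIKIJS_BASE_PATH=/test-company\n"
    )

    with patch("pathlib.Path.home", return_value=tmp_path):
        assert WikiJSConfig().load()["mode"] == "company"
```
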
## Best Practices

1. **Isolate Tests**: Each test should be independent
2. **Mock External Calls**: Use mocks for unit tests
3. **Clean Up Resources**: Delete test pages after integration tests
4. **Use Fixtures**: Reuse common setup/teardown
5. **Test Error Cases**: Not just happy paths
6. **Document Assumptions**: Comment what tests expect
7. **Consistent Naming**: Follow `test_<what>_<scenario>` pattern

## Next Steps

After testing passes:
1. Review code coverage report
2. Add integration tests for edge cases
3. Document any new test scenarios
4. Update CI/CD pipeline
5. Create test data fixtures for common scenarios

## Support

For testing issues:
- Check test logs: `pytest -v -s`
- Review Wiki.js logs
- Verify configuration files
- See main README.md troubleshooting section
3
mcp-servers/wikijs/mcp_server/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""Wiki.js MCP Server for Claude Code."""
|
||||
|
||||
__version__ = "0.1.0"
|
||||
102
mcp-servers/wikijs/mcp_server/config.py
Normal file
@@ -0,0 +1,102 @@
"""
Configuration loader for Wiki.js MCP Server.

Implements hybrid configuration system:
- System-level: ~/.config/claude/wikijs.env (credentials)
- Project-level: .env (project path specification)
"""
from pathlib import Path
from dotenv import load_dotenv
import os
import logging
from typing import Dict, Optional

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class WikiJSConfig:
    """Hybrid configuration loader with mode detection"""

    def __init__(self):
        self.api_url: Optional[str] = None
        self.api_token: Optional[str] = None
        self.base_path: Optional[str] = None
        self.project: Optional[str] = None
        self.mode: str = 'project'

    def load(self) -> Dict[str, Optional[str]]:
        """
        Load configuration from system and project levels.
        Project-level configuration overrides system-level.

        Returns:
            Dict containing api_url, api_token, base_path, project, mode

        Raises:
            FileNotFoundError: If system config is missing
            ValueError: If required configuration is missing
        """
        # Load system config
        system_config = Path.home() / '.config' / 'claude' / 'wikijs.env'
        if system_config.exists():
            load_dotenv(system_config)
            logger.info(f"Loaded system configuration from {system_config}")
        else:
            raise FileNotFoundError(
                f"System config not found: {system_config}\n"
                "Create it with: mkdir -p ~/.config/claude && "
                "cat > ~/.config/claude/wikijs.env"
            )

        # Load project config (overrides system)
        project_config = Path.cwd() / '.env'
        if project_config.exists():
            load_dotenv(project_config, override=True)
            logger.info(f"Loaded project configuration from {project_config}")

        # Extract values
        self.api_url = os.getenv('WIKIJS_API_URL')
        self.api_token = os.getenv('WIKIJS_API_TOKEN')
        self.base_path = os.getenv('WIKIJS_BASE_PATH')
        self.project = os.getenv('WIKIJS_PROJECT')  # Optional for PMO

        # Detect mode
        if self.project:
            self.mode = 'project'
            logger.info(f"Running in project mode: {self.project}")
        else:
            self.mode = 'company'
            logger.info("Running in company-wide mode (PMO)")

        # Validate required variables
        self._validate()

        return {
            'api_url': self.api_url,
            'api_token': self.api_token,
            'base_path': self.base_path,
            'project': self.project,
            'mode': self.mode
        }

    def _validate(self) -> None:
        """
        Validate that required configuration is present.

        Raises:
            ValueError: If required configuration is missing
        """
        required = {
            'WIKIJS_API_URL': self.api_url,
            'WIKIJS_API_TOKEN': self.api_token,
            'WIKIJS_BASE_PATH': self.base_path
        }

        missing = [key for key, value in required.items() if not value]

        if missing:
            raise ValueError(
                f"Missing required configuration: {', '.join(missing)}\n"
                "Check your ~/.config/claude/wikijs.env file"
            )
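
For reference, a minimal usage sketch of this loader (run from a project directory that may or may not contain a `.env`):

```python
from mcp_server.config import WikiJSConfig

config = WikiJSConfig().load()
print(config["mode"])        # 'project' when WIKIJS_PROJECT is set, otherwise 'company'
print(config["base_path"])   # e.g. /hyper-hive-labs
```
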
382
mcp-servers/wikijs/mcp_server/server.py
Normal file
@@ -0,0 +1,382 @@
|
||||
"""
|
||||
MCP Server entry point for Wiki.js integration.
|
||||
|
||||
Provides Wiki.js tools to Claude Code via JSON-RPC 2.0 over stdio.
|
||||
"""
|
||||
import asyncio
|
||||
import logging
|
||||
import json
|
||||
from mcp.server import Server
|
||||
from mcp.server.stdio import stdio_server
|
||||
from mcp.types import Tool, TextContent
|
||||
|
||||
from .config import WikiJSConfig
|
||||
from .wikijs_client import WikiJSClient
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WikiJSMCPServer:
|
||||
"""MCP Server for Wiki.js integration"""
|
||||
|
||||
def __init__(self):
|
||||
self.server = Server("wikijs-mcp")
|
||||
self.config = None
|
||||
self.client = None
|
||||
|
||||
async def initialize(self):
|
||||
"""
|
||||
Initialize server and load configuration.
|
||||
|
||||
Raises:
|
||||
Exception: If initialization fails
|
||||
"""
|
||||
try:
|
||||
config_loader = WikiJSConfig()
|
||||
self.config = config_loader.load()
|
||||
|
||||
self.client = WikiJSClient(
|
||||
api_url=self.config['api_url'],
|
||||
api_token=self.config['api_token'],
|
||||
base_path=self.config['base_path'],
|
||||
project=self.config.get('project')
|
||||
)
|
||||
|
||||
logger.info(f"Wiki.js MCP Server initialized in {self.config['mode']} mode")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to initialize: {e}")
|
||||
raise
|
||||
|
||||
def setup_tools(self):
|
||||
"""Register all available tools with the MCP server"""
|
||||
|
||||
@self.server.list_tools()
|
||||
async def list_tools() -> list[Tool]:
|
||||
"""Return list of available tools"""
|
||||
return [
|
||||
Tool(
|
||||
name="search_pages",
|
||||
description="Search Wiki.js pages by keywords and tags",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "Search query string"
|
||||
},
|
||||
"tags": {
|
||||
"type": "string",
|
||||
"description": "Comma-separated tags to filter by (optional)"
|
||||
},
|
||||
"limit": {
|
||||
"type": "integer",
|
||||
"default": 20,
|
||||
"description": "Maximum results to return"
|
||||
}
|
||||
},
|
||||
"required": ["query"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="get_page",
|
||||
description="Get a specific page by path",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "Page path (relative or absolute)"
|
||||
}
|
||||
},
|
||||
"required": ["path"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="create_page",
|
||||
description="Create a new Wiki.js page",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "Page path relative to project/base"
|
||||
},
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "Page title"
|
||||
},
|
||||
"content": {
|
||||
"type": "string",
|
||||
"description": "Page content (markdown)"
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Page description (optional)"
|
||||
},
|
||||
"tags": {
|
||||
"type": "string",
|
||||
"description": "Comma-separated tags (optional)"
|
||||
},
|
||||
"publish": {
|
||||
"type": "boolean",
|
||||
"default": True,
|
||||
"description": "Publish immediately"
|
||||
}
|
||||
},
|
||||
"required": ["path", "title", "content"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="update_page",
|
||||
description="Update an existing Wiki.js page",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"page_id": {
|
||||
"type": "integer",
|
||||
"description": "Page ID"
|
||||
},
|
||||
"content": {
|
||||
"type": "string",
|
||||
"description": "New content (optional)"
|
||||
},
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "New title (optional)"
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "New description (optional)"
|
||||
},
|
||||
"tags": {
|
||||
"type": "string",
|
||||
"description": "New comma-separated tags (optional)"
|
||||
},
|
||||
"publish": {
|
||||
"type": "boolean",
|
||||
"description": "New publish status (optional)"
|
||||
}
|
||||
},
|
||||
"required": ["page_id"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="list_pages",
|
||||
description="List pages under a specific path",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path_prefix": {
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"description": "Path prefix to filter by"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="create_lesson",
|
||||
description="Create a lessons learned entry to prevent repeating mistakes",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "Lesson title (e.g., 'Sprint 16 - Prevent Infinite Loops')"
|
||||
},
|
||||
"content": {
|
||||
"type": "string",
|
||||
"description": "Lesson content (markdown with problem, solution, prevention)"
|
||||
},
|
||||
"tags": {
|
||||
"type": "string",
|
||||
"description": "Comma-separated tags for categorization"
|
||||
},
|
||||
"category": {
|
||||
"type": "string",
|
||||
"default": "sprints",
|
||||
"description": "Category (sprints, patterns, architecture, etc.)"
|
||||
}
|
||||
},
|
||||
"required": ["title", "content", "tags"]
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="search_lessons",
|
||||
description="Search lessons learned from previous sprints to avoid known pitfalls",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "Search query (optional)"
|
||||
},
|
||||
"tags": {
|
||||
"type": "string",
|
||||
"description": "Comma-separated tags to filter by (optional)"
|
||||
},
|
||||
"limit": {
|
||||
"type": "integer",
|
||||
"default": 20,
|
||||
"description": "Maximum results"
|
||||
}
|
||||
}
|
||||
}
|
||||
),
|
||||
Tool(
|
||||
name="tag_lesson",
|
||||
description="Add or update tags on a lessons learned entry",
|
||||
inputSchema={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"page_id": {
|
||||
"type": "integer",
|
||||
"description": "Lesson page ID"
|
||||
},
|
||||
"tags": {
|
||||
"type": "string",
|
||||
"description": "Comma-separated tags"
|
||||
}
|
||||
},
|
||||
"required": ["page_id", "tags"]
|
||||
}
|
||||
)
|
||||
]
|
||||
|
||||
@self.server.call_tool()
|
||||
async def call_tool(name: str, arguments: dict) -> list[TextContent]:
|
||||
"""
|
||||
Handle tool invocation.
|
||||
|
||||
Args:
|
||||
name: Tool name
|
||||
arguments: Tool arguments
|
||||
|
||||
Returns:
|
||||
List of TextContent with results
|
||||
"""
|
||||
try:
|
||||
# Route to appropriate client method
|
||||
if name == "search_pages":
|
||||
tags = arguments.get('tags')
|
||||
tag_list = [t.strip() for t in tags.split(',')] if tags else None
|
||||
results = await self.client.search_pages(
|
||||
query=arguments['query'],
|
||||
tags=tag_list,
|
||||
limit=arguments.get('limit', 20)
|
||||
)
|
||||
result = {'success': True, 'count': len(results), 'pages': results}
|
||||
|
||||
elif name == "get_page":
|
||||
page = await self.client.get_page(arguments['path'])
|
||||
if page:
|
||||
result = {'success': True, 'page': page}
|
||||
else:
|
||||
result = {'success': False, 'error': f"Page not found: {arguments['path']}"}
|
||||
|
||||
elif name == "create_page":
|
||||
tags = arguments.get('tags')
|
||||
tag_list = [t.strip() for t in tags.split(',')] if tags else []
|
||||
page = await self.client.create_page(
|
||||
path=arguments['path'],
|
||||
title=arguments['title'],
|
||||
content=arguments['content'],
|
||||
description=arguments.get('description', ''),
|
||||
tags=tag_list,
|
||||
is_published=arguments.get('publish', True)
|
||||
)
|
||||
result = {'success': True, 'page': page}
|
||||
|
||||
elif name == "update_page":
|
||||
tags = arguments.get('tags')
|
||||
tag_list = [t.strip() for t in tags.split(',')] if tags else None
|
||||
page = await self.client.update_page(
|
||||
page_id=arguments['page_id'],
|
||||
content=arguments.get('content'),
|
||||
title=arguments.get('title'),
|
||||
description=arguments.get('description'),
|
||||
tags=tag_list,
|
||||
is_published=arguments.get('publish')
|
||||
)
|
||||
result = {'success': True, 'page': page}
|
||||
|
||||
elif name == "list_pages":
|
||||
pages = await self.client.list_pages(
|
||||
path_prefix=arguments.get('path_prefix', '')
|
||||
)
|
||||
result = {'success': True, 'count': len(pages), 'pages': pages}
|
||||
|
||||
elif name == "create_lesson":
|
||||
tag_list = [t.strip() for t in arguments['tags'].split(',')]
|
||||
lesson = await self.client.create_lesson(
|
||||
title=arguments['title'],
|
||||
content=arguments['content'],
|
||||
tags=tag_list,
|
||||
category=arguments.get('category', 'sprints')
|
||||
)
|
||||
result = {
|
||||
'success': True,
|
||||
'lesson': lesson,
|
||||
'message': f"Lesson learned captured: {arguments['title']}"
|
||||
}
|
||||
|
||||
elif name == "search_lessons":
|
||||
tags = arguments.get('tags')
|
||||
tag_list = [t.strip() for t in tags.split(',')] if tags else None
|
||||
lessons = await self.client.search_lessons(
|
||||
query=arguments.get('query'),
|
||||
tags=tag_list,
|
||||
limit=arguments.get('limit', 20)
|
||||
)
|
||||
result = {
|
||||
'success': True,
|
||||
'count': len(lessons),
|
||||
'lessons': lessons,
|
||||
'message': f"Found {len(lessons)} relevant lessons"
|
||||
}
|
||||
|
||||
elif name == "tag_lesson":
|
||||
tag_list = [t.strip() for t in arguments['tags'].split(',')]
|
||||
lesson = await self.client.tag_lesson(
|
||||
page_id=arguments['page_id'],
|
||||
new_tags=tag_list
|
||||
)
|
||||
result = {'success': True, 'lesson': lesson, 'message': 'Tags updated'}
|
||||
|
||||
else:
|
||||
raise ValueError(f"Unknown tool: {name}")
|
||||
|
||||
return [TextContent(
|
||||
type="text",
|
||||
text=json.dumps(result, indent=2)
|
||||
)]
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Tool {name} failed: {e}")
|
||||
return [TextContent(
|
||||
type="text",
|
||||
text=json.dumps({'success': False, 'error': str(e)}, indent=2)
|
||||
)]
|
||||
|
||||
async def run(self):
|
||||
"""Run the MCP server"""
|
||||
await self.initialize()
|
||||
self.setup_tools()
|
||||
|
||||
async with stdio_server() as (read_stream, write_stream):
|
||||
await self.server.run(
|
||||
read_stream,
|
||||
write_stream,
|
||||
self.server.create_initialization_options()
|
||||
)
|
||||
|
||||
|
||||
async def main():
|
||||
"""Main entry point"""
|
||||
server = WikiJSMCPServer()
|
||||
await server.run()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
asyncio.run(main())
|
||||
1
mcp-servers/wikijs/mcp_server/tools/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Wiki.js MCP tools."""
|
||||
183
mcp-servers/wikijs/mcp_server/tools/lessons_learned.py
Normal file
@@ -0,0 +1,183 @@
|
||||
"""
|
||||
MCP tools for Wiki.js lessons learned management.
|
||||
"""
|
||||
from typing import Dict, Any, List, Optional
|
||||
from mcp.server import Tool
|
||||
from ..wikijs_client import WikiJSClient
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def create_lesson_tools(client: WikiJSClient) -> List[Tool]:
|
||||
"""
|
||||
Create MCP tools for lessons learned management.
|
||||
|
||||
Args:
|
||||
client: WikiJSClient instance
|
||||
|
||||
Returns:
|
||||
List of MCP tools
|
||||
"""
|
||||
|
||||
async def create_lesson(
|
||||
title: str,
|
||||
content: str,
|
||||
tags: str,
|
||||
category: str = "sprints"
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Create a lessons learned entry.
|
||||
|
||||
After 15 sprints without systematic lesson capture, repeated mistakes occurred.
|
||||
This tool ensures lessons are captured and searchable for future sprints.
|
||||
|
||||
Args:
|
||||
title: Lesson title (e.g., "Sprint 16 - Claude Code Infinite Loop on Label Validation")
|
||||
content: Lesson content in markdown (problem, solution, prevention)
|
||||
tags: Comma-separated tags (e.g., "claude-code, testing, labels, validation")
|
||||
category: Category for organization (default: "sprints", also: "patterns", "architecture")
|
||||
|
||||
Returns:
|
||||
Created lesson page data
|
||||
|
||||
Example:
|
||||
create_lesson(
|
||||
title="Sprint 16 - Prevent Infinite Loops in Validation",
|
||||
content="## Problem\\n\\nClaude Code entered infinite loop...\\n\\n## Solution\\n\\n...",
|
||||
tags="claude-code, testing, infinite-loop, validation",
|
||||
category="sprints"
|
||||
)
|
||||
"""
|
||||
try:
|
||||
tag_list = [t.strip() for t in tags.split(',')]
|
||||
|
||||
lesson = await client.create_lesson(
|
||||
title=title,
|
||||
content=content,
|
||||
tags=tag_list,
|
||||
category=category
|
||||
)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'lesson': lesson,
|
||||
'message': f'Lesson learned captured: {title}'
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error creating lesson: {e}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}
|
||||
|
||||
async def search_lessons(
|
||||
query: Optional[str] = None,
|
||||
tags: Optional[str] = None,
|
||||
limit: int = 20
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Search lessons learned entries.
|
||||
|
||||
Use this at sprint start to find relevant lessons from previous sprints.
|
||||
Prevents repeating the same mistakes.
|
||||
|
||||
Args:
|
||||
query: Search query (e.g., "validation", "infinite loop", "docker")
|
||||
tags: Comma-separated tags to filter by (e.g., "claude-code, testing")
|
||||
limit: Maximum number of results (default: 20)
|
||||
|
||||
Returns:
|
||||
List of matching lessons learned
|
||||
|
||||
Example:
|
||||
# Before implementing validation logic
|
||||
search_lessons(query="validation", tags="testing, claude-code")
|
||||
|
||||
# Before working with Docker
|
||||
search_lessons(query="docker", tags="deployment")
|
||||
"""
|
||||
try:
|
||||
tag_list = [t.strip() for t in tags.split(',')] if tags else None
|
||||
|
||||
lessons = await client.search_lessons(
|
||||
query=query,
|
||||
tags=tag_list,
|
||||
limit=limit
|
||||
)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'count': len(lessons),
|
||||
'lessons': lessons,
|
||||
'message': f'Found {len(lessons)} relevant lessons'
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error searching lessons: {e}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}
|
||||
|
||||
async def tag_lesson(
|
||||
page_id: int,
|
||||
tags: str
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Add or update tags on a lesson.
|
||||
|
||||
Args:
|
||||
page_id: Lesson page ID (from create_lesson or search_lessons)
|
||||
tags: Comma-separated tags (will replace existing tags)
|
||||
|
||||
Returns:
|
||||
Updated lesson data
|
||||
"""
|
||||
try:
|
||||
tag_list = [t.strip() for t in tags.split(',')]
|
||||
|
||||
lesson = await client.tag_lesson(
|
||||
page_id=page_id,
|
||||
new_tags=tag_list
|
||||
)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'lesson': lesson,
|
||||
'message': 'Tags updated successfully'
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error tagging lesson: {e}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}
|
||||
|
||||
# Define MCP tools
|
||||
tools = [
|
||||
Tool(
|
||||
name="create_lesson",
|
||||
description=(
|
||||
"Create a lessons learned entry to prevent repeating mistakes. "
|
||||
"Critical for capturing sprint insights, architectural decisions, "
|
||||
"and technical gotchas for future reference."
|
||||
),
|
||||
function=create_lesson
|
||||
),
|
||||
Tool(
|
||||
name="search_lessons",
|
||||
description=(
|
||||
"Search lessons learned from previous sprints and projects. "
|
||||
"Use this before starting new work to avoid known pitfalls and "
|
||||
"leverage past solutions."
|
||||
),
|
||||
function=search_lessons
|
||||
),
|
||||
Tool(
|
||||
name="tag_lesson",
|
||||
description="Add or update tags on a lessons learned entry for better categorization",
|
||||
function=tag_lesson
|
||||
)
|
||||
]
|
||||
|
||||
return tools
|
||||
229
mcp-servers/wikijs/mcp_server/tools/pages.py
Normal file
@@ -0,0 +1,229 @@
|
||||
"""
|
||||
MCP tools for Wiki.js page management.
|
||||
"""
|
||||
from typing import Dict, Any, List, Optional
|
||||
from mcp.server import Tool
|
||||
from ..wikijs_client import WikiJSClient
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def create_page_tools(client: WikiJSClient) -> List[Tool]:
|
||||
"""
|
||||
Create MCP tools for page management.
|
||||
|
||||
Args:
|
||||
client: WikiJSClient instance
|
||||
|
||||
Returns:
|
||||
List of MCP tools
|
||||
"""
|
||||
|
||||
async def search_pages(
|
||||
query: str,
|
||||
tags: Optional[str] = None,
|
||||
limit: int = 20
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Search Wiki.js pages by keywords and tags.
|
||||
|
||||
Args:
|
||||
query: Search query string
|
||||
tags: Comma-separated list of tags to filter by
|
||||
limit: Maximum number of results (default: 20)
|
||||
|
||||
Returns:
|
||||
List of matching pages with path, title, description, and tags
|
||||
"""
|
||||
try:
|
||||
tag_list = [t.strip() for t in tags.split(',')] if tags else None
|
||||
results = await client.search_pages(query, tag_list, limit)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'count': len(results),
|
||||
'pages': results
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error searching pages: {e}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}
|
||||
|
||||
async def get_page(path: str) -> Dict[str, Any]:
|
||||
"""
|
||||
Get a specific page by path.
|
||||
|
||||
Args:
|
||||
path: Page path (can be relative to project or absolute)
|
||||
|
||||
Returns:
|
||||
Page data including content, metadata, and tags
|
||||
"""
|
||||
try:
|
||||
page = await client.get_page(path)
|
||||
|
||||
if page:
|
||||
return {
|
||||
'success': True,
|
||||
'page': page
|
||||
}
|
||||
else:
|
||||
return {
|
||||
'success': False,
|
||||
'error': f'Page not found: {path}'
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting page: {e}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}
|
||||
|
||||
async def create_page(
|
||||
path: str,
|
||||
title: str,
|
||||
content: str,
|
||||
description: str = "",
|
||||
tags: Optional[str] = None,
|
||||
publish: bool = True
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Create a new Wiki.js page.
|
||||
|
||||
Args:
|
||||
path: Page path relative to project/base (e.g., 'documentation/api')
|
||||
title: Page title
|
||||
content: Page content in markdown format
|
||||
description: Page description (optional)
|
||||
tags: Comma-separated list of tags (optional)
|
||||
publish: Whether to publish immediately (default: True)
|
||||
|
||||
Returns:
|
||||
Created page data
|
||||
"""
|
||||
try:
|
||||
tag_list = [t.strip() for t in tags.split(',')] if tags else []
|
||||
|
||||
page = await client.create_page(
|
||||
path=path,
|
||||
title=title,
|
||||
content=content,
|
||||
description=description,
|
||||
tags=tag_list,
|
||||
is_published=publish
|
||||
)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'page': page
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error creating page: {e}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}
|
||||
|
||||
async def update_page(
|
||||
page_id: int,
|
||||
content: Optional[str] = None,
|
||||
title: Optional[str] = None,
|
||||
description: Optional[str] = None,
|
||||
tags: Optional[str] = None,
|
||||
publish: Optional[bool] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Update an existing Wiki.js page.
|
||||
|
||||
Args:
|
||||
page_id: Page ID (from get_page or search_pages)
|
||||
content: New content (optional)
|
||||
title: New title (optional)
|
||||
description: New description (optional)
|
||||
tags: New comma-separated tags (optional)
|
||||
publish: New publish status (optional)
|
||||
|
||||
Returns:
|
||||
Updated page data
|
||||
"""
|
||||
try:
|
||||
tag_list = [t.strip() for t in tags.split(',')] if tags else None
|
||||
|
||||
page = await client.update_page(
|
||||
page_id=page_id,
|
||||
content=content,
|
||||
title=title,
|
||||
description=description,
|
||||
tags=tag_list,
|
||||
is_published=publish
|
||||
)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'page': page
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating page: {e}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}
|
||||
|
||||
async def list_pages(path_prefix: str = "") -> Dict[str, Any]:
|
||||
"""
|
||||
List pages under a specific path.
|
||||
|
||||
Args:
|
||||
path_prefix: Path prefix to filter by (relative to project/base)
|
||||
|
||||
Returns:
|
||||
List of pages under the specified path
|
||||
"""
|
||||
try:
|
||||
pages = await client.list_pages(path_prefix)
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'count': len(pages),
|
||||
'pages': pages
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error listing pages: {e}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': str(e)
|
||||
}
|
||||
|
||||
# Define MCP tools
|
||||
tools = [
|
||||
Tool(
|
||||
name="search_pages",
|
||||
description="Search Wiki.js pages by keywords and tags",
|
||||
function=search_pages
|
||||
),
|
||||
Tool(
|
||||
name="get_page",
|
||||
description="Get a specific Wiki.js page by path",
|
||||
function=get_page
|
||||
),
|
||||
Tool(
|
||||
name="create_page",
|
||||
description="Create a new Wiki.js page with content and metadata",
|
||||
function=create_page
|
||||
),
|
||||
Tool(
|
||||
name="update_page",
|
||||
description="Update an existing Wiki.js page",
|
||||
function=update_page
|
||||
),
|
||||
Tool(
|
||||
name="list_pages",
|
||||
description="List pages under a specific path",
|
||||
function=list_pages
|
||||
)
|
||||
]
|
||||
|
||||
return tools
|
||||
451
mcp-servers/wikijs/mcp_server/wikijs_client.py
Normal file
@@ -0,0 +1,451 @@
|
||||
"""
|
||||
Wiki.js GraphQL API Client.
|
||||
|
||||
Provides methods for interacting with Wiki.js GraphQL API for page management,
|
||||
lessons learned, and documentation.
|
||||
"""
|
||||
import httpx
|
||||
from typing import List, Dict, Optional, Any
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WikiJSClient:
|
||||
"""Client for Wiki.js GraphQL API"""
|
||||
|
||||
def __init__(self, api_url: str, api_token: str, base_path: str, project: Optional[str] = None):
|
||||
"""
|
||||
Initialize Wiki.js client.
|
||||
|
||||
Args:
|
||||
api_url: Wiki.js GraphQL API URL (e.g., http://wiki.example.com/graphql)
|
||||
api_token: Wiki.js API token
|
||||
base_path: Base path in Wiki.js (e.g., /hyper-hive-labs)
|
||||
project: Project path (e.g., projects/cuisineflow) for project mode
|
||||
"""
|
||||
self.api_url = api_url
|
||||
self.api_token = api_token
|
||||
self.base_path = base_path.rstrip('/')
|
||||
self.project = project
|
||||
self.mode = 'project' if project else 'company'
|
||||
|
||||
self.headers = {
|
||||
'Authorization': f'Bearer {api_token}',
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
|
||||
def _get_full_path(self, relative_path: str) -> str:
|
||||
"""
|
||||
Construct full path based on mode.
|
||||
|
||||
Args:
|
||||
relative_path: Path relative to project or base
|
||||
|
||||
Returns:
|
||||
Full path in Wiki.js
|
||||
"""
|
||||
relative_path = relative_path.lstrip('/')
|
||||
|
||||
if self.mode == 'project' and self.project:
|
||||
# Project mode: base_path/project/relative_path
|
||||
return f"{self.base_path}/{self.project}/{relative_path}"
|
||||
else:
|
||||
# Company mode: base_path/relative_path
|
||||
return f"{self.base_path}/{relative_path}"
|
||||
|
||||
async def _execute_query(self, query: str, variables: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
|
||||
"""
|
||||
Execute GraphQL query.
|
||||
|
||||
Args:
|
||||
query: GraphQL query string
|
||||
variables: Query variables
|
||||
|
||||
Returns:
|
||||
Response data
|
||||
|
||||
Raises:
|
||||
httpx.HTTPError: On HTTP errors
|
||||
ValueError: On GraphQL errors
|
||||
"""
|
||||
async with httpx.AsyncClient() as client:
|
||||
response = await client.post(
|
||||
self.api_url,
|
||||
headers=self.headers,
|
||||
json={'query': query, 'variables': variables or {}}
|
||||
)
|
||||
|
||||
# Log response for debugging
|
||||
if response.status_code != 200:
|
||||
logger.error(f"HTTP {response.status_code}: {response.text}")
|
||||
|
||||
response.raise_for_status()
|
||||
|
||||
data = response.json()
|
||||
|
||||
if 'errors' in data:
|
||||
errors = data['errors']
|
||||
error_messages = [err.get('message', str(err)) for err in errors]
|
||||
raise ValueError(f"GraphQL errors: {', '.join(error_messages)}")
|
||||
|
||||
return data.get('data', {})
|
||||
|
||||
async def search_pages(
|
||||
self,
|
||||
query: str,
|
||||
tags: Optional[List[str]] = None,
|
||||
limit: int = 20
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Search pages by keywords and tags.
|
||||
|
||||
Args:
|
||||
query: Search query string
|
||||
tags: Filter by tags
|
||||
limit: Maximum results to return
|
||||
|
||||
Returns:
|
||||
List of matching pages
|
||||
"""
|
||||
graphql_query = """
|
||||
query SearchPages($query: String!) {
|
||||
pages {
|
||||
search(query: $query) {
|
||||
results {
|
||||
id
|
||||
path
|
||||
title
|
||||
description
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
data = await self._execute_query(graphql_query, {'query': query})
|
||||
results = data.get('pages', {}).get('search', {}).get('results', [])
|
||||
|
||||
# Filter by tags if specified
|
||||
if tags:
|
||||
tags_lower = [t.lower() for t in tags]
|
||||
results = [
|
||||
r for r in results
|
||||
if any(tag.lower() in tags_lower for tag in r.get('tags', []))
|
||||
]
|
||||
|
||||
return results[:limit]
|
||||
|
||||
async def get_page(self, path: str) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Get specific page by path.
|
||||
|
||||
Args:
|
||||
path: Page path (can be relative or absolute)
|
||||
|
||||
Returns:
|
||||
Page data or None if not found
|
||||
"""
|
||||
# Convert to absolute path
|
||||
if not path.startswith(self.base_path):
|
||||
path = self._get_full_path(path)
|
||||
|
||||
graphql_query = """
|
||||
query GetPage($path: String!) {
|
||||
pages {
|
||||
single(path: $path) {
|
||||
id
|
||||
path
|
||||
title
|
||||
description
|
||||
content
|
||||
tags
|
||||
createdAt
|
||||
updatedAt
|
||||
author
|
||||
isPublished
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
try:
|
||||
data = await self._execute_query(graphql_query, {'path': path})
|
||||
return data.get('pages', {}).get('single')
|
||||
except (httpx.HTTPError, ValueError) as e:
|
||||
logger.warning(f"Page not found at {path}: {e}")
|
||||
return None
|
||||
|
||||
async def create_page(
|
||||
self,
|
||||
path: str,
|
||||
title: str,
|
||||
content: str,
|
||||
description: str = "",
|
||||
tags: Optional[List[str]] = None,
|
||||
is_published: bool = True
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Create new page.
|
||||
|
||||
Args:
|
||||
path: Page path (relative to project/base)
|
||||
title: Page title
|
||||
content: Page content (markdown)
|
||||
description: Page description
|
||||
tags: Page tags
|
||||
is_published: Whether to publish immediately
|
||||
|
||||
Returns:
|
||||
Created page data
|
||||
"""
|
||||
full_path = self._get_full_path(path)
|
||||
|
||||
graphql_query = """
|
||||
mutation CreatePage($path: String!, $title: String!, $content: String!, $description: String!, $tags: [String]!, $isPublished: Boolean!, $isPrivate: Boolean!) {
|
||||
pages {
|
||||
create(
|
||||
path: $path
|
||||
title: $title
|
||||
content: $content
|
||||
description: $description
|
||||
tags: $tags
|
||||
isPublished: $isPublished
|
||||
isPrivate: $isPrivate
|
||||
editor: "markdown"
|
||||
locale: "en"
|
||||
) {
|
||||
responseResult {
|
||||
succeeded
|
||||
errorCode
|
||||
slug
|
||||
message
|
||||
}
|
||||
page {
|
||||
id
|
||||
path
|
||||
title
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
variables = {
|
||||
'path': full_path,
|
||||
'title': title,
|
||||
'content': content,
|
||||
'description': description,
|
||||
'tags': tags or [],
|
||||
'isPublished': is_published,
|
||||
'isPrivate': False # Default to not private
|
||||
}
|
||||
|
||||
data = await self._execute_query(graphql_query, variables)
|
||||
result = data.get('pages', {}).get('create', {})
|
||||
|
||||
if not result.get('responseResult', {}).get('succeeded'):
|
||||
error_msg = result.get('responseResult', {}).get('message', 'Unknown error')
|
||||
raise ValueError(f"Failed to create page: {error_msg}")
|
||||
|
||||
return result.get('page', {})
|
||||
|
||||
async def update_page(
|
||||
self,
|
||||
page_id: int,
|
||||
content: Optional[str] = None,
|
||||
title: Optional[str] = None,
|
||||
description: Optional[str] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
is_published: Optional[bool] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Update existing page.
|
||||
|
||||
Args:
|
||||
page_id: Page ID
|
||||
content: New content (if changing)
|
||||
title: New title (if changing)
|
||||
description: New description (if changing)
|
||||
tags: New tags (if changing)
|
||||
is_published: New publish status (if changing)
|
||||
|
||||
Returns:
|
||||
Updated page data
|
||||
"""
|
||||
# Build update fields dynamically
|
||||
fields = []
|
||||
variables = {'id': page_id}
|
||||
|
||||
if content is not None:
|
||||
fields.append('content: $content')
|
||||
variables['content'] = content
|
||||
|
||||
if title is not None:
|
||||
fields.append('title: $title')
|
||||
variables['title'] = title
|
||||
|
||||
if description is not None:
|
||||
fields.append('description: $description')
|
||||
variables['description'] = description
|
||||
|
||||
if tags is not None:
|
||||
fields.append('tags: $tags')
|
||||
variables['tags'] = tags
|
||||
|
||||
if is_published is not None:
|
||||
fields.append('isPublished: $isPublished')
|
||||
variables['isPublished'] = is_published
|
||||
|
||||
fields_str = ', '.join(fields)
|
||||
|
||||
# Build the variable definitions with explicit GraphQL types; deriving them from
# Python type names (Str, Bool, List) would not be valid GraphQL. Nullable types
# are assumed to be accepted by the Wiki.js update mutation for these arguments.
gql_types = {'content': 'String', 'title': 'String', 'description': 'String', 'tags': '[String]', 'isPublished': 'Boolean'}
var_defs = ''.join([f', ${k}: {gql_types[k]}' for k in variables if k != 'id'])

graphql_query = f"""
mutation UpdatePage($id: Int!{var_defs}) {{
|
||||
pages {{
|
||||
update(
|
||||
id: $id
|
||||
{fields_str}
|
||||
) {{
|
||||
responseResult {{
|
||||
succeeded
|
||||
errorCode
|
||||
message
|
||||
}}
|
||||
page {{
|
||||
id
|
||||
path
|
||||
title
|
||||
updatedAt
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
}}
|
||||
"""
|
||||
|
||||
data = await self._execute_query(graphql_query, variables)
|
||||
result = data.get('pages', {}).get('update', {})
|
||||
|
||||
if not result.get('responseResult', {}).get('succeeded'):
|
||||
error_msg = result.get('responseResult', {}).get('message', 'Unknown error')
|
||||
raise ValueError(f"Failed to update page: {error_msg}")
|
||||
|
||||
return result.get('page', {})
|
||||
|
||||
async def list_pages(self, path_prefix: str = "") -> List[Dict[str, Any]]:
|
||||
"""
|
||||
List pages under a specific path.
|
||||
|
||||
Args:
|
||||
path_prefix: Path prefix to filter (relative to project/base)
|
||||
|
||||
Returns:
|
||||
List of pages
|
||||
"""
|
||||
# Construct full path based on mode
|
||||
if path_prefix:
|
||||
full_path = self._get_full_path(path_prefix)
|
||||
else:
|
||||
# Empty path_prefix: return all pages in project (project mode) or base (company mode)
|
||||
if self.mode == 'project' and self.project:
|
||||
full_path = f"{self.base_path}/{self.project}"
|
||||
else:
|
||||
full_path = self.base_path
|
||||
|
||||
graphql_query = """
|
||||
query ListPages {
|
||||
pages {
|
||||
list {
|
||||
id
|
||||
path
|
||||
title
|
||||
description
|
||||
tags
|
||||
createdAt
|
||||
updatedAt
|
||||
isPublished
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
data = await self._execute_query(graphql_query)
|
||||
all_pages = data.get('pages', {}).get('list', [])
|
||||
|
||||
# Filter by path prefix
|
||||
if full_path:
|
||||
return [p for p in all_pages if p.get('path', '').startswith(full_path)]
|
||||
|
||||
return all_pages
|
||||
|
||||
async def create_lesson(
|
||||
self,
|
||||
title: str,
|
||||
content: str,
|
||||
tags: List[str],
|
||||
category: str = "sprints"
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Create a lessons learned entry.
|
||||
|
||||
Args:
|
||||
title: Lesson title
|
||||
content: Lesson content (markdown)
|
||||
tags: Tags for categorization
|
||||
category: Category (sprints, patterns, etc.)
|
||||
|
||||
Returns:
|
||||
Created lesson page data
|
||||
"""
|
||||
# Construct path: lessons-learned/category/title-slug
|
||||
slug = title.lower().replace(' ', '-').replace('_', '-')
|
||||
path = f"lessons-learned/{category}/{slug}"
|
||||
|
||||
return await self.create_page(
|
||||
path=path,
|
||||
title=title,
|
||||
content=content,
|
||||
description=f"Lessons learned: {title}",
|
||||
tags=tags + ['lesson-learned', category],
|
||||
is_published=True
|
||||
)
|
||||
|
||||
async def search_lessons(
|
||||
self,
|
||||
query: Optional[str] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
limit: int = 20
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Search lessons learned entries.
|
||||
|
||||
Args:
|
||||
query: Search query (optional)
|
||||
tags: Filter by tags
|
||||
limit: Maximum results
|
||||
|
||||
Returns:
|
||||
List of matching lessons
|
||||
"""
|
||||
# Search in lessons-learned path
|
||||
search_query = query or "lesson"
|
||||
|
||||
results = await self.search_pages(search_query, tags, limit)
|
||||
|
||||
# Filter to only lessons-learned path
|
||||
lessons_path = self._get_full_path("lessons-learned")
|
||||
return [r for r in results if r.get('path', '').startswith(lessons_path)]
|
||||
|
||||
async def tag_lesson(self, page_id: int, new_tags: List[str]) -> Dict[str, Any]:
|
||||
"""
|
||||
Add tags to a lesson.
|
||||
|
||||
Args:
|
||||
page_id: Lesson page ID
|
||||
new_tags: Tags to add
|
||||
|
||||
Returns:
|
||||
Updated page data
|
||||
"""
|
||||
# Get current page to merge tags
|
||||
# For now, just replace tags (can enhance to merge later)
|
||||
return await self.update_page(page_id=page_id, tags=new_tags)
|
||||
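
A short usage sketch of the client above, as the lessons-learned tools would drive it (the URL, token, and project values are illustrative):

```python
import asyncio

from mcp_server.wikijs_client import WikiJSClient


async def main():
    client = WikiJSClient(
        api_url="http://wikijs.hotport/graphql",
        api_token="your_token_here",
        base_path="/hyper-hive-labs",
        project="projects/test-project",
    )

    # Search existing lessons before capturing a new one
    lessons = await client.search_lessons(query="validation", tags=["claude-code"])
    print(f"Found {len(lessons)} lessons")

    if not lessons:
        await client.create_lesson(
            title="Sprint 16 - Prevent Infinite Loops in Validation",
            content="## Problem\n...\n## Solution\n...\n## Prevention\n...",
            tags=["claude-code", "validation"],
        )


asyncio.run(main())
```
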
19
mcp-servers/wikijs/requirements.txt
Normal file
@@ -0,0 +1,19 @@
|
||||
# Wiki.js MCP Server Dependencies
|
||||
|
||||
# MCP SDK
|
||||
mcp>=0.1.0
|
||||
|
||||
# HTTP client for GraphQL
|
||||
httpx>=0.27.0
|
||||
httpx-sse>=0.4.0
|
||||
|
||||
# Configuration
|
||||
python-dotenv>=1.0.0
|
||||
|
||||
# Testing
|
||||
pytest>=8.0.0
|
||||
pytest-asyncio>=0.23.0
|
||||
pytest-mock>=3.12.0
|
||||
|
||||
# Type hints
|
||||
typing-extensions>=4.9.0
|
||||
185
mcp-servers/wikijs/test_integration.py
Normal file
@@ -0,0 +1,185 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Integration test script for Wiki.js MCP Server.
|
||||
Tests against real Wiki.js instance.
|
||||
|
||||
Usage:
|
||||
python test_integration.py
|
||||
"""
|
||||
import asyncio
|
||||
import sys
|
||||
from mcp_server.config import WikiJSConfig
from mcp_server.wikijs_client import WikiJSClient


async def test_connection():
    """Test basic connection to Wiki.js"""
    print("🔌 Testing Wiki.js connection...")

    try:
        config_loader = WikiJSConfig()
        config = config_loader.load()

        print("✓ Configuration loaded")
        print(f" - API URL: {config['api_url']}")
        print(f" - Base Path: {config['base_path']}")
        print(f" - Mode: {config['mode']}")
        if config.get('project'):
            print(f" - Project: {config['project']}")

        client = WikiJSClient(
            api_url=config['api_url'],
            api_token=config['api_token'],
            base_path=config['base_path'],
            project=config.get('project')
        )

        print("✓ Client initialized")
        return client

    except Exception as e:
        print(f"✗ Configuration failed: {e}")
        return None


async def test_list_pages(client):
    """Test listing pages"""
    print("\n📄 Testing list_pages...")

    try:
        pages = await client.list_pages("")
        print(f"✓ Found {len(pages)} pages")

        if pages:
            print(" Sample pages:")
            for page in pages[:5]:
                print(f" - {page.get('title')} ({page.get('path')})")

        return True
    except Exception as e:
        print(f"✗ List pages failed: {e}")
        return False


async def test_search_pages(client):
    """Test searching pages"""
    print("\n🔍 Testing search_pages...")

    try:
        results = await client.search_pages("test", limit=5)
        print(f"✓ Search returned {len(results)} results")

        if results:
            print(" Sample results:")
            for result in results[:3]:
                print(f" - {result.get('title')}")

        return True
    except Exception as e:
        print(f"✗ Search failed: {e}")
        return False


async def test_create_page(client):
    """Test creating a page"""
    print("\n➕ Testing create_page...")

    # Use timestamp to create unique page path
    import time
    timestamp = int(time.time())
    page_path = f"testing/integration-test-{timestamp}"

    try:
        page = await client.create_page(
            path=page_path,
            title=f"Integration Test Page - {timestamp}",
            content="# Integration Test\n\nThis page was created by the Wiki.js MCP Server integration test.",
            description="Automated test page",
            tags=["test", "integration", "mcp"],
            is_published=False  # Don't publish test page
        )

        print("✓ Page created successfully")
        print(f" - ID: {page.get('id')}")
        print(f" - Path: {page.get('path')}")
        print(f" - Title: {page.get('title')}")

        return page_path  # Return path for testing get_page

    except Exception as e:
        import traceback
        print(f"✗ Create page failed: {e}")
        print(f" Error details: {traceback.format_exc()}")
        return None


async def test_get_page(client, page_path):
    """Test getting a specific page"""
    print("\n📖 Testing get_page...")

    try:
        page = await client.get_page(page_path)

        if page:
            print("✓ Page retrieved successfully")
            print(f" - Title: {page.get('title')}")
            print(f" - Tags: {', '.join(page.get('tags', []))}")
            print(f" - Published: {page.get('isPublished')}")
            return True
        else:
            print(f"✗ Page not found: {page_path}")
            return False

    except Exception as e:
        print(f"✗ Get page failed: {e}")
        return False


async def main():
    """Run all integration tests"""
    print("=" * 60)
    print("Wiki.js MCP Server - Integration Tests")
    print("=" * 60)

    # Test connection
    client = await test_connection()
    if not client:
        print("\n❌ Integration tests failed: Cannot connect to Wiki.js")
        sys.exit(1)

    # Run tests
    results = []

    results.append(await test_list_pages(client))
    results.append(await test_search_pages(client))

    page_path = await test_create_page(client)
    if page_path:
        results.append(True)
        # Test getting the created page
        results.append(await test_get_page(client, page_path))
    else:
        results.append(False)
        results.append(False)

    # Summary
    print("\n" + "=" * 60)
    print("Test Summary")
    print("=" * 60)

    passed = sum(results)
    total = len(results)

    print(f"✓ Passed: {passed}/{total}")
    print(f"✗ Failed: {total - passed}/{total}")

    if passed == total:
        print("\n✅ All integration tests passed!")
        sys.exit(0)
    else:
        print("\n❌ Some integration tests failed")
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
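For orientation, the integration script above only consumes the dictionary returned by `WikiJSConfig.load()`. Below is a minimal sketch of the shape it expects, inferred from the fields the script prints; every value is a placeholder, not a real endpoint or credential.

```python
# Hypothetical example of the dict test_connection() works with.
# Keys come from the script above; all values are placeholders.
config = {
    "api_url": "http://wiki.example.test/graphql",  # WIKIJS_API_URL
    "api_token": "placeholder-token",               # WIKIJS_API_TOKEN
    "base_path": "/example-company",                # WIKIJS_BASE_PATH
    "project": "projects/example-project",          # WIKIJS_PROJECT, or None in company mode
    "mode": "project",                              # "project" when a project is set, else "company"
}
```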
1
mcp-servers/wikijs/tests/__init__.py
Normal file
@@ -0,0 +1 @@
"""Tests for Wiki.js MCP Server."""
109
mcp-servers/wikijs/tests/test_config.py
Normal file
@@ -0,0 +1,109 @@
"""
Tests for WikiJS configuration loader.
"""
import pytest
from pathlib import Path
from unittest.mock import patch, MagicMock
from mcp_server.config import WikiJSConfig


@pytest.fixture
def mock_env(monkeypatch, tmp_path):
    """Mock environment with temporary config files"""
    # Create mock system config
    system_config = tmp_path / ".config" / "claude" / "wikijs.env"
    system_config.parent.mkdir(parents=True)
    system_config.write_text(
        "WIKIJS_API_URL=http://wiki.test.com/graphql\n"
        "WIKIJS_API_TOKEN=test_token_123\n"
        "WIKIJS_BASE_PATH=/test-company\n"
    )

    # Mock Path.home()
    with patch('pathlib.Path.home', return_value=tmp_path):
        yield tmp_path


def test_load_system_config(mock_env):
    """Test loading system-level configuration"""
    config = WikiJSConfig()
    result = config.load()

    assert result['api_url'] == "http://wiki.test.com/graphql"
    assert result['api_token'] == "test_token_123"
    assert result['base_path'] == "/test-company"
    assert result['project'] is None
    assert result['mode'] == 'company'  # No project = company mode


def test_project_config_override(mock_env, tmp_path, monkeypatch):
    """Test project-level config overrides system-level"""
    # Create project-level config
    project_config = tmp_path / ".env"
    project_config.write_text(
        "WIKIJS_PROJECT=projects/test-project\n"
    )

    # Mock Path.cwd()
    monkeypatch.setattr('pathlib.Path.cwd', lambda: tmp_path)

    config = WikiJSConfig()
    result = config.load()

    assert result['api_url'] == "http://wiki.test.com/graphql"  # From system
    assert result['project'] == "projects/test-project"  # From project
    assert result['mode'] == 'project'  # Has project = project mode


def test_missing_system_config():
    """Test error when system config is missing"""
    with patch('pathlib.Path.home', return_value=Path('/nonexistent')):
        config = WikiJSConfig()
        with pytest.raises(FileNotFoundError, match="System config not found"):
            config.load()


def test_missing_required_config(mock_env, monkeypatch):
    """Test validation of required configuration"""
    # Clear environment variables from previous tests
    monkeypatch.delenv('WIKIJS_API_URL', raising=False)
    monkeypatch.delenv('WIKIJS_API_TOKEN', raising=False)
    monkeypatch.delenv('WIKIJS_BASE_PATH', raising=False)
    monkeypatch.delenv('WIKIJS_PROJECT', raising=False)

    # Create incomplete system config
    system_config = mock_env / ".config" / "claude" / "wikijs.env"
    system_config.write_text(
        "WIKIJS_API_URL=http://wiki.test.com/graphql\n"
        # Missing API_TOKEN and BASE_PATH
    )

    config = WikiJSConfig()
    with pytest.raises(ValueError, match="Missing required configuration"):
        config.load()


def test_mode_detection_project(mock_env, tmp_path, monkeypatch):
    """Test mode detection when WIKIJS_PROJECT is set"""
    project_config = tmp_path / ".env"
    project_config.write_text("WIKIJS_PROJECT=projects/my-project\n")

    monkeypatch.setattr('pathlib.Path.cwd', lambda: tmp_path)

    config = WikiJSConfig()
    result = config.load()

    assert result['mode'] == 'project'
    assert result['project'] == 'projects/my-project'


def test_mode_detection_company(mock_env, monkeypatch):
    """Test mode detection when WIKIJS_PROJECT is not set (company mode)"""
    # Clear WIKIJS_PROJECT from environment
    monkeypatch.delenv('WIKIJS_PROJECT', raising=False)

    config = WikiJSConfig()
    result = config.load()

    assert result['mode'] == 'company'
    assert result['project'] is None
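Read together, these tests pin down the loader's layering: required values come from `~/.config/claude/wikijs.env`, a project-level `.env` may add `WIKIJS_PROJECT`, and the presence of that key selects project mode. The sketch below illustrates that precedence only; it assumes `python-dotenv` is available and is not the actual `mcp_server/config.py` implementation.

```python
import os
from pathlib import Path

from dotenv import dotenv_values  # assumption: python-dotenv is available


def sketch_load() -> dict:
    """Illustrative only: mirrors the precedence the tests above assert."""
    system_file = Path.home() / ".config" / "claude" / "wikijs.env"
    if not system_file.exists():
        raise FileNotFoundError(f"System config not found: {system_file}")

    values = dict(dotenv_values(system_file))           # system-level defaults
    values.update(dotenv_values(Path.cwd() / ".env"))   # project-level overrides
    values.update({k: v for k, v in os.environ.items() if k.startswith("WIKIJS_")})

    required = ("WIKIJS_API_URL", "WIKIJS_API_TOKEN", "WIKIJS_BASE_PATH")
    missing = [key for key in required if not values.get(key)]
    if missing:
        raise ValueError(f"Missing required configuration: {', '.join(missing)}")

    project = values.get("WIKIJS_PROJECT")
    return {
        "api_url": values["WIKIJS_API_URL"],
        "api_token": values["WIKIJS_API_TOKEN"],
        "base_path": values["WIKIJS_BASE_PATH"],
        "project": project,
        "mode": "project" if project else "company",
    }
```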
355
mcp-servers/wikijs/tests/test_wikijs_client.py
Normal file
@@ -0,0 +1,355 @@
"""
Tests for Wiki.js GraphQL client.
"""
import pytest
from unittest.mock import AsyncMock, patch, MagicMock
from mcp_server.wikijs_client import WikiJSClient


@pytest.fixture
def client():
    """Create WikiJSClient instance for testing"""
    return WikiJSClient(
        api_url="http://wiki.test.com/graphql",
        api_token="test_token_123",
        base_path="/test-company",
        project="projects/test-project"
    )


@pytest.fixture
def company_client():
    """Create WikiJSClient in company mode"""
    return WikiJSClient(
        api_url="http://wiki.test.com/graphql",
        api_token="test_token_123",
        base_path="/test-company",
        project=None  # Company mode
    )


def test_client_initialization(client):
    """Test client initializes with correct settings"""
    assert client.api_url == "http://wiki.test.com/graphql"
    assert client.api_token == "test_token_123"
    assert client.base_path == "/test-company"
    assert client.project == "projects/test-project"
    assert client.mode == 'project'


def test_company_mode_initialization(company_client):
    """Test client initializes in company mode"""
    assert company_client.mode == 'company'
    assert company_client.project is None


def test_get_full_path_project_mode(client):
    """Test path construction in project mode"""
    path = client._get_full_path("documentation/api")
    assert path == "/test-company/projects/test-project/documentation/api"


def test_get_full_path_company_mode(company_client):
    """Test path construction in company mode"""
    path = company_client._get_full_path("shared/architecture")
    assert path == "/test-company/shared/architecture"


@pytest.mark.asyncio
async def test_search_pages(client):
    """Test searching pages"""
    mock_response = {
        'data': {
            'pages': {
                'search': {
                    'results': [
                        {
                            'id': 1,
                            'path': '/test-company/projects/test-project/doc1',
                            'title': 'Document 1',
                            'tags': ['api', 'documentation']
                        },
                        {
                            'id': 2,
                            'path': '/test-company/projects/test-project/doc2',
                            'title': 'Document 2',
                            'tags': ['guide', 'tutorial']
                        }
                    ]
                }
            }
        }
    }

    with patch('httpx.AsyncClient') as mock_client:
        mock_instance = MagicMock()
        mock_instance.__aenter__.return_value = mock_instance
        mock_instance.__aexit__.return_value = None
        mock_instance.post = AsyncMock(return_value=MagicMock(
            json=lambda: mock_response,
            raise_for_status=lambda: None
        ))
        mock_client.return_value = mock_instance

        results = await client.search_pages("documentation")

        assert len(results) == 2
        assert results[0]['title'] == 'Document 1'


@pytest.mark.asyncio
async def test_get_page(client):
    """Test getting a specific page"""
    mock_response = {
        'data': {
            'pages': {
                'single': {
                    'id': 1,
                    'path': '/test-company/projects/test-project/doc1',
                    'title': 'Document 1',
                    'content': '# Test Content',
                    'tags': ['api'],
                    'isPublished': True
                }
            }
        }
    }

    with patch('httpx.AsyncClient') as mock_client:
        mock_instance = MagicMock()
        mock_instance.__aenter__.return_value = mock_instance
        mock_instance.__aexit__.return_value = None
        mock_instance.post = AsyncMock(return_value=MagicMock(
            json=lambda: mock_response,
            raise_for_status=lambda: None
        ))
        mock_client.return_value = mock_instance

        page = await client.get_page("doc1")

        assert page is not None
        assert page['title'] == 'Document 1'
        assert page['content'] == '# Test Content'


@pytest.mark.asyncio
async def test_create_page(client):
    """Test creating a new page"""
    mock_response = {
        'data': {
            'pages': {
                'create': {
                    'responseResult': {
                        'succeeded': True,
                        'errorCode': None,
                        'message': 'Page created successfully'
                    },
                    'page': {
                        'id': 1,
                        'path': '/test-company/projects/test-project/new-doc',
                        'title': 'New Document'
                    }
                }
            }
        }
    }

    with patch('httpx.AsyncClient') as mock_client:
        mock_instance = MagicMock()
        mock_instance.__aenter__.return_value = mock_instance
        mock_instance.__aexit__.return_value = None
        mock_instance.post = AsyncMock(return_value=MagicMock(
            json=lambda: mock_response,
            raise_for_status=lambda: None
        ))
        mock_client.return_value = mock_instance

        page = await client.create_page(
            path="new-doc",
            title="New Document",
            content="# Content",
            tags=["test"]
        )

        assert page['id'] == 1
        assert page['title'] == 'New Document'


@pytest.mark.asyncio
async def test_update_page(client):
    """Test updating a page"""
    mock_response = {
        'data': {
            'pages': {
                'update': {
                    'responseResult': {
                        'succeeded': True,
                        'message': 'Page updated'
                    },
                    'page': {
                        'id': 1,
                        'path': '/test-company/projects/test-project/doc1',
                        'title': 'Updated Title'
                    }
                }
            }
        }
    }

    with patch('httpx.AsyncClient') as mock_client:
        mock_instance = MagicMock()
        mock_instance.__aenter__.return_value = mock_instance
        mock_instance.__aexit__.return_value = None
        mock_instance.post = AsyncMock(return_value=MagicMock(
            json=lambda: mock_response,
            raise_for_status=lambda: None
        ))
        mock_client.return_value = mock_instance

        page = await client.update_page(
            page_id=1,
            title="Updated Title"
        )

        assert page['title'] == 'Updated Title'


@pytest.mark.asyncio
async def test_list_pages(client):
    """Test listing pages"""
    mock_response = {
        'data': {
            'pages': {
                'list': [
                    {'id': 1, 'path': '/test-company/projects/test-project/doc1', 'title': 'Doc 1'},
                    {'id': 2, 'path': '/test-company/projects/test-project/doc2', 'title': 'Doc 2'},
                    {'id': 3, 'path': '/test-company/other-project/doc3', 'title': 'Doc 3'}
                ]
            }
        }
    }

    with patch('httpx.AsyncClient') as mock_client:
        mock_instance = MagicMock()
        mock_instance.__aenter__.return_value = mock_instance
        mock_instance.__aexit__.return_value = None
        mock_instance.post = AsyncMock(return_value=MagicMock(
            json=lambda: mock_response,
            raise_for_status=lambda: None
        ))
        mock_client.return_value = mock_instance

        # List all pages in current project
        pages = await client.list_pages("")

        # Should only return pages from test-project
        assert len(pages) == 2


@pytest.mark.asyncio
async def test_create_lesson(client):
    """Test creating a lesson learned"""
    mock_response = {
        'data': {
            'pages': {
                'create': {
                    'responseResult': {
                        'succeeded': True,
                        'message': 'Lesson created'
                    },
                    'page': {
                        'id': 1,
                        'path': '/test-company/projects/test-project/lessons-learned/sprints/test-lesson',
                        'title': 'Test Lesson'
                    }
                }
            }
        }
    }

    with patch('httpx.AsyncClient') as mock_client:
        mock_instance = MagicMock()
        mock_instance.__aenter__.return_value = mock_instance
        mock_instance.__aexit__.return_value = None
        mock_instance.post = AsyncMock(return_value=MagicMock(
            json=lambda: mock_response,
            raise_for_status=lambda: None
        ))
        mock_client.return_value = mock_instance

        lesson = await client.create_lesson(
            title="Test Lesson",
            content="# Lesson Content",
            tags=["testing", "sprint-16"],
            category="sprints"
        )

        assert lesson['id'] == 1
        assert 'lessons-learned' in lesson['path']


@pytest.mark.asyncio
async def test_search_lessons(client):
    """Test searching lessons learned"""
    mock_response = {
        'data': {
            'pages': {
                'search': {
                    'results': [
                        {
                            'id': 1,
                            'path': '/test-company/projects/test-project/lessons-learned/sprints/lesson1',
                            'title': 'Lesson 1',
                            'tags': ['testing']
                        },
                        {
                            'id': 2,
                            'path': '/test-company/projects/test-project/documentation/doc1',
                            'title': 'Doc 1',
                            'tags': ['guide']
                        }
                    ]
                }
            }
        }
    }

    with patch('httpx.AsyncClient') as mock_client:
        mock_instance = MagicMock()
        mock_instance.__aenter__.return_value = mock_instance
        mock_instance.__aexit__.return_value = None
        mock_instance.post = AsyncMock(return_value=MagicMock(
            json=lambda: mock_response,
            raise_for_status=lambda: None
        ))
        mock_client.return_value = mock_instance

        lessons = await client.search_lessons(query="testing")

        # Should only return lessons-learned pages
        assert len(lessons) == 1
        assert 'lessons-learned' in lessons[0]['path']


@pytest.mark.asyncio
async def test_graphql_error_handling(client):
    """Test handling of GraphQL errors"""
    mock_response = {
        'errors': [
            {'message': 'Page not found'},
            {'message': 'Invalid query'}
        ]
    }

    with patch('httpx.AsyncClient') as mock_client:
        mock_instance = MagicMock()
        mock_instance.__aenter__.return_value = mock_instance
        mock_instance.__aexit__.return_value = None
        mock_instance.post = AsyncMock(return_value=MagicMock(
            json=lambda: mock_response,
            raise_for_status=lambda: None
        ))
        mock_client.return_value = mock_instance

        with pytest.raises(ValueError, match="GraphQL errors"):
            await client.search_pages("test")
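The two `_get_full_path` tests fix the path-joining contract: project mode nests pages under `<base_path>/<project>/`, while company mode places them directly under `<base_path>/`. Below is a standalone sketch of that contract, for illustration only; the real helper lives in `mcp_server/wikijs_client.py` and may be implemented differently.

```python
# Illustrative sketch of the path contract asserted above; not the actual client code.
def get_full_path(base_path: str, project: str | None, relative_path: str) -> str:
    parts = [base_path.rstrip("/")]
    if project:  # project mode: <base>/<project>/<page>
        parts.append(project.strip("/"))
    parts.append(relative_path.strip("/"))
    return "/".join(parts)


assert get_full_path("/test-company", "projects/test-project", "documentation/api") == (
    "/test-company/projects/test-project/documentation/api"
)
assert get_full_path("/test-company", None, "shared/architecture") == (
    "/test-company/shared/architecture"
)
```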