adding MCP tools
.claude/tools/agent_cache_wrapper.py (new file, 115 lines)
@@ -0,0 +1,115 @@
# .claude/tools/agent_cache_wrapper.py
"""
Cache wrapper for AI agents.
Use this in your agent workflows to add caching.
"""

import os
import sys
from pathlib import Path
from typing import Callable, Optional

# Add the tools directory to the Python path
tools_dir = Path(__file__).parent
sys.path.insert(0, str(tools_dir))

from local_cache_client import (
    get_cache,
    cached_ai_query,
    store_ai_response,
    print_cache_stats,
)


class AgentCacheWrapper:
    """Wrapper for agent AI calls with caching support."""

    def __init__(self, agent_type: str, project: Optional[str] = None):
        self.agent_type = agent_type
        self.project = project or os.getenv("AI_CACHE_PROJECT", "job_forge")
        self.cache = get_cache()

        print(f"🤖 {agent_type.title()} Agent initialized with caching")

    def query_with_cache(
        self, prompt: str, make_ai_call_func: Optional[Callable[[str], str]] = None
    ) -> Optional[str]:
        """
        Query with cache support.

        Args:
            prompt: The prompt to send.
            make_ai_call_func: Function to call on a cache miss (should return the AI response).

        Returns:
            AI response (from cache or a fresh API call), or None if no call function was given.
        """
        # Try the cache first
        cached_response, was_hit = cached_ai_query(
            prompt, self.agent_type, self.project
        )

        if was_hit:
            return cached_response

        # Cache miss - make the AI call
        if make_ai_call_func:
            print(f"🤖 Making fresh AI call for {self.agent_type}...")
            ai_response = make_ai_call_func(prompt)

            # Store in cache for next time
            if ai_response:
                store_ai_response(prompt, ai_response, self.agent_type, self.project)

            return ai_response
        else:
            print("⚠️ No AI call function provided for cache miss")
            return None

    def store_response(self, prompt: str, response: str):
        """Manually store a response in the cache."""
        store_ai_response(prompt, response, self.agent_type, self.project)

    def get_stats(self):
        """Get cache statistics for this session."""
        return self.cache.get_stats()


# Convenience functions for each agent type
def technical_lead_query(prompt: str, ai_call_func=None) -> Optional[str]:
    """Technical Lead agent with caching."""
    wrapper = AgentCacheWrapper("technical_lead")
    return wrapper.query_with_cache(prompt, ai_call_func)


def qa_engineer_query(prompt: str, ai_call_func=None) -> Optional[str]:
    """QA Engineer agent with caching."""
    wrapper = AgentCacheWrapper("qa_engineer")
    return wrapper.query_with_cache(prompt, ai_call_func)


def devops_engineer_query(prompt: str, ai_call_func=None) -> Optional[str]:
    """DevOps Engineer agent with caching."""
    wrapper = AgentCacheWrapper("devops_engineer")
    return wrapper.query_with_cache(prompt, ai_call_func)


def fullstack_developer_query(prompt: str, ai_call_func=None) -> Optional[str]:
    """Full-Stack Developer agent with caching."""
    wrapper = AgentCacheWrapper("fullstack_developer")
    return wrapper.query_with_cache(prompt, ai_call_func)


# Example usage and testing
if __name__ == "__main__":
    # Example AI call function (replace with your actual Claude Code integration)
    def example_ai_call(prompt):
        # This is where you would call Claude Code or your AI service.
        # For testing, return a mock response.
        return f"Mock AI response for: {prompt[:50]}..."

    # Test with the Technical Lead agent
    response = technical_lead_query(
        "What is the current FastAPI project structure?", example_ai_call
    )
    print(f"Response: {response}")

    # Print stats
    print_cache_stats()
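In a real workflow the wrapper is driven by a callable that performs the actual model call: the wrapper checks the cache first, only invokes the callable on a miss, and then stores the fresh answer for next time. A minimal usage sketch, assuming a hypothetical call_claude() stand-in for the real Claude Code integration (everything else comes from agent_cache_wrapper.py above):

# usage_sketch.py - illustrative only; call_claude() is a hypothetical stand-in
from agent_cache_wrapper import AgentCacheWrapper


def call_claude(prompt: str) -> str:
    # Replace with your actual Claude Code / AI service call.
    return f"(fresh AI response for: {prompt[:40]}...)"


wrapper = AgentCacheWrapper("technical_lead", project="job_forge")

# First call: cache miss, call_claude() runs and the answer is stored.
# Subsequent identical calls: served from the cache, call_claude() is skipped.
answer = wrapper.query_with_cache("Summarize the FastAPI project structure", call_claude)
print(answer)
print(wrapper.get_stats())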
.claude/tools/local_cache_client.py (new file, 307 lines)
@@ -0,0 +1,307 @@
# .claude/tools/local_cache_client.py
"""
AI Cache Client for Local Development.
Integrates with the n8n-based AI response caching system.
"""

import requests
import json
import os
import hashlib
import time
from typing import Optional, Dict, Any
from datetime import datetime


class AICacheClient:
    """Client for interacting with the AI Cache MCP service."""

    def __init__(self, base_url: Optional[str] = None, enabled: bool = True):
        # Default to your n8n webhook URL
        self.base_url = base_url or os.getenv(
            "AI_CACHE_URL", "https://n8n.hotserv.cloud/webhook"
        )
        self.enabled = (
            enabled and os.getenv("AI_CACHE_ENABLED", "true").lower() == "true"
        )
        self.timeout = int(os.getenv("AI_CACHE_TIMEOUT", "15"))

        # Stats tracking
        self.session_hits = 0
        self.session_misses = 0
        self.session_start = time.time()
        self.connection_failed = False

        if self.enabled:
            print(f"🧠 AI Cache enabled: {self.base_url}")
            self._test_connection()
        else:
            print("⚠️ AI Cache disabled")

    def _test_connection(self):
        """Test whether the cache service is accessible."""
        try:
            response = requests.get(
                f"{self.base_url}/ai-cache-stats",
                timeout=3,  # quick test
            )
            if response.status_code == 200:
                print("✅ Cache service is accessible")
            else:
                print(f"⚠️ Cache service returned HTTP {response.status_code}")
                self.connection_failed = True
        except Exception as e:
            print(f"❌ Cache service unreachable: {str(e)[:50]}...")
            self.connection_failed = True

    def _normalize_prompt(self, prompt: str) -> str:
        """Normalize a prompt for consistent matching."""
        # Lowercase, flatten newlines, and collapse double spaces
        return prompt.strip().lower().replace("\n", " ").replace("  ", " ")

    def lookup_cache(
        self, prompt: str, agent_type: str, project: str = "job_forge"
    ) -> Optional[str]:
        """Look up a cached AI response."""
        if not self.enabled or self.connection_failed:
            return None

        try:
            start_time = time.time()

            response = requests.post(
                f"{self.base_url}/ai-cache-lookup",
                json={"prompt": prompt, "agent_type": agent_type, "project": project},
                timeout=self.timeout,
            )

            lookup_time = (time.time() - start_time) * 1000

            if response.status_code == 200:
                try:
                    # Debug: print the raw response
                    raw_text = response.text
                    print(f"🔍 Debug - Raw response: '{raw_text[:100]}...'")

                    if not raw_text.strip():
                        print(f"❌ Cache MISS [{agent_type}] - Empty response | Lookup: {lookup_time:.0f}ms")
                        self.session_misses += 1
                        return None

                    data = response.json()
                    if data.get("found"):
                        similarity = data.get("similarity", 1.0)
                        hit_count = data.get("hit_count", 1)

                        print(
                            f"✅ Cache HIT! [{agent_type}] Similarity: {similarity:.2f} | Used: {hit_count}x | Lookup: {lookup_time:.0f}ms"
                        )
                        self.session_hits += 1
                        return data.get("response")
                    else:
                        print(f"❌ Cache MISS [{agent_type}] | Lookup: {lookup_time:.0f}ms")
                        self.session_misses += 1
                        return None
                except json.JSONDecodeError as e:
                    print(f"🚨 JSON decode error: {str(e)} | Response: '{response.text[:50]}'")
                    self.session_misses += 1
                    return None
            else:
                print(f"⚠️ Cache lookup failed: HTTP {response.status_code}")
                return None

        except requests.exceptions.Timeout:
            print(f"⏱️ Cache lookup timeout ({self.timeout}s)")
            return None
        except Exception as e:
            print(f"🚨 Cache error: {str(e)}")
            return None

    def store_cache(
        self,
        prompt: str,
        response: str,
        agent_type: str,
        ai_service: str = "claude",
        model: str = "claude-sonnet-4",
        project: str = "job_forge",
    ) -> bool:
        """Store an AI response in the cache."""
        if not self.enabled or not response or len(response.strip()) < 10:
            return False

        try:
            start_time = time.time()

            result = requests.post(
                f"{self.base_url}/ai-cache-store",
                json={
                    "prompt": prompt,
                    "response": response,
                    "ai_service": ai_service,
                    "model": model,
                    "agent_type": agent_type,
                    "project": project,
                },
                timeout=self.timeout,
            )

            store_time = (time.time() - start_time) * 1000

            if result.status_code == 200:
                data = result.json()
                if data.get("success"):
                    print(
                        f"💾 Response cached [{agent_type}] | Store: {store_time:.0f}ms"
                    )
                    return True
                else:
                    print(
                        f"📄 Already cached [{agent_type}] | Store: {store_time:.0f}ms"
                    )
                    return False
            else:
                print(f"⚠️ Cache store failed: HTTP {result.status_code}")
                return False

        except requests.exceptions.Timeout:
            print(f"⏱️ Cache store timeout ({self.timeout}s)")
            return False
        except Exception as e:
            print(f"🚨 Cache store error: {str(e)}")
            return False

    def get_stats(self) -> Dict[str, Any]:
        """Get cache statistics."""
        try:
            response = requests.get(
                f"{self.base_url}/ai-cache-stats", timeout=self.timeout
            )

            if response.status_code == 200:
                stats = response.json()

                # Add session stats
                session_time = time.time() - self.session_start
                session_total = self.session_hits + self.session_misses
                session_hit_rate = (
                    (self.session_hits / session_total * 100)
                    if session_total > 0
                    else 0
                )

                stats["session_stats"] = {
                    "hits": self.session_hits,
                    "misses": self.session_misses,
                    "total": session_total,
                    "hit_rate_percentage": round(session_hit_rate, 1),
                    "duration_minutes": round(session_time / 60, 1),
                }

                return stats
            else:
                return {"error": f"Failed to get stats: {response.status_code}"}

        except Exception as e:
            return {"error": f"Stats error: {str(e)}"}

    def print_session_summary(self):
        """Print a session cache performance summary."""
        total = self.session_hits + self.session_misses
        if total == 0:
            return

        hit_rate = (self.session_hits / total) * 100
        session_time = (time.time() - self.session_start) / 60

        print("\n📊 Cache Session Summary:")
        print(
            f"   Hits: {self.session_hits} | Misses: {self.session_misses} | Hit Rate: {hit_rate:.1f}%"
        )
        print(f"   Session Time: {session_time:.1f} minutes")

        if hit_rate > 60:
            print("   🎉 Excellent cache performance!")
        elif hit_rate > 30:
            print("   👍 Good cache performance")
        else:
            print("   📈 Cache is learning your patterns...")


# Global cache instance
_cache_instance: Optional[AICacheClient] = None


def get_cache() -> AICacheClient:
    """Get or create the global cache instance."""
    global _cache_instance
    if _cache_instance is None:
        _cache_instance = AICacheClient()
    return _cache_instance


def cached_ai_query(
    prompt: str, agent_type: str, project: str = "job_forge"
) -> tuple[Optional[str], bool]:
    """
    Helper function for cached AI queries.
    Returns: (cached_response, was_cache_hit)
    """
    cache = get_cache()
    cached_response = cache.lookup_cache(prompt, agent_type, project)

    if cached_response:
        return cached_response, True
    else:
        return None, False


def store_ai_response(
    prompt: str, response: str, agent_type: str, project: str = "job_forge"
):
    """Helper function to store AI responses."""
    cache = get_cache()
    cache.store_cache(prompt, response, agent_type, project=project)


def print_cache_stats():
    """Print current cache statistics."""
    cache = get_cache()
    stats = cache.get_stats()

    if "error" in stats:
        print(f"❌ {stats['error']}")
        return

    summary = stats.get("summary", {})
    session = stats.get("session_stats", {})

    print("\n📈 AI Cache Statistics:")
    print(f"   Overall Hit Rate: {summary.get('hit_rate_percentage', 0)}%")
    print(f"   Total Saved: ${summary.get('total_cost_saved_usd', 0):.2f}")
    print(f"   API Calls Saved: {summary.get('api_calls_saved', 0)}")

    if session:
        print(
            f"   This Session: {session['hits']}/{session['total']} hits ({session['hit_rate_percentage']}%)"
        )


# Example usage for testing
if __name__ == "__main__":
    # Test the cache
    cache = get_cache()

    # Test lookup
    result = cache.lookup_cache("What is the database schema?", "technical_lead")
    print(f"Lookup result: {result}")

    # Test store
    cache.store_cache(
        "What is the database schema?",
        "PostgreSQL with users and applications tables",
        "technical_lead",
    )

    # Print stats
    print_cache_stats()
    cache.print_session_summary()
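The client is configured entirely through environment variables read in AICacheClient.__init__: AI_CACHE_URL (webhook base URL, defaulting to the n8n instance above), AI_CACHE_ENABLED (anything other than "true" disables caching), and AI_CACHE_TIMEOUT (per-request timeout in seconds); agent_cache_wrapper.py additionally reads AI_CACHE_PROJECT for the default project name. It talks to three webhook endpoints: /ai-cache-lookup, /ai-cache-store, and /ai-cache-stats. A minimal configuration sketch, assuming a local n8n instance (the localhost URL is a placeholder, not part of this commit); the overrides must be set before the first get_cache() call, since the singleton reads them once at construction time:

# cache_config_sketch.py - illustrative only; the localhost URL is a placeholder
import os

# Set overrides before the singleton is built; AICacheClient reads them in __init__.
os.environ["AI_CACHE_URL"] = "http://localhost:5678/webhook"
os.environ["AI_CACHE_ENABLED"] = "true"
os.environ["AI_CACHE_TIMEOUT"] = "5"

from local_cache_client import get_cache, print_cache_stats

cache = get_cache()  # picks up the overrides above and tests the connection
print_cache_stats()
cache.print_session_summary()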