116 lines
3.6 KiB
Python
116 lines
3.6 KiB
Python
# .claude/tools/agent_cache_wrapper.py
"""
Cache wrapper for AI agents.

Use this in your agent workflows to add caching.
"""
|
|
|
|
import os
import sys
from pathlib import Path
from typing import Optional

# Add the tools directory to Python path
tools_dir = Path(__file__).parent
sys.path.insert(0, str(tools_dir))

from local_cache_client import (
    get_cache,
    cached_ai_query,
    store_ai_response,
    print_cache_stats,
)
|
|
|
|
|
|
class AgentCacheWrapper:
    """Wrapper for agent AI calls with caching support.

    Each prompt is looked up in the local cache first; only on a miss is the
    caller-supplied AI function invoked, and its response is stored so the
    next identical prompt is served from cache.
    """

    def __init__(self, agent_type: str, project: Optional[str] = None):
        """Initialize the wrapper for one agent type.

        Args:
            agent_type: Identifier that namespaces cache entries
                (e.g. "technical_lead").
            project: Cache project name; when omitted, falls back to the
                AI_CACHE_PROJECT environment variable, then "job_forge".
        """
        self.agent_type = agent_type
        # Explicit arg wins, then env var, then the hard-coded default.
        self.project = project or os.getenv("AI_CACHE_PROJECT", "job_forge")
        self.cache = get_cache()

        print(f"🤖 {agent_type.title()} Agent initialized with caching")

    def query_with_cache(self, prompt: str, make_ai_call_func=None) -> Optional[str]:
        """Query with cache support.

        Args:
            prompt: The prompt to send.
            make_ai_call_func: Function invoked with the prompt on a cache
                miss; it should return the AI response.

        Returns:
            The AI response (from cache or a fresh call), or None when the
            cache misses and no call function was provided.
        """
        # Try cache first.
        cached_response, was_hit = cached_ai_query(
            prompt, self.agent_type, self.project
        )
        if was_hit:
            return cached_response

        # Cache miss with no way to produce a fresh response.
        if make_ai_call_func is None:
            print("⚠️ No AI call function provided for cache miss")
            return None

        print(f"🤖 Making fresh AI call for {self.agent_type}...")
        ai_response = make_ai_call_func(prompt)

        # Store only truthy responses so empty/failed results get retried
        # instead of being served from cache next time.
        if ai_response:
            store_ai_response(prompt, ai_response, self.agent_type, self.project)

        return ai_response

    def store_response(self, prompt: str, response: str) -> None:
        """Manually store a response in the cache."""
        store_ai_response(prompt, response, self.agent_type, self.project)

    def get_stats(self):
        """Return cache statistics for this session."""
        return self.cache.get_stats()
|
|
|
|
|
|
# Convenience functions for each agent type
def technical_lead_query(prompt: str, ai_call_func=None) -> str:
    """Run a cached query as the Technical Lead agent."""
    return AgentCacheWrapper("technical_lead").query_with_cache(prompt, ai_call_func)
|
|
|
|
|
|
def qa_engineer_query(prompt: str, ai_call_func=None) -> str:
    """Run a cached query as the QA Engineer agent."""
    return AgentCacheWrapper("qa_engineer").query_with_cache(prompt, ai_call_func)
|
|
|
|
|
|
def devops_engineer_query(prompt: str, ai_call_func=None) -> str:
    """Run a cached query as the DevOps Engineer agent."""
    return AgentCacheWrapper("devops_engineer").query_with_cache(prompt, ai_call_func)
|
|
|
|
|
|
def fullstack_developer_query(prompt: str, ai_call_func=None) -> str:
    """Run a cached query as the Full-Stack Developer agent."""
    return AgentCacheWrapper("fullstack_developer").query_with_cache(prompt, ai_call_func)
|
|
|
|
|
|
# Example usage and testing
if __name__ == "__main__":

    def example_ai_call(prompt):
        # Stand-in for a real Claude Code / AI-service call; returns a
        # deterministic mock so the cache round-trip can be exercised.
        return f"Mock AI response for: {prompt[:50]}..."

    # Exercise the Technical Lead wrapper once (first run misses, stores).
    response = technical_lead_query(
        "What is the current FastAPI project structure?", example_ai_call
    )
    print(f"Response: {response}")

    # Show hit/miss counters accumulated this session.
    print_cache_stats()
|