feat: implement production-ready features from improvement plan phase 2.5 & 2.6
Phase 2.5: Fix Foundation (CRITICAL)
- Fixed 4 failing tests by adding cache attribute to mock_client fixture
- Created comprehensive cache tests for Pages endpoint (test_pages_cache.py)
- Added missing dependencies: pydantic[email] and aiohttp to core requirements
- Updated requirements.txt with proper dependency versions
- Achieved 82.67% test coverage with 454 passing tests

Phase 2.6: Production Essentials
- Implemented structured logging (wikijs/logging.py)
  * JSON and text log formatters
  * Configurable log levels and output destinations
  * Integration with client operations
- Implemented metrics and telemetry (wikijs/metrics.py)
  * Request tracking with duration, status codes, errors
  * Latency percentiles (min, max, avg, p50, p95, p99)
  * Error rate calculation
  * Thread-safe metrics collection
- Implemented rate limiting (wikijs/ratelimit.py)
  * Token bucket algorithm for request throttling
  * Per-endpoint rate limiting support
  * Configurable timeout handling
  * Burst capacity management
- Created SECURITY.md policy
  * Vulnerability reporting procedures
  * Security best practices
  * Response timelines
  * Supported versions

Documentation
- Added comprehensive logging guide (docs/logging.md)
- Added metrics and telemetry guide (docs/metrics.md)
- Added rate limiting guide (docs/rate_limiting.md)
- Updated README.md with production features section
- Updated IMPROVEMENT_PLAN_2.md with completed checkboxes

Testing
- Created test suite for logging (tests/test_logging.py)
- Created test suite for metrics (tests/test_metrics.py)
- Created test suite for rate limiting (tests/test_ratelimit.py)
- All 454 tests passing
- Test coverage: 82.67%

Breaking Changes: None

Dependencies Added: pydantic[email], email-validator, dnspython

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
wikijs/logging.py (new file, 84 lines)
@@ -0,0 +1,84 @@
"""Logging configuration for wikijs-python-sdk."""
import logging
import json
import sys
from typing import Any, Dict, Optional
from datetime import datetime


class JSONFormatter(logging.Formatter):
    """JSON formatter for structured logging."""

    def format(self, record: logging.LogRecord) -> str:
        """Format log record as JSON.

        Args:
            record: The log record to format

        Returns:
            JSON formatted log string
        """
        log_data: Dict[str, Any] = {
            "timestamp": datetime.utcnow().isoformat(),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
            "module": record.module,
            "function": record.funcName,
            "line": record.lineno,
        }

        # Add exception info if present
        if record.exc_info:
            log_data["exception"] = self.formatException(record.exc_info)

        # Add extra fields
        if hasattr(record, "extra"):
            log_data.update(record.extra)

        return json.dumps(log_data)


def setup_logging(
    level: int = logging.INFO,
    format_type: str = "json",
    output_file: Optional[str] = None
) -> logging.Logger:
    """Setup logging configuration.

    Args:
        level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
        format_type: "json" or "text"
        output_file: Optional file path for log output

    Returns:
        Configured logger
    """
    logger = logging.getLogger("wikijs")
    logger.setLevel(level)

    # Remove existing handlers
    logger.handlers.clear()

    # Create handler
    if output_file:
        handler = logging.FileHandler(output_file)
    else:
        handler = logging.StreamHandler(sys.stdout)

    # Set formatter
    if format_type == "json":
        formatter = JSONFormatter()
    else:
        formatter = logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )

    handler.setFormatter(formatter)
    logger.addHandler(handler)

    return logger


# Create default logger
logger = setup_logging()
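For reference, a minimal usage sketch of the module above. It is not part of this commit; the import path assumes the package layout shown in the diff, and the log messages and file name are illustrative.

from wikijs.logging import setup_logging
import logging

# Text logs at DEBUG level to stdout
log = setup_logging(level=logging.DEBUG, format_type="text")
log.debug("client initialized")

# JSON logs written to a file (illustrative path). Note that this
# reconfigures the same "wikijs" logger: handlers are cleared on each call.
json_log = setup_logging(format_type="json", output_file="wikijs-sdk.log")
json_log.info("page fetched")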
wikijs/metrics.py (new file, 158 lines)
@@ -0,0 +1,158 @@
|
||||
"""Metrics and telemetry for wikijs-python-sdk."""
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, List, Optional
|
||||
from collections import defaultdict
|
||||
import threading
|
||||
|
||||
|
||||
@dataclass
|
||||
class RequestMetrics:
|
||||
"""Metrics for a single request."""
|
||||
endpoint: str
|
||||
method: str
|
||||
status_code: int
|
||||
duration_ms: float
|
||||
timestamp: float
|
||||
error: Optional[str] = None
|
||||
|
||||
|
||||
class MetricsCollector:
|
||||
"""Collect and aggregate metrics."""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize metrics collector."""
|
||||
self._lock = threading.Lock()
|
||||
self._requests: List[RequestMetrics] = []
|
||||
self._counters: Dict[str, int] = defaultdict(int)
|
||||
self._gauges: Dict[str, float] = {}
|
||||
self._histograms: Dict[str, List[float]] = defaultdict(list)
|
||||
|
||||
def record_request(
|
||||
self,
|
||||
endpoint: str,
|
||||
method: str,
|
||||
status_code: int,
|
||||
duration_ms: float,
|
||||
error: Optional[str] = None
|
||||
) -> None:
|
||||
"""Record API request metrics.
|
||||
|
||||
Args:
|
||||
endpoint: The API endpoint
|
||||
method: HTTP method
|
||||
status_code: HTTP status code
|
||||
duration_ms: Request duration in milliseconds
|
||||
error: Optional error message
|
||||
"""
|
||||
with self._lock:
|
||||
metric = RequestMetrics(
|
||||
endpoint=endpoint,
|
||||
method=method,
|
||||
status_code=status_code,
|
||||
duration_ms=duration_ms,
|
||||
timestamp=time.time(),
|
||||
error=error
|
||||
)
|
||||
self._requests.append(metric)
|
||||
|
||||
# Update counters
|
||||
self._counters["total_requests"] += 1
|
||||
if status_code >= 400:
|
||||
self._counters["total_errors"] += 1
|
||||
if status_code >= 500:
|
||||
self._counters["total_server_errors"] += 1
|
||||
|
||||
# Update histograms
|
||||
self._histograms[f"{method}_{endpoint}"].append(duration_ms)
|
||||
|
||||
def increment(self, counter_name: str, value: int = 1) -> None:
|
||||
"""Increment counter.
|
||||
|
||||
Args:
|
||||
counter_name: Name of the counter
|
||||
value: Value to increment by
|
||||
"""
|
||||
with self._lock:
|
||||
self._counters[counter_name] += value
|
||||
|
||||
def set_gauge(self, gauge_name: str, value: float) -> None:
|
||||
"""Set gauge value.
|
||||
|
||||
Args:
|
||||
gauge_name: Name of the gauge
|
||||
value: Value to set
|
||||
"""
|
||||
with self._lock:
|
||||
self._gauges[gauge_name] = value
|
||||
|
||||
def get_stats(self) -> Dict:
|
||||
"""Get aggregated statistics.
|
||||
|
||||
Returns:
|
||||
Dictionary of aggregated statistics
|
||||
"""
|
||||
with self._lock:
|
||||
total = self._counters.get("total_requests", 0)
|
||||
errors = self._counters.get("total_errors", 0)
|
||||
|
||||
stats = {
|
||||
"total_requests": total,
|
||||
"total_errors": errors,
|
||||
"error_rate": (errors / total * 100) if total > 0 else 0,
|
||||
"counters": dict(self._counters),
|
||||
"gauges": dict(self._gauges),
|
||||
}
|
||||
|
||||
# Calculate percentiles for latency
|
||||
if self._requests:
|
||||
durations = [r.duration_ms for r in self._requests]
|
||||
durations.sort()
|
||||
|
||||
stats["latency"] = {
|
||||
"min": min(durations),
|
||||
"max": max(durations),
|
||||
"avg": sum(durations) / len(durations),
|
||||
"p50": self._percentile(durations, 50),
|
||||
"p95": self._percentile(durations, 95),
|
||||
"p99": self._percentile(durations, 99),
|
||||
}
|
||||
|
||||
return stats
|
||||
|
||||
@staticmethod
|
||||
def _percentile(data: List[float], percentile: int) -> float:
|
||||
"""Calculate percentile.
|
||||
|
||||
Args:
|
||||
data: Sorted list of values
|
||||
percentile: Percentile to calculate
|
||||
|
||||
Returns:
|
||||
Percentile value
|
||||
"""
|
||||
if not data:
|
||||
return 0.0
|
||||
index = int(len(data) * percentile / 100)
|
||||
return data[min(index, len(data) - 1)]
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Reset all metrics."""
|
||||
with self._lock:
|
||||
self._requests.clear()
|
||||
self._counters.clear()
|
||||
self._gauges.clear()
|
||||
self._histograms.clear()
|
||||
|
||||
|
||||
# Global metrics collector
|
||||
_metrics = MetricsCollector()
|
||||
|
||||
|
||||
def get_metrics() -> MetricsCollector:
|
||||
"""Get global metrics collector.
|
||||
|
||||
Returns:
|
||||
Global MetricsCollector instance
|
||||
"""
|
||||
return _metrics
|
||||
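An illustrative sketch of driving the collector above. It is not part of this commit; the endpoint names, timings, and counter/gauge names are made up, while get_metrics() and the method signatures come from the diff.

from wikijs.metrics import get_metrics

metrics = get_metrics()

# Record two hypothetical requests, one successful and one failed
metrics.record_request("/pages", "GET", 200, duration_ms=42.5)
metrics.record_request("/pages", "GET", 500, duration_ms=310.0, error="server error")

metrics.increment("cache_hits")
metrics.set_gauge("open_connections", 3.0)

stats = metrics.get_stats()
print(stats["total_requests"])   # 2
print(stats["error_rate"])       # 50.0
print(stats["latency"]["p95"])   # 310.0 (only two samples in this sketch)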
wikijs/ratelimit.py (new file, 110 lines)
@@ -0,0 +1,110 @@
"""Rate limiting for wikijs-python-sdk."""
import time
import threading
from typing import Optional, Dict


class RateLimiter:
    """Token bucket rate limiter."""

    def __init__(
        self,
        requests_per_second: float = 10.0,
        burst: Optional[int] = None
    ):
        """Initialize rate limiter.

        Args:
            requests_per_second: Maximum requests per second
            burst: Maximum burst size (defaults to requests_per_second)
        """
        self.rate = requests_per_second
        self.burst = burst or int(requests_per_second)
        self._tokens = float(self.burst)
        self._last_update = time.time()
        self._lock = threading.Lock()

    def acquire(self, timeout: Optional[float] = None) -> bool:
        """Acquire permission to make a request.

        Args:
            timeout: Maximum time to wait in seconds (None = wait forever)

        Returns:
            True if acquired, False if timeout
        """
        deadline = time.time() + timeout if timeout else None

        while True:
            with self._lock:
                now = time.time()

                # Refill tokens based on elapsed time
                elapsed = now - self._last_update
                self._tokens = min(
                    self.burst,
                    self._tokens + elapsed * self.rate
                )
                self._last_update = now

                # Check if we have tokens
                if self._tokens >= 1.0:
                    self._tokens -= 1.0
                    return True

                # Calculate wait time
                wait_time = (1.0 - self._tokens) / self.rate

            # Check timeout
            if deadline and time.time() + wait_time > deadline:
                return False

            # Sleep and retry
            time.sleep(min(wait_time, 0.1))

    def reset(self) -> None:
        """Reset rate limiter."""
        with self._lock:
            self._tokens = float(self.burst)
            self._last_update = time.time()


class PerEndpointRateLimiter:
    """Rate limiter with per-endpoint limits."""

    def __init__(self, default_rate: float = 10.0):
        """Initialize per-endpoint rate limiter.

        Args:
            default_rate: Default rate limit for endpoints
        """
        self.default_rate = default_rate
        self._limiters: Dict[str, RateLimiter] = {}
        self._lock = threading.Lock()

    def set_limit(self, endpoint: str, rate: float) -> None:
        """Set rate limit for specific endpoint.

        Args:
            endpoint: The endpoint path
            rate: Requests per second for this endpoint
        """
        with self._lock:
            self._limiters[endpoint] = RateLimiter(rate)

    def acquire(self, endpoint: str, timeout: Optional[float] = None) -> bool:
        """Acquire for specific endpoint.

        Args:
            endpoint: The endpoint path
            timeout: Maximum time to wait

        Returns:
            True if acquired, False if timeout
        """
        with self._lock:
            if endpoint not in self._limiters:
                self._limiters[endpoint] = RateLimiter(self.default_rate)
            limiter = self._limiters[endpoint]

        return limiter.acquire(timeout)
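A minimal sketch of the rate limiters above in use. It is not part of this commit; the rates, endpoint path, and timeouts are illustrative, while the class names and signatures come from the diff.

from wikijs.ratelimit import RateLimiter, PerEndpointRateLimiter

# Global limiter: about 5 requests/second with a burst capacity of 10
limiter = RateLimiter(requests_per_second=5.0, burst=10)
if limiter.acquire(timeout=2.0):
    print("token acquired, safe to send the request")
else:
    print("timed out waiting for a token")

# Per-endpoint limits: a tighter limit for a hypothetical /search endpoint
endpoint_limiter = PerEndpointRateLimiter(default_rate=10.0)
endpoint_limiter.set_limit("/search", rate=2.0)
endpoint_limiter.acquire("/search", timeout=1.0)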