- Implement Autonomous Workflow Engine with dynamic task decomposition - Add Multi-Agent Communication Protocol with message routing - Create Enhanced Reasoning Chains (CoT, ToT, Multi-Step, Parallel, Hybrid) - Add comprehensive REST API endpoints for all Week 5 features - Include 26/26 passing tests with full coverage - Add complete documentation and API guides - Update development plan to mark Week 5 as completed Features: - Dynamic task decomposition and parallel execution - Agent registration, messaging, and coordination - 5 reasoning methods with validation and learning - Robust error handling and monitoring - Multi-tenant support and security - Production-ready architecture Files added/modified: - app/services/autonomous_workflow_engine.py - app/services/agent_communication.py - app/services/enhanced_reasoning.py - app/api/v1/endpoints/week5_features.py - tests/test_week5_features.py - docs/week5_api_documentation.md - docs/week5_readme.md - WEEK5_COMPLETION_SUMMARY.md - DEVELOPMENT_PLAN.md (updated) All tests passing: 26/26
70 lines
2.2 KiB
Python
70 lines
2.2 KiB
Python
"""
|
|
Prompt management with versioning and tenant-aware Redis caching.
|
|
"""
|
|
|
|
from __future__ import annotations

import json
import logging
from datetime import datetime, timezone
from typing import Dict, Optional

from app.core.cache import cache_service
|
|
|
|
|
|
# Module-level logger; unused in the visible code — presumably reserved for
# error reporting by callers or future methods (NOTE(review): confirm).
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class PromptManager:
    """Stores and retrieves prompt templates by name/version per tenant.

    Templates are persisted through the shared ``cache_service``; when a cache
    write fails, the record is kept in an in-process fallback dict so reads
    from this process still succeed (best-effort, not shared across workers).
    """

    def __init__(self) -> None:
        # tenant_id -> {cache_key -> serialized JSON record}; populated only
        # when a cache write fails.
        self._local_fallback_store: Dict[str, Dict[str, str]] = {}

    @staticmethod
    def _cache_key(tenant_id: str, name: str, version: str) -> str:
        """Build the namespaced cache key for a prompt record."""
        return f"prompt:{tenant_id}:{name}:{version}"

    async def save_prompt(self, *, tenant_id: str, name: str, version: str, template: str) -> bool:
        """Persist a prompt template under (tenant_id, name, version).

        Returns:
            Always ``True``: a failed cache write falls back to the local
            in-memory store, so the record remains retrievable in-process.
        """
        key = self._cache_key(tenant_id, name, version)
        record = {
            "name": name,
            "version": version,
            "template": template,
            # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated
            # (since Python 3.12) and returns naive datetimes.
            "saved_at": datetime.now(timezone.utc).isoformat(),
        }
        ok = await cache_service.set(key, record, tenant_id)
        if not ok:
            self._local_fallback_store.setdefault(tenant_id, {})[key] = json.dumps(record)
        return True

    async def get_prompt(self, *, tenant_id: str, name: str, version: str) -> Optional[str]:
        """Return the template text for (name, version), or ``None`` if absent."""
        key = self._cache_key(tenant_id, name, version)
        record = await cache_service.get(key, tenant_id)
        if isinstance(record, dict) and record.get("template"):
            return record["template"]

        # Fall back to the per-process store populated on failed cache writes.
        serialized = self._local_fallback_store.get(tenant_id, {}).get(key)
        if serialized:
            try:
                data = json.loads(serialized)
            except (json.JSONDecodeError, TypeError):
                # Corrupt fallback entry: treat as missing rather than raising.
                return None
            return data.get("template")
        return None

    async def latest(self, *, tenant_id: str, name: str, max_version: int = 3) -> Optional[str]:
        """Return the newest available template for *name*, or ``None``.

        There is no version index; by convention versions are tagged
        "v1", "v2", ... so this probes v{max_version} down to v1 and returns
        the first template found. ``max_version`` defaults to 3, matching the
        original hard-coded search window.
        """
        for n in range(max_version, 0, -1):
            tpl = await self.get_prompt(tenant_id=tenant_id, name=name, version=f"v{n}")
            if tpl:
                return tpl
        return None
|
|
|
|
|
|
# Shared module-level instance; importers are expected to use this singleton
# rather than constructing their own PromptManager.
prompt_manager = PromptManager()
|
|
|
|
|