- Implement Autonomous Workflow Engine with dynamic task decomposition - Add Multi-Agent Communication Protocol with message routing - Create Enhanced Reasoning Chains (CoT, ToT, Multi-Step, Parallel, Hybrid) - Add comprehensive REST API endpoints for all Week 5 features - Include 26/26 passing tests with full coverage - Add complete documentation and API guides - Update development plan to mark Week 5 as completed Features: - Dynamic task decomposition and parallel execution - Agent registration, messaging, and coordination - 5 reasoning methods with validation and learning - Robust error handling and monitoring - Multi-tenant support and security - Production-ready architecture Files added/modified: - app/services/autonomous_workflow_engine.py - app/services/agent_communication.py - app/services/enhanced_reasoning.py - app/api/v1/endpoints/week5_features.py - tests/test_week5_features.py - docs/week5_api_documentation.md - docs/week5_readme.md - WEEK5_COMPLETION_SUMMARY.md - DEVELOPMENT_PLAN.md (updated) All tests passing: 26/26
47 lines
2.0 KiB
Python
47 lines
2.0 KiB
Python
import pytest

from app.services.llm_service import llm_service
from app.services.prompt_manager import prompt_manager
from app.services.rag_service import rag_service
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_prompt_manager_save_and_retrieve():
    """Saving a prompt then fetching it by the same name/version returns the template verbatim."""
    tenant_id = "test-tenant"
    # Persist a versioned prompt template for this tenant.
    await prompt_manager.save_prompt(tenant_id=tenant_id, name="ctx", version="v1", template="You are helpful.")
    # Retrieval with the identical (tenant, name, version) key must round-trip the exact text.
    tpl = await prompt_manager.get_prompt(tenant_id=tenant_id, name="ctx", version="v1")
    assert tpl == "You are helpful."
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_llm_offline_mode_without_api_key(monkeypatch):
    """With no OpenRouter API key configured, the LLM service degrades to offline mode.

    The offline fallback is identified by ``model == "offline"`` and an
    "LLM unavailable" notice in the generated text.
    """
    # Force no API key (raising=False tolerates the attribute being absent in test settings).
    monkeypatch.setattr("app.services.llm_service.settings.OPENROUTER_API_KEY", None, raising=False)
    result = await llm_service.generate_text("Hello", tenant_id="test-tenant")
    assert result["model"] == "offline"
    assert "LLM unavailable" in result["text"]
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_rag_service_basic_flow(monkeypatch):
    """End-to-end RAG answer flow with vector search and LLM generation mocked out.

    Verifies that ``rag_service.answer`` threads retrieved context into an
    answer and surfaces at least one citation, without any network access.
    """
    # Mock vector search to return a small, fixed context set.
    async def _fake_search(tenant_id, query, limit=10, chunk_types=None):
        return [
            {"document_id": "doc1", "page_numbers": [1], "chunk_type": "text", "text": "Revenue grew 20% in Q4.", "score": 0.9},
            {"document_id": "doc2", "page_numbers": [2], "chunk_type": "table", "text": "Table with KPIs", "score": 0.85},
        ]

    monkeypatch.setattr(rag_service.vector_service, "search_similar", _fake_search)

    # Mock the LLM call to avoid network; signature mirrors llm_service.generate_text.
    async def _fake_generate_text(prompt, tenant_id, task="general", max_tokens=None, temperature=None, system_prompt=None):
        return {"text": "Q4 revenue grew 20% (doc:doc1 p:1).", "model": "offline"}

    monkeypatch.setattr("app.services.rag_service.llm_service.generate_text", _fake_generate_text)

    result = await rag_service.answer(tenant_id="test-tenant", query="What happened to revenue in Q4?")
    # The mocked answer mentions revenue, and the retrieved chunks must yield citations.
    assert "revenue" in result["text"].lower()
    assert result["citations"] and len(result["citations"]) >= 1
|
|
|
|
|