Files
cim_summary/test-llm-processing.js
Jon adb33154cc feat: Implement optimized agentic RAG processor with vector embeddings and LLM analysis
- Add LLM analysis integration to optimized agentic RAG processor
- Fix strategy routing in job queue service to use configured processing strategy
- Update ProcessingResult interface to include LLM analysis results
- Integrate vector database operations with semantic chunking
- Add comprehensive CIM review generation with proper error handling
- Fix TypeScript errors and improve type safety
- Ensure complete pipeline from upload to final analysis output

The optimized agentic RAG processor now:
- Creates intelligent semantic chunks with metadata enrichment
- Generates vector embeddings for all chunks
- Stores chunks in pgvector database with optimized batching
- Runs LLM analysis to generate comprehensive CIM reviews
- Provides complete integration from upload to final output

Tested successfully with STAX CIM document processing.
2025-07-28 20:11:32 -04:00

49 lines
1.5 KiB
JavaScript

const axios = require('axios');

// Configurable via environment so this smoke test can target non-local
// deployments or other documents; defaults match the original hard-coded
// values, so running it with no env vars behaves exactly as before.
const BASE_URL = process.env.API_BASE_URL ?? 'http://localhost:5000';
const DOCUMENT_ID = process.env.DOCUMENT_ID ?? '0876b7f4-0899-4eb0-b2c6-434ec4e7a46d';
const LOGIN_EMAIL = process.env.LOGIN_EMAIL ?? 'test@stax-processing.com';
const LOGIN_PASSWORD = process.env.LOGIN_PASSWORD ?? 'TestPass123!';

/**
 * Smoke test: authenticate against the API, then trigger LLM processing for
 * a document already present in the system ("STAX CIM" per the commit notes).
 * Logs progress to the console. On any failure (login error, missing token,
 * processing request rejected) it sets a non-zero process exit code so CI and
 * shell callers can detect the failure — the original version always exited 0.
 *
 * @returns {Promise<void>} resolves when the test run (pass or fail) is done;
 *   never rejects, because all errors are handled in the catch block.
 */
async function testLLMProcessing() {
  try {
    console.log('🚀 Testing LLM Processing for STAX CIM...');

    // Step 1: authenticate to obtain a bearer token.
    const loginResponse = await axios.post(`${BASE_URL}/api/auth/login`, {
      email: LOGIN_EMAIL,
      password: LOGIN_PASSWORD,
    });
    console.log('✅ Authentication successful');
    console.log('Login response structure:', Object.keys(loginResponse.data));

    // Token lives at data.data.tokens.accessToken in this API's envelope;
    // optional chaining guards against a differently shaped response.
    const token = loginResponse.data.data?.tokens?.accessToken;
    console.log('Token:', token ? 'Received' : 'Not received');
    if (!token) {
      console.error('No token received from login');
      process.exitCode = 1; // fail the run instead of silently exiting 0
      return;
    }

    // Step 2: trigger LLM processing for the pre-loaded document.
    const response = await axios.post(
      `${BASE_URL}/api/documents/${DOCUMENT_ID}/process`,
      {
        processingType: 'llm',
        template: 'BPCP CIM Review Template',
      },
      {
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${token}`,
        },
      }
    );
    console.log('✅ LLM Processing triggered successfully');
    console.log('Response:', response.data);
  } catch (error) {
    // axios attaches the HTTP response (if any) to error.response; prefer the
    // server's error payload over the generic message when it is available.
    console.error('❌ Error:', error.response?.data || error.message);
    if (error.response?.data) {
      console.error('Full error response:', JSON.stringify(error.response.data, null, 2));
    }
    process.exitCode = 1; // surface the failure to CI / calling shell
  }
}

// Safe floating promise: testLLMProcessing never rejects (errors are caught
// and reported inside), so no .catch() is needed here.
testLLMProcessing();