- Configure workspace Git repository with comprehensive .gitignore - Add Odoo as submodule for ERP reference code - Include documentation: SETUP.md, GIT-STRUCTURE.md - Add gitignore templates for projects (backend, frontend, database) - Structure supports independent repos per project/subproject level Workspace includes: - core/ - Reusable patterns, modules, orchestration system - projects/ - Active projects (erp-suite, gamilit, trading-platform, etc.) - knowledge-base/ - Reference code and patterns (includes Odoo submodule) - devtools/ - Development tools and templates - customers/ - Client implementations template 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
355 lines
12 KiB
Python
Executable File
355 lines
12 KiB
Python
Executable File
#!/usr/bin/env python3
|
||
"""
|
||
Integration Test Script for OrbiQuant IA Trading Platform
|
||
Tests connectivity and basic functionality between all services
|
||
"""
|
||
|
||
import asyncio
|
||
import aiohttp
|
||
import sys
|
||
from datetime import datetime
|
||
from typing import Optional
|
||
|
||
# Base URL for every locally running service, keyed by logical service name.
# All services are expected on localhost; only the port differs.
SERVICES = {
    name: f"http://localhost:{port}"
    for name, port in [
        ("frontend", 5173),
        ("backend", 3000),
        ("ml_engine", 8001),
        ("llm_agent", 8003),
        ("trading_agents", 8004),
        ("ollama", 11434),
    ]
}
|
||
|
||
class Colors:
    """ANSI SGR escape sequences for coloured terminal output."""
    GREEN = "\033[92m"   # bright green: success marks
    RED = "\033[91m"     # bright red: errors and failures
    YELLOW = "\033[93m"  # bright yellow: warnings
    BLUE = "\033[94m"    # bright blue: informational notes
    RESET = "\033[0m"    # reset all attributes to the terminal default
    BOLD = "\033[1m"     # bold/bright text (used for section headers)
|
||
|
||
def log_success(msg: str) -> None:
    """Print *msg* prefixed with a green check mark."""
    mark = Colors.GREEN + "✓" + Colors.RESET
    print(mark + " " + msg)
|
||
|
||
def log_error(msg: str) -> None:
    """Print *msg* prefixed with a red cross."""
    mark = Colors.RED + "✗" + Colors.RESET
    print(mark + " " + msg)
|
||
|
||
def log_warning(msg: str) -> None:
    """Print *msg* prefixed with a yellow warning sign."""
    mark = Colors.YELLOW + "⚠" + Colors.RESET
    print(mark + " " + msg)
|
||
|
||
def log_info(msg: str) -> None:
    """Print *msg* prefixed with a blue info mark."""
    mark = Colors.BLUE + "ℹ" + Colors.RESET
    print(mark + " " + msg)
|
||
|
||
def log_header(msg: str) -> None:
    """Print *msg* as a bold section header over a 50-character rule."""
    header = "".join(["\n", Colors.BOLD, msg, Colors.RESET])
    print(header)
    print("=" * 50)
|
||
|
||
|
||
async def check_service_health(session: aiohttp.ClientSession, name: str, url: str) -> bool:
    """Check whether a service answers any of its known health endpoints.

    Tries each candidate endpoint in order and treats the first HTTP 200
    response as healthy; connection errors and timeouts on one endpoint
    fall through to the next candidate.

    Args:
        session: Shared aiohttp client session used for all probes.
        name: Human-readable service name (used only in log output).
        url: Base URL of the service, without a trailing slash.

    Returns:
        True if any probed endpoint returned HTTP 200, False otherwise.
    """
    # Candidate health endpoints, most specific first; "/" is the last resort.
    health_endpoints = ["/health", "/api/health", "/api/v1/health", "/"]
    # Hoisted out of the loop: the same 5-second budget applies to every probe.
    timeout = aiohttp.ClientTimeout(total=5)

    for endpoint in health_endpoints:
        try:
            async with session.get(f"{url}{endpoint}", timeout=timeout) as resp:
                if resp.status == 200:
                    log_success(f"{name}: Running at {url}")
                    return True
        except (aiohttp.ClientError, asyncio.TimeoutError):
            # This endpoint is unreachable or slow -- try the next candidate.
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt and asyncio.CancelledError.
            continue

    log_error(f"{name}: Not responding at {url}")
    return False
|
||
|
||
|
||
async def test_ml_engine(session: aiohttp.ClientSession) -> bool:
    """Probe the ML Engine service: health, AMD detection, signal generation.

    Returns True only when every probe succeeds.
    """
    log_header("Testing ML Engine")

    root = SERVICES["ml_engine"]
    ok_count = 0
    run_count = 0

    # Probe 1: the health endpoint must answer 200.
    run_count += 1
    try:
        async with session.get(f"{root}/health") as resp:
            if resp.status == 200:
                log_success("Health check passed")
                ok_count += 1
            else:
                log_error(f"Health check failed: {resp.status}")
    except Exception as e:
        log_error(f"Health check error: {e}")

    # Probe 2: AMD detection; a 404 just means no data for the symbol yet.
    run_count += 1
    try:
        async with session.get(f"{root}/api/v1/amd/detect/BTCUSDT") as resp:
            if resp.status in (200, 404):
                log_success("AMD detection endpoint accessible")
                ok_count += 1
            else:
                log_warning(f"AMD detection returned: {resp.status}")
    except Exception as e:
        log_error(f"AMD detection error: {e}")

    # Probe 3: signal generation; 400/404 still prove the route exists.
    run_count += 1
    try:
        async with session.post(
            f"{root}/api/v1/signals/generate",
            json={"symbol": "BTCUSDT"},
        ) as resp:
            if resp.status in (200, 201, 400, 404):
                log_success("Signal generation endpoint accessible")
                ok_count += 1
            else:
                log_warning(f"Signal generation returned: {resp.status}")
    except Exception as e:
        log_error(f"Signal generation error: {e}")

    return ok_count == run_count
|
||
|
||
|
||
async def test_llm_agent(session: aiohttp.ClientSession) -> bool:
    """Probe the LLM Agent service: health, chat round-trip, model listing.

    Returns True only when every probe succeeds.
    """
    log_header("Testing LLM Agent")

    root = SERVICES["llm_agent"]
    ok_count = 0
    run_count = 0

    # Probe 1: the health endpoint must answer 200.
    run_count += 1
    try:
        async with session.get(f"{root}/health") as resp:
            if resp.status == 200:
                log_success("Health check passed")
                ok_count += 1
            else:
                log_error(f"Health check failed: {resp.status}")
    except Exception as e:
        log_error(f"Health check error: {e}")

    # Probe 2: minimal non-streaming chat round-trip on the free plan.
    run_count += 1
    payload = {
        "user_id": "test-user",
        "conversation_id": "test-conv",
        "message": "Hello",
        "user_plan": "free",
        "stream": False,
    }
    try:
        async with session.post(f"{root}/api/v1/chat", json=payload) as resp:
            if resp.status in (200, 201):
                data = await resp.json()
                log_success(f"Chat endpoint working - Response length: {len(data.get('response', ''))}")
                ok_count += 1
            else:
                log_warning(f"Chat endpoint returned: {resp.status}")
    except Exception as e:
        log_error(f"Chat endpoint error: {e}")

    # Probe 3: model inventory listing.
    run_count += 1
    try:
        async with session.get(f"{root}/api/v1/models") as resp:
            if resp.status == 200:
                data = await resp.json()
                log_success(f"Models available: {data.get('models', [])}")
                ok_count += 1
            else:
                log_warning(f"Models endpoint returned: {resp.status}")
    except Exception as e:
        log_error(f"Models endpoint error: {e}")

    return ok_count == run_count
|
||
|
||
|
||
async def test_trading_agents(session: aiohttp.ClientSession) -> bool:
    """Probe the Trading Agents service: health, agent roster, account info.

    Returns True only when every probe succeeds.
    """
    log_header("Testing Trading Agents")

    root = SERVICES["trading_agents"]
    ok_count = 0
    run_count = 0

    # Probe 1: the health endpoint must answer 200.
    run_count += 1
    try:
        async with session.get(f"{root}/health") as resp:
            if resp.status == 200:
                log_success("Health check passed")
                ok_count += 1
            else:
                log_error(f"Health check failed: {resp.status}")
    except Exception as e:
        log_error(f"Health check error: {e}")

    # Probe 2: agent roster; list each agent's name and status.
    run_count += 1
    try:
        async with session.get(f"{root}/api/v1/agents") as resp:
            if resp.status != 200:
                log_warning(f"Agents endpoint returned: {resp.status}")
            else:
                roster = (await resp.json()).get('agents', [])
                log_success(f"Agents available: {len(roster)}")
                for agent in roster:
                    log_info(f" - {agent.get('name', 'Unknown')}: {agent.get('status', 'unknown')}")
                ok_count += 1
    except Exception as e:
        log_error(f"Agents endpoint error: {e}")

    # Probe 3: account info; auth failures still count as "reachable".
    run_count += 1
    try:
        async with session.get(f"{root}/api/v1/account") as resp:
            # 401/403 are expected without API keys -- the endpoint is alive.
            if resp.status in (200, 401, 403):
                log_success("Account endpoint accessible")
                ok_count += 1
            else:
                log_warning(f"Account endpoint returned: {resp.status}")
    except Exception as e:
        log_error(f"Account endpoint error: {e}")

    return ok_count == run_count
|
||
|
||
|
||
async def test_ollama(session: aiohttp.ClientSession) -> bool:
    """Verify the Ollama LLM runtime is up and list its installed models.

    Returns True only when the tags endpoint answers 200.
    """
    log_header("Testing Ollama")

    root = SERVICES["ollama"]
    ok_count = 0
    run_count = 0

    # Single probe: /api/tags doubles as a health check and a model listing.
    run_count += 1
    try:
        async with session.get(f"{root}/api/tags") as resp:
            if resp.status != 200:
                log_error(f"Ollama API returned: {resp.status}")
            else:
                installed = (await resp.json()).get('models', [])
                log_success(f"Ollama running - {len(installed)} models available")
                for model in installed:
                    log_info(f" - {model.get('name', 'Unknown')}")
                ok_count += 1

                # Reachable but empty: point the operator at the pull command.
                if not installed:
                    log_warning("No models installed. Run: docker exec orbiquant-ollama ollama pull llama3:8b")
    except Exception as e:
        log_error(f"Ollama not running: {e}")
        log_info("Start with: docker-compose -f apps/llm-agent/docker-compose.ollama.yml up -d")

    return ok_count == run_count
|
||
|
||
|
||
async def test_integration_flow(session: aiohttp.ClientSession) -> bool:
    """End-to-end check that a chat request drives the ML Engine.

    Flow under test: user asks for a signal -> LLM Agent processes the
    prompt -> it calls the ML Engine as a tool -> a signal comes back in
    the reply. Returns True when the chat round-trip succeeds.
    """
    log_header("Testing Integration Flow")

    # Fresh conversation id per run so prior context cannot leak in.
    request_body = {
        "user_id": "integration-test",
        "conversation_id": f"test-{datetime.now().timestamp()}",
        "message": "Get the current signal for BTCUSDT",
        "user_plan": "pro",
        "stream": False,
    }

    try:
        # Step 1: request a signal through the LLM Agent's chat endpoint.
        log_info("Step 1: Requesting signal through LLM Agent...")
        async with session.post(
            f"{SERVICES['llm_agent']}/api/v1/chat",
            json=request_body,
            timeout=aiohttp.ClientTimeout(total=30),
        ) as resp:
            if resp.status != 200:
                log_error(f"Integration flow failed: {resp.status}")
                return False

            data = await resp.json()
            reply = data.get('response', '')
            tools_used = data.get('tools_used', [])

            log_success(f"LLM Response received ({len(reply)} chars)")
            if tools_used:
                log_success(f"Tools used: {', '.join(tools_used)}")
            else:
                log_warning("No tools were called (ML integration may need verification)")

            return True
    except Exception as e:
        log_error(f"Integration flow error: {e}")
        return False
|
||
|
||
|
||
async def main():
    """Run the full integration suite against all configured services.

    Health-checks every service in SERVICES, then runs the per-service test
    suites only for services that responded, and finally the end-to-end
    integration flow when both the LLM Agent and ML Engine are up.

    Returns:
        0 if every recorded check passed, 1 otherwise (process exit code).
    """
    print(f"""
{Colors.BOLD}╔══════════════════════════════════════════════════════════╗
║          OrbiQuant IA - Integration Test Suite           ║
║      Testing all service connections and endpoints       ║
╚══════════════════════════════════════════════════════════╝{Colors.RESET}
""")

    print(f"Test started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")

    # Maps check name -> bool outcome; drives the summary at the end.
    results = {}

    async with aiohttp.ClientSession() as session:
        # Check all services are running
        log_header("Service Health Checks")

        for name, url in SERVICES.items():
            results[name] = await check_service_health(session, name, url)

        # Run the deeper per-service suites only for services that are up.
        if results.get("ml_engine"):
            results["ml_tests"] = await test_ml_engine(session)

        if results.get("llm_agent"):
            results["llm_tests"] = await test_llm_agent(session)

        if results.get("trading_agents"):
            results["trading_tests"] = await test_trading_agents(session)

        if results.get("ollama"):
            results["ollama_tests"] = await test_ollama(session)

        # The end-to-end flow needs both the LLM Agent and the ML Engine.
        if results.get("llm_agent") and results.get("ml_engine"):
            results["integration"] = await test_integration_flow(session)

    # Summary
    log_header("Test Summary")

    passed = sum(1 for v in results.values() if v)
    total = len(results)

    print(f"\nResults: {passed}/{total} tests passed")

    if passed == total:
        print(f"\n{Colors.GREEN}{Colors.BOLD}All tests passed! ✓{Colors.RESET}")
        return 0
    else:
        failed = [k for k, v in results.items() if not v]
        print(f"\n{Colors.RED}Failed tests: {', '.join(failed)}{Colors.RESET}")
        return 1
|
||
|
||
|
||
if __name__ == "__main__":
    # Propagate the suite's result directly as the process exit status.
    sys.exit(asyncio.run(main()))
|