refactor(services): Migrate to Express proxy gateway (ARCH-002)
- Update mlService.ts to use apiClient with /proxy/ml/* endpoints
- Update llmAgentService.ts to use apiClient with /proxy/llm/* endpoints
- Update backtestService.ts to use apiClient with /proxy/data/* and /proxy/ml/*
- Update adminService.ts to use apiClient for ML model operations

ARCH-002: Frontend services now use authenticated Express proxy
- Token auto-refresh via apiClient interceptor
- Centralized error handling
- Unified session management

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
parent
d3f4aa3385
commit
0f20468381
@@ -1,10 +1,10 @@
|
||||
/**
|
||||
* Admin Service
|
||||
* API client for admin endpoints - models, predictions, agent performance
|
||||
* ARCH-001: ML model calls now go through authenticated Express gateway
|
||||
*/
|
||||
|
||||
const API_URL = import.meta.env.VITE_API_URL || 'http://localhost:3081';
|
||||
const ML_API_URL = import.meta.env.VITE_ML_URL || 'http://localhost:3083';
|
||||
import { apiClient } from '@/lib/apiClient';
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
@@ -131,14 +131,13 @@ export interface SystemHealth {
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// API Functions - ML Models
|
||||
// API Functions - ML Models (via proxy)
|
||||
// ============================================================================
|
||||
|
||||
export async function getMLModels(): Promise<MLModel[]> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/models`);
|
||||
if (!response.ok) throw new Error(`API error: ${response.status}`);
|
||||
return await response.json();
|
||||
const response = await apiClient.get('/proxy/ml/models');
|
||||
return response.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error fetching ML models:', error);
|
||||
return [];
|
||||
@@ -147,13 +146,10 @@ export async function getMLModels(): Promise<MLModel[]> {
|
||||
|
||||
export async function getMLModel(modelId: string): Promise<MLModel | null> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/models/${modelId}/status`);
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) return null;
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
return await response.json();
|
||||
const response = await apiClient.get(`/proxy/ml/models/${modelId}/status`);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
if ((error as { response?: { status: number } }).response?.status === 404) return null;
|
||||
console.error('Error fetching ML model:', error);
|
||||
return null;
|
||||
}
|
||||
@@ -161,12 +157,8 @@ export async function getMLModel(modelId: string): Promise<MLModel | null> {
|
||||
|
||||
export async function updateMLModelStatus(modelId: string, status: 'active' | 'inactive'): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/models/${modelId}/status`, {
|
||||
method: 'PATCH',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ status }),
|
||||
});
|
||||
return response.ok;
|
||||
const response = await apiClient.patch(`/proxy/ml/models/${modelId}/status`, { status });
|
||||
return response.status === 200;
|
||||
} catch (error) {
|
||||
console.error('Error updating model status:', error);
|
||||
return false;
|
||||
@@ -186,18 +178,8 @@ export async function getPredictions(params?: {
|
||||
limit?: number;
|
||||
}): Promise<Prediction[]> {
|
||||
try {
|
||||
const queryParams = new URLSearchParams();
|
||||
if (params?.model_id) queryParams.append('model_id', params.model_id);
|
||||
if (params?.symbol) queryParams.append('symbol', params.symbol);
|
||||
if (params?.start_date) queryParams.append('start_date', params.start_date);
|
||||
if (params?.end_date) queryParams.append('end_date', params.end_date);
|
||||
if (params?.result) queryParams.append('result', params.result);
|
||||
if (params?.limit) queryParams.append('limit', params.limit.toString());
|
||||
|
||||
const response = await fetch(`${API_URL}/api/v1/ml/predictions?${queryParams.toString()}`);
|
||||
if (!response.ok) throw new Error(`API error: ${response.status}`);
|
||||
const data = await response.json();
|
||||
return data.data || [];
|
||||
const response = await apiClient.get('/ml/predictions', { params });
|
||||
return response.data?.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error fetching predictions:', error);
|
||||
return [];
|
||||
@@ -210,10 +192,8 @@ export async function getPredictions(params?: {
|
||||
|
||||
export async function getAgents(): Promise<AgentPerformance[]> {
|
||||
try {
|
||||
const response = await fetch(`${API_URL}/api/v1/agents`);
|
||||
if (!response.ok) throw new Error(`API error: ${response.status}`);
|
||||
const data = await response.json();
|
||||
return data.data || [];
|
||||
const response = await apiClient.get('/agents');
|
||||
return response.data?.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error fetching agents:', error);
|
||||
return [];
|
||||
@@ -222,14 +202,10 @@ export async function getAgents(): Promise<AgentPerformance[]> {
|
||||
|
||||
export async function getAgent(agentId: string): Promise<AgentPerformance | null> {
|
||||
try {
|
||||
const response = await fetch(`${API_URL}/api/v1/agents/${agentId}`);
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) return null;
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
const data = await response.json();
|
||||
return data.data;
|
||||
const response = await apiClient.get(`/agents/${agentId}`);
|
||||
return response.data?.data;
|
||||
} catch (error) {
|
||||
if ((error as { response?: { status: number } }).response?.status === 404) return null;
|
||||
console.error('Error fetching agent:', error);
|
||||
return null;
|
||||
}
|
||||
@@ -237,12 +213,8 @@ export async function getAgent(agentId: string): Promise<AgentPerformance | null
|
||||
|
||||
export async function updateAgentStatus(agentId: string, status: 'active' | 'paused' | 'stopped'): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${API_URL}/api/v1/agents/${agentId}/status`, {
|
||||
method: 'PATCH',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ status }),
|
||||
});
|
||||
return response.ok;
|
||||
const response = await apiClient.patch(`/agents/${agentId}/status`, { status });
|
||||
return response.status === 200;
|
||||
} catch (error) {
|
||||
console.error('Error updating agent status:', error);
|
||||
return false;
|
||||
@@ -260,16 +232,8 @@ export async function getSignalHistory(params?: {
|
||||
limit?: number;
|
||||
}): Promise<SignalHistory[]> {
|
||||
try {
|
||||
const queryParams = new URLSearchParams();
|
||||
if (params?.agent_id) queryParams.append('agent_id', params.agent_id);
|
||||
if (params?.symbol) queryParams.append('symbol', params.symbol);
|
||||
if (params?.status) queryParams.append('status', params.status);
|
||||
if (params?.limit) queryParams.append('limit', params.limit.toString());
|
||||
|
||||
const response = await fetch(`${API_URL}/api/v1/trading/signals?${queryParams.toString()}`);
|
||||
if (!response.ok) throw new Error(`API error: ${response.status}`);
|
||||
const data = await response.json();
|
||||
return data.data || [];
|
||||
const response = await apiClient.get('/trading/signals', { params });
|
||||
return response.data?.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error fetching signal history:', error);
|
||||
return [];
|
||||
@@ -282,10 +246,8 @@ export async function getSignalHistory(params?: {
|
||||
|
||||
export async function getAdminDashboard(): Promise<AdminStats | null> {
|
||||
try {
|
||||
const response = await fetch(`${API_URL}/api/v1/admin/dashboard`);
|
||||
if (!response.ok) throw new Error(`API error: ${response.status}`);
|
||||
const data = await response.json();
|
||||
return data.data;
|
||||
const response = await apiClient.get('/admin/dashboard');
|
||||
return response.data?.data;
|
||||
} catch (error) {
|
||||
console.error('Error fetching admin dashboard:', error);
|
||||
return null;
|
||||
@@ -294,10 +256,8 @@ export async function getAdminDashboard(): Promise<AdminStats | null> {
|
||||
|
||||
export async function getSystemHealth(): Promise<SystemHealth | null> {
|
||||
try {
|
||||
const response = await fetch(`${API_URL}/api/v1/admin/system/health`);
|
||||
if (!response.ok) throw new Error(`API error: ${response.status}`);
|
||||
const data = await response.json();
|
||||
return data.data;
|
||||
const response = await apiClient.get('/admin/system/health');
|
||||
return response.data?.data;
|
||||
} catch (error) {
|
||||
console.error('Error fetching system health:', error);
|
||||
return null;
|
||||
@@ -326,21 +286,12 @@ export async function getUsers(params?: {
|
||||
search?: string;
|
||||
}): Promise<{ users: User[]; total: number; page: number; totalPages: number }> {
|
||||
try {
|
||||
const queryParams = new URLSearchParams();
|
||||
if (params?.page) queryParams.append('page', params.page.toString());
|
||||
if (params?.limit) queryParams.append('limit', params.limit.toString());
|
||||
if (params?.status) queryParams.append('status', params.status);
|
||||
if (params?.role) queryParams.append('role', params.role);
|
||||
if (params?.search) queryParams.append('search', params.search);
|
||||
|
||||
const response = await fetch(`${API_URL}/api/v1/admin/users?${queryParams.toString()}`);
|
||||
if (!response.ok) throw new Error(`API error: ${response.status}`);
|
||||
const data = await response.json();
|
||||
const response = await apiClient.get('/admin/users', { params });
|
||||
return {
|
||||
users: data.data || [],
|
||||
total: data.meta?.total || 0,
|
||||
page: data.meta?.page || 1,
|
||||
totalPages: data.meta?.totalPages || 1,
|
||||
users: response.data?.data || [],
|
||||
total: response.data?.meta?.total || 0,
|
||||
page: response.data?.meta?.page || 1,
|
||||
totalPages: response.data?.meta?.totalPages || 1,
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error fetching users:', error);
|
||||
@@ -350,12 +301,8 @@ export async function getUsers(params?: {
|
||||
|
||||
export async function updateUserStatus(userId: string, status: string, reason?: string): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${API_URL}/api/v1/admin/users/${userId}/status`, {
|
||||
method: 'PATCH',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ status, reason }),
|
||||
});
|
||||
return response.ok;
|
||||
const response = await apiClient.patch(`/admin/users/${userId}/status`, { status, reason });
|
||||
return response.status === 200;
|
||||
} catch (error) {
|
||||
console.error('Error updating user status:', error);
|
||||
return false;
|
||||
@@ -364,12 +311,8 @@ export async function updateUserStatus(userId: string, status: string, reason?:
|
||||
|
||||
export async function updateUserRole(userId: string, role: string): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${API_URL}/api/v1/admin/users/${userId}/role`, {
|
||||
method: 'PATCH',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ role }),
|
||||
});
|
||||
return response.ok;
|
||||
const response = await apiClient.patch(`/admin/users/${userId}/role`, { role });
|
||||
return response.status === 200;
|
||||
} catch (error) {
|
||||
console.error('Error updating user role:', error);
|
||||
return false;
|
||||
@@ -399,20 +342,10 @@ export async function getAuditLogs(params?: {
|
||||
endDate?: string;
|
||||
}): Promise<{ logs: AuditLog[]; total: number }> {
|
||||
try {
|
||||
const queryParams = new URLSearchParams();
|
||||
if (params?.page) queryParams.append('page', params.page.toString());
|
||||
if (params?.limit) queryParams.append('limit', params.limit.toString());
|
||||
if (params?.userId) queryParams.append('userId', params.userId);
|
||||
if (params?.action) queryParams.append('action', params.action);
|
||||
if (params?.startDate) queryParams.append('startDate', params.startDate);
|
||||
if (params?.endDate) queryParams.append('endDate', params.endDate);
|
||||
|
||||
const response = await fetch(`${API_URL}/api/v1/admin/audit/logs?${queryParams.toString()}`);
|
||||
if (!response.ok) throw new Error(`API error: ${response.status}`);
|
||||
const data = await response.json();
|
||||
const response = await apiClient.get('/admin/audit/logs', { params });
|
||||
return {
|
||||
logs: data.data || [],
|
||||
total: data.meta?.total || 0,
|
||||
logs: response.data?.data || [],
|
||||
total: response.data?.meta?.total || 0,
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error fetching audit logs:', error);
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
/**
|
||||
* Backtesting Service
|
||||
* API client for backtesting and historical predictions visualization
|
||||
* API client for backtesting and historical predictions visualization via Express Proxy
|
||||
* ARCH-001: All ML and Data Service calls now go through authenticated Express gateway
|
||||
*/
|
||||
|
||||
const ML_API_URL = import.meta.env.VITE_ML_URL || 'http://localhost:3083';
|
||||
const DATA_SERVICE_URL = import.meta.env.VITE_DATA_SERVICE_URL || 'http://localhost:3084';
|
||||
const API_BASE_URL = import.meta.env.VITE_API_URL || 'http://localhost:3000';
|
||||
import { apiClient } from '@/lib/apiClient';
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
@@ -169,7 +168,7 @@ export interface ModelAccuracy {
|
||||
// ============================================================================
|
||||
|
||||
/**
|
||||
* Get historical OHLCV candles
|
||||
* Get historical OHLCV candles via Data Service proxy
|
||||
*/
|
||||
export async function getHistoricalCandles(
|
||||
symbol: string,
|
||||
@@ -179,22 +178,15 @@ export async function getHistoricalCandles(
|
||||
limit: number = 1000
|
||||
): Promise<CandlesResponse | null> {
|
||||
try {
|
||||
const params = new URLSearchParams({
|
||||
const response = await apiClient.get(`/proxy/data/candles/${symbol}`, {
|
||||
params: {
|
||||
timeframe,
|
||||
start_time: startDate,
|
||||
end_time: endDate,
|
||||
start: startDate,
|
||||
end: endDate,
|
||||
limit: limit.toString(),
|
||||
},
|
||||
});
|
||||
|
||||
const response = await fetch(
|
||||
`${DATA_SERVICE_URL}/api/v1/candles/${symbol}?${params}`
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Error fetching historical candles:', error);
|
||||
return null;
|
||||
@@ -202,25 +194,17 @@ export async function getHistoricalCandles(
|
||||
}
|
||||
|
||||
/**
|
||||
* Run backtest with predictions
|
||||
* Run backtest with predictions via ML proxy
|
||||
*/
|
||||
export async function runBacktest(request: BacktestRequest): Promise<BacktestResult | null> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/api/backtest/run`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Authorization': `Bearer ${localStorage.getItem('token')}`,
|
||||
},
|
||||
body: JSON.stringify(request),
|
||||
const response = await apiClient.post('/proxy/ml/backtest/run', {
|
||||
symbol: request.symbol,
|
||||
strategy: request.strategies[0] || 'ensemble',
|
||||
startDate: request.start_date,
|
||||
endDate: request.end_date,
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.json();
|
||||
throw new Error(error.detail || 'Backtest failed');
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Error running backtest:', error);
|
||||
return null;
|
||||
@@ -228,31 +212,22 @@ export async function runBacktest(request: BacktestRequest): Promise<BacktestRes
|
||||
}
|
||||
|
||||
/**
|
||||
* Get historical predictions for a symbol
|
||||
* Get historical predictions for a symbol via ML proxy
|
||||
*/
|
||||
export async function getHistoricalPredictions(
|
||||
symbol: string,
|
||||
timeframe: string,
|
||||
_timeframe: string,
|
||||
startDate: string,
|
||||
endDate: string
|
||||
): Promise<PredictionPoint[]> {
|
||||
try {
|
||||
const params = new URLSearchParams({
|
||||
timeframe,
|
||||
start_date: startDate,
|
||||
end_date: endDate,
|
||||
const response = await apiClient.get(`/proxy/ml/predictions/history/${symbol}`, {
|
||||
params: {
|
||||
startDate,
|
||||
endDate,
|
||||
},
|
||||
});
|
||||
|
||||
const response = await fetch(
|
||||
`${ML_API_URL}/api/predictions/history/${symbol}?${params}`
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return data.predictions || [];
|
||||
return response.data?.predictions || response.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error fetching historical predictions:', error);
|
||||
return [];
|
||||
@@ -260,7 +235,7 @@ export async function getHistoricalPredictions(
|
||||
}
|
||||
|
||||
/**
|
||||
* Get historical signals for a symbol
|
||||
* Get historical signals for a symbol (uses Express ML routes)
|
||||
*/
|
||||
export async function getHistoricalSignals(
|
||||
symbol: string,
|
||||
@@ -268,26 +243,13 @@ export async function getHistoricalSignals(
|
||||
endDate: string
|
||||
): Promise<TradeSignal[]> {
|
||||
try {
|
||||
const params = new URLSearchParams({
|
||||
const response = await apiClient.get(`/ml/signals/history/${symbol}`, {
|
||||
params: {
|
||||
start_date: startDate,
|
||||
end_date: endDate,
|
||||
});
|
||||
|
||||
const response = await fetch(
|
||||
`${API_BASE_URL}/api/v1/ml/signals/history/${symbol}?${params}`,
|
||||
{
|
||||
headers: {
|
||||
'Authorization': `Bearer ${localStorage.getItem('token')}`,
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return data.signals || [];
|
||||
});
|
||||
return response.data?.signals || response.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error fetching historical signals:', error);
|
||||
return [];
|
||||
@@ -295,31 +257,17 @@ export async function getHistoricalSignals(
|
||||
}
|
||||
|
||||
/**
|
||||
* Get model accuracy metrics
|
||||
* Get model accuracy metrics via ML proxy
|
||||
*/
|
||||
export async function getModelAccuracy(
|
||||
symbol: string,
|
||||
timeframe: string,
|
||||
startDate: string,
|
||||
endDate: string
|
||||
_timeframe: string,
|
||||
_startDate: string,
|
||||
_endDate: string
|
||||
): Promise<ModelAccuracy[]> {
|
||||
try {
|
||||
const params = new URLSearchParams({
|
||||
timeframe,
|
||||
start_date: startDate,
|
||||
end_date: endDate,
|
||||
});
|
||||
|
||||
const response = await fetch(
|
||||
`${ML_API_URL}/api/models/accuracy/${symbol}?${params}`
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return data.models || [];
|
||||
const response = await apiClient.get(`/proxy/ml/models/accuracy/${symbol}`);
|
||||
return response.data?.models || response.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error fetching model accuracy:', error);
|
||||
return [];
|
||||
@@ -327,7 +275,7 @@ export async function getModelAccuracy(
|
||||
}
|
||||
|
||||
/**
|
||||
* Get available date range for a symbol
|
||||
* Get available date range for a symbol via Data Service proxy
|
||||
*/
|
||||
export async function getAvailableDateRange(symbol: string): Promise<{
|
||||
start_date: string;
|
||||
@@ -335,15 +283,8 @@ export async function getAvailableDateRange(symbol: string): Promise<{
|
||||
total_candles: number;
|
||||
} | null> {
|
||||
try {
|
||||
const response = await fetch(
|
||||
`${DATA_SERVICE_URL}/api/v1/symbols/${symbol}/date-range`
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
const response = await apiClient.get(`/proxy/data/symbols/${symbol}/date-range`);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Error fetching date range:', error);
|
||||
return null;
|
||||
@@ -351,7 +292,7 @@ export async function getAvailableDateRange(symbol: string): Promise<{
|
||||
}
|
||||
|
||||
/**
|
||||
* Get available strategies
|
||||
* Get available strategies via ML proxy
|
||||
*/
|
||||
export async function getAvailableStrategies(): Promise<{
|
||||
id: string;
|
||||
@@ -360,14 +301,8 @@ export async function getAvailableStrategies(): Promise<{
|
||||
type: string;
|
||||
}[]> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/api/strategies`);
|
||||
|
||||
if (!response.ok) {
|
||||
return getDefaultStrategies();
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return data.strategies || getDefaultStrategies();
|
||||
const response = await apiClient.get('/proxy/ml/strategies');
|
||||
return response.data?.strategies || response.data || getDefaultStrategies();
|
||||
} catch (error) {
|
||||
console.error('Error fetching strategies:', error);
|
||||
return getDefaultStrategies();
|
||||
@@ -385,36 +320,23 @@ function getDefaultStrategies() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Compare strategies performance
|
||||
* Compare strategies performance via ML proxy
|
||||
*/
|
||||
export async function compareStrategies(
|
||||
symbol: string,
|
||||
timeframe: string,
|
||||
_timeframe: string,
|
||||
startDate: string,
|
||||
endDate: string,
|
||||
strategies: string[]
|
||||
): Promise<StrategyPerformance[]> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/api/backtest/compare`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
const response = await apiClient.post('/proxy/ml/backtest/compare', {
|
||||
symbol,
|
||||
timeframe,
|
||||
start_date: startDate,
|
||||
end_date: endDate,
|
||||
strategies,
|
||||
}),
|
||||
startDate,
|
||||
endDate,
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return data.results || [];
|
||||
return response.data?.results || response.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error comparing strategies:', error);
|
||||
return [];
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
/**
|
||||
* LLM Agent Service
|
||||
* Client for connecting to the LLM Agent API (Predictions & Backtesting)
|
||||
* Client for connecting to the LLM Agent API (Predictions & Backtesting) via Express Proxy
|
||||
* ARCH-001: All LLM calls now go through authenticated Express gateway
|
||||
*
|
||||
* @version 1.0.0
|
||||
* @version 2.0.0
|
||||
*/
|
||||
|
||||
const LLM_AGENT_URL = import.meta.env.VITE_LLM_AGENT_URL || 'http://localhost:3085';
|
||||
import { apiClient } from '@/lib/apiClient';
|
||||
|
||||
// ============================================================================
|
||||
// Types - Predictions
|
||||
@@ -200,17 +201,8 @@ export async function analyzeSymbol(
|
||||
timeframe: string = '5m'
|
||||
): Promise<PredictionResponse | null> {
|
||||
try {
|
||||
const response = await fetch(`${LLM_AGENT_URL}/api/v1/predictions/analyze`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ symbol, timeframe }),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
const response = await apiClient.post('/proxy/llm/analyze', { symbol, timeframe });
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Error analyzing symbol:', error);
|
||||
return null;
|
||||
@@ -221,18 +213,11 @@ export async function getActiveSignals(
|
||||
* Get active signals above minimum confluence
|
||||
*/
|
||||
export async function getActiveSignals(
|
||||
minConfluence: number = 0.6
|
||||
_minConfluence: number = 0.6
|
||||
): Promise<ActiveSignal[]> {
|
||||
try {
|
||||
const response = await fetch(
|
||||
`${LLM_AGENT_URL}/api/v1/predictions/active-signals?min_confluence=${minConfluence}`
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
const response = await apiClient.get('/proxy/llm/signals/active');
|
||||
return response.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error fetching active signals:', error);
|
||||
return [];
|
||||
@@ -244,13 +229,8 @@ export async function getRiskSummary(): Promise<RiskSummary | null> {
|
||||
*/
|
||||
export async function getRiskSummary(): Promise<RiskSummary | null> {
|
||||
try {
|
||||
const response = await fetch(`${LLM_AGENT_URL}/api/v1/predictions/risk-summary`);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
const response = await apiClient.get('/proxy/llm/risk/summary');
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Error fetching risk summary:', error);
|
||||
return null;
|
||||
@@ -264,12 +244,8 @@ export async function setRiskLevel(
|
||||
level: 'minimal' | 'conservative' | 'moderate' | 'aggressive'
|
||||
): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(
|
||||
`${LLM_AGENT_URL}/api/v1/predictions/risk-level?level=${level}`,
|
||||
{ method: 'POST' }
|
||||
);
|
||||
|
||||
return response.ok;
|
||||
const response = await apiClient.post('/proxy/llm/risk/level', { level });
|
||||
return response.status === 200;
|
||||
} catch (error) {
|
||||
console.error('Error setting risk level:', error);
|
||||
return false;
|
||||
@@ -294,17 +270,15 @@ export async function validateTrade(params: {
|
||||
formatted_message: string;
|
||||
} | null> {
|
||||
try {
|
||||
const response = await fetch(`${LLM_AGENT_URL}/api/v1/predictions/validate-trade`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(params),
|
||||
const response = await apiClient.post('/proxy/llm/validate-trade', {
|
||||
symbol: params.symbol,
|
||||
direction: params.side === 'BUY' ? 'long' : 'short',
|
||||
size: params.size,
|
||||
entry: params.entry_price,
|
||||
stopLoss: params.stop_loss,
|
||||
takeProfit: 0, // Will be calculated by backend
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Error validating trade:', error);
|
||||
return null;
|
||||
@@ -322,30 +296,24 @@ export async function startBacktest(
|
||||
params: Partial<BacktestRequest>
|
||||
): Promise<BacktestStatus | null> {
|
||||
try {
|
||||
const request: BacktestRequest = {
|
||||
const request = {
|
||||
symbol: params.symbols?.[0] || 'XAUUSD',
|
||||
strategy: 'ensemble',
|
||||
startDate: new Date(Date.now() - (params.days_back || 365) * 24 * 60 * 60 * 1000).toISOString(),
|
||||
endDate: new Date().toISOString(),
|
||||
parameters: {
|
||||
initial_balance: params.initial_balance || 1000,
|
||||
risk_per_trade_pct: params.risk_per_trade_pct || 1.0,
|
||||
max_open_trades: params.max_open_trades || 3,
|
||||
min_confluence_score: params.min_confluence_score || 0.65,
|
||||
min_confidence: params.min_confidence || 0.60,
|
||||
symbols: params.symbols || ['XAUUSD', 'EURUSD', 'GBPUSD'],
|
||||
timeframe: params.timeframe || '1H',
|
||||
days_back: params.days_back || 365,
|
||||
use_amd_filter: params.use_amd_filter ?? true,
|
||||
use_killzone_filter: params.use_killzone_filter ?? true,
|
||||
},
|
||||
};
|
||||
|
||||
const response = await fetch(`${LLM_AGENT_URL}/api/v1/backtesting/run`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify(request),
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
const response = await apiClient.post('/proxy/llm/backtest/run', request);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Error starting backtest:', error);
|
||||
return null;
|
||||
@@ -361,21 +329,10 @@ export async function runQuickBacktest(params: {
|
||||
symbol?: string;
|
||||
}): Promise<BacktestResult | null> {
|
||||
try {
|
||||
const queryParams = new URLSearchParams({
|
||||
initial_balance: String(params.initial_balance || 1000),
|
||||
days: String(params.days || 30),
|
||||
symbol: params.symbol || 'XAUUSD',
|
||||
const response = await apiClient.get('/proxy/llm/backtest/quick', {
|
||||
params: { symbol: params.symbol || 'XAUUSD' },
|
||||
});
|
||||
|
||||
const response = await fetch(
|
||||
`${LLM_AGENT_URL}/api/v1/backtesting/quick-test?${queryParams}`
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Error running quick backtest:', error);
|
||||
return null;
|
||||
@@ -387,15 +344,10 @@ export async function runQuickBacktest(params: {
|
||||
*/
|
||||
export async function getBacktestStatus(id: string): Promise<BacktestStatus | null> {
|
||||
try {
|
||||
const response = await fetch(`${LLM_AGENT_URL}/api/v1/backtesting/status/${id}`);
|
||||
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) return null;
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
const response = await apiClient.get(`/proxy/llm/backtest/status/${id}`);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
if ((error as { response?: { status: number } }).response?.status === 404) return null;
|
||||
console.error('Error fetching backtest status:', error);
|
||||
return null;
|
||||
}
|
||||
@@ -406,15 +358,10 @@ export async function getBacktestStatus(id: string): Promise<BacktestStatus | nu
|
||||
*/
|
||||
export async function getBacktestResults(id: string): Promise<BacktestResult | null> {
|
||||
try {
|
||||
const response = await fetch(`${LLM_AGENT_URL}/api/v1/backtesting/results/${id}`);
|
||||
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) return null;
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
const response = await apiClient.get(`/proxy/llm/backtest/results/${id}`);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
if ((error as { response?: { status: number } }).response?.status === 404) return null;
|
||||
console.error('Error fetching backtest results:', error);
|
||||
return null;
|
||||
}
|
||||
@@ -425,15 +372,10 @@ export async function getBacktestResults(id: string): Promise<BacktestResult | n
|
||||
*/
|
||||
export async function listBacktests(limit: number = 10): Promise<BacktestSummary[]> {
|
||||
try {
|
||||
const response = await fetch(
|
||||
`${LLM_AGENT_URL}/api/v1/backtesting/list?limit=${limit}`
|
||||
);
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
|
||||
return await response.json();
|
||||
const response = await apiClient.get('/proxy/llm/backtest/list', {
|
||||
params: { limit },
|
||||
});
|
||||
return response.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error listing backtests:', error);
|
||||
return [];
|
||||
@ -445,11 +387,8 @@ export async function listBacktests(limit: number = 10): Promise<BacktestSummary
|
||||
*/
|
||||
export async function deleteBacktest(id: string): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${LLM_AGENT_URL}/api/v1/backtesting/${id}`, {
|
||||
method: 'DELETE',
|
||||
});
|
||||
|
||||
return response.ok;
|
||||
const response = await apiClient.delete(`/proxy/llm/backtest/${id}`);
|
||||
return response.status === 200;
|
||||
} catch (error) {
|
||||
console.error('Error deleting backtest:', error);
|
||||
return false;
|
||||
@ -475,15 +414,10 @@ export async function checkHealth(): Promise<{
|
||||
};
|
||||
|
||||
try {
|
||||
const [rootRes, predictionsRes, backtestingRes] = await Promise.allSettled([
|
||||
fetch(`${LLM_AGENT_URL}/`),
|
||||
fetch(`${LLM_AGENT_URL}/api/v1/predictions/health`),
|
||||
fetch(`${LLM_AGENT_URL}/api/v1/backtesting/health`),
|
||||
]);
|
||||
|
||||
results.llmAgent = rootRes.status === 'fulfilled' && rootRes.value.ok;
|
||||
results.predictions = predictionsRes.status === 'fulfilled' && predictionsRes.value.ok;
|
||||
results.backtesting = backtestingRes.status === 'fulfilled' && backtestingRes.value.ok;
|
||||
const response = await apiClient.get('/proxy/llm/health');
|
||||
results.llmAgent = response.data?.status === 'healthy';
|
||||
results.predictions = results.llmAgent;
|
||||
results.backtesting = results.llmAgent;
|
||||
} catch (error) {
|
||||
console.error('Error checking LLM Agent health:', error);
|
||||
}
|
||||
|
||||
@ -1,9 +1,10 @@
|
||||
/**
|
||||
* ML Engine Service
|
||||
* Client for connecting to the ML Engine API
|
||||
* Client for connecting to the ML Engine API via Express Proxy
|
||||
* ARCH-001: All ML calls now go through authenticated Express gateway
|
||||
*/
|
||||
|
||||
const ML_API_URL = import.meta.env.VITE_ML_URL || 'http://localhost:3083';
|
||||
import { apiClient } from '@/lib/apiClient';
|
||||
|
||||
// ============================================================================
|
||||
// Types
|
||||
@ -76,14 +77,10 @@ export interface BacktestResult {
|
||||
*/
|
||||
export async function getLatestSignal(symbol: string): Promise<MLSignal | null> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/api/v1/signals/latest/${symbol}`);
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) return null;
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
const data = await response.json();
|
||||
return data.signal || null;
|
||||
const response = await apiClient.get(`/proxy/ml/signals/latest/${symbol}`);
|
||||
return response.data?.signal || response.data || null;
|
||||
} catch (error) {
|
||||
if ((error as { response?: { status: number } }).response?.status === 404) return null;
|
||||
console.error('Error fetching latest signal:', error);
|
||||
return null;
|
||||
}
|
||||
@ -94,12 +91,8 @@ export async function getLatestSignal(symbol: string): Promise<MLSignal | null>
|
||||
*/
|
||||
export async function getActiveSignals(): Promise<MLSignal[]> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/api/v1/signals/active`);
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
const data = await response.json();
|
||||
return data.signals || [];
|
||||
const response = await apiClient.get('/proxy/ml/signals/active');
|
||||
return response.data?.signals || response.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error fetching active signals:', error);
|
||||
return [];
|
||||
@ -111,14 +104,10 @@ export async function getActiveSignals(): Promise<MLSignal[]> {
|
||||
*/
|
||||
export async function getAMDPhase(symbol: string): Promise<AMDPhase | null> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/api/v1/amd/detect/${symbol}`);
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) return null;
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
const data = await response.json();
|
||||
return data;
|
||||
const response = await apiClient.get(`/proxy/ml/amd/${symbol}`);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
if ((error as { response?: { status: number } }).response?.status === 404) return null;
|
||||
console.error('Error fetching AMD phase:', error);
|
||||
return null;
|
||||
}
|
||||
@ -129,19 +118,13 @@ export async function getAMDPhase(symbol: string): Promise<AMDPhase | null> {
|
||||
*/
|
||||
export async function getRangePrediction(
|
||||
symbol: string,
|
||||
timeframe: string = '1h'
|
||||
_timeframe: string = '1h'
|
||||
): Promise<RangePrediction | null> {
|
||||
try {
|
||||
const response = await fetch(
|
||||
`${ML_API_URL}/api/v1/predict/range/${symbol}?timeframe=${timeframe}`
|
||||
);
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) return null;
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
const data = await response.json();
|
||||
return data;
|
||||
const response = await apiClient.get(`/proxy/ml/predict/range/${symbol}`);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
if ((error as { response?: { status: number } }).response?.status === 404) return null;
|
||||
console.error('Error fetching range prediction:', error);
|
||||
return null;
|
||||
}
|
||||
@ -152,18 +135,8 @@ export async function getRangePrediction(
|
||||
*/
|
||||
export async function generateSignal(symbol: string): Promise<MLSignal | null> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/api/v1/signals/generate`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({ symbol }),
|
||||
});
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
const data = await response.json();
|
||||
return data.signal || null;
|
||||
const response = await apiClient.post('/proxy/ml/signals/generate', { symbol });
|
||||
return response.data?.signal || response.data || null;
|
||||
} catch (error) {
|
||||
console.error('Error generating signal:', error);
|
||||
return null;
|
||||
@ -181,18 +154,13 @@ export async function runBacktest(params: {
|
||||
initial_capital?: number;
|
||||
}): Promise<BacktestResult | null> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/api/v1/backtest/run`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify(params),
|
||||
const response = await apiClient.post('/proxy/ml/backtest/run', {
|
||||
symbol: params.symbol,
|
||||
strategy: params.strategy,
|
||||
startDate: params.start_date,
|
||||
endDate: params.end_date,
|
||||
});
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
const data = await response.json();
|
||||
return data;
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
console.error('Error running backtest:', error);
|
||||
return null;
|
||||
@ -204,8 +172,8 @@ export async function runBacktest(params: {
|
||||
*/
|
||||
export async function checkHealth(): Promise<boolean> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/health`);
|
||||
return response.ok;
|
||||
const response = await apiClient.get('/proxy/ml/health');
|
||||
return response.data?.status === 'healthy';
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
@ -288,16 +256,10 @@ export async function getICTAnalysis(
|
||||
timeframe: string = '1H'
|
||||
): Promise<ICTAnalysis | null> {
|
||||
try {
|
||||
const response = await fetch(
|
||||
`${ML_API_URL}/api/ict/${symbol}?timeframe=${timeframe}`,
|
||||
{ method: 'POST' }
|
||||
);
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) return null;
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
return await response.json();
|
||||
const response = await apiClient.post(`/proxy/ml/ict/${symbol}`, { timeframe });
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
if ((error as { response?: { status: number } }).response?.status === 404) return null;
|
||||
console.error('Error fetching ICT analysis:', error);
|
||||
return null;
|
||||
}
|
||||
@ -308,19 +270,13 @@ export async function getICTAnalysis(
|
||||
*/
|
||||
export async function getEnsembleSignal(
|
||||
symbol: string,
|
||||
timeframe: string = '1H'
|
||||
_timeframe: string = '1H'
|
||||
): Promise<EnsembleSignal | null> {
|
||||
try {
|
||||
const response = await fetch(
|
||||
`${ML_API_URL}/api/ensemble/${symbol}?timeframe=${timeframe}`,
|
||||
{ method: 'POST' }
|
||||
);
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) return null;
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
return await response.json();
|
||||
const response = await apiClient.post(`/proxy/ml/ensemble/${symbol}`, {});
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
if ((error as { response?: { status: number } }).response?.status === 404) return null;
|
||||
console.error('Error fetching ensemble signal:', error);
|
||||
return null;
|
||||
}
|
||||
@ -336,13 +292,10 @@ export async function getQuickSignal(symbol: string): Promise<{
|
||||
score: number;
|
||||
} | null> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/api/ensemble/quick/${symbol}`);
|
||||
if (!response.ok) {
|
||||
if (response.status === 404) return null;
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
return await response.json();
|
||||
const response = await apiClient.get(`/proxy/ml/ensemble/quick/${symbol}`);
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
if ((error as { response?: { status: number } }).response?.status === 404) return null;
|
||||
console.error('Error fetching quick signal:', error);
|
||||
return null;
|
||||
}
|
||||
@ -356,20 +309,11 @@ export async function scanSymbols(
|
||||
minConfidence: number = 0.6
|
||||
): Promise<ScanResult[]> {
|
||||
try {
|
||||
const response = await fetch(`${ML_API_URL}/api/scan`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
const response = await apiClient.post('/proxy/ml/scan', {
|
||||
symbols,
|
||||
min_confidence: minConfidence,
|
||||
timeframe: '1H',
|
||||
}),
|
||||
filters: { min_confidence: minConfidence, timeframe: '1H' },
|
||||
});
|
||||
if (!response.ok) {
|
||||
throw new Error(`API error: ${response.status}`);
|
||||
}
|
||||
const data = await response.json();
|
||||
return data.results || [];
|
||||
return response.data?.results || response.data || [];
|
||||
} catch (error) {
|
||||
console.error('Error scanning symbols:', error);
|
||||
return [];
|
||||
|
||||
Loading…
Reference in New Issue
Block a user