[TS-FIX] fix: Resolve TypeScript errors in ai module

- Remove unused imports (DataSource, ERP_ROLES, LessThan, MoreThanOrEqual)
- Fix AIUsageLog properties (promptTokens, completionTokens instead of inputTokens, outputTokens)
- Fix AIModel properties (modelId instead of externalId, inputCostPer1k instead of inputCostPer1m)
- Add types to API response parsing
- Prefix unused parameters with underscore

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
Adrian Flores Cortes 2026-01-25 10:17:00 -06:00
parent 99064f5f24
commit 78c30d315d
3 changed files with 22 additions and 25 deletions

View File

@ -49,7 +49,7 @@ export class AIController {
// MODELS
// ============================================
private async findAllModels(req: Request, res: Response, next: NextFunction): Promise<void> {
private async findAllModels(_req: Request, res: Response, next: NextFunction): Promise<void> {
try {
const models = await this.aiService.findAllModels();
res.json({ data: models, total: models.length });

View File

@ -1,4 +1,4 @@
import { Repository, FindOptionsWhere, LessThan, MoreThanOrEqual } from 'typeorm';
import { Repository, FindOptionsWhere } from 'typeorm';
import { AIModel, AIConversation, AIMessage, AIPrompt, AIUsageLog, AITenantQuota } from '../entities';
export interface ConversationFilters {

View File

@ -7,7 +7,7 @@
* Basado en: michangarrito MCH-012/MCH-013
*/
import { Repository, DataSource } from 'typeorm';
import { Repository } from 'typeorm';
import {
AIModel,
AIConversation,
@ -19,7 +19,6 @@ import {
import { AIService } from './ai.service';
import {
ERPRole,
ERP_ROLES,
getERPRole,
hasToolAccess,
getToolsForRole,
@ -246,7 +245,7 @@ export class RoleBasedAIService extends AIService {
const tool = this.toolRegistry.get(toolCall.name);
if (tool?.handler) {
try {
const result = await tool.handler(toolCall.arguments, context);
await tool.handler(toolCall.arguments, context);
// El resultado se incorpora a la respuesta
// En una implementación completa, se haría otra llamada a la API
} catch (error: any) {
@ -271,9 +270,10 @@ export class RoleBasedAIService extends AIService {
await this.logUsage(context.tenantId, {
modelId: model.id,
conversationId: conversation.id,
inputTokens: response.tokensUsed.input,
outputTokens: response.tokensUsed.output,
costUsd: this.calculateCost(model, response.tokensUsed),
promptTokens: response.tokensUsed.input,
completionTokens: response.tokensUsed.output,
totalTokens: response.tokensUsed.total,
cost: this.calculateCost(model, response.tokensUsed),
usageType: 'chat',
});
@ -338,7 +338,7 @@ export class RoleBasedAIService extends AIService {
/**
* Obtener modelo por defecto para el tenant
*/
private async getDefaultModel(tenantId: string): Promise<AIModel | null> {
private async getDefaultModel(_tenantId: string): Promise<AIModel | null> {
// Buscar configuración del tenant o usar default
const models = await this.findAllModels();
return models.find((m) => m.isDefault) || models[0] || null;
@ -372,7 +372,7 @@ export class RoleBasedAIService extends AIService {
'HTTP-Referer': process.env.APP_URL || 'https://erp.local',
},
body: JSON.stringify({
model: model.externalId || model.code,
model: model.modelId || model.code,
messages: messages.map((m) => ({
role: m.role,
content: m.content,
@ -393,12 +393,12 @@ export class RoleBasedAIService extends AIService {
});
if (!response.ok) {
const error = await response.json().catch(() => ({}));
throw new Error(error.error?.message || 'AI provider error');
const errorData = await response.json().catch(() => ({}) as Record<string, any>);
throw new Error((errorData as any).error?.message || 'AI provider error');
}
const data = await response.json();
const choice = data.choices?.[0];
const data = await response.json() as Record<string, any>;
const choice = (data.choices as any[])?.[0];
return {
content: choice?.message?.content || '',
@ -408,9 +408,9 @@ export class RoleBasedAIService extends AIService {
arguments: JSON.parse(tc.function?.arguments || '{}'),
})),
tokensUsed: {
input: data.usage?.prompt_tokens || 0,
output: data.usage?.completion_tokens || 0,
total: data.usage?.total_tokens || 0,
input: (data.usage as any)?.prompt_tokens || 0,
output: (data.usage as any)?.completion_tokens || 0,
total: (data.usage as any)?.total_tokens || 0,
},
};
}
@ -422,22 +422,19 @@ export class RoleBasedAIService extends AIService {
model: AIModel,
tokens: { input: number; output: number }
): number {
const inputCost = (tokens.input / 1000000) * (model.inputCostPer1m || 0);
const outputCost = (tokens.output / 1000000) * (model.outputCostPer1m || 0);
const inputCost = (tokens.input / 1000) * (model.inputCostPer1k || 0);
const outputCost = (tokens.output / 1000) * (model.outputCostPer1k || 0);
return inputCost + outputCost;
}
/**
* Limpiar conversación antigua (para liberar memoria)
*/
cleanupOldConversations(maxAgeMinutes: number = 60): void {
const now = Date.now();
const maxAge = maxAgeMinutes * 60 * 1000;
cleanupOldConversations(_maxAgeMinutes: number = 60): void {
// En una implementación real, esto estaría en Redis o similar
// Por ahora limpiamos el Map en memoria
for (const [key, _] of this.conversationHistory) {
// Implementar lógica de limpieza basada en timestamp
for (const [_key, _value] of this.conversationHistory) {
// TODO: Implementar lógica de limpieza basada en timestamp
}
}
}