feat(agents): add 4-layer response quality control — structured outputs, LLM judge, smart truncation

AI回复质量硬约束系统,解决核心问题:AI无法用最少的语言精准回答用户问题。

## 四层防线架构

### Layer 1 — Prompt 优化 (软约束)
- coordinator-system-prompt.ts: 新增"最高优先级原则:精准回答"章节
  - 意图分类表(7种)+ 每种对应长度和回答策略
  - 错误示范 vs 正确示范对比
  - "宁可太短,不可太长"原则
  - 最终提醒三条:精准回答 > 准确性 > 简洁就是专业
- policy-expert-prompt.ts: 精简输出格式
- objection-handler-prompt.ts: 微调

### Layer 2 — Structured Outputs (格式约束)
- 新文件 coordinator-response.schema.ts: Zod schema 定义
  - intent: 7种意图分类 (factual_question/yes_no_question/comparison_question/assessment_request/objection_expression/detailed_consultation/casual_chat)
  - answer: 回复文本
  - followUp: 可选跟进问题
- agent-loop.ts: 通过 output_config 传入 Claude API,强制 JSON 输出
  - 流式模式下抑制 text delta(JSON 片段不展示给用户)
  - 流结束后解析 JSON,提取 answer 字段 yield 给前端
  - JSON 解析失败时回退到原始文本(安全降级)
- coordinator-agent.service.ts: 传入 zodOutputFormat(CoordinatorResponseSchema)
- agent.types.ts: AgentLoopParams 新增 outputConfig 字段

### Layer 3 — LLM-as-Judge (语义质检)
- evaluation-rule.entity.ts: 新增 LLM_JUDGE 规则类型(第9种)
- evaluation-gate.service.ts:
  - 注入 ConfigService + 初始化 Anthropic client (Haiku 4.5)
  - evaluateRule 改为 async(支持异步 LLM 调用)
  - 新增 checkLlmJudge():评估 relevance/conciseness/noise 三维度
  - 可配置阈值:minRelevance(7), minConciseness(6), maxNoise(3)
  - 5s 超时 + 异常默认通过(非阻塞)
  - EvaluationContext 新增 userMessage 字段
- coordinator-agent.service.ts: 传入 userMessage 到评估门控

### Layer 4 — 程序级硬截断 (物理约束)
- coordinator-response.schema.ts:
  - INTENT_MAX_ANSWER_LENGTH: 按意图限制字符数
    factual=200, yes_no=120, comparison=250, assessment=400,
    objection=200, detailed=500, casual=80
  - MAX_FOLLOWUP_LENGTH: 80 字符
  - smartTruncate(): 在句子边界处智能截断(中英文标点)
- agent-loop.ts: JSON 解析后按 intent 强制截断 answer 和 followUp
- max_tokens 从 4096 降至 2048

## Bug 修复
- agent-loop.ts: currentTextContent 在 content_block_stop 时被重置为空字符串,
  导致评估门控收到空文本。改为从 finalMessage.content 提取 responseText。

## 依赖升级
- @anthropic-ai/sdk: 0.52.0 → 0.73.0 (支持 output_config)
- 新增 zod@4.3.6 (Structured Output schema 定义)

## 文件清单 (1 new + 10 modified)
- NEW: agents/schemas/coordinator-response.schema.ts
- MOD: agents/coordinator/agent-loop.ts (核心改造)
- MOD: agents/coordinator/coordinator-agent.service.ts
- MOD: agents/coordinator/evaluation-gate.service.ts
- MOD: agents/types/agent.types.ts
- MOD: agents/prompts/coordinator-system-prompt.ts
- MOD: agents/prompts/policy-expert-prompt.ts
- MOD: agents/prompts/objection-handler-prompt.ts
- MOD: domain/entities/evaluation-rule.entity.ts
- MOD: package.json + pnpm-lock.yaml

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
hailin 2026-02-07 01:01:05 -08:00
parent 93ed3343de
commit bb1a1139a3
11 changed files with 387 additions and 80 deletions

View File

@ -20,7 +20,7 @@
"migration:generate": "npm run typeorm migration:generate -- -d src/data-source.ts"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.52.0",
"@anthropic-ai/sdk": "^0.73.0",
"@iconsulting/shared": "workspace:*",
"@modelcontextprotocol/sdk": "^1.26.0",
"@nestjs/common": "^10.0.0",
@ -40,7 +40,8 @@
"rxjs": "^7.8.0",
"socket.io": "^4.8.3",
"typeorm": "^0.3.19",
"uuid": "^9.0.0"
"uuid": "^9.0.0",
"zod": "^4.3.6"
},
"devDependencies": {
"@nestjs/cli": "^10.0.0",

View File

@ -12,6 +12,7 @@ export const EvaluationRuleType = {
CONVERSION_SIGNAL: 'CONVERSION_SIGNAL',
TOPIC_BOUNDARY: 'TOPIC_BOUNDARY',
NO_FABRICATION: 'NO_FABRICATION',
LLM_JUDGE: 'LLM_JUDGE',
} as const;
export type EvaluationRuleTypeValue =

View File

@ -31,6 +31,11 @@ import {
isAgentInvocationTool,
getToolsForClaudeAPI,
} from '../tools/coordinator-tools';
import {
INTENT_MAX_ANSWER_LENGTH,
MAX_FOLLOWUP_LENGTH,
smartTruncate,
} from '../schemas/coordinator-response.schema';
const logger = new Logger('AgentLoop');
@ -152,8 +157,9 @@ export async function* agentLoop(
system: systemPrompt,
messages: messages as any,
tools: getToolsForClaudeAPI(additionalTools) as any,
max_tokens: 4096,
});
max_tokens: 2048,
...(params.outputConfig ? { output_config: params.outputConfig } : {}),
} as any);
break; // success
} catch (error: any) {
const isRateLimit = error?.status === 429 || error?.error?.type === 'rate_limit_error';
@ -207,11 +213,14 @@ export async function* agentLoop(
if (delta.type === 'text_delta') {
currentTextContent += delta.text;
yield {
type: 'text',
content: delta.text,
timestamp: Date.now(),
};
// Structured Output 模式下不直接 yield textJSON 片段不能展示给用户)
if (!params.outputConfig) {
yield {
type: 'text',
content: delta.text,
timestamp: Date.now(),
};
}
} else if (delta.type === 'input_json_delta') {
// Tool input being streamed — accumulate silently
}
@ -313,11 +322,51 @@ export async function* agentLoop(
// If no tool_use → conversation is done (with optional evaluation gate)
if (toolUseBlocks.length === 0 || finalMessage.stop_reason === 'end_turn') {
// ---- Extract response text from finalMessage (修复 bugcurrentTextContent 在 content_block_stop 时已被重置为空) ----
const responseText = finalMessage.content
.filter((b): b is Anthropic.TextBlock => b.type === 'text')
.map(b => b.text)
.join('');
// ---- Structured Output 解析:从 JSON 中提取 answer强制截断yield ----
if (params.outputConfig && responseText) {
try {
const parsed = JSON.parse(responseText);
if (parsed.answer) {
// 按 intent 强制截断 answer
const maxLen = INTENT_MAX_ANSWER_LENGTH[parsed.intent] || 300;
const originalLen = parsed.answer.length;
const answer = smartTruncate(parsed.answer, maxLen);
if (originalLen > maxLen) {
logger.debug(
`[Turn ${currentTurn + 1}] Answer truncated: ${originalLen}${answer.length} chars (intent=${parsed.intent}, limit=${maxLen})`,
);
}
yield { type: 'text', content: answer, timestamp: Date.now() };
if (parsed.followUp) {
const followUp = smartTruncate(parsed.followUp, MAX_FOLLOWUP_LENGTH);
yield { type: 'text', content: '\n\n' + followUp, timestamp: Date.now() };
}
} else {
// JSON 合法但缺少 answer 字段 → yield 原始文本
yield { type: 'text', content: responseText, timestamp: Date.now() };
}
logger.debug(`[Turn ${currentTurn + 1}] Structured output intent: ${parsed.intent}`);
} catch {
// JSON 解析失败 → 回退到原始文本
logger.warn(`[Turn ${currentTurn + 1}] Structured output parse failed, falling back to raw text`);
yield { type: 'text', content: responseText, timestamp: Date.now() };
}
}
// --- Evaluation Gate (optional, zero-config safe) ---
if (params.evaluationGate) {
try {
const gateResult = await params.evaluationGate(
currentTextContent,
responseText,
currentTurn + 1,
agentsUsed,
);

View File

@ -21,6 +21,10 @@ import {
CoordinatorPromptConfig,
} from '../prompts/coordinator-system-prompt';
// Structured Output
import { zodOutputFormat } from '@anthropic-ai/sdk/helpers/zod';
import { CoordinatorResponseSchema } from '../schemas/coordinator-response.schema';
// Specialist Services
import { PolicyExpertService } from '../specialists/policy-expert.service';
import { AssessmentExpertService } from '../specialists/assessment-expert.service';
@ -242,6 +246,7 @@ export class CoordinatorAgentService implements OnModuleInit {
messageCount: context.previousMessages?.length || 0,
hasConverted: false,
agentsUsed: agentsUsedInLoop,
userMessage: userContent,
});
// Gate 失败时将失败教训异步保存为系统经验fire-and-forget
@ -274,6 +279,7 @@ export class CoordinatorAgentService implements OnModuleInit {
currentTurnCount: 0,
currentCostUsd: 0,
evaluationGate: evaluationGateCallback,
outputConfig: { format: zodOutputFormat(CoordinatorResponseSchema) as any },
};
// 6. Create tool executor

View File

@ -10,6 +10,8 @@
*/
import { Injectable, Inject, Logger } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import Anthropic from '@anthropic-ai/sdk';
import {
IEvaluationRuleRepository,
EVALUATION_RULE_REPOSITORY,
@ -39,6 +41,8 @@ export interface EvaluationContext {
messageCount: number;
hasConverted: boolean;
agentsUsed: string[];
/** 用户原始消息 — LLM_JUDGE 需要用来评估回复的相关性 */
userMessage?: string;
}
/** Result of a single rule check */
@ -68,11 +72,18 @@ export class EvaluationGateService {
private readonly logger = new Logger(EvaluationGateService.name);
private cache = new Map<string, { rules: EvaluationRuleEntity[]; expiresAt: number }>();
private readonly CACHE_TTL = 5 * 60 * 1000; // 5 minutes
private anthropicClient: Anthropic;
constructor(
@Inject(EVALUATION_RULE_REPOSITORY)
private readonly repo: IEvaluationRuleRepository,
) {}
private readonly configService: ConfigService,
) {
this.anthropicClient = new Anthropic({
apiKey: this.configService.get<string>('ANTHROPIC_API_KEY'),
baseURL: this.configService.get<string>('ANTHROPIC_BASE_URL') || undefined,
});
}
/**
* Main entry: evaluate all applicable rules
@ -90,7 +101,7 @@ export class EvaluationGateService {
const results: RuleCheckResult[] = [];
for (const rule of rules) {
const check = this.evaluateRule(rule, context);
const check = await this.evaluateRule(rule, context);
results.push({
ruleId: rule.id,
ruleName: rule.name,
@ -172,10 +183,10 @@ export class EvaluationGateService {
// Rule Evaluation (pure functions)
// ============================================================
private evaluateRule(
private async evaluateRule(
rule: EvaluationRuleEntity,
context: EvaluationContext,
): { passed: boolean; message?: string } {
): Promise<{ passed: boolean; message?: string }> {
switch (rule.ruleType) {
case EvaluationRuleType.FIELD_COMPLETENESS:
return this.checkFieldCompleteness(rule.config, context);
@ -193,6 +204,8 @@ export class EvaluationGateService {
return this.checkTopicBoundary(rule.config, context);
case EvaluationRuleType.NO_FABRICATION:
return this.checkNoFabrication(rule.config, context);
case EvaluationRuleType.LLM_JUDGE:
return this.checkLlmJudge(rule.config, context);
default:
this.logger.warn(`Unknown rule type: ${rule.ruleType}`);
return { passed: true };
@ -484,6 +497,77 @@ export class EvaluationGateService {
};
}
/**
 * LLM_JUDGE: use a fast model (Claude Haiku) as a semantic judge over the
 * assistant's reply, scoring three dimensions on a 0-10 scale:
 *   - relevance:   did the reply directly answer the user's question
 *   - conciseness: is the reply free of padding
 *   - noise:       does the reply include information the user did not ask for
 *
 * config: { minRelevance?: number, minConciseness?: number, maxNoise?: number, timeoutMs?: number }
 *
 * Fail-open by design: missing context, timeout, API errors, non-JSON or
 * non-numeric judge output all return { passed: true } so the gate never
 * blocks a response because the judge itself misbehaved.
 */
private async checkLlmJudge(
  config: Record<string, unknown>,
  context: EvaluationContext,
): Promise<{ passed: boolean; message?: string }> {
  const minRelevance = (config.minRelevance as number) ?? 7;
  const minConciseness = (config.minConciseness as number) ?? 6;
  const maxNoise = (config.maxNoise as number) ?? 3;
  const timeoutMs = (config.timeoutMs as number) ?? 5000;

  if (!context.userMessage || !context.responseText) {
    return { passed: true }; // not enough information to judge — skip
  }

  const judgePrompt = `评估AI回复质量。打分0-10:
- relevance: 是否直接回答了用户的问题(0=完全没回答, 10=精准回答)
- conciseness: 是否足够简洁(0=非常冗长, 10=非常简洁)
- noise: 是否包含用户没问的多余信息(0=没有多余信息, 10=大量多余信息)

用户消息: ${context.userMessage}
AI回复: ${context.responseText}

只输出JSON: {"relevance":N,"conciseness":N,"noise":N,"reason":"一句话原因"}`;

  // Keep the timer handle so we can clear it after the race settles;
  // otherwise every successful call leaves a dangling timeout behind.
  let timer: ReturnType<typeof setTimeout> | undefined;
  try {
    const response = await Promise.race([
      this.anthropicClient.messages.create({
        model: 'claude-haiku-4-5-20251001',
        max_tokens: 200,
        messages: [{ role: 'user', content: judgePrompt }],
      }),
      new Promise<never>((_, reject) => {
        timer = setTimeout(() => reject(new Error('LLM Judge timeout')), timeoutMs);
      }),
    ]);

    const text = response.content[0]?.type === 'text' ? response.content[0].text : '';

    // Extract the JSON object — the model may wrap it in markdown fences.
    const jsonMatch = text.match(/\{[\s\S]*\}/);
    if (!jsonMatch) {
      this.logger.warn(`LLM Judge returned non-JSON: ${text.substring(0, 100)}`);
      return { passed: true };
    }

    const scores = JSON.parse(jsonMatch[0]);

    // Fail-open on malformed scores: comparing undefined/NaN against the
    // thresholds would silently evaluate to `passed = false`, turning a
    // broken judge response into a gate failure — the opposite of the
    // non-blocking contract this rule promises.
    const values = [scores.relevance, scores.conciseness, scores.noise];
    if (!values.every((v) => typeof v === 'number' && Number.isFinite(v))) {
      this.logger.warn(
        `LLM Judge returned non-numeric scores: ${jsonMatch[0].substring(0, 100)}`,
      );
      return { passed: true };
    }

    const passed =
      scores.relevance >= minRelevance &&
      scores.conciseness >= minConciseness &&
      scores.noise <= maxNoise;

    if (!passed) {
      this.logger.debug(
        `LLM Judge failed: relevance=${scores.relevance}, conciseness=${scores.conciseness}, noise=${scores.noise}. ${scores.reason || ''}`,
      );
    }

    return {
      passed,
      message: passed
        ? undefined
        : `LLM评审未通过: 相关性=${scores.relevance}/10, 简洁度=${scores.conciseness}/10, 噪音=${scores.noise}/10. ${scores.reason || ''}`,
    };
  } catch (error) {
    this.logger.warn(`LLM Judge error (non-fatal): ${error}`);
    return { passed: true }; // any failure passes (non-blocking gate)
  } finally {
    if (timer !== undefined) clearTimeout(timer);
  }
}
// ============================================================
// Feedback Builder
// ============================================================

View File

@ -71,6 +71,67 @@ ${companyName} 是${companyDescription}。
- ****
- ****
## 1.4
****
###
1. ****
2. ****
3. ****
###
| | | | |
|---------|---------|---------|------|
| **** | "X的条件是什么""Y需要多少钱""Z怎么申请" | | 1-3 |
| **** | "我能不能申请X""Y行不行""这个符合吗" | // | 1-2 |
| **** | "A和B哪个好""应该选哪个" | + | 2-3 |
| **** | "帮我看看适合什么""评估一下" | Agent评估 | |
| **/** | "太贵了""不确定""再想想" | ++ | 2-3 |
| **** | | policy_expert | 150-300 |
| **/** | "你好""在吗""谢谢" | | 1 |
### vs
**"高才通需要什么条件?"**
> Top Talent Pass SchemeABC三个类别
>
> A类250...
> B类3...
> C类3...
>
>
>
> - **A类**250
> - **B类** + 3
> - **C类**31
>
>
**"我清华毕业的,能申请高才通吗?"**
> B类或C类的学历要求...
> 3B类3C类
###
1. ****
2. ****"感谢您的提问""这是个很好的问题"
3. ****"关于您问到的XX..."
4. ****
5. ****
6. ****
---
#
@ -100,10 +161,12 @@ ${companyName} 是${companyDescription}。
- includeProcessSteps true
- includeRequirements/ true
****
-
-
-
****
-
- ****1-2
- "高才通需要什么学历"
-
-
## 2.2 Agentinvoke_assessment_expert
@ -587,12 +650,14 @@ ${companyName} 是${companyDescription}。
- 使
- 使"感谢您的咨询""请问还有其他需要帮助的吗"
****
- 2-3
- 200
- 300-500
-
- ****500
****
- ****
- 1-2
- 1 + 1
- 100
- 200-300
- "详细说说""展开讲讲"
- ****
****
- ****
@ -600,12 +665,17 @@ ${companyName} 是${companyDescription}。
-
- "温度"
****
- "首先/其次/最后"AI了
- "您问到关于XXX..."
**AI**
- "首先/其次/最后"
- "关于您问到的XXX..."
-
- 使 emoji
- 使
- "这是一个好问题"
- "感谢您的咨询/提问"
- "首先""其次""另外"
-
-
## 5.3 线
@ -985,13 +1055,13 @@ ${categoriesList}
1. invoke_memory_manager (load_context)
2.
3.
4.
2. +
3. ****
"${companyName}
"
"您好!请问您对哪种移民方式比较感兴趣?或者简单说说您的情况,我帮您分析最适合的路径。"
"很高兴为您服务"
## 11.2
@ -999,19 +1069,20 @@ ${categoriesList}
1. invoke_policy_expert ({ query: "高才通B类申请条件要求", category: "GEP" })
2.
3.
2. **B类的条件A类C类的信息**
3.
## 11.3
"你们的服务费用是不是太高了?我看别的公司便宜很多。"
1. invoke_objection_handler ({ objection: "服务费用太高,竞品更便宜", ... })
2. "我理解费用是很重要的考虑因素"
3.
4.
5.
1. invoke_objection_handler
2. **100** + +
3.
"理解您的顾虑,费用确实是重要的考量因素。我们的服务包含从评估到获批的全流程跟进,包括材料审核和入境处沟通。您可以先做一个初步评估了解可行性,再决定是否需要全程服务。"
## 11.4
@ -1072,24 +1143,24 @@ ${categoriesList}
#
#
使
使
1. **** Agent
1. ****
2. **线**"我需要确认一下"
3. ****
3. ****
4. ****
4. **** Agent
5. ****怀
5. ****"倾听""长篇大论地回应"
6. ****
6. ****
7. **** ${companyName}
7. ****
****
`.trim();
}

View File

@ -205,7 +205,7 @@ export function buildObjectionHandlerPrompt(): string {
- **empathyResponse**
- **factualRebuttal** search_knowledge
- **successStoryReference** null
- **suggestedResponse** 200-400
- **suggestedResponse**** 80-150 ** + +
- **followUpQuestion**
#

View File

@ -124,41 +124,43 @@ export function buildPolicyExpertPrompt(): string {
#
Coordinator Agent
****
##
Coordinator Agent Coordinator Coordinator
##
1. **** 2-3
2. ****使
3. ****
4. ****
5. ****
1. ****Coordinator query
2. ****310
3. ****5
4. ****
##
****"高才通B类的学历要求"
****
\`\`\`
//53QS/THE/US News/ARWU四大排名综合认定
- TTPS政策详解
\`\`\`
****
\`\`\`
TTPSB类面向全球百强大学的毕业生
A/B/C三类...
- //
- 53
- QS/THE/US News/ARWU
-
- B类条件...
- A类条件...A类
- C类条件...C类
1.
2. 线
3. 4
4. 6
1. ...
-
-
- TTPS政策详解 /
...
\`\`\`
---

View File

@ -0,0 +1,67 @@
/**
* Coordinator Response Schema Structured Output
*
* Coordinator JSON
* Anthropic API output_config
*/
import { z } from 'zod';
/**
 * Structured-output contract for the Coordinator agent: the model is forced
 * (via the Anthropic API `output_config`) to emit JSON matching this schema.
 * `intent` drives the per-intent hard length limits applied after parsing.
 */
export const CoordinatorResponseSchema = z.object({
  intent: z.enum([
    'factual_question', // direct factual question: "what are the requirements for X"
    'yes_no_question', // yes/no eligibility question: "can I apply for X"
    'comparison_question', // comparison/choice question: "which is better, A or B"
    'assessment_request', // assessment request: "please evaluate my situation"
    'objection_expression', // emotion/hesitation: "too expensive" / "afraid of rejection"
    'detailed_consultation', // complex policy consultation: explicitly asks for detail
    'casual_chat', // small talk / greeting: "hello"
  ]),
  answer: z.string().describe('直接回答用户的文本简洁精准默认100字以内'),
  followUp: z.string().optional().describe('跟进引导问题(可选)'),
});

/** TypeScript type inferred from the schema above. */
export type CoordinatorResponse = z.infer<typeof CoordinatorResponseSchema>;
// ============================================================
// Intent-based answer length limits (counted in characters,
// tuned for Chinese text where one character ≈ one word)
// ============================================================

/**
 * Hard per-intent cap on `answer` length, enforced programmatically after
 * JSON parsing — the physical layer of the response-quality constraints.
 */
export const INTENT_MAX_ANSWER_LENGTH: Record<string, number> = {
  casual_chat: 80, // one sentence
  yes_no_question: 120, // 1-2 sentences: verdict + reason
  factual_question: 200, // 1-3 sentences, straight answer
  objection_expression: 200, // empathy + fact + nudge
  comparison_question: 250, // 2-3 sentences: recommendation + reason
  assessment_request: 400, // as needed, but bounded
  detailed_consultation: 500, // complex consultations may run longer
};

/** Hard cap on the optional followUp question. */
export const MAX_FOLLOWUP_LENGTH = 80;
/**
 * Truncate `text` to at most `maxLen` characters, preferring to cut at a
 * sentence boundary (CJK fullwidth or ASCII punctuation).
 *
 * If a sentence-ending mark is found in the second half of the allowed
 * range, the text is cut just after it so the result ends on a complete
 * sentence; otherwise it is hard-truncated with a trailing ellipsis.
 *
 * @param text   source string (returned unchanged when already short enough)
 * @param maxLen maximum number of characters (UTF-16 code units)
 */
export function smartTruncate(text: string, maxLen: number): string {
  if (text.length <= maxLen) return text;
  const truncated = text.substring(0, maxLen);

  // Sentence-ending markers. NOTE: every entry must be non-empty —
  // String.prototype.lastIndexOf('') returns the string's length, which
  // would always report a fake "boundary" at the cut point and return
  // a mid-sentence fragment without an ellipsis.
  const sentenceEnders = ['。', '!', '?', ';', '. ', '! ', '? '];
  let lastEnd = -1;
  for (const ender of sentenceEnders) {
    const idx = truncated.lastIndexOf(ender);
    if (idx > lastEnd) lastEnd = idx;
  }

  // Only honor a boundary in the latter half of the range, so we never
  // return a tiny stub when the first sentence ends very early.
  if (lastEnd > maxLen * 0.5) {
    return text.substring(0, lastEnd + 1);
  }

  // No usable boundary — hard cut with ellipsis.
  return truncated + '...';
}

View File

@ -280,6 +280,8 @@ export interface AgentLoopParams {
turnCount: number,
agentsUsed: string[],
) => Promise<import('../coordinator/evaluation-gate.service').GateResult>;
/** Structured Output — 传入 Claude API 的 output_config */
outputConfig?: { format: Record<string, unknown> };
}
/** Claude API 消息格式 */

View File

@ -100,8 +100,8 @@ importers:
packages/services/conversation-service:
dependencies:
'@anthropic-ai/sdk':
specifier: ^0.52.0
version: 0.52.0
specifier: ^0.73.0
version: 0.73.0(zod@4.3.6)
'@iconsulting/shared':
specifier: workspace:*
version: link:../../shared
@ -162,6 +162,9 @@ importers:
uuid:
specifier: ^9.0.0
version: 9.0.1
zod:
specifier: ^4.3.6
version: 4.3.6
devDependencies:
'@nestjs/cli':
specifier: ^10.0.0
@ -863,6 +866,19 @@ packages:
hasBin: true
dev: false
/@anthropic-ai/sdk@0.73.0(zod@4.3.6):
resolution: {integrity: sha512-URURVzhxXGJDGUGFunIOtBlSl7KWvZiAAKY/ttTkZAkXT9bTPqdk2eK0b8qqSxXpikh3QKPnPYpiyX98zf5ebw==}
hasBin: true
peerDependencies:
zod: ^3.25.0 || ^4.0.0
peerDependenciesMeta:
zod:
optional: true
dependencies:
json-schema-to-ts: 3.1.1
zod: 4.3.6
dev: false
/@babel/code-frame@7.27.1:
resolution: {integrity: sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==}
engines: {node: '>=6.9.0'}
@ -2293,7 +2309,6 @@ packages:
uid: 2.0.2
transitivePeerDependencies:
- encoding
dev: false
/@nestjs/core@10.4.21(@nestjs/common@10.4.21)(@nestjs/platform-express@10.4.21)(reflect-metadata@0.2.2)(rxjs@7.8.2):
resolution: {integrity: sha512-MhiSGplB4TkadceA7opn/NaZmJhwYYNdB8nS8I29nLNx3vU+8aGHBiueZgcphEVDETZJSfc2VA5Mn/FC3JcsrA==}
@ -2325,6 +2340,7 @@ packages:
uid: 2.0.2
transitivePeerDependencies:
- encoding
dev: false
/@nestjs/jwt@10.2.0(@nestjs/common@10.4.21):
resolution: {integrity: sha512-x8cG90SURkEiLOehNaN2aRlotxT0KZESUliOPKKnjWiyJOcWurkF3w345WOX0P4MgFzUjGoZ1Sy0aZnxeihT0g==}
@ -2343,7 +2359,7 @@ packages:
'@nestjs/core': ^10.0.0
dependencies:
'@nestjs/common': 10.4.21(class-transformer@0.5.1)(class-validator@0.14.3)(reflect-metadata@0.2.2)(rxjs@7.8.2)
'@nestjs/core': 10.4.21(@nestjs/common@10.4.21)(@nestjs/platform-express@10.4.21)(reflect-metadata@0.2.2)(rxjs@7.8.2)
'@nestjs/core': 10.4.21(@nestjs/common@10.4.21)(@nestjs/platform-express@10.4.21)(@nestjs/websockets@10.4.21)(reflect-metadata@0.2.2)(rxjs@7.8.2)
body-parser: 1.20.3
cors: 2.8.5
express: 4.22.1
@ -2368,7 +2384,6 @@ packages:
- bufferutil
- supports-color
- utf-8-validate
dev: false
/@nestjs/schedule@4.1.2(@nestjs/common@10.4.21)(@nestjs/core@10.4.21):
resolution: {integrity: sha512-hCTQ1lNjIA5EHxeu8VvQu2Ed2DBLS1GSC6uKPYlBiQe6LL9a7zfE9iVSK+zuK8E2odsApteEBmfAQchc8Hx0Gg==}
@ -2426,7 +2441,7 @@ packages:
optional: true
dependencies:
'@nestjs/common': 10.4.21(class-transformer@0.5.1)(class-validator@0.14.3)(reflect-metadata@0.2.2)(rxjs@7.8.2)
'@nestjs/core': 10.4.21(@nestjs/common@10.4.21)(@nestjs/platform-express@10.4.21)(reflect-metadata@0.2.2)(rxjs@7.8.2)
'@nestjs/core': 10.4.21(@nestjs/common@10.4.21)(@nestjs/platform-express@10.4.21)(@nestjs/websockets@10.4.21)(reflect-metadata@0.2.2)(rxjs@7.8.2)
'@nestjs/platform-express': 10.4.21(@nestjs/common@10.4.21)(@nestjs/core@10.4.21)
tslib: 2.8.1
dev: true
@ -2441,7 +2456,7 @@ packages:
typeorm: ^0.3.0
dependencies:
'@nestjs/common': 10.4.21(class-transformer@0.5.1)(class-validator@0.14.3)(reflect-metadata@0.2.2)(rxjs@7.8.2)
'@nestjs/core': 10.4.21(@nestjs/common@10.4.21)(@nestjs/platform-express@10.4.21)(reflect-metadata@0.2.2)(rxjs@7.8.2)
'@nestjs/core': 10.4.21(@nestjs/common@10.4.21)(@nestjs/platform-express@10.4.21)(@nestjs/websockets@10.4.21)(reflect-metadata@0.2.2)(rxjs@7.8.2)
reflect-metadata: 0.2.2
rxjs: 7.8.2
typeorm: 0.3.28(ioredis@5.9.1)(pg@8.16.3)(ts-node@10.9.2)
@ -2468,7 +2483,6 @@ packages:
reflect-metadata: 0.2.2
rxjs: 7.8.2
tslib: 2.8.1
dev: false
/@nodelib/fs.scandir@2.1.5:
resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==}
@ -5231,7 +5245,6 @@ packages:
optional: true
dependencies:
ms: 2.1.3
dev: false
/debug@4.4.3:
resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==}
@ -7321,6 +7334,14 @@ packages:
resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==}
dev: true
/json-schema-to-ts@3.1.1:
resolution: {integrity: sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g==}
engines: {node: '>=16'}
dependencies:
'@babel/runtime': 7.28.4
ts-algebra: 2.0.0
dev: false
/json-schema-traverse@0.4.1:
resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==}
dev: true
@ -10088,7 +10109,6 @@ packages:
- bufferutil
- supports-color
- utf-8-validate
dev: false
/socket.io@4.8.3:
resolution: {integrity: sha512-2Dd78bqzzjE6KPkD5fHZmDAKRNe3J15q+YHDrIsy9WEkqttc7GY+kT9OBLSMaPbQaEd0x1BjcmtMtXkfpc+T5A==}
@ -10591,6 +10611,10 @@ packages:
resolution: {integrity: sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==}
dev: false
/ts-algebra@2.0.0:
resolution: {integrity: sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==}
dev: false
/ts-api-utils@1.4.3(typescript@5.9.3):
resolution: {integrity: sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==}
engines: {node: '>=16'}