fix(dingtalk): async reply pattern — immediate ack + batchSend for LLM response

- Send '🤔 小虾米正在思考,稍等...' immediately via sessionWebhook on each message
- Await LLM bridge call (serial queue preserved) then deliver response via batchSend
- batchSend decoupled from sessionWebhook — works regardless of webhook state
- Fix duplicate const staffId declaration (TS compile error)
- TASK_TIMEOUT_S=55 passed explicitly to bridge (was using bridge default 25s)
- senderStaffId-first routing (OAuth binding) with senderId fallback (code binding)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
hailin 2026-03-08 23:50:34 -07:00
parent 440819add8
commit 5aaa8600c5
1 changed file with 44 additions and 37 deletions

View File

@ -615,8 +615,16 @@ export class DingTalkRouterService implements OnModuleInit, OnModuleDestroy {
}
const bridgeUrl = `http://${instance.serverHost}:${instance.hostPort}/task`;
let reply: string;
// sessionWebhook TTL is ~90 minutes (per DingTalk docs), but delivering the actual LLM
// response synchronously makes the user wait with no feedback. Strategy:
// 1. Immediately send "处理中..." via sessionWebhook — user sees instant acknowledgment
// 2. Await the bridge call (LLM processing) — the serial queue still blocks here,
// preventing concurrent LLM calls for the same user
// 3. Always deliver the actual response via batchSend — decoupled from webhook window
this.reply(msg, '🤔 小虾米正在思考,稍等...');
let reply: string;
try {
const result = await this.httpPostJson<{ ok: boolean; result?: unknown; error?: string }>(
bridgeUrl,
@ -624,7 +632,6 @@ export class DingTalkRouterService implements OnModuleInit, OnModuleDestroy {
prompt: text,
sessionKey: `agent:main:dt-${userId}`,
idempotencyKey: msg.msgId,
// Pass explicit timeout to bridge — default is 25s which is too short for LLM calls.
timeoutSeconds: TASK_TIMEOUT_S,
},
(TASK_TIMEOUT_S + 10) * 1000,
@ -642,41 +649,41 @@ export class DingTalkRouterService implements OnModuleInit, OnModuleDestroy {
reply = '与小龙虾通信时出现错误,请稍后重试。';
}
// Try sessionWebhook first; if it has expired by the time we have a reply (LLM took
// longer than ~30s), fall back to proactive batchSend so the reply still reaches the user.
const webhookExpiry = msg.sessionWebhookExpiredTime > 1e11
? msg.sessionWebhookExpiredTime
: msg.sessionWebhookExpiredTime * 1000;
await this.batchSend(staffId, reply, msg.msgId);
}
if (Date.now() <= webhookExpiry) {
this.reply(msg, reply);
} else {
this.logger.warn(
`sessionWebhook expired for msgId=${msg.msgId} — falling back to batchSend for userId=${userId}`,
);
const staffId = msg.senderStaffId?.trim();
if (staffId) {
this.getToken()
.then((token) =>
this.httpsPost<unknown>(
/**
 * Send a proactive message to a DingTalk user via the oToMessages/batchSend API.
 *
 * Used for LLM replies so that users receive the response regardless of
 * sessionWebhook state (the webhook window may have expired by the time the
 * LLM call completes).
 *
 * @param staffId DingTalk staff id of the recipient; when absent the send is
 *                skipped with a warning (there is no way to deliver the reply).
 * @param content Reply text; split into chunks of DINGTALK_MAX_CHARS each.
 * @param msgId   Originating message id — used only for log correlation.
 * @returns Promise that resolves after all chunks are sent; send failures are
 *          logged and swallowed, so the returned promise never rejects.
 */
private batchSend(staffId: string | undefined, content: string, msgId: string): Promise<void> {
  if (!staffId) {
    this.logger.warn(`batchSend skipped — no staffId for msgId=${msgId}`);
    return Promise.resolve();
  }
  // Strip stack-trace location noise (" at file:line:col") and fall back to a
  // placeholder when the result is empty after trimming.
  const safe = content.replace(/\s+at\s+\S+:\d+:\d+/g, '').trim() || '(空响应)';
  // Chunk content to stay within DingTalk's message size limit.
  const chunks: string[] = [];
  for (let i = 0; i < safe.length; i += DINGTALK_MAX_CHARS) {
    chunks.push(safe.slice(i, i + DINGTALK_MAX_CHARS));
  }
  return this.getToken()
    .then(async (token) => {
      // Chunks are awaited one at a time so they arrive in order.
      for (const chunk of chunks) {
        await this.httpsPost<unknown>(
          'api.dingtalk.com',
          '/v1.0/robot/oToMessages/batchSend',
          {
            robotCode: this.clientId,
            userIds: [staffId],
            msgKey: 'sampleText',
            msgParam: JSON.stringify({ content: chunk }),
          },
          { 'x-acs-dingtalk-access-token': token },
        );
      }
    })
    .catch((e: Error) =>
      this.logger.error(`batchSend failed for msgId=${msgId}:`, e.message),
    );
}
// ── Reply (chunked) ────────────────────────────────────────────────────────