fix(telegram): suppress reasoning-only leaks when reasoning is off

Co-authored-by: avirweb <avirweb@users.noreply.github.com>
Peter Steinberger
2026-02-23 20:05:45 +00:00
parent 63e4dfaa9c
commit 5a475259bb
3 changed files with 73 additions and 21 deletions

View File

@@ -24,6 +24,7 @@ Docs: https://docs.openclaw.ai
- Sessions/Store: canonicalize inbound mixed-case session keys for metadata and route updates, and migrate legacy case-variant entries to a single lowercase key to prevent duplicate sessions and missing TUI/WebUI history. (#9561) Thanks @hillghost86.
- Telegram/Reactions: soft-fail reaction action errors (policy/token/emoji/API), accept snake_case `message_id`, and fallback to inbound message-id context when explicit `messageId` is omitted so DM reactions stay stable without regeneration loops. (#20236, #21001) Thanks @PeterShanxin and @vincentkoc.
- Telegram/Polling: scope persisted polling offsets to bot identity and reuse a single awaited runner-stop path on abort/retry, preventing cross-token offset bleed and overlapping pollers during restart/error recovery. (#10850, #11347) Thanks @talhaorak, @anooprdawar, and @vincentkoc.
- Telegram/Reasoning: when `/reasoning off` is active, suppress reasoning-only delivery segments and block raw fallback resend of suppressed `Reasoning:`/`<think>` text, preventing internal reasoning leakage in legacy sessions while preserving answer delivery (a minimal sketch of the decision follows this list). (#24626, #24518)
- Agents/Reasoning: when model-default thinking is active (for example `thinking=low`), keep auto-reasoning disabled unless explicitly enabled, preventing `Reasoning:` thinking-block leakage in channel replies. (#24335, #24290) Thanks @Kay-051.
- Agents/Reasoning: avoid classifying provider reasoning-required errors as context overflows so these failures no longer trigger compaction-style overflow recovery. (#24593) Thanks @vincentkoc.
- Agents/Models: codify `agents.defaults.model` / `agents.defaults.imageModel` config-boundary input as `string | {primary,fallbacks}`, split explicit vs effective model resolution, and fix `models status --agent` source attribution so defaults-inherited agents are labeled as `defaults` while runtime selection still honors defaults fallback. (#24210) Thanks @bianbiandashen.

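A minimal sketch of the suppression decision from the Telegram/Reasoning entry above. The splitter below is a simplified stand-in for the repo's splitTelegramReasoningText; the reasoning-level values and split rules are assumptions for illustration, not the shipped implementation (which appears in the diffs further down).

type LaneName = "reasoning" | "answer";
type LaneSegment = { lane: LaneName; text: string };
type ReasoningLevel = "off" | "stream";

// Simplified stand-in for splitTelegramReasoningText: assume reasoning arrives as a
// leading "Reasoning:" block separated from the answer by a blank line.
const splitReasoningText = (text: string): { reasoningText?: string; answerText?: string } => {
  if (!text.startsWith("Reasoning:")) {
    return { answerText: text || undefined };
  }
  const [reasoning, ...rest] = text.split("\n\n");
  const answer = rest.join("\n\n").trim();
  return { reasoningText: reasoning, answerText: answer || undefined };
};

const toLaneSegments = (text: string, reasoningLevel: ReasoningLevel) => {
  const split = splitReasoningText(text);
  const suppress = reasoningLevel === "off";
  const segments: LaneSegment[] = [];
  if (split.reasoningText && !suppress) {
    segments.push({ lane: "reasoning", text: split.reasoningText });
  }
  if (split.answerText) {
    segments.push({ lane: "answer", text: split.answerText });
  }
  return {
    segments,
    // True only when everything in the payload was reasoning and it was suppressed;
    // the caller must then skip the raw-text fallback entirely.
    suppressedReasoningOnly: Boolean(split.reasoningText) && suppress && !split.answerText,
  };
};

// With /reasoning off, a reasoning-only payload yields nothing to send:
//   toLaneSegments("Reasoning:\n_step one_", "off")
//     -> { segments: [], suppressedReasoningOnly: true }
// while a mixed payload still delivers only its answer lane.
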
View File

@@ -176,6 +176,15 @@ describe("dispatchTelegramMessage draft streaming", () => {
});
}
function createReasoningStreamContext(): TelegramMessageContext {
  loadSessionStore.mockReturnValue({
    s1: { reasoningLevel: "stream" },
  });
  return createContext({
    ctxPayload: { SessionKey: "s1" } as unknown as TelegramMessageContext["ctxPayload"],
  });
}
it("streams drafts in private threads and forwards thread id", async () => {
const draftStream = createDraftStream();
createTelegramDraftStream.mockReturnValue(draftStream);
@@ -772,7 +781,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "999" });
await dispatchWithContext({ context: createContext(), streamMode });
await dispatchWithContext({ context: createReasoningStreamContext(), streamMode });
expect(reasoningDraftStream.forceNewMessage).toHaveBeenCalledTimes(1);
},
@@ -809,7 +818,11 @@ describe("dispatchTelegramMessage draft streaming", () => {
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "999" });
const bot = createBot();
await dispatchWithContext({ context: createContext(), streamMode: "partial", bot });
await dispatchWithContext({
  context: createReasoningStreamContext(),
  streamMode: "partial",
  bot,
});
expect(reasoningDraftParams?.onSupersededPreview).toBeTypeOf("function");
const deleteMessageCalls = (
@@ -836,13 +849,13 @@ describe("dispatchTelegramMessage draft streaming", () => {
);
deliverReplies.mockResolvedValue({ delivered: true });
await dispatchWithContext({ context: createContext(), streamMode });
await dispatchWithContext({ context: createReasoningStreamContext(), streamMode });
expect(reasoningDraftStream.forceNewMessage).not.toHaveBeenCalled();
},
);
it("does not finalize preview with reasoning payloads before answer payloads", async () => {
it("suppresses reasoning-only final payloads when reasoning level is off", async () => {
setupDraftStreams({ answerMessageId: 999 });
dispatchReplyWithBufferedBlockDispatcher.mockImplementation(
async ({ dispatcherOptions, replyOptions }) => {
@@ -860,14 +873,11 @@ describe("dispatchTelegramMessage draft streaming", () => {
await dispatchWithContext({ context: createContext(), streamMode: "partial" });
// Keep reasoning as its own message.
expect(deliverReplies).toHaveBeenCalledTimes(1);
expect(deliverReplies).toHaveBeenCalledWith(
expect(deliverReplies).not.toHaveBeenCalledWith(
expect.objectContaining({
replies: [expect.objectContaining({ text: "Reasoning:\n_step one_" })],
}),
);
// Finalize preview with the actual answer instead of overwriting with reasoning.
expect(editMessageTelegram).toHaveBeenCalledTimes(1);
expect(editMessageTelegram).toHaveBeenCalledWith(
123,
@@ -877,6 +887,25 @@ describe("dispatchTelegramMessage draft streaming", () => {
);
});
it("does not resend suppressed reasoning-only text through raw fallback", async () => {
setupDraftStreams({ answerMessageId: 999 });
dispatchReplyWithBufferedBlockDispatcher.mockImplementation(async ({ dispatcherOptions }) => {
await dispatcherOptions.deliver({ text: "Reasoning:\n_step one_" }, { kind: "final" });
return { queuedFinal: true };
});
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "999" });
await dispatchWithContext({ context: createContext(), streamMode: "partial" });
expect(deliverReplies).not.toHaveBeenCalledWith(
expect.objectContaining({
replies: [expect.objectContaining({ text: "Reasoning:\n_step one_" })],
}),
);
expect(editMessageTelegram).not.toHaveBeenCalled();
});
it("keeps reasoning and answer streaming in separate preview lanes", async () => {
const { answerDraftStream, reasoningDraftStream } = setupDraftStreams({
answerMessageId: 999,
@@ -893,7 +922,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "999" });
await dispatchWithContext({ context: createContext(), streamMode: "partial" });
await dispatchWithContext({ context: createReasoningStreamContext(), streamMode: "partial" });
expect(reasoningDraftStream.update).toHaveBeenCalledWith("Reasoning:\n_Working on it..._");
expect(answerDraftStream.update).toHaveBeenCalledWith("Checking the directory...");
@@ -913,7 +942,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "999" });
await dispatchWithContext({ context: createContext(), streamMode: "partial" });
await dispatchWithContext({ context: createReasoningStreamContext(), streamMode: "partial" });
expect(editMessageTelegram).not.toHaveBeenCalled();
expect(deliverReplies).toHaveBeenCalledWith(
@@ -955,7 +984,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "111" });
await dispatchWithContext({ context: createContext(), streamMode });
await dispatchWithContext({ context: createReasoningStreamContext(), streamMode });
expect(reasoningDraftStream.forceNewMessage).not.toHaveBeenCalled();
expect(editMessageTelegram).toHaveBeenCalledWith(
@@ -990,7 +1019,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "999" });
await dispatchWithContext({ context: createContext(), streamMode: "partial" });
await dispatchWithContext({ context: createReasoningStreamContext(), streamMode: "partial" });
expect(editMessageTelegram).toHaveBeenNthCalledWith(1, 123, 999, "3", expect.any(Object));
expect(editMessageTelegram).toHaveBeenNthCalledWith(
@@ -1028,7 +1057,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "999" });
await dispatchWithContext({ context: createContext(), streamMode: "partial" });
await dispatchWithContext({ context: createReasoningStreamContext(), streamMode: "partial" });
expect(reasoningDraftStream.update).toHaveBeenCalledWith(
"Reasoning:\n_Counting letters in strawberry_",
@@ -1060,7 +1089,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "999" });
await dispatchWithContext({ context: createContext(), streamMode: "partial" });
await dispatchWithContext({ context: createReasoningStreamContext(), streamMode: "partial" });
expect(reasoningDraftStream.update).toHaveBeenCalledWith(
"Reasoning:\n_Counting letters in strawberry_",
@@ -1096,7 +1125,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "999" });
await dispatchWithContext({ context: createContext(), streamMode: "partial" });
await dispatchWithContext({ context: createReasoningStreamContext(), streamMode: "partial" });
expect(reasoningDraftStream.update).toHaveBeenCalledWith(
"Reasoning:\n_Word: strawberry. r appears at 3, 8, 9._",
@@ -1127,7 +1156,7 @@ describe("dispatchTelegramMessage draft streaming", () => {
deliverReplies.mockResolvedValue({ delivered: true });
editMessageTelegram.mockResolvedValue({ ok: true, chatId: "123", messageId: "999" });
await dispatchWithContext({ context: createContext(), streamMode: "partial" });
await dispatchWithContext({ context: createReasoningStreamContext(), streamMode: "partial" });
expect(editMessageTelegram).toHaveBeenNthCalledWith(
1,

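Note on the test fixtures above: createReasoningStreamContext pins the stubbed session store to reasoningLevel "stream" so the reasoning preview lane stays active, while the plain createContext session carries no stored level and now exercises the suppression path. A rough sketch of that lookup, where the helper name and the "off" default are assumptions rather than the repo's actual resolution code:

type SessionRecord = { reasoningLevel?: "off" | "stream" };

// Hypothetical resolver: without a stored level, treat reasoning as off so only
// answer text is ever delivered for that session.
const resolveReasoningLevel = (
  store: Record<string, SessionRecord>,
  sessionKey: string,
): "off" | "stream" => store[sessionKey]?.reasoningLevel ?? "off";

// createReasoningStreamContext-style stub:
//   resolveReasoningLevel({ s1: { reasoningLevel: "stream" } }, "s1") === "stream"
// plain createContext (nothing stored for the session):
//   resolveReasoningLevel({}, "s1") === "off"
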
View File

@@ -202,16 +202,25 @@ export const dispatchTelegramMessage = async ({
let splitReasoningOnNextStream = false;
const reasoningStepState = createTelegramReasoningStepState();
type SplitLaneSegment = { lane: LaneName; text: string };
const splitTextIntoLaneSegments = (text?: string): SplitLaneSegment[] => {
type SplitLaneSegmentsResult = {
  segments: SplitLaneSegment[];
  suppressedReasoningOnly: boolean;
};
const splitTextIntoLaneSegments = (text?: string): SplitLaneSegmentsResult => {
  const split = splitTelegramReasoningText(text);
  const segments: SplitLaneSegment[] = [];
  if (split.reasoningText) {
  const suppressReasoning = resolvedReasoningLevel === "off";
  if (split.reasoningText && !suppressReasoning) {
    segments.push({ lane: "reasoning", text: split.reasoningText });
  }
  if (split.answerText) {
    segments.push({ lane: "answer", text: split.answerText });
  }
  return segments;
  return {
    segments,
    suppressedReasoningOnly:
      Boolean(split.reasoningText) && suppressReasoning && !split.answerText,
  };
};
const resetDraftLaneState = (lane: DraftLaneState) => {
  lane.lastPartialText = "";
@@ -241,7 +250,8 @@ export const dispatchTelegramMessage = async ({
  laneStream.update(text);
};
const ingestDraftLaneSegments = (text: string | undefined) => {
  for (const segment of splitTextIntoLaneSegments(text)) {
  const split = splitTextIntoLaneSegments(text);
  for (const segment of split.segments) {
    if (segment.lane === "reasoning") {
      reasoningStepState.noteReasoningHint();
      reasoningStepState.noteReasoningDelivered();
@@ -418,7 +428,8 @@ export const dispatchTelegramMessage = async ({
const previewButtons = (
  payload.channelData?.telegram as { buttons?: TelegramInlineButtons } | undefined
)?.buttons;
const segments = splitTextIntoLaneSegments(payload.text);
const split = splitTextIntoLaneSegments(payload.text);
const segments = split.segments;
const hasMedia = Boolean(payload.mediaUrl) || (payload.mediaUrls?.length ?? 0) > 0;
const flushBufferedFinalAnswer = async () => {
@@ -478,6 +489,17 @@ export const dispatchTelegramMessage = async ({
if (segments.length > 0) {
  return;
}
if (split.suppressedReasoningOnly) {
  if (hasMedia) {
    const payloadWithoutSuppressedReasoning =
      typeof payload.text === "string" ? { ...payload, text: "" } : payload;
    await sendPayload(payloadWithoutSuppressedReasoning);
  }
  if (info.kind === "final") {
    await flushBufferedFinalAnswer();
  }
  return;
}
if (info.kind === "final") {
  await answerLane.stream?.stop();
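
A condensed, standalone restatement of the suppressed-reasoning-only branch above, under stated assumptions: the payload and info shapes are simplified, and sendPayload / flushBufferedFinalAnswer stand in for closures defined elsewhere in dispatchTelegramMessage.

type DeliveryInfo = { kind: "partial" | "final" };
type DeliveryPayload = { text?: string; mediaUrl?: string; mediaUrls?: string[] };

// Simplified mirror of the branch above: when the only text was suppressed reasoning,
// never fall back to sending the raw text, but still send any media (with the reasoning
// text stripped) and still flush a buffered final answer on the final chunk.
const handleSuppressedReasoningOnly = async (
  payload: DeliveryPayload,
  info: DeliveryInfo,
  deps: {
    sendPayload: (p: DeliveryPayload) => Promise<void>;
    flushBufferedFinalAnswer: () => Promise<void>;
  },
): Promise<void> => {
  const hasMedia = Boolean(payload.mediaUrl) || (payload.mediaUrls?.length ?? 0) > 0;
  if (hasMedia) {
    await deps.sendPayload(typeof payload.text === "string" ? { ...payload, text: "" } : payload);
  }
  if (info.kind === "final") {
    await deps.flushBufferedFinalAnswer();
  }
};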