refactor(test): dedupe agent and discord test fixtures

Author: Peter Steinberger
Date: 2026-02-22 20:01:43 +00:00
parent 5547a2275c
commit 3c75bc0e41
26 changed files with 632 additions and 737 deletions
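For reference, a minimal sketch (not part of the commit) of how the shared fixtures introduced below are meant to be consumed. The helper names and factory shapes come from the diffs that follow; the test names, assertions, and exact relative import paths here are illustrative only.

// Hypothetical consumer test exercising two of the new shared fixtures.
// Import paths are abbreviated; each helper lives next to the tests that use it in the diffs below.
import { describe, expect, it } from "vitest";
import { makeModelFallbackCfg } from "./test-helpers/model-fallback-config-fixture.js";
import { jsonResponse } from "./test-http-helpers.js";

describe("shared fixture usage (illustrative)", () => {
  it("builds the default model-fallback config", () => {
    const cfg = makeModelFallbackCfg();
    // Primary/fallback values as defined in the fixture added by this commit.
    expect(cfg.agents?.defaults?.model?.primary).toBe("openai/gpt-4.1-mini");
  });

  it("builds a JSON Response for fetch mocks", async () => {
    const res = jsonResponse({ ok: true }, 200);
    expect(res.status).toBe(200);
    await expect(res.json()).resolves.toEqual({ ok: true });
  });
});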

View File

@@ -6,14 +6,19 @@ const { supervisorSpawnMock } = vi.hoisted(() => ({
supervisorSpawnMock: vi.fn(),
}));
vi.mock("../process/supervisor/index.js", () => ({
getProcessSupervisor: () => ({
const makeSupervisor = () => {
const noop = vi.fn();
return {
spawn: (...args: unknown[]) => supervisorSpawnMock(...args),
cancel: vi.fn(),
cancelScope: vi.fn(),
reconcileOrphans: vi.fn(),
getRecord: vi.fn(),
}),
cancel: noop,
cancelScope: noop,
reconcileOrphans: noop,
getRecord: noop,
};
};
vi.mock("../process/supervisor/index.js", () => ({
getProcessSupervisor: () => makeSupervisor(),
}));
afterEach(() => {

View File

@@ -1,22 +1,13 @@
import fs from "node:fs/promises";
import os from "node:os";
import path from "node:path";
import { describe, expect, it } from "vitest";
import { withTempDir } from "../test-utils/temp-dir.js";
import { createExecTool } from "./bash-tools.exec.js";
const isWin = process.platform === "win32";
const describeNonWin = isWin ? describe.skip : describe;
async function withTempDir(prefix: string, run: (dir: string) => Promise<void>) {
const dir = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
try {
await run(dir);
} finally {
await fs.rm(dir, { recursive: true, force: true });
}
}
describeNonWin("exec script preflight", () => {
it("blocks shell env var injection tokens in python scripts before execution", async () => {
await withTempDir("openclaw-exec-preflight-", async (tmp) => {

View File

@@ -1,6 +1,7 @@
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import type { AuthProfileStore } from "./auth-profiles.js";
import { makeModelFallbackCfg } from "./test-helpers/model-fallback-config-fixture.js";
// Mock auth-profiles module — must be before importing model-fallback
vi.mock("./auth-profiles.js", () => ({
@@ -23,19 +24,7 @@ const mockedGetSoonestCooldownExpiry = vi.mocked(getSoonestCooldownExpiry);
const mockedIsProfileInCooldown = vi.mocked(isProfileInCooldown);
const mockedResolveAuthProfileOrder = vi.mocked(resolveAuthProfileOrder);
function makeCfg(overrides: Partial<OpenClawConfig> = {}): OpenClawConfig {
return {
agents: {
defaults: {
model: {
primary: "openai/gpt-4.1-mini",
fallbacks: ["anthropic/claude-haiku-3-5"],
},
},
},
...overrides,
} as OpenClawConfig;
}
const makeCfg = makeModelFallbackCfg;
function expectFallbackUsed(
result: { result: unknown; attempts: Array<{ reason?: string }> },
@@ -50,10 +39,34 @@ function expectFallbackUsed(
expect(result.attempts[0]?.reason).toBe("rate_limit");
}
function expectPrimaryProbeSuccess(
result: { result: unknown },
run: {
(...args: unknown[]): unknown;
mock: { calls: unknown[][] };
},
expectedResult: unknown,
) {
expect(result.result).toBe(expectedResult);
expect(run).toHaveBeenCalledTimes(1);
expect(run).toHaveBeenCalledWith("openai", "gpt-4.1-mini");
}
describe("runWithModelFallback probe logic", () => {
let realDateNow: () => number;
const NOW = 1_700_000_000_000;
const runPrimaryCandidate = (
cfg: OpenClawConfig,
run: (provider: string, model: string) => Promise<unknown>,
) =>
runWithModelFallback({
cfg,
provider: "openai",
model: "gpt-4.1-mini",
run,
});
beforeEach(() => {
realDateNow = Date.now;
Date.now = vi.fn(() => NOW);
@@ -100,12 +113,7 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("ok");
const result = await runWithModelFallback({
cfg,
provider: "openai",
model: "gpt-4.1-mini",
run,
});
const result = await runPrimaryCandidate(cfg, run);
// Should skip primary and use fallback
expectFallbackUsed(result, run);
@@ -119,17 +127,8 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("probed-ok");
const result = await runWithModelFallback({
cfg,
provider: "openai",
model: "gpt-4.1-mini",
run,
});
// Should probe primary and succeed
expect(result.result).toBe("probed-ok");
expect(run).toHaveBeenCalledTimes(1);
expect(run).toHaveBeenCalledWith("openai", "gpt-4.1-mini");
const result = await runPrimaryCandidate(cfg, run);
expectPrimaryProbeSuccess(result, run, "probed-ok");
});
it("probes primary model when cooldown already expired", async () => {
@@ -140,16 +139,8 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("recovered");
const result = await runWithModelFallback({
cfg,
provider: "openai",
model: "gpt-4.1-mini",
run,
});
expect(result.result).toBe("recovered");
expect(run).toHaveBeenCalledTimes(1);
expect(run).toHaveBeenCalledWith("openai", "gpt-4.1-mini");
const result = await runPrimaryCandidate(cfg, run);
expectPrimaryProbeSuccess(result, run, "recovered");
});
it("does NOT probe non-primary candidates during cooldown", async () => {
@@ -203,12 +194,7 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("ok");
const result = await runWithModelFallback({
cfg,
provider: "openai",
model: "gpt-4.1-mini",
run,
});
const result = await runPrimaryCandidate(cfg, run);
// Should be throttled → skip primary, use fallback
expectFallbackUsed(result, run);
@@ -224,16 +210,8 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("probed-ok");
const result = await runWithModelFallback({
cfg,
provider: "openai",
model: "gpt-4.1-mini",
run,
});
expect(result.result).toBe("probed-ok");
expect(run).toHaveBeenCalledTimes(1);
expect(run).toHaveBeenCalledWith("openai", "gpt-4.1-mini");
const result = await runPrimaryCandidate(cfg, run);
expectPrimaryProbeSuccess(result, run, "probed-ok");
});
it("handles non-finite soonest safely (treats as probe-worthy)", async () => {
@@ -244,15 +222,8 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("ok-infinity");
const result = await runWithModelFallback({
cfg,
provider: "openai",
model: "gpt-4.1-mini",
run,
});
expect(result.result).toBe("ok-infinity");
expect(run).toHaveBeenCalledWith("openai", "gpt-4.1-mini");
const result = await runPrimaryCandidate(cfg, run);
expectPrimaryProbeSuccess(result, run, "ok-infinity");
});
it("handles NaN soonest safely (treats as probe-worthy)", async () => {
@@ -262,15 +233,8 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("ok-nan");
const result = await runWithModelFallback({
cfg,
provider: "openai",
model: "gpt-4.1-mini",
run,
});
expect(result.result).toBe("ok-nan");
expect(run).toHaveBeenCalledWith("openai", "gpt-4.1-mini");
const result = await runPrimaryCandidate(cfg, run);
expectPrimaryProbeSuccess(result, run, "ok-nan");
});
it("handles null soonest safely (treats as probe-worthy)", async () => {
@@ -280,15 +244,8 @@ describe("runWithModelFallback probe logic", () => {
const run = vi.fn().mockResolvedValue("ok-null");
const result = await runWithModelFallback({
cfg,
provider: "openai",
model: "gpt-4.1-mini",
run,
});
expect(result.result).toBe("ok-null");
expect(run).toHaveBeenCalledWith("openai", "gpt-4.1-mini");
const result = await runPrimaryCandidate(cfg, run);
expectPrimaryProbeSuccess(result, run, "ok-null");
});
it("single candidate skips with rate_limit and exhausts candidates", async () => {

View File

@@ -8,20 +8,9 @@ import type { AuthProfileStore } from "./auth-profiles.js";
import { saveAuthProfileStore } from "./auth-profiles.js";
import { AUTH_STORE_VERSION } from "./auth-profiles/constants.js";
import { runWithModelFallback } from "./model-fallback.js";
import { makeModelFallbackCfg } from "./test-helpers/model-fallback-config-fixture.js";
function makeCfg(overrides: Partial<OpenClawConfig> = {}): OpenClawConfig {
return {
agents: {
defaults: {
model: {
primary: "openai/gpt-4.1-mini",
fallbacks: ["anthropic/claude-haiku-3-5"],
},
},
},
...overrides,
} as OpenClawConfig;
}
const makeCfg = makeModelFallbackCfg;
function makeFallbacksOnlyCfg(): OpenClawConfig {
return {
@@ -99,6 +88,24 @@ async function expectFallsBackToHaiku(params: {
expect(run.mock.calls[1]?.[1]).toBe("claude-haiku-3-5");
}
function createOverrideFailureRun(params: {
overrideProvider: string;
overrideModel: string;
fallbackProvider: string;
fallbackModel: string;
firstError: Error;
}) {
return vi.fn().mockImplementation(async (provider, model) => {
if (provider === params.overrideProvider && model === params.overrideModel) {
throw params.firstError;
}
if (provider === params.fallbackProvider && model === params.fallbackModel) {
return "ok";
}
throw new Error(`unexpected fallback candidate: ${provider}/${model}`);
});
}
describe("runWithModelFallback", () => {
it("normalizes openai gpt-5.3 codex to openai-codex before running", async () => {
const cfg = makeCfg();
@@ -151,14 +158,12 @@ describe("runWithModelFallback", () => {
},
});
const run = vi.fn().mockImplementation(async (provider, model) => {
if (provider === "anthropic" && model === "claude-opus-4-5") {
throw Object.assign(new Error("unauthorized"), { status: 401 });
}
if (provider === "openai" && model === "gpt-4.1-mini") {
return "ok";
}
throw new Error(`unexpected fallback candidate: ${provider}/${model}`);
const run = createOverrideFailureRun({
overrideProvider: "anthropic",
overrideModel: "claude-opus-4-5",
fallbackProvider: "openai",
fallbackModel: "gpt-4.1-mini",
firstError: Object.assign(new Error("unauthorized"), { status: 401 }),
});
const result = await runWithModelFallback({
@@ -238,14 +243,12 @@ describe("runWithModelFallback", () => {
it("falls back to configured primary for override credential validation errors", async () => {
const cfg = makeCfg();
const run = vi.fn().mockImplementation(async (provider, model) => {
if (provider === "anthropic" && model === "claude-opus-4") {
throw new Error('No credentials found for profile "anthropic:default".');
}
if (provider === "openai" && model === "gpt-4.1-mini") {
return "ok";
}
throw new Error(`unexpected fallback candidate: ${provider}/${model}`);
const run = createOverrideFailureRun({
overrideProvider: "anthropic",
overrideModel: "claude-opus-4",
fallbackProvider: "openai",
fallbackModel: "gpt-4.1-mini",
firstError: new Error('No credentials found for profile "anthropic:default".'),
});
const result = await runWithModelFallback({

View File

@@ -20,6 +20,35 @@ import "./test-helpers/fast-core-tools.js";
import { createOpenClawTools } from "./openclaw-tools.js";
describe("agents_list", () => {
type AgentConfig = NonNullable<NonNullable<typeof configOverride.agents>["list"]>[number];
function setConfigWithAgentList(agentList: AgentConfig[]) {
configOverride = {
session: {
mainKey: "main",
scope: "per-sender",
},
agents: {
list: agentList,
},
};
}
function requireAgentsListTool() {
const tool = createOpenClawTools({
agentSessionKey: "main",
}).find((candidate) => candidate.name === "agents_list");
if (!tool) {
throw new Error("missing agents_list tool");
}
return tool;
}
function readAgentList(result: unknown) {
return (result as { details?: { agents?: Array<{ id: string; configured?: boolean }> } })
.details?.agents;
}
beforeEach(() => {
configOverride = {
session: {
@@ -30,137 +59,77 @@ describe("agents_list", () => {
});
it("defaults to the requester agent only", async () => {
const tool = createOpenClawTools({
agentSessionKey: "main",
}).find((candidate) => candidate.name === "agents_list");
if (!tool) {
throw new Error("missing agents_list tool");
}
const tool = requireAgentsListTool();
const result = await tool.execute("call1", {});
expect(result.details).toMatchObject({
requester: "main",
allowAny: false,
});
const agents = (result.details as { agents?: Array<{ id: string }> }).agents;
const agents = readAgentList(result);
expect(agents?.map((agent) => agent.id)).toEqual(["main"]);
});
it("includes allowlisted targets plus requester", async () => {
configOverride = {
session: {
mainKey: "main",
scope: "per-sender",
setConfigWithAgentList([
{
id: "main",
name: "Main",
subagents: {
allowAgents: ["research"],
},
},
agents: {
list: [
{
id: "main",
name: "Main",
subagents: {
allowAgents: ["research"],
},
},
{
id: "research",
name: "Research",
},
],
{
id: "research",
name: "Research",
},
};
const tool = createOpenClawTools({
agentSessionKey: "main",
}).find((candidate) => candidate.name === "agents_list");
if (!tool) {
throw new Error("missing agents_list tool");
}
]);
const tool = requireAgentsListTool();
const result = await tool.execute("call2", {});
const agents = (
result.details as {
agents?: Array<{ id: string }>;
}
).agents;
const agents = readAgentList(result);
expect(agents?.map((agent) => agent.id)).toEqual(["main", "research"]);
});
it("returns configured agents when allowlist is *", async () => {
configOverride = {
session: {
mainKey: "main",
scope: "per-sender",
setConfigWithAgentList([
{
id: "main",
subagents: {
allowAgents: ["*"],
},
},
agents: {
list: [
{
id: "main",
subagents: {
allowAgents: ["*"],
},
},
{
id: "research",
name: "Research",
},
{
id: "coder",
name: "Coder",
},
],
{
id: "research",
name: "Research",
},
};
const tool = createOpenClawTools({
agentSessionKey: "main",
}).find((candidate) => candidate.name === "agents_list");
if (!tool) {
throw new Error("missing agents_list tool");
}
{
id: "coder",
name: "Coder",
},
]);
const tool = requireAgentsListTool();
const result = await tool.execute("call3", {});
expect(result.details).toMatchObject({
allowAny: true,
});
const agents = (
result.details as {
agents?: Array<{ id: string }>;
}
).agents;
const agents = readAgentList(result);
expect(agents?.map((agent) => agent.id)).toEqual(["main", "coder", "research"]);
});
it("marks allowlisted-but-unconfigured agents", async () => {
configOverride = {
session: {
mainKey: "main",
scope: "per-sender",
setConfigWithAgentList([
{
id: "main",
subagents: {
allowAgents: ["research"],
},
},
agents: {
list: [
{
id: "main",
subagents: {
allowAgents: ["research"],
},
},
],
},
};
const tool = createOpenClawTools({
agentSessionKey: "main",
}).find((candidate) => candidate.name === "agents_list");
if (!tool) {
throw new Error("missing agents_list tool");
}
]);
const tool = requireAgentsListTool();
const result = await tool.execute("call4", {});
const agents = (
result.details as {
agents?: Array<{ id: string; configured: boolean }>;
}
).agents;
const agents = readAgentList(result);
expect(agents?.map((agent) => agent.id)).toEqual(["main", "research"]);
const research = agents?.find((agent) => agent.id === "research");
expect(research?.configured).toBe(false);

View File

@@ -69,6 +69,29 @@ async function executeSpawnAndExpectAccepted(params: {
return result;
}
async function emitLifecycleEndAndFlush(params: {
runId: string;
startedAt: number;
endedAt: number;
}) {
vi.useFakeTimers();
try {
emitAgentEvent({
runId: params.runId,
stream: "lifecycle",
data: {
phase: "end",
startedAt: params.startedAt,
endedAt: params.endedAt,
},
});
await vi.runAllTimersAsync();
} finally {
vi.useRealTimers();
}
}
describe("openclaw-tools: subagents (sessions_spawn lifecycle)", () => {
let previousFastTestEnv: string | undefined;
@@ -187,22 +210,11 @@ describe("openclaw-tools: subagents (sessions_spawn lifecycle)", () => {
if (!child.runId) {
throw new Error("missing child runId");
}
vi.useFakeTimers();
try {
emitAgentEvent({
runId: child.runId,
stream: "lifecycle",
data: {
phase: "end",
startedAt: 1234,
endedAt: 2345,
},
});
await vi.runAllTimersAsync();
} finally {
vi.useRealTimers();
}
await emitLifecycleEndAndFlush({
runId: child.runId,
startedAt: 1234,
endedAt: 2345,
});
await waitFor(() => ctx.calls.filter((call) => call.method === "agent").length >= 2);
await waitFor(() => Boolean(deletedKey));
@@ -341,22 +353,11 @@ describe("openclaw-tools: subagents (sessions_spawn lifecycle)", () => {
if (!child.runId) {
throw new Error("missing child runId");
}
vi.useFakeTimers();
try {
emitAgentEvent({
runId: child.runId,
stream: "lifecycle",
data: {
phase: "end",
startedAt: 1000,
endedAt: 2000,
},
});
await vi.runAllTimersAsync();
} finally {
vi.useRealTimers();
}
await emitLifecycleEndAndFlush({
runId: child.runId,
startedAt: 1000,
endedAt: 2000,
});
const agentCalls = ctx.calls.filter((call) => call.method === "agent");
expect(agentCalls).toHaveLength(2);

View File

@@ -5,22 +5,19 @@ import {
loadSanitizeSessionHistoryWithCleanMocks,
makeMockSessionManager,
makeSimpleUserMessages,
makeSnapshotChangedOpenAIReasoningScenario,
sanitizeSnapshotChangedOpenAIReasoning,
sanitizeWithOpenAIResponses,
} from "./pi-embedded-runner.sanitize-session-history.test-harness.js";
vi.mock("./pi-embedded-helpers.js", async () => ({
...(await vi.importActual("./pi-embedded-helpers.js")),
isGoogleModelApi: vi.fn(),
sanitizeSessionMessagesImages: vi.fn(async (msgs) => msgs),
}));
type SanitizeSessionHistory = Awaited<ReturnType<typeof loadSanitizeSessionHistoryWithCleanMocks>>;
let sanitizeSessionHistory: SanitizeSessionHistory;
vi.mock("./pi-embedded-helpers.js", async () => {
const actual = await vi.importActual("./pi-embedded-helpers.js");
return {
...actual,
isGoogleModelApi: vi.fn(),
sanitizeSessionMessagesImages: vi.fn().mockImplementation(async (msgs) => msgs),
};
});
describe("sanitizeSessionHistory e2e smoke", () => {
const mockSessionManager = makeMockSessionManager();
const mockMessages = makeSimpleUserMessages();
@@ -57,13 +54,8 @@ describe("sanitizeSessionHistory e2e smoke", () => {
});
it("downgrades openai reasoning blocks when the model snapshot changed", async () => {
const { sessionManager, messages, modelId } = makeSnapshotChangedOpenAIReasoningScenario();
const result = await sanitizeWithOpenAIResponses({
const result = await sanitizeSnapshotChangedOpenAIReasoning({
sanitizeSessionHistory,
messages,
modelId,
sessionManager,
});
expect(result).toEqual([]);

View File

@@ -152,3 +152,15 @@ export function makeSnapshotChangedOpenAIReasoningScenario() {
modelId: "gpt-5.2-codex",
};
}
export async function sanitizeSnapshotChangedOpenAIReasoning(params: {
sanitizeSessionHistory: SanitizeSessionHistoryFn;
}) {
const { sessionManager, messages, modelId } = makeSnapshotChangedOpenAIReasoningScenario();
return await sanitizeWithOpenAIResponses({
sanitizeSessionHistory: params.sanitizeSessionHistory,
messages,
modelId,
sessionManager,
});
}

View File

@@ -9,23 +9,19 @@ import {
makeModelSnapshotEntry,
makeReasoningAssistantMessages,
makeSimpleUserMessages,
makeSnapshotChangedOpenAIReasoningScenario,
sanitizeSnapshotChangedOpenAIReasoning,
type SanitizeSessionHistoryFn,
sanitizeWithOpenAIResponses,
TEST_SESSION_ID,
} from "./pi-embedded-runner.sanitize-session-history.test-harness.js";
let sanitizeSessionHistory: SanitizeSessionHistoryFn;
vi.mock("./pi-embedded-helpers.js", async () => ({
...(await vi.importActual("./pi-embedded-helpers.js")),
isGoogleModelApi: vi.fn(),
sanitizeSessionMessagesImages: vi.fn(async (msgs) => msgs),
}));
// Mock dependencies
vi.mock("./pi-embedded-helpers.js", async () => {
const actual = await vi.importActual("./pi-embedded-helpers.js");
return {
...actual,
isGoogleModelApi: vi.fn(),
sanitizeSessionMessagesImages: vi.fn().mockImplementation(async (msgs) => msgs),
};
});
let sanitizeSessionHistory: SanitizeSessionHistoryFn;
// We don't mock session-transcript-repair.js as it is a pure function and complicates mocking.
// We rely on the real implementation which should pass through our simple messages.
@@ -59,6 +55,24 @@ describe("sanitizeSessionHistory", () => {
const getAssistantContentTypes = (messages: AgentMessage[]) =>
getAssistantMessage(messages).content.map((block: { type: string }) => block.type);
const makeThinkingAndTextAssistantMessages = (
thinkingSignature: string = "some_sig",
): AgentMessage[] =>
[
{ role: "user", content: "hello" },
{
role: "assistant",
content: [
{
type: "thinking",
thinking: "internal",
thinkingSignature,
},
{ type: "text", text: "hi" },
],
},
] as unknown as AgentMessage[];
beforeEach(async () => {
sanitizeSessionHistory = await loadSanitizeSessionHistoryWithCleanMocks();
});
@@ -394,13 +408,8 @@ describe("sanitizeSessionHistory", () => {
});
it("downgrades orphaned openai reasoning when the model changes too", async () => {
const { sessionManager, messages, modelId } = makeSnapshotChangedOpenAIReasoningScenario();
const result = await sanitizeWithOpenAIResponses({
const result = await sanitizeSnapshotChangedOpenAIReasoning({
sanitizeSessionHistory,
messages,
modelId,
sessionManager,
});
expect(result).toEqual([]);
@@ -457,20 +466,7 @@ describe("sanitizeSessionHistory", () => {
it("drops assistant thinking blocks for github-copilot models", async () => {
setNonGoogleModelApi();
const messages = [
{ role: "user", content: "hello" },
{
role: "assistant",
content: [
{
type: "thinking",
thinking: "internal",
thinkingSignature: "reasoning_text",
},
{ type: "text", text: "hi" },
],
},
] as unknown as AgentMessage[];
const messages = makeThinkingAndTextAssistantMessages("reasoning_text");
const result = await sanitizeGithubCopilotHistory({ messages });
const assistant = getAssistantMessage(result);
@@ -532,20 +528,7 @@ describe("sanitizeSessionHistory", () => {
it("does not drop thinking blocks for non-copilot providers", async () => {
setNonGoogleModelApi();
const messages = [
{ role: "user", content: "hello" },
{
role: "assistant",
content: [
{
type: "thinking",
thinking: "internal",
thinkingSignature: "some_sig",
},
{ type: "text", text: "hi" },
],
},
] as unknown as AgentMessage[];
const messages = makeThinkingAndTextAssistantMessages();
const result = await sanitizeSessionHistory({
messages,
@@ -563,20 +546,7 @@ describe("sanitizeSessionHistory", () => {
it("does not drop thinking blocks for non-claude copilot models", async () => {
setNonGoogleModelApi();
const messages = [
{ role: "user", content: "hello" },
{
role: "assistant",
content: [
{
type: "thinking",
thinking: "internal",
thinkingSignature: "some_sig",
},
{ type: "text", text: "hi" },
],
},
] as unknown as AgentMessage[];
const messages = makeThinkingAndTextAssistantMessages();
const result = await sanitizeGithubCopilotHistory({ messages, modelId: "gpt-5.2" });
const types = getAssistantContentTypes(result);

View File

@@ -7,9 +7,9 @@ vi.mock("../pi-model-discovery.js", () => ({
import { buildInlineProviderModels, resolveModel } from "./model.js";
import {
buildOpenAICodexForwardCompatExpectation,
makeModel,
mockDiscoveredModel,
OPENAI_CODEX_TEMPLATE_MODEL,
mockOpenAICodexTemplateModel,
resetMockDiscoverModels,
} from "./model.test-harness.js";
@@ -38,21 +38,11 @@ describe("pi embedded model e2e smoke", () => {
});
it("builds an openai-codex forward-compat fallback for gpt-5.3-codex", () => {
mockDiscoveredModel({
provider: "openai-codex",
modelId: "gpt-5.2-codex",
templateModel: OPENAI_CODEX_TEMPLATE_MODEL,
});
mockOpenAICodexTemplateModel();
const result = resolveModel("openai-codex", "gpt-5.3-codex", "/tmp/agent");
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject({
provider: "openai-codex",
id: "gpt-5.3-codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
});
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.3-codex"));
});
it("keeps unknown-model errors for non-forward-compat IDs", () => {

View File

@@ -25,6 +25,28 @@ export const OPENAI_CODEX_TEMPLATE_MODEL = {
maxTokens: 128000,
};
export function mockOpenAICodexTemplateModel(): void {
mockDiscoveredModel({
provider: "openai-codex",
modelId: "gpt-5.2-codex",
templateModel: OPENAI_CODEX_TEMPLATE_MODEL,
});
}
export function buildOpenAICodexForwardCompatExpectation(
id: string = "gpt-5.3-codex",
): Partial<typeof OPENAI_CODEX_TEMPLATE_MODEL> & { provider: string; id: string } {
return {
provider: "openai-codex",
id,
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
contextWindow: 272000,
maxTokens: 128000,
};
}
export function resetMockDiscoverModels(): void {
vi.mocked(discoverModels).mockReturnValue({
find: vi.fn(() => null),

View File

@@ -8,9 +8,10 @@ vi.mock("../pi-model-discovery.js", () => ({
import type { OpenClawConfig } from "../../config/config.js";
import { buildInlineProviderModels, resolveModel } from "./model.js";
import {
buildOpenAICodexForwardCompatExpectation,
makeModel,
mockDiscoveredModel,
OPENAI_CODEX_TEMPLATE_MODEL,
mockOpenAICodexTemplateModel,
resetMockDiscoverModels,
} from "./model.test-harness.js";
@@ -171,24 +172,12 @@ describe("resolveModel", () => {
});
it("builds an openai-codex fallback for gpt-5.3-codex", () => {
mockDiscoveredModel({
provider: "openai-codex",
modelId: "gpt-5.2-codex",
templateModel: OPENAI_CODEX_TEMPLATE_MODEL,
});
mockOpenAICodexTemplateModel();
const result = resolveModel("openai-codex", "gpt-5.3-codex", "/tmp/agent");
expect(result.error).toBeUndefined();
expect(result.model).toMatchObject({
provider: "openai-codex",
id: "gpt-5.3-codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
contextWindow: 272000,
maxTokens: 128000,
});
expect(result.model).toMatchObject(buildOpenAICodexForwardCompatExpectation("gpt-5.3-codex"));
});
it("builds an anthropic forward-compat fallback for claude-opus-4-6", () => {

View File

@@ -1,5 +1,28 @@
import type { EmbeddedRunAttemptResult } from "./run/types.js";
export const DEFAULT_OVERFLOW_ERROR_MESSAGE =
"request_too_large: Request size exceeds model context window";
export function makeOverflowError(message: string = DEFAULT_OVERFLOW_ERROR_MESSAGE): Error {
return new Error(message);
}
export function makeCompactionSuccess(params: {
summary: string;
firstKeptEntryId: string;
tokensBefore: number;
}) {
return {
ok: true as const,
compacted: true as const,
result: {
summary: params.summary,
firstKeptEntryId: params.firstKeptEntryId,
tokensBefore: params.tokensBefore,
},
};
}
export function makeAttemptResult(
overrides: Partial<EmbeddedRunAttemptResult> = {},
): EmbeddedRunAttemptResult {
@@ -43,24 +66,38 @@ export function mockOverflowRetrySuccess(params: {
compactDirect: MockCompactDirect;
overflowMessage?: string;
}) {
const overflowError = new Error(
params.overflowMessage ?? "request_too_large: Request size exceeds model context window",
);
const overflowError = makeOverflowError(params.overflowMessage);
params.runEmbeddedAttempt.mockResolvedValueOnce(
makeAttemptResult({ promptError: overflowError }),
);
params.runEmbeddedAttempt.mockResolvedValueOnce(makeAttemptResult({ promptError: null }));
params.compactDirect.mockResolvedValueOnce({
ok: true,
compacted: true,
result: {
params.compactDirect.mockResolvedValueOnce(
makeCompactionSuccess({
summary: "Compacted session",
firstKeptEntryId: "entry-5",
tokensBefore: 150000,
},
});
}),
);
return overflowError;
}
export function queueOverflowAttemptWithOversizedToolOutput(
runEmbeddedAttempt: MockRunEmbeddedAttempt,
overflowError: Error = makeOverflowError(),
): Error {
runEmbeddedAttempt.mockResolvedValueOnce(
makeAttemptResult({
promptError: overflowError,
messagesSnapshot: [
{
role: "assistant",
content: "big tool output",
} as unknown as EmbeddedRunAttemptResult["messagesSnapshot"][number],
],
}),
);
return overflowError;
}

View File

@@ -8,7 +8,13 @@ vi.mock("../../utils.js", () => ({
import { log } from "./logger.js";
import { runEmbeddedPiAgent } from "./run.js";
import { makeAttemptResult, mockOverflowRetrySuccess } from "./run.overflow-compaction.fixture.js";
import {
makeAttemptResult,
makeCompactionSuccess,
makeOverflowError,
mockOverflowRetrySuccess,
queueOverflowAttemptWithOversizedToolOutput,
} from "./run.overflow-compaction.fixture.js";
import {
mockedCompactDirect,
mockedRunEmbeddedAttempt,
@@ -86,15 +92,13 @@ describe("overflow compaction in run loop", () => {
.mockResolvedValueOnce(makeAttemptResult({ promptError: overflowHintError }))
.mockResolvedValueOnce(makeAttemptResult({ promptError: null }));
mockedCompactDirect.mockResolvedValueOnce({
ok: true,
compacted: true,
result: {
mockedCompactDirect.mockResolvedValueOnce(
makeCompactionSuccess({
summary: "Compacted session",
firstKeptEntryId: "entry-6",
tokensBefore: 140000,
},
});
}),
);
const result = await runEmbeddedPiAgent(baseParams);
@@ -105,7 +109,7 @@ describe("overflow compaction in run loop", () => {
});
it("returns error if compaction fails", async () => {
const overflowError = new Error("request_too_large: Request size exceeds model context window");
const overflowError = makeOverflowError();
mockedRunEmbeddedAttempt.mockResolvedValue(makeAttemptResult({ promptError: overflowError }));
@@ -125,21 +129,8 @@ describe("overflow compaction in run loop", () => {
});
it("falls back to tool-result truncation and retries when oversized results are detected", async () => {
const overflowError = new Error("request_too_large: Request size exceeds model context window");
mockedRunEmbeddedAttempt
.mockResolvedValueOnce(
makeAttemptResult({
promptError: overflowError,
messagesSnapshot: [
{
role: "assistant",
content: "big tool output",
} as unknown as EmbeddedRunAttemptResult["messagesSnapshot"][number],
],
}),
)
.mockResolvedValueOnce(makeAttemptResult({ promptError: null }));
queueOverflowAttemptWithOversizedToolOutput(mockedRunEmbeddedAttempt, makeOverflowError());
mockedRunEmbeddedAttempt.mockResolvedValueOnce(makeAttemptResult({ promptError: null }));
mockedCompactDirect.mockResolvedValueOnce({
ok: false,
@@ -167,7 +158,7 @@ describe("overflow compaction in run loop", () => {
});
it("retries compaction up to 3 times before giving up", async () => {
const overflowError = new Error("request_too_large: Request size exceeds model context window");
const overflowError = makeOverflowError();
// 4 overflow errors: 3 compaction retries + final failure
mockedRunEmbeddedAttempt
@@ -177,21 +168,27 @@ describe("overflow compaction in run loop", () => {
.mockResolvedValueOnce(makeAttemptResult({ promptError: overflowError }));
mockedCompactDirect
.mockResolvedValueOnce({
ok: true,
compacted: true,
result: { summary: "Compacted 1", firstKeptEntryId: "entry-3", tokensBefore: 180000 },
})
.mockResolvedValueOnce({
ok: true,
compacted: true,
result: { summary: "Compacted 2", firstKeptEntryId: "entry-5", tokensBefore: 160000 },
})
.mockResolvedValueOnce({
ok: true,
compacted: true,
result: { summary: "Compacted 3", firstKeptEntryId: "entry-7", tokensBefore: 140000 },
});
.mockResolvedValueOnce(
makeCompactionSuccess({
summary: "Compacted 1",
firstKeptEntryId: "entry-3",
tokensBefore: 180000,
}),
)
.mockResolvedValueOnce(
makeCompactionSuccess({
summary: "Compacted 2",
firstKeptEntryId: "entry-5",
tokensBefore: 160000,
}),
)
.mockResolvedValueOnce(
makeCompactionSuccess({
summary: "Compacted 3",
firstKeptEntryId: "entry-7",
tokensBefore: 140000,
}),
);
const result = await runEmbeddedPiAgent(baseParams);
@@ -204,7 +201,7 @@ describe("overflow compaction in run loop", () => {
});
it("succeeds after second compaction attempt", async () => {
const overflowError = new Error("request_too_large: Request size exceeds model context window");
const overflowError = makeOverflowError();
mockedRunEmbeddedAttempt
.mockResolvedValueOnce(makeAttemptResult({ promptError: overflowError }))
@@ -212,16 +209,20 @@ describe("overflow compaction in run loop", () => {
.mockResolvedValueOnce(makeAttemptResult({ promptError: null }));
mockedCompactDirect
.mockResolvedValueOnce({
ok: true,
compacted: true,
result: { summary: "Compacted 1", firstKeptEntryId: "entry-3", tokensBefore: 180000 },
})
.mockResolvedValueOnce({
ok: true,
compacted: true,
result: { summary: "Compacted 2", firstKeptEntryId: "entry-5", tokensBefore: 160000 },
});
.mockResolvedValueOnce(
makeCompactionSuccess({
summary: "Compacted 1",
firstKeptEntryId: "entry-3",
tokensBefore: 180000,
}),
)
.mockResolvedValueOnce(
makeCompactionSuccess({
summary: "Compacted 2",
firstKeptEntryId: "entry-5",
tokensBefore: 160000,
}),
);
const result = await runEmbeddedPiAgent(baseParams);
@@ -259,15 +260,13 @@ describe("overflow compaction in run loop", () => {
)
.mockResolvedValueOnce(makeAttemptResult({ promptError: null }));
mockedCompactDirect.mockResolvedValueOnce({
ok: true,
compacted: true,
result: {
mockedCompactDirect.mockResolvedValueOnce(
makeCompactionSuccess({
summary: "Compacted session",
firstKeptEntryId: "entry-5",
tokensBefore: 150000,
},
});
}),
);
const result = await runEmbeddedPiAgent(baseParams);

View File

@@ -2,7 +2,13 @@ import "./run.overflow-compaction.mocks.shared.js";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { pickFallbackThinkingLevel } from "../pi-embedded-helpers.js";
import { runEmbeddedPiAgent } from "./run.js";
import { makeAttemptResult, mockOverflowRetrySuccess } from "./run.overflow-compaction.fixture.js";
import {
makeAttemptResult,
makeCompactionSuccess,
makeOverflowError,
mockOverflowRetrySuccess,
queueOverflowAttemptWithOversizedToolOutput,
} from "./run.overflow-compaction.fixture.js";
import { mockedGlobalHookRunner } from "./run.overflow-compaction.mocks.shared.js";
import {
mockedCompactDirect,
@@ -11,7 +17,6 @@ import {
mockedTruncateOversizedToolResultsInSession,
overflowBaseRunParams,
} from "./run.overflow-compaction.shared-test.js";
import type { EmbeddedRunAttemptResult } from "./run/types.js";
const mockedPickFallbackThinkingLevel = vi.mocked(pickFallbackThinkingLevel);
describe("runEmbeddedPiAgent overflow compaction trigger routing", () => {
@@ -67,20 +72,11 @@ describe("runEmbeddedPiAgent overflow compaction trigger routing", () => {
});
it("does not reset compaction attempt budget after successful tool-result truncation", async () => {
const overflowError = new Error("request_too_large: Request size exceeds model context window");
const overflowError = queueOverflowAttemptWithOversizedToolOutput(
mockedRunEmbeddedAttempt,
makeOverflowError(),
);
mockedRunEmbeddedAttempt
.mockResolvedValueOnce(
makeAttemptResult({
promptError: overflowError,
messagesSnapshot: [
{
role: "assistant",
content: "big tool output",
} as unknown as EmbeddedRunAttemptResult["messagesSnapshot"][number],
],
}),
)
.mockResolvedValueOnce(makeAttemptResult({ promptError: overflowError }))
.mockResolvedValueOnce(makeAttemptResult({ promptError: overflowError }))
.mockResolvedValueOnce(makeAttemptResult({ promptError: overflowError }));
@@ -91,16 +87,20 @@ describe("runEmbeddedPiAgent overflow compaction trigger routing", () => {
compacted: false,
reason: "nothing to compact",
})
.mockResolvedValueOnce({
ok: true,
compacted: true,
result: { summary: "Compacted 2", firstKeptEntryId: "entry-5", tokensBefore: 160000 },
})
.mockResolvedValueOnce({
ok: true,
compacted: true,
result: { summary: "Compacted 3", firstKeptEntryId: "entry-7", tokensBefore: 140000 },
});
.mockResolvedValueOnce(
makeCompactionSuccess({
summary: "Compacted 2",
firstKeptEntryId: "entry-5",
tokensBefore: 160000,
}),
)
.mockResolvedValueOnce(
makeCompactionSuccess({
summary: "Compacted 3",
firstKeptEntryId: "entry-7",
tokensBefore: 140000,
}),
);
mockedSessionLikelyHasOversizedToolResults.mockReturnValue(true);
mockedTruncateOversizedToolResultsInSession.mockResolvedValueOnce({

View File

@@ -7,6 +7,7 @@ import type { OpenClawConfig } from "../config/config.js";
import { createOpenClawCodingTools } from "./pi-tools.js";
import type { SandboxDockerConfig } from "./sandbox.js";
import type { SandboxFsBridge } from "./sandbox/fs-bridge.js";
import { createRestrictedAgentSandboxConfig } from "./test-helpers/sandbox-agent-config-fixtures.js";
type ToolWithExecute = {
execute: (toolCallId: string, args: unknown, signal?: AbortSignal) => Promise<unknown>;
@@ -85,28 +86,41 @@ describe("Agent-specific tool filtering", () => {
}
}
it("should apply global tool policy when no agent-specific policy exists", () => {
const cfg: OpenClawConfig = {
tools: {
allow: ["read", "write"],
deny: ["bash"],
},
agents: {
list: [
{
id: "main",
workspace: "~/openclaw",
},
],
},
};
const tools = createOpenClawCodingTools({
function createMainSessionTools(cfg: OpenClawConfig) {
return createOpenClawCodingTools({
config: cfg,
sessionKey: "agent:main:main",
workspaceDir: "/tmp/test",
agentDir: "/tmp/agent",
});
}
function createMainAgentConfig(params: {
tools: NonNullable<OpenClawConfig["tools"]>;
agentTools?: NonNullable<NonNullable<OpenClawConfig["agents"]>["list"]>[number]["tools"];
}): OpenClawConfig {
return {
tools: params.tools,
agents: {
list: [
{
id: "main",
workspace: "~/openclaw",
...(params.agentTools ? { tools: params.agentTools } : {}),
},
],
},
};
}
it("should apply global tool policy when no agent-specific policy exists", () => {
const cfg = createMainAgentConfig({
tools: {
allow: ["read", "write"],
deny: ["bash"],
},
});
const tools = createMainSessionTools(cfg);
const toolNames = tools.map((t) => t.name);
expect(toolNames).toContain("read");
@@ -116,32 +130,18 @@ describe("Agent-specific tool filtering", () => {
});
it("should keep global tool policy when agent only sets tools.elevated", () => {
const cfg: OpenClawConfig = {
const cfg = createMainAgentConfig({
tools: {
deny: ["write"],
},
agents: {
list: [
{
id: "main",
workspace: "~/openclaw",
tools: {
elevated: {
enabled: true,
allowFrom: { whatsapp: ["+15555550123"] },
},
},
},
],
agentTools: {
elevated: {
enabled: true,
allowFrom: { whatsapp: ["+15555550123"] },
},
},
};
const tools = createOpenClawCodingTools({
config: cfg,
sessionKey: "agent:main:main",
workspaceDir: "/tmp/test",
agentDir: "/tmp/agent",
});
const tools = createMainSessionTools(cfg);
const toolNames = tools.map((t) => t.name);
expect(toolNames).toContain("exec");
@@ -524,38 +524,16 @@ describe("Agent-specific tool filtering", () => {
});
it("should work with sandbox tools filtering", () => {
const cfg: OpenClawConfig = {
agents: {
defaults: {
sandbox: {
mode: "all",
scope: "agent",
},
},
list: [
{
id: "restricted",
workspace: "~/openclaw-restricted",
sandbox: {
mode: "all",
scope: "agent",
},
tools: {
allow: ["read"], // Agent further restricts to only read
deny: ["exec", "write"],
},
},
],
const cfg = createRestrictedAgentSandboxConfig({
agentTools: {
allow: ["read"], // Agent further restricts to only read
deny: ["exec", "write"],
},
tools: {
sandbox: {
tools: {
allow: ["read", "write", "exec"], // Sandbox allows these
deny: [],
},
},
globalSandboxTools: {
allow: ["read", "write", "exec"], // Sandbox allows these
deny: [],
},
};
});
const tools = createOpenClawCodingTools({
config: cfg,

View File

@@ -3,6 +3,7 @@ import path from "node:path";
import { Readable } from "node:stream";
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
import type { OpenClawConfig } from "../config/config.js";
import { createRestrictedAgentSandboxConfig } from "./test-helpers/sandbox-agent-config-fixtures.js";
type SpawnCall = {
command: string;
@@ -72,6 +73,50 @@ function expectDockerSetupCommand(command: string) {
).toBe(true);
}
function createDefaultsSandboxConfig(
scope: "agent" | "shared" | "session" = "agent",
): OpenClawConfig {
return {
agents: {
defaults: {
sandbox: {
mode: "all",
scope,
},
},
},
};
}
function createWorkSetupCommandConfig(scope: "agent" | "shared"): OpenClawConfig {
return {
agents: {
defaults: {
sandbox: {
mode: "all",
scope,
docker: {
setupCommand: "echo global",
},
},
},
list: [
{
id: "work",
workspace: "~/openclaw-work",
sandbox: {
mode: "all",
scope,
docker: {
setupCommand: "echo work",
},
},
},
],
},
};
}
describe("Agent-specific sandbox config", () => {
beforeAll(async () => {
({ resolveSandboxConfigForAgent, resolveSandboxContext } = await import("./sandbox.js"));
@@ -157,42 +202,20 @@ describe("Agent-specific sandbox config", () => {
});
it("should prefer agent-specific sandbox tool policy", async () => {
const cfg: OpenClawConfig = {
agents: {
defaults: {
sandbox: {
mode: "all",
scope: "agent",
},
},
list: [
{
id: "restricted",
workspace: "~/openclaw-restricted",
sandbox: {
mode: "all",
scope: "agent",
},
tools: {
sandbox: {
tools: {
allow: ["read", "write"],
deny: ["edit"],
},
},
},
},
],
},
tools: {
const cfg = createRestrictedAgentSandboxConfig({
agentTools: {
sandbox: {
tools: {
allow: ["read"],
deny: ["exec"],
allow: ["read", "write"],
deny: ["edit"],
},
},
},
};
globalSandboxTools: {
allow: ["read"],
deny: ["exec"],
},
});
const context = await resolveContext(cfg, "agent:restricted:main", "/tmp/test-restricted");
@@ -228,32 +251,7 @@ describe("Agent-specific sandbox config", () => {
});
it("should allow agent-specific docker setupCommand overrides", async () => {
const cfg: OpenClawConfig = {
agents: {
defaults: {
sandbox: {
mode: "all",
scope: "agent",
docker: {
setupCommand: "echo global",
},
},
},
list: [
{
id: "work",
workspace: "~/openclaw-work",
sandbox: {
mode: "all",
scope: "agent",
docker: {
setupCommand: "echo work",
},
},
},
],
},
};
const cfg = createWorkSetupCommandConfig("agent");
const context = await resolveContext(cfg, "agent:work:main", "/tmp/test-work");
@@ -263,32 +261,7 @@ describe("Agent-specific sandbox config", () => {
});
it("should ignore agent-specific docker overrides when scope is shared", async () => {
const cfg: OpenClawConfig = {
agents: {
defaults: {
sandbox: {
mode: "all",
scope: "shared",
docker: {
setupCommand: "echo global",
},
},
},
list: [
{
id: "work",
workspace: "~/openclaw-work",
sandbox: {
mode: "all",
scope: "shared",
docker: {
setupCommand: "echo work",
},
},
},
],
},
};
const cfg = createWorkSetupCommandConfig("shared");
const context = await resolveContext(cfg, "agent:work:main", "/tmp/test-work");
@@ -421,32 +394,14 @@ describe("Agent-specific sandbox config", () => {
});
it("includes session_status in default sandbox allowlist", async () => {
const cfg: OpenClawConfig = {
agents: {
defaults: {
sandbox: {
mode: "all",
scope: "agent",
},
},
},
};
const cfg = createDefaultsSandboxConfig();
const sandbox = resolveSandboxConfigForAgent(cfg, "main");
expect(sandbox.tools.allow).toContain("session_status");
});
it("includes image in default sandbox allowlist", async () => {
const cfg: OpenClawConfig = {
agents: {
defaults: {
sandbox: {
mode: "all",
scope: "agent",
},
},
},
};
const cfg = createDefaultsSandboxConfig();
const sandbox = resolveSandboxConfigForAgent(cfg, "main");
expect(sandbox.tools.allow).toContain("image");

View File

@@ -0,0 +1,15 @@
import type { OpenClawConfig } from "../../config/config.js";
export function makeModelFallbackCfg(overrides: Partial<OpenClawConfig> = {}): OpenClawConfig {
return {
agents: {
defaults: {
model: {
primary: "openai/gpt-4.1-mini",
fallbacks: ["anthropic/claude-haiku-3-5"],
},
},
},
...overrides,
} as OpenClawConfig;
}

View File

@@ -0,0 +1,44 @@
import type { OpenClawConfig } from "../../config/config.js";
type AgentToolsConfig = NonNullable<NonNullable<OpenClawConfig["agents"]>["list"]>[number]["tools"];
type SandboxToolsConfig = {
allow?: string[];
deny?: string[];
};
export function createRestrictedAgentSandboxConfig(params: {
agentTools?: AgentToolsConfig;
globalSandboxTools?: SandboxToolsConfig;
workspace?: string;
}): OpenClawConfig {
return {
agents: {
defaults: {
sandbox: {
mode: "all",
scope: "agent",
},
},
list: [
{
id: "restricted",
workspace: params.workspace ?? "~/openclaw-restricted",
sandbox: {
mode: "all",
scope: "agent",
},
...(params.agentTools ? { tools: params.agentTools } : {}),
},
],
},
...(params.globalSandboxTools
? {
tools: {
sandbox: {
tools: params.globalSandboxTools,
},
},
}
: {}),
} as OpenClawConfig;
}

View File

@@ -13,6 +13,18 @@ function asOpenClawConfig(config: Partial<OpenClawConfig>): OpenClawConfig {
return config as OpenClawConfig;
}
function createToolConfig() {
return asOpenClawConfig({ agents: { list: [{ id: "main", default: true }] } });
}
function createMemoryGetToolOrThrow(config: OpenClawConfig = createToolConfig()) {
const tool = createMemoryGetTool({ config });
if (!tool) {
throw new Error("tool missing");
}
return tool;
}
beforeEach(() => {
resetMemoryToolMockState({
backend: "builtin",
@@ -144,12 +156,7 @@ describe("memory tools", () => {
throw new Error("path required");
});
const cfg = { agents: { list: [{ id: "main", default: true }] } };
const tool = createMemoryGetTool({ config: cfg });
expect(tool).not.toBeNull();
if (!tool) {
throw new Error("tool missing");
}
const tool = createMemoryGetToolOrThrow();
const result = await tool.execute("call_2", { path: "memory/NOPE.md" });
expect(result.details).toEqual({
@@ -165,12 +172,7 @@ describe("memory tools", () => {
return { text: "", path: "memory/2026-02-19.md" };
});
const cfg = { agents: { list: [{ id: "main", default: true }] } };
const tool = createMemoryGetTool({ config: cfg });
expect(tool).not.toBeNull();
if (!tool) {
throw new Error("tool missing");
}
const tool = createMemoryGetToolOrThrow();
const result = await tool.execute("call_enoent", { path: "memory/2026-02-19.md" });
expect(result.details).toEqual({

View File

@@ -58,6 +58,34 @@ describe("handleSlackAction", () => {
};
}
function createReplyToFirstScenario() {
const cfg = { channels: { slack: { botToken: "tok" } } } as OpenClawConfig;
sendSlackMessage.mockClear();
const hasRepliedRef = { value: false };
const context = createReplyToFirstContext(hasRepliedRef);
return { cfg, context, hasRepliedRef };
}
function expectLastSlackSend(content: string, threadTs?: string) {
expect(sendSlackMessage).toHaveBeenLastCalledWith("channel:C123", content, {
mediaUrl: undefined,
threadTs,
blocks: undefined,
});
}
async function sendSecondMessageAndExpectNoThread(params: {
cfg: OpenClawConfig;
context: ReturnType<typeof createReplyToFirstContext>;
}) {
await handleSlackAction(
{ action: "sendMessage", to: "channel:C123", content: "Second" },
params.cfg,
params.context,
);
expectLastSlackSend("Second");
}
async function resolveReadToken(cfg: OpenClawConfig): Promise<string | undefined> {
readSlackMessages.mockClear();
readSlackMessages.mockResolvedValueOnce({ messages: [], hasMore: false });
@@ -306,10 +334,7 @@ describe("handleSlackAction", () => {
});
it("replyToMode=first threads first message then stops", async () => {
const cfg = { channels: { slack: { botToken: "tok" } } } as OpenClawConfig;
sendSlackMessage.mockClear();
const hasRepliedRef = { value: false };
const context = createReplyToFirstContext(hasRepliedRef);
const { cfg, context, hasRepliedRef } = createReplyToFirstScenario();
// First message should be threaded
await handleSlackAction(
@@ -317,31 +342,14 @@ describe("handleSlackAction", () => {
cfg,
context,
);
expect(sendSlackMessage).toHaveBeenLastCalledWith("channel:C123", "First", {
mediaUrl: undefined,
threadTs: "1111111111.111111",
blocks: undefined,
});
expectLastSlackSend("First", "1111111111.111111");
expect(hasRepliedRef.value).toBe(true);
// Second message should NOT be threaded
await handleSlackAction(
{ action: "sendMessage", to: "channel:C123", content: "Second" },
cfg,
context,
);
expect(sendSlackMessage).toHaveBeenLastCalledWith("channel:C123", "Second", {
mediaUrl: undefined,
threadTs: undefined,
blocks: undefined,
});
await sendSecondMessageAndExpectNoThread({ cfg, context });
});
it("replyToMode=first marks hasRepliedRef even when threadTs is explicit", async () => {
const cfg = { channels: { slack: { botToken: "tok" } } } as OpenClawConfig;
sendSlackMessage.mockClear();
const hasRepliedRef = { value: false };
const context = createReplyToFirstContext(hasRepliedRef);
const { cfg, context, hasRepliedRef } = createReplyToFirstScenario();
await handleSlackAction(
{
@@ -353,23 +361,10 @@ describe("handleSlackAction", () => {
cfg,
context,
);
expect(sendSlackMessage).toHaveBeenLastCalledWith("channel:C123", "Explicit", {
mediaUrl: undefined,
threadTs: "2222222222.222222",
blocks: undefined,
});
expectLastSlackSend("Explicit", "2222222222.222222");
expect(hasRepliedRef.value).toBe(true);
await handleSlackAction(
{ action: "sendMessage", to: "channel:C123", content: "Second" },
cfg,
context,
);
expect(sendSlackMessage).toHaveBeenLastCalledWith("channel:C123", "Second", {
mediaUrl: undefined,
threadTs: undefined,
blocks: undefined,
});
await sendSecondMessageAndExpectNoThread({ cfg, context });
});
it("replyToMode=first without hasRepliedRef does not thread", async () => {

View File

@@ -421,11 +421,7 @@ describe("handleTelegramAction", () => {
});
it("allows inline buttons in DMs with tg: prefixed targets", async () => {
const cfg = {
channels: {
telegram: { botToken: "tok", capabilities: { inlineButtons: "dm" } },
},
} as OpenClawConfig;
const cfg = telegramConfig({ capabilities: { inlineButtons: "dm" } });
await handleTelegramAction(
{
action: "sendMessage",
@@ -439,11 +435,7 @@ describe("handleTelegramAction", () => {
});
it("allows inline buttons in groups with topic targets", async () => {
const cfg = {
channels: {
telegram: { botToken: "tok", capabilities: { inlineButtons: "group" } },
},
} as OpenClawConfig;
const cfg = telegramConfig({ capabilities: { inlineButtons: "group" } });
await handleTelegramAction(
{
action: "sendMessage",
@@ -457,11 +449,7 @@ describe("handleTelegramAction", () => {
});
it("sends messages with inline keyboard buttons when enabled", async () => {
const cfg = {
channels: {
telegram: { botToken: "tok", capabilities: { inlineButtons: "all" } },
},
} as OpenClawConfig;
const cfg = telegramConfig({ capabilities: { inlineButtons: "all" } });
await handleTelegramAction(
{
action: "sendMessage",
@@ -481,11 +469,7 @@ describe("handleTelegramAction", () => {
});
it("forwards optional button style", async () => {
const cfg = {
channels: {
telegram: { botToken: "tok", capabilities: { inlineButtons: "all" } },
},
} as OpenClawConfig;
const cfg = telegramConfig({ capabilities: { inlineButtons: "all" } });
await handleTelegramAction(
{
action: "sendMessage",

View File

@@ -1,10 +1,7 @@
import { describe, expect, it } from "vitest";
import { withFetchPreconnect } from "../test-utils/fetch-mock.js";
import { fetchDiscord } from "./api.js";
function jsonResponse(body: unknown, status = 200) {
return new Response(JSON.stringify(body), { status });
}
import { jsonResponse } from "./test-http-helpers.js";
describe("fetchDiscord", () => {
it("formats rate limit payloads without raw JSON", async () => {

View File

@@ -1,17 +1,7 @@
import { describe, expect, it } from "vitest";
import { withFetchPreconnect } from "../test-utils/fetch-mock.js";
import { resolveDiscordChannelAllowlist } from "./resolve-channels.js";
function jsonResponse(body: unknown) {
return new Response(JSON.stringify(body), { status: 200 });
}
const urlToString = (url: Request | URL | string): string => {
if (typeof url === "string") {
return url;
}
return "url" in url ? url.url : String(url);
};
import { jsonResponse, urlToString } from "./test-http-helpers.js";
describe("resolveDiscordChannelAllowlist", () => {
it("resolves guild/channel by name", async () => {

View File

@@ -1,17 +1,7 @@
import { describe, expect, it } from "vitest";
import { withFetchPreconnect } from "../test-utils/fetch-mock.js";
import { resolveDiscordUserAllowlist } from "./resolve-users.js";
function jsonResponse(body: unknown) {
return new Response(JSON.stringify(body), { status: 200 });
}
const urlToString = (url: Request | URL | string): string => {
if (typeof url === "string") {
return url;
}
return "url" in url ? url.url : String(url);
};
import { jsonResponse, urlToString } from "./test-http-helpers.js";
function createGuildListProbeFetcher() {
let guildsCalled = false;
@@ -29,6 +19,16 @@ function createGuildListProbeFetcher() {
};
}
function createGuildsForbiddenFetcher() {
return withFetchPreconnect(async (input: RequestInfo | URL) => {
const url = urlToString(input);
if (url.endsWith("/users/@me/guilds")) {
throw new Error("Forbidden: Missing Access");
}
return new Response("not found", { status: 404 });
});
}
describe("resolveDiscordUserAllowlist", () => {
it("resolves plain user ids without calling listGuilds", async () => {
const { fetcher, wasGuildsCalled } = createGuildListProbeFetcher();
@@ -84,13 +84,7 @@ describe("resolveDiscordUserAllowlist", () => {
});
it("resolves user ids even when listGuilds would fail", async () => {
const fetcher = withFetchPreconnect(async (input: RequestInfo | URL) => {
const url = urlToString(input);
if (url.endsWith("/users/@me/guilds")) {
throw new Error("Forbidden: Missing Access");
}
return new Response("not found", { status: 404 });
});
const fetcher = createGuildsForbiddenFetcher();
// Before the fix, this would throw because listGuilds() was called eagerly
const results = await resolveDiscordUserAllowlist({
@@ -177,13 +171,7 @@ describe("resolveDiscordUserAllowlist", () => {
});
it("handles mixed ids and usernames — ids resolve even if guilds fail", async () => {
const fetcher = withFetchPreconnect(async (input: RequestInfo | URL) => {
const url = urlToString(input);
if (url.endsWith("/users/@me/guilds")) {
throw new Error("Forbidden: Missing Access");
}
return new Response("not found", { status: 404 });
});
const fetcher = createGuildsForbiddenFetcher();
// IDs should succeed, username should fail (listGuilds throws)
await expect(

View File

@@ -0,0 +1,10 @@
export function jsonResponse(body: unknown, status = 200): Response {
return new Response(JSON.stringify(body), { status });
}
export function urlToString(url: Request | URL | string): string {
if (typeof url === "string") {
return url;
}
return "url" in url ? url.url : String(url);
}