refactor(test): share onboarding and model auth test helpers

This commit is contained in:
Peter Steinberger
2026-02-16 16:48:36 +00:00
parent ac5f6e7c9d
commit 2d8edf85ad
10 changed files with 253 additions and 244 deletions

View File

@@ -235,13 +235,10 @@ describe("applyAuthChoice", () => {
}
return "default";
});
const multiselect: WizardPrompter["multiselect"] = vi.fn(async () => []);
const prompter = createPrompter({
const { prompter, runtime } = createApiKeyPromptHarness({
select: select as WizardPrompter["select"],
multiselect,
text,
});
const runtime = createExitThrowingRuntime();
const result = await applyAuthChoice({
authChoice: "zai-api-key",
@@ -265,13 +262,10 @@ describe("applyAuthChoice", () => {
const text = vi.fn().mockResolvedValue("zai-test-key");
const select = vi.fn(async () => "default");
const multiselect: WizardPrompter["multiselect"] = vi.fn(async () => []);
const prompter = createPrompter({
const { prompter, runtime } = createApiKeyPromptHarness({
select: select as WizardPrompter["select"],
multiselect,
text,
});
const runtime = createExitThrowingRuntime();
const result = await applyAuthChoice({
authChoice: "zai-coding-global",

View File

@@ -34,6 +34,23 @@ describe("applyAuthChoice (moonshot)", () => {
}>(requireOpenClawAgentDir());
}
// Shared driver for the "moonshot-api-key-cn" auth flow: stubs the API-key
// text prompt with a fixed test key, runs applyAuthChoice against the given
// config, and returns the result plus the text mock for call assertions.
async function runMoonshotCnFlow(params: {
config: Record<string, unknown>;
setDefaultModel: boolean;
}) {
// Every text prompt resolves to the same dummy Moonshot CN key.
const text = vi.fn().mockResolvedValue("sk-moonshot-cn-test");
const prompter = createPrompter({ text: text as unknown as WizardPrompter["text"] });
// NOTE(review): per the helper's name this runtime presumably throws on
// exit() so an unexpected exit fails the test — confirm in its definition.
const runtime = createExitThrowingRuntime();
const result = await applyAuthChoice({
authChoice: "moonshot-api-key-cn",
config: params.config,
prompter,
runtime,
setDefaultModel: params.setDefaultModel,
});
// Expose the text spy so callers can assert how the key prompt was invoked.
return { result, text };
}
afterEach(async () => {
await lifecycle.cleanup();
});
@@ -41,12 +58,7 @@ describe("applyAuthChoice (moonshot)", () => {
it("keeps the .cn baseUrl when setDefaultModel is false", async () => {
await setupTempState();
const text = vi.fn().mockResolvedValue("sk-moonshot-cn-test");
const prompter = createPrompter({ text: text as unknown as WizardPrompter["text"] });
const runtime = createExitThrowingRuntime();
const result = await applyAuthChoice({
authChoice: "moonshot-api-key-cn",
const { result, text } = await runMoonshotCnFlow({
config: {
agents: {
defaults: {
@@ -54,8 +66,6 @@ describe("applyAuthChoice (moonshot)", () => {
},
},
},
prompter,
runtime,
setDefaultModel: false,
});
@@ -73,15 +83,8 @@ describe("applyAuthChoice (moonshot)", () => {
it("sets the default model when setDefaultModel is true", async () => {
await setupTempState();
const text = vi.fn().mockResolvedValue("sk-moonshot-cn-test");
const prompter = createPrompter({ text: text as unknown as WizardPrompter["text"] });
const runtime = createExitThrowingRuntime();
const result = await applyAuthChoice({
authChoice: "moonshot-api-key-cn",
const { result } = await runMoonshotCnFlow({
config: {},
prompter,
runtime,
setDefaultModel: true,
});

View File

@@ -56,6 +56,10 @@ function expectRouterModelFiltering(options: Array<{ value: string }>) {
);
}
// Multiselect stub that "selects everything": resolves with the value of every
// option it is offered, so tests can assert on the full filtered option list.
function createSelectAllMultiselect() {
return vi.fn(async (params) => params.options.map((option: { value: string }) => option.value));
}
describe("promptDefaultModel", () => {
it("filters internal router models from the selection list", async () => {
loadModelCatalog.mockResolvedValue(OPENROUTER_CATALOG);
@@ -132,9 +136,7 @@ describe("promptModelAllowlist", () => {
it("filters internal router models from the selection list", async () => {
loadModelCatalog.mockResolvedValue(OPENROUTER_CATALOG);
const multiselect = vi.fn(async (params) =>
params.options.map((option: { value: string }) => option.value),
);
const multiselect = createSelectAllMultiselect();
const prompter = makePrompter({ multiselect });
const config = { agents: { defaults: {} } } as OpenClawConfig;
@@ -163,9 +165,7 @@ describe("promptModelAllowlist", () => {
},
]);
const multiselect = vi.fn(async (params) =>
params.options.map((option: { value: string }) => option.value),
);
const multiselect = createSelectAllMultiselect();
const prompter = makePrompter({ multiselect });
const config = { agents: { defaults: {} } } as OpenClawConfig;

View File

@@ -32,6 +32,17 @@ function getWrittenConfig() {
return writeConfigFile.mock.calls[0]?.[0] as Record<string, unknown>;
}
// Asserts that exactly one config write happened and that it pinned `model`
// as the primary model with a matching (empty) allowlist entry.
function expectWrittenPrimaryModel(model: string) {
expect(writeConfigFile).toHaveBeenCalledTimes(1);
const written = getWrittenConfig();
// toEqual on the whole `agents` subtree also guards against stray extra keys.
expect(written.agents).toEqual({
defaults: {
model: { primary: model },
models: { [model]: {} },
},
});
}
describe("models set + fallbacks", () => {
beforeEach(() => {
readConfigFileSnapshot.mockReset();
@@ -45,14 +56,7 @@ describe("models set + fallbacks", () => {
await modelsSetCommand("z.ai/glm-4.7", runtime);
expect(writeConfigFile).toHaveBeenCalledTimes(1);
const written = getWrittenConfig();
expect(written.agents).toEqual({
defaults: {
model: { primary: "zai/glm-4.7" },
models: { "zai/glm-4.7": {} },
},
});
expectWrittenPrimaryModel("zai/glm-4.7");
});
it("normalizes z-ai provider in models fallbacks add", async () => {
@@ -79,13 +83,6 @@ describe("models set + fallbacks", () => {
await modelsSetCommand("Z.AI/glm-4.7", runtime);
expect(writeConfigFile).toHaveBeenCalledTimes(1);
const written = getWrittenConfig();
expect(written.agents).toEqual({
defaults: {
model: { primary: "zai/glm-4.7" },
models: { "zai/glm-4.7": {} },
},
});
expectWrittenPrimaryModel("zai/glm-4.7");
});
});

View File

@@ -123,6 +123,41 @@ const runtime = {
exit: vi.fn(),
};
// Fresh CLI-runtime double with spy log/error/exit functions so each test
// gets isolated call records.
function createRuntime() {
return {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
}
// Temporarily overrides the agent-scope resolver mocks (primary model,
// fallbacks override, and — optionally — agent dir) for the duration of `run`,
// restoring the previous mock implementations afterwards even if `run` throws.
async function withAgentScopeOverrides<T>(
overrides: {
primary?: string;
fallbacks?: string[];
agentDir?: string;
},
run: () => Promise<T>,
) {
// Capture the current implementations so they can be restored in `finally`.
const originalPrimary = mocks.resolveAgentModelPrimary.getMockImplementation();
const originalFallbacks = mocks.resolveAgentModelFallbacksOverride.getMockImplementation();
const originalAgentDir = mocks.resolveAgentDir.getMockImplementation();
// An undefined primary/fallbacks is applied deliberately: tests use it to
// simulate "no agent-level override" (falls back to defaults).
mocks.resolveAgentModelPrimary.mockReturnValue(overrides.primary);
mocks.resolveAgentModelFallbacksOverride.mockReturnValue(overrides.fallbacks);
// agentDir is only overridden when provided; otherwise the suite default stays.
if (overrides.agentDir) {
mocks.resolveAgentDir.mockReturnValue(overrides.agentDir);
}
try {
return await run();
} finally {
// Restore regardless of success/failure so later tests see the defaults.
mocks.resolveAgentModelPrimary.mockImplementation(originalPrimary);
mocks.resolveAgentModelFallbacksOverride.mockImplementation(originalFallbacks);
mocks.resolveAgentDir.mockImplementation(originalAgentDir);
}
}
describe("modelsStatusCommand auth overview", () => {
it("includes masked auth sources in JSON output", async () => {
await modelsStatusCommand({ json: true }, runtime as never);
@@ -160,69 +195,49 @@ describe("modelsStatusCommand auth overview", () => {
});
it("uses agent overrides and reports sources", async () => {
const localRuntime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
const originalPrimary = mocks.resolveAgentModelPrimary.getMockImplementation();
const originalFallbacks = mocks.resolveAgentModelFallbacksOverride.getMockImplementation();
const originalAgentDir = mocks.resolveAgentDir.getMockImplementation();
mocks.resolveAgentModelPrimary.mockReturnValue("openai/gpt-4");
mocks.resolveAgentModelFallbacksOverride.mockReturnValue(["openai/gpt-3.5"]);
mocks.resolveAgentDir.mockReturnValue("/tmp/openclaw-agent-custom");
try {
await modelsStatusCommand({ json: true, agent: "Jeremiah" }, localRuntime as never);
expect(mocks.resolveAgentDir).toHaveBeenCalledWith(expect.anything(), "jeremiah");
const payload = JSON.parse(String((localRuntime.log as vi.Mock).mock.calls[0][0]));
expect(payload.agentId).toBe("jeremiah");
expect(payload.agentDir).toBe("/tmp/openclaw-agent-custom");
expect(payload.defaultModel).toBe("openai/gpt-4");
expect(payload.fallbacks).toEqual(["openai/gpt-3.5"]);
expect(payload.modelConfig).toEqual({
defaultSource: "agent",
fallbacksSource: "agent",
});
} finally {
mocks.resolveAgentModelPrimary.mockImplementation(originalPrimary);
mocks.resolveAgentModelFallbacksOverride.mockImplementation(originalFallbacks);
mocks.resolveAgentDir.mockImplementation(originalAgentDir);
}
const localRuntime = createRuntime();
await withAgentScopeOverrides(
{
primary: "openai/gpt-4",
fallbacks: ["openai/gpt-3.5"],
agentDir: "/tmp/openclaw-agent-custom",
},
async () => {
await modelsStatusCommand({ json: true, agent: "Jeremiah" }, localRuntime as never);
expect(mocks.resolveAgentDir).toHaveBeenCalledWith(expect.anything(), "jeremiah");
const payload = JSON.parse(String((localRuntime.log as vi.Mock).mock.calls[0][0]));
expect(payload.agentId).toBe("jeremiah");
expect(payload.agentDir).toBe("/tmp/openclaw-agent-custom");
expect(payload.defaultModel).toBe("openai/gpt-4");
expect(payload.fallbacks).toEqual(["openai/gpt-3.5"]);
expect(payload.modelConfig).toEqual({
defaultSource: "agent",
fallbacksSource: "agent",
});
},
);
});
it("labels defaults when --agent has no overrides", async () => {
const localRuntime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
const originalPrimary = mocks.resolveAgentModelPrimary.getMockImplementation();
const originalFallbacks = mocks.resolveAgentModelFallbacksOverride.getMockImplementation();
mocks.resolveAgentModelPrimary.mockReturnValue(undefined);
mocks.resolveAgentModelFallbacksOverride.mockReturnValue(undefined);
try {
await modelsStatusCommand({ agent: "main" }, localRuntime as never);
const output = (localRuntime.log as vi.Mock).mock.calls
.map((call) => String(call[0]))
.join("\n");
expect(output).toContain("Default (defaults)");
expect(output).toContain("Fallbacks (0) (defaults)");
} finally {
mocks.resolveAgentModelPrimary.mockImplementation(originalPrimary);
mocks.resolveAgentModelFallbacksOverride.mockImplementation(originalFallbacks);
}
const localRuntime = createRuntime();
await withAgentScopeOverrides(
{
primary: undefined,
fallbacks: undefined,
},
async () => {
await modelsStatusCommand({ agent: "main" }, localRuntime as never);
const output = (localRuntime.log as vi.Mock).mock.calls
.map((call) => String(call[0]))
.join("\n");
expect(output).toContain("Default (defaults)");
expect(output).toContain("Fallbacks (0) (defaults)");
},
);
});
it("throws when agent id is unknown", async () => {
const localRuntime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
const localRuntime = createRuntime();
await expect(modelsStatusCommand({ agent: "unknown" }, localRuntime as never)).rejects.toThrow(
'Unknown agent id "unknown".',
);
@@ -230,11 +245,7 @@ describe("modelsStatusCommand auth overview", () => {
it("exits non-zero when auth is missing", async () => {
const originalProfiles = { ...mocks.store.profiles };
mocks.store.profiles = {};
const localRuntime = {
log: vi.fn(),
error: vi.fn(),
exit: vi.fn(),
};
const localRuntime = createRuntime();
const originalEnvImpl = mocks.resolveEnvApiKey.getMockImplementation();
mocks.resolveEnvApiKey.mockImplementation(() => null);

View File

@@ -39,13 +39,15 @@ function createLegacyProviderConfig(params: {
api: string;
modelId?: string;
modelName?: string;
baseUrl?: string;
apiKey?: string;
}) {
return {
models: {
providers: {
[params.providerId]: {
baseUrl: "https://old.example.com",
apiKey: "old-key",
baseUrl: params.baseUrl ?? "https://old.example.com",
apiKey: params.apiKey ?? "old-key",
api: params.api,
models: [
{
@@ -64,6 +66,42 @@ function createLegacyProviderConfig(params: {
};
}
// Canonical fallback list shared by the "preserves existing model fallbacks" tests.
const EXPECTED_FALLBACKS = ["anthropic/claude-opus-4-5"] as const;
// Minimal config whose only content is the expected fallback list. The list is
// spread-copied so the apply* functions under test cannot mutate the constant.
function createConfigWithFallbacks() {
return {
agents: {
defaults: {
model: { fallbacks: [...EXPECTED_FALLBACKS] },
},
},
};
}
// Asserts the apply* result still carries the canonical fallback list intact.
function expectFallbacksPreserved(cfg: ReturnType<typeof applyMinimaxApiConfig>) {
expect(cfg.agents?.defaults?.model?.fallbacks).toEqual([...EXPECTED_FALLBACKS]);
}
// Asserts a pre-existing primary model survived the provider-config merge.
function expectPrimaryModelPreserved(cfg: ReturnType<typeof applyMinimaxApiProviderConfig>) {
expect(cfg.agents?.defaults?.model?.primary).toBe("anthropic/claude-opus-4-5");
}
// Asserts the model allowlist (agents.defaults.models) contains an entry for `key`.
function expectAllowlistContains(
cfg: ReturnType<typeof applyOpenrouterProviderConfig>,
key: string,
) {
const models = cfg.agents?.defaults?.models ?? {};
expect(Object.keys(models)).toContain(key);
}
// Asserts a user-set alias on the allowlist entry `key` was not clobbered.
function expectAliasPreserved(
cfg: ReturnType<typeof applyOpenrouterProviderConfig>,
key: string,
alias: string,
) {
expect(cfg.agents?.defaults?.models?.[key]?.alias).toBe(alias);
}
describe("writeOAuthCredentials", () => {
const lifecycle = createAuthTestLifecycle([
"OPENCLAW_STATE_DIR",
@@ -177,14 +215,8 @@ describe("applyMinimaxApiConfig", () => {
});
it("preserves existing model fallbacks", () => {
const cfg = applyMinimaxApiConfig({
agents: {
defaults: {
model: { fallbacks: ["anthropic/claude-opus-4-5"] },
},
},
});
expect(cfg.agents?.defaults?.model?.fallbacks).toEqual(["anthropic/claude-opus-4-5"]);
const cfg = applyMinimaxApiConfig(createConfigWithFallbacks());
expectFallbacksPreserved(cfg);
});
it("adds model alias", () => {
@@ -270,7 +302,7 @@ describe("applyMinimaxApiProviderConfig", () => {
const cfg = applyMinimaxApiProviderConfig({
agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } },
});
expect(cfg.agents?.defaults?.model?.primary).toBe("anthropic/claude-opus-4-5");
expectPrimaryModelPreserved(cfg);
});
});
@@ -312,7 +344,7 @@ describe("applyZaiProviderConfig", () => {
const cfg = applyZaiProviderConfig({
agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } },
});
expect(cfg.agents?.defaults?.model?.primary).toBe("anthropic/claude-opus-4-5");
expectPrimaryModelPreserved(cfg);
});
});
@@ -387,14 +419,8 @@ describe("applyXaiConfig", () => {
});
it("preserves existing model fallbacks", () => {
const cfg = applyXaiConfig({
agents: {
defaults: {
model: { fallbacks: ["anthropic/claude-opus-4-5"] },
},
},
});
expect(cfg.agents?.defaults?.model?.fallbacks).toEqual(["anthropic/claude-opus-4-5"]);
const cfg = applyXaiConfig(createConfigWithFallbacks());
expectFallbacksPreserved(cfg);
});
});
@@ -424,8 +450,7 @@ describe("applyXaiProviderConfig", () => {
describe("applyOpencodeZenProviderConfig", () => {
it("adds allowlist entry for the default model", () => {
const cfg = applyOpencodeZenProviderConfig({});
const models = cfg.agents?.defaults?.models ?? {};
expect(Object.keys(models)).toContain("opencode/claude-opus-4-6");
expectAllowlistContains(cfg, "opencode/claude-opus-4-6");
});
it("preserves existing alias for the default model", () => {
@@ -438,7 +463,7 @@ describe("applyOpencodeZenProviderConfig", () => {
},
},
});
expect(cfg.agents?.defaults?.models?.["opencode/claude-opus-4-6"]?.alias).toBe("My Opus");
expectAliasPreserved(cfg, "opencode/claude-opus-4-6", "My Opus");
});
});
@@ -449,22 +474,15 @@ describe("applyOpencodeZenConfig", () => {
});
it("preserves existing model fallbacks", () => {
const cfg = applyOpencodeZenConfig({
agents: {
defaults: {
model: { fallbacks: ["anthropic/claude-opus-4-5"] },
},
},
});
expect(cfg.agents?.defaults?.model?.fallbacks).toEqual(["anthropic/claude-opus-4-5"]);
const cfg = applyOpencodeZenConfig(createConfigWithFallbacks());
expectFallbacksPreserved(cfg);
});
});
describe("applyOpenrouterProviderConfig", () => {
it("adds allowlist entry for the default model", () => {
const cfg = applyOpenrouterProviderConfig({});
const models = cfg.agents?.defaults?.models ?? {};
expect(Object.keys(models)).toContain(OPENROUTER_DEFAULT_MODEL_REF);
expectAllowlistContains(cfg, OPENROUTER_DEFAULT_MODEL_REF);
});
it("preserves existing alias for the default model", () => {
@@ -477,34 +495,22 @@ describe("applyOpenrouterProviderConfig", () => {
},
},
});
expect(cfg.agents?.defaults?.models?.[OPENROUTER_DEFAULT_MODEL_REF]?.alias).toBe("Router");
expectAliasPreserved(cfg, OPENROUTER_DEFAULT_MODEL_REF, "Router");
});
});
describe("applyLitellmProviderConfig", () => {
it("preserves existing baseUrl and api key while adding the default model", () => {
const cfg = applyLitellmProviderConfig({
models: {
providers: {
litellm: {
baseUrl: "https://litellm.example/v1",
apiKey: " old-key ",
api: "anthropic-messages",
models: [
{
id: "custom-model",
name: "Custom",
reasoning: false,
input: ["text"],
cost: { input: 1, output: 2, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1000,
maxTokens: 100,
},
],
},
},
},
});
const cfg = applyLitellmProviderConfig(
createLegacyProviderConfig({
providerId: "litellm",
api: "anthropic-messages",
modelId: "custom-model",
modelName: "Custom",
baseUrl: "https://litellm.example/v1",
apiKey: " old-key ",
}),
);
expect(cfg.models?.providers?.litellm?.baseUrl).toBe("https://litellm.example/v1");
expect(cfg.models?.providers?.litellm?.api).toBe("openai-completions");
@@ -523,13 +529,7 @@ describe("applyOpenrouterConfig", () => {
});
it("preserves existing model fallbacks", () => {
const cfg = applyOpenrouterConfig({
agents: {
defaults: {
model: { fallbacks: ["anthropic/claude-opus-4-5"] },
},
},
});
expect(cfg.agents?.defaults?.model?.fallbacks).toEqual(["anthropic/claude-opus-4-5"]);
const cfg = applyOpenrouterConfig(createConfigWithFallbacks());
expectFallbacksPreserved(cfg);
});
});

View File

@@ -15,6 +15,17 @@ function createPrompter(overrides: Partial<WizardPrompter>): WizardPrompter {
);
}
// Prompt doubles that fail loudly: any multiselect/text call during the test
// throws, so a flow that unexpectedly prompts surfaces as a test failure.
function createUnexpectedPromptGuards() {
return {
multiselect: vi.fn(async () => {
throw new Error("unexpected multiselect");
}),
// Include the prompt message in the error so a failure is diagnosable.
text: vi.fn(async ({ message }: { message: string }) => {
throw new Error(`unexpected text prompt: ${message}`);
}) as unknown as WizardPrompter["text"],
};
}
vi.mock("node:fs/promises", () => ({
default: {
access: vi.fn(async () => {
@@ -73,18 +84,13 @@ describe("setupChannels", () => {
it("shows explicit dmScope config command in channel primer", async () => {
const note = vi.fn(async () => {});
const select = vi.fn(async () => "__done__");
const multiselect = vi.fn(async () => {
throw new Error("unexpected multiselect");
});
const text = vi.fn(async ({ message }: { message: string }) => {
throw new Error(`unexpected text prompt: ${message}`);
});
const { multiselect, text } = createUnexpectedPromptGuards();
const prompter = createPrompter({
note,
select,
multiselect,
text: text as unknown as WizardPrompter["text"],
text,
});
const runtime = createExitThrowingRuntime();
@@ -112,17 +118,12 @@ describe("setupChannels", () => {
}
throw new Error(`unexpected select prompt: ${message}`);
});
const multiselect = vi.fn(async () => {
throw new Error("unexpected multiselect");
});
const text = vi.fn(async ({ message }: { message: string }) => {
throw new Error(`unexpected text prompt: ${message}`);
});
const { multiselect, text } = createUnexpectedPromptGuards();
const prompter = createPrompter({
select,
multiselect,
text: text as unknown as WizardPrompter["text"],
text,
});
const runtime = createExitThrowingRuntime();

View File

@@ -64,6 +64,17 @@ async function runPromptCustomApi(
});
}
// Shared assertions for flows that should finish with the custom provider
// detected as OpenAI-compatible: checks the prompt call counts and that the
// stored provider API is "openai-completions".
function expectOpenAiCompatResult(params: {
prompter: ReturnType<typeof createTestPrompter>;
textCalls: number;
selectCalls: number;
result: Awaited<ReturnType<typeof runPromptCustomApi>>;
}) {
expect(params.prompter.text).toHaveBeenCalledTimes(params.textCalls);
expect(params.prompter.select).toHaveBeenCalledTimes(params.selectCalls);
expect(params.result.config.models?.providers?.custom?.api).toBe("openai-completions");
}
describe("promptCustomApiConfig", () => {
afterEach(() => {
vi.unstubAllGlobals();
@@ -78,9 +89,7 @@ describe("promptCustomApiConfig", () => {
stubFetchSequence([{ ok: true }]);
const result = await runPromptCustomApi(prompter);
expect(prompter.text).toHaveBeenCalledTimes(5);
expect(prompter.select).toHaveBeenCalledTimes(1);
expect(result.config.models?.providers?.custom?.api).toBe("openai-completions");
expectOpenAiCompatResult({ prompter, textCalls: 5, selectCalls: 1, result });
expect(result.config.agents?.defaults?.models?.["custom/llama3"]?.alias).toBe("local");
});
@@ -104,9 +113,7 @@ describe("promptCustomApiConfig", () => {
stubFetchSequence([{ ok: true }]);
const result = await runPromptCustomApi(prompter);
expect(prompter.text).toHaveBeenCalledTimes(5);
expect(prompter.select).toHaveBeenCalledTimes(1);
expect(result.config.models?.providers?.custom?.api).toBe("openai-completions");
expectOpenAiCompatResult({ prompter, textCalls: 5, selectCalls: 1, result });
});
it("re-prompts base url when unknown detection fails", async () => {

View File

@@ -112,16 +112,28 @@ describe("onboard-hooks", () => {
],
});
// Shared driver for setupInternalHooks tests: mocks the workspace hook report,
// builds a prompter that returns `selected`, runs the flow, and returns the
// pieces each test asserts on.
async function runSetupInternalHooks(params: {
selected: string[];
cfg?: OpenClawConfig; // starting config; defaults to an empty config
eligible?: boolean; // hook eligibility reported by the mock; defaults to true
}) {
const { buildWorkspaceHookStatus } = await import("../hooks/hooks-status.js");
vi.mocked(buildWorkspaceHookStatus).mockReturnValue(
createMockHookReport(params.eligible ?? true),
);
const cfg = params.cfg ?? {};
const prompter = createMockPrompter(params.selected);
const runtime = createMockRuntime();
const result = await setupInternalHooks(cfg, runtime, prompter);
// Return cfg and prompter too so tests can compare against the input config
// and inspect prompt/note calls.
return { result, cfg, prompter };
}
describe("setupInternalHooks", () => {
it("should enable hooks when user selects them", async () => {
const { buildWorkspaceHookStatus } = await import("../hooks/hooks-status.js");
vi.mocked(buildWorkspaceHookStatus).mockReturnValue(createMockHookReport());
const cfg: OpenClawConfig = {};
const prompter = createMockPrompter(["session-memory"]);
const runtime = createMockRuntime();
const result = await setupInternalHooks(cfg, runtime, prompter);
const { result, prompter } = await runSetupInternalHooks({
selected: ["session-memory"],
});
expect(result.hooks?.internal?.enabled).toBe(true);
expect(result.hooks?.internal?.entries).toEqual({
@@ -147,28 +159,19 @@ describe("onboard-hooks", () => {
});
it("should not enable hooks when user skips", async () => {
const { buildWorkspaceHookStatus } = await import("../hooks/hooks-status.js");
vi.mocked(buildWorkspaceHookStatus).mockReturnValue(createMockHookReport());
const cfg: OpenClawConfig = {};
const prompter = createMockPrompter(["__skip__"]);
const runtime = createMockRuntime();
const result = await setupInternalHooks(cfg, runtime, prompter);
const { result, prompter } = await runSetupInternalHooks({
selected: ["__skip__"],
});
expect(result.hooks?.internal).toBeUndefined();
expect(prompter.note).toHaveBeenCalledTimes(1);
});
it("should handle no eligible hooks", async () => {
const { buildWorkspaceHookStatus } = await import("../hooks/hooks-status.js");
vi.mocked(buildWorkspaceHookStatus).mockReturnValue(createMockHookReport(false));
const cfg: OpenClawConfig = {};
const prompter = createMockPrompter([]);
const runtime = createMockRuntime();
const result = await setupInternalHooks(cfg, runtime, prompter);
const { result, cfg, prompter } = await runSetupInternalHooks({
selected: [],
eligible: false,
});
expect(result).toEqual(cfg);
expect(prompter.multiselect).not.toHaveBeenCalled();
@@ -179,9 +182,6 @@ describe("onboard-hooks", () => {
});
it("should preserve existing hooks config when enabled", async () => {
const { buildWorkspaceHookStatus } = await import("../hooks/hooks-status.js");
vi.mocked(buildWorkspaceHookStatus).mockReturnValue(createMockHookReport());
const cfg: OpenClawConfig = {
hooks: {
enabled: true,
@@ -189,10 +189,10 @@ describe("onboard-hooks", () => {
token: "existing-token",
},
};
const prompter = createMockPrompter(["session-memory"]);
const runtime = createMockRuntime();
const result = await setupInternalHooks(cfg, runtime, prompter);
const { result } = await runSetupInternalHooks({
selected: ["session-memory"],
cfg,
});
expect(result.hooks?.enabled).toBe(true);
expect(result.hooks?.path).toBe("/webhook");
@@ -204,30 +204,22 @@ describe("onboard-hooks", () => {
});
it("should preserve existing config when user skips", async () => {
const { buildWorkspaceHookStatus } = await import("../hooks/hooks-status.js");
vi.mocked(buildWorkspaceHookStatus).mockReturnValue(createMockHookReport());
const cfg: OpenClawConfig = {
agents: { defaults: { workspace: "/workspace" } },
};
const prompter = createMockPrompter(["__skip__"]);
const runtime = createMockRuntime();
const result = await setupInternalHooks(cfg, runtime, prompter);
const { result } = await runSetupInternalHooks({
selected: ["__skip__"],
cfg,
});
expect(result).toEqual(cfg);
expect(result.agents?.defaults?.workspace).toBe("/workspace");
});
it("should show informative notes to user", async () => {
const { buildWorkspaceHookStatus } = await import("../hooks/hooks-status.js");
vi.mocked(buildWorkspaceHookStatus).mockReturnValue(createMockHookReport());
const cfg: OpenClawConfig = {};
const prompter = createMockPrompter(["session-memory"]);
const runtime = createMockRuntime();
await setupInternalHooks(cfg, runtime, prompter);
const { prompter } = await runSetupInternalHooks({
selected: ["session-memory"],
});
const noteCalls = (prompter.note as ReturnType<typeof vi.fn>).mock.calls;
expect(noteCalls).toHaveLength(2);

View File

@@ -67,6 +67,14 @@ async function runInitialValueForChannel(channel: "dev" | "beta") {
return select.mock.calls[0]?.[0]?.initialValue;
}
// Asserts the zalo plugin was installed from the local extensions/ checkout:
// reported as installed, with the resolved path registered in plugins.load.paths.
function expectPluginLoadedFromLocalPath(
result: Awaited<ReturnType<typeof ensureOnboardingPluginInstalled>>,
) {
// The local install path is resolved relative to the current working directory.
const expectedPath = path.resolve(process.cwd(), "extensions/zalo");
expect(result.installed).toBe(true);
expect(result.cfg.plugins?.load?.paths).toContain(expectedPath);
}
describe("ensureOnboardingPluginInstalled", () => {
it("installs from npm and enables the plugin", async () => {
const runtime = makeRuntime();
@@ -115,9 +123,7 @@ describe("ensureOnboardingPluginInstalled", () => {
runtime,
});
const expectedPath = path.resolve(process.cwd(), "extensions/zalo");
expect(result.installed).toBe(true);
expect(result.cfg.plugins?.load?.paths).toContain(expectedPath);
expectPluginLoadedFromLocalPath(result);
expect(result.cfg.plugins?.entries?.zalo?.enabled).toBe(true);
});
@@ -152,9 +158,7 @@ describe("ensureOnboardingPluginInstalled", () => {
runtime,
});
const expectedPath = path.resolve(process.cwd(), "extensions/zalo");
expect(result.installed).toBe(true);
expect(result.cfg.plugins?.load?.paths).toContain(expectedPath);
expectPluginLoadedFromLocalPath(result);
expect(note).toHaveBeenCalled();
expect(runtime.error).not.toHaveBeenCalled();
});