refactor(test): share pi embedded model fixtures

This commit is contained in:
Peter Steinberger
2026-02-14 22:06:04 +00:00
parent 5bead2de85
commit e63dcc320b
3 changed files with 68 additions and 67 deletions

View File

@@ -5,23 +5,16 @@ vi.mock("../pi-model-discovery.js", () => ({
discoverModels: vi.fn(() => ({ find: vi.fn(() => null) })),
}));
import { discoverModels } from "../pi-model-discovery.js";
import { buildInlineProviderModels, resolveModel } from "./model.js";
const makeModel = (id: string) => ({
id,
name: id,
reasoning: false,
input: ["text"] as const,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1,
maxTokens: 1,
});
import {
makeModel,
mockDiscoveredModel,
OPENAI_CODEX_TEMPLATE_MODEL,
resetMockDiscoverModels,
} from "./model.test-harness.js";
beforeEach(() => {
vi.mocked(discoverModels).mockReturnValue({
find: vi.fn(() => null),
} as unknown as ReturnType<typeof discoverModels>);
resetMockDiscoverModels();
});
describe("pi embedded model e2e smoke", () => {
@@ -45,26 +38,11 @@ describe("pi embedded model e2e smoke", () => {
});
it("builds an openai-codex forward-compat fallback for gpt-5.3-codex", () => {
const templateModel = {
id: "gpt-5.2-codex",
name: "GPT-5.2 Codex",
mockDiscoveredModel({
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text", "image"] as const,
cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
contextWindow: 272000,
maxTokens: 128000,
};
vi.mocked(discoverModels).mockReturnValue({
find: vi.fn((provider: string, modelId: string) => {
if (provider === "openai-codex" && modelId === "gpt-5.2-codex") {
return templateModel;
}
return null;
}),
} as unknown as ReturnType<typeof discoverModels>);
modelId: "gpt-5.2-codex",
templateModel: OPENAI_CODEX_TEMPLATE_MODEL,
});
const result = resolveModel("openai-codex", "gpt-5.3-codex", "/tmp/agent");
expect(result.error).toBeUndefined();

View File

@@ -0,0 +1,46 @@
import { vi } from "vitest";
import { discoverModels } from "../pi-model-discovery.js";
/**
 * Builds a minimal model fixture whose id doubles as its display name.
 * Every numeric field is the smallest meaningful value (zero cost, a
 * 1-token window) so individual tests only override what they assert on.
 */
export const makeModel = (id: string) => {
  const freeCost = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 };
  return {
    id,
    name: id,
    reasoning: false,
    input: ["text"] as const,
    cost: freeCost,
    contextWindow: 1,
    maxTokens: 1,
  };
};
/**
 * Shared fixture describing a discovered `openai-codex` model
 * (`gpt-5.2-codex`). Tests feed it to `mockDiscoveredModel` as the
 * template from which forward-compat fallbacks (e.g. for
 * `gpt-5.3-codex`) are built.
 *
 * NOTE(review): the cost/contextWindow/maxTokens figures presumably
 * mirror the real catalog entry — confirm against production data if
 * assertions start depending on them.
 */
export const OPENAI_CODEX_TEMPLATE_MODEL = {
id: "gpt-5.2-codex",
name: "GPT-5.2 Codex",
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text", "image"] as const,
cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
contextWindow: 272000,
maxTokens: 128000,
};
/**
 * Resets the `discoverModels` mock to its default "nothing discovered"
 * state: a registry whose `find` always yields null. Call from
 * `beforeEach` so one test's mocked discovery cannot leak into another.
 */
export function resetMockDiscoverModels(): void {
  const emptyRegistry = {
    find: vi.fn(() => null),
  } as unknown as ReturnType<typeof discoverModels>;
  vi.mocked(discoverModels).mockReturnValue(emptyRegistry);
}
/**
 * Points the `discoverModels` mock at a single discoverable model:
 * `find` returns `templateModel` only for the exact
 * (`provider`, `modelId`) pair and null for everything else.
 *
 * @param params.provider      provider id the lookup must match
 * @param params.modelId       model id the lookup must match
 * @param params.templateModel value handed back on a match
 */
export function mockDiscoveredModel(params: {
  provider: string;
  modelId: string;
  templateModel: unknown;
}): void {
  const { provider: wantProvider, modelId: wantModelId, templateModel } = params;
  const find = vi.fn((provider: string, modelId: string) => {
    const matches = provider === wantProvider && modelId === wantModelId;
    return matches ? templateModel : null;
  });
  vi.mocked(discoverModels).mockReturnValue({
    find,
  } as unknown as ReturnType<typeof discoverModels>);
}

View File

@@ -6,23 +6,16 @@ vi.mock("../pi-model-discovery.js", () => ({
}));
import type { OpenClawConfig } from "../../config/config.js";
import { discoverModels } from "../pi-model-discovery.js";
import { buildInlineProviderModels, resolveModel } from "./model.js";
const makeModel = (id: string) => ({
id,
name: id,
reasoning: false,
input: ["text"] as const,
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
contextWindow: 1,
maxTokens: 1,
});
import {
makeModel,
mockDiscoveredModel,
OPENAI_CODEX_TEMPLATE_MODEL,
resetMockDiscoverModels,
} from "./model.test-harness.js";
beforeEach(() => {
vi.mocked(discoverModels).mockReturnValue({
find: vi.fn(() => null),
} as unknown as ReturnType<typeof discoverModels>);
resetMockDiscoverModels();
});
describe("buildInlineProviderModels", () => {
@@ -136,27 +129,11 @@ describe("resolveModel", () => {
});
it("builds an openai-codex fallback for gpt-5.3-codex", () => {
const templateModel = {
id: "gpt-5.2-codex",
name: "GPT-5.2 Codex",
mockDiscoveredModel({
provider: "openai-codex",
api: "openai-codex-responses",
baseUrl: "https://chatgpt.com/backend-api",
reasoning: true,
input: ["text", "image"] as const,
cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
contextWindow: 272000,
maxTokens: 128000,
};
vi.mocked(discoverModels).mockReturnValue({
find: vi.fn((provider: string, modelId: string) => {
if (provider === "openai-codex" && modelId === "gpt-5.2-codex") {
return templateModel;
}
return null;
}),
} as unknown as ReturnType<typeof discoverModels>);
modelId: "gpt-5.2-codex",
templateModel: OPENAI_CODEX_TEMPLATE_MODEL,
});
const result = resolveModel("openai-codex", "gpt-5.3-codex", "/tmp/agent");