fix: normalize openai-codex gpt-5.4 transport overrides

Author: 0xsline
Committed by: Peter Steinberger
Date: 2026-03-07 17:48:17 +08:00
Parent: 3da8882a02
Commit: 024857050a
3 changed files with 157 additions and 38 deletions


@@ -33,6 +33,7 @@ Docs: https://docs.openclaw.ai
 - Hooks/session-memory: keep `/new` and `/reset` memory artifacts in the bound agent workspace and align saved reset session keys with that workspace when stale main-agent keys leak into the hook path. (#39875) thanks @rbutera.
 - Sessions/model switch: clear stale cached `contextTokens` when a session changes models so status and runtime paths recompute against the active model window. (#38044) thanks @yuweuii.
 - ACP/session history: persist transcripts for successful ACP child runs, preserve exact transcript text, record ACP spawned-session lineage, and keep spawn-time transcript-path persistence best-effort so history storage failures do not block execution. (#40137) thanks @mbelinky.
+- Agents/openai-codex: normalize `gpt-5.4` fallback transport back to `openai-codex-responses` on `chatgpt.com/backend-api` when config drifts to the generic OpenAI responses endpoint. (#38736) Thanks @0xsline.
 - Browser/CDP: normalize loopback direct WebSocket CDP URLs back to HTTP(S) for `/json/*` tab operations so local `ws://` / `wss://` profiles can still list, focus, open, and close tabs after the new direct-WS support lands. (#31085) Thanks @shrey150.
 - Browser/CDP: rewrite wildcard `ws://0.0.0.0` and `ws://[::]` debugger URLs from remote `/json/version` responses back to the external CDP host/port, fixing Browserless-style container endpoints. (#17760) Thanks @joeharouni.
 - Browser/extension relay: wait briefly for a previously attached Chrome tab to reappear after transient relay drops before failing with `tab not found`, reducing noisy reconnect flakes. (#32461) Thanks @AaronWander.
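For context on the Agents/openai-codex entry above: the regression being fixed is a provider override that drifts the Codex provider onto the generic OpenAI endpoint. A minimal sketch of the shape involved, using only the `models.providers` keys exercised by the new tests (the surrounding config file layout is an assumption):

// Drifted override: openai-codex pointed at the generic OpenAI responses endpoint.
const driftedProviders = {
  "openai-codex": {
    baseUrl: "https://api.openai.com/v1",
    api: "openai-responses",
  },
};

// After this fix, resolving gpt-5.4 under that config yields the Codex transport:
//   api:     "openai-codex-responses"
//   baseUrl: "https://chatgpt.com/backend-api"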


@@ -664,6 +664,60 @@ describe("resolveModel", () => {
     });
   });

+  it("normalizes openai-codex gpt-5.4 overrides away from /v1/responses", () => {
+    mockOpenAICodexTemplateModel();
+    const cfg: OpenClawConfig = {
+      models: {
+        providers: {
+          "openai-codex": {
+            baseUrl: "https://api.openai.com/v1",
+            api: "openai-responses",
+          },
+        },
+      },
+    } as unknown as OpenClawConfig;
+
+    expectResolvedForwardCompatFallback({
+      provider: "openai-codex",
+      id: "gpt-5.4",
+      cfg,
+      expectedModel: {
+        api: "openai-codex-responses",
+        baseUrl: "https://chatgpt.com/backend-api",
+        id: "gpt-5.4",
+        provider: "openai-codex",
+      },
+    });
+  });
+
+  it("does not rewrite openai baseUrl when openai-codex api stays non-codex", () => {
+    mockOpenAICodexTemplateModel();
+    const cfg: OpenClawConfig = {
+      models: {
+        providers: {
+          "openai-codex": {
+            baseUrl: "https://api.openai.com/v1",
+            api: "openai-completions",
+          },
+        },
+      },
+    } as unknown as OpenClawConfig;
+
+    expectResolvedForwardCompatFallback({
+      provider: "openai-codex",
+      id: "gpt-5.4",
+      cfg,
+      expectedModel: {
+        api: "openai-completions",
+        baseUrl: "https://api.openai.com/v1",
+        id: "gpt-5.4",
+        provider: "openai-codex",
+      },
+    });
+  });
+
   it("includes auth hint for unknown ollama models (#17328)", () => {
     // resetMockDiscoverModels() in beforeEach already sets find → null
     const result = resolveModel("ollama", "gemma3:4b", "/tmp/agent");
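The two new cases pin down both directions of the rewrite: a drifted `openai-responses` override on `api.openai.com` is pulled back onto the Codex transport, while an explicitly non-codex `openai-completions` override is left untouched. For readers outside this spec file, a hypothetical sketch of what a helper like `expectResolvedForwardCompatFallback` is assumed to do (the real helper and its mocked registry live elsewhere in the suite; the wiring below is an assumption):

// Assumed: resolveModelWithRegistry and OpenClawConfig come from the module under
// test, and modelRegistry is the suite's mock whose find() returns null so the
// forward-compat fallback path is exercised.
function expectResolvedForwardCompatFallback(params: {
  provider: string;
  id: string;
  cfg: OpenClawConfig;
  expectedModel: Record<string, unknown>;
}): void {
  const resolved = resolveModelWithRegistry({
    provider: params.provider,
    modelId: params.id,
    cfg: params.cfg,
    modelRegistry,
  });
  expect(resolved).toMatchObject(params.expectedModel);
}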


@@ -23,6 +23,8 @@ type InlineProviderConfig = {
   headers?: unknown;
 };

+const OPENAI_CODEX_BASE_URL = "https://chatgpt.com/backend-api";
+
 function sanitizeModelHeaders(
   headers: unknown,
   opts?: { stripSecretRefMarkers?: boolean },
@@ -43,6 +45,60 @@ function sanitizeModelHeaders(
   return Object.keys(next).length > 0 ? next : undefined;
 }

+function isOpenAIApiBaseUrl(baseUrl?: string): boolean {
+  const trimmed = baseUrl?.trim();
+  if (!trimmed) {
+    return false;
+  }
+  return /^https?:\/\/api\.openai\.com(?:\/v1)?\/?$/i.test(trimmed);
+}
+
+function isOpenAICodexBaseUrl(baseUrl?: string): boolean {
+  const trimmed = baseUrl?.trim();
+  if (!trimmed) {
+    return false;
+  }
+  return /^https?:\/\/chatgpt\.com\/backend-api\/?$/i.test(trimmed);
+}
+
+function normalizeOpenAICodexTransport(params: {
+  provider: string;
+  model: Model<Api>;
+}): Model<Api> {
+  if (normalizeProviderId(params.provider) !== "openai-codex") {
+    return params.model;
+  }
+  const useCodexTransport =
+    !params.model.baseUrl ||
+    isOpenAIApiBaseUrl(params.model.baseUrl) ||
+    isOpenAICodexBaseUrl(params.model.baseUrl);
+  const nextApi =
+    useCodexTransport && params.model.api === "openai-responses"
+      ? ("openai-codex-responses" as const)
+      : params.model.api;
+  const nextBaseUrl =
+    nextApi === "openai-codex-responses" &&
+    (!params.model.baseUrl || isOpenAIApiBaseUrl(params.model.baseUrl))
+      ? OPENAI_CODEX_BASE_URL
+      : params.model.baseUrl;
+  if (nextApi === params.model.api && nextBaseUrl === params.model.baseUrl) {
+    return params.model;
+  }
+  return {
+    ...params.model,
+    api: nextApi,
+    baseUrl: nextBaseUrl,
+  } as Model<Api>;
+}
+
+function normalizeResolvedModel(params: { provider: string; model: Model<Api> }): Model<Api> {
+  return normalizeModelCompat(normalizeOpenAICodexTransport(params));
+}
+
 export { buildModelAliasLines };

 function resolveConfiguredProviderConfig(
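Taken together, the helpers above implement a deliberately narrow rewrite: only the `openai-codex` provider is touched, only when the configured api is `openai-responses`, and only when the base URL is unset, the generic `api.openai.com` endpoint, or already the Codex backend. A quick sketch of how the pieces behave (illustrative inputs only; values follow the regexes and branches above):

// Base-URL predicates (per the regexes above): optional /v1, optional trailing
// slash, case-insensitive match.
isOpenAIApiBaseUrl("https://api.openai.com");             // true
isOpenAIApiBaseUrl("HTTPS://api.openai.com/v1/");         // true
isOpenAIApiBaseUrl("https://api.openai.com/v2");          // false
isOpenAICodexBaseUrl("https://chatgpt.com/backend-api/"); // true

// A drifted model is pulled back onto the Codex transport; anything else is
// returned unchanged (same object identity, so no spurious config churn).
const fixed = normalizeOpenAICodexTransport({
  provider: "openai-codex",
  model: {
    id: "gpt-5.4",
    api: "openai-responses",
    baseUrl: "https://api.openai.com/v1",
  } as Model<Api>,
});
// fixed.api === "openai-codex-responses"
// fixed.baseUrl === "https://chatgpt.com/backend-api"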
@@ -145,13 +201,14 @@ export function resolveModelWithRegistry(params: {
   const model = modelRegistry.find(provider, modelId) as Model<Api> | null;
   if (model) {
-    return normalizeModelCompat(
-      applyConfiguredProviderOverrides({
+    return normalizeResolvedModel({
+      provider,
+      model: applyConfiguredProviderOverrides({
         discoveredModel: model,
         providerConfig,
         modelId,
       }),
-    );
+    });
   }

   const providers = cfg?.models?.providers ?? {};
@@ -161,64 +218,71 @@ export function resolveModelWithRegistry(params: {
     (entry) => normalizeProviderId(entry.provider) === normalizedProvider && entry.id === modelId,
   );
   if (inlineMatch?.api) {
-    return normalizeModelCompat(inlineMatch as Model<Api>);
+    return normalizeResolvedModel({ provider, model: inlineMatch as Model<Api> });
   }

   // Forward-compat fallbacks must be checked BEFORE the generic providerCfg fallback.
   // Otherwise, configured providers can default to a generic API and break specific transports.
   const forwardCompat = resolveForwardCompatModel(provider, modelId, modelRegistry);
   if (forwardCompat) {
-    return normalizeModelCompat(
-      applyConfiguredProviderOverrides({
+    return normalizeResolvedModel({
+      provider,
+      model: applyConfiguredProviderOverrides({
         discoveredModel: forwardCompat,
         providerConfig,
         modelId,
       }),
-    );
+    });
   }

   // OpenRouter is a pass-through proxy - any model ID available on OpenRouter
   // should work without being pre-registered in the local catalog.
   if (normalizedProvider === "openrouter") {
-    return normalizeModelCompat({
-      id: modelId,
-      name: modelId,
-      api: "openai-completions",
+    return normalizeResolvedModel({
       provider,
-      baseUrl: "https://openrouter.ai/api/v1",
-      reasoning: false,
-      input: ["text"],
-      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
-      contextWindow: DEFAULT_CONTEXT_TOKENS,
-      // Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
-      maxTokens: 8192,
-    } as Model<Api>);
+      model: {
+        id: modelId,
+        name: modelId,
+        api: "openai-completions",
+        provider,
+        baseUrl: "https://openrouter.ai/api/v1",
+        reasoning: false,
+        input: ["text"],
+        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+        contextWindow: DEFAULT_CONTEXT_TOKENS,
+        // Align with OPENROUTER_DEFAULT_MAX_TOKENS in models-config.providers.ts
+        maxTokens: 8192,
+      } as Model<Api>,
+    });
   }

   const configuredModel = providerConfig?.models?.find((candidate) => candidate.id === modelId);
   const providerHeaders = sanitizeModelHeaders(providerConfig?.headers);
   const modelHeaders = sanitizeModelHeaders(configuredModel?.headers);
   if (providerConfig || modelId.startsWith("mock-")) {
-    return normalizeModelCompat({
-      id: modelId,
-      name: modelId,
-      api: providerConfig?.api ?? "openai-responses",
+    return normalizeResolvedModel({
       provider,
-      baseUrl: providerConfig?.baseUrl,
-      reasoning: configuredModel?.reasoning ?? false,
-      input: ["text"],
-      cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
-      contextWindow:
-        configuredModel?.contextWindow ??
-        providerConfig?.models?.[0]?.contextWindow ??
-        DEFAULT_CONTEXT_TOKENS,
-      maxTokens:
-        configuredModel?.maxTokens ??
-        providerConfig?.models?.[0]?.maxTokens ??
-        DEFAULT_CONTEXT_TOKENS,
-      headers:
-        providerHeaders || modelHeaders ? { ...providerHeaders, ...modelHeaders } : undefined,
-    } as Model<Api>);
+      model: {
+        id: modelId,
+        name: modelId,
+        api: providerConfig?.api ?? "openai-responses",
+        provider,
+        baseUrl: providerConfig?.baseUrl,
+        reasoning: configuredModel?.reasoning ?? false,
+        input: ["text"],
+        cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+        contextWindow:
+          configuredModel?.contextWindow ??
+          providerConfig?.models?.[0]?.contextWindow ??
+          DEFAULT_CONTEXT_TOKENS,
+        maxTokens:
+          configuredModel?.maxTokens ??
+          providerConfig?.models?.[0]?.maxTokens ??
+          DEFAULT_CONTEXT_TOKENS,
+        headers:
+          providerHeaders || modelHeaders ? { ...providerHeaders, ...modelHeaders } : undefined,
+      } as Model<Api>,
+    });
   }

   return undefined;
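Net effect of the call-site changes: every return path in `resolveModelWithRegistry` now funnels through `normalizeResolvedModel`, so the Codex transport repair composes with the existing `normalizeModelCompat` pass whether the model came from the registry, an inline provider entry, the forward-compat fallback, the OpenRouter pass-through, or the generic configured-provider fallback. A hypothetical end-to-end check (the config and registry doubles below are assumptions, mirroring the new tests):

// Assumed test doubles: a config carrying the drifted openai-codex override and
// a registry mock whose find() returns null, forcing the fallback paths.
const resolved = resolveModelWithRegistry({
  provider: "openai-codex",
  modelId: "gpt-5.4",
  cfg: driftedConfig,
  modelRegistry: mockRegistry,
});
console.assert(resolved?.api === "openai-codex-responses");
console.assert(resolved?.baseUrl === "https://chatgpt.com/backend-api");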