chore: speed up tests and update opencode models

This commit is contained in:
Peter Steinberger
2026-01-23 11:36:28 +00:00
parent bb9bddebb4
commit 8b7b7e154f
7 changed files with 79 additions and 48 deletions

View File

@@ -68,13 +68,9 @@ export const OPENCODE_ZEN_MODEL_ALIASES: Record<string, string> = {
"gemini-2.5-flash": "gemini-3-flash",
// GLM (free + alpha)
-glm: "glm-4.7-free",
-"glm-free": "glm-4.7-free",
+glm: "glm-4.7",
+"glm-free": "glm-4.7",
"alpha-glm": "alpha-glm-4.7",
// MiniMax
-minimax: "minimax-m2.1-free",
-"minimax-free": "minimax-m2.1-free",
};
/**
@@ -134,7 +130,7 @@ const MODEL_COSTS: Record<
cacheWrite: 0,
},
"gpt-5.1": { input: 1.07, output: 8.5, cacheRead: 0.107, cacheWrite: 0 },
-"glm-4.7-free": { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+"glm-4.7": { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
"gemini-3-flash": { input: 0.5, output: 3, cacheRead: 0.05, cacheWrite: 0 },
"gpt-5.1-codex-max": {
input: 1.25,
@@ -142,7 +138,6 @@ const MODEL_COSTS: Record<
cacheRead: 0.125,
cacheWrite: 0,
},
-"minimax-m2.1-free": { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
"gpt-5.2": { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
};
@@ -155,10 +150,9 @@ const MODEL_CONTEXT_WINDOWS: Record<string, number> = {
"alpha-glm-4.7": 204800,
"gpt-5.1-codex-mini": 400000,
"gpt-5.1": 400000,
-"glm-4.7-free": 204800,
+"glm-4.7": 204800,
"gemini-3-flash": 1048576,
"gpt-5.1-codex-max": 400000,
-"minimax-m2.1-free": 204800,
"gpt-5.2": 400000,
};
@@ -173,10 +167,9 @@ const MODEL_MAX_TOKENS: Record<string, number> = {
"alpha-glm-4.7": 131072,
"gpt-5.1-codex-mini": 128000,
"gpt-5.1": 128000,
-"glm-4.7-free": 131072,
+"glm-4.7": 131072,
"gemini-3-flash": 65536,
"gpt-5.1-codex-max": 128000,
-"minimax-m2.1-free": 131072,
"gpt-5.2": 128000,
};
@@ -211,10 +204,9 @@ const MODEL_NAMES: Record<string, string> = {
"alpha-glm-4.7": "Alpha GLM-4.7",
"gpt-5.1-codex-mini": "GPT-5.1 Codex Mini",
"gpt-5.1": "GPT-5.1",
-"glm-4.7-free": "GLM-4.7",
+"glm-4.7": "GLM-4.7",
"gemini-3-flash": "Gemini 3 Flash",
"gpt-5.1-codex-max": "GPT-5.1 Codex Max",
-"minimax-m2.1-free": "MiniMax M2.1",
"gpt-5.2": "GPT-5.2",
};
@@ -240,10 +232,9 @@ export function getOpencodeZenStaticFallbackModels(): ModelDefinitionConfig[] {
"alpha-glm-4.7",
"gpt-5.1-codex-mini",
"gpt-5.1",
-"glm-4.7-free",
+"glm-4.7",
"gemini-3-flash",
"gpt-5.1-codex-max",
-"minimax-m2.1-free",
"gpt-5.2",
];