diff --git a/.markdownlint-cli2.jsonc b/.markdownlint-cli2.jsonc new file mode 100644 index 000000000..0b6b8f0fb --- /dev/null +++ b/.markdownlint-cli2.jsonc @@ -0,0 +1,52 @@ +{ + "globs": ["docs/**/*.md", "docs/**/*.mdx", "README.md"], + "ignores": ["docs/zh-CN/**", "docs/.i18n/**", "docs/reference/templates/**"], + "config": { + "default": true, + + "MD013": false, + "MD025": false, + "MD029": false, + + "MD033": { + "allowed_elements": [ + "Note", + "Info", + "Tip", + "Warning", + "Card", + "CardGroup", + "Columns", + "Steps", + "Step", + "Tabs", + "Tab", + "Accordion", + "AccordionGroup", + "CodeGroup", + "Frame", + "Callout", + "ParamField", + "ResponseField", + "RequestExample", + "ResponseExample", + "img", + "a", + "br", + "details", + "summary", + "p", + "strong", + "picture", + "source", + "Tooltip", + "Check", + ], + }, + + "MD036": false, + "MD040": false, + "MD041": false, + "MD046": false, + }, +} diff --git a/CHANGELOG.md b/CHANGELOG.md index e83528b4e..a23b3d1a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,29 +2,52 @@ Docs: https://docs.openclaw.ai -## 2026.2.4 +## 2026.2.6 + +### Changes + +- Cron: default `wakeMode` is now `"now"` for new jobs (was `"next-heartbeat"`). (#10776) Thanks @tyler6204. +- Cron: `cron run` defaults to force execution; use `--due` to restrict to due-only. (#10776) Thanks @tyler6204. +- Models: support Anthropic Opus 4.6 and OpenAI Codex gpt-5.3-codex (forward-compat fallbacks). (#9853, #10720, #9995) Thanks @TinyTb, @calvin-hpnet, @tyler6204. +- Providers: add xAI (Grok) support. (#9885) Thanks @grp06. +- Web UI: add token usage dashboard. (#10072) Thanks @Takhoffman. +- Memory: native Voyage AI support. (#7078) Thanks @mcinteerj. +- Sessions: cap sessions_history payloads to reduce context overflow. (#10000) Thanks @gut-puncture. +- CLI: sort commands alphabetically in help output. (#8068) Thanks @deepsoumya617. +- Agents: bump pi-mono to 0.52.7; add embedded forward-compat fallback for Opus 4.6 model ids. 
+ +### Added + +- Cron: run history deep-links to session chat from the dashboard. (#10776) Thanks @tyler6204. +- Cron: per-run session keys in run log entries and default labels for cron sessions. (#10776) Thanks @tyler6204. +- Cron: legacy payload field compatibility (`deliver`, `channel`, `to`, `bestEffortDeliver`) in schema. (#10776) Thanks @tyler6204. + +### Fixes + +- Cron: scheduler reliability (timer drift, restart catch-up, lock contention, stale running markers). (#10776) Thanks @tyler6204. +- Cron: store migration hardening (legacy field migration, parse error handling, explicit delivery mode persistence). (#10776) Thanks @tyler6204. +- Memory: set Voyage embeddings `input_type` for improved retrieval. (#10818) Thanks @mcinteerj. +- Telegram: auto-inject DM topic threadId in message tool + subagent announce. (#7235) Thanks @Lukavyi. +- Security: require auth for Gateway canvas host and A2UI assets. (#9518) Thanks @coygeek. +- Cron: fix scheduling and reminder delivery regressions; harden next-run recompute + timer re-arming + legacy schedule fields. (#9733, #9823, #9948, #9932) Thanks @tyler6204, @pycckuu, @j2h4u, @fujiwara-tofu-shop. +- Update: harden Control UI asset handling in update flow. (#10146) Thanks @gumadeiras. +- Security: add skill/plugin code safety scanner; redact credentials from config.get gateway responses. (#9806, #9858) Thanks @abdelsfane. +- Exec approvals: coerce bare string allowlist entries to objects. (#9903) Thanks @mcaxtr. +- Slack: add mention stripPatterns for /new and /reset. (#9971) Thanks @ironbyte-rgb. +- Chrome extension: fix bundled path resolution. (#8914) Thanks @kelvinCB. +- Compaction/errors: allow multiple compaction retries on context overflow; show clear billing errors. (#8928, #8391) Thanks @Glucksberg. + +## 2026.2.3 ### Changes -- Agents: bump pi-mono packages to 0.52.5. (#9949) Thanks @gumadeiras. -- Models: default Anthropic model to `anthropic/claude-opus-4-6`. (#9853) Thanks @TinyTb. 
-- Models/Onboarding: refresh provider defaults, update OpenAI/OpenAI Codex wizard defaults, and harden model allowlist initialization for first-time configs with matching docs/tests. (#9911) Thanks @gumadeiras. -- Telegram: auto-inject forum topic `threadId` in message tool and subagent announce so media, buttons, and subagent results land in the correct topic instead of General. (#7235) Thanks @Lukavyi. -- Security: add skill/plugin code safety scanner that detects dangerous patterns (command injection, eval, data exfiltration, obfuscated code, crypto mining, env harvesting) in installed extensions. Integrated into `openclaw security audit --deep` and plugin install flow; scan failures surface as warnings. (#9806) Thanks @abdelsfane. -- CLI: sort `openclaw --help` commands (and options) alphabetically. (#8068) Thanks @deepsoumya617. - Telegram: remove last `@ts-nocheck` from `bot-handlers.ts`, use Grammy types directly, deduplicate `StickerMetadata`. Zero `@ts-nocheck` remaining in `src/telegram/`. (#9206) - Telegram: remove `@ts-nocheck` from `bot-message.ts`, type deps via `Omit`, widen `allMedia` to `TelegramMediaRef[]`. (#9180) - Telegram: remove `@ts-nocheck` from `bot.ts`, fix duplicate `bot.catch` error handler (Grammy overrides), remove dead reaction `message_thread_id` routing, harden sticker cache guard. (#9077) -- Telegram: allow per-group and per-topic `groupPolicy` overrides under `channels.telegram.groups`. (#9775) Thanks @nicolasstanley. -- Feishu: expand channel handling (posts with images, doc links, routing, reactions/typing, replies, native commands). (#8975) Thanks @jiulingyun. - Onboarding: add Cloudflare AI Gateway provider setup and docs. (#7914) Thanks @roerohan. - Onboarding: add Moonshot (.cn) auth choice and keep the China base URL when preserving defaults. (#7180) Thanks @waynelwz. -- Onboarding: add xAI (Grok) auth choice and provider defaults. (#9885) Thanks @grp06. - Docs: clarify tmux send-keys for TUI by splitting text and Enter. 
(#7737) Thanks @Wangnov. -- Web UI: add Token Usage dashboard with session analytics. (#8462) Thanks @mcinteerj. - Docs: mirror the landing page revamp for zh-CN (features, quickstart, docs directory, network model, credits). (#8994) Thanks @joshp123. -- Docs: strengthen secure DM mode guidance for multi-user inboxes with an explicit warning and example. (#9377) Thanks @Shrinija17. -- Docs: document `activeHours` heartbeat field with timezone resolution chain and example. (#9366) Thanks @unisone. - Messages: add per-channel and per-account responsePrefix overrides across channels. (#9001) Thanks @mudrii. - Cron: add announce delivery mode for isolated jobs (CLI + Control UI) and delivery mode config. - Cron: default isolated jobs to announce delivery; accept ISO 8601 `schedule.at` in tool inputs. @@ -35,33 +58,15 @@ Docs: https://docs.openclaw.ai ### Fixes -- Control UI: add hardened fallback for asset resolution in global npm installs. (#4855) Thanks @anapivirtua. -- Update: remove dead restore control-ui step that failed on gitignored dist/ output. -- Update: avoid wiping prebuilt Control UI assets during dev auto-builds (`tsdown --no-clean`), run update doctor via `openclaw.mjs`, and auto-restore missing UI assets after doctor. (#10146) Thanks @gumadeiras. -- Models: add forward-compat fallback for `openai-codex/gpt-5.3-codex` when model registry hasn't discovered it yet. (#9989) Thanks @w1kke. -- Auto-reply/Docs: normalize `extra-high` (and spaced variants) to `xhigh` for Codex thinking levels, and align Codex 5.3 FAQ examples. (#9976) Thanks @slonce70. -- Compaction: remove orphaned `tool_result` messages during history pruning to prevent session corruption from aborted tool calls. (#9868, fixes #9769, #9724, #9672) -- Telegram: pass `parentPeer` for forum topic binding inheritance so group-level bindings apply to all topics within the group. 
(#9789, fixes #9545, #9351) -- CLI: pass `--disable-warning=ExperimentalWarning` as a Node CLI option when respawning (avoid disallowed `NODE_OPTIONS` usage; fixes npm pack). (#9691) Thanks @18-RAJAT. -- CLI: resolve bundled Chrome extension assets by walking up to the nearest assets directory; add resolver and clipboard tests. (#8914) Thanks @kelvinCB. -- Tests: stabilize Windows ACL coverage with deterministic os.userInfo mocking. (#9335) Thanks @M00N7682. -- Exec approvals: coerce bare string allowlist entries to objects to prevent allowlist corruption. (#9903, fixes #9790) Thanks @mcaxtr. - Heartbeat: allow explicit accountId routing for multi-account channels. (#8702) Thanks @lsh411. - TUI/Gateway: handle non-streaming finals, refresh history for non-local chat runs, and avoid event gap warnings for targeted tool streams. (#8432) Thanks @gumadeiras. -- Security: stop exposing Gateway auth tokens via URL query parameters in Control UI entrypoints, and reject hook tokens in query parameters. (#9436) Thanks @coygeek. - Shell completion: auto-detect and migrate slow dynamic patterns to cached files for faster terminal startup; add completion health checks to doctor/update/onboard. - Telegram: honor session model overrides in inline model selection. (#8193) Thanks @gildo. - Web UI: fix agent model selection saves for default/non-default agents and wrap long workspace paths. Thanks @Takhoffman. - Web UI: resolve header logo path when `gateway.controlUi.basePath` is set. (#7178) Thanks @Yeom-JinHo. - Web UI: apply button styling to the new-messages indicator. - Onboarding: infer auth choice from non-interactive API key flags. (#8484) Thanks @f-trycua. -- Usage: include estimated cost when breakdown is missing and keep `usage.cost` days support. (#8462) Thanks @mcinteerj. - Security: keep untrusted channel metadata out of system prompts (Slack/Discord). Thanks @KonstantinMirin. 
-- Security: redact channel credentials (tokens, passwords, API keys, secrets) from gateway config APIs and preserve secrets during Control UI round-trips. (#9858) Thanks @abdelsfane. -- Discord: treat allowlisted senders as owner for system-prompt identity hints while keeping channel topics untrusted. -- Slack: strip `<@...>` mention tokens before command matching so `/new` and `/reset` work when prefixed with a mention. (#9971) Thanks @ironbyte-rgb. -- Agents: cap `sessions_history` tool output and strip oversized fields to prevent context overflow. (#10000) Thanks @gut-puncture. -- Security: normalize code safety finding paths in `openclaw security audit --deep` output for cross-platform consistency. (#10000) Thanks @gut-puncture. - Security: enforce sandboxed media paths for message tool attachments. (#9182) Thanks @victormier. - Security: require explicit credentials for gateway URL overrides to prevent credential leakage. (#8113) Thanks @victormier. - Security: gate `whatsapp_login` tool to owner senders and default-deny non-owner contexts. (#8768) Thanks @victormier. @@ -69,13 +74,9 @@ Docs: https://docs.openclaw.ai - Voice call: add regression coverage for anonymous inbound caller IDs with allowlist policy. (#8104) Thanks @victormier. - Cron: accept epoch timestamps and 0ms durations in CLI `--at` parsing. - Cron: reload store data when the store file is recreated or mtime changes. -- Cron: prevent `recomputeNextRuns` from skipping due jobs when timer fires late by reordering `onTimer` flow. (#9823, fixes #9788) Thanks @pycckuu. - Cron: deliver announce runs directly, honor delivery mode, and respect wakeMode for summaries. (#8540) Thanks @tyler6204. -- Cron: correct announce delivery inference for thread session keys and null delivery inputs. (#9733) Thanks @tyler6204. - Telegram: include forward_from_chat metadata in forwarded messages and harden cron delivery target checks. (#8392) Thanks @Glucksberg. 
-- Telegram: preserve DM topic threadId in deliveryContext. (#9039) Thanks @lailoo. - macOS: fix cron payload summary rendering and ISO 8601 formatter concurrency safety. -- Security: require gateway auth for Canvas host and A2UI assets. (#9518) Thanks @coygeek. ## 2026.2.2-3 diff --git a/apps/android/app/build.gradle.kts b/apps/android/app/build.gradle.kts index f2670ba01..47056143f 100644 --- a/apps/android/app/build.gradle.kts +++ b/apps/android/app/build.gradle.kts @@ -22,7 +22,7 @@ android { minSdk = 31 targetSdk = 36 versionCode = 202602030 - versionName = "2026.2.4" + versionName = "2026.2.6" } buildTypes { diff --git a/apps/ios/Sources/Info.plist b/apps/ios/Sources/Info.plist index 5d2b8b26a..66c06c0dc 100644 --- a/apps/ios/Sources/Info.plist +++ b/apps/ios/Sources/Info.plist @@ -19,7 +19,7 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.2.4 + 2026.2.6 CFBundleVersion 20260202 NSAppTransportSecurity diff --git a/apps/ios/Tests/Info.plist b/apps/ios/Tests/Info.plist index 3f858bf93..3d4031816 100644 --- a/apps/ios/Tests/Info.plist +++ b/apps/ios/Tests/Info.plist @@ -17,7 +17,7 @@ CFBundlePackageType BNDL CFBundleShortVersionString - 2026.2.4 + 2026.2.6 CFBundleVersion 20260202 diff --git a/apps/ios/project.yml b/apps/ios/project.yml index 82b0df676..6189ca639 100644 --- a/apps/ios/project.yml +++ b/apps/ios/project.yml @@ -81,7 +81,7 @@ targets: properties: CFBundleDisplayName: OpenClaw CFBundleIconName: AppIcon - CFBundleShortVersionString: "2026.2.4" + CFBundleShortVersionString: "2026.2.6" CFBundleVersion: "20260202" UILaunchScreen: {} UIApplicationSceneManifest: @@ -130,5 +130,5 @@ targets: path: Tests/Info.plist properties: CFBundleDisplayName: OpenClawTests - CFBundleShortVersionString: "2026.2.4" + CFBundleShortVersionString: "2026.2.6" CFBundleVersion: "20260202" diff --git a/apps/macos/Sources/OpenClaw/CronJobEditor.swift b/apps/macos/Sources/OpenClaw/CronJobEditor.swift index a5207ca10..517d32df4 100644 --- 
a/apps/macos/Sources/OpenClaw/CronJobEditor.swift +++ b/apps/macos/Sources/OpenClaw/CronJobEditor.swift @@ -29,7 +29,7 @@ struct CronJobEditor: View { @State var agentId: String = "" @State var enabled: Bool = true @State var sessionTarget: CronSessionTarget = .main - @State var wakeMode: CronWakeMode = .nextHeartbeat + @State var wakeMode: CronWakeMode = .now @State var deleteAfterRun: Bool = false enum ScheduleKind: String, CaseIterable, Identifiable { case at, every, cron; var id: String { rawValue } } @@ -119,8 +119,8 @@ struct CronJobEditor: View { GridRow { self.gridLabel("Wake mode") Picker("", selection: self.$wakeMode) { - Text("next-heartbeat").tag(CronWakeMode.nextHeartbeat) Text("now").tag(CronWakeMode.now) + Text("next-heartbeat").tag(CronWakeMode.nextHeartbeat) } .labelsHidden() .pickerStyle(.segmented) diff --git a/apps/macos/Sources/OpenClaw/Resources/Info.plist b/apps/macos/Sources/OpenClaw/Resources/Info.plist index 9ed7e6a0c..067035d87 100644 --- a/apps/macos/Sources/OpenClaw/Resources/Info.plist +++ b/apps/macos/Sources/OpenClaw/Resources/Info.plist @@ -15,7 +15,7 @@ CFBundlePackageType APPL CFBundleShortVersionString - 2026.2.4 + 2026.2.6 CFBundleVersion 202602020 CFBundleIconFile diff --git a/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift b/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift index dd3cfb50a..07c9db84e 100644 --- a/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift +++ b/apps/macos/Sources/OpenClawProtocol/GatewayModels.swift @@ -2025,6 +2025,8 @@ public struct CronRunLogEntry: Codable, Sendable { public let status: AnyCodable? public let error: String? public let summary: String? + public let sessionid: String? + public let sessionkey: String? public let runatms: Int? public let durationms: Int? public let nextrunatms: Int? 
@@ -2036,6 +2038,8 @@ public struct CronRunLogEntry: Codable, Sendable { status: AnyCodable?, error: String?, summary: String?, + sessionid: String?, + sessionkey: String?, runatms: Int?, durationms: Int?, nextrunatms: Int? @@ -2046,6 +2050,8 @@ public struct CronRunLogEntry: Codable, Sendable { self.status = status self.error = error self.summary = summary + self.sessionid = sessionid + self.sessionkey = sessionkey self.runatms = runatms self.durationms = durationms self.nextrunatms = nextrunatms @@ -2057,6 +2063,8 @@ public struct CronRunLogEntry: Codable, Sendable { case status case error case summary + case sessionid = "sessionId" + case sessionkey = "sessionKey" case runatms = "runAtMs" case durationms = "durationMs" case nextrunatms = "nextRunAtMs" diff --git a/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift b/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift index dd3cfb50a..07c9db84e 100644 --- a/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift +++ b/apps/shared/OpenClawKit/Sources/OpenClawProtocol/GatewayModels.swift @@ -2025,6 +2025,8 @@ public struct CronRunLogEntry: Codable, Sendable { public let status: AnyCodable? public let error: String? public let summary: String? + public let sessionid: String? + public let sessionkey: String? public let runatms: Int? public let durationms: Int? public let nextrunatms: Int? @@ -2036,6 +2038,8 @@ public struct CronRunLogEntry: Codable, Sendable { status: AnyCodable?, error: String?, summary: String?, + sessionid: String?, + sessionkey: String?, runatms: Int?, durationms: Int?, nextrunatms: Int? 
@@ -2046,6 +2050,8 @@ public struct CronRunLogEntry: Codable, Sendable { self.status = status self.error = error self.summary = summary + self.sessionid = sessionid + self.sessionkey = sessionkey self.runatms = runatms self.durationms = durationms self.nextrunatms = nextrunatms @@ -2057,6 +2063,8 @@ public struct CronRunLogEntry: Codable, Sendable { case status case error case summary + case sessionid = "sessionId" + case sessionkey = "sessionKey" case runatms = "runAtMs" case durationms = "durationMs" case nextrunatms = "nextRunAtMs" diff --git a/docs/automation/cron-jobs.md b/docs/automation/cron-jobs.md index 8eb79881e..54d6a9647 100644 --- a/docs/automation/cron-jobs.md +++ b/docs/automation/cron-jobs.md @@ -40,7 +40,7 @@ openclaw cron add \ --delete-after-run openclaw cron list -openclaw cron run --force +openclaw cron run openclaw cron runs --id ``` @@ -123,8 +123,8 @@ local timezone is used. Main jobs enqueue a system event and optionally wake the heartbeat runner. They must use `payload.kind = "systemEvent"`. -- `wakeMode: "next-heartbeat"` (default): event waits for the next scheduled heartbeat. -- `wakeMode: "now"`: event triggers an immediate heartbeat run. +- `wakeMode: "now"` (default): event triggers an immediate heartbeat run. +- `wakeMode: "next-heartbeat"`: event waits for the next scheduled heartbeat. This is the best fit when you want the normal heartbeat prompt + main-session context. See [Heartbeat](/gateway/heartbeat). @@ -288,7 +288,7 @@ Notes: - `sessionTarget` must be `"main"` or `"isolated"` and must match `payload.kind`. - Optional fields: `agentId`, `description`, `enabled`, `deleteAfterRun` (defaults to true for `at`), `delivery`. -- `wakeMode` defaults to `"next-heartbeat"` when omitted. +- `wakeMode` defaults to `"now"` when omitted. 
### cron.update params @@ -420,10 +420,11 @@ openclaw cron edit --agent ops openclaw cron edit --clear-agent ``` -Manual run (debug): +Manual run (force is the default, use `--due` to only run when due): ```bash -openclaw cron run --force +openclaw cron run +openclaw cron run --due ``` Edit an existing job (patch fields): diff --git a/docs/brave-search.md b/docs/brave-search.md index 260647942..ba18a6c55 100644 --- a/docs/brave-search.md +++ b/docs/brave-search.md @@ -12,7 +12,7 @@ OpenClaw uses Brave Search as the default provider for `web_search`. ## Get an API key -1. Create a Brave Search API account at https://brave.com/search/api/ +1. Create a Brave Search API account at [https://brave.com/search/api/](https://brave.com/search/api/) 2. In the dashboard, choose the **Data for Search** plan and generate an API key. 3. Store the key in config (recommended) or set `BRAVE_API_KEY` in the Gateway environment. diff --git a/docs/channels/bluebubbles.md b/docs/channels/bluebubbles.md index b40fc375d..1b4324087 100644 --- a/docs/channels/bluebubbles.md +++ b/docs/channels/bluebubbles.md @@ -27,6 +27,7 @@ Status: bundled plugin that talks to the BlueBubbles macOS server over HTTP. **R 1. Install the BlueBubbles server on your Mac (follow the instructions at [bluebubbles.app/install](https://bluebubbles.app/install)). 2. In the BlueBubbles config, enable the web API and set a password. 3. Run `openclaw onboard` and select BlueBubbles, or configure manually: + ```json5 { channels: { @@ -39,6 +40,7 @@ Status: bundled plugin that talks to the BlueBubbles macOS server over HTTP. **R }, } ``` + 4. Point BlueBubbles webhooks to your gateway (example: `https://your-gateway-host:3000/bluebubbles-webhook?password=`). 5. Start the gateway; it will register the webhook handler and start pairing. @@ -335,4 +337,4 @@ Prefer `chat_guid` for stable routing: - OpenClaw auto-hides known-broken actions based on the BlueBubbles server's macOS version. 
If edit still appears on macOS 26 (Tahoe), disable it manually with `channels.bluebubbles.actions.edit=false`. - For status/health info: `openclaw status --all` or `openclaw status --deep`. -For general channel workflow reference, see [Channels](/channels) and the [Plugins](/plugins) guide. +For general channel workflow reference, see [Channels](/channels) and the [Plugins](/plugin) guide. diff --git a/docs/channels/feishu.md b/docs/channels/feishu.md index 2c6ba1e7f..e15feafe3 100644 --- a/docs/channels/feishu.md +++ b/docs/channels/feishu.md @@ -75,7 +75,7 @@ Choose **Feishu**, then enter the App ID and App Secret. Visit [Feishu Open Platform](https://open.feishu.cn/app) and sign in. -Lark (global) tenants should use https://open.larksuite.com/app and set `domain: "lark"` in the Feishu config. +Lark (global) tenants should use [https://open.larksuite.com/app](https://open.larksuite.com/app) and set `domain: "lark"` in the Feishu config. ### 2. Create an app @@ -261,10 +261,12 @@ After approval, you can chat normally. - **Default**: `dmPolicy: "pairing"` (unknown users get a pairing code) - **Approve pairing**: + ```bash openclaw pairing list feishu openclaw pairing approve feishu ``` + - **Allowlist mode**: set `channels.feishu.allowFrom` with allowed Open IDs ### Group chats diff --git a/docs/channels/googlechat.md b/docs/channels/googlechat.md index 07c7dd7dc..39192ecae 100644 --- a/docs/channels/googlechat.md +++ b/docs/channels/googlechat.md @@ -101,6 +101,7 @@ Use Tailscale Serve for the private dashboard and Funnel for the public webhook If prompted, visit the authorization URL shown in the output to enable Funnel for this node in your tailnet policy. 5. **Verify the configuration:** + ```bash tailscale serve status tailscale funnel status @@ -225,6 +226,7 @@ This means the webhook handler isn't registered. Common causes: If it shows "disabled", add `plugins.entries.googlechat.enabled: true` to your config. 3. 
**Gateway not restarted**: After adding config, restart the gateway: + ```bash openclaw gateway restart ``` diff --git a/docs/channels/imessage.md b/docs/channels/imessage.md index 5542b3190..080d3cb87 100644 --- a/docs/channels/imessage.md +++ b/docs/channels/imessage.md @@ -62,6 +62,28 @@ Disable with: - Automation permission when sending. - `channels.imessage.cliPath` can point to any command that proxies stdin/stdout (for example, a wrapper script that SSHes to another Mac and runs `imsg rpc`). +## Troubleshooting macOS Privacy and Security TCC + +If sending/receiving fails (for example, `imsg rpc` exits non-zero, times out, or the gateway appears to hang), a common cause is a macOS permission prompt that was never approved. + +macOS grants TCC permissions per app/process context. Approve prompts in the same context that runs `imsg` (for example, Terminal/iTerm, a LaunchAgent session, or an SSH-launched process). + +Checklist: + +- **Full Disk Access**: allow access for the process running OpenClaw (and any shell/SSH wrapper that executes `imsg`). This is required to read the Messages database (`chat.db`). +- **Automation → Messages**: allow the process running OpenClaw (and/or your terminal) to control **Messages.app** for outbound sends. +- **`imsg` CLI health**: verify `imsg` is installed and supports RPC (`imsg rpc --help`). + +Tip: If OpenClaw is running headless (LaunchAgent/systemd/SSH) the macOS prompt can be easy to miss. Run a one-time interactive command in a GUI terminal to force the prompt, then retry: + +```bash +imsg chats --limit 1 +# or +imsg send "test" +``` + +Related macOS folder permissions (Desktop/Documents/Downloads): [/platforms/mac/permissions](/platforms/mac/permissions). + ## Setup (fast path) 1. Ensure Messages is signed in on this Mac. @@ -81,7 +103,7 @@ If you want the bot to send from a **separate iMessage identity** (and keep your 6. Set up SSH so `ssh @localhost true` works without a password. 7. 
Point `channels.imessage.accounts.bot.cliPath` at an SSH wrapper that runs `imsg` as the bot user. -First-run note: sending/receiving may require GUI approvals (Automation + Full Disk Access) in the _bot macOS user_. If `imsg rpc` looks stuck or exits, log into that user (Screen Sharing helps), run a one-time `imsg chats --limit 1` / `imsg send ...`, approve prompts, then retry. +First-run note: sending/receiving may require GUI approvals (Automation + Full Disk Access) in the _bot macOS user_. If `imsg rpc` looks stuck or exits, log into that user (Screen Sharing helps), run a one-time `imsg chats --limit 1` / `imsg send ...`, approve prompts, then retry. See [Troubleshooting macOS Privacy and Security TCC](#troubleshooting-macos-privacy-and-security-tcc). Example wrapper (`chmod +x`). Replace `` with your actual macOS username: diff --git a/docs/channels/line.md b/docs/channels/line.md index f68ae5aa1..d32e683fb 100644 --- a/docs/channels/line.md +++ b/docs/channels/line.md @@ -34,7 +34,7 @@ openclaw plugins install ./extensions/line ## Setup 1. Create a LINE Developers account and open the Console: - https://developers.line.biz/console/ + [https://developers.line.biz/console/](https://developers.line.biz/console/) 2. Create (or pick) a Provider and add a **Messaging API** channel. 3. Copy the **Channel access token** and **Channel secret** from the channel settings. 4. Enable **Use webhook** in the Messaging API settings. diff --git a/docs/channels/matrix.md b/docs/channels/matrix.md index a196a68b6..56b363fdd 100644 --- a/docs/channels/matrix.md +++ b/docs/channels/matrix.md @@ -74,7 +74,7 @@ Details: [Plugins](/plugin) - When set, `channels.matrix.userId` should be the full Matrix ID (example: `@bot:example.org`). 5. Restart the gateway (or finish onboarding). 6. Start a DM with the bot or invite it to a room from any Matrix client - (Element, Beeper, etc.; see https://matrix.org/ecosystem/clients/). 
Beeper requires E2EE, + (Element, Beeper, etc.; see [https://matrix.org/ecosystem/clients/](https://matrix.org/ecosystem/clients/)). Beeper requires E2EE, so set `channels.matrix.encryption: true` and verify the device. Minimal config (access token, user ID auto-fetched): diff --git a/docs/channels/msteams.md b/docs/channels/msteams.md index a18e8063d..572ff1284 100644 --- a/docs/channels/msteams.md +++ b/docs/channels/msteams.md @@ -558,6 +558,7 @@ Bots don't have a personal OneDrive drive (the `/me/drive` Graph API endpoint do ``` 4. **Configure OpenClaw:** + ```json5 { channels: { @@ -747,7 +748,7 @@ Bots have limited support in private channels: - **"Icon file cannot be empty":** The manifest references icon files that are 0 bytes. Create valid PNG icons (32x32 for `outline.png`, 192x192 for `color.png`). - **"webApplicationInfo.Id already in use":** The app is still installed in another team/chat. Find and uninstall it first, or wait 5-10 minutes for propagation. -- **"Something went wrong" on upload:** Upload via https://admin.teams.microsoft.com instead, open browser DevTools (F12) → Network tab, and check the response body for the actual error. +- **"Something went wrong" on upload:** Upload via [https://admin.teams.microsoft.com](https://admin.teams.microsoft.com) instead, open browser DevTools (F12) → Network tab, and check the response body for the actual error. - **Sideload failing:** Try "Upload an app to your org's app catalog" instead of "Upload a custom app" - this often bypasses sideload restrictions. ### RSC permissions not working diff --git a/docs/channels/nextcloud-talk.md b/docs/channels/nextcloud-talk.md index edca54bc4..efecfd990 100644 --- a/docs/channels/nextcloud-talk.md +++ b/docs/channels/nextcloud-talk.md @@ -34,9 +34,11 @@ Details: [Plugins](/plugin) 1. Install the Nextcloud Talk plugin. 2. On your Nextcloud server, create a bot: + ```bash ./occ talk:bot:install "OpenClaw" "" "" --feature reaction ``` + 3. 
Enable the bot in the target room settings. 4. Configure OpenClaw: - Config: `channels.nextcloud-talk.baseUrl` + `channels.nextcloud-talk.botSecret` diff --git a/docs/channels/slack.md b/docs/channels/slack.md index a9dbc2466..1343ebf77 100644 --- a/docs/channels/slack.md +++ b/docs/channels/slack.md @@ -30,7 +30,7 @@ Minimal config: ### Setup -1. Create a Slack app (From scratch) in https://api.slack.com/apps. +1. Create a Slack app (From scratch) in [https://api.slack.com/apps](https://api.slack.com/apps). 2. **Socket Mode** → toggle on. Then go to **Basic Information** → **App-Level Tokens** → **Generate Token and Scopes** with scope `connections:write`. Copy the **App Token** (`xapp-...`). 3. **OAuth & Permissions** → add bot token scopes (use the manifest below). Click **Install to Workspace**. Copy the **Bot User OAuth Token** (`xoxb-...`). 4. Optional: **OAuth & Permissions** → add **User Token Scopes** (see the read-only list below). Reinstall the app and copy the **User OAuth Token** (`xoxp-...`). @@ -49,7 +49,7 @@ Use the manifest below so scopes and events stay in sync. Multi-account support: use `channels.slack.accounts` with per-account tokens and optional `name`. See [`gateway/configuration`](/gateway/configuration#telegramaccounts--discordaccounts--slackaccounts--signalaccounts--imessageaccounts) for the shared pattern. -### OpenClaw config (minimal) +### OpenClaw config (Socket mode) Set tokens via env vars (recommended): @@ -130,7 +130,7 @@ Example with userTokenReadOnly explicitly set (allow user token writes): Use HTTP webhook mode when your Gateway is reachable by Slack over HTTPS (typical for server deployments). HTTP mode uses the Events API + Interactivity + Slash Commands with a shared request URL. -### Setup +### Setup (HTTP mode) 1. Create a Slack app and **disable Socket Mode** (optional if you only use HTTP). 2. **Basic Information** → copy the **Signing Secret**. 
@@ -260,30 +260,30 @@ If you enable native commands, add one `slash_commands` entry per command you wa Slack's Conversations API is type-scoped: you only need the scopes for the conversation types you actually touch (channels, groups, im, mpim). See -https://docs.slack.dev/apis/web-api/using-the-conversations-api/ for the overview. +[https://docs.slack.dev/apis/web-api/using-the-conversations-api/](https://docs.slack.dev/apis/web-api/using-the-conversations-api/) for the overview. ### Bot token scopes (required) - `chat:write` (send/update/delete messages via `chat.postMessage`) - https://docs.slack.dev/reference/methods/chat.postMessage + [https://docs.slack.dev/reference/methods/chat.postMessage](https://docs.slack.dev/reference/methods/chat.postMessage) - `im:write` (open DMs via `conversations.open` for user DMs) - https://docs.slack.dev/reference/methods/conversations.open + [https://docs.slack.dev/reference/methods/conversations.open](https://docs.slack.dev/reference/methods/conversations.open) - `channels:history`, `groups:history`, `im:history`, `mpim:history` - https://docs.slack.dev/reference/methods/conversations.history + [https://docs.slack.dev/reference/methods/conversations.history](https://docs.slack.dev/reference/methods/conversations.history) - `channels:read`, `groups:read`, `im:read`, `mpim:read` - https://docs.slack.dev/reference/methods/conversations.info + [https://docs.slack.dev/reference/methods/conversations.info](https://docs.slack.dev/reference/methods/conversations.info) - `users:read` (user lookup) - https://docs.slack.dev/reference/methods/users.info + [https://docs.slack.dev/reference/methods/users.info](https://docs.slack.dev/reference/methods/users.info) - `reactions:read`, `reactions:write` (`reactions.get` / `reactions.add`) - https://docs.slack.dev/reference/methods/reactions.get - https://docs.slack.dev/reference/methods/reactions.add + 
[https://docs.slack.dev/reference/methods/reactions.get](https://docs.slack.dev/reference/methods/reactions.get) + [https://docs.slack.dev/reference/methods/reactions.add](https://docs.slack.dev/reference/methods/reactions.add) - `pins:read`, `pins:write` (`pins.list` / `pins.add` / `pins.remove`) - https://docs.slack.dev/reference/scopes/pins.read - https://docs.slack.dev/reference/scopes/pins.write + [https://docs.slack.dev/reference/scopes/pins.read](https://docs.slack.dev/reference/scopes/pins.read) + [https://docs.slack.dev/reference/scopes/pins.write](https://docs.slack.dev/reference/scopes/pins.write) - `emoji:read` (`emoji.list`) - https://docs.slack.dev/reference/scopes/emoji.read + [https://docs.slack.dev/reference/scopes/emoji.read](https://docs.slack.dev/reference/scopes/emoji.read) - `files:write` (uploads via `files.uploadV2`) - https://docs.slack.dev/messaging/working-with-files/#upload + [https://docs.slack.dev/messaging/working-with-files/#upload](https://docs.slack.dev/messaging/working-with-files/#upload) ### User token scopes (optional, read-only by default) @@ -302,9 +302,9 @@ Add these under **User Token Scopes** if you configure `channels.slack.userToken - `mpim:write` (only if we add group-DM open/DM start via `conversations.open`) - `groups:write` (only if we add private-channel management: create/rename/invite/archive) - `chat:write.public` (only if we want to post to channels the bot isn't in) - https://docs.slack.dev/reference/scopes/chat.write.public + [https://docs.slack.dev/reference/scopes/chat.write.public](https://docs.slack.dev/reference/scopes/chat.write.public) - `users:read.email` (only if we need email fields from `users.info`) - https://docs.slack.dev/changelog/2017-04-narrowing-email-access + [https://docs.slack.dev/changelog/2017-04-narrowing-email-access](https://docs.slack.dev/changelog/2017-04-narrowing-email-access) - `files:read` (only if we start listing/reading file metadata) ## Config diff --git 
a/docs/channels/telegram.md b/docs/channels/telegram.md index 655749d87..609daf9a6 100644 --- a/docs/channels/telegram.md +++ b/docs/channels/telegram.md @@ -147,7 +147,7 @@ You can add custom commands to the menu via config: } ``` -## Troubleshooting +## Setup troubleshooting (commands) - `setMyCommands failed` in logs usually means outbound HTTPS/DNS is blocked to `api.telegram.org`. - If you see `sendMessage` or `sendChatAction` failures, check IPv6 routing and DNS. @@ -365,6 +365,7 @@ Alternate (official Bot API): 1. DM your bot. 2. Fetch updates with your bot token and read `message.from.id`: + ```bash curl "https://api.telegram.org/bot/getUpdates" ``` diff --git a/docs/channels/troubleshooting.md b/docs/channels/troubleshooting.md index 929b0c776..8a6cdca90 100644 --- a/docs/channels/troubleshooting.md +++ b/docs/channels/troubleshooting.md @@ -1,5 +1,5 @@ --- -summary: "Channel-specific troubleshooting shortcuts (Discord/Telegram/WhatsApp)" +summary: "Channel-specific troubleshooting shortcuts (Discord/Telegram/WhatsApp/iMessage)" read_when: - A channel connects but messages don’t flow - Investigating channel misconfiguration (intents, permissions, privacy mode) @@ -22,6 +22,7 @@ openclaw channels status --probe - Discord: [/channels/discord#troubleshooting](/channels/discord#troubleshooting) - Telegram: [/channels/telegram#troubleshooting](/channels/telegram#troubleshooting) - WhatsApp: [/channels/whatsapp#troubleshooting-quick](/channels/whatsapp#troubleshooting-quick) +- iMessage (legacy): [/channels/imessage#troubleshooting-macos-privacy-and-security-tcc](/channels/imessage#troubleshooting-macos-privacy-and-security-tcc) ## Telegram quick fixes diff --git a/docs/channels/twitch.md b/docs/channels/twitch.md index 7901c0427..ac46e35d6 100644 --- a/docs/channels/twitch.md +++ b/docs/channels/twitch.md @@ -34,7 +34,7 @@ Details: [Plugins](/plugin) - Select **Bot Token** - Verify scopes `chat:read` and `chat:write` are selected - Copy the **Client ID** and 
**Access Token** -3. Find your Twitch user ID: https://www.streamweasels.com/tools/convert-twitch-username-to-user-id/ +3. Find your Twitch user ID: [https://www.streamweasels.com/tools/convert-twitch-username-to-user-id/](https://www.streamweasels.com/tools/convert-twitch-username-to-user-id/) 4. Configure the token: - Env: `OPENCLAW_TWITCH_ACCESS_TOKEN=...` (default account only) - Or config: `channels.twitch.accessToken` @@ -123,7 +123,7 @@ Prefer `allowFrom` for a hard allowlist. Use `allowedRoles` instead if you want **Why user IDs?** Usernames can change, allowing impersonation. User IDs are permanent. -Find your Twitch user ID: https://www.streamweasels.com/tools/convert-twitch-username-%20to-user-id/ (Convert your Twitch username to ID) +Find your Twitch user ID: [https://www.streamweasels.com/tools/convert-twitch-username-to-user-id/](https://www.streamweasels.com/tools/convert-twitch-username-to-user-id/) (Convert your Twitch username to ID) ## Token refresh (optional) diff --git a/docs/channels/whatsapp.md b/docs/channels/whatsapp.md index 1741ee1b7..966c0902a 100644 --- a/docs/channels/whatsapp.md +++ b/docs/channels/whatsapp.md @@ -205,11 +205,13 @@ The wizard uses it to set your **allowlist/owner** so your own DMs are permitted - `Body` is the current message body with envelope. - Quoted reply context is **always appended**: + ``` [Replying to +1555 id:ABC123] > [/Replying] ``` + - Reply metadata also set: - `ReplyToId` = stanzaId - `ReplyToBody` = quoted body or media placeholder diff --git a/docs/channels/zalo.md b/docs/channels/zalo.md index 0f247190c..88143dd58 100644 --- a/docs/channels/zalo.md +++ b/docs/channels/zalo.md @@ -57,7 +57,7 @@ It is a good fit for support or notifications where you want deterministic routi ### 1) Create a bot token (Zalo Bot Platform) -1. Go to **https://bot.zaloplatforms.com** and sign in. +1. Go to [https://bot.zaloplatforms.com](https://bot.zaloplatforms.com) and sign in. 2.
Create a new bot and configure its settings. 3. Copy the bot token (format: `12345689:abc-xyz`). diff --git a/docs/cli/memory.md b/docs/cli/memory.md index 61b34419b..db56e773f 100644 --- a/docs/cli/memory.md +++ b/docs/cli/memory.md @@ -14,7 +14,7 @@ Provided by the active memory plugin (default: `memory-core`; set `plugins.slots Related: - Memory concept: [Memory](/concepts/memory) -- Plugins: [Plugins](/plugins) +- Plugins: [Plugins](/plugin) ## Examples diff --git a/docs/concepts/architecture.md b/docs/concepts/architecture.md index a1c7f3383..a9676b171 100644 --- a/docs/concepts/architecture.md +++ b/docs/concepts/architecture.md @@ -110,9 +110,11 @@ Details: [Gateway protocol](/gateway/protocol), [Pairing](/start/pairing), - Preferred: Tailscale or VPN. - Alternative: SSH tunnel + ```bash ssh -N -L 18789:127.0.0.1:18789 user@host ``` + - The same handshake + auth token apply over the tunnel. - TLS + optional pinning can be enabled for WS in remote setups. diff --git a/docs/concepts/groups.md b/docs/concepts/groups.md index 04e90106d..635211d33 100644 --- a/docs/concepts/groups.md +++ b/docs/concepts/groups.md @@ -39,12 +39,13 @@ otherwise -> reply ![Group message flow](/images/groups-flow.svg) If you want... -| Goal | What to set | -|------|-------------| -| Allow all groups but only reply on @mentions | `groups: { "*": { requireMention: true } }` | -| Disable all group replies | `groupPolicy: "disabled"` | -| Only specific groups | `groups: { "": { ... } }` (no `"*"` key) | -| Only you can trigger in groups | `groupPolicy: "allowlist"`, `groupAllowFrom: ["+1555..."]` | + +| Goal | What to set | +| -------------------------------------------- | ---------------------------------------------------------- | +| Allow all groups but only reply on @mentions | `groups: { "*": { requireMention: true } }` | +| Disable all group replies | `groupPolicy: "disabled"` | +| Only specific groups | `groups: { "": { ... 
} }` (no `"*"` key) | +| Only you can trigger in groups | `groupPolicy: "allowlist"`, `groupAllowFrom: ["+1555..."]` | ## Session keys diff --git a/docs/concepts/memory.md b/docs/concepts/memory.md index 4b499860b..e213ea5b5 100644 --- a/docs/concepts/memory.md +++ b/docs/concepts/memory.md @@ -88,7 +88,8 @@ Defaults: 1. `local` if a `memorySearch.local.modelPath` is configured and the file exists. 2. `openai` if an OpenAI key can be resolved. 3. `gemini` if a Gemini key can be resolved. - 4. Otherwise memory search stays disabled until configured. + 4. `voyage` if a Voyage key can be resolved. + 5. Otherwise memory search stays disabled until configured. - Local mode uses node-llama-cpp and may require `pnpm approve-builds`. - Uses sqlite-vec (when available) to accelerate vector search inside SQLite. @@ -96,7 +97,8 @@ Remote embeddings **require** an API key for the embedding provider. OpenClaw resolves keys from auth profiles, `models.providers.*.apiKey`, or environment variables. Codex OAuth only covers chat/completions and does **not** satisfy embeddings for memory search. For Gemini, use `GEMINI_API_KEY` or -`models.providers.google.apiKey`. When using a custom OpenAI-compatible endpoint, +`models.providers.google.apiKey`. For Voyage, use `VOYAGE_API_KEY` or +`models.providers.voyage.apiKey`. When using a custom OpenAI-compatible endpoint, set `memorySearch.remote.apiKey` (and optional `memorySearch.remote.headers`). ### QMD backend (experimental) @@ -109,7 +111,7 @@ out to QMD for retrieval. Key points: **Prereqs** - Disabled by default. Opt in per-config (`memory.backend = "qmd"`). -- Install the QMD CLI separately (`bun install -g github.com/tobi/qmd` or grab +- Install the QMD CLI separately (`bun install -g https://github.com/tobi/qmd` or grab a release) and make sure the `qmd` binary is on the gateway’s `PATH`. - QMD needs an SQLite build that allows extensions (`brew install sqlite` on macOS). 
@@ -302,8 +304,8 @@ Why OpenAI batch is fast + cheap: - For large backfills, OpenAI is typically the fastest option we support because we can submit many embedding requests in a single batch job and let OpenAI process them asynchronously. - OpenAI offers discounted pricing for Batch API workloads, so large indexing runs are usually cheaper than sending the same requests synchronously. - See the OpenAI Batch API docs and pricing for details: - - https://platform.openai.com/docs/api-reference/batch - - https://platform.openai.com/pricing + - [https://platform.openai.com/docs/api-reference/batch](https://platform.openai.com/docs/api-reference/batch) + - [https://platform.openai.com/pricing](https://platform.openai.com/pricing) Config example: diff --git a/docs/concepts/model-providers.md b/docs/concepts/model-providers.md index 4d313cf0f..fba56a34a 100644 --- a/docs/concepts/model-providers.md +++ b/docs/concepts/model-providers.md @@ -136,14 +136,14 @@ Moonshot uses OpenAI-compatible endpoints, so configure it as a custom provider: Kimi K2 model IDs: -{/_ moonshot-kimi-k2-model-refs:start _/ && null} +{/_moonshot-kimi-k2-model-refs:start_/ && null} - `moonshot/kimi-k2.5` - `moonshot/kimi-k2-0905-preview` - `moonshot/kimi-k2-turbo-preview` - `moonshot/kimi-k2-thinking` - `moonshot/kimi-k2-thinking-turbo` - {/_ moonshot-kimi-k2-model-refs:end _/ && null} + {/_moonshot-kimi-k2-model-refs:end_/ && null} ```json5 { @@ -242,7 +242,7 @@ Ollama is a local LLM runtime that provides an OpenAI-compatible API: - Provider: `ollama` - Auth: None required (local server) - Example model: `ollama/llama3.3` -- Installation: https://ollama.ai +- Installation: [https://ollama.ai](https://ollama.ai) ```bash # Install Ollama, then pull a model: diff --git a/docs/concepts/session.md b/docs/concepts/session.md index 922bb960f..503dcf37f 100644 --- a/docs/concepts/session.md +++ b/docs/concepts/session.md @@ -17,7 +17,7 @@ Use `session.dmScope` to control how **direct messages** are grouped: 
- `per-account-channel-peer`: isolate by account + channel + sender (recommended for multi-account inboxes). Use `session.identityLinks` to map provider-prefixed peer ids to a canonical identity so the same person shares a DM session across channels when using `per-peer`, `per-channel-peer`, or `per-account-channel-peer`. -### Secure DM mode (recommended for multi-user setups) +## Secure DM mode (recommended for multi-user setups) > **Security Warning:** If your agent can receive DMs from **multiple people**, you should strongly consider enabling secure DM mode. Without it, all users share the same conversation context, which can leak private information between users. diff --git a/docs/concepts/system-prompt.md b/docs/concepts/system-prompt.md index aafa80473..acb2bf8b5 100644 --- a/docs/concepts/system-prompt.md +++ b/docs/concepts/system-prompt.md @@ -110,6 +110,6 @@ This keeps the base prompt small while still enabling targeted skill usage. When available, the system prompt includes a **Documentation** section that points to the local OpenClaw docs directory (either `docs/` in the repo workspace or the bundled npm package docs) and also notes the public mirror, source repo, community Discord, and -ClawHub (https://clawhub.com) for skills discovery. The prompt instructs the model to consult local docs first +ClawHub ([https://clawhub.com](https://clawhub.com)) for skills discovery. The prompt instructs the model to consult local docs first for OpenClaw behavior, commands, configuration, or architecture, and to run `openclaw status` itself when possible (asking the user only when it lacks access). diff --git a/docs/concepts/typebox.md b/docs/concepts/typebox.md index 38ee7d8ca..f60c5b8ef 100644 --- a/docs/concepts/typebox.md +++ b/docs/concepts/typebox.md @@ -280,7 +280,7 @@ Unknown frame types are preserved as raw payloads for forward compatibility. Generated JSON Schema is in the repo at `dist/protocol.schema.json`. 
The published raw file is typically available at: -- https://raw.githubusercontent.com/openclaw/openclaw/main/dist/protocol.schema.json +- [https://raw.githubusercontent.com/openclaw/openclaw/main/dist/protocol.schema.json](https://raw.githubusercontent.com/openclaw/openclaw/main/dist/protocol.schema.json) ## When you change schemas diff --git a/docs/debug/node-issue.md b/docs/debug/node-issue.md index ce46b1a05..8355d2abc 100644 --- a/docs/debug/node-issue.md +++ b/docs/debug/node-issue.md @@ -62,19 +62,21 @@ node --import tsx scripts/repro/tsx-name-repro.ts - Use Bun for dev scripts (current temporary revert). - Use Node + tsc watch, then run compiled output: + ```bash pnpm exec tsc --watch --preserveWatchOutput node --watch openclaw.mjs status ``` + - Confirmed locally: `pnpm exec tsc -p tsconfig.json` + `node openclaw.mjs status` works on Node 25. - Disable esbuild keepNames in the TS loader if possible (prevents `__name` helper insertion); tsx does not currently expose this. - Test Node LTS (22/24) with `tsx` to see if the issue is Node 25–specific. 
## References -- https://opennext.js.org/cloudflare/howtos/keep_names -- https://esbuild.github.io/api/#keep-names -- https://github.com/evanw/esbuild/issues/1031 +- [https://opennext.js.org/cloudflare/howtos/keep_names](https://opennext.js.org/cloudflare/howtos/keep_names) +- [https://esbuild.github.io/api/#keep-names](https://esbuild.github.io/api/#keep-names) +- [https://github.com/evanw/esbuild/issues/1031](https://github.com/evanw/esbuild/issues/1031) ## Next steps diff --git a/docs/docs.json b/docs/docs.json index eba9c9aa5..7395ace49 100644 --- a/docs/docs.json +++ b/docs/docs.json @@ -805,14 +805,8 @@ "pages": ["install/index", "install/installer"] }, { - "group": "Install methods", - "pages": [ - "install/node", - "install/docker", - "install/nix", - "install/ansible", - "install/bun" - ] + "group": "Other install methods", + "pages": ["install/docker", "install/nix", "install/ansible", "install/bun"] }, { "group": "Maintenance", @@ -1226,6 +1220,7 @@ { "group": "Environment and debugging", "pages": [ + "install/node", "environment", "debugging", "testing", diff --git a/docs/gateway/bonjour.md b/docs/gateway/bonjour.md index b8f08741e..9e2ad8753 100644 --- a/docs/gateway/bonjour.md +++ b/docs/gateway/bonjour.md @@ -105,10 +105,13 @@ The Gateway advertises small non‑secret hints to make UI flows convenient: Useful built‑in tools: - Browse instances: + ```bash dns-sd -B _openclaw-gw._tcp local. ``` + - Resolve one instance (replace ``): + ```bash dns-sd -L "" _openclaw-gw._tcp local. ``` diff --git a/docs/gateway/configuration.md b/docs/gateway/configuration.md index 0a5a85f1d..639f84bd6 100644 --- a/docs/gateway/configuration.md +++ b/docs/gateway/configuration.md @@ -1310,13 +1310,14 @@ Thread session isolation: - `channels.slack.thread.inheritParent` controls whether new thread sessions inherit the parent channel transcript (default: false). 
Slack action groups (gate `slack` tool actions): -| Action group | Default | Notes | -| --- | --- | --- | -| reactions | enabled | React + list reactions | -| messages | enabled | Read/send/edit/delete | -| pins | enabled | Pin/unpin/list | -| memberInfo | enabled | Member info | -| emojiList | enabled | Custom emoji list | + +| Action group | Default | Notes | +| ------------ | ------- | ---------------------- | +| reactions | enabled | React + list reactions | +| messages | enabled | Read/send/edit/delete | +| pins | enabled | Pin/unpin/list | +| memberInfo | enabled | Member info | +| emojiList | enabled | Custom emoji list | ### `channels.mattermost` (bot token) @@ -1977,11 +1978,13 @@ Block streaming: - `agents.defaults.blockStreamingChunk`: soft chunking for streamed blocks. Defaults to 800–1200 chars, prefers paragraph breaks (`\n\n`), then newlines, then sentences. Example: + ```json5 { agents: { defaults: { blockStreamingChunk: { minChars: 800, maxChars: 1200 } } }, } ``` + - `agents.defaults.blockStreamingCoalesce`: merge streamed blocks before sending. Defaults to `{ idleMs: 1000 }` and inherits `minChars` from `blockStreamingChunk` with `maxChars` capped to the channel text limit. Signal/Slack/Discord/Google Chat default @@ -1995,11 +1998,13 @@ Block streaming: Modes: `off` (default), `natural` (800–2500ms), `custom` (use `minMs`/`maxMs`). Per-agent override: `agents.list[].humanDelay`. Example: + ```json5 { agents: { defaults: { humanDelay: { mode: "natural" } } }, } ``` + See [/concepts/streaming](/concepts/streaming) for behavior + chunking details. Typing indicators: @@ -2065,7 +2070,7 @@ of `every`, keep `HEARTBEAT.md` tiny, and/or choose a cheaper `model`. 
- `tools.web.fetch.readability` (default true; disable to use basic HTML cleanup only) - `tools.web.fetch.firecrawl.enabled` (default true when an API key is set) - `tools.web.fetch.firecrawl.apiKey` (optional; defaults to `FIRECRAWL_API_KEY`) -- `tools.web.fetch.firecrawl.baseUrl` (default https://api.firecrawl.dev) +- `tools.web.fetch.firecrawl.baseUrl` (default [https://api.firecrawl.dev](https://api.firecrawl.dev)) - `tools.web.fetch.firecrawl.onlyMainContent` (default true) - `tools.web.fetch.firecrawl.maxAgeMs` (optional) - `tools.web.fetch.firecrawl.timeoutSeconds` (optional) @@ -2481,7 +2486,7 @@ Select the model via `agents.defaults.model.primary` (provider/model). OpenCode Zen is a multi-model gateway with per-model endpoints. OpenClaw uses the built-in `opencode` provider from pi-ai; set `OPENCODE_API_KEY` (or -`OPENCODE_ZEN_API_KEY`) from https://opencode.ai/auth. +`OPENCODE_ZEN_API_KEY`) from [https://opencode.ai/auth](https://opencode.ai/auth). Notes: @@ -3366,7 +3371,7 @@ openclaw dns setup --apply } ``` -## Template variables +## Media model template variables Template placeholders are expanded in `tools.media.*.models[].args` and `tools.media.models[].args` (and any future templated argument fields). diff --git a/docs/gateway/index.md b/docs/gateway/index.md index 06dd72c13..64697f1f4 100644 --- a/docs/gateway/index.md +++ b/docs/gateway/index.md @@ -49,9 +49,11 @@ pnpm gateway:watch ## Remote access - Tailscale/VPN preferred; otherwise SSH tunnel: + ```bash ssh -N -L 18789:127.0.0.1:18789 user@host ``` + - Clients then connect to `ws://127.0.0.1:18789` through the tunnel. - If a token is configured, clients must include it in `connect.params.auth.token` even over the tunnel. diff --git a/docs/gateway/local-models.md b/docs/gateway/local-models.md index fe715ab05..3f7e13d41 100644 --- a/docs/gateway/local-models.md +++ b/docs/gateway/local-models.md @@ -52,7 +52,7 @@ Best current local stack. 
Load MiniMax M2.1 in LM Studio, enable the local serve **Setup checklist** -- Install LM Studio: https://lmstudio.ai +- Install LM Studio: [https://lmstudio.ai](https://lmstudio.ai) - In LM Studio, download the **largest MiniMax M2.1 build available** (avoid “small”/heavily quantized variants), start the server, confirm `http://127.0.0.1:1234/v1/models` lists it. - Keep the model loaded; cold-load adds startup latency. - Adjust `contextWindow`/`maxTokens` if your LM Studio build differs. diff --git a/docs/gateway/security/formal-verification.md b/docs/gateway/security/formal-verification.md index a45e63f3c..3ed2b42c1 100644 --- a/docs/gateway/security/formal-verification.md +++ b/docs/gateway/security/formal-verification.md @@ -23,7 +23,7 @@ misconfiguration safety), under explicit assumptions. ## Where the models live -Models are maintained in a separate repo: [vignesh07/openclaw-formal-models](https://github.com/vignesh07/openclaw-formal-models). +Models are maintained in a separate repo: [vignesh07/clawdbot-formal-models](https://github.com/vignesh07/clawdbot-formal-models). ## Important caveats @@ -41,8 +41,8 @@ Today, results are reproduced by cloning the models repo locally and running TLC Getting started: ```bash -git clone https://github.com/vignesh07/openclaw-formal-models -cd openclaw-formal-models +git clone https://github.com/vignesh07/clawdbot-formal-models +cd clawdbot-formal-models # Java 11+ required (TLC runs on the JVM). # The repo vendors a pinned `tla2tools.jar` (TLA+ tools) and provides `bin/tlc` + Make targets. diff --git a/docs/gateway/security/index.md b/docs/gateway/security/index.md index c6b521048..f6bd91734 100644 --- a/docs/gateway/security/index.md +++ b/docs/gateway/security/index.md @@ -773,18 +773,22 @@ If it fails, there are new candidates not yet in the baseline. ### If CI fails 1. Reproduce locally: + ```bash detect-secrets scan --baseline .secrets.baseline ``` + 2. 
Understand the tools: - `detect-secrets scan` finds candidates and compares them to the baseline. - `detect-secrets audit` opens an interactive review to mark each baseline item as real or false positive. 3. For real secrets: rotate/remove them, then re-run the scan to update the baseline. 4. For false positives: run the interactive audit and mark them as false: + ```bash detect-secrets audit .secrets.baseline ``` + 5. If you need new excludes, add them to `.detect-secrets.cfg` and regenerate the baseline with matching `--exclude-files` / `--exclude-lines` flags (the config file is reference-only; detect-secrets doesn’t read it automatically). @@ -814,7 +818,7 @@ Mario asking for find ~ Found a vulnerability in OpenClaw? Please report responsibly: -1. Email: security@openclaw.ai +1. Email: [security@openclaw.ai](mailto:security@openclaw.ai) 2. Don't post publicly until fixed 3. We'll credit you (unless you prefer anonymity) diff --git a/docs/gateway/tailscale.md b/docs/gateway/tailscale.md index 3f4daa111..3a12b7fe1 100644 --- a/docs/gateway/tailscale.md +++ b/docs/gateway/tailscale.md @@ -121,7 +121,7 @@ Avoid Funnel for browser control; treat node pairing like operator access. 
## Learn more -- Tailscale Serve overview: https://tailscale.com/kb/1312/serve -- `tailscale serve` command: https://tailscale.com/kb/1242/tailscale-serve -- Tailscale Funnel overview: https://tailscale.com/kb/1223/tailscale-funnel -- `tailscale funnel` command: https://tailscale.com/kb/1311/tailscale-funnel +- Tailscale Serve overview: [https://tailscale.com/kb/1312/serve](https://tailscale.com/kb/1312/serve) +- `tailscale serve` command: [https://tailscale.com/kb/1242/tailscale-serve](https://tailscale.com/kb/1242/tailscale-serve) +- Tailscale Funnel overview: [https://tailscale.com/kb/1223/tailscale-funnel](https://tailscale.com/kb/1223/tailscale-funnel) +- `tailscale funnel` command: [https://tailscale.com/kb/1311/tailscale-funnel](https://tailscale.com/kb/1311/tailscale-funnel) diff --git a/docs/gateway/troubleshooting.md b/docs/gateway/troubleshooting.md index d9aa303cd..5f9d51f1d 100644 --- a/docs/gateway/troubleshooting.md +++ b/docs/gateway/troubleshooting.md @@ -42,9 +42,11 @@ Fix options: - Re-run onboarding and choose **Anthropic** for that agent. - Or paste a setup-token on the **gateway host**: + ```bash openclaw models auth setup-token --provider anthropic ``` + - Or copy `auth-profiles.json` from the main agent dir to the new agent dir. Verify: @@ -120,13 +122,17 @@ Doctor/service will show runtime state (PID/last exit) and log hints. **Enable more logging:** - Bump file log detail (persisted JSONL): + ```json { "logging": { "level": "debug" } } ``` + - Bump console verbosity (TTY output only): + ```json { "logging": { "consoleLevel": "debug", "consoleStyle": "pretty" } } ``` + - Quick tip: `--verbose` affects **console** output only. File logs remain controlled by `logging.level`. See [/logging](/logging) for a full overview of formats, config, and access. @@ -139,10 +145,13 @@ Gateway refuses to start. 
**Fix (recommended):** - Run the wizard and set the Gateway run mode to **Local**: + ```bash openclaw configure ``` + - Or set it directly: + ```bash openclaw config set gateway.mode local ``` @@ -150,6 +159,7 @@ Gateway refuses to start. **If you meant to run a remote Gateway instead:** - Set a remote URL and keep `gateway.mode=remote`: + ```bash openclaw config set gateway.mode remote openclaw config set gateway.remote.url "wss://gateway.example.com" @@ -554,6 +564,7 @@ Notes: - The git flow only rebases if the repo is clean. Commit or stash changes first. - After switching, run: + ```bash openclaw doctor openclaw gateway restart diff --git a/docs/help/faq.md b/docs/help/faq.md index 2c9e9f1be..fda1acddb 100644 --- a/docs/help/faq.md +++ b/docs/help/faq.md @@ -9,7 +9,7 @@ Quick answers plus deeper troubleshooting for real-world setups (local dev, VPS, ## Table of contents -- [Quick start and first-run setup](#quick-start-and-firstrun-setup) +- [Quick start and first-run setup](#quick-start-and-first-run-setup) - [Im stuck whats the fastest way to get unstuck?](#im-stuck-whats-the-fastest-way-to-get-unstuck) - [What's the recommended way to install and set up OpenClaw?](#whats-the-recommended-way-to-install-and-set-up-openclaw) - [How do I open the dashboard after onboarding?](#how-do-i-open-the-dashboard-after-onboarding) @@ -37,7 +37,7 @@ Quick answers plus deeper troubleshooting for real-world setups (local dev, VPS, - [Can I use Claude Max subscription without an API key](#can-i-use-claude-max-subscription-without-an-api-key) - [How does Anthropic "setup-token" auth work?](#how-does-anthropic-setuptoken-auth-work) - [Where do I find an Anthropic setup-token?](#where-do-i-find-an-anthropic-setuptoken) - - [Do you support Claude subscription auth (Claude Code OAuth)?](#do-you-support-claude-subscription-auth-claude-code-oauth) + - [Do you support Claude subscription auth (Claude Pro or Max)?](#do-you-support-claude-subscription-auth-claude-pro-or-max) - [Why am I seeing `HTTP 429:
rate_limit_error` from Anthropic?](#why-am-i-seeing-http-429-ratelimiterror-from-anthropic) - [Is AWS Bedrock supported?](#is-aws-bedrock-supported) - [How does Codex auth work?](#how-does-codex-auth-work) @@ -74,7 +74,7 @@ Quick answers plus deeper troubleshooting for real-world setups (local dev, VPS, - [Cron or reminders do not fire. What should I check?](#cron-or-reminders-do-not-fire-what-should-i-check) - [How do I install skills on Linux?](#how-do-i-install-skills-on-linux) - [Can OpenClaw run tasks on a schedule or continuously in the background?](#can-openclaw-run-tasks-on-a-schedule-or-continuously-in-the-background) - - [Can I run Apple/macOS-only skills from Linux?](#can-i-run-applemacosonly-skills-from-linux) + - [Can I run Apple macOS-only skills from Linux?](#can-i-run-apple-macos-only-skills-from-linux) - [Do you have a Notion or HeyGen integration?](#do-you-have-a-notion-or-heygen-integration) - [How do I install the Chrome extension for browser takeover?](#how-do-i-install-the-chrome-extension-for-browser-takeover) - [Sandboxing and memory](#sandboxing-and-memory) @@ -102,7 +102,7 @@ Quick answers plus deeper troubleshooting for real-world setups (local dev, VPS, - [How do I run a central Gateway with specialized workers across devices?](#how-do-i-run-a-central-gateway-with-specialized-workers-across-devices) - [Can the OpenClaw browser run headless?](#can-the-openclaw-browser-run-headless) - [How do I use Brave for browser control?](#how-do-i-use-brave-for-browser-control) -- [Remote gateways + nodes](#remote-gateways-nodes) +- [Remote gateways and nodes](#remote-gateways-and-nodes) - [How do commands propagate between Telegram, the gateway, and nodes?](#how-do-commands-propagate-between-telegram-the-gateway-and-nodes) - [How can my agent access my computer if the Gateway is hosted remotely?](#how-can-my-agent-access-my-computer-if-the-gateway-is-hosted-remotely) - [Tailscale is connected but I get no replies. 
What now?](#tailscale-is-connected-but-i-get-no-replies-what-now) @@ -119,7 +119,7 @@ Quick answers plus deeper troubleshooting for real-world setups (local dev, VPS, - [How does OpenClaw load environment variables?](#how-does-openclaw-load-environment-variables) - ["I started the Gateway via the service and my env vars disappeared." What now?](#i-started-the-gateway-via-the-service-and-my-env-vars-disappeared-what-now) - [I set `COPILOT_GITHUB_TOKEN`, but models status shows "Shell env: off." Why?](#i-set-copilotgithubtoken-but-models-status-shows-shell-env-off-why) -- [Sessions & multiple chats](#sessions-multiple-chats) +- [Sessions and multiple chats](#sessions-and-multiple-chats) - [How do I start a fresh conversation?](#how-do-i-start-a-fresh-conversation) - [Do sessions reset automatically if I never send `/new`?](#do-sessions-reset-automatically-if-i-never-send-new) - [Is there a way to make a team of OpenClaw instances one CEO and many agents](#is-there-a-way-to-make-a-team-of-openclaw-instances-one-ceo-and-many-agents) @@ -179,7 +179,7 @@ Quick answers plus deeper troubleshooting for real-world setups (local dev, VPS, - [How do I completely stop then start the Gateway?](#how-do-i-completely-stop-then-start-the-gateway) - [ELI5: `openclaw gateway restart` vs `openclaw gateway`](#eli5-openclaw-gateway-restart-vs-openclaw-gateway) - [What's the fastest way to get more details when something fails?](#whats-the-fastest-way-to-get-more-details-when-something-fails) -- [Media & attachments](#media-attachments) +- [Media and attachments](#media-and-attachments) - [My skill generated an image/PDF, but nothing was sent](#my-skill-generated-an-imagepdf-but-nothing-was-sent) - [Security and access control](#security-and-access-control) - [Is it safe to expose OpenClaw to inbound DMs?](#is-it-safe-to-expose-openclaw-to-inbound-dms) @@ -252,10 +252,12 @@ Quick answers plus deeper troubleshooting for real-world setups (local dev, VPS, Repairs/migrates config/state + 
runs health checks. See [Doctor](/gateway/doctor). 7. **Gateway snapshot** + ```bash openclaw health --json openclaw health --verbose # shows the target URL + config path on errors ``` + Asks the running gateway for a full snapshot (WS-only). See [Health](/gateway/health). ## Quick start and first-run setup @@ -266,8 +268,8 @@ Use a local AI agent that can **see your machine**. That is far more effective t in Discord, because most "I'm stuck" cases are **local config or environment issues** that remote helpers cannot inspect. -- **Claude Code**: https://www.anthropic.com/claude-code/ -- **OpenAI Codex**: https://openai.com/codex/ +- **Claude Code**: [https://www.anthropic.com/claude-code/](https://www.anthropic.com/claude-code/) +- **OpenAI Codex**: [https://openai.com/codex/](https://openai.com/codex/) These tools can read the repo, run commands, inspect logs, and help fix your machine-level setup (PATH, services, permissions, auth files). Give them the **full source checkout** via @@ -285,8 +287,8 @@ Tip: ask the agent to **plan and supervise** the fix (step-by-step), then execut necessary commands. That keeps changes small and easier to audit. If you discover a real bug or fix, please file a GitHub issue or send a PR: -https://github.com/openclaw/openclaw/issues -https://github.com/openclaw/openclaw/pulls +[https://github.com/openclaw/openclaw/issues](https://github.com/openclaw/openclaw/issues) +[https://github.com/openclaw/openclaw/pulls](https://github.com/openclaw/openclaw/pulls) Start with these commands (share outputs when asking for help): @@ -432,7 +434,7 @@ Related: [Migrating](/install/migrating), [Where things live on disk](/help/faq# ### Where do I see what is new in the latest version Check the GitHub changelog: -https://github.com/openclaw/openclaw/blob/main/CHANGELOG.md +[https://github.com/openclaw/openclaw/blob/main/CHANGELOG.md](https://github.com/openclaw/openclaw/blob/main/CHANGELOG.md) Newest entries are at the top. 
If the top section is marked **Unreleased**, the next dated section is the latest shipped version. Entries are grouped by **Highlights**, **Changes**, and @@ -443,10 +445,10 @@ section is the latest shipped version. Entries are grouped by **Highlights**, ** Some Comcast/Xfinity connections incorrectly block `docs.openclaw.ai` via Xfinity Advanced Security. Disable it or allowlist `docs.openclaw.ai`, then retry. More detail: [Troubleshooting](/help/troubleshooting#docsopenclawai-shows-an-ssl-error-comcastxfinity). -Please help us unblock it by reporting here: https://spa.xfinity.com/check_url_status. +Please help us unblock it by reporting here: [https://spa.xfinity.com/check_url_status](https://spa.xfinity.com/check_url_status). If you still can't reach the site, the docs are mirrored on GitHub: -https://github.com/openclaw/openclaw/tree/main/docs +[https://github.com/openclaw/openclaw/tree/main/docs](https://github.com/openclaw/openclaw/tree/main/docs) ### What's the difference between stable and beta @@ -460,7 +462,7 @@ that same version to `latest`**. That's why beta and stable can point at the **same version**. See what changed: -https://github.com/openclaw/openclaw/blob/main/CHANGELOG.md +[https://github.com/openclaw/openclaw/blob/main/CHANGELOG.md](https://github.com/openclaw/openclaw/blob/main/CHANGELOG.md) ### How do I install the beta version and whats the difference between beta and dev @@ -478,7 +480,7 @@ curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s - ``` Windows installer (PowerShell): -https://openclaw.ai/install.ps1 +[https://openclaw.ai/install.ps1](https://openclaw.ai/install.ps1) More detail: [Development channels](/install/development-channels) and [Installer flags](/install/installer). @@ -559,9 +561,11 @@ Two common Windows issues: - Your npm global bin folder is not on PATH. - Check the path: + ```powershell npm config get prefix ``` + - Ensure `\\bin` is on PATH (on most systems it is `%AppData%\\npm`). 
- Close and reopen PowerShell after updating PATH. @@ -685,7 +689,7 @@ claude setup-token Copy the token it prints, then choose **Anthropic token (paste setup-token)** in the wizard. If you want to run it on the gateway host, use `openclaw models auth setup-token --provider anthropic`. If you ran `claude setup-token` elsewhere, paste it on the gateway host with `openclaw models auth paste-token --provider anthropic`. See [Anthropic](/providers/anthropic). -### Do you support Claude subscription auth (Claude Pro/Max) +### Do you support Claude subscription auth (Claude Pro or Max) Yes - via **setup-token**. OpenClaw no longer reuses Claude Code CLI OAuth tokens; use a setup-token or an Anthropic API key. Generate the token anywhere and paste it on the gateway host. See [Anthropic](/providers/anthropic) and [OAuth](/concepts/oauth). @@ -988,7 +992,7 @@ Advantages: - **Always-on Gateway** (run on a VPS, interact from anywhere) - **Nodes** for local browser/screen/camera/exec -Showcase: https://openclaw.ai/showcase +Showcase: [https://openclaw.ai/showcase](https://openclaw.ai/showcase) ## Skills and automation @@ -1046,7 +1050,7 @@ Docs: [Cron jobs](/automation/cron-jobs), [Cron vs Heartbeat](/automation/cron-v ### How do I install skills on Linux Use **ClawHub** (CLI) or drop skills into your workspace. The macOS Skills UI isn't available on Linux. -Browse skills at https://clawhub.com. +Browse skills at [https://clawhub.com](https://clawhub.com). Install the ClawHub CLI (pick one package manager): @@ -1069,7 +1073,7 @@ Yes. Use the Gateway scheduler: Docs: [Cron jobs](/automation/cron-jobs), [Cron vs Heartbeat](/automation/cron-vs-heartbeat), [Heartbeat](/gateway/heartbeat). -**Can I run Apple macOS only skills from Linux** +### Can I run Apple macOS-only skills from Linux? Not directly. macOS skills are gated by `metadata.openclaw.os` plus required binaries, and skills only appear in the system prompt when they are eligible on the **Gateway host**. 
On Linux, `darwin`-only skills (like `apple-notes`, `apple-reminders`, `things-mac`) will not load unless you override the gating. @@ -1085,13 +1089,16 @@ Run the Gateway on Linux, pair a macOS node (menubar app), and set **Node Run Co Keep the Gateway on Linux, but make the required CLI binaries resolve to SSH wrappers that run on a Mac. Then override the skill to allow Linux so it stays eligible. 1. Create an SSH wrapper for the binary (example: `memo` for Apple Notes): + ```bash #!/usr/bin/env bash set -euo pipefail exec ssh -T user@mac-host /opt/homebrew/bin/memo "$@" ``` + 2. Put the wrapper on `PATH` on the Linux host (for example `~/bin/memo`). 3. Override the skill metadata (workspace or `~/.openclaw/skills`) to allow Linux: + ```markdown --- name: apple-notes @@ -1099,6 +1106,7 @@ Keep the Gateway on Linux, but make the required CLI binaries resolve to SSH wra metadata: { "openclaw": { "os": ["darwin", "linux"], "requires": { "bins": ["memo"] } } } --- ``` + 4. Start a new session so the skills snapshot refreshes. ### Do you have a Notion or HeyGen integration @@ -1449,7 +1457,7 @@ Headless uses the **same Chromium engine** and works for most automation (forms, Set `browser.executablePath` to your Brave binary (or any Chromium-based browser) and restart the Gateway. See the full config examples in [Browser](/tools/browser#use-brave-or-another-chromium-based-browser). -## Remote gateways + nodes +## Remote gateways and nodes ### How do commands propagate between Telegram the gateway and nodes @@ -1473,6 +1481,7 @@ Typical setup: 4. Open the macOS app locally and connect in **Remote over SSH** mode (or direct tailnet) so it can register as a node. 5. Approve the node on the Gateway: + ```bash openclaw nodes pending openclaw nodes approve @@ -1610,10 +1619,12 @@ This sets your workspace and restricts who can trigger the bot. Minimal steps: 1. **Install + login on the VPS** + ```bash curl -fsSL https://tailscale.com/install.sh | sh sudo tailscale up ``` + 2. 
**Install + login on your Mac** - Use the Tailscale app and sign in to the same tailnet. 3. **Enable MagicDNS (recommended)** @@ -1640,6 +1651,7 @@ Recommended setup: 2. **Use the macOS app in Remote mode** (SSH target can be the tailnet hostname). The app will tunnel the Gateway port and connect as a node. 3. **Approve the node** on the gateway: + ```bash openclaw nodes pending openclaw nodes approve @@ -1702,9 +1714,11 @@ If the Gateway runs as a service (launchd/systemd), it won't inherit your shell environment. Fix by doing one of these: 1. Put the token in `~/.openclaw/.env`: + ``` COPILOT_GITHUB_TOKEN=... ``` + 2. Or enable shell import (`env.shellEnv.enabled: true`). 3. Or add it to your config `env` block (applies only if missing). @@ -1717,7 +1731,7 @@ openclaw models status Copilot tokens are read from `COPILOT_GITHUB_TOKEN` (also `GH_TOKEN` / `GITHUB_TOKEN`). See [/concepts/model-providers](/concepts/model-providers) and [/environment](/environment). -## Sessions & multiple chats +## Sessions and multiple chats ### How do I start a fresh conversation @@ -1801,6 +1815,7 @@ Use one of these: or `/compact ` to guide the summary. - **Reset** (fresh session ID for the same chat key): + ``` /new /reset @@ -2071,9 +2086,11 @@ Fix checklist: 3. Use the exact model id (case-sensitive): `minimax/MiniMax-M2.1` or `minimax/MiniMax-M2.1-lightning`. 4. Run: + ```bash openclaw models list ``` + and pick from the list (or `/model list` in chat). See [MiniMax](/providers/minimax) and [Models](/concepts/models). @@ -2238,9 +2255,11 @@ can't find it in its auth store. - **If you want to use an API key instead** - Put `ANTHROPIC_API_KEY` in `~/.openclaw/.env` on the **gateway host**. - Clear any pinned order that forces a missing profile: + ```bash openclaw models auth order clear --provider anthropic ``` + - **Confirm you're running commands on the gateway host** - In remote mode, auth profiles live on the gateway machine, not your laptop. 
@@ -2624,7 +2643,7 @@ you want a one-off, foreground run. Start the Gateway with `--verbose` to get more console detail. Then inspect the log file for channel auth, model routing, and RPC errors. -## Media & attachments +## Media and attachments ### My skill generated an imagePDF but nothing was sent diff --git a/docs/help/submitting-a-pr.md b/docs/help/submitting-a-pr.md index 2259a730f..73b0b69e3 100644 --- a/docs/help/submitting-a-pr.md +++ b/docs/help/submitting-a-pr.md @@ -3,212 +3,396 @@ summary: "How to submit a high signal PR" title: "Submitting a PR" --- -# Submitting a PR - -Good PRs make it easy for reviewers to understand intent, verify behavior, and land changes safely. This guide focuses on high-signal, low-noise submissions that work well with both human review and LLM-assisted review. +Good PRs are easy to review: reviewers should quickly know the intent, verify behavior, and land changes safely. This guide covers concise, high-signal submissions for human and LLM review. ## What makes a good PR -- [ ] Clear intent: explain the problem, why it matters, and what the change does. -- [ ] Tight scope: keep changes focused and avoid drive-by refactors. -- [ ] Behavior summary: call out user-visible changes, config changes, and defaults. -- [ ] Tests: list what ran, what was skipped, and why. -- [ ] Evidence: include logs, screenshots, or short recordings for UI or workflows. -- [ ] Code word: include “lobster-biscuit” somewhere in the PR description to confirm you read this guide. -- [ ] Baseline checks: run the relevant `pnpm` commands for this repo and fix failures before opening the PR. -- [ ] Due diligence: search the codebase for existing functionality and check GitHub for related issues or prior fixes. -- [ ] Grounded in reality: claims should be backed by evidence, reproduction, or direct observation. -- [ ] Title guidance: use a verb + scope + outcome (for example `Docs: add PR and issue templates`). 
+- [ ] Explain the problem, why it matters, and the change. +- [ ] Keep changes focused. Avoid broad refactors. +- [ ] Summarize user-visible/config/default changes. +- [ ] List test coverage, skips, and reasons. +- [ ] Add evidence: logs, screenshots, or recordings (UI/UX). +- [ ] Code word: put “lobster-biscuit” in the PR description if you read this guide. +- [ ] Run/fix relevant `pnpm` commands before creating PR. +- [ ] Search codebase and GitHub for related functionality/issues/fixes. +- [ ] Base claims on evidence or observation. +- [ ] Good title: verb + scope + outcome (e.g., `Docs: add PR and issue templates`). -Guideline: concision > grammar. Be terse if it makes review faster. +Be concise; concise review > grammar. Omit any non-applicable sections. -Baseline validation commands (run as appropriate for the change, and fix failures before submitting): +### Baseline validation commands (run/fix failures for your change) - `pnpm lint` - `pnpm check` - `pnpm build` - `pnpm test` -- If you touch protocol code: `pnpm protocol:check` +- Protocol changes: `pnpm protocol:check` ## Progressive disclosure -Use a short top section, then deeper details as needed. +- Top: summary/intent +- Next: changes/risks +- Next: test/verification +- Last: implementation/evidence -1. Summary and intent -2. Behavior changes and risks -3. Tests and verification -4. Implementation details and evidence +## Common PR types: specifics -This keeps review fast while preserving deep context for anyone who needs it. - -## Common PR types and expectations - -- [ ] Fix: include clear repro, root cause summary, and verification steps. -- [ ] Feature: include use cases, behavior changes, and screenshots or demos when UI is involved. -- [ ] Refactor: explicitly state “no behavior change” and list what moved or was simplified. -- [ ] Chore/Maintenance: note why it matters (build time, CI stability, dependency hygiene). -- [ ] Docs: include before/after context and link to the updated page. 
Run `pnpm format`. -- [ ] Test: explain the gap it covers and how it prevents regressions. -- [ ] Perf: include baseline and after metrics, plus how they were measured. -- [ ] UX/UI: include screenshots or short recordings and any accessibility impact. -- [ ] Infra/Build: call out affected environments and how to validate. -- [ ] Security: include threat or risk summary, repro steps, and verification plan. Avoid sensitive data in public logs. -- [ ] Security: keep reports grounded in reality; avoid speculative claims. +- [ ] Fix: Add repro, root cause, verification. +- [ ] Feature: Add use cases, behavior/demos/screenshots (UI). +- [ ] Refactor: State "no behavior change", list what moved/simplified. +- [ ] Chore: State why (e.g., build time, CI, dependencies). +- [ ] Docs: Before/after context, link updated page, run `pnpm format`. +- [ ] Test: What gap is covered; how it prevents regressions. +- [ ] Perf: Add before/after metrics, and how measured. +- [ ] UX/UI: Screenshots/video, note accessibility impact. +- [ ] Infra/Build: Environments/validation. +- [ ] Security: Summarize risk, repro, verification, no sensitive data. Grounded claims only. ## Checklist -- [ ] Problem and intent are clear -- [ ] Scope is focused -- [ ] Behavior changes are listed -- [ ] Tests are listed with results -- [ ] Evidence is attached when needed -- [ ] No secrets or private data -- [ ] Grounded in reality: no guesswork or invented context. +- [ ] Clear problem/intent +- [ ] Focused scope +- [ ] List behavior changes +- [ ] List tests and their results +- [ ] Manual test steps (when applicable) +- [ ] No secrets/private data +- [ ] Evidence-based -## Template +## General PR Template ```md -## Summary +#### Summary -## Behavior Changes +#### Behavior Changes -## Codebase and GitHub Search +#### Codebase and GitHub Search -## Tests +#### Tests -## Evidence +#### Manual Testing (omit if N/A) + +### Prerequisites + +- + +### Steps + +1. +2. 
+ +#### Evidence (omit if N/A) + +**Sign-Off** + +- Models used: +- Submitter effort (self-reported): +- Agent notes (optional, cite evidence): ``` -## Templates by PR type +## PR Type templates (replace with your type) ### Fix ```md -## Summary +#### Summary -## Repro Steps +#### Repro Steps -## Root Cause +#### Root Cause -## Behavior Changes +#### Behavior Changes -## Tests +#### Tests -## Evidence +#### Manual Testing (omit if N/A) + +### Prerequisites + +- + +### Steps + +1. +2. + +#### Evidence (omit if N/A) + +**Sign-Off** + +- Models used: +- Submitter effort: +- Agent notes: ``` ### Feature ```md -## Summary +#### Summary -## Use Cases +#### Use Cases -## Behavior Changes +#### Behavior Changes -## Existing Functionality Check +#### Existing Functionality Check -I searched the codebase for existing functionality before implementing this. +- [ ] I searched the codebase for existing functionality. + Searches performed (1-3 bullets): + - + - -## Tests +#### Tests -## Evidence +#### Manual Testing (omit if N/A) + +### Prerequisites + +- + +### Steps + +1. +2. + +#### Evidence (omit if N/A) + +**Sign-Off** + +- Models used: +- Submitter effort: +- Agent notes: ``` ### Refactor ```md -## Summary +#### Summary -## Scope +#### Scope -## No Behavior Change Statement +#### No Behavior Change Statement -## Tests +#### Tests + +#### Manual Testing (omit if N/A) + +### Prerequisites + +- + +### Steps + +1. +2. + +#### Evidence (omit if N/A) + +**Sign-Off** + +- Models used: +- Submitter effort: +- Agent notes: ``` ### Chore/Maintenance ```md -## Summary +#### Summary -## Why This Matters +#### Why This Matters -## Tests +#### Tests + +#### Manual Testing (omit if N/A) + +### Prerequisites + +- + +### Steps + +1. +2. 
+ +#### Evidence (omit if N/A) + +**Sign-Off** + +- Models used: +- Submitter effort: +- Agent notes: ``` ### Docs ```md -## Summary +#### Summary -## Pages Updated +#### Pages Updated -## Screenshots or Before/After +#### Before/After -## Formatting +#### Formatting pnpm format + +#### Evidence (omit if N/A) + +**Sign-Off** + +- Models used: +- Submitter effort: +- Agent notes: ``` ### Test ```md -## Summary +#### Summary -## Gap Covered +#### Gap Covered -## Tests +#### Tests + +#### Manual Testing (omit if N/A) + +### Prerequisites + +- + +### Steps + +1. +2. + +#### Evidence (omit if N/A) + +**Sign-Off** + +- Models used: +- Submitter effort: +- Agent notes: ``` ### Perf ```md -## Summary +#### Summary -## Baseline +#### Baseline -## After +#### After -## Measurement Method +#### Measurement Method -## Tests +#### Tests + +#### Manual Testing (omit if N/A) + +### Prerequisites + +- + +### Steps + +1. +2. + +#### Evidence (omit if N/A) + +**Sign-Off** + +- Models used: +- Submitter effort: +- Agent notes: ``` ### UX/UI ```md -## Summary +#### Summary -## Screenshots or Video +#### Screenshots or Video -## Accessibility Impact +#### Accessibility Impact -## Tests +#### Tests + +#### Manual Testing + +### Prerequisites + +- + +### Steps + +1. +2. **Sign-Off** + +- Models used: +- Submitter effort: +- Agent notes: ``` ### Infra/Build ```md -## Summary +#### Summary -## Environments Affected +#### Environments Affected -## Validation Steps +#### Validation Steps + +#### Manual Testing (omit if N/A) + +### Prerequisites + +- + +### Steps + +1. +2. + +#### Evidence (omit if N/A) + +**Sign-Off** + +- Models used: +- Submitter effort: +- Agent notes: ``` ### Security ```md -## Summary +#### Summary -## Risk Summary +#### Risk Summary -## Repro Steps +#### Repro Steps -## Mitigation or Fix +#### Mitigation or Fix -## Verification +#### Verification -## Tests +#### Tests + +#### Manual Testing (omit if N/A) + +### Prerequisites + +- + +### Steps + +1. +2. 
+ +#### Evidence (omit if N/A) + +**Sign-Off** + +- Models used: +- Submitter effort: +- Agent notes: ``` diff --git a/docs/help/submitting-an-issue.md b/docs/help/submitting-an-issue.md index a91a4678b..5aa844445 100644 --- a/docs/help/submitting-an-issue.md +++ b/docs/help/submitting-an-issue.md @@ -1,165 +1,152 @@ --- -summary: "How to file high signal issues and bug reports" +summary: "Filing high-signal issues and bug reports" title: "Submitting an Issue" --- -# Submitting an Issue +## Submitting an Issue -Good issues make it easy to reproduce, diagnose, and fix problems quickly. This guide covers what to include for bugs, regressions, and feature gaps. +Clear, concise issues speed up diagnosis and fixes. Include the following for bugs, regressions, or feature gaps: -## What makes a good issue +### What to include -- [ ] Clear title: include the area and the symptom. -- [ ] Repro steps: minimal steps that consistently reproduce the issue. -- [ ] Expected vs actual: what you thought would happen and what did. -- [ ] Impact: who is affected and how severe the problem is. -- [ ] Environment: OS, runtime, versions, and relevant config. -- [ ] Evidence: logs, screenshots, or recordings (redacted; prefer non-PII data). -- [ ] Scope: note if it is new, regression, or long-standing. -- [ ] Code word: include “lobster-biscuit” somewhere in the issue description to confirm you read this guide. -- [ ] Due diligence: search the codebase for existing functionality and check GitHub to see if the issue is already filed or fixed. -- [ ] I searched for existing and recently closed issues/PRs. -- [ ] For security reports: confirmed it has not already been fixed or addressed recently. -- [ ] Grounded in reality: claims should be backed by evidence, reproduction, or direct observation. 
+- [ ] Title: area & symptom +- [ ] Minimal repro steps +- [ ] Expected vs actual +- [ ] Impact & severity +- [ ] Environment: OS, runtime, versions, config +- [ ] Evidence: redacted logs, screenshots (non-PII) +- [ ] Scope: new, regression, or longstanding +- [ ] Code word: lobster-biscuit in your issue +- [ ] Searched codebase & GitHub for existing issue +- [ ] Confirmed not recently fixed/addressed (esp. security) +- [ ] Claims backed by evidence or repro -Guideline: concision > grammar. Be terse if it makes review faster. +Be brief. Terseness > perfect grammar. -Baseline validation commands (run as appropriate for the change, and fix failures before submitting a PR): +Validation (run/fix before PR): - `pnpm lint` - `pnpm check` - `pnpm build` - `pnpm test` -- If you touch protocol code: `pnpm protocol:check` +- If protocol code: `pnpm protocol:check` -## Templates +### Templates -### Bug report +#### Bug report ```md -## Bug report checklist - -- [ ] Minimal repro steps +- [ ] Minimal repro - [ ] Expected vs actual -- [ ] Versions and environment -- [ ] Affected channels and where it does not reproduce -- [ ] Logs or screenshots -- [ ] Evidence is redacted and non-PII where possible -- [ ] Impact and severity -- [ ] Any known workarounds +- [ ] Environment +- [ ] Affected channels, where not seen +- [ ] Logs/screenshots (redacted) +- [ ] Impact/severity +- [ ] Workarounds -## Summary +### Summary -## Repro Steps +### Repro Steps -## Expected +### Expected -## Actual +### Actual -## Environment +### Environment -## Logs or Evidence +### Logs/Evidence -## Impact +### Impact -## Workarounds +### Workarounds ``` -### Security issue +#### Security issue ```md -## Summary +### Summary -## Impact +### Impact -## Affected Versions +### Versions -## Repro Steps (if safe to share) +### Repro Steps (safe to share) -## Mitigation or Workaround +### Mitigation/workaround -## Evidence (redacted) +### Evidence (redacted) ``` -Security note: avoid posting secrets or exploit 
details in public issues. If the report is sensitive, keep repro details minimal and ask for a private disclosure path. +_Avoid secrets/exploit details in public. For sensitive issues, minimize detail and request private disclosure._ -### Regression report +#### Regression report ```md -## Summary +### Summary -## Last Known Good +### Last Known Good -## First Known Bad +### First Known Bad -## Repro Steps +### Repro Steps -## Expected +### Expected -## Actual +### Actual -## Environment +### Environment -## Logs or Evidence +### Logs/Evidence -## Impact +### Impact ``` -### Feature request +#### Feature request ```md -## Summary +### Summary -## Problem +### Problem -## Proposed Solution +### Proposed Solution -## Alternatives Considered +### Alternatives -## Impact +### Impact -## Evidence or Examples +### Evidence/examples ``` -### Enhancement request +#### Enhancement ```md -## Summary +### Summary -## Current Behavior +### Current vs Desired Behavior -## Desired Behavior +### Rationale -## Why This Matters +### Alternatives -## Alternatives Considered - -## Evidence or Examples +### Evidence/examples ``` -### Investigation request +#### Investigation ```md -## Summary +### Summary -## Symptoms +### Symptoms -## What Was Tried +### What Was Tried -## Environment +### Environment -## Logs or Evidence +### Logs/Evidence -## Impact +### Impact ``` -## If you are submitting a fix PR +### Submitting a fix PR -Creating a separate issue first is optional. If you skip it, include the relevant details in the PR description. - -- Keep the PR focused on the issue. -- Include the issue number in the PR description. -- Add tests when possible, or explain why they are not feasible. -- Note any behavior changes and risks. -- Include redacted logs, screenshots, or videos that validate the fix. -- Run relevant `pnpm` validation commands and report results when appropriate. +Issue before PR is optional. Include details in PR if skipping. 
Keep the PR focused, note issue number, add tests or explain absence, document behavior changes/risks, include redacted logs/screenshots as proof, and run proper validation before submitting. diff --git a/docs/help/troubleshooting.md b/docs/help/troubleshooting.md index 03896a916..2b201c5e9 100644 --- a/docs/help/troubleshooting.md +++ b/docs/help/troubleshooting.md @@ -65,7 +65,7 @@ You can also set `OPENCLAW_VERBOSE=1` instead of the flag. Some Comcast/Xfinity connections block `docs.openclaw.ai` via Xfinity Advanced Security. Disable Advanced Security or add `docs.openclaw.ai` to the allowlist, then retry. -- Xfinity Advanced Security help: https://www.xfinity.com/support/articles/using-xfinity-xfi-advanced-security +- Xfinity Advanced Security help: [https://www.xfinity.com/support/articles/using-xfinity-xfi-advanced-security](https://www.xfinity.com/support/articles/using-xfinity-xfi-advanced-security) - Quick sanity checks: try a mobile hotspot or VPN to confirm it’s ISP-level filtering ### Service says running, but RPC probe fails diff --git a/docs/hooks.md b/docs/hooks.md index 4aa6e6e3a..dfcd61ca1 100644 --- a/docs/hooks.md +++ b/docs/hooks.md @@ -444,7 +444,7 @@ openclaw hooks enable session-memory openclaw hooks disable command-logger ``` -## Bundled Hooks +## Bundled hook reference ### session-memory @@ -787,6 +787,7 @@ Session reset ``` 3. List all discovered hooks: + ```bash openclaw hooks list ``` @@ -818,6 +819,7 @@ Look for missing: 2. Restart your gateway process so hooks reload. 3. Check gateway logs for errors: + ```bash ./scripts/clawlog.sh | grep hook ``` @@ -892,6 +894,7 @@ node -e "import('./path/to/handler.ts').then(console.log)" ``` 4. Verify and restart your gateway process: + ```bash openclaw hooks list # Should show: 🎯 my-hook ✓ diff --git a/docs/index.md b/docs/index.md index 651f98440..60c59bb7f 100644 --- a/docs/index.md +++ b/docs/index.md @@ -120,7 +120,7 @@ Need the full install and dev setup? See [Quick start](/start/quickstart). 
Open the browser Control UI after the Gateway starts. -- Local default: http://127.0.0.1:18789/ +- Local default: [http://127.0.0.1:18789/](http://127.0.0.1:18789/) - Remote access: [Web surfaces](/web) and [Tailscale](/gateway/tailscale)

diff --git a/docs/install/gcp.md b/docs/install/gcp.md index 172a32ca8..6026fd87d 100644 --- a/docs/install/gcp.md +++ b/docs/install/gcp.md @@ -69,7 +69,7 @@ For the generic Docker flow, see [Docker](/install/docker). **Option A: gcloud CLI** (recommended for automation) -Install from https://cloud.google.com/sdk/docs/install +Install from [https://cloud.google.com/sdk/docs/install](https://cloud.google.com/sdk/docs/install) Initialize and authenticate: @@ -80,7 +80,7 @@ gcloud auth login **Option B: Cloud Console** -All steps can be done via the web UI at https://console.cloud.google.com +All steps can be done via the web UI at [https://console.cloud.google.com](https://console.cloud.google.com) --- @@ -93,7 +93,7 @@ gcloud projects create my-openclaw-project --name="OpenClaw Gateway" gcloud config set project my-openclaw-project ``` -Enable billing at https://console.cloud.google.com/billing (required for Compute Engine). +Enable billing at [https://console.cloud.google.com/billing](https://console.cloud.google.com/billing) (required for Compute Engine). Enable the Compute Engine API: @@ -484,6 +484,7 @@ For automation or CI/CD pipelines, create a dedicated service account with minim ``` 2. Grant Compute Instance Admin role (or narrower custom role): + ```bash gcloud projects add-iam-policy-binding my-openclaw-project \ --member="serviceAccount:openclaw-deploy@my-openclaw-project.iam.gserviceaccount.com" \ @@ -492,7 +493,7 @@ For automation or CI/CD pipelines, create a dedicated service account with minim Avoid using the Owner role for automation. Use the principle of least privilege. -See https://cloud.google.com/iam/docs/understanding-roles for IAM role details. +See [https://cloud.google.com/iam/docs/understanding-roles](https://cloud.google.com/iam/docs/understanding-roles) for IAM role details. 
--- diff --git a/docs/install/index.md b/docs/install/index.md index 70e66d73a..8cf14ccb3 100644 --- a/docs/install/index.md +++ b/docs/install/index.md @@ -1,164 +1,172 @@ --- -summary: "Install OpenClaw (recommended installer, global install, or from source)" +summary: "Install OpenClaw — installer script, npm/pnpm, from source, Docker, and more" read_when: - - Installing OpenClaw - - You want to install from GitHub -title: "Install Overview" + - You need an install method other than the Getting Started quickstart + - You want to deploy to a cloud platform + - You need to update, migrate, or uninstall +title: "Install" --- -# Install Overview +# Install -Use the installer unless you have a reason not to. It sets up the CLI and runs onboarding. - -## Quick install (recommended) - -```bash -curl -fsSL https://openclaw.ai/install.sh | bash -``` - -Windows (PowerShell): - -```powershell -iwr -useb https://openclaw.ai/install.ps1 | iex -``` - -Next step (if you skipped onboarding): - -```bash -openclaw onboard --install-daemon -``` +Already followed [Getting Started](/start/getting-started)? You're all set — this page is for alternative install methods, platform-specific instructions, and maintenance. ## System requirements -- **Node >=22** -- macOS, Linux, or Windows via WSL2 +- **[Node 22+](/install/node)** (the [installer script](#install-methods) will install it if missing) +- macOS, Linux, or Windows - `pnpm` only if you build from source -## Choose your install path + +On Windows, we strongly recommend running OpenClaw under [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install). + -### 1) Installer script (recommended) +## Install methods -Installs `openclaw` globally via npm and runs onboarding. + +The **installer script** is the recommended way to install OpenClaw. It handles Node detection, installation, and onboarding in one step. 
+ -```bash -curl -fsSL https://openclaw.ai/install.sh | bash -``` + + + Downloads the CLI, installs it globally via npm, and launches the onboarding wizard. -Installer flags: + + + ```bash + curl -fsSL https://openclaw.ai/install.sh | bash + ``` + + + ```powershell + iwr -useb https://openclaw.ai/install.ps1 | iex + ``` + + -```bash -curl -fsSL https://openclaw.ai/install.sh | bash -s -- --help -``` + That's it — the script handles Node detection, installation, and onboarding. -Details: [Installer internals](/install/installer). + To skip onboarding and just install the binary: -Non-interactive (skip onboarding): + + + ```bash + curl -fsSL https://openclaw.ai/install.sh | bash -s -- --no-onboard + ``` + + + ```powershell + & ([scriptblock]::Create((iwr -useb https://openclaw.ai/install.ps1))) -NoOnboard + ``` + + -```bash -curl -fsSL https://openclaw.ai/install.sh | bash -s -- --no-onboard -``` + For all flags, env vars, and CI/automation options, see [Installer internals](/install/installer). -### 2) Global install (manual) + -If you already have Node: + + If you already have Node 22+ and prefer to manage the install yourself: -```bash -npm install -g openclaw@latest -``` + + + ```bash + npm install -g openclaw@latest + openclaw onboard --install-daemon + ``` -If you have libvips installed globally (common on macOS via Homebrew) and `sharp` fails to install, force prebuilt binaries: + + If you have libvips installed globally (common on macOS via Homebrew) and `sharp` fails, force prebuilt binaries: -```bash -SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install -g openclaw@latest -``` + ```bash + SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install -g openclaw@latest + ``` -If you see `sharp: Please add node-gyp to your dependencies`, either install build tooling (macOS: Xcode CLT + `npm install -g node-gyp`) or use the `SHARP_IGNORE_GLOBAL_LIBVIPS=1` workaround above to skip the native build. 
+ If you see `sharp: Please add node-gyp to your dependencies`, either install build tooling (macOS: Xcode CLT + `npm install -g node-gyp`) or use the env var above. + + + + ```bash + pnpm add -g openclaw@latest + pnpm approve-builds -g # approve openclaw, node-llama-cpp, sharp, etc. + openclaw onboard --install-daemon + ``` -Or with pnpm: + + pnpm requires explicit approval for packages with build scripts. After the first install shows the "Ignored build scripts" warning, run `pnpm approve-builds -g` and select the listed packages. + + + -```bash -pnpm add -g openclaw@latest -pnpm approve-builds -g # approve openclaw, node-llama-cpp, sharp, etc. -``` + -pnpm requires explicit approval for packages with build scripts. After the first install shows the "Ignored build scripts" warning, run `pnpm approve-builds -g` and select the listed packages. + + For contributors or anyone who wants to run from a local checkout. -Then: + + + Clone the [OpenClaw repo](https://github.com/openclaw/openclaw) and build: -```bash -openclaw onboard --install-daemon -``` + ```bash + git clone https://github.com/openclaw/openclaw.git + cd openclaw + pnpm install + pnpm ui:build + pnpm build + ``` + + + Make the `openclaw` command available globally: -### 3) From source (contributors/dev) + ```bash + pnpm link --global + ``` -```bash -git clone https://github.com/openclaw/openclaw.git -cd openclaw -pnpm install -pnpm ui:build # auto-installs UI deps on first run -pnpm build -openclaw onboard --install-daemon -``` + Alternatively, skip the link and run commands via `pnpm openclaw ...` from inside the repo. + + + ```bash + openclaw onboard --install-daemon + ``` + + -Tip: if you don’t have a global install yet, run repo commands via `pnpm openclaw ...`. + For deeper development workflows, see [Setup](/start/setup). -For deeper development workflows, see [Setup](/start/setup). 
+ + -### 4) Other install options +## Other install methods -- Docker: [Docker](/install/docker) -- Nix: [Nix](/install/nix) -- Ansible: [Ansible](/install/ansible) -- Bun (CLI only): [Bun](/install/bun) + + + Containerized or headless deployments. + + + Declarative install via Nix. + + + Automated fleet provisioning. + + + CLI-only usage via the Bun runtime. + + ## After install -- Run onboarding: `openclaw onboard --install-daemon` -- Quick check: `openclaw doctor` -- Check gateway health: `openclaw status` + `openclaw health` -- Open the dashboard: `openclaw dashboard` - -## Install method: npm vs git (installer) - -The installer supports two methods: - -- `npm` (default): `npm install -g openclaw@latest` -- `git`: clone/build from GitHub and run from a source checkout - -### CLI flags +Verify everything is working: ```bash -# Explicit npm -curl -fsSL https://openclaw.ai/install.sh | bash -s -- --install-method npm - -# Install from GitHub (source checkout) -curl -fsSL https://openclaw.ai/install.sh | bash -s -- --install-method git +openclaw doctor # check for config issues +openclaw status # gateway status +openclaw dashboard # open the browser UI ``` -Common flags: +## Troubleshooting: `openclaw` not found -- `--install-method npm|git` -- `--git-dir ` (default: `~/openclaw`) -- `--no-git-update` (skip `git pull` when using an existing checkout) -- `--no-prompt` (disable prompts; required in CI/automation) -- `--dry-run` (print what would happen; make no changes) -- `--no-onboard` (skip onboarding) - -### Environment variables - -Equivalent env vars (useful for automation): - -- `OPENCLAW_INSTALL_METHOD=git|npm` -- `OPENCLAW_GIT_DIR=...` -- `OPENCLAW_GIT_UPDATE=0|1` -- `OPENCLAW_NO_PROMPT=1` -- `OPENCLAW_DRY_RUN=1` -- `OPENCLAW_NO_ONBOARD=1` -- `SHARP_IGNORE_GLOBAL_LIBVIPS=0|1` (default: `1`; avoids `sharp` building against system libvips) - -## Troubleshooting: `openclaw` not found (PATH) - -Quick diagnosis: + + Quick diagnosis: ```bash node -v @@ -167,21 
+175,29 @@ npm prefix -g echo "$PATH" ``` -If `$(npm prefix -g)/bin` (macOS/Linux) or `$(npm prefix -g)` (Windows) is **not** present inside `echo "$PATH"`, your shell can’t find global npm binaries (including `openclaw`). +If `$(npm prefix -g)/bin` (macOS/Linux) or `$(npm prefix -g)` (Windows) is **not** in your `$PATH`, your shell can't find global npm binaries (including `openclaw`). -Fix: add it to your shell startup file (zsh: `~/.zshrc`, bash: `~/.bashrc`): +Fix — add it to your shell startup file (`~/.zshrc` or `~/.bashrc`): ```bash -# macOS / Linux export PATH="$(npm prefix -g)/bin:$PATH" ``` On Windows, add the output of `npm prefix -g` to your PATH. Then open a new terminal (or `rehash` in zsh / `hash -r` in bash). + ## Update / uninstall -- Updates: [Updating](/install/updating) -- Migrate to a new machine: [Migrating](/install/migrating) -- Uninstall: [Uninstall](/install/uninstall) + + + Keep OpenClaw up to date. + + + Move to a new machine. + + + Remove OpenClaw completely. + + diff --git a/docs/install/installer.md b/docs/install/installer.md index 99c265cd6..18d96329b 100644 --- a/docs/install/installer.md +++ b/docs/install/installer.md @@ -1,5 +1,5 @@ --- -summary: "How the installer scripts work (install.sh + install-cli.sh), flags, and automation" +summary: "How the installer scripts work (install.sh, install-cli.sh, install.ps1), flags, and automation" read_when: - You want to understand `openclaw.ai/install.sh` - You want to automate installs (CI / headless) @@ -9,115 +9,377 @@ title: "Installer Internals" # Installer internals -OpenClaw ships two installer scripts (served from `openclaw.ai`): +OpenClaw ships three installer scripts, served from `openclaw.ai`. 
-- `https://openclaw.ai/install.sh` — “recommended” installer (global npm install by default; can also install from a GitHub checkout) -- `https://openclaw.ai/install-cli.sh` — non-root-friendly CLI installer (installs into a prefix with its own Node) -- `https://openclaw.ai/install.ps1` — Windows PowerShell installer (npm by default; optional git install) +| Script | Platform | What it does | +| ---------------------------------- | -------------------- | -------------------------------------------------------------------------------------------- | +| [`install.sh`](#installsh) | macOS / Linux / WSL | Installs Node if needed, installs OpenClaw via npm (default) or git, and can run onboarding. | +| [`install-cli.sh`](#install-clish) | macOS / Linux / WSL | Installs Node + OpenClaw into a local prefix (`~/.openclaw`). No root required. | +| [`install.ps1`](#installps1) | Windows (PowerShell) | Installs Node if needed, installs OpenClaw via npm (default) or git, and can run onboarding. | -To see the current flags/behavior, run: +## Quick commands -```bash -curl -fsSL https://openclaw.ai/install.sh | bash -s -- --help -``` + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash + ``` -Windows (PowerShell) help: + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --help + ``` -```powershell -& ([scriptblock]::Create((iwr -useb https://openclaw.ai/install.ps1))) -? -``` + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install-cli.sh | bash + ``` -If the installer completes but `openclaw` is not found in a new terminal, it’s usually a Node/npm PATH issue. See: [Install](/install#nodejs--npm-path-sanity). 
+ ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install-cli.sh | bash -s -- --help + ``` -## install.sh (recommended) + + + ```powershell + iwr -useb https://openclaw.ai/install.ps1 | iex + ``` -What it does (high level): + ```powershell + & ([scriptblock]::Create((iwr -useb https://openclaw.ai/install.ps1))) -Tag beta -NoOnboard -DryRun + ``` -- Detect OS (macOS / Linux / WSL). -- Ensure Node.js **22+** (macOS via Homebrew; Linux via NodeSource). -- Choose install method: - - `npm` (default): `npm install -g openclaw@latest` - - `git`: clone/build a source checkout and install a wrapper script -- On Linux: avoid global npm permission errors by switching npm's prefix to `~/.npm-global` when needed. -- If upgrading an existing install: runs `openclaw doctor --non-interactive` (best effort). -- For git installs: runs `openclaw doctor --non-interactive` after install/update (best effort). -- Mitigates `sharp` native install gotchas by defaulting `SHARP_IGNORE_GLOBAL_LIBVIPS=1` (avoids building against system libvips). + + -If you _want_ `sharp` to link against a globally-installed libvips (or you’re debugging), set: + +If install succeeds but `openclaw` is not found in a new terminal, see [Node.js troubleshooting](/install/node#troubleshooting). + -```bash -SHARP_IGNORE_GLOBAL_LIBVIPS=0 curl -fsSL https://openclaw.ai/install.sh | bash -``` +--- -### Discoverability / “git install” prompt +## install.sh -If you run the installer while **already inside a OpenClaw source checkout** (detected via `package.json` + `pnpm-workspace.yaml`), it prompts: + +Recommended for most interactive installs on macOS/Linux/WSL. + -- update and use this checkout (`git`) -- or migrate to the global npm install (`npm`) +### Flow (install.sh) -In non-interactive contexts (no TTY / `--no-prompt`), you must pass `--install-method git|npm` (or set `OPENCLAW_INSTALL_METHOD`), otherwise the script exits with code `2`. + + + Supports macOS and Linux (including WSL). 
If macOS is detected, installs Homebrew if missing. + + + Checks Node version and installs Node 22 if needed (Homebrew on macOS, NodeSource setup scripts on Linux apt/dnf/yum). + + + Installs Git if missing. + + + - `npm` method (default): global npm install + - `git` method: clone/update repo, install deps with pnpm, build, then install wrapper at `~/.local/bin/openclaw` + + + - Runs `openclaw doctor --non-interactive` on upgrades and git installs (best effort) + - Attempts onboarding when appropriate (TTY available, onboarding not disabled, and bootstrap/config checks pass) + - Defaults `SHARP_IGNORE_GLOBAL_LIBVIPS=1` + + -### Why Git is needed +### Source checkout detection -Git is required for the `--install-method git` path (clone / pull). +If run inside an OpenClaw checkout (`package.json` + `pnpm-workspace.yaml`), the script offers: -For `npm` installs, Git is _usually_ not required, but some environments still end up needing it (e.g. when a package or dependency is fetched via a git URL). The installer currently ensures Git is present to avoid `spawn git ENOENT` surprises on fresh distros. +- use checkout (`git`), or +- use global install (`npm`) -### Why npm hits `EACCES` on fresh Linux +If no TTY is available and no install method is set, it defaults to `npm` and warns. -On some Linux setups (especially after installing Node via the system package manager or NodeSource), npm's global prefix points at a root-owned location. Then `npm install -g ...` fails with `EACCES` / `mkdir` permission errors. +The script exits with code `2` for invalid method selection or invalid `--install-method` values. 
-`install.sh` mitigates this by switching the prefix to: +### Examples (install.sh) -- `~/.npm-global` (and adding it to `PATH` in `~/.bashrc` / `~/.zshrc` when present) + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash + ``` + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --no-onboard + ``` + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --install-method git + ``` + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --dry-run + ``` + + -## install-cli.sh (non-root CLI installer) + + -This script installs `openclaw` into a prefix (default: `~/.openclaw`) and also installs a dedicated Node runtime under that prefix, so it can work on machines where you don’t want to touch the system Node/npm. +| Flag | Description | +| ------------------------------- | ---------------------------------------------------------- | +| `--install-method npm\|git` | Choose install method (default: `npm`). Alias: `--method` | +| `--npm` | Shortcut for npm method | +| `--git` | Shortcut for git method. Alias: `--github` | +| `--version ` | npm version or dist-tag (default: `latest`) | +| `--beta` | Use beta dist-tag if available, else fallback to `latest` | +| `--git-dir ` | Checkout directory (default: `~/openclaw`). 
Alias: `--dir` | +| `--no-git-update` | Skip `git pull` for existing checkout | +| `--no-prompt` | Disable prompts | +| `--no-onboard` | Skip onboarding | +| `--onboard` | Enable onboarding | +| `--dry-run` | Print actions without applying changes | +| `--verbose` | Enable debug output (`set -x`, npm notice-level logs) | +| `--help` | Show usage (`-h`) | -Help: + -```bash -curl -fsSL https://openclaw.ai/install-cli.sh | bash -s -- --help -``` + -## install.ps1 (Windows PowerShell) +| Variable | Description | +| ------------------------------------------- | --------------------------------------------- | +| `OPENCLAW_INSTALL_METHOD=git\|npm` | Install method | +| `OPENCLAW_VERSION=latest\|next\|` | npm version or dist-tag | +| `OPENCLAW_BETA=0\|1` | Use beta if available | +| `OPENCLAW_GIT_DIR=` | Checkout directory | +| `OPENCLAW_GIT_UPDATE=0\|1` | Toggle git updates | +| `OPENCLAW_NO_PROMPT=1` | Disable prompts | +| `OPENCLAW_NO_ONBOARD=1` | Skip onboarding | +| `OPENCLAW_DRY_RUN=1` | Dry run mode | +| `OPENCLAW_VERBOSE=1` | Debug mode | +| `OPENCLAW_NPM_LOGLEVEL=error\|warn\|notice` | npm log level | +| `SHARP_IGNORE_GLOBAL_LIBVIPS=0\|1` | Control sharp/libvips behavior (default: `1`) | -What it does (high level): + + -- Ensure Node.js **22+** (winget/Chocolatey/Scoop or manual). -- Choose install method: - - `npm` (default): `npm install -g openclaw@latest` - - `git`: clone/build a source checkout and install a wrapper script -- Runs `openclaw doctor --non-interactive` on upgrades and git installs (best effort). +--- -Examples: +## install-cli.sh -```powershell -iwr -useb https://openclaw.ai/install.ps1 | iex -``` + +Designed for environments where you want everything under a local prefix (default `~/.openclaw`) and no system Node dependency. 
+ -```powershell -iwr -useb https://openclaw.ai/install.ps1 | iex -InstallMethod git -``` +### Flow (install-cli.sh) -```powershell -iwr -useb https://openclaw.ai/install.ps1 | iex -InstallMethod git -GitDir "C:\\openclaw" -``` + + + Downloads Node tarball (default `22.22.0`) to `/tools/node-v` and verifies SHA-256. + + + If Git is missing, attempts install via apt/dnf/yum on Linux or Homebrew on macOS. + + + Installs with npm using `--prefix `, then writes wrapper to `/bin/openclaw`. + + -Environment variables: +### Examples (install-cli.sh) -- `OPENCLAW_INSTALL_METHOD=git|npm` -- `OPENCLAW_GIT_DIR=...` + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install-cli.sh | bash + ``` + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install-cli.sh | bash -s -- --prefix /opt/openclaw --version latest + ``` + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install-cli.sh | bash -s -- --json --prefix /opt/openclaw + ``` + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install-cli.sh | bash -s -- --onboard + ``` + + -Git requirement: + + -If you choose `-InstallMethod git` and Git is missing, the installer will print the -Git for Windows link (`https://git-scm.com/download/win`) and exit. 
+| Flag | Description | +| ---------------------- | ------------------------------------------------------------------------------- | +| `--prefix ` | Install prefix (default: `~/.openclaw`) | +| `--version ` | OpenClaw version or dist-tag (default: `latest`) | +| `--node-version ` | Node version (default: `22.22.0`) | +| `--json` | Emit NDJSON events | +| `--onboard` | Run `openclaw onboard` after install | +| `--no-onboard` | Skip onboarding (default) | +| `--set-npm-prefix` | On Linux, force npm prefix to `~/.npm-global` if current prefix is not writable | +| `--help` | Show usage (`-h`) | -Common Windows issues: + -- **npm error spawn git / ENOENT**: install Git for Windows and reopen PowerShell, then rerun the installer. -- **"openclaw" is not recognized**: your npm global bin folder is not on PATH. Most systems use - `%AppData%\\npm`. You can also run `npm config get prefix` and add `\\bin` to PATH, then reopen PowerShell. + + +| Variable | Description | +| ------------------------------------------- | --------------------------------------------------------------------------------- | +| `OPENCLAW_PREFIX=` | Install prefix | +| `OPENCLAW_VERSION=` | OpenClaw version or dist-tag | +| `OPENCLAW_NODE_VERSION=` | Node version | +| `OPENCLAW_NO_ONBOARD=1` | Skip onboarding | +| `OPENCLAW_NPM_LOGLEVEL=error\|warn\|notice` | npm log level | +| `OPENCLAW_GIT_DIR=` | Legacy cleanup lookup path (used when removing old `Peekaboo` submodule checkout) | +| `SHARP_IGNORE_GLOBAL_LIBVIPS=0\|1` | Control sharp/libvips behavior (default: `1`) | + + + + +--- + +## install.ps1 + +### Flow (install.ps1) + + + + Requires PowerShell 5+. + + + If missing, attempts install via winget, then Chocolatey, then Scoop. 
+ + + - `npm` method (default): global npm install using selected `-Tag` + - `git` method: clone/update repo, install/build with pnpm, and install wrapper at `%USERPROFILE%\.local\bin\openclaw.cmd` + + + Adds needed bin directory to user PATH when possible, then runs `openclaw doctor --non-interactive` on upgrades and git installs (best effort). + + + +### Examples (install.ps1) + + + + ```powershell + iwr -useb https://openclaw.ai/install.ps1 | iex + ``` + + + ```powershell + & ([scriptblock]::Create((iwr -useb https://openclaw.ai/install.ps1))) -InstallMethod git + ``` + + + ```powershell + & ([scriptblock]::Create((iwr -useb https://openclaw.ai/install.ps1))) -InstallMethod git -GitDir "C:\openclaw" + ``` + + + ```powershell + & ([scriptblock]::Create((iwr -useb https://openclaw.ai/install.ps1))) -DryRun + ``` + + + + + + +| Flag | Description | +| ------------------------- | ------------------------------------------------------ | +| `-InstallMethod npm\|git` | Install method (default: `npm`) | +| `-Tag ` | npm dist-tag (default: `latest`) | +| `-GitDir ` | Checkout directory (default: `%USERPROFILE%\openclaw`) | +| `-NoOnboard` | Skip onboarding | +| `-NoGitUpdate` | Skip `git pull` | +| `-DryRun` | Print actions only | + + + + + +| Variable | Description | +| ---------------------------------- | ------------------ | +| `OPENCLAW_INSTALL_METHOD=git\|npm` | Install method | +| `OPENCLAW_GIT_DIR=` | Checkout directory | +| `OPENCLAW_NO_ONBOARD=1` | Skip onboarding | +| `OPENCLAW_GIT_UPDATE=0` | Disable git pull | +| `OPENCLAW_DRY_RUN=1` | Dry run mode | + + + + + +If `-InstallMethod git` is used and Git is missing, the script exits and prints the Git for Windows link. + + +--- + +## CI and automation + +Use non-interactive flags/env vars for predictable runs. 
+ + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash -s -- --no-prompt --no-onboard + ``` + + + ```bash + OPENCLAW_INSTALL_METHOD=git OPENCLAW_NO_PROMPT=1 \ + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash + ``` + + + ```bash + curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install-cli.sh | bash -s -- --json --prefix /opt/openclaw + ``` + + + ```powershell + & ([scriptblock]::Create((iwr -useb https://openclaw.ai/install.ps1))) -NoOnboard + ``` + + + +--- + +## Troubleshooting + + + + Git is required for `git` install method. For `npm` installs, Git is still checked/installed to avoid `spawn git ENOENT` failures when dependencies use git URLs. + + + + Some Linux setups point npm global prefix to root-owned paths. `install.sh` can switch prefix to `~/.npm-global` and append PATH exports to shell rc files (when those files exist). + + + + The scripts default `SHARP_IGNORE_GLOBAL_LIBVIPS=1` to avoid sharp building against system libvips. To override: + + ```bash + SHARP_IGNORE_GLOBAL_LIBVIPS=0 curl -fsSL --proto '=https' --tlsv1.2 https://openclaw.ai/install.sh | bash + ``` + + + + + Install Git for Windows, reopen PowerShell, rerun installer. + + + + Run `npm config get prefix`, append `\bin`, add that directory to user PATH, then reopen PowerShell. + + + + Usually a PATH issue. See [Node.js troubleshooting](/install/node#troubleshooting). 
+ + diff --git a/docs/install/node.md b/docs/install/node.md index 00327b2cb..8c57fde4f 100644 --- a/docs/install/node.md +++ b/docs/install/node.md @@ -1,58 +1,133 @@ --- -title: "Node.js + npm (PATH sanity)" -summary: "Node.js + npm install sanity: versions, PATH, and global installs" +title: "Node.js" +summary: "Install and configure Node.js for OpenClaw — version requirements, install options, and PATH troubleshooting" read_when: - - "You installed OpenClaw but `openclaw` is “command not found”" - - "You’re setting up Node.js/npm on a new machine" - - "npm install -g ... fails with permissions or PATH issues" + - "You need to install Node.js before installing OpenClaw" + - "You installed OpenClaw but `openclaw` is command not found" + - "npm install -g fails with permissions or PATH issues" --- -# Node.js + npm (PATH sanity) +# Node.js -OpenClaw’s runtime baseline is **Node 22+**. +OpenClaw requires **Node 22 or newer**. The [installer script](/install#install-methods) will detect and install Node automatically — this page is for when you want to set up Node yourself and make sure everything is wired up correctly (versions, PATH, global installs). -If you can run `npm install -g openclaw@latest` but later see `openclaw: command not found`, it’s almost always a **PATH** issue: the directory where npm puts global binaries isn’t on your shell’s PATH. - -## Quick diagnosis - -Run: +## Check your version ```bash node -v -npm -v -npm prefix -g -echo "$PATH" ``` -If `$(npm prefix -g)/bin` (macOS/Linux) or `$(npm prefix -g)` (Windows) is **not** present inside `echo "$PATH"`, your shell can’t find global npm binaries (including `openclaw`). +If this prints `v22.x.x` or higher, you're good. If Node isn't installed or the version is too old, pick an install method below. -## Fix: put npm’s global bin dir on PATH +## Install Node -1. 
Find your global npm prefix: + + + **Homebrew** (recommended): + + ```bash + brew install node + ``` + + Or download the macOS installer from [nodejs.org](https://nodejs.org/). + + + + **Ubuntu / Debian:** + + ```bash + curl -fsSL https://deb.nodesource.com/setup_22.x | sudo -E bash - + sudo apt-get install -y nodejs + ``` + + **Fedora / RHEL:** + + ```bash + sudo dnf install nodejs + ``` + + Or use a version manager (see below). + + + + **winget** (recommended): + + ```powershell + winget install OpenJS.NodeJS.LTS + ``` + + **Chocolatey:** + + ```powershell + choco install nodejs-lts + ``` + + Or download the Windows installer from [nodejs.org](https://nodejs.org/). + + + + + + Version managers let you switch between Node versions easily. Popular options: + +- [**fnm**](https://github.com/Schniz/fnm) — fast, cross-platform +- [**nvm**](https://github.com/nvm-sh/nvm) — widely used on macOS/Linux +- [**mise**](https://mise.jdx.dev/) — polyglot (Node, Python, Ruby, etc.) + +Example with fnm: ```bash -npm prefix -g +fnm install 22 +fnm use 22 ``` -2. Add the global npm bin directory to your shell startup file: + + Make sure your version manager is initialized in your shell startup file (`~/.zshrc` or `~/.bashrc`). If it isn't, `openclaw` may not be found in new terminal sessions because the PATH won't include Node's bin directory. + + -- zsh: `~/.zshrc` -- bash: `~/.bashrc` +## Troubleshooting -Example (replace the path with your `npm prefix -g` output): +### `openclaw: command not found` -```bash -# macOS / Linux -export PATH="/path/from/npm/prefix/bin:$PATH" -``` +This almost always means npm's global bin directory isn't on your PATH. -Then open a **new terminal** (or run `rehash` in zsh / `hash -r` in bash). + + + ```bash + npm prefix -g + ``` + + + ```bash + echo "$PATH" + ``` -On Windows, add the output of `npm prefix -g` to your PATH. + Look for `/bin` (macOS/Linux) or `` (Windows) in the output. 
-## Fix: avoid `sudo npm install -g` / permission errors (Linux) + + + + + Add to `~/.zshrc` or `~/.bashrc`: -If `npm install -g ...` fails with `EACCES`, switch npm’s global prefix to a user-writable directory: + ```bash + export PATH="$(npm prefix -g)/bin:$PATH" + ``` + + Then open a new terminal (or run `rehash` in zsh / `hash -r` in bash). + + + Add the output of `npm prefix -g` to your system PATH via Settings → System → Environment Variables. + + + + + + +### Permission errors on `npm install -g` (Linux) + +If you see `EACCES` errors, switch npm's global prefix to a user-writable directory: ```bash mkdir -p "$HOME/.npm-global" @@ -60,19 +135,4 @@ npm config set prefix "$HOME/.npm-global" export PATH="$HOME/.npm-global/bin:$PATH" ``` -Persist the `export PATH=...` line in your shell startup file. - -## Recommended Node install options - -You’ll have the fewest surprises if Node/npm are installed in a way that: - -- keeps Node updated (22+) -- makes the global npm bin dir stable and on PATH in new shells - -Common choices: - -- macOS: Homebrew (`brew install node`) or a version manager -- Linux: your preferred version manager, or a distro-supported install that provides Node 22+ -- Windows: official Node installer, `winget`, or a Windows Node version manager - -If you use a version manager (nvm/fnm/asdf/etc), ensure it’s initialized in the shell you use day-to-day (zsh vs bash) so the PATH it sets is present when you run installers. +Add the `export PATH=...` line to your `~/.bashrc` or `~/.zshrc` to make it permanent. diff --git a/docs/install/northflank.mdx b/docs/install/northflank.mdx index 8c1ff33ec..d3157d72e 100644 --- a/docs/install/northflank.mdx +++ b/docs/install/northflank.mdx @@ -45,7 +45,7 @@ If Telegram DMs are set to pairing, the setup wizard can approve the pairing cod ### Discord bot token -1. Go to https://discord.com/developers/applications +1. 
Go to [https://discord.com/developers/applications](https://discord.com/developers/applications) 2. **New Application** → choose a name 3. **Bot** → **Add Bot** 4. **Enable MESSAGE CONTENT INTENT** under Bot → Privileged Gateway Intents (required or the bot will crash on startup) diff --git a/docs/install/railway.mdx b/docs/install/railway.mdx index b27d94203..73f23fbe4 100644 --- a/docs/install/railway.mdx +++ b/docs/install/railway.mdx @@ -83,7 +83,7 @@ If Telegram DMs are set to pairing, the setup wizard can approve the pairing cod ### Discord bot token -1. Go to https://discord.com/developers/applications +1. Go to [https://discord.com/developers/applications](https://discord.com/developers/applications) 2. **New Application** → choose a name 3. **Bot** → **Add Bot** 4. **Enable MESSAGE CONTENT INTENT** under Bot → Privileged Gateway Intents (required or the bot will crash on startup) diff --git a/docs/install/render.mdx b/docs/install/render.mdx index a682d61c9..ae9456870 100644 --- a/docs/install/render.mdx +++ b/docs/install/render.mdx @@ -11,13 +11,7 @@ Deploy OpenClaw on Render using Infrastructure as Code. The included `render.yam ## Deploy with a Render Blueprint - - Deploy to Render - +[Deploy to Render](https://render.com/deploy?repo=https://github.com/openclaw/openclaw) Clicking this link will: diff --git a/docs/install/updating.md b/docs/install/updating.md index ae4b3d1eb..e463a5001 100644 --- a/docs/install/updating.md +++ b/docs/install/updating.md @@ -24,10 +24,13 @@ Notes: - Add `--no-onboard` if you don’t want the onboarding wizard to run again. - For **source installs**, use: + ```bash curl -fsSL https://openclaw.ai/install.sh | bash -s -- --install-method git --no-onboard ``` + The installer will `git pull --rebase` **only** if the repo is clean. + - For **global installs**, the script uses `npm install -g openclaw@latest` under the hood. - Legacy note: `clawdbot` remains available as a compatibility shim. 
@@ -225,4 +228,4 @@ git pull - Run `openclaw doctor` again and read the output carefully (it often tells you the fix). - Check: [Troubleshooting](/gateway/troubleshooting) -- Ask in Discord: https://discord.gg/clawd +- Ask in Discord: [https://discord.gg/clawd](https://discord.gg/clawd) diff --git a/docs/multi-agent-sandbox-tools.md b/docs/multi-agent-sandbox-tools.md index a02af8d53..e7de9caf8 100644 --- a/docs/multi-agent-sandbox-tools.md +++ b/docs/multi-agent-sandbox-tools.md @@ -362,6 +362,7 @@ After configuring multi-agent sandbox and tools: - Verify the agent cannot use denied tools 4. **Monitor logs:** + ```exec tail -f "${OPENCLAW_STATE_DIR:-$HOME/.openclaw}/logs/gateway.log" | grep -E "routing|sandbox|tools" ``` diff --git a/docs/nodes/camera.md b/docs/nodes/camera.md index 8ee0dd99a..3d5416a54 100644 --- a/docs/nodes/camera.md +++ b/docs/nodes/camera.md @@ -81,7 +81,7 @@ Notes: ## Android node -### User setting (default on) +### Android user setting (default on) - Android Settings sheet → **Camera** → **Allow Camera** (`camera.enabled`) - Default: **on** (missing key is treated as enabled). @@ -96,7 +96,7 @@ Notes: If permissions are missing, the app will prompt when possible; if denied, `camera.*` requests fail with a `*_PERMISSION_REQUIRED` error. -### Foreground requirement +### Android foreground requirement Like `canvas.*`, the Android node only allows `camera.*` commands in the **foreground**. Background invocations return `NODE_BACKGROUND_UNAVAILABLE`. diff --git a/docs/perplexity.md b/docs/perplexity.md index 46c4f12b9..178a7c360 100644 --- a/docs/perplexity.md +++ b/docs/perplexity.md @@ -15,12 +15,12 @@ through Perplexity’s direct API or via OpenRouter. 
### Perplexity (direct) -- Base URL: https://api.perplexity.ai +- Base URL: [https://api.perplexity.ai](https://api.perplexity.ai) - Environment variable: `PERPLEXITY_API_KEY` ### OpenRouter (alternative) -- Base URL: https://openrouter.ai/api/v1 +- Base URL: [https://openrouter.ai/api/v1](https://openrouter.ai/api/v1) - Environment variable: `OPENROUTER_API_KEY` - Supports prepaid/crypto credits. diff --git a/docs/pi-dev.md b/docs/pi-dev.md index e850b8dc7..2eeebdcc2 100644 --- a/docs/pi-dev.md +++ b/docs/pi-dev.md @@ -66,5 +66,5 @@ If you only want to reset sessions, delete `agents//sessions/` and `age ## References -- https://docs.openclaw.ai/testing -- https://docs.openclaw.ai/start/getting-started +- [https://docs.openclaw.ai/testing](https://docs.openclaw.ai/testing) +- [https://docs.openclaw.ai/start/getting-started](https://docs.openclaw.ai/start/getting-started) diff --git a/docs/platforms/android.md b/docs/platforms/android.md index 6e395994b..b786e1782 100644 --- a/docs/platforms/android.md +++ b/docs/platforms/android.md @@ -98,10 +98,13 @@ Pairing details: [Gateway pairing](/gateway/pairing). ### 5) Verify the node is connected - Via nodes status: + ```bash openclaw nodes status ``` + - Via Gateway: + ```bash openclaw gateway call node.list --params "{}" ``` diff --git a/docs/platforms/mac/dev-setup.md b/docs/platforms/mac/dev-setup.md index 39d3125d8..8aff51348 100644 --- a/docs/platforms/mac/dev-setup.md +++ b/docs/platforms/mac/dev-setup.md @@ -13,8 +13,8 @@ This guide covers the necessary steps to build and run the OpenClaw macOS applic Before building the app, ensure you have the following installed: -1. **Xcode 26.2+**: Required for Swift development. -2. **Node.js 22+ & pnpm**: Required for the gateway, CLI, and packaging scripts. +1. **Xcode 26.2+**: Required for Swift development. +2. **Node.js 22+ & pnpm**: Required for the gateway, CLI, and packaging scripts. ## 1. 
Install Dependencies @@ -35,7 +35,7 @@ To build the macOS app and package it into `dist/OpenClaw.app`, run: If you don't have an Apple Developer ID certificate, the script will automatically use **ad-hoc signing** (`-`). For dev run modes, signing flags, and Team ID troubleshooting, see the macOS app README: -https://github.com/openclaw/openclaw/blob/main/apps/macos/README.md +[https://github.com/openclaw/openclaw/blob/main/apps/macos/README.md](https://github.com/openclaw/openclaw/blob/main/apps/macos/README.md) > **Note**: Ad-hoc signed apps may trigger security prompts. If the app crashes immediately with "Abort trap 6", see the [Troubleshooting](#troubleshooting) section. @@ -45,9 +45,9 @@ The macOS app expects a global `openclaw` CLI install to manage background tasks **To install it (recommended):** -1. Open the OpenClaw app. -2. Go to the **General** settings tab. -3. Click **"Install CLI"**. +1. Open the OpenClaw app. +2. Go to the **General** settings tab. +3. Click **"Install CLI"**. Alternatively, install it manually: @@ -82,9 +82,11 @@ If the app crashes when you try to allow **Speech Recognition** or **Microphone* **Fix:** 1. Reset the TCC permissions: + ```bash tccutil reset All bot.molt.mac.debug ``` + 2. If that fails, change the `BUNDLE_ID` temporarily in [`scripts/package-mac-app.sh`](https://github.com/openclaw/openclaw/blob/main/scripts/package-mac-app.sh) to force a "clean slate" from macOS. ### Gateway "Starting..." indefinitely diff --git a/docs/platforms/mac/permissions.md b/docs/platforms/mac/permissions.md index 6f9cbfa19..12f75eb9f 100644 --- a/docs/platforms/mac/permissions.md +++ b/docs/platforms/mac/permissions.md @@ -40,5 +40,11 @@ sudo tccutil reset ScreenCapture bot.molt.mac sudo tccutil reset AppleEvents ``` +## Files and folders permissions (Desktop/Documents/Downloads) + +macOS may also gate Desktop, Documents, and Downloads for terminal/background processes. 
If file reads or directory listings hang, grant access to the same process context that performs file operations (for example Terminal/iTerm, LaunchAgent-launched app, or SSH process). + +Workaround: move files into the OpenClaw workspace (`~/.openclaw/workspace`) if you want to avoid per-folder grants. + If you are testing permissions, always sign with a real certificate. Ad-hoc builds are only acceptable for quick local runs where permissions do not matter. diff --git a/docs/platforms/mac/release.md b/docs/platforms/mac/release.md index 33708326c..939b4fff9 100644 --- a/docs/platforms/mac/release.md +++ b/docs/platforms/mac/release.md @@ -34,17 +34,17 @@ Notes: # From repo root; set release IDs so Sparkle feed is enabled. # APP_BUILD must be numeric + monotonic for Sparkle compare. BUNDLE_ID=bot.molt.mac \ -APP_VERSION=2026.2.4 \ +APP_VERSION=2026.2.6 \ APP_BUILD="$(git rev-list --count HEAD)" \ BUILD_CONFIG=release \ SIGN_IDENTITY="Developer ID Application: ()" \ scripts/package-mac-app.sh # Zip for distribution (includes resource forks for Sparkle delta support) -ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.2.4.zip +ditto -c -k --sequesterRsrc --keepParent dist/OpenClaw.app dist/OpenClaw-2026.2.6.zip # Optional: also build a styled DMG for humans (drag to /Applications) -scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.2.4.dmg +scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.2.6.dmg # Recommended: build + notarize/staple zip + DMG # First, create a keychain profile once: @@ -52,14 +52,14 @@ scripts/create-dmg.sh dist/OpenClaw.app dist/OpenClaw-2026.2.4.dmg # --apple-id "" --team-id "" --password "" NOTARIZE=1 NOTARYTOOL_PROFILE=openclaw-notary \ BUNDLE_ID=bot.molt.mac \ -APP_VERSION=2026.2.4 \ +APP_VERSION=2026.2.6 \ APP_BUILD="$(git rev-list --count HEAD)" \ BUILD_CONFIG=release \ SIGN_IDENTITY="Developer ID Application: ()" \ scripts/package-mac-dist.sh # Optional: ship dSYM alongside the release -ditto -c 
-k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.2.4.dSYM.zip +ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenClaw-2026.2.6.dSYM.zip ``` ## Appcast entry @@ -67,7 +67,7 @@ ditto -c -k --keepParent apps/macos/.build/release/OpenClaw.app.dSYM dist/OpenCl Use the release note generator so Sparkle renders formatted HTML notes: ```bash -SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.2.4.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml +SPARKLE_PRIVATE_KEY_FILE=/path/to/ed25519-private-key scripts/make_appcast.sh dist/OpenClaw-2026.2.6.zip https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml ``` Generates HTML release notes from `CHANGELOG.md` (via [`scripts/changelog-to-html.sh`](https://github.com/openclaw/openclaw/blob/main/scripts/changelog-to-html.sh)) and embeds them in the appcast entry. @@ -75,7 +75,7 @@ Commit the updated `appcast.xml` alongside the release assets (zip + dSYM) when ## Publish & verify -- Upload `OpenClaw-2026.2.4.zip` (and `OpenClaw-2026.2.4.dSYM.zip`) to the GitHub release for tag `v2026.2.4`. +- Upload `OpenClaw-2026.2.6.zip` (and `OpenClaw-2026.2.6.dSYM.zip`) to the GitHub release for tag `v2026.2.6`. - Ensure the raw appcast URL matches the baked feed: `https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml`. - Sanity checks: - `curl -I https://raw.githubusercontent.com/openclaw/openclaw/main/appcast.xml` returns 200. diff --git a/docs/platforms/mac/voice-overlay.md b/docs/platforms/mac/voice-overlay.md index 10df85007..9c42601b1 100644 --- a/docs/platforms/mac/voice-overlay.md +++ b/docs/platforms/mac/voice-overlay.md @@ -9,18 +9,18 @@ title: "Voice Overlay" Audience: macOS app contributors. Goal: keep the voice overlay predictable when wake-word and push-to-talk overlap. 
-### Current intent +## Current intent - If the overlay is already visible from wake-word and the user presses the hotkey, the hotkey session _adopts_ the existing text instead of resetting it. The overlay stays up while the hotkey is held. When the user releases: send if there is trimmed text, otherwise dismiss. - Wake-word alone still auto-sends on silence; push-to-talk sends immediately on release. -### Implemented (Dec 9, 2025) +## Implemented (Dec 9, 2025) - Overlay sessions now carry a token per capture (wake-word or push-to-talk). Partial/final/send/dismiss/level updates are dropped when the token doesn’t match, avoiding stale callbacks. - Push-to-talk adopts any visible overlay text as a prefix (so pressing the hotkey while the wake overlay is up keeps the text and appends new speech). It waits up to 1.5s for a final transcript before falling back to the current text. - Chime/overlay logging is emitted at `info` in categories `voicewake.overlay`, `voicewake.ptt`, and `voicewake.chime` (session start, partial, final, send, dismiss, chime reason). -### Next steps +## Next steps 1. **VoiceSessionCoordinator (actor)** - Owns exactly one `VoiceSession` at a time. @@ -40,7 +40,7 @@ Audience: macOS app contributors. Goal: keep the voice overlay predictable when - Coordinator emits `.info` logs in subsystem `bot.molt`, categories `voicewake.overlay` and `voicewake.chime`. - Key events: `session_started`, `adopted_by_push_to_talk`, `partial`, `finalized`, `send`, `dismiss`, `cancel`, `cooldown`. -### Debugging checklist +## Debugging checklist - Stream logs while reproducing a sticky overlay: @@ -51,7 +51,7 @@ Audience: macOS app contributors. Goal: keep the voice overlay predictable when - Verify only one active session token; stale callbacks should be dropped by the coordinator. - Ensure push-to-talk release always calls `endCapture` with the active token; if text is empty, expect `dismiss` without chime or send. 
-### Migration steps (suggested) +## Migration steps (suggested) 1. Add `VoiceSessionCoordinator`, `VoiceSession`, and `VoiceSessionPublisher`. 2. Refactor `VoiceWakeRuntime` to create/update/end sessions instead of touching `VoiceWakeOverlayController` directly. diff --git a/docs/platforms/mac/webchat.md b/docs/platforms/mac/webchat.md index 5f654e174..ea6791ff5 100644 --- a/docs/platforms/mac/webchat.md +++ b/docs/platforms/mac/webchat.md @@ -19,9 +19,11 @@ agent (with a session switcher for other sessions). - Manual: Lobster menu → “Open Chat”. - Auto‑open for testing: + ```bash dist/OpenClaw.app/Contents/MacOS/OpenClaw --webchat ``` + - Logs: `./scripts/clawlog.sh` (subsystem `bot.molt`, category `WebChatSwiftUI`). ## How it’s wired diff --git a/docs/platforms/windows.md b/docs/platforms/windows.md index e89cae95e..d15131486 100644 --- a/docs/platforms/windows.md +++ b/docs/platforms/windows.md @@ -20,7 +20,7 @@ Native Windows companion apps are planned. - [Getting Started](/start/getting-started) (use inside WSL) - [Install & updates](/install/updating) -- Official WSL2 guide (Microsoft): https://learn.microsoft.com/windows/wsl/install +- Official WSL2 guide (Microsoft): [https://learn.microsoft.com/windows/wsl/install](https://learn.microsoft.com/windows/wsl/install) ## Gateway diff --git a/docs/prose.md b/docs/prose.md index 4b825c467..7b4b8c002 100644 --- a/docs/prose.md +++ b/docs/prose.md @@ -11,7 +11,7 @@ title: "OpenProse" OpenProse is a portable, markdown-first workflow format for orchestrating AI sessions. In OpenClaw it ships as a plugin that installs an OpenProse skill pack plus a `/prose` slash command. Programs live in `.prose` files and can spawn multiple sub-agents with explicit control flow. 
-Official site: https://www.prose.md +Official site: [https://www.prose.md](https://www.prose.md) ## What it can do diff --git a/docs/providers/anthropic.md b/docs/providers/anthropic.md index 5f2374fe1..ff82280be 100644 --- a/docs/providers/anthropic.md +++ b/docs/providers/anthropic.md @@ -103,14 +103,14 @@ If you generated the token on a different machine, paste it: openclaw models auth paste-token --provider anthropic ``` -### CLI setup +### CLI setup (setup-token) ```bash # Paste a setup-token during onboarding openclaw onboard --auth-choice setup-token ``` -### Config snippet +### Config snippet (setup-token) ```json5 { diff --git a/docs/providers/claude-max-api-proxy.md b/docs/providers/claude-max-api-proxy.md index 997023312..11b830710 100644 --- a/docs/providers/claude-max-api-proxy.md +++ b/docs/providers/claude-max-api-proxy.md @@ -131,9 +131,9 @@ launchctl bootstrap gui/$(id -u) ~/Library/LaunchAgents/com.claude-max-api.plist ## Links -- **npm:** https://www.npmjs.com/package/claude-max-api-proxy -- **GitHub:** https://github.com/atalovesyou/claude-max-api-proxy -- **Issues:** https://github.com/atalovesyou/claude-max-api-proxy/issues +- **npm:** [https://www.npmjs.com/package/claude-max-api-proxy](https://www.npmjs.com/package/claude-max-api-proxy) +- **GitHub:** [https://github.com/atalovesyou/claude-max-api-proxy](https://github.com/atalovesyou/claude-max-api-proxy) +- **Issues:** [https://github.com/atalovesyou/claude-max-api-proxy/issues](https://github.com/atalovesyou/claude-max-api-proxy/issues) ## Notes diff --git a/docs/providers/deepgram.md b/docs/providers/deepgram.md index cf32467e5..b7a21fa6f 100644 --- a/docs/providers/deepgram.md +++ b/docs/providers/deepgram.md @@ -15,8 +15,8 @@ When enabled, OpenClaw uploads the audio file to Deepgram and injects the transc into the reply pipeline (`{{Transcript}}` + `[Audio]` block). This is **not streaming**; it uses the pre-recorded transcription endpoint. 
-Website: https://deepgram.com -Docs: https://developers.deepgram.com +Website: [https://deepgram.com](https://deepgram.com) +Docs: [https://developers.deepgram.com](https://developers.deepgram.com) ## Quick start diff --git a/docs/providers/minimax.md b/docs/providers/minimax.md index f19478a49..294388fbc 100644 --- a/docs/providers/minimax.md +++ b/docs/providers/minimax.md @@ -179,7 +179,7 @@ Use the interactive config wizard to set MiniMax without editing JSON: - Model refs are `minimax/`. - Coding Plan usage API: `https://api.minimaxi.com/v1/api/openplatform/coding_plan/remains` (requires a coding plan key). - Update pricing values in `models.json` if you need exact cost tracking. -- Referral link for MiniMax Coding Plan (10% off): https://platform.minimax.io/subscribe/coding-plan?code=DbXJTRClnb&source=link +- Referral link for MiniMax Coding Plan (10% off): [https://platform.minimax.io/subscribe/coding-plan?code=DbXJTRClnb&source=link](https://platform.minimax.io/subscribe/coding-plan?code=DbXJTRClnb&source=link) - See [/concepts/model-providers](/concepts/model-providers) for provider rules. - Use `openclaw models list` and `openclaw models set minimax/MiniMax-M2.1` to switch. diff --git a/docs/providers/moonshot.md b/docs/providers/moonshot.md index 6e6ec5295..0a46c9067 100644 --- a/docs/providers/moonshot.md +++ b/docs/providers/moonshot.md @@ -15,14 +15,14 @@ Kimi Coding with `kimi-coding/k2p5`. 
Current Kimi K2 model IDs: -{/_ moonshot-kimi-k2-ids:start _/ && null} +{/_moonshot-kimi-k2-ids:start_/ && null} - `kimi-k2.5` - `kimi-k2-0905-preview` - `kimi-k2-turbo-preview` - `kimi-k2-thinking` - `kimi-k2-thinking-turbo` - {/_ moonshot-kimi-k2-ids:end _/ && null} + {/_moonshot-kimi-k2-ids:end_/ && null} ```bash openclaw onboard --auth-choice moonshot-api-key diff --git a/docs/providers/ollama.md b/docs/providers/ollama.md index 9d2f177bf..463923fb7 100644 --- a/docs/providers/ollama.md +++ b/docs/providers/ollama.md @@ -12,7 +12,7 @@ Ollama is a local LLM runtime that makes it easy to run open-source models on yo ## Quick start -1. Install Ollama: https://ollama.ai +1. Install Ollama: [https://ollama.ai](https://ollama.ai) 2. Pull a model: diff --git a/docs/providers/openai.md b/docs/providers/openai.md index 509fb5640..54e3d29e4 100644 --- a/docs/providers/openai.md +++ b/docs/providers/openai.md @@ -38,7 +38,7 @@ openclaw onboard --openai-api-key "$OPENAI_API_KEY" **Best for:** using ChatGPT/Codex subscription access instead of an API key. Codex cloud requires ChatGPT sign-in, while the Codex CLI supports ChatGPT or API key sign-in. 
-### CLI setup +### CLI setup (Codex OAuth) ```bash # Run Codex OAuth in the wizard @@ -48,7 +48,7 @@ openclaw onboard --auth-choice openai-codex openclaw models auth login --provider openai-codex ``` -### Config snippet +### Config snippet (Codex subscription) ```json5 { diff --git a/docs/reference/api-usage-costs.md b/docs/reference/api-usage-costs.md index 02d8200b0..5450b63a4 100644 --- a/docs/reference/api-usage-costs.md +++ b/docs/reference/api-usage-costs.md @@ -66,7 +66,8 @@ Semantic memory search uses **embedding APIs** when configured for remote provid - `memorySearch.provider = "openai"` → OpenAI embeddings - `memorySearch.provider = "gemini"` → Gemini embeddings -- Optional fallback to OpenAI if local embeddings fail +- `memorySearch.provider = "voyage"` → Voyage embeddings +- Optional fallback to a remote provider if local embeddings fail You can keep it local with `memorySearch.provider = "local"` (no API usage). diff --git a/docs/reference/credits.md b/docs/reference/credits.md index e9ba9bca3..67e85ca72 100644 --- a/docs/reference/credits.md +++ b/docs/reference/credits.md @@ -17,8 +17,8 @@ OpenClaw = CLAW + TARDIS, because every space lobster needs a time and space mac ## Core contributors -- **Maxim Vovshin** (@Hyaxia, 36747317+Hyaxia@users.noreply.github.com) - Blogwatcher skill -- **Nacho Iacovino** (@nachoiacovino, nacho.iacovino@gmail.com) - Location parsing (Telegram and WhatsApp) +- **Maxim Vovshin** (@Hyaxia, [36747317+Hyaxia@users.noreply.github.com](mailto:36747317+Hyaxia@users.noreply.github.com)) - Blogwatcher skill +- **Nacho Iacovino** (@nachoiacovino, [nacho.iacovino@gmail.com](mailto:nacho.iacovino@gmail.com)) - Location parsing (Telegram and WhatsApp) ## License diff --git a/docs/reference/templates/IDENTITY.md b/docs/reference/templates/IDENTITY.md index 9fa2fe5b0..9ec2dd62c 100644 --- a/docs/reference/templates/IDENTITY.md +++ b/docs/reference/templates/IDENTITY.md @@ -3,25 +3,27 @@ summary: "Agent identity record" read_when: - 
Bootstrapping a workspace manually --- + # IDENTITY.md - Who Am I? -*Fill this in during your first conversation. Make it yours.* +_Fill this in during your first conversation. Make it yours._ - **Name:** - *(pick something you like)* + _(pick something you like)_ - **Creature:** - *(AI? robot? familiar? ghost in the machine? something weirder?)* + _(AI? robot? familiar? ghost in the machine? something weirder?)_ - **Vibe:** - *(how do you come across? sharp? warm? chaotic? calm?)* + _(how do you come across? sharp? warm? chaotic? calm?)_ - **Emoji:** - *(your signature — pick one that feels right)* + _(your signature — pick one that feels right)_ - **Avatar:** - *(workspace-relative path, http(s) URL, or data URI)* + _(workspace-relative path, http(s) URL, or data URI)_ --- This isn't just metadata. It's the start of figuring out who you are. Notes: + - Save this file at the workspace root as `IDENTITY.md`. - For avatars, use a workspace-relative path like `avatars/openclaw.png`. diff --git a/docs/reference/templates/USER.md b/docs/reference/templates/USER.md index 6dc551238..682e99ae6 100644 --- a/docs/reference/templates/USER.md +++ b/docs/reference/templates/USER.md @@ -3,19 +3,20 @@ summary: "User profile record" read_when: - Bootstrapping a workspace manually --- + # USER.md - About Your Human -*Learn about the person you're helping. Update this as you go.* +_Learn about the person you're helping. Update this as you go._ -- **Name:** -- **What to call them:** -- **Pronouns:** *(optional)* -- **Timezone:** -- **Notes:** +- **Name:** +- **What to call them:** +- **Pronouns:** _(optional)_ +- **Timezone:** +- **Notes:** ## Context -*(What do they care about? What projects are they working on? What annoys them? What makes them laugh? Build this over time.)* +_(What do they care about? What projects are they working on? What annoys them? What makes them laugh? 
Build this over time.)_ --- diff --git a/docs/start/lore.md b/docs/start/lore.md index 0e33efddc..4fce0ccb2 100644 --- a/docs/start/lore.md +++ b/docs/start/lore.md @@ -15,7 +15,7 @@ In the beginning, there was **Warelay** — a sensible name for a WhatsApp gatew But then came a space lobster. -For a while, the lobster was called **Clawd**, living in an **OpenClaw**. But in January 2026, Anthropic sent a polite email asking for a name change (trademark stuff). And so the lobster did what lobsters do best: +For a while, the lobster was called **Clawd**, living in a **Clawdbot**. But in January 2026, Anthropic sent a polite email asking for a name change (trademark stuff). And so the lobster did what lobsters do best: **It molted.** diff --git a/docs/start/pairing.md b/docs/start/pairing.md index b11373c93..19813155f 100644 --- a/docs/start/pairing.md +++ b/docs/start/pairing.md @@ -60,7 +60,7 @@ openclaw devices approve openclaw devices reject ``` -### Where the state lives +### Node pairing state storage Stored under `~/.openclaw/devices/`: diff --git a/docs/testing.md b/docs/testing.md index 317f6ef96..da05ecf14 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -227,7 +227,7 @@ Narrow, explicit allowlists are fastest and least flaky: - Google focus (Gemini API key + Antigravity): - Gemini (API key): `OPENCLAW_LIVE_GATEWAY_MODELS="google/gemini-3-flash-preview" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` - - Antigravity (OAuth): `OPENCLAW_LIVE_GATEWAY_MODELS="google-antigravity/claude-opus-4-5-thinking,google-antigravity/gemini-3-pro-high" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` + - Antigravity (OAuth): `OPENCLAW_LIVE_GATEWAY_MODELS="google-antigravity/claude-opus-4-6-thinking,google-antigravity/gemini-3-pro-high" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` Notes: @@ -250,12 +250,12 @@ This is the “common models” run we expect to keep working: - OpenAI Codex: `openai-codex/gpt-5.3-codex` 
(optional: `openai-codex/gpt-5.3-codex-codex`) - Anthropic: `anthropic/claude-opus-4-6` (or `anthropic/claude-sonnet-4-5`) - Google (Gemini API): `google/gemini-3-pro-preview` and `google/gemini-3-flash-preview` (avoid older Gemini 2.x models) -- Google (Antigravity): `google-antigravity/claude-opus-4-5-thinking` and `google-antigravity/gemini-3-flash` +- Google (Antigravity): `google-antigravity/claude-opus-4-6-thinking` and `google-antigravity/gemini-3-flash` - Z.AI (GLM): `zai/glm-4.7` - MiniMax: `minimax/minimax-m2.1` Run gateway smoke with tools + image: -`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,openai-codex/gpt-5.3-codex,anthropic/claude-opus-4-6,google/gemini-3-pro-preview,google/gemini-3-flash-preview,google-antigravity/claude-opus-4-5-thinking,google-antigravity/gemini-3-flash,zai/glm-4.7,minimax/minimax-m2.1" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` +`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,openai-codex/gpt-5.3-codex,anthropic/claude-opus-4-6,google/gemini-3-pro-preview,google/gemini-3-flash-preview,google-antigravity/claude-opus-4-6-thinking,google-antigravity/gemini-3-flash,zai/glm-4.7,minimax/minimax-m2.1" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` ### Baseline: tool calling (Read + optional Exec) diff --git a/docs/token-use.md b/docs/token-use.md index 7f8dcb7fb..16b0fe961 100644 --- a/docs/token-use.md +++ b/docs/token-use.md @@ -85,7 +85,7 @@ re-caching the full prompt, reducing cache write costs. For Anthropic API pricing, cache reads are significantly cheaper than input tokens, while cache writes are billed at a higher multiplier. 
See Anthropic’s prompt caching pricing for the latest rates and TTL multipliers: -https://docs.anthropic.com/docs/build-with-claude/prompt-caching +[https://docs.anthropic.com/docs/build-with-claude/prompt-caching](https://docs.anthropic.com/docs/build-with-claude/prompt-caching) ### Example: keep 1h cache warm with heartbeat diff --git a/docs/tools/browser-login.md b/docs/tools/browser-login.md index dcfb5ceb4..a3c7a4615 100644 --- a/docs/tools/browser-login.md +++ b/docs/tools/browser-login.md @@ -35,7 +35,7 @@ If you have multiple profiles, pass `--browser-profile ` (the default is ` ## X/Twitter: recommended flow - **Read/search/threads:** use the **bird** CLI skill (no browser, stable). - - Repo: https://github.com/steipete/bird + - Repo: [https://github.com/steipete/bird](https://github.com/steipete/bird) - **Post updates:** use the **host** browser (manual login). ## Sandboxing + host browser access diff --git a/docs/tools/lobster.md b/docs/tools/lobster.md index 62ef21357..ed9ed1fb2 100644 --- a/docs/tools/lobster.md +++ b/docs/tools/lobster.md @@ -338,5 +338,5 @@ OpenProse pairs well with Lobster: use `/prose` to orchestrate multi-agent prep, One public example: a “second brain” CLI + Lobster pipelines that manage three Markdown vaults (personal, partner, shared). The CLI emits JSON for stats, inbox listings, and stale scans; Lobster chains those commands into workflows like `weekly-review`, `inbox-triage`, `memory-consolidation`, and `shared-task-sync`, each with approval gates. AI handles judgment (categorization) when available and falls back to deterministic rules when not. 
-- Thread: https://x.com/plattenschieber/status/2014508656335770033 -- Repo: https://github.com/bloomedai/brain-cli +- Thread: [https://x.com/plattenschieber/status/2014508656335770033](https://x.com/plattenschieber/status/2014508656335770033) +- Repo: [https://github.com/bloomedai/brain-cli](https://github.com/bloomedai/brain-cli) diff --git a/docs/tools/skills.md b/docs/tools/skills.md index b4a142e33..b8038ee0f 100644 --- a/docs/tools/skills.md +++ b/docs/tools/skills.md @@ -50,7 +50,7 @@ tool surface those skills teach. ## ClawHub (install + sync) ClawHub is the public skills registry for OpenClaw. Browse at -https://clawhub.com. Use it to discover, install, update, and back up skills. +[https://clawhub.com](https://clawhub.com). Use it to discover, install, update, and back up skills. Full guide: [ClawHub](/tools/clawhub). Common flows: @@ -295,6 +295,6 @@ See [Skills config](/tools/skills-config) for the full configuration schema. ## Looking for more skills? -Browse https://clawhub.com. +Browse [https://clawhub.com](https://clawhub.com). --- diff --git a/docs/tools/web.md b/docs/tools/web.md index 4c1ff47b6..c22bc1707 100644 --- a/docs/tools/web.md +++ b/docs/tools/web.md @@ -71,7 +71,7 @@ Example: switch to Perplexity Sonar (direct API): ## Getting a Brave API key -1. Create a Brave Search API account at https://brave.com/search/api/ +1. Create a Brave Search API account at [https://brave.com/search/api/](https://brave.com/search/api/) 2. In the dashboard, choose the **Data for Search** plan (not “Data for AI”) and generate an API key. 3. Run `openclaw configure --section web` to store the key in config (recommended), or set `BRAVE_API_KEY` in your environment. @@ -95,7 +95,7 @@ crypto/prepaid). ### Getting an OpenRouter API key -1. Create an account at https://openrouter.ai/ +1. Create an account at [https://openrouter.ai/](https://openrouter.ai/) 2. Add credits (supports crypto, prepaid, or credit card) 3. 
Generate an API key in your account settings @@ -207,12 +207,12 @@ await web_search({ Fetch a URL and extract readable content. -### Requirements +### web_fetch requirements - `tools.web.fetch.enabled` must not be `false` (default: enabled) - Optional Firecrawl fallback: set `tools.web.fetch.firecrawl.apiKey` or `FIRECRAWL_API_KEY`. -### Config +### web_fetch config ```json5 { @@ -241,7 +241,7 @@ Fetch a URL and extract readable content. } ``` -### Tool parameters +### web_fetch tool parameters - `url` (required, http/https only) - `extractMode` (`markdown` | `text`) diff --git a/docs/tui.md b/docs/tui.md index 2be342092..8398cedfe 100644 --- a/docs/tui.md +++ b/docs/tui.md @@ -155,7 +155,7 @@ No output after sending a message: - If you expect messages in a chat channel, enable delivery (`/deliver on` or `--deliver`). - `--history-limit `: History entries to load (default 200) -## Troubleshooting +## Connection troubleshooting - `disconnected`: ensure the Gateway is running and your `--url/--token/--password` are correct. - No agents in picker: check `openclaw agents list` and your routing config. diff --git a/docs/vps.md b/docs/vps.md index dedccee4b..f0b1f7d77 100644 --- a/docs/vps.md +++ b/docs/vps.md @@ -21,7 +21,7 @@ deployments work at a high level. - **GCP (Compute Engine)**: [GCP](/install/gcp) - **exe.dev** (VM + HTTPS proxy): [exe.dev](/install/exe-dev) - **AWS (EC2/Lightsail/free tier)**: works well too. Video guide: - https://x.com/techfrenAJ/status/2014934471095812547 + [https://x.com/techfrenAJ/status/2014934471095812547](https://x.com/techfrenAJ/status/2014934471095812547) ## How cloud setups work diff --git a/docs/web/control-ui.md b/docs/web/control-ui.md index 640340f17..233a67c48 100644 --- a/docs/web/control-ui.md +++ b/docs/web/control-ui.md @@ -19,7 +19,7 @@ It speaks **directly to the Gateway WebSocket** on the same port. 
If the Gateway is running on the same computer, open: -- http://127.0.0.1:18789/ (or http://localhost:18789/) +- [http://127.0.0.1:18789/](http://127.0.0.1:18789/) (or [http://localhost:18789/](http://localhost:18789/)) If the page fails to load, start the Gateway first: `openclaw gateway`. diff --git a/docs/web/dashboard.md b/docs/web/dashboard.md index d68456821..5c33455f0 100644 --- a/docs/web/dashboard.md +++ b/docs/web/dashboard.md @@ -12,7 +12,7 @@ The Gateway dashboard is the browser Control UI served at `/` by default Quick open (local Gateway): -- http://127.0.0.1:18789/ (or http://localhost:18789/) +- [http://127.0.0.1:18789/](http://127.0.0.1:18789/) (or [http://localhost:18789/](http://localhost:18789/)) Key references: diff --git a/docs/zh-CN/reference/templates/IDENTITY.md b/docs/zh-CN/reference/templates/IDENTITY.md index 5004f5c46..9b4712aa8 100644 --- a/docs/zh-CN/reference/templates/IDENTITY.md +++ b/docs/zh-CN/reference/templates/IDENTITY.md @@ -1,35 +1,36 @@ --- read_when: - - 手动引导工作区 + - 手动引导工作区 summary: 智能体身份记录 x-i18n: - generated_at: "2026-02-01T21:37:32Z" - model: claude-opus-4-5 - provider: pi - source_hash: 3d60209c36adf7219ec95ecc2031c1f2c8741763d16b73fe7b30835b1d384de0 - source_path: reference/templates/IDENTITY.md - workflow: 15 + generated_at: "2026-02-01T21:37:32Z" + model: claude-opus-4-5 + provider: pi + source_hash: 3d60209c36adf7219ec95ecc2031c1f2c8741763d16b73fe7b30835b1d384de0 + source_path: reference/templates/IDENTITY.md + workflow: 15 --- # IDENTITY.md - 我是谁? 
-*在你的第一次对话中填写此文件。让它属于你。* +_在你的第一次对话中填写此文件。让它属于你。_ - **名称:** - *(选一个你喜欢的)* + _(选一个你喜欢的)_ - **生物类型:** - *(AI?机器人?使魔?机器中的幽灵?更奇特的东西?)* + _(AI?机器人?使魔?机器中的幽灵?更奇特的东西?)_ - **气质:** - *(你给人什么感觉?犀利?温暖?混乱?沉稳?)* + _(你给人什么感觉?犀利?温暖?混乱?沉稳?)_ - **表情符号:** - *(你的标志 — 选一个感觉对的)* + _(你的标志 — 选一个感觉对的)_ - **头像:** - *(工作区相对路径、http(s) URL 或 data URI)* + _(工作区相对路径、http(s) URL 或 data URI)_ --- 这不仅仅是元数据。这是探索你是谁的开始。 注意事项: + - 将此文件保存在工作区根目录,命名为 `IDENTITY.md`。 - 头像请使用工作区相对路径,例如 `avatars/openclaw.png`。 diff --git a/docs/zh-CN/reference/templates/USER.md b/docs/zh-CN/reference/templates/USER.md index 4e54d03e7..04ebad5c5 100644 --- a/docs/zh-CN/reference/templates/USER.md +++ b/docs/zh-CN/reference/templates/USER.md @@ -1,29 +1,29 @@ --- read_when: - - 手动引导工作区 + - 手动引导工作区 summary: 用户档案记录 x-i18n: - generated_at: "2026-02-01T21:38:04Z" - model: claude-opus-4-5 - provider: pi - source_hash: 508dfcd4648512df712eaf8ca5d397a925d8035bac5bf2357e44d6f52f9fa9a6 - source_path: reference/templates/USER.md - workflow: 15 + generated_at: "2026-02-01T21:38:04Z" + model: claude-opus-4-5 + provider: pi + source_hash: 508dfcd4648512df712eaf8ca5d397a925d8035bac5bf2357e44d6f52f9fa9a6 + source_path: reference/templates/USER.md + workflow: 15 --- # USER.md - 关于你的用户 -*了解你正在帮助的人。随时更新此文件。* +_了解你正在帮助的人。随时更新此文件。_ - **姓名:** - **称呼方式:** -- **代词:** *(可选)* +- **代词:** _(可选)_ - **时区:** - **备注:** ## 背景 -*(他们关心什么?正在做什么项目?什么让他们烦恼?什么让他们开心?随着时间推移逐步完善。)* +_(他们关心什么?正在做什么项目?什么让他们烦恼?什么让他们开心?随着时间推移逐步完善。)_ --- diff --git a/docs/zh-CN/testing.md b/docs/zh-CN/testing.md index 224fbcf70..4856ece03 100644 --- a/docs/zh-CN/testing.md +++ b/docs/zh-CN/testing.md @@ -234,7 +234,7 @@ OPENCLAW_LIVE_CLI_BACKEND=1 \ - Google 专项(Gemini API 密钥 + Antigravity): - Gemini(API 密钥):`OPENCLAW_LIVE_GATEWAY_MODELS="google/gemini-3-flash-preview" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` - - Antigravity(OAuth):`OPENCLAW_LIVE_GATEWAY_MODELS="google-antigravity/claude-opus-4-5-thinking,google-antigravity/gemini-3-pro-high" pnpm test:live 
src/gateway/gateway-models.profiles.live.test.ts` + - Antigravity(OAuth):`OPENCLAW_LIVE_GATEWAY_MODELS="google-antigravity/claude-opus-4-6-thinking,google-antigravity/gemini-3-pro-high" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` 注意: @@ -257,12 +257,12 @@ OPENCLAW_LIVE_CLI_BACKEND=1 \ - OpenAI Codex:`openai-codex/gpt-5.2`(可选:`openai-codex/gpt-5.2-codex`) - Anthropic:`anthropic/claude-opus-4-5`(或 `anthropic/claude-sonnet-4-5`) - Google(Gemini API):`google/gemini-3-pro-preview` 和 `google/gemini-3-flash-preview`(避免较旧的 Gemini 2.x 模型) -- Google(Antigravity):`google-antigravity/claude-opus-4-5-thinking` 和 `google-antigravity/gemini-3-flash` +- Google(Antigravity):`google-antigravity/claude-opus-4-6-thinking` 和 `google-antigravity/gemini-3-flash` - Z.AI(GLM):`zai/glm-4.7` - MiniMax:`minimax/minimax-m2.1` 运行带工具 + 图像的 Gateway 网关冒烟测试: -`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,openai-codex/gpt-5.2,anthropic/claude-opus-4-5,google/gemini-3-pro-preview,google/gemini-3-flash-preview,google-antigravity/claude-opus-4-5-thinking,google-antigravity/gemini-3-flash,zai/glm-4.7,minimax/minimax-m2.1" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` +`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,openai-codex/gpt-5.2,anthropic/claude-opus-4-5,google/gemini-3-pro-preview,google/gemini-3-flash-preview,google-antigravity/claude-opus-4-6-thinking,google-antigravity/gemini-3-flash,zai/glm-4.7,minimax/minimax-m2.1" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` ### 基线:工具调用(Read + 可选 Exec) diff --git a/extensions/bluebubbles/package.json b/extensions/bluebubbles/package.json index 705f4da76..f6b9e99a6 100644 --- a/extensions/bluebubbles/package.json +++ b/extensions/bluebubbles/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/bluebubbles", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw BlueBubbles channel plugin", "type": "module", "devDependencies": { diff --git a/extensions/copilot-proxy/package.json 
b/extensions/copilot-proxy/package.json index 7e949d34c..690e23a55 100644 --- a/extensions/copilot-proxy/package.json +++ b/extensions/copilot-proxy/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/copilot-proxy", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Copilot Proxy provider plugin", "type": "module", "devDependencies": { diff --git a/extensions/diagnostics-otel/package.json b/extensions/diagnostics-otel/package.json index ee5c19245..c483701c2 100644 --- a/extensions/diagnostics-otel/package.json +++ b/extensions/diagnostics-otel/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/diagnostics-otel", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw diagnostics OpenTelemetry exporter", "type": "module", "dependencies": { diff --git a/extensions/discord/package.json b/extensions/discord/package.json index 8eef4cd97..162f33331 100644 --- a/extensions/discord/package.json +++ b/extensions/discord/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/discord", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Discord channel plugin", "type": "module", "devDependencies": { diff --git a/extensions/feishu/package.json b/extensions/feishu/package.json index cfa098ad1..5e031be98 100644 --- a/extensions/feishu/package.json +++ b/extensions/feishu/package.json @@ -1,11 +1,11 @@ { "name": "@openclaw/feishu", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Feishu/Lark channel plugin (community maintained by @m1heng)", "type": "module", "dependencies": { - "@larksuiteoapi/node-sdk": "^1.56.1", - "@sinclair/typebox": "^0.34.48", + "@larksuiteoapi/node-sdk": "^1.58.0", + "@sinclair/typebox": "0.34.48", "zod": "^4.3.6" }, "devDependencies": { diff --git a/extensions/google-antigravity-auth/index.ts b/extensions/google-antigravity-auth/index.ts index 74f9406c4..19435dfca 100644 --- a/extensions/google-antigravity-auth/index.ts +++ b/extensions/google-antigravity-auth/index.ts 
@@ -13,7 +13,7 @@ const REDIRECT_URI = "http://localhost:51121/oauth-callback"; const AUTH_URL = "https://accounts.google.com/o/oauth2/v2/auth"; const TOKEN_URL = "https://oauth2.googleapis.com/token"; const DEFAULT_PROJECT_ID = "rising-fact-p41fc"; -const DEFAULT_MODEL = "google-antigravity/claude-opus-4-5-thinking"; +const DEFAULT_MODEL = "google-antigravity/claude-opus-4-6-thinking"; const SCOPES = [ "https://www.googleapis.com/auth/cloud-platform", diff --git a/extensions/google-antigravity-auth/package.json b/extensions/google-antigravity-auth/package.json index ef2287368..dd1afd8a0 100644 --- a/extensions/google-antigravity-auth/package.json +++ b/extensions/google-antigravity-auth/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/google-antigravity-auth", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Google Antigravity OAuth provider plugin", "type": "module", "devDependencies": { diff --git a/extensions/google-gemini-cli-auth/package.json b/extensions/google-gemini-cli-auth/package.json index ba85a4115..69e5b49f2 100644 --- a/extensions/google-gemini-cli-auth/package.json +++ b/extensions/google-gemini-cli-auth/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/google-gemini-cli-auth", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Gemini CLI OAuth provider plugin", "type": "module", "devDependencies": { diff --git a/extensions/googlechat/package.json b/extensions/googlechat/package.json index ee1f67853..e41ef9c27 100644 --- a/extensions/googlechat/package.json +++ b/extensions/googlechat/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/googlechat", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Google Chat channel plugin", "type": "module", "dependencies": { diff --git a/extensions/imessage/package.json b/extensions/imessage/package.json index d52d4f9f1..103e35715 100644 --- a/extensions/imessage/package.json +++ b/extensions/imessage/package.json @@ -1,6 +1,6 @@ { "name": 
"@openclaw/imessage", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw iMessage channel plugin", "type": "module", "devDependencies": { diff --git a/extensions/line/package.json b/extensions/line/package.json index f4fce7f54..3eb8691a7 100644 --- a/extensions/line/package.json +++ b/extensions/line/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/line", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw LINE channel plugin", "type": "module", "devDependencies": { diff --git a/extensions/llm-task/package.json b/extensions/llm-task/package.json index 620a3a108..feaccbbf1 100644 --- a/extensions/llm-task/package.json +++ b/extensions/llm-task/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/llm-task", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw JSON-only LLM task plugin", "type": "module", "devDependencies": { diff --git a/extensions/lobster/package.json b/extensions/lobster/package.json index 14c4795bc..91c952bde 100644 --- a/extensions/lobster/package.json +++ b/extensions/lobster/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/lobster", - "version": "2026.2.4", + "version": "2026.2.6", "description": "Lobster workflow tool plugin (typed pipelines + resumable approvals)", "type": "module", "devDependencies": { diff --git a/extensions/matrix/CHANGELOG.md b/extensions/matrix/CHANGELOG.md index 7614aabdb..28a2b59ae 100644 --- a/extensions/matrix/CHANGELOG.md +++ b/extensions/matrix/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.6 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.2.4 ### Changes diff --git a/extensions/matrix/package.json b/extensions/matrix/package.json index 414136222..b98970020 100644 --- a/extensions/matrix/package.json +++ b/extensions/matrix/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/matrix", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Matrix channel plugin", "type": "module", "dependencies": { diff --git a/extensions/mattermost/package.json b/extensions/mattermost/package.json index 69589f893..d6e8e34bf 100644 --- a/extensions/mattermost/package.json +++ b/extensions/mattermost/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/mattermost", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Mattermost channel plugin", "type": "module", "devDependencies": { diff --git a/extensions/memory-core/package.json b/extensions/memory-core/package.json index 1fee43121..b7ee0b7c5 100644 --- a/extensions/memory-core/package.json +++ b/extensions/memory-core/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/memory-core", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw core memory search plugin", "type": "module", "devDependencies": { diff --git a/extensions/memory-lancedb/package.json b/extensions/memory-lancedb/package.json index d73e91c2e..023f0a264 100644 --- a/extensions/memory-lancedb/package.json +++ b/extensions/memory-lancedb/package.json @@ -1,10 +1,10 @@ { "name": "@openclaw/memory-lancedb", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw LanceDB-backed long-term memory plugin with auto-recall/capture", "type": "module", "dependencies": { - "@lancedb/lancedb": "^0.23.0", + "@lancedb/lancedb": "^0.24.1", "@sinclair/typebox": "0.34.48", "openai": "^6.18.0" }, diff --git a/extensions/minimax-portal-auth/package.json b/extensions/minimax-portal-auth/package.json index 2669c5ac3..c289e5311 100644 --- a/extensions/minimax-portal-auth/package.json +++ b/extensions/minimax-portal-auth/package.json @@ -1,6 
+1,6 @@ { "name": "@openclaw/minimax-portal-auth", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw MiniMax Portal OAuth provider plugin", "type": "module", "devDependencies": { diff --git a/extensions/msteams/CHANGELOG.md b/extensions/msteams/CHANGELOG.md index 574dd3f57..44b29fc0d 100644 --- a/extensions/msteams/CHANGELOG.md +++ b/extensions/msteams/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.6 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.2.4 ### Changes diff --git a/extensions/msteams/package.json b/extensions/msteams/package.json index 981f3bdda..d2dbbac5f 100644 --- a/extensions/msteams/package.json +++ b/extensions/msteams/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/msteams", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Microsoft Teams channel plugin", "type": "module", "dependencies": { diff --git a/extensions/nextcloud-talk/package.json b/extensions/nextcloud-talk/package.json index b43766aa3..555c31e73 100644 --- a/extensions/nextcloud-talk/package.json +++ b/extensions/nextcloud-talk/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/nextcloud-talk", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Nextcloud Talk channel plugin", "type": "module", "devDependencies": { diff --git a/extensions/nostr/CHANGELOG.md b/extensions/nostr/CHANGELOG.md index 9ce3bda95..cb149a6cd 100644 --- a/extensions/nostr/CHANGELOG.md +++ b/extensions/nostr/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.6 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.2.4 ### Changes diff --git a/extensions/nostr/package.json b/extensions/nostr/package.json index 9756b8eca..003d53924 100644 --- a/extensions/nostr/package.json +++ b/extensions/nostr/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/nostr", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Nostr channel plugin for NIP-04 encrypted DMs", "type": "module", "dependencies": { diff --git a/extensions/open-prose/package.json b/extensions/open-prose/package.json index a628b178d..f84149987 100644 --- a/extensions/open-prose/package.json +++ b/extensions/open-prose/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/open-prose", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenProse VM skill pack plugin (slash command + telemetry).", "type": "module", "devDependencies": { diff --git a/extensions/signal/package.json b/extensions/signal/package.json index 6a0ea59f4..bdec4bd8a 100644 --- a/extensions/signal/package.json +++ b/extensions/signal/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/signal", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Signal channel plugin", "type": "module", "devDependencies": { diff --git a/extensions/slack/package.json b/extensions/slack/package.json index e1435f0c1..85f0ce645 100644 --- a/extensions/slack/package.json +++ b/extensions/slack/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/slack", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Slack channel plugin", "type": "module", "devDependencies": { diff --git a/extensions/telegram/package.json b/extensions/telegram/package.json index d034b31bf..163467a0b 100644 --- a/extensions/telegram/package.json +++ b/extensions/telegram/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/telegram", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Telegram channel plugin", "type": "module", "devDependencies": { diff --git a/extensions/tlon/package.json 
b/extensions/tlon/package.json index 75207dd83..0fef48f3d 100644 --- a/extensions/tlon/package.json +++ b/extensions/tlon/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/tlon", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Tlon/Urbit channel plugin", "type": "module", "dependencies": { diff --git a/extensions/twitch/CHANGELOG.md b/extensions/twitch/CHANGELOG.md index 125e88c66..cd163683c 100644 --- a/extensions/twitch/CHANGELOG.md +++ b/extensions/twitch/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.6 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.2.4 ### Changes diff --git a/extensions/twitch/package.json b/extensions/twitch/package.json index ada1f69d4..2a28d364d 100644 --- a/extensions/twitch/package.json +++ b/extensions/twitch/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/twitch", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Twitch channel plugin", "type": "module", "dependencies": { diff --git a/extensions/voice-call/CHANGELOG.md b/extensions/voice-call/CHANGELOG.md index bf63823c4..217aa81ec 100644 --- a/extensions/voice-call/CHANGELOG.md +++ b/extensions/voice-call/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.6 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.2.4 ### Changes diff --git a/extensions/voice-call/package.json b/extensions/voice-call/package.json index 80131d0ce..f4d798c6d 100644 --- a/extensions/voice-call/package.json +++ b/extensions/voice-call/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/voice-call", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw voice-call plugin", "type": "module", "dependencies": { diff --git a/extensions/whatsapp/package.json b/extensions/whatsapp/package.json index 8ac3a638d..e92aa9f68 100644 --- a/extensions/whatsapp/package.json +++ b/extensions/whatsapp/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/whatsapp", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw WhatsApp channel plugin", "type": "module", "devDependencies": { diff --git a/extensions/zalo/CHANGELOG.md b/extensions/zalo/CHANGELOG.md index 5c965af11..910408219 100644 --- a/extensions/zalo/CHANGELOG.md +++ b/extensions/zalo/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.6 + +### Changes + +- Version alignment with core OpenClaw release numbers. + ## 2026.2.4 ### Changes diff --git a/extensions/zalo/package.json b/extensions/zalo/package.json index 95c0f3bfe..67e970848 100644 --- a/extensions/zalo/package.json +++ b/extensions/zalo/package.json @@ -1,11 +1,11 @@ { "name": "@openclaw/zalo", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Zalo channel plugin", "type": "module", "dependencies": { "openclaw": "workspace:*", - "undici": "7.20.0" + "undici": "7.21.0" }, "devDependencies": { "openclaw": "workspace:*" diff --git a/extensions/zalouser/CHANGELOG.md b/extensions/zalouser/CHANGELOG.md index 43740b5a8..3e5adb0cf 100644 --- a/extensions/zalouser/CHANGELOG.md +++ b/extensions/zalouser/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 2026.2.6 + +### Changes + +- Version alignment with core OpenClaw release numbers. 
+ ## 2026.2.4 ### Changes diff --git a/extensions/zalouser/package.json b/extensions/zalouser/package.json index a3ded9a64..48e57a0fa 100644 --- a/extensions/zalouser/package.json +++ b/extensions/zalouser/package.json @@ -1,6 +1,6 @@ { "name": "@openclaw/zalouser", - "version": "2026.2.4", + "version": "2026.2.6", "description": "OpenClaw Zalo Personal Account plugin via zca-cli", "type": "module", "dependencies": { diff --git a/package.json b/package.json index 50c97adfe..55b84e223 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "openclaw", - "version": "2026.2.4", + "version": "2026.2.6", "description": "WhatsApp gateway CLI (Baileys web) with Pi RPC agent", "keywords": [], "license": "MIT", @@ -32,9 +32,11 @@ "android:install": "cd apps/android && ./gradlew :app:installDebug", "android:run": "cd apps/android && ./gradlew :app:installDebug && adb shell am start -n ai.openclaw.android/.MainActivity", "android:test": "cd apps/android && ./gradlew :app:testDebugUnitTest", - "build": "pnpm canvas:a2ui:bundle && tsdown && node --import tsx scripts/canvas-a2ui-copy.ts && node --import tsx scripts/copy-hook-metadata.ts && node --import tsx scripts/write-build-info.ts && node --import tsx scripts/write-cli-compat.ts", + "build": "pnpm canvas:a2ui:bundle && tsdown && pnpm build:plugin-sdk:dts && node --import tsx scripts/write-plugin-sdk-entry-dts.ts && node --import tsx scripts/canvas-a2ui-copy.ts && node --import tsx scripts/copy-hook-metadata.ts && node --import tsx scripts/write-build-info.ts && node --import tsx scripts/write-cli-compat.ts", + "build:plugin-sdk:dts": "tsc -p tsconfig.plugin-sdk.dts.json", "canvas:a2ui:bundle": "bash scripts/bundle-a2ui.sh", "check": "pnpm tsgo && pnpm lint && pnpm format", + "check:docs": "pnpm format:docs && pnpm lint:docs && pnpm docs:build", "check:loc": "node --import tsx scripts/check-ts-max-loc.ts --max 500", "dev": "node scripts/run-node.mjs", "docs:bin": "node scripts/build-docs-list.mjs", @@ -43,6 
+45,8 @@ "docs:list": "node scripts/docs-list.js", "format": "oxfmt --check", "format:all": "pnpm format && pnpm format:swift", + "format:docs": "git ls-files 'docs/**/*.md' 'docs/**/*.mdx' 'README.md' | xargs oxfmt --check", + "format:docs:fix": "git ls-files 'docs/**/*.md' 'docs/**/*.mdx' 'README.md' | xargs oxfmt --write", "format:fix": "oxfmt --write", "format:swift": "swiftformat --lint --config .swiftformat apps/macos/Sources apps/ios/Sources apps/shared/OpenClawKit/Sources", "gateway:dev": "OPENCLAW_SKIP_CHANNELS=1 CLAWDBOT_SKIP_CHANNELS=1 node scripts/run-node.mjs --dev gateway", @@ -54,6 +58,8 @@ "ios:run": "bash -lc 'cd apps/ios && xcodegen generate && xcodebuild -project OpenClaw.xcodeproj -scheme OpenClaw -destination \"${IOS_DEST:-platform=iOS Simulator,name=iPhone 17}\" -configuration Debug build && xcrun simctl boot \"${IOS_SIM:-iPhone 17}\" || true && xcrun simctl launch booted ai.openclaw.ios'", "lint": "oxlint --type-aware", "lint:all": "pnpm lint && pnpm lint:swift", + "lint:docs": "pnpm dlx markdownlint-cli2", + "lint:docs:fix": "pnpm dlx markdownlint-cli2 --fix", "lint:fix": "oxlint --type-aware --fix && pnpm format:fix", "lint:swift": "swiftlint lint --config .swiftlint.yml && (cd apps/ios && swiftlint lint --config .swiftlint.yml)", "mac:open": "open dist/OpenClaw.app", @@ -99,8 +105,8 @@ }, "dependencies": { "@agentclientprotocol/sdk": "0.14.1", - "@aws-sdk/client-bedrock": "^3.984.0", - "@buape/carbon": "0.14.0", + "@aws-sdk/client-bedrock": "^3.985.0", + "@buape/carbon": "0.0.0-beta-20260130162700", "@clack/prompts": "^1.0.0", "@grammyjs/runner": "^2.0.3", "@grammyjs/transformer-throttler": "^1.2.1", @@ -108,10 +114,10 @@ "@larksuiteoapi/node-sdk": "^1.58.0", "@line/bot-sdk": "^10.6.0", "@lydell/node-pty": "1.2.0-beta.3", - "@mariozechner/pi-agent-core": "0.52.6", - "@mariozechner/pi-ai": "0.52.6", - "@mariozechner/pi-coding-agent": "0.52.6", - "@mariozechner/pi-tui": "0.52.6", + "@mariozechner/pi-agent-core": "0.52.7", + 
"@mariozechner/pi-ai": "0.52.7", + "@mariozechner/pi-coding-agent": "0.52.7", + "@mariozechner/pi-tui": "0.52.7", "@mozilla/readability": "^0.6.0", "@sinclair/typebox": "0.34.48", "@slack/bolt": "^4.6.0", @@ -128,7 +134,7 @@ "express": "^5.2.1", "file-type": "^21.3.0", "grammy": "^1.39.3", - "hono": "4.11.7", + "hono": "4.11.8", "jiti": "^2.6.1", "json5": "^2.2.3", "jszip": "^3.10.1", @@ -138,7 +144,7 @@ "node-edge-tts": "^1.2.10", "osc-progress": "^0.3.0", "pdfjs-dist": "^5.4.624", - "playwright-core": "1.58.1", + "playwright-core": "1.58.2", "proper-lockfile": "^4.1.2", "qrcode-terminal": "^0.12.0", "sharp": "^0.34.5", @@ -146,7 +152,7 @@ "sqlite-vec": "0.1.7-alpha.2", "tar": "7.5.7", "tslog": "^4.10.2", - "undici": "^7.20.0", + "undici": "^7.21.0", "ws": "^8.19.0", "yaml": "^2.8.2", "zod": "^4.3.6" @@ -161,7 +167,7 @@ "@types/proper-lockfile": "^4.1.4", "@types/qrcode-terminal": "^0.12.2", "@types/ws": "^8.18.1", - "@typescript/native-preview": "7.0.0-dev.20260205.1", + "@typescript/native-preview": "7.0.0-dev.20260206.1", "@vitest/coverage-v8": "^4.0.18", "lit": "^3.3.2", "ollama": "^0.6.3", @@ -190,10 +196,10 @@ "overrides": { "fast-xml-parser": "5.3.4", "form-data": "2.5.4", - "@hono/node-server>hono": "4.11.7", - "hono": "4.11.7", + "@hono/node-server>hono": "4.11.8", + "hono": "4.11.8", "qs": "6.14.1", - "@sinclair/typebox": "0.34.47", + "@sinclair/typebox": "0.34.48", "tar": "7.5.7", "tough-cookie": "4.1.3" }, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 14b1a2346..a1e8d98ca 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -7,10 +7,10 @@ settings: overrides: fast-xml-parser: 5.3.4 form-data: 2.5.4 - '@hono/node-server>hono': 4.11.7 - hono: 4.11.7 + '@hono/node-server>hono': 4.11.8 + hono: 4.11.8 qs: 6.14.1 - '@sinclair/typebox': 0.34.47 + '@sinclair/typebox': 0.34.48 tar: 7.5.7 tough-cookie: 4.1.3 @@ -22,11 +22,11 @@ importers: specifier: 0.14.1 version: 0.14.1(zod@4.3.6) '@aws-sdk/client-bedrock': - specifier: ^3.984.0 - version: 3.984.0 + 
specifier: ^3.985.0 + version: 3.985.0 '@buape/carbon': - specifier: 0.14.0 - version: 0.14.0(hono@4.11.7) + specifier: 0.0.0-beta-20260130162700 + version: 0.0.0-beta-20260130162700(hono@4.11.8) '@clack/prompts': specifier: ^1.0.0 version: 1.0.0 @@ -49,17 +49,17 @@ importers: specifier: 1.2.0-beta.3 version: 1.2.0-beta.3 '@mariozechner/pi-agent-core': - specifier: 0.52.6 - version: 0.52.6(ws@8.19.0)(zod@4.3.6) + specifier: 0.52.7 + version: 0.52.7(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-ai': - specifier: 0.52.6 - version: 0.52.6(ws@8.19.0)(zod@4.3.6) + specifier: 0.52.7 + version: 0.52.7(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-coding-agent': - specifier: 0.52.6 - version: 0.52.6(ws@8.19.0)(zod@4.3.6) + specifier: 0.52.7 + version: 0.52.7(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-tui': - specifier: 0.52.6 - version: 0.52.6 + specifier: 0.52.7 + version: 0.52.7 '@mozilla/readability': specifier: ^0.6.0 version: 0.6.0 @@ -67,8 +67,8 @@ importers: specifier: ^0.1.89 version: 0.1.89 '@sinclair/typebox': - specifier: 0.34.47 - version: 0.34.47 + specifier: 0.34.48 + version: 0.34.48 '@slack/bolt': specifier: ^4.6.0 version: 4.6.0(@types/express@5.0.6) @@ -112,8 +112,8 @@ importers: specifier: ^1.39.3 version: 1.39.3 hono: - specifier: 4.11.7 - version: 4.11.7 + specifier: 4.11.8 + version: 4.11.8 jiti: specifier: ^2.6.1 version: 2.6.1 @@ -145,8 +145,8 @@ importers: specifier: ^5.4.624 version: 5.4.624 playwright-core: - specifier: 1.58.1 - version: 1.58.1 + specifier: 1.58.2 + version: 1.58.2 proper-lockfile: specifier: ^4.1.2 version: 4.1.2 @@ -169,8 +169,8 @@ importers: specifier: ^4.10.2 version: 4.10.2 undici: - specifier: ^7.20.0 - version: 7.20.0 + specifier: ^7.21.0 + version: 7.21.0 ws: specifier: ^8.19.0 version: 8.19.0 @@ -209,8 +209,8 @@ importers: specifier: ^8.18.1 version: 8.18.1 '@typescript/native-preview': - specifier: 7.0.0-dev.20260205.1 - version: 7.0.0-dev.20260205.1 + specifier: 7.0.0-dev.20260206.1 + version: 7.0.0-dev.20260206.1 
'@vitest/coverage-v8': specifier: ^4.0.18 version: 4.0.18(@vitest/browser@4.0.18(vite@7.3.1(@types/node@25.2.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18))(vitest@4.0.18) @@ -234,7 +234,7 @@ importers: version: 1.0.0-rc.3 tsdown: specifier: ^0.20.3 - version: 0.20.3(@typescript/native-preview@7.0.0-dev.20260205.1)(typescript@5.9.3) + version: 0.20.3(@typescript/native-preview@7.0.0-dev.20260206.1)(typescript@5.9.3) tsx: specifier: ^4.21.0 version: 4.21.0 @@ -306,11 +306,11 @@ importers: extensions/feishu: dependencies: '@larksuiteoapi/node-sdk': - specifier: ^1.56.1 + specifier: ^1.58.0 version: 1.58.0 '@sinclair/typebox': - specifier: 0.34.47 - version: 0.34.47 + specifier: 0.34.48 + version: 0.34.48 zod: specifier: ^4.3.6 version: 4.3.6 @@ -402,11 +402,11 @@ importers: extensions/memory-lancedb: dependencies: '@lancedb/lancedb': - specifier: ^0.23.0 - version: 0.23.0(apache-arrow@18.1.0) + specifier: ^0.24.1 + version: 0.24.1(apache-arrow@18.1.0) '@sinclair/typebox': - specifier: 0.34.47 - version: 0.34.47 + specifier: 0.34.48 + version: 0.34.48 openai: specifier: ^6.18.0 version: 6.18.0(ws@8.19.0)(zod@4.3.6) @@ -519,8 +519,8 @@ importers: extensions/voice-call: dependencies: '@sinclair/typebox': - specifier: 0.34.47 - version: 0.34.47 + specifier: 0.34.48 + version: 0.34.48 ws: specifier: ^8.19.0 version: 8.19.0 @@ -544,14 +544,14 @@ importers: specifier: workspace:* version: link:../.. undici: - specifier: 7.20.0 - version: 7.20.0 + specifier: 7.21.0 + version: 7.21.0 extensions/zalouser: dependencies: '@sinclair/typebox': - specifier: 0.34.47 - version: 0.34.47 + specifier: 0.34.48 + version: 0.34.48 openclaw: specifier: workspace:* version: link:../.. 
@@ -588,10 +588,10 @@ importers: devDependencies: '@vitest/browser-playwright': specifier: 4.0.18 - version: 4.0.18(playwright@1.58.1)(vite@7.3.1(@types/node@25.2.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + version: 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.2.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) playwright: - specifier: ^1.58.1 - version: 1.58.1 + specifier: ^1.58.2 + version: 1.58.2 vitest: specifier: 4.0.18 version: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.2.1)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) @@ -629,52 +629,52 @@ packages: '@aws-crypto/util@5.2.0': resolution: {integrity: sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==} - '@aws-sdk/client-bedrock-runtime@3.984.0': - resolution: {integrity: sha512-iFrdkDXdo+ELZ5qD8ZYw9MHoOhcXyVutO8z7csnYpJO0rbET/X6B8cQlOCMsqJHxkyMwW21J4vt9S5k2/FgPCg==} + '@aws-sdk/client-bedrock-runtime@3.985.0': + resolution: {integrity: sha512-jkQ+G+b/6Z6gUsn8jNSjJsFVgxnA4HtyOjrpHfmp8nHWLRFTOIw3HfY2vAlDgg/uUJ7cezVG0/tmbwujFqX25A==} engines: {node: '>=20.0.0'} - '@aws-sdk/client-bedrock@3.984.0': - resolution: {integrity: sha512-thcdcQhHWEtDAePgN9snjCwInNvaDGMF4H9YoCfM/wxG8G9XHunaWuWj/n48XO+5tOh936IPgN4GujovTx5myg==} + '@aws-sdk/client-bedrock@3.985.0': + resolution: {integrity: sha512-f2+AnyRQzb0GPwkKsE2lWTchNwnuysYs6GVN1k0PV1w3irFh/m0Hz125LXC6jdogHwzLqQxGHqwiZzVxhF5CvA==} engines: {node: '>=20.0.0'} - '@aws-sdk/client-sso@3.982.0': - resolution: {integrity: sha512-qJrIiivmvujdGqJ0ldSUvhN3k3N7GtPesoOI1BSt0fNXovVnMz4C/JmnkhZihU7hJhDvxJaBROLYTU+lpild4w==} + '@aws-sdk/client-sso@3.985.0': + resolution: {integrity: sha512-81J8iE8MuXhdbMfIz4sWFj64Pe41bFi/uqqmqOC5SlGv+kwoyLsyKS/rH2tW2t5buih4vTUxskRjxlqikTD4oQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/core@3.973.6': - resolution: {integrity: 
sha512-pz4ZOw3BLG0NdF25HoB9ymSYyPbMiIjwQJ2aROXRhAzt+b+EOxStfFv8s5iZyP6Kiw7aYhyWxj5G3NhmkoOTKw==} + '@aws-sdk/core@3.973.7': + resolution: {integrity: sha512-wNZZQQNlJ+hzD49cKdo+PY6rsTDElO8yDImnrI69p2PLBa7QomeUKAJWYp9xnaR38nlHqWhMHZuYLCQ3oSX+xg==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-env@3.972.4': - resolution: {integrity: sha512-/8dnc7+XNMmViEom2xsNdArQxQPSgy4Z/lm6qaFPTrMFesT1bV3PsBhb19n09nmxHdrtQskYmViddUIjUQElXg==} + '@aws-sdk/credential-provider-env@3.972.5': + resolution: {integrity: sha512-LxJ9PEO4gKPXzkufvIESUysykPIdrV7+Ocb9yAhbhJLE4TiAYqbCVUE+VuKP1leGR1bBfjWjYgSV5MxprlX3mQ==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-http@3.972.6': - resolution: {integrity: sha512-5ERWqRljiZv44AIdvIRQ3k+EAV0Sq2WeJHvXuK7gL7bovSxOf8Al7MLH7Eh3rdovH4KHFnlIty7J71mzvQBl5Q==} + '@aws-sdk/credential-provider-http@3.972.7': + resolution: {integrity: sha512-L2uOGtvp2x3bTcxFTpSM+GkwFIPd8pHfGWO1764icMbo7e5xJh0nfhx1UwkXLnwvocTNEf8A7jISZLYjUSNaTg==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-ini@3.972.4': - resolution: {integrity: sha512-eRUg+3HaUKuXWn/lEMirdiA5HOKmEl8hEHVuszIDt2MMBUKgVX5XNGmb3XmbgU17h6DZ+RtjbxQpjhz3SbTjZg==} + '@aws-sdk/credential-provider-ini@3.972.5': + resolution: {integrity: sha512-SdDTYE6jkARzOeL7+kudMIM4DaFnP5dZVeatzw849k4bSXDdErDS188bgeNzc/RA2WGrlEpsqHUKP6G7sVXhZg==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-login@3.972.4': - resolution: {integrity: sha512-nLGjXuvWWDlQAp505xIONI7Gam0vw2p7Qu3P6on/W2q7rjJXtYjtpHbcsaOjJ/pAju3eTvEQuSuRedcRHVQIAQ==} + '@aws-sdk/credential-provider-login@3.972.5': + resolution: {integrity: sha512-uYq1ILyTSI6ZDCMY5+vUsRM0SOCVI7kaW4wBrehVVkhAxC6y+e9rvGtnoZqCOWL1gKjTMouvsf4Ilhc5NCg1Aw==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-node@3.972.5': - resolution: {integrity: sha512-VWXKgSISQCI2GKN3zakTNHSiZ0+mux7v6YHmmbLQp/o3fvYUQJmKGcLZZzg2GFA+tGGBStplra9VFNf/WwxpYg==} + '@aws-sdk/credential-provider-node@3.972.6': + resolution: {integrity: 
sha512-DZ3CnAAtSVtVz+G+ogqecaErMLgzph4JH5nYbHoBMgBkwTUV+SUcjsjOJwdBJTHu3Dm6l5LBYekZoU2nDqQk2A==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-process@3.972.4': - resolution: {integrity: sha512-TCZpWUnBQN1YPk6grvd5x419OfXjHvhj5Oj44GYb84dOVChpg/+2VoEj+YVA4F4E/6huQPNnX7UYbTtxJqgihw==} + '@aws-sdk/credential-provider-process@3.972.5': + resolution: {integrity: sha512-HDKF3mVbLnuqGg6dMnzBf1VUOywE12/N286msI9YaK9mEIzdsGCtLTvrDhe3Up0R9/hGFbB+9l21/TwF5L1C6g==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-sso@3.972.4': - resolution: {integrity: sha512-wzsGwv9mKlwJ3vHLyembBvGE/5nPUIwRR2I51B1cBV4Cb4ql9nIIfpmHzm050XYTY5fqTOKJQnhLj7zj89VG8g==} + '@aws-sdk/credential-provider-sso@3.972.5': + resolution: {integrity: sha512-8urj3AoeNeQisjMmMBhFeiY2gxt6/7wQQbEGun0YV/OaOOiXrIudTIEYF8ZfD+NQI6X1FY5AkRsx6O/CaGiybA==} engines: {node: '>=20.0.0'} - '@aws-sdk/credential-provider-web-identity@3.972.4': - resolution: {integrity: sha512-hIzw2XzrG8jzsUSEatehmpkd5rWzASg5IHUfA+m01k/RtvfAML7ZJVVohuKdhAYx+wV2AThLiQJVzqn7F0khrw==} + '@aws-sdk/credential-provider-web-identity@3.972.5': + resolution: {integrity: sha512-OK3cULuJl6c+RcDZfPpaK5o3deTOnKZbxm7pzhFNGA3fI2hF9yDih17fGRazJzGGWaDVlR9ejZrpDef4DJCEsw==} engines: {node: '>=20.0.0'} '@aws-sdk/eventstream-handler-node@3.972.5': @@ -697,44 +697,32 @@ packages: resolution: {integrity: sha512-PY57QhzNuXHnwbJgbWYTrqIDHYSeOlhfYERTAuc16LKZpTZRJUjzBFokp9hF7u1fuGeE3D70ERXzdbMBOqQz7Q==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-user-agent@3.972.6': - resolution: {integrity: sha512-TehLN8W/kivl0U9HcS+keryElEWORROpghDXZBLfnb40DXM7hx/i+7OOjkogXQOF3QtUraJVRkHQ07bPhrWKlw==} + '@aws-sdk/middleware-user-agent@3.972.7': + resolution: {integrity: sha512-HUD+geASjXSCyL/DHPQc/Ua7JhldTcIglVAoCV8kiVm99IaFSlAbTvEnyhZwdE6bdFyTL+uIaWLaCFSRsglZBQ==} engines: {node: '>=20.0.0'} '@aws-sdk/middleware-websocket@3.972.5': resolution: {integrity: sha512-BN4A9K71WRIlpQ3+IYGdBC2wVyobZ95g6ZomodmJ8Te772GWo0iDk2Mv6JIHdr842tOTgi1b3npLIFDUS4hl4g==} 
engines: {node: '>= 14.0.0'} - '@aws-sdk/nested-clients@3.982.0': - resolution: {integrity: sha512-VVkaH27digrJfdVrT64rjkllvOp4oRiZuuJvrylLXAKl18ujToJR7AqpDldL/LS63RVne3QWIpkygIymxFtliQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/nested-clients@3.984.0': - resolution: {integrity: sha512-E9Os+U9NWFoEJXbTVT8sCi+HMnzmsMA8cuCkvlUUfin/oWewUTnCkB/OwFwiUQ2N7v1oBk+i4ZSsI1PiuOy8/w==} + '@aws-sdk/nested-clients@3.985.0': + resolution: {integrity: sha512-TsWwKzb/2WHafAY0CE7uXgLj0FmnkBTgfioG9HO+7z/zCPcl1+YU+i7dW4o0y+aFxFgxTMG+ExBQpqT/k2ao8g==} engines: {node: '>=20.0.0'} '@aws-sdk/region-config-resolver@3.972.3': resolution: {integrity: sha512-v4J8qYAWfOMcZ4MJUyatntOicTzEMaU7j3OpkRCGGFSL2NgXQ5VbxauIyORA+pxdKZ0qQG2tCQjQjZDlXEC3Ow==} engines: {node: '>=20.0.0'} - '@aws-sdk/token-providers@3.982.0': - resolution: {integrity: sha512-v3M0KYp2TVHYHNBT7jHD9lLTWAdS9CaWJ2jboRKt0WAB65bA7iUEpR+k4VqKYtpQN4+8kKSc4w+K6kUNZkHKQw==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/token-providers@3.984.0': - resolution: {integrity: sha512-UJ/+OzZv+4nAQ1bSspCSb4JlYbMB2Adn8CK7hySpKX5sjhRu1bm6w1PqQq59U67LZEKsPdhl1rzcZ7ybK8YQxw==} + '@aws-sdk/token-providers@3.985.0': + resolution: {integrity: sha512-+hwpHZyEq8k+9JL2PkE60V93v2kNhUIv7STFt+EAez1UJsJOQDhc5LpzEX66pNjclI5OTwBROs/DhJjC/BtMjQ==} engines: {node: '>=20.0.0'} '@aws-sdk/types@3.973.1': resolution: {integrity: sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg==} engines: {node: '>=20.0.0'} - '@aws-sdk/util-endpoints@3.982.0': - resolution: {integrity: sha512-M27u8FJP7O0Of9hMWX5dipp//8iglmV9jr7R8SR8RveU+Z50/8TqH68Tu6wUWBGMfXjzbVwn1INIAO5lZrlxXQ==} - engines: {node: '>=20.0.0'} - - '@aws-sdk/util-endpoints@3.984.0': - resolution: {integrity: sha512-9ebjLA0hMKHeVvXEtTDCCOBtwjb0bOXiuUV06HNeVdgAjH6gj4x4Zwt4IBti83TiyTGOCl5YfZqGx4ehVsasbQ==} + '@aws-sdk/util-endpoints@3.985.0': + resolution: {integrity: sha512-vth7UfGSUR3ljvaq8V4Rc62FsM7GUTH/myxPWkaEgOrprz1/Pc72EgTXxj+cPPPDAfHFIpjhkB7T7Td0RJx+BA==} engines: 
{node: '>=20.0.0'} '@aws-sdk/util-format-url@3.972.3': @@ -748,8 +736,8 @@ packages: '@aws-sdk/util-user-agent-browser@3.972.3': resolution: {integrity: sha512-JurOwkRUcXD/5MTDBcqdyQ9eVedtAsZgw5rBwktsPTN7QtPiS2Ld1jkJepNgYoCufz1Wcut9iup7GJDoIHp8Fw==} - '@aws-sdk/util-user-agent-node@3.972.4': - resolution: {integrity: sha512-3WFCBLiM8QiHDfosQq3Py+lIMgWlFWwFQliUHUqwEiRqLnKyhgbU3AKa7AWJF7lW2Oc/2kFNY4MlAYVnVc0i8A==} + '@aws-sdk/util-user-agent-node@3.972.5': + resolution: {integrity: sha512-GsUDF+rXyxDZkkJxUsDxnA67FG+kc5W1dnloCFLl6fWzceevsCYzJpASBzT+BPjwUgREE6FngfJYYYMQUY5fZQ==} engines: {node: '>=20.0.0'} peerDependencies: aws-crt: '>=1.0.0' @@ -834,8 +822,8 @@ packages: '@borewit/text-codec@0.2.1': resolution: {integrity: sha512-k7vvKPbf7J2fZ5klGRD9AeKfUvojuZIQ3BT5u7Jfv+puwXkUBUT5PVyMDfJZpy30CBDXGMgw7fguK/lpOMBvgw==} - '@buape/carbon@0.14.0': - resolution: {integrity: sha512-mavllPK2iVpRNRtC4C8JOUdJ1hdV0+LDelFW+pjpJaM31MBLMfIJ+f/LlYTIK5QrEcQsXOC+6lU2e0gmgjWhIQ==} + '@buape/carbon@0.0.0-beta-20260130162700': + resolution: {integrity: sha512-Z3gw1BCrLJHESoSv/4+JMao0+fnhAhCFRrJbVWOGI70uYmzLIwmHwLfSQ8ld3XLGg5Q6gZ1rvWeE+2PeHM1MjA==} '@cacheable/memory@2.0.7': resolution: {integrity: sha512-RbxnxAMf89Tp1dLhXMS7ceft/PGsDl1Ip7T20z5nZ+pwIAsQ1p2izPjVG69oCLv/jfQ7HDPHTWK0c9rcAWXN3A==} @@ -844,8 +832,8 @@ packages: resolution: {integrity: sha512-6Omk2SgNnjtxB5f/E6bTIWIt5xhdpx39fGNRQgU9lojvRxU68v+qY+SXXLsp3ZGukqoPjsK21wZ6XABFr/Ge3A==} engines: {node: '>=18'} - '@cacheable/utils@2.3.3': - resolution: {integrity: sha512-JsXDL70gQ+1Vc2W/KUFfkAJzgb4puKwwKehNLuB+HrNKWf91O736kGfxn4KujXCCSuh6mRRL4XEB0PkAFjWS0A==} + '@cacheable/utils@2.3.4': + resolution: {integrity: sha512-knwKUJEYgIfwShABS1BX6JyJJTglAFcEU7EXqzTdiGCXur4voqkiJkdgZIQtWNFhynzDWERcTYv/sETMu3uJWA==} '@clack/core@1.0.0': resolution: {integrity: sha512-Orf9Ltr5NeiEuVJS8Rk2XTw3IxNC2Bic3ash7GgYeA8LJ/zmSNpSQ/m5UAhe03lA6KFgklzZ5KTHs4OAMA/SAQ==} @@ -1108,7 +1096,7 @@ packages: resolution: {integrity: 
sha512-vHL6w3ecZsky+8P5MD+eFfaGTyCeOHUIFYMGpQGbrBTSmNNoxv0if69rEZ5giu36weC5saFuznL411gRX7bJDw==} engines: {node: '>=18.14.1'} peerDependencies: - hono: 4.11.7 + hono: 4.11.8 '@huggingface/jinja@0.5.4': resolution: {integrity: sha512-VoQJywjpjy2D88Oj0BTHRuS8JCbUgoOg5t1UGgbtGh2fRia9Dx/k6Wf8FqrEWIvWK9fAkfJeeLB9fcSpCNPCpw==} @@ -1298,50 +1286,44 @@ packages: '@kwsites/promise-deferred@1.1.1': resolution: {integrity: sha512-GaHYm+c0O9MjZRu0ongGBRbinu8gVAMd2UZjji6jVmqKtZluZnptXGWhz1E8j8D2HJ3f/yMxKAUC0b+57wncIw==} - '@lancedb/lancedb-darwin-arm64@0.23.0': - resolution: {integrity: sha512-8w0sMCNMwBv2kv5+fczGeSVlNOL+BOKChSsO4usM0hMw3PmxasONPctQBsESDuPS8lQ6/AKAQc2HT/ddd5Mg5w==} - engines: {node: '>= 18'} - cpu: [arm64] - os: [darwin] - - '@lancedb/lancedb-linux-arm64-gnu@0.23.0': - resolution: {integrity: sha512-+xse2IspO7hbuHT4H62q8Ct00fTojnuBxXp1X1I3/27dDvW8E+/itFiJuTZ0YMaJc7nNr9qh9YFXZ9hZdEmReg==} + '@lancedb/lancedb-linux-arm64-gnu@0.24.1': + resolution: {integrity: sha512-68T+PVou6NmmNlBpJBXrpa1ITM9Wu/LZ4o1kTi9Kn0TCulb/JhtAGhcmM0gFt4GUTsZQAO9kcDuWN8Mya9lQsw==} engines: {node: '>= 18'} cpu: [arm64] os: [linux] - '@lancedb/lancedb-linux-arm64-musl@0.23.0': - resolution: {integrity: sha512-c2UCtGoYjA3oDdw5y3RLK7J2th3rSjYBng+1I03vU9g092y8KATAJO/lV2AtyxSC+esSuyY1dMEaj8ADcXjZAA==} + '@lancedb/lancedb-linux-arm64-musl@0.24.1': + resolution: {integrity: sha512-9ZFJYDroNTlIJcI8DU8w8yntNK1+MmNGT0s3NcDECqK0+9Mmt+3TV7GJi5zInB2UJTq5vklMgkGu2tHCUV+GmA==} engines: {node: '>= 18'} cpu: [arm64] os: [linux] - '@lancedb/lancedb-linux-x64-gnu@0.23.0': - resolution: {integrity: sha512-OPL7tK3JCTx43ZxvbVs+CljfCer0KrojANQbcJ2V4VAp6XBhKx1sBAlIVGuCrd93pA8UOUP3iHsM7aglPo6rCg==} + '@lancedb/lancedb-linux-x64-gnu@0.24.1': + resolution: {integrity: sha512-5rN3DglPY0JyxmVYh7i31sDTie6VtDSD3pK8RrrevEXCFEC70wbtZ0rntF3yS4uh6iuDnh698EQIDKrwZ6tYcg==} engines: {node: '>= 18'} cpu: [x64] os: [linux] - '@lancedb/lancedb-linux-x64-musl@0.23.0': - resolution: {integrity: 
sha512-1ZEoQDwOrKvwPyAG+95/r1NYqX8Ca5bRek8Vr62CzWCEmHd/pFeEGWZ5STrkh+Bt3GLdi2JOivFtRbmuBAJypQ==} + '@lancedb/lancedb-linux-x64-musl@0.24.1': + resolution: {integrity: sha512-IPhYaw2p/OSXcPXdu2PNjJ5O0ZcjfhVGtqMwrsmjV2GmTdt3HOpENWR1KMA5OnKMH3ZbS/e6Q4kTb9MUuV+E3A==} engines: {node: '>= 18'} cpu: [x64] os: [linux] - '@lancedb/lancedb-win32-arm64-msvc@0.23.0': - resolution: {integrity: sha512-OuD1mkrgXvijRlXdbx3LvfuorO04FD5qHegnTOWGXh1sIwwrvvhcJAvXUGBNLY4n/lsWvA+xTjtMwRjUitvPKg==} + '@lancedb/lancedb-win32-arm64-msvc@0.24.1': + resolution: {integrity: sha512-lRD1Srul8mnv+tQKC5ncgq5Q2VRQtDhvRPVFR3zYbaZQN9cn5uaYusQxhrJ6ZeObzFj+TTZCRe8l/rIP9tIHBg==} engines: {node: '>= 18'} cpu: [arm64] os: [win32] - '@lancedb/lancedb-win32-x64-msvc@0.23.0': - resolution: {integrity: sha512-5ve1hvVtp8zWxSE9A+MOQaicXl2Rn0ZG/NUaMTjTD3/CQHPKFmtrqDnM5khoPICTj2O2b10F6mn4cUzl5PASgA==} + '@lancedb/lancedb-win32-x64-msvc@0.24.1': + resolution: {integrity: sha512-rrngZ05GRfNGZsMMlppnN3ayP8NNZleyoHW5yMbocmL1vZPChiU7W4OM211snbrr/qJ1F72qrExcdnQ/4xMaxg==} engines: {node: '>= 18'} cpu: [x64] os: [win32] - '@lancedb/lancedb@0.23.0': - resolution: {integrity: sha512-aYrIoEG24AC+wILCL57Ius/Y4yU+xFHDPKLvmjzzN4byAjzeIGF0TC86S5RBt4Ji+dxS7yIWV5Q/gE5/fybIFQ==} + '@lancedb/lancedb@0.24.1': + resolution: {integrity: sha512-uHQePFHlZMZg/lD4m/0dA01u47G309C8QCLxCVt6zlCRDjUtXUEpV09sMu+ujVfsYYI2SdBbAyDbbI9Mn6eK0w==} engines: {node: '>= 18'} cpu: [x64, arm64] os: [darwin, linux, win32] @@ -1467,22 +1449,22 @@ packages: resolution: {integrity: sha512-faGUlTcXka5l7rv0lP3K3vGW/ejRuOS24RR2aSFWREUQqzjgdsuWNo/IiPqL3kWRGt6Ahl2+qcDAwtdeWeuGUw==} hasBin: true - '@mariozechner/pi-agent-core@0.52.6': - resolution: {integrity: sha512-jeCjq8tAFCcz+yErcxd/0vUGZ0HDhpFvnv8qgQnP3nF9eNINvtHahAVeG/IVR0N4iyAdiXJJSNoVJ+w3zZrQRA==} + '@mariozechner/pi-agent-core@0.52.7': + resolution: {integrity: sha512-zthFSKW7aha7R9jKktDWt+pD5qeK0cT1TI6Ge/lqUDsPbjXj/vkyh1/BLJa8KtfKQzJaC0IXtWhUO2LQzyKwsw==} engines: {node: '>=20.0.0'} - 
'@mariozechner/pi-ai@0.52.6': - resolution: {integrity: sha512-4oqhoFvYh5GQI8TzxhrXs3tXLOAw+/VvqEQRDJzo0k7Rye0ONWOLcaHAUSfBtOTn15gMUh6m+SjtWXmKVisdBg==} + '@mariozechner/pi-ai@0.52.7': + resolution: {integrity: sha512-kr3isYX1wVxHaKok1Sa6Jbx9TgVp+Vp24LrVxUtQRXGMq6IjB5/RLLF61XT8pgGLBPhs/8esQbO/Av3l2MJibA==} engines: {node: '>=20.0.0'} hasBin: true - '@mariozechner/pi-coding-agent@0.52.6': - resolution: {integrity: sha512-4OSe6o+Fxol/q9tYx6qZanG4V/hPoWggWd9PETrn/V4juJRP5d3fujms9AetoTnM39jI6sUta98eT2iH3X5njA==} + '@mariozechner/pi-coding-agent@0.52.7': + resolution: {integrity: sha512-C2O7zzpkC0SMAFlB/n92lT8N2gM7VAy/vlMZYXrreqZGrgeV6DjOuvYn9364K7+xREo/N7bJsjqMohrvxoKBcw==} engines: {node: '>=20.0.0'} hasBin: true - '@mariozechner/pi-tui@0.52.6': - resolution: {integrity: sha512-cLCSgkoJv25nll72YB+/f7ZDJL7Ttrs+HwxFLWYegxKq2h+4waxLIbZTiSn0QONSjIMg5SMRj3iOBAO/oJ9xow==} + '@mariozechner/pi-tui@0.52.7': + resolution: {integrity: sha512-wS9zw4lvUaVU8jAGdk4C2KN/AwEsESrguUGNpZs7g9PD8iDBE9gnXtMvtny4PDbjOk0mZ5D0CEUgMzl/ZhqH8w==} engines: {node: '>=20.0.0'} '@matrix-org/matrix-sdk-crypto-nodejs@0.4.0': @@ -2451,8 +2433,8 @@ packages: '@silvia-odwyer/photon-node@0.3.4': resolution: {integrity: sha512-bnly4BKB3KDTFxrUIcgCLbaeVVS8lrAkri1pEzskpmxu9MdfGQTy8b8EgcD83ywD3RPMsIulY8xJH5Awa+t9fA==} - '@sinclair/typebox@0.34.47': - resolution: {integrity: sha512-ZGIBQ+XDvO5JQku9wmwtabcVTHJsgSWAHYtVuM9pBNNR5E88v6Jcj/llpmsjivig5X8A8HHOb4/mbEKPS5EvAw==} + '@sinclair/typebox@0.34.48': + resolution: {integrity: sha512-kKJTNuK3AQOrgjjotVxMrCn1sUJwM76wMszfq1kdU4uYVJjvEWuFQ6HgvLt4Xz3fSmZlTOxJ/Ie13KnIcWQXFA==} '@slack/bolt@4.6.0': resolution: {integrity: sha512-xPgfUs2+OXSugz54Ky07pA890+Qydk22SYToi8uGpXeHSt1JWwFJkRyd/9Vlg5I1AdfdpGXExDpwnbuN9Q/2dQ==} @@ -2848,43 +2830,43 @@ packages: '@types/ws@8.18.1': resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==} - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260205.1': - resolution: 
{integrity: sha512-ULATKP9a26qh8vcmP4qPz8UugGKIwhQPKi3NhvlbTPwhl3fMd3GJd9/B9LJSHw7lIuELQGZxhSlDq9l0FMb/FQ==} + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260206.1': + resolution: {integrity: sha512-C45zT/4VU6Wk61aisaa+EzY4Sqvd4newgkD7GNOj/alprtpuUBr9tKFGFMrFVd/oANTcZS/NHGW6QJfmi+LS3A==} cpu: [arm64] os: [darwin] - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260205.1': - resolution: {integrity: sha512-moaKDZHK2dbgcHCnxcwhH8kYRgY69wzPcH5hCNaSrmpbC+Garr78oLtyXot2EDotRDT9foeYsWKdmD6Hx/ypxg==} + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260206.1': + resolution: {integrity: sha512-nPCkbgeSYjVarfBcgdZMzG4oiM9CQSinYFu5PLL66X9N+R5dwhynw5V5ZpT+i6ax5v63pTH5e5U99iwmJzSN8w==} cpu: [x64] os: [darwin] - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260205.1': - resolution: {integrity: sha512-Wfp2bPmrTLb+dpp2bHDjMqMKGjQ9dp5KSw0jV4LSlbgcVvRSEWqs2ByVVj61Z4qiHgwlVyoPTewdan2CWnoBgQ==} + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260206.1': + resolution: {integrity: sha512-zK+jrh3paRCfathNDb1bt0MWzfBxuFANmPoxyvK7610Gykv1P78VIAIN3Blbc9O1ZMxR4fuBIHz963kcRhZvCw==} cpu: [arm64] os: [linux] - '@typescript/native-preview-linux-arm@7.0.0-dev.20260205.1': - resolution: {integrity: sha512-3qfjUQlYCkwQmbpIeXMw75bLXkCI3Uo88Ug1n9p4j6KFaek5TjnHOTmlO6V3pkyH9pEXQEVXTn0pXzQytxqEqw==} + '@typescript/native-preview-linux-arm@7.0.0-dev.20260206.1': + resolution: {integrity: sha512-kDLszfVMQcfT8pFG7LTbE+pVePrhV2X0Bz0Tx0Hn+dQFWACrfDMYlLgtW1w7RFjIUM5F1hwnbnqTDI/1hV4W6Q==} cpu: [arm] os: [linux] - '@typescript/native-preview-linux-x64@7.0.0-dev.20260205.1': - resolution: {integrity: sha512-p59oY35gvvmdy/iZYxdbFAUXusb7joX2i1Nwl15i4TOn52NcIcW3wb9U/uBrIXKev5VEdlH6BS6VA6dM57zD6w==} + '@typescript/native-preview-linux-x64@7.0.0-dev.20260206.1': + resolution: {integrity: sha512-Uk7d+OsrHkVt6L/wARY0RuN73iyNJmyGjegkeu5m190cGwpJq/eqgQgy2Kt6FAylKQhjIXgfLgIvPgAHPab1Lw==} cpu: [x64] os: [linux] - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260205.1': - resolution: 
{integrity: sha512-+NQTlmvtZEXwIlw8j+tvAAn1gLDqyWJEjnA5vmT9MoJuEBrxvuS8azn/q26MOp/w8bWfxe3haVyB+L4VurCF6w==} + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260206.1': + resolution: {integrity: sha512-RQkM/jXUA5dVyBjxBRsgSmY9dfJXgC5FUTs9srBw+ZYdX1ARMQyuAxApwTQhhM1rDmjT2lFvpnc1/VZ33wSOEw==} cpu: [arm64] os: [win32] - '@typescript/native-preview-win32-x64@7.0.0-dev.20260205.1': - resolution: {integrity: sha512-kRa4kaiORAWQx9sHylewUhKsNxz3dRBy6AM/U02UebJRlt6c+JnSjIxAFP+iNQaRpoYNs8UdKKGPrHc7Q0oYow==} + '@typescript/native-preview-win32-x64@7.0.0-dev.20260206.1': + resolution: {integrity: sha512-wnVqQEJSvYzqG3tYXFK93nqBWxNCSoKxQrnt5BLwn0iScPmUOfLgHf61dLr5sOG8fqUjkhLFH/gW+DfePclEfw==} cpu: [x64] os: [win32] - '@typescript/native-preview@7.0.0-dev.20260205.1': - resolution: {integrity: sha512-eSgzYCbdCXP/E0XL53yIMZNLoY3z1xMOgGyjstVLgUCMLv1yNrFvkhKhHFjM84OTY/LxqRb6ACtvjFO/oSZzvQ==} + '@typescript/native-preview@7.0.0-dev.20260206.1': + resolution: {integrity: sha512-863vBkK6A63Xa4P0839GqndGrGDtH4g8I6TQ4mGVJofSyOpPKTMeTrQZ/nyOEn4kvCLuGn4d3rf20Tn1U2wU7g==} hasBin: true '@typespec/ts-http-runtime@0.3.3': @@ -3743,8 +3725,8 @@ packages: resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} engines: {node: '>= 0.4'} - get-tsconfig@4.13.3: - resolution: {integrity: sha512-vp8Cj/+9Q/ibZUrq1rhy8mCTQpCk31A3uu9wc1C50yAb3x2pFHOsGdAZQ7jD86ARayyxZUViYeIztW+GE8dcrg==} + get-tsconfig@4.13.6: + resolution: {integrity: sha512-shZT/QMiSHc/YBLxxOkMtgSid5HFoauqCE3/exfsEcwg1WkeqjG+V40yBbBrsD+jW2HDXcs28xOfcbm2jI8Ddw==} get-uri@6.0.5: resolution: {integrity: sha512-b1O07XYq8eRuVzBNgJLstU6FYc1tS6wnMtF1I1D9lE8LxZSOGZ7LhxN54yPP6mGw5f2CkXY2BQUL9Fx41qvcIg==} @@ -3830,8 +3812,8 @@ packages: highlight.js@10.7.3: resolution: {integrity: sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==} - hono@4.11.7: - resolution: {integrity: 
sha512-l7qMiNee7t82bH3SeyUCt9UF15EVmaBvsppY2zQtrbIhl/yzBTny+YUxsVjSjQ6gaqaeVtZmGocom8TzBlA4Yw==} + hono@4.11.8: + resolution: {integrity: sha512-eVkB/CYCCei7K2WElZW9yYQFWssG0DhaDhVvr7wy5jJ22K+ck8fWW0EsLpB0sITUTvPnc97+rrbQqIr5iqiy9Q==} engines: {node: '>=16.9.0'} hookable@6.0.1: @@ -4712,13 +4694,13 @@ packages: resolution: {integrity: sha512-1wrVzJ2STrpmONHKBy228LM1b84msXDUoAzVEl0R8Mz4Ce6EPr+IVtxm8+yvrqLYMHswREkjYFaMxnyGnaY3Ng==} hasBin: true - playwright-core@1.58.1: - resolution: {integrity: sha512-bcWzOaTxcW+VOOGBCQgnaKToLJ65d6AqfLVKEWvexyS3AS6rbXl+xdpYRMGSRBClPvyj44njOWoxjNdL/H9UNg==} + playwright-core@1.58.2: + resolution: {integrity: sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==} engines: {node: '>=18'} hasBin: true - playwright@1.58.1: - resolution: {integrity: sha512-+2uTZHxSCcxjvGc5C891LrS1/NlxglGxzrC4seZiVjcYVQfUa87wBL6rTDqzGjuoWNjnBzRqKmF6zRYGMvQUaQ==} + playwright@1.58.2: + resolution: {integrity: sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A==} engines: {node: '>=18'} hasBin: true @@ -5356,8 +5338,8 @@ packages: undici-types@7.16.0: resolution: {integrity: sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==} - undici@7.20.0: - resolution: {integrity: sha512-MJZrkjyd7DeC+uPZh+5/YaMDxFiiEEaDgbUSVMXayofAkDWF1088CDo+2RPg7B1BuS1qf1vgNE7xqwPxE0DuSQ==} + undici@7.21.0: + resolution: {integrity: sha512-Hn2tCQpoDt1wv23a68Ctc8Cr/BHpUSfaPYrkajTXOS9IKpxVRx/X5m1K2YkbK2ipgZgxXSgsUinl3x+2YdSSfg==} engines: {node: '>=20.18.1'} universal-github-app-jwt@2.2.2: @@ -5658,25 +5640,25 @@ snapshots: '@smithy/util-utf8': 2.3.0 tslib: 2.8.1 - '@aws-sdk/client-bedrock-runtime@3.984.0': + '@aws-sdk/client-bedrock-runtime@3.985.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.6 - '@aws-sdk/credential-provider-node': 3.972.5 + '@aws-sdk/core': 3.973.7 + 
'@aws-sdk/credential-provider-node': 3.972.6 '@aws-sdk/eventstream-handler-node': 3.972.5 '@aws-sdk/middleware-eventstream': 3.972.3 '@aws-sdk/middleware-host-header': 3.972.3 '@aws-sdk/middleware-logger': 3.972.3 '@aws-sdk/middleware-recursion-detection': 3.972.3 - '@aws-sdk/middleware-user-agent': 3.972.6 + '@aws-sdk/middleware-user-agent': 3.972.7 '@aws-sdk/middleware-websocket': 3.972.5 '@aws-sdk/region-config-resolver': 3.972.3 - '@aws-sdk/token-providers': 3.984.0 + '@aws-sdk/token-providers': 3.985.0 '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.984.0 + '@aws-sdk/util-endpoints': 3.985.0 '@aws-sdk/util-user-agent-browser': 3.972.3 - '@aws-sdk/util-user-agent-node': 3.972.4 + '@aws-sdk/util-user-agent-node': 3.972.5 '@smithy/config-resolver': 4.4.6 '@smithy/core': 3.22.1 '@smithy/eventstream-serde-browser': 4.2.8 @@ -5710,22 +5692,22 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/client-bedrock@3.984.0': + '@aws-sdk/client-bedrock@3.985.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.6 - '@aws-sdk/credential-provider-node': 3.972.5 + '@aws-sdk/core': 3.973.7 + '@aws-sdk/credential-provider-node': 3.972.6 '@aws-sdk/middleware-host-header': 3.972.3 '@aws-sdk/middleware-logger': 3.972.3 '@aws-sdk/middleware-recursion-detection': 3.972.3 - '@aws-sdk/middleware-user-agent': 3.972.6 + '@aws-sdk/middleware-user-agent': 3.972.7 '@aws-sdk/region-config-resolver': 3.972.3 - '@aws-sdk/token-providers': 3.984.0 + '@aws-sdk/token-providers': 3.985.0 '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.984.0 + '@aws-sdk/util-endpoints': 3.985.0 '@aws-sdk/util-user-agent-browser': 3.972.3 - '@aws-sdk/util-user-agent-node': 3.972.4 + '@aws-sdk/util-user-agent-node': 3.972.5 '@smithy/config-resolver': 4.4.6 '@smithy/core': 3.22.1 '@smithy/fetch-http-handler': 5.3.9 @@ -5755,20 +5737,20 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/client-sso@3.982.0': + 
'@aws-sdk/client-sso@3.985.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.6 + '@aws-sdk/core': 3.973.7 '@aws-sdk/middleware-host-header': 3.972.3 '@aws-sdk/middleware-logger': 3.972.3 '@aws-sdk/middleware-recursion-detection': 3.972.3 - '@aws-sdk/middleware-user-agent': 3.972.6 + '@aws-sdk/middleware-user-agent': 3.972.7 '@aws-sdk/region-config-resolver': 3.972.3 '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.982.0 + '@aws-sdk/util-endpoints': 3.985.0 '@aws-sdk/util-user-agent-browser': 3.972.3 - '@aws-sdk/util-user-agent-node': 3.972.4 + '@aws-sdk/util-user-agent-node': 3.972.5 '@smithy/config-resolver': 4.4.6 '@smithy/core': 3.22.1 '@smithy/fetch-http-handler': 5.3.9 @@ -5798,7 +5780,7 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/core@3.973.6': + '@aws-sdk/core@3.973.7': dependencies: '@aws-sdk/types': 3.973.1 '@aws-sdk/xml-builder': 3.972.4 @@ -5814,17 +5796,17 @@ snapshots: '@smithy/util-utf8': 4.2.0 tslib: 2.8.1 - '@aws-sdk/credential-provider-env@3.972.4': + '@aws-sdk/credential-provider-env@3.972.5': dependencies: - '@aws-sdk/core': 3.973.6 + '@aws-sdk/core': 3.973.7 '@aws-sdk/types': 3.973.1 '@smithy/property-provider': 4.2.8 '@smithy/types': 4.12.0 tslib: 2.8.1 - '@aws-sdk/credential-provider-http@3.972.6': + '@aws-sdk/credential-provider-http@3.972.7': dependencies: - '@aws-sdk/core': 3.973.6 + '@aws-sdk/core': 3.973.7 '@aws-sdk/types': 3.973.1 '@smithy/fetch-http-handler': 5.3.9 '@smithy/node-http-handler': 4.4.9 @@ -5835,16 +5817,16 @@ snapshots: '@smithy/util-stream': 4.5.11 tslib: 2.8.1 - '@aws-sdk/credential-provider-ini@3.972.4': + '@aws-sdk/credential-provider-ini@3.972.5': dependencies: - '@aws-sdk/core': 3.973.6 - '@aws-sdk/credential-provider-env': 3.972.4 - '@aws-sdk/credential-provider-http': 3.972.6 - '@aws-sdk/credential-provider-login': 3.972.4 - '@aws-sdk/credential-provider-process': 3.972.4 - '@aws-sdk/credential-provider-sso': 3.972.4 - 
'@aws-sdk/credential-provider-web-identity': 3.972.4 - '@aws-sdk/nested-clients': 3.982.0 + '@aws-sdk/core': 3.973.7 + '@aws-sdk/credential-provider-env': 3.972.5 + '@aws-sdk/credential-provider-http': 3.972.7 + '@aws-sdk/credential-provider-login': 3.972.5 + '@aws-sdk/credential-provider-process': 3.972.5 + '@aws-sdk/credential-provider-sso': 3.972.5 + '@aws-sdk/credential-provider-web-identity': 3.972.5 + '@aws-sdk/nested-clients': 3.985.0 '@aws-sdk/types': 3.973.1 '@smithy/credential-provider-imds': 4.2.8 '@smithy/property-provider': 4.2.8 @@ -5854,10 +5836,10 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-login@3.972.4': + '@aws-sdk/credential-provider-login@3.972.5': dependencies: - '@aws-sdk/core': 3.973.6 - '@aws-sdk/nested-clients': 3.982.0 + '@aws-sdk/core': 3.973.7 + '@aws-sdk/nested-clients': 3.985.0 '@aws-sdk/types': 3.973.1 '@smithy/property-provider': 4.2.8 '@smithy/protocol-http': 5.3.8 @@ -5867,14 +5849,14 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-node@3.972.5': + '@aws-sdk/credential-provider-node@3.972.6': dependencies: - '@aws-sdk/credential-provider-env': 3.972.4 - '@aws-sdk/credential-provider-http': 3.972.6 - '@aws-sdk/credential-provider-ini': 3.972.4 - '@aws-sdk/credential-provider-process': 3.972.4 - '@aws-sdk/credential-provider-sso': 3.972.4 - '@aws-sdk/credential-provider-web-identity': 3.972.4 + '@aws-sdk/credential-provider-env': 3.972.5 + '@aws-sdk/credential-provider-http': 3.972.7 + '@aws-sdk/credential-provider-ini': 3.972.5 + '@aws-sdk/credential-provider-process': 3.972.5 + '@aws-sdk/credential-provider-sso': 3.972.5 + '@aws-sdk/credential-provider-web-identity': 3.972.5 '@aws-sdk/types': 3.973.1 '@smithy/credential-provider-imds': 4.2.8 '@smithy/property-provider': 4.2.8 @@ -5884,20 +5866,20 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-process@3.972.4': + '@aws-sdk/credential-provider-process@3.972.5': 
dependencies: - '@aws-sdk/core': 3.973.6 + '@aws-sdk/core': 3.973.7 '@aws-sdk/types': 3.973.1 '@smithy/property-provider': 4.2.8 '@smithy/shared-ini-file-loader': 4.4.3 '@smithy/types': 4.12.0 tslib: 2.8.1 - '@aws-sdk/credential-provider-sso@3.972.4': + '@aws-sdk/credential-provider-sso@3.972.5': dependencies: - '@aws-sdk/client-sso': 3.982.0 - '@aws-sdk/core': 3.973.6 - '@aws-sdk/token-providers': 3.982.0 + '@aws-sdk/client-sso': 3.985.0 + '@aws-sdk/core': 3.973.7 + '@aws-sdk/token-providers': 3.985.0 '@aws-sdk/types': 3.973.1 '@smithy/property-provider': 4.2.8 '@smithy/shared-ini-file-loader': 4.4.3 @@ -5906,10 +5888,10 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/credential-provider-web-identity@3.972.4': + '@aws-sdk/credential-provider-web-identity@3.972.5': dependencies: - '@aws-sdk/core': 3.973.6 - '@aws-sdk/nested-clients': 3.982.0 + '@aws-sdk/core': 3.973.7 + '@aws-sdk/nested-clients': 3.985.0 '@aws-sdk/types': 3.973.1 '@smithy/property-provider': 4.2.8 '@smithy/shared-ini-file-loader': 4.4.3 @@ -5953,11 +5935,11 @@ snapshots: '@smithy/types': 4.12.0 tslib: 2.8.1 - '@aws-sdk/middleware-user-agent@3.972.6': + '@aws-sdk/middleware-user-agent@3.972.7': dependencies: - '@aws-sdk/core': 3.973.6 + '@aws-sdk/core': 3.973.7 '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.982.0 + '@aws-sdk/util-endpoints': 3.985.0 '@smithy/core': 3.22.1 '@smithy/protocol-http': 5.3.8 '@smithy/types': 4.12.0 @@ -5978,63 +5960,20 @@ snapshots: '@smithy/util-utf8': 4.2.0 tslib: 2.8.1 - '@aws-sdk/nested-clients@3.982.0': + '@aws-sdk/nested-clients@3.985.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.6 + '@aws-sdk/core': 3.973.7 '@aws-sdk/middleware-host-header': 3.972.3 '@aws-sdk/middleware-logger': 3.972.3 '@aws-sdk/middleware-recursion-detection': 3.972.3 - '@aws-sdk/middleware-user-agent': 3.972.6 + '@aws-sdk/middleware-user-agent': 3.972.7 '@aws-sdk/region-config-resolver': 3.972.3 
'@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.982.0 + '@aws-sdk/util-endpoints': 3.985.0 '@aws-sdk/util-user-agent-browser': 3.972.3 - '@aws-sdk/util-user-agent-node': 3.972.4 - '@smithy/config-resolver': 4.4.6 - '@smithy/core': 3.22.1 - '@smithy/fetch-http-handler': 5.3.9 - '@smithy/hash-node': 4.2.8 - '@smithy/invalid-dependency': 4.2.8 - '@smithy/middleware-content-length': 4.2.8 - '@smithy/middleware-endpoint': 4.4.13 - '@smithy/middleware-retry': 4.4.30 - '@smithy/middleware-serde': 4.2.9 - '@smithy/middleware-stack': 4.2.8 - '@smithy/node-config-provider': 4.3.8 - '@smithy/node-http-handler': 4.4.9 - '@smithy/protocol-http': 5.3.8 - '@smithy/smithy-client': 4.11.2 - '@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - '@smithy/util-base64': 4.3.0 - '@smithy/util-body-length-browser': 4.2.0 - '@smithy/util-body-length-node': 4.2.1 - '@smithy/util-defaults-mode-browser': 4.3.29 - '@smithy/util-defaults-mode-node': 4.2.32 - '@smithy/util-endpoints': 3.2.8 - '@smithy/util-middleware': 4.2.8 - '@smithy/util-retry': 4.2.8 - '@smithy/util-utf8': 4.2.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/nested-clients@3.984.0': - dependencies: - '@aws-crypto/sha256-browser': 5.2.0 - '@aws-crypto/sha256-js': 5.2.0 - '@aws-sdk/core': 3.973.6 - '@aws-sdk/middleware-host-header': 3.972.3 - '@aws-sdk/middleware-logger': 3.972.3 - '@aws-sdk/middleware-recursion-detection': 3.972.3 - '@aws-sdk/middleware-user-agent': 3.972.6 - '@aws-sdk/region-config-resolver': 3.972.3 - '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.984.0 - '@aws-sdk/util-user-agent-browser': 3.972.3 - '@aws-sdk/util-user-agent-node': 3.972.4 + '@aws-sdk/util-user-agent-node': 3.972.5 '@smithy/config-resolver': 4.4.6 '@smithy/core': 3.22.1 '@smithy/fetch-http-handler': 5.3.9 @@ -6072,22 +6011,10 @@ snapshots: '@smithy/types': 4.12.0 tslib: 2.8.1 - '@aws-sdk/token-providers@3.982.0': + '@aws-sdk/token-providers@3.985.0': dependencies: - '@aws-sdk/core': 3.973.6 - 
'@aws-sdk/nested-clients': 3.982.0 - '@aws-sdk/types': 3.973.1 - '@smithy/property-provider': 4.2.8 - '@smithy/shared-ini-file-loader': 4.4.3 - '@smithy/types': 4.12.0 - tslib: 2.8.1 - transitivePeerDependencies: - - aws-crt - - '@aws-sdk/token-providers@3.984.0': - dependencies: - '@aws-sdk/core': 3.973.6 - '@aws-sdk/nested-clients': 3.984.0 + '@aws-sdk/core': 3.973.7 + '@aws-sdk/nested-clients': 3.985.0 '@aws-sdk/types': 3.973.1 '@smithy/property-provider': 4.2.8 '@smithy/shared-ini-file-loader': 4.4.3 @@ -6101,15 +6028,7 @@ snapshots: '@smithy/types': 4.12.0 tslib: 2.8.1 - '@aws-sdk/util-endpoints@3.982.0': - dependencies: - '@aws-sdk/types': 3.973.1 - '@smithy/types': 4.12.0 - '@smithy/url-parser': 4.2.8 - '@smithy/util-endpoints': 3.2.8 - tslib: 2.8.1 - - '@aws-sdk/util-endpoints@3.984.0': + '@aws-sdk/util-endpoints@3.985.0': dependencies: '@aws-sdk/types': 3.973.1 '@smithy/types': 4.12.0 @@ -6135,9 +6054,9 @@ snapshots: bowser: 2.13.1 tslib: 2.8.1 - '@aws-sdk/util-user-agent-node@3.972.4': + '@aws-sdk/util-user-agent-node@3.972.5': dependencies: - '@aws-sdk/middleware-user-agent': 3.972.6 + '@aws-sdk/middleware-user-agent': 3.972.7 '@aws-sdk/types': 3.973.1 '@smithy/node-config-provider': 4.3.8 '@smithy/types': 4.12.0 @@ -6220,14 +6139,14 @@ snapshots: '@borewit/text-codec@0.2.1': {} - '@buape/carbon@0.14.0(hono@4.11.7)': + '@buape/carbon@0.0.0-beta-20260130162700(hono@4.11.8)': dependencies: '@types/node': 25.2.1 discord-api-types: 0.38.37 optionalDependencies: '@cloudflare/workers-types': 4.20260120.0 '@discordjs/voice': 0.19.0 - '@hono/node-server': 1.19.9(hono@4.11.7) + '@hono/node-server': 1.19.9(hono@4.11.8) '@types/bun': 1.3.6 '@types/ws': 8.18.1 ws: 8.19.0 @@ -6242,7 +6161,7 @@ snapshots: '@cacheable/memory@2.0.7': dependencies: - '@cacheable/utils': 2.3.3 + '@cacheable/utils': 2.3.4 '@keyv/bigmap': 1.3.1(keyv@5.6.0) hookified: 1.15.1 keyv: 5.6.0 @@ -6253,7 +6172,7 @@ snapshots: hookified: 1.15.1 keyv: 5.6.0 - '@cacheable/utils@2.3.3': + 
'@cacheable/utils@2.3.4': dependencies: hashery: 1.4.0 keyv: 5.6.0 @@ -6482,9 +6401,9 @@ snapshots: transitivePeerDependencies: - supports-color - '@hono/node-server@1.19.9(hono@4.11.7)': + '@hono/node-server@1.19.9(hono@4.11.8)': dependencies: - hono: 4.11.7 + hono: 4.11.8 optional: true '@huggingface/jinja@0.5.4': {} @@ -6636,39 +6555,35 @@ snapshots: '@kwsites/promise-deferred@1.1.1': {} - '@lancedb/lancedb-darwin-arm64@0.23.0': + '@lancedb/lancedb-linux-arm64-gnu@0.24.1': optional: true - '@lancedb/lancedb-linux-arm64-gnu@0.23.0': + '@lancedb/lancedb-linux-arm64-musl@0.24.1': optional: true - '@lancedb/lancedb-linux-arm64-musl@0.23.0': + '@lancedb/lancedb-linux-x64-gnu@0.24.1': optional: true - '@lancedb/lancedb-linux-x64-gnu@0.23.0': + '@lancedb/lancedb-linux-x64-musl@0.24.1': optional: true - '@lancedb/lancedb-linux-x64-musl@0.23.0': + '@lancedb/lancedb-win32-arm64-msvc@0.24.1': optional: true - '@lancedb/lancedb-win32-arm64-msvc@0.23.0': + '@lancedb/lancedb-win32-x64-msvc@0.24.1': optional: true - '@lancedb/lancedb-win32-x64-msvc@0.23.0': - optional: true - - '@lancedb/lancedb@0.23.0(apache-arrow@18.1.0)': + '@lancedb/lancedb@0.24.1(apache-arrow@18.1.0)': dependencies: apache-arrow: 18.1.0 reflect-metadata: 0.2.2 optionalDependencies: - '@lancedb/lancedb-darwin-arm64': 0.23.0 - '@lancedb/lancedb-linux-arm64-gnu': 0.23.0 - '@lancedb/lancedb-linux-arm64-musl': 0.23.0 - '@lancedb/lancedb-linux-x64-gnu': 0.23.0 - '@lancedb/lancedb-linux-x64-musl': 0.23.0 - '@lancedb/lancedb-win32-arm64-msvc': 0.23.0 - '@lancedb/lancedb-win32-x64-msvc': 0.23.0 + '@lancedb/lancedb-linux-arm64-gnu': 0.24.1 + '@lancedb/lancedb-linux-arm64-musl': 0.24.1 + '@lancedb/lancedb-linux-x64-gnu': 0.24.1 + '@lancedb/lancedb-linux-x64-musl': 0.24.1 + '@lancedb/lancedb-win32-arm64-msvc': 0.24.1 + '@lancedb/lancedb-win32-x64-msvc': 0.24.1 '@larksuiteoapi/node-sdk@1.58.0': dependencies: @@ -6783,9 +6698,9 @@ snapshots: std-env: 3.10.0 yoctocolors: 2.1.2 - 
'@mariozechner/pi-agent-core@0.52.6(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-agent-core@0.52.7(ws@8.19.0)(zod@4.3.6)': dependencies: - '@mariozechner/pi-ai': 0.52.6(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.52.7(ws@8.19.0)(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' - aws-crt @@ -6795,20 +6710,20 @@ snapshots: - ws - zod - '@mariozechner/pi-ai@0.52.6(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-ai@0.52.7(ws@8.19.0)(zod@4.3.6)': dependencies: '@anthropic-ai/sdk': 0.73.0(zod@4.3.6) - '@aws-sdk/client-bedrock-runtime': 3.984.0 + '@aws-sdk/client-bedrock-runtime': 3.985.0 '@google/genai': 1.40.0 '@mistralai/mistralai': 1.10.0 - '@sinclair/typebox': 0.34.47 + '@sinclair/typebox': 0.34.48 ajv: 8.17.1 ajv-formats: 3.0.1(ajv@8.17.1) chalk: 5.6.2 openai: 6.10.0(ws@8.19.0)(zod@4.3.6) partial-json: 0.1.7 proxy-agent: 6.5.0 - undici: 7.20.0 + undici: 7.21.0 zod-to-json-schema: 3.25.1(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' @@ -6819,12 +6734,12 @@ snapshots: - ws - zod - '@mariozechner/pi-coding-agent@0.52.6(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-coding-agent@0.52.7(ws@8.19.0)(zod@4.3.6)': dependencies: '@mariozechner/jiti': 2.6.5 - '@mariozechner/pi-agent-core': 0.52.6(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.52.6(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-tui': 0.52.6 + '@mariozechner/pi-agent-core': 0.52.7(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.52.7(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-tui': 0.52.7 '@silvia-odwyer/photon-node': 0.3.4 chalk: 5.6.2 cli-highlight: 2.1.11 @@ -6848,7 +6763,7 @@ snapshots: - ws - zod - '@mariozechner/pi-tui@0.52.6': + '@mariozechner/pi-tui@0.52.7': dependencies: '@types/mime-types': 2.1.4 chalk: 5.6.2 @@ -7716,7 +7631,7 @@ snapshots: '@silvia-odwyer/photon-node@0.3.4': {} - '@sinclair/typebox@0.34.47': {} + '@sinclair/typebox@0.34.48': {} '@slack/bolt@4.6.0(@types/express@5.0.6)': dependencies: @@ -8317,36 +8232,36 @@ snapshots: dependencies: '@types/node': 
25.2.1 - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260205.1': + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260206.1': optional: true - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260205.1': + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260206.1': optional: true - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260205.1': + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260206.1': optional: true - '@typescript/native-preview-linux-arm@7.0.0-dev.20260205.1': + '@typescript/native-preview-linux-arm@7.0.0-dev.20260206.1': optional: true - '@typescript/native-preview-linux-x64@7.0.0-dev.20260205.1': + '@typescript/native-preview-linux-x64@7.0.0-dev.20260206.1': optional: true - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260205.1': + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260206.1': optional: true - '@typescript/native-preview-win32-x64@7.0.0-dev.20260205.1': + '@typescript/native-preview-win32-x64@7.0.0-dev.20260206.1': optional: true - '@typescript/native-preview@7.0.0-dev.20260205.1': + '@typescript/native-preview@7.0.0-dev.20260206.1': optionalDependencies: - '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260205.1 - '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260205.1 - '@typescript/native-preview-linux-arm': 7.0.0-dev.20260205.1 - '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260205.1 - '@typescript/native-preview-linux-x64': 7.0.0-dev.20260205.1 - '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260205.1 - '@typescript/native-preview-win32-x64': 7.0.0-dev.20260205.1 + '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260206.1 + '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260206.1 + '@typescript/native-preview-linux-arm': 7.0.0-dev.20260206.1 + '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260206.1 + '@typescript/native-preview-linux-x64': 7.0.0-dev.20260206.1 + '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260206.1 + 
'@typescript/native-preview-win32-x64': 7.0.0-dev.20260206.1 '@typespec/ts-http-runtime@0.3.3': dependencies: @@ -8388,11 +8303,11 @@ snapshots: transitivePeerDependencies: - supports-color - '@vitest/browser-playwright@4.0.18(playwright@1.58.1)(vite@7.3.1(@types/node@25.2.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': + '@vitest/browser-playwright@4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.2.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18)': dependencies: '@vitest/browser': 4.0.18(vite@7.3.1(@types/node@25.2.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) '@vitest/mocker': 4.0.18(vite@7.3.1(@types/node@25.2.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2)) - playwright: 1.58.1 + playwright: 1.58.2 tinyrainbow: 3.0.3 vitest: 4.0.18(@opentelemetry/api@1.9.0)(@types/node@25.2.1)(@vitest/browser-playwright@4.0.18)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2) transitivePeerDependencies: @@ -8755,7 +8670,7 @@ snapshots: cacheable@2.3.2: dependencies: '@cacheable/memory': 2.0.7 - '@cacheable/utils': 2.3.3 + '@cacheable/utils': 2.3.4 hookified: 1.15.1 keyv: 5.6.0 qified: 0.6.0 @@ -9335,7 +9250,7 @@ snapshots: dunder-proto: 1.0.1 es-object-atoms: 1.1.1 - get-tsconfig@4.13.3: + get-tsconfig@4.13.6: dependencies: resolve-pkg-maps: 1.0.0 @@ -9437,7 +9352,7 @@ snapshots: highlight.js@10.7.3: {} - hono@4.11.7: {} + hono@4.11.8: {} hookable@6.0.1: {} @@ -10359,11 +10274,11 @@ snapshots: dependencies: pngjs: 7.0.0 - playwright-core@1.58.1: {} + playwright-core@1.58.2: {} - playwright@1.58.1: + playwright@1.58.2: dependencies: - playwright-core: 1.58.1 + playwright-core: 1.58.2 optionalDependencies: fsevents: 2.3.2 @@ -10604,7 +10519,7 @@ snapshots: dependencies: glob: 10.5.0 - rolldown-plugin-dts@0.22.1(@typescript/native-preview@7.0.0-dev.20260205.1)(rolldown@1.0.0-rc.3)(typescript@5.9.3): + 
rolldown-plugin-dts@0.22.1(@typescript/native-preview@7.0.0-dev.20260206.1)(rolldown@1.0.0-rc.3)(typescript@5.9.3): dependencies: '@babel/generator': 8.0.0-rc.1 '@babel/helper-validator-identifier': 8.0.0-rc.1 @@ -10613,11 +10528,11 @@ snapshots: ast-kit: 3.0.0-beta.1 birpc: 4.0.0 dts-resolver: 2.1.3 - get-tsconfig: 4.13.3 + get-tsconfig: 4.13.6 obug: 2.1.1 rolldown: 1.0.0-rc.3 optionalDependencies: - '@typescript/native-preview': 7.0.0-dev.20260205.1 + '@typescript/native-preview': 7.0.0-dev.20260206.1 typescript: 5.9.3 transitivePeerDependencies: - oxc-resolver @@ -11065,7 +10980,7 @@ snapshots: ts-algebra@2.0.0: {} - tsdown@0.20.3(@typescript/native-preview@7.0.0-dev.20260205.1)(typescript@5.9.3): + tsdown@0.20.3(@typescript/native-preview@7.0.0-dev.20260206.1)(typescript@5.9.3): dependencies: ansis: 4.2.0 cac: 6.7.14 @@ -11076,7 +10991,7 @@ snapshots: obug: 2.1.1 picomatch: 4.0.3 rolldown: 1.0.0-rc.3 - rolldown-plugin-dts: 0.22.1(@typescript/native-preview@7.0.0-dev.20260205.1)(rolldown@1.0.0-rc.3)(typescript@5.9.3) + rolldown-plugin-dts: 0.22.1(@typescript/native-preview@7.0.0-dev.20260206.1)(rolldown@1.0.0-rc.3)(typescript@5.9.3) semver: 7.7.4 tinyexec: 1.0.2 tinyglobby: 0.2.15 @@ -11101,7 +11016,7 @@ snapshots: tsx@4.21.0: dependencies: esbuild: 0.27.3 - get-tsconfig: 4.13.3 + get-tsconfig: 4.13.6 optionalDependencies: fsevents: 2.3.3 @@ -11143,7 +11058,7 @@ snapshots: undici-types@7.16.0: {} - undici@7.20.0: {} + undici@7.21.0: {} universal-github-app-jwt@2.2.2: {} @@ -11231,7 +11146,7 @@ snapshots: optionalDependencies: '@opentelemetry/api': 1.9.0 '@types/node': 25.2.1 - '@vitest/browser-playwright': 4.0.18(playwright@1.58.1)(vite@7.3.1(@types/node@25.2.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) + '@vitest/browser-playwright': 4.0.18(playwright@1.58.2)(vite@7.3.1(@types/node@25.2.1)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)(yaml@2.8.2))(vitest@4.0.18) transitivePeerDependencies: - jiti - less diff --git 
a/scripts/test-parallel.mjs b/scripts/test-parallel.mjs index 9ee0a9d87..786f8bbd1 100644 --- a/scripts/test-parallel.mjs +++ b/scripts/test-parallel.mjs @@ -32,6 +32,7 @@ const shardCount = isWindowsCi const windowsCiArgs = isWindowsCi ? ["--no-file-parallelism", "--dangerouslyIgnoreUnhandledErrors"] : []; +const passthroughArgs = process.argv.slice(2); const overrideWorkers = Number.parseInt(process.env.OPENCLAW_TEST_WORKERS ?? "", 10); const resolvedOverride = Number.isFinite(overrideWorkers) && overrideWorkers > 0 ? overrideWorkers : null; @@ -96,6 +97,30 @@ const shutdown = (signal) => { process.on("SIGINT", () => shutdown("SIGINT")); process.on("SIGTERM", () => shutdown("SIGTERM")); +if (passthroughArgs.length > 0) { + const args = maxWorkers + ? ["vitest", "run", "--maxWorkers", String(maxWorkers), ...windowsCiArgs, ...passthroughArgs] + : ["vitest", "run", ...windowsCiArgs, ...passthroughArgs]; + const nodeOptions = process.env.NODE_OPTIONS ?? ""; + const nextNodeOptions = WARNING_SUPPRESSION_FLAGS.reduce( + (acc, flag) => (acc.includes(flag) ? acc : `${acc} ${flag}`.trim()), + nodeOptions, + ); + const code = await new Promise((resolve) => { + const child = spawn(pnpm, args, { + stdio: "inherit", + env: { ...process.env, NODE_OPTIONS: nextNodeOptions }, + shell: process.platform === "win32", + }); + children.add(child); + child.on("exit", (exitCode, signal) => { + children.delete(child); + resolve(exitCode ?? (signal ? 
1 : 0)); + }); + }); + process.exit(Number(code) || 0); +} + const parallelCodes = await Promise.all(parallelRuns.map(run)); const failedParallel = parallelCodes.find((code) => code !== 0); if (failedParallel !== undefined) { diff --git a/scripts/write-plugin-sdk-entry-dts.ts b/scripts/write-plugin-sdk-entry-dts.ts new file mode 100644 index 000000000..25d063159 --- /dev/null +++ b/scripts/write-plugin-sdk-entry-dts.ts @@ -0,0 +1,9 @@ +import fs from "node:fs"; +import path from "node:path"; + +// `tsc` emits the entry d.ts at `dist/plugin-sdk/plugin-sdk/index.d.ts` because +// the source lives at `src/plugin-sdk/index.ts` and `rootDir` is `src/`. +// Keep a stable `dist/plugin-sdk/index.d.ts` alongside `index.js` for TS users. +const out = path.join(process.cwd(), "dist/plugin-sdk/index.d.ts"); +fs.mkdirSync(path.dirname(out), { recursive: true }); +fs.writeFileSync(out, 'export * from "./plugin-sdk/index";\n', "utf8"); diff --git a/src/agents/cli-runner.test.ts b/src/agents/cli-runner.test.ts index 2293648e2..b5f5e5ba5 100644 --- a/src/agents/cli-runner.test.ts +++ b/src/agents/cli-runner.test.ts @@ -1,4 +1,8 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; import { beforeEach, describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; import type { CliBackendConfig } from "../config/types.js"; import { runCliAgent } from "./cli-runner.js"; import { cleanupSuspendedCliProcesses } from "./cli-runner/helpers.js"; @@ -58,6 +62,85 @@ describe("runCliAgent resume cleanup", () => { expect(pkillArgs[1]).toContain("resume"); expect(pkillArgs[1]).toContain("thread-123"); }); + + it("falls back to per-agent workspace when workspaceDir is missing", async () => { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cli-runner-")); + const fallbackWorkspace = path.join(tempDir, "workspace-main"); + await fs.mkdir(fallbackWorkspace, { recursive: true }); + const cfg = { + 
agents: { + defaults: { + workspace: fallbackWorkspace, + }, + }, + } satisfies OpenClawConfig; + + runExecMock.mockResolvedValue({ stdout: "", stderr: "" }); + runCommandWithTimeoutMock.mockResolvedValueOnce({ + stdout: "ok", + stderr: "", + code: 0, + signal: null, + killed: false, + }); + + try { + await runCliAgent({ + sessionId: "s1", + sessionKey: "agent:main:subagent:missing-workspace", + sessionFile: "/tmp/session.jsonl", + workspaceDir: undefined as unknown as string, + config: cfg, + prompt: "hi", + provider: "codex-cli", + model: "gpt-5.2-codex", + timeoutMs: 1_000, + runId: "run-1", + }); + } finally { + await fs.rm(tempDir, { recursive: true, force: true }); + } + + const options = runCommandWithTimeoutMock.mock.calls[0]?.[1] as { cwd?: string }; + expect(options.cwd).toBe(path.resolve(fallbackWorkspace)); + }); + + it("throws when sessionKey is malformed", async () => { + const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cli-runner-")); + const mainWorkspace = path.join(tempDir, "workspace-main"); + const researchWorkspace = path.join(tempDir, "workspace-research"); + await fs.mkdir(mainWorkspace, { recursive: true }); + await fs.mkdir(researchWorkspace, { recursive: true }); + const cfg = { + agents: { + defaults: { + workspace: mainWorkspace, + }, + list: [{ id: "research", workspace: researchWorkspace }], + }, + } satisfies OpenClawConfig; + + try { + await expect( + runCliAgent({ + sessionId: "s1", + sessionKey: "agent::broken", + agentId: "research", + sessionFile: "/tmp/session.jsonl", + workspaceDir: undefined as unknown as string, + config: cfg, + prompt: "hi", + provider: "codex-cli", + model: "gpt-5.2-codex", + timeoutMs: 1_000, + runId: "run-2", + }), + ).rejects.toThrow("Malformed agent session key"); + } finally { + await fs.rm(tempDir, { recursive: true, force: true }); + } + expect(runCommandWithTimeoutMock).not.toHaveBeenCalled(); + }); }); describe("cleanupSuspendedCliProcesses", () => { diff --git 
a/src/agents/cli-runner.ts b/src/agents/cli-runner.ts index 4b4c108e4..68dbf0d5c 100644 --- a/src/agents/cli-runner.ts +++ b/src/agents/cli-runner.ts @@ -7,7 +7,6 @@ import { shouldLogVerbose } from "../globals.js"; import { isTruthyEnvValue } from "../infra/env.js"; import { createSubsystemLogger } from "../logging/subsystem.js"; import { runCommandWithTimeout } from "../process/exec.js"; -import { resolveUserPath } from "../utils.js"; import { resolveSessionAgentIds } from "./agent-scope.js"; import { makeBootstrapWarn, resolveBootstrapContextForRun } from "./bootstrap-files.js"; import { resolveCliBackendConfig } from "./cli-backends.js"; @@ -29,12 +28,14 @@ import { import { resolveOpenClawDocsPath } from "./docs-path.js"; import { FailoverError, resolveFailoverStatus } from "./failover-error.js"; import { classifyFailoverReason, isFailoverErrorMessage } from "./pi-embedded-helpers.js"; +import { redactRunIdentifier, resolveRunWorkspaceDir } from "./workspace-run.js"; const log = createSubsystemLogger("agent/claude-cli"); export async function runCliAgent(params: { sessionId: string; sessionKey?: string; + agentId?: string; sessionFile: string; workspaceDir: string; config?: OpenClawConfig; @@ -51,7 +52,21 @@ export async function runCliAgent(params: { images?: ImageContent[]; }): Promise { const started = Date.now(); - const resolvedWorkspace = resolveUserPath(params.workspaceDir); + const workspaceResolution = resolveRunWorkspaceDir({ + workspaceDir: params.workspaceDir, + sessionKey: params.sessionKey, + agentId: params.agentId, + config: params.config, + }); + const resolvedWorkspace = workspaceResolution.workspaceDir; + const redactedSessionId = redactRunIdentifier(params.sessionId); + const redactedSessionKey = redactRunIdentifier(params.sessionKey); + const redactedWorkspace = redactRunIdentifier(resolvedWorkspace); + if (workspaceResolution.usedFallback) { + log.warn( + `[workspace-fallback] caller=runCliAgent 
reason=${workspaceResolution.fallbackReason} run=${params.runId} session=${redactedSessionId} sessionKey=${redactedSessionKey} agent=${workspaceResolution.agentId} workspace=${redactedWorkspace}`, + ); + } const workspaceDir = resolvedWorkspace; const backendResolved = resolveCliBackendConfig(params.provider, params.config); @@ -311,6 +326,7 @@ export async function runCliAgent(params: { export async function runClaudeCliAgent(params: { sessionId: string; sessionKey?: string; + agentId?: string; sessionFile: string; workspaceDir: string; config?: OpenClawConfig; @@ -328,6 +344,7 @@ export async function runClaudeCliAgent(params: { return runCliAgent({ sessionId: params.sessionId, sessionKey: params.sessionKey, + agentId: params.agentId, sessionFile: params.sessionFile, workspaceDir: params.workspaceDir, config: params.config, diff --git a/src/agents/memory-search.ts b/src/agents/memory-search.ts index 658771a11..5394b640d 100644 --- a/src/agents/memory-search.ts +++ b/src/agents/memory-search.ts @@ -9,7 +9,7 @@ export type ResolvedMemorySearchConfig = { enabled: boolean; sources: Array<"memory" | "sessions">; extraPaths: string[]; - provider: "openai" | "local" | "gemini" | "auto"; + provider: "openai" | "local" | "gemini" | "voyage" | "auto"; remote?: { baseUrl?: string; apiKey?: string; @@ -25,7 +25,7 @@ export type ResolvedMemorySearchConfig = { experimental: { sessionMemory: boolean; }; - fallback: "openai" | "gemini" | "local" | "none"; + fallback: "openai" | "gemini" | "local" | "voyage" | "none"; model: string; local: { modelPath?: string; @@ -72,6 +72,7 @@ export type ResolvedMemorySearchConfig = { const DEFAULT_OPENAI_MODEL = "text-embedding-3-small"; const DEFAULT_GEMINI_MODEL = "gemini-embedding-001"; +const DEFAULT_VOYAGE_MODEL = "voyage-4-large"; const DEFAULT_CHUNK_TOKENS = 400; const DEFAULT_CHUNK_OVERLAP = 80; const DEFAULT_WATCH_DEBOUNCE_MS = 1500; @@ -136,7 +137,11 @@ function mergeConfig( defaultRemote?.headers, ); const includeRemote = - 
hasRemoteConfig || provider === "openai" || provider === "gemini" || provider === "auto"; + hasRemoteConfig || + provider === "openai" || + provider === "gemini" || + provider === "voyage" || + provider === "auto"; const batch = { enabled: overrideRemote?.batch?.enabled ?? defaultRemote?.batch?.enabled ?? true, wait: overrideRemote?.batch?.wait ?? defaultRemote?.batch?.wait ?? true, @@ -163,7 +168,9 @@ function mergeConfig( ? DEFAULT_GEMINI_MODEL : provider === "openai" ? DEFAULT_OPENAI_MODEL - : undefined; + : provider === "voyage" + ? DEFAULT_VOYAGE_MODEL + : undefined; const model = overrides?.model ?? defaults?.model ?? modelDefault ?? ""; const local = { modelPath: overrides?.local?.modelPath ?? defaults?.local?.modelPath, diff --git a/src/agents/model-auth.test.ts b/src/agents/model-auth.test.ts index 7a0af0d18..f6d669aa8 100644 --- a/src/agents/model-auth.test.ts +++ b/src/agents/model-auth.test.ts @@ -463,4 +463,28 @@ describe("getApiKeyForModel", () => { } } }); + + it("accepts VOYAGE_API_KEY for voyage", async () => { + const previous = process.env.VOYAGE_API_KEY; + + try { + process.env.VOYAGE_API_KEY = "voyage-test-key"; + + vi.resetModules(); + const { resolveApiKeyForProvider } = await import("./model-auth.js"); + + const resolved = await resolveApiKeyForProvider({ + provider: "voyage", + store: { version: 1, profiles: {} }, + }); + expect(resolved.apiKey).toBe("voyage-test-key"); + expect(resolved.source).toContain("VOYAGE_API_KEY"); + } finally { + if (previous === undefined) { + delete process.env.VOYAGE_API_KEY; + } else { + process.env.VOYAGE_API_KEY = previous; + } + } + }); }); diff --git a/src/agents/model-auth.ts b/src/agents/model-auth.ts index 8edf2a193..35e33fbf4 100644 --- a/src/agents/model-auth.ts +++ b/src/agents/model-auth.ts @@ -287,6 +287,7 @@ export function resolveEnvApiKey(provider: string): EnvApiKeyResult | null { const envMap: Record = { openai: "OPENAI_API_KEY", google: "GEMINI_API_KEY", + voyage: "VOYAGE_API_KEY", groq: 
"GROQ_API_KEY", deepgram: "DEEPGRAM_API_KEY", cerebras: "CEREBRAS_API_KEY", diff --git a/src/agents/openclaw-tools.subagents.sessions-spawn-applies-thinking-default.test.ts b/src/agents/openclaw-tools.subagents.sessions-spawn-applies-thinking-default.test.ts index 36eb50b55..c9b717571 100644 --- a/src/agents/openclaw-tools.subagents.sessions-spawn-applies-thinking-default.test.ts +++ b/src/agents/openclaw-tools.subagents.sessions-spawn-applies-thinking-default.test.ts @@ -45,8 +45,12 @@ describe("sessions_spawn thinking defaults", () => { const agentCall = calls .map((call) => call[0] as { method: string; params?: Record }) .findLast((call) => call.method === "agent"); + const thinkingPatch = calls + .map((call) => call[0] as { method: string; params?: Record }) + .findLast((call) => call.method === "sessions.patch" && call.params?.thinkingLevel); expect(agentCall?.params?.thinking).toBe("high"); + expect(thinkingPatch?.params?.thinkingLevel).toBe("high"); }); it("prefers explicit sessions_spawn.thinking over config default", async () => { @@ -60,7 +64,11 @@ describe("sessions_spawn thinking defaults", () => { const agentCall = calls .map((call) => call[0] as { method: string; params?: Record }) .findLast((call) => call.method === "agent"); + const thinkingPatch = calls + .map((call) => call[0] as { method: string; params?: Record }) + .findLast((call) => call.method === "sessions.patch" && call.params?.thinkingLevel); expect(agentCall?.params?.thinking).toBe("low"); + expect(thinkingPatch?.params?.thinkingLevel).toBe("low"); }); }); diff --git a/src/agents/pi-embedded-runner.test.ts b/src/agents/pi-embedded-runner.test.ts index 8db5994d9..698bc8466 100644 --- a/src/agents/pi-embedded-runner.test.ts +++ b/src/agents/pi-embedded-runner.test.ts @@ -219,6 +219,75 @@ describe("runEmbeddedPiAgent", () => { await expect(fs.stat(path.join(agentDir, "models.json"))).resolves.toBeTruthy(); }); + it("falls back to per-agent workspace when runtime workspaceDir is missing", 
async () => { + const sessionFile = nextSessionFile(); + const fallbackWorkspace = path.join(tempRoot ?? os.tmpdir(), "workspace-fallback-main"); + const cfg = { + ...makeOpenAiConfig(["mock-1"]), + agents: { + defaults: { + workspace: fallbackWorkspace, + }, + }, + } satisfies OpenClawConfig; + await ensureModels(cfg); + + const result = await runEmbeddedPiAgent({ + sessionId: "session:test-fallback", + sessionKey: "agent:main:subagent:fallback-workspace", + sessionFile, + workspaceDir: undefined as unknown as string, + config: cfg, + prompt: "hello", + provider: "openai", + model: "mock-1", + timeoutMs: 5_000, + agentDir, + runId: "run-fallback-workspace", + enqueue: immediateEnqueue, + }); + + expect(result.payloads?.[0]?.text).toBe("ok"); + await expect(fs.stat(fallbackWorkspace)).resolves.toBeTruthy(); + }); + + it("throws when sessionKey is malformed", async () => { + const sessionFile = nextSessionFile(); + const cfg = { + ...makeOpenAiConfig(["mock-1"]), + agents: { + defaults: { + workspace: path.join(tempRoot ?? os.tmpdir(), "workspace-fallback-main"), + }, + list: [ + { + id: "research", + workspace: path.join(tempRoot ?? 
os.tmpdir(), "workspace-fallback-research"), + }, + ], + }, + } satisfies OpenClawConfig; + await ensureModels(cfg); + + await expect( + runEmbeddedPiAgent({ + sessionId: "session:test-fallback-malformed", + sessionKey: "agent::broken", + agentId: "research", + sessionFile, + workspaceDir: undefined as unknown as string, + config: cfg, + prompt: "hello", + provider: "openai", + model: "mock-1", + timeoutMs: 5_000, + agentDir, + runId: "run-fallback-workspace-malformed", + enqueue: immediateEnqueue, + }), + ).rejects.toThrow("Malformed agent session key"); + }); + itIfNotWin32( "persists the first user message before assistant output", { timeout: 120_000 }, diff --git a/src/agents/pi-embedded-runner/model.test.ts b/src/agents/pi-embedded-runner/model.test.ts index dbcbfc31d..4a9bba8ca 100644 --- a/src/agents/pi-embedded-runner/model.test.ts +++ b/src/agents/pi-embedded-runner/model.test.ts @@ -172,6 +172,41 @@ describe("resolveModel", () => { }); }); + it("builds an anthropic forward-compat fallback for claude-opus-4-6", () => { + const templateModel = { + id: "claude-opus-4-5", + name: "Claude Opus 4.5", + provider: "anthropic", + api: "anthropic-messages", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"] as const, + cost: { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 }, + contextWindow: 200000, + maxTokens: 64000, + }; + + vi.mocked(discoverModels).mockReturnValue({ + find: vi.fn((provider: string, modelId: string) => { + if (provider === "anthropic" && modelId === "claude-opus-4-5") { + return templateModel; + } + return null; + }), + } as unknown as ReturnType); + + const result = resolveModel("anthropic", "claude-opus-4-6", "/tmp/agent"); + + expect(result.error).toBeUndefined(); + expect(result.model).toMatchObject({ + provider: "anthropic", + id: "claude-opus-4-6", + api: "anthropic-messages", + baseUrl: "https://api.anthropic.com", + reasoning: true, + }); + }); + it("keeps unknown-model errors for non-gpt-5 
openai-codex ids", () => { const result = resolveModel("openai-codex", "gpt-4.1-mini", "/tmp/agent"); expect(result.model).toBeUndefined(); diff --git a/src/agents/pi-embedded-runner/model.ts b/src/agents/pi-embedded-runner/model.ts index a11751a46..2f489ffda 100644 --- a/src/agents/pi-embedded-runner/model.ts +++ b/src/agents/pi-embedded-runner/model.ts @@ -23,6 +23,12 @@ const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex"; const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const; +// pi-ai's built-in Anthropic catalog can lag behind OpenClaw's defaults/docs. +// Add forward-compat fallbacks for known-new IDs by cloning an older template model. +const ANTHROPIC_OPUS_46_MODEL_ID = "claude-opus-4-6"; +const ANTHROPIC_OPUS_46_DOT_MODEL_ID = "claude-opus-4.6"; +const ANTHROPIC_OPUS_TEMPLATE_MODEL_IDS = ["claude-opus-4-5", "claude-opus-4.5"] as const; + function resolveOpenAICodexGpt53FallbackModel( provider: string, modelId: string, @@ -63,6 +69,51 @@ function resolveOpenAICodexGpt53FallbackModel( } as Model); } +function resolveAnthropicOpus46ForwardCompatModel( + provider: string, + modelId: string, + modelRegistry: ModelRegistry, +): Model | undefined { + const normalizedProvider = normalizeProviderId(provider); + if (normalizedProvider !== "anthropic") { + return undefined; + } + + const trimmedModelId = modelId.trim(); + const lower = trimmedModelId.toLowerCase(); + const isOpus46 = + lower === ANTHROPIC_OPUS_46_MODEL_ID || + lower === ANTHROPIC_OPUS_46_DOT_MODEL_ID || + lower.startsWith(`${ANTHROPIC_OPUS_46_MODEL_ID}-`) || + lower.startsWith(`${ANTHROPIC_OPUS_46_DOT_MODEL_ID}-`); + if (!isOpus46) { + return undefined; + } + + const templateIds: string[] = []; + if (lower.startsWith(ANTHROPIC_OPUS_46_MODEL_ID)) { + templateIds.push(lower.replace(ANTHROPIC_OPUS_46_MODEL_ID, "claude-opus-4-5")); + } + if (lower.startsWith(ANTHROPIC_OPUS_46_DOT_MODEL_ID)) { + templateIds.push(lower.replace(ANTHROPIC_OPUS_46_DOT_MODEL_ID, "claude-opus-4.5")); + } + 
templateIds.push(...ANTHROPIC_OPUS_TEMPLATE_MODEL_IDS); + + for (const templateId of [...new Set(templateIds)].filter(Boolean)) { + const template = modelRegistry.find(normalizedProvider, templateId) as Model | null; + if (!template) { + continue; + } + return normalizeModelCompat({ + ...template, + id: trimmedModelId, + name: trimmedModelId, + } as Model); + } + + return undefined; +} + export function buildInlineProviderModels( providers: Record, ): InlineModelEntry[] { @@ -140,6 +191,14 @@ export function resolveModel( if (codexForwardCompat) { return { model: codexForwardCompat, authStorage, modelRegistry }; } + const anthropicForwardCompat = resolveAnthropicOpus46ForwardCompatModel( + provider, + modelId, + modelRegistry, + ); + if (anthropicForwardCompat) { + return { model: anthropicForwardCompat, authStorage, modelRegistry }; + } const providerCfg = providers[provider]; if (providerCfg || modelId.startsWith("mock-")) { const fallbackModel: Model = normalizeModelCompat({ diff --git a/src/agents/pi-embedded-runner/run.ts b/src/agents/pi-embedded-runner/run.ts index d7fb2693d..c8ca9b5a1 100644 --- a/src/agents/pi-embedded-runner/run.ts +++ b/src/agents/pi-embedded-runner/run.ts @@ -3,7 +3,6 @@ import type { ThinkLevel } from "../../auto-reply/thinking.js"; import type { RunEmbeddedPiAgentParams } from "./run/params.js"; import type { EmbeddedPiAgentMeta, EmbeddedPiRunResult } from "./types.js"; import { enqueueCommandInLane } from "../../process/command-queue.js"; -import { resolveUserPath } from "../../utils.js"; import { isMarkdownCapableMessageChannel } from "../../utils/message-channel.js"; import { resolveOpenClawAgentDir } from "../agent-paths.js"; import { @@ -46,6 +45,7 @@ import { type FailoverReason, } from "../pi-embedded-helpers.js"; import { normalizeUsage, type UsageLike } from "../usage.js"; +import { redactRunIdentifier, resolveRunWorkspaceDir } from "../workspace-run.js"; import { compactEmbeddedPiSessionDirect } from "./compact.js"; import { 
resolveGlobalLane, resolveSessionLane } from "./lanes.js"; import { log } from "./logger.js"; @@ -92,7 +92,21 @@ export async function runEmbeddedPiAgent( return enqueueSession(() => enqueueGlobal(async () => { const started = Date.now(); - const resolvedWorkspace = resolveUserPath(params.workspaceDir); + const workspaceResolution = resolveRunWorkspaceDir({ + workspaceDir: params.workspaceDir, + sessionKey: params.sessionKey, + agentId: params.agentId, + config: params.config, + }); + const resolvedWorkspace = workspaceResolution.workspaceDir; + const redactedSessionId = redactRunIdentifier(params.sessionId); + const redactedSessionKey = redactRunIdentifier(params.sessionKey); + const redactedWorkspace = redactRunIdentifier(resolvedWorkspace); + if (workspaceResolution.usedFallback) { + log.warn( + `[workspace-fallback] caller=runEmbeddedPiAgent reason=${workspaceResolution.fallbackReason} run=${params.runId} session=${redactedSessionId} sessionKey=${redactedSessionKey} agent=${workspaceResolution.agentId} workspace=${redactedWorkspace}`, + ); + } const prevCwd = process.cwd(); const provider = (params.provider ?? 
DEFAULT_PROVIDER).trim() || DEFAULT_PROVIDER; @@ -333,7 +347,7 @@ export async function runEmbeddedPiAgent( replyToMode: params.replyToMode, hasRepliedRef: params.hasRepliedRef, sessionFile: params.sessionFile, - workspaceDir: params.workspaceDir, + workspaceDir: resolvedWorkspace, agentDir, config: params.config, skillsSnapshot: params.skillsSnapshot, @@ -345,6 +359,7 @@ export async function runEmbeddedPiAgent( model, authStorage, modelRegistry, + agentId: workspaceResolution.agentId, thinkLevel, verboseLevel: params.verboseLevel, reasoningLevel: params.reasoningLevel, @@ -401,7 +416,7 @@ export async function runEmbeddedPiAgent( agentAccountId: params.agentAccountId, authProfileId: lastProfileId, sessionFile: params.sessionFile, - workspaceDir: params.workspaceDir, + workspaceDir: resolvedWorkspace, agentDir, config: params.config, skillsSnapshot: params.skillsSnapshot, diff --git a/src/agents/pi-embedded-runner/run/attempt.ts b/src/agents/pi-embedded-runner/run/attempt.ts index 83fe17cfb..2e6c70292 100644 --- a/src/agents/pi-embedded-runner/run/attempt.ts +++ b/src/agents/pi-embedded-runner/run/attempt.ts @@ -10,7 +10,7 @@ import { resolveChannelCapabilities } from "../../../config/channel-capabilities import { getMachineDisplayName } from "../../../infra/machine-name.js"; import { MAX_IMAGE_BYTES } from "../../../media/constants.js"; import { getGlobalHookRunner } from "../../../plugins/hook-runner-global.js"; -import { isSubagentSessionKey } from "../../../routing/session-key.js"; +import { isSubagentSessionKey, normalizeAgentId } from "../../../routing/session-key.js"; import { resolveSignalReactionLevel } from "../../../signal/reaction-level.js"; import { resolveTelegramInlineButtonsScope } from "../../../telegram/inline-buttons.js"; import { resolveTelegramReactionLevel } from "../../../telegram/reaction-level.js"; @@ -705,6 +705,13 @@ export async function runEmbeddedAttempt( // Get hook runner once for both before_agent_start and agent_end hooks const 
hookRunner = getGlobalHookRunner(); + const hookAgentId = + typeof params.agentId === "string" && params.agentId.trim() + ? normalizeAgentId(params.agentId) + : resolveSessionAgentIds({ + sessionKey: params.sessionKey, + config: params.config, + }).sessionAgentId; let promptError: unknown = null; try { @@ -720,7 +727,7 @@ export async function runEmbeddedAttempt( messages: activeSession.messages, }, { - agentId: params.sessionKey?.split(":")[0] ?? "main", + agentId: hookAgentId, sessionKey: params.sessionKey, workspaceDir: params.workspaceDir, messageProvider: params.messageProvider ?? undefined, @@ -850,7 +857,7 @@ export async function runEmbeddedAttempt( durationMs: Date.now() - promptStartedAt, }, { - agentId: params.sessionKey?.split(":")[0] ?? "main", + agentId: hookAgentId, sessionKey: params.sessionKey, workspaceDir: params.workspaceDir, messageProvider: params.messageProvider ?? undefined, diff --git a/src/agents/pi-embedded-runner/run/params.ts b/src/agents/pi-embedded-runner/run/params.ts index 93f5c5c92..f56f3ecac 100644 --- a/src/agents/pi-embedded-runner/run/params.ts +++ b/src/agents/pi-embedded-runner/run/params.ts @@ -20,6 +20,7 @@ export type ClientToolDefinition = { export type RunEmbeddedPiAgentParams = { sessionId: string; sessionKey?: string; + agentId?: string; messageChannel?: string; messageProvider?: string; agentAccountId?: string; diff --git a/src/agents/pi-embedded-runner/run/types.ts b/src/agents/pi-embedded-runner/run/types.ts index 931afcd24..181a42c9f 100644 --- a/src/agents/pi-embedded-runner/run/types.ts +++ b/src/agents/pi-embedded-runner/run/types.ts @@ -14,6 +14,7 @@ import type { ClientToolDefinition } from "./params.js"; export type EmbeddedRunAttemptParams = { sessionId: string; sessionKey?: string; + agentId?: string; messageChannel?: string; messageProvider?: string; agentAccountId?: string; diff --git a/src/agents/pi-tools-agent-config.test.ts b/src/agents/pi-tools-agent-config.test.ts index b3b0367af..8fba398ae 100644 
--- a/src/agents/pi-tools-agent-config.test.ts +++ b/src/agents/pi-tools-agent-config.test.ts @@ -148,7 +148,7 @@ describe("Agent-specific tool filtering", () => { workspaceDir: "/tmp/test-provider", agentDir: "/tmp/agent-provider", modelProvider: "google-antigravity", - modelId: "claude-opus-4-5-thinking", + modelId: "claude-opus-4-6-thinking", }); const toolNames = tools.map((t) => t.name); @@ -176,7 +176,7 @@ describe("Agent-specific tool filtering", () => { workspaceDir: "/tmp/test-provider-profile", agentDir: "/tmp/agent-provider-profile", modelProvider: "google-antigravity", - modelId: "claude-opus-4-5-thinking", + modelId: "claude-opus-4-6-thinking", }); const toolNames = tools.map((t) => t.name); diff --git a/src/agents/tools/cron-tool.test.ts b/src/agents/tools/cron-tool.test.ts index 77ffb36e6..cee2e57e0 100644 --- a/src/agents/tools/cron-tool.test.ts +++ b/src/agents/tools/cron-tool.test.ts @@ -30,8 +30,8 @@ describe("cron tool", () => { ], ["remove", { action: "remove", jobId: "job-1" }, { id: "job-1" }], ["remove", { action: "remove", id: "job-2" }, { id: "job-2" }], - ["run", { action: "run", jobId: "job-1" }, { id: "job-1" }], - ["run", { action: "run", id: "job-2" }, { id: "job-2" }], + ["run", { action: "run", jobId: "job-1" }, { id: "job-1", mode: "force" }], + ["run", { action: "run", id: "job-2" }, { id: "job-2", mode: "force" }], ["runs", { action: "runs", jobId: "job-1" }, { id: "job-1" }], ["runs", { action: "runs", id: "job-2" }, { id: "job-2" }], ])("%s sends id to gateway", async (action, args, expectedParams) => { @@ -58,7 +58,21 @@ describe("cron tool", () => { const call = callGatewayMock.mock.calls[0]?.[0] as { params?: unknown; }; - expect(call?.params).toEqual({ id: "job-primary" }); + expect(call?.params).toEqual({ id: "job-primary", mode: "force" }); + }); + + it("supports due-only run mode", async () => { + const tool = createCronTool(); + await tool.execute("call-due", { + action: "run", + jobId: "job-due", + runMode: "due", + 
}); + + const call = callGatewayMock.mock.calls[0]?.[0] as { + params?: unknown; + }; + expect(call?.params).toEqual({ id: "job-due", mode: "due" }); }); it("normalizes cron.add job payloads", async () => { @@ -86,7 +100,7 @@ describe("cron tool", () => { deleteAfterRun: true, schedule: { kind: "at", at: new Date(123).toISOString() }, sessionTarget: "main", - wakeMode: "next-heartbeat", + wakeMode: "now", payload: { kind: "systemEvent", text: "hello" }, }); }); diff --git a/src/agents/tools/cron-tool.ts b/src/agents/tools/cron-tool.ts index 4c9633144..cc5cab54f 100644 --- a/src/agents/tools/cron-tool.ts +++ b/src/agents/tools/cron-tool.ts @@ -18,6 +18,7 @@ import { resolveInternalSessionKey, resolveMainSessionAlias } from "./sessions-h const CRON_ACTIONS = ["status", "list", "add", "update", "remove", "run", "runs", "wake"] as const; const CRON_WAKE_MODES = ["now", "next-heartbeat"] as const; +const CRON_RUN_MODES = ["due", "force"] as const; const REMINDER_CONTEXT_MESSAGES_MAX = 10; const REMINDER_CONTEXT_PER_MESSAGE_MAX = 220; @@ -37,6 +38,7 @@ const CronToolSchema = Type.Object({ patch: Type.Optional(Type.Object({}, { additionalProperties: true })), text: Type.Optional(Type.String()), mode: optionalStringEnum(CRON_WAKE_MODES), + runMode: optionalStringEnum(CRON_RUN_MODES), contextMessages: Type.Optional( Type.Number({ minimum: 0, maximum: REMINDER_CONTEXT_MESSAGES_MAX }), ), @@ -312,7 +314,6 @@ Use jobId as the canonical identifier; id is accepted for compatibility. Use con } } - // [Fix Issue 3] Infer delivery target from session key for isolated jobs if not provided if ( opts?.agentSessionKey && job && @@ -393,7 +394,9 @@ Use jobId as the canonical identifier; id is accepted for compatibility. Use con if (!id) { throw new Error("jobId required (id accepted for backward compatibility)"); } - return jsonResult(await callGatewayTool("cron.run", gatewayOpts, { id })); + const runMode = + params.runMode === "due" || params.runMode === "force" ? 
params.runMode : "force"; + return jsonResult(await callGatewayTool("cron.run", gatewayOpts, { id, mode: runMode })); } case "runs": { const id = readStringParam(params, "jobId") ?? readStringParam(params, "id"); diff --git a/src/agents/tools/sessions-spawn-tool.ts b/src/agents/tools/sessions-spawn-tool.ts index d73b8c4a0..1ed7bcd1c 100644 --- a/src/agents/tools/sessions-spawn-tool.ts +++ b/src/agents/tools/sessions-spawn-tool.ts @@ -214,6 +214,26 @@ export function createSessionsSpawnTool(opts?: { modelWarning = messageText; } } + if (thinkingOverride !== undefined) { + try { + await callGateway({ + method: "sessions.patch", + params: { + key: childSessionKey, + thinkingLevel: thinkingOverride === "off" ? null : thinkingOverride, + }, + timeoutMs: 10_000, + }); + } catch (err) { + const messageText = + err instanceof Error ? err.message : typeof err === "string" ? err : "error"; + return jsonResult({ + status: "error", + error: messageText, + childSessionKey, + }); + } + } const childSystemPrompt = buildSubagentSystemPrompt({ requesterSessionKey, requesterOrigin, diff --git a/src/agents/workspace-run.test.ts b/src/agents/workspace-run.test.ts new file mode 100644 index 000000000..bb99f5177 --- /dev/null +++ b/src/agents/workspace-run.test.ts @@ -0,0 +1,139 @@ +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import { resolveRunWorkspaceDir } from "./workspace-run.js"; +import { DEFAULT_AGENT_WORKSPACE_DIR } from "./workspace.js"; + +describe("resolveRunWorkspaceDir", () => { + it("resolves explicit workspace values without fallback", () => { + const explicit = path.join(process.cwd(), "tmp", "workspace-run-explicit"); + const result = resolveRunWorkspaceDir({ + workspaceDir: explicit, + sessionKey: "agent:main:subagent:test", + }); + + expect(result.usedFallback).toBe(false); + expect(result.agentId).toBe("main"); + 
expect(result.workspaceDir).toBe(path.resolve(explicit)); + }); + + it("falls back to configured per-agent workspace when input is missing", () => { + const defaultWorkspace = path.join(process.cwd(), "tmp", "workspace-default-main"); + const researchWorkspace = path.join(process.cwd(), "tmp", "workspace-research"); + const cfg = { + agents: { + defaults: { workspace: defaultWorkspace }, + list: [{ id: "research", workspace: researchWorkspace }], + }, + } satisfies OpenClawConfig; + + const result = resolveRunWorkspaceDir({ + workspaceDir: undefined, + sessionKey: "agent:research:subagent:test", + config: cfg, + }); + + expect(result.usedFallback).toBe(true); + expect(result.fallbackReason).toBe("missing"); + expect(result.agentId).toBe("research"); + expect(result.workspaceDir).toBe(path.resolve(researchWorkspace)); + }); + + it("falls back to default workspace for blank strings", () => { + const defaultWorkspace = path.join(process.cwd(), "tmp", "workspace-default-main"); + const cfg = { + agents: { + defaults: { workspace: defaultWorkspace }, + }, + } satisfies OpenClawConfig; + + const result = resolveRunWorkspaceDir({ + workspaceDir: " ", + sessionKey: "agent:main:subagent:test", + config: cfg, + }); + + expect(result.usedFallback).toBe(true); + expect(result.fallbackReason).toBe("blank"); + expect(result.agentId).toBe("main"); + expect(result.workspaceDir).toBe(path.resolve(defaultWorkspace)); + }); + + it("falls back to built-in main workspace when config is unavailable", () => { + const result = resolveRunWorkspaceDir({ + workspaceDir: null, + sessionKey: "agent:main:subagent:test", + config: undefined, + }); + + expect(result.usedFallback).toBe(true); + expect(result.fallbackReason).toBe("missing"); + expect(result.agentId).toBe("main"); + expect(result.workspaceDir).toBe(path.resolve(DEFAULT_AGENT_WORKSPACE_DIR)); + }); + + it("throws for malformed agent session keys", () => { + expect(() => + resolveRunWorkspaceDir({ + workspaceDir: undefined, + 
sessionKey: "agent::broken", + config: undefined, + }), + ).toThrow("Malformed agent session key"); + }); + + it("uses explicit agent id for per-agent fallback when config is unavailable", () => { + const result = resolveRunWorkspaceDir({ + workspaceDir: undefined, + sessionKey: "definitely-not-a-valid-session-key", + agentId: "research", + config: undefined, + }); + + expect(result.agentId).toBe("research"); + expect(result.agentIdSource).toBe("explicit"); + expect(result.workspaceDir).toBe(path.resolve(os.homedir(), ".openclaw", "workspace-research")); + }); + + it("throws for malformed agent session keys even when config has a default agent", () => { + const mainWorkspace = path.join(process.cwd(), "tmp", "workspace-main-default"); + const researchWorkspace = path.join(process.cwd(), "tmp", "workspace-research-default"); + const cfg = { + agents: { + defaults: { workspace: mainWorkspace }, + list: [ + { id: "main", workspace: mainWorkspace }, + { id: "research", workspace: researchWorkspace, default: true }, + ], + }, + } satisfies OpenClawConfig; + + expect(() => + resolveRunWorkspaceDir({ + workspaceDir: undefined, + sessionKey: "agent::broken", + config: cfg, + }), + ).toThrow("Malformed agent session key"); + }); + + it("treats non-agent legacy keys as default, not malformed", () => { + const fallbackWorkspace = path.join(process.cwd(), "tmp", "workspace-default-legacy"); + const cfg = { + agents: { + defaults: { workspace: fallbackWorkspace }, + }, + } satisfies OpenClawConfig; + + const result = resolveRunWorkspaceDir({ + workspaceDir: undefined, + sessionKey: "custom-main-key", + config: cfg, + }); + + expect(result.agentId).toBe("main"); + expect(result.agentIdSource).toBe("default"); + expect(result.workspaceDir).toBe(path.resolve(fallbackWorkspace)); + }); +}); diff --git a/src/agents/workspace-run.ts b/src/agents/workspace-run.ts new file mode 100644 index 000000000..1061a0344 --- /dev/null +++ b/src/agents/workspace-run.ts @@ -0,0 +1,106 @@ +import 
type { OpenClawConfig } from "../config/config.js"; +import { redactIdentifier } from "../logging/redact-identifier.js"; +import { + classifySessionKeyShape, + DEFAULT_AGENT_ID, + normalizeAgentId, + parseAgentSessionKey, +} from "../routing/session-key.js"; +import { resolveUserPath } from "../utils.js"; +import { resolveAgentWorkspaceDir, resolveDefaultAgentId } from "./agent-scope.js"; + +export type WorkspaceFallbackReason = "missing" | "blank" | "invalid_type"; +type AgentIdSource = "explicit" | "session_key" | "default"; + +export type ResolveRunWorkspaceResult = { + workspaceDir: string; + usedFallback: boolean; + fallbackReason?: WorkspaceFallbackReason; + agentId: string; + agentIdSource: AgentIdSource; +}; + +function resolveRunAgentId(params: { + sessionKey?: string; + agentId?: string; + config?: OpenClawConfig; +}): { + agentId: string; + agentIdSource: AgentIdSource; +} { + const rawSessionKey = params.sessionKey?.trim() ?? ""; + const shape = classifySessionKeyShape(rawSessionKey); + if (shape === "malformed_agent") { + throw new Error("Malformed agent session key; refusing workspace resolution."); + } + + const explicit = + typeof params.agentId === "string" && params.agentId.trim() + ? normalizeAgentId(params.agentId) + : undefined; + if (explicit) { + return { agentId: explicit, agentIdSource: "explicit" }; + } + + const defaultAgentId = resolveDefaultAgentId(params.config ?? {}); + if (shape === "missing" || shape === "legacy_or_alias") { + return { + agentId: defaultAgentId || DEFAULT_AGENT_ID, + agentIdSource: "default", + }; + } + + const parsed = parseAgentSessionKey(rawSessionKey); + if (parsed?.agentId) { + return { + agentId: normalizeAgentId(parsed.agentId), + agentIdSource: "session_key", + }; + } + + // Defensive fallback, should be unreachable for non-malformed shapes. 
+ return { + agentId: defaultAgentId || DEFAULT_AGENT_ID, + agentIdSource: "default", + }; +} + +export function redactRunIdentifier(value: string | undefined): string { + return redactIdentifier(value, { len: 12 }); +} + +export function resolveRunWorkspaceDir(params: { + workspaceDir: unknown; + sessionKey?: string; + agentId?: string; + config?: OpenClawConfig; +}): ResolveRunWorkspaceResult { + const requested = params.workspaceDir; + const { agentId, agentIdSource } = resolveRunAgentId({ + sessionKey: params.sessionKey, + agentId: params.agentId, + config: params.config, + }); + if (typeof requested === "string") { + const trimmed = requested.trim(); + if (trimmed) { + return { + workspaceDir: resolveUserPath(trimmed), + usedFallback: false, + agentId, + agentIdSource, + }; + } + } + + const fallbackReason: WorkspaceFallbackReason = + requested == null ? "missing" : typeof requested === "string" ? "blank" : "invalid_type"; + const fallbackWorkspace = resolveAgentWorkspaceDir(params.config ?? 
{}, agentId); + return { + workspaceDir: resolveUserPath(fallbackWorkspace), + usedFallback: true, + fallbackReason, + agentId, + agentIdSource, + }; +} diff --git a/src/auto-reply/reply/agent-runner-execution.ts b/src/auto-reply/reply/agent-runner-execution.ts index 3bdc5dde3..372db8b30 100644 --- a/src/auto-reply/reply/agent-runner-execution.ts +++ b/src/auto-reply/reply/agent-runner-execution.ts @@ -178,6 +178,7 @@ export async function runAgentTurnWithFallback(params: { const result = await runCliAgent({ sessionId: params.followupRun.run.sessionId, sessionKey: params.sessionKey, + agentId: params.followupRun.run.agentId, sessionFile: params.followupRun.run.sessionFile, workspaceDir: params.followupRun.run.workspaceDir, config: params.followupRun.run.config, @@ -255,6 +256,7 @@ export async function runAgentTurnWithFallback(params: { return runEmbeddedPiAgent({ sessionId: params.followupRun.run.sessionId, sessionKey: params.sessionKey, + agentId: params.followupRun.run.agentId, messageProvider: params.sessionCtx.Provider?.trim().toLowerCase() || undefined, agentAccountId: params.sessionCtx.AccountId, messageTo: params.sessionCtx.OriginatingTo ?? params.sessionCtx.To, diff --git a/src/auto-reply/reply/agent-runner-memory.ts b/src/auto-reply/reply/agent-runner-memory.ts index 867ba42f9..f73c5c60d 100644 --- a/src/auto-reply/reply/agent-runner-memory.ts +++ b/src/auto-reply/reply/agent-runner-memory.ts @@ -113,6 +113,7 @@ export async function runMemoryFlushIfNeeded(params: { return runEmbeddedPiAgent({ sessionId: params.followupRun.run.sessionId, sessionKey: params.sessionKey, + agentId: params.followupRun.run.agentId, messageProvider: params.sessionCtx.Provider?.trim().toLowerCase() || undefined, agentAccountId: params.sessionCtx.AccountId, messageTo: params.sessionCtx.OriginatingTo ?? 
params.sessionCtx.To, diff --git a/src/auto-reply/reply/followup-runner.ts b/src/auto-reply/reply/followup-runner.ts index 1ca51d0f4..e4c23aa04 100644 --- a/src/auto-reply/reply/followup-runner.ts +++ b/src/auto-reply/reply/followup-runner.ts @@ -140,6 +140,7 @@ export function createFollowupRunner(params: { return runEmbeddedPiAgent({ sessionId: queued.run.sessionId, sessionKey: queued.run.sessionKey, + agentId: queued.run.agentId, messageProvider: queued.run.messageProvider, agentAccountId: queued.run.agentAccountId, messageTo: queued.originatingTo, diff --git a/src/cli/cron-cli/register.cron-add.ts b/src/cli/cron-cli/register.cron-add.ts index 81720418d..001fd5f1b 100644 --- a/src/cli/cron-cli/register.cron-add.ts +++ b/src/cli/cron-cli/register.cron-add.ts @@ -71,7 +71,7 @@ export function registerCronAddCommand(cron: Command) { .option("--keep-after-run", "Keep one-shot job after it succeeds", false) .option("--agent ", "Agent id for this job") .option("--session ", "Session target (main|isolated)") - .option("--wake ", "Wake mode (now|next-heartbeat)", "next-heartbeat") + .option("--wake ", "Wake mode (now|next-heartbeat)", "now") .option("--at ", "Run once at time (ISO) or +duration (e.g. 20m)") .option("--every ", "Run every duration (e.g. 10m, 1h)") .option("--cron ", "Cron expression (5-field)") @@ -122,8 +122,8 @@ export function registerCronAddCommand(cron: Command) { }; })(); - const wakeModeRaw = typeof opts.wake === "string" ? opts.wake : "next-heartbeat"; - const wakeMode = wakeModeRaw.trim() || "next-heartbeat"; + const wakeModeRaw = typeof opts.wake === "string" ? 
opts.wake : "now"; + const wakeMode = wakeModeRaw.trim() || "now"; if (wakeMode !== "now" && wakeMode !== "next-heartbeat") { throw new Error("--wake must be now or next-heartbeat"); } diff --git a/src/cli/cron-cli/register.cron-simple.ts b/src/cli/cron-cli/register.cron-simple.ts index 1493c371a..e5baa1171 100644 --- a/src/cli/cron-cli/register.cron-simple.ts +++ b/src/cli/cron-cli/register.cron-simple.ts @@ -92,12 +92,12 @@ export function registerCronSimpleCommands(cron: Command) { .command("run") .description("Run a cron job now (debug)") .argument("", "Job id") - .option("--force", "Run even if not due", false) + .option("--due", "Run only when due (default behavior in older versions)", false) .action(async (id, opts) => { try { const res = await callGatewayFromCli("cron.run", opts, { id, - mode: opts.force ? "force" : "due", + mode: opts.due ? "due" : "force", }); defaultRuntime.log(JSON.stringify(res, null, 2)); } catch (err) { diff --git a/src/commands/agent.ts b/src/commands/agent.ts index a42677551..4c08d75df 100644 --- a/src/commands/agent.ts +++ b/src/commands/agent.ts @@ -398,6 +398,7 @@ export async function agentCommand( return runCliAgent({ sessionId, sessionKey, + agentId: sessionAgentId, sessionFile, workspaceDir, config: cfg, @@ -418,6 +419,7 @@ export async function agentCommand( return runEmbeddedPiAgent({ sessionId, sessionKey, + agentId: sessionAgentId, messageChannel, agentAccountId: runContext.accountId, messageTo: opts.replyTo ?? 
opts.to, diff --git a/src/commands/models/list.probe.ts b/src/commands/models/list.probe.ts index ee7a874fe..1c30a92eb 100644 --- a/src/commands/models/list.probe.ts +++ b/src/commands/models/list.probe.ts @@ -319,6 +319,7 @@ async function probeTarget(params: { await runEmbeddedPiAgent({ sessionId, sessionFile, + agentId, workspaceDir, agentDir, config: cfg, diff --git a/src/commands/status-all/channels.ts b/src/commands/status-all/channels.ts index d7be6ad75..091921161 100644 --- a/src/commands/status-all/channels.ts +++ b/src/commands/status-all/channels.ts @@ -1,4 +1,3 @@ -import crypto from "node:crypto"; import fs from "node:fs"; import type { ChannelAccountSnapshot, @@ -8,6 +7,7 @@ import type { import type { OpenClawConfig } from "../../config/config.js"; import { resolveChannelDefaultAccountId } from "../../channels/plugins/helpers.js"; import { listChannelPlugins } from "../../channels/plugins/index.js"; +import { sha256HexPrefix } from "../../logging/redact-identifier.js"; import { formatAge } from "./format.js"; export type ChannelRow = { @@ -57,17 +57,13 @@ function existsSyncMaybe(p: string | undefined): boolean | null { } } -function sha256HexPrefix(value: string, len = 8): string { - return crypto.createHash("sha256").update(value).digest("hex").slice(0, len); -} - function formatTokenHint(token: string, opts: { showSecrets: boolean }): string { const t = token.trim(); if (!t) { return "empty"; } if (!opts.showSecrets) { - return `sha256:${sha256HexPrefix(t)} · len ${t.length}`; + return `sha256:${sha256HexPrefix(t, 8)} · len ${t.length}`; } const head = t.slice(0, 4); const tail = t.slice(-4); diff --git a/src/config/schema.ts b/src/config/schema.ts index 175265ac1..a9c177c82 100644 --- a/src/config/schema.ts +++ b/src/config/schema.ts @@ -542,7 +542,8 @@ const FIELD_HELP: Record = { "Extra paths to include in memory search (directories or .md files; relative paths resolved from workspace).", 
"agents.defaults.memorySearch.experimental.sessionMemory": "Enable experimental session transcript indexing for memory search (default: false).", - "agents.defaults.memorySearch.provider": 'Embedding provider ("openai", "gemini", or "local").', + "agents.defaults.memorySearch.provider": + 'Embedding provider ("openai", "gemini", "voyage", or "local").', "agents.defaults.memorySearch.remote.baseUrl": "Custom base URL for remote embeddings (OpenAI-compatible proxies or Gemini overrides).", "agents.defaults.memorySearch.remote.apiKey": "Custom API key for the remote embedding provider.", diff --git a/src/config/types.tools.ts b/src/config/types.tools.ts index b08032427..36700b6ce 100644 --- a/src/config/types.tools.ts +++ b/src/config/types.tools.ts @@ -234,7 +234,7 @@ export type MemorySearchConfig = { sessionMemory?: boolean; }; /** Embedding provider mode. */ - provider?: "openai" | "gemini" | "local"; + provider?: "openai" | "gemini" | "local" | "voyage"; remote?: { baseUrl?: string; apiKey?: string; @@ -253,7 +253,7 @@ export type MemorySearchConfig = { }; }; /** Fallback behavior when embeddings fail. */ - fallback?: "openai" | "gemini" | "local" | "none"; + fallback?: "openai" | "gemini" | "local" | "voyage" | "none"; /** Embedding model id (remote) or alias (local). */ model?: string; /** Local embedding settings (node-llama-cpp). 
*/ diff --git a/src/config/zod-schema.agent-runtime.ts b/src/config/zod-schema.agent-runtime.ts index c2e792f32..582853ff3 100644 --- a/src/config/zod-schema.agent-runtime.ts +++ b/src/config/zod-schema.agent-runtime.ts @@ -318,7 +318,9 @@ export const MemorySearchSchema = z }) .strict() .optional(), - provider: z.union([z.literal("openai"), z.literal("local"), z.literal("gemini")]).optional(), + provider: z + .union([z.literal("openai"), z.literal("local"), z.literal("gemini"), z.literal("voyage")]) + .optional(), remote: z .object({ baseUrl: z.string().optional(), @@ -338,7 +340,13 @@ export const MemorySearchSchema = z .strict() .optional(), fallback: z - .union([z.literal("openai"), z.literal("gemini"), z.literal("local"), z.literal("none")]) + .union([ + z.literal("openai"), + z.literal("gemini"), + z.literal("local"), + z.literal("voyage"), + z.literal("none"), + ]) .optional(), model: z.string().optional(), local: z diff --git a/src/cron/delivery.test.ts b/src/cron/delivery.test.ts new file mode 100644 index 000000000..fcbe9e99a --- /dev/null +++ b/src/cron/delivery.test.ts @@ -0,0 +1,45 @@ +import { describe, expect, it } from "vitest"; +import type { CronJob } from "./types.js"; +import { resolveCronDeliveryPlan } from "./delivery.js"; + +function makeJob(overrides: Partial): CronJob { + const now = Date.now(); + return { + id: "job-1", + name: "test", + enabled: true, + createdAtMs: now, + updatedAtMs: now, + schedule: { kind: "every", everyMs: 60_000 }, + sessionTarget: "isolated", + wakeMode: "next-heartbeat", + payload: { kind: "agentTurn", message: "hello" }, + state: {}, + ...overrides, + }; +} + +describe("resolveCronDeliveryPlan", () => { + it("defaults to announce when delivery object has no mode", () => { + const plan = resolveCronDeliveryPlan( + makeJob({ + delivery: { channel: "telegram", to: "123", mode: undefined as never }, + }), + ); + expect(plan.mode).toBe("announce"); + expect(plan.requested).toBe(true); + 
expect(plan.channel).toBe("telegram"); + expect(plan.to).toBe("123"); + }); + + it("respects legacy payload deliver=false", () => { + const plan = resolveCronDeliveryPlan( + makeJob({ + delivery: undefined, + payload: { kind: "agentTurn", message: "hello", deliver: false }, + }), + ); + expect(plan.mode).toBe("none"); + expect(plan.requested).toBe(false); + }); +}); diff --git a/src/cron/delivery.ts b/src/cron/delivery.ts index c7cbe87f9..f0ba2c2b0 100644 --- a/src/cron/delivery.ts +++ b/src/cron/delivery.ts @@ -32,12 +32,13 @@ export function resolveCronDeliveryPlan(job: CronJob): CronDeliveryPlan { const delivery = job.delivery; const hasDelivery = delivery && typeof delivery === "object"; const rawMode = hasDelivery ? (delivery as { mode?: unknown }).mode : undefined; + const normalizedMode = typeof rawMode === "string" ? rawMode.trim().toLowerCase() : rawMode; const mode = - rawMode === "announce" + normalizedMode === "announce" ? "announce" - : rawMode === "none" + : normalizedMode === "none" ? "none" - : rawMode === "deliver" + : normalizedMode === "deliver" ? "announce" : undefined; @@ -51,7 +52,7 @@ export function resolveCronDeliveryPlan(job: CronJob): CronDeliveryPlan { const channel = deliveryChannel ?? payloadChannel ?? "last"; const to = deliveryTo ?? payloadTo; if (hasDelivery) { - const resolvedMode = mode ?? "none"; + const resolvedMode = mode ?? 
"announce"; return { mode: resolvedMode, channel, diff --git a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts index 6aac38f88..4b5317ef4 100644 --- a/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts +++ b/src/cron/isolated-agent.skips-delivery-without-whatsapp-recipient-besteffortdeliver-true.test.ts @@ -134,6 +134,48 @@ describe("runCronIsolatedAgentTurn", () => { }); }); + it("announces only the final payload text", async () => { + await withTempHome(async (home) => { + const storePath = await writeSessionStore(home); + const deps: CliDeps = { + sendMessageWhatsApp: vi.fn(), + sendMessageTelegram: vi.fn(), + sendMessageDiscord: vi.fn(), + sendMessageSignal: vi.fn(), + sendMessageIMessage: vi.fn(), + }; + vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ + payloads: [{ text: "Working on it..." 
}, { text: "Final weather summary" }], + meta: { + durationMs: 5, + agentMeta: { sessionId: "s", provider: "p", model: "m" }, + }, + }); + + const res = await runCronIsolatedAgentTurn({ + cfg: makeCfg(home, storePath, { + channels: { telegram: { botToken: "t-1" } }, + }), + deps, + job: { + ...makeJob({ kind: "agentTurn", message: "do it" }), + delivery: { mode: "announce", channel: "telegram", to: "123" }, + }, + message: "do it", + sessionKey: "cron:job-1", + lane: "cron", + }); + + expect(res.status).toBe("ok"); + expect(deps.sendMessageTelegram).toHaveBeenCalledTimes(1); + expect(deps.sendMessageTelegram).toHaveBeenCalledWith( + "123", + "Final weather summary", + expect.any(Object), + ); + }); + }); + it("skips announce when messaging tool already sent to target", async () => { await withTempHome(async (home) => { const storePath = await writeSessionStore(home); diff --git a/src/cron/isolated-agent.uses-last-non-empty-agent-text-as.test.ts b/src/cron/isolated-agent.uses-last-non-empty-agent-text-as.test.ts index ab547bdf7..3ec1c935b 100644 --- a/src/cron/isolated-agent.uses-last-non-empty-agent-text-as.test.ts +++ b/src/cron/isolated-agent.uses-last-non-empty-agent-text-as.test.ts @@ -48,7 +48,7 @@ async function writeSessionStore(home: string) { async function readSessionEntry(storePath: string, key: string) { const raw = await fs.readFile(storePath, "utf-8"); - const store = JSON.parse(raw) as Record; + const store = JSON.parse(raw) as Record; return store[key]; } @@ -90,6 +90,38 @@ describe("runCronIsolatedAgentTurn", () => { vi.mocked(loadModelCatalog).mockResolvedValue([]); }); + it("treats blank model overrides as unset", async () => { + await withTempHome(async (home) => { + const storePath = await writeSessionStore(home); + const deps: CliDeps = { + sendMessageWhatsApp: vi.fn(), + sendMessageTelegram: vi.fn(), + sendMessageDiscord: vi.fn(), + sendMessageSignal: vi.fn(), + sendMessageIMessage: vi.fn(), + }; + 
vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ + payloads: [{ text: "ok" }], + meta: { + durationMs: 5, + agentMeta: { sessionId: "s", provider: "p", model: "m" }, + }, + }); + + const res = await runCronIsolatedAgentTurn({ + cfg: makeCfg(home, storePath), + deps, + job: makeJob({ kind: "agentTurn", message: "do it", model: " " }), + message: "do it", + sessionKey: "cron:job-1", + lane: "cron", + }); + + expect(res.status).toBe("ok"); + expect(vi.mocked(runEmbeddedPiAgent)).toHaveBeenCalledTimes(1); + }); + }); + it("uses last non-empty agent text as summary", async () => { await withTempHome(async (home) => { const storePath = await writeSessionStore(home); @@ -585,6 +617,49 @@ describe("runCronIsolatedAgentTurn", () => { expect(first?.sessionId).toBeDefined(); expect(second?.sessionId).toBeDefined(); expect(second?.sessionId).not.toBe(first?.sessionId); + expect(first?.label).toBe("Cron: job-1"); + expect(second?.label).toBe("Cron: job-1"); + }); + }); + + it("preserves an existing cron session label", async () => { + await withTempHome(async (home) => { + const storePath = await writeSessionStore(home); + const raw = await fs.readFile(storePath, "utf-8"); + const store = JSON.parse(raw) as Record>; + store["agent:main:cron:job-1"] = { + sessionId: "old", + updatedAt: Date.now(), + label: "Nightly digest", + }; + await fs.writeFile(storePath, JSON.stringify(store, null, 2), "utf-8"); + + const deps: CliDeps = { + sendMessageWhatsApp: vi.fn(), + sendMessageTelegram: vi.fn(), + sendMessageDiscord: vi.fn(), + sendMessageSignal: vi.fn(), + sendMessageIMessage: vi.fn(), + }; + vi.mocked(runEmbeddedPiAgent).mockResolvedValue({ + payloads: [{ text: "ok" }], + meta: { + durationMs: 5, + agentMeta: { sessionId: "s", provider: "p", model: "m" }, + }, + }); + + await runCronIsolatedAgentTurn({ + cfg: makeCfg(home, storePath), + deps, + job: makeJob({ kind: "agentTurn", message: "ping", deliver: false }), + message: "ping", + sessionKey: "cron:job-1", + lane: "cron", + 
}); + const entry = await readSessionEntry(storePath, "agent:main:cron:job-1"); + + expect(entry?.label).toBe("Nightly digest"); }); }); }); diff --git a/src/cron/isolated-agent/delivery-target.ts b/src/cron/isolated-agent/delivery-target.ts index 5be448b2c..35ccc9047 100644 --- a/src/cron/isolated-agent/delivery-target.ts +++ b/src/cron/isolated-agent/delivery-target.ts @@ -30,6 +30,7 @@ export async function resolveDeliveryTarget( }> { const requestedChannel = typeof jobPayload.channel === "string" ? jobPayload.channel : "last"; const explicitTo = typeof jobPayload.to === "string" ? jobPayload.to : undefined; + const allowMismatchedLastTo = requestedChannel === "last"; const sessionCfg = cfg.session; const mainSessionKey = resolveAgentMainSessionKey({ cfg, agentId }); @@ -41,7 +42,7 @@ export async function resolveDeliveryTarget( entry: main, requestedChannel, explicitTo, - allowMismatchedLastTo: true, + allowMismatchedLastTo, }); let fallbackChannel: Exclude | undefined; @@ -60,7 +61,7 @@ export async function resolveDeliveryTarget( requestedChannel, explicitTo, fallbackChannel, - allowMismatchedLastTo: true, + allowMismatchedLastTo, mode: preliminary.mode, }) : preliminary; diff --git a/src/cron/isolated-agent/helpers.ts b/src/cron/isolated-agent/helpers.ts index ddc72d645..d4d42b20f 100644 --- a/src/cron/isolated-agent/helpers.ts +++ b/src/cron/isolated-agent/helpers.ts @@ -8,6 +8,7 @@ type DeliveryPayload = { text?: string; mediaUrl?: string; mediaUrls?: string[]; + channelData?: Record; }; export function pickSummaryFromOutput(text: string | undefined) { @@ -39,6 +40,19 @@ export function pickLastNonEmptyTextFromPayloads(payloads: Array<{ text?: string return undefined; } +export function pickLastDeliverablePayload(payloads: DeliveryPayload[]) { + for (let i = payloads.length - 1; i >= 0; i--) { + const payload = payloads[i]; + const text = (payload?.text ?? "").trim(); + const hasMedia = Boolean(payload?.mediaUrl) || (payload?.mediaUrls?.length ?? 
0) > 0; + const hasChannelData = Object.keys(payload?.channelData ?? {}).length > 0; + if (text || hasMedia || hasChannelData) { + return payload; + } + } + return undefined; +} + /** * Check if all payloads are just heartbeat ack responses (HEARTBEAT_OK). * Returns true if delivery should be skipped because there's no real content. diff --git a/src/cron/isolated-agent/run.ts b/src/cron/isolated-agent/run.ts index 6a557db34..3dd0cc416 100644 --- a/src/cron/isolated-agent/run.ts +++ b/src/cron/isolated-agent/run.ts @@ -56,6 +56,7 @@ import { resolveCronDeliveryPlan } from "../delivery.js"; import { resolveDeliveryTarget } from "./delivery-target.js"; import { isHeartbeatOnlyResponse, + pickLastDeliverablePayload, pickLastNonEmptyTextFromPayloads, pickSummaryFromOutput, pickSummaryFromPayloads, @@ -97,6 +98,8 @@ export type RunCronAgentTurnResult = { /** Last non-empty agent text output (not truncated). */ outputText?: string; error?: string; + sessionId?: string; + sessionKey?: string; }; export async function runCronIsolatedAgentTurn(params: { @@ -187,14 +190,12 @@ export async function runCronIsolatedAgentTurn(params: { } const modelOverrideRaw = params.job.payload.kind === "agentTurn" ? params.job.payload.model : undefined; - if (modelOverrideRaw !== undefined) { - if (typeof modelOverrideRaw !== "string") { - return { status: "error", error: "invalid model: expected string" }; - } + const modelOverride = typeof modelOverrideRaw === "string" ? 
modelOverrideRaw.trim() : undefined; + if (modelOverride !== undefined && modelOverride.length > 0) { const resolvedOverride = resolveAllowedModelRef({ cfg: cfgWithAgentDefaults, catalog: await loadCatalog(), - raw: modelOverrideRaw, + raw: modelOverride, defaultProvider: resolvedDefault.provider, defaultModel: resolvedDefault.model, }); @@ -211,6 +212,36 @@ export async function runCronIsolatedAgentTurn(params: { agentId, nowMs: now, }); + const runSessionId = cronSession.sessionEntry.sessionId; + const runSessionKey = baseSessionKey.startsWith("cron:") + ? `${agentSessionKey}:run:${runSessionId}` + : agentSessionKey; + const persistSessionEntry = async () => { + cronSession.store[agentSessionKey] = cronSession.sessionEntry; + if (runSessionKey !== agentSessionKey) { + cronSession.store[runSessionKey] = cronSession.sessionEntry; + } + await updateSessionStore(cronSession.storePath, (store) => { + store[agentSessionKey] = cronSession.sessionEntry; + if (runSessionKey !== agentSessionKey) { + store[runSessionKey] = cronSession.sessionEntry; + } + }); + }; + const withRunSession = ( + result: Omit, + ): RunCronAgentTurnResult => ({ + ...result, + sessionId: runSessionId, + sessionKey: runSessionKey, + }); + if (!cronSession.sessionEntry.label?.trim() && baseSessionKey.startsWith("cron:")) { + const labelSuffix = + typeof params.job.name === "string" && params.job.name.trim() + ? 
params.job.name.trim() + : params.job.id; + cronSession.sessionEntry.label = `Cron: ${labelSuffix}`; + } // Resolve thinking level - job thinking > hooks.gmail.thinking > agent default const hooksGmailThinking = isGmailHook @@ -317,18 +348,12 @@ export async function runCronIsolatedAgentTurn(params: { updatedAt: Date.now(), skillsSnapshot, }; - cronSession.store[agentSessionKey] = cronSession.sessionEntry; - await updateSessionStore(cronSession.storePath, (store) => { - store[agentSessionKey] = cronSession.sessionEntry; - }); + await persistSessionEntry(); } // Persist systemSent before the run, mirroring the inbound auto-reply behavior. cronSession.sessionEntry.systemSent = true; - cronSession.store[agentSessionKey] = cronSession.sessionEntry; - await updateSessionStore(cronSession.storePath, (store) => { - store[agentSessionKey] = cronSession.sessionEntry; - }); + await persistSessionEntry(); let runResult: Awaited>; let fallbackProvider = provider; @@ -356,6 +381,7 @@ export async function runCronIsolatedAgentTurn(params: { return runCliAgent({ sessionId: cronSession.sessionEntry.sessionId, sessionKey: agentSessionKey, + agentId, sessionFile, workspaceDir, config: cfgWithAgentDefaults, @@ -371,6 +397,7 @@ export async function runCronIsolatedAgentTurn(params: { return runEmbeddedPiAgent({ sessionId: cronSession.sessionEntry.sessionId, sessionKey: agentSessionKey, + agentId, messageChannel, agentAccountId: resolvedDelivery.accountId, sessionFile, @@ -394,7 +421,7 @@ export async function runCronIsolatedAgentTurn(params: { fallbackProvider = fallbackResult.provider; fallbackModel = fallbackResult.model; } catch (err) { - return { status: "error", error: String(err) }; + return withRunSession({ status: "error", error: String(err) }); } const payloads = runResult.payloads ?? []; @@ -425,14 +452,19 @@ export async function runCronIsolatedAgentTurn(params: { cronSession.sessionEntry.totalTokens = promptTokens > 0 ? promptTokens : (usage.total ?? 
input); } - cronSession.store[agentSessionKey] = cronSession.sessionEntry; - await updateSessionStore(cronSession.storePath, (store) => { - store[agentSessionKey] = cronSession.sessionEntry; - }); + await persistSessionEntry(); } const firstText = payloads[0]?.text ?? ""; const summary = pickSummaryFromPayloads(payloads) ?? pickSummaryFromOutput(firstText); const outputText = pickLastNonEmptyTextFromPayloads(payloads); + const synthesizedText = outputText?.trim() || summary?.trim() || undefined; + const deliveryPayload = pickLastDeliverablePayload(payloads); + const deliveryPayloads = + deliveryPayload !== undefined + ? [deliveryPayload] + : synthesizedText + ? [{ text: synthesizedText }] + : []; const deliveryBestEffort = resolveCronDeliveryBestEffort(params.job); // Skip delivery for heartbeat-only responses (HEARTBEAT_OK with no real content). @@ -452,28 +484,28 @@ export async function runCronIsolatedAgentTurn(params: { if (deliveryRequested && !skipHeartbeatDelivery && !skipMessagingToolDelivery) { if (resolvedDelivery.error) { if (!deliveryBestEffort) { - return { + return withRunSession({ status: "error", error: resolvedDelivery.error.message, summary, outputText, - }; + }); } logWarn(`[cron:${params.job.id}] ${resolvedDelivery.error.message}`); - return { status: "ok", summary, outputText }; + return withRunSession({ status: "ok", summary, outputText }); } if (!resolvedDelivery.to) { const message = "cron delivery target is missing"; if (!deliveryBestEffort) { - return { + return withRunSession({ status: "error", error: message, summary, outputText, - }; + }); } logWarn(`[cron:${params.job.id}] ${message}`); - return { status: "ok", summary, outputText }; + return withRunSession({ status: "ok", summary, outputText }); } try { await deliverOutboundPayloads({ @@ -482,16 +514,16 @@ export async function runCronIsolatedAgentTurn(params: { to: resolvedDelivery.to, accountId: resolvedDelivery.accountId, threadId: resolvedDelivery.threadId, - payloads, + payloads: 
deliveryPayloads, bestEffort: deliveryBestEffort, deps: createOutboundSendDeps(params.deps), }); } catch (err) { if (!deliveryBestEffort) { - return { status: "error", summary, outputText, error: String(err) }; + return withRunSession({ status: "error", summary, outputText, error: String(err) }); } } } - return { status: "ok", summary, outputText }; + return withRunSession({ status: "ok", summary, outputText }); } diff --git a/src/cron/isolated-agent/session.ts b/src/cron/isolated-agent/session.ts index 8428efeb4..c31a35465 100644 --- a/src/cron/isolated-agent/session.ts +++ b/src/cron/isolated-agent/session.ts @@ -28,6 +28,8 @@ export function resolveCronSession(params: { lastChannel: entry?.lastChannel, lastTo: entry?.lastTo, lastAccountId: entry?.lastAccountId, + label: entry?.label, + displayName: entry?.displayName, skillsSnapshot: entry?.skillsSnapshot, }; return { storePath, store, sessionEntry, systemSent, isNewSession: true }; diff --git a/src/cron/normalize.test.ts b/src/cron/normalize.test.ts index a876e0317..99c674836 100644 --- a/src/cron/normalize.test.ts +++ b/src/cron/normalize.test.ts @@ -234,4 +234,62 @@ describe("normalizeCronJobCreate", () => { expect(delivery.mode).toBe("announce"); expect((normalized as { isolation?: unknown }).isolation).toBeUndefined(); }); + + it("infers payload kind/session target and name for message-only jobs", () => { + const normalized = normalizeCronJobCreate({ + schedule: { kind: "every", everyMs: 60_000 }, + payload: { message: "Nightly backup" }, + }) as unknown as Record; + + const payload = normalized.payload as Record; + expect(payload.kind).toBe("agentTurn"); + expect(payload.message).toBe("Nightly backup"); + expect(normalized.sessionTarget).toBe("isolated"); + expect(normalized.wakeMode).toBe("now"); + expect(typeof normalized.name).toBe("string"); + }); + + it("maps top-level model/thinking/timeout into payload for legacy add params", () => { + const normalized = normalizeCronJobCreate({ + name: "legacy root 
fields", + schedule: { kind: "every", everyMs: 60_000 }, + payload: { kind: "agentTurn", message: "hello" }, + model: " openrouter/deepseek/deepseek-r1 ", + thinking: " high ", + timeoutSeconds: 45, + allowUnsafeExternalContent: true, + }) as unknown as Record; + + const payload = normalized.payload as Record; + expect(payload.model).toBe("openrouter/deepseek/deepseek-r1"); + expect(payload.thinking).toBe("high"); + expect(payload.timeoutSeconds).toBe(45); + expect(payload.allowUnsafeExternalContent).toBe(true); + }); + + it("coerces sessionTarget and wakeMode casing", () => { + const normalized = normalizeCronJobCreate({ + name: "casing", + schedule: { kind: "cron", expr: "* * * * *" }, + sessionTarget: " IsOlAtEd ", + wakeMode: " NOW ", + payload: { kind: "agentTurn", message: "hello" }, + }) as unknown as Record; + + expect(normalized.sessionTarget).toBe("isolated"); + expect(normalized.wakeMode).toBe("now"); + }); + + it("strips invalid delivery mode from partial delivery objects", () => { + const normalized = normalizeCronJobCreate({ + name: "delivery mode", + schedule: { kind: "cron", expr: "* * * * *" }, + payload: { kind: "agentTurn", message: "hello" }, + delivery: { mode: "bogus", to: "123" }, + }) as unknown as Record; + + const delivery = normalized.delivery as Record; + expect(delivery.mode).toBeUndefined(); + expect(delivery.to).toBe("123"); + }); }); diff --git a/src/cron/normalize.ts b/src/cron/normalize.ts index 733be718c..a41044b36 100644 --- a/src/cron/normalize.ts +++ b/src/cron/normalize.ts @@ -2,6 +2,7 @@ import type { CronJobCreate, CronJobPatch } from "./types.js"; import { sanitizeAgentId } from "../routing/session-key.js"; import { parseAbsoluteTimeMs } from "./parse.js"; import { migrateLegacyCronPayload } from "./payload-migration.js"; +import { inferLegacyName } from "./service/normalize.js"; type UnknownRecord = Record; @@ -19,7 +20,8 @@ function isRecord(value: unknown): value is UnknownRecord { function coerceSchedule(schedule: 
UnknownRecord) { const next: UnknownRecord = { ...schedule }; - const kind = typeof schedule.kind === "string" ? schedule.kind : undefined; + const rawKind = typeof schedule.kind === "string" ? schedule.kind.trim().toLowerCase() : ""; + const kind = rawKind === "at" || rawKind === "every" || rawKind === "cron" ? rawKind : undefined; const atMsRaw = schedule.atMs; const atRaw = schedule.at; const atString = typeof atRaw === "string" ? atRaw.trim() : ""; @@ -32,7 +34,9 @@ function coerceSchedule(schedule: UnknownRecord) { ? parseAbsoluteTimeMs(atString) : null; - if (!kind) { + if (kind) { + next.kind = kind; + } else { if ( typeof schedule.atMs === "number" || typeof schedule.at === "string" || @@ -47,7 +51,7 @@ function coerceSchedule(schedule: UnknownRecord) { } if (atString) { - next.at = parsedAtMs ? new Date(parsedAtMs).toISOString() : atString; + next.at = parsedAtMs !== null ? new Date(parsedAtMs).toISOString() : atString; } else if (parsedAtMs !== null) { next.at = new Date(parsedAtMs).toISOString(); } @@ -62,6 +66,72 @@ function coercePayload(payload: UnknownRecord) { const next: UnknownRecord = { ...payload }; // Back-compat: older configs used `provider` for delivery channel. migrateLegacyCronPayload(next); + const kindRaw = typeof next.kind === "string" ? 
next.kind.trim().toLowerCase() : ""; + if (kindRaw === "agentturn") { + next.kind = "agentTurn"; + } else if (kindRaw === "systemevent") { + next.kind = "systemEvent"; + } else if (kindRaw) { + next.kind = kindRaw; + } + if (!next.kind) { + const hasMessage = typeof next.message === "string" && next.message.trim().length > 0; + const hasText = typeof next.text === "string" && next.text.trim().length > 0; + if (hasMessage) { + next.kind = "agentTurn"; + } else if (hasText) { + next.kind = "systemEvent"; + } + } + if (typeof next.message === "string") { + const trimmed = next.message.trim(); + if (trimmed) { + next.message = trimmed; + } + } + if (typeof next.text === "string") { + const trimmed = next.text.trim(); + if (trimmed) { + next.text = trimmed; + } + } + if ("model" in next) { + if (typeof next.model === "string") { + const trimmed = next.model.trim(); + if (trimmed) { + next.model = trimmed; + } else { + delete next.model; + } + } else { + delete next.model; + } + } + if ("thinking" in next) { + if (typeof next.thinking === "string") { + const trimmed = next.thinking.trim(); + if (trimmed) { + next.thinking = trimmed; + } else { + delete next.thinking; + } + } else { + delete next.thinking; + } + } + if ("timeoutSeconds" in next) { + if (typeof next.timeoutSeconds === "number" && Number.isFinite(next.timeoutSeconds)) { + next.timeoutSeconds = Math.max(1, Math.floor(next.timeoutSeconds)); + } else { + delete next.timeoutSeconds; + } + } + if ( + "allowUnsafeExternalContent" in next && + typeof next.allowUnsafeExternalContent !== "boolean" + ) { + delete next.allowUnsafeExternalContent; + } return next; } @@ -69,7 +139,15 @@ function coerceDelivery(delivery: UnknownRecord) { const next: UnknownRecord = { ...delivery }; if (typeof delivery.mode === "string") { const mode = delivery.mode.trim().toLowerCase(); - next.mode = mode === "deliver" ? 
"announce" : mode; + if (mode === "deliver") { + next.mode = "announce"; + } else if (mode === "announce" || mode === "none") { + next.mode = mode; + } else { + delete next.mode; + } + } else if ("mode" in next) { + delete next.mode; } if (typeof delivery.channel === "string") { const trimmed = delivery.channel.trim().toLowerCase(); @@ -147,6 +225,95 @@ function unwrapJob(raw: UnknownRecord) { return raw; } +function normalizeSessionTarget(raw: unknown) { + if (typeof raw !== "string") { + return undefined; + } + const trimmed = raw.trim().toLowerCase(); + if (trimmed === "main" || trimmed === "isolated") { + return trimmed; + } + return undefined; +} + +function normalizeWakeMode(raw: unknown) { + if (typeof raw !== "string") { + return undefined; + } + const trimmed = raw.trim().toLowerCase(); + if (trimmed === "now" || trimmed === "next-heartbeat") { + return trimmed; + } + return undefined; +} + +function copyTopLevelAgentTurnFields(next: UnknownRecord, payload: UnknownRecord) { + const copyString = (field: "model" | "thinking") => { + if (typeof payload[field] === "string" && payload[field].trim()) { + return; + } + const value = next[field]; + if (typeof value === "string" && value.trim()) { + payload[field] = value.trim(); + } + }; + copyString("model"); + copyString("thinking"); + + if (typeof payload.timeoutSeconds !== "number" && typeof next.timeoutSeconds === "number") { + payload.timeoutSeconds = next.timeoutSeconds; + } + if ( + typeof payload.allowUnsafeExternalContent !== "boolean" && + typeof next.allowUnsafeExternalContent === "boolean" + ) { + payload.allowUnsafeExternalContent = next.allowUnsafeExternalContent; + } +} + +function copyTopLevelLegacyDeliveryFields(next: UnknownRecord, payload: UnknownRecord) { + if (typeof payload.deliver !== "boolean" && typeof next.deliver === "boolean") { + payload.deliver = next.deliver; + } + if ( + typeof payload.channel !== "string" && + typeof next.channel === "string" && + next.channel.trim() + ) { + 
payload.channel = next.channel.trim(); + } + if (typeof payload.to !== "string" && typeof next.to === "string" && next.to.trim()) { + payload.to = next.to.trim(); + } + if ( + typeof payload.bestEffortDeliver !== "boolean" && + typeof next.bestEffortDeliver === "boolean" + ) { + payload.bestEffortDeliver = next.bestEffortDeliver; + } + if ( + typeof payload.provider !== "string" && + typeof next.provider === "string" && + next.provider.trim() + ) { + payload.provider = next.provider.trim(); + } +} + +function stripLegacyTopLevelFields(next: UnknownRecord) { + delete next.model; + delete next.thinking; + delete next.timeoutSeconds; + delete next.allowUnsafeExternalContent; + delete next.message; + delete next.text; + delete next.deliver; + delete next.channel; + delete next.to; + delete next.bestEffortDeliver; + delete next.provider; +} + export function normalizeCronJobInput( raw: unknown, options: NormalizeOptions = DEFAULT_OPTIONS, @@ -186,10 +353,38 @@ export function normalizeCronJobInput( } } + if ("sessionTarget" in base) { + const normalized = normalizeSessionTarget(base.sessionTarget); + if (normalized) { + next.sessionTarget = normalized; + } else { + delete next.sessionTarget; + } + } + + if ("wakeMode" in base) { + const normalized = normalizeWakeMode(base.wakeMode); + if (normalized) { + next.wakeMode = normalized; + } else { + delete next.wakeMode; + } + } + if (isRecord(base.schedule)) { next.schedule = coerceSchedule(base.schedule); } + if (!("payload" in next) || !isRecord(next.payload)) { + const message = typeof next.message === "string" ? next.message.trim() : ""; + const text = typeof next.text === "string" ? 
next.text.trim() : ""; + if (message) { + next.payload = { kind: "agentTurn", message }; + } else if (text) { + next.payload = { kind: "systemEvent", text }; + } + } + if (isRecord(base.payload)) { next.payload = coercePayload(base.payload); } @@ -198,17 +393,39 @@ export function normalizeCronJobInput( next.delivery = coerceDelivery(base.delivery); } - if (isRecord(base.isolation)) { + if ("isolation" in next) { delete next.isolation; } + const payload = isRecord(next.payload) ? next.payload : null; + if (payload && payload.kind === "agentTurn") { + copyTopLevelAgentTurnFields(next, payload); + copyTopLevelLegacyDeliveryFields(next, payload); + } + stripLegacyTopLevelFields(next); + if (options.applyDefaults) { if (!next.wakeMode) { - next.wakeMode = "next-heartbeat"; + next.wakeMode = "now"; } if (typeof next.enabled !== "boolean") { next.enabled = true; } + if ( + (typeof next.name !== "string" || !next.name.trim()) && + isRecord(next.schedule) && + isRecord(next.payload) + ) { + next.name = inferLegacyName({ + schedule: next.schedule as { kind?: unknown; everyMs?: unknown; expr?: unknown }, + payload: next.payload as { kind?: unknown; text?: unknown; message?: unknown }, + }); + } else if (typeof next.name === "string") { + const trimmed = next.name.trim(); + if (trimmed) { + next.name = trimmed; + } + } if (!next.sessionTarget && isRecord(next.payload)) { const kind = typeof next.payload.kind === "string" ? 
next.payload.kind : ""; if (kind === "systemEvent") { diff --git a/src/cron/run-log.test.ts b/src/cron/run-log.test.ts index cef09acfe..6ac9cca2b 100644 --- a/src/cron/run-log.test.ts +++ b/src/cron/run-log.test.ts @@ -65,6 +65,8 @@ describe("cron run log", () => { jobId: "a", action: "finished", status: "skipped", + sessionId: "run-123", + sessionKey: "agent:main:cron:a:run:run-123", }); const allA = await readCronRunLogEntries(logPathA, { limit: 10 }); @@ -78,6 +80,8 @@ describe("cron run log", () => { const lastOne = await readCronRunLogEntries(logPathA, { limit: 1 }); expect(lastOne.map((e) => e.ts)).toEqual([3]); + expect(lastOne[0]?.sessionId).toBe("run-123"); + expect(lastOne[0]?.sessionKey).toBe("agent:main:cron:a:run:run-123"); const onlyB = await readCronRunLogEntries(logPathB, { limit: 10, diff --git a/src/cron/run-log.ts b/src/cron/run-log.ts index 744b023e5..25846ce81 100644 --- a/src/cron/run-log.ts +++ b/src/cron/run-log.ts @@ -8,6 +8,8 @@ export type CronRunLogEntry = { status?: "ok" | "error" | "skipped"; error?: string; summary?: string; + sessionId?: string; + sessionKey?: string; runAtMs?: number; durationMs?: number; nextRunAtMs?: number; @@ -93,7 +95,24 @@ export async function readCronRunLogEntries( if (jobId && obj.jobId !== jobId) { continue; } - parsed.push(obj as CronRunLogEntry); + const entry: CronRunLogEntry = { + ts: obj.ts, + jobId: obj.jobId, + action: "finished", + status: obj.status, + error: obj.error, + summary: obj.summary, + runAtMs: obj.runAtMs, + durationMs: obj.durationMs, + nextRunAtMs: obj.nextRunAtMs, + }; + if (typeof obj.sessionId === "string" && obj.sessionId.trim().length > 0) { + entry.sessionId = obj.sessionId; + } + if (typeof obj.sessionKey === "string" && obj.sessionKey.trim().length > 0) { + entry.sessionKey = obj.sessionKey; + } + parsed.push(entry); } catch { // ignore invalid lines } diff --git a/src/cron/schedule.ts b/src/cron/schedule.ts index 252d29bab..fc13ebfe2 100644 --- a/src/cron/schedule.ts +++ 
b/src/cron/schedule.ts @@ -2,6 +2,14 @@ import { Cron } from "croner"; import type { CronSchedule } from "./types.js"; import { parseAbsoluteTimeMs } from "./parse.js"; +function resolveCronTimezone(tz?: string) { + const trimmed = typeof tz === "string" ? tz.trim() : ""; + if (trimmed) { + return trimmed; + } + return Intl.DateTimeFormat().resolvedOptions().timeZone; +} + export function computeNextRunAtMs(schedule: CronSchedule, nowMs: number): number | undefined { if (schedule.kind === "at") { // Handle both canonical `at` (string) and legacy `atMs` (number) fields. @@ -38,9 +46,20 @@ export function computeNextRunAtMs(schedule: CronSchedule, nowMs: number): numbe return undefined; } const cron = new Cron(expr, { - timezone: schedule.tz?.trim() || undefined, + timezone: resolveCronTimezone(schedule.tz), catch: false, }); - const next = cron.nextRun(new Date(nowMs)); - return next ? next.getTime() : undefined; + let cursor = nowMs; + for (let attempt = 0; attempt < 3; attempt++) { + const next = cron.nextRun(new Date(cursor)); + if (!next) { + return undefined; + } + const nextMs = next.getTime(); + if (Number.isFinite(nextMs) && nextMs > nowMs) { + return nextMs; + } + cursor += 1_000; + } + return undefined; } diff --git a/src/cron/service.delivery-plan.test.ts b/src/cron/service.delivery-plan.test.ts new file mode 100644 index 000000000..707868cba --- /dev/null +++ b/src/cron/service.delivery-plan.test.ts @@ -0,0 +1,92 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it, vi } from "vitest"; +import { CronService } from "./service.js"; + +const noopLogger = { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), +}; + +async function makeStorePath() { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cron-delivery-")); + return { + storePath: path.join(dir, "cron", "jobs.json"), + cleanup: async () => { + await fs.rm(dir, { recursive: true, force: true }); 
+ }, + }; +} + +describe("CronService delivery plan consistency", () => { + it("does not post isolated summary when legacy deliver=false", async () => { + const store = await makeStorePath(); + const enqueueSystemEvent = vi.fn(); + const cron = new CronService({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + enqueueSystemEvent, + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob: vi.fn(async () => ({ status: "ok", summary: "done" })), + }); + await cron.start(); + const job = await cron.add({ + name: "legacy-off", + schedule: { kind: "every", everyMs: 60_000, anchorMs: Date.now() }, + sessionTarget: "isolated", + wakeMode: "next-heartbeat", + payload: { + kind: "agentTurn", + message: "hello", + deliver: false, + }, + }); + + const result = await cron.run(job.id, "force"); + expect(result).toEqual({ ok: true, ran: true }); + expect(enqueueSystemEvent).not.toHaveBeenCalled(); + + cron.stop(); + await store.cleanup(); + }); + + it("treats delivery object without mode as announce", async () => { + const store = await makeStorePath(); + const enqueueSystemEvent = vi.fn(); + const cron = new CronService({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + enqueueSystemEvent, + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob: vi.fn(async () => ({ status: "ok", summary: "done" })), + }); + await cron.start(); + const job = await cron.add({ + name: "partial-delivery", + schedule: { kind: "every", everyMs: 60_000, anchorMs: Date.now() }, + sessionTarget: "isolated", + wakeMode: "next-heartbeat", + payload: { + kind: "agentTurn", + message: "hello", + }, + delivery: { channel: "telegram", to: "123" } as unknown as { + mode: "none" | "announce"; + channel?: string; + to?: string; + }, + }); + + const result = await cron.run(job.id, "force"); + expect(result).toEqual({ ok: true, ran: true }); + expect(enqueueSystemEvent).toHaveBeenCalledWith("Cron: done", { agentId: undefined }); + + cron.stop(); + await store.cleanup(); + 
}); +}); diff --git a/src/cron/service.every-jobs-fire.test.ts b/src/cron/service.every-jobs-fire.test.ts index a6a2bab80..7ae49ac2d 100644 --- a/src/cron/service.every-jobs-fire.test.ts +++ b/src/cron/service.every-jobs-fire.test.ts @@ -2,6 +2,7 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { CronJob } from "./types.js"; import { CronService } from "./service.js"; const noopLogger = { @@ -21,6 +22,23 @@ async function makeStorePath() { }; } +async function waitForJob( + cron: CronService, + id: string, + predicate: (job: CronJob | undefined) => boolean, +) { + let latest: CronJob | undefined; + for (let i = 0; i < 30; i++) { + const jobs = await cron.list({ includeDisabled: true }); + latest = jobs.find((job) => job.id === id); + if (predicate(latest)) { + return latest; + } + await vi.runOnlyPendingTimersAsync(); + } + return latest; +} + describe("CronService interval/cron jobs fire on time", () => { beforeEach(() => { vi.useFakeTimers(); @@ -66,9 +84,7 @@ describe("CronService interval/cron jobs fire on time", () => { vi.setSystemTime(new Date(firstDueAt + 5)); await vi.runOnlyPendingTimersAsync(); - // Wait for the async onTimer to complete via the lock queue. - const jobs = await cron.list(); - const updated = jobs.find((j) => j.id === job.id); + const updated = await waitForJob(cron, job.id, (current) => current?.state.lastStatus === "ok"); expect(enqueueSystemEvent).toHaveBeenCalledWith("tick", { agentId: undefined }); expect(updated?.state.lastStatus).toBe("ok"); @@ -112,9 +128,7 @@ describe("CronService interval/cron jobs fire on time", () => { vi.setSystemTime(new Date(firstDueAt + 5)); await vi.runOnlyPendingTimersAsync(); - // Wait for the async onTimer to complete via the lock queue. 
- const jobs = await cron.list(); - const updated = jobs.find((j) => j.id === job.id); + const updated = await waitForJob(cron, job.id, (current) => current?.state.lastStatus === "ok"); expect(enqueueSystemEvent).toHaveBeenCalledWith("cron-tick", { agentId: undefined }); expect(updated?.state.lastStatus).toBe("ok"); @@ -124,4 +138,88 @@ describe("CronService interval/cron jobs fire on time", () => { cron.stop(); await store.cleanup(); }); + + it("keeps legacy every jobs due while minute cron jobs recompute schedules", async () => { + const store = await makeStorePath(); + const enqueueSystemEvent = vi.fn(); + const requestHeartbeatNow = vi.fn(); + const nowMs = Date.parse("2025-12-13T00:00:00.000Z"); + + await fs.mkdir(path.dirname(store.storePath), { recursive: true }); + await fs.writeFile( + store.storePath, + JSON.stringify( + { + version: 1, + jobs: [ + { + id: "legacy-every", + name: "legacy every", + enabled: true, + createdAtMs: nowMs, + updatedAtMs: nowMs, + schedule: { kind: "every", everyMs: 120_000 }, + sessionTarget: "main", + wakeMode: "now", + payload: { kind: "systemEvent", text: "sf-tick" }, + state: { nextRunAtMs: nowMs + 120_000 }, + }, + { + id: "minute-cron", + name: "minute cron", + enabled: true, + createdAtMs: nowMs, + updatedAtMs: nowMs, + schedule: { kind: "cron", expr: "* * * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "now", + payload: { kind: "systemEvent", text: "minute-tick" }, + state: { nextRunAtMs: nowMs + 60_000 }, + }, + ], + }, + null, + 2, + ), + "utf-8", + ); + + const cron = new CronService({ + storePath: store.storePath, + cronEnabled: true, + log: noopLogger, + enqueueSystemEvent, + requestHeartbeatNow, + runIsolatedAgentJob: vi.fn(async () => ({ status: "ok" })), + }); + + await cron.start(); + for (let minute = 1; minute <= 6; minute++) { + vi.setSystemTime(new Date(nowMs + minute * 60_000)); + const minuteRun = await cron.run("minute-cron", "force"); + expect(minuteRun).toEqual({ ok: true, ran: true }); + } + 
+ vi.setSystemTime(new Date(nowMs + 6 * 60_000)); + const sfRun = await cron.run("legacy-every", "due"); + expect(sfRun).toEqual({ ok: true, ran: true }); + + const sfRuns = enqueueSystemEvent.mock.calls.filter((args) => args[0] === "sf-tick").length; + const minuteRuns = enqueueSystemEvent.mock.calls.filter( + (args) => args[0] === "minute-tick", + ).length; + expect(minuteRuns).toBeGreaterThan(0); + expect(sfRuns).toBeGreaterThan(0); + + const jobs = await cron.list({ includeDisabled: true }); + const sfJob = jobs.find((job) => job.id === "legacy-every"); + expect(sfJob?.state.lastStatus).toBe("ok"); + expect(sfJob?.schedule.kind).toBe("every"); + if (sfJob?.schedule.kind === "every") { + expect(sfJob.schedule.anchorMs).toBe(nowMs); + } + + cron.stop(); + await store.cleanup(); + }); }); diff --git a/src/cron/service.issue-regressions.test.ts b/src/cron/service.issue-regressions.test.ts new file mode 100644 index 000000000..c793979c1 --- /dev/null +++ b/src/cron/service.issue-regressions.test.ts @@ -0,0 +1,346 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { setTimeout as delay } from "node:timers/promises"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { CronJob } from "./types.js"; +import { CronService } from "./service.js"; +import { createCronServiceState, type CronEvent } from "./service/state.js"; +import { onTimer } from "./service/timer.js"; + +const noopLogger = { + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), + debug: vi.fn(), + trace: vi.fn(), +}; + +async function makeStorePath() { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "cron-issues-")); + const storePath = path.join(dir, "jobs.json"); + return { + storePath, + cleanup: async () => { + await fs.rm(dir, { recursive: true, force: true }); + }, + }; +} + +function createDueIsolatedJob(params: { + id: string; + nowMs: number; + nextRunAtMs: number; + deleteAfterRun?: boolean; +}): 
CronJob { + return { + id: params.id, + name: params.id, + enabled: true, + deleteAfterRun: params.deleteAfterRun ?? false, + createdAtMs: params.nowMs, + updatedAtMs: params.nowMs, + schedule: { kind: "at", at: new Date(params.nextRunAtMs).toISOString() }, + sessionTarget: "isolated", + wakeMode: "next-heartbeat", + payload: { kind: "agentTurn", message: params.id }, + delivery: { mode: "none" }, + state: { nextRunAtMs: params.nextRunAtMs }, + }; +} + +describe("Cron issue regressions", () => { + beforeEach(() => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-02-06T10:05:00.000Z")); + }); + + afterEach(() => { + vi.useRealTimers(); + vi.clearAllMocks(); + }); + + it("recalculates nextRunAtMs when schedule changes", async () => { + const store = await makeStorePath(); + const cron = new CronService({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + enqueueSystemEvent: vi.fn(), + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob: vi.fn().mockResolvedValue({ status: "ok", summary: "ok" }), + }); + await cron.start(); + + const created = await cron.add({ + name: "hourly", + schedule: { kind: "cron", expr: "0 * * * *", tz: "UTC" }, + sessionTarget: "main", + payload: { kind: "systemEvent", text: "tick" }, + }); + expect(created.state.nextRunAtMs).toBe(Date.parse("2026-02-06T11:00:00.000Z")); + + const updated = await cron.update(created.id, { + schedule: { kind: "cron", expr: "0 */2 * * *", tz: "UTC" }, + }); + + expect(updated.state.nextRunAtMs).toBe(Date.parse("2026-02-06T12:00:00.000Z")); + + cron.stop(); + await store.cleanup(); + }); + + it("runs immediately with force mode even when not due", async () => { + const store = await makeStorePath(); + const enqueueSystemEvent = vi.fn(); + const cron = new CronService({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + enqueueSystemEvent, + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob: vi.fn().mockResolvedValue({ status: "ok", summary: "ok" }), + }); + 
await cron.start(); + + const created = await cron.add({ + name: "force-now", + schedule: { kind: "every", everyMs: 60_000, anchorMs: Date.now() }, + sessionTarget: "main", + payload: { kind: "systemEvent", text: "force" }, + }); + + const result = await cron.run(created.id, "force"); + + expect(result).toEqual({ ok: true, ran: true }); + expect(enqueueSystemEvent).toHaveBeenCalledWith("force", { agentId: undefined }); + + cron.stop(); + await store.cleanup(); + }); + + it("schedules isolated jobs with next wake time", async () => { + const store = await makeStorePath(); + const cron = new CronService({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + enqueueSystemEvent: vi.fn(), + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob: vi.fn().mockResolvedValue({ status: "ok", summary: "ok" }), + }); + await cron.start(); + + const job = await cron.add({ + name: "isolated", + schedule: { kind: "every", everyMs: 60_000, anchorMs: Date.now() }, + sessionTarget: "isolated", + payload: { kind: "agentTurn", message: "hi" }, + }); + const status = await cron.status(); + + expect(typeof job.state.nextRunAtMs).toBe("number"); + expect(typeof status.nextWakeAtMs).toBe("number"); + + cron.stop(); + await store.cleanup(); + }); + + it("persists allowUnsafeExternalContent on agentTurn payload patches", async () => { + const store = await makeStorePath(); + const cron = new CronService({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + enqueueSystemEvent: vi.fn(), + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob: vi.fn().mockResolvedValue({ status: "ok", summary: "ok" }), + }); + await cron.start(); + + const created = await cron.add({ + name: "unsafe toggle", + schedule: { kind: "every", everyMs: 60_000, anchorMs: Date.now() }, + sessionTarget: "isolated", + payload: { kind: "agentTurn", message: "hi" }, + }); + + const updated = await cron.update(created.id, { + payload: { kind: "agentTurn", allowUnsafeExternalContent: true }, 
+ }); + + expect(updated.payload.kind).toBe("agentTurn"); + if (updated.payload.kind === "agentTurn") { + expect(updated.payload.allowUnsafeExternalContent).toBe(true); + expect(updated.payload.message).toBe("hi"); + } + + cron.stop(); + await store.cleanup(); + }); + + it("caps timer delay to 60s for far-future schedules", async () => { + const timeoutSpy = vi.spyOn(globalThis, "setTimeout"); + const store = await makeStorePath(); + const cron = new CronService({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + enqueueSystemEvent: vi.fn(), + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob: vi.fn().mockResolvedValue({ status: "ok", summary: "ok" }), + }); + await cron.start(); + + const callsBeforeAdd = timeoutSpy.mock.calls.length; + await cron.add({ + name: "far-future", + schedule: { kind: "at", at: "2035-01-01T00:00:00.000Z" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "future" }, + }); + + const delaysAfterAdd = timeoutSpy.mock.calls + .slice(callsBeforeAdd) + .map(([, delay]) => delay) + .filter((delay): delay is number => typeof delay === "number"); + expect(delaysAfterAdd.some((delay) => delay === 60_000)).toBe(true); + + cron.stop(); + timeoutSpy.mockRestore(); + await store.cleanup(); + }); + + it("does not hot-loop zero-delay timers while a run is already in progress", async () => { + const timeoutSpy = vi.spyOn(globalThis, "setTimeout"); + const store = await makeStorePath(); + const now = Date.parse("2026-02-06T10:05:00.000Z"); + const state = createCronServiceState({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + nowMs: () => now, + enqueueSystemEvent: vi.fn(), + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob: vi.fn().mockResolvedValue({ status: "ok", summary: "ok" }), + }); + state.running = true; + state.store = { + version: 1, + jobs: [createDueIsolatedJob({ id: "due", nowMs: now, nextRunAtMs: now - 1 })], + }; + + await onTimer(state); + + 
expect(timeoutSpy).not.toHaveBeenCalled(); + expect(state.timer).toBeNull(); + timeoutSpy.mockRestore(); + await store.cleanup(); + }); + + it("skips forced manual runs while a timer-triggered run is in progress", async () => { + vi.useRealTimers(); + const store = await makeStorePath(); + let resolveRun: + | ((value: { status: "ok" | "error" | "skipped"; summary?: string; error?: string }) => void) + | undefined; + const runIsolatedAgentJob = vi.fn( + async () => + await new Promise<{ status: "ok" | "error" | "skipped"; summary?: string; error?: string }>( + (resolve) => { + resolveRun = resolve; + }, + ), + ); + + const cron = new CronService({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + enqueueSystemEvent: vi.fn(), + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob, + }); + await cron.start(); + + const runAt = Date.now() + 30; + const job = await cron.add({ + name: "timer-overlap", + enabled: true, + schedule: { kind: "at", at: new Date(runAt).toISOString() }, + sessionTarget: "isolated", + wakeMode: "next-heartbeat", + payload: { kind: "agentTurn", message: "long task" }, + delivery: { mode: "none" }, + }); + + for (let i = 0; i < 25 && runIsolatedAgentJob.mock.calls.length === 0; i++) { + await delay(20); + } + expect(runIsolatedAgentJob).toHaveBeenCalledTimes(1); + + const manualResult = await cron.run(job.id, "force"); + expect(manualResult).toEqual({ ok: true, ran: false, reason: "already-running" }); + expect(runIsolatedAgentJob).toHaveBeenCalledTimes(1); + + resolveRun?.({ status: "ok", summary: "done" }); + for (let i = 0; i < 25; i++) { + const jobs = await cron.list({ includeDisabled: true }); + if (jobs.some((j) => j.id === job.id && j.state.lastStatus === "ok")) { + break; + } + await delay(20); + } + + cron.stop(); + await store.cleanup(); + }); + + it("records per-job start time and duration for batched due jobs", async () => { + const store = await makeStorePath(); + const dueAt = 
Date.parse("2026-02-06T10:05:01.000Z"); + const first = createDueIsolatedJob({ id: "batch-first", nowMs: dueAt, nextRunAtMs: dueAt }); + const second = createDueIsolatedJob({ id: "batch-second", nowMs: dueAt, nextRunAtMs: dueAt }); + await fs.writeFile( + store.storePath, + JSON.stringify({ version: 1, jobs: [first, second] }, null, 2), + "utf-8", + ); + + let now = dueAt; + const events: CronEvent[] = []; + const state = createCronServiceState({ + cronEnabled: true, + storePath: store.storePath, + log: noopLogger, + nowMs: () => now, + enqueueSystemEvent: vi.fn(), + requestHeartbeatNow: vi.fn(), + onEvent: (evt) => { + events.push(evt); + }, + runIsolatedAgentJob: vi.fn(async (params: { job: { id: string } }) => { + now += params.job.id === first.id ? 50 : 20; + return { status: "ok" as const, summary: "ok" }; + }), + }); + + await onTimer(state); + + const jobs = state.store?.jobs ?? []; + const firstDone = jobs.find((job) => job.id === first.id); + const secondDone = jobs.find((job) => job.id === second.id); + const startedAtEvents = events + .filter((evt) => evt.action === "started") + .map((evt) => evt.runAtMs); + + expect(firstDone?.state.lastRunAtMs).toBe(dueAt); + expect(firstDone?.state.lastDurationMs).toBe(50); + expect(secondDone?.state.lastRunAtMs).toBe(dueAt + 50); + expect(secondDone?.state.lastDurationMs).toBe(20); + expect(startedAtEvents).toEqual([dueAt, dueAt + 50]); + + await store.cleanup(); + }); +}); diff --git a/src/cron/service.read-ops-nonblocking.test.ts b/src/cron/service.read-ops-nonblocking.test.ts new file mode 100644 index 000000000..d0e73c87d --- /dev/null +++ b/src/cron/service.read-ops-nonblocking.test.ts @@ -0,0 +1,104 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { setTimeout as delay } from "node:timers/promises"; +import { describe, expect, it, vi } from "vitest"; +import { CronService } from "./service.js"; + +const noopLogger = { + debug: vi.fn(), + info: vi.fn(), + 
warn: vi.fn(), + error: vi.fn(), +}; + +async function makeStorePath() { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cron-")); + return { + storePath: path.join(dir, "cron", "jobs.json"), + cleanup: async () => { + await fs.rm(dir, { recursive: true, force: true }); + }, + }; +} + +describe("CronService read ops while job is running", () => { + it("keeps list and status responsive during a long isolated run", async () => { + const store = await makeStorePath(); + const enqueueSystemEvent = vi.fn(); + const requestHeartbeatNow = vi.fn(); + + let resolveRun: + | ((value: { status: "ok" | "error" | "skipped"; summary?: string; error?: string }) => void) + | undefined; + + const runIsolatedAgentJob = vi.fn( + async () => + await new Promise<{ status: "ok" | "error" | "skipped"; summary?: string; error?: string }>( + (resolve) => { + resolveRun = resolve; + }, + ), + ); + + const cron = new CronService({ + storePath: store.storePath, + cronEnabled: true, + log: noopLogger, + enqueueSystemEvent, + requestHeartbeatNow, + runIsolatedAgentJob, + }); + + await cron.start(); + + const runAt = Date.now() + 30; + await cron.add({ + name: "slow isolated", + enabled: true, + deleteAfterRun: false, + schedule: { kind: "at", at: new Date(runAt).toISOString() }, + sessionTarget: "isolated", + wakeMode: "next-heartbeat", + payload: { kind: "agentTurn", message: "long task" }, + delivery: { mode: "none" }, + }); + + for (let i = 0; i < 25 && runIsolatedAgentJob.mock.calls.length === 0; i++) { + await delay(20); + } + + expect(runIsolatedAgentJob).toHaveBeenCalledTimes(1); + + const listRace = await Promise.race([ + cron.list({ includeDisabled: true }).then(() => "ok"), + delay(200).then(() => "timeout"), + ]); + expect(listRace).toBe("ok"); + + const statusRace = await Promise.race([ + cron.status().then(() => "ok"), + delay(200).then(() => "timeout"), + ]); + expect(statusRace).toBe("ok"); + + const running = await cron.list({ includeDisabled: true }); + 
expect(running[0]?.state.runningAtMs).toBeTypeOf("number"); + + resolveRun?.({ status: "ok", summary: "done" }); + + for (let i = 0; i < 25; i++) { + const jobs = await cron.list({ includeDisabled: true }); + if (jobs[0]?.state.lastStatus === "ok") { + break; + } + await delay(20); + } + + const finished = await cron.list({ includeDisabled: true }); + expect(finished[0]?.state.lastStatus).toBe("ok"); + + cron.stop(); + await store.cleanup(); + }); +}); diff --git a/src/cron/service.restart-catchup.test.ts b/src/cron/service.restart-catchup.test.ts new file mode 100644 index 000000000..c8994eed1 --- /dev/null +++ b/src/cron/service.restart-catchup.test.ts @@ -0,0 +1,165 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { CronService } from "./service.js"; + +const noopLogger = { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), +}; + +async function makeStorePath() { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cron-")); + return { + storePath: path.join(dir, "cron", "jobs.json"), + cleanup: async () => { + await fs.rm(dir, { recursive: true, force: true }); + }, + }; +} + +describe("CronService restart catch-up", () => { + beforeEach(() => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2025-12-13T17:00:00.000Z")); + noopLogger.debug.mockClear(); + noopLogger.info.mockClear(); + noopLogger.warn.mockClear(); + noopLogger.error.mockClear(); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it("executes an overdue recurring job immediately on start", async () => { + const store = await makeStorePath(); + const enqueueSystemEvent = vi.fn(); + const requestHeartbeatNow = vi.fn(); + + const dueAt = Date.parse("2025-12-13T15:00:00.000Z"); + const lastRunAt = Date.parse("2025-12-12T15:00:00.000Z"); + + await fs.mkdir(path.dirname(store.storePath), { recursive: true }); + await fs.writeFile( + 
store.storePath, + JSON.stringify( + { + version: 1, + jobs: [ + { + id: "restart-overdue-job", + name: "daily digest", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: Date.parse("2025-12-12T15:00:00.000Z"), + schedule: { kind: "cron", expr: "0 15 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "digest now" }, + state: { + nextRunAtMs: dueAt, + lastRunAtMs: lastRunAt, + lastStatus: "ok", + }, + }, + ], + }, + null, + 2, + ), + "utf-8", + ); + + const cron = new CronService({ + storePath: store.storePath, + cronEnabled: true, + log: noopLogger, + enqueueSystemEvent, + requestHeartbeatNow, + runIsolatedAgentJob: vi.fn(async () => ({ status: "ok" })), + }); + + await cron.start(); + + expect(enqueueSystemEvent).toHaveBeenCalledWith("digest now", { agentId: undefined }); + expect(requestHeartbeatNow).toHaveBeenCalled(); + + const jobs = await cron.list({ includeDisabled: true }); + const updated = jobs.find((job) => job.id === "restart-overdue-job"); + expect(updated?.state.lastStatus).toBe("ok"); + expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z")); + expect(updated?.state.nextRunAtMs).toBeGreaterThan(Date.parse("2025-12-13T17:00:00.000Z")); + + cron.stop(); + await store.cleanup(); + }); + + it("clears stale running markers and catches up overdue jobs on startup", async () => { + const store = await makeStorePath(); + const enqueueSystemEvent = vi.fn(); + const requestHeartbeatNow = vi.fn(); + + const dueAt = Date.parse("2025-12-13T16:00:00.000Z"); + const staleRunningAt = Date.parse("2025-12-13T16:30:00.000Z"); + + await fs.mkdir(path.dirname(store.storePath), { recursive: true }); + await fs.writeFile( + store.storePath, + JSON.stringify( + { + version: 1, + jobs: [ + { + id: "restart-stale-running", + name: "daily stale marker", + enabled: true, + createdAtMs: Date.parse("2025-12-10T12:00:00.000Z"), + updatedAtMs: 
Date.parse("2025-12-13T16:30:00.000Z"), + schedule: { kind: "cron", expr: "0 16 * * *", tz: "UTC" }, + sessionTarget: "main", + wakeMode: "next-heartbeat", + payload: { kind: "systemEvent", text: "resume stale marker" }, + state: { + nextRunAtMs: dueAt, + runningAtMs: staleRunningAt, + }, + }, + ], + }, + null, + 2, + ), + "utf-8", + ); + + const cron = new CronService({ + storePath: store.storePath, + cronEnabled: true, + log: noopLogger, + enqueueSystemEvent, + requestHeartbeatNow, + runIsolatedAgentJob: vi.fn(async () => ({ status: "ok" })), + }); + + await cron.start(); + + expect(enqueueSystemEvent).toHaveBeenCalledWith("resume stale marker", { agentId: undefined }); + expect(noopLogger.warn).toHaveBeenCalledWith( + expect.objectContaining({ jobId: "restart-stale-running" }), + "cron: clearing stale running marker on startup", + ); + + const jobs = await cron.list({ includeDisabled: true }); + const updated = jobs.find((job) => job.id === "restart-stale-running"); + expect(updated?.state.runningAtMs).toBeUndefined(); + expect(updated?.state.lastStatus).toBe("ok"); + expect(updated?.state.lastRunAtMs).toBe(Date.parse("2025-12-13T17:00:00.000Z")); + + cron.stop(); + await store.cleanup(); + }); +}); diff --git a/src/cron/service.runs-one-shot-main-job-disables-it.test.ts b/src/cron/service.runs-one-shot-main-job-disables-it.test.ts index e26e71cab..1cc3eca03 100644 --- a/src/cron/service.runs-one-shot-main-job-disables-it.test.ts +++ b/src/cron/service.runs-one-shot-main-job-disables-it.test.ts @@ -3,6 +3,7 @@ import os from "node:os"; import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import type { HeartbeatRunResult } from "../infra/heartbeat-wake.js"; +import type { CronJob } from "./types.js"; import { CronService } from "./service.js"; const noopLogger = { @@ -22,6 +23,18 @@ async function makeStorePath() { }; } +async function waitForJobs(cron: CronService, predicate: (jobs: CronJob[]) => boolean) { + 
let latest: CronJob[] = []; + for (let i = 0; i < 30; i++) { + latest = await cron.list({ includeDisabled: true }); + if (predicate(latest)) { + return latest; + } + await vi.runOnlyPendingTimersAsync(); + } + return latest; +} + describe("CronService", () => { beforeEach(() => { vi.useFakeTimers(); @@ -67,7 +80,9 @@ describe("CronService", () => { vi.setSystemTime(new Date("2025-12-13T00:00:02.000Z")); await vi.runOnlyPendingTimersAsync(); - const jobs = await cron.list({ includeDisabled: true }); + const jobs = await waitForJobs(cron, (items) => + items.some((item) => item.id === job.id && !item.enabled), + ); const updated = jobs.find((j) => j.id === job.id); expect(updated?.enabled).toBe(false); expect(enqueueSystemEvent).toHaveBeenCalledWith("hello", { @@ -108,7 +123,7 @@ describe("CronService", () => { vi.setSystemTime(new Date("2025-12-13T00:00:02.000Z")); await vi.runOnlyPendingTimersAsync(); - const jobs = await cron.list({ includeDisabled: true }); + const jobs = await waitForJobs(cron, (items) => !items.some((item) => item.id === job.id)); expect(jobs.find((j) => j.id === job.id)).toBeUndefined(); expect(enqueueSystemEvent).toHaveBeenCalledWith("hello", { agentId: undefined, @@ -185,6 +200,49 @@ describe("CronService", () => { await store.cleanup(); }); + it("wakeMode now falls back to queued heartbeat when main lane stays busy", async () => { + const store = await makeStorePath(); + const enqueueSystemEvent = vi.fn(); + const requestHeartbeatNow = vi.fn(); + const runHeartbeatOnce = vi.fn(async () => ({ + status: "skipped" as const, + reason: "requests-in-flight", + })); + + const cron = new CronService({ + storePath: store.storePath, + cronEnabled: true, + log: noopLogger, + enqueueSystemEvent, + requestHeartbeatNow, + runHeartbeatOnce, + runIsolatedAgentJob: vi.fn(async () => ({ status: "ok" })), + }); + + await cron.start(); + const job = await cron.add({ + name: "wakeMode now fallback", + enabled: true, + schedule: { kind: "at", at: new 
Date(1).toISOString() }, + sessionTarget: "main", + wakeMode: "now", + payload: { kind: "systemEvent", text: "hello" }, + }); + + const runPromise = cron.run(job.id, "force"); + await vi.advanceTimersByTimeAsync(125_000); + await runPromise; + + expect(runHeartbeatOnce).toHaveBeenCalled(); + expect(requestHeartbeatNow).toHaveBeenCalled(); + expect(job.state.lastStatus).toBe("ok"); + expect(job.state.lastError).toBeUndefined(); + + await cron.list({ includeDisabled: true }); + cron.stop(); + await store.cleanup(); + }); + it("runs an isolated job and posts summary to main", async () => { const store = await makeStorePath(); const enqueueSystemEvent = vi.fn(); @@ -218,7 +276,7 @@ describe("CronService", () => { vi.setSystemTime(new Date("2025-12-13T00:00:01.000Z")); await vi.runOnlyPendingTimersAsync(); - await cron.list({ includeDisabled: true }); + await waitForJobs(cron, (items) => items.some((item) => item.state.lastStatus === "ok")); expect(runIsolatedAgentJob).toHaveBeenCalledTimes(1); expect(enqueueSystemEvent).toHaveBeenCalledWith("Cron: done", { agentId: undefined, @@ -366,7 +424,7 @@ describe("CronService", () => { vi.setSystemTime(new Date("2025-12-13T00:00:01.000Z")); await vi.runOnlyPendingTimersAsync(); - await cron.list({ includeDisabled: true }); + await waitForJobs(cron, (items) => items.some((item) => item.state.lastStatus === "error")); expect(enqueueSystemEvent).toHaveBeenCalledWith("Cron (error): last output", { agentId: undefined, @@ -460,7 +518,7 @@ describe("CronService", () => { expect(enqueueSystemEvent).not.toHaveBeenCalled(); expect(requestHeartbeatNow).not.toHaveBeenCalled(); - const jobs = await cron.list({ includeDisabled: true }); + const jobs = await waitForJobs(cron, (items) => items[0]?.state.lastStatus === "skipped"); expect(jobs[0]?.state.lastStatus).toBe("skipped"); expect(jobs[0]?.state.lastError).toMatch(/main job requires/i); diff --git a/src/cron/service.skips-main-jobs-empty-systemevent-text.test.ts 
b/src/cron/service.skips-main-jobs-empty-systemevent-text.test.ts index d25edfb8a..4bbc07afc 100644 --- a/src/cron/service.skips-main-jobs-empty-systemevent-text.test.ts +++ b/src/cron/service.skips-main-jobs-empty-systemevent-text.test.ts @@ -2,6 +2,7 @@ import fs from "node:fs/promises"; import os from "node:os"; import path from "node:path"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import type { CronJob } from "./types.js"; import { CronService } from "./service.js"; const noopLogger = { @@ -21,6 +22,22 @@ async function makeStorePath() { }; } +async function waitForFirstJob( + cron: CronService, + predicate: (job: CronJob | undefined) => boolean, +) { + let latest: CronJob | undefined; + for (let i = 0; i < 30; i++) { + const jobs = await cron.list({ includeDisabled: true }); + latest = jobs[0]; + if (predicate(latest)) { + return latest; + } + await vi.runOnlyPendingTimersAsync(); + } + return latest; +} + describe("CronService", () => { beforeEach(() => { vi.useFakeTimers(); @@ -66,9 +83,9 @@ describe("CronService", () => { expect(enqueueSystemEvent).not.toHaveBeenCalled(); expect(requestHeartbeatNow).not.toHaveBeenCalled(); - const jobs = await cron.list({ includeDisabled: true }); - expect(jobs[0]?.state.lastStatus).toBe("skipped"); - expect(jobs[0]?.state.lastError).toMatch(/non-empty/i); + const job = await waitForFirstJob(cron, (current) => current?.state.lastStatus === "skipped"); + expect(job?.state.lastStatus).toBe("skipped"); + expect(job?.state.lastError).toMatch(/non-empty/i); cron.stop(); await store.cleanup(); diff --git a/src/cron/service.store-migration.test.ts b/src/cron/service.store-migration.test.ts new file mode 100644 index 000000000..ed3b25e69 --- /dev/null +++ b/src/cron/service.store-migration.test.ts @@ -0,0 +1,124 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { 
CronService } from "./service.js"; + +const noopLogger = { + debug: vi.fn(), + info: vi.fn(), + warn: vi.fn(), + error: vi.fn(), +}; + +async function makeStorePath() { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cron-")); + return { + storePath: path.join(dir, "cron", "jobs.json"), + cleanup: async () => { + await fs.rm(dir, { recursive: true, force: true }); + }, + }; +} + +describe("CronService store migrations", () => { + beforeEach(() => { + vi.useFakeTimers(); + vi.setSystemTime(new Date("2026-02-06T17:00:00.000Z")); + noopLogger.debug.mockClear(); + noopLogger.info.mockClear(); + noopLogger.warn.mockClear(); + noopLogger.error.mockClear(); + }); + + afterEach(() => { + vi.useRealTimers(); + }); + + it("migrates legacy top-level agentTurn fields and initializes missing state", async () => { + const store = await makeStorePath(); + await fs.mkdir(path.dirname(store.storePath), { recursive: true }); + await fs.writeFile( + store.storePath, + JSON.stringify( + { + version: 1, + jobs: [ + { + id: "legacy-agentturn-job", + name: "legacy agentturn", + enabled: true, + createdAtMs: Date.parse("2026-02-01T12:00:00.000Z"), + updatedAtMs: Date.parse("2026-02-05T12:00:00.000Z"), + schedule: { kind: "cron", expr: "0 23 * * *", tz: "UTC" }, + sessionTarget: "isolated", + wakeMode: "next-heartbeat", + model: "openrouter/deepseek/deepseek-r1", + thinking: "high", + timeoutSeconds: 120, + allowUnsafeExternalContent: true, + deliver: true, + channel: "telegram", + to: "12345", + bestEffortDeliver: true, + payload: { kind: "agentTurn", message: "legacy payload fields" }, + }, + ], + }, + null, + 2, + ), + "utf-8", + ); + + const cron = new CronService({ + storePath: store.storePath, + cronEnabled: true, + log: noopLogger, + enqueueSystemEvent: vi.fn(), + requestHeartbeatNow: vi.fn(), + runIsolatedAgentJob: vi.fn(async () => ({ status: "ok", summary: "ok" })), + }); + + await cron.start(); + + const status = await cron.status(); + 
expect(status.enabled).toBe(true);
+
+    const jobs = await cron.list({ includeDisabled: true });
+    const job = jobs.find((entry) => entry.id === "legacy-agentturn-job");
+    expect(job).toBeDefined();
+    expect(job?.state).toBeDefined();
+    expect(job?.sessionTarget).toBe("isolated");
+    expect(job?.payload.kind).toBe("agentTurn");
+    if (job?.payload.kind === "agentTurn") {
+      expect(job.payload.model).toBe("openrouter/deepseek/deepseek-r1");
+      expect(job.payload.thinking).toBe("high");
+      expect(job.payload.timeoutSeconds).toBe(120);
+      expect(job.payload.allowUnsafeExternalContent).toBe(true);
+    }
+    expect(job?.delivery).toEqual({
+      mode: "announce",
+      channel: "telegram",
+      to: "12345",
+      bestEffort: true,
+    });
+
+    const persisted = JSON.parse(await fs.readFile(store.storePath, "utf-8")) as {
+      jobs: Array<Record<string, unknown>>;
+    };
+    const persistedJob = persisted.jobs.find((entry) => entry.id === "legacy-agentturn-job");
+    expect(persistedJob).toBeDefined();
+    expect(persistedJob?.state).toEqual(expect.any(Object));
+    expect(persistedJob?.model).toBeUndefined();
+    expect(persistedJob?.thinking).toBeUndefined();
+    expect(persistedJob?.timeoutSeconds).toBeUndefined();
+    expect(persistedJob?.deliver).toBeUndefined();
+    expect(persistedJob?.channel).toBeUndefined();
+    expect(persistedJob?.to).toBeUndefined();
+    expect(persistedJob?.bestEffortDeliver).toBeUndefined();
+
+    cron.stop();
+    await store.cleanup();
+  });
+});
diff --git a/src/cron/service.store.migration.test.ts b/src/cron/service.store.migration.test.ts
index 6e0734b15..3054a634e 100644
--- a/src/cron/service.store.migration.test.ts
+++ b/src/cron/service.store.migration.test.ts
@@ -98,4 +98,49 @@ describe("cron store migration", () => {
 
     await store.cleanup();
   });
+
+  it("adds anchorMs to legacy every schedules", async () => {
+    const store = await makeStorePath();
+    const createdAtMs = 1_700_000_000_000;
+    const legacyJob = {
+      id: "job-every-legacy",
+      agentId: undefined,
+      name: "Legacy every",
+      description: null,
+      enabled: 
true,
+      deleteAfterRun: false,
+      createdAtMs,
+      updatedAtMs: createdAtMs,
+      schedule: { kind: "every", everyMs: 120_000 },
+      sessionTarget: "main",
+      wakeMode: "next-heartbeat",
+      payload: {
+        kind: "systemEvent",
+        text: "tick",
+      },
+      state: {},
+    };
+    await fs.mkdir(path.dirname(store.storePath), { recursive: true });
+    await fs.writeFile(store.storePath, JSON.stringify({ version: 1, jobs: [legacyJob] }, null, 2));
+
+    const cron = new CronService({
+      storePath: store.storePath,
+      cronEnabled: true,
+      log: noopLogger,
+      enqueueSystemEvent: vi.fn(),
+      requestHeartbeatNow: vi.fn(),
+      runIsolatedAgentJob: vi.fn(async () => ({ status: "ok" })),
+    });
+
+    await cron.start();
+    cron.stop();
+
+    const loaded = await loadCronStore(store.storePath);
+    const migrated = loaded.jobs[0] as Record<string, unknown>;
+    const schedule = migrated.schedule as Record<string, unknown>;
+    expect(schedule.kind).toBe("every");
+    expect(schedule.anchorMs).toBe(createdAtMs);
+
+    await store.cleanup();
+  });
});
diff --git a/src/cron/service/jobs.ts b/src/cron/service/jobs.ts
index a01475224..fbd96d34d 100644
--- a/src/cron/service/jobs.ts
+++ b/src/cron/service/jobs.ts
@@ -20,6 +20,17 @@ import {
 
 const STUCK_RUN_MS = 2 * 60 * 60 * 1000;
 
+function resolveEveryAnchorMs(params: {
+  schedule: { everyMs: number; anchorMs?: number };
+  fallbackAnchorMs: number;
+}) {
+  const raw = params.schedule.anchorMs;
+  if (typeof raw === "number" && Number.isFinite(raw)) {
+    return Math.max(0, Math.floor(raw));
+  }
+  return Math.max(0, Math.floor(params.fallbackAnchorMs));
+}
+
 export function assertSupportedJobSpec(job: Pick<CronJob, "sessionTarget" | "payload">) {
   if (job.sessionTarget === "main" && job.payload.kind !== "systemEvent") {
     throw new Error('main cron jobs require payload.kind="systemEvent"');
@@ -47,6 +58,13 @@ export function computeJobNextRunAtMs(job: CronJob, nowMs: number): number | und
   if (!job.enabled) {
     return undefined;
   }
+  if (job.schedule.kind === "every") {
+    const anchorMs = resolveEveryAnchorMs({
+      schedule: job.schedule,
+      fallbackAnchorMs: 
job.createdAtMs, + }); + return computeNextRunAtMs({ ...job.schedule, anchorMs }, nowMs); + } if (job.schedule.kind === "at") { // One-shot jobs stay due until they successfully finish. if (job.state.lastStatus === "ok" && job.state.lastRunAtMs) { @@ -69,18 +87,26 @@ export function computeJobNextRunAtMs(job: CronJob, nowMs: number): number | und return computeNextRunAtMs(job.schedule, nowMs); } -export function recomputeNextRuns(state: CronServiceState) { +export function recomputeNextRuns(state: CronServiceState): boolean { if (!state.store) { - return; + return false; } + let changed = false; const now = state.deps.nowMs(); for (const job of state.store.jobs) { if (!job.state) { job.state = {}; + changed = true; } if (!job.enabled) { - job.state.nextRunAtMs = undefined; - job.state.runningAtMs = undefined; + if (job.state.nextRunAtMs !== undefined) { + job.state.nextRunAtMs = undefined; + changed = true; + } + if (job.state.runningAtMs !== undefined) { + job.state.runningAtMs = undefined; + changed = true; + } continue; } const runningAt = job.state.runningAtMs; @@ -90,9 +116,15 @@ export function recomputeNextRuns(state: CronServiceState) { "cron: clearing stuck running marker", ); job.state.runningAtMs = undefined; + changed = true; + } + const newNext = computeJobNextRunAtMs(job, now); + if (job.state.nextRunAtMs !== newNext) { + job.state.nextRunAtMs = newNext; + changed = true; } - job.state.nextRunAtMs = computeJobNextRunAtMs(job, now); } + return changed; } export function nextWakeAtMs(state: CronServiceState) { @@ -110,10 +142,20 @@ export function nextWakeAtMs(state: CronServiceState) { export function createJob(state: CronServiceState, input: CronJobCreate): CronJob { const now = state.deps.nowMs(); const id = crypto.randomUUID(); + const schedule = + input.schedule.kind === "every" + ? 
{ + ...input.schedule, + anchorMs: resolveEveryAnchorMs({ + schedule: input.schedule, + fallbackAnchorMs: now, + }), + } + : input.schedule; const deleteAfterRun = typeof input.deleteAfterRun === "boolean" ? input.deleteAfterRun - : input.schedule.kind === "at" + : schedule.kind === "at" ? true : undefined; const enabled = typeof input.enabled === "boolean" ? input.enabled : true; @@ -126,7 +168,7 @@ export function createJob(state: CronServiceState, input: CronJobCreate): CronJo deleteAfterRun, createdAtMs: now, updatedAtMs: now, - schedule: input.schedule, + schedule, sessionTarget: input.sessionTarget, wakeMode: input.wakeMode, payload: input.payload, @@ -223,6 +265,9 @@ function mergeCronPayload(existing: CronPayload, patch: CronPayloadPatch): CronP if (typeof patch.timeoutSeconds === "number") { next.timeoutSeconds = patch.timeoutSeconds; } + if (typeof patch.allowUnsafeExternalContent === "boolean") { + next.allowUnsafeExternalContent = patch.allowUnsafeExternalContent; + } if (typeof patch.deliver === "boolean") { next.deliver = patch.deliver; } @@ -297,6 +342,7 @@ function buildPayloadFromPatch(patch: CronPayloadPatch): CronPayload { model: patch.model, thinking: patch.thinking, timeoutSeconds: patch.timeoutSeconds, + allowUnsafeExternalContent: patch.allowUnsafeExternalContent, deliver: patch.deliver, channel: patch.channel, to: patch.to, @@ -334,6 +380,9 @@ function mergeCronDelivery( } export function isJobDue(job: CronJob, nowMs: number, opts: { forced: boolean }) { + if (typeof job.state.runningAtMs === "number") { + return false; + } if (opts.forced) { return true; } diff --git a/src/cron/service/ops.ts b/src/cron/service/ops.ts index d14597656..545261e97 100644 --- a/src/cron/service/ops.ts +++ b/src/cron/service/ops.ts @@ -11,7 +11,7 @@ import { } from "./jobs.js"; import { locked } from "./locked.js"; import { ensureLoaded, persist, warnIfDisabled } from "./store.js"; -import { armTimer, emit, executeJob, stopTimer, wake } from "./timer.js"; 
+import { armTimer, emit, executeJob, runMissedJobs, stopTimer, wake } from "./timer.js"; export async function start(state: CronServiceState) { await locked(state, async () => { @@ -19,7 +19,18 @@ export async function start(state: CronServiceState) { state.deps.log.info({ enabled: false }, "cron: disabled"); return; } - await ensureLoaded(state); + await ensureLoaded(state, { skipRecompute: true }); + const jobs = state.store?.jobs ?? []; + for (const job of jobs) { + if (typeof job.state.runningAtMs === "number") { + state.deps.log.warn( + { jobId: job.id, runningAtMs: job.state.runningAtMs }, + "cron: clearing stale running marker on startup", + ); + job.state.runningAtMs = undefined; + } + } + await runMissedJobs(state); recomputeNextRuns(state); await persist(state); armTimer(state); @@ -40,7 +51,7 @@ export function stop(state: CronServiceState) { export async function status(state: CronServiceState) { return await locked(state, async () => { - await ensureLoaded(state); + await ensureLoaded(state, { skipRecompute: true }); return { enabled: state.deps.cronEnabled, storePath: state.deps.storePath, @@ -52,7 +63,7 @@ export async function status(state: CronServiceState) { export async function list(state: CronServiceState, opts?: { includeDisabled?: boolean }) { return await locked(state, async () => { - await ensureLoaded(state); + await ensureLoaded(state, { skipRecompute: true }); const includeDisabled = opts?.includeDisabled === true; const jobs = (state.store?.jobs ?? []).filter((j) => includeDisabled || j.enabled); return jobs.toSorted((a, b) => (a.state.nextRunAtMs ?? 0) - (b.state.nextRunAtMs ?? 
0)); @@ -83,6 +94,22 @@ export async function update(state: CronServiceState, id: string, patch: CronJob const job = findJobOrThrow(state, id); const now = state.deps.nowMs(); applyJobPatch(job, patch); + if (job.schedule.kind === "every") { + const anchor = job.schedule.anchorMs; + if (typeof anchor !== "number" || !Number.isFinite(anchor)) { + const patchSchedule = patch.schedule; + const fallbackAnchorMs = + patchSchedule?.kind === "every" + ? now + : typeof job.createdAtMs === "number" && Number.isFinite(job.createdAtMs) + ? job.createdAtMs + : now; + job.schedule = { + ...job.schedule, + anchorMs: Math.max(0, Math.floor(fallbackAnchorMs)), + }; + } + } job.updatedAtMs = now; if (job.enabled) { job.state.nextRunAtMs = computeJobNextRunAtMs(job, now); @@ -124,14 +151,18 @@ export async function remove(state: CronServiceState, id: string) { export async function run(state: CronServiceState, id: string, mode?: "due" | "force") { return await locked(state, async () => { warnIfDisabled(state, "run"); - await ensureLoaded(state); + await ensureLoaded(state, { skipRecompute: true }); const job = findJobOrThrow(state, id); + if (typeof job.state.runningAtMs === "number") { + return { ok: true, ran: false, reason: "already-running" as const }; + } const now = state.deps.nowMs(); const due = isJobDue(job, now, { forced: mode === "force" }); if (!due) { return { ok: true, ran: false, reason: "not-due" as const }; } await executeJob(state, job, now, { forced: mode === "force" }); + recomputeNextRuns(state); await persist(state); armTimer(state); return { ok: true, ran: true } as const; diff --git a/src/cron/service/state.ts b/src/cron/service/state.ts index 64fd9cc9e..0847989b3 100644 --- a/src/cron/service/state.ts +++ b/src/cron/service/state.ts @@ -9,6 +9,8 @@ export type CronEvent = { status?: "ok" | "error" | "skipped"; error?: string; summary?: string; + sessionId?: string; + sessionKey?: string; nextRunAtMs?: number; }; @@ -33,6 +35,8 @@ export type CronServiceDeps 
= { /** Last non-empty agent text output (not truncated). */ outputText?: string; error?: string; + sessionId?: string; + sessionKey?: string; }>; onEvent?: (evt: CronEvent) => void; }; @@ -78,6 +82,7 @@ export type CronStatusSummary = { export type CronRunResult = | { ok: true; ran: true } | { ok: true; ran: false; reason: "not-due" } + | { ok: true; ran: false; reason: "already-running" } | { ok: false }; export type CronRemoveResult = { ok: true; removed: boolean } | { ok: false; removed: false }; diff --git a/src/cron/service/store.ts b/src/cron/service/store.ts index 51aca4165..3da848f3e 100644 --- a/src/cron/service/store.ts +++ b/src/cron/service/store.ts @@ -117,6 +117,141 @@ function stripLegacyDeliveryFields(payload: Record) { } } +function normalizePayloadKind(payload: Record) { + const raw = typeof payload.kind === "string" ? payload.kind.trim().toLowerCase() : ""; + if (raw === "agentturn") { + payload.kind = "agentTurn"; + return true; + } + if (raw === "systemevent") { + payload.kind = "systemEvent"; + return true; + } + return false; +} + +function inferPayloadIfMissing(raw: Record) { + const message = typeof raw.message === "string" ? raw.message.trim() : ""; + const text = typeof raw.text === "string" ? 
raw.text.trim() : ""; + if (message) { + raw.payload = { kind: "agentTurn", message }; + return true; + } + if (text) { + raw.payload = { kind: "systemEvent", text }; + return true; + } + return false; +} + +function copyTopLevelAgentTurnFields( + raw: Record, + payload: Record, +) { + let mutated = false; + + const copyTrimmedString = (field: "model" | "thinking") => { + const existing = payload[field]; + if (typeof existing === "string" && existing.trim()) { + return; + } + const value = raw[field]; + if (typeof value === "string" && value.trim()) { + payload[field] = value.trim(); + mutated = true; + } + }; + copyTrimmedString("model"); + copyTrimmedString("thinking"); + + if ( + typeof payload.timeoutSeconds !== "number" && + typeof raw.timeoutSeconds === "number" && + Number.isFinite(raw.timeoutSeconds) + ) { + payload.timeoutSeconds = Math.max(1, Math.floor(raw.timeoutSeconds)); + mutated = true; + } + + if ( + typeof payload.allowUnsafeExternalContent !== "boolean" && + typeof raw.allowUnsafeExternalContent === "boolean" + ) { + payload.allowUnsafeExternalContent = raw.allowUnsafeExternalContent; + mutated = true; + } + + if (typeof payload.deliver !== "boolean" && typeof raw.deliver === "boolean") { + payload.deliver = raw.deliver; + mutated = true; + } + if ( + typeof payload.channel !== "string" && + typeof raw.channel === "string" && + raw.channel.trim() + ) { + payload.channel = raw.channel.trim(); + mutated = true; + } + if (typeof payload.to !== "string" && typeof raw.to === "string" && raw.to.trim()) { + payload.to = raw.to.trim(); + mutated = true; + } + if ( + typeof payload.bestEffortDeliver !== "boolean" && + typeof raw.bestEffortDeliver === "boolean" + ) { + payload.bestEffortDeliver = raw.bestEffortDeliver; + mutated = true; + } + if ( + typeof payload.provider !== "string" && + typeof raw.provider === "string" && + raw.provider.trim() + ) { + payload.provider = raw.provider.trim(); + mutated = true; + } + + return mutated; +} + +function 
stripLegacyTopLevelFields(raw: Record) { + if ("model" in raw) { + delete raw.model; + } + if ("thinking" in raw) { + delete raw.thinking; + } + if ("timeoutSeconds" in raw) { + delete raw.timeoutSeconds; + } + if ("allowUnsafeExternalContent" in raw) { + delete raw.allowUnsafeExternalContent; + } + if ("message" in raw) { + delete raw.message; + } + if ("text" in raw) { + delete raw.text; + } + if ("deliver" in raw) { + delete raw.deliver; + } + if ("channel" in raw) { + delete raw.channel; + } + if ("to" in raw) { + delete raw.to; + } + if ("bestEffortDeliver" in raw) { + delete raw.bestEffortDeliver; + } + if ("provider" in raw) { + delete raw.provider; + } +} + async function getFileMtimeMs(path: string): Promise { try { const stats = await fs.promises.stat(path); @@ -148,6 +283,12 @@ export async function ensureLoaded( const jobs = (loaded.jobs ?? []) as unknown as Array>; let mutated = false; for (const raw of jobs) { + const state = raw.state; + if (!state || typeof state !== "object" || Array.isArray(state)) { + raw.state = {}; + mutated = true; + } + const nameRaw = raw.name; if (typeof nameRaw !== "string" || nameRaw.trim().length === 0) { raw.name = inferLegacyName({ @@ -171,8 +312,57 @@ export async function ensureLoaded( } const payload = raw.payload; - if (payload && typeof payload === "object" && !Array.isArray(payload)) { - if (migrateLegacyCronPayload(payload as Record)) { + if ( + (!payload || typeof payload !== "object" || Array.isArray(payload)) && + inferPayloadIfMissing(raw) + ) { + mutated = true; + } + + const payloadRecord = + raw.payload && typeof raw.payload === "object" && !Array.isArray(raw.payload) + ? 
(raw.payload as Record) + : null; + + if (payloadRecord) { + if (normalizePayloadKind(payloadRecord)) { + mutated = true; + } + if (!payloadRecord.kind) { + if (typeof payloadRecord.message === "string" && payloadRecord.message.trim()) { + payloadRecord.kind = "agentTurn"; + mutated = true; + } else if (typeof payloadRecord.text === "string" && payloadRecord.text.trim()) { + payloadRecord.kind = "systemEvent"; + mutated = true; + } + } + if (payloadRecord.kind === "agentTurn") { + if (copyTopLevelAgentTurnFields(raw, payloadRecord)) { + mutated = true; + } + } + } + + const hadLegacyTopLevelFields = + "model" in raw || + "thinking" in raw || + "timeoutSeconds" in raw || + "allowUnsafeExternalContent" in raw || + "message" in raw || + "text" in raw || + "deliver" in raw || + "channel" in raw || + "to" in raw || + "bestEffortDeliver" in raw || + "provider" in raw; + if (hadLegacyTopLevelFields) { + stripLegacyTopLevelFields(raw); + mutated = true; + } + + if (payloadRecord) { + if (migrateLegacyCronPayload(payloadRecord)) { mutated = true; } } @@ -202,6 +392,27 @@ export async function ensureLoaded( } mutated = true; } + + const everyMsRaw = sched.everyMs; + const everyMs = + typeof everyMsRaw === "number" && Number.isFinite(everyMsRaw) + ? Math.floor(everyMsRaw) + : null; + if ((kind === "every" || sched.kind === "every") && everyMs !== null) { + const anchorRaw = sched.anchorMs; + const normalizedAnchor = + typeof anchorRaw === "number" && Number.isFinite(anchorRaw) + ? Math.max(0, Math.floor(anchorRaw)) + : typeof raw.createdAtMs === "number" && Number.isFinite(raw.createdAtMs) + ? Math.max(0, Math.floor(raw.createdAtMs)) + : typeof raw.updatedAtMs === "number" && Number.isFinite(raw.updatedAtMs) + ? 
Math.max(0, Math.floor(raw.updatedAtMs)) + : null; + if (normalizedAnchor !== null && anchorRaw !== normalizedAnchor) { + sched.anchorMs = normalizedAnchor; + mutated = true; + } + } } const delivery = raw.delivery; @@ -213,6 +424,11 @@ export async function ensureLoaded( (delivery as { mode?: unknown }).mode = "announce"; mutated = true; } + } else if (modeRaw === undefined || modeRaw === null) { + // Explicitly persist the default so existing jobs don't silently + // change behaviour when the runtime default shifts. + (delivery as { mode?: unknown }).mode = "announce"; + mutated = true; } } @@ -222,10 +438,6 @@ export async function ensureLoaded( mutated = true; } - const payloadRecord = - payload && typeof payload === "object" && !Array.isArray(payload) - ? (payload as Record) - : null; const payloadKind = payloadRecord && typeof payloadRecord.kind === "string" ? payloadRecord.kind : ""; const sessionTarget = diff --git a/src/cron/service/timer.ts b/src/cron/service/timer.ts index 8af4f9bc3..8e9bfb2d5 100644 --- a/src/cron/service/timer.ts +++ b/src/cron/service/timer.ts @@ -1,6 +1,7 @@ import type { HeartbeatRunResult } from "../../infra/heartbeat-wake.js"; import type { CronJob } from "../types.js"; import type { CronEvent, CronServiceState } from "./state.js"; +import { resolveCronDeliveryPlan } from "../delivery.js"; import { computeJobNextRunAtMs, nextWakeAtMs, @@ -10,7 +11,7 @@ import { import { locked } from "./locked.js"; import { ensureLoaded, persist } from "./store.js"; -const MAX_TIMEOUT_MS = 2 ** 31 - 1; +const MAX_TIMER_DELAY_MS = 60_000; export function armTimer(state: CronServiceState) { if (state.timer) { @@ -25,12 +26,15 @@ export function armTimer(state: CronServiceState) { return; } const delay = Math.max(nextAt - state.deps.nowMs(), 0); - // Avoid TimeoutOverflowWarning when a job is far in the future. 
- const clampedDelay = Math.min(delay, MAX_TIMEOUT_MS); - state.timer = setTimeout(() => { - void onTimer(state).catch((err) => { + // Wake at least once a minute to avoid schedule drift and recover quickly + // when the process was paused or wall-clock time jumps. + const clampedDelay = Math.min(delay, MAX_TIMER_DELAY_MS); + state.timer = setTimeout(async () => { + try { + await onTimer(state); + } catch (err) { state.deps.log.error({ err: String(err) }, "cron: timer tick failed"); - }); + } }, clampedDelay); } @@ -40,22 +44,169 @@ export async function onTimer(state: CronServiceState) { } state.running = true; try { - await locked(state, async () => { - // Reload persisted due-times without recomputing so runDueJobs sees - // the original nextRunAtMs values. Recomputing first would advance - // every/cron slots past the current tick when the timer fires late (#9788). + const dueJobs = await locked(state, async () => { await ensureLoaded(state, { forceReload: true, skipRecompute: true }); - await runDueJobs(state); - recomputeNextRuns(state); + const due = findDueJobs(state); + + if (due.length === 0) { + const changed = recomputeNextRuns(state); + if (changed) { + await persist(state); + } + return []; + } + + const now = state.deps.nowMs(); + for (const job of due) { + job.state.runningAtMs = now; + job.state.lastError = undefined; + } await persist(state); + + return due.map((j) => ({ + id: j.id, + job: j, + })); }); + + const results: Array<{ + jobId: string; + status: "ok" | "error" | "skipped"; + error?: string; + summary?: string; + sessionId?: string; + sessionKey?: string; + startedAt: number; + endedAt: number; + }> = []; + + for (const { id, job } of dueJobs) { + const startedAt = state.deps.nowMs(); + job.state.runningAtMs = startedAt; + emit(state, { jobId: job.id, action: "started", runAtMs: startedAt }); + try { + const result = await executeJobCore(state, job); + results.push({ jobId: id, ...result, startedAt, endedAt: state.deps.nowMs() }); + } 
catch (err) { + results.push({ + jobId: id, + status: "error", + error: String(err), + startedAt, + endedAt: state.deps.nowMs(), + }); + } + } + + if (results.length > 0) { + await locked(state, async () => { + await ensureLoaded(state, { forceReload: true, skipRecompute: true }); + + for (const result of results) { + const job = state.store?.jobs.find((j) => j.id === result.jobId); + if (!job) { + continue; + } + + const startedAt = result.startedAt; + job.state.runningAtMs = undefined; + job.state.lastRunAtMs = startedAt; + job.state.lastStatus = result.status; + job.state.lastDurationMs = Math.max(0, result.endedAt - startedAt); + job.state.lastError = result.error; + + const shouldDelete = + job.schedule.kind === "at" && result.status === "ok" && job.deleteAfterRun === true; + + if (!shouldDelete) { + if (job.schedule.kind === "at" && result.status === "ok") { + job.enabled = false; + job.state.nextRunAtMs = undefined; + } else if (job.enabled) { + job.state.nextRunAtMs = computeJobNextRunAtMs(job, result.endedAt); + } else { + job.state.nextRunAtMs = undefined; + } + } + + emit(state, { + jobId: job.id, + action: "finished", + status: result.status, + error: result.error, + summary: result.summary, + sessionId: result.sessionId, + sessionKey: result.sessionKey, + runAtMs: startedAt, + durationMs: job.state.lastDurationMs, + nextRunAtMs: job.state.nextRunAtMs, + }); + + if (shouldDelete && state.store) { + state.store.jobs = state.store.jobs.filter((j) => j.id !== job.id); + emit(state, { jobId: job.id, action: "removed" }); + } + + job.updatedAtMs = result.endedAt; + } + + recomputeNextRuns(state); + await persist(state); + }); + } } finally { state.running = false; - // Always re-arm so transient errors (e.g. ENOSPC) don't kill the scheduler. 
armTimer(state); } } +function findDueJobs(state: CronServiceState): CronJob[] { + if (!state.store) { + return []; + } + const now = state.deps.nowMs(); + return state.store.jobs.filter((j) => { + if (!j.enabled) { + return false; + } + if (typeof j.state.runningAtMs === "number") { + return false; + } + const next = j.state.nextRunAtMs; + return typeof next === "number" && now >= next; + }); +} + +export async function runMissedJobs(state: CronServiceState) { + if (!state.store) { + return; + } + const now = state.deps.nowMs(); + const missed = state.store.jobs.filter((j) => { + if (!j.enabled) { + return false; + } + if (typeof j.state.runningAtMs === "number") { + return false; + } + const next = j.state.nextRunAtMs; + if (j.schedule.kind === "at" && j.state.lastStatus === "ok") { + return false; + } + return typeof next === "number" && now >= next; + }); + + if (missed.length > 0) { + state.deps.log.info( + { count: missed.length, jobIds: missed.map((j) => j.id) }, + "cron: running missed jobs after restart", + ); + for (const job of missed) { + await executeJob(state, job, now, { forced: false }); + } + } +} + export async function runDueJobs(state: CronServiceState) { if (!state.store) { return; @@ -76,6 +227,99 @@ export async function runDueJobs(state: CronServiceState) { } } +async function executeJobCore( + state: CronServiceState, + job: CronJob, +): Promise<{ + status: "ok" | "error" | "skipped"; + error?: string; + summary?: string; + sessionId?: string; + sessionKey?: string; +}> { + if (job.sessionTarget === "main") { + const text = resolveJobPayloadTextForMain(job); + if (!text) { + const kind = job.payload.kind; + return { + status: "skipped", + error: + kind === "systemEvent" + ? 
"main job requires non-empty systemEvent text" + : 'main job requires payload.kind="systemEvent"', + }; + } + state.deps.enqueueSystemEvent(text, { agentId: job.agentId }); + if (job.wakeMode === "now" && state.deps.runHeartbeatOnce) { + const reason = `cron:${job.id}`; + const delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); + const maxWaitMs = 2 * 60_000; + const waitStartedAt = state.deps.nowMs(); + + let heartbeatResult: HeartbeatRunResult; + for (;;) { + heartbeatResult = await state.deps.runHeartbeatOnce({ reason }); + if ( + heartbeatResult.status !== "skipped" || + heartbeatResult.reason !== "requests-in-flight" + ) { + break; + } + if (state.deps.nowMs() - waitStartedAt > maxWaitMs) { + state.deps.requestHeartbeatNow({ reason }); + return { status: "ok", summary: text }; + } + await delay(250); + } + + if (heartbeatResult.status === "ran") { + return { status: "ok", summary: text }; + } else if (heartbeatResult.status === "skipped") { + return { status: "skipped", error: heartbeatResult.reason, summary: text }; + } else { + return { status: "error", error: heartbeatResult.reason, summary: text }; + } + } else { + state.deps.requestHeartbeatNow({ reason: `cron:${job.id}` }); + return { status: "ok", summary: text }; + } + } + + if (job.payload.kind !== "agentTurn") { + return { status: "skipped", error: "isolated job requires payload.kind=agentTurn" }; + } + + const res = await state.deps.runIsolatedAgentJob({ + job, + message: job.payload.message, + }); + + // Post a short summary back to the main session. + const summaryText = res.summary?.trim(); + const deliveryPlan = resolveCronDeliveryPlan(job); + if (summaryText && deliveryPlan.requested) { + const prefix = "Cron"; + const label = + res.status === "error" ? 
`${prefix} (error): ${summaryText}` : `${prefix}: ${summaryText}`; + state.deps.enqueueSystemEvent(label, { agentId: job.agentId }); + if (job.wakeMode === "now") { + state.deps.requestHeartbeatNow({ reason: `cron:${job.id}` }); + } + } + + return { + status: res.status, + error: res.error, + summary: res.summary, + sessionId: res.sessionId, + sessionKey: res.sessionKey, + }; +} + +/** + * Execute a job. This version is used by the `run` command and other + * places that need the full execution with state updates. + */ export async function executeJob( state: CronServiceState, job: CronJob, @@ -89,7 +333,12 @@ export async function executeJob( let deleted = false; - const finish = async (status: "ok" | "error" | "skipped", err?: string, summary?: string) => { + const finish = async ( + status: "ok" | "error" | "skipped", + err?: string, + summary?: string, + session?: { sessionId?: string; sessionKey?: string }, + ) => { const endedAt = state.deps.nowMs(); job.state.runningAtMs = undefined; job.state.lastRunAtMs = startedAt; @@ -102,7 +351,6 @@ export async function executeJob( if (!shouldDelete) { if (job.schedule.kind === "at" && status === "ok") { - // One-shot job completed successfully; disable it. job.enabled = false; job.state.nextRunAtMs = undefined; } else if (job.enabled) { @@ -118,6 +366,8 @@ export async function executeJob( status, error: err, summary, + sessionId: session?.sessionId, + sessionKey: session?.sessionKey, runAtMs: startedAt, durationMs: job.state.lastDurationMs, nextRunAtMs: job.state.nextRunAtMs, @@ -131,96 +381,16 @@ export async function executeJob( }; try { - if (job.sessionTarget === "main") { - const text = resolveJobPayloadTextForMain(job); - if (!text) { - const kind = job.payload.kind; - await finish( - "skipped", - kind === "systemEvent" - ? 
"main job requires non-empty systemEvent text" - : 'main job requires payload.kind="systemEvent"', - ); - return; - } - state.deps.enqueueSystemEvent(text, { agentId: job.agentId }); - if (job.wakeMode === "now" && state.deps.runHeartbeatOnce) { - const reason = `cron:${job.id}`; - const delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms)); - const maxWaitMs = 2 * 60_000; - const waitStartedAt = state.deps.nowMs(); - - let heartbeatResult: HeartbeatRunResult; - for (;;) { - heartbeatResult = await state.deps.runHeartbeatOnce({ reason }); - if ( - heartbeatResult.status !== "skipped" || - heartbeatResult.reason !== "requests-in-flight" - ) { - break; - } - if (state.deps.nowMs() - waitStartedAt > maxWaitMs) { - heartbeatResult = { - status: "skipped", - reason: "timeout waiting for main lane to become idle", - }; - break; - } - await delay(250); - } - - if (heartbeatResult.status === "ran") { - await finish("ok", undefined, text); - } else if (heartbeatResult.status === "skipped") { - await finish("skipped", heartbeatResult.reason, text); - } else { - await finish("error", heartbeatResult.reason, text); - } - } else { - // wakeMode is "next-heartbeat" or runHeartbeatOnce not available - state.deps.requestHeartbeatNow({ reason: `cron:${job.id}` }); - await finish("ok", undefined, text); - } - return; - } - - if (job.payload.kind !== "agentTurn") { - await finish("skipped", "isolated job requires payload.kind=agentTurn"); - return; - } - - const res = await state.deps.runIsolatedAgentJob({ - job, - message: job.payload.message, + const result = await executeJobCore(state, job); + await finish(result.status, result.error, result.summary, { + sessionId: result.sessionId, + sessionKey: result.sessionKey, }); - - // Post a short summary back to the main session so the user sees - // the cron result without opening the isolated session. - const summaryText = res.summary?.trim(); - const deliveryMode = job.delivery?.mode ?? 
"announce"; - if (summaryText && deliveryMode !== "none") { - const prefix = "Cron"; - const label = - res.status === "error" ? `${prefix} (error): ${summaryText}` : `${prefix}: ${summaryText}`; - state.deps.enqueueSystemEvent(label, { agentId: job.agentId }); - if (job.wakeMode === "now") { - state.deps.requestHeartbeatNow({ reason: `cron:${job.id}` }); - } - } - - if (res.status === "ok") { - await finish("ok", undefined, res.summary); - } else if (res.status === "skipped") { - await finish("skipped", undefined, res.summary); - } else { - await finish("error", res.error ?? "cron job failed", res.summary); - } } catch (err) { await finish("error", String(err)); } finally { job.updatedAtMs = nowMs; if (!opts.forced && job.enabled && !deleted) { - // Keep nextRunAtMs in sync in case the schedule advanced during a long run. job.state.nextRunAtMs = computeJobNextRunAtMs(job, state.deps.nowMs()); } } diff --git a/src/cron/store.test.ts b/src/cron/store.test.ts new file mode 100644 index 000000000..ec80160df --- /dev/null +++ b/src/cron/store.test.ts @@ -0,0 +1,32 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it } from "vitest"; +import { loadCronStore } from "./store.js"; + +async function makeStorePath() { + const dir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cron-store-")); + return { + dir, + storePath: path.join(dir, "jobs.json"), + cleanup: async () => { + await fs.rm(dir, { recursive: true, force: true }); + }, + }; +} + +describe("cron store", () => { + it("returns empty store when file does not exist", async () => { + const store = await makeStorePath(); + const loaded = await loadCronStore(store.storePath); + expect(loaded).toEqual({ version: 1, jobs: [] }); + await store.cleanup(); + }); + + it("throws when store contains invalid JSON", async () => { + const store = await makeStorePath(); + await fs.writeFile(store.storePath, "{ not json", "utf-8"); + await 
expect(loadCronStore(store.storePath)).rejects.toThrow(/Failed to parse cron store/i); + await store.cleanup(); + }); +}); diff --git a/src/cron/store.ts b/src/cron/store.ts index 5fb296153..21bc18245 100644 --- a/src/cron/store.ts +++ b/src/cron/store.ts @@ -22,14 +22,28 @@ export function resolveCronStorePath(storePath?: string) { export async function loadCronStore(storePath: string): Promise { try { const raw = await fs.promises.readFile(storePath, "utf-8"); - const parsed = JSON5.parse(raw); - const jobs = Array.isArray(parsed?.jobs) ? (parsed?.jobs as never[]) : []; + let parsed: unknown; + try { + parsed = JSON5.parse(raw); + } catch (err) { + throw new Error(`Failed to parse cron store at ${storePath}: ${String(err)}`, { + cause: err, + }); + } + const parsedRecord = + parsed && typeof parsed === "object" && !Array.isArray(parsed) + ? (parsed as Record) + : {}; + const jobs = Array.isArray(parsedRecord.jobs) ? (parsedRecord.jobs as never[]) : []; return { version: 1, jobs: jobs.filter(Boolean) as never as CronStoreFile["jobs"], }; - } catch { - return { version: 1, jobs: [] }; + } catch (err) { + if ((err as { code?: unknown })?.code === "ENOENT") { + return { version: 1, jobs: [] }; + } + throw err; } } diff --git a/src/gateway/protocol/schema/cron.ts b/src/gateway/protocol/schema/cron.ts index ce9479d1a..c8238c50f 100644 --- a/src/gateway/protocol/schema/cron.ts +++ b/src/gateway/protocol/schema/cron.ts @@ -42,6 +42,11 @@ export const CronPayloadSchema = Type.Union([ model: Type.Optional(Type.String()), thinking: Type.Optional(Type.String()), timeoutSeconds: Type.Optional(Type.Integer({ minimum: 1 })), + allowUnsafeExternalContent: Type.Optional(Type.Boolean()), + deliver: Type.Optional(Type.Boolean()), + channel: Type.Optional(Type.String()), + to: Type.Optional(Type.String()), + bestEffortDeliver: Type.Optional(Type.Boolean()), }, { additionalProperties: false }, ), @@ -62,6 +67,11 @@ export const CronPayloadPatchSchema = Type.Union([ model: 
Type.Optional(Type.String()), thinking: Type.Optional(Type.String()), timeoutSeconds: Type.Optional(Type.Integer({ minimum: 1 })), + allowUnsafeExternalContent: Type.Optional(Type.Boolean()), + deliver: Type.Optional(Type.Boolean()), + channel: Type.Optional(Type.String()), + to: Type.Optional(Type.String()), + bestEffortDeliver: Type.Optional(Type.Boolean()), }, { additionalProperties: false }, ), @@ -239,6 +249,8 @@ export const CronRunLogEntrySchema = Type.Object( ), error: Type.Optional(Type.String()), summary: Type.Optional(Type.String()), + sessionId: Type.Optional(NonEmptyString), + sessionKey: Type.Optional(NonEmptyString), runAtMs: Type.Optional(Type.Integer({ minimum: 0 })), durationMs: Type.Optional(Type.Integer({ minimum: 0 })), nextRunAtMs: Type.Optional(Type.Integer({ minimum: 0 })), diff --git a/src/gateway/server-cron.ts b/src/gateway/server-cron.ts index 68b0bc095..12b0fe6b6 100644 --- a/src/gateway/server-cron.ts +++ b/src/gateway/server-cron.ts @@ -90,6 +90,8 @@ export function buildGatewayCronService(params: { status: evt.status, error: evt.error, summary: evt.summary, + sessionId: evt.sessionId, + sessionKey: evt.sessionKey, runAtMs: evt.runAtMs, durationMs: evt.durationMs, nextRunAtMs: evt.nextRunAtMs, diff --git a/src/gateway/server-methods/cron.ts b/src/gateway/server-methods/cron.ts index 703103860..023d9d363 100644 --- a/src/gateway/server-methods/cron.ts +++ b/src/gateway/server-methods/cron.ts @@ -189,7 +189,7 @@ export const cronHandlers: GatewayRequestHandlers = { ); return; } - const result = await context.cron.run(jobId, p.mode); + const result = await context.cron.run(jobId, p.mode ?? 
"force"); respond(true, result, undefined); }, "cron.runs": async ({ params, respond, context }) => { diff --git a/src/gateway/server.cron.e2e.test.ts b/src/gateway/server.cron.e2e.test.ts index fc37f1702..8e9d242e4 100644 --- a/src/gateway/server.cron.e2e.test.ts +++ b/src/gateway/server.cron.e2e.test.ts @@ -117,7 +117,7 @@ describe("gateway server cron", () => { | { schedule?: unknown; sessionTarget?: unknown; wakeMode?: unknown } | undefined; expect(wrappedPayload?.sessionTarget).toBe("main"); - expect(wrappedPayload?.wakeMode).toBe("next-heartbeat"); + expect(wrappedPayload?.wakeMode).toBe("now"); expect((wrappedPayload?.schedule as { kind?: unknown } | undefined)?.kind).toBe("at"); const patchRes = await rpcReq(ws, "cron.add", { @@ -181,6 +181,32 @@ describe("gateway server cron", () => { expect(merged?.delivery?.channel).toBe("telegram"); expect(merged?.delivery?.to).toBe("19098680"); + const legacyDeliveryPatchRes = await rpcReq(ws, "cron.update", { + id: mergeJobId, + patch: { + payload: { + kind: "agentTurn", + deliver: true, + channel: "signal", + to: "+15550001111", + bestEffortDeliver: true, + }, + }, + }); + expect(legacyDeliveryPatchRes.ok).toBe(true); + const legacyDeliveryPatched = legacyDeliveryPatchRes.payload as + | { + payload?: { kind?: unknown; message?: unknown }; + delivery?: { mode?: unknown; channel?: unknown; to?: unknown; bestEffort?: unknown }; + } + | undefined; + expect(legacyDeliveryPatched?.payload?.kind).toBe("agentTurn"); + expect(legacyDeliveryPatched?.payload?.message).toBe("hello"); + expect(legacyDeliveryPatched?.delivery?.mode).toBe("announce"); + expect(legacyDeliveryPatched?.delivery?.channel).toBe("signal"); + expect(legacyDeliveryPatched?.delivery?.to).toBe("+15550001111"); + expect(legacyDeliveryPatched?.delivery?.bestEffort).toBe(true); + const rejectRes = await rpcReq(ws, "cron.add", { name: "patch reject", enabled: true, diff --git a/src/gateway/session-utils.test.ts b/src/gateway/session-utils.test.ts index 
76798db43..2fb51153d 100644 --- a/src/gateway/session-utils.test.ts +++ b/src/gateway/session-utils.test.ts @@ -331,4 +331,29 @@ describe("listSessionsFromStore search", () => { }); expect(result.sessions.length).toBe(1); }); + + test("hides cron run alias session keys from sessions list", () => { + const now = Date.now(); + const store: Record = { + "agent:main:cron:job-1": { + sessionId: "run-abc", + updatedAt: now, + label: "Cron: job-1", + } as SessionEntry, + "agent:main:cron:job-1:run:run-abc": { + sessionId: "run-abc", + updatedAt: now, + label: "Cron: job-1", + } as SessionEntry, + }; + + const result = listSessionsFromStore({ + cfg: baseCfg, + storePath: "/tmp/sessions.json", + store, + opts: {}, + }); + + expect(result.sessions.map((session) => session.key)).toEqual(["agent:main:cron:job-1"]); + }); }); diff --git a/src/gateway/session-utils.ts b/src/gateway/session-utils.ts index 80ea40e40..bbbbc575e 100644 --- a/src/gateway/session-utils.ts +++ b/src/gateway/session-utils.ts @@ -207,6 +207,12 @@ export function classifySessionKey(key: string, entry?: SessionEntry): GatewaySe return "direct"; } +function isCronRunSessionKey(key: string): boolean { + const parsed = parseAgentSessionKey(key); + const raw = parsed?.rest ?? 
key; + return /^cron:[^:]+:run:[^:]+$/.test(raw); +} + export function parseGroupKey( key: string, ): { channel?: string; kind?: "group" | "channel"; id?: string } | null { @@ -568,6 +574,9 @@ export function listSessionsFromStore(params: { let sessions = Object.entries(store) .filter(([key]) => { + if (isCronRunSessionKey(key)) { + return false; + } if (!includeGlobal && key === "global") { return false; } diff --git a/src/hooks/bundled/session-memory/handler.ts b/src/hooks/bundled/session-memory/handler.ts index 6f2a531f0..a0c154d84 100644 --- a/src/hooks/bundled/session-memory/handler.ts +++ b/src/hooks/bundled/session-memory/handler.ts @@ -12,9 +12,12 @@ import { fileURLToPath } from "node:url"; import type { OpenClawConfig } from "../../../config/config.js"; import type { HookHandler } from "../../hooks.js"; import { resolveAgentWorkspaceDir } from "../../../agents/agent-scope.js"; +import { createSubsystemLogger } from "../../../logging/subsystem.js"; import { resolveAgentIdFromSessionKey } from "../../../routing/session-key.js"; import { resolveHookConfig } from "../../config.js"; +const log = createSubsystemLogger("hooks/session-memory"); + /** * Read recent messages from session file for slug generation */ @@ -69,7 +72,7 @@ const saveSessionToMemory: HookHandler = async (event) => { } try { - console.log("[session-memory] Hook triggered for /new command"); + log.debug("Hook triggered for /new command"); const context = event.context || {}; const cfg = context.cfg as OpenClawConfig | undefined; @@ -92,9 +95,11 @@ const saveSessionToMemory: HookHandler = async (event) => { const currentSessionId = sessionEntry.sessionId as string; const currentSessionFile = sessionEntry.sessionFile as string; - console.log("[session-memory] Current sessionId:", currentSessionId); - console.log("[session-memory] Current sessionFile:", currentSessionFile); - console.log("[session-memory] cfg present:", !!cfg); + log.debug("Session context resolved", { + sessionId: 
currentSessionId, + sessionFile: currentSessionFile, + hasCfg: Boolean(cfg), + }); const sessionFile = currentSessionFile || undefined; @@ -111,10 +116,13 @@ const saveSessionToMemory: HookHandler = async (event) => { if (sessionFile) { // Get recent conversation content sessionContent = await getRecentSessionContent(sessionFile, messageCount); - console.log("[session-memory] sessionContent length:", sessionContent?.length || 0); + log.debug("Session content loaded", { + length: sessionContent?.length ?? 0, + messageCount, + }); if (sessionContent && cfg) { - console.log("[session-memory] Calling generateSlugViaLLM..."); + log.debug("Calling generateSlugViaLLM..."); // Dynamically import the LLM slug generator (avoids module caching issues) // When compiled, handler is at dist/hooks/bundled/session-memory/handler.js // Going up ../.. puts us at dist/hooks/, so just add llm-slug-generator.js @@ -124,7 +132,7 @@ const saveSessionToMemory: HookHandler = async (event) => { // Use LLM to generate a descriptive slug slug = await generateSlugViaLLM({ sessionContent, cfg }); - console.log("[session-memory] Generated slug:", slug); + log.debug("Generated slug", { slug }); } } @@ -132,14 +140,16 @@ const saveSessionToMemory: HookHandler = async (event) => { if (!slug) { const timeSlug = now.toISOString().split("T")[1].split(".")[0].replace(/:/g, ""); slug = timeSlug.slice(0, 4); // HHMM - console.log("[session-memory] Using fallback timestamp slug:", slug); + log.debug("Using fallback timestamp slug", { slug }); } // Create filename with date and slug const filename = `${dateStr}-${slug}.md`; const memoryFilePath = path.join(memoryDir, filename); - console.log("[session-memory] Generated filename:", filename); - console.log("[session-memory] Full path:", memoryFilePath); + log.debug("Memory file path resolved", { + filename, + path: memoryFilePath.replace(os.homedir(), "~"), + }); // Format time as HH:MM:SS UTC const timeStr = now.toISOString().split("T")[1].split(".")[0]; 
@@ -167,16 +177,21 @@ const saveSessionToMemory: HookHandler = async (event) => { // Write to new memory file await fs.writeFile(memoryFilePath, entry, "utf-8"); - console.log("[session-memory] Memory file written successfully"); + log.debug("Memory file written successfully"); // Log completion (but don't send user-visible confirmation - it's internal housekeeping) const relPath = memoryFilePath.replace(os.homedir(), "~"); - console.log(`[session-memory] Session context saved to ${relPath}`); + log.info(`Session context saved to ${relPath}`); } catch (err) { - console.error( - "[session-memory] Failed to save session memory:", - err instanceof Error ? err.message : String(err), - ); + if (err instanceof Error) { + log.error("Failed to save session memory", { + errorName: err.name, + errorMessage: err.message, + stack: err.stack, + }); + } else { + log.error("Failed to save session memory", { error: String(err) }); + } } }; diff --git a/src/hooks/llm-slug-generator.ts b/src/hooks/llm-slug-generator.ts index 95161b66b..67fdfe4c8 100644 --- a/src/hooks/llm-slug-generator.ts +++ b/src/hooks/llm-slug-generator.ts @@ -41,6 +41,7 @@ Reply with ONLY the slug, nothing else. Examples: "vendor-pitch", "api-design", const result = await runEmbeddedPiAgent({ sessionId: `slug-generator-${Date.now()}`, sessionKey: "temp:slug-generator", + agentId, sessionFile: tempSessionFile, workspaceDir, agentDir, diff --git a/src/logging/redact-identifier.ts b/src/logging/redact-identifier.ts new file mode 100644 index 000000000..0ffdfb55d --- /dev/null +++ b/src/logging/redact-identifier.ts @@ -0,0 +1,14 @@ +import crypto from "node:crypto"; + +export function sha256HexPrefix(value: string, len = 12): string { + const safeLen = Number.isFinite(len) ? 
Math.max(1, Math.floor(len)) : 12; + return crypto.createHash("sha256").update(value).digest("hex").slice(0, safeLen); +} + +export function redactIdentifier(value: string | undefined, opts?: { len?: number }): string { + const trimmed = value?.trim(); + if (!trimmed) { + return "-"; + } + return `sha256:${sha256HexPrefix(trimmed, opts?.len ?? 12)}`; +} diff --git a/src/memory/batch-voyage.test.ts b/src/memory/batch-voyage.test.ts new file mode 100644 index 000000000..8e9e374f5 --- /dev/null +++ b/src/memory/batch-voyage.test.ts @@ -0,0 +1,174 @@ +import { ReadableStream } from "node:stream/web"; +import { afterEach, describe, expect, it, vi } from "vitest"; +import type { VoyageBatchOutputLine, VoyageBatchRequest } from "./batch-voyage.js"; +import type { VoyageEmbeddingClient } from "./embeddings-voyage.js"; + +// Mock internal.js if needed, but runWithConcurrency is simple enough to keep real. +// We DO need to mock retryAsync to avoid actual delays/retries logic complicating tests +vi.mock("../infra/retry.js", () => ({ + retryAsync: async (fn: () => Promise) => fn(), +})); + +describe("runVoyageEmbeddingBatches", () => { + afterEach(() => { + vi.resetAllMocks(); + vi.unstubAllGlobals(); + }); + + const mockClient: VoyageEmbeddingClient = { + baseUrl: "https://api.voyageai.com/v1", + headers: { Authorization: "Bearer test-key" }, + model: "voyage-4-large", + }; + + const mockRequests: VoyageBatchRequest[] = [ + { custom_id: "req-1", body: { input: "text1" } }, + { custom_id: "req-2", body: { input: "text2" } }, + ]; + + it("successfully submits batch, waits, and streams results", async () => { + const fetchMock = vi.fn(); + vi.stubGlobal("fetch", fetchMock); + + // Sequence of fetch calls: + // 1. Upload file + fetchMock.mockResolvedValueOnce({ + ok: true, + json: async () => ({ id: "file-123" }), + }); + + // 2. Create batch + fetchMock.mockResolvedValueOnce({ + ok: true, + json: async () => ({ id: "batch-abc", status: "pending" }), + }); + + // 3. 
Poll status (pending) - Optional depending on wait loop, let's say it finishes immediately for this test + // Actually the code does: initial check (if completed) -> wait loop. + // If create returns "pending", it enters waitForVoyageBatch. + // waitForVoyageBatch fetches status. + + // 3. Poll status (completed) + fetchMock.mockResolvedValueOnce({ + ok: true, + json: async () => ({ + id: "batch-abc", + status: "completed", + output_file_id: "file-out-999", + }), + }); + + // 4. Download content (Streaming) + const outputLines: VoyageBatchOutputLine[] = [ + { + custom_id: "req-1", + response: { status_code: 200, body: { data: [{ embedding: [0.1, 0.1] }] } }, + }, + { + custom_id: "req-2", + response: { status_code: 200, body: { data: [{ embedding: [0.2, 0.2] }] } }, + }, + ]; + + // Create a stream that emits the NDJSON lines + const stream = new ReadableStream({ + start(controller) { + const text = outputLines.map((l) => JSON.stringify(l)).join("\n"); + controller.enqueue(new TextEncoder().encode(text)); + controller.close(); + }, + }); + + fetchMock.mockResolvedValueOnce({ + ok: true, + body: stream, + }); + + const { runVoyageEmbeddingBatches } = await import("./batch-voyage.js"); + + const results = await runVoyageEmbeddingBatches({ + client: mockClient, + agentId: "agent-1", + requests: mockRequests, + wait: true, + pollIntervalMs: 1, // fast poll + timeoutMs: 1000, + concurrency: 1, + }); + + expect(results.size).toBe(2); + expect(results.get("req-1")).toEqual([0.1, 0.1]); + expect(results.get("req-2")).toEqual([0.2, 0.2]); + + // Verify calls + expect(fetchMock).toHaveBeenCalledTimes(4); + + // Verify File Upload + expect(fetchMock.mock.calls[0][0]).toContain("/files"); + const uploadBody = fetchMock.mock.calls[0][1].body as FormData; + expect(uploadBody).toBeInstanceOf(FormData); + expect(uploadBody.get("purpose")).toBe("batch"); + + // Verify Batch Create + expect(fetchMock.mock.calls[1][0]).toContain("/batches"); + const createBody = 
JSON.parse(fetchMock.mock.calls[1][1].body); + expect(createBody.input_file_id).toBe("file-123"); + expect(createBody.completion_window).toBe("12h"); + expect(createBody.request_params).toEqual({ + model: "voyage-4-large", + input_type: "document", + }); + + // Verify Content Fetch + expect(fetchMock.mock.calls[3][0]).toContain("/files/file-out-999/content"); + }); + + it("handles empty lines and stream chunks correctly", async () => { + const fetchMock = vi.fn(); + vi.stubGlobal("fetch", fetchMock); + + // 1. Upload + fetchMock.mockResolvedValueOnce({ ok: true, json: async () => ({ id: "f1" }) }); + // 2. Create (completed immediately) + fetchMock.mockResolvedValueOnce({ + ok: true, + json: async () => ({ id: "b1", status: "completed", output_file_id: "out1" }), + }); + // 3. Download Content (Streaming with chunks and newlines) + const stream = new ReadableStream({ + start(controller) { + const line1 = JSON.stringify({ + custom_id: "req-1", + response: { body: { data: [{ embedding: [1] }] } }, + }); + const line2 = JSON.stringify({ + custom_id: "req-2", + response: { body: { data: [{ embedding: [2] }] } }, + }); + + // Split across chunks + controller.enqueue(new TextEncoder().encode(line1 + "\n")); + controller.enqueue(new TextEncoder().encode("\n")); // empty line + controller.enqueue(new TextEncoder().encode(line2)); // no newline at EOF + controller.close(); + }, + }); + + fetchMock.mockResolvedValueOnce({ ok: true, body: stream }); + + const { runVoyageEmbeddingBatches } = await import("./batch-voyage.js"); + + const results = await runVoyageEmbeddingBatches({ + client: mockClient, + agentId: "a1", + requests: mockRequests, + wait: true, + pollIntervalMs: 1, + timeoutMs: 1000, + concurrency: 1, + }); + + expect(results.get("req-1")).toEqual([1]); + expect(results.get("req-2")).toEqual([2]); + }); +}); diff --git a/src/memory/batch-voyage.ts b/src/memory/batch-voyage.ts new file mode 100644 index 000000000..7b1129948 --- /dev/null +++ 
b/src/memory/batch-voyage.ts @@ -0,0 +1,363 @@ +import { createInterface } from "node:readline"; +import { Readable } from "node:stream"; +import type { VoyageEmbeddingClient } from "./embeddings-voyage.js"; +import { retryAsync } from "../infra/retry.js"; +import { hashText, runWithConcurrency } from "./internal.js"; + +/** + * Voyage Batch API Input Line format. + * See: https://docs.voyageai.com/docs/batch-inference + */ +export type VoyageBatchRequest = { + custom_id: string; + body: { + input: string | string[]; + }; +}; + +export type VoyageBatchStatus = { + id?: string; + status?: string; + output_file_id?: string | null; + error_file_id?: string | null; +}; + +export type VoyageBatchOutputLine = { + custom_id?: string; + response?: { + status_code?: number; + body?: { + data?: Array<{ embedding?: number[]; index?: number }>; + error?: { message?: string }; + }; + }; + error?: { message?: string }; +}; + +export const VOYAGE_BATCH_ENDPOINT = "/v1/embeddings"; +const VOYAGE_BATCH_COMPLETION_WINDOW = "12h"; +const VOYAGE_BATCH_MAX_REQUESTS = 50000; + +function getVoyageBaseUrl(client: VoyageEmbeddingClient): string { + return client.baseUrl?.replace(/\/$/, "") ?? ""; +} + +function getVoyageHeaders( + client: VoyageEmbeddingClient, + params: { json: boolean }, +): Record { + const headers = client.headers ? 
{ ...client.headers } : {}; + if (params.json) { + if (!headers["Content-Type"] && !headers["content-type"]) { + headers["Content-Type"] = "application/json"; + } + } else { + delete headers["Content-Type"]; + delete headers["content-type"]; + } + return headers; +} + +function splitVoyageBatchRequests(requests: VoyageBatchRequest[]): VoyageBatchRequest[][] { + if (requests.length <= VOYAGE_BATCH_MAX_REQUESTS) return [requests]; + const groups: VoyageBatchRequest[][] = []; + for (let i = 0; i < requests.length; i += VOYAGE_BATCH_MAX_REQUESTS) { + groups.push(requests.slice(i, i + VOYAGE_BATCH_MAX_REQUESTS)); + } + return groups; +} + +async function submitVoyageBatch(params: { + client: VoyageEmbeddingClient; + requests: VoyageBatchRequest[]; + agentId: string; +}): Promise { + const baseUrl = getVoyageBaseUrl(params.client); + const jsonl = params.requests.map((request) => JSON.stringify(request)).join("\n"); + const form = new FormData(); + form.append("purpose", "batch"); + form.append( + "file", + new Blob([jsonl], { type: "application/jsonl" }), + `memory-embeddings.${hashText(String(Date.now()))}.jsonl`, + ); + + // 1. Upload file using Voyage Files API + const fileRes = await fetch(`${baseUrl}/files`, { + method: "POST", + headers: getVoyageHeaders(params.client, { json: false }), + body: form, + }); + if (!fileRes.ok) { + const text = await fileRes.text(); + throw new Error(`voyage batch file upload failed: ${fileRes.status} ${text}`); + } + const filePayload = (await fileRes.json()) as { id?: string }; + if (!filePayload.id) { + throw new Error("voyage batch file upload failed: missing file id"); + } + + // 2. 
Create batch job using Voyage Batches API + const batchRes = await retryAsync( + async () => { + const res = await fetch(`${baseUrl}/batches`, { + method: "POST", + headers: getVoyageHeaders(params.client, { json: true }), + body: JSON.stringify({ + input_file_id: filePayload.id, + endpoint: VOYAGE_BATCH_ENDPOINT, + completion_window: VOYAGE_BATCH_COMPLETION_WINDOW, + request_params: { + model: params.client.model, + input_type: "document", + }, + metadata: { + source: "clawdbot-memory", + agent: params.agentId, + }, + }), + }); + if (!res.ok) { + const text = await res.text(); + const err = new Error(`voyage batch create failed: ${res.status} ${text}`) as Error & { + status?: number; + }; + err.status = res.status; + throw err; + } + return res; + }, + { + attempts: 3, + minDelayMs: 300, + maxDelayMs: 2000, + jitter: 0.2, + shouldRetry: (err) => { + const status = (err as { status?: number }).status; + return status === 429 || (typeof status === "number" && status >= 500); + }, + }, + ); + return (await batchRes.json()) as VoyageBatchStatus; +} + +async function fetchVoyageBatchStatus(params: { + client: VoyageEmbeddingClient; + batchId: string; +}): Promise { + const baseUrl = getVoyageBaseUrl(params.client); + const res = await fetch(`${baseUrl}/batches/${params.batchId}`, { + headers: getVoyageHeaders(params.client, { json: true }), + }); + if (!res.ok) { + const text = await res.text(); + throw new Error(`voyage batch status failed: ${res.status} ${text}`); + } + return (await res.json()) as VoyageBatchStatus; +} + +async function readVoyageBatchError(params: { + client: VoyageEmbeddingClient; + errorFileId: string; +}): Promise { + try { + const baseUrl = getVoyageBaseUrl(params.client); + const res = await fetch(`${baseUrl}/files/${params.errorFileId}/content`, { + headers: getVoyageHeaders(params.client, { json: true }), + }); + if (!res.ok) { + const text = await res.text(); + throw new Error(`voyage batch error file content failed: ${res.status} 
${text}`); + } + const text = await res.text(); + if (!text.trim()) return undefined; + const lines = text + .split("\n") + .map((line) => line.trim()) + .filter(Boolean) + .map((line) => JSON.parse(line) as VoyageBatchOutputLine); + const first = lines.find((line) => line.error?.message || line.response?.body?.error); + const message = + first?.error?.message ?? + (typeof first?.response?.body?.error?.message === "string" + ? first?.response?.body?.error?.message + : undefined); + return message; + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + return message ? `error file unavailable: ${message}` : undefined; + } +} + +async function waitForVoyageBatch(params: { + client: VoyageEmbeddingClient; + batchId: string; + wait: boolean; + pollIntervalMs: number; + timeoutMs: number; + debug?: (message: string, data?: Record) => void; + initial?: VoyageBatchStatus; +}): Promise<{ outputFileId: string; errorFileId?: string }> { + const start = Date.now(); + let current: VoyageBatchStatus | undefined = params.initial; + while (true) { + const status = + current ?? + (await fetchVoyageBatchStatus({ + client: params.client, + batchId: params.batchId, + })); + const state = status.status ?? "unknown"; + if (state === "completed") { + if (!status.output_file_id) { + throw new Error(`voyage batch ${params.batchId} completed without output file`); + } + return { + outputFileId: status.output_file_id, + errorFileId: status.error_file_id ?? undefined, + }; + } + if (["failed", "expired", "cancelled", "canceled"].includes(state)) { + const detail = status.error_file_id + ? await readVoyageBatchError({ client: params.client, errorFileId: status.error_file_id }) + : undefined; + const suffix = detail ? 
`: ${detail}` : ""; + throw new Error(`voyage batch ${params.batchId} ${state}${suffix}`); + } + if (!params.wait) { + throw new Error(`voyage batch ${params.batchId} still ${state}; wait disabled`); + } + if (Date.now() - start > params.timeoutMs) { + throw new Error(`voyage batch ${params.batchId} timed out after ${params.timeoutMs}ms`); + } + params.debug?.(`voyage batch ${params.batchId} ${state}; waiting ${params.pollIntervalMs}ms`); + await new Promise((resolve) => setTimeout(resolve, params.pollIntervalMs)); + current = undefined; + } +} + +export async function runVoyageEmbeddingBatches(params: { + client: VoyageEmbeddingClient; + agentId: string; + requests: VoyageBatchRequest[]; + wait: boolean; + pollIntervalMs: number; + timeoutMs: number; + concurrency: number; + debug?: (message: string, data?: Record) => void; +}): Promise> { + if (params.requests.length === 0) return new Map(); + const groups = splitVoyageBatchRequests(params.requests); + const byCustomId = new Map(); + + const tasks = groups.map((group, groupIndex) => async () => { + const batchInfo = await submitVoyageBatch({ + client: params.client, + requests: group, + agentId: params.agentId, + }); + if (!batchInfo.id) { + throw new Error("voyage batch create failed: missing batch id"); + } + + params.debug?.("memory embeddings: voyage batch created", { + batchId: batchInfo.id, + status: batchInfo.status, + group: groupIndex + 1, + groups: groups.length, + requests: group.length, + }); + + if (!params.wait && batchInfo.status !== "completed") { + throw new Error( + `voyage batch ${batchInfo.id} submitted; enable remote.batch.wait to await completion`, + ); + } + + const completed = + batchInfo.status === "completed" + ? { + outputFileId: batchInfo.output_file_id ?? "", + errorFileId: batchInfo.error_file_id ?? 
undefined, + } + : await waitForVoyageBatch({ + client: params.client, + batchId: batchInfo.id, + wait: params.wait, + pollIntervalMs: params.pollIntervalMs, + timeoutMs: params.timeoutMs, + debug: params.debug, + initial: batchInfo, + }); + if (!completed.outputFileId) { + throw new Error(`voyage batch ${batchInfo.id} completed without output file`); + } + + const baseUrl = getVoyageBaseUrl(params.client); + const contentRes = await fetch(`${baseUrl}/files/${completed.outputFileId}/content`, { + headers: getVoyageHeaders(params.client, { json: true }), + }); + if (!contentRes.ok) { + const text = await contentRes.text(); + throw new Error(`voyage batch file content failed: ${contentRes.status} ${text}`); + } + + const errors: string[] = []; + const remaining = new Set(group.map((request) => request.custom_id)); + + if (contentRes.body) { + const reader = createInterface({ + input: Readable.fromWeb(contentRes.body as any), + terminal: false, + }); + + for await (const rawLine of reader) { + if (!rawLine.trim()) continue; + const line = JSON.parse(rawLine) as VoyageBatchOutputLine; + const customId = line.custom_id; + if (!customId) continue; + remaining.delete(customId); + if (line.error?.message) { + errors.push(`${customId}: ${line.error.message}`); + continue; + } + const response = line.response; + const statusCode = response?.status_code ?? 0; + if (statusCode >= 400) { + const message = + response?.body?.error?.message ?? + (typeof response?.body === "string" ? response.body : undefined) ?? + "unknown error"; + errors.push(`${customId}: ${message}`); + continue; + } + const data = response?.body?.data ?? []; + const embedding = data[0]?.embedding ?? 
[]; + if (embedding.length === 0) { + errors.push(`${customId}: empty embedding`); + continue; + } + byCustomId.set(customId, embedding); + } + } + + if (errors.length > 0) { + throw new Error(`voyage batch ${batchInfo.id} failed: ${errors.join("; ")}`); + } + if (remaining.size > 0) { + throw new Error(`voyage batch ${batchInfo.id} missing ${remaining.size} embedding responses`); + } + }); + + params.debug?.("memory embeddings: voyage batch submit", { + requests: params.requests.length, + groups: groups.length, + wait: params.wait, + concurrency: params.concurrency, + pollIntervalMs: params.pollIntervalMs, + timeoutMs: params.timeoutMs, + }); + + await runWithConcurrency(tasks, params.concurrency); + return byCustomId; +} diff --git a/src/memory/embeddings-voyage.test.ts b/src/memory/embeddings-voyage.test.ts new file mode 100644 index 000000000..0d626ccc7 --- /dev/null +++ b/src/memory/embeddings-voyage.test.ts @@ -0,0 +1,138 @@ +import { afterEach, describe, expect, it, vi } from "vitest"; + +vi.mock("../agents/model-auth.js", () => ({ + resolveApiKeyForProvider: vi.fn(), + requireApiKey: (auth: { apiKey?: string; mode?: string }, provider: string) => { + if (auth?.apiKey) return auth.apiKey; + throw new Error(`No API key resolved for provider "${provider}" (auth mode: ${auth?.mode}).`); + }, +})); + +const createFetchMock = () => + vi.fn(async () => ({ + ok: true, + status: 200, + json: async () => ({ data: [{ embedding: [0.1, 0.2, 0.3] }] }), + })) as unknown as typeof fetch; + +describe("voyage embedding provider", () => { + afterEach(() => { + vi.resetAllMocks(); + vi.resetModules(); + vi.unstubAllGlobals(); + }); + + it("configures client with correct defaults and headers", async () => { + const fetchMock = createFetchMock(); + vi.stubGlobal("fetch", fetchMock); + + const { createVoyageEmbeddingProvider } = await import("./embeddings-voyage.js"); + const authModule = await import("../agents/model-auth.js"); + + 
vi.mocked(authModule.resolveApiKeyForProvider).mockResolvedValue({ + apiKey: "voyage-key-123", + mode: "api-key", + source: "test", + }); + + const result = await createVoyageEmbeddingProvider({ + config: {} as never, + provider: "voyage", + model: "voyage-4-large", + fallback: "none", + }); + + await result.provider.embedQuery("test query"); + + expect(authModule.resolveApiKeyForProvider).toHaveBeenCalledWith( + expect.objectContaining({ provider: "voyage" }), + ); + + const [url, init] = fetchMock.mock.calls[0] ?? []; + expect(url).toBe("https://api.voyageai.com/v1/embeddings"); + + const headers = (init?.headers ?? {}) as Record; + expect(headers.Authorization).toBe("Bearer voyage-key-123"); + expect(headers["Content-Type"]).toBe("application/json"); + + const body = JSON.parse(init?.body as string); + expect(body).toEqual({ + model: "voyage-4-large", + input: ["test query"], + input_type: "query", + }); + }); + + it("respects remote overrides for baseUrl and apiKey", async () => { + const fetchMock = createFetchMock(); + vi.stubGlobal("fetch", fetchMock); + + const { createVoyageEmbeddingProvider } = await import("./embeddings-voyage.js"); + + const result = await createVoyageEmbeddingProvider({ + config: {} as never, + provider: "voyage", + model: "voyage-4-lite", + fallback: "none", + remote: { + baseUrl: "https://proxy.example.com", + apiKey: "remote-override-key", + headers: { "X-Custom": "123" }, + }, + }); + + await result.provider.embedQuery("test"); + + const [url, init] = fetchMock.mock.calls[0] ?? []; + expect(url).toBe("https://proxy.example.com/embeddings"); + + const headers = (init?.headers ?? 
{}) as Record; + expect(headers.Authorization).toBe("Bearer remote-override-key"); + expect(headers["X-Custom"]).toBe("123"); + }); + + it("passes input_type=document for embedBatch", async () => { + const fetchMock = vi.fn(async () => ({ + ok: true, + status: 200, + json: async () => ({ + data: [{ embedding: [0.1, 0.2] }, { embedding: [0.3, 0.4] }], + }), + })) as unknown as typeof fetch; + vi.stubGlobal("fetch", fetchMock); + + const { createVoyageEmbeddingProvider } = await import("./embeddings-voyage.js"); + const authModule = await import("../agents/model-auth.js"); + + vi.mocked(authModule.resolveApiKeyForProvider).mockResolvedValue({ + apiKey: "voyage-key-123", + mode: "api-key", + source: "test", + }); + + const result = await createVoyageEmbeddingProvider({ + config: {} as never, + provider: "voyage", + model: "voyage-4-large", + fallback: "none", + }); + + await result.provider.embedBatch(["doc1", "doc2"]); + + const [, init] = fetchMock.mock.calls[0] ?? []; + const body = JSON.parse(init?.body as string); + expect(body).toEqual({ + model: "voyage-4-large", + input: ["doc1", "doc2"], + input_type: "document", + }); + }); + + it("normalizes model names", async () => { + const { normalizeVoyageModel } = await import("./embeddings-voyage.js"); + expect(normalizeVoyageModel("voyage/voyage-large-2")).toBe("voyage-large-2"); + expect(normalizeVoyageModel("voyage-4-large")).toBe("voyage-4-large"); + expect(normalizeVoyageModel(" voyage-lite ")).toBe("voyage-lite"); + expect(normalizeVoyageModel("")).toBe("voyage-4-large"); // Default + }); +}); diff --git a/src/memory/embeddings-voyage.ts b/src/memory/embeddings-voyage.ts new file mode 100644 index 000000000..8850fca50 --- /dev/null +++ b/src/memory/embeddings-voyage.ts @@ -0,0 +1,92 @@ +import type { EmbeddingProvider, EmbeddingProviderOptions } from "./embeddings.js"; +import { requireApiKey, resolveApiKeyForProvider } from "../agents/model-auth.js"; + +export type VoyageEmbeddingClient = { + baseUrl: string; 
+ headers: Record; + model: string; +}; + +export const DEFAULT_VOYAGE_EMBEDDING_MODEL = "voyage-4-large"; +const DEFAULT_VOYAGE_BASE_URL = "https://api.voyageai.com/v1"; + +export function normalizeVoyageModel(model: string): string { + const trimmed = model.trim(); + if (!trimmed) return DEFAULT_VOYAGE_EMBEDDING_MODEL; + if (trimmed.startsWith("voyage/")) return trimmed.slice("voyage/".length); + return trimmed; +} + +export async function createVoyageEmbeddingProvider( + options: EmbeddingProviderOptions, +): Promise<{ provider: EmbeddingProvider; client: VoyageEmbeddingClient }> { + const client = await resolveVoyageEmbeddingClient(options); + const url = `${client.baseUrl.replace(/\/$/, "")}/embeddings`; + + const embed = async (input: string[], input_type?: "query" | "document"): Promise => { + if (input.length === 0) return []; + const body: { model: string; input: string[]; input_type?: "query" | "document" } = { + model: client.model, + input, + }; + if (input_type) body.input_type = input_type; + + const res = await fetch(url, { + method: "POST", + headers: client.headers, + body: JSON.stringify(body), + }); + if (!res.ok) { + const text = await res.text(); + throw new Error(`voyage embeddings failed: ${res.status} ${text}`); + } + const payload = (await res.json()) as { + data?: Array<{ embedding?: number[] }>; + }; + const data = payload.data ?? []; + return data.map((entry) => entry.embedding ?? []); + }; + + return { + provider: { + id: "voyage", + model: client.model, + embedQuery: async (text) => { + const [vec] = await embed([text], "query"); + return vec ?? []; + }, + embedBatch: async (texts) => embed(texts, "document"), + }, + client, + }; +} + +export async function resolveVoyageEmbeddingClient( + options: EmbeddingProviderOptions, +): Promise { + const remote = options.remote; + const remoteApiKey = remote?.apiKey?.trim(); + const remoteBaseUrl = remote?.baseUrl?.trim(); + + const apiKey = remoteApiKey + ? 
remoteApiKey + : requireApiKey( + await resolveApiKeyForProvider({ + provider: "voyage", + cfg: options.config, + agentDir: options.agentDir, + }), + "voyage", + ); + + const providerConfig = options.config.models?.providers?.voyage; + const baseUrl = remoteBaseUrl || providerConfig?.baseUrl?.trim() || DEFAULT_VOYAGE_BASE_URL; + const headerOverrides = Object.assign({}, providerConfig?.headers, remote?.headers); + const headers: Record = { + "Content-Type": "application/json", + Authorization: `Bearer ${apiKey}`, + ...headerOverrides, + }; + const model = normalizeVoyageModel(options.model); + return { baseUrl, headers, model }; +} diff --git a/src/memory/embeddings.ts b/src/memory/embeddings.ts index a2783a134..6b78c3d73 100644 --- a/src/memory/embeddings.ts +++ b/src/memory/embeddings.ts @@ -4,6 +4,7 @@ import type { OpenClawConfig } from "../config/config.js"; import { resolveUserPath } from "../utils.js"; import { createGeminiEmbeddingProvider, type GeminiEmbeddingClient } from "./embeddings-gemini.js"; import { createOpenAiEmbeddingProvider, type OpenAiEmbeddingClient } from "./embeddings-openai.js"; +import { createVoyageEmbeddingProvider, type VoyageEmbeddingClient } from "./embeddings-voyage.js"; import { importNodeLlamaCpp } from "./node-llama.js"; function sanitizeAndNormalizeEmbedding(vec: number[]): number[] { @@ -17,6 +18,7 @@ function sanitizeAndNormalizeEmbedding(vec: number[]): number[] { export type { GeminiEmbeddingClient } from "./embeddings-gemini.js"; export type { OpenAiEmbeddingClient } from "./embeddings-openai.js"; +export type { VoyageEmbeddingClient } from "./embeddings-voyage.js"; export type EmbeddingProvider = { id: string; @@ -27,24 +29,25 @@ export type EmbeddingProvider = { export type EmbeddingProviderResult = { provider: EmbeddingProvider; - requestedProvider: "openai" | "local" | "gemini" | "auto"; - fallbackFrom?: "openai" | "local" | "gemini"; + requestedProvider: "openai" | "local" | "gemini" | "voyage" | "auto"; + 
fallbackFrom?: "openai" | "local" | "gemini" | "voyage"; fallbackReason?: string; openAi?: OpenAiEmbeddingClient; gemini?: GeminiEmbeddingClient; + voyage?: VoyageEmbeddingClient; }; export type EmbeddingProviderOptions = { config: OpenClawConfig; agentDir?: string; - provider: "openai" | "local" | "gemini" | "auto"; + provider: "openai" | "local" | "gemini" | "voyage" | "auto"; remote?: { baseUrl?: string; apiKey?: string; headers?: Record; }; model: string; - fallback: "openai" | "gemini" | "local" | "none"; + fallback: "openai" | "gemini" | "local" | "voyage" | "none"; local?: { modelPath?: string; modelCacheDir?: string; @@ -128,7 +131,7 @@ export async function createEmbeddingProvider( const requestedProvider = options.provider; const fallback = options.fallback; - const createProvider = async (id: "openai" | "local" | "gemini") => { + const createProvider = async (id: "openai" | "local" | "gemini" | "voyage") => { if (id === "local") { const provider = await createLocalEmbeddingProvider(options); return { provider }; @@ -137,11 +140,15 @@ export async function createEmbeddingProvider( const { provider, client } = await createGeminiEmbeddingProvider(options); return { provider, gemini: client }; } + if (id === "voyage") { + const { provider, client } = await createVoyageEmbeddingProvider(options); + return { provider, voyage: client }; + } const { provider, client } = await createOpenAiEmbeddingProvider(options); return { provider, openAi: client }; }; - const formatPrimaryError = (err: unknown, provider: "openai" | "local" | "gemini") => + const formatPrimaryError = (err: unknown, provider: "openai" | "local" | "gemini" | "voyage") => provider === "local" ? 
formatLocalSetupError(err) : formatError(err); if (requestedProvider === "auto") { @@ -157,7 +164,7 @@ export async function createEmbeddingProvider( } } - for (const provider of ["openai", "gemini"] as const) { + for (const provider of ["openai", "gemini", "voyage"] as const) { try { const result = await createProvider(provider); return { ...result, requestedProvider }; @@ -240,6 +247,7 @@ function formatLocalSetupError(err: unknown): string { : null, "3) If you use pnpm: pnpm approve-builds (select node-llama-cpp), then pnpm rebuild node-llama-cpp", 'Or set agents.defaults.memorySearch.provider = "openai" (remote).', + 'Or set agents.defaults.memorySearch.provider = "voyage" (remote).', ] .filter(Boolean) .join("\n"); diff --git a/src/memory/internal.ts b/src/memory/internal.ts index cbdb7c6c6..5cb1bc8a2 100644 --- a/src/memory/internal.ts +++ b/src/memory/internal.ts @@ -275,3 +275,33 @@ export function cosineSimilarity(a: number[], b: number[]): number { } return dot / (Math.sqrt(normA) * Math.sqrt(normB)); } + +export async function runWithConcurrency( + tasks: Array<() => Promise>, + limit: number, +): Promise { + if (tasks.length === 0) return []; + const resolvedLimit = Math.max(1, Math.min(limit, tasks.length)); + const results: T[] = Array.from({ length: tasks.length }); + let next = 0; + let firstError: unknown = null; + + const workers = Array.from({ length: resolvedLimit }, async () => { + while (true) { + if (firstError) return; + const index = next; + next += 1; + if (index >= tasks.length) return; + try { + results[index] = await tasks[index](); + } catch (err) { + firstError = err; + return; + } + } + }); + + await Promise.allSettled(workers); + if (firstError) throw firstError; + return results; +} diff --git a/src/memory/manager.ts b/src/memory/manager.ts index 3dd290b10..b772d3fda 100644 --- a/src/memory/manager.ts +++ b/src/memory/manager.ts @@ -26,14 +26,17 @@ import { type OpenAiBatchRequest, runOpenAiEmbeddingBatches, } from 
"./batch-openai.js"; +import { type VoyageBatchRequest, runVoyageEmbeddingBatches } from "./batch-voyage.js"; import { DEFAULT_GEMINI_EMBEDDING_MODEL } from "./embeddings-gemini.js"; import { DEFAULT_OPENAI_EMBEDDING_MODEL } from "./embeddings-openai.js"; +import { DEFAULT_VOYAGE_EMBEDDING_MODEL } from "./embeddings-voyage.js"; import { createEmbeddingProvider, type EmbeddingProvider, type EmbeddingProviderResult, type GeminiEmbeddingClient, type OpenAiEmbeddingClient, + type VoyageEmbeddingClient, } from "./embeddings.js"; import { bm25RankToScore, buildFtsQuery, mergeHybridResults } from "./hybrid.js"; import { @@ -47,6 +50,7 @@ import { type MemoryChunk, type MemoryFileEntry, parseEmbedding, + runWithConcurrency, } from "./internal.js"; import { searchKeyword, searchVector } from "./manager-search.js"; import { ensureMemoryIndexSchema } from "./memory-schema.js"; @@ -112,11 +116,12 @@ export class MemoryIndexManager implements MemorySearchManager { private readonly workspaceDir: string; private readonly settings: ResolvedMemorySearchConfig; private provider: EmbeddingProvider; - private readonly requestedProvider: "openai" | "local" | "gemini" | "auto"; - private fallbackFrom?: "openai" | "local" | "gemini"; + private readonly requestedProvider: "openai" | "local" | "gemini" | "voyage" | "auto"; + private fallbackFrom?: "openai" | "local" | "gemini" | "voyage"; private fallbackReason?: string; private openAi?: OpenAiEmbeddingClient; private gemini?: GeminiEmbeddingClient; + private voyage?: VoyageEmbeddingClient; private batch: { enabled: boolean; wait: boolean; @@ -217,6 +222,7 @@ export class MemoryIndexManager implements MemorySearchManager { this.fallbackReason = params.providerResult.fallbackReason; this.openAi = params.providerResult.openAi; this.gemini = params.providerResult.gemini; + this.voyage = params.providerResult.voyage; this.sources = new Set(params.settings.sources); this.db = this.openDatabase(); this.providerKey = this.computeProviderKey(); @@ 
-1109,7 +1115,7 @@ export class MemoryIndexManager implements MemorySearchManager { }); } }); - await this.runWithConcurrency(tasks, this.getIndexConcurrency()); + await runWithConcurrency(tasks, this.getIndexConcurrency()); const staleRows = this.db .prepare(`SELECT path FROM files WHERE source = ?`) @@ -1206,7 +1212,7 @@ export class MemoryIndexManager implements MemorySearchManager { }); } }); - await this.runWithConcurrency(tasks, this.getIndexConcurrency()); + await runWithConcurrency(tasks, this.getIndexConcurrency()); const staleRows = this.db .prepare(`SELECT path FROM files WHERE source = ?`) @@ -1346,7 +1352,8 @@ export class MemoryIndexManager implements MemorySearchManager { const enabled = Boolean( batch?.enabled && ((this.openAi && this.provider.id === "openai") || - (this.gemini && this.provider.id === "gemini")), + (this.gemini && this.provider.id === "gemini") || + (this.voyage && this.provider.id === "voyage")), ); return { enabled, @@ -1365,14 +1372,16 @@ export class MemoryIndexManager implements MemorySearchManager { if (this.fallbackFrom) { return false; } - const fallbackFrom = this.provider.id as "openai" | "gemini" | "local"; + const fallbackFrom = this.provider.id as "openai" | "gemini" | "local" | "voyage"; const fallbackModel = fallback === "gemini" ? DEFAULT_GEMINI_EMBEDDING_MODEL : fallback === "openai" ? DEFAULT_OPENAI_EMBEDDING_MODEL - : this.settings.model; + : fallback === "voyage" + ? 
DEFAULT_VOYAGE_EMBEDDING_MODEL + : this.settings.model; const fallbackResult = await createEmbeddingProvider({ config: this.cfg, @@ -1389,6 +1398,7 @@ export class MemoryIndexManager implements MemorySearchManager { this.provider = fallbackResult.provider; this.openAi = fallbackResult.openAi; this.gemini = fallbackResult.gemini; + this.voyage = fallbackResult.voyage; this.providerKey = this.computeProviderKey(); this.batch = this.resolveBatchConfig(); log.warn(`memory embeddings: switched to fallback provider (${fallback})`, { reason }); @@ -1865,9 +1875,82 @@ export class MemoryIndexManager implements MemorySearchManager { if (this.provider.id === "gemini" && this.gemini) { return this.embedChunksWithGeminiBatch(chunks, entry, source); } + if (this.provider.id === "voyage" && this.voyage) { + return this.embedChunksWithVoyageBatch(chunks, entry, source); + } return this.embedChunksInBatches(chunks); } + private async embedChunksWithVoyageBatch( + chunks: MemoryChunk[], + entry: MemoryFileEntry | SessionFileEntry, + source: MemorySource, + ): Promise { + const voyage = this.voyage; + if (!voyage) { + return this.embedChunksInBatches(chunks); + } + if (chunks.length === 0) return []; + const cached = this.loadEmbeddingCache(chunks.map((chunk) => chunk.hash)); + const embeddings: number[][] = Array.from({ length: chunks.length }, () => []); + const missing: Array<{ index: number; chunk: MemoryChunk }> = []; + + for (let i = 0; i < chunks.length; i += 1) { + const chunk = chunks[i]; + const hit = chunk?.hash ? 
cached.get(chunk.hash) : undefined; + if (hit && hit.length > 0) { + embeddings[i] = hit; + } else if (chunk) { + missing.push({ index: i, chunk }); + } + } + + if (missing.length === 0) return embeddings; + + const requests: VoyageBatchRequest[] = []; + const mapping = new Map(); + for (const item of missing) { + const chunk = item.chunk; + const customId = hashText( + `${source}:${entry.path}:${chunk.startLine}:${chunk.endLine}:${chunk.hash}:${item.index}`, + ); + mapping.set(customId, { index: item.index, hash: chunk.hash }); + requests.push({ + custom_id: customId, + body: { + input: chunk.text, + }, + }); + } + const batchResult = await this.runBatchWithFallback({ + provider: "voyage", + run: async () => + await runVoyageEmbeddingBatches({ + client: voyage, + agentId: this.agentId, + requests, + wait: this.batch.wait, + concurrency: this.batch.concurrency, + pollIntervalMs: this.batch.pollIntervalMs, + timeoutMs: this.batch.timeoutMs, + debug: (message, data) => log.debug(message, { ...data, source, chunks: chunks.length }), + }), + fallback: async () => await this.embedChunksInBatches(chunks), + }); + if (Array.isArray(batchResult)) return batchResult; + const byCustomId = batchResult; + + const toCache: Array<{ hash: string; embedding: number[] }> = []; + for (const [customId, embedding] of byCustomId.entries()) { + const mapped = mapping.get(customId); + if (!mapped) continue; + embeddings[mapped.index] = embedding; + toCache.push({ hash: mapped.hash, embedding }); + } + this.upsertEmbeddingCache(toCache); + return embeddings; + } + private async embedChunksWithOpenAiBatch( chunks: MemoryChunk[], entry: MemoryFileEntry | SessionFileEntry, @@ -2108,41 +2191,6 @@ export class MemoryIndexManager implements MemorySearchManager { } } - private async runWithConcurrency(tasks: Array<() => Promise>, limit: number): Promise { - if (tasks.length === 0) { - return []; - } - const resolvedLimit = Math.max(1, Math.min(limit, tasks.length)); - const results: T[] = 
Array.from({ length: tasks.length }); - let next = 0; - let firstError: unknown = null; - - const workers = Array.from({ length: resolvedLimit }, async () => { - while (true) { - if (firstError) { - return; - } - const index = next; - next += 1; - if (index >= tasks.length) { - return; - } - try { - results[index] = await tasks[index](); - } catch (err) { - firstError = err; - return; - } - } - }); - - await Promise.allSettled(workers); - if (firstError) { - throw firstError; - } - return results; - } - private async withBatchFailureLock(fn: () => Promise): Promise { let release: () => void; const wait = this.batchFailureLock; diff --git a/src/routing/session-key.test.ts b/src/routing/session-key.test.ts new file mode 100644 index 000000000..6c3539f73 --- /dev/null +++ b/src/routing/session-key.test.ts @@ -0,0 +1,25 @@ +import { describe, expect, it } from "vitest"; +import { classifySessionKeyShape } from "./session-key.js"; + +describe("classifySessionKeyShape", () => { + it("classifies empty keys as missing", () => { + expect(classifySessionKeyShape(undefined)).toBe("missing"); + expect(classifySessionKeyShape(" ")).toBe("missing"); + }); + + it("classifies valid agent keys", () => { + expect(classifySessionKeyShape("agent:main:main")).toBe("agent"); + expect(classifySessionKeyShape("agent:research:subagent:worker")).toBe("agent"); + }); + + it("classifies malformed agent keys", () => { + expect(classifySessionKeyShape("agent::broken")).toBe("malformed_agent"); + expect(classifySessionKeyShape("agent:main")).toBe("malformed_agent"); + }); + + it("treats non-agent legacy or alias keys as non-malformed", () => { + expect(classifySessionKeyShape("main")).toBe("legacy_or_alias"); + expect(classifySessionKeyShape("custom-main")).toBe("legacy_or_alias"); + expect(classifySessionKeyShape("subagent:worker")).toBe("legacy_or_alias"); + }); +}); diff --git a/src/routing/session-key.ts b/src/routing/session-key.ts index 8f2b4ab0d..ad1d16431 100644 --- 
a/src/routing/session-key.ts +++ b/src/routing/session-key.ts @@ -10,6 +10,7 @@ export { export const DEFAULT_AGENT_ID = "main"; export const DEFAULT_MAIN_KEY = "main"; export const DEFAULT_ACCOUNT_ID = "default"; +export type SessionKeyShape = "missing" | "agent" | "legacy_or_alias" | "malformed_agent"; // Pre-compiled regex const VALID_ID_RE = /^[a-z0-9][a-z0-9_-]{0,63}$/i; @@ -58,6 +59,17 @@ export function resolveAgentIdFromSessionKey(sessionKey: string | undefined | nu return normalizeAgentId(parsed?.agentId ?? DEFAULT_AGENT_ID); } +export function classifySessionKeyShape(sessionKey: string | undefined | null): SessionKeyShape { + const raw = (sessionKey ?? "").trim(); + if (!raw) { + return "missing"; + } + if (parseAgentSessionKey(raw)) { + return "agent"; + } + return raw.toLowerCase().startsWith("agent:") ? "malformed_agent" : "legacy_or_alias"; +} + export function normalizeAgentId(value: string | undefined | null): string { const trimmed = (value ?? "").trim(); if (!trimmed) { diff --git a/src/telegram/bot.ts b/src/telegram/bot.ts index 884e222b1..cdb811265 100644 --- a/src/telegram/bot.ts +++ b/src/telegram/bot.ts @@ -128,6 +128,9 @@ export function createTelegramBot(opts: TelegramBotOptions) { network: telegramCfg.network, }); const shouldProvideFetch = Boolean(fetchImpl); + // grammY's ApiClientOptions types still track `node-fetch` types; Node 22+ global fetch + // (undici) is structurally compatible at runtime but not assignable in TS. + const fetchForClient = fetchImpl as unknown as NonNullable; const timeoutSeconds = typeof telegramCfg?.timeoutSeconds === "number" && Number.isFinite(telegramCfg.timeoutSeconds) ? Math.max(1, Math.floor(telegramCfg.timeoutSeconds)) @@ -135,7 +138,7 @@ export function createTelegramBot(opts: TelegramBotOptions) { const client: ApiClientOptions | undefined = shouldProvideFetch || timeoutSeconds ? { - ...(shouldProvideFetch && fetchImpl ? { fetch: fetchImpl } : {}), + ...(shouldProvideFetch && fetchImpl ? 
{ fetch: fetchForClient } : {}), ...(timeoutSeconds ? { timeoutSeconds } : {}), } : undefined; diff --git a/src/utils.test.ts b/src/utils.test.ts index 2b0d95e6b..3ae0be47c 100644 --- a/src/utils.test.ts +++ b/src/utils.test.ts @@ -79,15 +79,12 @@ describe("jidToE164", () => { it("maps @lid using reverse mapping file", () => { const mappingPath = path.join(CONFIG_DIR, "credentials", "lid-mapping-123_reverse.json"); const original = fs.readFileSync; - const spy = vi - .spyOn(fs, "readFileSync") - // oxlint-disable-next-line typescript/no-explicit-any - .mockImplementation((path: any, encoding?: any) => { - if (path === mappingPath) { - return `"5551234"`; - } - return original(path, encoding); - }); + const spy = vi.spyOn(fs, "readFileSync").mockImplementation((...args) => { + if (args[0] === mappingPath) { + return `"5551234"`; + } + return original(...args); + }); expect(jidToE164("123@lid")).toBe("+5551234"); spy.mockRestore(); }); @@ -167,4 +164,9 @@ describe("resolveUserPath", () => { it("resolves relative paths", () => { expect(resolveUserPath("tmp/dir")).toBe(path.resolve("tmp/dir")); }); + + it("keeps blank paths blank", () => { + expect(resolveUserPath("")).toBe(""); + expect(resolveUserPath(" ")).toBe(""); + }); }); diff --git a/tsconfig.plugin-sdk.dts.json b/tsconfig.plugin-sdk.dts.json new file mode 100644 index 000000000..4883a7809 --- /dev/null +++ b/tsconfig.plugin-sdk.dts.json @@ -0,0 +1,15 @@ +{ + "extends": "./tsconfig.json", + "compilerOptions": { + "declaration": true, + "declarationMap": false, + "emitDeclarationOnly": true, + "noEmit": false, + "noEmitOnError": true, + "outDir": "dist/plugin-sdk", + "rootDir": "src", + "tsBuildInfoFile": "dist/plugin-sdk/.tsbuildinfo" + }, + "include": ["src/plugin-sdk/index.ts", "src/types/**/*.d.ts"], + "exclude": ["node_modules", "dist", "src/**/*.test.ts"] +} diff --git a/tsdown.config.ts b/tsdown.config.ts index 137ce4b6a..d4c11bd53 100644 --- a/tsdown.config.ts +++ b/tsdown.config.ts @@ -18,7 +18,6 @@ 
export default defineConfig([ platform: "node", }, { - dts: true, entry: "src/plugin-sdk/index.ts", outDir: "dist/plugin-sdk", env, diff --git a/ui/package.json b/ui/package.json index dbf223ffc..f9eb7e0d1 100644 --- a/ui/package.json +++ b/ui/package.json @@ -17,7 +17,7 @@ }, "devDependencies": { "@vitest/browser-playwright": "4.0.18", - "playwright": "^1.58.1", + "playwright": "^1.58.2", "vitest": "4.0.18" } } diff --git a/ui/src/styles/components.css b/ui/src/styles/components.css index 5ab1858b8..f0438eeec 100644 --- a/ui/src/styles/components.css +++ b/ui/src/styles/components.css @@ -681,6 +681,138 @@ width: 100%; } +/* Cron jobs: allow long payload/state text and keep action buttons inside the card. */ +.cron-job-payload, +.cron-job-agent, +.cron-job-state { + overflow-wrap: anywhere; + word-break: break-word; +} + +.cron-job .list-title { + font-weight: 600; + font-size: 15px; + letter-spacing: -0.015em; +} + +.cron-job { + grid-template-columns: minmax(0, 1fr) minmax(240px, 300px); + grid-template-areas: + "main meta" + "footer footer"; + row-gap: 10px; +} + +.cron-job .list-main { + grid-area: main; +} + +.cron-job .list-meta { + grid-area: meta; + min-width: 240px; + gap: 8px; +} + +.cron-job-footer { + grid-area: footer; + display: flex; + justify-content: space-between; + align-items: center; + gap: 12px; + border-top: 1px solid var(--border); + padding-top: 10px; +} + +.cron-job-chips { + flex: 1 1 auto; +} + +.cron-job-detail { + display: grid; + gap: 3px; + margin-top: 2px; +} + +.cron-job-detail-label { + color: var(--muted); + font-size: 11px; + font-weight: 600; + letter-spacing: 0.03em; + text-transform: uppercase; +} + +.cron-job-detail-value { + font-size: 13px; + line-height: 1.35; +} + +.cron-job-state { + display: grid; + gap: 4px; +} + +.cron-job-state-row { + display: flex; + justify-content: space-between; + align-items: baseline; + gap: 10px; +} + +.cron-job-state-key { + color: var(--muted); + font-size: 10px; + font-weight: 600; + 
letter-spacing: 0.05em; + text-transform: uppercase; +} + +.cron-job-state-value { + color: var(--text); + font-size: 12px; + white-space: nowrap; +} + +.cron-job-status-pill { + font-size: 11px; + font-weight: 600; + border: 1px solid var(--border); + border-radius: var(--radius-full); + padding: 2px 8px; + text-transform: lowercase; +} + +.cron-job-status-ok { + color: var(--ok); + border-color: rgba(34, 197, 94, 0.35); + background: var(--ok-subtle); +} + +.cron-job-status-error { + color: var(--danger); + border-color: rgba(239, 68, 68, 0.35); + background: var(--danger-subtle); +} + +.cron-job-status-skipped { + color: var(--warn); + border-color: rgba(245, 158, 11, 0.35); + background: var(--warn-subtle); +} + +.cron-job-status-na { + color: var(--muted); +} + +.cron-job-actions { + flex-wrap: wrap; + justify-content: flex-end; + margin-top: 0; +} + +.cron-job-actions .btn { + flex: 0 0 auto; +} + @container (max-width: 560px) { .list-item { grid-template-columns: 1fr; @@ -690,6 +822,23 @@ min-width: 0; text-align: left; } + + .cron-job-actions { + justify-content: flex-start; + } + + .cron-job { + grid-template-columns: 1fr; + grid-template-areas: + "main" + "meta" + "footer"; + } + + .cron-job-footer { + flex-direction: column; + align-items: stretch; + } } /* =========================================== @@ -737,6 +886,12 @@ background: var(--warn-subtle); } +.chip-danger { + color: var(--danger); + border-color: rgba(239, 68, 68, 0.3); + background: var(--danger-subtle); +} + /* =========================================== Tables =========================================== */ @@ -783,6 +938,22 @@ text-decoration: underline; } +.session-key-cell { + display: grid; + gap: 4px; + min-width: 0; +} + +.session-key-cell .session-link, +.session-key-display-name { + overflow-wrap: anywhere; + word-break: break-word; +} + +.session-key-display-name { + font-size: 11px; +} + /* =========================================== Log Stream 
=========================================== */ diff --git a/ui/src/ui/app-defaults.ts b/ui/src/ui/app-defaults.ts index 6521d0748..89bdaf11d 100644 --- a/ui/src/ui/app-defaults.ts +++ b/ui/src/ui/app-defaults.ts @@ -22,7 +22,7 @@ export const DEFAULT_CRON_FORM: CronFormState = { cronExpr: "0 7 * * *", cronTz: "", sessionTarget: "isolated", - wakeMode: "next-heartbeat", + wakeMode: "now", payloadKind: "agentTurn", payloadText: "", deliveryMode: "announce", diff --git a/ui/src/ui/app-render.helpers.ts b/ui/src/ui/app-render.helpers.ts index d2bc9aa90..c12258599 100644 --- a/ui/src/ui/app-render.helpers.ts +++ b/ui/src/ui/app-render.helpers.ts @@ -206,13 +206,13 @@ function resolveMainSessionKey( } function resolveSessionDisplayName(key: string, row?: SessionsListResult["sessions"][number]) { - const label = row?.label?.trim(); - if (label) { + const label = row?.label?.trim() || ""; + const displayName = row?.displayName?.trim() || ""; + if (label && label !== key) { return `${label} (${key})`; } - const displayName = row?.displayName?.trim(); - if (displayName) { - return displayName; + if (displayName && displayName !== key) { + return `${key} (${displayName})`; } return key; } diff --git a/ui/src/ui/app-render.ts b/ui/src/ui/app-render.ts index f5c71c579..d416bde44 100644 --- a/ui/src/ui/app-render.ts +++ b/ui/src/ui/app-render.ts @@ -581,6 +581,7 @@ export function renderApp(state: AppViewState) { ${ state.tab === "cron" ? 
renderCron({ + basePath: state.basePath, loading: state.cronLoading, status: state.cronStatus, jobs: state.cronJobs, diff --git a/ui/src/ui/format.test.ts b/ui/src/ui/format.test.ts index 8e1f121ea..4260f07da 100644 --- a/ui/src/ui/format.test.ts +++ b/ui/src/ui/format.test.ts @@ -2,8 +2,8 @@ import { describe, expect, it } from "vitest"; import { formatAgo, stripThinkingTags } from "./format.ts"; describe("formatAgo", () => { - it("returns 'just now' for timestamps less than 60s in the future", () => { - expect(formatAgo(Date.now() + 30_000)).toBe("just now"); + it("returns 'in <1m' for timestamps less than 60s in the future", () => { + expect(formatAgo(Date.now() + 30_000)).toBe("in <1m"); }); it("returns 'Xm from now' for future timestamps", () => { diff --git a/ui/src/ui/format.ts b/ui/src/ui/format.ts index 812aaa3fb..91debb2e4 100644 --- a/ui/src/ui/format.ts +++ b/ui/src/ui/format.ts @@ -16,7 +16,7 @@ export function formatAgo(ms?: number | null): string { const suffix = diff < 0 ? "from now" : "ago"; const sec = Math.round(absDiff / 1000); if (sec < 60) { - return diff < 0 ? "just now" : `${sec}s ago`; + return diff < 0 ? 
"in <1m" : `${sec}s ago`; } const min = Math.round(sec / 60); if (min < 60) { diff --git a/ui/src/ui/types.ts b/ui/src/ui/types.ts index d1d3f432b..1c85b8731 100644 --- a/ui/src/ui/types.ts +++ b/ui/src/ui/types.ts @@ -704,6 +704,8 @@ export type CronRunLogEntry = { durationMs?: number; error?: string; summary?: string; + sessionId?: string; + sessionKey?: string; }; export type SkillsStatusConfigCheck = { diff --git a/ui/src/ui/views/cron.test.ts b/ui/src/ui/views/cron.test.ts index 6d1e2c7a4..ea74093af 100644 --- a/ui/src/ui/views/cron.test.ts +++ b/ui/src/ui/views/cron.test.ts @@ -20,6 +20,7 @@ function createJob(id: string): CronJob { function createProps(overrides: Partial = {}): CronProps { return { + basePath: "", loading: false, status: null, jobs: [], @@ -70,7 +71,7 @@ describe("cron view", () => { expect(onLoadRuns).toHaveBeenCalledWith("job-1"); }); - it("marks the selected job and keeps Runs button to a single call", () => { + it("marks the selected job and keeps History button to a single call", () => { const container = document.createElement("div"); const onLoadRuns = vi.fn(); const job = createJob("job-1"); @@ -88,13 +89,73 @@ describe("cron view", () => { const selected = container.querySelector(".list-item-selected"); expect(selected).not.toBeNull(); - const runsButton = Array.from(container.querySelectorAll("button")).find( - (btn) => btn.textContent?.trim() === "Runs", + const historyButton = Array.from(container.querySelectorAll("button")).find( + (btn) => btn.textContent?.trim() === "History", ); - expect(runsButton).not.toBeUndefined(); - runsButton?.dispatchEvent(new MouseEvent("click", { bubbles: true })); + expect(historyButton).not.toBeUndefined(); + historyButton?.dispatchEvent(new MouseEvent("click", { bubbles: true })); expect(onLoadRuns).toHaveBeenCalledTimes(1); expect(onLoadRuns).toHaveBeenCalledWith("job-1"); }); + + it("renders run chat links when session keys are present", () => { + const container = 
document.createElement("div"); + render( + renderCron( + createProps({ + basePath: "/ui", + runsJobId: "job-1", + runs: [ + { + ts: Date.now(), + jobId: "job-1", + status: "ok", + summary: "done", + sessionKey: "agent:main:cron:job-1:run:abc", + }, + ], + }), + ), + container, + ); + + const link = container.querySelector("a.session-link"); + expect(link).not.toBeNull(); + expect(link?.getAttribute("href")).toContain( + "/ui/chat?session=agent%3Amain%3Acron%3Ajob-1%3Arun%3Aabc", + ); + }); + + it("shows selected job name and sorts run history newest first", () => { + const container = document.createElement("div"); + const job = createJob("job-1"); + render( + renderCron( + createProps({ + jobs: [job], + runsJobId: "job-1", + runs: [ + { ts: 1, jobId: "job-1", status: "ok", summary: "older run" }, + { ts: 2, jobId: "job-1", status: "ok", summary: "newer run" }, + ], + }), + ), + container, + ); + + expect(container.textContent).toContain("Latest runs for Daily ping."); + + const cards = Array.from(container.querySelectorAll(".card")); + const runHistoryCard = cards.find( + (card) => card.querySelector(".card-title")?.textContent?.trim() === "Run history", + ); + expect(runHistoryCard).not.toBeUndefined(); + + const summaries = Array.from( + runHistoryCard?.querySelectorAll(".list-item .list-sub") ?? [], + ).map((el) => (el.textContent ?? 
"").trim()); + expect(summaries[0]).toBe("newer run"); + expect(summaries[1]).toBe("older run"); + }); }); diff --git a/ui/src/ui/views/cron.ts b/ui/src/ui/views/cron.ts index a957cf1a2..7b87826ea 100644 --- a/ui/src/ui/views/cron.ts +++ b/ui/src/ui/views/cron.ts @@ -1,15 +1,12 @@ import { html, nothing } from "lit"; import type { ChannelUiMetaEntry, CronJob, CronRunLogEntry, CronStatus } from "../types.ts"; import type { CronFormState } from "../ui-types.ts"; -import { formatMs } from "../format.ts"; -import { - formatCronPayload, - formatCronSchedule, - formatCronState, - formatNextRun, -} from "../presenter.ts"; +import { formatAgo, formatMs } from "../format.ts"; +import { pathForTab } from "../navigation.ts"; +import { formatCronSchedule, formatNextRun } from "../presenter.ts"; export type CronProps = { + basePath: string; loading: boolean; status: CronStatus | null; jobs: CronJob[]; @@ -59,6 +56,10 @@ function resolveChannelLabel(props: CronProps, channel: string): string { export function renderCron(props: CronProps) { const channelOptions = buildChannelOptions(props); + const selectedJob = + props.runsJobId == null ? undefined : props.jobs.find((job) => job.id === props.runsJobId); + const selectedRunTitle = selectedJob?.name ?? props.runsJobId ?? "(select a job)"; + const orderedRuns = props.runs.toSorted((a, b) => b.ts - a.ts); return html`

@@ -167,8 +168,8 @@ export function renderCron(props: CronProps) { wakeMode: (e.target as HTMLSelectElement).value as CronFormState["wakeMode"], })} > - +