Merge branch 'master' into canonical-url

This commit is contained in:
Victor Duarte
2026-03-06 19:32:29 +01:00
committed by GitHub
131 changed files with 18798 additions and 1003 deletions

View File

@@ -1,4 +1,4 @@
FROM node:20-bookworm-slim AS base
FROM node:lts-trixie-slim AS base
RUN apt-get update \
&& apt-get install -y --no-install-recommends ca-certificates curl git \
&& rm -rf /var/lib/apt/lists/*
@@ -15,14 +15,18 @@ COPY packages/db/package.json packages/db/
COPY packages/adapter-utils/package.json packages/adapter-utils/
COPY packages/adapters/claude-local/package.json packages/adapters/claude-local/
COPY packages/adapters/codex-local/package.json packages/adapters/codex-local/
COPY packages/adapters/cursor-local/package.json packages/adapters/cursor-local/
COPY packages/adapters/openclaw/package.json packages/adapters/openclaw/
COPY packages/adapters/opencode-local/package.json packages/adapters/opencode-local/
RUN pnpm install --frozen-lockfile
FROM base AS build
WORKDIR /app
COPY --from=deps /app /app
COPY . .
RUN pnpm --filter @paperclip/ui build
RUN pnpm --filter @paperclip/server build
RUN pnpm --filter @paperclipai/ui build
RUN pnpm --filter @paperclipai/server build
RUN test -f server/dist/index.js || (echo "ERROR: server build output missing" && exit 1)
FROM base AS production
WORKDIR /app
@@ -37,7 +41,7 @@ ENV NODE_ENV=production \
PAPERCLIP_HOME=/paperclip \
PAPERCLIP_INSTANCE_ID=default \
PAPERCLIP_CONFIG=/paperclip/instances/default/config.json \
PAPERCLIP_DEPLOYMENT_MODE=local_trusted \
PAPERCLIP_DEPLOYMENT_MODE=authenticated \
PAPERCLIP_DEPLOYMENT_EXPOSURE=private
VOLUME ["/paperclip"]

View File

@@ -17,7 +17,7 @@ const codexLocalCLIAdapter: CLIAdapterModule = {
formatStdoutEvent: printCodexStreamEvent,
};
const opencodeLocalCLIAdapter: CLIAdapterModule = {
const openCodeLocalCLIAdapter: CLIAdapterModule = {
type: "opencode_local",
formatStdoutEvent: printOpenCodeStreamEvent,
};
@@ -33,7 +33,7 @@ const openclawCLIAdapter: CLIAdapterModule = {
};
const adaptersByType = new Map<string, CLIAdapterModule>(
[claudeLocalCLIAdapter, codexLocalCLIAdapter, opencodeLocalCLIAdapter, cursorLocalCLIAdapter, openclawCLIAdapter, processCLIAdapter, httpCLIAdapter].map((a) => [a.type, a]),
[claudeLocalCLIAdapter, codexLocalCLIAdapter, openCodeLocalCLIAdapter, cursorLocalCLIAdapter, openclawCLIAdapter, processCLIAdapter, httpCLIAdapter].map((a) => [a.type, a]),
);
export function getCLIAdapter(type: string): CLIAdapterModule {

View File

@@ -246,7 +246,7 @@ Agent-oriented invite onboarding now exposes machine-readable API docs:
- `GET /api/invites/:token` returns invite summary plus onboarding and skills index links.
- `GET /api/invites/:token/onboarding` returns onboarding manifest details (registration endpoint, claim endpoint template, skill install hints).
- `GET /api/invites/:token/onboarding.txt` returns a plain-text onboarding doc intended for both human operators and agents (llm.txt-style handoff).
- `GET /api/invites/:token/onboarding.txt` returns a plain-text onboarding doc intended for both human operators and agents (llm.txt-style handoff), including optional inviter message and suggested network host candidates.
- `GET /api/skills/index` lists available skill documents.
- `GET /api/skills/paperclip` returns the Paperclip heartbeat skill markdown.
@@ -287,5 +287,20 @@ This script lives at `scripts/smoke/openclaw-docker-ui.sh` and automates clone/b
Pairing behavior for this smoke script:
- default `OPENCLAW_DISABLE_DEVICE_AUTH=1` (no Control UI pairing prompt for local smoke)
- default `OPENCLAW_DISABLE_DEVICE_AUTH=1` (no Control UI pairing prompt for local smoke; no extra pairing env vars required)
- set `OPENCLAW_DISABLE_DEVICE_AUTH=0` to require standard device pairing
Model behavior for this smoke script:
- defaults to OpenAI models (`openai/gpt-5.2` + OpenAI fallback) so it does not require Anthropic auth by default
State behavior for this smoke script:
- defaults to isolated config dir `~/.openclaw-paperclip-smoke`
- resets smoke agent state each run by default (`OPENCLAW_RESET_STATE=1`) to avoid stale provider/auth drift
Networking behavior for this smoke script:
- auto-detects and prints a Paperclip host URL reachable from inside OpenClaw Docker
- default container-side host alias is `host.docker.internal` (override with `PAPERCLIP_HOST_FROM_CONTAINER` / `PAPERCLIP_HOST_PORT`)
- if Paperclip rejects container hostnames in authenticated/private mode, allow `host.docker.internal` via `pnpm paperclipai allowed-hostname host.docker.internal` and restart Paperclip

View File

@@ -10,6 +10,9 @@ services:
PAPERCLIP_HOME: "/paperclip"
OPENAI_API_KEY: "${OPENAI_API_KEY:-}"
ANTHROPIC_API_KEY: "${ANTHROPIC_API_KEY:-}"
PAPERCLIP_DEPLOYMENT_MODE: "authenticated"
PAPERCLIP_DEPLOYMENT_EXPOSURE: "private"
PAPERCLIP_PUBLIC_URL: "${PAPERCLIP_PUBLIC_URL:-http://localhost:3100}"
BETTER_AUTH_SECRET: "${BETTER_AUTH_SECRET:?BETTER_AUTH_SECRET must be set}"
volumes:
- "${PAPERCLIP_DATA_DIR:-./data/docker-paperclip}:/paperclip"

View File

@@ -5,6 +5,11 @@ services:
POSTGRES_USER: paperclip
POSTGRES_PASSWORD: paperclip
POSTGRES_DB: paperclip
healthcheck:
test: ["CMD-SHELL", "pg_isready -U paperclip -d paperclip"]
interval: 2s
timeout: 5s
retries: 30
ports:
- "5432:5432"
volumes:
@@ -18,9 +23,16 @@ services:
DATABASE_URL: postgres://paperclip:paperclip@db:5432/paperclip
PORT: "3100"
SERVE_UI: "true"
PAPERCLIP_DEPLOYMENT_MODE: "authenticated"
PAPERCLIP_DEPLOYMENT_EXPOSURE: "private"
PAPERCLIP_PUBLIC_URL: "${PAPERCLIP_PUBLIC_URL:-http://localhost:3100}"
BETTER_AUTH_SECRET: "${BETTER_AUTH_SECRET:?BETTER_AUTH_SECRET must be set}"
volumes:
- paperclip-data:/paperclip
depends_on:
- db
db:
condition: service_healthy
volumes:
pgdata:
paperclip-data:

View File

@@ -20,6 +20,8 @@ When a heartbeat fires, Paperclip:
|---------|----------|-------------|
| [Claude Local](/adapters/claude-local) | `claude_local` | Runs Claude Code CLI locally |
| [Codex Local](/adapters/codex-local) | `codex_local` | Runs OpenAI Codex CLI locally |
| OpenCode Local | `opencode_local` | Runs OpenCode CLI locally (multi-provider `provider/model`) |
| OpenClaw | `openclaw` | Sends wake payloads to an OpenClaw webhook |
| [Process](/adapters/process) | `process` | Executes arbitrary shell commands |
| [HTTP](/adapters/http) | `http` | Sends webhooks to external agents |
@@ -52,7 +54,7 @@ Three registries consume these modules:
## Choosing an Adapter
- **Need a coding agent?** Use `claude_local` or `codex_local`
- **Need a coding agent?** Use `claude_local`, `codex_local`, or `opencode_local`
- **Need to run a script or command?** Use `process`
- **Need to call an external service?** Use `http`
- **Need something custom?** [Create your own adapter](/adapters/creating-an-adapter)

View File

@@ -123,6 +123,18 @@ GET /api/companies/{companyId}/org
Returns the full organizational tree for the company.
## List Adapter Models
```
GET /api/companies/{companyId}/adapters/{adapterType}/models
```
Returns selectable models for an adapter type.
- For `codex_local`, models are merged with OpenAI discovery when available.
- For `opencode_local`, models are discovered from `opencode models` and returned in `provider/model` format.
- `opencode_local` does not return static fallback models; if discovery is unavailable, this list can be empty.
## Config Revisions
```

View File

@@ -48,12 +48,20 @@ pnpm dev --tailscale-auth
This binds the server to `0.0.0.0` for private-network access.
Alias:
```sh
pnpm dev --authenticated-private
```
Allow additional private hostnames:
```sh
pnpm paperclipai allowed-hostname dotta-macbook-pro
```
For full setup and troubleshooting, see [Tailscale Private Access](/deploy/tailscale-private-access).
## Health Checks
```sh

View File

@@ -0,0 +1,77 @@
---
title: Tailscale Private Access
summary: Run Paperclip with Tailscale-friendly host binding and connect from other devices
---
Use this when you want to access Paperclip over Tailscale (or a private LAN/VPN) instead of only `localhost`.
## 1. Start Paperclip in private authenticated mode
```sh
pnpm dev --tailscale-auth
```
This configures:
- `PAPERCLIP_DEPLOYMENT_MODE=authenticated`
- `PAPERCLIP_DEPLOYMENT_EXPOSURE=private`
- `PAPERCLIP_AUTH_BASE_URL_MODE=auto`
- `HOST=0.0.0.0` (bind on all interfaces)
Equivalent flag:
```sh
pnpm dev --authenticated-private
```
## 2. Find your reachable Tailscale address
From the machine running Paperclip:
```sh
tailscale ip -4
```
You can also use your Tailscale MagicDNS hostname (for example `my-macbook.tailnet.ts.net`).
## 3. Open Paperclip from another device
Use the Tailscale IP or MagicDNS host with the Paperclip port:
```txt
http://<tailscale-host-or-ip>:3100
```
Example:
```txt
http://my-macbook.tailnet.ts.net:3100
```
## 4. Allow custom private hostnames when needed
If you access Paperclip with a custom private hostname, add it to the allowlist:
```sh
pnpm paperclipai allowed-hostname my-macbook.tailnet.ts.net
```
## 5. Verify the server is reachable
From a remote Tailscale-connected device:
```sh
curl http://<tailscale-host-or-ip>:3100/api/health
```
Expected result:
```json
{"status":"ok"}
```
## Troubleshooting
- Login or redirect errors on a private hostname: add it with `paperclipai allowed-hostname`.
- App only works on `localhost`: make sure you started with `--tailscale-auth` (or set `HOST=0.0.0.0` in private mode).
- Can connect locally but not remotely: verify both devices are on the same Tailscale network and port `3100` is reachable.

View File

@@ -73,6 +73,7 @@
"pages": [
"deploy/overview",
"deploy/local-development",
"deploy/tailscale-private-access",
"deploy/docker",
"deploy/deployment-modes",
"deploy/database",

View File

@@ -27,6 +27,14 @@ Create agents from the Agents page. Each agent requires:
- **Adapter config** — runtime-specific settings (working directory, model, prompt, etc.)
- **Capabilities** — short description of what this agent does
Common adapter choices:
- `claude_local` / `codex_local` / `opencode_local` for local coding agents
- `openclaw` / `http` for webhook-based external agents
- `process` for generic local command execution
For `opencode_local`, configure an explicit `adapterConfig.model` (`provider/model`).
Paperclip validates the selected model against live `opencode models` output.
## Agent Hiring via Governance
Agents can request to hire subordinates. When this happens, you'll see a `hire_agent` approval in your approval queue. Review the proposed agent config and approve or reject.

View File

@@ -33,12 +33,16 @@ To spin up OpenClaw in Docker and print a host-browser dashboard URL in one comm
pnpm smoke:openclaw-docker-ui
```
Default behavior is zero-flag: you can run the command as-is with no pairing-related env vars.
What this command does:
- clones/updates `openclaw/openclaw` in `/tmp/openclaw-docker`
- builds `openclaw:local` (unless `OPENCLAW_BUILD=0`)
- writes `~/.openclaw/openclaw.json` and Docker `.env`
- writes isolated smoke config under `~/.openclaw-paperclip-smoke/openclaw.json` and Docker `.env`
- pins agent model defaults to OpenAI (`openai/gpt-5.2` with OpenAI fallback)
- starts `openclaw-gateway` via Compose (with required `/tmp` tmpfs override)
- probes and prints a Paperclip host URL that is reachable from inside OpenClaw Docker
- waits for health and prints:
- `http://127.0.0.1:18789/#token=...`
- disables Control UI device pairing by default for local smoke ergonomics
@@ -53,6 +57,12 @@ Environment knobs:
- `OPENCLAW_OPEN_BROWSER=1` to auto-open the URL on macOS
- `OPENCLAW_DISABLE_DEVICE_AUTH=1` (default) disables Control UI device pairing for local smoke
- `OPENCLAW_DISABLE_DEVICE_AUTH=0` keeps pairing enabled (then approve browser with `devices` CLI commands)
- `OPENCLAW_MODEL_PRIMARY` (default `openai/gpt-5.2`)
- `OPENCLAW_MODEL_FALLBACK` (default `openai/gpt-5.2-chat-latest`)
- `OPENCLAW_CONFIG_DIR` (default `~/.openclaw-paperclip-smoke`)
- `OPENCLAW_RESET_STATE=1` (default) resets smoke agent state on each run to avoid stale auth/session drift
- `PAPERCLIP_HOST_PORT` (default `3100`)
- `PAPERCLIP_HOST_FROM_CONTAINER` (default `host.docker.internal`)
### Authenticated mode
@@ -67,6 +77,15 @@ PAPERCLIP_COOKIE="your_session_cookie=..." pnpm smoke:openclaw-join
### Network topology tips
- Local same-host smoke: default callback uses `http://127.0.0.1:<port>/webhook`.
- Inside OpenClaw Docker, `127.0.0.1` points to the container itself, not your host Paperclip server.
- For invite/onboarding URLs consumed by OpenClaw in Docker, use the script-printed Paperclip URL (typically `http://host.docker.internal:3100`).
- If Paperclip rejects the container-visible host with a hostname error, allow it from host:
```bash
pnpm paperclipai allowed-hostname host.docker.internal
```
Then restart Paperclip and rerun the smoke script.
- Docker/remote OpenClaw: prefer a reachable hostname (Docker host alias, Tailscale hostname, or public domain).
- Authenticated/private mode: ensure hostnames are in the allowed list when required:

View File

@@ -23,7 +23,8 @@
"check:tokens": "node scripts/check-forbidden-tokens.mjs",
"docs:dev": "cd docs && npx mintlify dev",
"smoke:openclaw-join": "./scripts/smoke/openclaw-join.sh",
"smoke:openclaw-docker-ui": "./scripts/smoke/openclaw-docker-ui.sh"
"smoke:openclaw-docker-ui": "./scripts/smoke/openclaw-docker-ui.sh",
"smoke:openclaw-sse-standalone": "./scripts/smoke/openclaw-sse-standalone.sh"
},
"devDependencies": {
"@changesets/cli": "^2.30.0",

View File

@@ -30,6 +30,7 @@
"typecheck": "tsc --noEmit"
},
"devDependencies": {
"@types/node": "^24.6.0",
"typescript": "^5.7.3"
}
}

View File

@@ -13,6 +13,8 @@ export type {
AdapterEnvironmentTestContext,
AdapterSessionCodec,
AdapterModel,
HireApprovedPayload,
HireApprovedHookResult,
ServerAdapterModule,
TranscriptEntry,
StdoutLineParser,

View File

@@ -15,6 +15,14 @@ interface RunningProcess {
graceSec: number;
}
// Narrowed typing for the child-process event listeners this module attaches:
// gives the "error" and "close" handlers precise parameter types instead of
// the loose (event: string, listener: (...args: any[]) => void) overload on
// ChildProcess, so the handler bodies below type-check without casts.
type ChildProcessWithEvents = ChildProcess & {
  on(event: "error", listener: (err: Error) => void): ChildProcess;
  on(
    event: "close",
    listener: (code: number | null, signal: NodeJS.Signals | null) => void,
  ): ChildProcess;
};
export const runningProcesses = new Map<string, RunningProcess>();
export const MAX_CAPTURE_BYTES = 4 * 1024 * 1024;
export const MAX_EXCERPT_BYTES = 32 * 1024;
@@ -217,7 +225,7 @@ export async function runChildProcess(
env: mergedEnv,
shell: false,
stdio: [opts.stdin != null ? "pipe" : "ignore", "pipe", "pipe"],
});
}) as ChildProcessWithEvents;
if (opts.stdin != null && child.stdin) {
child.stdin.write(opts.stdin);
@@ -244,7 +252,7 @@ export async function runChildProcess(
}, opts.timeoutSec * 1000)
: null;
child.stdout?.on("data", (chunk) => {
child.stdout?.on("data", (chunk: unknown) => {
const text = String(chunk);
stdout = appendWithCap(stdout, text);
logChain = logChain
@@ -252,7 +260,7 @@ export async function runChildProcess(
.catch((err) => onLogError(err, runId, "failed to append stdout log chunk"));
});
child.stderr?.on("data", (chunk) => {
child.stderr?.on("data", (chunk: unknown) => {
const text = String(chunk);
stderr = appendWithCap(stderr, text);
logChain = logChain
@@ -260,7 +268,7 @@ export async function runChildProcess(
.catch((err) => onLogError(err, runId, "failed to append stderr log chunk"));
});
child.on("error", (err) => {
child.on("error", (err: Error) => {
if (timeout) clearTimeout(timeout);
runningProcesses.delete(runId);
const errno = (err as NodeJS.ErrnoException).code;
@@ -272,7 +280,7 @@ export async function runChildProcess(
reject(new Error(msg));
});
child.on("close", (code, signal) => {
child.on("close", (code: number | null, signal: NodeJS.Signals | null) => {
if (timeout) clearTimeout(timeout);
runningProcesses.delete(runId);
void logChain.finally(() => {

View File

@@ -119,6 +119,27 @@ export interface AdapterEnvironmentTestContext {
};
}
/** Payload for the onHireApproved adapter lifecycle hook (e.g. join-request or hire_agent approval). */
export interface HireApprovedPayload {
  /** Company the newly approved agent belongs to. */
  companyId: string;
  /** Identifier of the approved agent. */
  agentId: string;
  /** Display name of the approved agent. */
  agentName: string;
  /** Adapter type configured for the approved agent. */
  adapterType: string;
  /** "join_request" | "approval" */
  source: "join_request" | "approval";
  /** Id of the originating join request or approval record (per `source`). */
  sourceId: string;
  /** Approval timestamp — presumably an ISO-8601 string; confirm at the call site. */
  approvedAt: string;
  /** Canonical operator-facing message for cloud adapters to show the user. */
  message: string;
}
/** Result of onHireApproved hook; failures are non-fatal to the approval flow. */
export interface HireApprovedHookResult {
  /** True when the hook completed successfully. */
  ok: boolean;
  /** Error description when `ok` is false. */
  error?: string;
  /** Optional adapter-specific diagnostic detail. */
  detail?: Record<string, unknown>;
}
export interface ServerAdapterModule {
type: string;
execute(ctx: AdapterExecutionContext): Promise<AdapterExecutionResult>;
@@ -128,6 +149,14 @@ export interface ServerAdapterModule {
models?: AdapterModel[];
listModels?: () => Promise<AdapterModel[]>;
agentConfigurationDoc?: string;
/**
* Optional lifecycle hook when an agent is approved/hired (join-request or hire_agent approval).
* adapterConfig is the agent's adapter config so the adapter can e.g. send a callback to a configured URL.
*/
onHireApproved?: (
payload: HireApprovedPayload,
adapterConfig: Record<string, unknown>,
) => Promise<HireApprovedHookResult>;
}
// ---------------------------------------------------------------------------
@@ -135,7 +164,7 @@ export interface ServerAdapterModule {
// ---------------------------------------------------------------------------
export type TranscriptEntry =
| { kind: "assistant"; ts: string; text: string }
| { kind: "assistant"; ts: string; text: string; delta?: boolean }
| { kind: "thinking"; ts: string; text: string; delta?: boolean }
| { kind: "user"; ts: string; text: string }
| { kind: "tool_call"; ts: string; name: string; input: unknown }

View File

@@ -45,6 +45,7 @@
"picocolors": "^1.1.1"
},
"devDependencies": {
"@types/node": "^24.6.0",
"typescript": "^5.7.3"
}
}

View File

@@ -45,6 +45,7 @@
"picocolors": "^1.1.1"
},
"devDependencies": {
"@types/node": "^24.6.0",
"typescript": "^5.7.3"
}
}

View File

@@ -4,6 +4,7 @@ export const DEFAULT_CODEX_LOCAL_MODEL = "gpt-5.3-codex";
export const DEFAULT_CODEX_LOCAL_BYPASS_APPROVALS_AND_SANDBOX = true;
export const models = [
{ id: "gpt-5.4", label: "gpt-5.4" },
{ id: DEFAULT_CODEX_LOCAL_MODEL, label: DEFAULT_CODEX_LOCAL_MODEL },
{ id: "gpt-5.3-codex-spark", label: "gpt-5.3-codex-spark" },
{ id: "gpt-5", label: "gpt-5" },

View File

@@ -44,6 +44,7 @@
"picocolors": "^1.1.1"
},
"devDependencies": {
"@types/node": "^24.6.0",
"typescript": "^5.7.3"
}
}

View File

@@ -8,20 +8,32 @@ export const agentConfigurationDoc = `# openclaw agent configuration
Adapter: openclaw
Use when:
- You run an OpenClaw agent remotely and wake it via webhook.
- You want Paperclip heartbeat/task events delivered over HTTP.
- You run an OpenClaw agent remotely and wake it over HTTP.
- You want SSE-first execution so one Paperclip run captures live progress and completion.
Don't use when:
- You need local CLI execution inside Paperclip (use claude_local/codex_local/opencode_local/process).
- The OpenClaw endpoint is not reachable from the Paperclip server.
Core fields:
- url (string, required): OpenClaw webhook endpoint URL
- url (string, required): OpenClaw SSE endpoint URL
- streamTransport (string, optional): must be \`sse\` when provided
- method (string, optional): HTTP method, default POST
- headers (object, optional): extra HTTP headers for webhook calls
- headers (object, optional): extra HTTP headers for requests
- webhookAuthHeader (string, optional): Authorization header value if your endpoint requires auth
- payloadTemplate (object, optional): additional JSON payload fields merged into each wake payload
- paperclipApiUrl (string, optional): absolute http(s) Paperclip base URL to advertise to OpenClaw as \`PAPERCLIP_API_URL\`
Session routing fields:
- sessionKeyStrategy (string, optional): \`fixed\` (default), \`issue\`, or \`run\`
- sessionKey (string, optional): fixed session key value when strategy is \`fixed\` (default \`paperclip\`)
Operational fields:
- timeoutSec (number, optional): request timeout in seconds (default 30)
- timeoutSec (number, optional): SSE request timeout in seconds (default 0 = no adapter timeout)
Hire-approved callback fields (optional):
- hireApprovedCallbackUrl (string): callback endpoint invoked when this agent is approved/hired
- hireApprovedCallbackMethod (string): HTTP method for the callback (default POST)
- hireApprovedCallbackAuthHeader (string): Authorization header value for callback requests
- hireApprovedCallbackHeaders (object): extra headers merged into callback requests
`;

View File

@@ -1,11 +1,460 @@
import type { AdapterExecutionContext, AdapterExecutionResult } from "@paperclipai/adapter-utils";
import { asNumber, asString, parseObject } from "@paperclipai/adapter-utils/server-utils";
import { asNumber, asString, buildPaperclipEnv, parseObject } from "@paperclipai/adapter-utils/server-utils";
import { parseOpenClawResponse } from "./parse.js";
type SessionKeyStrategy = "fixed" | "issue" | "run";
/** Returns the trimmed string when `value` is a non-blank string, otherwise null. */
function nonEmpty(value: unknown): string | null {
  if (typeof value !== "string") return null;
  const trimmed = value.trim();
  return trimmed.length > 0 ? trimmed : null;
}
/**
 * Normalizes a raw token into an Authorization header value, prefixing
 * "Bearer " unless the token already carries a bearer scheme (any case).
 * An all-whitespace token collapses to the empty string.
 */
function toAuthorizationHeaderValue(rawToken: string): string {
  const token = rawToken.trim();
  if (token.length === 0) {
    return token;
  }
  const hasBearerScheme = /^bearer\s+/i.test(token);
  return hasBearerScheme ? token : `Bearer ${token}`;
}
/**
 * Validates an operator-supplied Paperclip API URL override.
 * Returns the normalized absolute URL string, or null when the value is
 * blank, unparseable, or uses a scheme other than http/https.
 */
function resolvePaperclipApiUrlOverride(value: unknown): string | null {
  const candidate = nonEmpty(value);
  if (!candidate) return null;
  let parsed: URL;
  try {
    parsed = new URL(candidate);
  } catch {
    return null;
  }
  const isHttp = parsed.protocol === "http:" || parsed.protocol === "https:";
  return isHttp ? parsed.toString() : null;
}
/**
 * Coerces arbitrary config input into a SessionKeyStrategy.
 * Recognizes "issue" and "run" (case/whitespace-insensitive);
 * everything else falls back to "fixed".
 */
function normalizeSessionKeyStrategy(value: unknown): SessionKeyStrategy {
  const candidate = asString(value, "fixed").trim().toLowerCase();
  switch (candidate) {
    case "issue":
    case "run":
      return candidate;
    default:
      return "fixed";
  }
}
/**
 * Derives the OpenClaw session key for a run.
 * - "run": one session per Paperclip run
 * - "issue": one session per linked issue (falls back when no issue id)
 * - "fixed" (or any fallback case): the configured key, defaulting to "paperclip"
 */
function resolveSessionKey(input: {
  strategy: SessionKeyStrategy;
  configuredSessionKey: string | null;
  runId: string;
  issueId: string | null;
}): string {
  if (input.strategy === "run") {
    return `paperclip:run:${input.runId}`;
  }
  if (input.strategy === "issue" && input.issueId) {
    return `paperclip:issue:${input.issueId}`;
  }
  return input.configuredSessionKey ?? "paperclip";
}
/**
 * Detects the legacy OpenClaw wake webhook endpoint (`/hooks/wake`), which
 * the adapter rejects because it cannot stream SSE responses.
 * Unparseable URLs are treated as not matching.
 */
function isWakeCompatibilityEndpoint(url: string): boolean {
  let pathname: string;
  try {
    pathname = new URL(url).pathname.toLowerCase();
  } catch {
    return false;
  }
  return pathname === "/hooks/wake" || pathname.endsWith("/hooks/wake");
}
/**
 * Detects an Open Responses-style endpoint (`/v1/responses`), used to decide
 * how wake text is merged into the request payload.
 * Unparseable URLs are treated as not matching.
 */
function isOpenResponsesEndpoint(url: string): boolean {
  let pathname: string;
  try {
    pathname = new URL(url).pathname.toLowerCase();
  } catch {
    return false;
  }
  return pathname === "/v1/responses" || pathname.endsWith("/v1/responses");
}
/**
 * Extracts only the string-valued entries of an object-like config value
 * into a plain Record<string, string>; non-string entries are dropped.
 */
function toStringRecord(value: unknown): Record<string, string> {
  const stringEntries = Object.entries(parseObject(value)).filter(
    (pair): pair is [string, string] => typeof pair[1] === "string",
  );
  return Object.fromEntries(stringEntries);
}
/**
 * Shape of the wake event context rendered into the wake text.
 * Nullable fields are serialized as empty strings.
 */
type WakePayload = {
  runId: string;
  agentId: string;
  companyId: string;
  taskId: string | null;
  issueId: string | null;
  wakeReason: string | null;
  wakeCommentId: string | null;
  approvalId: string | null;
  approvalStatus: string | null;
  issueIds: string[];
};
/**
 * Renders the agent-facing wake message: a fixed preamble, the non-empty
 * PAPERCLIP_* env assignments (in a stable canonical order), the payload
 * fields (empty string for nulls), and a closing heartbeat instruction.
 */
function buildWakeText(payload: WakePayload, paperclipEnv: Record<string, string>): string {
  const ENV_KEY_ORDER = [
    "PAPERCLIP_RUN_ID",
    "PAPERCLIP_AGENT_ID",
    "PAPERCLIP_COMPANY_ID",
    "PAPERCLIP_API_URL",
    "PAPERCLIP_TASK_ID",
    "PAPERCLIP_WAKE_REASON",
    "PAPERCLIP_WAKE_COMMENT_ID",
    "PAPERCLIP_APPROVAL_ID",
    "PAPERCLIP_APPROVAL_STATUS",
    "PAPERCLIP_LINKED_ISSUE_IDS",
  ];
  // Emit only keys that are present with a non-empty value, preserving order.
  const envAssignments = ENV_KEY_ORDER.flatMap((key) => {
    const value = paperclipEnv[key];
    return value ? [`${key}=${value}`] : [];
  });
  return [
    "Paperclip wake event for a cloud adapter.",
    "",
    "Set these values in your run context:",
    ...envAssignments,
    "",
    `task_id=${payload.taskId ?? ""}`,
    `issue_id=${payload.issueId ?? ""}`,
    `wake_reason=${payload.wakeReason ?? ""}`,
    `wake_comment_id=${payload.wakeCommentId ?? ""}`,
    `approval_id=${payload.approvalId ?? ""}`,
    `approval_status=${payload.approvalStatus ?? ""}`,
    `linked_issue_ids=${payload.issueIds.join(",")}`,
    "",
    "Run your Paperclip heartbeat procedure now.",
  ].join("\n");
}
/**
 * Appends the wake text after the (trimmed) base text with a blank-line
 * separator; returns the wake text alone when the base is blank.
 */
function appendWakeText(baseText: string, wakeText: string): string {
  const base = baseText.trim();
  if (base.length === 0) {
    return wakeText;
  }
  return `${base}\n\n${wakeText}`;
}
/** Wraps wake text as an Open Responses user message with one input_text part. */
function buildOpenResponsesWakeInputMessage(wakeText: string): Record<string, unknown> {
  const textPart = { type: "input_text", text: wakeText };
  return { type: "message", role: "user", content: [textPart] };
}
/**
 * Merges wake text into an Open Responses `input` value of any supported
 * shape without discarding what the operator configured:
 * - string input: wake text is appended to it
 * - array input: a wake user message is pushed at the end
 * - message object with string `content`: wake text appended to the content
 * - message object with array `content`: an input_text part is appended
 * - other objects: paired with a separate wake message in a two-element array
 * - anything else: replaced by the bare wake text
 */
function appendWakeTextToOpenResponsesInput(input: unknown, wakeText: string): unknown {
  if (typeof input === "string") {
    return appendWakeText(input, wakeText);
  }
  if (Array.isArray(input)) {
    return input.concat([buildOpenResponsesWakeInputMessage(wakeText)]);
  }
  if (input === null || typeof input !== "object") {
    return wakeText;
  }
  const message = parseObject(input);
  const content = message.content;
  if (typeof content === "string") {
    return { ...message, content: appendWakeText(content, wakeText) };
  }
  if (Array.isArray(content)) {
    const wakePart = { type: "input_text", text: wakeText };
    return { ...message, content: content.concat([wakePart]) };
  }
  return [message, buildOpenResponsesWakeInputMessage(wakeText)];
}
/**
 * Heuristically detects OpenClaw's "text required" rejection, matching either
 * the parsed JSON `error` field or the raw response body (case-insensitive).
 */
function isTextRequiredResponse(responseText: string): boolean {
  const parsed = parseOpenClawResponse(responseText);
  const errorField = parsed && typeof parsed.error === "string" ? parsed.error : "";
  const haystacks = [errorField, responseText];
  return haystacks.some((text) => text.toLowerCase().includes("text required"));
}
/**
 * Fires a JSON HTTP request with the given method, headers and payload.
 * Thin wrapper over fetch; the caller owns cancellation via `signal`.
 */
async function sendJsonRequest(params: {
  url: string;
  method: string;
  headers: Record<string, string>;
  payload: Record<string, unknown>;
  signal: AbortSignal;
}): Promise<Response> {
  const { url, method, headers, payload, signal } = params;
  const body = JSON.stringify(payload);
  return fetch(url, { method, headers, body, signal });
}
/**
 * Reads the full response body as text and mirrors it to the run log
 * (truncated to 2000 chars) tagged with the HTTP status; blank bodies are
 * logged as "<empty>". Returns the untruncated body text.
 */
async function readAndLogResponseText(params: {
  response: Response;
  onLog: AdapterExecutionContext["onLog"];
}): Promise<string> {
  const bodyText = await params.response.text();
  const status = params.response.status;
  const logBody = bodyText.trim().length > 0 ? bodyText.slice(0, 2000) : "<empty>";
  await params.onLog("stdout", `[openclaw] response (${status}) ${logBody}\n`);
  return bodyText;
}
/** Aggregate outcome of consuming an OpenClaw SSE stream. */
type ConsumedSse = {
  // Number of dispatched SSE events (events with at least one data line).
  eventCount: number;
  // Event type of the last dispatched event ("message" when unnamed).
  lastEventType: string | null;
  // Raw data of the last dispatched event (data lines joined by "\n").
  lastData: string | null;
  // Last payload that parsed as JSON, if any event parsed.
  lastPayload: Record<string, unknown> | null;
  // True once a terminal event (done/completed/failed/...) was seen.
  terminal: boolean;
  // True when the terminal event indicated failure.
  failed: boolean;
  // Error message extracted from the failing event, when available.
  errorMessage: string | null;
};
/**
 * Classifies a single SSE event as terminal or not and, for terminal events,
 * as success or failure. Checks are applied in priority order:
 *   1. literal "[DONE]" data sentinel -> terminal success
 *   2. event type containing "error"/"failed"/"cancel" -> terminal failure
 *   3. event type "done"/"completed" or suffix ".completed"/".done" -> success
 *   4. payload `status`: completed/succeeded/done vs failed/cancelled/error
 *   5. payload `type` suffix: ".completed"/".done" vs ".failed"/".cancelled"/".error"
 *   6. payload `done === true` -> terminal success
 * Anything else is a non-terminal progress event.
 */
function inferSseTerminal(input: {
  eventType: string;
  data: string;
  parsedPayload: Record<string, unknown> | null;
}): { terminal: boolean; failed: boolean; errorMessage: string | null } {
  const normalizedType = input.eventType.trim().toLowerCase();
  const trimmedData = input.data.trim();
  const payload = input.parsedPayload;
  const payloadType = nonEmpty(payload?.type)?.toLowerCase() ?? null;
  const payloadStatus = nonEmpty(payload?.status)?.toLowerCase() ?? null;
  // OpenAI-style stream terminator.
  if (trimmedData === "[DONE]") {
    return { terminal: true, failed: false, errorMessage: null };
  }
  // Failure detection by event type runs before success detection so that
  // e.g. "run.failed" is never misread as a completion.
  const failType =
    normalizedType.includes("error") ||
    normalizedType.includes("failed") ||
    normalizedType.includes("cancel");
  if (failType) {
    return {
      terminal: true,
      failed: true,
      errorMessage:
        // Prefer structured error/message fields; fall back to the raw data.
        nonEmpty(payload?.error) ??
        nonEmpty(payload?.message) ??
        (trimmedData.length > 0 ? trimmedData : "OpenClaw SSE error"),
    };
  }
  const doneType =
    normalizedType === "done" ||
    normalizedType.endsWith(".completed") ||
    normalizedType.endsWith(".done") ||
    normalizedType === "completed";
  if (doneType) {
    return { terminal: true, failed: false, errorMessage: null };
  }
  // Next, look at the payload's `status` field (success before failure).
  if (payloadStatus) {
    if (
      payloadStatus === "completed" ||
      payloadStatus === "succeeded" ||
      payloadStatus === "done"
    ) {
      return { terminal: true, failed: false, errorMessage: null };
    }
    if (
      payloadStatus === "failed" ||
      payloadStatus === "cancelled" ||
      payloadStatus === "error"
    ) {
      return {
        terminal: true,
        failed: true,
        errorMessage:
          nonEmpty(payload?.error) ??
          nonEmpty(payload?.message) ??
          `OpenClaw SSE status ${payloadStatus}`,
      };
    }
  }
  // Then the payload's `type` field, matched by suffix.
  if (payloadType) {
    if (payloadType.endsWith(".completed") || payloadType.endsWith(".done")) {
      return { terminal: true, failed: false, errorMessage: null };
    }
    if (
      payloadType.endsWith(".failed") ||
      payloadType.endsWith(".cancelled") ||
      payloadType.endsWith(".error")
    ) {
      return {
        terminal: true,
        failed: true,
        errorMessage:
          nonEmpty(payload?.error) ??
          nonEmpty(payload?.message) ??
          `OpenClaw SSE type ${payloadType}`,
      };
    }
  }
  // Finally, an explicit boolean done flag.
  if (payload?.done === true) {
    return { terminal: true, failed: false, errorMessage: null };
  }
  return { terminal: false, failed: false, errorMessage: null };
}
/**
 * Incrementally parses an SSE (text/event-stream) HTTP response, logging each
 * event and stopping early once a terminal event is detected by
 * inferSseTerminal. Returns an aggregate ConsumedSse summary.
 *
 * Parsing follows the SSE wire format: lines are accumulated per event;
 * "event:" sets the event type, "data:" lines are collected, a blank line
 * dispatches the event, and ":"-prefixed lines are comments. Any buffered
 * tail left after the stream ends (no trailing newline) is parsed with the
 * same field rules, then a final dangling event is dispatched if data remains.
 *
 * @throws Error when the response has no readable body.
 */
async function consumeSseResponse(params: {
  response: Response;
  onLog: AdapterExecutionContext["onLog"];
}): Promise<ConsumedSse> {
  const reader = params.response.body?.getReader();
  if (!reader) {
    throw new Error("OpenClaw SSE response body is missing");
  }
  const decoder = new TextDecoder();
  let buffer = "";
  // Per-event accumulation state (reset after each dispatch).
  let eventType = "message";
  let dataLines: string[] = [];
  // Aggregate summary state returned at the end.
  let eventCount = 0;
  let lastEventType: string | null = null;
  let lastData: string | null = null;
  let lastPayload: Record<string, unknown> | null = null;
  let terminal = false;
  let failed = false;
  let errorMessage: string | null = null;
  // Dispatches the accumulated event; returns true when it was terminal
  // (signalling the read loops to stop consuming).
  const dispatchEvent = async (): Promise<boolean> => {
    // Events without data lines are ignored per SSE semantics; still reset type.
    if (dataLines.length === 0) {
      eventType = "message";
      return false;
    }
    const data = dataLines.join("\n");
    const trimmedData = data.trim();
    const parsedPayload = parseOpenClawResponse(trimmedData);
    eventCount += 1;
    lastEventType = eventType;
    lastData = data;
    if (parsedPayload) lastPayload = parsedPayload;
    // Truncate long payloads in the log to keep run logs readable.
    const preview =
      trimmedData.length > 1000 ? `${trimmedData.slice(0, 1000)}...` : trimmedData;
    await params.onLog("stdout", `[openclaw:sse] event=${eventType} data=${preview}\n`);
    const resolution = inferSseTerminal({
      eventType,
      data,
      parsedPayload,
    });
    dataLines = [];
    eventType = "message";
    if (resolution.terminal) {
      terminal = true;
      failed = resolution.failed;
      errorMessage = resolution.errorMessage;
      return true;
    }
    return false;
  };
  let shouldStop = false;
  // Main read loop: decode chunks and split on newlines as they arrive.
  while (!shouldStop) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    while (!shouldStop) {
      const newlineIndex = buffer.indexOf("\n");
      if (newlineIndex === -1) break;
      let line = buffer.slice(0, newlineIndex);
      buffer = buffer.slice(newlineIndex + 1);
      // Tolerate CRLF line endings.
      if (line.endsWith("\r")) line = line.slice(0, -1);
      if (line.length === 0) {
        // Blank line terminates the current event.
        shouldStop = await dispatchEvent();
        continue;
      }
      // SSE comment line.
      if (line.startsWith(":")) continue;
      const colonIndex = line.indexOf(":");
      const field = colonIndex === -1 ? line : line.slice(0, colonIndex);
      // Per SSE spec, a single leading space after the colon is stripped.
      const rawValue =
        colonIndex === -1 ? "" : line.slice(colonIndex + 1).replace(/^ /, "");
      if (field === "event") {
        eventType = rawValue || "message";
      } else if (field === "data") {
        dataLines.push(rawValue);
      }
    }
  }
  // Flush any bytes the decoder buffered across chunk boundaries.
  buffer += decoder.decode();
  // Parse a trailing partial block that ended without a final newline.
  if (!shouldStop && buffer.trim().length > 0) {
    for (const rawLine of buffer.split(/\r?\n/)) {
      const line = rawLine.trimEnd();
      if (line.length === 0) {
        shouldStop = await dispatchEvent();
        if (shouldStop) break;
        continue;
      }
      if (line.startsWith(":")) continue;
      const colonIndex = line.indexOf(":");
      const field = colonIndex === -1 ? line : line.slice(0, colonIndex);
      const rawValue =
        colonIndex === -1 ? "" : line.slice(colonIndex + 1).replace(/^ /, "");
      if (field === "event") {
        eventType = rawValue || "message";
      } else if (field === "data") {
        dataLines.push(rawValue);
      }
    }
  }
  // Dispatch a final event whose data was collected but never blank-line-terminated.
  if (!shouldStop && dataLines.length > 0) {
    await dispatchEvent();
  }
  return {
    eventCount,
    lastEventType,
    lastData,
    lastPayload,
    terminal,
    failed,
    errorMessage,
  };
}
export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExecutionResult> {
const { config, runId, agent, context, onLog, onMeta } = ctx;
const url = asString(config.url, "").trim();
@@ -19,11 +468,34 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
};
}
if (isWakeCompatibilityEndpoint(url)) {
return {
exitCode: 1,
signal: null,
timedOut: false,
errorMessage: "OpenClaw /hooks/wake is not stream-capable. Use a streaming endpoint.",
errorCode: "openclaw_sse_incompatible_endpoint",
};
}
const streamTransport = asString(config.streamTransport, "sse").trim().toLowerCase();
if (streamTransport && streamTransport !== "sse") {
return {
exitCode: 1,
signal: null,
timedOut: false,
errorMessage: "OpenClaw adapter only supports streamTransport=sse.",
errorCode: "openclaw_stream_transport_unsupported",
};
}
const method = asString(config.method, "POST").trim().toUpperCase() || "POST";
const timeoutSec = Math.max(1, asNumber(config.timeoutSec, 30));
const timeoutSecRaw = asNumber(config.timeoutSec, 0);
const timeoutSec = timeoutSecRaw > 0 ? Math.max(1, Math.floor(timeoutSecRaw)) : 0;
const headersConfig = parseObject(config.headers) as Record<string, unknown>;
const payloadTemplate = parseObject(config.payloadTemplate);
const webhookAuthHeader = nonEmpty(config.webhookAuthHeader);
const sessionKeyStrategy = normalizeSessionKeyStrategy(config.sessionKeyStrategy);
const headers: Record<string, string> = {
"content-type": "application/json",
@@ -33,6 +505,10 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
headers[key] = value;
}
}
const openClawAuthHeader = nonEmpty(headers["x-openclaw-auth"] ?? headers["X-OpenClaw-Auth"]);
if (openClawAuthHeader && !headers.authorization && !headers.Authorization) {
headers.authorization = toAuthorizationHeaderValue(openClawAuthHeader);
}
if (webhookAuthHeader && !headers.authorization && !headers.Authorization) {
headers.authorization = webhookAuthHeader;
}
@@ -48,54 +524,123 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
approvalId: nonEmpty(context.approvalId),
approvalStatus: nonEmpty(context.approvalStatus),
issueIds: Array.isArray(context.issueIds)
? context.issueIds.filter((value): value is string => typeof value === "string" && value.trim().length > 0)
? context.issueIds.filter(
(value): value is string => typeof value === "string" && value.trim().length > 0,
)
: [],
};
const body = {
...payloadTemplate,
paperclip: {
...wakePayload,
context,
},
const sessionKey = resolveSessionKey({
strategy: sessionKeyStrategy,
configuredSessionKey: nonEmpty(config.sessionKey),
runId,
issueId: wakePayload.issueId ?? wakePayload.taskId,
});
const templateText = nonEmpty(payloadTemplate.text);
const paperclipApiUrlOverride = resolvePaperclipApiUrlOverride(config.paperclipApiUrl);
const paperclipEnv: Record<string, string> = {
...buildPaperclipEnv(agent),
PAPERCLIP_RUN_ID: runId,
};
if (paperclipApiUrlOverride) {
paperclipEnv.PAPERCLIP_API_URL = paperclipApiUrlOverride;
}
if (wakePayload.taskId) paperclipEnv.PAPERCLIP_TASK_ID = wakePayload.taskId;
if (wakePayload.wakeReason) paperclipEnv.PAPERCLIP_WAKE_REASON = wakePayload.wakeReason;
if (wakePayload.wakeCommentId) paperclipEnv.PAPERCLIP_WAKE_COMMENT_ID = wakePayload.wakeCommentId;
if (wakePayload.approvalId) paperclipEnv.PAPERCLIP_APPROVAL_ID = wakePayload.approvalId;
if (wakePayload.approvalStatus) paperclipEnv.PAPERCLIP_APPROVAL_STATUS = wakePayload.approvalStatus;
if (wakePayload.issueIds.length > 0) {
paperclipEnv.PAPERCLIP_LINKED_ISSUE_IDS = wakePayload.issueIds.join(",");
}
const wakeText = buildWakeText(wakePayload, paperclipEnv);
const payloadText = templateText ? `${templateText}\n\n${wakeText}` : wakeText;
const isOpenResponses = isOpenResponsesEndpoint(url);
const openResponsesInput = Object.prototype.hasOwnProperty.call(payloadTemplate, "input")
? appendWakeTextToOpenResponsesInput(payloadTemplate.input, wakeText)
: payloadText;
const paperclipBody: Record<string, unknown> = isOpenResponses
? {
...payloadTemplate,
stream: true,
model:
nonEmpty(payloadTemplate.model) ??
nonEmpty(config.model) ??
"openclaw",
input: openResponsesInput,
metadata: {
...toStringRecord(payloadTemplate.metadata),
...paperclipEnv,
paperclip_session_key: sessionKey,
},
}
: {
...payloadTemplate,
stream: true,
sessionKey,
text: payloadText,
paperclip: {
...wakePayload,
sessionKey,
streamTransport: "sse",
env: paperclipEnv,
context,
},
};
if (isOpenResponses) {
delete paperclipBody.text;
delete paperclipBody.sessionKey;
delete paperclipBody.paperclip;
if (!headers["x-openclaw-session-key"] && !headers["X-OpenClaw-Session-Key"]) {
headers["x-openclaw-session-key"] = sessionKey;
}
}
if (onMeta) {
await onMeta({
adapterType: "openclaw",
command: "webhook",
command: "sse",
commandArgs: [method, url],
context,
});
}
await onLog("stdout", `[openclaw] invoking ${method} ${url}\n`);
const outboundHeaderKeys = Array.from(new Set([...Object.keys(headers), "accept"])).sort();
await onLog("stdout", `[openclaw] outbound header keys: ${outboundHeaderKeys.join(", ")}\n`);
await onLog("stdout", `[openclaw] invoking ${method} ${url} (transport=sse)\n`);
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), timeoutSec * 1000);
const timeout = timeoutSec > 0 ? setTimeout(() => controller.abort(), timeoutSec * 1000) : null;
try {
const response = await fetch(url, {
const response = await sendJsonRequest({
url,
method,
headers,
body: JSON.stringify(body),
headers: {
...headers,
accept: "text/event-stream",
},
payload: paperclipBody,
signal: controller.signal,
});
const responseText = await response.text();
if (responseText.trim().length > 0) {
await onLog("stdout", `[openclaw] response (${response.status}) ${responseText.slice(0, 2000)}\n`);
} else {
await onLog("stdout", `[openclaw] response (${response.status}) <empty>\n`);
}
if (!response.ok) {
const responseText = await readAndLogResponseText({ response, onLog });
return {
exitCode: 1,
signal: null,
timedOut: false,
errorMessage: `OpenClaw webhook failed with status ${response.status}`,
errorCode: "openclaw_http_error",
errorMessage:
isTextRequiredResponse(responseText)
? "OpenClaw endpoint rejected the payload as text-required."
: `OpenClaw SSE request failed with status ${response.status}`,
errorCode: isTextRequiredResponse(responseText)
? "openclaw_text_required"
: "openclaw_http_error",
resultJson: {
status: response.status,
statusText: response.statusText,
@@ -104,28 +649,87 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
};
}
const contentType = (response.headers.get("content-type") ?? "").toLowerCase();
if (!contentType.includes("text/event-stream")) {
const responseText = await readAndLogResponseText({ response, onLog });
return {
exitCode: 1,
signal: null,
timedOut: false,
errorMessage: "OpenClaw SSE endpoint did not return text/event-stream",
errorCode: "openclaw_sse_expected_event_stream",
resultJson: {
status: response.status,
statusText: response.statusText,
contentType,
response: parseOpenClawResponse(responseText) ?? responseText,
},
};
}
const consumed = await consumeSseResponse({ response, onLog });
if (consumed.failed) {
return {
exitCode: 1,
signal: null,
timedOut: false,
errorMessage: consumed.errorMessage ?? "OpenClaw SSE stream failed",
errorCode: "openclaw_sse_stream_failed",
resultJson: {
eventCount: consumed.eventCount,
terminal: consumed.terminal,
lastEventType: consumed.lastEventType,
lastData: consumed.lastData,
response: consumed.lastPayload ?? consumed.lastData,
},
};
}
if (!consumed.terminal) {
return {
exitCode: 1,
signal: null,
timedOut: false,
errorMessage: "OpenClaw SSE stream closed without a terminal event",
errorCode: "openclaw_sse_stream_incomplete",
resultJson: {
eventCount: consumed.eventCount,
terminal: consumed.terminal,
lastEventType: consumed.lastEventType,
lastData: consumed.lastData,
response: consumed.lastPayload ?? consumed.lastData,
},
};
}
return {
exitCode: 0,
signal: null,
timedOut: false,
provider: "openclaw",
model: null,
summary: `OpenClaw webhook ${method} ${url}`,
summary: `OpenClaw SSE ${method} ${url}`,
resultJson: {
status: response.status,
statusText: response.statusText,
response: parseOpenClawResponse(responseText) ?? responseText,
eventCount: consumed.eventCount,
terminal: consumed.terminal,
lastEventType: consumed.lastEventType,
lastData: consumed.lastData,
response: consumed.lastPayload ?? consumed.lastData,
},
};
} catch (err) {
if (err instanceof Error && err.name === "AbortError") {
await onLog("stderr", `[openclaw] request timed out after ${timeoutSec}s\n`);
const timeoutMessage =
timeoutSec > 0
? `[openclaw] SSE request timed out after ${timeoutSec}s\n`
: "[openclaw] SSE request aborted\n";
await onLog("stderr", timeoutMessage);
return {
exitCode: null,
signal: null,
timedOut: true,
errorMessage: `Timed out after ${timeoutSec}s`,
errorCode: "timeout",
errorMessage: timeoutSec > 0 ? `Timed out after ${timeoutSec}s` : "Request aborted",
errorCode: "openclaw_sse_timeout",
};
}
@@ -139,6 +743,6 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
errorCode: "openclaw_request_failed",
};
} finally {
clearTimeout(timeout);
if (timeout) clearTimeout(timeout);
}
}

View File

@@ -0,0 +1,77 @@
import type { HireApprovedPayload, HireApprovedHookResult } from "@paperclipai/adapter-utils";
import { asString, parseObject } from "@paperclipai/adapter-utils/server-utils";
const HIRE_CALLBACK_TIMEOUT_MS = 10_000;
/** Returns the trimmed string when `value` is a non-blank string, otherwise null. */
function nonEmpty(value: unknown): string | null {
  if (typeof value !== "string") return null;
  const trimmed = value.trim();
  return trimmed.length > 0 ? trimmed : null;
}
/**
 * OpenClaw adapter lifecycle hook: when an agent is approved/hired, POST the payload to a
 * configured callback URL so the cloud operator can notify the user (e.g. "you're hired").
 * Best-effort; failures are non-fatal to the approval flow.
 *
 * @param payload - hire-approval details forwarded verbatim, tagged with `event: "hire_approved"`.
 * @param adapterConfig - adapter configuration; reads `hireApprovedCallbackUrl`,
 *   `hireApprovedCallbackMethod`, `hireApprovedCallbackAuthHeader` (falling back to
 *   `webhookAuthHeader`), and `hireApprovedCallbackHeaders`.
 * @returns `{ ok: true }` when no callback is configured or the callback succeeded;
 *   `{ ok: false, error, detail }` on HTTP or network failure.
 */
export async function onHireApproved(
  payload: HireApprovedPayload,
  adapterConfig: Record<string, unknown>,
): Promise<HireApprovedHookResult> {
  const config = parseObject(adapterConfig);
  const url = nonEmpty(config.hireApprovedCallbackUrl);
  if (!url) {
    // No callback configured: nothing to do, treat as success.
    return { ok: true };
  }
  const method = asString(config.hireApprovedCallbackMethod, "POST").trim().toUpperCase() || "POST";
  const authHeader =
    nonEmpty(config.hireApprovedCallbackAuthHeader) ?? nonEmpty(config.webhookAuthHeader);
  const headers: Record<string, string> = {
    "content-type": "application/json",
  };
  // headers was just created with only content-type, so no conflicting
  // Authorization entry can exist yet — set it unconditionally when configured.
  if (authHeader) {
    headers.Authorization = authHeader;
  }
  // Extra configured headers may intentionally override the defaults above.
  const extraHeaders = parseObject(config.hireApprovedCallbackHeaders) as Record<string, unknown>;
  for (const [key, value] of Object.entries(extraHeaders)) {
    if (typeof value === "string" && value.trim().length > 0) {
      headers[key] = value;
    }
  }
  const body = JSON.stringify({
    ...payload,
    event: "hire_approved",
  });
  const controller = new AbortController();
  const timeout = setTimeout(() => controller.abort(), HIRE_CALLBACK_TIMEOUT_MS);
  try {
    const response = await fetch(url, {
      method,
      headers,
      body,
      signal: controller.signal,
    });
    if (!response.ok) {
      // Capture a short response-body excerpt for diagnostics; body read is best-effort.
      const text = await response.text().catch(() => "");
      return {
        ok: false,
        error: `HTTP ${response.status} ${response.statusText}`,
        detail: { status: response.status, statusText: response.statusText, body: text.slice(0, 500) },
      };
    }
    return { ok: true };
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err);
    const cause = err instanceof Error ? err.cause : undefined;
    return {
      ok: false,
      error: message,
      detail: cause != null ? { cause: String(cause) } : undefined,
    };
  } finally {
    // Single guaranteed cleanup path for the abort timer (was duplicated in try/catch).
    clearTimeout(timeout);
  }
}

View File

@@ -1,3 +1,4 @@
export { execute } from "./execute.js";
export { testEnvironment } from "./test.js";
export { parseOpenClawResponse, isOpenClawUnknownSessionError } from "./parse.js";
export { onHireApproved } from "./hire-hook.js";

View File

@@ -29,6 +29,11 @@ function normalizeHostname(value: string | null | undefined): string | null {
return trimmed.toLowerCase();
}
/**
 * True when the (case-insensitive, trimmed) URL path is, or ends with, the
 * OpenClaw wake-compatibility endpoint `/hooks/wake`, which cannot stream SSE.
 */
function isWakePath(pathname: string): boolean {
  // endsWith("/hooks/wake") also covers the exact-match case, so no separate
  // equality comparison is needed.
  return pathname.trim().toLowerCase().endsWith("/hooks/wake");
}
function pushDeploymentDiagnostics(
checks: AdapterEnvironmentCheck[],
ctx: AdapterEnvironmentTestContext,
@@ -102,8 +107,8 @@ export async function testEnvironment(
checks.push({
code: "openclaw_url_missing",
level: "error",
message: "OpenClaw adapter requires a webhook URL.",
hint: "Set adapterConfig.url to your OpenClaw webhook endpoint.",
message: "OpenClaw adapter requires a streaming endpoint URL.",
hint: "Set adapterConfig.url to your OpenClaw SSE endpoint.",
});
return {
adapterType: ctx.adapterType,
@@ -148,6 +153,25 @@ export async function testEnvironment(
hint: "Use a reachable hostname/IP (for example Tailscale/private hostname or public domain).",
});
}
if (isWakePath(url.pathname)) {
checks.push({
code: "openclaw_wake_endpoint_incompatible",
level: "error",
message: "Endpoint targets /hooks/wake, which is not stream-capable for strict SSE mode.",
hint: "Use an endpoint that returns text/event-stream for the full run duration.",
});
}
}
const streamTransport = asString(config.streamTransport, "sse").trim().toLowerCase();
if (streamTransport && streamTransport !== "sse") {
checks.push({
code: "openclaw_stream_transport_unsupported",
level: "error",
message: `Unsupported streamTransport: ${streamTransport}`,
hint: "OpenClaw adapter now requires streamTransport=sse.",
});
}
pushDeploymentDiagnostics(checks, ctx, url);
@@ -169,7 +193,7 @@ export async function testEnvironment(
code: "openclaw_endpoint_probe_unexpected_status",
level: "warn",
message: `Endpoint probe returned HTTP ${response.status}.`,
hint: "Verify OpenClaw webhook reachability and auth/network settings.",
hint: "Verify OpenClaw endpoint reachability and auth/network settings.",
});
} else {
checks.push({

View File

@@ -0,0 +1,16 @@
/**
 * Splits an OpenClaw stream line into its channel and payload.
 *
 * Lines may be prefixed with `stdout` / `stderr` (case-insensitive, optionally
 * followed by `:` or `=`); unprefixed lines are returned with `stream: null`,
 * and blank lines yield an empty result.
 *
 * Fix: a word boundary (`\b`) is required after the prefix so lines that merely
 * START with "stdout"/"stderr" (e.g. "stderrors occurred") are no longer
 * misclassified and truncated.
 */
export function normalizeOpenClawStreamLine(rawLine: string): {
  stream: "stdout" | "stderr" | null;
  line: string;
} {
  const trimmed = rawLine.trim();
  if (!trimmed) return { stream: null, line: "" };
  const prefixed = trimmed.match(/^(stdout|stderr)\b\s*[:=]?\s*(.*)$/i);
  if (!prefixed) {
    return { stream: null, line: trimmed };
  }
  const stream = prefixed[1]?.toLowerCase() === "stderr" ? "stderr" : "stdout";
  const line = (prefixed[2] ?? "").trim();
  return { stream, line };
}

View File

@@ -4,6 +4,9 @@ export function buildOpenClawConfig(v: CreateConfigValues): Record<string, unkno
const ac: Record<string, unknown> = {};
if (v.url) ac.url = v.url;
ac.method = "POST";
ac.timeoutSec = 30;
ac.timeoutSec = 0;
ac.streamTransport = "sse";
ac.sessionKeyStrategy = "fixed";
ac.sessionKey = "paperclip";
return ac;
}

View File

@@ -1,5 +1,167 @@
import type { TranscriptEntry } from "@paperclipai/adapter-utils";
import { normalizeOpenClawStreamLine } from "../shared/stream.js";
/** JSON.parse that yields null instead of throwing on invalid input. */
function safeJsonParse(text: string): unknown {
  let result: unknown = null;
  try {
    result = JSON.parse(text);
  } catch {
    result = null;
  }
  return result;
}
/** Narrows to a plain (non-null, non-array) object record, otherwise null. */
function asRecord(value: unknown): Record<string, unknown> | null {
  const isPlainObject =
    value !== null && typeof value === "object" && !Array.isArray(value);
  return isPlainObject ? (value as Record<string, unknown>) : null;
}
/** Returns `value` when it is a string, otherwise the fallback (default ""). */
function asString(value: unknown, fallback = ""): string {
  if (typeof value === "string") return value;
  return fallback;
}
/** Returns `value` when it is a finite number, otherwise the fallback (default 0). */
function asNumber(value: unknown, fallback = 0): number {
  if (typeof value !== "number") return fallback;
  return Number.isFinite(value) ? value : fallback;
}
/**
 * Best-effort string form: strings pass through, null/undefined become "",
 * everything else is JSON-encoded (falling back to String() on failure).
 */
function stringifyUnknown(value: unknown): string {
  if (typeof value === "string") return value;
  if (value == null) return "";
  try {
    return JSON.stringify(value);
  } catch {
    return String(value);
  }
}
function readErrorText(value: unknown): string {
if (typeof value === "string") return value;
const obj = asRecord(value);
if (!obj) return stringifyUnknown(value);
return (
asString(obj.message).trim() ||
asString(obj.error).trim() ||
asString(obj.code).trim() ||
stringifyUnknown(obj)
);
}
/**
 * Pulls streamed delta text from an SSE payload: prefers a string `delta`,
 * then `delta.text` / `delta.value` / `delta.delta`, then `part.text`;
 * returns "" when nothing is present.
 */
function readDeltaText(payload: Record<string, unknown> | null): string {
  if (!payload) return "";
  const rawDelta = payload.delta;
  if (typeof rawDelta === "string") return rawDelta;
  const deltaRecord = asRecord(rawDelta);
  if (deltaRecord) {
    const nested =
      asString(deltaRecord.text) || asString(deltaRecord.value) || asString(deltaRecord.delta);
    if (nested.length > 0) return nested;
  }
  const partText = asString(asRecord(payload.part)?.text);
  return partText.length > 0 ? partText : "";
}
/**
 * Concatenates the text of all output_text/text/refusal parts from an
 * OpenResponses-style `response.output` array, joined by blank lines.
 * Non-object items/parts and empty text are skipped.
 */
function extractResponseOutputText(response: Record<string, unknown> | null): string {
  if (!response) return "";
  const items = Array.isArray(response.output) ? response.output : [];
  const collected: string[] = [];
  for (const rawItem of items) {
    const item = asRecord(rawItem);
    const content = item && Array.isArray(item.content) ? item.content : [];
    for (const rawPart of content) {
      const part = asRecord(rawPart);
      if (!part) continue;
      const partType = asString(part.type).trim().toLowerCase();
      const isTextual =
        partType === "output_text" || partType === "text" || partType === "refusal";
      if (!isTextual) continue;
      const text = asString(part.text).trim();
      if (text) collected.push(text);
    }
  }
  return collected.join("\n\n").trim();
}
/**
 * Parses one echoed `[openclaw:sse] event=<type> data=<payload>` log line into
 * transcript entries.
 *
 * Non-matching lines fall through as raw stdout. Matching lines are routed by
 * event type: `.delta` events become streaming assistant text, error-ish events
 * become stderr entries, `.completed` events become a final result entry with
 * usage/cost fields, and everything else (including the `[DONE]` sentinel) is
 * dropped.
 */
function parseOpenClawSseLine(line: string, ts: string): TranscriptEntry[] {
  // The `s` flag lets the `data=` capture span embedded newlines in the payload.
  const match = line.match(/^\[openclaw:sse\]\s+event=([^\s]+)\s+data=(.*)$/s);
  if (!match) return [{ kind: "stdout", ts, text: line }];
  const eventType = (match[1] ?? "").trim();
  const dataText = (match[2] ?? "").trim();
  // `parsed` is null whenever the data is not a JSON object (e.g. "[DONE]").
  const parsed = asRecord(safeJsonParse(dataText));
  const normalizedEventType = eventType.toLowerCase();
  if (dataText === "[DONE]") {
    // Stream-termination sentinel carries no transcript content.
    return [];
  }
  const delta = readDeltaText(parsed);
  if (normalizedEventType.endsWith(".delta") && delta.length > 0) {
    // Incremental assistant output chunk.
    return [{ kind: "assistant", ts, text: delta, delta: true }];
  }
  if (
    normalizedEventType.includes("error") ||
    normalizedEventType.includes("failed") ||
    normalizedEventType.includes("cancel")
  ) {
    // Error-like events: prefer a structured message, fall back to the raw data text.
    const message = readErrorText(parsed?.error) || readErrorText(parsed?.message) || dataText;
    return message ? [{ kind: "stderr", ts, text: message }] : [];
  }
  if (normalizedEventType === "response.completed" || normalizedEventType.endsWith(".completed")) {
    const response = asRecord(parsed?.response);
    const usage = asRecord(response?.usage);
    // Status precedence: response.status, then top-level status, then the event type itself.
    const status = asString(response?.status, asString(parsed?.status, eventType));
    const statusLower = status.trim().toLowerCase();
    const errorText =
      readErrorText(response?.error).trim() ||
      readErrorText(parsed?.error).trim() ||
      readErrorText(parsed?.message).trim();
    const isError =
      statusLower === "failed" ||
      statusLower === "error" ||
      statusLower === "cancelled";
    return [{
      kind: "result",
      ts,
      text: extractResponseOutputText(response),
      inputTokens: asNumber(usage?.input_tokens),
      outputTokens: asNumber(usage?.output_tokens),
      cachedTokens: asNumber(usage?.cached_input_tokens),
      // cost_usd preferred; total_cost_usd is a fallback some servers emit instead.
      costUsd: asNumber(usage?.cost_usd, asNumber(usage?.total_cost_usd)),
      subtype: status || eventType,
      isError,
      errors: errorText ? [errorText] : [],
    }];
  }
  // Unrecognized event types are intentionally dropped from the transcript.
  return [];
}
/**
 * Converts one raw OpenClaw stdout line into transcript entries.
 *
 * Routes `stderr`-prefixed lines to stderr entries, `[openclaw:sse]` lines to
 * the SSE event parser, `[openclaw]` lines to system entries (prefix stripped),
 * and everything else to plain stdout. Blank lines produce no entries.
 *
 * Fix: removed a stray unconditional `return` on the first line of the body
 * that made the entire routing logic below it unreachable (every line was
 * emitted as raw stdout).
 */
export function parseOpenClawStdoutLine(line: string, ts: string): TranscriptEntry[] {
  const normalized = normalizeOpenClawStreamLine(line);
  if (normalized.stream === "stderr") {
    return [{ kind: "stderr", ts, text: normalized.line }];
  }
  const trimmed = normalized.line.trim();
  if (!trimmed) return [];
  if (trimmed.startsWith("[openclaw:sse]")) {
    // Structured SSE event echoed by the adapter; parse into assistant/result entries.
    return parseOpenClawSseLine(trimmed, ts);
  }
  if (trimmed.startsWith("[openclaw]")) {
    // Adapter-internal status lines become system entries without the prefix.
    return [{ kind: "system", ts, text: trimmed.replace(/^\[openclaw\]\s*/, "") }];
  }
  return [{ kind: "stdout", ts, text: normalized.line }];
}

View File

@@ -4,4 +4,4 @@
### Patch Changes
- Added initial `opencode_local` adapter package for local OpenCode execution
- Added local OpenCode adapter package with server/UI/CLI modules.

View File

@@ -45,6 +45,7 @@
"picocolors": "^1.1.1"
},
"devDependencies": {
"@types/node": "^24.6.0",
"typescript": "^5.7.3"
}
}

View File

@@ -1,5 +1,13 @@
import pc from "picocolors";
/** Parses JSON text, returning null when it is not valid JSON. */
function safeJsonParse(text: string): unknown {
  let value: unknown = null;
  try {
    value = JSON.parse(text);
  } catch {
    // Invalid JSON: fall through with null.
  }
  return value;
}
function asRecord(value: unknown): Record<string, unknown> | null {
if (typeof value !== "object" || value === null || Array.isArray(value)) return null;
return value as Record<string, unknown>;
@@ -13,42 +21,21 @@ function asNumber(value: unknown, fallback = 0): number {
return typeof value === "number" && Number.isFinite(value) ? value : fallback;
}
function printToolEvent(part: Record<string, unknown>): void {
const tool = asString(part.tool, "tool");
const callId = asString(part.callID, asString(part.id, ""));
const state = asRecord(part.state);
const status = asString(state?.status);
const input = state?.input;
const output = asString(state?.output).replace(/\s+$/, "");
const metadata = asRecord(state?.metadata);
const exit = asNumber(metadata?.exit, NaN);
const isError =
status === "failed" ||
status === "error" ||
status === "cancelled" ||
(Number.isFinite(exit) && exit !== 0);
console.log(pc.yellow(`tool_call: ${tool}${callId ? ` (${callId})` : ""}`));
if (input !== undefined) {
try {
console.log(pc.gray(JSON.stringify(input, null, 2)));
} catch {
console.log(pc.gray(String(input)));
}
}
if (status || output) {
const summary = [
"tool_result",
status ? `status=${status}` : "",
Number.isFinite(exit) ? `exit=${exit}` : "",
]
.filter(Boolean)
.join(" ");
console.log((isError ? pc.red : pc.cyan)(summary));
if (output) {
console.log((isError ? pc.red : pc.gray)(output));
}
function errorText(value: unknown): string {
if (typeof value === "string") return value;
const rec = asRecord(value);
if (!rec) return "";
const data = asRecord(rec.data);
const message =
asString(rec.message) ||
asString(data?.message) ||
asString(rec.name) ||
"";
if (message) return message;
try {
return JSON.stringify(rec);
} catch {
return "";
}
}
@@ -56,10 +43,8 @@ export function printOpenCodeStreamEvent(raw: string, _debug: boolean): void {
const line = raw.trim();
if (!line) return;
let parsed: Record<string, unknown> | null = null;
try {
parsed = JSON.parse(line) as Record<string, unknown>;
} catch {
const parsed = asRecord(safeJsonParse(line));
if (!parsed) {
console.log(line);
return;
}
@@ -74,18 +59,36 @@ export function printOpenCodeStreamEvent(raw: string, _debug: boolean): void {
if (type === "text") {
const part = asRecord(parsed.part);
const text = asString(part?.text);
const text = asString(part?.text).trim();
if (text) console.log(pc.green(`assistant: ${text}`));
return;
}
if (type === "reasoning") {
const part = asRecord(parsed.part);
const text = asString(part?.text).trim();
if (text) console.log(pc.gray(`thinking: ${text}`));
return;
}
if (type === "tool_use") {
const part = asRecord(parsed.part);
if (part) {
printToolEvent(part);
} else {
console.log(pc.yellow("tool_use"));
const tool = asString(part?.tool, "tool");
const state = asRecord(part?.state);
const status = asString(state?.status);
const summary = `tool_${status || "event"}: ${tool}`;
const isError = status === "error";
console.log((isError ? pc.red : pc.yellow)(summary));
const input = state?.input;
if (input !== undefined) {
try {
console.log(pc.gray(JSON.stringify(input, null, 2)));
} catch {
console.log(pc.gray(String(input)));
}
}
const output = asString(state?.output) || asString(state?.error);
if (output) console.log((isError ? pc.red : pc.gray)(output));
return;
}
@@ -93,20 +96,18 @@ export function printOpenCodeStreamEvent(raw: string, _debug: boolean): void {
const part = asRecord(parsed.part);
const tokens = asRecord(part?.tokens);
const cache = asRecord(tokens?.cache);
const reason = asString(part?.reason, "step_finish");
const input = asNumber(tokens?.input);
const output = asNumber(tokens?.output);
const cached = asNumber(cache?.read);
const cost = asNumber(part?.cost);
console.log(pc.blue(`step finished: reason=${reason}`));
console.log(pc.blue(`tokens: in=${input} out=${output} cached=${cached} cost=$${cost.toFixed(6)}`));
const input = asNumber(tokens?.input, 0);
const output = asNumber(tokens?.output, 0) + asNumber(tokens?.reasoning, 0);
const cached = asNumber(cache?.read, 0);
const cost = asNumber(part?.cost, 0);
const reason = asString(part?.reason, "step");
console.log(pc.blue(`step finished (${reason}) tokens: in=${input} out=${output} cached=${cached} cost=$${cost.toFixed(6)}`));
return;
}
if (type === "error") {
const part = asRecord(parsed.part);
const message = asString(parsed.message) || asString(part?.message) || line;
console.log(pc.red(`error: ${message}`));
const message = errorText(parsed.error ?? parsed.message);
if (message) console.log(pc.red(`error: ${message}`));
return;
}

View File

@@ -1,13 +1,7 @@
export const type = "opencode_local";
export const label = "OpenCode (local)";
export const DEFAULT_OPENCODE_LOCAL_MODEL = "openai/gpt-5.2-codex";
export const models = [
{ id: DEFAULT_OPENCODE_LOCAL_MODEL, label: DEFAULT_OPENCODE_LOCAL_MODEL },
{ id: "openai/gpt-5.2", label: "openai/gpt-5.2" },
{ id: "openai/gpt-5.1-codex-max", label: "openai/gpt-5.1-codex-max" },
{ id: "openai/gpt-5.1-codex-mini", label: "openai/gpt-5.1-codex-mini" },
];
export const models: Array<{ id: string; label: string }> = [];
export const agentConfigurationDoc = `# opencode_local agent configuration
@@ -26,8 +20,8 @@ Don't use when:
Core fields:
- cwd (string, optional): default absolute working directory fallback for the agent process (created if missing when possible)
- instructionsFilePath (string, optional): absolute path to a markdown instructions file prepended to the run prompt
- model (string, optional): OpenCode model id in provider/model format (for example openai/gpt-5.2-codex)
- variant (string, optional): provider-specific reasoning/profile variant passed as --variant
- model (string, required): OpenCode model id in provider/model format (for example anthropic/claude-sonnet-4-5)
- variant (string, optional): provider-specific model variant (for example minimal|low|medium|high|max)
- promptTemplate (string, optional): run prompt template
- command (string, optional): defaults to "opencode"
- extraArgs (string[], optional): additional CLI args
@@ -38,7 +32,9 @@ Operational fields:
- graceSec (number, optional): SIGTERM grace period in seconds
Notes:
- OpenCode supports multiple providers and models. Use \
\`opencode models\` to list available options in provider/model format.
- Paperclip requires an explicit \`model\` value for \`opencode_local\` agents.
- Runs are executed with: opencode run --format json ...
- Prompts are passed as the final positional message argument.
- Sessions are resumed with --session when stored session cwd matches current cwd.
`;

View File

@@ -16,8 +16,8 @@ import {
renderTemplate,
runChildProcess,
} from "@paperclipai/adapter-utils/server-utils";
import { DEFAULT_OPENCODE_LOCAL_MODEL } from "../index.js";
import { parseOpenCodeJsonl, isOpenCodeUnknownSessionError } from "./parse.js";
import { isOpenCodeUnknownSessionError, parseOpenCodeJsonl } from "./parse.js";
import { ensureOpenCodeModelConfiguredAndAvailable } from "./models.js";
const __moduleDir = path.dirname(fileURLToPath(import.meta.url));
const PAPERCLIP_SKILLS_CANDIDATES = [
@@ -34,81 +34,11 @@ function firstNonEmptyLine(text: string): string {
);
}
/**
 * Resolves the effective value for `key`: an explicit override entry wins
 * (even when empty), otherwise process.env is consulted; non-string values
 * resolve to "".
 */
function getEffectiveEnvValue(envOverrides: Record<string, string>, key: string): string {
  const source = Object.prototype.hasOwnProperty.call(envOverrides, key)
    ? envOverrides[key]
    : process.env[key];
  return typeof source === "string" ? source : "";
}
/** True when the effective env value for `key` is non-blank after trimming. */
function hasEffectiveEnvValue(envOverrides: Record<string, string>, key: string): boolean {
  const value = getEffectiveEnvValue(envOverrides, key);
  return value.trim().length > 0;
}
/** Billing-mode heuristic: an OPENAI_API_KEY in the effective env implies API billing. */
function resolveOpenCodeBillingType(env: Record<string, string>): "api" | "subscription" {
  if (hasEffectiveEnvValue(env, "OPENAI_API_KEY")) return "api";
  return "subscription";
}
function resolveProviderFromModel(model: string): string | null {
function parseModelProvider(model: string | null): string | null {
if (!model) return null;
const trimmed = model.trim();
if (!trimmed) return null;
const slash = trimmed.indexOf("/");
if (slash <= 0) return null;
return trimmed.slice(0, slash).toLowerCase();
}
/** Detects OpenCode's ProviderModelNotFoundError in combined stdout/stderr output. */
function isProviderModelNotFoundFailure(stdout: string, stderr: string): boolean {
  const combined = [stdout, stderr].join("\n");
  return /ProviderModelNotFoundError|provider model not found/i.test(combined);
}
/** Structured details extracted from a ProviderModelNotFoundError dump. */
type ProviderModelNotFoundDetails = {
  providerId: string | null;
  modelId: string | null;
  suggestions: string[];
};
/**
 * Parses provider/model/suggestion details out of an OpenCode
 * ProviderModelNotFoundError message, or returns null when the output does not
 * contain such a failure. Provider ids are lowercased; empty captures become null.
 */
function parseProviderModelNotFoundDetails(
  stdout: string,
  stderr: string,
): ProviderModelNotFoundDetails | null {
  if (!isProviderModelNotFoundFailure(stdout, stderr)) return null;
  const combined = `${stdout}\n${stderr}`;
  // Pull a single quoted capture for a field, or null when absent/blank.
  const readField = (pattern: RegExp): string | null => {
    const captured = combined.match(pattern)?.[1]?.trim();
    return captured ? captured : null;
  };
  const providerId = readField(/providerID:\s*"([^"]+)"/i);
  const modelId = readField(/modelID:\s*"([^"]+)"/i);
  const suggestionsBlock = combined.match(/suggestions:\s*\[([^\]]*)\]/i)?.[1] ?? "";
  const suggestions: string[] = [];
  for (const quoted of suggestionsBlock.matchAll(/"([^"]+)"/g)) {
    const entry = (quoted[1] ?? "").trim();
    if (entry) suggestions.push(entry);
  }
  return {
    providerId: providerId ? providerId.toLowerCase() : null,
    modelId,
    suggestions,
  };
}
function formatModelNotFoundError(
model: string,
providerFromModel: string | null,
details: ProviderModelNotFoundDetails | null,
): string {
const provider = details?.providerId || providerFromModel || "unknown";
const missingModel = details?.modelId || model;
const suggestions = details?.suggestions ?? [];
const suggestionText =
suggestions.length > 0 ? ` Suggested models: ${suggestions.map((value) => `\`${value}\``).join(", ")}.` : "";
return (
`OpenCode model \`${missingModel}\` is unavailable for provider \`${provider}\`.` +
` Run \`opencode models ${provider}\` and set adapterConfig.model to a supported value.` +
suggestionText
);
if (!trimmed.includes("/")) return null;
return trimmed.slice(0, trimmed.indexOf("/")).trim() || null;
}
function claudeSkillsHome(): string {
@@ -160,8 +90,8 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
"You are agent {{agent.id}} ({{agent.name}}). Continue your Paperclip work.",
);
const command = asString(config.command, "opencode");
const model = asString(config.model, DEFAULT_OPENCODE_LOCAL_MODEL);
const variant = asString(config.variant, asString(config.effort, ""));
const model = asString(config.model, "").trim();
const variant = asString(config.variant, "").trim();
const workspaceContext = parseObject(context.paperclipWorkspace);
const workspaceCwd = asString(workspaceContext.cwd, "");
@@ -209,52 +139,39 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
const linkedIssueIds = Array.isArray(context.issueIds)
? context.issueIds.filter((value): value is string => typeof value === "string" && value.trim().length > 0)
: [];
if (wakeTaskId) {
env.PAPERCLIP_TASK_ID = wakeTaskId;
}
if (wakeReason) {
env.PAPERCLIP_WAKE_REASON = wakeReason;
}
if (wakeCommentId) {
env.PAPERCLIP_WAKE_COMMENT_ID = wakeCommentId;
}
if (approvalId) {
env.PAPERCLIP_APPROVAL_ID = approvalId;
}
if (approvalStatus) {
env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
}
if (linkedIssueIds.length > 0) {
env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
}
if (effectiveWorkspaceCwd) {
env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
}
if (workspaceSource) {
env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
}
if (workspaceId) {
env.PAPERCLIP_WORKSPACE_ID = workspaceId;
}
if (workspaceRepoUrl) {
env.PAPERCLIP_WORKSPACE_REPO_URL = workspaceRepoUrl;
}
if (workspaceRepoRef) {
env.PAPERCLIP_WORKSPACE_REPO_REF = workspaceRepoRef;
}
if (workspaceHints.length > 0) {
env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
}
for (const [k, v] of Object.entries(envConfig)) {
if (typeof v === "string") env[k] = v;
if (wakeTaskId) env.PAPERCLIP_TASK_ID = wakeTaskId;
if (wakeReason) env.PAPERCLIP_WAKE_REASON = wakeReason;
if (wakeCommentId) env.PAPERCLIP_WAKE_COMMENT_ID = wakeCommentId;
if (approvalId) env.PAPERCLIP_APPROVAL_ID = approvalId;
if (approvalStatus) env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
if (linkedIssueIds.length > 0) env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
if (effectiveWorkspaceCwd) env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
if (workspaceSource) env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
if (workspaceId) env.PAPERCLIP_WORKSPACE_ID = workspaceId;
if (workspaceRepoUrl) env.PAPERCLIP_WORKSPACE_REPO_URL = workspaceRepoUrl;
if (workspaceRepoRef) env.PAPERCLIP_WORKSPACE_REPO_REF = workspaceRepoRef;
if (workspaceHints.length > 0) env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
for (const [key, value] of Object.entries(envConfig)) {
if (typeof value === "string") env[key] = value;
}
if (!hasExplicitApiKey && authToken) {
env.PAPERCLIP_API_KEY = authToken;
}
const billingType = resolveOpenCodeBillingType(env);
const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
const runtimeEnv = Object.fromEntries(
Object.entries(ensurePathInEnv({ ...process.env, ...env })).filter(
(entry): entry is [string, string] => typeof entry[1] === "string",
),
);
await ensureCommandResolvable(command, cwd, runtimeEnv);
await ensureOpenCodeModelConfiguredAndAvailable({
model,
command,
cwd,
env: runtimeEnv,
});
const timeoutSec = asNumber(config.timeoutSec, 0);
const graceSec = asNumber(config.graceSec, 20);
const extraArgs = (() => {
@@ -278,37 +195,41 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
}
const instructionsFilePath = asString(config.instructionsFilePath, "").trim();
const instructionsDir = instructionsFilePath ? `${path.dirname(instructionsFilePath)}/` : "";
const resolvedInstructionsFilePath = instructionsFilePath
? path.resolve(cwd, instructionsFilePath)
: "";
const instructionsDir = resolvedInstructionsFilePath ? `${path.dirname(resolvedInstructionsFilePath)}/` : "";
let instructionsPrefix = "";
if (instructionsFilePath) {
if (resolvedInstructionsFilePath) {
try {
const instructionsContents = await fs.readFile(instructionsFilePath, "utf8");
const instructionsContents = await fs.readFile(resolvedInstructionsFilePath, "utf8");
instructionsPrefix =
`${instructionsContents}\n\n` +
`The above agent instructions were loaded from ${instructionsFilePath}. ` +
`The above agent instructions were loaded from ${resolvedInstructionsFilePath}. ` +
`Resolve any relative file references from ${instructionsDir}.\n\n`;
await onLog(
"stderr",
`[paperclip] Loaded agent instructions file: ${instructionsFilePath}\n`,
`[paperclip] Loaded agent instructions file: ${resolvedInstructionsFilePath}\n`,
);
} catch (err) {
const reason = err instanceof Error ? err.message : String(err);
await onLog(
"stderr",
`[paperclip] Warning: could not read agent instructions file "${instructionsFilePath}": ${reason}\n`,
`[paperclip] Warning: could not read agent instructions file "${resolvedInstructionsFilePath}": ${reason}\n`,
);
}
}
const commandNotes = (() => {
if (!instructionsFilePath) return [] as string[];
if (!resolvedInstructionsFilePath) return [] as string[];
if (instructionsPrefix.length > 0) {
return [
`Loaded agent instructions from ${instructionsFilePath}`,
`Prepended instructions + path directive to prompt (relative references from ${instructionsDir}).`,
`Loaded agent instructions from ${resolvedInstructionsFilePath}`,
`Prepended instructions + path directive to stdin prompt (relative references from ${instructionsDir}).`,
];
}
return [
`Configured instructionsFilePath ${instructionsFilePath}, but file could not be read; continuing without injected instructions.`,
`Configured instructionsFilePath ${resolvedInstructionsFilePath}, but file could not be read; continuing without injected instructions.`,
];
})();
@@ -329,7 +250,6 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
if (model) args.push("--model", model);
if (variant) args.push("--variant", variant);
if (extraArgs.length > 0) args.push(...extraArgs);
args.push(prompt);
return args;
};
@@ -341,10 +261,7 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
command,
cwd,
commandNotes,
commandArgs: args.map((value, idx) => {
if (idx === args.length - 1) return `<prompt ${prompt.length} chars>`;
return value;
}),
commandArgs: [...args, `<stdin prompt ${prompt.length} chars>`],
env: redactEnvForLogs(env),
prompt,
context,
@@ -353,29 +270,23 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
const proc = await runChildProcess(runId, command, args, {
cwd,
env,
env: runtimeEnv,
stdin: prompt,
timeoutSec,
graceSec,
onLog,
});
return {
proc,
rawStderr: proc.stderr,
parsed: parseOpenCodeJsonl(proc.stdout),
};
};
const providerFromModel = resolveProviderFromModel(model);
const toResult = (
attempt: {
proc: {
exitCode: number | null;
signal: string | null;
timedOut: boolean;
stdout: string;
stderr: string;
};
proc: { exitCode: number | null; signal: string | null; timedOut: boolean; stdout: string; stderr: string };
rawStderr: string;
parsed: ReturnType<typeof parseOpenCodeJsonl>;
},
clearSessionOnMissingSession = false,
@@ -390,7 +301,9 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
};
}
const resolvedSessionId = attempt.parsed.sessionId ?? runtimeSessionId ?? runtime.sessionId ?? null;
const resolvedSessionId =
attempt.parsed.sessionId ??
(clearSessionOnMissingSession ? null : runtimeSessionId ?? runtime.sessionId ?? null);
const resolvedSessionParams = resolvedSessionId
? ({
sessionId: resolvedSessionId,
@@ -400,50 +313,54 @@ export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExec
...(workspaceRepoRef ? { repoRef: workspaceRepoRef } : {}),
} as Record<string, unknown>)
: null;
const parsedError = typeof attempt.parsed.errorMessage === "string" ? attempt.parsed.errorMessage.trim() : "";
const stderrLine = firstNonEmptyLine(attempt.proc.stderr);
const modelNotFound = parseProviderModelNotFoundDetails(attempt.proc.stdout, attempt.proc.stderr);
const fallbackErrorMessage = modelNotFound
? formatModelNotFoundError(model, providerFromModel, modelNotFound)
: parsedError ||
stderrLine ||
`OpenCode exited with code ${attempt.proc.exitCode ?? -1}`;
const rawExitCode = attempt.proc.exitCode;
const synthesizedExitCode = parsedError && (rawExitCode ?? 0) === 0 ? 1 : rawExitCode;
const fallbackErrorMessage =
parsedError ||
stderrLine ||
`OpenCode exited with code ${synthesizedExitCode ?? -1}`;
const modelId = model || null;
return {
exitCode: attempt.proc.exitCode,
exitCode: synthesizedExitCode,
signal: attempt.proc.signal,
timedOut: false,
errorMessage:
(attempt.proc.exitCode ?? 0) === 0
? null
: fallbackErrorMessage,
usage: attempt.parsed.usage,
errorMessage: (synthesizedExitCode ?? 0) === 0 ? null : fallbackErrorMessage,
usage: {
inputTokens: attempt.parsed.usage.inputTokens,
outputTokens: attempt.parsed.usage.outputTokens,
cachedInputTokens: attempt.parsed.usage.cachedInputTokens,
},
sessionId: resolvedSessionId,
sessionParams: resolvedSessionParams,
sessionDisplayId: resolvedSessionId,
provider: providerFromModel,
model,
billingType,
costUsd: attempt.parsed.costUsd,
provider: parseModelProvider(modelId),
model: modelId,
billingType: "unknown",
costUsd: attempt.parsed.usage.costUsd,
resultJson: {
stdout: attempt.proc.stdout,
stderr: attempt.proc.stderr,
},
summary: attempt.parsed.summary,
clearSession: Boolean(clearSessionOnMissingSession && !resolvedSessionId),
clearSession: Boolean(clearSessionOnMissingSession && !attempt.parsed.sessionId),
};
};
const initial = await runAttempt(sessionId);
const initialFailed =
!initial.proc.timedOut && ((initial.proc.exitCode ?? 0) !== 0 || Boolean(initial.parsed.errorMessage));
if (
sessionId &&
!initial.proc.timedOut &&
(initial.proc.exitCode ?? 0) !== 0 &&
isOpenCodeUnknownSessionError(initial.proc.stdout, initial.proc.stderr)
initialFailed &&
isOpenCodeUnknownSessionError(initial.proc.stdout, initial.rawStderr)
) {
await onLog(
"stderr",
`[paperclip] OpenCode resume session "${sessionId}" is unavailable; retrying with a fresh session.\n`,
`[paperclip] OpenCode session "${sessionId}" is unavailable; retrying with a fresh session.\n`,
);
const retry = await runAttempt(null);
return toResult(retry, true);

View File

@@ -1,6 +1,3 @@
export { execute } from "./execute.js";
export { testEnvironment } from "./test.js";
export { parseOpenCodeJsonl, isOpenCodeUnknownSessionError } from "./parse.js";
import type { AdapterSessionCodec } from "@paperclipai/adapter-utils";
function readNonEmptyString(value: unknown): string | null {
@@ -62,3 +59,13 @@ export const sessionCodec: AdapterSessionCodec = {
);
},
};
export { execute } from "./execute.js";
export { testEnvironment } from "./test.js";
export {
listOpenCodeModels,
discoverOpenCodeModels,
ensureOpenCodeModelConfiguredAndAvailable,
resetOpenCodeModelsCacheForTests,
} from "./models.js";
export { parseOpenCodeJsonl, isOpenCodeUnknownSessionError } from "./parse.js";

View File

@@ -0,0 +1,33 @@
import { afterEach, describe, expect, it } from "vitest";
import {
ensureOpenCodeModelConfiguredAndAvailable,
listOpenCodeModels,
resetOpenCodeModelsCacheForTests,
} from "./models.js";
describe("openCode models", () => {
afterEach(() => {
delete process.env.PAPERCLIP_OPENCODE_COMMAND;
resetOpenCodeModelsCacheForTests();
});
it("returns an empty list when discovery command is unavailable", async () => {
process.env.PAPERCLIP_OPENCODE_COMMAND = "__paperclip_missing_opencode_command__";
await expect(listOpenCodeModels()).resolves.toEqual([]);
});
it("rejects when model is missing", async () => {
await expect(
ensureOpenCodeModelConfiguredAndAvailable({ model: "" }),
).rejects.toThrow("OpenCode requires `adapterConfig.model`");
});
it("rejects when discovery cannot run for configured model", async () => {
process.env.PAPERCLIP_OPENCODE_COMMAND = "__paperclip_missing_opencode_command__";
await expect(
ensureOpenCodeModelConfiguredAndAvailable({
model: "openai/gpt-5",
}),
).rejects.toThrow("Failed to start command");
});
});

View File

@@ -0,0 +1,195 @@
import { createHash } from "node:crypto";
import type { AdapterModel } from "@paperclipai/adapter-utils";
import {
asString,
ensurePathInEnv,
runChildProcess,
} from "@paperclipai/adapter-utils/server-utils";
const MODELS_CACHE_TTL_MS = 60_000;
/**
 * Resolve which OpenCode CLI command to invoke.
 *
 * Returns `asString(input, fallback)`, where the fallback is the trimmed
 * PAPERCLIP_OPENCODE_COMMAND environment variable when it is a non-empty
 * string, and the literal "opencode" otherwise.
 */
function resolveOpenCodeCommand(input: unknown): string {
  const fromEnv = process.env.PAPERCLIP_OPENCODE_COMMAND;
  const fallback =
    typeof fromEnv === "string" && fromEnv.trim().length > 0 ? fromEnv.trim() : "opencode";
  return asString(input, fallback);
}
// Discovery results keyed by command + cwd + (hashed) non-volatile env entries.
const discoveryCache = new Map<string, { expiresAt: number; models: AdapterModel[] }>();
// Env keys excluded from the cache key: treated as volatile/per-session (see
// isVolatileEnvKey / discoveryCacheKey below).
const VOLATILE_ENV_KEY_PREFIXES = ["PAPERCLIP_", "npm_", "NPM_"] as const;
const VOLATILE_ENV_KEY_EXACT = new Set(["PWD", "OLDPWD", "SHLVL", "_", "TERM_SESSION_ID"]);
/**
 * Drop blank and duplicate model ids, keeping first-seen order.
 * Ids and labels are trimmed; an empty label falls back to the id.
 */
function dedupeModels(models: AdapterModel[]): AdapterModel[] {
  const byId = new Map<string, AdapterModel>();
  for (const entry of models) {
    const id = entry.id.trim();
    if (!id || byId.has(id)) continue;
    byId.set(id, { id, label: entry.label.trim() || id });
  }
  return [...byId.values()];
}
/**
 * Return a sorted copy ordered by id, using a numeric-aware, case-insensitive
 * English-locale comparison (the same options the original localeCompare call
 * used: { numeric: true, sensitivity: "base" }).
 */
function sortModels(models: AdapterModel[]): AdapterModel[] {
  const collator = new Intl.Collator("en", { numeric: true, sensitivity: "base" });
  return models.slice().sort((a, b) => collator.compare(a.id, b.id));
}
/** First trimmed non-empty line of `text`, or "" when every line is blank. */
function firstNonEmptyLine(text: string): string {
  for (const raw of text.split(/\r?\n/)) {
    const trimmed = raw.trim();
    if (trimmed) return trimmed;
  }
  return "";
}
/**
 * Parse `opencode models` stdout into AdapterModel entries.
 *
 * Only the first whitespace-delimited token of each line is considered; it
 * must contain "/" with non-empty provider and model halves, otherwise the
 * line is skipped. The result is deduped by id, with label mirroring the id.
 */
function parseModelsOutput(stdout: string): AdapterModel[] {
  const found: AdapterModel[] = [];
  for (const rawLine of stdout.split(/\r?\n/)) {
    const line = rawLine.trim();
    if (!line) continue;
    const token = line.split(/\s+/)[0]?.trim() ?? "";
    const slash = token.indexOf("/");
    if (slash < 0) continue;
    const provider = token.slice(0, slash).trim();
    const name = token.slice(slash + 1).trim();
    if (!provider || !name) continue;
    const id = `${provider}/${name}`;
    found.push({ id, label: id });
  }
  return dedupeModels(found);
}
/**
 * Coerce an unknown value into a string-to-string env map.
 * Non-objects, null, and arrays yield {}; non-string values are dropped.
 */
function normalizeEnv(input: unknown): Record<string, string> {
  if (typeof input !== "object" || input === null || Array.isArray(input)) {
    return {};
  }
  const result: Record<string, string> = {};
  for (const [key, value] of Object.entries(input as Record<string, unknown>)) {
    if (typeof value !== "string") continue;
    result[key] = value;
  }
  return result;
}
/** True when an env key should be excluded from the discovery cache key. */
function isVolatileEnvKey(key: string): boolean {
  return (
    VOLATILE_ENV_KEY_EXACT.has(key) ||
    VOLATILE_ENV_KEY_PREFIXES.some((prefix) => key.startsWith(prefix))
  );
}
/** Hex-encoded SHA-256 digest of `value`. */
function hashValue(value: string): string {
  const hasher = createHash("sha256");
  hasher.update(value);
  return hasher.digest("hex");
}
/**
 * Build a stable cache key from command, cwd, and the non-volatile env
 * entries. Entries are sorted by key, and values are hashed so raw env values
 * never appear in the key itself.
 */
function discoveryCacheKey(command: string, cwd: string, env: Record<string, string>) {
  const stable = Object.entries(env).filter(([key]) => !isVolatileEnvKey(key));
  stable.sort(([a], [b]) => a.localeCompare(b));
  const envPart = stable.map(([key, value]) => `${key}=${hashValue(value)}`).join("\n");
  return `${command}\n${cwd}\n${envPart}`;
}
/** Evict every cache entry whose expiry is at or before `now`. */
function pruneExpiredDiscoveryCache(now: number) {
  for (const [key, entry] of discoveryCache) {
    if (entry.expiresAt <= now) {
      discoveryCache.delete(key);
    }
  }
}
/**
 * Run `<command> models` and return the parsed, deduped, sorted model list.
 *
 * The child process runs with the current process env merged with the
 * caller's overrides (PATH ensured via ensurePathInEnv). Throws when the
 * probe times out or exits non-zero; the error detail is the first non-empty
 * stderr (or stdout) line when one exists.
 */
export async function discoverOpenCodeModels(input: {
  command?: unknown;
  cwd?: unknown;
  env?: unknown;
} = {}): Promise<AdapterModel[]> {
  const command = resolveOpenCodeCommand(input.command);
  const cwd = asString(input.cwd, process.cwd());
  const overrides = normalizeEnv(input.env);
  const childEnv = normalizeEnv(ensurePathInEnv({ ...process.env, ...overrides }));
  // Unique run id so concurrent discovery probes are distinguishable.
  const runId = `opencode-models-${Date.now()}-${Math.random().toString(16).slice(2)}`;
  const probe = await runChildProcess(runId, command, ["models"], {
    cwd,
    env: childEnv,
    timeoutSec: 20,
    graceSec: 3,
    onLog: async () => {},
  });
  if (probe.timedOut) {
    throw new Error("`opencode models` timed out.");
  }
  if ((probe.exitCode ?? 1) !== 0) {
    const detail = firstNonEmptyLine(probe.stderr) || firstNonEmptyLine(probe.stdout);
    throw new Error(detail ? `\`opencode models\` failed: ${detail}` : "`opencode models` failed.");
  }
  return sortModels(parseModelsOutput(probe.stdout));
}
/**
 * Cached wrapper around discoverOpenCodeModels.
 *
 * Results are memoized for MODELS_CACHE_TTL_MS, keyed by command, cwd, and
 * the non-volatile caller env overrides; expired entries are pruned on each
 * call before the lookup.
 */
export async function discoverOpenCodeModelsCached(input: {
  command?: unknown;
  cwd?: unknown;
  env?: unknown;
} = {}): Promise<AdapterModel[]> {
  const command = resolveOpenCodeCommand(input.command);
  const cwd = asString(input.cwd, process.cwd());
  const env = normalizeEnv(input.env);
  const now = Date.now();
  pruneExpiredDiscoveryCache(now);
  const cacheKey = discoveryCacheKey(command, cwd, env);
  const hit = discoveryCache.get(cacheKey);
  if (hit && hit.expiresAt > now) {
    return hit.models;
  }
  const models = await discoverOpenCodeModels({ command, cwd, env });
  discoveryCache.set(cacheKey, { expiresAt: now + MODELS_CACHE_TTL_MS, models });
  return models;
}
/**
 * Validate that a model is configured and currently offered by OpenCode.
 *
 * Throws when `input.model` is empty, when discovery yields no models at all,
 * or when the configured id is not among the discovered ones (the message
 * then lists up to 12 available ids). Returns the discovered list on success.
 */
export async function ensureOpenCodeModelConfiguredAndAvailable(input: {
  model?: unknown;
  command?: unknown;
  cwd?: unknown;
  env?: unknown;
}): Promise<AdapterModel[]> {
  const model = asString(input.model, "").trim();
  if (!model) {
    throw new Error("OpenCode requires `adapterConfig.model` in provider/model format.");
  }
  const models = await discoverOpenCodeModelsCached({
    command: input.command,
    cwd: input.cwd,
    env: input.env,
  });
  if (models.length === 0) {
    throw new Error("OpenCode returned no models. Run `opencode models` and verify provider auth.");
  }
  const isAvailable = models.some((entry) => entry.id === model);
  if (!isAvailable) {
    const sample = models
      .slice(0, 12)
      .map((entry) => entry.id)
      .join(", ");
    const suffix = models.length > 12 ? ", ..." : "";
    throw new Error(
      `Configured OpenCode model is unavailable: ${model}. Available models: ${sample}${suffix}`,
    );
  }
  return models;
}
/**
 * Best-effort model listing: delegates to the cached discovery path and
 * degrades to an empty list on any failure instead of propagating the error.
 */
export async function listOpenCodeModels(): Promise<AdapterModel[]> {
  return discoverOpenCodeModelsCached().catch(() => []);
}
// Test hook: empties the discovery cache so suites start from a cold cache.
export function resetOpenCodeModelsCacheForTests() {
  discoveryCache.clear();
}

View File

@@ -0,0 +1,50 @@
import { describe, expect, it } from "vitest";
import { parseOpenCodeJsonl, isOpenCodeUnknownSessionError } from "./parse.js";
describe("parseOpenCodeJsonl", () => {
it("parses assistant text, usage, cost, and errors", () => {
const stdout = [
JSON.stringify({
type: "text",
sessionID: "session_123",
part: { text: "Hello from OpenCode" },
}),
JSON.stringify({
type: "step_finish",
sessionID: "session_123",
part: {
reason: "done",
cost: 0.0025,
tokens: {
input: 120,
output: 40,
reasoning: 10,
cache: { read: 20, write: 0 },
},
},
}),
JSON.stringify({
type: "error",
sessionID: "session_123",
error: { message: "model unavailable" },
}),
].join("\n");
const parsed = parseOpenCodeJsonl(stdout);
expect(parsed.sessionId).toBe("session_123");
expect(parsed.summary).toBe("Hello from OpenCode");
expect(parsed.usage).toEqual({
inputTokens: 120,
cachedInputTokens: 20,
outputTokens: 50,
costUsd: 0.0025,
});
expect(parsed.errorMessage).toContain("model unavailable");
});
it("detects unknown session errors", () => {
expect(isOpenCodeUnknownSessionError("Session not found: s_123", "")).toBe(true);
expect(isOpenCodeUnknownSessionError("", "unknown session id")).toBe(true);
expect(isOpenCodeUnknownSessionError("all good", "")).toBe(false);
});
});

View File

@@ -1,10 +1,17 @@
import { asString, asNumber, parseObject, parseJson } from "@paperclipai/adapter-utils/server-utils";
import { asNumber, asString, parseJson, parseObject } from "@paperclipai/adapter-utils/server-utils";
function asErrorText(value: unknown): string {
function errorText(value: unknown): string {
if (typeof value === "string") return value;
const rec = parseObject(value);
const message = asString(rec.message, "") || asString(rec.error, "") || asString(rec.code, "");
const message = asString(rec.message, "").trim();
if (message) return message;
const data = parseObject(rec.data);
const nestedMessage = asString(data.message, "").trim();
if (nestedMessage) return nestedMessage;
const name = asString(rec.name, "").trim();
if (name) return name;
const code = asString(rec.code, "").trim();
if (code) return code;
try {
return JSON.stringify(rec);
} catch {
@@ -15,12 +22,12 @@ function asErrorText(value: unknown): string {
export function parseOpenCodeJsonl(stdout: string) {
let sessionId: string | null = null;
const messages: string[] = [];
let errorMessage: string | null = null;
let totalCostUsd = 0;
const errors: string[] = [];
const usage = {
inputTokens: 0,
cachedInputTokens: 0,
outputTokens: 0,
costUsd: 0,
};
for (const rawLine of stdout.split(/\r?\n/)) {
@@ -30,8 +37,8 @@ export function parseOpenCodeJsonl(stdout: string) {
const event = parseJson(line);
if (!event) continue;
const foundSession = asString(event.sessionID, "").trim();
if (foundSession) sessionId = foundSession;
const currentSessionId = asString(event.sessionID, "").trim();
if (currentSessionId) sessionId = currentSessionId;
const type = asString(event.type, "");
@@ -48,15 +55,25 @@ export function parseOpenCodeJsonl(stdout: string) {
const cache = parseObject(tokens.cache);
usage.inputTokens += asNumber(tokens.input, 0);
usage.cachedInputTokens += asNumber(cache.read, 0);
usage.outputTokens += asNumber(tokens.output, 0);
totalCostUsd += asNumber(part.cost, 0);
usage.outputTokens += asNumber(tokens.output, 0) + asNumber(tokens.reasoning, 0);
usage.costUsd += asNumber(part.cost, 0);
continue;
}
if (type === "tool_use") {
const part = parseObject(event.part);
const state = parseObject(part.state);
if (asString(state.status, "") === "error") {
const text = asString(state.error, "").trim();
if (text) errors.push(text);
}
continue;
}
if (type === "error") {
const part = parseObject(event.part);
const msg = asErrorText(event.message ?? part.message ?? event.error ?? part.error).trim();
if (msg) errorMessage = msg;
const text = errorText(event.error ?? event.message).trim();
if (text) errors.push(text);
continue;
}
}
@@ -64,8 +81,7 @@ export function parseOpenCodeJsonl(stdout: string) {
sessionId,
summary: messages.join("\n\n").trim(),
usage,
costUsd: totalCostUsd > 0 ? totalCostUsd : null,
errorMessage,
errorMessage: errors.length > 0 ? errors.join("\n") : null,
};
}
@@ -76,7 +92,7 @@ export function isOpenCodeUnknownSessionError(stdout: string, stderr: string): b
.filter(Boolean)
.join("\n");
return /unknown\s+session|session\s+.*\s+not\s+found|resource\s+not\s+found:.*[\\/]session[\\/].*\.json|notfounderror/i.test(
return /unknown\s+session|session\s+.*\s+not\s+found|resource\s+not\s+found:.*[\\/]session[\\/].*\.json|notfounderror|no session/i.test(
haystack,
);
}

View File

@@ -12,8 +12,7 @@ import {
ensurePathInEnv,
runChildProcess,
} from "@paperclipai/adapter-utils/server-utils";
import path from "node:path";
import { DEFAULT_OPENCODE_LOCAL_MODEL } from "../index.js";
import { discoverOpenCodeModels, ensureOpenCodeModelConfiguredAndAvailable } from "./models.js";
import { parseOpenCodeJsonl } from "./parse.js";
function summarizeStatus(checks: AdapterEnvironmentCheck[]): AdapterEnvironmentTestResult["status"] {
@@ -22,19 +21,6 @@ function summarizeStatus(checks: AdapterEnvironmentCheck[]): AdapterEnvironmentT
return "pass";
}
function isNonEmpty(value: unknown): value is string {
return typeof value === "string" && value.trim().length > 0;
}
function getEffectiveEnvValue(envOverrides: Record<string, string>, key: string): string {
if (Object.prototype.hasOwnProperty.call(envOverrides, key)) {
const raw = envOverrides[key];
return typeof raw === "string" ? raw : "";
}
const raw = process.env[key];
return typeof raw === "string" ? raw : "";
}
function firstNonEmptyLine(text: string): string {
return (
text
@@ -44,22 +30,25 @@ function firstNonEmptyLine(text: string): string {
);
}
function commandLooksLike(command: string, expected: string): boolean {
const base = path.basename(command).toLowerCase();
return base === expected || base === `${expected}.cmd` || base === `${expected}.exe`;
}
function summarizeProbeDetail(stdout: string, stderr: string, parsedError: string | null): string | null {
const raw = parsedError?.trim() || firstNonEmptyLine(stderr) || firstNonEmptyLine(stdout);
if (!raw) return null;
const clean = raw.replace(/\s+/g, " ").trim();
const max = 240;
return clean.length > max ? `${clean.slice(0, max - 1)}` : clean;
return clean.length > max ? `${clean.slice(0, max - 1)}...` : clean;
}
function normalizeEnv(input: unknown): Record<string, string> {
if (typeof input !== "object" || input === null || Array.isArray(input)) return {};
const env: Record<string, string> = {};
for (const [key, value] of Object.entries(input as Record<string, unknown>)) {
if (typeof value === "string") env[key] = value;
}
return env;
}
const OPENCODE_AUTH_REQUIRED_RE =
/(?:not\s+authenticated|authentication\s+required|unauthorized|forbidden|api(?:[_\s-]?key)?(?:\s+is)?\s+required|missing\s+api(?:[_\s-]?key)?|openai[_\s-]?api[_\s-]?key|provider\s+credentials|login\s+required)/i;
const OPENCODE_MODEL_NOT_FOUND_RE = /ProviderModelNotFoundError|provider\s+model\s+not\s+found/i;
/(?:auth(?:entication)?\s+required|api\s*key|invalid\s*api\s*key|not\s+logged\s+in|opencode\s+auth\s+login|free\s+usage\s+exceeded)/i;
export async function testEnvironment(
ctx: AdapterEnvironmentTestContext,
@@ -70,7 +59,7 @@ export async function testEnvironment(
const cwd = asString(config.cwd, process.cwd());
try {
await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
await ensureAbsoluteDirectory(cwd, { createIfMissing: false });
checks.push({
code: "opencode_cwd_valid",
level: "info",
@@ -90,100 +79,138 @@ export async function testEnvironment(
for (const [key, value] of Object.entries(envConfig)) {
if (typeof value === "string") env[key] = value;
}
const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
try {
await ensureCommandResolvable(command, cwd, runtimeEnv);
const runtimeEnv = normalizeEnv(ensurePathInEnv({ ...process.env, ...env }));
const cwdInvalid = checks.some((check) => check.code === "opencode_cwd_invalid");
if (cwdInvalid) {
checks.push({
code: "opencode_command_resolvable",
level: "info",
message: `Command is executable: ${command}`,
});
} catch (err) {
checks.push({
code: "opencode_command_unresolvable",
level: "error",
message: err instanceof Error ? err.message : "Command is not executable",
code: "opencode_command_skipped",
level: "warn",
message: "Skipped command check because working directory validation failed.",
detail: command,
});
}
const configDefinesOpenAiKey = Object.prototype.hasOwnProperty.call(env, "OPENAI_API_KEY");
const effectiveOpenAiKey = getEffectiveEnvValue(env, "OPENAI_API_KEY");
if (isNonEmpty(effectiveOpenAiKey)) {
const source = configDefinesOpenAiKey ? "adapter config env" : "server environment";
checks.push({
code: "opencode_openai_api_key_present",
level: "info",
message: "OPENAI_API_KEY is set for OpenCode authentication.",
detail: `Detected in ${source}.`,
});
} else {
checks.push({
code: "opencode_openai_api_key_missing",
level: "warn",
message: "OPENAI_API_KEY is not set. OpenCode runs may fail until authentication is configured.",
hint: configDefinesOpenAiKey
? "adapterConfig.env defines OPENAI_API_KEY but it is empty. Set a non-empty value or remove the override."
: "Set OPENAI_API_KEY in adapter env/shell, or authenticate with `opencode auth login`.",
});
try {
await ensureCommandResolvable(command, cwd, runtimeEnv);
checks.push({
code: "opencode_command_resolvable",
level: "info",
message: `Command is executable: ${command}`,
});
} catch (err) {
checks.push({
code: "opencode_command_unresolvable",
level: "error",
message: err instanceof Error ? err.message : "Command is not executable",
detail: command,
});
}
}
const canRunProbe =
checks.every((check) => check.code !== "opencode_cwd_invalid" && check.code !== "opencode_command_unresolvable");
let modelValidationPassed = false;
if (canRunProbe) {
if (!commandLooksLike(command, "opencode")) {
try {
const discovered = await discoverOpenCodeModels({ command, cwd, env: runtimeEnv });
if (discovered.length > 0) {
checks.push({
code: "opencode_models_discovered",
level: "info",
message: `Discovered ${discovered.length} model(s) from OpenCode providers.`,
});
} else {
checks.push({
code: "opencode_models_empty",
level: "error",
message: "OpenCode returned no models.",
hint: "Run `opencode models` and verify provider authentication.",
});
}
} catch (err) {
checks.push({
code: "opencode_hello_probe_skipped_custom_command",
level: "info",
message: "Skipped hello probe because command is not `opencode`.",
detail: command,
hint: "Use the `opencode` CLI command to run the automatic installation and auth probe.",
code: "opencode_models_discovery_failed",
level: "error",
message: err instanceof Error ? err.message : "OpenCode model discovery failed.",
hint: "Run `opencode models` manually to verify provider auth and config.",
});
} else {
const model = asString(config.model, DEFAULT_OPENCODE_LOCAL_MODEL).trim();
const variant = asString(config.variant, asString(config.effort, "")).trim();
const extraArgs = (() => {
const fromExtraArgs = asStringArray(config.extraArgs);
if (fromExtraArgs.length > 0) return fromExtraArgs;
return asStringArray(config.args);
})();
}
}
const args = ["run", "--format", "json"];
if (model) args.push("--model", model);
if (variant) args.push("--variant", variant);
if (extraArgs.length > 0) args.push(...extraArgs);
args.push("Respond with hello.");
const configuredModel = asString(config.model, "").trim();
if (!configuredModel) {
checks.push({
code: "opencode_model_required",
level: "error",
message: "OpenCode requires a configured model in provider/model format.",
hint: "Set adapterConfig.model using an ID from `opencode models`.",
});
} else if (canRunProbe) {
try {
await ensureOpenCodeModelConfiguredAndAvailable({
model: configuredModel,
command,
cwd,
env: runtimeEnv,
});
checks.push({
code: "opencode_model_configured",
level: "info",
message: `Configured model: ${configuredModel}`,
});
modelValidationPassed = true;
} catch (err) {
checks.push({
code: "opencode_model_invalid",
level: "error",
message: err instanceof Error ? err.message : "Configured model is unavailable.",
hint: "Run `opencode models` and choose a currently available provider/model ID.",
});
}
}
if (canRunProbe && modelValidationPassed) {
const extraArgs = (() => {
const fromExtraArgs = asStringArray(config.extraArgs);
if (fromExtraArgs.length > 0) return fromExtraArgs;
return asStringArray(config.args);
})();
const variant = asString(config.variant, "").trim();
const probeModel = configuredModel;
const args = ["run", "--format", "json"];
args.push("--model", probeModel);
if (variant) args.push("--variant", variant);
if (extraArgs.length > 0) args.push(...extraArgs);
try {
const probe = await runChildProcess(
`opencode-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`,
command,
args,
{
cwd,
env,
timeoutSec: 45,
env: runtimeEnv,
timeoutSec: 60,
graceSec: 5,
stdin: "Respond with hello.",
onLog: async () => {},
},
);
const parsed = parseOpenCodeJsonl(probe.stdout);
const detail = summarizeProbeDetail(probe.stdout, probe.stderr, parsed.errorMessage);
const authEvidence = `${parsed.errorMessage ?? ""}\n${probe.stdout}\n${probe.stderr}`.trim();
const modelNotFound = OPENCODE_MODEL_NOT_FOUND_RE.test(authEvidence);
const modelProvider = (() => {
const slash = model.indexOf("/");
if (slash <= 0) return "openai";
return model.slice(0, slash).toLowerCase();
})();
if (probe.timedOut) {
checks.push({
code: "opencode_hello_probe_timed_out",
level: "warn",
message: "OpenCode hello probe timed out.",
hint: "Retry the probe. If this persists, verify `opencode run --format json \"Respond with hello\"` manually.",
hint: "Retry the probe. If this persists, run OpenCode manually in this working directory.",
});
} else if ((probe.exitCode ?? 1) === 0) {
} else if ((probe.exitCode ?? 1) === 0 && !parsed.errorMessage) {
const summary = parsed.summary.trim();
const hasHello = /\bhello\b/i.test(summary);
checks.push({
@@ -196,24 +223,16 @@ export async function testEnvironment(
...(hasHello
? {}
: {
hint: "Try `opencode run --format json \"Respond with hello\"` manually to inspect full output.",
hint: "Run `opencode run --format json` manually and prompt `Respond with hello` to inspect output.",
}),
});
} else if (modelNotFound) {
checks.push({
code: "opencode_hello_probe_model_unavailable",
level: "warn",
message: `OpenCode could not run model \`${model}\`.`,
...(detail ? { detail } : {}),
hint: `Run \`opencode models ${modelProvider}\` and set adapterConfig.model to one of the available models.`,
});
} else if (OPENCODE_AUTH_REQUIRED_RE.test(authEvidence)) {
checks.push({
code: "opencode_hello_probe_auth_required",
level: "warn",
message: "OpenCode CLI is installed, but authentication is not ready.",
message: "OpenCode is installed, but provider authentication is not ready.",
...(detail ? { detail } : {}),
hint: "Configure OPENAI_API_KEY in adapter env/shell, then retry the probe.",
hint: "Run `opencode auth login` or set provider credentials, then retry the probe.",
});
} else {
checks.push({
@@ -221,9 +240,17 @@ export async function testEnvironment(
level: "error",
message: "OpenCode hello probe failed.",
...(detail ? { detail } : {}),
hint: "Run `opencode run --format json \"Respond with hello\"` manually in this working directory to debug.",
hint: "Run `opencode run --format json` manually in this working directory to debug.",
});
}
} catch (err) {
checks.push({
code: "opencode_hello_probe_failed",
level: "error",
message: "OpenCode hello probe failed.",
detail: err instanceof Error ? err.message : String(err),
hint: "Run `opencode run --format json` manually in this working directory to debug.",
});
}
}

View File

@@ -1,5 +1,4 @@
import type { CreateConfigValues } from "@paperclipai/adapter-utils";
import { DEFAULT_OPENCODE_LOCAL_MODEL } from "../index.js";
function parseCommaArgs(value: string): string[] {
return value
@@ -56,10 +55,12 @@ export function buildOpenCodeLocalConfig(v: CreateConfigValues): Record<string,
if (v.cwd) ac.cwd = v.cwd;
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
ac.model = v.model || DEFAULT_OPENCODE_LOCAL_MODEL;
if (v.model) ac.model = v.model;
if (v.thinkingEffort) ac.variant = v.thinkingEffort;
// OpenCode sessions can run until the CLI exits naturally; keep timeout disabled (0)
// and rely on graceSec for termination handling when a timeout is configured elsewhere.
ac.timeoutSec = 0;
ac.graceSec = 15;
ac.graceSec = 20;
const env = parseEnvBindings(v.envBindings);
const legacy = parseEnvVars(v.envVars);
for (const [key, value] of Object.entries(legacy)) {

View File

@@ -21,26 +21,57 @@ function asNumber(value: unknown, fallback = 0): number {
return typeof value === "number" && Number.isFinite(value) ? value : fallback;
}
function stringifyUnknown(value: unknown): string {
function errorText(value: unknown): string {
if (typeof value === "string") return value;
if (value === null || value === undefined) return "";
const rec = asRecord(value);
if (!rec) return "";
const data = asRecord(rec.data);
const msg =
asString(rec.message) ||
asString(data?.message) ||
asString(rec.name) ||
"";
if (msg) return msg;
try {
return JSON.stringify(value, null, 2);
return JSON.stringify(rec);
} catch {
return String(value);
return "";
}
}
function isJsonLike(text: string): boolean {
const trimmed = text.trim();
if (!trimmed) return false;
if (!(trimmed.startsWith("{") || trimmed.startsWith("["))) return false;
try {
JSON.parse(trimmed);
return true;
} catch {
return false;
}
function parseToolUse(parsed: Record<string, unknown>, ts: string): TranscriptEntry[] {
const part = asRecord(parsed.part);
if (!part) return [{ kind: "system", ts, text: "tool event" }];
const toolName = asString(part.tool, "tool");
const state = asRecord(part.state);
const input = state?.input ?? {};
const callEntry: TranscriptEntry = {
kind: "tool_call",
ts,
name: toolName,
input,
};
const status = asString(state?.status);
if (status !== "completed" && status !== "error") return [callEntry];
const output =
asString(state?.output) ||
asString(state?.error) ||
asString(part.title) ||
`${toolName} ${status}`;
return [
callEntry,
{
kind: "tool_result",
ts,
toolUseId: asString(part.id, toolName),
content: output,
isError: status === "error",
},
];
}
export function parseOpenCodeStdoutLine(line: string, ts: string): TranscriptEntry[] {
@@ -51,6 +82,24 @@ export function parseOpenCodeStdoutLine(line: string, ts: string): TranscriptEnt
const type = asString(parsed.type);
if (type === "text") {
const part = asRecord(parsed.part);
const text = asString(part?.text).trim();
if (!text) return [];
return [{ kind: "assistant", ts, text }];
}
if (type === "reasoning") {
const part = asRecord(parsed.part);
const text = asString(part?.text).trim();
if (!text) return [];
return [{ kind: "thinking", ts, text }];
}
if (type === "tool_use") {
return parseToolUse(parsed, ts);
}
if (type === "step_start") {
const sessionId = asString(parsed.sessionID);
return [
@@ -62,93 +111,31 @@ export function parseOpenCodeStdoutLine(line: string, ts: string): TranscriptEnt
];
}
if (type === "text") {
const part = asRecord(parsed.part);
const text = asString(part?.text).trim();
if (!text) return [];
return [{ kind: "assistant", ts, text }];
}
if (type === "tool_use") {
const part = asRecord(parsed.part);
const toolUseId = asString(part?.callID, asString(part?.id, "tool_use"));
const toolName = asString(part?.tool, "tool");
const state = asRecord(part?.state);
const input = state?.input ?? {};
const output = asString(state?.output).trim();
const status = asString(state?.status).trim();
const exitCode = asNumber(asRecord(state?.metadata)?.exit, NaN);
const isError =
status === "failed" ||
status === "error" ||
status === "cancelled" ||
(Number.isFinite(exitCode) && exitCode !== 0);
const entries: TranscriptEntry[] = [
{
kind: "tool_call",
ts,
name: toolName,
input,
},
];
if (status || output) {
const lines: string[] = [];
if (status) lines.push(`status: ${status}`);
if (Number.isFinite(exitCode)) lines.push(`exit: ${exitCode}`);
if (output) {
if (lines.length > 0) lines.push("");
if (isJsonLike(output)) {
try {
lines.push(JSON.stringify(JSON.parse(output), null, 2));
} catch {
lines.push(output);
}
} else {
lines.push(output);
}
}
entries.push({
kind: "tool_result",
ts,
toolUseId,
content: lines.join("\n").trim() || "tool completed",
isError,
});
}
return entries;
}
if (type === "step_finish") {
const part = asRecord(parsed.part);
const tokens = asRecord(part?.tokens);
const cache = asRecord(tokens?.cache);
const reason = asString(part?.reason);
const reason = asString(part?.reason, "step");
const output = asNumber(tokens?.output, 0) + asNumber(tokens?.reasoning, 0);
return [
{
kind: "result",
ts,
text: reason,
inputTokens: asNumber(tokens?.input),
outputTokens: asNumber(tokens?.output),
cachedTokens: asNumber(cache?.read),
costUsd: asNumber(part?.cost),
subtype: reason || "step_finish",
isError: reason === "error" || reason === "failed",
inputTokens: asNumber(tokens?.input, 0),
outputTokens: output,
cachedTokens: asNumber(cache?.read, 0),
costUsd: asNumber(part?.cost, 0),
subtype: reason,
isError: false,
errors: [],
},
];
}
if (type === "error") {
const message =
asString(parsed.message) ||
asString(asRecord(parsed.part)?.message) ||
stringifyUnknown(parsed.error ?? asRecord(parsed.part)?.error) ||
line;
return [{ kind: "stderr", ts, text: message }];
const text = errorText(parsed.error ?? parsed.message);
return [{ kind: "stderr", ts, text: text || line }];
}
return [{ kind: "stdout", ts, text: line }];

View File

@@ -0,0 +1,7 @@
import { defineConfig } from "vitest/config";
export default defineConfig({
test: {
environment: "node",
},
});

View File

@@ -38,6 +38,7 @@
"postgres": "^3.4.5"
},
"devDependencies": {
"@types/node": "^24.6.0",
"drizzle-kit": "^0.31.9",
"tsx": "^4.19.2",
"typescript": "^5.7.3",

View File

@@ -0,0 +1,2 @@
CREATE INDEX "issue_comments_company_issue_created_at_idx" ON "issue_comments" USING btree ("company_id","issue_id","created_at");--> statement-breakpoint
CREATE INDEX "issue_comments_company_author_issue_created_at_idx" ON "issue_comments" USING btree ("company_id","author_user_id","issue_id","created_at");

View File

@@ -0,0 +1,15 @@
CREATE TABLE "issue_read_states" (
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
"company_id" uuid NOT NULL,
"issue_id" uuid NOT NULL,
"user_id" text NOT NULL,
"last_read_at" timestamp with time zone DEFAULT now() NOT NULL,
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
);
--> statement-breakpoint
ALTER TABLE "issue_read_states" ADD CONSTRAINT "issue_read_states_company_id_companies_id_fk" FOREIGN KEY ("company_id") REFERENCES "public"."companies"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "issue_read_states" ADD CONSTRAINT "issue_read_states_issue_id_issues_id_fk" FOREIGN KEY ("issue_id") REFERENCES "public"."issues"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
CREATE INDEX "issue_read_states_company_issue_idx" ON "issue_read_states" USING btree ("company_id","issue_id");--> statement-breakpoint
CREATE INDEX "issue_read_states_company_user_idx" ON "issue_read_states" USING btree ("company_id","user_id");--> statement-breakpoint
CREATE UNIQUE INDEX "issue_read_states_company_issue_user_idx" ON "issue_read_states" USING btree ("company_id","issue_id","user_id");

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -169,6 +169,20 @@
"when": 1772139727599,
"tag": "0023_fair_lethal_legion",
"breakpoints": true
},
{
"idx": 24,
"version": "7",
"when": 1772806603601,
"tag": "0024_far_beast",
"breakpoints": true
},
{
"idx": 25,
"version": "7",
"when": 1772807461603,
"tag": "0025_nasty_salo",
"breakpoints": true
}
]
}

View File

@@ -20,6 +20,7 @@ export { labels } from "./labels.js";
export { issueLabels } from "./issue_labels.js";
export { issueApprovals } from "./issue_approvals.js";
export { issueComments } from "./issue_comments.js";
export { issueReadStates } from "./issue_read_states.js";
export { assets } from "./assets.js";
export { issueAttachments } from "./issue_attachments.js";
export { heartbeatRuns } from "./heartbeat_runs.js";

View File

@@ -18,5 +18,16 @@ export const issueComments = pgTable(
(table) => ({
issueIdx: index("issue_comments_issue_idx").on(table.issueId),
companyIdx: index("issue_comments_company_idx").on(table.companyId),
companyIssueCreatedAtIdx: index("issue_comments_company_issue_created_at_idx").on(
table.companyId,
table.issueId,
table.createdAt,
),
companyAuthorIssueCreatedAtIdx: index("issue_comments_company_author_issue_created_at_idx").on(
table.companyId,
table.authorUserId,
table.issueId,
table.createdAt,
),
}),
);

View File

@@ -0,0 +1,25 @@
import { pgTable, uuid, text, timestamp, index, uniqueIndex } from "drizzle-orm/pg-core";
import { companies } from "./companies.js";
import { issues } from "./issues.js";
export const issueReadStates = pgTable(
"issue_read_states",
{
id: uuid("id").primaryKey().defaultRandom(),
companyId: uuid("company_id").notNull().references(() => companies.id),
issueId: uuid("issue_id").notNull().references(() => issues.id),
userId: text("user_id").notNull(),
lastReadAt: timestamp("last_read_at", { withTimezone: true }).notNull().defaultNow(),
createdAt: timestamp("created_at", { withTimezone: true }).notNull().defaultNow(),
updatedAt: timestamp("updated_at", { withTimezone: true }).notNull().defaultNow(),
},
(table) => ({
companyIssueIdx: index("issue_read_states_company_issue_idx").on(table.companyId, table.issueId),
companyUserIdx: index("issue_read_states_company_user_idx").on(table.companyId, table.userId),
companyIssueUserUnique: uniqueIndex("issue_read_states_company_issue_user_idx").on(
table.companyId,
table.issueId,
table.userId,
),
}),
);

View File

@@ -21,7 +21,15 @@ export const AGENT_STATUSES = [
] as const;
export type AgentStatus = (typeof AGENT_STATUSES)[number];
export const AGENT_ADAPTER_TYPES = ["process", "http", "claude_local", "codex_local", "opencode_local", "cursor", "openclaw"] as const;
export const AGENT_ADAPTER_TYPES = [
"process",
"http",
"claude_local",
"codex_local",
"opencode_local",
"cursor",
"openclaw",
] as const;
export type AgentAdapterType = (typeof AGENT_ADAPTER_TYPES)[number];
export const AGENT_ROLES = [

View File

@@ -82,6 +82,9 @@ export interface Issue {
project?: Project | null;
goal?: Goal | null;
mentionedProjects?: Project[];
myLastTouchAt?: Date | null;
lastExternalCommentAt?: Date | null;
isUnreadForMe?: boolean;
createdAt: Date;
updatedAt: Date;
}

View File

@@ -9,8 +9,8 @@ import {
export const createCompanyInviteSchema = z.object({
allowedJoinTypes: z.enum(INVITE_JOIN_TYPES).default("both"),
expiresInHours: z.number().int().min(1).max(24 * 30).optional().default(72),
defaultsPayload: z.record(z.string(), z.unknown()).optional().nullable(),
agentMessage: z.string().max(4000).optional().nullable(),
});
export type CreateCompanyInvite = z.infer<typeof createCompanyInviteSchema>;
@@ -21,6 +21,12 @@ export const acceptInviteSchema = z.object({
adapterType: z.enum(AGENT_ADAPTER_TYPES).optional(),
capabilities: z.string().max(4000).optional().nullable(),
agentDefaultsPayload: z.record(z.string(), z.unknown()).optional().nullable(),
// OpenClaw join compatibility fields accepted at top level.
responsesWebhookUrl: z.string().max(4000).optional().nullable(),
responsesWebhookMethod: z.string().max(32).optional().nullable(),
responsesWebhookHeaders: z.record(z.string(), z.unknown()).optional().nullable(),
paperclipApiUrl: z.string().max(4000).optional().nullable(),
webhookAuthHeader: z.string().max(4000).optional().nullable(),
});
export type AcceptInvite = z.infer<typeof acceptInviteSchema>;

163
pnpm-lock.yaml generated
View File

@@ -78,6 +78,9 @@ importers:
packages/adapter-utils:
devDependencies:
'@types/node':
specifier: ^24.6.0
version: 24.12.0
typescript:
specifier: ^5.7.3
version: 5.9.3
@@ -91,6 +94,9 @@ importers:
specifier: ^1.1.1
version: 1.1.1
devDependencies:
'@types/node':
specifier: ^24.6.0
version: 24.12.0
typescript:
specifier: ^5.7.3
version: 5.9.3
@@ -104,6 +110,9 @@ importers:
specifier: ^1.1.1
version: 1.1.1
devDependencies:
'@types/node':
specifier: ^24.6.0
version: 24.12.0
typescript:
specifier: ^5.7.3
version: 5.9.3
@@ -130,6 +139,9 @@ importers:
specifier: ^1.1.1
version: 1.1.1
devDependencies:
'@types/node':
specifier: ^24.6.0
version: 24.12.0
typescript:
specifier: ^5.7.3
version: 5.9.3
@@ -143,6 +155,9 @@ importers:
specifier: ^1.1.1
version: 1.1.1
devDependencies:
'@types/node':
specifier: ^22.12.0
version: 22.19.11
typescript:
specifier: ^5.7.3
version: 5.9.3
@@ -159,6 +174,9 @@ importers:
specifier: ^3.4.5
version: 3.4.8
devDependencies:
'@types/node':
specifier: ^24.6.0
version: 24.12.0
drizzle-kit:
specifier: ^0.31.9
version: 0.31.9
@@ -170,7 +188,7 @@ importers:
version: 5.9.3
vitest:
specifier: ^3.0.5
version: 3.2.4(@types/debug@4.1.12)(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
version: 3.2.4(@types/debug@4.1.12)(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
packages/shared:
dependencies:
@@ -213,7 +231,7 @@ importers:
version: link:../packages/shared
better-auth:
specifier: 1.4.18
version: 1.4.18(drizzle-kit@0.31.9)(drizzle-orm@0.38.4(@electric-sql/pglite@0.3.15)(@types/react@19.2.14)(kysely@0.28.11)(pg@8.18.0)(postgres@3.4.8)(react@19.2.4))(pg@8.18.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@3.2.4(@types/debug@4.1.12)(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0))
version: 1.4.18(drizzle-kit@0.31.9)(drizzle-orm@0.38.4(@electric-sql/pglite@0.3.15)(@types/react@19.2.14)(kysely@0.28.11)(pg@8.18.0)(postgres@3.4.8)(react@19.2.4))(pg@8.18.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@3.2.4(@types/debug@4.1.12)(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0))
detect-port:
specifier: ^2.1.0
version: 2.1.0
@@ -260,9 +278,15 @@ importers:
'@types/multer':
specifier: ^2.0.0
version: 2.0.0
'@types/node':
specifier: ^24.6.0
version: 24.12.0
'@types/supertest':
specifier: ^6.0.2
version: 6.0.3
'@types/ws':
specifier: ^8.18.1
version: 8.18.1
supertest:
specifier: ^7.0.0
version: 7.2.2
@@ -274,10 +298,10 @@ importers:
version: 5.9.3
vite:
specifier: ^6.1.0
version: 6.4.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
version: 6.4.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
vitest:
specifier: ^3.0.5
version: 3.2.4(@types/debug@4.1.12)(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
version: 3.2.4(@types/debug@4.1.12)(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
ui:
dependencies:
@@ -2816,6 +2840,9 @@ packages:
'@types/node@22.19.11':
resolution: {integrity: sha512-BH7YwL6rA93ReqeQS1c4bsPpcfOmJasG+Fkr6Y59q83f9M1WcBRHR2vM+P9eOisYRcN3ujQoiZY8uk5W+1WL8w==}
'@types/node@24.12.0':
resolution: {integrity: sha512-GYDxsZi3ChgmckRT9HPU0WEhKLP08ev/Yfcq2AstjrDASOYCSXeyjDsHg4v5t4jOj7cyDX3vmprafKlWIG9MXQ==}
'@types/node@25.2.3':
resolution: {integrity: sha512-m0jEgYlYz+mDJZ2+F4v8D1AyQb+QzsNqRuI7xg1VQX/KlKS0qT9r1Mo16yo5F/MtifXFgaofIFsdFMox2SxIbQ==}
@@ -2851,6 +2878,9 @@ packages:
'@types/unist@3.0.3':
resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==}
'@types/ws@8.18.1':
resolution: {integrity: sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==}
'@ungap/structured-clone@1.3.0':
resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==}
@@ -8132,7 +8162,7 @@ snapshots:
'@types/body-parser@1.19.6':
dependencies:
'@types/connect': 3.4.38
'@types/node': 25.2.3
'@types/node': 24.12.0
'@types/chai@5.2.3':
dependencies:
@@ -8141,7 +8171,7 @@ snapshots:
'@types/connect@3.4.38':
dependencies:
'@types/node': 25.2.3
'@types/node': 24.12.0
'@types/cookiejar@2.1.5': {}
@@ -8159,7 +8189,7 @@ snapshots:
'@types/express-serve-static-core@5.1.1':
dependencies:
'@types/node': 25.2.3
'@types/node': 24.12.0
'@types/qs': 6.14.0
'@types/range-parser': 1.2.7
'@types/send': 1.2.1
@@ -8194,6 +8224,10 @@ snapshots:
dependencies:
undici-types: 6.21.0
'@types/node@24.12.0':
dependencies:
undici-types: 7.16.0
'@types/node@25.2.3':
dependencies:
undici-types: 7.16.0
@@ -8212,18 +8246,18 @@ snapshots:
'@types/send@1.2.1':
dependencies:
'@types/node': 25.2.3
'@types/node': 24.12.0
'@types/serve-static@2.2.0':
dependencies:
'@types/http-errors': 2.0.5
'@types/node': 25.2.3
'@types/node': 24.12.0
'@types/superagent@8.1.9':
dependencies:
'@types/cookiejar': 2.1.5
'@types/methods': 1.1.4
'@types/node': 25.2.3
'@types/node': 24.12.0
form-data: 4.0.5
'@types/supertest@6.0.3':
@@ -8235,6 +8269,10 @@ snapshots:
'@types/unist@3.0.3': {}
'@types/ws@8.18.1':
dependencies:
'@types/node': 24.12.0
'@ungap/structured-clone@1.3.0': {}
'@vitejs/plugin-react@4.7.0(vite@6.4.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0))':
@@ -8257,6 +8295,14 @@ snapshots:
chai: 5.3.3
tinyrainbow: 2.0.0
'@vitest/mocker@3.2.4(vite@7.3.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0))':
dependencies:
'@vitest/spy': 3.2.4
estree-walker: 3.0.3
magic-string: 0.30.21
optionalDependencies:
vite: 7.3.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
'@vitest/mocker@3.2.4(vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0))':
dependencies:
'@vitest/spy': 3.2.4
@@ -8340,7 +8386,7 @@ snapshots:
baseline-browser-mapping@2.9.19: {}
better-auth@1.4.18(drizzle-kit@0.31.9)(drizzle-orm@0.38.4(@electric-sql/pglite@0.3.15)(@types/react@19.2.14)(kysely@0.28.11)(pg@8.18.0)(postgres@3.4.8)(react@19.2.4))(pg@8.18.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@3.2.4(@types/debug@4.1.12)(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)):
better-auth@1.4.18(drizzle-kit@0.31.9)(drizzle-orm@0.38.4(@electric-sql/pglite@0.3.15)(@types/react@19.2.14)(kysely@0.28.11)(pg@8.18.0)(postgres@3.4.8)(react@19.2.4))(pg@8.18.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(vitest@3.2.4(@types/debug@4.1.12)(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)):
dependencies:
'@better-auth/core': 1.4.18(@better-auth/utils@0.3.0)(@better-fetch/fetch@1.1.21)(better-call@1.1.8(zod@3.25.76))(jose@6.1.3)(kysely@0.28.11)(nanostores@1.1.0)
'@better-auth/telemetry': 1.4.18(@better-auth/core@1.4.18(@better-auth/utils@0.3.0)(@better-fetch/fetch@1.1.21)(better-call@1.1.8(zod@3.25.76))(jose@6.1.3)(kysely@0.28.11)(nanostores@1.1.0))
@@ -8360,7 +8406,7 @@ snapshots:
pg: 8.18.0
react: 19.2.4
react-dom: 19.2.4(react@19.2.4)
vitest: 3.2.4(@types/debug@4.1.12)(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
vitest: 3.2.4(@types/debug@4.1.12)(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
better-call@1.1.8(zod@4.3.6):
dependencies:
@@ -10601,6 +10647,27 @@ snapshots:
'@types/unist': 3.0.3
vfile-message: 4.0.3
vite-node@3.2.4(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0):
dependencies:
cac: 6.7.14
debug: 4.4.3
es-module-lexer: 1.7.0
pathe: 2.0.3
vite: 6.4.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
transitivePeerDependencies:
- '@types/node'
- jiti
- less
- lightningcss
- sass
- sass-embedded
- stylus
- sugarss
- supports-color
- terser
- tsx
- yaml
vite-node@3.2.4(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0):
dependencies:
cac: 6.7.14
@@ -10622,6 +10689,21 @@ snapshots:
- tsx
- yaml
vite@6.4.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0):
dependencies:
esbuild: 0.25.12
fdir: 6.5.0(picomatch@4.0.3)
picomatch: 4.0.3
postcss: 8.5.6
rollup: 4.57.1
tinyglobby: 0.2.15
optionalDependencies:
'@types/node': 24.12.0
fsevents: 2.3.3
jiti: 2.6.1
lightningcss: 1.30.2
tsx: 4.21.0
vite@6.4.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0):
dependencies:
esbuild: 0.25.12
@@ -10637,6 +10719,21 @@ snapshots:
lightningcss: 1.30.2
tsx: 4.21.0
vite@7.3.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0):
dependencies:
esbuild: 0.27.3
fdir: 6.5.0(picomatch@4.0.3)
picomatch: 4.0.3
postcss: 8.5.6
rollup: 4.57.1
tinyglobby: 0.2.15
optionalDependencies:
'@types/node': 24.12.0
fsevents: 2.3.3
jiti: 2.6.1
lightningcss: 1.30.2
tsx: 4.21.0
vite@7.3.1(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0):
dependencies:
esbuild: 0.27.3
@@ -10652,6 +10749,48 @@ snapshots:
lightningcss: 1.30.2
tsx: 4.21.0
vitest@3.2.4(@types/debug@4.1.12)(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0):
dependencies:
'@types/chai': 5.2.3
'@vitest/expect': 3.2.4
'@vitest/mocker': 3.2.4(vite@7.3.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0))
'@vitest/pretty-format': 3.2.4
'@vitest/runner': 3.2.4
'@vitest/snapshot': 3.2.4
'@vitest/spy': 3.2.4
'@vitest/utils': 3.2.4
chai: 5.3.3
debug: 4.4.3
expect-type: 1.3.0
magic-string: 0.30.21
pathe: 2.0.3
picomatch: 4.0.3
std-env: 3.10.0
tinybench: 2.9.0
tinyexec: 0.3.2
tinyglobby: 0.2.15
tinypool: 1.1.1
tinyrainbow: 2.0.0
vite: 7.3.1(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
vite-node: 3.2.4(@types/node@24.12.0)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0)
why-is-node-running: 2.3.0
optionalDependencies:
'@types/debug': 4.1.12
'@types/node': 24.12.0
transitivePeerDependencies:
- jiti
- less
- lightningcss
- msw
- sass
- sass-embedded
- stylus
- sugarss
- supports-color
- terser
- tsx
- yaml
vitest@3.2.4(@types/debug@4.1.12)(@types/node@25.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0):
dependencies:
'@types/chai': 5.2.3

0
releases/.gitkeep Normal file
View File

15
releases/v0.2.7.md Normal file
View File

@@ -0,0 +1,15 @@
# v0.2.7
> Released: 2026-03-04
## Improvements
- **Onboarding resilience** — The setup wizard now continues after a failed environment test instead of getting stuck. If your Anthropic API key doesn't work, you can retry or clear it and proceed with a different configuration.
- **Docker onboarding flow** — Cleaner defaults for the Docker smoke test and improved console guidance during `npx` onboarding runs.
- **Issue search in skills** — The Paperclip skill reference now documents the `q=` search parameter for finding issues by keyword.
## Fixes
- **Markdown list rendering** — Fixed list markers (`-`, `*`) not rendering correctly in the editor and comment views.
- **Archived companies hidden** — The new issue dialog no longer shows archived companies in the company selector.
- **Embedded Postgres requirement** — The server now correctly requires the `embedded-postgres` dependency when running in embedded DB mode, preventing startup failures.

View File

@@ -32,6 +32,7 @@ const workspacePaths = [
"packages/adapter-utils",
"packages/adapters/claude-local",
"packages/adapters/codex-local",
"packages/adapters/opencode-local",
"packages/adapters/openclaw",
];

View File

@@ -4,12 +4,16 @@ set -euo pipefail
# release.sh — One-command version bump, build, and publish via Changesets.
#
# Usage:
# ./scripts/release.sh patch # 0.2.0 → 0.2.1
# ./scripts/release.sh minor # 0.2.0 → 0.3.0
# ./scripts/release.sh major # 0.2.0 → 1.0.0
# ./scripts/release.sh patch --dry-run # everything except npm publish
# ./scripts/release.sh patch # 0.2.0 → 0.2.1
# ./scripts/release.sh minor # 0.2.0 → 0.3.0
# ./scripts/release.sh major # 0.2.0 → 1.0.0
# ./scripts/release.sh patch --dry-run # everything except npm publish
# ./scripts/release.sh patch --canary # publish under @canary tag, no commit/tag
# ./scripts/release.sh patch --canary --dry-run
# ./scripts/release.sh --promote 0.2.8 # promote canary to @latest + commit/tag
# ./scripts/release.sh --promote 0.2.8 --dry-run
#
# Steps:
# Steps (normal):
# 1. Preflight checks (clean tree, npm login)
# 2. Auto-create a changeset for all public packages
# 3. Run changeset version (bumps versions, generates CHANGELOGs)
@@ -17,6 +21,9 @@ set -euo pipefail
# 5. Build CLI bundle (esbuild)
# 6. Publish to npm via changeset publish (unless --dry-run)
# 7. Commit and tag
#
# --canary: Steps 1-5 unchanged, Step 6 publishes with --tag canary, Step 7 skipped.
# --promote: Skips Steps 1-6, promotes canary to latest, then commits and tags.
REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
CLI_DIR="$REPO_ROOT/cli"
@@ -24,23 +31,130 @@ CLI_DIR="$REPO_ROOT/cli"
# ── Parse args ────────────────────────────────────────────────────────────────
dry_run=false
canary=false
promote=false
promote_version=""
bump_type=""
for arg in "$@"; do
case "$arg" in
while [ $# -gt 0 ]; do
case "$1" in
--dry-run) dry_run=true ;;
*) bump_type="$arg" ;;
--canary) canary=true ;;
--promote)
promote=true
shift
if [ $# -eq 0 ] || [[ "$1" == --* ]]; then
echo "Error: --promote requires a version argument (e.g. --promote 0.2.8)"
exit 1
fi
promote_version="$1"
;;
*) bump_type="$1" ;;
esac
shift
done
if [ -z "$bump_type" ]; then
echo "Usage: $0 <patch|minor|major> [--dry-run]"
if [ "$promote" = true ] && [ "$canary" = true ]; then
echo "Error: --canary and --promote cannot be used together"
exit 1
fi
if [[ ! "$bump_type" =~ ^(patch|minor|major)$ ]]; then
echo "Error: bump type must be patch, minor, or major (got '$bump_type')"
exit 1
if [ "$promote" = false ]; then
if [ -z "$bump_type" ]; then
echo "Usage: $0 <patch|minor|major> [--dry-run] [--canary]"
echo " $0 --promote <version> [--dry-run]"
exit 1
fi
if [[ ! "$bump_type" =~ ^(patch|minor|major)$ ]]; then
echo "Error: bump type must be patch, minor, or major (got '$bump_type')"
exit 1
fi
fi
# ── Promote mode (skips Steps 1-6) ───────────────────────────────────────────
if [ "$promote" = true ]; then
NEW_VERSION="$promote_version"
echo ""
echo "==> Promote mode: promoting v$NEW_VERSION from canary to latest..."
# Get all publishable package names
PACKAGES=$(node -e "
const { readFileSync } = require('fs');
const { resolve } = require('path');
const root = '$REPO_ROOT';
const dirs = ['packages/shared', 'packages/adapter-utils', 'packages/db',
'packages/adapters/claude-local', 'packages/adapters/codex-local', 'packages/adapters/openclaw',
'server', 'cli'];
const names = [];
for (const d of dirs) {
try {
const pkg = JSON.parse(readFileSync(resolve(root, d, 'package.json'), 'utf8'));
if (!pkg.private) names.push(pkg.name);
} catch {}
}
console.log(names.join('\n'));
")
echo ""
echo " Promoting packages to @latest:"
while IFS= read -r pkg; do
if [ "$dry_run" = true ]; then
echo " [dry-run] npm dist-tag add ${pkg}@${NEW_VERSION} latest"
else
npm dist-tag add "${pkg}@${NEW_VERSION}" latest
echo "${pkg}@${NEW_VERSION} → latest"
fi
done <<< "$PACKAGES"
# Restore CLI dev package.json if present
if [ -f "$CLI_DIR/package.dev.json" ]; then
mv "$CLI_DIR/package.dev.json" "$CLI_DIR/package.json"
echo " ✓ Restored workspace dependencies in cli/package.json"
fi
# Remove the README copied for npm publishing
if [ -f "$CLI_DIR/README.md" ]; then
rm "$CLI_DIR/README.md"
fi
# Remove temporary build artifacts
rm -rf "$REPO_ROOT/server/ui-dist"
for pkg_dir in server packages/adapters/claude-local packages/adapters/codex-local; do
rm -rf "$REPO_ROOT/$pkg_dir/skills"
done
# Stage release files, commit, and tag
echo ""
echo " Committing and tagging v$NEW_VERSION..."
if [ "$dry_run" = true ]; then
echo " [dry-run] git add + commit + tag v$NEW_VERSION"
else
git add \
.changeset/ \
'**/CHANGELOG.md' \
'**/package.json' \
cli/src/index.ts
git commit -m "chore: release v$NEW_VERSION"
git tag "v$NEW_VERSION"
echo " ✓ Committed and tagged v$NEW_VERSION"
fi
echo ""
if [ "$dry_run" = true ]; then
echo "Dry run complete for promote v$NEW_VERSION."
echo " - Would promote all packages to @latest"
echo " - Would commit and tag v$NEW_VERSION"
else
echo "Promoted all packages to @latest at v$NEW_VERSION"
echo ""
echo "Verify: npm view paperclipai@latest version"
echo ""
echo "To push:"
echo " git push && git push origin v$NEW_VERSION"
fi
exit 0
fi
# ── Step 1: Preflight checks ─────────────────────────────────────────────────
@@ -74,7 +188,7 @@ const { resolve } = require('path');
const root = '$REPO_ROOT';
const wsYaml = readFileSync(resolve(root, 'pnpm-workspace.yaml'), 'utf8');
const dirs = ['packages/shared', 'packages/adapter-utils', 'packages/db',
'packages/adapters/claude-local', 'packages/adapters/codex-local', 'packages/adapters/openclaw',
'packages/adapters/claude-local', 'packages/adapters/codex-local', 'packages/adapters/opencode-local', 'packages/adapters/openclaw',
'server', 'cli'];
const names = [];
for (const d of dirs) {
@@ -131,6 +245,7 @@ pnpm --filter @paperclipai/adapter-utils build
pnpm --filter @paperclipai/db build
pnpm --filter @paperclipai/adapter-claude-local build
pnpm --filter @paperclipai/adapter-codex-local build
pnpm --filter @paperclipai/adapter-opencode-local build
pnpm --filter @paperclipai/adapter-openclaw build
pnpm --filter @paperclipai/server build
@@ -158,29 +273,48 @@ echo " ✓ CLI bundled"
if [ "$dry_run" = true ]; then
echo ""
echo "==> Step 6/7: Skipping publish (--dry-run)"
if [ "$canary" = true ]; then
echo "==> Step 6/7: Skipping publish (--dry-run, --canary)"
else
echo "==> Step 6/7: Skipping publish (--dry-run)"
fi
echo ""
echo " Preview what would be published:"
for dir in packages/shared packages/adapter-utils packages/db \
packages/adapters/claude-local packages/adapters/codex-local packages/adapters/openclaw \
packages/adapters/claude-local packages/adapters/codex-local packages/adapters/opencode-local packages/adapters/openclaw \
server cli; do
echo " --- $dir ---"
cd "$REPO_ROOT/$dir"
npm pack --dry-run 2>&1 | tail -3
done
cd "$REPO_ROOT"
if [ "$canary" = true ]; then
echo ""
echo " [dry-run] Would publish with: npx changeset publish --tag canary"
fi
else
echo ""
echo "==> Step 6/7: Publishing to npm..."
cd "$REPO_ROOT"
npx changeset publish
echo " ✓ Published all packages"
if [ "$canary" = true ]; then
echo "==> Step 6/7: Publishing to npm (canary)..."
cd "$REPO_ROOT"
npx changeset publish --tag canary
echo " ✓ Published all packages under @canary tag"
else
echo "==> Step 6/7: Publishing to npm..."
cd "$REPO_ROOT"
npx changeset publish
echo " ✓ Published all packages"
fi
fi
# ── Step 7: Restore CLI dev package.json and commit ──────────────────────────
echo ""
echo "==> Step 7/7: Restoring dev package.json, committing, and tagging..."
if [ "$canary" = true ]; then
echo "==> Step 7/7: Skipping commit and tag (canary mode — promote later)..."
else
echo "==> Step 7/7: Restoring dev package.json, committing, and tagging..."
fi
cd "$REPO_ROOT"
# Restore the dev package.json (build-npm.sh backs it up)
@@ -200,20 +334,39 @@ for pkg_dir in server packages/adapters/claude-local packages/adapters/codex-loc
rm -rf "$REPO_ROOT/$pkg_dir/skills"
done
# Stage only release-related files (avoid sweeping unrelated changes with -A)
git add \
.changeset/ \
'**/CHANGELOG.md' \
'**/package.json' \
cli/src/index.ts
git commit -m "chore: release v$NEW_VERSION"
git tag "v$NEW_VERSION"
echo " ✓ Committed and tagged v$NEW_VERSION"
if [ "$canary" = false ]; then
# Stage only release-related files (avoid sweeping unrelated changes with -A)
git add \
.changeset/ \
'**/CHANGELOG.md' \
'**/package.json' \
cli/src/index.ts
git commit -m "chore: release v$NEW_VERSION"
git tag "v$NEW_VERSION"
echo " ✓ Committed and tagged v$NEW_VERSION"
fi
# ── Done ──────────────────────────────────────────────────────────────────────
echo ""
if [ "$dry_run" = true ]; then
if [ "$canary" = true ]; then
if [ "$dry_run" = true ]; then
echo "Dry run complete for canary v$NEW_VERSION."
echo " - Versions bumped, built, and previewed"
echo " - Dev package.json restored"
echo " - No commit or tag (canary mode)"
echo ""
echo "To actually publish canary, run:"
echo " ./scripts/release.sh $bump_type --canary"
else
echo "Published canary at v$NEW_VERSION"
echo ""
echo "Verify: npm view paperclipai@canary version"
echo ""
echo "To promote to latest:"
echo " ./scripts/release.sh --promote $NEW_VERSION"
fi
elif [ "$dry_run" = true ]; then
echo "Dry run complete for v$NEW_VERSION."
echo " - Versions bumped, built, and previewed"
echo " - Dev package.json restored"

View File

@@ -15,6 +15,39 @@ require_cmd() {
command -v "$cmd" >/dev/null 2>&1 || fail "missing required command: $cmd"
}
reset_openclaw_state_dir() {
local state_dir="$1"
local resolved_state_dir resolved_home
[[ -n "$state_dir" ]] || fail "OPENCLAW_CONFIG_DIR must not be empty when resetting state"
mkdir -p "$state_dir"
resolved_state_dir="$(cd "$state_dir" && pwd -P)"
resolved_home="$(cd "$HOME" && pwd -P)"
case "$resolved_state_dir" in
"/"|"$resolved_home")
fail "refusing to reset unsafe OPENCLAW_CONFIG_DIR: $resolved_state_dir"
;;
esac
log "resetting OpenClaw state under $resolved_state_dir"
rm -rf \
"$resolved_state_dir/agents" \
"$resolved_state_dir/canvas" \
"$resolved_state_dir/cron" \
"$resolved_state_dir/credentials" \
"$resolved_state_dir/devices" \
"$resolved_state_dir/identity" \
"$resolved_state_dir/logs" \
"$resolved_state_dir/memory" \
"$resolved_state_dir/skills" \
"$resolved_state_dir/workspace"
rm -f \
"$resolved_state_dir/openclaw.json" \
"$resolved_state_dir/openclaw.json.bak" \
"$resolved_state_dir/update-check.json"
}
require_cmd docker
require_cmd git
require_cmd curl
@@ -24,7 +57,10 @@ require_cmd grep
OPENCLAW_REPO_URL="${OPENCLAW_REPO_URL:-https://github.com/openclaw/openclaw.git}"
OPENCLAW_DOCKER_DIR="${OPENCLAW_DOCKER_DIR:-/tmp/openclaw-docker}"
OPENCLAW_IMAGE="${OPENCLAW_IMAGE:-openclaw:local}"
OPENCLAW_CONFIG_DIR="${OPENCLAW_CONFIG_DIR:-$HOME/.openclaw}"
OPENCLAW_TMP_DIR="${OPENCLAW_TMP_DIR:-${TMPDIR:-/tmp}}"
OPENCLAW_TMP_DIR="${OPENCLAW_TMP_DIR%/}"
OPENCLAW_TMP_DIR="${OPENCLAW_TMP_DIR:-/tmp}"
OPENCLAW_CONFIG_DIR="${OPENCLAW_CONFIG_DIR:-$OPENCLAW_TMP_DIR/openclaw-paperclip-smoke}"
OPENCLAW_WORKSPACE_DIR="${OPENCLAW_WORKSPACE_DIR:-$OPENCLAW_CONFIG_DIR/workspace}"
OPENCLAW_GATEWAY_PORT="${OPENCLAW_GATEWAY_PORT:-18789}"
OPENCLAW_BRIDGE_PORT="${OPENCLAW_BRIDGE_PORT:-18790}"
@@ -34,7 +70,13 @@ OPENCLAW_BUILD="${OPENCLAW_BUILD:-1}"
OPENCLAW_WAIT_SECONDS="${OPENCLAW_WAIT_SECONDS:-45}"
OPENCLAW_OPEN_BROWSER="${OPENCLAW_OPEN_BROWSER:-0}"
OPENCLAW_SECRETS_FILE="${OPENCLAW_SECRETS_FILE:-$HOME/.secrets}"
# Keep default one-command UX: local smoke run should not require manual pairing.
OPENCLAW_DISABLE_DEVICE_AUTH="${OPENCLAW_DISABLE_DEVICE_AUTH:-1}"
OPENCLAW_MODEL_PRIMARY="${OPENCLAW_MODEL_PRIMARY:-openai/gpt-5.2}"
OPENCLAW_MODEL_FALLBACK="${OPENCLAW_MODEL_FALLBACK:-openai/gpt-5.2-chat-latest}"
OPENCLAW_RESET_STATE="${OPENCLAW_RESET_STATE:-1}"
PAPERCLIP_HOST_PORT="${PAPERCLIP_HOST_PORT:-3100}"
PAPERCLIP_HOST_FROM_CONTAINER="${PAPERCLIP_HOST_FROM_CONTAINER:-host.docker.internal}"
case "$OPENCLAW_DISABLE_DEVICE_AUTH" in
1|true|TRUE|True|yes|YES|Yes)
@@ -73,6 +115,19 @@ if [[ "$OPENCLAW_BUILD" == "1" ]]; then
fi
log "writing OpenClaw config under $OPENCLAW_CONFIG_DIR"
if [[ "$OPENCLAW_RESET_STATE" == "1" ]]; then
# Ensure deterministic smoke behavior across reruns by restarting with a clean state dir.
OPENCLAW_CONFIG_DIR="$OPENCLAW_CONFIG_DIR" \
OPENCLAW_WORKSPACE_DIR="$OPENCLAW_WORKSPACE_DIR" \
OPENCLAW_GATEWAY_PORT="$OPENCLAW_GATEWAY_PORT" \
OPENCLAW_BRIDGE_PORT="$OPENCLAW_BRIDGE_PORT" \
OPENCLAW_GATEWAY_BIND="$OPENCLAW_GATEWAY_BIND" \
OPENCLAW_GATEWAY_TOKEN="$OPENCLAW_GATEWAY_TOKEN" \
OPENCLAW_IMAGE="$OPENCLAW_IMAGE" \
OPENAI_API_KEY="$OPENAI_API_KEY" \
docker compose -f "$OPENCLAW_DOCKER_DIR/docker-compose.yml" down --remove-orphans >/dev/null 2>&1 || true
reset_openclaw_state_dir "$OPENCLAW_CONFIG_DIR"
fi
mkdir -p "$OPENCLAW_WORKSPACE_DIR" "$OPENCLAW_CONFIG_DIR/identity" "$OPENCLAW_CONFIG_DIR/credentials"
chmod 700 "$OPENCLAW_CONFIG_DIR" "$OPENCLAW_CONFIG_DIR/credentials"
@@ -100,6 +155,12 @@ cat > "$OPENCLAW_CONFIG_DIR/openclaw.json" <<EOF
},
"agents": {
"defaults": {
"model": {
"primary": "${OPENCLAW_MODEL_PRIMARY}",
"fallbacks": [
"${OPENCLAW_MODEL_FALLBACK}"
]
},
"workspace": "/home/node/.openclaw/workspace"
}
}
@@ -124,6 +185,8 @@ services:
openclaw-gateway:
tmpfs:
- /tmp:exec,size=512M
extra_hosts:
- "host.docker.internal:host-gateway"
openclaw-cli:
tmpfs:
- /tmp:exec,size=512M
@@ -136,6 +199,20 @@ compose() {
"$@"
}
# Probe candidate host addresses from inside the gateway container and echo
# the first Paperclip base URL whose /api/health endpoint answers OK.
# Returns non-zero when no candidate is reachable.
detect_paperclip_base_url() {
  local gateway_ip host health_url
  # The Docker bridge gateway IP is a fallback when host.docker.internal
  # is not resolvable on this platform.
  gateway_ip="$(docker network inspect openclaw-docker_default --format '{{(index .IPAM.Config 0).Gateway}}' 2>/dev/null || true)"
  for host in "$PAPERCLIP_HOST_FROM_CONTAINER" "$gateway_ip"; do
    if [[ -z "$host" ]]; then
      continue
    fi
    health_url="http://${host}:${PAPERCLIP_HOST_PORT}/api/health"
    # Probe from inside the container so we validate container-side reachability.
    if compose exec -T openclaw-gateway node -e "fetch('${health_url}').then((r)=>process.exit(r.ok?0:1)).catch(()=>process.exit(1))" >/dev/null 2>&1; then
      echo "http://${host}:${PAPERCLIP_HOST_PORT}"
      return 0
    fi
  done
  return 1
}
log "starting OpenClaw gateway container"
compose up -d openclaw-gateway
@@ -154,6 +231,7 @@ if [[ "$READY" != "1" ]]; then
fail "gateway did not become healthy in ${OPENCLAW_WAIT_SECONDS}s"
fi
paperclip_base_url="$(detect_paperclip_base_url || true)"
dashboard_output="$(compose run --rm openclaw-cli dashboard --no-open)"
dashboard_url="$(grep -Eo 'https?://[^[:space:]]+#token=[^[:space:]]+' <<<"$dashboard_output" | head -n1 || true)"
if [[ -z "$dashboard_url" ]]; then
@@ -166,15 +244,35 @@ OpenClaw gateway is running.
Dashboard URL:
$dashboard_url
Pairing mode:
OPENCLAW_DISABLE_DEVICE_AUTH=$OPENCLAW_DISABLE_DEVICE_AUTH
EOF
if [[ "$OPENCLAW_DISABLE_DEVICE_AUTH_JSON" == "true" ]]; then
cat <<EOF
Device pairing is disabled for this local smoke run.
Pairing:
Device pairing is disabled by default for this local smoke run.
No extra env vars are required for the default path.
(Security tradeoff: enable pairing with OPENCLAW_DISABLE_DEVICE_AUTH=0.)
Model:
${OPENCLAW_MODEL_PRIMARY} (fallback: ${OPENCLAW_MODEL_FALLBACK})
State:
OPENCLAW_RESET_STATE=$OPENCLAW_RESET_STATE
Paperclip URL for OpenClaw container:
EOF
if [[ -n "$paperclip_base_url" ]]; then
cat <<EOF
$paperclip_base_url
(Use this base URL for invite/onboarding links from inside OpenClaw Docker.)
EOF
else
cat <<EOF
Auto-detect failed. Try: http://host.docker.internal:${PAPERCLIP_HOST_PORT}
(Do not use http://127.0.0.1:${PAPERCLIP_HOST_PORT} inside the container.)
If Paperclip rejects the host, run on host machine:
pnpm paperclipai allowed-hostname host.docker.internal
Then restart Paperclip and re-run this script.
EOF
fi
cat <<EOF
Useful commands:
docker compose -f "$OPENCLAW_DOCKER_DIR/docker-compose.yml" -f "$COMPOSE_OVERRIDE" logs -f openclaw-gateway
@@ -182,10 +280,32 @@ Useful commands:
EOF
else
cat <<EOF
Pairing:
Device pairing is enabled.
If UI shows "pairing required", run:
docker compose -f "$OPENCLAW_DOCKER_DIR/docker-compose.yml" -f "$COMPOSE_OVERRIDE" run --rm openclaw-cli devices list
docker compose -f "$OPENCLAW_DOCKER_DIR/docker-compose.yml" -f "$COMPOSE_OVERRIDE" run --rm openclaw-cli devices approve --latest
Model:
${OPENCLAW_MODEL_PRIMARY} (fallback: ${OPENCLAW_MODEL_FALLBACK})
State:
OPENCLAW_RESET_STATE=$OPENCLAW_RESET_STATE
Paperclip URL for OpenClaw container:
EOF
if [[ -n "$paperclip_base_url" ]]; then
cat <<EOF
$paperclip_base_url
(Use this base URL for invite/onboarding links from inside OpenClaw Docker.)
EOF
else
cat <<EOF
Auto-detect failed. Try: http://host.docker.internal:${PAPERCLIP_HOST_PORT}
(Do not use http://127.0.0.1:${PAPERCLIP_HOST_PORT} inside the container.)
If Paperclip rejects the host, run on host machine:
pnpm paperclipai allowed-hostname host.docker.internal
Then restart Paperclip and re-run this script.
EOF
fi
cat <<EOF
Useful commands:
docker compose -f "$OPENCLAW_DOCKER_DIR/docker-compose.yml" -f "$COMPOSE_OVERRIDE" logs -f openclaw-gateway

View File

@@ -160,7 +160,7 @@ if [[ -z "$COMPANY_ID" ]]; then
fi
log "creating agent-only invite for company ${COMPANY_ID}"
INVITE_PAYLOAD="$(jq -nc '{allowedJoinTypes:"agent",expiresInHours:24}')"
INVITE_PAYLOAD="$(jq -nc '{allowedJoinTypes:"agent"}')"
api_request "POST" "/companies/${COMPANY_ID}/invites" "$INVITE_PAYLOAD"
if [[ "$RESPONSE_CODE" == "401" || "$RESPONSE_CODE" == "403" ]]; then
fail_board_auth_required "Invite creation"

View File

@@ -0,0 +1,146 @@
#!/usr/bin/env bash
set -euo pipefail
# Emit an informational line tagged with this script's name.
log() {
  printf '%s\n' "[openclaw-sse-standalone] $*"
}

# Print an error to stderr and abort the script.
fail() {
  printf '%s\n' "[openclaw-sse-standalone] ERROR: $*" >&2
  exit 1
}

# Abort unless the named executable is available on PATH.
require_cmd() {
  command -v "$1" >/dev/null 2>&1 || fail "missing required command: $1"
}
# External tools required by this smoke test.
require_cmd curl
require_cmd jq
require_cmd grep
# Target endpoint and request shaping. OPENCLAW_URL is mandatory (checked below).
OPENCLAW_URL="${OPENCLAW_URL:-}"
OPENCLAW_METHOD="${OPENCLAW_METHOD:-POST}"
OPENCLAW_AUTH_HEADER="${OPENCLAW_AUTH_HEADER:-}"
OPENCLAW_TIMEOUT_SEC="${OPENCLAW_TIMEOUT_SEC:-180}"
OPENCLAW_MODEL="${OPENCLAW_MODEL:-openclaw}"
OPENCLAW_USER="${OPENCLAW_USER:-paperclip-smoke}"
# Paperclip context propagated to the agent; each has a smoke-test default
# and can be overridden per-run from the environment.
PAPERCLIP_RUN_ID="${PAPERCLIP_RUN_ID:-smoke-run-$(date +%s)}"
PAPERCLIP_AGENT_ID="${PAPERCLIP_AGENT_ID:-openclaw-smoke-agent}"
PAPERCLIP_COMPANY_ID="${PAPERCLIP_COMPANY_ID:-openclaw-smoke-company}"
PAPERCLIP_API_URL="${PAPERCLIP_API_URL:-http://localhost:3100}"
PAPERCLIP_TASK_ID="${PAPERCLIP_TASK_ID:-openclaw-smoke-task}"
PAPERCLIP_WAKE_REASON="${PAPERCLIP_WAKE_REASON:-openclaw_smoke_test}"
PAPERCLIP_WAKE_COMMENT_ID="${PAPERCLIP_WAKE_COMMENT_ID:-}"
PAPERCLIP_APPROVAL_ID="${PAPERCLIP_APPROVAL_ID:-}"
PAPERCLIP_APPROVAL_STATUS="${PAPERCLIP_APPROVAL_STATUS:-}"
PAPERCLIP_LINKED_ISSUE_IDS="${PAPERCLIP_LINKED_ISSUE_IDS:-}"
OPENCLAW_TEXT_PREFIX="${OPENCLAW_TEXT_PREFIX:-Standalone OpenClaw SSE smoke test.}"
[[ -n "$OPENCLAW_URL" ]] || fail "OPENCLAW_URL is required"
# Build the human-readable prompt body. `read -d ''` exits non-zero on EOF,
# so `|| true` keeps `set -e` from aborting; the heredoc content is part of
# the payload and must not be edited casually.
read -r -d '' TEXT_BODY <<EOF || true
${OPENCLAW_TEXT_PREFIX}
PAPERCLIP_RUN_ID=${PAPERCLIP_RUN_ID}
PAPERCLIP_AGENT_ID=${PAPERCLIP_AGENT_ID}
PAPERCLIP_COMPANY_ID=${PAPERCLIP_COMPANY_ID}
PAPERCLIP_API_URL=${PAPERCLIP_API_URL}
PAPERCLIP_TASK_ID=${PAPERCLIP_TASK_ID}
PAPERCLIP_WAKE_REASON=${PAPERCLIP_WAKE_REASON}
PAPERCLIP_WAKE_COMMENT_ID=${PAPERCLIP_WAKE_COMMENT_ID}
PAPERCLIP_APPROVAL_ID=${PAPERCLIP_APPROVAL_ID}
PAPERCLIP_APPROVAL_STATUS=${PAPERCLIP_APPROVAL_STATUS}
PAPERCLIP_LINKED_ISSUE_IDS=${PAPERCLIP_LINKED_ISSUE_IDS}
Run your Paperclip heartbeat procedure now.
EOF
# Assemble the JSON request body with jq so every value is safely escaped.
# The same Paperclip context appears twice on purpose: in the free-text
# prompt (TEXT_BODY above) and as structured metadata here.
PAYLOAD="$(jq -nc \
--arg text "$TEXT_BODY" \
--arg model "$OPENCLAW_MODEL" \
--arg user "$OPENCLAW_USER" \
--arg runId "$PAPERCLIP_RUN_ID" \
--arg agentId "$PAPERCLIP_AGENT_ID" \
--arg companyId "$PAPERCLIP_COMPANY_ID" \
--arg apiUrl "$PAPERCLIP_API_URL" \
--arg taskId "$PAPERCLIP_TASK_ID" \
--arg wakeReason "$PAPERCLIP_WAKE_REASON" \
--arg wakeCommentId "$PAPERCLIP_WAKE_COMMENT_ID" \
--arg approvalId "$PAPERCLIP_APPROVAL_ID" \
--arg approvalStatus "$PAPERCLIP_APPROVAL_STATUS" \
--arg linkedIssueIds "$PAPERCLIP_LINKED_ISSUE_IDS" \
'{
model: $model,
user: $user,
input: $text,
stream: true,
metadata: {
PAPERCLIP_RUN_ID: $runId,
PAPERCLIP_AGENT_ID: $agentId,
PAPERCLIP_COMPANY_ID: $companyId,
PAPERCLIP_API_URL: $apiUrl,
PAPERCLIP_TASK_ID: $taskId,
PAPERCLIP_WAKE_REASON: $wakeReason,
PAPERCLIP_WAKE_COMMENT_ID: $wakeCommentId,
PAPERCLIP_APPROVAL_ID: $approvalId,
PAPERCLIP_APPROVAL_STATUS: $approvalStatus,
PAPERCLIP_LINKED_ISSUE_IDS: $linkedIssueIds,
paperclip_session_key: ("paperclip:run:" + $runId)
}
}')"
# Capture response headers and body in temp files so they can be validated
# separately after the stream finishes.
headers_file="$(mktemp)"
body_file="$(mktemp)"
cleanup() {
rm -f "$headers_file" "$body_file"
}
# Remove the temp files on any exit path, including failures.
trap cleanup EXIT
# curl options: -N disables output buffering for streaming; -D/-o split
# headers from body; the session-key header pins the OpenClaw session.
args=(
-sS
-N
--max-time "$OPENCLAW_TIMEOUT_SEC"
-X "$OPENCLAW_METHOD"
-H "content-type: application/json"
-H "accept: text/event-stream"
-H "x-openclaw-session-key: paperclip:run:${PAPERCLIP_RUN_ID}"
-D "$headers_file"
-o "$body_file"
--data "$PAYLOAD"
"$OPENCLAW_URL"
)
# Optional bearer/basic auth, prepended so it precedes the URL argument.
if [[ -n "$OPENCLAW_AUTH_HEADER" ]]; then
args=(-H "Authorization: $OPENCLAW_AUTH_HEADER" "${args[@]}")
fi
log "posting SSE wake payload to ${OPENCLAW_URL}"
# Capture curl's exit status explicitly: under `set -e` a transport-level
# failure (timeout, DNS, refused connection) inside the command substitution
# would abort the script with no diagnostic at all.
curl_status=0
http_code="$(curl "${args[@]}" -w "%{http_code}")" || curl_status=$?
if [[ "$curl_status" -ne 0 ]]; then
  tail -n 80 "$body_file" >&2 || true
  fail "curl transport failure (exit ${curl_status}, http status: ${http_code:-unknown})"
fi
log "http status: ${http_code}"
# Any non-2xx status is a hard failure; show the body tail for context.
if [[ ! "$http_code" =~ ^2 ]]; then
  tail -n 80 "$body_file" >&2 || true
  fail "non-success HTTP status: ${http_code}"
fi
# The endpoint must actually stream SSE, not return a buffered JSON reply.
if ! grep -Eqi '^content-type:.*text/event-stream' "$headers_file"; then
  tail -n 40 "$body_file" >&2 || true
  fail "response content-type was not text/event-stream"
fi
# Reject streams that carried an explicit failure/cancellation event.
if grep -Eqi 'event:\s*(error|failed|cancel)|"status":"(failed|cancelled|error)"|"type":"[^"]*(failed|cancelled|error)"' "$body_file"; then
  tail -n 120 "$body_file" >&2 || true
  fail "stream reported a failure event"
fi
# Require a terminal completion marker so a silently truncated stream fails.
if ! grep -Eqi 'event:\s*(done|completed|response\.completed)|\[DONE\]|"status":"(completed|succeeded|done)"|"type":"response\.completed"' "$body_file"; then
  tail -n 120 "$body_file" >&2 || true
  fail "stream ended without a terminal completion marker"
fi
# grep -c prints "0" and exits 1 when nothing matches; `|| true` keeps set -e happy.
event_count="$(grep -Ec '^event:' "$body_file" || true)"
log "stream completed successfully (events=${event_count})"
echo
tail -n 40 "$body_file"

View File

@@ -57,7 +57,9 @@
"@types/express": "^5.0.0",
"@types/express-serve-static-core": "^5.0.0",
"@types/multer": "^2.0.0",
"@types/node": "^24.6.0",
"@types/supertest": "^6.0.2",
"@types/ws": "^8.18.1",
"supertest": "^7.0.0",
"tsx": "^4.19.2",
"typescript": "^5.7.3",

View File

@@ -1,6 +1,7 @@
import { beforeEach, describe, expect, it, vi } from "vitest";
import { models as codexFallbackModels } from "@paperclipai/adapter-codex-local";
import { models as cursorFallbackModels } from "@paperclipai/adapter-cursor-local";
import { resetOpenCodeModelsCacheForTests } from "@paperclipai/adapter-opencode-local/server";
import { listAdapterModels } from "../adapters/index.js";
import { resetCodexModelsCacheForTests } from "../adapters/codex-models.js";
import { resetCursorModelsCacheForTests, setCursorModelsRunnerForTests } from "../adapters/cursor-models.js";
@@ -8,9 +9,11 @@ import { resetCursorModelsCacheForTests, setCursorModelsRunnerForTests } from ".
describe("adapter model listing", () => {
beforeEach(() => {
delete process.env.OPENAI_API_KEY;
delete process.env.PAPERCLIP_OPENCODE_COMMAND;
resetCodexModelsCacheForTests();
resetCursorModelsCacheForTests();
setCursorModelsRunnerForTests(null);
resetOpenCodeModelsCacheForTests();
vi.restoreAllMocks();
});
@@ -60,6 +63,7 @@ describe("adapter model listing", () => {
expect(models).toEqual(codexFallbackModels);
});
it("returns cursor fallback models when CLI discovery is unavailable", async () => {
setCursorModelsRunnerForTests(() => ({
status: null,
@@ -90,4 +94,11 @@ describe("adapter model listing", () => {
expect(first.some((model) => model.id === "gpt-5.3-codex-high")).toBe(true);
expect(first.some((model) => model.id === "composer-1")).toBe(true);
});
it("returns no opencode models when opencode command is unavailable", async () => {
process.env.PAPERCLIP_OPENCODE_COMMAND = "__paperclip_missing_opencode_command__";
const models = await listAdapterModels("opencode_local");
expect(models).toEqual([]);
});
});

View File

@@ -0,0 +1,37 @@
import { describe, expect, it } from "vitest";
import { hasAgentShortnameCollision } from "../services/agents.ts";
describe("hasAgentShortnameCollision", () => {
  // Shorthand for an active (non-terminated) agent row.
  const idleAgent = (id: string, name: string) => ({ id, name, status: "idle" });

  it("detects collisions by normalized shortname", () => {
    expect(
      hasAgentShortnameCollision("Codex Coder", [idleAgent("a1", "codex-coder")]),
    ).toBe(true);
  });

  it("ignores terminated agents", () => {
    expect(
      hasAgentShortnameCollision("Codex Coder", [
        { id: "a1", name: "codex-coder", status: "terminated" },
      ]),
    ).toBe(false);
  });

  it("ignores the excluded agent id", () => {
    const existing = [idleAgent("a1", "codex-coder"), idleAgent("a2", "other-agent")];
    expect(
      hasAgentShortnameCollision("Codex Coder", existing, { excludeAgentId: "a1" }),
    ).toBe(false);
  });

  it("does not collide when candidate has no shortname", () => {
    // "!!!" normalizes to no usable shortname, so nothing can collide with it.
    expect(hasAgentShortnameCollision("!!!", [idleAgent("a1", "codex-coder")])).toBe(false);
  });
});

View File

@@ -0,0 +1,180 @@
import { afterEach, describe, expect, it, vi } from "vitest";
import type { Db } from "@paperclipai/db";
import { notifyHireApproved } from "../services/hire-hook.js";
// Mock the registry so we control whether the adapter has onHireApproved and what it does.
vi.mock("../adapters/registry.js", () => ({
findServerAdapter: vi.fn(),
}));
vi.mock("../services/activity-log.js", () => ({
logActivity: vi.fn().mockResolvedValue(undefined),
}));
const { findServerAdapter } = await import("../adapters/registry.js");
const { logActivity } = await import("../services/activity-log.js");
// Build a minimal Db stub whose select().from().where() chain resolves to a
// single agent row derived from the given fields.
function mockDbWithAgent(agent: { id: string; companyId: string; name: string; adapterType: string; adapterConfig?: Record<string, unknown> }): Db {
  const row = {
    id: agent.id,
    companyId: agent.companyId,
    name: agent.name,
    adapterType: agent.adapterType,
    adapterConfig: agent.adapterConfig ?? {},
  };
  return {
    select: () => ({
      from: () => ({
        where: () => Promise.resolve([row]),
      }),
    }),
  } as unknown as Db;
}
// Reset mock call history between tests so call-count assertions stay isolated.
afterEach(() => {
vi.clearAllMocks();
});
// notifyHireApproved is exercised against a mocked adapter registry and
// activity log; each test controls whether the adapter exposes onHireApproved
// and what it returns, then asserts the activity-log outcome.
describe("notifyHireApproved", () => {
// Happy path: adapter hook resolves { ok: true } → "hire_hook.succeeded" logged.
it("writes success activity when adapter hook returns ok", async () => {
vi.mocked(findServerAdapter).mockReturnValue({
type: "openclaw",
onHireApproved: vi.fn().mockResolvedValue({ ok: true }),
} as any);
const db = mockDbWithAgent({
id: "a1",
companyId: "c1",
name: "OpenClaw Agent",
adapterType: "openclaw",
});
await expect(
notifyHireApproved(db, {
companyId: "c1",
agentId: "a1",
source: "approval",
sourceId: "ap1",
}),
).resolves.toBeUndefined();
expect(logActivity).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
action: "hire_hook.succeeded",
entityId: "a1",
details: expect.objectContaining({ source: "approval", sourceId: "ap1", adapterType: "openclaw" }),
}),
);
});
// Missing agent row: the function returns before the adapter is even looked up.
it("does nothing when agent is not found", async () => {
const db = {
select: () => ({
from: () => ({
where: () => Promise.resolve([]),
}),
}),
} as unknown as Db;
await expect(
notifyHireApproved(db, {
companyId: "c1",
agentId: "a1",
source: "join_request",
sourceId: "jr1",
}),
).resolves.toBeUndefined();
expect(findServerAdapter).not.toHaveBeenCalled();
});
// Adapter without the optional hook: looked up but nothing logged.
it("does nothing when adapter has no onHireApproved", async () => {
vi.mocked(findServerAdapter).mockReturnValue({ type: "process" } as any);
const db = mockDbWithAgent({
id: "a1",
companyId: "c1",
name: "Agent",
adapterType: "process",
});
await expect(
notifyHireApproved(db, {
companyId: "c1",
agentId: "a1",
source: "approval",
sourceId: "ap1",
}),
).resolves.toBeUndefined();
expect(findServerAdapter).toHaveBeenCalledWith("process");
expect(logActivity).not.toHaveBeenCalled();
});
// Hook resolves { ok: false, ... } → "hire_hook.failed" logged with the error.
it("logs failed result when adapter onHireApproved returns ok=false", async () => {
vi.mocked(findServerAdapter).mockReturnValue({
type: "openclaw",
onHireApproved: vi.fn().mockResolvedValue({ ok: false, error: "HTTP 500", detail: { status: 500 } }),
} as any);
const db = mockDbWithAgent({
id: "a1",
companyId: "c1",
name: "OpenClaw Agent",
adapterType: "openclaw",
});
await expect(
notifyHireApproved(db, {
companyId: "c1",
agentId: "a1",
source: "join_request",
sourceId: "jr1",
}),
).resolves.toBeUndefined();
expect(logActivity).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
action: "hire_hook.failed",
entityId: "a1",
details: expect.objectContaining({ source: "join_request", sourceId: "jr1", error: "HTTP 500" }),
}),
);
});
// Hook rejects → the error is swallowed (non-fatal) and "hire_hook.error" logged.
it("does not throw when adapter onHireApproved throws (non-fatal)", async () => {
vi.mocked(findServerAdapter).mockReturnValue({
type: "openclaw",
onHireApproved: vi.fn().mockRejectedValue(new Error("Network error")),
} as any);
const db = mockDbWithAgent({
id: "a1",
companyId: "c1",
name: "OpenClaw Agent",
adapterType: "openclaw",
});
await expect(
notifyHireApproved(db, {
companyId: "c1",
agentId: "a1",
source: "join_request",
sourceId: "jr1",
}),
).resolves.toBeUndefined();
expect(logActivity).toHaveBeenCalledWith(
expect.anything(),
expect.objectContaining({
action: "hire_hook.error",
entityId: "a1",
details: expect.objectContaining({ source: "join_request", sourceId: "jr1", error: "Network error" }),
}),
);
});
});

View File

@@ -0,0 +1,84 @@
import { describe, expect, it } from "vitest";
import { buildJoinDefaultsPayloadForAccept } from "../routes/access.js";
// buildJoinDefaultsPayloadForAccept merges legacy OpenClaw webhook fields into
// the agent defaults payload on invite acceptance; non-openclaw adapters pass through.
describe("buildJoinDefaultsPayloadForAccept", () => {
// With no existing defaults, the legacy fields populate url / API url / auth.
it("maps OpenClaw compatibility fields into agent defaults", () => {
const result = buildJoinDefaultsPayloadForAccept({
adapterType: "openclaw",
defaultsPayload: null,
responsesWebhookUrl: "http://localhost:18789/v1/responses",
paperclipApiUrl: "http://host.docker.internal:3100",
inboundOpenClawAuthHeader: "gateway-token",
}) as Record<string, unknown>;
expect(result).toMatchObject({
url: "http://localhost:18789/v1/responses",
paperclipApiUrl: "http://host.docker.internal:3100",
webhookAuthHeader: "Bearer gateway-token",
headers: {
"x-openclaw-auth": "gateway-token",
},
});
});
// Values already present in defaultsPayload win over the legacy inputs.
it("does not overwrite explicit OpenClaw endpoint defaults when already provided", () => {
const result = buildJoinDefaultsPayloadForAccept({
adapterType: "openclaw",
defaultsPayload: {
url: "https://example.com/v1/responses",
method: "POST",
headers: {
"x-openclaw-auth": "existing-token",
},
paperclipApiUrl: "https://paperclip.example.com",
},
responsesWebhookUrl: "https://legacy.example.com/v1/responses",
responsesWebhookMethod: "PUT",
paperclipApiUrl: "https://legacy-paperclip.example.com",
inboundOpenClawAuthHeader: "legacy-token",
}) as Record<string, unknown>;
expect(result).toMatchObject({
url: "https://example.com/v1/responses",
method: "POST",
paperclipApiUrl: "https://paperclip.example.com",
// Derived from the existing x-openclaw-auth header, not the legacy token.
webhookAuthHeader: "Bearer existing-token",
headers: {
"x-openclaw-auth": "existing-token",
},
});
});
// An explicitly configured webhookAuthHeader is never replaced.
it("preserves explicit webhookAuthHeader when configured", () => {
const result = buildJoinDefaultsPayloadForAccept({
adapterType: "openclaw",
defaultsPayload: {
url: "https://example.com/v1/responses",
webhookAuthHeader: "Bearer explicit-token",
headers: {
"x-openclaw-auth": "existing-token",
},
},
inboundOpenClawAuthHeader: "legacy-token",
}) as Record<string, unknown>;
expect(result).toMatchObject({
webhookAuthHeader: "Bearer explicit-token",
headers: {
"x-openclaw-auth": "existing-token",
},
});
});
// Non-openclaw adapters: the payload is returned unchanged.
it("leaves non-openclaw payloads unchanged", () => {
const defaultsPayload = { command: "echo hello" };
const result = buildJoinDefaultsPayloadForAccept({
adapterType: "process",
defaultsPayload,
responsesWebhookUrl: "https://ignored.example.com",
inboundOpenClawAuthHeader: "ignored-token",
});
expect(result).toEqual(defaultsPayload);
});
});

View File

@@ -0,0 +1,10 @@
import { describe, expect, it } from "vitest";
import { companyInviteExpiresAt } from "../routes/access.js";
describe("companyInviteExpiresAt", () => {
  it("sets invite expiration to 10 minutes after invite creation time", () => {
    // Midnight UTC + 10 minutes.
    const creationMs = Date.parse("2026-03-06T00:00:00.000Z");
    const expiry = companyInviteExpiresAt(creationMs);
    expect(expiry.toISOString()).toBe("2026-03-06T00:10:00.000Z");
  });
});

View File

@@ -41,6 +41,12 @@ describe("buildInviteOnboardingTextDocument", () => {
expect(text).toContain("/api/invites/token-123/accept");
expect(text).toContain("/api/join-requests/{requestId}/claim-api-key");
expect(text).toContain("/api/invites/token-123/onboarding.txt");
expect(text).toContain("/api/invites/token-123/test-resolution");
expect(text).toContain("Suggested Paperclip base URLs to try");
expect(text).toContain("http://localhost:3100");
expect(text).toContain("host.docker.internal");
expect(text).toContain("paperclipApiUrl");
expect(text).toContain("set the first reachable candidate as agentDefaultsPayload.paperclipApiUrl");
});
it("includes loopback diagnostics for authenticated/private onboarding", () => {
@@ -69,5 +75,36 @@ describe("buildInviteOnboardingTextDocument", () => {
expect(text).toContain("Connectivity diagnostics");
expect(text).toContain("loopback hostname");
expect(text).toContain("If none are reachable");
});
// When the invite's defaultsPayload carries an agentMessage, the generated
// onboarding text must surface it under a "Message from inviter" section.
it("includes inviter message in the onboarding text when provided", () => {
const req = buildReq("localhost:3100");
const invite = {
id: "invite-3",
companyId: "company-1",
inviteType: "company_join",
allowedJoinTypes: "agent",
tokenHash: "hash",
defaultsPayload: {
agentMessage: "Please join as our QA lead and prioritize flaky test triage first.",
},
expiresAt: new Date("2026-03-05T00:00:00.000Z"),
invitedByUserId: null,
revokedAt: null,
acceptedAt: null,
createdAt: new Date("2026-03-04T00:00:00.000Z"),
updatedAt: new Date("2026-03-04T00:00:00.000Z"),
} as const;
const text = buildInviteOnboardingTextDocument(req, "token-789", invite as any, {
deploymentMode: "local_trusted",
deploymentExposure: "private",
bindHost: "127.0.0.1",
allowedHostnames: [],
});
expect(text).toContain("Message from inviter");
expect(text).toContain("prioritize flaky test triage first");
});
});

View File

@@ -0,0 +1,48 @@
import { describe, expect, it } from "vitest";
import { shouldWakeAssigneeOnCheckout } from "../routes/issues-checkout-wakeup.js";
describe("shouldWakeAssigneeOnCheckout", () => {
  // Baseline: an agent acting on its own checkout inside an active run.
  const selfCheckout = {
    actorType: "agent",
    actorAgentId: "agent-1",
    checkoutAgentId: "agent-1",
    checkoutRunId: "run-1",
  } as const;

  it("keeps wakeup behavior for board actors", () => {
    const result = shouldWakeAssigneeOnCheckout({
      actorType: "board",
      actorAgentId: null,
      checkoutAgentId: "agent-1",
      checkoutRunId: null,
    });
    expect(result).toBe(true);
  });

  it("skips wakeup for agent self-checkout in an active run", () => {
    expect(shouldWakeAssigneeOnCheckout({ ...selfCheckout })).toBe(false);
  });

  it("still wakes when checkout run id is missing", () => {
    expect(
      shouldWakeAssigneeOnCheckout({ ...selfCheckout, checkoutRunId: null }),
    ).toBe(true);
  });

  it("still wakes when agent checks out on behalf of another agent id", () => {
    expect(
      shouldWakeAssigneeOnCheckout({ ...selfCheckout, checkoutAgentId: "agent-2" }),
    ).toBe(true);
  });
});

View File

@@ -0,0 +1,113 @@
import { describe, expect, it } from "vitest";
import { deriveIssueUserContext } from "../services/issues.ts";
// Build a minimal issue row for deriveIssueUserContext; any field can be
// overridden while the rest keep their fixed smoke-test defaults.
function makeIssue(overrides?: Partial<{
  createdByUserId: string | null;
  assigneeUserId: string | null;
  createdAt: Date;
  updatedAt: Date;
}>) {
  const defaults = {
    createdByUserId: null,
    assigneeUserId: null,
    createdAt: new Date("2026-03-06T10:00:00.000Z"),
    updatedAt: new Date("2026-03-06T11:00:00.000Z"),
  };
  return { ...defaults, ...overrides };
}
// deriveIssueUserContext computes a per-user "touch point" (latest of own
// comment / read marker / creation-or-update fallback) and flags the issue
// unread when an external comment is newer than that touch point.
describe("deriveIssueUserContext", () => {
it("marks issue unread when external comments are newer than my latest comment", () => {
const context = deriveIssueUserContext(
makeIssue({ createdByUserId: "user-1" }),
"user-1",
{
myLastCommentAt: new Date("2026-03-06T12:00:00.000Z"),
myLastReadAt: null,
lastExternalCommentAt: new Date("2026-03-06T13:00:00.000Z"),
},
);
expect(context.myLastTouchAt?.toISOString()).toBe("2026-03-06T12:00:00.000Z");
expect(context.lastExternalCommentAt?.toISOString()).toBe("2026-03-06T13:00:00.000Z");
expect(context.isUnreadForMe).toBe(true);
});
it("marks issue read when my latest comment is newest", () => {
const context = deriveIssueUserContext(
makeIssue({ createdByUserId: "user-1" }),
"user-1",
{
myLastCommentAt: new Date("2026-03-06T14:00:00.000Z"),
myLastReadAt: null,
lastExternalCommentAt: new Date("2026-03-06T13:00:00.000Z"),
},
);
expect(context.isUnreadForMe).toBe(false);
});
// No comments/reads from the creator: createdAt acts as the touch point.
it("uses issue creation time as fallback touch point for creator", () => {
const context = deriveIssueUserContext(
makeIssue({ createdByUserId: "user-1", createdAt: new Date("2026-03-06T09:00:00.000Z") }),
"user-1",
{
myLastCommentAt: null,
myLastReadAt: null,
lastExternalCommentAt: new Date("2026-03-06T10:00:00.000Z"),
},
);
expect(context.myLastTouchAt?.toISOString()).toBe("2026-03-06T09:00:00.000Z");
expect(context.isUnreadForMe).toBe(true);
});
// For the assignee the fallback is updatedAt instead of createdAt.
it("uses issue updated time as fallback touch point for assignee", () => {
const context = deriveIssueUserContext(
makeIssue({ assigneeUserId: "user-1", updatedAt: new Date("2026-03-06T15:00:00.000Z") }),
"user-1",
{
myLastCommentAt: null,
myLastReadAt: null,
lastExternalCommentAt: new Date("2026-03-06T14:59:00.000Z"),
},
);
expect(context.myLastTouchAt?.toISOString()).toBe("2026-03-06T15:00:00.000Z");
expect(context.isUnreadForMe).toBe(false);
});
// A read marker alone (no comment) is enough to clear the unread flag.
it("uses latest read timestamp to clear unread without requiring a comment", () => {
const context = deriveIssueUserContext(
makeIssue({ createdByUserId: "user-1", createdAt: new Date("2026-03-06T09:00:00.000Z") }),
"user-1",
{
myLastCommentAt: null,
myLastReadAt: new Date("2026-03-06T11:30:00.000Z"),
lastExternalCommentAt: new Date("2026-03-06T11:00:00.000Z"),
},
);
expect(context.myLastTouchAt?.toISOString()).toBe("2026-03-06T11:30:00.000Z");
expect(context.isUnreadForMe).toBe(false);
});
// Timestamps may arrive as strings from SQL drivers; they must still parse.
it("handles SQL timestamp strings without throwing", () => {
const context = deriveIssueUserContext(
makeIssue({
createdByUserId: "user-1",
createdAt: new Date("2026-03-06T09:00:00.000Z"),
}),
"user-1",
{
myLastCommentAt: "2026-03-06T10:00:00.000Z",
myLastReadAt: null,
lastExternalCommentAt: "2026-03-06T11:00:00.000Z",
},
);
expect(context.myLastTouchAt?.toISOString()).toBe("2026-03-06T10:00:00.000Z");
expect(context.lastExternalCommentAt?.toISOString()).toBe("2026-03-06T11:00:00.000Z");
expect(context.isUnreadForMe).toBe(true);
});
});

View File

@@ -0,0 +1,607 @@
import { afterEach, describe, expect, it, vi } from "vitest";
import { execute, testEnvironment, onHireApproved } from "@paperclipai/adapter-openclaw/server";
import { parseOpenClawStdoutLine } from "@paperclipai/adapter-openclaw/ui";
import type { AdapterExecutionContext } from "@paperclipai/adapter-utils";
// Build an AdapterExecutionContext with fixed smoke-test identifiers; the
// given config is injected verbatim and any field can be overridden.
function buildContext(
  config: Record<string, unknown>,
  overrides?: Partial<AdapterExecutionContext>,
): AdapterExecutionContext {
  const base: AdapterExecutionContext = {
    runId: "run-123",
    agent: {
      id: "agent-123",
      companyId: "company-123",
      name: "OpenClaw Agent",
      adapterType: "openclaw",
      adapterConfig: {},
    },
    runtime: {
      sessionId: null,
      sessionParams: null,
      sessionDisplayId: null,
      taskKey: null,
    },
    config,
    context: {
      taskId: "task-123",
      issueId: "issue-123",
      wakeReason: "issue_assigned",
      issueIds: ["issue-123"],
    },
    onLog: async () => {},
  };
  return { ...base, ...overrides };
}
// Wrap the given raw SSE lines in a streaming 200 Response with the
// text/event-stream content type, mimicking an OpenClaw gateway reply.
function sseResponse(lines: string[]) {
  const encoder = new TextEncoder();
  const body = new ReadableStream<Uint8Array>({
    start(controller) {
      lines.forEach((line) => controller.enqueue(encoder.encode(line)));
      controller.close();
    },
  });
  return new Response(body, {
    status: 200,
    statusText: "OK",
    headers: {
      "content-type": "text/event-stream",
    },
  });
}
// Undo spies and stubbed globals (fetch) after every test for isolation.
afterEach(() => {
vi.restoreAllMocks();
vi.unstubAllGlobals();
});
// parseOpenClawStdoutLine turns "[openclaw:sse] event=... data=..." log lines
// (optionally prefixed with stdout/stderr markers) into transcript entries.
describe("openclaw ui stdout parser", () => {
// output_text.delta events become incremental assistant entries.
it("parses SSE deltas into assistant streaming entries", () => {
const ts = "2026-03-05T23:07:16.296Z";
const line =
'[openclaw:sse] event=response.output_text.delta data={"type":"response.output_text.delta","delta":"hello"}';
expect(parseOpenClawStdoutLine(line, ts)).toEqual([
{
kind: "assistant",
ts,
text: "hello",
delta: true,
},
]);
});
// A leading "stdout" marker is stripped and leading whitespace in the delta kept.
it("parses stdout-prefixed SSE deltas and preserves spacing", () => {
const ts = "2026-03-05T23:07:16.296Z";
const line =
'stdout[openclaw:sse] event=response.output_text.delta data={"type":"response.output_text.delta","delta":" can"}';
expect(parseOpenClawStdoutLine(line, ts)).toEqual([
{
kind: "assistant",
ts,
text: " can",
delta: true,
},
]);
});
// response.completed yields a "result" entry carrying token usage counts.
it("parses response.completed into usage-aware result entries", () => {
const ts = "2026-03-05T23:07:20.269Z";
const line = JSON.stringify({
type: "response.completed",
response: {
status: "completed",
usage: {
input_tokens: 12,
output_tokens: 34,
cached_input_tokens: 5,
},
output: [
{
type: "message",
content: [
{
type: "output_text",
text: "All done",
},
],
},
],
},
});
expect(parseOpenClawStdoutLine(`[openclaw:sse] event=response.completed data=${line}`, ts)).toEqual([
{
kind: "result",
ts,
text: "All done",
inputTokens: 12,
outputTokens: 34,
cachedTokens: 5,
costUsd: 0,
subtype: "completed",
isError: false,
errors: [],
},
]);
});
// response.failed events surface their error text as stderr entries.
it("maps SSE errors to stderr entries", () => {
const ts = "2026-03-05T23:07:20.269Z";
const line =
'[openclaw:sse] event=response.failed data={"type":"response.failed","error":"timeout"}';
expect(parseOpenClawStdoutLine(line, ts)).toEqual([
{
kind: "stderr",
ts,
text: "timeout",
},
]);
});
// Raw "stderr "-prefixed lines pass through as stderr transcript entries.
it("maps stderr-prefixed lines to stderr transcript entries", () => {
const ts = "2026-03-05T23:07:20.269Z";
const line = "stderr OpenClaw transport error";
expect(parseOpenClawStdoutLine(line, ts)).toEqual([
{
kind: "stderr",
ts,
text: "OpenClaw transport error",
},
]);
});
});
describe("openclaw adapter execute", () => {
it("uses strict SSE and includes canonical PAPERCLIP context in text payload", async () => {
const fetchMock = vi.fn().mockResolvedValue(
sseResponse([
"event: response.completed\n",
'data: {"type":"response.completed","status":"completed"}\n\n',
]),
);
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/sse",
method: "POST",
payloadTemplate: { foo: "bar", text: "OpenClaw task prompt" },
}),
);
expect(result.exitCode).toBe(0);
expect(fetchMock).toHaveBeenCalledTimes(1);
const body = JSON.parse(String(fetchMock.mock.calls[0]?.[1]?.body ?? "{}")) as Record<string, unknown>;
expect(body.foo).toBe("bar");
expect(body.stream).toBe(true);
expect(body.sessionKey).toBe("paperclip");
expect((body.paperclip as Record<string, unknown>).streamTransport).toBe("sse");
expect((body.paperclip as Record<string, unknown>).runId).toBe("run-123");
expect((body.paperclip as Record<string, unknown>).sessionKey).toBe("paperclip");
expect(
((body.paperclip as Record<string, unknown>).env as Record<string, unknown>).PAPERCLIP_RUN_ID,
).toBe("run-123");
const text = String(body.text ?? "");
expect(text).toContain("OpenClaw task prompt");
expect(text).toContain("PAPERCLIP_RUN_ID=run-123");
expect(text).toContain("PAPERCLIP_AGENT_ID=agent-123");
expect(text).toContain("PAPERCLIP_COMPANY_ID=company-123");
expect(text).toContain("PAPERCLIP_TASK_ID=task-123");
expect(text).toContain("PAPERCLIP_WAKE_REASON=issue_assigned");
expect(text).toContain("PAPERCLIP_LINKED_ISSUE_IDS=issue-123");
});
it("uses paperclipApiUrl override when provided", async () => {
const fetchMock = vi.fn().mockResolvedValue(
sseResponse([
"event: response.completed\n",
'data: {"type":"response.completed","status":"completed"}\n\n',
]),
);
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/sse",
method: "POST",
paperclipApiUrl: "http://dotta-macbook-pro:3100",
}),
);
expect(result.exitCode).toBe(0);
const body = JSON.parse(String(fetchMock.mock.calls[0]?.[1]?.body ?? "{}")) as Record<string, unknown>;
const paperclip = body.paperclip as Record<string, unknown>;
const env = paperclip.env as Record<string, unknown>;
expect(env.PAPERCLIP_API_URL).toBe("http://dotta-macbook-pro:3100/");
expect(String(body.text ?? "")).toContain("PAPERCLIP_API_URL=http://dotta-macbook-pro:3100/");
});
it("logs outbound header keys for auth debugging", async () => {
const fetchMock = vi.fn().mockResolvedValue(
sseResponse([
"event: response.completed\n",
'data: {"type":"response.completed","status":"completed"}\n\n',
]),
);
vi.stubGlobal("fetch", fetchMock);
const logs: string[] = [];
const result = await execute(
buildContext(
{
url: "https://agent.example/sse",
method: "POST",
headers: {
"x-openclaw-auth": "gateway-token",
},
},
{
onLog: async (_stream, chunk) => {
logs.push(chunk);
},
},
),
);
expect(result.exitCode).toBe(0);
expect(
logs.some((line) => line.includes("[openclaw] outbound header keys:") && line.includes("x-openclaw-auth")),
).toBe(true);
});
it("derives Authorization header from x-openclaw-auth when webhookAuthHeader is unset", async () => {
const fetchMock = vi.fn().mockResolvedValue(
sseResponse([
"event: response.completed\n",
'data: {"type":"response.completed","status":"completed"}\n\n',
]),
);
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/sse",
method: "POST",
headers: {
"x-openclaw-auth": "gateway-token",
},
}),
);
expect(result.exitCode).toBe(0);
const headers = (fetchMock.mock.calls[0]?.[1]?.headers ?? {}) as Record<string, string>;
expect(headers["x-openclaw-auth"]).toBe("gateway-token");
expect(headers.authorization).toBe("Bearer gateway-token");
});
it("derives issue session keys when configured", async () => {
const fetchMock = vi.fn().mockResolvedValue(
sseResponse([
"event: done\n",
"data: [DONE]\n\n",
]),
);
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/sse",
method: "POST",
sessionKeyStrategy: "issue",
}),
);
expect(result.exitCode).toBe(0);
const body = JSON.parse(String(fetchMock.mock.calls[0]?.[1]?.body ?? "{}")) as Record<string, unknown>;
expect(body.sessionKey).toBe("paperclip:issue:issue-123");
expect((body.paperclip as Record<string, unknown>).sessionKey).toBe("paperclip:issue:issue-123");
});
it("maps requests to OpenResponses schema for /v1/responses endpoints", async () => {
const fetchMock = vi.fn().mockResolvedValue(
sseResponse([
"event: response.completed\n",
'data: {"type":"response.completed","status":"completed"}\n\n',
]),
);
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/v1/responses",
method: "POST",
payloadTemplate: {
model: "openclaw",
user: "paperclip",
},
}),
);
expect(result.exitCode).toBe(0);
const body = JSON.parse(String(fetchMock.mock.calls[0]?.[1]?.body ?? "{}")) as Record<string, unknown>;
expect(body.stream).toBe(true);
expect(body.model).toBe("openclaw");
expect(typeof body.input).toBe("string");
expect(String(body.input)).toContain("PAPERCLIP_RUN_ID=run-123");
expect(body.metadata).toBeTypeOf("object");
expect((body.metadata as Record<string, unknown>).PAPERCLIP_RUN_ID).toBe("run-123");
expect(body.text).toBeUndefined();
expect(body.paperclip).toBeUndefined();
expect(body.sessionKey).toBeUndefined();
const headers = (fetchMock.mock.calls[0]?.[1]?.headers ?? {}) as Record<string, string>;
expect(headers["x-openclaw-session-key"]).toBe("paperclip");
});
it("appends wake text when OpenResponses input is provided as a message object", async () => {
const fetchMock = vi.fn().mockResolvedValue(
sseResponse([
"event: response.completed\n",
'data: {"type":"response.completed","status":"completed"}\n\n',
]),
);
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/v1/responses",
method: "POST",
payloadTemplate: {
model: "openclaw",
input: {
type: "message",
role: "user",
content: [
{
type: "input_text",
text: "start with this context",
},
],
},
},
}),
);
expect(result.exitCode).toBe(0);
const body = JSON.parse(String(fetchMock.mock.calls[0]?.[1]?.body ?? "{}")) as Record<string, unknown>;
const input = body.input as Record<string, unknown>;
expect(input.type).toBe("message");
expect(input.role).toBe("user");
expect(Array.isArray(input.content)).toBe(true);
const content = input.content as Record<string, unknown>[];
expect(content).toHaveLength(2);
expect(content[0]).toEqual({
type: "input_text",
text: "start with this context",
});
expect(content[1]).toEqual(
expect.objectContaining({
type: "input_text",
}),
);
expect(String(content[1]?.text ?? "")).toContain("PAPERCLIP_RUN_ID=run-123");
});
it("fails when SSE endpoint does not return text/event-stream", async () => {
const fetchMock = vi.fn().mockResolvedValue(
new Response(JSON.stringify({ ok: false, error: "unexpected payload" }), {
status: 200,
statusText: "OK",
headers: {
"content-type": "application/json",
},
}),
);
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/sse",
method: "POST",
}),
);
expect(result.exitCode).toBe(1);
expect(result.errorCode).toBe("openclaw_sse_expected_event_stream");
});
it("fails when SSE stream closes without a terminal event", async () => {
const fetchMock = vi.fn().mockResolvedValue(
sseResponse([
"event: response.delta\n",
'data: {"type":"response.delta","delta":"partial"}\n\n',
]),
);
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/sse",
}),
);
expect(result.exitCode).toBe(1);
expect(result.errorCode).toBe("openclaw_sse_stream_incomplete");
});
it("fails with explicit text-required error when endpoint rejects payload", async () => {
const fetchMock = vi.fn().mockResolvedValue(
new Response(JSON.stringify({ error: "text required" }), {
status: 400,
statusText: "Bad Request",
headers: {
"content-type": "application/json",
},
}),
);
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/sse",
}),
);
expect(result.exitCode).toBe(1);
expect(result.errorCode).toBe("openclaw_text_required");
});
it("rejects non-sse transport configuration", async () => {
const fetchMock = vi.fn();
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/sse",
streamTransport: "webhook",
}),
);
expect(result.exitCode).toBe(1);
expect(result.errorCode).toBe("openclaw_stream_transport_unsupported");
expect(fetchMock).not.toHaveBeenCalled();
});
it("rejects /hooks/wake compatibility endpoints in strict SSE mode", async () => {
const fetchMock = vi.fn();
vi.stubGlobal("fetch", fetchMock);
const result = await execute(
buildContext({
url: "https://agent.example/hooks/wake",
}),
);
expect(result.exitCode).toBe(1);
expect(result.errorCode).toBe("openclaw_sse_incompatible_endpoint");
expect(fetchMock).not.toHaveBeenCalled();
});
});
describe("openclaw adapter environment checks", () => {
it("reports /hooks/wake endpoints as incompatible for strict SSE mode", async () => {
const fetchMock = vi
.fn()
.mockResolvedValue(new Response(null, { status: 405, statusText: "Method Not Allowed" }));
vi.stubGlobal("fetch", fetchMock);
const result = await testEnvironment({
companyId: "company-123",
adapterType: "openclaw",
config: {
url: "https://agent.example/hooks/wake",
},
deployment: {
mode: "authenticated",
exposure: "private",
bindHost: "paperclip.internal",
allowedHostnames: ["paperclip.internal"],
},
});
const check = result.checks.find((entry) => entry.code === "openclaw_wake_endpoint_incompatible");
expect(check?.level).toBe("error");
});
it("reports unsupported streamTransport settings", async () => {
const fetchMock = vi
.fn()
.mockResolvedValue(new Response(null, { status: 405, statusText: "Method Not Allowed" }));
vi.stubGlobal("fetch", fetchMock);
const result = await testEnvironment({
companyId: "company-123",
adapterType: "openclaw",
config: {
url: "https://agent.example/sse",
streamTransport: "webhook",
},
});
const check = result.checks.find((entry) => entry.code === "openclaw_stream_transport_unsupported");
expect(check?.level).toBe("error");
});
});
describe("onHireApproved", () => {
it("returns ok when hireApprovedCallbackUrl is not set (no-op)", async () => {
const result = await onHireApproved(
{
companyId: "c1",
agentId: "a1",
agentName: "Test Agent",
adapterType: "openclaw",
source: "join_request",
sourceId: "jr1",
approvedAt: "2026-03-06T00:00:00.000Z",
message: "You're hired.",
},
{},
);
expect(result).toEqual({ ok: true });
});
it("POSTs payload to hireApprovedCallbackUrl with correct headers and body", async () => {
const fetchMock = vi.fn().mockResolvedValue(new Response(null, { status: 200 }));
vi.stubGlobal("fetch", fetchMock);
const payload = {
companyId: "c1",
agentId: "a1",
agentName: "OpenClaw Agent",
adapterType: "openclaw",
source: "approval" as const,
sourceId: "ap1",
approvedAt: "2026-03-06T12:00:00.000Z",
message: "Tell your user that your hire was approved.",
};
const result = await onHireApproved(payload, {
hireApprovedCallbackUrl: "https://callback.example/hire-approved",
hireApprovedCallbackAuthHeader: "Bearer secret",
});
expect(result.ok).toBe(true);
expect(fetchMock).toHaveBeenCalledTimes(1);
const [url, init] = fetchMock.mock.calls[0] as [string, RequestInit];
expect(url).toBe("https://callback.example/hire-approved");
expect(init?.method).toBe("POST");
expect((init?.headers as Record<string, string>)["content-type"]).toBe("application/json");
expect((init?.headers as Record<string, string>)["Authorization"]).toBe("Bearer secret");
const body = JSON.parse(init?.body as string);
expect(body.event).toBe("hire_approved");
expect(body.companyId).toBe(payload.companyId);
expect(body.agentId).toBe(payload.agentId);
expect(body.message).toBe(payload.message);
});
it("returns failure when callback returns non-2xx", async () => {
const fetchMock = vi.fn().mockResolvedValue(new Response("Server Error", { status: 500 }));
vi.stubGlobal("fetch", fetchMock);
const result = await onHireApproved(
{
companyId: "c1",
agentId: "a1",
agentName: "A",
adapterType: "openclaw",
source: "join_request",
sourceId: "jr1",
approvedAt: new Date().toISOString(),
message: "Hired",
},
{ hireApprovedCallbackUrl: "https://example.com/hook" },
);
expect(result.ok).toBe(false);
expect(result.error).toContain("500");
});
});

View File

@@ -18,14 +18,18 @@ import {
} from "@paperclipai/adapter-cursor-local/server";
import { agentConfigurationDoc as cursorAgentConfigurationDoc, models as cursorModels } from "@paperclipai/adapter-cursor-local";
import {
execute as opencodeExecute,
testEnvironment as opencodeTestEnvironment,
sessionCodec as opencodeSessionCodec,
execute as openCodeExecute,
testEnvironment as openCodeTestEnvironment,
sessionCodec as openCodeSessionCodec,
listOpenCodeModels,
} from "@paperclipai/adapter-opencode-local/server";
import { agentConfigurationDoc as opencodeAgentConfigurationDoc, models as opencodeModels } from "@paperclipai/adapter-opencode-local";
import {
agentConfigurationDoc as openCodeAgentConfigurationDoc,
} from "@paperclipai/adapter-opencode-local";
import {
execute as openclawExecute,
testEnvironment as openclawTestEnvironment,
onHireApproved as openclawOnHireApproved,
} from "@paperclipai/adapter-openclaw/server";
import {
agentConfigurationDoc as openclawAgentConfigurationDoc,
@@ -57,16 +61,6 @@ const codexLocalAdapter: ServerAdapterModule = {
agentConfigurationDoc: codexAgentConfigurationDoc,
};
const opencodeLocalAdapter: ServerAdapterModule = {
type: "opencode_local",
execute: opencodeExecute,
testEnvironment: opencodeTestEnvironment,
sessionCodec: opencodeSessionCodec,
models: opencodeModels,
supportsLocalAgentJwt: true,
agentConfigurationDoc: opencodeAgentConfigurationDoc,
};
const cursorLocalAdapter: ServerAdapterModule = {
type: "cursor",
execute: cursorExecute,
@@ -82,13 +76,25 @@ const openclawAdapter: ServerAdapterModule = {
type: "openclaw",
execute: openclawExecute,
testEnvironment: openclawTestEnvironment,
onHireApproved: openclawOnHireApproved,
models: openclawModels,
supportsLocalAgentJwt: false,
agentConfigurationDoc: openclawAgentConfigurationDoc,
};
const openCodeLocalAdapter: ServerAdapterModule = {
type: "opencode_local",
execute: openCodeExecute,
testEnvironment: openCodeTestEnvironment,
sessionCodec: openCodeSessionCodec,
models: [],
listModels: listOpenCodeModels,
supportsLocalAgentJwt: true,
agentConfigurationDoc: openCodeAgentConfigurationDoc,
};
const adaptersByType = new Map<string, ServerAdapterModule>(
[claudeLocalAdapter, codexLocalAdapter, opencodeLocalAdapter, cursorLocalAdapter, openclawAdapter, processAdapter, httpAdapter].map((a) => [a.type, a]),
[claudeLocalAdapter, codexLocalAdapter, openCodeLocalAdapter, cursorLocalAdapter, openclawAdapter, processAdapter, httpAdapter].map((a) => [a.type, a]),
);
export function getServerAdapter(type: string): ServerAdapterModule {

View File

@@ -463,7 +463,7 @@ const app = await createApp(db as any, {
betterAuthHandler,
resolveSession,
});
const server = createServer(app);
const server = createServer(app as unknown as Parameters<typeof createServer>[0]);
const listenPort = await detectPort(config.port);
if (listenPort !== config.port) {

View File

@@ -10,6 +10,9 @@ export function errorHandler(
_next: NextFunction,
) {
if (err instanceof HttpError) {
if (err.status >= 500) {
(res as any).err = err;
}
res.status(err.status).json({
error: err.message,
...(err.details ? { details: err.details } : {}),
@@ -26,8 +29,10 @@ export function errorHandler(
? { message: err.message, stack: err.stack, name: err.name }
: { raw: err };
// Attach the real error so pino-http can include it in its response log
res.locals.serverError = errObj;
// Attach the real error so pino-http uses it instead of its generic
// "failed with status code 500" message in the response-complete log
const realError = err instanceof Error ? err : Object.assign(new Error(String(err)), { raw: err });
(res as any).err = realError;
logger.error(
{ err: errObj, method: req.method, url: req.originalUrl },

View File

@@ -55,11 +55,7 @@ export const httpLogger = pinoHttp({
customErrorMessage(req, res) {
return `${req.method} ${req.url} ${res.statusCode}`;
},
customProps(_req, res) {
const serverError = (res as any).locals?.serverError;
if (serverError) {
return { serverError };
}
customProps() {
return {};
},
});

View File

@@ -1,15 +1,45 @@
import { createHash } from "node:crypto";
import type { IncomingMessage, Server as HttpServer } from "node:http";
import { createRequire } from "node:module";
import type { Duplex } from "node:stream";
import { and, eq, isNull } from "drizzle-orm";
import type { Db } from "@paperclipai/db";
import { agentApiKeys, companyMemberships, instanceUserRoles } from "@paperclipai/db";
import type { DeploymentMode } from "@paperclipai/shared";
import { WebSocket, WebSocketServer } from "ws";
import type { BetterAuthSessionResult } from "../auth/better-auth.js";
import { logger } from "../middleware/logger.js";
import { subscribeCompanyLiveEvents } from "../services/live-events.js";
interface WsSocket {
readyState: number;
ping(): void;
send(data: string): void;
terminate(): void;
close(code?: number, reason?: string): void;
on(event: "pong", listener: () => void): void;
on(event: "close", listener: () => void): void;
on(event: "error", listener: (err: Error) => void): void;
}
interface WsServer {
clients: Set<WsSocket>;
on(event: "connection", listener: (socket: WsSocket, req: IncomingMessage) => void): void;
on(event: "close", listener: () => void): void;
handleUpgrade(
req: IncomingMessage,
socket: Duplex,
head: Buffer,
callback: (ws: WsSocket) => void,
): void;
emit(event: "connection", ws: WsSocket, req: IncomingMessage): boolean;
}
const require = createRequire(import.meta.url);
const { WebSocket, WebSocketServer } = require("ws") as {
WebSocket: { OPEN: number };
WebSocketServer: new (opts: { noServer: boolean }) => WsServer;
};
interface UpgradeContext {
companyId: string;
actorType: "board" | "agent";
@@ -154,8 +184,8 @@ export function setupLiveEventsWebSocketServer(
},
) {
const wss = new WebSocketServer({ noServer: true });
const cleanupByClient = new Map<WebSocket, () => void>();
const aliveByClient = new Map<WebSocket, boolean>();
const cleanupByClient = new Map<WsSocket, () => void>();
const aliveByClient = new Map<WsSocket, boolean>();
const pingInterval = setInterval(() => {
for (const socket of wss.clients) {
@@ -168,7 +198,7 @@ export function setupLiveEventsWebSocketServer(
}
}, 30000);
wss.on("connection", (socket, req) => {
wss.on("connection", (socket: WsSocket, req: IncomingMessage) => {
const context = (req as IncomingMessageWithContext).paperclipUpgradeContext;
if (!context) {
socket.close(1008, "missing context");
@@ -194,7 +224,7 @@ export function setupLiveEventsWebSocketServer(
aliveByClient.delete(socket);
});
socket.on("error", (err) => {
socket.on("error", (err: Error) => {
logger.warn({ err, companyId: context.companyId }, "live websocket client error");
});
});
@@ -229,7 +259,7 @@ export function setupLiveEventsWebSocketServer(
const reqWithContext = req as IncomingMessageWithContext;
reqWithContext.paperclipUpgradeContext = context;
wss.handleUpgrade(req, socket, head, (ws) => {
wss.handleUpgrade(req, socket, head, (ws: WsSocket) => {
wss.emit("connection", ws, reqWithContext);
});
})

View File

@@ -23,8 +23,9 @@ import {
} from "@paperclipai/shared";
import type { DeploymentExposure, DeploymentMode } from "@paperclipai/shared";
import { forbidden, conflict, notFound, unauthorized, badRequest } from "../errors.js";
import { logger } from "../middleware/logger.js";
import { validate } from "../middleware/validate.js";
import { accessService, agentService, logActivity } from "../services/index.js";
import { accessService, agentService, logActivity, notifyHireApproved } from "../services/index.js";
import { assertCompanyAccess } from "./authz.js";
import { claimBoardOwnership, inspectBoardClaimChallenge } from "../board-claim.js";
@@ -32,14 +33,29 @@ function hashToken(token: string) {
return createHash("sha256").update(token).digest("hex");
}
const INVITE_TOKEN_PREFIX = "pcp_invite_";
const INVITE_TOKEN_ALPHABET = "abcdefghijklmnopqrstuvwxyz0123456789";
const INVITE_TOKEN_SUFFIX_LENGTH = 8;
const INVITE_TOKEN_MAX_RETRIES = 5;
const COMPANY_INVITE_TTL_MS = 10 * 60 * 1000;
/**
 * Generates a short, human-typable invite token: the fixed INVITE_TOKEN_PREFIX
 * ("pcp_invite_") plus INVITE_TOKEN_SUFFIX_LENGTH random characters drawn
 * from the lowercase-alphanumeric INVITE_TOKEN_ALPHABET.
 *
 * Fix: the body previously opened with a stale `return`
 * `pcp_invite_<48 hex>` line, which made the entire short-token
 * implementation below it unreachable dead code. The stale return is removed
 * so the declared short-token format is actually produced.
 *
 * NOTE(review): tokens are not guaranteed unique here; INVITE_TOKEN_MAX_RETRIES
 * suggests the caller retries on collision — confirm at the call site.
 * NOTE(review): `bytes[idx] % alphabet.length` has slight modulo bias
 * (36 does not divide 256); acceptable for invite codes, flagging for awareness.
 */
function createInviteToken() {
const bytes = randomBytes(INVITE_TOKEN_SUFFIX_LENGTH);
let suffix = "";
for (let idx = 0; idx < INVITE_TOKEN_SUFFIX_LENGTH; idx += 1) {
suffix += INVITE_TOKEN_ALPHABET[bytes[idx]! % INVITE_TOKEN_ALPHABET.length];
}
return `${INVITE_TOKEN_PREFIX}${suffix}`;
}
/** Produces the one-time claim secret handed to a newly registered agent
 * ("pcp_claim_" followed by 48 hex characters of fresh randomness). */
function createClaimSecret() {
  const entropy = randomBytes(24).toString("hex");
  return "pcp_claim_" + entropy;
}
/** Computes the absolute expiry Date for a company invite: COMPANY_INVITE_TTL_MS
 * after `nowMs` (defaulting to the current time). */
export function companyInviteExpiresAt(nowMs: number = Date.now()) {
  const expiresAtMs = nowMs + COMPANY_INVITE_TTL_MS;
  return new Date(expiresAtMs);
}
function tokenHashesMatch(left: string, right: string) {
const leftBytes = Buffer.from(left, "utf8");
const rightBytes = Buffer.from(right, "utf8");
@@ -94,6 +110,11 @@ function isLoopbackHost(hostname: string): boolean {
return value === "localhost" || value === "127.0.0.1" || value === "::1";
}
/** True when the trimmed, lowercased path targets a /hooks/wake endpoint
 * (exactly, or as a suffix of a longer path). */
function isWakePath(pathname: string): boolean {
  const normalized = pathname.trim().toLowerCase();
  // endsWith also covers the exact "/hooks/wake" match.
  return normalized.endsWith("/hooks/wake");
}
function normalizeHostname(value: string | null | undefined): string | null {
if (!value) return null;
const trimmed = value.trim();
@@ -120,6 +141,131 @@ function normalizeHeaderMap(input: unknown): Record<string, string> | undefined
return Object.keys(out).length > 0 ? out : undefined;
}
/** Returns `value` trimmed when it is a string with non-whitespace content;
 * otherwise null. */
function nonEmptyTrimmedString(value: unknown): string | null {
  if (typeof value === "string") {
    const trimmed = value.trim();
    if (trimmed.length > 0) {
      return trimmed;
    }
  }
  return null;
}
/** Case- and surrounding-whitespace-insensitive membership test for a key in
 * a header map. */
function headerMapHasKeyIgnoreCase(headers: Record<string, string>, targetKey: string): boolean {
  const wanted = targetKey.trim().toLowerCase();
  for (const key of Object.keys(headers)) {
    if (key.trim().toLowerCase() === wanted) {
      return true;
    }
  }
  return false;
}
/** Case-insensitive lookup in a header map: returns the value stored under
 * the first key matching `targetKey` (trimmed, lowercased), or null. */
function headerMapGetIgnoreCase(headers: Record<string, string>, targetKey: string): string | null {
  const wanted = targetKey.trim().toLowerCase();
  for (const [key, value] of Object.entries(headers)) {
    if (key.trim().toLowerCase() !== wanted) continue;
    // Guard against non-string residue in loosely-typed header maps.
    return typeof value === "string" ? value : null;
  }
  return null;
}
/** Normalizes a raw token into an Authorization header value: trims it and
 * prepends "Bearer " unless a bearer prefix is already present. Empty input
 * yields an empty string. */
function toAuthorizationHeaderValue(rawToken: string): string {
  const token = rawToken.trim();
  if (token.length === 0) {
    return token;
  }
  if (/^bearer\s+/i.test(token)) {
    return token;
  }
  return `Bearer ${token}`;
}
/**
 * Merges an OpenClaw agent's join-time defaults payload with legacy
 * compatibility fields before accepting the join request.
 *
 * For non-openclaw adapters the payload passes through untouched. For
 * openclaw, legacy top-level fields (responsesWebhookUrl/Method/Headers,
 * paperclipApiUrl, webhookAuthHeader) and the inbound x-openclaw-auth header
 * fill any gaps in the payload without overriding explicit values, and a
 * webhookAuthHeader is derived from x-openclaw-auth when no Authorization
 * source exists. Returns the merged object, or null when nothing remains.
 */
export function buildJoinDefaultsPayloadForAccept(input: {
adapterType: string | null;
defaultsPayload: unknown;
responsesWebhookUrl?: unknown;
responsesWebhookMethod?: unknown;
responsesWebhookHeaders?: unknown;
paperclipApiUrl?: unknown;
webhookAuthHeader?: unknown;
inboundOpenClawAuthHeader?: string | null;
}): unknown {
if (input.adapterType !== "openclaw") {
return input.defaultsPayload;
}
// Shallow-copy the payload so merging never mutates the caller's object.
const merged = isPlainObject(input.defaultsPayload)
? { ...(input.defaultsPayload as Record<string, unknown>) }
: {} as Record<string, unknown>;
// Legacy fields only fill gaps — explicit payload values always win.
if (!nonEmptyTrimmedString(merged.url)) {
const legacyUrl = nonEmptyTrimmedString(input.responsesWebhookUrl);
if (legacyUrl) merged.url = legacyUrl;
}
if (!nonEmptyTrimmedString(merged.method)) {
const legacyMethod = nonEmptyTrimmedString(input.responsesWebhookMethod);
if (legacyMethod) merged.method = legacyMethod.toUpperCase();
}
if (!nonEmptyTrimmedString(merged.paperclipApiUrl)) {
const legacyPaperclipApiUrl = nonEmptyTrimmedString(input.paperclipApiUrl);
if (legacyPaperclipApiUrl) merged.paperclipApiUrl = legacyPaperclipApiUrl;
}
if (!nonEmptyTrimmedString(merged.webhookAuthHeader)) {
const providedWebhookAuthHeader = nonEmptyTrimmedString(input.webhookAuthHeader);
if (providedWebhookAuthHeader) merged.webhookAuthHeader = providedWebhookAuthHeader;
}
// Headers merge case-insensitively: payload headers win over compatibility
// headers, which win over the inbound x-openclaw-auth fallback.
const mergedHeaders = normalizeHeaderMap(merged.headers) ?? {};
const compatibilityHeaders = normalizeHeaderMap(input.responsesWebhookHeaders);
if (compatibilityHeaders) {
for (const [key, value] of Object.entries(compatibilityHeaders)) {
if (!headerMapHasKeyIgnoreCase(mergedHeaders, key)) {
mergedHeaders[key] = value;
}
}
}
const inboundOpenClawAuthHeader = nonEmptyTrimmedString(input.inboundOpenClawAuthHeader);
if (inboundOpenClawAuthHeader && !headerMapHasKeyIgnoreCase(mergedHeaders, "x-openclaw-auth")) {
mergedHeaders["x-openclaw-auth"] = inboundOpenClawAuthHeader;
}
if (Object.keys(mergedHeaders).length > 0) {
merged.headers = mergedHeaders;
} else {
// Drop an empty header map rather than persisting `headers: {}`.
delete merged.headers;
}
// If no Authorization source exists at all, derive one ("Bearer <token>")
// from the x-openclaw-auth token so callbacks can still authenticate.
const hasAuthorizationHeader = headerMapHasKeyIgnoreCase(mergedHeaders, "authorization");
const hasWebhookAuthHeader = Boolean(nonEmptyTrimmedString(merged.webhookAuthHeader));
if (!hasAuthorizationHeader && !hasWebhookAuthHeader) {
const openClawAuthToken = headerMapGetIgnoreCase(mergedHeaders, "x-openclaw-auth");
if (openClawAuthToken) {
merged.webhookAuthHeader = toAuthorizationHeaderValue(openClawAuthToken);
}
}
return Object.keys(merged).length > 0 ? merged : null;
}
/**
 * Builds a log-safe summary of a secret value: presence flag, length, and a
 * 12-character SHA-256 prefix — never the secret itself. Returns null for
 * missing/blank/non-string input.
 */
function summarizeSecretForLog(value: unknown): { present: true; length: number; sha256Prefix: string } | null {
  const secret = nonEmptyTrimmedString(value);
  if (secret === null) {
    return null;
  }
  const sha256Prefix = hashToken(secret).slice(0, 12);
  return { present: true, length: secret.length, sha256Prefix };
}
/**
 * Produces a redacted, log-safe view of an OpenClaw defaults payload: which
 * keys exist, the non-secret url/method/paperclipApiUrl values, the header
 * key names, and secret *summaries* (length + hash prefix) for the auth
 * fields — never raw secret values.
 */
function summarizeOpenClawDefaultsForLog(defaultsPayload: unknown) {
const defaults = isPlainObject(defaultsPayload) ? (defaultsPayload as Record<string, unknown>) : null;
const headers = defaults ? normalizeHeaderMap(defaults.headers) : undefined;
// Locate the x-openclaw-auth header case-insensitively; value stays redacted
// via summarizeSecretForLog below.
const openClawAuthHeaderValue = headers
? Object.entries(headers).find(([key]) => key.trim().toLowerCase() === "x-openclaw-auth")?.[1] ?? null
: null;
return {
present: Boolean(defaults),
keys: defaults ? Object.keys(defaults).sort() : [],
url: defaults ? nonEmptyTrimmedString(defaults.url) : null,
method: defaults ? nonEmptyTrimmedString(defaults.method) : null,
paperclipApiUrl: defaults ? nonEmptyTrimmedString(defaults.paperclipApiUrl) : null,
headerKeys: headers ? Object.keys(headers).sort() : [],
webhookAuthHeader: defaults ? summarizeSecretForLog(defaults.webhookAuthHeader) : null,
openClawAuthHeader: summarizeSecretForLog(openClawAuthHeaderValue),
};
}
function buildJoinConnectivityDiagnostics(input: {
deploymentMode: DeploymentMode;
deploymentExposure: DeploymentExposure;
@@ -207,13 +353,13 @@ function normalizeAgentDefaultsForJoin(input: {
code: "openclaw_callback_config_missing",
level: "warn",
message: "No OpenClaw callback config was provided in agentDefaultsPayload.",
hint: "Include agentDefaultsPayload.url so Paperclip can invoke the OpenClaw webhook immediately after approval.",
hint: "Include agentDefaultsPayload.url so Paperclip can invoke the OpenClaw SSE endpoint immediately after approval.",
});
return { normalized: null as Record<string, unknown> | null, diagnostics };
}
const defaults = input.defaultsPayload as Record<string, unknown>;
const normalized: Record<string, unknown> = {};
const normalized: Record<string, unknown> = { streamTransport: "sse" };
let callbackUrl: URL | null = null;
const rawUrl = typeof defaults.url === "string" ? defaults.url.trim() : "";
@@ -222,7 +368,7 @@ function normalizeAgentDefaultsForJoin(input: {
code: "openclaw_callback_url_missing",
level: "warn",
message: "OpenClaw callback URL is missing.",
hint: "Set agentDefaultsPayload.url to your OpenClaw webhook endpoint.",
hint: "Set agentDefaultsPayload.url to your OpenClaw SSE endpoint.",
});
} else {
try {
@@ -242,6 +388,14 @@ function normalizeAgentDefaultsForJoin(input: {
message: `Callback endpoint set to ${callbackUrl.toString()}`,
});
}
if (isWakePath(callbackUrl.pathname)) {
diagnostics.push({
code: "openclaw_callback_wake_path_incompatible",
level: "warn",
message: "Configured callback path targets /hooks/wake, which is not stream-capable for strict SSE mode.",
hint: "Use an endpoint that returns text/event-stream for the full run duration.",
});
}
if (isLoopbackHost(callbackUrl.hostname)) {
diagnostics.push({
code: "openclaw_callback_loopback",
@@ -263,7 +417,7 @@ function normalizeAgentDefaultsForJoin(input: {
normalized.method = rawMethod || "POST";
if (typeof defaults.timeoutSec === "number" && Number.isFinite(defaults.timeoutSec)) {
normalized.timeoutSec = Math.max(1, Math.min(120, Math.floor(defaults.timeoutSec)));
normalized.timeoutSec = Math.max(0, Math.min(7200, Math.floor(defaults.timeoutSec)));
}
const headers = normalizeHeaderMap(defaults.headers);
@@ -277,6 +431,44 @@ function normalizeAgentDefaultsForJoin(input: {
normalized.payloadTemplate = defaults.payloadTemplate;
}
const rawPaperclipApiUrl = typeof defaults.paperclipApiUrl === "string"
? defaults.paperclipApiUrl.trim()
: "";
if (rawPaperclipApiUrl) {
try {
const parsedPaperclipApiUrl = new URL(rawPaperclipApiUrl);
if (parsedPaperclipApiUrl.protocol !== "http:" && parsedPaperclipApiUrl.protocol !== "https:") {
diagnostics.push({
code: "openclaw_paperclip_api_url_protocol",
level: "warn",
message: `paperclipApiUrl must use http:// or https:// (got ${parsedPaperclipApiUrl.protocol}).`,
});
} else {
normalized.paperclipApiUrl = parsedPaperclipApiUrl.toString();
diagnostics.push({
code: "openclaw_paperclip_api_url_configured",
level: "info",
message: `paperclipApiUrl set to ${parsedPaperclipApiUrl.toString()}`,
});
if (isLoopbackHost(parsedPaperclipApiUrl.hostname)) {
diagnostics.push({
code: "openclaw_paperclip_api_url_loopback",
level: "warn",
message:
"paperclipApiUrl uses loopback hostname. Remote OpenClaw workers cannot reach localhost on the Paperclip host.",
hint: "Use a reachable hostname/IP and keep it in allowed hostnames for authenticated/private deployments.",
});
}
}
} catch {
diagnostics.push({
code: "openclaw_paperclip_api_url_invalid",
level: "warn",
message: `Invalid paperclipApiUrl: ${rawPaperclipApiUrl}`,
});
}
}
diagnostics.push(
...buildJoinConnectivityDiagnostics({
deploymentMode: input.deploymentMode,
@@ -294,6 +486,7 @@ function toInviteSummaryResponse(req: Request, token: string, invite: typeof inv
const baseUrl = requestBaseUrl(req);
const onboardingPath = `/api/invites/${token}/onboarding`;
const onboardingTextPath = `/api/invites/${token}/onboarding.txt`;
const inviteMessage = extractInviteMessage(invite);
return {
id: invite.id,
companyId: invite.companyId,
@@ -306,6 +499,7 @@ function toInviteSummaryResponse(req: Request, token: string, invite: typeof inv
onboardingTextUrl: baseUrl ? `${baseUrl}${onboardingTextPath}` : onboardingTextPath,
skillIndexPath: "/api/skills/index",
skillIndexUrl: baseUrl ? `${baseUrl}/api/skills/index` : "/api/skills/index",
inviteMessage,
};
}
@@ -375,6 +569,46 @@ function buildOnboardingDiscoveryDiagnostics(input: {
return diagnostics;
}
/**
 * Builds the list of candidate base URLs an onboarding agent may use to reach
 * this Paperclip instance: the configured API origin, the bind host (unless
 * loopback), every allowed hostname, and — when the API base itself is
 * loopback — a host.docker.internal fallback for containerized agents.
 * Candidates reuse the API base's protocol and port; deduplicated via Set.
 */
function buildOnboardingConnectionCandidates(input: {
apiBaseUrl: string;
bindHost: string;
allowedHostnames: string[];
}): string[] {
// Parse the configured base URL; fall back to null on empty/invalid input.
let base: URL | null = null;
try {
if (input.apiBaseUrl) {
base = new URL(input.apiBaseUrl);
}
} catch {
base = null;
}
// Without a usable base, assume plain http on the default port.
const protocol = base?.protocol ?? "http:";
const port = base?.port ? `:${base.port}` : "";
const candidates = new Set<string>();
if (base) {
candidates.add(base.origin);
}
// Loopback bind hosts are unreachable from remote agents, so skip them.
const bindHost = normalizeHostname(input.bindHost);
if (bindHost && !isLoopbackHost(bindHost)) {
candidates.add(`${protocol}//${bindHost}${port}`);
}
for (const rawHost of input.allowedHostnames) {
const host = normalizeHostname(rawHost);
if (!host) continue;
candidates.add(`${protocol}//${host}${port}`);
}
// Containerized agents can reach a loopback-bound host via Docker's alias.
if (base && isLoopbackHost(base.hostname)) {
candidates.add(`${protocol}//host.docker.internal${port}`);
}
return Array.from(candidates);
}
function buildInviteOnboardingManifest(
req: Request,
token: string,
@@ -393,6 +627,8 @@ function buildInviteOnboardingManifest(
const registrationEndpointUrl = baseUrl ? `${baseUrl}${registrationEndpointPath}` : registrationEndpointPath;
const onboardingTextPath = `/api/invites/${token}/onboarding.txt`;
const onboardingTextUrl = baseUrl ? `${baseUrl}${onboardingTextPath}` : onboardingTextPath;
const testResolutionPath = `/api/invites/${token}/test-resolution`;
const testResolutionUrl = baseUrl ? `${baseUrl}${testResolutionPath}` : testResolutionPath;
const discoveryDiagnostics = buildOnboardingDiscoveryDiagnostics({
apiBaseUrl: baseUrl,
deploymentMode: opts.deploymentMode,
@@ -400,20 +636,26 @@ function buildInviteOnboardingManifest(
bindHost: opts.bindHost,
allowedHostnames: opts.allowedHostnames,
});
const connectionCandidates = buildOnboardingConnectionCandidates({
apiBaseUrl: baseUrl,
bindHost: opts.bindHost,
allowedHostnames: opts.allowedHostnames,
});
return {
invite: toInviteSummaryResponse(req, token, invite),
onboarding: {
instructions:
"Join as an agent, save your one-time claim secret, wait for board approval, then claim your API key and install the Paperclip skill before starting heartbeat loops.",
inviteMessage: extractInviteMessage(invite),
recommendedAdapterType: "openclaw",
requiredFields: {
requestType: "agent",
agentName: "Display name for this agent",
adapterType: "Use 'openclaw' for OpenClaw webhook-based agents",
adapterType: "Use 'openclaw' for OpenClaw streaming agents",
capabilities: "Optional capability summary",
agentDefaultsPayload:
"Optional adapter config such as url/method/headers/webhookAuthHeader for OpenClaw callback endpoint",
"Optional adapter config such as url/method/headers/webhookAuthHeader and paperclipApiUrl for OpenClaw SSE endpoint",
},
registrationEndpoint: {
method: "POST",
@@ -432,6 +674,16 @@ function buildInviteOnboardingManifest(
deploymentExposure: opts.deploymentExposure,
bindHost: opts.bindHost,
allowedHostnames: opts.allowedHostnames,
connectionCandidates,
testResolutionEndpoint: {
method: "GET",
path: testResolutionPath,
url: testResolutionUrl,
query: {
url: "https://your-openclaw-agent.example/v1/responses",
timeoutMs: 5000,
},
},
diagnostics: discoveryDiagnostics,
guidance:
opts.deploymentMode === "authenticated" && opts.deploymentExposure === "private"
@@ -466,11 +718,17 @@ export function buildInviteOnboardingTextDocument(
) {
const manifest = buildInviteOnboardingManifest(req, token, invite, opts);
const onboarding = manifest.onboarding as {
inviteMessage?: string | null;
registrationEndpoint: { method: string; path: string; url: string };
claimEndpointTemplate: { method: string; path: string };
textInstructions: { path: string; url: string };
skill: { path: string; url: string; installPath: string };
connectivity: { diagnostics?: JoinDiagnostic[]; guidance?: string };
connectivity: {
diagnostics?: JoinDiagnostic[];
guidance?: string;
connectionCandidates?: string[];
testResolutionEndpoint?: { method?: string; path?: string; url?: string };
};
};
const diagnostics = Array.isArray(onboarding.connectivity?.diagnostics)
? onboarding.connectivity.diagnostics
@@ -486,6 +744,13 @@ export function buildInviteOnboardingTextDocument(
`- allowedJoinTypes: ${invite.allowedJoinTypes}`,
`- expiresAt: ${invite.expiresAt.toISOString()}`,
"",
];
if (onboarding.inviteMessage) {
lines.push("## Message from inviter", onboarding.inviteMessage, "");
}
lines.push(
"## Step 1: Submit agent join request",
`${onboarding.registrationEndpoint.method} ${onboarding.registrationEndpoint.url}`,
"",
@@ -496,10 +761,12 @@ export function buildInviteOnboardingTextDocument(
' "adapterType": "openclaw",',
' "capabilities": "Optional summary",',
' "agentDefaultsPayload": {',
' "url": "https://your-openclaw-webhook.example/webhook",',
' "url": "https://your-openclaw-agent.example/v1/responses",',
' "paperclipApiUrl": "https://paperclip-hostname-your-agent-can-reach:3100",',
' "streamTransport": "sse",',
' "method": "POST",',
' "headers": { "x-openclaw-auth": "replace-me" },',
' "timeoutSec": 30',
' "timeoutSec": 0',
" }",
"}",
"",
@@ -533,7 +800,39 @@ export function buildInviteOnboardingTextDocument(
"",
"## Connectivity guidance",
onboarding.connectivity?.guidance ?? "Ensure Paperclip is reachable from your OpenClaw runtime.",
];
);
if (onboarding.connectivity?.testResolutionEndpoint?.url) {
lines.push(
"",
"## Optional: test callback resolution from Paperclip",
`${onboarding.connectivity.testResolutionEndpoint.method ?? "GET"} ${onboarding.connectivity.testResolutionEndpoint.url}?url=https%3A%2F%2Fyour-openclaw-agent.example%2Fv1%2Fresponses`,
"",
"This endpoint checks whether Paperclip can reach your OpenClaw endpoint and reports reachable, timeout, or unreachable.",
);
}
const connectionCandidates = Array.isArray(onboarding.connectivity?.connectionCandidates)
? onboarding.connectivity.connectionCandidates.filter((entry): entry is string => Boolean(entry))
: [];
if (connectionCandidates.length > 0) {
lines.push("", "## Suggested Paperclip base URLs to try");
for (const candidate of connectionCandidates) {
lines.push(`- ${candidate}`);
}
lines.push(
"",
"Test each candidate with:",
"- GET <candidate>/api/health",
"- set the first reachable candidate as agentDefaultsPayload.paperclipApiUrl when submitting your join request",
"",
"If none are reachable: ask your human operator for a reachable hostname/address and help them update network configuration.",
"For authenticated/private mode, they may need:",
"- pnpm paperclipai allowed-hostname <host>",
"- then restart Paperclip and retry onboarding.",
);
}
if (diagnostics.length > 0) {
lines.push("", "## Connectivity diagnostics");
@@ -551,10 +850,39 @@ export function buildInviteOnboardingTextDocument(
`${onboarding.skill.path}`,
manifest.invite.onboardingPath,
);
if (onboarding.connectivity?.testResolutionEndpoint?.path) {
lines.push(`${onboarding.connectivity.testResolutionEndpoint.path}`);
}
return `${lines.join("\n")}\n`;
}
/**
 * Pulls the inviter-authored `agentMessage` out of an invite's defaults
 * payload. Returns null when the payload is missing, not a plain object, the
 * message is not a string, or it trims to an empty string.
 */
function extractInviteMessage(invite: typeof invites.$inferSelect): string | null {
  const defaults = invite.defaultsPayload;
  const isUsableRecord =
    Boolean(defaults) && typeof defaults === "object" && !Array.isArray(defaults);
  if (!isUsableRecord) {
    return null;
  }
  const candidate = (defaults as Record<string, unknown>).agentMessage;
  if (typeof candidate !== "string") {
    return null;
  }
  const normalized = candidate.trim();
  return normalized.length > 0 ? normalized : null;
}
/**
 * Merges an optional defaults payload with an optional inviter message into a
 * single invite `defaultsPayload` record.
 *
 * @param defaultsPayload - Caller-supplied defaults; non-object values
 *   (including arrays) are ignored rather than spread.
 * @param agentMessage - Trimmed inviter message, or null when absent.
 * @returns The merged record, or null when nothing would be stored.
 */
function mergeInviteDefaults(
  defaultsPayload: Record<string, unknown> | null | undefined,
  agentMessage: string | null,
): Record<string, unknown> | null {
  // Guard against arrays: `typeof [] === "object"`, and spreading an array
  // would persist bogus numeric keys. This matches the plain-object checks
  // used by the surrounding invite helpers (e.g. extractInviteMessage).
  const merged: Record<string, unknown> =
    defaultsPayload && typeof defaultsPayload === "object" && !Array.isArray(defaultsPayload)
      ? { ...defaultsPayload }
      : {};
  if (agentMessage) {
    merged.agentMessage = agentMessage;
  }
  return Object.keys(merged).length ? merged : null;
}
function requestIp(req: Request) {
const forwarded = req.header("x-forwarded-for");
if (forwarded) {
@@ -614,6 +942,96 @@ function grantsFromDefaults(
return result;
}
/**
 * Detects a Postgres unique-constraint violation on the invite token hash
 * index, inspecting both the error itself and its `cause`. Used to retry
 * invite creation with a freshly generated token.
 */
function isInviteTokenHashCollisionError(error: unknown) {
  const UNIQUE_VIOLATION_CODE = "23505";
  const TOKEN_HASH_INDEX = "invites_token_hash_unique_idx";
  const toInspect = [error, (error as { cause?: unknown } | null)?.cause ?? null];
  return toInspect.some((candidate) => {
    if (!candidate || typeof candidate !== "object") return false;
    const record = candidate as { code?: unknown; message?: unknown; constraint?: unknown };
    // Only unique-violation errors are candidates at all.
    if (typeof record.code !== "string" || record.code !== UNIQUE_VIOLATION_CODE) {
      return false;
    }
    // Match either the structured constraint name or the message text.
    if (record.constraint === TOKEN_HASH_INDEX) return true;
    return typeof record.message === "string" && record.message.includes(TOKEN_HASH_INDEX);
  });
}
/** True when the value is an Error whose name marks an aborted operation. */
function isAbortError(error: unknown) {
  if (!(error instanceof Error)) return false;
  return error.name === "AbortError";
}
/** Outcome of an outbound HEAD probe against an agent-supplied endpoint. */
type InviteResolutionProbe = {
  status: "reachable" | "timeout" | "unreachable";
  method: "HEAD";
  durationMs: number;
  httpStatus: number | null;
  message: string;
};

/**
 * Issues a single HEAD request to `url` with a hard timeout and classifies the
 * result. A 2xx or one of the allow-listed error statuses counts as
 * "reachable" — even auth/404-style responses prove a live endpoint answered.
 * An abort maps to "timeout"; any other failure maps to "unreachable".
 */
async function probeInviteResolutionTarget(url: URL, timeoutMs: number): Promise<InviteResolutionProbe> {
  // Non-2xx statuses that still demonstrate a live endpoint behind the URL.
  const reachableErrorStatuses = new Set([401, 403, 404, 405, 422, 500, 501]);
  const startedAt = Date.now();
  const controller = new AbortController();
  const abortTimer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    // redirect: "manual" returns 3xx responses as-is instead of following them.
    const response = await fetch(url, {
      method: "HEAD",
      redirect: "manual",
      signal: controller.signal,
    });
    const durationMs = Date.now() - startedAt;
    if (response.ok || reachableErrorStatuses.has(response.status)) {
      return {
        status: "reachable",
        method: "HEAD",
        durationMs,
        httpStatus: response.status,
        message: `Webhook endpoint responded to HEAD with HTTP ${response.status}.`,
      };
    }
    return {
      status: "unreachable",
      method: "HEAD",
      durationMs,
      httpStatus: response.status,
      message: `Webhook endpoint probe returned HTTP ${response.status}.`,
    };
  } catch (error) {
    const durationMs = Date.now() - startedAt;
    if (isAbortError(error)) {
      return {
        status: "timeout",
        method: "HEAD",
        durationMs,
        httpStatus: null,
        message: `Webhook endpoint probe timed out after ${timeoutMs}ms.`,
      };
    }
    return {
      status: "unreachable",
      method: "HEAD",
      durationMs,
      httpStatus: null,
      message: error instanceof Error ? error.message : "Webhook endpoint probe failed.",
    };
  } finally {
    clearTimeout(abortTimer);
  }
}
export function accessRoutes(
db: Db,
opts: {
@@ -704,21 +1122,43 @@ export function accessRoutes(
async (req, res) => {
const companyId = req.params.companyId as string;
await assertCompanyPermission(req, companyId, "users:invite");
const normalizedAgentMessage = typeof req.body.agentMessage === "string"
? req.body.agentMessage.trim() || null
: null;
const insertValues = {
companyId,
inviteType: "company_join" as const,
allowedJoinTypes: req.body.allowedJoinTypes,
defaultsPayload: mergeInviteDefaults(req.body.defaultsPayload ?? null, normalizedAgentMessage),
expiresAt: companyInviteExpiresAt(),
invitedByUserId: req.actor.userId ?? null,
};
const token = createInviteToken();
const created = await db
.insert(invites)
.values({
companyId,
inviteType: "company_join",
tokenHash: hashToken(token),
allowedJoinTypes: req.body.allowedJoinTypes,
defaultsPayload: req.body.defaultsPayload ?? null,
expiresAt: new Date(Date.now() + req.body.expiresInHours * 60 * 60 * 1000),
invitedByUserId: req.actor.userId ?? null,
})
.returning()
.then((rows) => rows[0]);
let token: string | null = null;
let created: typeof invites.$inferSelect | null = null;
for (let attempt = 0; attempt < INVITE_TOKEN_MAX_RETRIES; attempt += 1) {
const candidateToken = createInviteToken();
try {
const row = await db
.insert(invites)
.values({
...insertValues,
tokenHash: hashToken(candidateToken),
})
.returning()
.then((rows) => rows[0]);
token = candidateToken;
created = row;
break;
} catch (error) {
if (!isInviteTokenHashCollisionError(error)) {
throw error;
}
}
}
if (!token || !created) {
throw conflict("Failed to generate a unique invite token. Please retry.");
}
await logActivity(db, {
companyId,
@@ -731,13 +1171,18 @@ export function accessRoutes(
inviteType: created.inviteType,
allowedJoinTypes: created.allowedJoinTypes,
expiresAt: created.expiresAt.toISOString(),
hasAgentMessage: Boolean(normalizedAgentMessage),
},
});
const inviteSummary = toInviteSummaryResponse(req, token, created);
res.status(201).json({
...created,
token,
inviteUrl: `/invite/${token}`,
onboardingTextPath: inviteSummary.onboardingTextPath,
onboardingTextUrl: inviteSummary.onboardingTextUrl,
inviteMessage: inviteSummary.inviteMessage,
});
},
);
@@ -787,6 +1232,44 @@ export function accessRoutes(
res.type("text/plain; charset=utf-8").send(buildInviteOnboardingTextDocument(req, token, invite, opts));
});
// Token-gated probe endpoint: lets an onboarding agent ask Paperclip whether
// it can reach the agent's own callback URL before submitting a join request.
router.get("/invites/:token/test-resolution", async (req, res) => {
  const token = (req.params.token as string).trim();
  if (!token) throw notFound("Invite not found");
  // Look the invite up by token hash; revoked/expired invites behave like
  // missing ones so this endpoint does not leak invite state.
  const invite = await db
    .select()
    .from(invites)
    .where(eq(invites.tokenHash, hashToken(token)))
    .then((rows) => rows[0] ?? null);
  if (!invite || invite.revokedAt || inviteExpired(invite)) {
    throw notFound("Invite not found");
  }
  const rawUrl = typeof req.query.url === "string" ? req.query.url.trim() : "";
  if (!rawUrl) throw badRequest("url query parameter is required");
  let target: URL;
  try {
    target = new URL(rawUrl);
  } catch {
    throw badRequest("url must be an absolute http(s) URL");
  }
  if (target.protocol !== "http:" && target.protocol !== "https:") {
    throw badRequest("url must use http or https");
  }
  // NOTE(review): only the URL scheme is validated, so an invite holder can
  // aim this server-side probe at internal hosts (SSRF-style). Consider
  // rejecting loopback/private address ranges before fetching — confirm the
  // intended threat model.
  // Clamp any caller-supplied timeout to 1s..15s; default to 5s.
  const parsedTimeoutMs = typeof req.query.timeoutMs === "string" ? Number(req.query.timeoutMs) : NaN;
  const timeoutMs = Number.isFinite(parsedTimeoutMs)
    ? Math.max(1000, Math.min(15000, Math.floor(parsedTimeoutMs)))
    : 5000;
  const probe = await probeInviteResolutionTarget(target, timeoutMs);
  // Echo the request parameters back alongside the probe result fields
  // (status/method/durationMs/httpStatus/message).
  res.json({
    inviteId: invite.id,
    testResolutionPath: `/api/invites/${token}/test-resolution`,
    requestedUrl: target.toString(),
    timeoutMs,
    ...probe,
  });
});
router.post("/invites/:token/accept", validate(acceptInviteSchema), async (req, res) => {
const token = (req.params.token as string).trim();
if (!token) throw notFound("Invite not found");
@@ -844,10 +1327,41 @@ export function accessRoutes(
throw badRequest("agentName is required for agent join requests");
}
const openClawDefaultsPayload = requestType === "agent"
? buildJoinDefaultsPayloadForAccept({
adapterType: req.body.adapterType ?? null,
defaultsPayload: req.body.agentDefaultsPayload ?? null,
responsesWebhookUrl: req.body.responsesWebhookUrl ?? null,
responsesWebhookMethod: req.body.responsesWebhookMethod ?? null,
responsesWebhookHeaders: req.body.responsesWebhookHeaders ?? null,
paperclipApiUrl: req.body.paperclipApiUrl ?? null,
webhookAuthHeader: req.body.webhookAuthHeader ?? null,
inboundOpenClawAuthHeader: req.header("x-openclaw-auth") ?? null,
})
: null;
if (requestType === "agent" && (req.body.adapterType ?? null) === "openclaw") {
logger.info(
{
inviteId: invite.id,
requestType,
adapterType: req.body.adapterType ?? null,
bodyKeys: isPlainObject(req.body) ? Object.keys(req.body).sort() : [],
responsesWebhookUrl: nonEmptyTrimmedString(req.body.responsesWebhookUrl),
paperclipApiUrl: nonEmptyTrimmedString(req.body.paperclipApiUrl),
webhookAuthHeader: summarizeSecretForLog(req.body.webhookAuthHeader),
inboundOpenClawAuthHeader: summarizeSecretForLog(req.header("x-openclaw-auth") ?? null),
rawAgentDefaults: summarizeOpenClawDefaultsForLog(req.body.agentDefaultsPayload ?? null),
mergedAgentDefaults: summarizeOpenClawDefaultsForLog(openClawDefaultsPayload),
},
"invite accept received OpenClaw join payload",
);
}
const joinDefaults = requestType === "agent"
? normalizeAgentDefaultsForJoin({
adapterType: req.body.adapterType ?? null,
defaultsPayload: req.body.agentDefaultsPayload ?? null,
defaultsPayload: openClawDefaultsPayload,
deploymentMode: opts.deploymentMode,
deploymentExposure: opts.deploymentExposure,
bindHost: opts.bindHost,
@@ -855,6 +1369,20 @@ export function accessRoutes(
})
: { normalized: null as Record<string, unknown> | null, diagnostics: [] as JoinDiagnostic[] };
if (requestType === "agent" && (req.body.adapterType ?? null) === "openclaw") {
logger.info(
{
inviteId: invite.id,
joinRequestDiagnostics: joinDefaults.diagnostics.map((diag) => ({
code: diag.code,
level: diag.level,
})),
normalizedAgentDefaults: summarizeOpenClawDefaultsForLog(joinDefaults.normalized),
},
"invite accept normalized OpenClaw defaults",
);
}
const claimSecret = requestType === "agent" ? createClaimSecret() : null;
const claimSecretHash = claimSecret ? hashToken(claimSecret) : null;
const claimSecretExpiresAt = claimSecret
@@ -890,6 +1418,54 @@ export function accessRoutes(
return row;
});
if (requestType === "agent" && (req.body.adapterType ?? null) === "openclaw") {
const expectedDefaults = summarizeOpenClawDefaultsForLog(joinDefaults.normalized);
const persistedDefaults = summarizeOpenClawDefaultsForLog(created.agentDefaultsPayload);
const missingPersistedFields: string[] = [];
if (expectedDefaults.url && !persistedDefaults.url) missingPersistedFields.push("url");
if (expectedDefaults.paperclipApiUrl && !persistedDefaults.paperclipApiUrl) {
missingPersistedFields.push("paperclipApiUrl");
}
if (expectedDefaults.webhookAuthHeader && !persistedDefaults.webhookAuthHeader) {
missingPersistedFields.push("webhookAuthHeader");
}
if (expectedDefaults.openClawAuthHeader && !persistedDefaults.openClawAuthHeader) {
missingPersistedFields.push("headers.x-openclaw-auth");
}
if (expectedDefaults.headerKeys.length > 0 && persistedDefaults.headerKeys.length === 0) {
missingPersistedFields.push("headers");
}
logger.info(
{
inviteId: invite.id,
joinRequestId: created.id,
joinRequestStatus: created.status,
expectedDefaults,
persistedDefaults,
diagnostics: joinDefaults.diagnostics.map((diag) => ({
code: diag.code,
level: diag.level,
message: diag.message,
hint: diag.hint ?? null,
})),
},
"invite accept persisted OpenClaw join request",
);
if (missingPersistedFields.length > 0) {
logger.warn(
{
inviteId: invite.id,
joinRequestId: created.id,
missingPersistedFields,
},
"invite accept detected missing persisted OpenClaw defaults",
);
}
}
await logActivity(db, {
companyId,
actorType: req.actor.type === "agent" ? "agent" : "user",
@@ -1053,6 +1629,16 @@ export function accessRoutes(
details: { requestType: existing.requestType, createdAgentId },
});
if (createdAgentId) {
void notifyHireApproved(db, {
companyId,
agentId: createdAgentId,
source: "join_request",
sourceId: requestId,
approvedAt: new Date(),
}).catch(() => {});
}
res.json(toJoinRequestResponse(approved));
});

View File

@@ -27,7 +27,7 @@ import {
logActivity,
secretService,
} from "../services/index.js";
import { conflict, forbidden, unprocessable } from "../errors.js";
import { conflict, forbidden, notFound, unprocessable } from "../errors.js";
import { assertBoard, assertCompanyAccess, getActorInfo } from "./authz.js";
import { findServerAdapter, listAdapterModels } from "../adapters/index.js";
import { redactEventPayload } from "../redaction.js";
@@ -37,7 +37,7 @@ import {
DEFAULT_CODEX_LOCAL_MODEL,
} from "@paperclipai/adapter-codex-local";
import { DEFAULT_CURSOR_LOCAL_MODEL } from "@paperclipai/adapter-cursor-local";
import { DEFAULT_OPENCODE_LOCAL_MODEL } from "@paperclipai/adapter-opencode-local";
import { ensureOpenCodeModelConfiguredAndAvailable } from "@paperclipai/adapter-opencode-local/server";
export function agentRoutes(db: Db) {
const DEFAULT_INSTRUCTIONS_PATH_KEYS: Record<string, string> = {
@@ -152,7 +152,10 @@ export function agentRoutes(db: Db) {
if (resolved.ambiguous) {
throw conflict("Agent shortname is ambiguous in this company. Use the agent ID.");
}
return resolved.agent?.id ?? raw;
if (!resolved.agent) {
throw notFound("Agent not found");
}
return resolved.agent.id;
}
function parseSourceIssueIds(input: {
@@ -195,15 +198,34 @@ export function agentRoutes(db: Db) {
}
return next;
}
if (adapterType === "opencode_local" && !asNonEmptyString(next.model)) {
next.model = DEFAULT_OPENCODE_LOCAL_MODEL;
}
// OpenCode requires explicit model selection — no default
if (adapterType === "cursor" && !asNonEmptyString(next.model)) {
next.model = DEFAULT_CURSOR_LOCAL_MODEL;
}
return next;
}
async function assertAdapterConfigConstraints(
companyId: string,
adapterType: string | null | undefined,
adapterConfig: Record<string, unknown>,
) {
if (adapterType !== "opencode_local") return;
const runtimeConfig = await secretsSvc.resolveAdapterConfigForRuntime(companyId, adapterConfig);
const runtimeEnv = asRecord(runtimeConfig.env) ?? {};
try {
await ensureOpenCodeModelConfiguredAndAvailable({
model: runtimeConfig.model,
command: runtimeConfig.command,
cwd: runtimeConfig.cwd,
env: runtimeEnv,
});
} catch (err) {
const reason = err instanceof Error ? err.message : String(err);
throw unprocessable(`Invalid opencode_local adapterConfig: ${reason}`);
}
}
function resolveInstructionsFilePath(candidatePath: string, adapterConfig: Record<string, unknown>) {
const trimmed = candidatePath.trim();
if (path.isAbsolute(trimmed)) return trimmed;
@@ -335,7 +357,9 @@ export function agentRoutes(db: Db) {
}
});
router.get("/adapters/:type/models", async (req, res) => {
router.get("/companies/:companyId/adapters/:type/models", async (req, res) => {
const companyId = req.params.companyId as string;
assertCompanyAccess(req, companyId);
const type = req.params.type as string;
const models = await listAdapterModels(type);
res.json(models);
@@ -589,6 +613,11 @@ export function agentRoutes(db: Db) {
requestedAdapterConfig,
{ strictMode: strictSecretsMode },
);
await assertAdapterConfigConstraints(
companyId,
hireInput.adapterType,
normalizedAdapterConfig,
);
const normalizedHireInput = {
...hireInput,
adapterConfig: normalizedAdapterConfig,
@@ -724,6 +753,11 @@ export function agentRoutes(db: Db) {
requestedAdapterConfig,
{ strictMode: strictSecretsMode },
);
await assertAdapterConfigConstraints(
companyId,
req.body.adapterType,
normalizedAdapterConfig,
);
const agent = await svc.create(companyId, {
...req.body,
@@ -903,6 +937,27 @@ export function agentRoutes(db: Db) {
);
}
const requestedAdapterType =
typeof patchData.adapterType === "string" ? patchData.adapterType : existing.adapterType;
const touchesAdapterConfiguration =
Object.prototype.hasOwnProperty.call(patchData, "adapterType") ||
Object.prototype.hasOwnProperty.call(patchData, "adapterConfig");
if (touchesAdapterConfiguration && requestedAdapterType === "opencode_local") {
const rawEffectiveAdapterConfig = Object.prototype.hasOwnProperty.call(patchData, "adapterConfig")
? (asRecord(patchData.adapterConfig) ?? {})
: (asRecord(existing.adapterConfig) ?? {});
const effectiveAdapterConfig = await secretsSvc.normalizeAdapterConfigForPersistence(
existing.companyId,
rawEffectiveAdapterConfig,
{ strictMode: strictSecretsMode },
);
await assertAdapterConfigConstraints(
existing.companyId,
requestedAdapterType,
effectiveAdapterConfig,
);
}
const actor = getActorInfo(req);
const agent = await svc.update(id, patchData, {
recordRevision: {

View File

@@ -0,0 +1,14 @@
/** Inputs describing who performed an issue checkout and for which agent/run. */
type CheckoutWakeInput = {
  actorType: "board" | "agent" | "none";
  actorAgentId: string | null;
  checkoutAgentId: string;
  checkoutRunId: string | null;
};

/**
 * Decides whether an issue checkout should wake the assignee agent. The only
 * case that suppresses the wakeup is an agent checking an issue out to itself
 * while a run id is present — it is already active, so a wakeup is redundant.
 */
export function shouldWakeAssigneeOnCheckout(input: CheckoutWakeInput): boolean {
  const selfCheckoutWithinRun =
    input.actorType === "agent" &&
    Boolean(input.actorAgentId) &&
    input.actorAgentId === input.checkoutAgentId &&
    Boolean(input.checkoutRunId);
  return !selfCheckoutWithinRun;
}

View File

@@ -25,6 +25,7 @@ import {
import { logger } from "../middleware/logger.js";
import { forbidden, HttpError, unauthorized } from "../errors.js";
import { assertCompanyAccess, getActorInfo } from "./authz.js";
import { shouldWakeAssigneeOnCheckout } from "./issues-checkout-wakeup.js";
const MAX_ATTACHMENT_BYTES = Number(process.env.PAPERCLIP_ATTACHMENT_MAX_BYTES) || 10 * 1024 * 1024;
const ALLOWED_ATTACHMENT_CONTENT_TYPES = new Set([
@@ -187,20 +188,40 @@ export function issueRoutes(db: Db, storage: StorageService) {
const companyId = req.params.companyId as string;
assertCompanyAccess(req, companyId);
const assigneeUserFilterRaw = req.query.assigneeUserId as string | undefined;
const touchedByUserFilterRaw = req.query.touchedByUserId as string | undefined;
const unreadForUserFilterRaw = req.query.unreadForUserId as string | undefined;
const assigneeUserId =
assigneeUserFilterRaw === "me" && req.actor.type === "board"
? req.actor.userId
: assigneeUserFilterRaw;
const touchedByUserId =
touchedByUserFilterRaw === "me" && req.actor.type === "board"
? req.actor.userId
: touchedByUserFilterRaw;
const unreadForUserId =
unreadForUserFilterRaw === "me" && req.actor.type === "board"
? req.actor.userId
: unreadForUserFilterRaw;
if (assigneeUserFilterRaw === "me" && (!assigneeUserId || req.actor.type !== "board")) {
res.status(403).json({ error: "assigneeUserId=me requires board authentication" });
return;
}
if (touchedByUserFilterRaw === "me" && (!touchedByUserId || req.actor.type !== "board")) {
res.status(403).json({ error: "touchedByUserId=me requires board authentication" });
return;
}
if (unreadForUserFilterRaw === "me" && (!unreadForUserId || req.actor.type !== "board")) {
res.status(403).json({ error: "unreadForUserId=me requires board authentication" });
return;
}
const result = await svc.list(companyId, {
status: req.query.status as string | undefined,
assigneeAgentId: req.query.assigneeAgentId as string | undefined,
assigneeUserId,
touchedByUserId,
unreadForUserId,
projectId: req.query.projectId as string | undefined,
labelId: req.query.labelId as string | undefined,
q: req.query.q as string | undefined,
@@ -282,6 +303,38 @@ export function issueRoutes(db: Db, storage: StorageService) {
res.json({ ...issue, ancestors, project: project ?? null, goal: goal ?? null, mentionedProjects });
});
// Marks an issue as read for the authenticated board user and records the
// action in the activity log. Only board users with a user context may call
// this; agents receive 403.
router.post("/issues/:id/read", async (req, res) => {
  const id = req.params.id as string;
  const issue = await svc.getById(id);
  if (!issue) {
    res.status(404).json({ error: "Issue not found" });
    return;
  }
  // Company access is asserted before the actor-type checks below.
  assertCompanyAccess(req, issue.companyId);
  if (req.actor.type !== "board") {
    res.status(403).json({ error: "Board authentication required" });
    return;
  }
  if (!req.actor.userId) {
    res.status(403).json({ error: "Board user context required" });
    return;
  }
  // Persist the per-user read marker with the current timestamp.
  const readState = await svc.markRead(issue.companyId, issue.id, req.actor.userId, new Date());
  const actor = getActorInfo(req);
  await logActivity(db, {
    companyId: issue.companyId,
    actorType: actor.actorType,
    actorId: actor.actorId,
    agentId: actor.agentId,
    runId: actor.runId,
    action: "issue.read_marked",
    entityType: "issue",
    entityId: issue.id,
    details: { userId: req.actor.userId, lastReadAt: readState.lastReadAt },
  });
  res.json(readState);
});
router.get("/issues/:id/approvals", async (req, res) => {
const id = req.params.id as string;
const issue = await svc.getById(id);
@@ -634,17 +687,26 @@ export function issueRoutes(db: Db, storage: StorageService) {
details: { agentId: req.body.agentId },
});
void heartbeat
.wakeup(req.body.agentId, {
source: "assignment",
triggerDetail: "system",
reason: "issue_checked_out",
payload: { issueId: issue.id, mutation: "checkout" },
requestedByActorType: actor.actorType,
requestedByActorId: actor.actorId,
contextSnapshot: { issueId: issue.id, source: "issue.checkout" },
if (
shouldWakeAssigneeOnCheckout({
actorType: req.actor.type,
actorAgentId: req.actor.type === "agent" ? req.actor.agentId ?? null : null,
checkoutAgentId: req.body.agentId,
checkoutRunId,
})
.catch((err) => logger.warn({ err, issueId: issue.id }, "failed to wake assignee on issue checkout"));
) {
void heartbeat
.wakeup(req.body.agentId, {
source: "assignment",
triggerDetail: "system",
reason: "issue_checked_out",
payload: { issueId: issue.id, mutation: "checkout" },
requestedByActorType: actor.actorType,
requestedByActorId: actor.actorId,
contextSnapshot: { issueId: issue.id, source: "issue.checkout" },
})
.catch((err) => logger.warn({ err, issueId: issue.id }, "failed to wake assignee on issue checkout"));
}
res.json(updated);
});

View File

@@ -1,17 +1,19 @@
import { Router } from "express";
import type { Db } from "@paperclipai/db";
import { and, eq, inArray, isNull, sql } from "drizzle-orm";
import { issues, joinRequests } from "@paperclipai/db";
import { and, eq, sql } from "drizzle-orm";
import { joinRequests } from "@paperclipai/db";
import { sidebarBadgeService } from "../services/sidebar-badges.js";
import { issueService } from "../services/issues.js";
import { accessService } from "../services/access.js";
import { dashboardService } from "../services/dashboard.js";
import { assertCompanyAccess } from "./authz.js";
const INBOX_ISSUE_STATUSES = ["backlog", "todo", "in_progress", "in_review", "blocked"] as const;
export function sidebarBadgeRoutes(db: Db) {
const router = Router();
const svc = sidebarBadgeService(db);
const issueSvc = issueService(db);
const access = accessService(db);
const dashboard = dashboardService(db);
router.get("/companies/:companyId/sidebar-badges", async (req, res) => {
const companyId = req.params.companyId as string;
@@ -34,26 +36,16 @@ export function sidebarBadgeRoutes(db: Db) {
.then((rows) => Number(rows[0]?.count ?? 0))
: 0;
const assignedIssueCount =
req.actor.type === "board" && req.actor.userId
? await db
.select({ count: sql<number>`count(*)` })
.from(issues)
.where(
and(
eq(issues.companyId, companyId),
eq(issues.assigneeUserId, req.actor.userId),
inArray(issues.status, [...INBOX_ISSUE_STATUSES]),
isNull(issues.hiddenAt),
),
)
.then((rows) => Number(rows[0]?.count ?? 0))
: 0;
const badges = await svc.get(companyId, {
joinRequests: joinRequestCount,
assignedIssues: assignedIssueCount,
});
const summary = await dashboard.summary(companyId);
const staleIssueCount = await issueSvc.staleCount(companyId, 24 * 60);
const alertsCount =
(summary.agents.error > 0 ? 1 : 0) +
(summary.costs.monthBudgetCents > 0 && summary.costs.monthUtilizationPercent >= 80 ? 1 : 0);
badges.inbox = badges.failedRuns + alertsCount + staleIssueCount;
res.json(badges);
});

View File

@@ -51,6 +51,16 @@ interface UpdateAgentOptions {
recordRevision?: RevisionMetadata;
}
interface AgentShortnameRow {
id: string;
name: string;
status: string;
}
interface AgentShortnameCollisionOptions {
excludeAgentId?: string | null;
}
/** Narrows to a plain object record — rejects null and arrays. */
function isPlainRecord(value: unknown): value is Record<string, unknown> {
  if (value === null || Array.isArray(value)) return false;
  return typeof value === "object";
}
@@ -140,6 +150,21 @@ function configPatchFromSnapshot(snapshot: unknown): Partial<typeof agents.$infe
};
}
/**
 * Reports whether `candidateName` normalizes to the same URL shortname as any
 * other agent in the list. Terminated agents and the optionally excluded agent
 * (used during renames) never count as collisions; a candidate that produces
 * no shortname cannot collide.
 */
export function hasAgentShortnameCollision(
  candidateName: string,
  existingAgents: AgentShortnameRow[],
  options?: AgentShortnameCollisionOptions,
): boolean {
  const candidateShortname = normalizeAgentUrlKey(candidateName);
  if (!candidateShortname) return false;
  const excludedId = options?.excludeAgentId;
  for (const agent of existingAgents) {
    if (agent.status === "terminated") continue;
    if (excludedId && agent.id === excludedId) continue;
    if (normalizeAgentUrlKey(agent.name) === candidateShortname) return true;
  }
  return false;
}
export function agentService(db: Db) {
function withUrlKey<T extends { id: string; name: string }>(row: T) {
return {
@@ -185,6 +210,31 @@ export function agentService(db: Db) {
}
}
/**
 * Ensures no other non-terminated agent in the company resolves to the same
 * URL shortname as `candidateName`, throwing a conflict error when one does.
 * A candidate that normalizes to an empty shortname is always allowed.
 * Pass `options.excludeAgentId` during renames so an agent does not collide
 * with itself.
 */
async function assertCompanyShortnameAvailable(
  companyId: string,
  candidateName: string,
  options?: AgentShortnameCollisionOptions,
) {
  const candidateShortname = normalizeAgentUrlKey(candidateName);
  // Names with no usable shortname cannot collide.
  if (!candidateShortname) return;
  // Load only the columns the collision check needs.
  const existingAgents = await db
    .select({
      id: agents.id,
      name: agents.name,
      status: agents.status,
    })
    .from(agents)
    .where(eq(agents.companyId, companyId));
  const hasCollision = hasAgentShortnameCollision(candidateName, existingAgents, options);
  if (hasCollision) {
    throw conflict(
      `Agent shortname '${candidateShortname}' is already in use in this company`,
    );
  }
}
async function updateAgent(
id: string,
data: Partial<typeof agents.$inferInsert>,
@@ -212,6 +262,14 @@ export function agentService(db: Db) {
await assertNoCycle(id, data.reportsTo);
}
if (data.name !== undefined) {
const previousShortname = normalizeAgentUrlKey(existing.name);
const nextShortname = normalizeAgentUrlKey(data.name);
if (previousShortname !== nextShortname) {
await assertCompanyShortnameAvailable(existing.companyId, data.name, { excludeAgentId: id });
}
}
const normalizedPatch = { ...data } as Partial<typeof agents.$inferInsert>;
if (data.permissions !== undefined) {
const role = (data.role ?? existing.role) as string;
@@ -267,6 +325,8 @@ export function agentService(db: Db) {
await ensureManager(companyId, data.reportsTo);
}
await assertCompanyShortnameAvailable(companyId, data.name);
const role = data.role ?? "general";
const normalizedPermissions = normalizeAgentPermissions(data.permissions, role);
const created = await db

View File

@@ -3,6 +3,7 @@ import type { Db } from "@paperclipai/db";
import { approvalComments, approvals } from "@paperclipai/db";
import { notFound, unprocessable } from "../errors.js";
import { agentService } from "./agents.js";
import { notifyHireApproved } from "./hire-hook.js";
export function approvalService(db: Db) {
const agentsSvc = agentService(db);
@@ -59,13 +60,15 @@ export function approvalService(db: Db) {
.returning()
.then((rows) => rows[0]);
let hireApprovedAgentId: string | null = null;
if (updated.type === "hire_agent") {
const payload = updated.payload as Record<string, unknown>;
const payloadAgentId = typeof payload.agentId === "string" ? payload.agentId : null;
if (payloadAgentId) {
await agentsSvc.activatePendingApproval(payloadAgentId);
hireApprovedAgentId = payloadAgentId;
} else {
await agentsSvc.create(updated.companyId, {
const created = await agentsSvc.create(updated.companyId, {
name: String(payload.name ?? "New Agent"),
role: String(payload.role ?? "general"),
title: typeof payload.title === "string" ? payload.title : null,
@@ -87,6 +90,16 @@ export function approvalService(db: Db) {
permissions: undefined,
lastHeartbeatAt: null,
});
hireApprovedAgentId = created?.id ?? null;
}
if (hireApprovedAgentId) {
void notifyHireApproved(db, {
companyId: updated.companyId,
agentId: hireApprovedAgentId,
source: "approval",
sourceId: id,
approvedAt: now,
}).catch(() => {});
}
}

View File

@@ -0,0 +1,113 @@
import { and, eq } from "drizzle-orm";
import type { Db } from "@paperclipai/db";
import { agents } from "@paperclipai/db";
import type { HireApprovedPayload } from "@paperclipai/adapter-utils";
import { findServerAdapter } from "../adapters/registry.js";
import { logger } from "../middleware/logger.js";
import { logActivity } from "./activity-log.js";
// Guidance relayed verbatim to the newly approved agent in the hook payload's
// `message` field (see notifyHireApproved below). Runtime string — do not edit casually.
const HIRE_APPROVED_MESSAGE =
  "Tell your user that your hire was approved, now they should assign you a task in Paperclip or ask you to create issues.";
/** Input for {@link notifyHireApproved}. */
export interface NotifyHireApprovedInput {
  /** Company the approved agent belongs to. */
  companyId: string;
  /** Id of the agent whose hire was approved. */
  agentId: string;
  /** Which flow approved the hire: a join request or a `hire_agent` approval. */
  source: "join_request" | "approval";
  /** Id of the join request or approval record that triggered the hook. */
  sourceId: string;
  /** Approval timestamp; defaults to "now" when omitted. */
  approvedAt?: Date;
}
/**
 * Invokes the adapter's onHireApproved hook when an agent is approved
 * (join-request or hire_agent approval).
 *
 * Failures are non-fatal: we log and write to activity, never throw. Callers
 * fire-and-forget this (`void notifyHireApproved(...)`), so every await inside
 * — including the agent lookup and the activity-log writes — is guarded here
 * rather than relying on the caller's catch.
 *
 * @param db    Database handle used for the agent lookup and activity writes.
 * @param input Identifies the company/agent and the approval that triggered the hook.
 */
export async function notifyHireApproved(
  db: Db,
  input: NotifyHireApprovedInput,
): Promise<void> {
  const { companyId, agentId, source, sourceId } = input;
  const approvedAt = input.approvedAt ?? new Date();

  // Best-effort activity write: the hook must never throw, so failures of the
  // activity log itself are logged and swallowed.
  const recordActivity = async (action: string, details: Record<string, unknown>) => {
    try {
      await logActivity(db, {
        companyId,
        actorType: "system",
        actorId: "hire_hook",
        action,
        entityType: "agent",
        entityId: agentId,
        details,
      });
    } catch (logErr) {
      logger.error(
        { err: logErr, companyId, agentId, source, sourceId },
        "hire hook: failed to write activity log",
      );
    }
  };

  // Look up the agent; a failed query is treated like "not found" (skip),
  // honoring the never-throw contract.
  let row: typeof agents.$inferSelect | null = null;
  try {
    row = await db
      .select()
      .from(agents)
      .where(and(eq(agents.id, agentId), eq(agents.companyId, companyId)))
      .then((rows) => rows[0] ?? null);
  } catch (err) {
    logger.error(
      { err, companyId, agentId, source, sourceId },
      "hire hook: agent lookup failed, skipping",
    );
    return;
  }
  if (!row) {
    logger.warn({ companyId, agentId, source, sourceId }, "hire hook: agent not found in company, skipping");
    return;
  }

  const adapterType = row.adapterType ?? "process";
  const adapter = findServerAdapter(adapterType);
  const onHireApproved = adapter?.onHireApproved;
  if (!onHireApproved) {
    // Adapter does not implement the hook; nothing to do.
    return;
  }

  const payload: HireApprovedPayload = {
    companyId,
    agentId,
    agentName: row.name,
    adapterType,
    source,
    sourceId,
    approvedAt: approvedAt.toISOString(),
    message: HIRE_APPROVED_MESSAGE,
  };
  // adapterConfig is stored as JSON; only pass it through when it is a plain object.
  const adapterConfig =
    typeof row.adapterConfig === "object" && row.adapterConfig !== null && !Array.isArray(row.adapterConfig)
      ? (row.adapterConfig as Record<string, unknown>)
      : {};

  try {
    const result = await onHireApproved(payload, adapterConfig);
    if (result.ok) {
      await recordActivity("hire_hook.succeeded", { source, sourceId, adapterType });
      return;
    }
    // Adapter reported a structured failure (did not throw).
    logger.warn(
      { companyId, agentId, adapterType, source, sourceId, error: result.error, detail: result.detail },
      "hire hook: adapter returned failure",
    );
    await recordActivity("hire_hook.failed", {
      source,
      sourceId,
      adapterType,
      error: result.error,
      detail: result.detail,
    });
  } catch (err) {
    // Adapter threw outright; record separately from a structured failure.
    logger.error(
      { err, companyId, agentId, adapterType, source, sourceId },
      "hire hook: adapter threw",
    );
    await recordActivity("hire_hook.error", {
      source,
      sourceId,
      adapterType,
      error: err instanceof Error ? err.message : String(err),
    });
  }
}

View File

@@ -15,5 +15,6 @@ export { sidebarBadgeService } from "./sidebar-badges.js";
export { accessService } from "./access.js";
export { companyPortabilityService } from "./company-portability.js";
export { logActivity, type LogActivityInput } from "./activity-log.js";
export { notifyHireApproved, type NotifyHireApprovedInput } from "./hire-hook.js";
export { publishLiveEvent, subscribeCompanyLiveEvents } from "./live-events.js";
export { createStorageServiceFromConfig, getStorageService } from "../storage/index.js";

View File

@@ -10,6 +10,7 @@ import {
issueAttachments,
issueLabels,
issueComments,
issueReadStates,
issues,
labels,
projectWorkspaces,
@@ -49,6 +50,8 @@ export interface IssueFilters {
status?: string;
assigneeAgentId?: string;
assigneeUserId?: string;
touchedByUserId?: string;
unreadForUserId?: string;
projectId?: string;
labelId?: string;
q?: string;
@@ -68,6 +71,17 @@ type IssueActiveRunRow = {
};
type IssueWithLabels = IssueRow & { labels: IssueLabelRow[]; labelIds: string[] };
type IssueWithLabelsAndRun = IssueWithLabels & { activeRun: IssueActiveRunRow | null };
type IssueUserCommentStats = {
issueId: string;
myLastCommentAt: Date | null;
lastExternalCommentAt: Date | null;
};
type IssueUserContextInput = {
createdByUserId: string | null;
assigneeUserId: string | null;
createdAt: Date | string;
updatedAt: Date | string;
};
function sameRunLock(checkoutRunId: string | null, actorRunId: string | null) {
if (actorRunId) return checkoutRunId === actorRunId;
@@ -80,6 +94,127 @@ function escapeLikePattern(value: string): string {
return value.replace(/[\\%_]/g, "\\$&");
}
// SQL predicate (correlated with the outer `issues` row): true when `userId`
// has "touched" the issue — created it, is assigned to it, has a read-state
// row for it, or has commented on it. Used to scope inbox-style filters to
// issues the user is already involved with.
function touchedByUserCondition(companyId: string, userId: string) {
  return sql<boolean>`
    (
      ${issues.createdByUserId} = ${userId}
      OR ${issues.assigneeUserId} = ${userId}
      OR EXISTS (
        SELECT 1
        FROM ${issueReadStates}
        WHERE ${issueReadStates.issueId} = ${issues.id}
          AND ${issueReadStates.companyId} = ${companyId}
          AND ${issueReadStates.userId} = ${userId}
      )
      OR EXISTS (
        SELECT 1
        FROM ${issueComments}
        WHERE ${issueComments.issueId} = ${issues.id}
          AND ${issueComments.companyId} = ${companyId}
          AND ${issueComments.authorUserId} = ${userId}
      )
    )
  `;
}
// SQL scalar subquery (correlated with the outer `issues` row): timestamp of
// the user's most recent comment on the issue, or NULL if they never commented.
function myLastCommentAtExpr(companyId: string, userId: string) {
  return sql<Date | null>`
    (
      SELECT MAX(${issueComments.createdAt})
      FROM ${issueComments}
      WHERE ${issueComments.issueId} = ${issues.id}
        AND ${issueComments.companyId} = ${companyId}
        AND ${issueComments.authorUserId} = ${userId}
    )
  `;
}
// SQL scalar subquery (correlated with the outer `issues` row): the user's
// most recent lastReadAt for the issue, or NULL if they never opened it.
function myLastReadAtExpr(companyId: string, userId: string) {
  return sql<Date | null>`
    (
      SELECT MAX(${issueReadStates.lastReadAt})
      FROM ${issueReadStates}
      WHERE ${issueReadStates.issueId} = ${issues.id}
        AND ${issueReadStates.companyId} = ${companyId}
        AND ${issueReadStates.userId} = ${userId}
    )
  `;
}
// SQL scalar: the latest of the user's interactions with the issue — last
// comment, last read, the issue's createdAt when they created it, and its
// updatedAt when they are the assignee. Each operand is COALESCEd to the epoch
// so GREATEST never returns NULL (mirrors the TS logic in deriveIssueUserContext).
function myLastTouchAtExpr(companyId: string, userId: string) {
  const myLastCommentAt = myLastCommentAtExpr(companyId, userId);
  const myLastReadAt = myLastReadAtExpr(companyId, userId);
  return sql<Date | null>`
    GREATEST(
      COALESCE(${myLastCommentAt}, to_timestamp(0)),
      COALESCE(${myLastReadAt}, to_timestamp(0)),
      COALESCE(CASE WHEN ${issues.createdByUserId} = ${userId} THEN ${issues.createdAt} ELSE NULL END, to_timestamp(0)),
      COALESCE(CASE WHEN ${issues.assigneeUserId} = ${userId} THEN ${issues.updatedAt} ELSE NULL END, to_timestamp(0))
    )
  `;
}
// SQL predicate: true when the user has touched the issue AND a comment by
// someone else (authorUserId NULL covers non-user authors, e.g. agents) was
// created after the user's last touch. This is the "unread" notion behind
// the unreadForUserId filter and countUnreadTouchedByUser.
function unreadForUserCondition(companyId: string, userId: string) {
  const touchedCondition = touchedByUserCondition(companyId, userId);
  const myLastTouchAt = myLastTouchAtExpr(companyId, userId);
  return sql<boolean>`
    (
      ${touchedCondition}
      AND EXISTS (
        SELECT 1
        FROM ${issueComments}
        WHERE ${issueComments.issueId} = ${issues.id}
          AND ${issueComments.companyId} = ${companyId}
          AND (
            ${issueComments.authorUserId} IS NULL
            OR ${issueComments.authorUserId} <> ${userId}
          )
          AND ${issueComments.createdAt} > ${myLastTouchAt}
      )
    )
  `;
}
/**
 * TS-side mirror of the SQL unread logic: computes, for one issue and one
 * user, the user's last interaction time, the latest comment by anyone else,
 * and whether that external comment makes the issue "unread" for the user.
 *
 * A user who never touched the issue (null myLastTouchAt) is never "unread".
 * Invalid or missing dates are treated as absent.
 */
export function deriveIssueUserContext(
  issue: IssueUserContextInput,
  userId: string,
  stats:
    | {
        myLastCommentAt: Date | string | null;
        myLastReadAt: Date | string | null;
        lastExternalCommentAt: Date | string | null;
      }
    | null
    | undefined,
) {
  // Coerce a Date-or-ISO-string into a valid Date; null for absent/invalid input.
  const toDate = (value: Date | string | null | undefined): Date | null => {
    if (value == null || value === "") return null;
    const date = value instanceof Date ? value : new Date(value);
    return Number.isNaN(date.getTime()) ? null : date;
  };

  // Candidate "touch" timestamps: own comment, own read marker, creation time
  // (when the user created the issue), update time (when they are assignee).
  const touchCandidates: Array<Date | null> = [
    toDate(stats?.myLastCommentAt),
    toDate(stats?.myLastReadAt),
    issue.createdByUserId === userId ? toDate(issue.createdAt) : null,
    issue.assigneeUserId === userId ? toDate(issue.updatedAt) : null,
  ];

  // Pick the latest candidate; strict > keeps the first of any tied maxima.
  let myLastTouchAt: Date | null = null;
  for (const candidate of touchCandidates) {
    if (candidate && (!myLastTouchAt || candidate.getTime() > myLastTouchAt.getTime())) {
      myLastTouchAt = candidate;
    }
  }

  const lastExternalCommentAt = toDate(stats?.lastExternalCommentAt);
  const isUnreadForMe =
    myLastTouchAt !== null &&
    lastExternalCommentAt !== null &&
    lastExternalCommentAt.getTime() > myLastTouchAt.getTime();

  return {
    myLastTouchAt,
    lastExternalCommentAt,
    isUnreadForMe,
  };
}
async function labelMapForIssues(dbOrTx: any, issueIds: string[]): Promise<Map<string, IssueLabelRow[]>> {
const map = new Map<string, IssueLabelRow[]>();
if (issueIds.length === 0) return map;
@@ -284,6 +419,9 @@ export function issueService(db: Db) {
return {
list: async (companyId: string, filters?: IssueFilters) => {
const conditions = [eq(issues.companyId, companyId)];
const touchedByUserId = filters?.touchedByUserId?.trim() || undefined;
const unreadForUserId = filters?.unreadForUserId?.trim() || undefined;
const contextUserId = unreadForUserId ?? touchedByUserId;
const rawSearch = filters?.q?.trim() ?? "";
const hasSearch = rawSearch.length > 0;
const escapedSearch = hasSearch ? escapeLikePattern(rawSearch) : "";
@@ -313,6 +451,12 @@ export function issueService(db: Db) {
if (filters?.assigneeUserId) {
conditions.push(eq(issues.assigneeUserId, filters.assigneeUserId));
}
if (touchedByUserId) {
conditions.push(touchedByUserCondition(companyId, touchedByUserId));
}
if (unreadForUserId) {
conditions.push(unreadForUserCondition(companyId, unreadForUserId));
}
if (filters?.projectId) conditions.push(eq(issues.projectId, filters.projectId));
if (filters?.labelId) {
const labeledIssueIds = await db
@@ -353,7 +497,102 @@ export function issueService(db: Db) {
.orderBy(hasSearch ? asc(searchOrder) : asc(priorityOrder), asc(priorityOrder), desc(issues.updatedAt));
const withLabels = await withIssueLabels(db, rows);
const runMap = await activeRunMapForIssues(db, withLabels);
return withActiveRuns(withLabels, runMap);
const withRuns = withActiveRuns(withLabels, runMap);
if (!contextUserId || withRuns.length === 0) {
return withRuns;
}
const issueIds = withRuns.map((row) => row.id);
const statsRows = await db
.select({
issueId: issueComments.issueId,
myLastCommentAt: sql<Date | null>`
MAX(CASE WHEN ${issueComments.authorUserId} = ${contextUserId} THEN ${issueComments.createdAt} END)
`,
lastExternalCommentAt: sql<Date | null>`
MAX(
CASE
WHEN ${issueComments.authorUserId} IS NULL OR ${issueComments.authorUserId} <> ${contextUserId}
THEN ${issueComments.createdAt}
END
)
`,
})
.from(issueComments)
.where(
and(
eq(issueComments.companyId, companyId),
inArray(issueComments.issueId, issueIds),
),
)
.groupBy(issueComments.issueId);
const readRows = await db
.select({
issueId: issueReadStates.issueId,
myLastReadAt: issueReadStates.lastReadAt,
})
.from(issueReadStates)
.where(
and(
eq(issueReadStates.companyId, companyId),
eq(issueReadStates.userId, contextUserId),
inArray(issueReadStates.issueId, issueIds),
),
);
const statsByIssueId = new Map(statsRows.map((row) => [row.issueId, row]));
const readByIssueId = new Map(readRows.map((row) => [row.issueId, row.myLastReadAt]));
return withRuns.map((row) => ({
...row,
...deriveIssueUserContext(row, contextUserId, {
myLastCommentAt: statsByIssueId.get(row.id)?.myLastCommentAt ?? null,
myLastReadAt: readByIssueId.get(row.id) ?? null,
lastExternalCommentAt: statsByIssueId.get(row.id)?.lastExternalCommentAt ?? null,
}),
}));
},
// Counts non-hidden issues in the company that are "unread" for the user
// (touched by them, with a newer external comment). `status` optionally
// narrows the count to one value or a comma-separated list of values.
countUnreadTouchedByUser: async (companyId: string, userId: string, status?: string) => {
  const where = [
    eq(issues.companyId, companyId),
    isNull(issues.hiddenAt),
    unreadForUserCondition(companyId, userId),
  ];
  // Parse the optional status filter into a clean list of values.
  const statusList = status
    ? status.split(",").map((value) => value.trim()).filter((value) => value.length > 0)
    : [];
  if (statusList.length === 1) {
    where.push(eq(issues.status, statusList[0]));
  } else if (statusList.length > 1) {
    where.push(inArray(issues.status, statusList));
  }
  const result = await db
    .select({ count: sql<number>`count(*)` })
    .from(issues)
    .where(and(...where));
  return Number(result[0]?.count ?? 0);
},
// Records that the user has read the issue as of `readAt` (defaults to now).
// Upserts the read-state row keyed by (company, issue, user) and returns it.
markRead: async (companyId: string, issueId: string, userId: string, readAt: Date = new Date()) => {
  const updatedAt = new Date();
  const rows = await db
    .insert(issueReadStates)
    .values({ companyId, issueId, userId, lastReadAt: readAt, updatedAt })
    .onConflictDoUpdate({
      // Conflict target matches the table's (company, issue, user) uniqueness.
      target: [issueReadStates.companyId, issueReadStates.issueId, issueReadStates.userId],
      set: { lastReadAt: readAt, updatedAt },
    })
    .returning();
  return rows[0];
},
getById: async (id: string) => {

View File

@@ -1,4 +1,4 @@
import { createReadStream, createWriteStream, promises as fs } from "node:fs";
import { createReadStream, promises as fs } from "node:fs";
import path from "node:path";
import { createHash } from "node:crypto";
import { notFound } from "../errors.js";
@@ -113,11 +113,7 @@ function createLocalFileRunLogStore(basePath: string): RunLogStore {
stream: event.stream,
chunk: event.chunk,
});
await new Promise<void>((resolve, reject) => {
const stream = createWriteStream(absPath, { flags: "a", encoding: "utf8" });
stream.on("error", reject);
stream.end(`${line}\n`, () => resolve());
});
await fs.appendFile(absPath, `${line}\n`, "utf8");
},
async finalize(handle) {

View File

@@ -10,7 +10,7 @@ export function sidebarBadgeService(db: Db) {
return {
get: async (
companyId: string,
extra?: { joinRequests?: number; assignedIssues?: number },
extra?: { joinRequests?: number; unreadTouchedIssues?: number },
): Promise<SidebarBadges> => {
const actionableApprovals = await db
.select({ count: sql<number>`count(*)` })
@@ -43,9 +43,9 @@ export function sidebarBadgeService(db: Db) {
).length;
const joinRequests = extra?.joinRequests ?? 0;
const assignedIssues = extra?.assignedIssues ?? 0;
const unreadTouchedIssues = extra?.unreadTouchedIssues ?? 0;
return {
inbox: actionableApprovals + failedRuns + joinRequests + assignedIssues,
inbox: actionableApprovals + failedRuns + joinRequests + unreadTouchedIssues,
approvals: actionableApprovals,
failedRuns,
joinRequests,

View File

@@ -0,0 +1,363 @@
---
name: release-changelog
description: >
Generate user-facing release changelogs for Paperclip. Reads git history,
merged PRs, and changeset files since the last release tag. Detects breaking
changes, categorizes changes, and outputs structured markdown to
releases/v{version}.md. Use when preparing a release or when asked to
generate a changelog.
---
# Release Changelog Skill
Generate a user-facing changelog for a new Paperclip release. This skill reads
the commit history, changeset files, and merged PRs since the last release tag,
detects breaking changes, categorizes everything, and writes a structured
release notes file.
**Output:** `releases/v{version}.md` in the repo root.
**Review required:** Always present the draft for human sign-off before
finalizing. Never auto-publish.
---
## Step 0 — Idempotency Check
Before generating anything, check if a changelog already exists for this version:
```bash
ls releases/v{version}.md 2>/dev/null
```
**If the file already exists:**
1. Read the existing changelog and present it to the reviewer.
2. Ask: "A changelog for v{version} already exists. Do you want to (a) keep it
as-is, (b) regenerate from scratch, or (c) update specific sections?"
3. If the reviewer says keep it → **stop here**. Do not overwrite. This skill is
done.
4. If the reviewer says regenerate → back up the existing file to
`releases/v{version}.md.prev`, then proceed from Step 1.
5. If the reviewer says update → read the existing file, proceed through Steps
1-4 to gather fresh data, then merge changes into the existing file rather
than replacing it wholesale. Preserve any manual edits the reviewer previously
made.
**If the file does not exist:** Proceed normally from Step 1.
**Critical rule:** This skill NEVER triggers a version bump. It only reads git
history and writes a markdown file. The `release.sh` script is the only thing
that bumps versions, and it is called separately by the `release` coordination
skill. Running this skill multiple times is always safe — worst case it
overwrites a draft changelog (with reviewer permission).
---
## Step 1 — Determine the Release Range
Find the last release tag and the planned version:
```bash
# Last release tag (most recent semver tag)
git tag --sort=-version:refname | head -1
# e.g. v0.2.7
# All commits since that tag
git log v0.2.7..HEAD --oneline --no-merges
```
If no tag exists yet, use the repository's initial commit as the base (find it with `git rev-list --max-parents=0 HEAD`).
The new version number comes from one of:
- An explicit argument (e.g. "generate changelog for v0.3.0")
- The bump type (patch/minor/major) applied to the last tag
- The version already set in `cli/package.json` if `scripts/release.sh` has been run
---
## Step 2 — Gather Raw Change Data
Collect changes from three sources, in priority order:
### 2a. Git Commits
```bash
git log v{last}..HEAD --oneline --no-merges
git log v{last}..HEAD --format="%H %s" --no-merges # full SHAs for file diffs
```
### 2b. Changeset Files
Look for unconsumed changesets in `.changeset/`:
```bash
ls .changeset/*.md | grep -v README.md
```
Each changeset file has YAML frontmatter with package names and bump types
(`patch`, `minor`, `major`), followed by a description. Parse these — the bump
type is a strong categorization signal, and the description may contain
user-facing summaries.
### 2c. Merged PRs (when available)
If GitHub access is available via `gh`:
```bash
gh pr list --state merged --search "merged:>={last-tag-date}" --json number,title,body,labels
```
PR titles and bodies are often the best source of user-facing descriptions.
Prefer PR descriptions over raw commit messages when both are available.
---
## Step 3 — Detect Breaking Changes
Scan for breaking changes using these signals. **Any match flags the release as
containing breaking changes**, which affects version bump requirements and
changelog structure.
### 3a. Migration Files
Check for new migration files since the last tag:
```bash
git diff --name-only v{last}..HEAD -- packages/db/src/migrations/
```
- **New migration files exist** = DB migration required in upgrade.
- Inspect migration content: look for `DROP`, `ALTER ... DROP`, `RENAME` to
distinguish destructive vs. additive migrations.
- Additive-only migrations (new tables, new nullable columns, new indexes) are
safe but should still be mentioned.
- Destructive migrations (column drops, type changes, table drops) = breaking.
### 3b. Schema Changes
```bash
git diff v{last}..HEAD -- packages/db/src/schema/
```
Look for:
- Removed or renamed columns/tables
- Changed column types
- Removed default values or nullable constraints
- These indicate breaking DB changes even if no explicit migration file exists
### 3c. API Route Changes
```bash
git diff v{last}..HEAD -- server/src/routes/ server/src/api/
```
Look for:
- Removed endpoints
- Changed request/response shapes (removed fields, type changes)
- Changed authentication requirements
### 3d. Config Changes
```bash
git diff v{last}..HEAD -- cli/src/config/ packages/*/src/*config*
```
Look for renamed, removed, or restructured configuration keys.
### 3e. Changeset Severity
Any `.changeset/*.md` file with a `major` bump = explicitly flagged breaking.
### 3f. Commit Conventions
Scan commit messages for:
- `BREAKING:` or `BREAKING CHANGE:` prefix
- `!` after the type in conventional commits (e.g. `feat!:`, `fix!:`)
### Version Bump Rules
| Condition | Minimum Bump |
|---|---|
| Destructive migration (DROP, RENAME) | `major` |
| Removed API endpoints or fields | `major` |
| Any `major` changeset or `BREAKING:` commit | `major` |
| New (additive) migration | `minor` |
| New features (`feat:` commits, `minor` changesets) | `minor` |
| Bug fixes only | `patch` |
If the planned bump is lower than the minimum required, **warn the reviewer**
and recommend the correct bump level.
---
## Step 4 — Categorize Changes
Assign every meaningful change to one of these categories:
| Category | What Goes Here | Shows in User Notes? |
|---|---|---|
| **Breaking Changes** | Anything requiring user action to upgrade | Yes (top, with warning) |
| **Highlights** | New user-visible features, major behavioral changes | Yes (with 1-2 sentence descriptions) |
| **Improvements** | Enhancements to existing features | Yes (bullet list) |
| **Fixes** | Bug fixes | Yes (bullet list) |
| **Internal** | Refactoring, deps, CI, tests, docs | No (dev changelog only) |
### Categorization Heuristics
Use these signals to auto-categorize. When signals conflict, prefer the
higher-visibility category and flag for human review.
| Signal | Category |
|---|---|
| Commit touches migration files, schema changes | Breaking Change (if destructive) |
| Changeset marked `major` | Breaking Change |
| Commit message has `BREAKING:` or `!:` | Breaking Change |
| New UI components, new routes, new API endpoints | Highlight |
| Commit message starts with `feat:` or `add:` | Highlight or Improvement |
| Changeset marked `minor` | Highlight |
| Commit message starts with `fix:` or `bug:` | Fix |
| Changeset marked `patch` | Fix or Improvement |
| Commit message starts with `chore:`, `refactor:`, `ci:`, `test:`, `docs:` | Internal |
| PR has detailed body with user-facing description | Use PR body as the description |
### Writing Good Descriptions
- **Highlights** get 1-2 sentence descriptions explaining the user benefit.
Write from the user's perspective ("You can now..." not "Added a component that...").
- **Improvements and Fixes** are concise bullet points.
- **Breaking Changes** get detailed descriptions including what changed,
why, and what the user needs to do.
- Group related commits into a single changelog entry. Five commits implementing
one feature = one Highlight entry, not five bullets.
- Omit purely internal changes from user-facing notes entirely.
---
## Step 5 — Write the Changelog
Output the changelog to `releases/v{version}.md` using this template:
```markdown
# v{version}
> Released: {YYYY-MM-DD}
{If breaking changes detected, include this section:}
## Breaking Changes
> **Action required before upgrading.** Read the Upgrade Guide below.
- **{Breaking change title}** — {What changed and why. What the user needs to do.}
## Highlights
- **{Feature name}** — {1-2 sentence description of what it does and why it matters.}
## Improvements
- {Concise description of improvement}
## Fixes
- {Concise description of fix}
---
{If breaking changes detected, include this section:}
## Upgrade Guide
### Before You Update
1. **Back up your database.**
- SQLite: `cp paperclip.db paperclip.db.backup`
- Postgres: `pg_dump -Fc paperclip > paperclip-pre-{version}.dump`
2. **Note your current version:** `paperclip --version`
### After Updating
{Specific steps: run migrations, update configs, etc.}
### Rolling Back
If something goes wrong:
1. Restore your database backup
2. `npm install @paperclipai/server@{previous-version}`
```
### Template Rules
- Omit any empty section entirely (don't show "## Fixes" with no bullets).
- The Breaking Changes section always comes first when present.
- The Upgrade Guide always comes last when present.
- Use `**bold**` for feature/change names, regular text for descriptions.
- Keep the entire changelog scannable — a busy user should get the gist from
headings and bold text alone.
---
## Step 6 — Present for Review
After generating the draft:
1. **Show the full changelog** to the reviewer (CTO or whoever triggered the release).
2. **Flag ambiguous items** — commits you weren't sure how to categorize, or
items that might be breaking but aren't clearly signaled.
3. **Flag version bump mismatches** — if the planned bump is lower than what
the changes warrant.
4. **Wait for approval** before considering the changelog final.
If the reviewer requests edits, update `releases/v{version}.md` accordingly.
Do not proceed to publishing, website updates, or social announcements. Those
are handled by the `release` coordination skill (separate from this one).
---
## Directory Convention
Release changelogs live in `releases/` at the repo root:
```
releases/
v0.2.7.md
v0.3.0.md
...
```
Each file is named `v{version}.md` matching the git tag. This directory is
committed to the repo and serves as the source of truth for release history.
The `releases/` directory should be created with a `.gitkeep` if it doesn't
exist yet.
---
## Quick Reference
```bash
# Full workflow summary:
# 1. Find last tag
LAST_TAG=$(git tag --sort=-version:refname | head -1)
# 2. Commits since last tag
git log $LAST_TAG..HEAD --oneline --no-merges
# 3. Files changed (for breaking change detection)
git diff --name-only $LAST_TAG..HEAD
# 4. Migration changes specifically
git diff --name-only $LAST_TAG..HEAD -- packages/db/src/migrations/
# 5. Schema changes
git diff $LAST_TAG..HEAD -- packages/db/src/schema/
# 6. Unconsumed changesets
ls .changeset/*.md | grep -v README.md
# 7. Merged PRs (if gh available)
gh pr list --state merged --search "merged:>=$(git log -1 --format=%aI $LAST_TAG)" \
--json number,title,body,labels
```

402
skills/release/SKILL.md Normal file
View File

@@ -0,0 +1,402 @@
---
name: release
description: >
Coordinate a full Paperclip release across engineering, website publishing,
and social announcement. Use when CTO/CEO requests "do a release" or
"release vX.Y.Z". Runs pre-flight checks, generates changelog via
release-changelog, executes npm release, creates cross-project follow-up
tasks, and posts a release wrap-up.
---
# Release Coordination Skill
Run the full Paperclip release process as an organizational workflow, not just
an npm publish.
This skill coordinates:
- User-facing changelog generation (`release-changelog` skill)
- Canary publish to npm (`scripts/release.sh --canary`)
- Docker smoke test of the canary (`scripts/docker-onboard-smoke.sh`)
- Promotion to `latest` after canary is verified
- Website publishing task creation
- CMO announcement task creation
- Final release summary with links
---
## Trigger
Use this skill when leadership asks for:
- "do a release"
- "release {patch|minor|major}"
- "release vX.Y.Z"
---
## Preconditions
Before proceeding, verify all of the following:
1. `skills/release-changelog/SKILL.md` exists and is usable.
2. The `release-changelog` dependency work is complete/reviewed before running this flow.
3. App repo working tree is clean.
4. There are commits since the last release tag.
5. You have release permissions (`npm whoami` succeeds for real publish).
6. If running via Paperclip, you have issue context for posting status updates.
If any precondition fails, stop and report the blocker.
---
## Inputs
Collect these inputs up front:
- Release request source issue (if in Paperclip)
- Requested bump (`patch|minor|major`) or explicit version (`vX.Y.Z`)
- Whether this run is dry-run or live publish
- Company/project context for follow-up issue creation
---
## Step 0 — Idempotency Guards
Each step in this skill is designed to be safely re-runnable. Before executing
any step, check whether it has already been completed:
| Step | How to Check | If Already Done |
|---|---|---|
| Changelog | `releases/v{version}.md` exists | Read it, ask reviewer to confirm or update. Do NOT regenerate without asking. |
| Canary publish | `npm view paperclipai@{version}` succeeds | Skip canary publish. Proceed to smoke test. |
| Smoke test | Manual or scripted verification | If canary already verified, proceed to promote. |
| Promote | `git tag v{version}` exists | Skip promotion entirely. A tag means the version is already promoted to latest. |
| Website task | Search Paperclip issues for "Publish release notes for v{version}" | Skip creation. Link the existing task. |
| CMO task | Search Paperclip issues for "release announcement tweet for v{version}" | Skip creation. Link the existing task. |
**The golden rule:** If a git tag `v{version}` already exists, the release is
fully promoted. Only post-publish tasks (website, CMO, wrap-up) should proceed.
If the version exists on npm but there's no git tag, the canary was published but
not yet promoted — resume from smoke test.
**Iterating on changelogs:** You can re-run this skill with an existing changelog
to refine it _before_ the npm publish step. The `release-changelog` skill has
its own idempotency check and will ask the reviewer what to do with an existing
file. This is the expected workflow for iterating on release notes.
---
## Step 1 — Pre-flight and Version Decision
Run pre-flight in the App repo root:
```bash
LAST_TAG=$(git tag --sort=-version:refname | head -1)
git diff --quiet && git diff --cached --quiet
git log "${LAST_TAG}..HEAD" --oneline --no-merges | head -50
```
Then detect minimum required bump:
```bash
# migrations
git diff --name-only "${LAST_TAG}..HEAD" -- packages/db/src/migrations/
# schema deltas
git diff "${LAST_TAG}..HEAD" -- packages/db/src/schema/
# breaking commit conventions
git log "${LAST_TAG}..HEAD" --format="%s" | rg -n 'BREAKING CHANGE|BREAKING:|^[a-z]+!:' || true
```
Bump policy:
- Destructive migration/API removal/major changeset/breaking commit -> `major`
- Additive migrations or clear new features -> at least `minor`
- Fixes-only -> `patch`
If requested bump is lower than required minimum, escalate bump and explain why.
---
## Step 2 — Generate Changelog Draft
First, check if `releases/v{version}.md` already exists. If it does, the
`release-changelog` skill will detect this and ask the reviewer whether to keep,
regenerate, or update it. **Do not silently overwrite an existing changelog.**
Invoke the `release-changelog` skill and produce:
- `releases/v{version}.md`
- Sections ordered as: Breaking Changes (if any), Highlights, Improvements, Fixes, Upgrade Guide (if any)
Required behavior:
- Present the draft for human review.
- Flag ambiguous categorization items.
- Flag bump mismatches before publish.
- Do not publish until reviewer confirms.
---
## Step 3 — Publish Canary
The canary is the gatekeeper: every release goes to npm as a canary first. The
`latest` tag is never touched until the canary passes smoke testing.
**Idempotency check:** Before publishing, check if this version already exists
on npm:
```bash
# Check if canary is already published
npm view paperclipai@{version} version 2>/dev/null && echo "ALREADY_PUBLISHED" || echo "NOT_PUBLISHED"
# Also check git tag
git tag -l "v{version}"
```
- If a git tag exists → the release is already fully promoted. Skip to Step 6.
- If the version exists on npm but no git tag → canary was published but not yet
promoted. Skip to Step 4 (smoke test).
- If neither exists → proceed with canary publish.
### Publishing the canary
Use `release.sh` with the `--canary` flag (see script changes below):
```bash
# Dry run first
./scripts/release.sh {patch|minor|major} --canary --dry-run
# Publish canary (after dry-run review)
./scripts/release.sh {patch|minor|major} --canary
```
This publishes all packages to npm with the `canary` dist-tag. The `latest` tag
is **not** updated. Users running `npx paperclipai onboard` still get the
previous stable version.
After publish, verify the canary is accessible:
```bash
npm view paperclipai@canary version
# Should show the new version
```
**How `--canary` works in release.sh:**
- Steps 1-5 are the same (preflight, changeset, version, build, CLI bundle)
- Step 6 uses `npx changeset publish --tag canary` instead of `npx changeset publish`
- Step 7 does NOT commit or tag — the commit and tag happen later in the promote
step, only after smoke testing passes
**Script changes required:** Add `--canary` support to `scripts/release.sh`:
- Parse `--canary` flag alongside `--dry-run`
- When `--canary`: pass `--tag canary` to `changeset publish`
- When `--canary`: skip the git commit and tag step (Step 7)
- When NOT `--canary`: behavior is unchanged (backwards compatible)
---
## Step 4 — Smoke Test the Canary
Run the canary in a clean Docker environment to verify `npx paperclipai onboard`
works end-to-end.
### Automated smoke test
Use the existing Docker smoke test infrastructure with the canary version:
```bash
PAPERCLIPAI_VERSION=canary ./scripts/docker-onboard-smoke.sh
```
This builds a clean Ubuntu container, installs `paperclipai@canary` via npx, and
runs the onboarding flow. The UI is accessible at `http://localhost:3131`.
### What to verify
At minimum, confirm:
1. **Container starts** — no npm install errors, no missing dependencies
2. **Onboarding completes** — the wizard runs through without crashes
3. **Server boots** — UI is accessible at the expected port
4. **Basic operations** — can create a company, view the dashboard
For a more thorough check (stretch goal — can be automated later):
5. **Browser automation** — script Playwright/Puppeteer to walk through onboard
in the Docker container's browser and verify key pages render
### If smoke test fails
- Do NOT promote the canary.
- Fix the issue, publish a new canary (re-run Step 3 — idempotency guards allow
this since there's no git tag yet).
- Re-run the smoke test.
### If smoke test passes
Proceed to Step 5 (promote).
---
## Step 5 — Promote Canary to Latest
Once the canary passes smoke testing, promote it to `latest` so that
`npx paperclipai onboard` picks up the new version.
### Promote on npm
```bash
# For each published package, move the dist-tag from canary to latest
npm dist-tag add paperclipai@{version} latest
npm dist-tag add @paperclipai/server@{version} latest
npm dist-tag add @paperclipai/cli@{version} latest
npm dist-tag add @paperclipai/shared@{version} latest
npm dist-tag add @paperclipai/db@{version} latest
npm dist-tag add @paperclipai/adapter-utils@{version} latest
npm dist-tag add @paperclipai/adapter-claude-local@{version} latest
npm dist-tag add @paperclipai/adapter-codex-local@{version} latest
npm dist-tag add @paperclipai/adapter-openclaw@{version} latest
npm dist-tag add @paperclipai/adapter-opencode-local@{version} latest
npm dist-tag add @paperclipai/adapter-cursor-local@{version} latest
# NOTE: verify this list against the packages actually published from the
# workspace — any adapter missing here keeps a stale `latest` dist-tag.
```
**Script option:** Add `./scripts/release.sh --promote {version}` to automate
the dist-tag promotion for all packages.
### Commit and tag
After promotion, finalize in git (this is what `release.sh` Step 7 normally
does, but was deferred during canary publish):
```bash
git add .
git commit -m "chore: release v{version}"
git tag "v{version}"
```
### Verify promotion
```bash
npm view paperclipai@latest version
# Should now show the new version
# Final sanity check
npx --yes paperclipai@latest --version
```
---
## Step 6 — Create Cross-Project Follow-up Tasks
**Idempotency check:** Before creating tasks, search for existing ones:
```
GET /api/companies/{companyId}/issues?q=release+notes+v{version}
GET /api/companies/{companyId}/issues?q=announcement+tweet+v{version}
```
If matching tasks already exist (check title contains the version), skip
creation and link the existing tasks instead. Do not create duplicates.
Create at least two tasks in Paperclip (only if they don't already exist):
1. Website task: publish changelog for `v{version}`
2. CMO task: draft announcement tweet for `v{version}`
When creating tasks:
- Set `parentId` to the release issue id.
- Carry over `goalId` from the parent issue when present.
- Include `billingCode` for cross-team work when required by company policy.
- Mark website task `high` priority if release has breaking changes.
Suggested payloads:
```json
POST /api/companies/{companyId}/issues
{
"projectId": "{websiteProjectId}",
"parentId": "{releaseIssueId}",
"goalId": "{goalId-or-null}",
"billingCode": "{billingCode-or-null}",
"title": "Publish release notes for v{version}",
"priority": "medium",
"status": "todo",
"description": "Publish /changelog entry for v{version}. Include full markdown from releases/v{version}.md and prominent upgrade guide if breaking changes exist."
}
```
```json
POST /api/companies/{companyId}/issues
{
"projectId": "{workspaceProjectId}",
"parentId": "{releaseIssueId}",
"goalId": "{goalId-or-null}",
"billingCode": "{billingCode-or-null}",
"title": "Draft release announcement tweet for v{version}",
"priority": "medium",
"status": "todo",
"description": "Draft launch tweet with top 1-2 highlights, version number, and changelog URL. If breaking changes exist, include an explicit upgrade-guide callout."
}
```
---
## Step 7 — Wrap Up the Release Issue
Post a concise markdown update linking:
- Release issue
- Changelog file (`releases/v{version}.md`)
- npm package URL (both `@canary` and `@latest` after promotion)
- Canary smoke test result (pass/fail, what was tested)
- Website task
- CMO task
- Final changelog URL (once website publishes)
- Tweet URL (once published)
Completion rules:
- Keep issue `in_progress` until canary is promoted AND website + social tasks
are done.
- Mark `done` only when all required artifacts are published and linked.
- If waiting on another team, keep open with clear owner and next action.
---
## Release Flow Summary
The full release lifecycle is now:
```
1. Generate changelog → releases/v{version}.md (review + iterate)
2. Publish canary → npm @canary dist-tag (latest untouched)
3. Smoke test canary → Docker clean install verification
4. Promote to latest → npm @latest dist-tag + git tag + commit
5. Create follow-up tasks → website changelog + CMO tweet
6. Wrap up → link everything, close issue
```
At any point you can re-enter the flow — idempotency guards detect which steps
are already done and skip them. The changelog can be iterated before or after
canary publish. The canary can be re-published if the smoke test reveals issues
(just fix + re-run Step 3). Only after smoke testing passes does `latest` get
updated.
---
## Paperclip API Notes (When Running in Agent Context)
Use:
- `GET /api/companies/{companyId}/projects` to resolve website/workspace project IDs.
- `POST /api/companies/{companyId}/issues` to create follow-up tasks.
- `PATCH /api/issues/{issueId}` with comments for release progress.
For issue-modifying calls, include:
- `Authorization: Bearer $PAPERCLIP_API_KEY`
- `X-Paperclip-Run-Id: $PAPERCLIP_RUN_ID`
---
## Failure Handling
If blocked, update the release issue explicitly with:
- what failed
- exact blocker
- who must act next
- whether any release artifacts were partially published
Never silently fail mid-release.

View File

@@ -0,0 +1,18 @@
<svg width="300" height="300" viewBox="0 0 300 300" fill="none" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(30, 0)">
<g clip-path="url(#clip0_1401_86283)">
<mask id="mask0_1401_86283" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="0" width="240" height="300">
<path d="M240 0H0V300H240V0Z" fill="white"/>
</mask>
<g mask="url(#mask0_1401_86283)">
<path d="M180 240H60V120H180V240Z" fill="#4B4646"/>
<path d="M180 60H60V240H180V60ZM240 300H0V0H240V300Z" fill="#F1ECEC"/>
</g>
</g>
</g>
<defs>
<clipPath id="clip0_1401_86283">
<rect width="240" height="300" fill="white"/>
</clipPath>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 631 B

View File

@@ -0,0 +1,18 @@
<svg width="300" height="300" viewBox="0 0 300 300" fill="none" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(30, 0)">
<g clip-path="url(#clip0_1401_86274)">
<mask id="mask0_1401_86274" style="mask-type:luminance" maskUnits="userSpaceOnUse" x="0" y="0" width="240" height="300">
<path d="M240 0H0V300H240V0Z" fill="white"/>
</mask>
<g mask="url(#mask0_1401_86274)">
<path d="M180 240H60V120H180V240Z" fill="#CFCECD"/>
<path d="M180 60H60V240H180V60ZM240 300H0V0H240V300Z" fill="#211E1E"/>
</g>
</g>
</g>
<defs>
<clipPath id="clip0_1401_86274">
<rect width="240" height="300" fill="white"/>
</clipPath>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 631 B

View File

@@ -1,6 +1,14 @@
{
"id": "/",
"name": "Paperclip",
"short_name": "Paperclip",
"description": "AI-powered project management and agent coordination platform",
"start_url": "/",
"scope": "/",
"display": "standalone",
"orientation": "any",
"theme_color": "#18181b",
"background_color": "#18181b",
"icons": [
{
"src": "/android-chrome-192x192.png",
@@ -11,9 +19,12 @@
"src": "/android-chrome-512x512.png",
"sizes": "512x512",
"type": "image/png"
},
{
"src": "/android-chrome-512x512.png",
"sizes": "512x512",
"type": "image/png",
"purpose": "maskable"
}
],
"theme_color": "#18181b",
"background_color": "#18181b",
"display": "standalone"
]
}

42
ui/public/sw.js Normal file
View File

@@ -0,0 +1,42 @@
// Service worker: network-first strategy with the cache acting purely as an
// offline fallback. Bump CACHE_NAME on deploy to invalidate stale entries.
const CACHE_NAME = "paperclip-v2";

self.addEventListener("install", () => {
  // Activate the updated worker immediately instead of waiting for all
  // controlled tabs to close.
  self.skipWaiting();
});

self.addEventListener("activate", (event) => {
  // Wipe every existing cache (old versions included — this cache is only a
  // fallback and is repopulated lazily by the fetch handler), then take
  // control of open clients. clients.claim() is chained inside waitUntil so
  // the worker cannot be terminated before claiming finishes.
  event.waitUntil(
    caches
      .keys()
      .then((keys) => Promise.all(keys.map((key) => caches.delete(key))))
      .then(() => self.clients.claim())
  );
});

self.addEventListener("fetch", (event) => {
  const { request } = event;
  const url = new URL(request.url);

  // Skip non-GET requests and API calls — those must always hit the network
  // and must never be served from (or written to) the cache.
  if (request.method !== "GET" || url.pathname.startsWith("/api")) {
    return;
  }

  // Network-first for everything — cache is only an offline fallback.
  event.respondWith(
    fetch(request)
      .then((response) => {
        // Cache only successful same-origin responses.
        if (response.ok && url.origin === self.location.origin) {
          const clone = response.clone();
          caches.open(CACHE_NAME).then((cache) => cache.put(request, clone));
        }
        return response;
      })
      .catch(() => {
        if (request.mode === "navigate") {
          // BUG FIX: caches.match() returns a Promise, which is always
          // truthy — the previous `match || new Response(...)` never
          // produced the offline Response, and an empty cache made
          // respondWith receive `undefined` (a hard network error).
          // Resolve the match first, then fall back explicitly.
          return caches
            .match("/")
            .then((cached) => cached ?? new Response("Offline", { status: 503 }));
        }
        // Same fix for non-navigation requests: always hand respondWith a
        // real Response rather than a possibly-undefined match result.
        return caches
          .match(request)
          .then((cached) => cached ?? new Response("Offline", { status: 503 }));
      })
  );
});

View File

@@ -16,9 +16,41 @@ export function OpenClawConfigFields({
eff,
mark,
}: AdapterConfigFieldsProps) {
const configuredHeaders =
config.headers && typeof config.headers === "object" && !Array.isArray(config.headers)
? (config.headers as Record<string, unknown>)
: {};
const effectiveHeaders =
(eff("adapterConfig", "headers", configuredHeaders) as Record<string, unknown>) ?? {};
const effectiveGatewayAuthHeader = typeof effectiveHeaders["x-openclaw-auth"] === "string"
? String(effectiveHeaders["x-openclaw-auth"])
: "";
const commitGatewayAuthHeader = (rawValue: string) => {
const nextValue = rawValue.trim();
const nextHeaders: Record<string, unknown> = { ...effectiveHeaders };
if (nextValue) {
nextHeaders["x-openclaw-auth"] = nextValue;
} else {
delete nextHeaders["x-openclaw-auth"];
}
mark("adapterConfig", "headers", Object.keys(nextHeaders).length > 0 ? nextHeaders : undefined);
};
const transport = eff(
"adapterConfig",
"streamTransport",
String(config.streamTransport ?? "sse"),
);
const sessionStrategy = eff(
"adapterConfig",
"sessionKeyStrategy",
String(config.sessionKeyStrategy ?? "fixed"),
);
return (
<>
<Field label="Webhook URL" hint={help.webhookUrl}>
<Field label="Gateway URL" hint={help.webhookUrl}>
<DraftInput
value={
isCreate
@@ -36,17 +68,80 @@ export function OpenClawConfigFields({
/>
</Field>
{!isCreate && (
<Field label="Webhook auth header (optional)">
<DraftInput
value={
eff("adapterConfig", "webhookAuthHeader", String(config.webhookAuthHeader ?? ""))
}
onCommit={(v) => mark("adapterConfig", "webhookAuthHeader", v || undefined)}
immediate
className={inputClass}
placeholder="Bearer <token>"
/>
</Field>
<>
<Field label="Paperclip API URL override">
<DraftInput
value={
eff(
"adapterConfig",
"paperclipApiUrl",
String(config.paperclipApiUrl ?? ""),
)
}
onCommit={(v) => mark("adapterConfig", "paperclipApiUrl", v || undefined)}
immediate
className={inputClass}
placeholder="https://paperclip.example"
/>
</Field>
<Field label="Transport">
<select
value={transport}
onChange={(e) => mark("adapterConfig", "streamTransport", e.target.value)}
className={inputClass}
>
<option value="sse">SSE (recommended)</option>
<option value="webhook">Webhook</option>
</select>
</Field>
<Field label="Session strategy">
<select
value={sessionStrategy}
onChange={(e) => mark("adapterConfig", "sessionKeyStrategy", e.target.value)}
className={inputClass}
>
<option value="fixed">Fixed</option>
<option value="issue">Per issue</option>
<option value="run">Per run</option>
</select>
</Field>
{sessionStrategy === "fixed" && (
<Field label="Session key">
<DraftInput
value={eff("adapterConfig", "sessionKey", String(config.sessionKey ?? "paperclip"))}
onCommit={(v) => mark("adapterConfig", "sessionKey", v || undefined)}
immediate
className={inputClass}
placeholder="paperclip"
/>
</Field>
)}
<Field label="Webhook auth header (optional)">
<DraftInput
value={
eff("adapterConfig", "webhookAuthHeader", String(config.webhookAuthHeader ?? ""))
}
onCommit={(v) => mark("adapterConfig", "webhookAuthHeader", v || undefined)}
immediate
className={inputClass}
placeholder="Bearer <token>"
/>
</Field>
<Field label="Gateway auth token (x-openclaw-auth)">
<DraftInput
value={effectiveGatewayAuthHeader}
onCommit={commitGatewayAuthHeader}
immediate
className={inputClass}
placeholder="OpenClaw gateway token"
/>
</Field>
</>
)}
</>
);

View File

@@ -8,7 +8,7 @@ import { ChoosePathButton } from "../../components/PathInstructionsModal";
const inputClass =
"w-full rounded-md border border-border px-2.5 py-1.5 bg-transparent outline-none text-sm font-mono placeholder:text-muted-foreground/40";
const instructionsFileHint =
"Absolute path to a markdown file (e.g. AGENTS.md) that defines this agent's behavior. Injected into the prompt at runtime.";
"Absolute path to a markdown file (e.g. AGENTS.md) that defines this agent's behavior. Injected into the system prompt at runtime.";
export function OpenCodeLocalConfigFields({
isCreate,

View File

@@ -3,9 +3,9 @@ import type { TranscriptEntry, StdoutLineParser } from "./types";
type RunLogChunk = { ts: string; stream: "stdout" | "stderr" | "system"; chunk: string };
function appendTranscriptEntry(entries: TranscriptEntry[], entry: TranscriptEntry) {
if (entry.kind === "thinking" && entry.delta) {
if ((entry.kind === "thinking" || entry.kind === "assistant") && entry.delta) {
const last = entries[entries.length - 1];
if (last && last.kind === "thinking" && last.delta) {
if (last && last.kind === entry.kind && last.delta) {
last.text += entry.text;
last.ts = entry.ts;
return;

View File

@@ -13,6 +13,7 @@ type InviteSummary = {
onboardingTextUrl?: string;
skillIndexPath?: string;
skillIndexUrl?: string;
inviteMessage?: string | null;
};
type AcceptInviteInput =
@@ -39,7 +40,21 @@ type AgentJoinRequestAccepted = JoinRequest & {
type InviteOnboardingManifest = {
invite: InviteSummary;
onboarding: Record<string, unknown>;
onboarding: {
inviteMessage?: string | null;
connectivity?: {
guidance?: string;
connectionCandidates?: string[];
testResolutionEndpoint?: {
method?: string;
path?: string;
url?: string;
};
};
textInstructions?: {
url?: string;
};
};
};
type BoardClaimStatus = {
@@ -54,8 +69,8 @@ export const accessApi = {
companyId: string,
input: {
allowedJoinTypes?: "human" | "agent" | "both";
expiresInHours?: number;
defaultsPayload?: Record<string, unknown> | null;
agentMessage?: string | null;
} = {},
) =>
api.post<{
@@ -64,6 +79,9 @@ export const accessApi = {
inviteUrl: string;
expiresAt: string;
allowedJoinTypes: "human" | "agent" | "both";
onboardingTextPath?: string;
onboardingTextUrl?: string;
inviteMessage?: string | null;
}>(`/companies/${companyId}/invites`, input),
getInvite: (token: string) => api.get<InviteSummary>(`/invites/${token}`),

Some files were not shown because too many files have changed in this diff Show More