Compare commits
356 Commits
@paperclip
...
paperclipa
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5dd1e6335a | ||
|
|
469bfe3953 | ||
|
|
d20341c797 | ||
|
|
756ddb6cf7 | ||
|
|
200dd66f63 | ||
|
|
9859bac440 | ||
|
|
8d6b20b47b | ||
|
|
a418106005 | ||
|
|
84ef17bf85 | ||
|
|
23dec980e2 | ||
|
|
03c37f8dea | ||
|
|
8360b2e3e3 | ||
|
|
d9ba4790e9 | ||
|
|
3ec96fdb73 | ||
|
|
eecb780dd7 | ||
|
|
632079ae3b | ||
|
|
7d8d6a5caf | ||
|
|
948080fee9 | ||
|
|
af0e05f38c | ||
|
|
8d53800c19 | ||
|
|
422f57b160 | ||
|
|
31c947bf7f | ||
|
|
f5bf743745 | ||
|
|
0a8b96cdb3 | ||
|
|
a47ea343ba | ||
|
|
0781b7a15c | ||
|
|
30ee59c324 | ||
|
|
aa2b11d528 | ||
|
|
e1ddcbb71f | ||
|
|
df94c98494 | ||
|
|
a7cfd9f24b | ||
|
|
e48beafc90 | ||
|
|
e6e41dba9d | ||
|
|
f4a9788f2d | ||
|
|
ccd501ea02 | ||
|
|
d7b98a72b4 | ||
|
|
210715117c | ||
|
|
38cb2bf3c4 | ||
|
|
f2a0a0b804 | ||
|
|
035e1a9333 | ||
|
|
ec4667c8b2 | ||
|
|
f32b76f213 | ||
|
|
ee7fddf8d5 | ||
|
|
77e04407b9 | ||
|
|
7b70713fcb | ||
|
|
ad55af04cc | ||
|
|
57406dbc90 | ||
|
|
e35e2c4343 | ||
|
|
d58f269281 | ||
|
|
2a7043d677 | ||
|
|
31b5ff1c61 | ||
|
|
c674462a02 | ||
|
|
e3ff0c8e1b | ||
|
|
17b10c43fe | ||
|
|
343d4e5877 | ||
|
|
1078c7dd2b | ||
|
|
4c630bc66e | ||
|
|
f5190f28d1 | ||
|
|
edfc6be63c | ||
|
|
61551ffea3 | ||
|
|
0fedd8a395 | ||
|
|
b090c33ca1 | ||
|
|
3fb96506bd | ||
|
|
dcf879f6fb | ||
|
|
4e01633202 | ||
|
|
ff3f04ff48 | ||
|
|
91fda5d04f | ||
|
|
77e06c57f9 | ||
|
|
0f75c35392 | ||
|
|
45473b3e72 | ||
|
|
a96556b8f4 | ||
|
|
ce8fe38ffc | ||
|
|
6e86f69f95 | ||
|
|
7661fae4b3 | ||
|
|
ba080cb4dd | ||
|
|
3860812323 | ||
|
|
2639184f46 | ||
|
|
61966fba1f | ||
|
|
54b512f9e0 | ||
|
|
667d23e79e | ||
|
|
416177ae4c | ||
|
|
72cc748aa8 | ||
|
|
9299660388 | ||
|
|
2cb82f326f | ||
|
|
f81d2ebcc4 | ||
|
|
048e2b1bfe | ||
|
|
5fae7d4de7 | ||
|
|
0f32fffe79 | ||
|
|
0233525e99 | ||
|
|
20b171bd16 | ||
|
|
3f2274cd8d | ||
|
|
c59e059976 | ||
|
|
9933039094 | ||
|
|
b886eb3cf0 | ||
|
|
53c944e8bc | ||
|
|
977f5570be | ||
|
|
609b55f530 | ||
|
|
2223afa0e9 | ||
|
|
3479ea6e80 | ||
|
|
63a876ca3c | ||
|
|
df0f101fbd | ||
|
|
0abb6a1205 | ||
|
|
d52f1d4b44 | ||
|
|
e27ec5de8c | ||
|
|
83488b4ed0 | ||
|
|
271a632f1c | ||
|
|
9a0e3a8425 | ||
|
|
1c1b86f495 | ||
|
|
1420b86aa7 | ||
|
|
22053d18e4 | ||
|
|
3b4db7a3bc | ||
|
|
db15dfaf5e | ||
|
|
1afadd7354 | ||
|
|
9ac2e71187 | ||
|
|
3bde21bb06 | ||
|
|
672d769c68 | ||
|
|
46c343f81d | ||
|
|
17058dd751 | ||
|
|
346152f67d | ||
|
|
dd14643848 | ||
|
|
1dac0ec7cf | ||
|
|
7c0a3efea6 | ||
|
|
671a8ae554 | ||
|
|
baa71d6a08 | ||
|
|
638f2303bb | ||
|
|
a4d0901e89 | ||
|
|
f85f2fbcc2 | ||
|
|
fbcd80948e | ||
|
|
9d6a83dcca | ||
|
|
a251a53571 | ||
|
|
63afce3692 | ||
|
|
e07646bade | ||
|
|
ddb7101fa5 | ||
|
|
3f42357e5f | ||
|
|
3b08d4d582 | ||
|
|
049f768bc7 | ||
|
|
19c295ec03 | ||
|
|
a6b5f12daf | ||
|
|
4bd6961020 | ||
|
|
fd0799fd71 | ||
|
|
b91820afd3 | ||
|
|
0315e4cdc2 | ||
|
|
654463c28f | ||
|
|
f1ad727f8e | ||
|
|
10cccc07cd | ||
|
|
a498c268c5 | ||
|
|
fa8499719a | ||
|
|
1fcc6900ff | ||
|
|
45708a06f1 | ||
|
|
792397c2a9 | ||
|
|
36e4e67025 | ||
|
|
6077ae6064 | ||
|
|
eb7f690ceb | ||
|
|
ef0e08b8ed | ||
|
|
3bcdf3e3ad | ||
|
|
fccec94805 | ||
|
|
bee9fdd207 | ||
|
|
0ae5d81deb | ||
|
|
ffc59f5b08 | ||
|
|
f5f8c4a883 | ||
|
|
e693e3d466 | ||
|
|
e4928f3a10 | ||
|
|
514dc43923 | ||
|
|
b539462319 | ||
|
|
aa7e069044 | ||
|
|
3b0ff94e3f | ||
|
|
5ab1c18530 | ||
|
|
36013c35d9 | ||
|
|
b155415d7d | ||
|
|
d7f68ec1c9 | ||
|
|
af09510f6a | ||
|
|
a2bdfb0dd3 | ||
|
|
67247b5d6a | ||
|
|
5f2dfcb94e | ||
|
|
67491483b7 | ||
|
|
54a4f784a4 | ||
|
|
5aecb148a2 | ||
|
|
f49a003bd9 | ||
|
|
feb384acca | ||
|
|
c9718dc27a | ||
|
|
0b42045053 | ||
|
|
d8f7c6bf81 | ||
|
|
c8bd578415 | ||
|
|
5dfd9a2429 | ||
|
|
0324259da3 | ||
|
|
7af9aa61fa | ||
|
|
55bb3012ea | ||
|
|
ca919d73f9 | ||
|
|
70051735f6 | ||
|
|
2ad616780f | ||
|
|
fa43e5b0dd | ||
|
|
1d42b6e726 | ||
|
|
a3493dbb74 | ||
|
|
59a07324ec | ||
|
|
4d8663ebc8 | ||
|
|
2e7bf85e7a | ||
|
|
35e4897256 | ||
|
|
68ee3f8ea0 | ||
|
|
cf1ccd1e14 | ||
|
|
f56901b473 | ||
|
|
cec372f9bb | ||
|
|
8355dd7905 | ||
|
|
8151331375 | ||
|
|
b06e41bed2 | ||
|
|
1179d7e75a | ||
|
|
2ec2dcf9c6 | ||
|
|
cbce8bfbc3 | ||
|
|
0f895a8cf9 | ||
|
|
c3ac209e5f | ||
|
|
192d76678e | ||
|
|
7bcf994064 | ||
|
|
e670324334 | ||
|
|
c23ddbad3f | ||
|
|
e6339e911d | ||
|
|
c0c64fe682 | ||
|
|
ae60879507 | ||
|
|
de60519ef6 | ||
|
|
44a00596a4 | ||
|
|
a57732f7dd | ||
|
|
63c0e22a2a | ||
|
|
2405851436 | ||
|
|
d9d2ad209d | ||
|
|
e1d4e37776 | ||
|
|
08ac2bc9a7 | ||
|
|
b213eb695b | ||
|
|
494448dcf7 | ||
|
|
854e818b74 | ||
|
|
38d3d5fa59 | ||
|
|
86bd26ee8a | ||
|
|
9cacf4a981 | ||
|
|
9184cf92dd | ||
|
|
38b9a55eab | ||
|
|
3369a9e685 | ||
|
|
553c939f1f | ||
|
|
67bc601258 | ||
|
|
9d570b3ed7 | ||
|
|
d4eb502389 | ||
|
|
50276ed981 | ||
|
|
2d21045424 | ||
|
|
eb607f7df8 | ||
|
|
eb033a221f | ||
|
|
5f6e68e7aa | ||
|
|
88682632f9 | ||
|
|
264d40e6ca | ||
|
|
de7d6294ea | ||
|
|
f41373dc46 | ||
|
|
1bbb98aaa9 | ||
|
|
cecb94213d | ||
|
|
0cdc9547d9 | ||
|
|
4c1504872f | ||
|
|
7086ad00ae | ||
|
|
222e0624a8 | ||
|
|
81bc8c7313 | ||
|
|
5134cac993 | ||
|
|
e401979851 | ||
|
|
4569d57f5b | ||
|
|
eff0c506fa | ||
|
|
c486bad2dd | ||
|
|
0e387426fa | ||
|
|
6ee4315eef | ||
|
|
7c07b16f80 | ||
|
|
77500b50d9 | ||
|
|
0cc75c6e10 | ||
|
|
82d97418b2 | ||
|
|
35a7acc058 | ||
|
|
bd32c871b7 | ||
|
|
8e63dd44b6 | ||
|
|
4eedf15870 | ||
|
|
a0e6ad0b7d | ||
|
|
4b90784183 | ||
|
|
ab6ec999c5 | ||
|
|
babea25649 | ||
|
|
e9ffde610b | ||
|
|
a05aa99c7e | ||
|
|
690149d555 | ||
|
|
ffd1631b14 | ||
|
|
185317c153 | ||
|
|
988f1244e5 | ||
|
|
38b855e495 | ||
|
|
0ed0c0abdb | ||
|
|
7a2ecff4f0 | ||
|
|
bee24e880f | ||
|
|
7ab5b8a0c2 | ||
|
|
089a2d08bf | ||
|
|
d8fb93edcf | ||
|
|
201d91b4f5 | ||
|
|
9da1803f29 | ||
|
|
1b98c2b279 | ||
|
|
a85511dad2 | ||
|
|
f75a4d9589 | ||
|
|
0d36cf00f8 | ||
|
|
4b8e880a96 | ||
|
|
1e5e09f0fa | ||
|
|
57db28e9e6 | ||
|
|
c610951a71 | ||
|
|
e5049a448e | ||
|
|
1f261d90f3 | ||
|
|
d2dd8d0cc5 | ||
|
|
e08362b667 | ||
|
|
2c809d55c0 | ||
|
|
529d53acc0 | ||
|
|
fd73d6fcab | ||
|
|
cdf63d0024 | ||
|
|
09a8ecbded | ||
|
|
6f98c5f25c | ||
|
|
70e41150c5 | ||
|
|
bc765b0867 | ||
|
|
9dbd72cffd | ||
|
|
084c0a19a2 | ||
|
|
85f95c4542 | ||
|
|
732ae4e46c | ||
|
|
c1a92d8520 | ||
|
|
69b2875060 | ||
|
|
7cb46d97f6 | ||
|
|
e31d77bc47 | ||
|
|
90d39b9cbd | ||
|
|
bc68c3a504 | ||
|
|
59bc52f527 | ||
|
|
d37e1d3dc3 | ||
|
|
34d9122b45 | ||
|
|
1f7218640c | ||
|
|
0078fa66a3 | ||
|
|
f4f9d6fd3f | ||
|
|
69c453b274 | ||
|
|
d54ee6c4dc | ||
|
|
b48f0314e7 | ||
|
|
e1b24c1d5c | ||
|
|
1c9b7ef918 | ||
|
|
8f70e79240 | ||
|
|
eabfd9d9f6 | ||
|
|
6a101e0da1 | ||
|
|
426c1044b6 | ||
|
|
875924a7f3 | ||
|
|
e835c5cee9 | ||
|
|
db54f77b73 | ||
|
|
67eb5e5734 | ||
|
|
758a5538c5 | ||
|
|
3ae112acff | ||
|
|
9454f76c0c | ||
|
|
944263f44b | ||
|
|
a1944fceab | ||
|
|
8d5c9fde3b | ||
|
|
d8688bbd93 | ||
|
|
306cd65353 | ||
|
|
8a85173150 | ||
|
|
b4a02ebc3f | ||
|
|
1f57577c54 | ||
|
|
ec0b7daca2 | ||
|
|
bdc0480e62 | ||
|
|
c145074daf | ||
|
|
f6a09bcbea | ||
|
|
d4a2fc6464 | ||
|
|
be50daba42 | ||
|
|
7b334ff2b7 | ||
|
|
5bbfddf70d | ||
|
|
358467a506 |
5
.changeset/add-pi-adapter-support.md
Normal file
5
.changeset/add-pi-adapter-support.md
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
"@paperclipai/shared": minor
|
||||
---
|
||||
|
||||
Add support for Pi local adapter in constants and onboarding UI.
|
||||
44
.github/workflows/e2e.yml
vendored
Normal file
44
.github/workflows/e2e.yml
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
name: E2E Tests
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
skip_llm:
|
||||
description: "Skip LLM-dependent assertions (default: true)"
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
jobs:
|
||||
e2e:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
env:
|
||||
PAPERCLIP_E2E_SKIP_LLM: ${{ inputs.skip_llm && 'true' || 'false' }}
|
||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9
|
||||
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: pnpm
|
||||
|
||||
- run: pnpm install --frozen-lockfile
|
||||
- run: pnpm build
|
||||
- run: npx playwright install --with-deps chromium
|
||||
|
||||
- name: Run e2e tests
|
||||
run: pnpm run test:e2e
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: always()
|
||||
with:
|
||||
name: playwright-report
|
||||
path: |
|
||||
tests/e2e/playwright-report/
|
||||
tests/e2e/test-results/
|
||||
retention-days: 14
|
||||
49
.github/workflows/pr-policy.yml
vendored
Normal file
49
.github/workflows/pr-policy.yml
vendored
Normal file
@@ -0,0 +1,49 @@
|
||||
name: PR Policy
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
|
||||
concurrency:
|
||||
group: pr-policy-${{ github.event.pull_request.number }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
policy:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9.15.4
|
||||
run_install: false
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
|
||||
- name: Block manual lockfile edits
|
||||
if: github.head_ref != 'chore/refresh-lockfile'
|
||||
run: |
|
||||
changed="$(git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}")"
|
||||
if printf '%s\n' "$changed" | grep -qx 'pnpm-lock.yaml'; then
|
||||
echo "Do not commit pnpm-lock.yaml in pull requests. CI owns lockfile updates."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Validate dependency resolution when manifests change
|
||||
run: |
|
||||
changed="$(git diff --name-only "${{ github.event.pull_request.base.sha }}" "${{ github.event.pull_request.head.sha }}")"
|
||||
manifest_pattern='(^|/)package\.json$|^pnpm-workspace\.yaml$|^\.npmrc$|^pnpmfile\.(cjs|js|mjs)$'
|
||||
if printf '%s\n' "$changed" | grep -Eq "$manifest_pattern"; then
|
||||
pnpm install --lockfile-only --ignore-scripts --no-frozen-lockfile
|
||||
fi
|
||||
42
.github/workflows/pr-verify.yml
vendored
Normal file
42
.github/workflows/pr-verify.yml
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
name: PR Verify
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
|
||||
concurrency:
|
||||
group: pr-verify-${{ github.event.pull_request.number }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
verify:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9.15.4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: pnpm
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --no-frozen-lockfile
|
||||
|
||||
- name: Typecheck
|
||||
run: pnpm -r typecheck
|
||||
|
||||
- name: Run tests
|
||||
run: pnpm test:run
|
||||
|
||||
- name: Build
|
||||
run: pnpm build
|
||||
81
.github/workflows/refresh-lockfile.yml
vendored
Normal file
81
.github/workflows/refresh-lockfile.yml
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
name: Refresh Lockfile
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: refresh-lockfile-master
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
refresh:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9.15.4
|
||||
run_install: false
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 20
|
||||
cache: pnpm
|
||||
|
||||
- name: Refresh pnpm lockfile
|
||||
run: pnpm install --lockfile-only --ignore-scripts --no-frozen-lockfile
|
||||
|
||||
- name: Fail on unexpected file changes
|
||||
run: |
|
||||
changed="$(git status --porcelain)"
|
||||
if [ -z "$changed" ]; then
|
||||
echo "Lockfile is already up to date."
|
||||
exit 0
|
||||
fi
|
||||
if printf '%s\n' "$changed" | grep -Fvq ' pnpm-lock.yaml'; then
|
||||
echo "Unexpected files changed during lockfile refresh:"
|
||||
echo "$changed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Create or update pull request
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
if git diff --quiet -- pnpm-lock.yaml; then
|
||||
echo "Lockfile unchanged, nothing to do."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
BRANCH="chore/refresh-lockfile"
|
||||
git config user.name "lockfile-bot"
|
||||
git config user.email "lockfile-bot@users.noreply.github.com"
|
||||
|
||||
git checkout -B "$BRANCH"
|
||||
git add pnpm-lock.yaml
|
||||
git commit -m "chore(lockfile): refresh pnpm-lock.yaml"
|
||||
git push --force origin "$BRANCH"
|
||||
|
||||
# Create PR if one doesn't already exist
|
||||
existing=$(gh pr list --head "$BRANCH" --json number --jq '.[0].number')
|
||||
if [ -z "$existing" ]; then
|
||||
gh pr create \
|
||||
--head "$BRANCH" \
|
||||
--title "chore(lockfile): refresh pnpm-lock.yaml" \
|
||||
--body "Auto-generated lockfile refresh after dependencies changed on master. This PR only updates pnpm-lock.yaml."
|
||||
echo "Created new PR."
|
||||
else
|
||||
echo "PR #$existing already exists, branch updated via force push."
|
||||
fi
|
||||
132
.github/workflows/release.yml
vendored
Normal file
132
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,132 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
channel:
|
||||
description: Release channel
|
||||
required: true
|
||||
type: choice
|
||||
default: canary
|
||||
options:
|
||||
- canary
|
||||
- stable
|
||||
bump:
|
||||
description: Semantic version bump
|
||||
required: true
|
||||
type: choice
|
||||
default: patch
|
||||
options:
|
||||
- patch
|
||||
- minor
|
||||
- major
|
||||
dry_run:
|
||||
description: Preview the release without publishing
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
|
||||
concurrency:
|
||||
group: release-${{ github.ref }}
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
verify:
|
||||
if: startsWith(github.ref, 'refs/heads/release/')
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9.15.4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 24
|
||||
cache: pnpm
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Typecheck
|
||||
run: pnpm -r typecheck
|
||||
|
||||
- name: Run tests
|
||||
run: pnpm test:run
|
||||
|
||||
- name: Build
|
||||
run: pnpm build
|
||||
|
||||
publish:
|
||||
if: startsWith(github.ref, 'refs/heads/release/')
|
||||
needs: verify
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 45
|
||||
environment: npm-release
|
||||
permissions:
|
||||
contents: write
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v4
|
||||
with:
|
||||
version: 9.15.4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 24
|
||||
cache: pnpm
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Configure git author
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Run release script
|
||||
env:
|
||||
GITHUB_ACTIONS: "true"
|
||||
run: |
|
||||
args=("${{ inputs.bump }}")
|
||||
if [ "${{ inputs.channel }}" = "canary" ]; then
|
||||
args+=("--canary")
|
||||
fi
|
||||
if [ "${{ inputs.dry_run }}" = "true" ]; then
|
||||
args+=("--dry-run")
|
||||
fi
|
||||
./scripts/release.sh "${args[@]}"
|
||||
|
||||
- name: Push stable release branch commit and tag
|
||||
if: inputs.channel == 'stable' && !inputs.dry_run
|
||||
run: git push origin "HEAD:${GITHUB_REF_NAME}" --follow-tags
|
||||
|
||||
- name: Create GitHub Release
|
||||
if: inputs.channel == 'stable' && !inputs.dry_run
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
version="$(git tag --points-at HEAD | grep '^v' | head -1 | sed 's/^v//')"
|
||||
if [ -z "$version" ]; then
|
||||
echo "Error: no v* tag points at HEAD after stable release." >&2
|
||||
exit 1
|
||||
fi
|
||||
./scripts/create-github-release.sh "$version"
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -36,4 +36,8 @@ tmp/
|
||||
*.tmp
|
||||
.vscode/
|
||||
.claude/settings.local.json
|
||||
.paperclip-local/
|
||||
.paperclip-local/
|
||||
|
||||
# Playwright
|
||||
tests/e2e/test-results/
|
||||
tests/e2e/playwright-report/
|
||||
41
CONTRIBUTING.md
Normal file
41
CONTRIBUTING.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# Contributing Guide
|
||||
|
||||
Thanks for wanting to contribute!
|
||||
|
||||
We really appreciate both small fixes and thoughtful larger changes.
|
||||
|
||||
## Two Paths to Get Your Pull Request Accepted
|
||||
|
||||
### Path 1: Small, Focused Changes (Fastest way to get merged)
|
||||
- Pick **one** clear thing to fix/improve
|
||||
- Touch the **smallest possible number of files**
|
||||
- Make sure the change is very targeted and easy to review
|
||||
- All automated checks pass (including Greptile comments)
|
||||
- No new lint/test failures
|
||||
|
||||
These almost always get merged quickly when they're clean.
|
||||
|
||||
### Path 2: Bigger or Impactful Changes
|
||||
- **First** talk about it in Discord → #dev channel
|
||||
→ Describe what you're trying to solve
|
||||
→ Share rough ideas / approach
|
||||
- Once there's rough agreement, build it
|
||||
- In your PR include:
|
||||
- Before / After screenshots (or short video if UI/behavior change)
|
||||
- Clear description of what & why
|
||||
- Proof it works (manual testing notes)
|
||||
- All tests passing
|
||||
- All Greptile + other PR comments addressed
|
||||
|
||||
PRs that follow this path are **much** more likely to be accepted, even when they're large.
|
||||
|
||||
## General Rules (both paths)
|
||||
- Write clear commit messages
|
||||
- Keep PR title + description meaningful
|
||||
- One PR = one logical change (unless it's a small related group)
|
||||
- Run tests locally first
|
||||
- Be kind in discussions 😄
|
||||
|
||||
Questions? Just ask in #dev — we're happy to help.
|
||||
|
||||
Happy hacking!
|
||||
21
Dockerfile
21
Dockerfile
@@ -1,4 +1,4 @@
|
||||
FROM node:20-bookworm-slim AS base
|
||||
FROM node:lts-trixie-slim AS base
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends ca-certificates curl git \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
@@ -15,19 +15,27 @@ COPY packages/db/package.json packages/db/
|
||||
COPY packages/adapter-utils/package.json packages/adapter-utils/
|
||||
COPY packages/adapters/claude-local/package.json packages/adapters/claude-local/
|
||||
COPY packages/adapters/codex-local/package.json packages/adapters/codex-local/
|
||||
COPY packages/adapters/cursor-local/package.json packages/adapters/cursor-local/
|
||||
COPY packages/adapters/openclaw-gateway/package.json packages/adapters/openclaw-gateway/
|
||||
COPY packages/adapters/opencode-local/package.json packages/adapters/opencode-local/
|
||||
COPY packages/adapters/pi-local/package.json packages/adapters/pi-local/
|
||||
|
||||
RUN pnpm install --frozen-lockfile
|
||||
|
||||
FROM base AS build
|
||||
WORKDIR /app
|
||||
COPY --from=deps /app /app
|
||||
COPY . .
|
||||
RUN pnpm --filter @paperclip/ui build
|
||||
RUN pnpm --filter @paperclip/server build
|
||||
RUN pnpm --filter @paperclipai/ui build
|
||||
RUN pnpm --filter @paperclipai/server build
|
||||
RUN test -f server/dist/index.js || (echo "ERROR: server build output missing" && exit 1)
|
||||
|
||||
FROM base AS production
|
||||
WORKDIR /app
|
||||
COPY --from=build /app /app
|
||||
RUN npm install --global --omit=dev @anthropic-ai/claude-code@latest @openai/codex@latest
|
||||
COPY --chown=node:node --from=build /app /app
|
||||
RUN npm install --global --omit=dev @anthropic-ai/claude-code@latest @openai/codex@latest opencode-ai \
|
||||
&& mkdir -p /paperclip \
|
||||
&& chown node:node /paperclip
|
||||
|
||||
ENV NODE_ENV=production \
|
||||
HOME=/paperclip \
|
||||
@@ -37,10 +45,11 @@ ENV NODE_ENV=production \
|
||||
PAPERCLIP_HOME=/paperclip \
|
||||
PAPERCLIP_INSTANCE_ID=default \
|
||||
PAPERCLIP_CONFIG=/paperclip/instances/default/config.json \
|
||||
PAPERCLIP_DEPLOYMENT_MODE=local_trusted \
|
||||
PAPERCLIP_DEPLOYMENT_MODE=authenticated \
|
||||
PAPERCLIP_DEPLOYMENT_EXPOSURE=private
|
||||
|
||||
VOLUME ["/paperclip"]
|
||||
EXPOSE 3100
|
||||
|
||||
USER node
|
||||
CMD ["node", "--import", "./server/node_modules/tsx/dist/loader.mjs", "server/dist/index.js"]
|
||||
|
||||
21
LICENSE
Normal file
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 Paperclip AI
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
17
README.md
17
README.md
@@ -218,7 +218,8 @@ By default, agents run on scheduled heartbeats and event-based triggers (task as
|
||||
## Development
|
||||
|
||||
```bash
|
||||
pnpm dev # Full dev (API + UI)
|
||||
pnpm dev # Full dev (API + UI, watch mode)
|
||||
pnpm dev:once # Full dev without file watching
|
||||
pnpm dev:server # Server only
|
||||
pnpm build # Build all
|
||||
pnpm typecheck # Type checking
|
||||
@@ -233,9 +234,13 @@ See [doc/DEVELOPING.md](doc/DEVELOPING.md) for the full development guide.
|
||||
|
||||
## Roadmap
|
||||
|
||||
- 🛒 **Clipmart** — Download and share entire company architectures
|
||||
- 🧩 **Plugin System** — Embed custom plugins (e.g. Reporting, Knowledge Base) into Paperclip
|
||||
- ☁️ **Cloud Agent Adapters** — Add more adapters for cloud-hosted agents
|
||||
- ⚪ Get OpenClaw onboarding easier
|
||||
- ⚪ Get cloud agents working e.g. Cursor / e2b agents
|
||||
- ⚪ ClipMart - buy and sell entire agent companies
|
||||
- ⚪ Easy agent configurations / easier to understand
|
||||
- ⚪ Better support for harness engineering
|
||||
- ⚪ Plugin system (e.g. if you want to add a knowledgebase, custom tracing, queues, etc)
|
||||
- ⚪ Better docs
|
||||
|
||||
<br/>
|
||||
|
||||
@@ -259,6 +264,10 @@ We welcome contributions. See the [contributing guide](CONTRIBUTING.md) for deta
|
||||
|
||||
MIT © 2026 Paperclip
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://www.star-history.com/?repos=paperclipai%2Fpaperclip&type=date&legend=top-left)
|
||||
|
||||
<br/>
|
||||
|
||||
---
|
||||
|
||||
@@ -1,5 +1,19 @@
|
||||
# paperclipai
|
||||
|
||||
## 0.2.7
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Version bump (patch)
|
||||
- Updated dependencies
|
||||
- @paperclipai/shared@0.2.7
|
||||
- @paperclipai/adapter-utils@0.2.7
|
||||
- @paperclipai/db@0.2.7
|
||||
- @paperclipai/adapter-claude-local@0.2.7
|
||||
- @paperclipai/adapter-codex-local@0.2.7
|
||||
- @paperclipai/adapter-openclaw@0.2.7
|
||||
- @paperclipai/server@0.2.7
|
||||
|
||||
## 0.2.6
|
||||
|
||||
### Patch Changes
|
||||
|
||||
@@ -21,7 +21,7 @@ const workspacePaths = [
|
||||
"packages/adapter-utils",
|
||||
"packages/adapters/claude-local",
|
||||
"packages/adapters/codex-local",
|
||||
"packages/adapters/openclaw",
|
||||
"packages/adapters/openclaw-gateway",
|
||||
];
|
||||
|
||||
// Workspace packages that should NOT be bundled — they'll be published
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "paperclipai",
|
||||
"version": "0.2.6",
|
||||
"version": "0.2.7",
|
||||
"description": "Paperclip CLI — orchestrate AI agent teams to run a business",
|
||||
"type": "module",
|
||||
"bin": {
|
||||
@@ -36,7 +36,10 @@
|
||||
"@clack/prompts": "^0.10.0",
|
||||
"@paperclipai/adapter-claude-local": "workspace:*",
|
||||
"@paperclipai/adapter-codex-local": "workspace:*",
|
||||
"@paperclipai/adapter-openclaw": "workspace:*",
|
||||
"@paperclipai/adapter-cursor-local": "workspace:*",
|
||||
"@paperclipai/adapter-opencode-local": "workspace:*",
|
||||
"@paperclipai/adapter-pi-local": "workspace:*",
|
||||
"@paperclipai/adapter-openclaw-gateway": "workspace:*",
|
||||
"@paperclipai/adapter-utils": "workspace:*",
|
||||
"@paperclipai/db": "workspace:*",
|
||||
"@paperclipai/server": "workspace:*",
|
||||
|
||||
@@ -21,6 +21,12 @@ function writeBaseConfig(configPath: string) {
|
||||
mode: "embedded-postgres",
|
||||
embeddedPostgresDataDir: "/tmp/paperclip-db",
|
||||
embeddedPostgresPort: 54329,
|
||||
backup: {
|
||||
enabled: true,
|
||||
intervalMinutes: 60,
|
||||
retentionDays: 30,
|
||||
dir: "/tmp/paperclip-backups",
|
||||
},
|
||||
},
|
||||
logging: {
|
||||
mode: "file",
|
||||
@@ -36,6 +42,7 @@ function writeBaseConfig(configPath: string) {
|
||||
},
|
||||
auth: {
|
||||
baseUrlMode: "auto",
|
||||
disableSignUp: false,
|
||||
},
|
||||
storage: {
|
||||
provider: "local_disk",
|
||||
@@ -68,4 +75,3 @@ describe("allowed-hostname command", () => {
|
||||
expect(raw.server.allowedHostnames).toEqual(["dotta-macbook-pro"]);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
import type { CLIAdapterModule } from "@paperclipai/adapter-utils";
|
||||
import { printClaudeStreamEvent } from "@paperclipai/adapter-claude-local/cli";
|
||||
import { printCodexStreamEvent } from "@paperclipai/adapter-codex-local/cli";
|
||||
import { printOpenClawStreamEvent } from "@paperclipai/adapter-openclaw/cli";
|
||||
import { printCursorStreamEvent } from "@paperclipai/adapter-cursor-local/cli";
|
||||
import { printOpenCodeStreamEvent } from "@paperclipai/adapter-opencode-local/cli";
|
||||
import { printPiStreamEvent } from "@paperclipai/adapter-pi-local/cli";
|
||||
import { printOpenClawGatewayStreamEvent } from "@paperclipai/adapter-openclaw-gateway/cli";
|
||||
import { processCLIAdapter } from "./process/index.js";
|
||||
import { httpCLIAdapter } from "./http/index.js";
|
||||
|
||||
@@ -15,13 +18,37 @@ const codexLocalCLIAdapter: CLIAdapterModule = {
|
||||
formatStdoutEvent: printCodexStreamEvent,
|
||||
};
|
||||
|
||||
const openclawCLIAdapter: CLIAdapterModule = {
|
||||
type: "openclaw",
|
||||
formatStdoutEvent: printOpenClawStreamEvent,
|
||||
const openCodeLocalCLIAdapter: CLIAdapterModule = {
|
||||
type: "opencode_local",
|
||||
formatStdoutEvent: printOpenCodeStreamEvent,
|
||||
};
|
||||
|
||||
const piLocalCLIAdapter: CLIAdapterModule = {
|
||||
type: "pi_local",
|
||||
formatStdoutEvent: printPiStreamEvent,
|
||||
};
|
||||
|
||||
const cursorLocalCLIAdapter: CLIAdapterModule = {
|
||||
type: "cursor",
|
||||
formatStdoutEvent: printCursorStreamEvent,
|
||||
};
|
||||
|
||||
const openclawGatewayCLIAdapter: CLIAdapterModule = {
|
||||
type: "openclaw_gateway",
|
||||
formatStdoutEvent: printOpenClawGatewayStreamEvent,
|
||||
};
|
||||
|
||||
const adaptersByType = new Map<string, CLIAdapterModule>(
|
||||
[claudeLocalCLIAdapter, codexLocalCLIAdapter, openclawCLIAdapter, processCLIAdapter, httpCLIAdapter].map((a) => [a.type, a]),
|
||||
[
|
||||
claudeLocalCLIAdapter,
|
||||
codexLocalCLIAdapter,
|
||||
openCodeLocalCLIAdapter,
|
||||
piLocalCLIAdapter,
|
||||
cursorLocalCLIAdapter,
|
||||
openclawGatewayCLIAdapter,
|
||||
processCLIAdapter,
|
||||
httpCLIAdapter,
|
||||
].map((a) => [a.type, a]),
|
||||
);
|
||||
|
||||
export function getCLIAdapter(type: string): CLIAdapterModule {
|
||||
|
||||
@@ -104,8 +104,10 @@ export class PaperclipApiClient {
|
||||
|
||||
function buildUrl(apiBase: string, path: string): string {
|
||||
const normalizedPath = path.startsWith("/") ? path : `/${path}`;
|
||||
const [pathname, query] = normalizedPath.split("?");
|
||||
const url = new URL(apiBase);
|
||||
url.pathname = `${url.pathname.replace(/\/+$/, "")}${normalizedPath}`;
|
||||
url.pathname = `${url.pathname.replace(/\/+$/, "")}${pathname}`;
|
||||
if (query) url.search = query;
|
||||
return url.toString();
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ import * as p from "@clack/prompts";
|
||||
import pc from "picocolors";
|
||||
import { and, eq, gt, isNull } from "drizzle-orm";
|
||||
import { createDb, instanceUserRoles, invites } from "@paperclipai/db";
|
||||
import { loadPaperclipEnvFile } from "../config/env.js";
|
||||
import { readConfig, resolveConfigPath } from "../config/store.js";
|
||||
|
||||
function hashToken(token: string) {
|
||||
@@ -13,7 +14,8 @@ function createInviteToken() {
|
||||
return `pcp_bootstrap_${randomBytes(24).toString("hex")}`;
|
||||
}
|
||||
|
||||
function resolveDbUrl(configPath?: string) {
|
||||
function resolveDbUrl(configPath?: string, explicitDbUrl?: string) {
|
||||
if (explicitDbUrl) return explicitDbUrl;
|
||||
const config = readConfig(configPath);
|
||||
if (process.env.DATABASE_URL) return process.env.DATABASE_URL;
|
||||
if (config?.database.mode === "postgres" && config.database.connectionString) {
|
||||
@@ -28,6 +30,12 @@ function resolveDbUrl(configPath?: string) {
|
||||
|
||||
function resolveBaseUrl(configPath?: string, explicitBaseUrl?: string) {
|
||||
if (explicitBaseUrl) return explicitBaseUrl.replace(/\/+$/, "");
|
||||
const fromEnv =
|
||||
process.env.PAPERCLIP_PUBLIC_URL ??
|
||||
process.env.PAPERCLIP_AUTH_PUBLIC_BASE_URL ??
|
||||
process.env.BETTER_AUTH_URL ??
|
||||
process.env.BETTER_AUTH_BASE_URL;
|
||||
if (fromEnv?.trim()) return fromEnv.trim().replace(/\/+$/, "");
|
||||
const config = readConfig(configPath);
|
||||
if (config?.auth.baseUrlMode === "explicit" && config.auth.publicBaseUrl) {
|
||||
return config.auth.publicBaseUrl.replace(/\/+$/, "");
|
||||
@@ -43,8 +51,10 @@ export async function bootstrapCeoInvite(opts: {
|
||||
force?: boolean;
|
||||
expiresHours?: number;
|
||||
baseUrl?: string;
|
||||
dbUrl?: string;
|
||||
}) {
|
||||
const configPath = resolveConfigPath(opts.config);
|
||||
loadPaperclipEnvFile(configPath);
|
||||
const config = readConfig(configPath);
|
||||
if (!config) {
|
||||
p.log.error(`No config found at ${configPath}. Run ${pc.cyan("paperclip onboard")} first.`);
|
||||
@@ -56,7 +66,7 @@ export async function bootstrapCeoInvite(opts: {
|
||||
return;
|
||||
}
|
||||
|
||||
const dbUrl = resolveDbUrl(configPath);
|
||||
const dbUrl = resolveDbUrl(configPath, opts.dbUrl);
|
||||
if (!dbUrl) {
|
||||
p.log.error(
|
||||
"Could not resolve database connection for bootstrap.",
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
import { Command } from "commander";
|
||||
import type { Agent } from "@paperclipai/shared";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import {
|
||||
addCommonClientOptions,
|
||||
formatInlineRecord,
|
||||
@@ -13,6 +17,107 @@ interface AgentListOptions extends BaseClientOptions {
|
||||
companyId?: string;
|
||||
}
|
||||
|
||||
interface AgentLocalCliOptions extends BaseClientOptions {
|
||||
companyId?: string;
|
||||
keyName?: string;
|
||||
installSkills?: boolean;
|
||||
}
|
||||
|
||||
interface CreatedAgentKey {
|
||||
id: string;
|
||||
name: string;
|
||||
token: string;
|
||||
createdAt: string;
|
||||
}
|
||||
|
||||
interface SkillsInstallSummary {
|
||||
tool: "codex" | "claude";
|
||||
target: string;
|
||||
linked: string[];
|
||||
skipped: string[];
|
||||
failed: Array<{ name: string; error: string }>;
|
||||
}
|
||||
|
||||
const __moduleDir = path.dirname(fileURLToPath(import.meta.url));
|
||||
const PAPERCLIP_SKILLS_CANDIDATES = [
|
||||
path.resolve(__moduleDir, "../../../../../skills"), // dev: cli/src/commands/client -> repo root/skills
|
||||
path.resolve(process.cwd(), "skills"),
|
||||
];
|
||||
|
||||
function codexSkillsHome(): string {
|
||||
const fromEnv = process.env.CODEX_HOME?.trim();
|
||||
const base = fromEnv && fromEnv.length > 0 ? fromEnv : path.join(os.homedir(), ".codex");
|
||||
return path.join(base, "skills");
|
||||
}
|
||||
|
||||
function claudeSkillsHome(): string {
|
||||
const fromEnv = process.env.CLAUDE_HOME?.trim();
|
||||
const base = fromEnv && fromEnv.length > 0 ? fromEnv : path.join(os.homedir(), ".claude");
|
||||
return path.join(base, "skills");
|
||||
}
|
||||
|
||||
async function resolvePaperclipSkillsDir(): Promise<string | null> {
|
||||
for (const candidate of PAPERCLIP_SKILLS_CANDIDATES) {
|
||||
const isDir = await fs.stat(candidate).then((s) => s.isDirectory()).catch(() => false);
|
||||
if (isDir) return candidate;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
async function installSkillsForTarget(
|
||||
sourceSkillsDir: string,
|
||||
targetSkillsDir: string,
|
||||
tool: "codex" | "claude",
|
||||
): Promise<SkillsInstallSummary> {
|
||||
const summary: SkillsInstallSummary = {
|
||||
tool,
|
||||
target: targetSkillsDir,
|
||||
linked: [],
|
||||
skipped: [],
|
||||
failed: [],
|
||||
};
|
||||
|
||||
await fs.mkdir(targetSkillsDir, { recursive: true });
|
||||
const entries = await fs.readdir(sourceSkillsDir, { withFileTypes: true });
|
||||
for (const entry of entries) {
|
||||
if (!entry.isDirectory()) continue;
|
||||
const source = path.join(sourceSkillsDir, entry.name);
|
||||
const target = path.join(targetSkillsDir, entry.name);
|
||||
const existing = await fs.lstat(target).catch(() => null);
|
||||
if (existing) {
|
||||
summary.skipped.push(entry.name);
|
||||
continue;
|
||||
}
|
||||
|
||||
try {
|
||||
await fs.symlink(source, target);
|
||||
summary.linked.push(entry.name);
|
||||
} catch (err) {
|
||||
summary.failed.push({
|
||||
name: entry.name,
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return summary;
|
||||
}
|
||||
|
||||
function buildAgentEnvExports(input: {
|
||||
apiBase: string;
|
||||
companyId: string;
|
||||
agentId: string;
|
||||
apiKey: string;
|
||||
}): string {
|
||||
const escaped = (value: string) => value.replace(/'/g, "'\"'\"'");
|
||||
return [
|
||||
`export PAPERCLIP_API_URL='${escaped(input.apiBase)}'`,
|
||||
`export PAPERCLIP_COMPANY_ID='${escaped(input.companyId)}'`,
|
||||
`export PAPERCLIP_AGENT_ID='${escaped(input.agentId)}'`,
|
||||
`export PAPERCLIP_API_KEY='${escaped(input.apiKey)}'`,
|
||||
].join("\n");
|
||||
}
|
||||
|
||||
export function registerAgentCommands(program: Command): void {
|
||||
const agent = program.command("agent").description("Agent operations");
|
||||
|
||||
@@ -71,4 +176,102 @@ export function registerAgentCommands(program: Command): void {
|
||||
}
|
||||
}),
|
||||
);
|
||||
|
||||
addCommonClientOptions(
|
||||
agent
|
||||
.command("local-cli")
|
||||
.description(
|
||||
"Create an agent API key, install local Paperclip skills for Codex/Claude, and print shell exports",
|
||||
)
|
||||
.argument("<agentRef>", "Agent ID or shortname/url-key")
|
||||
.requiredOption("-C, --company-id <id>", "Company ID")
|
||||
.option("--key-name <name>", "API key label", "local-cli")
|
||||
.option(
|
||||
"--no-install-skills",
|
||||
"Skip installing Paperclip skills into ~/.codex/skills and ~/.claude/skills",
|
||||
)
|
||||
.action(async (agentRef: string, opts: AgentLocalCliOptions) => {
|
||||
try {
|
||||
const ctx = resolveCommandContext(opts, { requireCompany: true });
|
||||
const query = new URLSearchParams({ companyId: ctx.companyId ?? "" });
|
||||
const agentRow = await ctx.api.get<Agent>(
|
||||
`/api/agents/${encodeURIComponent(agentRef)}?${query.toString()}`,
|
||||
);
|
||||
if (!agentRow) {
|
||||
throw new Error(`Agent not found: ${agentRef}`);
|
||||
}
|
||||
|
||||
const now = new Date().toISOString().replaceAll(":", "-");
|
||||
const keyName = opts.keyName?.trim() ? opts.keyName.trim() : `local-cli-${now}`;
|
||||
const key = await ctx.api.post<CreatedAgentKey>(`/api/agents/${agentRow.id}/keys`, { name: keyName });
|
||||
if (!key) {
|
||||
throw new Error("Failed to create API key");
|
||||
}
|
||||
|
||||
const installSummaries: SkillsInstallSummary[] = [];
|
||||
if (opts.installSkills !== false) {
|
||||
const skillsDir = await resolvePaperclipSkillsDir();
|
||||
if (!skillsDir) {
|
||||
throw new Error(
|
||||
"Could not locate local Paperclip skills directory. Expected ./skills in the repo checkout.",
|
||||
);
|
||||
}
|
||||
|
||||
installSummaries.push(
|
||||
await installSkillsForTarget(skillsDir, codexSkillsHome(), "codex"),
|
||||
await installSkillsForTarget(skillsDir, claudeSkillsHome(), "claude"),
|
||||
);
|
||||
}
|
||||
|
||||
const exportsText = buildAgentEnvExports({
|
||||
apiBase: ctx.api.apiBase,
|
||||
companyId: agentRow.companyId,
|
||||
agentId: agentRow.id,
|
||||
apiKey: key.token,
|
||||
});
|
||||
|
||||
if (ctx.json) {
|
||||
printOutput(
|
||||
{
|
||||
agent: {
|
||||
id: agentRow.id,
|
||||
name: agentRow.name,
|
||||
urlKey: agentRow.urlKey,
|
||||
companyId: agentRow.companyId,
|
||||
},
|
||||
key: {
|
||||
id: key.id,
|
||||
name: key.name,
|
||||
createdAt: key.createdAt,
|
||||
token: key.token,
|
||||
},
|
||||
skills: installSummaries,
|
||||
exports: exportsText,
|
||||
},
|
||||
{ json: true },
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(`Agent: ${agentRow.name} (${agentRow.id})`);
|
||||
console.log(`API key created: ${key.name} (${key.id})`);
|
||||
if (installSummaries.length > 0) {
|
||||
for (const summary of installSummaries) {
|
||||
console.log(
|
||||
`${summary.tool}: linked=${summary.linked.length} skipped=${summary.skipped.length} failed=${summary.failed.length} target=${summary.target}`,
|
||||
);
|
||||
for (const failed of summary.failed) {
|
||||
console.log(` failed ${failed.name}: ${failed.error}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
console.log("");
|
||||
console.log("# Run this in your shell before launching codex/claude:");
|
||||
console.log(exportsText);
|
||||
} catch (err) {
|
||||
handleCommandError(err);
|
||||
}
|
||||
}),
|
||||
{ includeCompany: false },
|
||||
);
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import { defaultSecretsConfig, promptSecrets } from "../prompts/secrets.js";
|
||||
import { defaultStorageConfig, promptStorage } from "../prompts/storage.js";
|
||||
import { promptServer } from "../prompts/server.js";
|
||||
import {
|
||||
resolveDefaultBackupDir,
|
||||
resolveDefaultEmbeddedPostgresDir,
|
||||
resolveDefaultLogsDir,
|
||||
resolvePaperclipInstanceId,
|
||||
@@ -39,6 +40,12 @@ function defaultConfig(): PaperclipConfig {
|
||||
mode: "embedded-postgres",
|
||||
embeddedPostgresDataDir: resolveDefaultEmbeddedPostgresDir(instanceId),
|
||||
embeddedPostgresPort: 54329,
|
||||
backup: {
|
||||
enabled: true,
|
||||
intervalMinutes: 60,
|
||||
retentionDays: 30,
|
||||
dir: resolveDefaultBackupDir(instanceId),
|
||||
},
|
||||
},
|
||||
logging: {
|
||||
mode: "file",
|
||||
@@ -54,6 +61,7 @@ function defaultConfig(): PaperclipConfig {
|
||||
},
|
||||
auth: {
|
||||
baseUrlMode: "auto",
|
||||
disableSignUp: false,
|
||||
},
|
||||
storage: defaultStorageConfig(),
|
||||
secrets: defaultSecretsConfig(),
|
||||
@@ -118,7 +126,7 @@ export async function configure(opts: {
|
||||
|
||||
switch (section) {
|
||||
case "database":
|
||||
config.database = await promptDatabase();
|
||||
config.database = await promptDatabase(config.database);
|
||||
break;
|
||||
case "llm": {
|
||||
const llm = await promptLlm();
|
||||
|
||||
102
cli/src/commands/db-backup.ts
Normal file
102
cli/src/commands/db-backup.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
import path from "node:path";
|
||||
import * as p from "@clack/prompts";
|
||||
import pc from "picocolors";
|
||||
import { formatDatabaseBackupResult, runDatabaseBackup } from "@paperclipai/db";
|
||||
import {
|
||||
expandHomePrefix,
|
||||
resolveDefaultBackupDir,
|
||||
resolvePaperclipInstanceId,
|
||||
} from "../config/home.js";
|
||||
import { readConfig, resolveConfigPath } from "../config/store.js";
|
||||
import { printPaperclipCliBanner } from "../utils/banner.js";
|
||||
|
||||
type DbBackupOptions = {
|
||||
config?: string;
|
||||
dir?: string;
|
||||
retentionDays?: number;
|
||||
filenamePrefix?: string;
|
||||
json?: boolean;
|
||||
};
|
||||
|
||||
function resolveConnectionString(configPath?: string): { value: string; source: string } {
|
||||
const envUrl = process.env.DATABASE_URL?.trim();
|
||||
if (envUrl) return { value: envUrl, source: "DATABASE_URL" };
|
||||
|
||||
const config = readConfig(configPath);
|
||||
if (config?.database.mode === "postgres" && config.database.connectionString?.trim()) {
|
||||
return { value: config.database.connectionString.trim(), source: "config.database.connectionString" };
|
||||
}
|
||||
|
||||
const port = config?.database.embeddedPostgresPort ?? 54329;
|
||||
return {
|
||||
value: `postgres://paperclip:paperclip@127.0.0.1:${port}/paperclip`,
|
||||
source: `embedded-postgres@${port}`,
|
||||
};
|
||||
}
|
||||
|
||||
function normalizeRetentionDays(value: number | undefined, fallback: number): number {
|
||||
const candidate = value ?? fallback;
|
||||
if (!Number.isInteger(candidate) || candidate < 1) {
|
||||
throw new Error(`Invalid retention days '${String(candidate)}'. Use a positive integer.`);
|
||||
}
|
||||
return candidate;
|
||||
}
|
||||
|
||||
function resolveBackupDir(raw: string): string {
|
||||
return path.resolve(expandHomePrefix(raw.trim()));
|
||||
}
|
||||
|
||||
export async function dbBackupCommand(opts: DbBackupOptions): Promise<void> {
|
||||
printPaperclipCliBanner();
|
||||
p.intro(pc.bgCyan(pc.black(" paperclip db:backup ")));
|
||||
|
||||
const configPath = resolveConfigPath(opts.config);
|
||||
const config = readConfig(opts.config);
|
||||
const connection = resolveConnectionString(opts.config);
|
||||
const defaultDir = resolveDefaultBackupDir(resolvePaperclipInstanceId());
|
||||
const configuredDir = opts.dir?.trim() || config?.database.backup.dir || defaultDir;
|
||||
const backupDir = resolveBackupDir(configuredDir);
|
||||
const retentionDays = normalizeRetentionDays(
|
||||
opts.retentionDays,
|
||||
config?.database.backup.retentionDays ?? 30,
|
||||
);
|
||||
const filenamePrefix = opts.filenamePrefix?.trim() || "paperclip";
|
||||
|
||||
p.log.message(pc.dim(`Config: ${configPath}`));
|
||||
p.log.message(pc.dim(`Connection source: ${connection.source}`));
|
||||
p.log.message(pc.dim(`Backup dir: ${backupDir}`));
|
||||
p.log.message(pc.dim(`Retention: ${retentionDays} day(s)`));
|
||||
|
||||
const spinner = p.spinner();
|
||||
spinner.start("Creating database backup...");
|
||||
try {
|
||||
const result = await runDatabaseBackup({
|
||||
connectionString: connection.value,
|
||||
backupDir,
|
||||
retentionDays,
|
||||
filenamePrefix,
|
||||
});
|
||||
spinner.stop(`Backup saved: ${formatDatabaseBackupResult(result)}`);
|
||||
|
||||
if (opts.json) {
|
||||
console.log(
|
||||
JSON.stringify(
|
||||
{
|
||||
backupFile: result.backupFile,
|
||||
sizeBytes: result.sizeBytes,
|
||||
prunedCount: result.prunedCount,
|
||||
backupDir,
|
||||
retentionDays,
|
||||
connectionSource: connection.source,
|
||||
},
|
||||
null,
|
||||
2,
|
||||
),
|
||||
);
|
||||
}
|
||||
p.outro(pc.green("Backup completed."));
|
||||
} catch (err) {
|
||||
spinner.stop(pc.red("Backup failed."));
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
@@ -14,6 +14,7 @@ import {
|
||||
storageCheck,
|
||||
type CheckResult,
|
||||
} from "../checks/index.js";
|
||||
import { loadPaperclipEnvFile } from "../config/env.js";
|
||||
import { printPaperclipCliBanner } from "../utils/banner.js";
|
||||
|
||||
const STATUS_ICON = {
|
||||
@@ -31,6 +32,7 @@ export async function doctor(opts: {
|
||||
p.intro(pc.bgCyan(pc.black(" paperclip doctor ")));
|
||||
|
||||
const configPath = resolveConfigPath(opts.config);
|
||||
loadPaperclipEnvFile(configPath);
|
||||
const results: CheckResult[] = [];
|
||||
|
||||
// 1. Config check (must pass before others)
|
||||
|
||||
@@ -118,6 +118,29 @@ function collectDeploymentEnvRows(config: PaperclipConfig | null, configPath: st
|
||||
const dbUrl = process.env.DATABASE_URL ?? config?.database?.connectionString ?? "";
|
||||
const databaseMode = config?.database?.mode ?? "embedded-postgres";
|
||||
const dbUrlSource: EnvSource = process.env.DATABASE_URL ? "env" : config?.database?.connectionString ? "config" : "missing";
|
||||
const publicUrl =
|
||||
process.env.PAPERCLIP_PUBLIC_URL ??
|
||||
process.env.PAPERCLIP_AUTH_PUBLIC_BASE_URL ??
|
||||
process.env.BETTER_AUTH_URL ??
|
||||
process.env.BETTER_AUTH_BASE_URL ??
|
||||
config?.auth?.publicBaseUrl ??
|
||||
"";
|
||||
const publicUrlSource: EnvSource =
|
||||
process.env.PAPERCLIP_PUBLIC_URL
|
||||
? "env"
|
||||
: process.env.PAPERCLIP_AUTH_PUBLIC_BASE_URL || process.env.BETTER_AUTH_URL || process.env.BETTER_AUTH_BASE_URL
|
||||
? "env"
|
||||
: config?.auth?.publicBaseUrl
|
||||
? "config"
|
||||
: "missing";
|
||||
let trustedOriginsDefault = "";
|
||||
if (publicUrl) {
|
||||
try {
|
||||
trustedOriginsDefault = new URL(publicUrl).origin;
|
||||
} catch {
|
||||
trustedOriginsDefault = "";
|
||||
}
|
||||
}
|
||||
|
||||
const heartbeatInterval = process.env.HEARTBEAT_SCHEDULER_INTERVAL_MS ?? DEFAULT_HEARTBEAT_SCHEDULER_INTERVAL_MS;
|
||||
const heartbeatEnabled = process.env.HEARTBEAT_SCHEDULER_ENABLED ?? "true";
|
||||
@@ -192,6 +215,24 @@ function collectDeploymentEnvRows(config: PaperclipConfig | null, configPath: st
|
||||
required: false,
|
||||
note: "HTTP listen port",
|
||||
},
|
||||
{
|
||||
key: "PAPERCLIP_PUBLIC_URL",
|
||||
value: publicUrl,
|
||||
source: publicUrlSource,
|
||||
required: false,
|
||||
note: "Canonical public URL for auth/callback/invite origin wiring",
|
||||
},
|
||||
{
|
||||
key: "BETTER_AUTH_TRUSTED_ORIGINS",
|
||||
value: process.env.BETTER_AUTH_TRUSTED_ORIGINS ?? trustedOriginsDefault,
|
||||
source: process.env.BETTER_AUTH_TRUSTED_ORIGINS
|
||||
? "env"
|
||||
: trustedOriginsDefault
|
||||
? "default"
|
||||
: "missing",
|
||||
required: false,
|
||||
note: "Comma-separated auth origin allowlist (auto-derived from PAPERCLIP_PUBLIC_URL when possible)",
|
||||
},
|
||||
{
|
||||
key: "PAPERCLIP_AGENT_JWT_TTL_SECONDS",
|
||||
value: process.env.PAPERCLIP_AGENT_JWT_TTL_SECONDS ?? DEFAULT_AGENT_JWT_TTL_SECONDS,
|
||||
|
||||
@@ -1,5 +1,18 @@
|
||||
import * as p from "@clack/prompts";
|
||||
import path from "node:path";
|
||||
import pc from "picocolors";
|
||||
import {
|
||||
AUTH_BASE_URL_MODES,
|
||||
DEPLOYMENT_EXPOSURES,
|
||||
DEPLOYMENT_MODES,
|
||||
SECRET_PROVIDERS,
|
||||
STORAGE_PROVIDERS,
|
||||
type AuthBaseUrlMode,
|
||||
type DeploymentExposure,
|
||||
type DeploymentMode,
|
||||
type SecretProvider,
|
||||
type StorageProvider,
|
||||
} from "@paperclipai/shared";
|
||||
import { configExists, readConfig, resolveConfigPath, writeConfig } from "../config/store.js";
|
||||
import type { PaperclipConfig } from "../config/schema.js";
|
||||
import { ensureAgentJwtSecret, resolveAgentJwtEnvFile } from "../config/env.js";
|
||||
@@ -12,6 +25,8 @@ import { defaultStorageConfig, promptStorage } from "../prompts/storage.js";
|
||||
import { promptServer } from "../prompts/server.js";
|
||||
import {
|
||||
describeLocalInstancePaths,
|
||||
expandHomePrefix,
|
||||
resolveDefaultBackupDir,
|
||||
resolveDefaultEmbeddedPostgresDir,
|
||||
resolveDefaultLogsDir,
|
||||
resolvePaperclipInstanceId,
|
||||
@@ -28,32 +43,194 @@ type OnboardOptions = {
|
||||
invokedByRun?: boolean;
|
||||
};
|
||||
|
||||
function quickstartDefaults(): Pick<PaperclipConfig, "database" | "logging" | "server" | "auth" | "storage" | "secrets"> {
|
||||
type OnboardDefaults = Pick<PaperclipConfig, "database" | "logging" | "server" | "auth" | "storage" | "secrets">;
|
||||
|
||||
const ONBOARD_ENV_KEYS = [
|
||||
"PAPERCLIP_PUBLIC_URL",
|
||||
"DATABASE_URL",
|
||||
"PAPERCLIP_DB_BACKUP_ENABLED",
|
||||
"PAPERCLIP_DB_BACKUP_INTERVAL_MINUTES",
|
||||
"PAPERCLIP_DB_BACKUP_RETENTION_DAYS",
|
||||
"PAPERCLIP_DB_BACKUP_DIR",
|
||||
"PAPERCLIP_DEPLOYMENT_MODE",
|
||||
"PAPERCLIP_DEPLOYMENT_EXPOSURE",
|
||||
"HOST",
|
||||
"PORT",
|
||||
"SERVE_UI",
|
||||
"PAPERCLIP_ALLOWED_HOSTNAMES",
|
||||
"PAPERCLIP_AUTH_BASE_URL_MODE",
|
||||
"PAPERCLIP_AUTH_PUBLIC_BASE_URL",
|
||||
"BETTER_AUTH_URL",
|
||||
"BETTER_AUTH_BASE_URL",
|
||||
"PAPERCLIP_STORAGE_PROVIDER",
|
||||
"PAPERCLIP_STORAGE_LOCAL_DIR",
|
||||
"PAPERCLIP_STORAGE_S3_BUCKET",
|
||||
"PAPERCLIP_STORAGE_S3_REGION",
|
||||
"PAPERCLIP_STORAGE_S3_ENDPOINT",
|
||||
"PAPERCLIP_STORAGE_S3_PREFIX",
|
||||
"PAPERCLIP_STORAGE_S3_FORCE_PATH_STYLE",
|
||||
"PAPERCLIP_SECRETS_PROVIDER",
|
||||
"PAPERCLIP_SECRETS_STRICT_MODE",
|
||||
"PAPERCLIP_SECRETS_MASTER_KEY_FILE",
|
||||
] as const;
|
||||
|
||||
function parseBooleanFromEnv(rawValue: string | undefined): boolean | null {
|
||||
if (rawValue === undefined) return null;
|
||||
const lower = rawValue.trim().toLowerCase();
|
||||
if (lower === "true" || lower === "1" || lower === "yes") return true;
|
||||
if (lower === "false" || lower === "0" || lower === "no") return false;
|
||||
return null;
|
||||
}
|
||||
|
||||
function parseNumberFromEnv(rawValue: string | undefined): number | null {
|
||||
if (!rawValue) return null;
|
||||
const parsed = Number(rawValue);
|
||||
if (!Number.isFinite(parsed)) return null;
|
||||
return parsed;
|
||||
}
|
||||
|
||||
function parseEnumFromEnv<T extends string>(rawValue: string | undefined, allowedValues: readonly T[]): T | null {
|
||||
if (!rawValue) return null;
|
||||
return allowedValues.includes(rawValue as T) ? (rawValue as T) : null;
|
||||
}
|
||||
|
||||
function resolvePathFromEnv(rawValue: string | undefined): string | null {
|
||||
if (!rawValue || rawValue.trim().length === 0) return null;
|
||||
return path.resolve(expandHomePrefix(rawValue.trim()));
|
||||
}
|
||||
|
||||
function quickstartDefaultsFromEnv(): {
|
||||
defaults: OnboardDefaults;
|
||||
usedEnvKeys: string[];
|
||||
ignoredEnvKeys: Array<{ key: string; reason: string }>;
|
||||
} {
|
||||
const instanceId = resolvePaperclipInstanceId();
|
||||
return {
|
||||
const defaultStorage = defaultStorageConfig();
|
||||
const defaultSecrets = defaultSecretsConfig();
|
||||
const databaseUrl = process.env.DATABASE_URL?.trim() || undefined;
|
||||
const publicUrl =
|
||||
process.env.PAPERCLIP_PUBLIC_URL?.trim() ||
|
||||
process.env.PAPERCLIP_AUTH_PUBLIC_BASE_URL?.trim() ||
|
||||
process.env.BETTER_AUTH_URL?.trim() ||
|
||||
process.env.BETTER_AUTH_BASE_URL?.trim() ||
|
||||
undefined;
|
||||
const deploymentMode =
|
||||
parseEnumFromEnv<DeploymentMode>(process.env.PAPERCLIP_DEPLOYMENT_MODE, DEPLOYMENT_MODES) ?? "local_trusted";
|
||||
const deploymentExposureFromEnv = parseEnumFromEnv<DeploymentExposure>(
|
||||
process.env.PAPERCLIP_DEPLOYMENT_EXPOSURE,
|
||||
DEPLOYMENT_EXPOSURES,
|
||||
);
|
||||
const deploymentExposure =
|
||||
deploymentMode === "local_trusted" ? "private" : (deploymentExposureFromEnv ?? "private");
|
||||
const authPublicBaseUrl = publicUrl;
|
||||
const authBaseUrlModeFromEnv = parseEnumFromEnv<AuthBaseUrlMode>(
|
||||
process.env.PAPERCLIP_AUTH_BASE_URL_MODE,
|
||||
AUTH_BASE_URL_MODES,
|
||||
);
|
||||
const authBaseUrlMode = authBaseUrlModeFromEnv ?? (authPublicBaseUrl ? "explicit" : "auto");
|
||||
const allowedHostnamesFromEnv = process.env.PAPERCLIP_ALLOWED_HOSTNAMES
|
||||
? process.env.PAPERCLIP_ALLOWED_HOSTNAMES
|
||||
.split(",")
|
||||
.map((value) => value.trim().toLowerCase())
|
||||
.filter((value) => value.length > 0)
|
||||
: [];
|
||||
const hostnameFromPublicUrl = publicUrl
|
||||
? (() => {
|
||||
try {
|
||||
return new URL(publicUrl).hostname.trim().toLowerCase();
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
})()
|
||||
: null;
|
||||
const storageProvider =
|
||||
parseEnumFromEnv<StorageProvider>(process.env.PAPERCLIP_STORAGE_PROVIDER, STORAGE_PROVIDERS) ??
|
||||
defaultStorage.provider;
|
||||
const secretsProvider =
|
||||
parseEnumFromEnv<SecretProvider>(process.env.PAPERCLIP_SECRETS_PROVIDER, SECRET_PROVIDERS) ??
|
||||
defaultSecrets.provider;
|
||||
const databaseBackupEnabled = parseBooleanFromEnv(process.env.PAPERCLIP_DB_BACKUP_ENABLED) ?? true;
|
||||
const databaseBackupIntervalMinutes = Math.max(
|
||||
1,
|
||||
parseNumberFromEnv(process.env.PAPERCLIP_DB_BACKUP_INTERVAL_MINUTES) ?? 60,
|
||||
);
|
||||
const databaseBackupRetentionDays = Math.max(
|
||||
1,
|
||||
parseNumberFromEnv(process.env.PAPERCLIP_DB_BACKUP_RETENTION_DAYS) ?? 30,
|
||||
);
|
||||
const defaults: OnboardDefaults = {
|
||||
database: {
|
||||
mode: "embedded-postgres",
|
||||
mode: databaseUrl ? "postgres" : "embedded-postgres",
|
||||
...(databaseUrl ? { connectionString: databaseUrl } : {}),
|
||||
embeddedPostgresDataDir: resolveDefaultEmbeddedPostgresDir(instanceId),
|
||||
embeddedPostgresPort: 54329,
|
||||
backup: {
|
||||
enabled: databaseBackupEnabled,
|
||||
intervalMinutes: databaseBackupIntervalMinutes,
|
||||
retentionDays: databaseBackupRetentionDays,
|
||||
dir: resolvePathFromEnv(process.env.PAPERCLIP_DB_BACKUP_DIR) ?? resolveDefaultBackupDir(instanceId),
|
||||
},
|
||||
},
|
||||
logging: {
|
||||
mode: "file",
|
||||
logDir: resolveDefaultLogsDir(instanceId),
|
||||
},
|
||||
server: {
|
||||
deploymentMode: "local_trusted",
|
||||
exposure: "private",
|
||||
host: "127.0.0.1",
|
||||
port: 3100,
|
||||
allowedHostnames: [],
|
||||
serveUi: true,
|
||||
deploymentMode,
|
||||
exposure: deploymentExposure,
|
||||
host: process.env.HOST ?? "127.0.0.1",
|
||||
port: Number(process.env.PORT) || 3100,
|
||||
allowedHostnames: Array.from(new Set([...allowedHostnamesFromEnv, ...(hostnameFromPublicUrl ? [hostnameFromPublicUrl] : [])])),
|
||||
serveUi: parseBooleanFromEnv(process.env.SERVE_UI) ?? true,
|
||||
},
|
||||
auth: {
|
||||
baseUrlMode: "auto",
|
||||
baseUrlMode: authBaseUrlMode,
|
||||
disableSignUp: false,
|
||||
...(authPublicBaseUrl ? { publicBaseUrl: authPublicBaseUrl } : {}),
|
||||
},
|
||||
storage: {
|
||||
provider: storageProvider,
|
||||
localDisk: {
|
||||
baseDir:
|
||||
resolvePathFromEnv(process.env.PAPERCLIP_STORAGE_LOCAL_DIR) ?? defaultStorage.localDisk.baseDir,
|
||||
},
|
||||
s3: {
|
||||
bucket: process.env.PAPERCLIP_STORAGE_S3_BUCKET ?? defaultStorage.s3.bucket,
|
||||
region: process.env.PAPERCLIP_STORAGE_S3_REGION ?? defaultStorage.s3.region,
|
||||
endpoint: process.env.PAPERCLIP_STORAGE_S3_ENDPOINT ?? defaultStorage.s3.endpoint,
|
||||
prefix: process.env.PAPERCLIP_STORAGE_S3_PREFIX ?? defaultStorage.s3.prefix,
|
||||
forcePathStyle:
|
||||
parseBooleanFromEnv(process.env.PAPERCLIP_STORAGE_S3_FORCE_PATH_STYLE) ??
|
||||
defaultStorage.s3.forcePathStyle,
|
||||
},
|
||||
},
|
||||
secrets: {
|
||||
provider: secretsProvider,
|
||||
strictMode: parseBooleanFromEnv(process.env.PAPERCLIP_SECRETS_STRICT_MODE) ?? defaultSecrets.strictMode,
|
||||
localEncrypted: {
|
||||
keyFilePath:
|
||||
resolvePathFromEnv(process.env.PAPERCLIP_SECRETS_MASTER_KEY_FILE) ??
|
||||
defaultSecrets.localEncrypted.keyFilePath,
|
||||
},
|
||||
},
|
||||
storage: defaultStorageConfig(),
|
||||
secrets: defaultSecretsConfig(),
|
||||
};
|
||||
const ignoredEnvKeys: Array<{ key: string; reason: string }> = [];
|
||||
if (deploymentMode === "local_trusted" && process.env.PAPERCLIP_DEPLOYMENT_EXPOSURE !== undefined) {
|
||||
ignoredEnvKeys.push({
|
||||
key: "PAPERCLIP_DEPLOYMENT_EXPOSURE",
|
||||
reason: "Ignored because deployment mode local_trusted always forces private exposure",
|
||||
});
|
||||
}
|
||||
|
||||
const ignoredKeySet = new Set(ignoredEnvKeys.map((entry) => entry.key));
|
||||
const usedEnvKeys = ONBOARD_ENV_KEYS.filter(
|
||||
(key) => process.env[key] !== undefined && !ignoredKeySet.has(key),
|
||||
);
|
||||
return { defaults, usedEnvKeys, ignoredEnvKeys };
|
||||
}
|
||||
|
||||
function canCreateBootstrapInviteImmediately(config: Pick<PaperclipConfig, "database" | "server">): boolean {
|
||||
return config.server.deploymentMode === "authenticated" && config.database.mode !== "embedded-postgres";
|
||||
}
|
||||
|
||||
export async function onboard(opts: OnboardOptions): Promise<void> {
|
||||
@@ -109,6 +286,7 @@ export async function onboard(opts: OnboardOptions): Promise<void> {
|
||||
}
|
||||
|
||||
let llm: PaperclipConfig["llm"] | undefined;
|
||||
const { defaults: derivedDefaults, usedEnvKeys, ignoredEnvKeys } = quickstartDefaultsFromEnv();
|
||||
let {
|
||||
database,
|
||||
logging,
|
||||
@@ -116,11 +294,11 @@ export async function onboard(opts: OnboardOptions): Promise<void> {
|
||||
auth,
|
||||
storage,
|
||||
secrets,
|
||||
} = quickstartDefaults();
|
||||
} = derivedDefaults;
|
||||
|
||||
if (setupMode === "advanced") {
|
||||
p.log.step(pc.bold("Database"));
|
||||
database = await promptDatabase();
|
||||
database = await promptDatabase(database);
|
||||
|
||||
if (database.mode === "postgres" && database.connectionString) {
|
||||
const s = p.spinner();
|
||||
@@ -184,13 +362,20 @@ export async function onboard(opts: OnboardOptions): Promise<void> {
|
||||
logging = await promptLogging();
|
||||
|
||||
p.log.step(pc.bold("Server"));
|
||||
({ server, auth } = await promptServer());
|
||||
({ server, auth } = await promptServer({ currentServer: server, currentAuth: auth }));
|
||||
|
||||
p.log.step(pc.bold("Storage"));
|
||||
storage = await promptStorage(defaultStorageConfig());
|
||||
storage = await promptStorage(storage);
|
||||
|
||||
p.log.step(pc.bold("Secrets"));
|
||||
secrets = defaultSecretsConfig();
|
||||
const secretsDefaults = defaultSecretsConfig();
|
||||
secrets = {
|
||||
provider: secrets.provider ?? secretsDefaults.provider,
|
||||
strictMode: secrets.strictMode ?? secretsDefaults.strictMode,
|
||||
localEncrypted: {
|
||||
keyFilePath: secrets.localEncrypted?.keyFilePath ?? secretsDefaults.localEncrypted.keyFilePath,
|
||||
},
|
||||
};
|
||||
p.log.message(
|
||||
pc.dim(
|
||||
`Using defaults: provider=${secrets.provider}, strictMode=${secrets.strictMode}, keyFile=${secrets.localEncrypted.keyFilePath}`,
|
||||
@@ -198,9 +383,17 @@ export async function onboard(opts: OnboardOptions): Promise<void> {
|
||||
);
|
||||
} else {
|
||||
p.log.step(pc.bold("Quickstart"));
|
||||
p.log.message(
|
||||
pc.dim("Using local defaults: embedded database, no LLM provider, file storage, and local encrypted secrets."),
|
||||
);
|
||||
p.log.message(pc.dim("Using quickstart defaults."));
|
||||
if (usedEnvKeys.length > 0) {
|
||||
p.log.message(pc.dim(`Environment-aware defaults active (${usedEnvKeys.length} env var(s) detected).`));
|
||||
} else {
|
||||
p.log.message(
|
||||
pc.dim("No environment overrides detected: embedded database, file storage, local encrypted secrets."),
|
||||
);
|
||||
}
|
||||
for (const ignored of ignoredEnvKeys) {
|
||||
p.log.message(pc.dim(`Ignored ${ignored.key}: ${ignored.reason}`));
|
||||
}
|
||||
}
|
||||
|
||||
const jwtSecret = ensureAgentJwtSecret(configPath);
|
||||
@@ -261,7 +454,7 @@ export async function onboard(opts: OnboardOptions): Promise<void> {
|
||||
"Next commands",
|
||||
);
|
||||
|
||||
if (server.deploymentMode === "authenticated") {
|
||||
if (canCreateBootstrapInviteImmediately({ database, server })) {
|
||||
p.log.step("Generating bootstrap CEO invite");
|
||||
await bootstrapCeoInvite({ config: configPath });
|
||||
}
|
||||
@@ -284,5 +477,15 @@ export async function onboard(opts: OnboardOptions): Promise<void> {
|
||||
return;
|
||||
}
|
||||
|
||||
if (server.deploymentMode === "authenticated" && database.mode === "embedded-postgres") {
|
||||
p.log.info(
|
||||
[
|
||||
"Bootstrap CEO invite will be created after the server starts.",
|
||||
`Next: ${pc.cyan("paperclipai run")}`,
|
||||
`Then: ${pc.cyan("paperclipai auth bootstrap-ceo")}`,
|
||||
].join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
p.outro("You're all set!");
|
||||
}
|
||||
|
||||
@@ -3,9 +3,13 @@ import path from "node:path";
|
||||
import { fileURLToPath, pathToFileURL } from "node:url";
|
||||
import * as p from "@clack/prompts";
|
||||
import pc from "picocolors";
|
||||
import { bootstrapCeoInvite } from "./auth-bootstrap-ceo.js";
|
||||
import { onboard } from "./onboard.js";
|
||||
import { doctor } from "./doctor.js";
|
||||
import { loadPaperclipEnvFile } from "../config/env.js";
|
||||
import { configExists, resolveConfigPath } from "../config/store.js";
|
||||
import type { PaperclipConfig } from "../config/schema.js";
|
||||
import { readConfig } from "../config/store.js";
|
||||
import {
|
||||
describeLocalInstancePaths,
|
||||
resolvePaperclipHomeDir,
|
||||
@@ -19,6 +23,13 @@ interface RunOptions {
|
||||
yes?: boolean;
|
||||
}
|
||||
|
||||
interface StartedServer {
|
||||
apiUrl: string;
|
||||
databaseUrl: string;
|
||||
host: string;
|
||||
listenPort: number;
|
||||
}
|
||||
|
||||
export async function runCommand(opts: RunOptions): Promise<void> {
|
||||
const instanceId = resolvePaperclipInstanceId(opts.instance);
|
||||
process.env.PAPERCLIP_INSTANCE_ID = instanceId;
|
||||
@@ -31,6 +42,7 @@ export async function runCommand(opts: RunOptions): Promise<void> {
|
||||
|
||||
const configPath = resolveConfigPath(opts.config);
|
||||
process.env.PAPERCLIP_CONFIG = configPath;
|
||||
loadPaperclipEnvFile(configPath);
|
||||
|
||||
p.intro(pc.bgCyan(pc.black(" paperclipai run ")));
|
||||
p.log.message(pc.dim(`Home: ${paths.homeDir}`));
|
||||
@@ -60,8 +72,23 @@ export async function runCommand(opts: RunOptions): Promise<void> {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const config = readConfig(configPath);
|
||||
if (!config) {
|
||||
p.log.error(`No config found at ${configPath}.`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
p.log.step("Starting Paperclip server...");
|
||||
await importServerEntry();
|
||||
const startedServer = await importServerEntry();
|
||||
|
||||
if (shouldGenerateBootstrapInviteAfterStart(config)) {
|
||||
p.log.step("Generating bootstrap CEO invite");
|
||||
await bootstrapCeoInvite({
|
||||
config: configPath,
|
||||
dbUrl: startedServer.databaseUrl,
|
||||
baseUrl: startedServer.apiUrl.replace(/\/api$/, ""),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function formatError(err: unknown): string {
|
||||
@@ -101,19 +128,20 @@ function maybeEnableUiDevMiddleware(entrypoint: string): void {
|
||||
}
|
||||
}
|
||||
|
||||
async function importServerEntry(): Promise<void> {
|
||||
async function importServerEntry(): Promise<StartedServer> {
|
||||
// Dev mode: try local workspace path (monorepo with tsx)
|
||||
const projectRoot = path.resolve(path.dirname(fileURLToPath(import.meta.url)), "../../..");
|
||||
const devEntry = path.resolve(projectRoot, "server/src/index.ts");
|
||||
if (fs.existsSync(devEntry)) {
|
||||
maybeEnableUiDevMiddleware(devEntry);
|
||||
await import(pathToFileURL(devEntry).href);
|
||||
return;
|
||||
const mod = await import(pathToFileURL(devEntry).href);
|
||||
return await startServerFromModule(mod, devEntry);
|
||||
}
|
||||
|
||||
// Production mode: import the published @paperclipai/server package
|
||||
try {
|
||||
await import("@paperclipai/server");
|
||||
const mod = await import("@paperclipai/server");
|
||||
return await startServerFromModule(mod, "@paperclipai/server");
|
||||
} catch (err) {
|
||||
const missingSpecifier = getMissingModuleSpecifier(err);
|
||||
const missingServerEntrypoint = !missingSpecifier || missingSpecifier === "@paperclipai/server";
|
||||
@@ -130,3 +158,15 @@ async function importServerEntry(): Promise<void> {
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
function shouldGenerateBootstrapInviteAfterStart(config: PaperclipConfig): boolean {
|
||||
return config.server.deploymentMode === "authenticated" && config.database.mode === "embedded-postgres";
|
||||
}
|
||||
|
||||
async function startServerFromModule(mod: unknown, label: string): Promise<StartedServer> {
|
||||
const startServer = (mod as { startServer?: () => Promise<StartedServer> }).startServer;
|
||||
if (typeof startServer !== "function") {
|
||||
throw new Error(`Paperclip server entrypoint did not export startServer(): ${label}`);
|
||||
}
|
||||
return await startServer();
|
||||
}
|
||||
|
||||
@@ -36,6 +36,10 @@ export function resolveAgentJwtEnvFile(configPath?: string): string {
|
||||
return resolveEnvFilePath(configPath);
|
||||
}
|
||||
|
||||
export function loadPaperclipEnvFile(configPath?: string): void {
|
||||
loadAgentJwtEnvFile(resolveEnvFilePath(configPath));
|
||||
}
|
||||
|
||||
export function loadAgentJwtEnvFile(filePath = resolveEnvFilePath()): void {
|
||||
if (loadedEnvFiles.has(filePath)) return;
|
||||
|
||||
|
||||
@@ -49,6 +49,10 @@ export function resolveDefaultStorageDir(instanceId?: string): string {
|
||||
return path.resolve(resolvePaperclipInstanceRoot(instanceId), "data", "storage");
|
||||
}
|
||||
|
||||
export function resolveDefaultBackupDir(instanceId?: string): string {
|
||||
return path.resolve(resolvePaperclipInstanceRoot(instanceId), "data", "backups");
|
||||
}
|
||||
|
||||
export function expandHomePrefix(value: string): string {
|
||||
if (value === "~") return os.homedir();
|
||||
if (value.startsWith("~/")) return path.resolve(os.homedir(), value.slice(2));
|
||||
@@ -64,6 +68,7 @@ export function describeLocalInstancePaths(instanceId?: string) {
|
||||
instanceRoot,
|
||||
configPath: resolveDefaultConfigPath(resolvedInstanceId),
|
||||
embeddedPostgresDataDir: resolveDefaultEmbeddedPostgresDir(resolvedInstanceId),
|
||||
backupDir: resolveDefaultBackupDir(resolvedInstanceId),
|
||||
logDir: resolveDefaultLogsDir(resolvedInstanceId),
|
||||
secretsKeyFilePath: resolveDefaultSecretsKeyFilePath(resolvedInstanceId),
|
||||
storageDir: resolveDefaultStorageDir(resolvedInstanceId),
|
||||
|
||||
@@ -2,6 +2,7 @@ export {
|
||||
paperclipConfigSchema,
|
||||
configMetaSchema,
|
||||
llmConfigSchema,
|
||||
databaseBackupConfigSchema,
|
||||
databaseConfigSchema,
|
||||
loggingConfigSchema,
|
||||
serverConfigSchema,
|
||||
@@ -13,6 +14,7 @@ export {
|
||||
secretsLocalEncryptedConfigSchema,
|
||||
type PaperclipConfig,
|
||||
type LlmConfig,
|
||||
type DatabaseBackupConfig,
|
||||
type DatabaseConfig,
|
||||
type LoggingConfig,
|
||||
type ServerConfig,
|
||||
|
||||
@@ -7,6 +7,7 @@ import { addAllowedHostname } from "./commands/allowed-hostname.js";
|
||||
import { heartbeatRun } from "./commands/heartbeat-run.js";
|
||||
import { runCommand } from "./commands/run.js";
|
||||
import { bootstrapCeoInvite } from "./commands/auth-bootstrap-ceo.js";
|
||||
import { dbBackupCommand } from "./commands/db-backup.js";
|
||||
import { registerContextCommands } from "./commands/client/context.js";
|
||||
import { registerCompanyCommands } from "./commands/client/company.js";
|
||||
import { registerIssueCommands } from "./commands/client/issue.js";
|
||||
@@ -23,7 +24,7 @@ const DATA_DIR_OPTION_HELP =
|
||||
program
|
||||
.name("paperclipai")
|
||||
.description("Paperclip CLI — setup, diagnose, and configure your instance")
|
||||
.version("0.2.6");
|
||||
.version("0.2.7");
|
||||
|
||||
program.hook("preAction", (_thisCommand, actionCommand) => {
|
||||
const options = actionCommand.optsWithGlobals() as DataDirOptionLike;
|
||||
@@ -70,6 +71,19 @@ program
|
||||
.option("-s, --section <section>", "Section to configure (llm, database, logging, server, storage, secrets)")
|
||||
.action(configure);
|
||||
|
||||
program
|
||||
.command("db:backup")
|
||||
.description("Create a one-off database backup using current config")
|
||||
.option("-c, --config <path>", "Path to config file")
|
||||
.option("-d, --data-dir <path>", DATA_DIR_OPTION_HELP)
|
||||
.option("--dir <path>", "Backup output directory (overrides config)")
|
||||
.option("--retention-days <days>", "Retention window used for pruning", (value) => Number(value))
|
||||
.option("--filename-prefix <prefix>", "Backup filename prefix", "paperclip")
|
||||
.option("--json", "Print backup metadata as JSON")
|
||||
.action(async (opts) => {
|
||||
await dbBackupCommand(opts);
|
||||
});
|
||||
|
||||
program
|
||||
.command("allowed-hostname")
|
||||
.description("Allow a hostname for authenticated/private mode access")
|
||||
|
||||
@@ -1,9 +1,26 @@
|
||||
import * as p from "@clack/prompts";
|
||||
import type { DatabaseConfig } from "../config/schema.js";
|
||||
import { resolveDefaultEmbeddedPostgresDir, resolvePaperclipInstanceId } from "../config/home.js";
|
||||
import {
|
||||
resolveDefaultBackupDir,
|
||||
resolveDefaultEmbeddedPostgresDir,
|
||||
resolvePaperclipInstanceId,
|
||||
} from "../config/home.js";
|
||||
|
||||
export async function promptDatabase(): Promise<DatabaseConfig> {
|
||||
const defaultEmbeddedDir = resolveDefaultEmbeddedPostgresDir(resolvePaperclipInstanceId());
|
||||
export async function promptDatabase(current?: DatabaseConfig): Promise<DatabaseConfig> {
|
||||
const instanceId = resolvePaperclipInstanceId();
|
||||
const defaultEmbeddedDir = resolveDefaultEmbeddedPostgresDir(instanceId);
|
||||
const defaultBackupDir = resolveDefaultBackupDir(instanceId);
|
||||
const base: DatabaseConfig = current ?? {
|
||||
mode: "embedded-postgres",
|
||||
embeddedPostgresDataDir: defaultEmbeddedDir,
|
||||
embeddedPostgresPort: 54329,
|
||||
backup: {
|
||||
enabled: true,
|
||||
intervalMinutes: 60,
|
||||
retentionDays: 30,
|
||||
dir: defaultBackupDir,
|
||||
},
|
||||
};
|
||||
|
||||
const mode = await p.select({
|
||||
message: "Database mode",
|
||||
@@ -11,6 +28,7 @@ export async function promptDatabase(): Promise<DatabaseConfig> {
|
||||
{ value: "embedded-postgres" as const, label: "Embedded PostgreSQL (managed locally)", hint: "recommended" },
|
||||
{ value: "postgres" as const, label: "PostgreSQL (external server)" },
|
||||
],
|
||||
initialValue: base.mode,
|
||||
});
|
||||
|
||||
if (p.isCancel(mode)) {
|
||||
@@ -18,9 +36,14 @@ export async function promptDatabase(): Promise<DatabaseConfig> {
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
let connectionString: string | undefined = base.connectionString;
|
||||
let embeddedPostgresDataDir = base.embeddedPostgresDataDir || defaultEmbeddedDir;
|
||||
let embeddedPostgresPort = base.embeddedPostgresPort || 54329;
|
||||
|
||||
if (mode === "postgres") {
|
||||
const connectionString = await p.text({
|
||||
const value = await p.text({
|
||||
message: "PostgreSQL connection string",
|
||||
defaultValue: base.connectionString ?? "",
|
||||
placeholder: "postgres://user:pass@localhost:5432/paperclip",
|
||||
validate: (val) => {
|
||||
if (!val) return "Connection string is required for PostgreSQL mode";
|
||||
@@ -28,48 +51,107 @@ export async function promptDatabase(): Promise<DatabaseConfig> {
|
||||
},
|
||||
});
|
||||
|
||||
if (p.isCancel(connectionString)) {
|
||||
if (p.isCancel(value)) {
|
||||
p.cancel("Setup cancelled.");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
return {
|
||||
mode: "postgres",
|
||||
connectionString,
|
||||
embeddedPostgresDataDir: defaultEmbeddedDir,
|
||||
embeddedPostgresPort: 54329,
|
||||
};
|
||||
connectionString = value;
|
||||
} else {
|
||||
const dataDir = await p.text({
|
||||
message: "Embedded PostgreSQL data directory",
|
||||
defaultValue: base.embeddedPostgresDataDir || defaultEmbeddedDir,
|
||||
placeholder: defaultEmbeddedDir,
|
||||
});
|
||||
|
||||
if (p.isCancel(dataDir)) {
|
||||
p.cancel("Setup cancelled.");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
embeddedPostgresDataDir = dataDir || defaultEmbeddedDir;
|
||||
|
||||
const portValue = await p.text({
|
||||
message: "Embedded PostgreSQL port",
|
||||
defaultValue: String(base.embeddedPostgresPort || 54329),
|
||||
placeholder: "54329",
|
||||
validate: (val) => {
|
||||
const n = Number(val);
|
||||
if (!Number.isInteger(n) || n < 1 || n > 65535) return "Port must be an integer between 1 and 65535";
|
||||
},
|
||||
});
|
||||
|
||||
if (p.isCancel(portValue)) {
|
||||
p.cancel("Setup cancelled.");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
embeddedPostgresPort = Number(portValue || "54329");
|
||||
connectionString = undefined;
|
||||
}
|
||||
|
||||
const embeddedPostgresDataDir = await p.text({
|
||||
message: "Embedded PostgreSQL data directory",
|
||||
defaultValue: defaultEmbeddedDir,
|
||||
placeholder: defaultEmbeddedDir,
|
||||
const backupEnabled = await p.confirm({
|
||||
message: "Enable automatic database backups?",
|
||||
initialValue: base.backup.enabled,
|
||||
});
|
||||
|
||||
if (p.isCancel(embeddedPostgresDataDir)) {
|
||||
if (p.isCancel(backupEnabled)) {
|
||||
p.cancel("Setup cancelled.");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const embeddedPostgresPort = await p.text({
|
||||
message: "Embedded PostgreSQL port",
|
||||
defaultValue: "54329",
|
||||
placeholder: "54329",
|
||||
const backupDirInput = await p.text({
|
||||
message: "Backup directory",
|
||||
defaultValue: base.backup.dir || defaultBackupDir,
|
||||
placeholder: defaultBackupDir,
|
||||
validate: (val) => (!val || val.trim().length === 0 ? "Backup directory is required" : undefined),
|
||||
});
|
||||
if (p.isCancel(backupDirInput)) {
|
||||
p.cancel("Setup cancelled.");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const backupIntervalInput = await p.text({
|
||||
message: "Backup interval (minutes)",
|
||||
defaultValue: String(base.backup.intervalMinutes || 60),
|
||||
placeholder: "60",
|
||||
validate: (val) => {
|
||||
const n = Number(val);
|
||||
if (!Number.isInteger(n) || n < 1 || n > 65535) return "Port must be an integer between 1 and 65535";
|
||||
if (!Number.isInteger(n) || n < 1) return "Interval must be a positive integer";
|
||||
if (n > 10080) return "Interval must be 10080 minutes (7 days) or less";
|
||||
return undefined;
|
||||
},
|
||||
});
|
||||
if (p.isCancel(backupIntervalInput)) {
|
||||
p.cancel("Setup cancelled.");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
if (p.isCancel(embeddedPostgresPort)) {
|
||||
const backupRetentionInput = await p.text({
|
||||
message: "Backup retention (days)",
|
||||
defaultValue: String(base.backup.retentionDays || 30),
|
||||
placeholder: "30",
|
||||
validate: (val) => {
|
||||
const n = Number(val);
|
||||
if (!Number.isInteger(n) || n < 1) return "Retention must be a positive integer";
|
||||
if (n > 3650) return "Retention must be 3650 days or less";
|
||||
return undefined;
|
||||
},
|
||||
});
|
||||
if (p.isCancel(backupRetentionInput)) {
|
||||
p.cancel("Setup cancelled.");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
return {
|
||||
mode: "embedded-postgres",
|
||||
embeddedPostgresDataDir: embeddedPostgresDataDir || defaultEmbeddedDir,
|
||||
embeddedPostgresPort: Number(embeddedPostgresPort || "54329"),
|
||||
mode,
|
||||
connectionString,
|
||||
embeddedPostgresDataDir,
|
||||
embeddedPostgresPort,
|
||||
backup: {
|
||||
enabled: backupEnabled,
|
||||
intervalMinutes: Number(backupIntervalInput || "60"),
|
||||
retentionDays: Number(backupRetentionInput || "30"),
|
||||
dir: backupDirInput || defaultBackupDir,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
@@ -113,7 +113,7 @@ export async function promptServer(opts?: {
|
||||
}
|
||||
|
||||
const port = Number(portStr) || 3100;
|
||||
let auth: AuthConfig = { baseUrlMode: "auto" };
|
||||
let auth: AuthConfig = { baseUrlMode: "auto", disableSignUp: false };
|
||||
if (deploymentMode === "authenticated" && exposure === "public") {
|
||||
const urlInput = await p.text({
|
||||
message: "Public base URL",
|
||||
@@ -139,17 +139,26 @@ export async function promptServer(opts?: {
|
||||
}
|
||||
auth = {
|
||||
baseUrlMode: "explicit",
|
||||
disableSignUp: false,
|
||||
publicBaseUrl: urlInput.trim().replace(/\/+$/, ""),
|
||||
};
|
||||
} else if (currentAuth?.baseUrlMode === "explicit" && currentAuth.publicBaseUrl) {
|
||||
auth = {
|
||||
baseUrlMode: "explicit",
|
||||
disableSignUp: false,
|
||||
publicBaseUrl: currentAuth.publicBaseUrl,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
server: { deploymentMode, exposure, host: hostStr.trim(), port, allowedHostnames, serveUi: true },
|
||||
server: {
|
||||
deploymentMode,
|
||||
exposure,
|
||||
host: hostStr.trim(),
|
||||
port,
|
||||
allowedHostnames,
|
||||
serveUi: currentServer?.serveUi ?? true,
|
||||
},
|
||||
auth,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"extends": "../tsconfig.json",
|
||||
"extends": "../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "dist",
|
||||
"rootDir": "src"
|
||||
|
||||
14
doc/CLI.md
14
doc/CLI.md
@@ -116,6 +116,20 @@ pnpm paperclipai issue release <issue-id>
|
||||
```sh
|
||||
pnpm paperclipai agent list --company-id <company-id>
|
||||
pnpm paperclipai agent get <agent-id>
|
||||
pnpm paperclipai agent local-cli <agent-id-or-shortname> --company-id <company-id>
|
||||
```
|
||||
|
||||
`agent local-cli` is the quickest way to run local Claude/Codex manually as a Paperclip agent:
|
||||
|
||||
- creates a new long-lived agent API key
|
||||
- installs missing Paperclip skills into `~/.codex/skills` and `~/.claude/skills`
|
||||
- prints `export ...` lines for `PAPERCLIP_API_URL`, `PAPERCLIP_COMPANY_ID`, `PAPERCLIP_AGENT_ID`, and `PAPERCLIP_API_KEY`
|
||||
|
||||
Example for shortname-based local setup:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai agent local-cli codexcoder --company-id <company-id>
|
||||
pnpm paperclipai agent local-cli claudecoder --company-id <company-id>
|
||||
```
|
||||
|
||||
## Approval Commands
|
||||
|
||||
@@ -15,6 +15,14 @@ Current implementation status:
|
||||
- Node.js 20+
|
||||
- pnpm 9+
|
||||
|
||||
## Dependency Lockfile Policy
|
||||
|
||||
GitHub Actions owns `pnpm-lock.yaml`.
|
||||
|
||||
- Do not commit `pnpm-lock.yaml` in pull requests.
|
||||
- Pull request CI validates dependency resolution when manifests change.
|
||||
- Pushes to `master` regenerate `pnpm-lock.yaml` with `pnpm install --lockfile-only --no-frozen-lockfile`, commit it back if needed, and then run verification with `--frozen-lockfile`.
|
||||
|
||||
## Start Dev
|
||||
|
||||
From repo root:
|
||||
@@ -29,6 +37,8 @@ This starts:
|
||||
- API server: `http://localhost:3100`
|
||||
- UI: served by the API server in dev middleware mode (same origin as API)
|
||||
|
||||
`pnpm dev` runs the server in watch mode and restarts on changes from workspace packages (including adapter packages). Use `pnpm dev:once` to run without file watching.
|
||||
|
||||
Tailscale/private-auth dev mode:
|
||||
|
||||
```sh
|
||||
@@ -141,6 +151,36 @@ pnpm dev
|
||||
|
||||
If you set `DATABASE_URL`, the server will use that instead of embedded PostgreSQL.
|
||||
|
||||
## Automatic DB Backups
|
||||
|
||||
Paperclip can run automatic DB backups on a timer. Defaults:
|
||||
|
||||
- enabled
|
||||
- every 60 minutes
|
||||
- retain 30 days
|
||||
- backup dir: `~/.paperclip/instances/default/data/backups`
|
||||
|
||||
Configure these in:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai configure --section database
|
||||
```
|
||||
|
||||
Run a one-off backup manually:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai db:backup
|
||||
# or:
|
||||
pnpm db:backup
|
||||
```
|
||||
|
||||
Environment overrides:
|
||||
|
||||
- `PAPERCLIP_DB_BACKUP_ENABLED=true|false`
|
||||
- `PAPERCLIP_DB_BACKUP_INTERVAL_MINUTES=<minutes>`
|
||||
- `PAPERCLIP_DB_BACKUP_RETENTION_DAYS=<days>`
|
||||
- `PAPERCLIP_DB_BACKUP_DIR=/absolute/or/~/path`
|
||||
|
||||
## Secrets in Dev
|
||||
|
||||
Agent env vars now support secret references. By default, secret values are stored with local encryption and only secret refs are persisted in agent config.
|
||||
@@ -216,5 +256,61 @@ Agent-oriented invite onboarding now exposes machine-readable API docs:
|
||||
|
||||
- `GET /api/invites/:token` returns invite summary plus onboarding and skills index links.
|
||||
- `GET /api/invites/:token/onboarding` returns onboarding manifest details (registration endpoint, claim endpoint template, skill install hints).
|
||||
- `GET /api/invites/:token/onboarding.txt` returns a plain-text onboarding doc intended for both human operators and agents (llm.txt-style handoff), including optional inviter message and suggested network host candidates.
|
||||
- `GET /api/skills/index` lists available skill documents.
|
||||
- `GET /api/skills/paperclip` returns the Paperclip heartbeat skill markdown.
|
||||
|
||||
## OpenClaw Join Smoke Test
|
||||
|
||||
Run the end-to-end OpenClaw join smoke harness:
|
||||
|
||||
```sh
|
||||
pnpm smoke:openclaw-join
|
||||
```
|
||||
|
||||
What it validates:
|
||||
|
||||
- invite creation for agent-only join
|
||||
- agent join request using `adapterType=openclaw`
|
||||
- board approval + one-time API key claim semantics
|
||||
- callback delivery on wakeup to a dockerized OpenClaw-style webhook receiver
|
||||
|
||||
Required permissions:
|
||||
|
||||
- This script performs board-governed actions (create invite, approve join, wakeup another agent).
|
||||
- In authenticated mode, run with board auth via `PAPERCLIP_AUTH_HEADER` or `PAPERCLIP_COOKIE`.
|
||||
|
||||
Optional auth flags (for authenticated mode):
|
||||
|
||||
- `PAPERCLIP_AUTH_HEADER` (for example `Bearer ...`)
|
||||
- `PAPERCLIP_COOKIE` (session cookie header value)
|
||||
|
||||
## OpenClaw Docker UI One-Command Script
|
||||
|
||||
To boot OpenClaw in Docker and print a host-browser dashboard URL in one command:
|
||||
|
||||
```sh
|
||||
pnpm smoke:openclaw-docker-ui
|
||||
```
|
||||
|
||||
This script lives at `scripts/smoke/openclaw-docker-ui.sh` and automates clone/build/config/start for Compose-based local OpenClaw UI testing.
|
||||
|
||||
Pairing behavior for this smoke script:
|
||||
|
||||
- default `OPENCLAW_DISABLE_DEVICE_AUTH=1` (no Control UI pairing prompt for local smoke; no extra pairing env vars required)
|
||||
- set `OPENCLAW_DISABLE_DEVICE_AUTH=0` to require standard device pairing
|
||||
|
||||
Model behavior for this smoke script:
|
||||
|
||||
- defaults to OpenAI models (`openai/gpt-5.2` + OpenAI fallback) so it does not require Anthropic auth by default
|
||||
|
||||
State behavior for this smoke script:
|
||||
|
||||
- defaults to isolated config dir `~/.openclaw-paperclip-smoke`
|
||||
- resets smoke agent state each run by default (`OPENCLAW_RESET_STATE=1`) to avoid stale provider/auth drift
|
||||
|
||||
Networking behavior for this smoke script:
|
||||
|
||||
- auto-detects and prints a Paperclip host URL reachable from inside OpenClaw Docker
|
||||
- default container-side host alias is `host.docker.internal` (override with `PAPERCLIP_HOST_FROM_CONTAINER` / `PAPERCLIP_HOST_PORT`)
|
||||
- if Paperclip rejects container hostnames in authenticated/private mode, allow `host.docker.internal` via `pnpm paperclipai allowed-hostname host.docker.internal` and restart Paperclip
|
||||
|
||||
@@ -42,6 +42,32 @@ Optional overrides:
|
||||
PAPERCLIP_PORT=3200 PAPERCLIP_DATA_DIR=./data/pc docker compose -f docker-compose.quickstart.yml up --build
|
||||
```
|
||||
|
||||
If you change host port or use a non-local domain, set `PAPERCLIP_PUBLIC_URL` to the external URL you will use in browser/auth flows.
|
||||
|
||||
## Authenticated Compose (Single Public URL)
|
||||
|
||||
For authenticated deployments, set one canonical public URL and let Paperclip derive auth/callback defaults:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
paperclip:
|
||||
environment:
|
||||
PAPERCLIP_DEPLOYMENT_MODE: authenticated
|
||||
PAPERCLIP_DEPLOYMENT_EXPOSURE: private
|
||||
PAPERCLIP_PUBLIC_URL: https://desk.koker.net
|
||||
```
|
||||
|
||||
`PAPERCLIP_PUBLIC_URL` is used as the primary source for:
|
||||
|
||||
- auth public base URL
|
||||
- Better Auth base URL defaults
|
||||
- bootstrap invite URL defaults
|
||||
- hostname allowlist defaults (hostname extracted from URL)
|
||||
|
||||
Granular overrides remain available if needed (`PAPERCLIP_AUTH_PUBLIC_BASE_URL`, `BETTER_AUTH_URL`, `BETTER_AUTH_TRUSTED_ORIGINS`, `PAPERCLIP_ALLOWED_HOSTNAMES`).
|
||||
|
||||
Set `PAPERCLIP_ALLOWED_HOSTNAMES` explicitly only when you need additional hostnames beyond the public URL host (for example Tailscale/LAN aliases or multiple private hostnames).
|
||||
|
||||
## Claude + Codex Local Adapters in Docker
|
||||
|
||||
The image pre-installs:
|
||||
|
||||
94
doc/OPENCLAW_ONBOARDING.md
Normal file
94
doc/OPENCLAW_ONBOARDING.md
Normal file
@@ -0,0 +1,94 @@
|
||||
Use this exact checklist.
|
||||
|
||||
1. Start Paperclip in auth mode.
|
||||
```bash
|
||||
cd <paperclip-repo-root>
|
||||
pnpm dev --tailscale-auth
|
||||
```
|
||||
Then verify:
|
||||
```bash
|
||||
curl -sS http://127.0.0.1:3100/api/health | jq
|
||||
```
|
||||
|
||||
2. Start a clean/stock OpenClaw Docker.
|
||||
```bash
|
||||
OPENCLAW_RESET_STATE=1 OPENCLAW_BUILD=1 ./scripts/smoke/openclaw-docker-ui.sh
|
||||
```
|
||||
Open the printed `Dashboard URL` (includes `#token=...`) in your browser.
|
||||
|
||||
3. In Paperclip UI, go to `http://127.0.0.1:3100/CLA/company/settings`.
|
||||
|
||||
4. Use the OpenClaw invite prompt flow.
|
||||
- In the Invites section, click `Generate OpenClaw Invite Prompt`.
|
||||
- Copy the generated prompt from `OpenClaw Invite Prompt`.
|
||||
- Paste it into OpenClaw main chat as one message.
|
||||
- If it stalls, send one follow-up: `How is onboarding going? Continue setup now.`
|
||||
|
||||
Security/control note:
|
||||
- The OpenClaw invite prompt is created from a controlled endpoint:
|
||||
- `POST /api/companies/{companyId}/openclaw/invite-prompt`
|
||||
- board users with invite permission can call it
|
||||
- agent callers are limited to the company CEO agent
|
||||
|
||||
5. Approve the join request in Paperclip UI, then confirm the OpenClaw agent appears in CLA agents.
|
||||
|
||||
6. Gateway preflight (required before task tests).
|
||||
- Confirm the created agent uses `openclaw_gateway` (not `openclaw`).
|
||||
- Confirm gateway URL is `ws://...` or `wss://...`.
|
||||
- Confirm gateway token is non-trivial (not empty / not 1-char placeholder).
|
||||
- The OpenClaw Gateway adapter UI should not expose `disableDeviceAuth` for normal onboarding.
|
||||
- Confirm pairing mode is explicit:
|
||||
- required default: device auth enabled (`adapterConfig.disableDeviceAuth` false/absent) with persisted `adapterConfig.devicePrivateKeyPem`
|
||||
- do not rely on `disableDeviceAuth` for normal onboarding
|
||||
- If you can run API checks with board auth:
|
||||
```bash
|
||||
AGENT_ID="<newly-created-agent-id>"
|
||||
curl -sS -H "Cookie: $PAPERCLIP_COOKIE" "http://127.0.0.1:3100/api/agents/$AGENT_ID" | jq '{adapterType,adapterConfig:{url:.adapterConfig.url,tokenLen:(.adapterConfig.headers["x-openclaw-token"] // .adapterConfig.headers["x-openclaw-auth"] // "" | length),disableDeviceAuth:(.adapterConfig.disableDeviceAuth // false),hasDeviceKey:(.adapterConfig.devicePrivateKeyPem // "" | length > 0)}}'
|
||||
```
|
||||
- Expected: `adapterType=openclaw_gateway`, `tokenLen >= 16`, `hasDeviceKey=true`, and `disableDeviceAuth=false`.
|
||||
|
||||
Pairing handshake note:
|
||||
- Clean run expectation: first task should succeed without manual pairing commands.
|
||||
- The adapter attempts one automatic pairing approval + retry on first `pairing required` (when shared gateway auth token/password is valid).
|
||||
- If auto-pair cannot complete (for example token mismatch or no pending request), the first gateway run may still return `pairing required`.
|
||||
- This is a separate approval from Paperclip invite approval. You must approve the pending device in OpenClaw itself.
|
||||
- Approve it in OpenClaw, then retry the task.
|
||||
- For local docker smoke, you can approve from host:
|
||||
```bash
|
||||
docker exec openclaw-docker-openclaw-gateway-1 sh -lc 'openclaw devices approve --latest --json --url "ws://127.0.0.1:18789" --token "$(node -p \"require(process.env.HOME+\\\"/.openclaw/openclaw.json\\\").gateway.auth.token\")"'
|
||||
```
|
||||
- You can inspect pending vs paired devices:
|
||||
```bash
|
||||
docker exec openclaw-docker-openclaw-gateway-1 sh -lc 'TOK="$(node -e \"const fs=require(\\\"fs\\\");const c=JSON.parse(fs.readFileSync(\\\"/home/node/.openclaw/openclaw.json\\\",\\\"utf8\\\"));process.stdout.write(c.gateway?.auth?.token||\\\"\\\");\")\"; openclaw devices list --json --url \"ws://127.0.0.1:18789\" --token \"$TOK\"'
|
||||
```
|
||||
|
||||
7. Case A (manual issue test).
|
||||
- Create an issue assigned to the OpenClaw agent.
|
||||
- Put instructions: “post comment `OPENCLAW_CASE_A_OK_<timestamp>` and mark done.”
|
||||
- Verify in UI: issue status becomes `done` and comment exists.
|
||||
|
||||
8. Case B (message tool test).
|
||||
- Create another issue assigned to OpenClaw.
|
||||
- Instructions: “send `OPENCLAW_CASE_B_OK_<timestamp>` to main webchat via message tool, then comment same marker on issue, then mark done.”
|
||||
- Verify both:
|
||||
- marker comment on issue
|
||||
- marker text appears in OpenClaw main chat
|
||||
|
||||
9. Case C (new session memory/skills test).
|
||||
- In OpenClaw, start `/new` session.
|
||||
- Ask it to create a new CLA issue in Paperclip with unique title `OPENCLAW_CASE_C_CREATED_<timestamp>`.
|
||||
- Verify in Paperclip UI that new issue exists.
|
||||
|
||||
10. Watch logs during test (optional but helpful):
|
||||
```bash
|
||||
docker compose -f /tmp/openclaw-docker/docker-compose.yml -f /tmp/openclaw-docker/.paperclip-openclaw.override.yml logs -f openclaw-gateway
|
||||
```
|
||||
|
||||
11. Expected pass criteria.
|
||||
- Preflight: `openclaw_gateway` + non-placeholder token (`tokenLen >= 16`).
|
||||
- Pairing mode: stable `devicePrivateKeyPem` configured with device auth enabled (default path).
|
||||
- Case A: `done` + marker comment.
|
||||
- Case B: `done` + marker comment + main-chat message visible.
|
||||
- Case C: original task done and new issue created from `/new` session.
|
||||
|
||||
If you want, I can also give you a single “observer mode” command that runs the stock smoke harness while you watch the same steps live in UI.
|
||||
@@ -1,196 +1,121 @@
|
||||
# Publishing to npm
|
||||
|
||||
This document covers how to build and publish the `paperclipai` CLI package to npm.
|
||||
Low-level reference for how Paperclip packages are built for npm.
|
||||
|
||||
## Prerequisites
|
||||
For the maintainer release workflow, use [doc/RELEASING.md](RELEASING.md). This document is only about packaging internals and the scripts that produce publishable artifacts.
|
||||
|
||||
- Node.js 20+
|
||||
- pnpm 9.15+
|
||||
- An npm account with publish access to the `paperclipai` package
|
||||
- Logged in to npm: `npm login`
|
||||
## Current Release Entry Points
|
||||
|
||||
## One-Command Publish
|
||||
Use these scripts instead of older one-off publish commands:
|
||||
|
||||
The fastest way to publish — bumps version, builds, publishes, restores, commits, and tags in one shot:
|
||||
- [`scripts/release-start.sh`](../scripts/release-start.sh) to create or resume `release/X.Y.Z`
|
||||
- [`scripts/release-preflight.sh`](../scripts/release-preflight.sh) before any canary or stable release
|
||||
- [`scripts/release.sh`](../scripts/release.sh) for canary and stable npm publishes
|
||||
- [`scripts/rollback-latest.sh`](../scripts/rollback-latest.sh) to repoint `latest` during rollback
|
||||
- [`scripts/create-github-release.sh`](../scripts/create-github-release.sh) after pushing the stable branch tag
|
||||
|
||||
```bash
|
||||
./scripts/bump-and-publish.sh patch # 0.1.1 → 0.1.2
|
||||
./scripts/bump-and-publish.sh minor # 0.1.1 → 0.2.0
|
||||
./scripts/bump-and-publish.sh major # 0.1.1 → 1.0.0
|
||||
./scripts/bump-and-publish.sh 2.0.0 # set explicit version
|
||||
./scripts/bump-and-publish.sh patch --dry-run # everything except npm publish
|
||||
```
|
||||
## Why the CLI needs special packaging
|
||||
|
||||
The script runs all 6 steps below in order. It requires a clean working tree and an active `npm login` session (unless `--dry-run`). After it finishes, push:
|
||||
The CLI package, `paperclipai`, imports code from workspace packages such as:
|
||||
|
||||
```bash
|
||||
git push && git push origin v<version>
|
||||
```
|
||||
- `@paperclipai/server`
|
||||
- `@paperclipai/db`
|
||||
- `@paperclipai/shared`
|
||||
- adapter packages under `packages/adapters/`
|
||||
|
||||
## Manual Step-by-Step
|
||||
Those workspace references use `workspace:*` during development. npm cannot install those references directly for end users, so the release build has to transform the CLI into a publishable standalone package.
|
||||
|
||||
If you prefer to run each step individually:
|
||||
## `build-npm.sh`
|
||||
|
||||
### Quick Reference
|
||||
|
||||
```bash
|
||||
# Bump version
|
||||
./scripts/version-bump.sh patch # 0.1.0 → 0.1.1
|
||||
|
||||
# Build
|
||||
./scripts/build-npm.sh
|
||||
|
||||
# Preview what will be published
|
||||
cd cli && npm pack --dry-run
|
||||
|
||||
# Publish
|
||||
cd cli && npm publish --access public
|
||||
|
||||
# Restore dev package.json
|
||||
mv cli/package.dev.json cli/package.json
|
||||
```
|
||||
|
||||
## Step-by-Step
|
||||
|
||||
### 1. Bump the version
|
||||
|
||||
```bash
|
||||
./scripts/version-bump.sh <patch|minor|major|X.Y.Z>
|
||||
```
|
||||
|
||||
This updates the version in two places:
|
||||
|
||||
- `cli/package.json` — the source of truth
|
||||
- `cli/src/index.ts` — the Commander `.version()` call
|
||||
|
||||
Examples:
|
||||
|
||||
```bash
|
||||
./scripts/version-bump.sh patch # 0.1.0 → 0.1.1
|
||||
./scripts/version-bump.sh minor # 0.1.0 → 0.2.0
|
||||
./scripts/version-bump.sh major # 0.1.0 → 1.0.0
|
||||
./scripts/version-bump.sh 1.2.3 # set explicit version
|
||||
```
|
||||
|
||||
### 2. Build
|
||||
Run:
|
||||
|
||||
```bash
|
||||
./scripts/build-npm.sh
|
||||
```
|
||||
|
||||
The build script runs five steps:
|
||||
This script does six things:
|
||||
|
||||
1. **Forbidden token check** — scans tracked files for tokens listed in `.git/hooks/forbidden-tokens.txt`. If the file is missing (e.g. on a contributor's machine), the check passes silently. The script never prints which tokens it's searching for.
|
||||
2. **TypeScript type-check** — runs `pnpm -r typecheck` across all workspace packages.
|
||||
3. **esbuild bundle** — bundles the CLI entry point (`cli/src/index.ts`) and all workspace package code (`@paperclipai/*`) into a single file at `cli/dist/index.js`. External npm dependencies (express, postgres, etc.) are kept as regular imports.
|
||||
4. **Generate publishable package.json** — replaces `cli/package.json` with a version that has real npm dependency ranges instead of `workspace:*` references (see [package.dev.json](#packagedevjson) below).
|
||||
5. **Summary** — prints the bundle size and next steps.
|
||||
1. Runs the forbidden token check unless `--skip-checks` is supplied
|
||||
2. Runs `pnpm -r typecheck`
|
||||
3. Bundles the CLI entrypoint with esbuild into `cli/dist/index.js`
|
||||
4. Verifies the bundled entrypoint with `node --check`
|
||||
5. Rewrites `cli/package.json` into a publishable npm manifest and stores the dev copy as `cli/package.dev.json`
|
||||
6. Copies the repo `README.md` into `cli/README.md` for npm package metadata
|
||||
|
||||
To skip the forbidden token check (e.g. in CI without the token list):
|
||||
`build-npm.sh` is used by the release script so that npm users install a real package rather than unresolved workspace dependencies.
|
||||
|
||||
## Publishable CLI layout
|
||||
|
||||
During development, [`cli/package.json`](../cli/package.json) contains workspace references.
|
||||
|
||||
During release preparation:
|
||||
|
||||
- `cli/package.json` becomes a publishable manifest with external npm dependency ranges
|
||||
- `cli/package.dev.json` stores the development manifest temporarily
|
||||
- `cli/dist/index.js` contains the bundled CLI entrypoint
|
||||
- `cli/README.md` is copied in for npm metadata
|
||||
|
||||
After release finalization, the release script restores the development manifest and removes the temporary README copy.
|
||||
|
||||
## Package discovery
|
||||
|
||||
The release tooling scans the workspace for public packages under:
|
||||
|
||||
- `packages/`
|
||||
- `server/`
|
||||
- `cli/`
|
||||
|
||||
`ui/` remains ignored for npm publishing because it is private.
|
||||
|
||||
This matters because all public packages are versioned and published together as one release unit.
|
||||
|
||||
## Canary packaging model
|
||||
|
||||
Canaries are published as semver prereleases such as:
|
||||
|
||||
- `1.2.3-canary.0`
|
||||
- `1.2.3-canary.1`
|
||||
|
||||
They are published under the npm dist-tag `canary`.
|
||||
|
||||
This means:
|
||||
|
||||
- `npx paperclipai@canary onboard` can install them explicitly
|
||||
- `npx paperclipai onboard` continues to resolve `latest`
|
||||
- the stable changelog can stay at `releases/v1.2.3.md`
|
||||
|
||||
## Stable packaging model
|
||||
|
||||
Stable releases publish normal semver versions such as `1.2.3` under the npm dist-tag `latest`.
|
||||
|
||||
The stable publish flow also creates the local release commit and git tag on `release/X.Y.Z`. Pushing that branch commit/tag, creating the GitHub Release, and merging the release branch back to `master` happen afterward as separate maintainer steps.
|
||||
|
||||
## Rollback model
|
||||
|
||||
Rollback does not unpublish packages.
|
||||
|
||||
Instead, the maintainer should move the `latest` dist-tag back to the previous good stable version with:
|
||||
|
||||
```bash
|
||||
./scripts/build-npm.sh --skip-checks
|
||||
./scripts/rollback-latest.sh <stable-version>
|
||||
```
|
||||
|
||||
### 3. Preview (optional)
|
||||
That keeps history intact while restoring the default install path quickly.
|
||||
|
||||
See what npm will publish:
|
||||
## Notes for CI
|
||||
|
||||
```bash
|
||||
cd cli && npm pack --dry-run
|
||||
```
|
||||
The repo includes a manual GitHub Actions release workflow at [`.github/workflows/release.yml`](../.github/workflows/release.yml).
|
||||
|
||||
### 4. Publish
|
||||
Recommended CI release setup:
|
||||
|
||||
```bash
|
||||
cd cli && npm publish --access public
|
||||
```
|
||||
- use npm trusted publishing via GitHub OIDC
|
||||
- require approval through the `npm-release` environment
|
||||
- run releases from `release/X.Y.Z`
|
||||
- use canary first, then stable
|
||||
|
||||
### 5. Restore dev package.json
|
||||
## Related Files
|
||||
|
||||
After publishing, restore the workspace-aware `package.json`:
|
||||
|
||||
```bash
|
||||
mv cli/package.dev.json cli/package.json
|
||||
```
|
||||
|
||||
### 6. Commit and tag
|
||||
|
||||
```bash
|
||||
git add cli/package.json cli/src/index.ts
|
||||
git commit -m "chore: bump version to X.Y.Z"
|
||||
git tag vX.Y.Z
|
||||
```
|
||||
|
||||
## package.dev.json
|
||||
|
||||
During development, `cli/package.json` contains `workspace:*` references like:
|
||||
|
||||
```json
|
||||
{
|
||||
"dependencies": {
|
||||
"@paperclipai/server": "workspace:*",
|
||||
"@paperclipai/db": "workspace:*"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
These tell pnpm to resolve those packages from the local monorepo. This is great for development but **npm doesn't understand `workspace:*`** — publishing with these references would cause install failures for users.
|
||||
|
||||
The build script solves this with a two-file swap:
|
||||
|
||||
1. **Before building:** `cli/package.json` has `workspace:*` refs (the dev version).
|
||||
2. **During build (`build-npm.sh` step 4):**
|
||||
- The dev `package.json` is copied to `package.dev.json` as a backup.
|
||||
- `generate-npm-package-json.mjs` reads every workspace package's `package.json`, collects all their external npm dependencies, and writes a new `cli/package.json` with those real dependency ranges — no `workspace:*` refs.
|
||||
3. **After publishing:** you restore the dev version with `mv package.dev.json package.json`.
|
||||
|
||||
The generated publishable `package.json` looks like:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "paperclipai",
|
||||
"version": "0.1.0",
|
||||
"bin": { "paperclipai": "./dist/index.js" },
|
||||
"dependencies": {
|
||||
"express": "^5.1.0",
|
||||
"postgres": "^3.4.5",
|
||||
"commander": "^13.1.0"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
`package.dev.json` is listed in `.gitignore` — it only exists temporarily on disk during the build/publish cycle.
|
||||
|
||||
## How the bundle works
|
||||
|
||||
The CLI is a monorepo package that imports code from `@paperclipai/server`, `@paperclipai/db`, `@paperclipai/shared`, and several adapter packages. These workspace packages don't exist on npm.
|
||||
|
||||
**esbuild** bundles all workspace TypeScript code into a single `dist/index.js` file (~250kb). External npm packages (express, postgres, zod, etc.) are left as normal `import` statements — they get installed by npm when a user runs `npx paperclipai onboard`.
|
||||
|
||||
The esbuild configuration lives at `cli/esbuild.config.mjs`. It automatically reads every workspace package's `package.json` to determine which dependencies are external (real npm packages) vs. internal (workspace code to bundle).
|
||||
|
||||
## Forbidden token enforcement
|
||||
|
||||
The build process includes the same forbidden-token check used by the git pre-commit hook. This catches any accidentally committed tokens before they reach npm.
|
||||
|
||||
- Token list: `.git/hooks/forbidden-tokens.txt` (one token per line, `#` comments supported)
|
||||
- The file lives inside `.git/` and is never committed
|
||||
- If the file is missing, the check passes — contributors without the list can still build
|
||||
- The script never prints which tokens are being searched for
|
||||
- Matches are printed so you know which files to fix, but not which token triggered it
|
||||
|
||||
Run the check standalone:
|
||||
|
||||
```bash
|
||||
pnpm check:tokens
|
||||
```
|
||||
|
||||
## npm scripts reference
|
||||
|
||||
| Script | Command | Description |
|
||||
|---|---|---|
|
||||
| `bump-and-publish` | `pnpm bump-and-publish <type>` | One-command bump + build + publish + commit + tag |
|
||||
| `build:npm` | `pnpm build:npm` | Full build (check + typecheck + bundle + package.json) |
|
||||
| `version:bump` | `pnpm version:bump <type>` | Bump CLI version |
|
||||
| `check:tokens` | `pnpm check:tokens` | Run forbidden token check only |
|
||||
- [`scripts/build-npm.sh`](../scripts/build-npm.sh)
|
||||
- [`scripts/generate-npm-package-json.mjs`](../scripts/generate-npm-package-json.mjs)
|
||||
- [`cli/esbuild.config.mjs`](../cli/esbuild.config.mjs)
|
||||
- [`doc/RELEASING.md`](RELEASING.md)
|
||||
|
||||
422
doc/RELEASING.md
Normal file
422
doc/RELEASING.md
Normal file
@@ -0,0 +1,422 @@
|
||||
# Releasing Paperclip
|
||||
|
||||
Maintainer runbook for shipping a full Paperclip release across npm, GitHub, and the website-facing changelog surface.
|
||||
|
||||
The release model is branch-driven:
|
||||
|
||||
1. Start a release train on `release/X.Y.Z`
|
||||
2. Draft the stable changelog on that branch
|
||||
3. Publish one or more canaries from that branch
|
||||
4. Publish stable from that same branch head
|
||||
5. Push the branch commit and tag
|
||||
6. Create the GitHub Release
|
||||
7. Merge `release/X.Y.Z` back to `master` without squash or rebase
|
||||
|
||||
## Release Surfaces
|
||||
|
||||
Every release has four separate surfaces:
|
||||
|
||||
1. **Verification** — the exact git SHA passes typecheck, tests, and build
|
||||
2. **npm** — `paperclipai` and public workspace packages are published
|
||||
3. **GitHub** — the stable release gets a git tag and GitHub Release
|
||||
4. **Website / announcements** — the stable changelog is published externally and announced
|
||||
|
||||
A release is done only when all four surfaces are handled.
|
||||
|
||||
## Core Invariants
|
||||
|
||||
- Canary and stable for `X.Y.Z` must come from the same `release/X.Y.Z` branch.
|
||||
- The release scripts must run from the matching `release/X.Y.Z` branch.
|
||||
- Once `vX.Y.Z` exists locally, on GitHub, or on npm, that release train is frozen.
|
||||
- Do not squash-merge or rebase-merge a release branch PR back to `master`.
|
||||
- The stable changelog is always `releases/vX.Y.Z.md`. Never create canary changelog files.
|
||||
|
||||
The reason for the merge rule is simple: the tag must keep pointing at the exact published commit. Squash or rebase breaks that property.
|
||||
|
||||
## TL;DR
|
||||
|
||||
### 1. Start the release train
|
||||
|
||||
Use this to compute the next version, create or resume the branch, create or resume a dedicated worktree, and push the branch to GitHub.
|
||||
|
||||
```bash
|
||||
./scripts/release-start.sh patch
|
||||
```
|
||||
|
||||
That script:
|
||||
|
||||
- fetches the release remote and tags
|
||||
- computes the next stable version from the latest `v*` tag
|
||||
- creates or resumes `release/X.Y.Z`
|
||||
- creates or resumes a dedicated worktree
|
||||
- pushes the branch to the remote by default
|
||||
- refuses to reuse a frozen release train
|
||||
|
||||
### 2. Draft the stable changelog
|
||||
|
||||
From the release worktree:
|
||||
|
||||
```bash
|
||||
VERSION=X.Y.Z
|
||||
claude --print --output-format stream-json --verbose --dangerously-skip-permissions --model claude-opus-4-6 "Use the release-changelog skill to draft or update releases/v${VERSION}.md for Paperclip. Read doc/RELEASING.md and skills/release-changelog/SKILL.md, then generate the stable changelog for v${VERSION} from commits since the last stable tag. Do not create a canary changelog."
|
||||
```
|
||||
|
||||
### 3. Verify and publish a canary
|
||||
|
||||
```bash
|
||||
./scripts/release-preflight.sh canary patch
|
||||
./scripts/release.sh patch --canary --dry-run
|
||||
./scripts/release.sh patch --canary
|
||||
PAPERCLIPAI_VERSION=canary ./scripts/docker-onboard-smoke.sh
|
||||
```
|
||||
|
||||
Users install canaries with:
|
||||
|
||||
```bash
|
||||
npx paperclipai@canary onboard
|
||||
```
|
||||
|
||||
### 4. Publish stable
|
||||
|
||||
```bash
|
||||
./scripts/release-preflight.sh stable patch
|
||||
./scripts/release.sh patch --dry-run
|
||||
./scripts/release.sh patch
|
||||
git push public-gh HEAD --follow-tags
|
||||
./scripts/create-github-release.sh X.Y.Z
|
||||
```
|
||||
|
||||
Then open a PR from `release/X.Y.Z` to `master` and merge without squash or rebase.
|
||||
|
||||
## Release Branches
|
||||
|
||||
Paperclip uses one release branch per target stable version:
|
||||
|
||||
- `release/0.3.0`
|
||||
- `release/0.3.1`
|
||||
- `release/1.0.0`
|
||||
|
||||
Do not create separate per-canary branches like `canary/0.3.0-1`. A canary is just a prerelease snapshot of the same stable train.
|
||||
|
||||
## Script Entry Points
|
||||
|
||||
- [`scripts/release-start.sh`](../scripts/release-start.sh) — create or resume the release train branch/worktree
|
||||
- [`scripts/release-preflight.sh`](../scripts/release-preflight.sh) — validate branch, version plan, git/npm state, and verification gate
|
||||
- [`scripts/release.sh`](../scripts/release.sh) — publish canary or stable from the release branch
|
||||
- [`scripts/create-github-release.sh`](../scripts/create-github-release.sh) — create or update the GitHub Release after pushing the tag
|
||||
- [`scripts/rollback-latest.sh`](../scripts/rollback-latest.sh) — repoint `latest` to the last good stable version
|
||||
|
||||
## Detailed Workflow
|
||||
|
||||
### 1. Start or resume the release train
|
||||
|
||||
Run:
|
||||
|
||||
```bash
|
||||
./scripts/release-start.sh <patch|minor|major>
|
||||
```
|
||||
|
||||
Useful options:
|
||||
|
||||
```bash
|
||||
./scripts/release-start.sh patch --dry-run
|
||||
./scripts/release-start.sh minor --worktree-dir ../paperclip-release-0.4.0
|
||||
./scripts/release-start.sh patch --no-push
|
||||
```
|
||||
|
||||
The script is intentionally idempotent:
|
||||
|
||||
- if `release/X.Y.Z` already exists locally, it reuses it
|
||||
- if the branch already exists on the remote, it resumes it locally
|
||||
- if the branch is already checked out in another worktree, it points you there
|
||||
- if `vX.Y.Z` already exists locally, remotely, or on npm, it refuses to reuse that train
|
||||
|
||||
### 2. Write the stable changelog early
|
||||
|
||||
Create or update:
|
||||
|
||||
- `releases/vX.Y.Z.md`
|
||||
|
||||
That file is for the eventual stable release. It should not include `-canary` in the filename or heading.
|
||||
|
||||
Recommended structure:
|
||||
|
||||
- `Breaking Changes` when needed
|
||||
- `Highlights`
|
||||
- `Improvements`
|
||||
- `Fixes`
|
||||
- `Upgrade Guide` when needed
|
||||
- `Contributors` — @-mention every contributor by GitHub username (no emails)
|
||||
|
||||
Package-level `CHANGELOG.md` files are generated as part of the release mechanics. They are not the main release narrative.
|
||||
|
||||
### 3. Run release preflight
|
||||
|
||||
From the `release/X.Y.Z` worktree:
|
||||
|
||||
```bash
|
||||
./scripts/release-preflight.sh canary <patch|minor|major>
|
||||
# or
|
||||
./scripts/release-preflight.sh stable <patch|minor|major>
|
||||
```
|
||||
|
||||
The preflight script now checks all of the following before it runs the verification gate:
|
||||
|
||||
- the worktree is clean, including untracked files
|
||||
- the current branch matches the computed `release/X.Y.Z`
|
||||
- the release train is not frozen
|
||||
- the target version is still free on npm
|
||||
- the target tag does not already exist locally or remotely
|
||||
- whether the remote release branch already exists
|
||||
- whether `releases/vX.Y.Z.md` is present
|
||||
|
||||
Then it runs:
|
||||
|
||||
```bash
|
||||
pnpm -r typecheck
|
||||
pnpm test:run
|
||||
pnpm build
|
||||
```
|
||||
|
||||
### 4. Publish one or more canaries
|
||||
|
||||
Run:
|
||||
|
||||
```bash
|
||||
./scripts/release.sh <patch|minor|major> --canary --dry-run
|
||||
./scripts/release.sh <patch|minor|major> --canary
|
||||
```
|
||||
|
||||
Result:
|
||||
|
||||
- npm gets a prerelease such as `1.2.3-canary.0` under dist-tag `canary`
|
||||
- `latest` is unchanged
|
||||
- no git tag is created
|
||||
- no GitHub Release is created
|
||||
- the worktree returns to clean after the script finishes
|
||||
|
||||
Guardrails:
|
||||
|
||||
- the script refuses to run from the wrong branch
|
||||
- the script refuses to publish from a frozen train
|
||||
- the canary is always derived from the next stable version
|
||||
- if the stable notes file is missing, the script warns you so the notes are not forgotten
|
||||
|
||||
Concrete example:
|
||||
|
||||
- if the latest stable is `0.2.7`, a patch canary targets `0.2.8-canary.0`
|
||||
- `0.2.7-canary.N` is invalid because `0.2.7` is already stable
|
||||
|
||||
### 5. Smoke test the canary
|
||||
|
||||
Run the actual install path in Docker:
|
||||
|
||||
```bash
|
||||
PAPERCLIPAI_VERSION=canary ./scripts/docker-onboard-smoke.sh
|
||||
```
|
||||
|
||||
Useful isolated variants:
|
||||
|
||||
```bash
|
||||
HOST_PORT=3232 DATA_DIR=./data/release-smoke-canary PAPERCLIPAI_VERSION=canary ./scripts/docker-onboard-smoke.sh
|
||||
HOST_PORT=3233 DATA_DIR=./data/release-smoke-stable PAPERCLIPAI_VERSION=latest ./scripts/docker-onboard-smoke.sh
|
||||
```
|
||||
|
||||
If you want to exercise onboarding from the current committed ref instead of npm, use:
|
||||
|
||||
```bash
|
||||
./scripts/clean-onboard-ref.sh
|
||||
PAPERCLIP_PORT=3234 ./scripts/clean-onboard-ref.sh
|
||||
./scripts/clean-onboard-ref.sh HEAD
|
||||
```
|
||||
|
||||
Minimum checks:
|
||||
|
||||
- `npx paperclipai@canary onboard` installs
|
||||
- onboarding completes without crashes
|
||||
- the server boots
|
||||
- the UI loads
|
||||
- basic company creation and dashboard load work
|
||||
|
||||
If smoke testing fails:
|
||||
|
||||
1. stop the stable release
|
||||
2. fix the issue on the same `release/X.Y.Z` branch
|
||||
3. publish another canary
|
||||
4. rerun smoke testing
|
||||
|
||||
### 6. Publish stable from the same release branch
|
||||
|
||||
Once the branch head is vetted, run:
|
||||
|
||||
```bash
|
||||
./scripts/release.sh <patch|minor|major> --dry-run
|
||||
./scripts/release.sh <patch|minor|major>
|
||||
```
|
||||
|
||||
Stable publish:
|
||||
|
||||
- publishes `X.Y.Z` to npm under `latest`
|
||||
- creates the local release commit
|
||||
- creates the local tag `vX.Y.Z`
|
||||
|
||||
Stable publish refuses to proceed if:
|
||||
|
||||
- the current branch is not `release/X.Y.Z`
|
||||
- the remote release branch does not exist yet
|
||||
- the stable notes file is missing
|
||||
- the target tag already exists locally or remotely
|
||||
- the stable version already exists on npm
|
||||
|
||||
Those checks intentionally freeze the train after stable publish.
|
||||
|
||||
### 7. Push the stable branch commit and tag
|
||||
|
||||
After stable publish succeeds:
|
||||
|
||||
```bash
|
||||
git push public-gh HEAD --follow-tags
|
||||
./scripts/create-github-release.sh X.Y.Z
|
||||
```
|
||||
|
||||
The GitHub Release notes come from:
|
||||
|
||||
- `releases/vX.Y.Z.md`
|
||||
|
||||
### 8. Merge the release branch back to `master`
|
||||
|
||||
Open a PR:
|
||||
|
||||
- base: `master`
|
||||
- head: `release/X.Y.Z`
|
||||
|
||||
Merge rule:
|
||||
|
||||
- allowed: merge commit or fast-forward
|
||||
- forbidden: squash merge
|
||||
- forbidden: rebase merge
|
||||
|
||||
Post-merge verification:
|
||||
|
||||
```bash
|
||||
git fetch public-gh --tags
|
||||
git merge-base --is-ancestor "vX.Y.Z" "public-gh/master"
|
||||
```
|
||||
|
||||
That command must succeed. If it fails, the published tagged commit is not reachable from `master`, which means the merge strategy was wrong.
|
||||
|
||||
### 9. Finish the external surfaces
|
||||
|
||||
After GitHub is correct:
|
||||
|
||||
- publish the changelog on the website
|
||||
- write and send the announcement copy
|
||||
- ensure public docs and install guidance point to the stable version
|
||||
|
||||
## GitHub Actions Release
|
||||
|
||||
There is also a manual workflow at [`.github/workflows/release.yml`](../.github/workflows/release.yml).
|
||||
|
||||
Use it from the Actions tab on the relevant `release/X.Y.Z` branch:
|
||||
|
||||
1. Choose `Release`
|
||||
2. Choose `channel`: `canary` or `stable`
|
||||
3. Choose `bump`: `patch`, `minor`, or `major`
|
||||
4. Choose whether this is a `dry_run`
|
||||
5. Run it from the release branch, not from `master`
|
||||
|
||||
The workflow:
|
||||
|
||||
- reruns `typecheck`, `test:run`, and `build`
|
||||
- gates publish behind the `npm-release` environment
|
||||
- can publish canaries without touching `latest`
|
||||
- can publish stable, push the stable branch commit and tag, and create the GitHub Release
|
||||
|
||||
It does not merge the release branch back to `master` for you.
|
||||
|
||||
## Release Checklist
|
||||
|
||||
### Before any publish
|
||||
|
||||
- [ ] The release train exists on `release/X.Y.Z`
|
||||
- [ ] The working tree is clean, including untracked files
|
||||
- [ ] If package manifests changed, the CI-owned `pnpm-lock.yaml` refresh is already merged on `master` before the train is cut
|
||||
- [ ] The required verification gate passed on the exact branch head you want to publish
|
||||
- [ ] The bump type is correct for the user-visible impact
|
||||
- [ ] The stable changelog file exists or is ready at `releases/vX.Y.Z.md`
|
||||
- [ ] You know which previous stable version you would roll back to if needed
|
||||
|
||||
### Before a stable
|
||||
|
||||
- [ ] The candidate has already passed smoke testing
|
||||
- [ ] The remote `release/X.Y.Z` branch exists
|
||||
- [ ] You are ready to push the stable branch commit and tag immediately after npm publish
|
||||
- [ ] You are ready to create the GitHub Release immediately after the push
|
||||
- [ ] You are ready to open the PR back to `master`
|
||||
|
||||
### After a stable
|
||||
|
||||
- [ ] `npm view paperclipai@latest version` matches the new stable version
|
||||
- [ ] The git tag exists on GitHub
|
||||
- [ ] The GitHub Release exists and uses `releases/vX.Y.Z.md`
|
||||
- [ ] `vX.Y.Z` is reachable from `master`
|
||||
- [ ] The website changelog is updated
|
||||
- [ ] Announcement copy matches the stable release, not the canary
|
||||
|
||||
## Failure Playbooks
|
||||
|
||||
### If the canary publishes but the smoke test fails
|
||||
|
||||
Do not publish stable.
|
||||
|
||||
Instead:
|
||||
|
||||
1. fix the issue on `release/X.Y.Z`
|
||||
2. publish another canary
|
||||
3. rerun smoke testing
|
||||
|
||||
### If stable npm publish succeeds but push or GitHub release creation fails
|
||||
|
||||
This is a partial release. npm is already live.
|
||||
|
||||
Do this immediately:
|
||||
|
||||
1. fix the git or GitHub issue from the same checkout
|
||||
2. push the stable branch commit and tag
|
||||
3. create the GitHub Release
|
||||
|
||||
Do not republish the same version.
|
||||
|
||||
### If `latest` is broken after stable publish
|
||||
|
||||
Preview:
|
||||
|
||||
```bash
|
||||
./scripts/rollback-latest.sh X.Y.Z --dry-run
|
||||
```
|
||||
|
||||
Roll back:
|
||||
|
||||
```bash
|
||||
./scripts/rollback-latest.sh X.Y.Z
|
||||
```
|
||||
|
||||
This does not unpublish anything. It only moves the `latest` dist-tag back to the last good stable release.
|
||||
|
||||
Then fix forward with a new patch release.
|
||||
|
||||
### If the GitHub Release notes are wrong
|
||||
|
||||
Re-run:
|
||||
|
||||
```bash
|
||||
./scripts/create-github-release.sh X.Y.Z
|
||||
```
|
||||
|
||||
If the release already exists, the script updates it.
|
||||
|
||||
## Related Docs
|
||||
|
||||
- [doc/PUBLISHING.md](PUBLISHING.md) — low-level npm build and packaging internals
|
||||
- [skills/release/SKILL.md](../skills/release/SKILL.md) — agent release coordination workflow
|
||||
- [skills/release-changelog/SKILL.md](../skills/release-changelog/SKILL.md) — stable changelog drafting workflow
|
||||
1617
doc/plugins/PLUGIN_SPEC.md
Normal file
1617
doc/plugins/PLUGIN_SPEC.md
Normal file
File diff suppressed because it is too large
Load Diff
1738
doc/plugins/ideas-from-opencode.md
Normal file
1738
doc/plugins/ideas-from-opencode.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -10,5 +10,9 @@ services:
|
||||
PAPERCLIP_HOME: "/paperclip"
|
||||
OPENAI_API_KEY: "${OPENAI_API_KEY:-}"
|
||||
ANTHROPIC_API_KEY: "${ANTHROPIC_API_KEY:-}"
|
||||
PAPERCLIP_DEPLOYMENT_MODE: "authenticated"
|
||||
PAPERCLIP_DEPLOYMENT_EXPOSURE: "private"
|
||||
PAPERCLIP_PUBLIC_URL: "${PAPERCLIP_PUBLIC_URL:-http://localhost:3100}"
|
||||
BETTER_AUTH_SECRET: "${BETTER_AUTH_SECRET:?BETTER_AUTH_SECRET must be set}"
|
||||
volumes:
|
||||
- "${PAPERCLIP_DATA_DIR:-./data/docker-paperclip}:/paperclip"
|
||||
|
||||
@@ -5,6 +5,11 @@ services:
|
||||
POSTGRES_USER: paperclip
|
||||
POSTGRES_PASSWORD: paperclip
|
||||
POSTGRES_DB: paperclip
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U paperclip -d paperclip"]
|
||||
interval: 2s
|
||||
timeout: 5s
|
||||
retries: 30
|
||||
ports:
|
||||
- "5432:5432"
|
||||
volumes:
|
||||
@@ -18,8 +23,16 @@ services:
|
||||
DATABASE_URL: postgres://paperclip:paperclip@db:5432/paperclip
|
||||
PORT: "3100"
|
||||
SERVE_UI: "true"
|
||||
PAPERCLIP_DEPLOYMENT_MODE: "authenticated"
|
||||
PAPERCLIP_DEPLOYMENT_EXPOSURE: "private"
|
||||
PAPERCLIP_PUBLIC_URL: "${PAPERCLIP_PUBLIC_URL:-http://localhost:3100}"
|
||||
BETTER_AUTH_SECRET: "${BETTER_AUTH_SECRET:?BETTER_AUTH_SECRET must be set}"
|
||||
volumes:
|
||||
- paperclip-data:/paperclip
|
||||
depends_on:
|
||||
- db
|
||||
db:
|
||||
condition: service_healthy
|
||||
|
||||
volumes:
|
||||
pgdata:
|
||||
paperclip-data:
|
||||
|
||||
8
docker/openclaw-smoke/Dockerfile
Normal file
8
docker/openclaw-smoke/Dockerfile
Normal file
@@ -0,0 +1,8 @@
|
||||
# Minimal image for the OpenClaw smoke-test webhook receiver.
FROM node:22-alpine

WORKDIR /app
# The server is a single dependency-free Node script; no install step needed.
COPY server.mjs /app/server.mjs

# Default listen port; the server reads PORT at runtime, so this is documentation only.
EXPOSE 8787

CMD ["node", "/app/server.mjs"]
|
||||
103
docker/openclaw-smoke/server.mjs
Normal file
103
docker/openclaw-smoke/server.mjs
Normal file
@@ -0,0 +1,103 @@
|
||||
import http from "node:http";
|
||||
|
||||
// Runtime configuration, overridable via environment variables.
const port = Number.parseInt(process.env.PORT ?? "8787", 10);
// Path that accepts webhook POSTs; falls back to /webhook when unset or blank.
const webhookPath = process.env.OPENCLAW_SMOKE_PATH?.trim() || "/webhook";
// When non-empty, webhook POSTs must carry exactly this Authorization header value.
const expectedAuthHeader = process.env.OPENCLAW_SMOKE_AUTH?.trim() || "";
// Request bodies larger than this are rejected (413 by the handler).
const maxBodyBytes = 1_000_000;
// In-memory cap on recorded events; oldest entries are dropped past this.
const maxEvents = 200;

// Recorded webhook deliveries, oldest first; bounded by maxEvents via trimEvents().
const events = [];
// Monotonically increasing counter used to mint `evt-<n>` ids.
let nextId = 1;
|
||||
|
||||
function writeJson(res, status, payload) {
|
||||
res.statusCode = status;
|
||||
res.setHeader("content-type", "application/json; charset=utf-8");
|
||||
res.end(JSON.stringify(payload));
|
||||
}
|
||||
|
||||
/**
 * Buffer the incoming request body and resolve it as a UTF-8 string.
 * Rejects with Error("payload_too_large") and destroys the request once
 * more than maxBodyBytes have been received.
 */
function readBody(req) {
  return new Promise((resolve, reject) => {
    let received = 0;
    const parts = [];

    req.on("data", (part) => {
      received += part.length;
      if (received > maxBodyBytes) {
        // Oversized: fail the promise and tear the socket down; do not keep the chunk.
        reject(new Error("payload_too_large"));
        req.destroy();
      } else {
        parts.push(part);
      }
    });

    req.on("end", () => resolve(Buffer.concat(parts).toString("utf8")));
    req.on("error", reject);
  });
}
|
||||
|
||||
/** Drop the oldest recorded events so `events` never holds more than `maxEvents`. */
function trimEvents() {
  const excess = events.length - maxEvents;
  if (excess > 0) {
    events.splice(0, excess);
  }
}
|
||||
|
||||
// Request router for the smoke-test webhook receiver.
// Routes (checked in order):
//   GET  /health        -> liveness probe with current event count
//   GET  /events        -> dump every recorded event
//   POST /reset         -> clear all recorded events
//   POST <webhookPath>  -> record one webhook delivery (auth-checked if configured)
//   anything else       -> 404
const server = http.createServer(async (req, res) => {
  // Defensive defaults: method/url are optional on IncomingMessage's type.
  const method = req.method ?? "GET";
  const url = req.url ?? "/";

  if (method === "GET" && url === "/health") {
    writeJson(res, 200, { ok: true, webhookPath, events: events.length });
    return;
  }

  if (method === "GET" && url === "/events") {
    writeJson(res, 200, { count: events.length, events });
    return;
  }

  if (method === "POST" && url === "/reset") {
    // Clear in place so other references to the array stay valid.
    events.length = 0;
    writeJson(res, 200, { ok: true });
    return;
  }

  if (method === "POST" && url === webhookPath) {
    // Auth is enforced only when OPENCLAW_SMOKE_AUTH was configured (non-empty).
    const authorization = req.headers.authorization ?? "";
    if (expectedAuthHeader && authorization !== expectedAuthHeader) {
      writeJson(res, 401, { error: "unauthorized" });
      return;
    }

    try {
      const raw = await readBody(req);
      // Prefer parsed JSON; fall back to wrapping the raw text so nothing is lost.
      let body = null;
      try {
        body = raw.length > 0 ? JSON.parse(raw) : null;
      } catch {
        body = { raw };
      }

      const event = {
        id: `evt-${nextId++}`,
        receivedAt: new Date().toISOString(),
        method,
        path: url,
        // Record presence only — never store the credential itself.
        authorizationPresent: Boolean(authorization),
        body,
      };
      events.push(event);
      trimEvents(); // keep the in-memory buffer bounded at maxEvents
      writeJson(res, 200, { ok: true, received: true, eventId: event.id, count: events.length });
    } catch (err) {
      // readBody rejects with "payload_too_large" for oversized bodies -> 413; anything else -> 500.
      const code = err instanceof Error && err.message === "payload_too_large" ? 413 : 500;
      writeJson(res, code, { error: err instanceof Error ? err.message : "unknown_error" });
    }
    return;
  }

  writeJson(res, 404, { error: "not_found" });
});
|
||||
|
||||
// Bind on all interfaces so the receiver is reachable from Docker containers.
server.listen(port, "0.0.0.0", () => {
  const banner = `[openclaw-smoke] listening on :${port} path=${webhookPath}`;
  // eslint-disable-next-line no-console
  console.log(banner);
});
|
||||
@@ -47,6 +47,14 @@ If resume fails with an unknown session error, the adapter automatically retries
|
||||
|
||||
The adapter creates a temporary directory with symlinks to Paperclip skills and passes it via `--add-dir`. This makes skills discoverable without polluting the agent's working directory.
|
||||
|
||||
For manual local CLI usage outside heartbeat runs (for example running as `claudecoder` directly), use:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai agent local-cli claudecoder --company-id <company-id>
|
||||
```
|
||||
|
||||
This installs Paperclip skills in `~/.claude/skills`, creates an agent API key, and prints shell exports to run as that agent.
|
||||
|
||||
## Environment Test
|
||||
|
||||
Use the "Test Environment" button in the UI to validate the adapter config. It checks:
|
||||
|
||||
@@ -30,6 +30,14 @@ Codex uses `previous_response_id` for session continuity. The adapter serializes
|
||||
|
||||
The adapter symlinks Paperclip skills into the global Codex skills directory (`~/.codex/skills`). Existing user skills are not overwritten.
|
||||
|
||||
For manual local CLI usage outside heartbeat runs (for example running as `codexcoder` directly), use:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai agent local-cli codexcoder --company-id <company-id>
|
||||
```
|
||||
|
||||
This installs any missing skills, creates an agent API key, and prints shell exports to run as that agent.
|
||||
|
||||
## Environment Test
|
||||
|
||||
The environment test checks:
|
||||
|
||||
@@ -20,6 +20,8 @@ When a heartbeat fires, Paperclip:
|
||||
|---------|----------|-------------|
|
||||
| [Claude Local](/adapters/claude-local) | `claude_local` | Runs Claude Code CLI locally |
|
||||
| [Codex Local](/adapters/codex-local) | `codex_local` | Runs OpenAI Codex CLI locally |
|
||||
| OpenCode Local | `opencode_local` | Runs OpenCode CLI locally (multi-provider `provider/model`) |
|
||||
| OpenClaw | `openclaw` | Sends wake payloads to an OpenClaw webhook |
|
||||
| [Process](/adapters/process) | `process` | Executes arbitrary shell commands |
|
||||
| [HTTP](/adapters/http) | `http` | Sends webhooks to external agents |
|
||||
|
||||
@@ -52,7 +54,7 @@ Three registries consume these modules:
|
||||
|
||||
## Choosing an Adapter
|
||||
|
||||
- **Need a coding agent?** Use `claude_local` or `codex_local`
|
||||
- **Need a coding agent?** Use `claude_local`, `codex_local`, or `opencode_local`
|
||||
- **Need to run a script or command?** Use `process`
|
||||
- **Need to call an external service?** Use `http`
|
||||
- **Need something custom?** [Create your own adapter](/adapters/creating-an-adapter)
|
||||
|
||||
@@ -123,6 +123,18 @@ GET /api/companies/{companyId}/org
|
||||
|
||||
Returns the full organizational tree for the company.
|
||||
|
||||
## List Adapter Models
|
||||
|
||||
```
|
||||
GET /api/companies/{companyId}/adapters/{adapterType}/models
|
||||
```
|
||||
|
||||
Returns selectable models for an adapter type.
|
||||
|
||||
- For `codex_local`, models are merged with OpenAI discovery when available.
|
||||
- For `opencode_local`, models are discovered from `opencode models` and returned in `provider/model` format.
|
||||
- `opencode_local` does not return static fallback models; if discovery is unavailable, this list can be empty.
|
||||
|
||||
## Config Revisions
|
||||
|
||||
```
|
||||
|
||||
@@ -48,12 +48,20 @@ pnpm dev --tailscale-auth
|
||||
|
||||
This binds the server to `0.0.0.0` for private-network access.
|
||||
|
||||
Alias:
|
||||
|
||||
```sh
|
||||
pnpm dev --authenticated-private
|
||||
```
|
||||
|
||||
Allow additional private hostnames:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai allowed-hostname dotta-macbook-pro
|
||||
```
|
||||
|
||||
For full setup and troubleshooting, see [Tailscale Private Access](/deploy/tailscale-private-access).
|
||||
|
||||
## Health Checks
|
||||
|
||||
```sh
|
||||
|
||||
77
docs/deploy/tailscale-private-access.md
Normal file
77
docs/deploy/tailscale-private-access.md
Normal file
@@ -0,0 +1,77 @@
|
||||
---
|
||||
title: Tailscale Private Access
|
||||
summary: Run Paperclip with Tailscale-friendly host binding and connect from other devices
|
||||
---
|
||||
|
||||
Use this when you want to access Paperclip over Tailscale (or a private LAN/VPN) instead of only `localhost`.
|
||||
|
||||
## 1. Start Paperclip in private authenticated mode
|
||||
|
||||
```sh
|
||||
pnpm dev --tailscale-auth
|
||||
```
|
||||
|
||||
This configures:
|
||||
|
||||
- `PAPERCLIP_DEPLOYMENT_MODE=authenticated`
|
||||
- `PAPERCLIP_DEPLOYMENT_EXPOSURE=private`
|
||||
- `PAPERCLIP_AUTH_BASE_URL_MODE=auto`
|
||||
- `HOST=0.0.0.0` (bind on all interfaces)
|
||||
|
||||
Equivalent flag:
|
||||
|
||||
```sh
|
||||
pnpm dev --authenticated-private
|
||||
```
|
||||
|
||||
## 2. Find your reachable Tailscale address
|
||||
|
||||
From the machine running Paperclip:
|
||||
|
||||
```sh
|
||||
tailscale ip -4
|
||||
```
|
||||
|
||||
You can also use your Tailscale MagicDNS hostname (for example `my-macbook.tailnet.ts.net`).
|
||||
|
||||
## 3. Open Paperclip from another device
|
||||
|
||||
Use the Tailscale IP or MagicDNS host with the Paperclip port:
|
||||
|
||||
```txt
|
||||
http://<tailscale-host-or-ip>:3100
|
||||
```
|
||||
|
||||
Example:
|
||||
|
||||
```txt
|
||||
http://my-macbook.tailnet.ts.net:3100
|
||||
```
|
||||
|
||||
## 4. Allow custom private hostnames when needed
|
||||
|
||||
If you access Paperclip with a custom private hostname, add it to the allowlist:
|
||||
|
||||
```sh
|
||||
pnpm paperclipai allowed-hostname my-macbook.tailnet.ts.net
|
||||
```
|
||||
|
||||
## 5. Verify the server is reachable
|
||||
|
||||
From a remote Tailscale-connected device:
|
||||
|
||||
```sh
|
||||
curl http://<tailscale-host-or-ip>:3100/api/health
|
||||
```
|
||||
|
||||
Expected result:
|
||||
|
||||
```json
|
||||
{"status":"ok"}
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- Login or redirect errors on a private hostname: add it to the allowlist with `pnpm paperclipai allowed-hostname <hostname>`.
||||
- App only works on `localhost`: make sure you started with `--tailscale-auth` (or set `HOST=0.0.0.0` in private mode).
|
||||
- Can connect locally but not remotely: verify both devices are on the same Tailscale network and port `3100` is reachable.
|
||||
@@ -73,6 +73,7 @@
|
||||
"pages": [
|
||||
"deploy/overview",
|
||||
"deploy/local-development",
|
||||
"deploy/tailscale-private-access",
|
||||
"deploy/docker",
|
||||
"deploy/deployment-modes",
|
||||
"deploy/database",
|
||||
|
||||
@@ -27,6 +27,14 @@ Create agents from the Agents page. Each agent requires:
|
||||
- **Adapter config** — runtime-specific settings (working directory, model, prompt, etc.)
|
||||
- **Capabilities** — short description of what this agent does
|
||||
|
||||
Common adapter choices:
|
||||
- `claude_local` / `codex_local` / `opencode_local` for local coding agents
|
||||
- `openclaw` / `http` for webhook-based external agents
|
||||
- `process` for generic local command execution
|
||||
|
||||
For `opencode_local`, configure an explicit `adapterConfig.model` (`provider/model`).
|
||||
Paperclip validates the selected model against live `opencode models` output.
|
||||
|
||||
## Agent Hiring via Governance
|
||||
|
||||
Agents can request to hire subordinates. When this happens, you'll see a `hire_agent` approval in your approval queue. Review the proposed agent config and approve or reject.
|
||||
|
||||
@@ -2,6 +2,97 @@
|
||||
|
||||
How to get OpenClaw running in a Docker container for local development and testing the Paperclip OpenClaw adapter integration.
|
||||
|
||||
## Automated Join Smoke Test (Recommended First)
|
||||
|
||||
Paperclip includes an end-to-end join smoke harness:
|
||||
|
||||
```bash
|
||||
pnpm smoke:openclaw-join
|
||||
```
|
||||
|
||||
The harness automates:
|
||||
|
||||
- invite creation (`allowedJoinTypes=agent`)
|
||||
- OpenClaw agent join request (`adapterType=openclaw`)
|
||||
- board approval
|
||||
- one-time API key claim (including invalid/replay claim checks)
|
||||
- wakeup callback delivery to a dockerized OpenClaw-style webhook receiver
|
||||
|
||||
By default, this uses a preconfigured Docker receiver image (`docker/openclaw-smoke`) so the run is deterministic and requires no manual OpenClaw config edits.
|
||||
|
||||
Permissions note:
|
||||
|
||||
- The harness performs board-governed actions (invite creation, join approval, wakeup of the new agent).
|
||||
- In authenticated mode, provide board/operator auth or the run exits early with an explicit permissions error.
|
||||
|
||||
## One-Command OpenClaw Gateway UI (Manual Docker Flow)
|
||||
|
||||
To spin up OpenClaw in Docker and print a host-browser dashboard URL in one command:
|
||||
|
||||
```bash
|
||||
pnpm smoke:openclaw-docker-ui
|
||||
```
|
||||
|
||||
Default behavior is zero-flag: you can run the command as-is with no pairing-related env vars.
|
||||
|
||||
What this command does:
|
||||
|
||||
- clones/updates `openclaw/openclaw` in `/tmp/openclaw-docker`
|
||||
- builds `openclaw:local` (unless `OPENCLAW_BUILD=0`)
|
||||
- writes isolated smoke config under `~/.openclaw-paperclip-smoke/openclaw.json` and Docker `.env`
|
||||
- pins agent model defaults to OpenAI (primary `openai/gpt-5.2`, fallback `openai/gpt-5.2-chat-latest`)
||||
- starts `openclaw-gateway` via Compose (with required `/tmp` tmpfs override)
|
||||
- probes and prints a Paperclip host URL that is reachable from inside OpenClaw Docker
|
||||
- waits for health and prints:
|
||||
- `http://127.0.0.1:18789/#token=...`
|
||||
- disables Control UI device pairing by default for local smoke ergonomics
|
||||
|
||||
Environment knobs:
|
||||
|
||||
- `OPENAI_API_KEY` (required; loaded from env or `~/.secrets`)
|
||||
- `OPENCLAW_DOCKER_DIR` (default `/tmp/openclaw-docker`)
|
||||
- `OPENCLAW_GATEWAY_PORT` (default `18789`)
|
||||
- `OPENCLAW_GATEWAY_TOKEN` (default random)
|
||||
- `OPENCLAW_BUILD=0` to skip rebuild
|
||||
- `OPENCLAW_OPEN_BROWSER=1` to auto-open the URL on macOS
|
||||
- `OPENCLAW_DISABLE_DEVICE_AUTH=1` (default) disables Control UI device pairing for local smoke
|
||||
- `OPENCLAW_DISABLE_DEVICE_AUTH=0` keeps pairing enabled (then approve browser with `devices` CLI commands)
|
||||
- `OPENCLAW_MODEL_PRIMARY` (default `openai/gpt-5.2`)
|
||||
- `OPENCLAW_MODEL_FALLBACK` (default `openai/gpt-5.2-chat-latest`)
|
||||
- `OPENCLAW_CONFIG_DIR` (default `~/.openclaw-paperclip-smoke`)
|
||||
- `OPENCLAW_RESET_STATE=1` (default) resets smoke agent state on each run to avoid stale auth/session drift
|
||||
- `PAPERCLIP_HOST_PORT` (default `3100`)
|
||||
- `PAPERCLIP_HOST_FROM_CONTAINER` (default `host.docker.internal`)
|
||||
|
||||
### Authenticated mode
|
||||
|
||||
If your Paperclip deployment is `authenticated`, provide auth context:
|
||||
|
||||
```bash
|
||||
PAPERCLIP_AUTH_HEADER="Bearer <token>" pnpm smoke:openclaw-join
|
||||
# or
|
||||
PAPERCLIP_COOKIE="your_session_cookie=..." pnpm smoke:openclaw-join
|
||||
```
|
||||
|
||||
### Network topology tips
|
||||
|
||||
- Local same-host smoke: default callback uses `http://127.0.0.1:<port>/webhook`.
|
||||
- Inside OpenClaw Docker, `127.0.0.1` points to the container itself, not your host Paperclip server.
|
||||
- For invite/onboarding URLs consumed by OpenClaw in Docker, use the script-printed Paperclip URL (typically `http://host.docker.internal:3100`).
|
||||
- If Paperclip rejects the container-visible host with a hostname error, allow it from host:
|
||||
|
||||
```bash
|
||||
pnpm paperclipai allowed-hostname host.docker.internal
|
||||
```
|
||||
|
||||
Then restart Paperclip and rerun the smoke script.
|
||||
- Docker/remote OpenClaw: prefer a reachable hostname (Docker host alias, Tailscale hostname, or public domain).
|
||||
- Authenticated/private mode: ensure hostnames are in the allowed list when required:
|
||||
|
||||
```bash
|
||||
pnpm paperclipai allowed-hostname <host>
|
||||
```
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **Docker Desktop v29+** (with Docker Sandbox support)
|
||||
|
||||
524
docs/specs/cliphub-plan.md
Normal file
524
docs/specs/cliphub-plan.md
Normal file
@@ -0,0 +1,524 @@
|
||||
# ClipHub: Marketplace for Paperclip Team Configurations
|
||||
|
||||
> The "app store" for whole-company AI teams — pre-built Paperclip configurations, agent blueprints, skills, and governance templates that ship real work from day one.
|
||||
|
||||
## 1. Vision & Positioning
|
||||
|
||||
**ClipHub** sells **entire team configurations** — org charts, agent roles, inter-agent workflows, governance rules, and project templates — for Paperclip-managed companies.
|
||||
|
||||
| Dimension | ClipHub |
|
||||
|---|---|
|
||||
| Unit of sale | Team blueprint (multi-agent org) |
|
||||
| Buyer | Founder / team lead spinning up an AI company |
|
||||
| Install target | Paperclip company (agents, projects, governance) |
|
||||
| Value prop | "Skip org design — get a shipping team in minutes" |
|
||||
| Price range | $0–$499 per blueprint (+ individual add-ons) |
|
||||
|
||||
---
|
||||
|
||||
## 2. Product Taxonomy
|
||||
|
||||
### 2.1 Team Blueprints (primary product)
|
||||
|
||||
A complete Paperclip company configuration:
|
||||
|
||||
- **Org chart**: Agents with roles, titles, reporting chains, capabilities
|
||||
- **Agent configs**: Adapter type, model, prompt templates, instructions paths
|
||||
- **Governance rules**: Approval flows, budget limits, escalation chains
|
||||
- **Project templates**: Pre-configured projects with workspace settings
|
||||
- **Skills & instructions**: AGENTS.md / skill files bundled per agent
|
||||
|
||||
**Examples:**
|
||||
- "SaaS Startup Team" — CEO, CTO, Engineer, CMO, Designer ($199)
|
||||
- "Content Agency" — Editor-in-Chief, 3 Writers, SEO Analyst, Social Manager ($149)
|
||||
- "Dev Shop" — CTO, 2 Engineers, QA, DevOps ($99)
|
||||
- "Solo Founder + Crew" — CEO agent + 3 ICs across eng/marketing/ops ($79)
|
||||
|
||||
### 2.2 Agent Blueprints (individual agents within a team context)
|
||||
|
||||
Single-agent configurations designed to plug into a Paperclip org:
|
||||
|
||||
- Role definition, prompt template, adapter config
|
||||
- Reporting chain expectations (who they report to)
|
||||
- Skill bundles included
|
||||
- Governance defaults (budget, permissions)
|
||||
|
||||
**Examples:**
|
||||
- "Staff Engineer" — ships production code, manages PRs ($29)
|
||||
- "Growth Marketer" — content pipeline, SEO, social ($39)
|
||||
- "DevOps Agent" — CI/CD, deployment, monitoring ($29)
|
||||
|
||||
### 2.3 Skills (modular capabilities)
|
||||
|
||||
Portable skill files that any Paperclip agent can use:
|
||||
|
||||
- Markdown skill files with instructions
|
||||
- Tool configurations and shell scripts
|
||||
- Compatible with Paperclip's skill loading system
|
||||
|
||||
**Examples:**
|
||||
- "Git PR Workflow" — standardized PR creation and review (Free)
|
||||
- "Deployment Pipeline" — Cloudflare/Vercel deploy skill ($9)
|
||||
- "Customer Support Triage" — ticket classification and routing ($19)
|
||||
|
||||
### 2.4 Governance Templates
|
||||
|
||||
Pre-built approval flows and policies:
|
||||
|
||||
- Budget thresholds and approval chains
|
||||
- Cross-team delegation rules
|
||||
- Escalation procedures
|
||||
- Billing code structures
|
||||
|
||||
**Examples:**
|
||||
- "Startup Governance" — lightweight, CEO approves > $50 (Free)
|
||||
- "Enterprise Governance" — multi-tier approval, audit trail ($49)
|
||||
|
||||
---
|
||||
|
||||
## 3. Data Schemas
|
||||
|
||||
### 3.1 Listing
|
||||
|
||||
```typescript
|
||||
interface Listing {
|
||||
id: string;
|
||||
slug: string; // URL-friendly identifier
|
||||
type: 'team_blueprint' | 'agent_blueprint' | 'skill' | 'governance_template';
|
||||
title: string;
|
||||
tagline: string; // Short pitch (≤120 chars)
|
||||
description: string; // Markdown, full details
|
||||
|
||||
// Pricing
|
||||
price: number; // Cents (0 = free)
|
||||
currency: 'usd';
|
||||
|
||||
// Creator
|
||||
creatorId: string;
|
||||
creatorName: string;
|
||||
creatorAvatar: string | null;
|
||||
|
||||
// Categorization
|
||||
categories: string[]; // e.g. ['saas', 'engineering', 'marketing']
|
||||
tags: string[]; // e.g. ['claude', 'startup', '5-agent']
|
||||
agentCount: number | null; // For team blueprints
|
||||
|
||||
// Content
|
||||
previewImages: string[]; // Screenshots / org chart visuals
|
||||
readmeMarkdown: string; // Full README shown on detail page
|
||||
includedFiles: string[]; // List of files in the bundle
|
||||
|
||||
// Compatibility
|
||||
compatibleAdapters: string[]; // ['claude_local', 'codex_local', ...]
|
||||
requiredModels: string[]; // ['claude-opus-4-6', 'claude-sonnet-4-6']
|
||||
paperclipVersionMin: string; // Minimum Paperclip version
|
||||
|
||||
// Social proof
|
||||
installCount: number;
|
||||
rating: number | null; // 1.0–5.0
|
||||
reviewCount: number;
|
||||
|
||||
// Metadata
|
||||
version: string; // Semver
|
||||
publishedAt: string;
|
||||
updatedAt: string;
|
||||
status: 'draft' | 'published' | 'archived';
|
||||
}
|
||||
```
|
||||
|
||||
### 3.2 Team Blueprint Bundle
|
||||
|
||||
```typescript
|
||||
interface TeamBlueprint {
|
||||
listingId: string;
|
||||
|
||||
// Org structure
|
||||
agents: AgentBlueprint[];
|
||||
reportingChain: { agentSlug: string; reportsTo: string | null }[];
|
||||
|
||||
// Governance
|
||||
governance: {
|
||||
approvalRules: ApprovalRule[];
|
||||
budgetDefaults: { role: string; monthlyCents: number }[];
|
||||
escalationChain: string[]; // Agent slugs in escalation order
|
||||
};
|
||||
|
||||
// Projects
|
||||
projects: ProjectTemplate[];
|
||||
|
||||
// Company-level config
|
||||
companyDefaults: {
|
||||
name: string;
|
||||
defaultModel: string;
|
||||
defaultAdapter: string;
|
||||
};
|
||||
}
|
||||
|
||||
interface AgentBlueprint {
|
||||
slug: string; // e.g. 'cto', 'engineer-1'
|
||||
name: string;
|
||||
role: string;
|
||||
title: string;
|
||||
icon: string;
|
||||
capabilities: string;
|
||||
promptTemplate: string;
|
||||
adapterType: string;
|
||||
adapterConfig: Record<string, unknown>; // adapter-specific settings (shape varies per adapter)
|
||||
instructionsPath: string | null; // Path to AGENTS.md or similar
|
||||
skills: SkillBundle[];
|
||||
budgetMonthlyCents: number;
|
||||
permissions: {
|
||||
canCreateAgents: boolean;
|
||||
canApproveHires: boolean;
|
||||
};
|
||||
}
|
||||
|
||||
interface ProjectTemplate {
|
||||
name: string;
|
||||
description: string;
|
||||
workspace: {
|
||||
cwd: string | null;
|
||||
repoUrl: string | null;
|
||||
} | null;
|
||||
}
|
||||
|
||||
interface ApprovalRule {
|
||||
trigger: string; // e.g. 'hire_agent', 'budget_exceed'
|
||||
threshold: number | null;
|
||||
approverRole: string;
|
||||
}
|
||||
```
|
||||
|
||||
### 3.3 Creator / Seller
|
||||
|
||||
```typescript
|
||||
interface Creator {
|
||||
id: string;
|
||||
userId: string; // Auth provider ID
|
||||
displayName: string;
|
||||
bio: string;
|
||||
avatarUrl: string | null;
|
||||
website: string | null;
|
||||
listings: string[]; // Listing IDs
|
||||
totalInstalls: number;
|
||||
totalRevenue: number; // Cents earned
|
||||
joinedAt: string;
|
||||
verified: boolean;
|
||||
payoutMethod: 'stripe_connect';
|
||||
stripeAccountId: string | null;
|
||||
}
|
||||
```
|
||||
|
||||
### 3.4 Purchase / Install
|
||||
|
||||
```typescript
|
||||
interface Purchase {
|
||||
id: string;
|
||||
listingId: string;
|
||||
buyerUserId: string;
|
||||
buyerCompanyId: string | null; // Target Paperclip company
|
||||
pricePaidCents: number;
|
||||
paymentIntentId: string | null; // Stripe
|
||||
installedAt: string | null; // When deployed to company
|
||||
status: 'pending' | 'completed' | 'refunded';
|
||||
createdAt: string;
|
||||
}
|
||||
```
|
||||
|
||||
### 3.5 Review
|
||||
|
||||
```typescript
|
||||
interface Review {
|
||||
id: string;
|
||||
listingId: string;
|
||||
authorUserId: string;
|
||||
authorDisplayName: string;
|
||||
rating: number; // 1–5
|
||||
title: string;
|
||||
body: string; // Markdown
|
||||
verifiedPurchase: boolean;
|
||||
createdAt: string;
|
||||
updatedAt: string;
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4. Pages & Routes
|
||||
|
||||
### 4.1 Public Pages
|
||||
|
||||
| Route | Page | Description |
|
||||
|---|---|---|
|
||||
| `/` | Homepage | Hero, featured blueprints, popular skills, how it works |
|
||||
| `/browse` | Marketplace browse | Filterable grid of all listings |
|
||||
| `/browse?type=team_blueprint` | Team blueprints | Filtered to team configs |
|
||||
| `/browse?type=agent_blueprint` | Agent blueprints | Single-agent configs |
|
||||
| `/browse?type=skill` | Skills | Skill listings |
|
||||
| `/browse?type=governance_template` | Governance | Policy templates |
|
||||
| `/listings/:slug` | Listing detail | Full product page |
|
||||
| `/creators/:slug` | Creator profile | Bio, all listings, stats |
|
||||
| `/about` | About ClipHub | Mission, how it works |
|
||||
| `/pricing` | Pricing & fees | Creator revenue share, buyer info |
|
||||
|
||||
### 4.2 Authenticated Pages
|
||||
|
||||
| Route | Page | Description |
|
||||
|---|---|---|
|
||||
| `/dashboard` | Buyer dashboard | Purchased items, installed blueprints |
|
||||
| `/dashboard/purchases` | Purchase history | All transactions |
|
||||
| `/dashboard/installs` | Installations | Deployed blueprints with status |
|
||||
| `/creator` | Creator dashboard | Listing management, analytics |
|
||||
| `/creator/listings/new` | Create listing | Multi-step listing wizard |
|
||||
| `/creator/listings/:id/edit` | Edit listing | Modify existing listing |
|
||||
| `/creator/analytics` | Analytics | Revenue, installs, views |
|
||||
| `/creator/payouts` | Payouts | Stripe Connect payout history |
|
||||
|
||||
### 4.3 API Routes
|
||||
|
||||
| Method | Endpoint | Description |
|
||||
|---|---|---|
|
||||
| `GET` | `/api/listings` | Browse listings (filters: type, category, price range, sort) |
|
||||
| `GET` | `/api/listings/:slug` | Get listing detail |
|
||||
| `POST` | `/api/listings` | Create listing (creator auth) |
|
||||
| `PATCH` | `/api/listings/:id` | Update listing |
|
||||
| `DELETE` | `/api/listings/:id` | Archive listing |
|
||||
| `POST` | `/api/listings/:id/purchase` | Purchase listing (Stripe checkout) |
|
||||
| `POST` | `/api/listings/:id/install` | Install to Paperclip company |
|
||||
| `GET` | `/api/listings/:id/reviews` | Get reviews |
|
||||
| `POST` | `/api/listings/:id/reviews` | Submit review |
|
||||
| `GET` | `/api/creators/:slug` | Creator profile |
|
||||
| `GET` | `/api/creators/me` | Current creator profile |
|
||||
| `POST` | `/api/creators` | Register as creator |
|
||||
| `GET` | `/api/purchases` | Buyer's purchase history |
|
||||
| `GET` | `/api/analytics` | Creator analytics |
|
||||
|
||||
---
|
||||
|
||||
## 5. User Flows
|
||||
|
||||
### 5.1 Buyer: Browse → Purchase → Install
|
||||
|
||||
```
|
||||
Homepage → Browse marketplace → Filter by type/category
|
||||
→ Click listing → Read details, reviews, preview org chart
|
||||
→ Click "Buy" → Stripe checkout (or free install)
|
||||
→ Post-purchase: "Install to Company" button
|
||||
→ Select target Paperclip company (or create new)
|
||||
→ ClipHub API calls Paperclip API to:
|
||||
1. Create agents with configs from blueprint
|
||||
2. Set up reporting chains
|
||||
3. Create projects with workspace configs
|
||||
4. Apply governance rules
|
||||
5. Deploy skill files to agent instruction paths
|
||||
→ Redirect to Paperclip dashboard with new team running
|
||||
```
|
||||
|
||||
### 5.2 Creator: Build → Publish → Earn
|
||||
|
||||
```
|
||||
Sign up as creator → Connect Stripe
|
||||
→ "New Listing" wizard:
|
||||
Step 1: Type (team/agent/skill/governance)
|
||||
Step 2: Basic info (title, tagline, description, categories)
|
||||
Step 3: Upload bundle (JSON config + skill files + README)
|
||||
Step 4: Preview & org chart visualization
|
||||
Step 5: Pricing ($0–$499)
|
||||
Step 6: Publish
|
||||
→ Live on marketplace immediately
|
||||
→ Track installs, revenue, reviews on creator dashboard
|
||||
```
|
||||
|
||||
### 5.3 Creator: Export from Paperclip → Publish
|
||||
|
||||
```
|
||||
Running Paperclip company → "Export as Blueprint" (CLI or UI)
|
||||
→ Paperclip exports:
|
||||
- Agent configs (sanitized — no secrets)
|
||||
- Org chart / reporting chains
|
||||
- Governance rules
|
||||
- Project templates
|
||||
- Skill files
|
||||
→ Upload to ClipHub as new listing
|
||||
→ Edit details, set price, publish
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 6. UI Design Direction
|
||||
|
||||
### 6.1 Visual Language
|
||||
|
||||
- **Color palette**: Dark ink primary, warm sand backgrounds, accent color for CTAs (Paperclip brand blue/purple)
|
||||
- **Typography**: Clean sans-serif, strong hierarchy, monospace for technical details
|
||||
- **Cards**: Rounded corners, subtle shadows, clear pricing badges
|
||||
- **Org chart visuals**: Interactive tree/graph showing agent relationships in team blueprints
|
||||
|
||||
### 6.2 Key Design Elements
|
||||
|
||||
| Element | ClipHub |
|
||||
|---|---|
|
||||
| Product card | Org chart mini-preview + agent count badge |
|
||||
| Detail page | Interactive org chart + per-agent breakdown |
|
||||
| Install flow | One-click deploy to Paperclip company |
|
||||
| Social proof | "X companies running this blueprint" |
|
||||
| Preview | Live demo sandbox (stretch goal) |
|
||||
|
||||
### 6.3 Listing Card Design
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────┐
|
||||
│ [Org Chart Mini-Preview] │
|
||||
│ ┌─CEO─┐ │
|
||||
│ ├─CTO─┤ │
|
||||
│ └─ENG──┘ │
|
||||
│ │
|
||||
│ SaaS Startup Team │
|
||||
│ "Ship your MVP with a 5-agent │
|
||||
│ engineering + marketing team" │
|
||||
│ │
|
||||
│ 👥 5 agents ⬇ 234 installs │
|
||||
│ ★ 4.7 (12 reviews) │
|
||||
│ │
|
||||
│ By @masinov $199 [Buy] │
|
||||
└─────────────────────────────────────┘
|
||||
```
|
||||
|
||||
### 6.4 Detail Page Sections
|
||||
|
||||
1. **Hero**: Title, tagline, price, install button, creator info
|
||||
2. **Org Chart**: Interactive visualization of agent hierarchy
|
||||
3. **Agent Breakdown**: Expandable cards for each agent — role, capabilities, model, skills
|
||||
4. **Governance**: Approval flows, budget structure, escalation chain
|
||||
5. **Included Projects**: Project templates with workspace configs
|
||||
6. **README**: Full markdown documentation
|
||||
7. **Reviews**: Star ratings + written reviews
|
||||
8. **Related Blueprints**: Cross-sell similar team configs
|
||||
9. **Creator Profile**: Mini bio, other listings
|
||||
|
||||
---
|
||||
|
||||
## 7. Installation Mechanics
|
||||
|
||||
### 7.1 Install API Flow
|
||||
|
||||
When a buyer clicks "Install to Company":
|
||||
|
||||
```
|
||||
POST /api/listings/:id/install
|
||||
{
|
||||
"targetCompanyId": "uuid", // Existing Paperclip company
|
||||
"overrides": { // Optional customization
|
||||
"agentModel": "claude-sonnet-4-6", // Override default model
|
||||
"budgetScale": 0.5, // Scale budgets
|
||||
"skipProjects": false
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The install handler:
|
||||
|
||||
1. Validates buyer owns the purchase
|
||||
2. Validates target company access
|
||||
3. For each agent in blueprint:
|
||||
- `POST /api/companies/:id/agents` (if `paperclip-create-agent` supports it, or via approval flow)
|
||||
- Sets adapter config, prompt template, instructions path
|
||||
4. Sets reporting chains
|
||||
5. Creates projects and workspaces
|
||||
6. Applies governance rules
|
||||
7. Deploys skill files to configured paths
|
||||
8. Returns summary of created resources
|
||||
|
||||
### 7.2 Conflict Resolution
|
||||
|
||||
- **Agent name collision**: Append `-2`, `-3` suffix
|
||||
- **Project name collision**: Prompt buyer to rename or skip
|
||||
- **Adapter mismatch**: Warn if blueprint requires adapter not available locally
|
||||
- **Model availability**: Warn if required model not configured
|
||||
|
||||
---
|
||||
|
||||
## 8. Revenue Model
|
||||
|
||||
| Fee | Amount | Notes |
|
||||
|---|---|---|
|
||||
| Creator revenue share | 90% of sale price | Minus Stripe processing (~2.9% + $0.30) |
|
||||
| Platform fee | 10% of sale price | ClipHub's cut |
|
||||
| Free listings | $0 | No fees for free listings |
|
||||
| Stripe Connect | Standard rates | Handled by Stripe |
|
||||
|
||||
---
|
||||
|
||||
## 9. Technical Architecture
|
||||
|
||||
### 9.1 Stack
|
||||
|
||||
- **Frontend**: Next.js (React), Tailwind CSS, same UI framework as Paperclip
|
||||
- **Backend**: Node.js API (or extend Paperclip server)
|
||||
- **Database**: Postgres (can share Paperclip's DB or separate)
|
||||
- **Payments**: Stripe Connect (marketplace mode)
|
||||
- **Storage**: S3/R2 for listing bundles and images
|
||||
- **Auth**: Shared with Paperclip auth (or OAuth2)
|
||||
|
||||
### 9.2 Integration with Paperclip
|
||||
|
||||
ClipHub can be:
|
||||
- **Option A**: A separate app that calls Paperclip's API to install blueprints
|
||||
- **Option B**: A built-in section of the Paperclip UI (`/marketplace` route)
|
||||
|
||||
Option B is simpler for MVP — adds routes to the existing Paperclip UI and API.
|
||||
|
||||
### 9.3 Bundle Format
|
||||
|
||||
Listing bundles are ZIP/tar archives containing:
|
||||
|
||||
```
|
||||
blueprint/
|
||||
├── manifest.json # Listing metadata + agent configs
|
||||
├── README.md # Documentation
|
||||
├── org-chart.json # Agent hierarchy
|
||||
├── governance.json # Approval rules, budgets
|
||||
├── agents/
|
||||
│ ├── ceo/
|
||||
│ │ ├── prompt.md # Prompt template
|
||||
│ │ ├── AGENTS.md # Instructions
|
||||
│ │ └── skills/ # Skill files
|
||||
│ ├── cto/
|
||||
│ │ ├── prompt.md
|
||||
│ │ ├── AGENTS.md
|
||||
│ │ └── skills/
|
||||
│ └── engineer/
|
||||
│ ├── prompt.md
|
||||
│ ├── AGENTS.md
|
||||
│ └── skills/
|
||||
└── projects/
|
||||
└── default/
|
||||
└── workspace.json # Project workspace config
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 10. MVP Scope
|
||||
|
||||
### Phase 1: Foundation
|
||||
- [ ] Listing schema and CRUD API
|
||||
- [ ] Browse page with filters (type, category, price)
|
||||
- [ ] Listing detail page with org chart visualization
|
||||
- [ ] Creator registration and listing creation wizard
|
||||
- [ ] Free installs only (no payments yet)
|
||||
- [ ] Install flow: blueprint → Paperclip company
|
||||
|
||||
### Phase 2: Payments & Social
|
||||
- [ ] Stripe Connect integration
|
||||
- [ ] Purchase flow
|
||||
- [ ] Review system
|
||||
- [ ] Creator analytics dashboard
|
||||
- [ ] "Export from Paperclip" CLI command
|
||||
|
||||
### Phase 3: Growth
|
||||
- [ ] Search with relevance ranking
|
||||
- [ ] Featured/trending listings
|
||||
- [ ] Creator verification program
|
||||
- [ ] Blueprint versioning and update notifications
|
||||
- [ ] Live demo sandbox
|
||||
- [ ] API for programmatic publishing
|
||||
18
package.json
18
package.json
@@ -3,8 +3,9 @@
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "node scripts/dev-runner.mjs dev",
|
||||
"dev:watch": "PAPERCLIP_MIGRATION_PROMPT=never node scripts/dev-runner.mjs watch",
|
||||
"dev": "node scripts/dev-runner.mjs watch",
|
||||
"dev:watch": "cross-env PAPERCLIP_MIGRATION_PROMPT=never node scripts/dev-runner.mjs watch",
|
||||
"dev:once": "node scripts/dev-runner.mjs dev",
|
||||
"dev:server": "pnpm --filter @paperclipai/server dev",
|
||||
"dev:ui": "pnpm --filter @paperclipai/ui dev",
|
||||
"build": "pnpm -r build",
|
||||
@@ -17,14 +18,25 @@
|
||||
"db:backup": "./scripts/backup-db.sh",
|
||||
"paperclipai": "node cli/node_modules/tsx/dist/cli.mjs cli/src/index.ts",
|
||||
"build:npm": "./scripts/build-npm.sh",
|
||||
"release:start": "./scripts/release-start.sh",
|
||||
"release": "./scripts/release.sh",
|
||||
"release:preflight": "./scripts/release-preflight.sh",
|
||||
"release:github": "./scripts/create-github-release.sh",
|
||||
"release:rollback": "./scripts/rollback-latest.sh",
|
||||
"changeset": "changeset",
|
||||
"version-packages": "changeset version",
|
||||
"check:tokens": "node scripts/check-forbidden-tokens.mjs",
|
||||
"docs:dev": "cd docs && npx mintlify dev"
|
||||
"docs:dev": "cd docs && npx mintlify dev",
|
||||
"smoke:openclaw-join": "./scripts/smoke/openclaw-join.sh",
|
||||
"smoke:openclaw-docker-ui": "./scripts/smoke/openclaw-docker-ui.sh",
|
||||
"smoke:openclaw-sse-standalone": "./scripts/smoke/openclaw-sse-standalone.sh",
|
||||
"test:e2e": "npx playwright test --config tests/e2e/playwright.config.ts",
|
||||
"test:e2e:headed": "npx playwright test --config tests/e2e/playwright.config.ts --headed"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@changesets/cli": "^2.30.0",
|
||||
"cross-env": "^10.1.0",
|
||||
"@playwright/test": "^1.58.2",
|
||||
"esbuild": "^0.27.3",
|
||||
"typescript": "^5.7.3",
|
||||
"vitest": "^3.0.5"
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
# @paperclipai/adapter-utils
|
||||
|
||||
## 0.2.7
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Version bump (patch)
|
||||
|
||||
## 0.2.6
|
||||
|
||||
### Patch Changes
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@paperclipai/adapter-utils",
|
||||
"version": "0.2.6",
|
||||
"version": "0.2.7",
|
||||
"type": "module",
|
||||
"exports": {
|
||||
".": "./src/index.ts",
|
||||
@@ -30,6 +30,7 @@
|
||||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^24.6.0",
|
||||
"typescript": "^5.7.3"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,6 +13,8 @@ export type {
|
||||
AdapterEnvironmentTestContext,
|
||||
AdapterSessionCodec,
|
||||
AdapterModel,
|
||||
HireApprovedPayload,
|
||||
HireApprovedHookResult,
|
||||
ServerAdapterModule,
|
||||
TranscriptEntry,
|
||||
StdoutLineParser,
|
||||
|
||||
@@ -15,6 +15,19 @@ interface RunningProcess {
|
||||
graceSec: number;
|
||||
}
|
||||
|
||||
interface SpawnTarget {
|
||||
command: string;
|
||||
args: string[];
|
||||
}
|
||||
|
||||
type ChildProcessWithEvents = ChildProcess & {
|
||||
on(event: "error", listener: (err: Error) => void): ChildProcess;
|
||||
on(
|
||||
event: "close",
|
||||
listener: (code: number | null, signal: NodeJS.Signals | null) => void,
|
||||
): ChildProcess;
|
||||
};
|
||||
|
||||
export const runningProcesses = new Map<string, RunningProcess>();
|
||||
export const MAX_CAPTURE_BYTES = 4 * 1024 * 1024;
|
||||
export const MAX_EXCERPT_BYTES = 32 * 1024;
|
||||
@@ -117,6 +130,78 @@ export function defaultPathForPlatform() {
|
||||
return "/usr/local/bin:/opt/homebrew/bin:/usr/local/sbin:/usr/bin:/bin:/usr/sbin:/sbin";
|
||||
}
|
||||
|
||||
function windowsPathExts(env: NodeJS.ProcessEnv): string[] {
|
||||
return (env.PATHEXT ?? ".EXE;.CMD;.BAT;.COM").split(";").filter(Boolean);
|
||||
}
|
||||
|
||||
async function pathExists(candidate: string) {
|
||||
try {
|
||||
await fs.access(candidate, process.platform === "win32" ? fsConstants.F_OK : fsConstants.X_OK);
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async function resolveCommandPath(command: string, cwd: string, env: NodeJS.ProcessEnv): Promise<string | null> {
|
||||
const hasPathSeparator = command.includes("/") || command.includes("\\");
|
||||
if (hasPathSeparator) {
|
||||
const absolute = path.isAbsolute(command) ? command : path.resolve(cwd, command);
|
||||
return (await pathExists(absolute)) ? absolute : null;
|
||||
}
|
||||
|
||||
const pathValue = env.PATH ?? env.Path ?? "";
|
||||
const delimiter = process.platform === "win32" ? ";" : ":";
|
||||
const dirs = pathValue.split(delimiter).filter(Boolean);
|
||||
const exts = process.platform === "win32" ? windowsPathExts(env) : [""];
|
||||
const hasExtension = process.platform === "win32" && path.extname(command).length > 0;
|
||||
|
||||
for (const dir of dirs) {
|
||||
const candidates =
|
||||
process.platform === "win32"
|
||||
? hasExtension
|
||||
? [path.join(dir, command)]
|
||||
: exts.map((ext) => path.join(dir, `${command}${ext}`))
|
||||
: [path.join(dir, command)];
|
||||
for (const candidate of candidates) {
|
||||
if (await pathExists(candidate)) return candidate;
|
||||
}
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function quoteForCmd(arg: string) {
|
||||
if (!arg.length) return '""';
|
||||
const escaped = arg.replace(/"/g, '""');
|
||||
return /[\s"&<>|^()]/.test(escaped) ? `"${escaped}"` : escaped;
|
||||
}
|
||||
|
||||
async function resolveSpawnTarget(
|
||||
command: string,
|
||||
args: string[],
|
||||
cwd: string,
|
||||
env: NodeJS.ProcessEnv,
|
||||
): Promise<SpawnTarget> {
|
||||
const resolved = await resolveCommandPath(command, cwd, env);
|
||||
const executable = resolved ?? command;
|
||||
|
||||
if (process.platform !== "win32") {
|
||||
return { command: executable, args };
|
||||
}
|
||||
|
||||
if (/\.(cmd|bat)$/i.test(executable)) {
|
||||
const shell = env.ComSpec || process.env.ComSpec || "cmd.exe";
|
||||
const commandLine = [quoteForCmd(executable), ...args.map(quoteForCmd)].join(" ");
|
||||
return {
|
||||
command: shell,
|
||||
args: ["/d", "/s", "/c", commandLine],
|
||||
};
|
||||
}
|
||||
|
||||
return { command: executable, args };
|
||||
}
|
||||
|
||||
export function ensurePathInEnv(env: NodeJS.ProcessEnv): NodeJS.ProcessEnv {
|
||||
if (typeof env.PATH === "string" && env.PATH.length > 0) return env;
|
||||
if (typeof env.Path === "string" && env.Path.length > 0) return env;
|
||||
@@ -161,36 +246,12 @@ export async function ensureAbsoluteDirectory(
|
||||
}
|
||||
|
||||
export async function ensureCommandResolvable(command: string, cwd: string, env: NodeJS.ProcessEnv) {
|
||||
const hasPathSeparator = command.includes("/") || command.includes("\\");
|
||||
if (hasPathSeparator) {
|
||||
const resolved = await resolveCommandPath(command, cwd, env);
|
||||
if (resolved) return;
|
||||
if (command.includes("/") || command.includes("\\")) {
|
||||
const absolute = path.isAbsolute(command) ? command : path.resolve(cwd, command);
|
||||
try {
|
||||
await fs.access(absolute, fsConstants.X_OK);
|
||||
} catch {
|
||||
throw new Error(`Command is not executable: "${command}" (resolved: "${absolute}")`);
|
||||
}
|
||||
return;
|
||||
throw new Error(`Command is not executable: "${command}" (resolved: "${absolute}")`);
|
||||
}
|
||||
|
||||
const pathValue = env.PATH ?? env.Path ?? "";
|
||||
const delimiter = process.platform === "win32" ? ";" : ":";
|
||||
const dirs = pathValue.split(delimiter).filter(Boolean);
|
||||
const windowsExt = process.platform === "win32"
|
||||
? (env.PATHEXT ?? ".EXE;.CMD;.BAT;.COM").split(";")
|
||||
: [""];
|
||||
|
||||
for (const dir of dirs) {
|
||||
for (const ext of windowsExt) {
|
||||
const candidate = path.join(dir, process.platform === "win32" ? `${command}${ext}` : command);
|
||||
try {
|
||||
await fs.access(candidate, fsConstants.X_OK);
|
||||
return;
|
||||
} catch {
|
||||
// continue scanning PATH
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error(`Command not found in PATH: "${command}"`);
|
||||
}
|
||||
|
||||
@@ -212,78 +273,82 @@ export async function runChildProcess(
|
||||
|
||||
return new Promise<RunProcessResult>((resolve, reject) => {
|
||||
const mergedEnv = ensurePathInEnv({ ...process.env, ...opts.env });
|
||||
const child = spawn(command, args, {
|
||||
cwd: opts.cwd,
|
||||
env: mergedEnv,
|
||||
shell: false,
|
||||
stdio: [opts.stdin != null ? "pipe" : "ignore", "pipe", "pipe"],
|
||||
});
|
||||
void resolveSpawnTarget(command, args, opts.cwd, mergedEnv)
|
||||
.then((target) => {
|
||||
const child = spawn(target.command, target.args, {
|
||||
cwd: opts.cwd,
|
||||
env: mergedEnv,
|
||||
shell: false,
|
||||
stdio: [opts.stdin != null ? "pipe" : "ignore", "pipe", "pipe"],
|
||||
}) as ChildProcessWithEvents;
|
||||
|
||||
if (opts.stdin != null && child.stdin) {
|
||||
child.stdin.write(opts.stdin);
|
||||
child.stdin.end();
|
||||
}
|
||||
if (opts.stdin != null && child.stdin) {
|
||||
child.stdin.write(opts.stdin);
|
||||
child.stdin.end();
|
||||
}
|
||||
|
||||
runningProcesses.set(runId, { child, graceSec: opts.graceSec });
|
||||
runningProcesses.set(runId, { child, graceSec: opts.graceSec });
|
||||
|
||||
let timedOut = false;
|
||||
let stdout = "";
|
||||
let stderr = "";
|
||||
let logChain: Promise<void> = Promise.resolve();
|
||||
let timedOut = false;
|
||||
let stdout = "";
|
||||
let stderr = "";
|
||||
let logChain: Promise<void> = Promise.resolve();
|
||||
|
||||
const timeout =
|
||||
opts.timeoutSec > 0
|
||||
? setTimeout(() => {
|
||||
timedOut = true;
|
||||
child.kill("SIGTERM");
|
||||
setTimeout(() => {
|
||||
if (!child.killed) {
|
||||
child.kill("SIGKILL");
|
||||
}
|
||||
}, Math.max(1, opts.graceSec) * 1000);
|
||||
}, opts.timeoutSec * 1000)
|
||||
: null;
|
||||
const timeout =
|
||||
opts.timeoutSec > 0
|
||||
? setTimeout(() => {
|
||||
timedOut = true;
|
||||
child.kill("SIGTERM");
|
||||
setTimeout(() => {
|
||||
if (!child.killed) {
|
||||
child.kill("SIGKILL");
|
||||
}
|
||||
}, Math.max(1, opts.graceSec) * 1000);
|
||||
}, opts.timeoutSec * 1000)
|
||||
: null;
|
||||
|
||||
child.stdout?.on("data", (chunk) => {
|
||||
const text = String(chunk);
|
||||
stdout = appendWithCap(stdout, text);
|
||||
logChain = logChain
|
||||
.then(() => opts.onLog("stdout", text))
|
||||
.catch((err) => onLogError(err, runId, "failed to append stdout log chunk"));
|
||||
});
|
||||
|
||||
child.stderr?.on("data", (chunk) => {
|
||||
const text = String(chunk);
|
||||
stderr = appendWithCap(stderr, text);
|
||||
logChain = logChain
|
||||
.then(() => opts.onLog("stderr", text))
|
||||
.catch((err) => onLogError(err, runId, "failed to append stderr log chunk"));
|
||||
});
|
||||
|
||||
child.on("error", (err) => {
|
||||
if (timeout) clearTimeout(timeout);
|
||||
runningProcesses.delete(runId);
|
||||
const errno = (err as NodeJS.ErrnoException).code;
|
||||
const pathValue = mergedEnv.PATH ?? mergedEnv.Path ?? "";
|
||||
const msg =
|
||||
errno === "ENOENT"
|
||||
? `Failed to start command "${command}" in "${opts.cwd}". Verify adapter command, working directory, and PATH (${pathValue}).`
|
||||
: `Failed to start command "${command}" in "${opts.cwd}": ${err.message}`;
|
||||
reject(new Error(msg));
|
||||
});
|
||||
|
||||
child.on("close", (code, signal) => {
|
||||
if (timeout) clearTimeout(timeout);
|
||||
runningProcesses.delete(runId);
|
||||
void logChain.finally(() => {
|
||||
resolve({
|
||||
exitCode: code,
|
||||
signal,
|
||||
timedOut,
|
||||
stdout,
|
||||
stderr,
|
||||
child.stdout?.on("data", (chunk: unknown) => {
|
||||
const text = String(chunk);
|
||||
stdout = appendWithCap(stdout, text);
|
||||
logChain = logChain
|
||||
.then(() => opts.onLog("stdout", text))
|
||||
.catch((err) => onLogError(err, runId, "failed to append stdout log chunk"));
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
child.stderr?.on("data", (chunk: unknown) => {
|
||||
const text = String(chunk);
|
||||
stderr = appendWithCap(stderr, text);
|
||||
logChain = logChain
|
||||
.then(() => opts.onLog("stderr", text))
|
||||
.catch((err) => onLogError(err, runId, "failed to append stderr log chunk"));
|
||||
});
|
||||
|
||||
child.on("error", (err: Error) => {
|
||||
if (timeout) clearTimeout(timeout);
|
||||
runningProcesses.delete(runId);
|
||||
const errno = (err as NodeJS.ErrnoException).code;
|
||||
const pathValue = mergedEnv.PATH ?? mergedEnv.Path ?? "";
|
||||
const msg =
|
||||
errno === "ENOENT"
|
||||
? `Failed to start command "${command}" in "${opts.cwd}". Verify adapter command, working directory, and PATH (${pathValue}).`
|
||||
: `Failed to start command "${command}" in "${opts.cwd}": ${err.message}`;
|
||||
reject(new Error(msg));
|
||||
});
|
||||
|
||||
child.on("close", (code: number | null, signal: NodeJS.Signals | null) => {
|
||||
if (timeout) clearTimeout(timeout);
|
||||
runningProcesses.delete(runId);
|
||||
void logChain.finally(() => {
|
||||
resolve({
|
||||
exitCode: code,
|
||||
signal,
|
||||
timedOut,
|
||||
stdout,
|
||||
stderr,
|
||||
});
|
||||
});
|
||||
});
|
||||
})
|
||||
.catch(reject);
|
||||
});
|
||||
}
|
||||
|
||||
@@ -119,6 +119,27 @@ export interface AdapterEnvironmentTestContext {
|
||||
};
|
||||
}
|
||||
|
||||
/** Payload for the onHireApproved adapter lifecycle hook (e.g. join-request or hire_agent approval). */
|
||||
export interface HireApprovedPayload {
|
||||
companyId: string;
|
||||
agentId: string;
|
||||
agentName: string;
|
||||
adapterType: string;
|
||||
/** "join_request" | "approval" */
|
||||
source: "join_request" | "approval";
|
||||
sourceId: string;
|
||||
approvedAt: string;
|
||||
/** Canonical operator-facing message for cloud adapters to show the user. */
|
||||
message: string;
|
||||
}
|
||||
|
||||
/** Result of onHireApproved hook; failures are non-fatal to the approval flow. */
|
||||
export interface HireApprovedHookResult {
|
||||
ok: boolean;
|
||||
error?: string;
|
||||
detail?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
export interface ServerAdapterModule {
|
||||
type: string;
|
||||
execute(ctx: AdapterExecutionContext): Promise<AdapterExecutionResult>;
|
||||
@@ -128,6 +149,14 @@ export interface ServerAdapterModule {
|
||||
models?: AdapterModel[];
|
||||
listModels?: () => Promise<AdapterModel[]>;
|
||||
agentConfigurationDoc?: string;
|
||||
/**
|
||||
* Optional lifecycle hook when an agent is approved/hired (join-request or hire_agent approval).
|
||||
* adapterConfig is the agent's adapter config so the adapter can e.g. send a callback to a configured URL.
|
||||
*/
|
||||
onHireApproved?: (
|
||||
payload: HireApprovedPayload,
|
||||
adapterConfig: Record<string, unknown>,
|
||||
) => Promise<HireApprovedHookResult>;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -135,8 +164,8 @@ export interface ServerAdapterModule {
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
export type TranscriptEntry =
|
||||
| { kind: "assistant"; ts: string; text: string }
|
||||
| { kind: "thinking"; ts: string; text: string }
|
||||
| { kind: "assistant"; ts: string; text: string; delta?: boolean }
|
||||
| { kind: "thinking"; ts: string; text: string; delta?: boolean }
|
||||
| { kind: "user"; ts: string; text: string }
|
||||
| { kind: "tool_call"; ts: string; name: string; input: unknown }
|
||||
| { kind: "tool_result"; ts: string; toolUseId: string; content: string; isError: boolean }
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"extends": "../../tsconfig.json",
|
||||
"extends": "../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "dist",
|
||||
"rootDir": "src"
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
# @paperclipai/adapter-claude-local
|
||||
|
||||
## 0.2.7
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Version bump (patch)
|
||||
- Updated dependencies
|
||||
- @paperclipai/adapter-utils@0.2.7
|
||||
|
||||
## 0.2.6
|
||||
|
||||
### Patch Changes
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@paperclipai/adapter-claude-local",
|
||||
"version": "0.2.6",
|
||||
"version": "0.2.7",
|
||||
"type": "module",
|
||||
"exports": {
|
||||
".": "./src/index.ts",
|
||||
@@ -45,6 +45,7 @@
|
||||
"picocolors": "^1.1.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^24.6.0",
|
||||
"typescript": "^5.7.3"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,6 +3,8 @@ export const label = "Claude Code (local)";
|
||||
|
||||
export const models = [
|
||||
{ id: "claude-opus-4-6", label: "Claude Opus 4.6" },
|
||||
{ id: "claude-sonnet-4-6", label: "Claude Sonnet 4.6" },
|
||||
{ id: "claude-haiku-4-6", label: "Claude Haiku 4.6" },
|
||||
{ id: "claude-sonnet-4-5-20250929", label: "Claude Sonnet 4.5" },
|
||||
{ id: "claude-haiku-4-5-20251001", label: "Claude Haiku 4.5" },
|
||||
];
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"extends": "../../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "dist",
|
||||
"rootDir": "src"
|
||||
|
||||
@@ -1,5 +1,13 @@
|
||||
# @paperclipai/adapter-codex-local
|
||||
|
||||
## 0.2.7
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Version bump (patch)
|
||||
- Updated dependencies
|
||||
- @paperclipai/adapter-utils@0.2.7
|
||||
|
||||
## 0.2.6
|
||||
|
||||
### Patch Changes
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@paperclipai/adapter-codex-local",
|
||||
"version": "0.2.6",
|
||||
"version": "0.2.7",
|
||||
"type": "module",
|
||||
"exports": {
|
||||
".": "./src/index.ts",
|
||||
@@ -45,6 +45,7 @@
|
||||
"picocolors": "^1.1.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^24.6.0",
|
||||
"typescript": "^5.7.3"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ export const DEFAULT_CODEX_LOCAL_MODEL = "gpt-5.3-codex";
|
||||
export const DEFAULT_CODEX_LOCAL_BYPASS_APPROVALS_AND_SANDBOX = true;
|
||||
|
||||
export const models = [
|
||||
{ id: "gpt-5.4", label: "gpt-5.4" },
|
||||
{ id: DEFAULT_CODEX_LOCAL_MODEL, label: DEFAULT_CODEX_LOCAL_MODEL },
|
||||
{ id: "gpt-5.3-codex-spark", label: "gpt-5.3-codex-spark" },
|
||||
{ id: "gpt-5", label: "gpt-5" },
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"extends": "../../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "dist",
|
||||
"rootDir": "src"
|
||||
|
||||
7
packages/adapters/cursor-local/CHANGELOG.md
Normal file
7
packages/adapters/cursor-local/CHANGELOG.md
Normal file
@@ -0,0 +1,7 @@
|
||||
# @paperclipai/adapter-cursor-local
|
||||
|
||||
## 0.2.7
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Added initial `cursor` adapter package for local Cursor CLI execution
|
||||
51
packages/adapters/cursor-local/package.json
Normal file
51
packages/adapters/cursor-local/package.json
Normal file
@@ -0,0 +1,51 @@
|
||||
{
|
||||
"name": "@paperclipai/adapter-cursor-local",
|
||||
"version": "0.2.7",
|
||||
"type": "module",
|
||||
"exports": {
|
||||
".": "./src/index.ts",
|
||||
"./server": "./src/server/index.ts",
|
||||
"./ui": "./src/ui/index.ts",
|
||||
"./cli": "./src/cli/index.ts"
|
||||
},
|
||||
"publishConfig": {
|
||||
"access": "public",
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"import": "./dist/index.js"
|
||||
},
|
||||
"./server": {
|
||||
"types": "./dist/server/index.d.ts",
|
||||
"import": "./dist/server/index.js"
|
||||
},
|
||||
"./ui": {
|
||||
"types": "./dist/ui/index.d.ts",
|
||||
"import": "./dist/ui/index.js"
|
||||
},
|
||||
"./cli": {
|
||||
"types": "./dist/cli/index.d.ts",
|
||||
"import": "./dist/cli/index.js"
|
||||
}
|
||||
},
|
||||
"main": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts"
|
||||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"skills"
|
||||
],
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"clean": "rm -rf dist",
|
||||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
"dependencies": {
|
||||
"@paperclipai/adapter-utils": "workspace:*",
|
||||
"picocolors": "^1.1.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^24.6.0",
|
||||
"typescript": "^5.7.3"
|
||||
}
|
||||
}
|
||||
317
packages/adapters/cursor-local/src/cli/format-event.ts
Normal file
317
packages/adapters/cursor-local/src/cli/format-event.ts
Normal file
@@ -0,0 +1,317 @@
|
||||
import pc from "picocolors";
|
||||
import { normalizeCursorStreamLine } from "../shared/stream.js";
|
||||
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
if (typeof value !== "object" || value === null || Array.isArray(value)) return null;
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function asString(value: unknown, fallback = ""): string {
|
||||
return typeof value === "string" ? value : fallback;
|
||||
}
|
||||
|
||||
function asNumber(value: unknown, fallback = 0): number {
|
||||
return typeof value === "number" && Number.isFinite(value) ? value : fallback;
|
||||
}
|
||||
|
||||
function stringifyUnknown(value: unknown): string {
|
||||
if (typeof value === "string") return value;
|
||||
if (value === null || value === undefined) return "";
|
||||
try {
|
||||
return JSON.stringify(value, null, 2);
|
||||
} catch {
|
||||
return String(value);
|
||||
}
|
||||
}
|
||||
|
||||
function printUserMessage(messageRaw: unknown): void {
|
||||
if (typeof messageRaw === "string") {
|
||||
const text = messageRaw.trim();
|
||||
if (text) console.log(pc.gray(`user: ${text}`));
|
||||
return;
|
||||
}
|
||||
|
||||
const message = asRecord(messageRaw);
|
||||
if (!message) return;
|
||||
|
||||
const directText = asString(message.text).trim();
|
||||
if (directText) console.log(pc.gray(`user: ${directText}`));
|
||||
|
||||
const content = Array.isArray(message.content) ? message.content : [];
|
||||
for (const partRaw of content) {
|
||||
const part = asRecord(partRaw);
|
||||
if (!part) continue;
|
||||
const type = asString(part.type).trim();
|
||||
if (type !== "output_text" && type !== "text") continue;
|
||||
const text = asString(part.text).trim();
|
||||
if (text) console.log(pc.gray(`user: ${text}`));
|
||||
}
|
||||
}
|
||||
|
||||
function printAssistantMessage(messageRaw: unknown): void {
|
||||
if (typeof messageRaw === "string") {
|
||||
const text = messageRaw.trim();
|
||||
if (text) console.log(pc.green(`assistant: ${text}`));
|
||||
return;
|
||||
}
|
||||
|
||||
const message = asRecord(messageRaw);
|
||||
if (!message) return;
|
||||
|
||||
const directText = asString(message.text).trim();
|
||||
if (directText) console.log(pc.green(`assistant: ${directText}`));
|
||||
|
||||
const content = Array.isArray(message.content) ? message.content : [];
|
||||
for (const partRaw of content) {
|
||||
const part = asRecord(partRaw);
|
||||
if (!part) continue;
|
||||
const type = asString(part.type).trim();
|
||||
|
||||
if (type === "output_text" || type === "text") {
|
||||
const text = asString(part.text).trim();
|
||||
if (text) console.log(pc.green(`assistant: ${text}`));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (type === "thinking") {
|
||||
const text = asString(part.text).trim();
|
||||
if (text) console.log(pc.gray(`thinking: ${text}`));
|
||||
continue;
|
||||
}
|
||||
|
||||
if (type === "tool_call") {
|
||||
const name = asString(part.name, asString(part.tool, "tool"));
|
||||
console.log(pc.yellow(`tool_call: ${name}`));
|
||||
const input = part.input ?? part.arguments ?? part.args;
|
||||
if (input !== undefined) {
|
||||
try {
|
||||
console.log(pc.gray(JSON.stringify(input, null, 2)));
|
||||
} catch {
|
||||
console.log(pc.gray(String(input)));
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (type === "tool_result") {
|
||||
const isError = part.is_error === true || asString(part.status).toLowerCase() === "error";
|
||||
const contentText =
|
||||
asString(part.output) ||
|
||||
asString(part.text) ||
|
||||
asString(part.result) ||
|
||||
stringifyUnknown(part.output ?? part.result ?? part.text ?? part);
|
||||
console.log((isError ? pc.red : pc.cyan)(`tool_result${isError ? " (error)" : ""}`));
|
||||
if (contentText) console.log((isError ? pc.red : pc.gray)(contentText));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function printToolCallEventTopLevel(parsed: Record<string, unknown>): void {
|
||||
const subtype = asString(parsed.subtype).trim().toLowerCase();
|
||||
const callId = asString(parsed.call_id, asString(parsed.callId, asString(parsed.id, "")));
|
||||
const toolCall = asRecord(parsed.tool_call ?? parsed.toolCall);
|
||||
if (!toolCall) {
|
||||
console.log(pc.yellow(`tool_call${subtype ? `: ${subtype}` : ""}`));
|
||||
return;
|
||||
}
|
||||
|
||||
const [toolName] = Object.keys(toolCall);
|
||||
if (!toolName) {
|
||||
console.log(pc.yellow(`tool_call${subtype ? `: ${subtype}` : ""}`));
|
||||
return;
|
||||
}
|
||||
const payload = asRecord(toolCall[toolName]) ?? {};
|
||||
const args = payload.args ?? asRecord(payload.function)?.arguments;
|
||||
const result =
|
||||
payload.result ??
|
||||
payload.output ??
|
||||
payload.error ??
|
||||
asRecord(payload.function)?.result ??
|
||||
asRecord(payload.function)?.output;
|
||||
const isError =
|
||||
parsed.is_error === true ||
|
||||
payload.is_error === true ||
|
||||
subtype === "failed" ||
|
||||
subtype === "error" ||
|
||||
subtype === "cancelled" ||
|
||||
payload.error !== undefined;
|
||||
|
||||
if (subtype === "started" || subtype === "start") {
|
||||
console.log(pc.yellow(`tool_call: ${toolName}${callId ? ` (${callId})` : ""}`));
|
||||
if (args !== undefined) {
|
||||
console.log(pc.gray(stringifyUnknown(args)));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (subtype === "completed" || subtype === "complete" || subtype === "finished") {
|
||||
const header = `tool_result${isError ? " (error)" : ""}${callId ? ` (${callId})` : ""}`;
|
||||
console.log((isError ? pc.red : pc.cyan)(header));
|
||||
if (result !== undefined) {
|
||||
console.log((isError ? pc.red : pc.gray)(stringifyUnknown(result)));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(pc.yellow(`tool_call: ${toolName}${subtype ? ` (${subtype})` : ""}`));
|
||||
}
|
||||
|
||||
function printLegacyToolEvent(part: Record<string, unknown>): void {
|
||||
const tool = asString(part.tool, "tool");
|
||||
const callId = asString(part.callID, asString(part.id, ""));
|
||||
const state = asRecord(part.state);
|
||||
const status = asString(state?.status);
|
||||
const input = state?.input;
|
||||
const output = asString(state?.output).replace(/\s+$/, "");
|
||||
const metadata = asRecord(state?.metadata);
|
||||
const exit = asNumber(metadata?.exit, NaN);
|
||||
const isError =
|
||||
status === "failed" ||
|
||||
status === "error" ||
|
||||
status === "cancelled" ||
|
||||
(Number.isFinite(exit) && exit !== 0);
|
||||
|
||||
console.log(pc.yellow(`tool_call: ${tool}${callId ? ` (${callId})` : ""}`));
|
||||
if (input !== undefined) {
|
||||
try {
|
||||
console.log(pc.gray(JSON.stringify(input, null, 2)));
|
||||
} catch {
|
||||
console.log(pc.gray(String(input)));
|
||||
}
|
||||
}
|
||||
|
||||
if (status || output) {
|
||||
const summary = [
|
||||
"tool_result",
|
||||
status ? `status=${status}` : "",
|
||||
Number.isFinite(exit) ? `exit=${exit}` : "",
|
||||
]
|
||||
.filter(Boolean)
|
||||
.join(" ");
|
||||
console.log((isError ? pc.red : pc.cyan)(summary));
|
||||
if (output) {
|
||||
console.log((isError ? pc.red : pc.gray)(output));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export function printCursorStreamEvent(raw: string, _debug: boolean): void {
|
||||
const line = normalizeCursorStreamLine(raw).line;
|
||||
if (!line) return;
|
||||
|
||||
let parsed: Record<string, unknown> | null = null;
|
||||
try {
|
||||
parsed = JSON.parse(line) as Record<string, unknown>;
|
||||
} catch {
|
||||
console.log(line);
|
||||
return;
|
||||
}
|
||||
|
||||
const type = asString(parsed.type);
|
||||
|
||||
if (type === "system") {
|
||||
const subtype = asString(parsed.subtype);
|
||||
if (subtype === "init") {
|
||||
const sessionId =
|
||||
asString(parsed.session_id) ||
|
||||
asString(parsed.sessionId) ||
|
||||
asString(parsed.sessionID);
|
||||
const model = asString(parsed.model);
|
||||
const details = [sessionId ? `session: ${sessionId}` : "", model ? `model: ${model}` : ""]
|
||||
.filter(Boolean)
|
||||
.join(", ");
|
||||
console.log(pc.blue(`Cursor init${details ? ` (${details})` : ""}`));
|
||||
return;
|
||||
}
|
||||
console.log(pc.blue(`system: ${subtype || "event"}`));
|
||||
return;
|
||||
}
|
||||
|
||||
if (type === "assistant") {
|
||||
printAssistantMessage(parsed.message);
|
||||
return;
|
||||
}
|
||||
|
||||
if (type === "user") {
|
||||
printUserMessage(parsed.message);
|
||||
return;
|
||||
}
|
||||
|
||||
if (type === "thinking") {
|
||||
const text = asString(parsed.text).trim() || asString(asRecord(parsed.delta)?.text).trim();
|
||||
if (text) console.log(pc.gray(`thinking: ${text}`));
|
||||
return;
|
||||
}
|
||||
|
||||
if (type === "tool_call") {
|
||||
printToolCallEventTopLevel(parsed);
|
||||
return;
|
||||
}
|
||||
|
||||
if (type === "result") {
|
||||
const usage = asRecord(parsed.usage);
|
||||
const input = asNumber(usage?.input_tokens, asNumber(usage?.inputTokens));
|
||||
const output = asNumber(usage?.output_tokens, asNumber(usage?.outputTokens));
|
||||
const cached = asNumber(
|
||||
usage?.cached_input_tokens,
|
||||
asNumber(usage?.cachedInputTokens, asNumber(usage?.cache_read_input_tokens)),
|
||||
);
|
||||
const cost = asNumber(parsed.total_cost_usd, asNumber(parsed.cost_usd, asNumber(parsed.cost)));
|
||||
const subtype = asString(parsed.subtype, "result");
|
||||
const isError = parsed.is_error === true || subtype === "error" || subtype === "failed";
|
||||
|
||||
console.log(pc.blue(`result: subtype=${subtype}`));
|
||||
console.log(pc.blue(`tokens: in=${input} out=${output} cached=${cached} cost=$${cost.toFixed(6)}`));
|
||||
const resultText = asString(parsed.result).trim();
|
||||
if (resultText) console.log((isError ? pc.red : pc.green)(`assistant: ${resultText}`));
|
||||
const errors = Array.isArray(parsed.errors) ? parsed.errors.map((value) => stringifyUnknown(value)).filter(Boolean) : [];
|
||||
if (errors.length > 0) console.log(pc.red(`errors: ${errors.join(" | ")}`));
|
||||
return;
|
||||
}
|
||||
|
||||
if (type === "error") {
|
||||
const message = asString(parsed.message) || stringifyUnknown(parsed.error ?? parsed.detail) || line;
|
||||
console.log(pc.red(`error: ${message}`));
|
||||
return;
|
||||
}
|
||||
|
||||
// Compatibility with older stream-json event shapes.
|
||||
if (type === "step_start") {
|
||||
const sessionId = asString(parsed.sessionID);
|
||||
console.log(pc.blue(`step started${sessionId ? ` (session: ${sessionId})` : ""}`));
|
||||
return;
|
||||
}
|
||||
|
||||
if (type === "text") {
|
||||
const part = asRecord(parsed.part);
|
||||
const text = asString(part?.text);
|
||||
if (text) console.log(pc.green(`assistant: ${text}`));
|
||||
return;
|
||||
}
|
||||
|
||||
if (type === "tool_use") {
|
||||
const part = asRecord(parsed.part);
|
||||
if (part) {
|
||||
printLegacyToolEvent(part);
|
||||
} else {
|
||||
console.log(pc.yellow("tool_use"));
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (type === "step_finish") {
|
||||
const part = asRecord(parsed.part);
|
||||
const tokens = asRecord(part?.tokens);
|
||||
const cache = asRecord(tokens?.cache);
|
||||
const reason = asString(part?.reason, "step_finish");
|
||||
const input = asNumber(tokens?.input);
|
||||
const output = asNumber(tokens?.output);
|
||||
const cached = asNumber(cache?.read);
|
||||
const cost = asNumber(part?.cost);
|
||||
console.log(pc.blue(`step finished: reason=${reason}`));
|
||||
console.log(pc.blue(`tokens: in=${input} out=${output} cached=${cached} cost=$${cost.toFixed(6)}`));
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(line);
|
||||
}
|
||||
1
packages/adapters/cursor-local/src/cli/index.ts
Normal file
1
packages/adapters/cursor-local/src/cli/index.ts
Normal file
@@ -0,0 +1 @@
|
||||
// Public CLI entry point: re-export the stream-event pretty-printer.
export { printCursorStreamEvent } from "./format-event.js";
|
||||
83
packages/adapters/cursor-local/src/index.ts
Normal file
83
packages/adapters/cursor-local/src/index.ts
Normal file
@@ -0,0 +1,83 @@
|
||||
// Adapter identity used by Paperclip to select and display this adapter.
export const type = "cursor";
export const label = "Cursor CLI (local)";
// Default value passed to `agent --model` when no model is configured.
export const DEFAULT_CURSOR_LOCAL_MODEL = "auto";

// Static fallback list of Cursor model ids offered in the UI when the CLI
// cannot be queried for a live list. Order is the display order.
const CURSOR_FALLBACK_MODEL_IDS = [
  "auto",
  "composer-1.5",
  "composer-1",
  "gpt-5.3-codex-low",
  "gpt-5.3-codex-low-fast",
  "gpt-5.3-codex",
  "gpt-5.3-codex-fast",
  "gpt-5.3-codex-high",
  "gpt-5.3-codex-high-fast",
  "gpt-5.3-codex-xhigh",
  "gpt-5.3-codex-xhigh-fast",
  "gpt-5.3-codex-spark-preview",
  "gpt-5.2",
  "gpt-5.2-codex-low",
  "gpt-5.2-codex-low-fast",
  "gpt-5.2-codex",
  "gpt-5.2-codex-fast",
  "gpt-5.2-codex-high",
  "gpt-5.2-codex-high-fast",
  "gpt-5.2-codex-xhigh",
  "gpt-5.2-codex-xhigh-fast",
  "gpt-5.1-codex-max",
  "gpt-5.1-codex-max-high",
  "gpt-5.2-high",
  "gpt-5.1-high",
  "gpt-5.1-codex-mini",
  "opus-4.6-thinking",
  "opus-4.6",
  "opus-4.5",
  "opus-4.5-thinking",
  "sonnet-4.6",
  "sonnet-4.6-thinking",
  "sonnet-4.5",
  "sonnet-4.5-thinking",
  "gemini-3.1-pro",
  "gemini-3-pro",
  "gemini-3-flash",
  "grok",
  "kimi-k2.5",
];

// Model descriptors surfaced to the UI; the label simply mirrors the raw id.
export const models = CURSOR_FALLBACK_MODEL_IDS.map((id) => ({ id, label: id }));

// Human-readable configuration reference shown to operators when they set
// up this adapter. This is runtime text, not a comment — keep it verbatim.
export const agentConfigurationDoc = `# cursor agent configuration

Adapter: cursor

Use when:
- You want Paperclip to run Cursor Agent CLI locally as the agent runtime
- You want Cursor chat session resume across heartbeats via --resume
- You want structured stream output in run logs via --output-format stream-json

Don't use when:
- You need webhook-style external invocation (use openclaw_gateway or http)
- You only need one-shot shell commands (use process)
- Cursor Agent CLI is not installed on the machine

Core fields:
- cwd (string, optional): default absolute working directory fallback for the agent process (created if missing when possible)
- instructionsFilePath (string, optional): absolute path to a markdown instructions file prepended to the run prompt
- promptTemplate (string, optional): run prompt template
- model (string, optional): Cursor model id (for example auto or gpt-5.3-codex)
- mode (string, optional): Cursor execution mode passed as --mode (plan|ask). Leave unset for normal autonomous runs.
- command (string, optional): defaults to "agent"
- extraArgs (string[], optional): additional CLI args
- env (object, optional): KEY=VALUE environment variables

Operational fields:
- timeoutSec (number, optional): run timeout in seconds
- graceSec (number, optional): SIGTERM grace period in seconds

Notes:
- Runs are executed with: agent -p --output-format stream-json ...
- Prompts are piped to Cursor via stdin.
- Sessions are resumed with --resume when stored session cwd matches current cwd.
- Paperclip auto-injects local skills into "~/.cursor/skills" when missing, so Cursor can discover "$paperclip" and related skills on local runs.
- Paperclip auto-adds --yolo unless one of --trust/--yolo/-f is already present in extraArgs.
`;
|
||||
485
packages/adapters/cursor-local/src/server/execute.ts
Normal file
485
packages/adapters/cursor-local/src/server/execute.ts
Normal file
@@ -0,0 +1,485 @@
|
||||
import fs from "node:fs/promises";
|
||||
import type { Dirent } from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import type { AdapterExecutionContext, AdapterExecutionResult } from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
asString,
|
||||
asNumber,
|
||||
asStringArray,
|
||||
parseObject,
|
||||
buildPaperclipEnv,
|
||||
redactEnvForLogs,
|
||||
ensureAbsoluteDirectory,
|
||||
ensureCommandResolvable,
|
||||
ensurePathInEnv,
|
||||
renderTemplate,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import { DEFAULT_CURSOR_LOCAL_MODEL } from "../index.js";
|
||||
import { parseCursorJsonl, isCursorUnknownSessionError } from "./parse.js";
|
||||
import { normalizeCursorStreamLine } from "../shared/stream.js";
|
||||
import { hasCursorTrustBypassArg } from "../shared/trust.js";
|
||||
|
||||
// Directory containing this compiled module (ESM has no __dirname).
const __moduleDir = path.dirname(fileURLToPath(import.meta.url));
// Candidate locations of the bundled Paperclip skills directory, probed in
// order: next to the built package output first, then five levels up —
// presumably the monorepo root during development (TODO confirm layout).
const PAPERCLIP_SKILLS_CANDIDATES = [
  path.resolve(__moduleDir, "../../skills"),
  path.resolve(__moduleDir, "../../../../../skills"),
];
|
||||
|
||||
function firstNonEmptyLine(text: string): string {
|
||||
return (
|
||||
text
|
||||
.split(/\r?\n/)
|
||||
.map((line) => line.trim())
|
||||
.find(Boolean) ?? ""
|
||||
);
|
||||
}
|
||||
|
||||
function hasNonEmptyEnvValue(env: Record<string, string>, key: string): boolean {
|
||||
const raw = env[key];
|
||||
return typeof raw === "string" && raw.trim().length > 0;
|
||||
}
|
||||
|
||||
function resolveCursorBillingType(env: Record<string, string>): "api" | "subscription" {
|
||||
return hasNonEmptyEnvValue(env, "CURSOR_API_KEY") || hasNonEmptyEnvValue(env, "OPENAI_API_KEY")
|
||||
? "api"
|
||||
: "subscription";
|
||||
}
|
||||
|
||||
function resolveProviderFromModel(model: string): string | null {
|
||||
const trimmed = model.trim().toLowerCase();
|
||||
if (!trimmed) return null;
|
||||
const slash = trimmed.indexOf("/");
|
||||
if (slash > 0) return trimmed.slice(0, slash);
|
||||
if (trimmed.includes("sonnet") || trimmed.includes("claude")) return "anthropic";
|
||||
if (trimmed.startsWith("gpt") || trimmed.startsWith("o")) return "openai";
|
||||
return null;
|
||||
}
|
||||
|
||||
function normalizeMode(rawMode: string): "plan" | "ask" | null {
|
||||
const mode = rawMode.trim().toLowerCase();
|
||||
if (mode === "plan" || mode === "ask") return mode;
|
||||
return null;
|
||||
}
|
||||
|
||||
function renderPaperclipEnvNote(env: Record<string, string>): string {
|
||||
const paperclipKeys = Object.keys(env)
|
||||
.filter((key) => key.startsWith("PAPERCLIP_"))
|
||||
.sort();
|
||||
if (paperclipKeys.length === 0) return "";
|
||||
return [
|
||||
"Paperclip runtime note:",
|
||||
`The following PAPERCLIP_* environment variables are available in this run: ${paperclipKeys.join(", ")}`,
|
||||
"Do not assume these variables are missing without checking your shell environment.",
|
||||
"",
|
||||
"",
|
||||
].join("\n");
|
||||
}
|
||||
|
||||
function cursorSkillsHome(): string {
|
||||
return path.join(os.homedir(), ".cursor", "skills");
|
||||
}
|
||||
|
||||
async function resolvePaperclipSkillsDir(): Promise<string | null> {
|
||||
for (const candidate of PAPERCLIP_SKILLS_CANDIDATES) {
|
||||
const isDir = await fs.stat(candidate).then((s) => s.isDirectory()).catch(() => false);
|
||||
if (isDir) return candidate;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
// Test seams for ensureCursorSkillsInjected: override the skills source
// directory, the Cursor skills home, or the link operation itself.
type EnsureCursorSkillsInjectedOptions = {
  skillsDir?: string | null;
  skillsHome?: string;
  linkSkill?: (source: string, target: string) => Promise<void>;
};

/**
 * Links every bundled Paperclip skill directory into the Cursor skills home
 * (default ~/.cursor/skills) so local Cursor runs can discover them.
 *
 * Best-effort by design: every failure is reported through `onLog("stderr", …)`
 * and never thrown. Entries already present in the target directory — of any
 * kind, including broken symlinks — are left untouched.
 */
export async function ensureCursorSkillsInjected(
  onLog: AdapterExecutionContext["onLog"],
  options: EnsureCursorSkillsInjectedOptions = {},
) {
  const skillsDir = options.skillsDir ?? await resolvePaperclipSkillsDir();
  // No bundled skills directory found: nothing to inject.
  if (!skillsDir) return;

  const skillsHome = options.skillsHome ?? cursorSkillsHome();
  try {
    await fs.mkdir(skillsHome, { recursive: true });
  } catch (err) {
    await onLog(
      "stderr",
      `[paperclip] Failed to prepare Cursor skills directory ${skillsHome}: ${err instanceof Error ? err.message : String(err)}\n`,
    );
    return;
  }

  let entries: Dirent[];
  try {
    entries = await fs.readdir(skillsDir, { withFileTypes: true });
  } catch (err) {
    await onLog(
      "stderr",
      `[paperclip] Failed to read Paperclip skills from ${skillsDir}: ${err instanceof Error ? err.message : String(err)}\n`,
    );
    return;
  }

  // Default link strategy is a plain symlink; tests may substitute a copy.
  const linkSkill = options.linkSkill ?? ((source: string, target: string) => fs.symlink(source, target));
  for (const entry of entries) {
    // Skills are directories; skip stray files in the source tree.
    if (!entry.isDirectory()) continue;
    const source = path.join(skillsDir, entry.name);
    const target = path.join(skillsHome, entry.name);
    // lstat (not stat) so an existing symlink — even a broken one — counts
    // as "already present" and is not overwritten.
    const existing = await fs.lstat(target).catch(() => null);
    if (existing) continue;

    try {
      await linkSkill(source, target);
      await onLog(
        "stderr",
        `[paperclip] Injected Cursor skill "${entry.name}" into ${skillsHome}\n`,
      );
    } catch (err) {
      // One skill failing must not block the rest.
      await onLog(
        "stderr",
        `[paperclip] Failed to inject Cursor skill "${entry.name}" into ${skillsHome}: ${err instanceof Error ? err.message : String(err)}\n`,
      );
    }
  }
}
|
||||
|
||||
/**
 * Runs one Cursor Agent CLI invocation for a Paperclip run.
 *
 * Flow: resolve cwd/model/mode from config and workspace context, build the
 * PAPERCLIP_* environment, compose the prompt (instructions file + env note +
 * rendered template), spawn `agent -p --output-format stream-json` with the
 * prompt on stdin, and map the parsed stream output to an
 * AdapterExecutionResult. If resuming a stored session fails because Cursor
 * no longer knows it, the run is retried once with a fresh session.
 */
export async function execute(ctx: AdapterExecutionContext): Promise<AdapterExecutionResult> {
  const { runId, agent, runtime, config, context, onLog, onMeta, authToken } = ctx;

  const promptTemplate = asString(
    config.promptTemplate,
    "You are agent {{agent.id}} ({{agent.name}}). Continue your Paperclip work.",
  );
  const command = asString(config.command, "agent");
  const model = asString(config.model, DEFAULT_CURSOR_LOCAL_MODEL).trim();
  const mode = normalizeMode(asString(config.mode, ""));

  // --- Working directory resolution -------------------------------------
  // Workspace context wins, except that an explicit config.cwd overrides an
  // implicit "agent_home" workspace; final fallback is process.cwd().
  const workspaceContext = parseObject(context.paperclipWorkspace);
  const workspaceCwd = asString(workspaceContext.cwd, "");
  const workspaceSource = asString(workspaceContext.source, "");
  const workspaceId = asString(workspaceContext.workspaceId, "");
  const workspaceRepoUrl = asString(workspaceContext.repoUrl, "");
  const workspaceRepoRef = asString(workspaceContext.repoRef, "");
  const workspaceHints = Array.isArray(context.paperclipWorkspaces)
    ? context.paperclipWorkspaces.filter(
        (value): value is Record<string, unknown> => typeof value === "object" && value !== null,
      )
    : [];
  const configuredCwd = asString(config.cwd, "");
  const useConfiguredInsteadOfAgentHome = workspaceSource === "agent_home" && configuredCwd.length > 0;
  const effectiveWorkspaceCwd = useConfiguredInsteadOfAgentHome ? "" : workspaceCwd;
  const cwd = effectiveWorkspaceCwd || configuredCwd || process.cwd();
  await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
  await ensureCursorSkillsInjected(onLog);

  // --- Environment assembly ----------------------------------------------
  // Base PAPERCLIP_* vars, then wake/approval/workspace context, then the
  // user-configured env (which may override), then the auth token fallback.
  const envConfig = parseObject(config.env);
  const hasExplicitApiKey =
    typeof envConfig.PAPERCLIP_API_KEY === "string" && envConfig.PAPERCLIP_API_KEY.trim().length > 0;
  const env: Record<string, string> = { ...buildPaperclipEnv(agent) };
  env.PAPERCLIP_RUN_ID = runId;
  // Wake context: taskId preferred over legacy issueId.
  const wakeTaskId =
    (typeof context.taskId === "string" && context.taskId.trim().length > 0 && context.taskId.trim()) ||
    (typeof context.issueId === "string" && context.issueId.trim().length > 0 && context.issueId.trim()) ||
    null;
  const wakeReason =
    typeof context.wakeReason === "string" && context.wakeReason.trim().length > 0
      ? context.wakeReason.trim()
      : null;
  const wakeCommentId =
    (typeof context.wakeCommentId === "string" && context.wakeCommentId.trim().length > 0 && context.wakeCommentId.trim()) ||
    (typeof context.commentId === "string" && context.commentId.trim().length > 0 && context.commentId.trim()) ||
    null;
  const approvalId =
    typeof context.approvalId === "string" && context.approvalId.trim().length > 0
      ? context.approvalId.trim()
      : null;
  const approvalStatus =
    typeof context.approvalStatus === "string" && context.approvalStatus.trim().length > 0
      ? context.approvalStatus.trim()
      : null;
  const linkedIssueIds = Array.isArray(context.issueIds)
    ? context.issueIds.filter((value): value is string => typeof value === "string" && value.trim().length > 0)
    : [];
  if (wakeTaskId) {
    env.PAPERCLIP_TASK_ID = wakeTaskId;
  }
  if (wakeReason) {
    env.PAPERCLIP_WAKE_REASON = wakeReason;
  }
  if (wakeCommentId) {
    env.PAPERCLIP_WAKE_COMMENT_ID = wakeCommentId;
  }
  if (approvalId) {
    env.PAPERCLIP_APPROVAL_ID = approvalId;
  }
  if (approvalStatus) {
    env.PAPERCLIP_APPROVAL_STATUS = approvalStatus;
  }
  if (linkedIssueIds.length > 0) {
    env.PAPERCLIP_LINKED_ISSUE_IDS = linkedIssueIds.join(",");
  }
  if (effectiveWorkspaceCwd) {
    env.PAPERCLIP_WORKSPACE_CWD = effectiveWorkspaceCwd;
  }
  if (workspaceSource) {
    env.PAPERCLIP_WORKSPACE_SOURCE = workspaceSource;
  }
  if (workspaceId) {
    env.PAPERCLIP_WORKSPACE_ID = workspaceId;
  }
  if (workspaceRepoUrl) {
    env.PAPERCLIP_WORKSPACE_REPO_URL = workspaceRepoUrl;
  }
  if (workspaceRepoRef) {
    env.PAPERCLIP_WORKSPACE_REPO_REF = workspaceRepoRef;
  }
  if (workspaceHints.length > 0) {
    env.PAPERCLIP_WORKSPACES_JSON = JSON.stringify(workspaceHints);
  }
  // User-configured env entries override the defaults above.
  for (const [k, v] of Object.entries(envConfig)) {
    if (typeof v === "string") env[k] = v;
  }
  if (!hasExplicitApiKey && authToken) {
    env.PAPERCLIP_API_KEY = authToken;
  }
  const billingType = resolveCursorBillingType(env);
  const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
  await ensureCommandResolvable(command, cwd, runtimeEnv);

  // --- CLI argument / session resolution ---------------------------------
  const timeoutSec = asNumber(config.timeoutSec, 0);
  const graceSec = asNumber(config.graceSec, 20);
  // extraArgs preferred; legacy config.args accepted as fallback.
  const extraArgs = (() => {
    const fromExtraArgs = asStringArray(config.extraArgs);
    if (fromExtraArgs.length > 0) return fromExtraArgs;
    return asStringArray(config.args);
  })();
  const autoTrustEnabled = !hasCursorTrustBypassArg(extraArgs);

  // A stored session is resumed only when its recorded cwd matches (or no
  // cwd was recorded); otherwise we start fresh and say why.
  const runtimeSessionParams = parseObject(runtime.sessionParams);
  const runtimeSessionId = asString(runtimeSessionParams.sessionId, runtime.sessionId ?? "");
  const runtimeSessionCwd = asString(runtimeSessionParams.cwd, "");
  const canResumeSession =
    runtimeSessionId.length > 0 &&
    (runtimeSessionCwd.length === 0 || path.resolve(runtimeSessionCwd) === path.resolve(cwd));
  const sessionId = canResumeSession ? runtimeSessionId : null;
  if (runtimeSessionId && !canResumeSession) {
    await onLog(
      "stderr",
      `[paperclip] Cursor session "${runtimeSessionId}" was saved for cwd "${runtimeSessionCwd}" and will not be resumed in "${cwd}".\n`,
    );
  }

  // --- Prompt composition -------------------------------------------------
  // Optional instructions file is prepended; read failures only warn.
  const instructionsFilePath = asString(config.instructionsFilePath, "").trim();
  const instructionsDir = instructionsFilePath ? `${path.dirname(instructionsFilePath)}/` : "";
  let instructionsPrefix = "";
  if (instructionsFilePath) {
    try {
      const instructionsContents = await fs.readFile(instructionsFilePath, "utf8");
      instructionsPrefix =
        `${instructionsContents}\n\n` +
        `The above agent instructions were loaded from ${instructionsFilePath}. ` +
        `Resolve any relative file references from ${instructionsDir}.\n\n`;
      await onLog(
        "stderr",
        `[paperclip] Loaded agent instructions file: ${instructionsFilePath}\n`,
      );
    } catch (err) {
      const reason = err instanceof Error ? err.message : String(err);
      await onLog(
        "stderr",
        `[paperclip] Warning: could not read agent instructions file "${instructionsFilePath}": ${reason}\n`,
      );
    }
  }
  // Human-readable notes attached to run metadata describing how the
  // command line and prompt were built.
  const commandNotes = (() => {
    const notes: string[] = [];
    if (autoTrustEnabled) {
      notes.push("Auto-added --yolo to bypass interactive prompts.");
    }
    notes.push("Prompt is piped to Cursor via stdin.");
    if (!instructionsFilePath) return notes;
    if (instructionsPrefix.length > 0) {
      notes.push(
        `Loaded agent instructions from ${instructionsFilePath}`,
        `Prepended instructions + path directive to prompt (relative references from ${instructionsDir}).`,
      );
      return notes;
    }
    notes.push(
      `Configured instructionsFilePath ${instructionsFilePath}, but file could not be read; continuing without injected instructions.`,
    );
    return notes;
  })();

  const renderedPrompt = renderTemplate(promptTemplate, {
    agentId: agent.id,
    companyId: agent.companyId,
    runId,
    company: { id: agent.companyId },
    agent,
    run: { id: runId, source: "on_demand" },
    context,
  });
  const paperclipEnvNote = renderPaperclipEnvNote(env);
  const prompt = `${instructionsPrefix}${paperclipEnvNote}${renderedPrompt}`;

  // Builds the full CLI argument list; resumeSessionId is null on fresh runs.
  const buildArgs = (resumeSessionId: string | null) => {
    const args = ["-p", "--output-format", "stream-json", "--workspace", cwd];
    if (resumeSessionId) args.push("--resume", resumeSessionId);
    if (model) args.push("--model", model);
    if (mode) args.push("--mode", mode);
    if (autoTrustEnabled) args.push("--yolo");
    if (extraArgs.length > 0) args.push(...extraArgs);
    return args;
  };

  // Spawns one Cursor process: emits run metadata, line-buffers and
  // normalizes stdout into the run log, and parses the JSONL stream.
  const runAttempt = async (resumeSessionId: string | null) => {
    const args = buildArgs(resumeSessionId);
    if (onMeta) {
      await onMeta({
        adapterType: "cursor",
        command,
        cwd,
        commandNotes,
        commandArgs: args,
        env: redactEnvForLogs(env),
        prompt,
        context,
      });
    }

    // stdout arrives in arbitrary chunks; buffer until a full line is seen.
    let stdoutLineBuffer = "";
    const emitNormalizedStdoutLine = async (rawLine: string) => {
      const normalized = normalizeCursorStreamLine(rawLine);
      if (!normalized.line) return;
      await onLog(normalized.stream ?? "stdout", `${normalized.line}\n`);
    };
    const flushStdoutChunk = async (chunk: string, finalize = false) => {
      const combined = `${stdoutLineBuffer}${chunk}`;
      const lines = combined.split(/\r?\n/);
      // Last element may be a partial line; keep it buffered.
      stdoutLineBuffer = lines.pop() ?? "";

      for (const line of lines) {
        await emitNormalizedStdoutLine(line);
      }

      if (finalize) {
        const trailing = stdoutLineBuffer.trim();
        stdoutLineBuffer = "";
        if (trailing) {
          await emitNormalizedStdoutLine(trailing);
        }
      }
    };

    const proc = await runChildProcess(runId, command, args, {
      cwd,
      env,
      timeoutSec,
      graceSec,
      stdin: prompt,
      onLog: async (stream, chunk) => {
        if (stream !== "stdout") {
          await onLog(stream, chunk);
          return;
        }
        await flushStdoutChunk(chunk);
      },
    });
    // Flush any partial final line once the process has exited.
    await flushStdoutChunk("", true);

    return {
      proc,
      parsed: parseCursorJsonl(proc.stdout),
    };
  };

  const providerFromModel = resolveProviderFromModel(model);

  // Maps one attempt (process result + parsed stream) to the adapter result.
  const toResult = (
    attempt: {
      proc: {
        exitCode: number | null;
        signal: string | null;
        timedOut: boolean;
        stdout: string;
        stderr: string;
      };
      parsed: ReturnType<typeof parseCursorJsonl>;
    },
    clearSessionOnMissingSession = false,
  ): AdapterExecutionResult => {
    if (attempt.proc.timedOut) {
      return {
        exitCode: attempt.proc.exitCode,
        signal: attempt.proc.signal,
        timedOut: true,
        errorMessage: `Timed out after ${timeoutSec}s`,
        clearSession: clearSessionOnMissingSession,
      };
    }

    // NOTE(review): runtimeSessionId is "" (not null) when absent, so the
    // `?? runtime.sessionId` fallback can never fire — confirm intended.
    const resolvedSessionId = attempt.parsed.sessionId ?? runtimeSessionId ?? runtime.sessionId ?? null;
    const resolvedSessionParams = resolvedSessionId
      ? ({
          sessionId: resolvedSessionId,
          cwd,
          ...(workspaceId ? { workspaceId } : {}),
          ...(workspaceRepoUrl ? { repoUrl: workspaceRepoUrl } : {}),
          ...(workspaceRepoRef ? { repoRef: workspaceRepoRef } : {}),
        } as Record<string, unknown>)
      : null;
    // Error message preference: parsed stream error > first stderr line >
    // generic exit-code message.
    const parsedError = typeof attempt.parsed.errorMessage === "string" ? attempt.parsed.errorMessage.trim() : "";
    const stderrLine = firstNonEmptyLine(attempt.proc.stderr);
    const fallbackErrorMessage =
      parsedError ||
      stderrLine ||
      `Cursor exited with code ${attempt.proc.exitCode ?? -1}`;

    return {
      exitCode: attempt.proc.exitCode,
      signal: attempt.proc.signal,
      timedOut: false,
      errorMessage:
        (attempt.proc.exitCode ?? 0) === 0
          ? null
          : fallbackErrorMessage,
      usage: attempt.parsed.usage,
      sessionId: resolvedSessionId,
      sessionParams: resolvedSessionParams,
      sessionDisplayId: resolvedSessionId,
      provider: providerFromModel,
      model,
      billingType,
      costUsd: attempt.parsed.costUsd,
      resultJson: {
        stdout: attempt.proc.stdout,
        stderr: attempt.proc.stderr,
      },
      summary: attempt.parsed.summary,
      clearSession: Boolean(clearSessionOnMissingSession && !resolvedSessionId),
    };
  };

  // --- Run (with one fresh-session retry on unknown-session failure) ------
  const initial = await runAttempt(sessionId);
  if (
    sessionId &&
    !initial.proc.timedOut &&
    (initial.proc.exitCode ?? 0) !== 0 &&
    isCursorUnknownSessionError(initial.proc.stdout, initial.proc.stderr)
  ) {
    await onLog(
      "stderr",
      `[paperclip] Cursor resume session "${sessionId}" is unavailable; retrying with a fresh session.\n`,
    );
    const retry = await runAttempt(null);
    return toResult(retry, true);
  }

  return toResult(initial);
}
|
||||
64
packages/adapters/cursor-local/src/server/index.ts
Normal file
64
packages/adapters/cursor-local/src/server/index.ts
Normal file
@@ -0,0 +1,64 @@
|
||||
export { execute, ensureCursorSkillsInjected } from "./execute.js";
|
||||
export { testEnvironment } from "./test.js";
|
||||
export { parseCursorJsonl, isCursorUnknownSessionError } from "./parse.js";
|
||||
import type { AdapterSessionCodec } from "@paperclipai/adapter-utils";
|
||||
|
||||
function readNonEmptyString(value: unknown): string | null {
|
||||
return typeof value === "string" && value.trim().length > 0 ? value.trim() : null;
|
||||
}
|
||||
|
||||
export const sessionCodec: AdapterSessionCodec = {
|
||||
deserialize(raw: unknown) {
|
||||
if (typeof raw !== "object" || raw === null || Array.isArray(raw)) return null;
|
||||
const record = raw as Record<string, unknown>;
|
||||
const sessionId =
|
||||
readNonEmptyString(record.sessionId) ??
|
||||
readNonEmptyString(record.session_id) ??
|
||||
readNonEmptyString(record.sessionID);
|
||||
if (!sessionId) return null;
|
||||
const cwd =
|
||||
readNonEmptyString(record.cwd) ??
|
||||
readNonEmptyString(record.workdir) ??
|
||||
readNonEmptyString(record.folder);
|
||||
const workspaceId = readNonEmptyString(record.workspaceId) ?? readNonEmptyString(record.workspace_id);
|
||||
const repoUrl = readNonEmptyString(record.repoUrl) ?? readNonEmptyString(record.repo_url);
|
||||
const repoRef = readNonEmptyString(record.repoRef) ?? readNonEmptyString(record.repo_ref);
|
||||
return {
|
||||
sessionId,
|
||||
...(cwd ? { cwd } : {}),
|
||||
...(workspaceId ? { workspaceId } : {}),
|
||||
...(repoUrl ? { repoUrl } : {}),
|
||||
...(repoRef ? { repoRef } : {}),
|
||||
};
|
||||
},
|
||||
serialize(params: Record<string, unknown> | null) {
|
||||
if (!params) return null;
|
||||
const sessionId =
|
||||
readNonEmptyString(params.sessionId) ??
|
||||
readNonEmptyString(params.session_id) ??
|
||||
readNonEmptyString(params.sessionID);
|
||||
if (!sessionId) return null;
|
||||
const cwd =
|
||||
readNonEmptyString(params.cwd) ??
|
||||
readNonEmptyString(params.workdir) ??
|
||||
readNonEmptyString(params.folder);
|
||||
const workspaceId = readNonEmptyString(params.workspaceId) ?? readNonEmptyString(params.workspace_id);
|
||||
const repoUrl = readNonEmptyString(params.repoUrl) ?? readNonEmptyString(params.repo_url);
|
||||
const repoRef = readNonEmptyString(params.repoRef) ?? readNonEmptyString(params.repo_ref);
|
||||
return {
|
||||
sessionId,
|
||||
...(cwd ? { cwd } : {}),
|
||||
...(workspaceId ? { workspaceId } : {}),
|
||||
...(repoUrl ? { repoUrl } : {}),
|
||||
...(repoRef ? { repoRef } : {}),
|
||||
};
|
||||
},
|
||||
getDisplayId(params: Record<string, unknown> | null) {
|
||||
if (!params) return null;
|
||||
return (
|
||||
readNonEmptyString(params.sessionId) ??
|
||||
readNonEmptyString(params.session_id) ??
|
||||
readNonEmptyString(params.sessionID)
|
||||
);
|
||||
},
|
||||
};
|
||||
162
packages/adapters/cursor-local/src/server/parse.ts
Normal file
162
packages/adapters/cursor-local/src/server/parse.ts
Normal file
@@ -0,0 +1,162 @@
|
||||
import { asString, asNumber, parseObject, parseJson } from "@paperclipai/adapter-utils/server-utils";
|
||||
import { normalizeCursorStreamLine } from "../shared/stream.js";
|
||||
|
||||
function asErrorText(value: unknown): string {
|
||||
if (typeof value === "string") return value;
|
||||
const rec = parseObject(value);
|
||||
const message =
|
||||
asString(rec.message, "") ||
|
||||
asString(rec.error, "") ||
|
||||
asString(rec.code, "") ||
|
||||
asString(rec.detail, "");
|
||||
if (message) return message;
|
||||
try {
|
||||
return JSON.stringify(rec);
|
||||
} catch {
|
||||
return "";
|
||||
}
|
||||
}
|
||||
|
||||
function collectAssistantText(message: unknown): string[] {
|
||||
if (typeof message === "string") {
|
||||
const trimmed = message.trim();
|
||||
return trimmed ? [trimmed] : [];
|
||||
}
|
||||
|
||||
const rec = parseObject(message);
|
||||
const direct = asString(rec.text, "").trim();
|
||||
const lines: string[] = direct ? [direct] : [];
|
||||
const content = Array.isArray(rec.content) ? rec.content : [];
|
||||
|
||||
for (const partRaw of content) {
|
||||
const part = parseObject(partRaw);
|
||||
const type = asString(part.type, "").trim();
|
||||
if (type === "output_text" || type === "text") {
|
||||
const text = asString(part.text, "").trim();
|
||||
if (text) lines.push(text);
|
||||
}
|
||||
}
|
||||
|
||||
return lines;
|
||||
}
|
||||
|
||||
function readSessionId(event: Record<string, unknown>): string | null {
|
||||
return (
|
||||
asString(event.session_id, "").trim() ||
|
||||
asString(event.sessionId, "").trim() ||
|
||||
asString(event.sessionID, "").trim() ||
|
||||
null
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Parses the full stream-json stdout of a Cursor run and aggregates:
 * the session id (last one seen wins), assistant messages (joined into
 * `summary` with blank lines), token usage, cost in USD (null when zero),
 * and the last error message. Unparseable or blank lines are skipped.
 */
export function parseCursorJsonl(stdout: string) {
  let sessionId: string | null = null;
  const messages: string[] = [];
  let errorMessage: string | null = null;
  let totalCostUsd = 0;
  const usage = {
    inputTokens: 0,
    cachedInputTokens: 0,
    outputTokens: 0,
  };

  for (const rawLine of stdout.split(/\r?\n/)) {
    const line = normalizeCursorStreamLine(rawLine).line;
    if (!line) continue;

    const event = parseJson(line);
    if (!event) continue;

    // Any event type may carry a session id; the most recent one wins.
    const foundSession = readSessionId(event);
    if (foundSession) sessionId = foundSession;

    const type = asString(event.type, "").trim();

    if (type === "assistant") {
      messages.push(...collectAssistantText(event.message));
      continue;
    }

    if (type === "result") {
      // Usage keys vary by CLI version: snake_case, camelCase, or the
      // cache_read_input_tokens alias for cached input.
      const usageObj = parseObject(event.usage);
      usage.inputTokens += asNumber(
        usageObj.input_tokens,
        asNumber(usageObj.inputTokens, 0),
      );
      usage.cachedInputTokens += asNumber(
        usageObj.cached_input_tokens,
        asNumber(usageObj.cachedInputTokens, asNumber(usageObj.cache_read_input_tokens, 0)),
      );
      usage.outputTokens += asNumber(
        usageObj.output_tokens,
        asNumber(usageObj.outputTokens, 0),
      );
      totalCostUsd += asNumber(event.total_cost_usd, asNumber(event.cost_usd, asNumber(event.cost, 0)));

      const isError = event.is_error === true || asString(event.subtype, "").toLowerCase() === "error";
      const resultText = asString(event.result, "").trim();
      // Fall back to the final result text only when no assistant messages
      // were streamed, so the summary isn't duplicated.
      if (resultText && messages.length === 0) {
        messages.push(resultText);
      }
      if (isError) {
        const resultError = asErrorText(event.error ?? event.message ?? event.result).trim();
        if (resultError) errorMessage = resultError;
      }
      continue;
    }

    if (type === "error") {
      const message = asErrorText(event.message ?? event.error ?? event.detail).trim();
      if (message) errorMessage = message;
      continue;
    }

    if (type === "system") {
      // Only error-subtype system events contribute an error message.
      const subtype = asString(event.subtype, "").trim().toLowerCase();
      if (subtype === "error") {
        const message = asErrorText(event.message ?? event.error ?? event.detail).trim();
        if (message) errorMessage = message;
      }
      continue;
    }

    // Compatibility with older stream-json shapes.
    if (type === "text") {
      const part = parseObject(event.part);
      const text = asString(part.text, "").trim();
      if (text) messages.push(text);
      continue;
    }

    if (type === "step_finish") {
      const part = parseObject(event.part);
      const tokens = parseObject(part.tokens);
      const cache = parseObject(tokens.cache);
      usage.inputTokens += asNumber(tokens.input, 0);
      usage.cachedInputTokens += asNumber(cache.read, 0);
      usage.outputTokens += asNumber(tokens.output, 0);
      totalCostUsd += asNumber(part.cost, 0);
      continue;
    }
  }

  return {
    sessionId,
    summary: messages.join("\n\n").trim(),
    usage,
    // Zero cost is reported as "unknown" (null), not as free.
    costUsd: totalCostUsd > 0 ? totalCostUsd : null,
    errorMessage,
  };
}
|
||||
|
||||
export function isCursorUnknownSessionError(stdout: string, stderr: string): boolean {
|
||||
const haystack = `${stdout}\n${stderr}`
|
||||
.split(/\r?\n/)
|
||||
.map((line) => line.trim())
|
||||
.filter(Boolean)
|
||||
.join("\n");
|
||||
|
||||
return /unknown\s+(session|chat)|session\s+.*\s+not\s+found|chat\s+.*\s+not\s+found|resume\s+.*\s+not\s+found|could\s+not\s+resume/i.test(
|
||||
haystack,
|
||||
);
|
||||
}
|
||||
210
packages/adapters/cursor-local/src/server/test.ts
Normal file
210
packages/adapters/cursor-local/src/server/test.ts
Normal file
@@ -0,0 +1,210 @@
|
||||
import type {
|
||||
AdapterEnvironmentCheck,
|
||||
AdapterEnvironmentTestContext,
|
||||
AdapterEnvironmentTestResult,
|
||||
} from "@paperclipai/adapter-utils";
|
||||
import {
|
||||
asString,
|
||||
asStringArray,
|
||||
parseObject,
|
||||
ensureAbsoluteDirectory,
|
||||
ensureCommandResolvable,
|
||||
ensurePathInEnv,
|
||||
runChildProcess,
|
||||
} from "@paperclipai/adapter-utils/server-utils";
|
||||
import path from "node:path";
|
||||
import { DEFAULT_CURSOR_LOCAL_MODEL } from "../index.js";
|
||||
import { parseCursorJsonl } from "./parse.js";
|
||||
import { hasCursorTrustBypassArg } from "../shared/trust.js";
|
||||
|
||||
function summarizeStatus(checks: AdapterEnvironmentCheck[]): AdapterEnvironmentTestResult["status"] {
|
||||
if (checks.some((check) => check.level === "error")) return "fail";
|
||||
if (checks.some((check) => check.level === "warn")) return "warn";
|
||||
return "pass";
|
||||
}
|
||||
|
||||
function isNonEmpty(value: unknown): value is string {
|
||||
return typeof value === "string" && value.trim().length > 0;
|
||||
}
|
||||
|
||||
function firstNonEmptyLine(text: string): string {
|
||||
return (
|
||||
text
|
||||
.split(/\r?\n/)
|
||||
.map((line) => line.trim())
|
||||
.find(Boolean) ?? ""
|
||||
);
|
||||
}
|
||||
|
||||
function commandLooksLike(command: string, expected: string): boolean {
|
||||
const base = path.basename(command).toLowerCase();
|
||||
return base === expected || base === `${expected}.cmd` || base === `${expected}.exe`;
|
||||
}
|
||||
|
||||
function summarizeProbeDetail(stdout: string, stderr: string, parsedError: string | null): string | null {
|
||||
const raw = parsedError?.trim() || firstNonEmptyLine(stderr) || firstNonEmptyLine(stdout);
|
||||
if (!raw) return null;
|
||||
const clean = raw.replace(/\s+/g, " ").trim();
|
||||
const max = 240;
|
||||
return clean.length > max ? `${clean.slice(0, max - 1)}…` : clean;
|
||||
}
|
||||
|
||||
// Heuristic match for Cursor CLI output indicating missing or invalid
// authentication: login-required messages, unauthorized responses, and
// bad/absent CURSOR_API_KEY mentions (case-insensitive, tolerant of _/-/space
// separators in "api key").
const CURSOR_AUTH_REQUIRED_RE =
  /(?:authentication\s+required|not\s+authenticated|not\s+logged\s+in|unauthorized|invalid(?:\s+or\s+missing)?\s+api(?:[_\s-]?key)?|cursor[_\s-]?api[_\s-]?key|run\s+'?agent\s+login'?\s+first|api(?:[_\s-]?key)?(?:\s+is)?\s+required)/i;
|
||||
|
||||
/**
 * Run environment checks for the cursor-local adapter: validate the working
 * directory, verify the CLI command resolves, report CURSOR_API_KEY presence,
 * and — when no hard failures occurred — execute a live "Respond with hello."
 * probe to confirm installation and authentication end-to-end.
 *
 * Failed checks never throw out of this function; they are recorded as check
 * entries and folded into the overall status by summarizeStatus().
 */
export async function testEnvironment(
  ctx: AdapterEnvironmentTestContext,
): Promise<AdapterEnvironmentTestResult> {
  const checks: AdapterEnvironmentCheck[] = [];
  const config = parseObject(ctx.config);
  // Defaults: the stock `agent` CLI and the server process's cwd.
  const command = asString(config.command, "agent");
  const cwd = asString(config.cwd, process.cwd());

  // --- Check 1: working directory is valid (created if missing). ---
  try {
    await ensureAbsoluteDirectory(cwd, { createIfMissing: true });
    checks.push({
      code: "cursor_cwd_valid",
      level: "info",
      message: `Working directory is valid: ${cwd}`,
    });
  } catch (err) {
    checks.push({
      code: "cursor_cwd_invalid",
      level: "error",
      message: err instanceof Error ? err.message : "Invalid working directory",
      detail: cwd,
    });
  }

  // --- Check 2: command resolvability, with config env layered over host env. ---
  // Only string-valued env entries are honored; other types are dropped.
  const envConfig = parseObject(config.env);
  const env: Record<string, string> = {};
  for (const [key, value] of Object.entries(envConfig)) {
    if (typeof value === "string") env[key] = value;
  }
  const runtimeEnv = ensurePathInEnv({ ...process.env, ...env });
  try {
    await ensureCommandResolvable(command, cwd, runtimeEnv);
    checks.push({
      code: "cursor_command_resolvable",
      level: "info",
      message: `Command is executable: ${command}`,
    });
  } catch (err) {
    checks.push({
      code: "cursor_command_unresolvable",
      level: "error",
      message: err instanceof Error ? err.message : "Command is not executable",
      detail: command,
    });
  }

  // --- Check 3: API key presence. Adapter-config env takes precedence when
  // reporting the source; absence is only a warning because `agent login`
  // may already have authenticated the CLI.
  const configCursorApiKey = env.CURSOR_API_KEY;
  const hostCursorApiKey = process.env.CURSOR_API_KEY;
  if (isNonEmpty(configCursorApiKey) || isNonEmpty(hostCursorApiKey)) {
    const source = isNonEmpty(configCursorApiKey) ? "adapter config env" : "server environment";
    checks.push({
      code: "cursor_api_key_present",
      level: "info",
      message: "CURSOR_API_KEY is set for Cursor authentication.",
      detail: `Detected in ${source}.`,
    });
  } else {
    checks.push({
      code: "cursor_api_key_missing",
      level: "warn",
      message: "CURSOR_API_KEY is not set. Cursor runs may fail until authentication is configured.",
      hint: "Set CURSOR_API_KEY in adapter env or run `agent login`.",
    });
  }

  // --- Check 4: live hello probe. Skipped entirely when the cwd or command
  // check failed hard, and skipped with an info note for non-`agent` commands
  // (custom wrappers may not accept the `agent` CLI flags used below).
  const canRunProbe =
    checks.every((check) => check.code !== "cursor_cwd_invalid" && check.code !== "cursor_command_unresolvable");
  if (canRunProbe) {
    if (!commandLooksLike(command, "agent")) {
      checks.push({
        code: "cursor_hello_probe_skipped_custom_command",
        level: "info",
        message: "Skipped hello probe because command is not `agent`.",
        detail: command,
        hint: "Use the `agent` CLI command to run the automatic installation and auth probe.",
      });
    } else {
      const model = asString(config.model, DEFAULT_CURSOR_LOCAL_MODEL).trim();
      // `extraArgs` wins over the legacy `args` field when both are present.
      const extraArgs = (() => {
        const fromExtraArgs = asStringArray(config.extraArgs);
        if (fromExtraArgs.length > 0) return fromExtraArgs;
        return asStringArray(config.args);
      })();
      // Auto-append --yolo unless the user already passed a trust-bypass flag.
      const autoTrustEnabled = !hasCursorTrustBypassArg(extraArgs);
      const args = ["-p", "--mode", "ask", "--output-format", "json", "--workspace", cwd];
      if (model) args.push("--model", model);
      if (autoTrustEnabled) args.push("--yolo");
      if (extraArgs.length > 0) args.push(...extraArgs);
      args.push("Respond with hello.");

      // Unique run id keeps probe logs distinguishable from real runs.
      const probe = await runChildProcess(
        `cursor-envtest-${Date.now()}-${Math.random().toString(16).slice(2)}`,
        command,
        args,
        {
          cwd,
          env,
          timeoutSec: 45,
          graceSec: 5,
          onLog: async () => {},
        },
      );
      const parsed = parseCursorJsonl(probe.stdout);
      const detail = summarizeProbeDetail(probe.stdout, probe.stderr, parsed.errorMessage);
      // Combined text searched for auth-failure signatures below.
      const authEvidence = `${parsed.errorMessage ?? ""}\n${probe.stdout}\n${probe.stderr}`.trim();

      if (probe.timedOut) {
        checks.push({
          code: "cursor_hello_probe_timed_out",
          level: "warn",
          message: "Cursor hello probe timed out.",
          hint: "Retry the probe. If this persists, verify `agent -p --mode ask --output-format json \"Respond with hello.\"` manually.",
        });
      } else if ((probe.exitCode ?? 1) === 0) {
        // Exit 0: confirm the model actually answered "hello"; otherwise warn.
        const summary = parsed.summary.trim();
        const hasHello = /\bhello\b/i.test(summary);
        checks.push({
          code: hasHello ? "cursor_hello_probe_passed" : "cursor_hello_probe_unexpected_output",
          level: hasHello ? "info" : "warn",
          message: hasHello
            ? "Cursor hello probe succeeded."
            : "Cursor probe ran but did not return `hello` as expected.",
          ...(summary ? { detail: summary.replace(/\s+/g, " ").trim().slice(0, 240) } : {}),
          ...(hasHello
            ? {}
            : {
                hint: "Try `agent -p --mode ask --output-format json \"Respond with hello.\"` manually to inspect full output.",
              }),
        });
      } else if (CURSOR_AUTH_REQUIRED_RE.test(authEvidence)) {
        // Non-zero exit that looks like an auth problem — actionable warning.
        checks.push({
          code: "cursor_hello_probe_auth_required",
          level: "warn",
          message: "Cursor CLI is installed, but authentication is not ready.",
          ...(detail ? { detail } : {}),
          hint: "Run `agent login` or configure CURSOR_API_KEY in adapter env/shell, then retry the probe.",
        });
      } else {
        // Any other non-zero exit is a hard failure.
        checks.push({
          code: "cursor_hello_probe_failed",
          level: "error",
          message: "Cursor hello probe failed.",
          ...(detail ? { detail } : {}),
          hint: "Run `agent -p --mode ask --output-format json \"Respond with hello.\"` manually in this working directory to debug.",
        });
      }
    }
  }

  return {
    adapterType: ctx.adapterType,
    status: summarizeStatus(checks),
    checks,
    testedAt: new Date().toISOString(),
  };
}
|
||||
16
packages/adapters/cursor-local/src/shared/stream.ts
Normal file
16
packages/adapters/cursor-local/src/shared/stream.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
export function normalizeCursorStreamLine(rawLine: string): {
|
||||
stream: "stdout" | "stderr" | null;
|
||||
line: string;
|
||||
} {
|
||||
const trimmed = rawLine.trim();
|
||||
if (!trimmed) return { stream: null, line: "" };
|
||||
|
||||
const prefixed = trimmed.match(/^(stdout|stderr)\s*[:=]?\s*([\[{].*)$/i);
|
||||
if (!prefixed) {
|
||||
return { stream: null, line: trimmed };
|
||||
}
|
||||
|
||||
const stream = prefixed[1]?.toLowerCase() === "stderr" ? "stderr" : "stdout";
|
||||
const line = (prefixed[2] ?? "").trim();
|
||||
return { stream, line };
|
||||
}
|
||||
9
packages/adapters/cursor-local/src/shared/trust.ts
Normal file
9
packages/adapters/cursor-local/src/shared/trust.ts
Normal file
@@ -0,0 +1,9 @@
|
||||
export function hasCursorTrustBypassArg(args: readonly string[]): boolean {
|
||||
return args.some(
|
||||
(arg) =>
|
||||
arg === "--trust" ||
|
||||
arg === "--yolo" ||
|
||||
arg === "-f" ||
|
||||
arg.startsWith("--trust="),
|
||||
);
|
||||
}
|
||||
81
packages/adapters/cursor-local/src/ui/build-config.ts
Normal file
81
packages/adapters/cursor-local/src/ui/build-config.ts
Normal file
@@ -0,0 +1,81 @@
|
||||
import type { CreateConfigValues } from "@paperclipai/adapter-utils";
|
||||
import { DEFAULT_CURSOR_LOCAL_MODEL } from "../index.js";
|
||||
|
||||
function parseCommaArgs(value: string): string[] {
|
||||
return value
|
||||
.split(",")
|
||||
.map((item) => item.trim())
|
||||
.filter(Boolean);
|
||||
}
|
||||
|
||||
function parseEnvVars(text: string): Record<string, string> {
|
||||
const env: Record<string, string> = {};
|
||||
for (const line of text.split(/\r?\n/)) {
|
||||
const trimmed = line.trim();
|
||||
if (!trimmed || trimmed.startsWith("#")) continue;
|
||||
const eq = trimmed.indexOf("=");
|
||||
if (eq <= 0) continue;
|
||||
const key = trimmed.slice(0, eq).trim();
|
||||
const value = trimmed.slice(eq + 1);
|
||||
if (!/^[A-Za-z_][A-Za-z0-9_]*$/.test(key)) continue;
|
||||
env[key] = value;
|
||||
}
|
||||
return env;
|
||||
}
|
||||
|
||||
function parseEnvBindings(bindings: unknown): Record<string, unknown> {
|
||||
if (typeof bindings !== "object" || bindings === null || Array.isArray(bindings)) return {};
|
||||
const env: Record<string, unknown> = {};
|
||||
for (const [key, raw] of Object.entries(bindings)) {
|
||||
if (!/^[A-Za-z_][A-Za-z0-9_]*$/.test(key)) continue;
|
||||
if (typeof raw === "string") {
|
||||
env[key] = { type: "plain", value: raw };
|
||||
continue;
|
||||
}
|
||||
if (typeof raw !== "object" || raw === null || Array.isArray(raw)) continue;
|
||||
const rec = raw as Record<string, unknown>;
|
||||
if (rec.type === "plain" && typeof rec.value === "string") {
|
||||
env[key] = { type: "plain", value: rec.value };
|
||||
continue;
|
||||
}
|
||||
if (rec.type === "secret_ref" && typeof rec.secretId === "string") {
|
||||
env[key] = {
|
||||
type: "secret_ref",
|
||||
secretId: rec.secretId,
|
||||
...(typeof rec.version === "number" || rec.version === "latest"
|
||||
? { version: rec.version }
|
||||
: {}),
|
||||
};
|
||||
}
|
||||
}
|
||||
return env;
|
||||
}
|
||||
|
||||
function normalizeMode(value: string): "plan" | "ask" | null {
|
||||
const mode = value.trim().toLowerCase();
|
||||
if (mode === "plan" || mode === "ask") return mode;
|
||||
return null;
|
||||
}
|
||||
|
||||
export function buildCursorLocalConfig(v: CreateConfigValues): Record<string, unknown> {
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.cwd) ac.cwd = v.cwd;
|
||||
if (v.instructionsFilePath) ac.instructionsFilePath = v.instructionsFilePath;
|
||||
if (v.promptTemplate) ac.promptTemplate = v.promptTemplate;
|
||||
ac.model = v.model || DEFAULT_CURSOR_LOCAL_MODEL;
|
||||
const mode = normalizeMode(v.thinkingEffort);
|
||||
if (mode) ac.mode = mode;
|
||||
ac.timeoutSec = 0;
|
||||
ac.graceSec = 15;
|
||||
const env = parseEnvBindings(v.envBindings);
|
||||
const legacy = parseEnvVars(v.envVars);
|
||||
for (const [key, value] of Object.entries(legacy)) {
|
||||
if (!Object.prototype.hasOwnProperty.call(env, key)) {
|
||||
env[key] = { type: "plain", value };
|
||||
}
|
||||
}
|
||||
if (Object.keys(env).length > 0) ac.env = env;
|
||||
if (v.command) ac.command = v.command;
|
||||
if (v.extraArgs) ac.extraArgs = parseCommaArgs(v.extraArgs);
|
||||
return ac;
|
||||
}
|
||||
2
packages/adapters/cursor-local/src/ui/index.ts
Normal file
2
packages/adapters/cursor-local/src/ui/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
export { parseCursorStdoutLine } from "./parse-stdout.js";
|
||||
export { buildCursorLocalConfig } from "./build-config.js";
|
||||
400
packages/adapters/cursor-local/src/ui/parse-stdout.ts
Normal file
400
packages/adapters/cursor-local/src/ui/parse-stdout.ts
Normal file
@@ -0,0 +1,400 @@
|
||||
import type { TranscriptEntry } from "@paperclipai/adapter-utils";
|
||||
import { normalizeCursorStreamLine } from "../shared/stream.js";
|
||||
|
||||
function safeJsonParse(text: string): unknown {
|
||||
try {
|
||||
return JSON.parse(text);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
if (typeof value !== "object" || value === null || Array.isArray(value)) return null;
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function asString(value: unknown, fallback = ""): string {
|
||||
return typeof value === "string" ? value : fallback;
|
||||
}
|
||||
|
||||
function asNumber(value: unknown, fallback = 0): number {
|
||||
return typeof value === "number" && Number.isFinite(value) ? value : fallback;
|
||||
}
|
||||
|
||||
function stringifyUnknown(value: unknown): string {
|
||||
if (typeof value === "string") return value;
|
||||
if (value === null || value === undefined) return "";
|
||||
try {
|
||||
return JSON.stringify(value, null, 2);
|
||||
} catch {
|
||||
return String(value);
|
||||
}
|
||||
}
|
||||
|
||||
/** Max chars of stdout/stderr to show in run log for shell tool results. */
|
||||
const SHELL_OUTPUT_TRUNCATE = 2000;
|
||||
|
||||
/**
|
||||
* Format shell tool result for run log: exit code + stdout/stderr (truncated).
|
||||
* If the result is not a shell-shaped object, returns full stringify.
|
||||
*/
|
||||
function formatShellToolResultForLog(result: unknown): string {
|
||||
const obj = asRecord(result);
|
||||
if (!obj) return stringifyUnknown(result);
|
||||
const success = asRecord(obj.success);
|
||||
if (!success) return stringifyUnknown(result);
|
||||
const exitCode = asNumber(success.exitCode, NaN);
|
||||
const stdout = asString(success.stdout).trim();
|
||||
const stderr = asString(success.stderr).trim();
|
||||
const hasShellShape = Number.isFinite(exitCode) || stdout.length > 0 || stderr.length > 0;
|
||||
if (!hasShellShape) return stringifyUnknown(result);
|
||||
|
||||
const lines: string[] = [];
|
||||
if (Number.isFinite(exitCode)) lines.push(`exit ${exitCode}`);
|
||||
if (stdout) {
|
||||
const out = stdout.length > SHELL_OUTPUT_TRUNCATE ? stdout.slice(0, SHELL_OUTPUT_TRUNCATE) + "\n... (truncated)" : stdout;
|
||||
lines.push("<stdout>");
|
||||
lines.push(out);
|
||||
}
|
||||
if (stderr) {
|
||||
const err = stderr.length > SHELL_OUTPUT_TRUNCATE ? stderr.slice(0, SHELL_OUTPUT_TRUNCATE) + "\n... (truncated)" : stderr;
|
||||
lines.push("<stderr>");
|
||||
lines.push(err);
|
||||
}
|
||||
return lines.join("\n");
|
||||
}
|
||||
|
||||
/** Return compact input for run log when tool is shell/shellToolCall (command only). */
|
||||
function compactShellToolInput(rawInput: unknown, payload?: Record<string, unknown>): unknown {
|
||||
const cmd = asString(payload?.command ?? asRecord(rawInput)?.command);
|
||||
if (cmd) return { command: cmd };
|
||||
return rawInput;
|
||||
}
|
||||
|
||||
function parseUserMessage(messageRaw: unknown, ts: string): TranscriptEntry[] {
|
||||
if (typeof messageRaw === "string") {
|
||||
const text = messageRaw.trim();
|
||||
return text ? [{ kind: "user", ts, text }] : [];
|
||||
}
|
||||
|
||||
const message = asRecord(messageRaw);
|
||||
if (!message) return [];
|
||||
|
||||
const entries: TranscriptEntry[] = [];
|
||||
const directText = asString(message.text).trim();
|
||||
if (directText) entries.push({ kind: "user", ts, text: directText });
|
||||
|
||||
const content = Array.isArray(message.content) ? message.content : [];
|
||||
for (const partRaw of content) {
|
||||
const part = asRecord(partRaw);
|
||||
if (!part) continue;
|
||||
const type = asString(part.type).trim();
|
||||
if (type !== "output_text" && type !== "text") continue;
|
||||
const text = asString(part.text).trim();
|
||||
if (text) entries.push({ kind: "user", ts, text });
|
||||
}
|
||||
|
||||
return entries;
|
||||
}
|
||||
|
||||
/**
 * Extract transcript entries from a Cursor "assistant" message. Accepts a bare
 * string, an object with direct `text`, and/or a content array whose parts may
 * be assistant text, thinking text, tool calls, or tool results. Unrecognized
 * part types are skipped silently.
 */
function parseAssistantMessage(messageRaw: unknown, ts: string): TranscriptEntry[] {
  // Plain-string message: single assistant entry (or nothing when blank).
  if (typeof messageRaw === "string") {
    const text = messageRaw.trim();
    return text ? [{ kind: "assistant", ts, text }] : [];
  }

  const message = asRecord(messageRaw);
  if (!message) return [];

  const entries: TranscriptEntry[] = [];
  // Direct top-level text is emitted in addition to any content parts.
  const directText = asString(message.text).trim();
  if (directText) {
    entries.push({ kind: "assistant", ts, text: directText });
  }

  const content = Array.isArray(message.content) ? message.content : [];
  for (const partRaw of content) {
    const part = asRecord(partRaw);
    if (!part) continue;
    const type = asString(part.type).trim();

    if (type === "output_text" || type === "text") {
      const text = asString(part.text).trim();
      if (text) entries.push({ kind: "assistant", ts, text });
      continue;
    }

    if (type === "thinking") {
      const text = asString(part.text).trim();
      if (text) entries.push({ kind: "thinking", ts, text });
      continue;
    }

    if (type === "tool_call") {
      // Tool name may live under `name` or `tool`; input under several keys.
      const name = asString(part.name, asString(part.tool, "tool"));
      const rawInput = part.input ?? part.arguments ?? part.args ?? {};
      // Shell calls get compacted to just the command for log readability.
      const input =
        name === "shellToolCall" || name === "shell"
          ? compactShellToolInput(rawInput, asRecord(rawInput) ?? undefined)
          : rawInput;
      entries.push({
        kind: "tool_call",
        ts,
        name,
        input,
      });
      continue;
    }

    if (type === "tool_result") {
      // Correlate with the originating call via the first available id field.
      const toolUseId =
        asString(part.tool_use_id) ||
        asString(part.toolUseId) ||
        asString(part.call_id) ||
        asString(part.id) ||
        "tool_result";
      const rawOutput = part.output ?? part.result ?? part.text;
      // Object outputs get shell-aware formatting; scalars are stringified.
      const contentText =
        typeof rawOutput === "object" && rawOutput !== null
          ? formatShellToolResultForLog(rawOutput)
          : asString(rawOutput) || stringifyUnknown(rawOutput);
      const isError = part.is_error === true || asString(part.status).toLowerCase() === "error";
      entries.push({
        kind: "tool_result",
        ts,
        toolUseId,
        content: contentText,
        isError,
      });
    }
  }

  return entries;
}
|
||||
|
||||
/**
 * Translate a Cursor `type:"tool_call"` event into transcript entries.
 * "started"/"start" subtypes become a tool_call entry; "completed"/"complete"/
 * "finished" become a tool_result entry; anything else (or malformed payloads)
 * degrades to a system note so the event is still visible in the log.
 */
function parseCursorToolCallEvent(event: Record<string, unknown>, ts: string): TranscriptEntry[] {
  const subtype = asString(event.subtype).trim().toLowerCase();
  // Correlation id: first available of call_id / callId / id.
  const callId =
    asString(event.call_id) ||
    asString(event.callId) ||
    asString(event.id) ||
    "tool_call";
  const toolCall = asRecord(event.tool_call ?? event.toolCall);
  if (!toolCall) {
    return [{ kind: "system", ts, text: `tool_call${subtype ? ` (${subtype})` : ""}` }];
  }

  // The tool_call record is keyed by tool name; only the first key is used.
  const [toolName] = Object.keys(toolCall);
  if (!toolName) {
    return [{ kind: "system", ts, text: `tool_call${subtype ? ` (${subtype})` : ""}` }];
  }
  const payload = asRecord(toolCall[toolName]) ?? {};
  // Input fallback chain: args → function.arguments → whole payload.
  const rawInput = payload.args ?? asRecord(payload.function)?.arguments ?? payload;
  const isShellTool = toolName === "shellToolCall" || toolName === "shell";
  // Shell inputs are compacted to just the command for log readability.
  const input = isShellTool ? compactShellToolInput(rawInput, payload) : rawInput;

  if (subtype === "started" || subtype === "start") {
    return [{
      kind: "tool_call",
      ts,
      name: toolName,
      input,
    }];
  }

  if (subtype === "completed" || subtype === "complete" || subtype === "finished") {
    // Result fallback chain mirrors the input one; `error` counts as a result.
    const result =
      payload.result ??
      payload.output ??
      payload.error ??
      asRecord(payload.function)?.result ??
      asRecord(payload.function)?.output;
    // Any error flag, failing status, or presence of `error` marks failure.
    const isError =
      event.is_error === true ||
      payload.is_error === true ||
      asString(payload.status).toLowerCase() === "error" ||
      asString(payload.status).toLowerCase() === "failed" ||
      asString(payload.status).toLowerCase() === "cancelled" ||
      payload.error !== undefined;
    const content =
      result !== undefined
        ? isShellTool
          ? formatShellToolResultForLog(result)
          : stringifyUnknown(result)
        : `${toolName} completed`;
    return [{
      kind: "tool_result",
      ts,
      toolUseId: callId,
      content,
      isError,
    }];
  }

  // Unknown subtype: keep a lightweight system breadcrumb.
  return [{
    kind: "system",
    ts,
    text: `tool_call${subtype ? ` (${subtype})` : ""}: ${toolName}`,
  }];
}
|
||||
|
||||
/**
 * Parse one line of Cursor CLI stdout into transcript entries.
 *
 * The line is first stripped of any stdout/stderr prefix, then JSON-decoded;
 * non-JSON lines are surfaced as raw stdout entries. Recognized event types:
 * system (incl. init), assistant, user, thinking, tool_call, result, error,
 * plus older stream-json shapes (step_start, text, tool_use, step_finish).
 * Unrecognized JSON falls through as a raw stdout entry.
 */
export function parseCursorStdoutLine(line: string, ts: string): TranscriptEntry[] {
  const normalized = normalizeCursorStreamLine(line);
  if (!normalized.line) return [];

  const parsed = asRecord(safeJsonParse(normalized.line));
  if (!parsed) {
    // Not JSON — keep the raw line visible in the transcript.
    return [{ kind: "stdout", ts, text: normalized.line }];
  }

  const type = asString(parsed.type);

  if (type === "system") {
    const subtype = asString(parsed.subtype);
    if (subtype === "init") {
      // Session id key varies across CLI versions.
      const sessionId =
        asString(parsed.session_id) ||
        asString(parsed.sessionId) ||
        asString(parsed.sessionID);
      return [{ kind: "init", ts, model: asString(parsed.model, "cursor"), sessionId }];
    }
    return [{ kind: "system", ts, text: subtype ? `system: ${subtype}` : "system" }];
  }

  if (type === "assistant") {
    const entries = parseAssistantMessage(parsed.message, ts);
    // Fall back to the top-level `result` text when the message yields nothing.
    return entries.length > 0 ? entries : [{ kind: "assistant", ts, text: asString(parsed.result) }];
  }

  if (type === "user") {
    return parseUserMessage(parsed.message, ts);
  }

  if (type === "thinking") {
    // Thinking text may arrive whole (top-level) or as incremental deltas.
    const textFromTopLevel = asString(parsed.text);
    const textFromDelta = asString(asRecord(parsed.delta)?.text);
    const text = textFromTopLevel.length > 0 ? textFromTopLevel : textFromDelta;
    const subtype = asString(parsed.subtype).trim().toLowerCase();
    const isDelta = subtype === "delta" || asRecord(parsed.delta) !== null;
    if (!text.trim()) return [];
    // Deltas keep original whitespace so chunks concatenate cleanly.
    return [{ kind: "thinking", ts, text: isDelta ? text : text.trim(), ...(isDelta ? { delta: true } : {}) }];
  }

  if (type === "tool_call") {
    return parseCursorToolCallEvent(parsed, ts);
  }

  if (type === "result") {
    const usage = asRecord(parsed.usage);
    // Usage keys vary across CLI versions; try snake_case then camelCase.
    const inputTokens = asNumber(usage?.input_tokens, asNumber(usage?.inputTokens));
    const outputTokens = asNumber(usage?.output_tokens, asNumber(usage?.outputTokens));
    const cachedTokens = asNumber(
      usage?.cached_input_tokens,
      asNumber(usage?.cachedInputTokens, asNumber(usage?.cache_read_input_tokens)),
    );
    const subtype = asString(parsed.subtype, "result");
    // Collect structured errors plus any single top-level error string.
    const errors = Array.isArray(parsed.errors)
      ? parsed.errors.map((value) => stringifyUnknown(value)).filter(Boolean)
      : [];
    const errorText = asString(parsed.error).trim();
    if (errorText) errors.push(errorText);
    const isError = parsed.is_error === true || subtype === "error" || subtype === "failed";

    return [{
      kind: "result",
      ts,
      text: asString(parsed.result),
      inputTokens,
      outputTokens,
      cachedTokens,
      costUsd: asNumber(parsed.total_cost_usd, asNumber(parsed.cost_usd, asNumber(parsed.cost))),
      subtype,
      isError,
      errors,
    }];
  }

  if (type === "error") {
    const message = asString(parsed.message) || stringifyUnknown(parsed.error ?? parsed.detail) || normalized.line;
    return [{ kind: "stderr", ts, text: message }];
  }

  // Compatibility with older stream-json event shapes.
  if (type === "step_start") {
    const sessionId = asString(parsed.sessionID);
    return [{ kind: "system", ts, text: `step started${sessionId ? ` (${sessionId})` : ""}` }];
  }

  if (type === "text") {
    const part = asRecord(parsed.part);
    const text = asString(part?.text).trim();
    if (!text) return [];
    return [{ kind: "assistant", ts, text }];
  }

  if (type === "tool_use") {
    const part = asRecord(parsed.part);
    const toolUseId = asString(part?.callID, asString(part?.id, "tool_use"));
    const toolName = asString(part?.tool, "tool");
    const state = asRecord(part?.state);
    const input = state?.input ?? {};
    const output = asString(state?.output).trim();
    const status = asString(state?.status).trim();
    const exitCode = asNumber(asRecord(state?.metadata)?.exit, NaN);
    // Failure is signaled by status or a non-zero exit code in metadata.
    const isError =
      status === "failed" ||
      status === "error" ||
      status === "cancelled" ||
      (Number.isFinite(exitCode) && exitCode !== 0);

    const entries: TranscriptEntry[] = [
      {
        kind: "tool_call",
        ts,
        name: toolName,
        input,
      },
    ];

    // A tool_use event may carry the result inline; emit it alongside the call.
    if (status || output) {
      const lines: string[] = [];
      if (status) lines.push(`status: ${status}`);
      if (Number.isFinite(exitCode)) lines.push(`exit: ${exitCode}`);
      if (output) {
        if (lines.length > 0) lines.push("");
        lines.push(output);
      }
      entries.push({
        kind: "tool_result",
        ts,
        toolUseId,
        content: lines.join("\n").trim() || "tool completed",
        isError,
      });
    }

    return entries;
  }

  if (type === "step_finish") {
    const part = asRecord(parsed.part);
    const tokens = asRecord(part?.tokens);
    const cache = asRecord(tokens?.cache);
    const reason = asString(part?.reason);
    return [{
      kind: "result",
      ts,
      text: reason,
      inputTokens: asNumber(tokens?.input),
      outputTokens: asNumber(tokens?.output),
      cachedTokens: asNumber(cache?.read),
      costUsd: asNumber(part?.cost),
      subtype: reason || "step_finish",
      isError: reason === "error" || reason === "failed",
      errors: [],
    }];
  }

  // Unknown JSON event type: preserve as raw stdout.
  return [{ kind: "stdout", ts, text: normalized.line }];
}
|
||||
9
packages/adapters/cursor-local/tsconfig.json
Normal file
9
packages/adapters/cursor-local/tsconfig.json
Normal file
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"extends": "../../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "dist",
|
||||
"rootDir": "src",
|
||||
"types": ["node"]
|
||||
},
|
||||
"include": ["src"]
|
||||
}
|
||||
72
packages/adapters/openclaw-gateway/README.md
Normal file
72
packages/adapters/openclaw-gateway/README.md
Normal file
@@ -0,0 +1,72 @@
|
||||
# OpenClaw Gateway Adapter
|
||||
|
||||
This document describes how `@paperclipai/adapter-openclaw-gateway` invokes OpenClaw over the Gateway protocol.
|
||||
|
||||
## Transport
|
||||
|
||||
This adapter always uses WebSocket gateway transport.
|
||||
|
||||
- URL must be `ws://` or `wss://`
|
||||
- Connect flow follows gateway protocol:
|
||||
1. receive `connect.challenge`
|
||||
2. send `req connect` (protocol/client/auth/device payload)
|
||||
3. send `req agent`
|
||||
4. wait for completion via `req agent.wait`
|
||||
5. stream `event agent` frames into Paperclip logs/transcript parsing
|
||||
|
||||
## Auth Modes
|
||||
|
||||
Gateway credentials can be provided in any of these ways:
|
||||
|
||||
- `authToken` / `token` in adapter config
|
||||
- `headers.x-openclaw-token`
|
||||
- `headers.x-openclaw-auth` (legacy)
|
||||
- `password` (shared password mode)
|
||||
|
||||
When a token is present and `authorization` header is missing, the adapter derives `Authorization: Bearer <token>`.
|
||||
|
||||
## Device Auth
|
||||
|
||||
By default the adapter sends a signed `device` payload in `connect` params.
|
||||
|
||||
- set `disableDeviceAuth=true` to omit device signing
|
||||
- set `devicePrivateKeyPem` to pin a stable signing key
|
||||
- without `devicePrivateKeyPem`, the adapter generates an ephemeral Ed25519 keypair per run
|
||||
- when `autoPairOnFirstConnect` is enabled (default), the adapter handles one initial `pairing required` by calling `device.pair.list` + `device.pair.approve` over shared auth, then retries once.
|
||||
|
||||
## Session Strategy
|
||||
|
||||
The adapter supports the same session routing model as HTTP OpenClaw mode:
|
||||
|
||||
- `sessionKeyStrategy=issue|fixed|run`
|
||||
- `sessionKey` is used when strategy is `fixed`
|
||||
|
||||
Resolved session key is sent as `agent.sessionKey`.
|
||||
|
||||
## Payload Mapping
|
||||
|
||||
The agent request is built as:
|
||||
|
||||
- required fields:
|
||||
- `message` (wake text plus optional `payloadTemplate.message`/`payloadTemplate.text` prefix)
|
||||
- `idempotencyKey` (Paperclip `runId`)
|
||||
- `sessionKey` (resolved strategy)
|
||||
- optional additions:
|
||||
- all `payloadTemplate` fields merged in
|
||||
- `agentId` from config if set and not already in template
|
||||
|
||||
## Timeouts
|
||||
|
||||
- `timeoutSec` controls adapter-level request budget
|
||||
- `waitTimeoutMs` controls `agent.wait.timeoutMs`
|
||||
|
||||
If `agent.wait` returns `timeout`, adapter returns `openclaw_gateway_wait_timeout`.
|
||||
|
||||
## Log Format
|
||||
|
||||
Structured gateway event logs use:
|
||||
|
||||
- `[openclaw-gateway] ...` for lifecycle/system logs
|
||||
- `[openclaw-gateway:event] run=<id> stream=<stream> data=<json>` for `event agent` frames
|
||||
|
||||
UI/CLI parsers consume these lines to render transcript updates.
|
||||
@@ -0,0 +1,109 @@
|
||||
# OpenClaw Gateway Onboarding and Test Plan
|
||||
|
||||
## Scope
|
||||
This plan is now **gateway-only**. Paperclip supports OpenClaw through `openclaw_gateway` only.
|
||||
|
||||
- Removed path: legacy `openclaw` adapter (`/v1/responses`, `/hooks/*`, SSE/webhook transport switching)
|
||||
- Supported path: `openclaw_gateway` over WebSocket (`ws://` or `wss://`)
|
||||
|
||||
## Requirements
|
||||
1. OpenClaw test image must be stock/clean every run.
|
||||
2. Onboarding must work from a single primary prompt pasted into OpenClaw (one optional follow-up ping is allowed).
|
||||
3. Device auth stays enabled by default; pairing is persisted via `adapterConfig.devicePrivateKeyPem`.
|
||||
4. Invite/access flow must be secure:
|
||||
- invite prompt endpoint is board-permission protected
|
||||
- CEO agent is allowed to invoke the invite prompt endpoint for their own company
|
||||
5. E2E pass criteria must include the 3 functional task cases.
|
||||
|
||||
## Current Product Flow
|
||||
1. Board/CEO opens company settings.
|
||||
2. Click `Generate OpenClaw Invite Prompt`.
|
||||
3. Paste generated prompt into OpenClaw chat.
|
||||
4. OpenClaw submits invite acceptance with:
|
||||
- `adapterType: "openclaw_gateway"`
|
||||
- `agentDefaultsPayload.url: ws://... | wss://...`
|
||||
- `agentDefaultsPayload.headers["x-openclaw-token"]`
|
||||
5. Board approves join request.
|
||||
6. OpenClaw claims API key and installs/uses Paperclip skill.
|
||||
7. First task run may trigger pairing approval once; after approval, pairing persists via stored device key.
|
||||
|
||||
## Technical Contract (Gateway)
|
||||
`agentDefaultsPayload` minimum:
|
||||
```json
|
||||
{
|
||||
"url": "ws://127.0.0.1:18789",
|
||||
"headers": { "x-openclaw-token": "<gateway-token>" }
|
||||
}
|
||||
```
|
||||
|
||||
Recommended fields:
|
||||
```json
|
||||
{
|
||||
"paperclipApiUrl": "http://host.docker.internal:3100",
|
||||
"waitTimeoutMs": 120000,
|
||||
"sessionKeyStrategy": "issue",
|
||||
"role": "operator",
|
||||
"scopes": ["operator.admin"]
|
||||
}
|
||||
```
|
||||
|
||||
Security/pairing defaults:
|
||||
- `disableDeviceAuth`: default false
|
||||
- `devicePrivateKeyPem`: generated during join if missing
|
||||
|
||||
## Codex Automation Workflow
|
||||
|
||||
### 0) Reset and boot
|
||||
```bash
|
||||
OPENCLAW_DOCKER_DIR=/tmp/openclaw-docker
|
||||
if [ -d "$OPENCLAW_DOCKER_DIR" ]; then
|
||||
docker compose -f "$OPENCLAW_DOCKER_DIR/docker-compose.yml" down --remove-orphans || true
|
||||
fi
|
||||
|
||||
docker image rm openclaw:local || true
|
||||
OPENCLAW_RESET_STATE=1 OPENCLAW_BUILD=1 ./scripts/smoke/openclaw-docker-ui.sh
|
||||
```
|
||||
|
||||
### 1) Start Paperclip
|
||||
```bash
|
||||
pnpm dev --tailscale-auth
|
||||
curl -fsS http://127.0.0.1:3100/api/health
|
||||
```
|
||||
|
||||
### 2) Invite + join + approval
|
||||
- create invite prompt via `POST /api/companies/:companyId/openclaw/invite-prompt`
|
||||
- paste prompt to OpenClaw
|
||||
- approve join request
|
||||
- assert created agent:
|
||||
- `adapterType == openclaw_gateway`
|
||||
- token header exists and length >= 16
|
||||
- `devicePrivateKeyPem` exists
|
||||
|
||||
### 3) Pairing stabilization
|
||||
- if first run returns `pairing required`, approve pending device in OpenClaw
|
||||
- rerun task and confirm success
|
||||
- assert later runs do not require re-pairing for same agent
|
||||
|
||||
### 4) Functional E2E assertions
|
||||
1. Task assigned to OpenClaw is completed and closed.
|
||||
2. Task asking OpenClaw to send main-webchat message succeeds (message visible in main chat).
|
||||
3. In `/new` OpenClaw session, OpenClaw can still create a Paperclip task.
|
||||
|
||||
## Manual Smoke Checklist
|
||||
Use [doc/OPENCLAW_ONBOARDING.md](../../../../doc/OPENCLAW_ONBOARDING.md) as the operator runbook.
|
||||
|
||||
## Regression Gates
|
||||
Required before merge:
|
||||
```bash
|
||||
pnpm -r typecheck
|
||||
pnpm test:run
|
||||
pnpm build
|
||||
```
|
||||
|
||||
If full suite is too heavy locally, run at least:
|
||||
```bash
|
||||
pnpm --filter @paperclipai/server test:run -- openclaw-gateway
|
||||
pnpm --filter @paperclipai/server typecheck
|
||||
pnpm --filter @paperclipai/ui typecheck
|
||||
pnpm --filter paperclipai typecheck
|
||||
```
|
||||
52
packages/adapters/openclaw-gateway/package.json
Normal file
52
packages/adapters/openclaw-gateway/package.json
Normal file
@@ -0,0 +1,52 @@
|
||||
{
|
||||
"name": "@paperclipai/adapter-openclaw-gateway",
|
||||
"version": "0.2.7",
|
||||
"type": "module",
|
||||
"exports": {
|
||||
".": "./src/index.ts",
|
||||
"./server": "./src/server/index.ts",
|
||||
"./ui": "./src/ui/index.ts",
|
||||
"./cli": "./src/cli/index.ts"
|
||||
},
|
||||
"publishConfig": {
|
||||
"access": "public",
|
||||
"exports": {
|
||||
".": {
|
||||
"types": "./dist/index.d.ts",
|
||||
"import": "./dist/index.js"
|
||||
},
|
||||
"./server": {
|
||||
"types": "./dist/server/index.d.ts",
|
||||
"import": "./dist/server/index.js"
|
||||
},
|
||||
"./ui": {
|
||||
"types": "./dist/ui/index.d.ts",
|
||||
"import": "./dist/ui/index.js"
|
||||
},
|
||||
"./cli": {
|
||||
"types": "./dist/cli/index.d.ts",
|
||||
"import": "./dist/cli/index.js"
|
||||
}
|
||||
},
|
||||
"main": "./dist/index.js",
|
||||
"types": "./dist/index.d.ts"
|
||||
},
|
||||
"files": [
|
||||
"dist"
|
||||
],
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"clean": "rm -rf dist",
|
||||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
"dependencies": {
|
||||
"@paperclipai/adapter-utils": "workspace:*",
|
||||
"picocolors": "^1.1.1",
|
||||
"ws": "^8.19.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^24.6.0",
|
||||
"@types/ws": "^8.18.1",
|
||||
"typescript": "^5.7.3"
|
||||
}
|
||||
}
|
||||
23
packages/adapters/openclaw-gateway/src/cli/format-event.ts
Normal file
23
packages/adapters/openclaw-gateway/src/cli/format-event.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
import pc from "picocolors";
|
||||
|
||||
export function printOpenClawGatewayStreamEvent(raw: string, debug: boolean): void {
|
||||
const line = raw.trim();
|
||||
if (!line) return;
|
||||
|
||||
if (!debug) {
|
||||
console.log(line);
|
||||
return;
|
||||
}
|
||||
|
||||
if (line.startsWith("[openclaw-gateway:event]")) {
|
||||
console.log(pc.cyan(line));
|
||||
return;
|
||||
}
|
||||
|
||||
if (line.startsWith("[openclaw-gateway]")) {
|
||||
console.log(pc.blue(line));
|
||||
return;
|
||||
}
|
||||
|
||||
console.log(pc.gray(line));
|
||||
}
|
||||
1
packages/adapters/openclaw-gateway/src/cli/index.ts
Normal file
1
packages/adapters/openclaw-gateway/src/cli/index.ts
Normal file
@@ -0,0 +1 @@
|
||||
export { printOpenClawGatewayStreamEvent } from "./format-event.js";
|
||||
42
packages/adapters/openclaw-gateway/src/index.ts
Normal file
42
packages/adapters/openclaw-gateway/src/index.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
// Adapter registry identity: matched against an agent's `adapterType`.
export const type = "openclaw_gateway";
// Human-readable name shown in adapter pickers.
export const label = "OpenClaw Gateway";

// No fixed model catalog: the gateway side decides which model serves a run.
export const models: { id: string; label: string }[] = [];

// Markdown reference for adapterConfig fields, surfaced to operators
// configuring an openclaw_gateway agent. Runtime string — do not edit casually.
export const agentConfigurationDoc = `# openclaw_gateway agent configuration

Adapter: openclaw_gateway

Use when:
- You want Paperclip to invoke OpenClaw over the Gateway WebSocket protocol.
- You want native gateway auth/connect semantics instead of HTTP /v1/responses or /hooks/*.

Don't use when:
- You only expose OpenClaw HTTP endpoints.
- Your deployment does not permit outbound WebSocket access from the Paperclip server.

Core fields:
- url (string, required): OpenClaw gateway WebSocket URL (ws:// or wss://)
- headers (object, optional): handshake headers; supports x-openclaw-token / x-openclaw-auth
- authToken (string, optional): shared gateway token override
- password (string, optional): gateway shared password, if configured

Gateway connect identity fields:
- clientId (string, optional): gateway client id (default gateway-client)
- clientMode (string, optional): gateway client mode (default backend)
- clientVersion (string, optional): client version string
- role (string, optional): gateway role (default operator)
- scopes (string[] | comma string, optional): gateway scopes (default ["operator.admin"])
- disableDeviceAuth (boolean, optional): disable signed device payload in connect params (default false)

Request behavior fields:
- payloadTemplate (object, optional): additional fields merged into gateway agent params
- timeoutSec (number, optional): adapter timeout in seconds (default 120)
- waitTimeoutMs (number, optional): agent.wait timeout override (default timeoutSec * 1000)
- autoPairOnFirstConnect (boolean, optional): on first "pairing required", attempt device.pair.list/device.pair.approve via shared auth, then retry once (default true)
- paperclipApiUrl (string, optional): absolute Paperclip base URL advertised in wake text

Session routing fields:
- sessionKeyStrategy (string, optional): issue (default), fixed, or run
- sessionKey (string, optional): fixed session key when strategy=fixed (default paperclip)
`;
|
||||
1278
packages/adapters/openclaw-gateway/src/server/execute.ts
Normal file
1278
packages/adapters/openclaw-gateway/src/server/execute.ts
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,3 +1,2 @@
|
||||
export { execute } from "./execute.js";
|
||||
export { testEnvironment } from "./test.js";
|
||||
export { parseOpenClawResponse, isOpenClawUnknownSessionError } from "./parse.js";
|
||||
317
packages/adapters/openclaw-gateway/src/server/test.ts
Normal file
317
packages/adapters/openclaw-gateway/src/server/test.ts
Normal file
@@ -0,0 +1,317 @@
|
||||
import type {
|
||||
AdapterEnvironmentCheck,
|
||||
AdapterEnvironmentTestContext,
|
||||
AdapterEnvironmentTestResult,
|
||||
} from "@paperclipai/adapter-utils";
|
||||
import { asString, parseObject } from "@paperclipai/adapter-utils/server-utils";
|
||||
import { randomUUID } from "node:crypto";
|
||||
import { WebSocket } from "ws";
|
||||
|
||||
function summarizeStatus(checks: AdapterEnvironmentCheck[]): AdapterEnvironmentTestResult["status"] {
|
||||
if (checks.some((check) => check.level === "error")) return "fail";
|
||||
if (checks.some((check) => check.level === "warn")) return "warn";
|
||||
return "pass";
|
||||
}
|
||||
|
||||
function nonEmpty(value: unknown): string | null {
|
||||
return typeof value === "string" && value.trim().length > 0 ? value.trim() : null;
|
||||
}
|
||||
|
||||
function isLoopbackHost(hostname: string): boolean {
|
||||
const value = hostname.trim().toLowerCase();
|
||||
return value === "localhost" || value === "127.0.0.1" || value === "::1";
|
||||
}
|
||||
|
||||
function toStringRecord(value: unknown): Record<string, string> {
|
||||
const parsed = parseObject(value);
|
||||
const out: Record<string, string> = {};
|
||||
for (const [key, entry] of Object.entries(parsed)) {
|
||||
if (typeof entry === "string") out[key] = entry;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
function toStringArray(value: unknown): string[] {
|
||||
if (Array.isArray(value)) {
|
||||
return value
|
||||
.filter((entry): entry is string => typeof entry === "string")
|
||||
.map((entry) => entry.trim())
|
||||
.filter(Boolean);
|
||||
}
|
||||
if (typeof value === "string") {
|
||||
return value
|
||||
.split(",")
|
||||
.map((entry) => entry.trim())
|
||||
.filter(Boolean);
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
function headerMapGetIgnoreCase(headers: Record<string, string>, key: string): string | null {
|
||||
const match = Object.entries(headers).find(([entryKey]) => entryKey.toLowerCase() === key.toLowerCase());
|
||||
return match ? match[1] : null;
|
||||
}
|
||||
|
||||
function tokenFromAuthHeader(rawHeader: string | null): string | null {
|
||||
if (!rawHeader) return null;
|
||||
const trimmed = rawHeader.trim();
|
||||
if (!trimmed) return null;
|
||||
const match = trimmed.match(/^bearer\s+(.+)$/i);
|
||||
return match ? nonEmpty(match[1]) : trimmed;
|
||||
}
|
||||
|
||||
function resolveAuthToken(config: Record<string, unknown>, headers: Record<string, string>): string | null {
|
||||
const explicit = nonEmpty(config.authToken) ?? nonEmpty(config.token);
|
||||
if (explicit) return explicit;
|
||||
|
||||
const tokenHeader = headerMapGetIgnoreCase(headers, "x-openclaw-token");
|
||||
if (nonEmpty(tokenHeader)) return nonEmpty(tokenHeader);
|
||||
|
||||
const authHeader =
|
||||
headerMapGetIgnoreCase(headers, "x-openclaw-auth") ??
|
||||
headerMapGetIgnoreCase(headers, "authorization");
|
||||
return tokenFromAuthHeader(authHeader);
|
||||
}
|
||||
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
if (typeof value !== "object" || value === null || Array.isArray(value)) return null;
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function rawDataToString(data: unknown): string {
|
||||
if (typeof data === "string") return data;
|
||||
if (Buffer.isBuffer(data)) return data.toString("utf8");
|
||||
if (data instanceof ArrayBuffer) return Buffer.from(data).toString("utf8");
|
||||
if (Array.isArray(data)) {
|
||||
return Buffer.concat(
|
||||
data.map((entry) => (Buffer.isBuffer(entry) ? entry : Buffer.from(String(entry), "utf8"))),
|
||||
).toString("utf8");
|
||||
}
|
||||
return String(data ?? "");
|
||||
}
|
||||
|
||||
async function probeGateway(input: {
|
||||
url: string;
|
||||
headers: Record<string, string>;
|
||||
authToken: string | null;
|
||||
role: string;
|
||||
scopes: string[];
|
||||
timeoutMs: number;
|
||||
}): Promise<"ok" | "challenge_only" | "failed"> {
|
||||
return await new Promise((resolve) => {
|
||||
const ws = new WebSocket(input.url, { headers: input.headers, maxPayload: 2 * 1024 * 1024 });
|
||||
const timeout = setTimeout(() => {
|
||||
try {
|
||||
ws.close();
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
resolve("failed");
|
||||
}, input.timeoutMs);
|
||||
|
||||
let completed = false;
|
||||
|
||||
const finish = (status: "ok" | "challenge_only" | "failed") => {
|
||||
if (completed) return;
|
||||
completed = true;
|
||||
clearTimeout(timeout);
|
||||
try {
|
||||
ws.close();
|
||||
} catch {
|
||||
// ignore
|
||||
}
|
||||
resolve(status);
|
||||
};
|
||||
|
||||
ws.on("message", (raw) => {
|
||||
let parsed: unknown;
|
||||
try {
|
||||
parsed = JSON.parse(rawDataToString(raw));
|
||||
} catch {
|
||||
return;
|
||||
}
|
||||
const event = asRecord(parsed);
|
||||
if (event?.type === "event" && event.event === "connect.challenge") {
|
||||
const nonce = nonEmpty(asRecord(event.payload)?.nonce);
|
||||
if (!nonce) {
|
||||
finish("failed");
|
||||
return;
|
||||
}
|
||||
|
||||
const connectId = randomUUID();
|
||||
ws.send(
|
||||
JSON.stringify({
|
||||
type: "req",
|
||||
id: connectId,
|
||||
method: "connect",
|
||||
params: {
|
||||
minProtocol: 3,
|
||||
maxProtocol: 3,
|
||||
client: {
|
||||
id: "gateway-client",
|
||||
version: "paperclip-probe",
|
||||
platform: process.platform,
|
||||
mode: "probe",
|
||||
},
|
||||
role: input.role,
|
||||
scopes: input.scopes,
|
||||
...(input.authToken
|
||||
? {
|
||||
auth: {
|
||||
token: input.authToken,
|
||||
},
|
||||
}
|
||||
: {}),
|
||||
},
|
||||
}),
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if (event?.type === "res") {
|
||||
if (event.ok === true) {
|
||||
finish("ok");
|
||||
} else {
|
||||
finish("challenge_only");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
ws.on("error", () => {
|
||||
finish("failed");
|
||||
});
|
||||
|
||||
ws.on("close", () => {
|
||||
if (!completed) finish("failed");
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
export async function testEnvironment(
|
||||
ctx: AdapterEnvironmentTestContext,
|
||||
): Promise<AdapterEnvironmentTestResult> {
|
||||
const checks: AdapterEnvironmentCheck[] = [];
|
||||
const config = parseObject(ctx.config);
|
||||
const urlValue = asString(config.url, "").trim();
|
||||
|
||||
if (!urlValue) {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_url_missing",
|
||||
level: "error",
|
||||
message: "OpenClaw gateway adapter requires a WebSocket URL.",
|
||||
hint: "Set adapterConfig.url to ws://host:port (or wss://).",
|
||||
});
|
||||
return {
|
||||
adapterType: ctx.adapterType,
|
||||
status: summarizeStatus(checks),
|
||||
checks,
|
||||
testedAt: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
|
||||
let url: URL | null = null;
|
||||
try {
|
||||
url = new URL(urlValue);
|
||||
} catch {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_url_invalid",
|
||||
level: "error",
|
||||
message: `Invalid URL: ${urlValue}`,
|
||||
});
|
||||
}
|
||||
|
||||
if (url && url.protocol !== "ws:" && url.protocol !== "wss:") {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_url_protocol_invalid",
|
||||
level: "error",
|
||||
message: `Unsupported URL protocol: ${url.protocol}`,
|
||||
hint: "Use ws:// or wss://.",
|
||||
});
|
||||
}
|
||||
|
||||
if (url) {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_url_valid",
|
||||
level: "info",
|
||||
message: `Configured gateway URL: ${url.toString()}`,
|
||||
});
|
||||
|
||||
if (url.protocol === "ws:" && !isLoopbackHost(url.hostname)) {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_plaintext_remote_ws",
|
||||
level: "warn",
|
||||
message: "Gateway URL uses plaintext ws:// on a non-loopback host.",
|
||||
hint: "Prefer wss:// for remote gateways.",
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const headers = toStringRecord(config.headers);
|
||||
const authToken = resolveAuthToken(config, headers);
|
||||
const password = nonEmpty(config.password);
|
||||
const role = nonEmpty(config.role) ?? "operator";
|
||||
const scopes = toStringArray(config.scopes);
|
||||
|
||||
if (authToken || password) {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_auth_present",
|
||||
level: "info",
|
||||
message: "Gateway credentials are configured.",
|
||||
});
|
||||
} else {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_auth_missing",
|
||||
level: "warn",
|
||||
message: "No gateway credentials detected in adapter config.",
|
||||
hint: "Set authToken/password or headers.x-openclaw-token for authenticated gateways.",
|
||||
});
|
||||
}
|
||||
|
||||
if (url && (url.protocol === "ws:" || url.protocol === "wss:")) {
|
||||
try {
|
||||
const probeResult = await probeGateway({
|
||||
url: url.toString(),
|
||||
headers,
|
||||
authToken,
|
||||
role,
|
||||
scopes: scopes.length > 0 ? scopes : ["operator.admin"],
|
||||
timeoutMs: 3_000,
|
||||
});
|
||||
|
||||
if (probeResult === "ok") {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_probe_ok",
|
||||
level: "info",
|
||||
message: "Gateway connect probe succeeded.",
|
||||
});
|
||||
} else if (probeResult === "challenge_only") {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_probe_challenge_only",
|
||||
level: "warn",
|
||||
message: "Gateway challenge was received, but connect probe was rejected.",
|
||||
hint: "Check gateway credentials, scopes, role, and device-auth requirements.",
|
||||
});
|
||||
} else {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_probe_failed",
|
||||
level: "warn",
|
||||
message: "Gateway probe failed.",
|
||||
hint: "Verify network reachability and gateway URL from the Paperclip server host.",
|
||||
});
|
||||
}
|
||||
} catch (err) {
|
||||
checks.push({
|
||||
code: "openclaw_gateway_probe_error",
|
||||
level: "warn",
|
||||
message: err instanceof Error ? err.message : "Gateway probe failed",
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
adapterType: ctx.adapterType,
|
||||
status: summarizeStatus(checks),
|
||||
checks,
|
||||
testedAt: new Date().toISOString(),
|
||||
};
|
||||
}
|
||||
16
packages/adapters/openclaw-gateway/src/shared/stream.ts
Normal file
16
packages/adapters/openclaw-gateway/src/shared/stream.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
export function normalizeOpenClawGatewayStreamLine(rawLine: string): {
|
||||
stream: "stdout" | "stderr" | null;
|
||||
line: string;
|
||||
} {
|
||||
const trimmed = rawLine.trim();
|
||||
if (!trimmed) return { stream: null, line: "" };
|
||||
|
||||
const prefixed = trimmed.match(/^(stdout|stderr)\s*[:=]?\s*(.*)$/i);
|
||||
if (!prefixed) {
|
||||
return { stream: null, line: trimmed };
|
||||
}
|
||||
|
||||
const stream = prefixed[1]?.toLowerCase() === "stderr" ? "stderr" : "stdout";
|
||||
const line = (prefixed[2] ?? "").trim();
|
||||
return { stream, line };
|
||||
}
|
||||
12
packages/adapters/openclaw-gateway/src/ui/build-config.ts
Normal file
12
packages/adapters/openclaw-gateway/src/ui/build-config.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
import type { CreateConfigValues } from "@paperclipai/adapter-utils";
|
||||
|
||||
export function buildOpenClawGatewayConfig(v: CreateConfigValues): Record<string, unknown> {
|
||||
const ac: Record<string, unknown> = {};
|
||||
if (v.url) ac.url = v.url;
|
||||
ac.timeoutSec = 120;
|
||||
ac.waitTimeoutMs = 120000;
|
||||
ac.sessionKeyStrategy = "issue";
|
||||
ac.role = "operator";
|
||||
ac.scopes = ["operator.admin"];
|
||||
return ac;
|
||||
}
|
||||
2
packages/adapters/openclaw-gateway/src/ui/index.ts
Normal file
2
packages/adapters/openclaw-gateway/src/ui/index.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
export { parseOpenClawGatewayStdoutLine } from "./parse-stdout.js";
|
||||
export { buildOpenClawGatewayConfig } from "./build-config.js";
|
||||
75
packages/adapters/openclaw-gateway/src/ui/parse-stdout.ts
Normal file
75
packages/adapters/openclaw-gateway/src/ui/parse-stdout.ts
Normal file
@@ -0,0 +1,75 @@
|
||||
import type { TranscriptEntry } from "@paperclipai/adapter-utils";
|
||||
import { normalizeOpenClawGatewayStreamLine } from "../shared/stream.js";
|
||||
|
||||
function safeJsonParse(text: string): unknown {
|
||||
try {
|
||||
return JSON.parse(text);
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function asRecord(value: unknown): Record<string, unknown> | null {
|
||||
if (typeof value !== "object" || value === null || Array.isArray(value)) return null;
|
||||
return value as Record<string, unknown>;
|
||||
}
|
||||
|
||||
function asString(value: unknown): string {
|
||||
return typeof value === "string" ? value : "";
|
||||
}
|
||||
|
||||
function parseAgentEventLine(line: string, ts: string): TranscriptEntry[] {
|
||||
const match = line.match(/^\[openclaw-gateway:event\]\s+run=([^\s]+)\s+stream=([^\s]+)\s+data=(.*)$/s);
|
||||
if (!match) return [{ kind: "stdout", ts, text: line }];
|
||||
|
||||
const stream = asString(match[2]).toLowerCase();
|
||||
const data = asRecord(safeJsonParse(asString(match[3]).trim()));
|
||||
|
||||
if (stream === "assistant") {
|
||||
const delta = asString(data?.delta);
|
||||
if (delta.length > 0) {
|
||||
return [{ kind: "assistant", ts, text: delta, delta: true }];
|
||||
}
|
||||
|
||||
const text = asString(data?.text);
|
||||
if (text.length > 0) {
|
||||
return [{ kind: "assistant", ts, text }];
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
if (stream === "error") {
|
||||
const message = asString(data?.error) || asString(data?.message);
|
||||
return message ? [{ kind: "stderr", ts, text: message }] : [];
|
||||
}
|
||||
|
||||
if (stream === "lifecycle") {
|
||||
const phase = asString(data?.phase).toLowerCase();
|
||||
const message = asString(data?.error) || asString(data?.message);
|
||||
if ((phase === "error" || phase === "failed" || phase === "cancelled") && message) {
|
||||
return [{ kind: "stderr", ts, text: message }];
|
||||
}
|
||||
}
|
||||
|
||||
return [];
|
||||
}
|
||||
|
||||
export function parseOpenClawGatewayStdoutLine(line: string, ts: string): TranscriptEntry[] {
|
||||
const normalized = normalizeOpenClawGatewayStreamLine(line);
|
||||
if (normalized.stream === "stderr") {
|
||||
return [{ kind: "stderr", ts, text: normalized.line }];
|
||||
}
|
||||
|
||||
const trimmed = normalized.line.trim();
|
||||
if (!trimmed) return [];
|
||||
|
||||
if (trimmed.startsWith("[openclaw-gateway:event]")) {
|
||||
return parseAgentEventLine(trimmed, ts);
|
||||
}
|
||||
|
||||
if (trimmed.startsWith("[openclaw-gateway]")) {
|
||||
return [{ kind: "system", ts, text: trimmed.replace(/^\[openclaw-gateway\]\s*/, "") }];
|
||||
}
|
||||
|
||||
return [{ kind: "stdout", ts, text: normalized.line }];
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"extends": "../../../tsconfig.json",
|
||||
"extends": "../../../tsconfig.base.json",
|
||||
"compilerOptions": {
|
||||
"outDir": "dist",
|
||||
"rootDir": "src"
|
||||
@@ -1,49 +0,0 @@
|
||||
# @paperclipai/adapter-openclaw
|
||||
|
||||
## 0.2.6
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Version bump (patch)
|
||||
- Updated dependencies
|
||||
- @paperclipai/adapter-utils@0.2.6
|
||||
|
||||
## 0.2.5
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Version bump (patch)
|
||||
- Updated dependencies
|
||||
- @paperclipai/adapter-utils@0.2.5
|
||||
|
||||
## 0.2.4
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Version bump (patch)
|
||||
- Updated dependencies
|
||||
- @paperclipai/adapter-utils@0.2.4
|
||||
|
||||
## 0.2.3
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Version bump (patch)
|
||||
- Updated dependencies
|
||||
- @paperclipai/adapter-utils@0.2.3
|
||||
|
||||
## 0.2.2
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Version bump (patch)
|
||||
- Updated dependencies
|
||||
- @paperclipai/adapter-utils@0.2.2
|
||||
|
||||
## 0.2.1
|
||||
|
||||
### Patch Changes
|
||||
|
||||
- Version bump (patch)
|
||||
- Updated dependencies
|
||||
- @paperclipai/adapter-utils@0.2.1
|
||||
@@ -1,18 +0,0 @@
|
||||
import pc from "picocolors";
|
||||
|
||||
/**
 * Print one legacy OpenClaw stream line to stdout.
 *
 * Without debug, the line is echoed unchanged. With debug, lines tagged
 * `[openclaw]` are highlighted in cyan and everything else is dimmed gray.
 * Blank/whitespace-only input prints nothing.
 */
export function printOpenClawStreamEvent(raw: string, debug: boolean): void {
  const line = raw.trim();
  if (!line) return;

  // Non-debug mode: plain pass-through, no colorization.
  if (!debug) {
    console.log(line);
    return;
  }

  if (line.startsWith("[openclaw]")) {
    console.log(pc.cyan(line));
    return;
  }

  console.log(pc.gray(line));
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user