Add agent_task_sessions table for per-task session state keyed by (agent, adapter, taskKey). Add AgentTaskSession type, resetAgentSessionSchema validator, and sessionDisplayId/sessionParamsJson to AgentRuntimeState. Rework migration hash-resolution fallback ordering to prefer hash-based matching over timestamp-based journal matching. Move backup-db.sh logic into packages/db/src/backup.ts for programmatic use and simplify the shell script to call the TypeScript implementation. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
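
A rough sketch of what the new agent_task_sessions table could look like in Drizzle terms (the __drizzle_migrations exclusion in the file below implies Drizzle ORM). Everything beyond the three key columns named in the message is an illustrative assumption, not the actual packages/db schema:

// Hypothetical sketch only -- column set and types are assumptions derived
// from the commit message, not the real schema in packages/db.
import { pgTable, text, jsonb, primaryKey } from "drizzle-orm/pg-core";

export const agentTaskSessions = pgTable(
  "agent_task_sessions",
  {
    agent: text("agent").notNull(),
    adapter: text("adapter").notNull(),
    taskKey: text("task_key").notNull(),
    // Per-task session state; the real column set is not visible in this file.
    state: jsonb("state"),
  },
  (t) => ({
    pk: primaryKey({ columns: [t.agent, t.adapter, t.taskKey] }),
  }),
);

// The AgentTaskSession type named in the message would then follow naturally:
export type AgentTaskSession = typeof agentTaskSessions.$inferSelect;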
import { existsSync, mkdirSync, readFileSync, readdirSync, statSync, unlinkSync } from "node:fs";
import { writeFile } from "node:fs/promises";
import { resolve } from "node:path";
import postgres from "postgres";

const PROJECT_ROOT = resolve(import.meta.dirname, "../../..");
const BACKUP_DIR = resolve(PROJECT_ROOT, "data/backups");
const CONFIG_FILE = resolve(PROJECT_ROOT, ".paperclip/config.json");
const MAX_AGE_DAYS = 30;

function loadPort(): number {
  try {
    const raw = readFileSync(CONFIG_FILE, "utf8");
    const config = JSON.parse(raw);
    const port = config?.database?.embeddedPostgresPort;
    if (typeof port === "number" && Number.isFinite(port)) return port;
  } catch {}
  return 54329;
}
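
// Illustrative shape of .paperclip/config.json as read above; only
// database.embeddedPostgresPort is consulted, everything else is ignored:
//   { "database": { "embeddedPostgresPort": 54329 } }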

function timestamp(): string {
  const d = new Date();
  const pad = (n: number) => String(n).padStart(2, "0");
  return `${d.getFullYear()}${pad(d.getMonth() + 1)}${pad(d.getDate())}-${pad(d.getHours())}${pad(d.getMinutes())}${pad(d.getSeconds())}`;
}
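
// e.g. timestamp() => "20250607-142530" (local time), which yields backup
// files named like paperclip-20250607-142530.sql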

function pruneOldBackups() {
  if (!existsSync(BACKUP_DIR)) return;
  const cutoff = Date.now() - MAX_AGE_DAYS * 24 * 60 * 60 * 1000;
  let pruned = 0;
  for (const name of readdirSync(BACKUP_DIR)) {
    if (!name.startsWith("paperclip-") || !name.endsWith(".sql")) continue;
    const fullPath = resolve(BACKUP_DIR, name);
    const stat = statSync(fullPath);
    if (stat.mtimeMs < cutoff) {
      unlinkSync(fullPath);
      pruned++;
    }
  }
  if (pruned > 0) console.log(`Pruned ${pruned} backup(s) older than ${MAX_AGE_DAYS} days.`);
}

async function main() {
  const port = loadPort();
  const connString = `postgres://paperclip:paperclip@127.0.0.1:${port}/paperclip`;

  console.log(`Connecting to embedded PostgreSQL on port ${port}...`);

  const sql = postgres(connString, { max: 1, connect_timeout: 5 });

  try {
    // Verify connection
    await sql`SELECT 1`;
  } catch {
    console.error(`Error: Cannot connect to embedded PostgreSQL on port ${port}.`);
    console.error(" Make sure the server is running (pnpm dev).");
    process.exit(1);
  }

  try {
    const lines: string[] = [];
    const emit = (line: string) => lines.push(line);

    emit("-- Paperclip database backup");
    emit(`-- Created: ${new Date().toISOString()}`);
    emit(`-- Server port: ${port}`);
    emit("");
    emit("BEGIN;");
    emit("");

    // Get all enums
    const enums = await sql<{ typname: string; labels: string[] }[]>`
      SELECT t.typname, array_agg(e.enumlabel ORDER BY e.enumsortorder) AS labels
      FROM pg_type t
      JOIN pg_enum e ON t.oid = e.enumtypid
      JOIN pg_namespace n ON t.typnamespace = n.oid
      WHERE n.nspname = 'public'
      GROUP BY t.typname
      ORDER BY t.typname
    `;

    for (const e of enums) {
      const labels = e.labels.map((l) => `'${l.replace(/'/g, "''")}'`).join(", ");
      emit(`CREATE TYPE "public"."${e.typname}" AS ENUM (${labels});`);
    }
    if (enums.length > 0) emit("");
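
    // Example of what the enum pass above emits (name and labels illustrative):
    //   CREATE TYPE "public"."task_status" AS ENUM ('open', 'closed');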

    // Get all user tables (alphabetical order); the __drizzle_migrations
    // bookkeeping table is deliberately excluded from the backup.
    const tables = await sql<{ tablename: string }[]>`
      SELECT c.relname AS tablename
      FROM pg_class c
      JOIN pg_namespace n ON n.oid = c.relnamespace
      WHERE n.nspname = 'public'
        AND c.relkind = 'r'
        AND c.relname != '__drizzle_migrations'
      ORDER BY c.relname
    `;

    // Get full CREATE TABLE DDL via column info
    for (const { tablename } of tables) {
      const columns = await sql<{
        column_name: string;
        data_type: string;
        udt_name: string;
        is_nullable: string;
        column_default: string | null;
        character_maximum_length: number | null;
        numeric_precision: number | null;
        numeric_scale: number | null;
      }[]>`
        SELECT column_name, data_type, udt_name, is_nullable, column_default,
               character_maximum_length, numeric_precision, numeric_scale
        FROM information_schema.columns
        WHERE table_schema = 'public' AND table_name = ${tablename}
        ORDER BY ordinal_position
      `;

      emit(`-- Table: ${tablename}`);
      emit(`DROP TABLE IF EXISTS "${tablename}" CASCADE;`);

      const colDefs: string[] = [];
      for (const col of columns) {
        let typeStr: string;
        if (col.data_type === "USER-DEFINED") {
          typeStr = `"${col.udt_name}"`;
        } else if (col.data_type === "ARRAY") {
          typeStr = `${col.udt_name.replace(/^_/, "")}[]`;
        } else if (col.data_type === "character varying") {
          typeStr = col.character_maximum_length
            ? `varchar(${col.character_maximum_length})`
            : "varchar";
        } else if (col.data_type === "numeric" && col.numeric_precision != null) {
          typeStr =
            col.numeric_scale != null
              ? `numeric(${col.numeric_precision}, ${col.numeric_scale})`
              : `numeric(${col.numeric_precision})`;
        } else {
          typeStr = col.data_type;
        }

        let def = `  "${col.column_name}" ${typeStr}`;
        if (col.column_default != null) def += ` DEFAULT ${col.column_default}`;
        if (col.is_nullable === "NO") def += " NOT NULL";
        colDefs.push(def);
      }

      // Primary key
      const pk = await sql<{ constraint_name: string; column_names: string[] }[]>`
        SELECT c.conname AS constraint_name,
               array_agg(a.attname ORDER BY array_position(c.conkey, a.attnum)) AS column_names
        FROM pg_constraint c
        JOIN pg_class t ON t.oid = c.conrelid
        JOIN pg_namespace n ON n.oid = t.relnamespace
        JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = ANY(c.conkey)
        WHERE n.nspname = 'public' AND t.relname = ${tablename} AND c.contype = 'p'
        GROUP BY c.conname
      `;
      for (const p of pk) {
        const cols = p.column_names.map((c) => `"${c}"`).join(", ");
        colDefs.push(`  CONSTRAINT "${p.constraint_name}" PRIMARY KEY (${cols})`);
      }

      emit(`CREATE TABLE "${tablename}" (`);
      emit(colDefs.join(",\n"));
      emit(");");
      emit("");
    }
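
    // Example per-table output (table, columns, and default illustrative):
    //   -- Table: tasks
    //   DROP TABLE IF EXISTS "tasks" CASCADE;
    //   CREATE TABLE "tasks" (
    //     "id" uuid DEFAULT gen_random_uuid() NOT NULL,
    //     "title" text NOT NULL,
    //     CONSTRAINT "tasks_pkey" PRIMARY KEY ("id")
    //   );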

    // Foreign keys. Column lists are resolved with unnest(... WITH ORDINALITY)
    // so composite keys keep their column order; a plain double join against
    // pg_attribute would cross-multiply the rows and duplicate column names.
    const fks = await sql<{
      constraint_name: string;
      source_table: string;
      source_columns: string[];
      target_table: string;
      target_columns: string[];
      update_rule: string;
      delete_rule: string;
    }[]>`
      SELECT
        c.conname AS constraint_name,
        src.relname AS source_table,
        (SELECT array_agg(a.attname ORDER BY k.ord)
           FROM unnest(c.conkey) WITH ORDINALITY AS k(attnum, ord)
           JOIN pg_attribute a ON a.attrelid = c.conrelid AND a.attnum = k.attnum) AS source_columns,
        tgt.relname AS target_table,
        (SELECT array_agg(a.attname ORDER BY k.ord)
           FROM unnest(c.confkey) WITH ORDINALITY AS k(attnum, ord)
           JOIN pg_attribute a ON a.attrelid = c.confrelid AND a.attnum = k.attnum) AS target_columns,
        CASE c.confupdtype WHEN 'a' THEN 'NO ACTION' WHEN 'r' THEN 'RESTRICT' WHEN 'c' THEN 'CASCADE' WHEN 'n' THEN 'SET NULL' WHEN 'd' THEN 'SET DEFAULT' END AS update_rule,
        CASE c.confdeltype WHEN 'a' THEN 'NO ACTION' WHEN 'r' THEN 'RESTRICT' WHEN 'c' THEN 'CASCADE' WHEN 'n' THEN 'SET NULL' WHEN 'd' THEN 'SET DEFAULT' END AS delete_rule
      FROM pg_constraint c
      JOIN pg_class src ON src.oid = c.conrelid
      JOIN pg_class tgt ON tgt.oid = c.confrelid
      JOIN pg_namespace n ON n.oid = src.relnamespace
      WHERE c.contype = 'f' AND n.nspname = 'public'
      ORDER BY src.relname, c.conname
    `;

    // Collect the FK DDL now but emit it after the data section: the INSERTs
    // run in alphabetical table order, not dependency order, so adding the
    // constraints up front could make a restore fail mid-load.
    const fkLines: string[] = [];
    if (fks.length > 0) {
      fkLines.push("-- Foreign keys");
      for (const fk of fks) {
        const srcCols = fk.source_columns.map((c) => `"${c}"`).join(", ");
        const tgtCols = fk.target_columns.map((c) => `"${c}"`).join(", ");
        fkLines.push(
          `ALTER TABLE "${fk.source_table}" ADD CONSTRAINT "${fk.constraint_name}" FOREIGN KEY (${srcCols}) REFERENCES "${fk.target_table}" (${tgtCols}) ON UPDATE ${fk.update_rule} ON DELETE ${fk.delete_rule};`,
        );
      }
      fkLines.push("");
    }
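
    // Each collected line looks like (all names illustrative):
    //   ALTER TABLE "tasks" ADD CONSTRAINT "tasks_project_id_fkey" FOREIGN KEY ("project_id") REFERENCES "projects" ("id") ON UPDATE NO ACTION ON DELETE CASCADE;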

    // Unique constraints
    const uniques = await sql<{
      constraint_name: string;
      tablename: string;
      column_names: string[];
    }[]>`
      SELECT c.conname AS constraint_name,
             t.relname AS tablename,
             array_agg(a.attname ORDER BY array_position(c.conkey, a.attnum)) AS column_names
      FROM pg_constraint c
      JOIN pg_class t ON t.oid = c.conrelid
      JOIN pg_namespace n ON n.oid = t.relnamespace
      JOIN pg_attribute a ON a.attrelid = t.oid AND a.attnum = ANY(c.conkey)
      WHERE n.nspname = 'public' AND c.contype = 'u'
      GROUP BY c.conname, t.relname
      ORDER BY t.relname, c.conname
    `;

    if (uniques.length > 0) {
      emit("-- Unique constraints");
      for (const u of uniques) {
        const cols = u.column_names.map((c) => `"${c}"`).join(", ");
        emit(`ALTER TABLE "${u.tablename}" ADD CONSTRAINT "${u.constraint_name}" UNIQUE (${cols});`);
      }
      emit("");
    }

    // Indexes (non-primary, non-unique-constraint)
    const indexes = await sql<{ indexdef: string }[]>`
      SELECT indexdef
      FROM pg_indexes
      WHERE schemaname = 'public'
        AND indexname NOT IN (
          SELECT conname FROM pg_constraint
          WHERE connamespace = (SELECT oid FROM pg_namespace WHERE nspname = 'public')
        )
      ORDER BY tablename, indexname
    `;

    if (indexes.length > 0) {
      emit("-- Indexes");
      for (const idx of indexes) {
        emit(`${idx.indexdef};`);
      }
      emit("");
    }

    // Dump data for each table
    for (const { tablename } of tables) {
      const count = await sql<{ n: number }[]>`
        SELECT count(*)::int AS n FROM ${sql(tablename)}
      `;
      if ((count[0]?.n ?? 0) === 0) continue;

      // Get column info for this table
      const cols = await sql<{ column_name: string; data_type: string }[]>`
        SELECT column_name, data_type
        FROM information_schema.columns
        WHERE table_schema = 'public' AND table_name = ${tablename}
        ORDER BY ordinal_position
      `;
      const colNames = cols.map((c) => `"${c.column_name}"`).join(", ");

      emit(`-- Data for: ${tablename} (${count[0]!.n} rows)`);

      const rows = await sql`SELECT * FROM ${sql(tablename)}`.values();
      for (const row of rows) {
        const values = row.map((val: any) => {
          if (val === null || val === undefined) return "NULL";
          if (typeof val === "boolean") return val ? "true" : "false";
          if (typeof val === "number") return String(val);
          if (val instanceof Date) return `'${val.toISOString()}'`;
          // bytea columns arrive as Buffers; serialize as a hex literal.
          if (Buffer.isBuffer(val)) return `'\\x${val.toString("hex")}'`;
          // Array columns arrive as JS arrays; build a Postgres array literal
          // (JSON.stringify would produce [..] instead of the required {..}).
          if (Array.isArray(val)) {
            const elems = val.map((v: any) =>
              v === null
                ? "NULL"
                : `"${String(v).replace(/\\/g, "\\\\").replace(/"/g, '\\"')}"`,
            );
            const literal = `{${elems.join(",")}}`;
            return `'${literal.replace(/'/g, "''")}'`;
          }
          if (typeof val === "object") return `'${JSON.stringify(val).replace(/'/g, "''")}'`;
          return `'${String(val).replace(/'/g, "''")}'`;
        });
        emit(`INSERT INTO "${tablename}" (${colNames}) VALUES (${values.join(", ")});`);
      }
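
      // Example row emitted above (table, columns, and values illustrative):
      //   INSERT INTO "tasks" ("id", "title") VALUES ('a1b2c3d4', 'Fix login');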
      emit("");
    }

    // Now that all rows are in place, emit the deferred foreign-key DDL
    // collected earlier.
    for (const line of fkLines) emit(line);

    // Sequence values
    const sequences = await sql<{ sequence_name: string }[]>`
      SELECT sequence_name
      FROM information_schema.sequences
      WHERE sequence_schema = 'public'
      ORDER BY sequence_name
    `;

    if (sequences.length > 0) {
      emit("-- Sequence values");
      for (const seq of sequences) {
        const val = await sql<{ last_value: string; is_called: boolean }[]>`
          SELECT last_value::text, is_called FROM ${sql(seq.sequence_name)}
        `;
        if (val[0]) {
          // Preserve is_called so a never-used sequence still yields its
          // first value after a restore instead of skipping it.
          emit(`SELECT setval('"${seq.sequence_name}"', ${val[0].last_value}, ${val[0].is_called});`);
        }
      }
      emit("");
    }
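
    // e.g. SELECT setval('"tasks_id_seq"', 42, true); (sequence name illustrative)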

    emit("COMMIT;");
    emit("");

    // Write the backup file
    mkdirSync(BACKUP_DIR, { recursive: true });
    const backupFile = resolve(BACKUP_DIR, `paperclip-${timestamp()}.sql`);
    await writeFile(backupFile, lines.join("\n"), "utf8");

    const sizeBytes = statSync(backupFile).size;
    const sizeStr =
      sizeBytes < 1024
        ? `${sizeBytes}B`
        : sizeBytes < 1024 * 1024
          ? `${(sizeBytes / 1024).toFixed(1)}K`
          : `${(sizeBytes / (1024 * 1024)).toFixed(1)}M`;

    console.log(`Backup saved: ${backupFile} (${sizeStr})`);

    pruneOldBackups();
  } finally {
    await sql.end();
  }
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});
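
// Per the commit message, backup-db.sh now simply delegates to this module;
// the wrapper's exact invocation is not shown here and may differ.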