Improve orphaned local heartbeat recovery

Persist child-process metadata for local adapter runs, keep detached runs marked as running while their pid is still alive, queue a single automatic retry once the pid is confirmed dead, and clear the detached warning when the original run reports activity again.

Co-Authored-By: Paperclip <noreply@paperclip.ing>
dotta
2026-03-19 11:20:36 -05:00
parent 7f3fad64b8
commit c844ca1a40
17 changed files with 10924 additions and 22 deletions
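
Only the service side of the new onSpawn hook appears in this diff; the local adapter is expected to invoke it as soon as the child process spawns. A minimal sketch of that adapter side, assuming a Node spawn-based launcher (the launcher name and option shapes here are illustrative, not from this commit):

import { spawn } from "node:child_process";

type SpawnMeta = { pid: number; startedAt: string };

// Hypothetical launcher: emit pid/startedAt through onSpawn so the service can
// persist them via persistRunProcessMetadata (see the hunk near line 2372 below).
function launchLocalChild(
  command: string,
  args: string[],
  onSpawn?: (meta: SpawnMeta) => Promise<void>,
) {
  const child = spawn(command, args, { stdio: ["ignore", "pipe", "pipe"] });
  child.once("spawn", () => {
    // child.pid is defined once the "spawn" event fires; the ISO string matches
    // what persistRunProcessMetadata parses with new Date(meta.startedAt).
    if (typeof child.pid === "number" && onSpawn) {
      void onSpawn({ pid: child.pid, startedAt: new Date().toISOString() });
    }
  });
  return child;
}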


@@ -60,6 +60,7 @@ const MAX_LIVE_LOG_CHUNK_BYTES = 8 * 1024;
const HEARTBEAT_MAX_CONCURRENT_RUNS_DEFAULT = 1;
const HEARTBEAT_MAX_CONCURRENT_RUNS_MAX = 10;
const DEFERRED_WAKE_CONTEXT_KEY = "_paperclipWakeContext";
const DETACHED_PROCESS_ERROR_CODE = "process_detached";
const startLocksByAgent = new Map<string, Promise<void>>();
const REPO_ONLY_CWD_SENTINEL = "/__paperclip_repo_only__";
const MANAGED_WORKSPACE_GIT_CLONE_TIMEOUT_MS = 10 * 60 * 1000;
@@ -163,6 +164,10 @@ const heartbeatRunListColumns = {
stderrExcerpt: sql<string | null>`NULL`.as("stderrExcerpt"),
errorCode: heartbeatRuns.errorCode,
externalRunId: heartbeatRuns.externalRunId,
processPid: heartbeatRuns.processPid,
processStartedAt: heartbeatRuns.processStartedAt,
retryOfRunId: heartbeatRuns.retryOfRunId,
processLossRetryCount: heartbeatRuns.processLossRetryCount,
contextSnapshot: heartbeatRuns.contextSnapshot,
createdAt: heartbeatRuns.createdAt,
updatedAt: heartbeatRuns.updatedAt,
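
The four columns selected above are new on heartbeatRuns; the backing migration is in one of the other changed files and is not shown here, so the following drizzle-orm definitions are inferred from usage in this diff (names and types are assumptions):

import { integer, text, timestamp } from "drizzle-orm/pg-core";

// Hypothetical column definitions: processLossRetryCount is read with a `?? 0`
// fallback elsewhere in this diff, so the columns are left nullable here.
const inferredHeartbeatRunColumns = {
  processPid: integer("process_pid"),
  processStartedAt: timestamp("process_started_at"),
  retryOfRunId: text("retry_of_run_id"),
  processLossRetryCount: integer("process_loss_retry_count"),
};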
@@ -598,6 +603,23 @@ function isSameTaskScope(left: string | null, right: string | null) {
return (left ?? null) === (right ?? null);
}
function isTrackedLocalChildProcessAdapter(adapterType: string) {
return SESSIONED_LOCAL_ADAPTERS.has(adapterType);
}
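// process.kill(pid, 0) sends no signal; it only reports whether the pid exists and is signalable.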
function isProcessAlive(pid: number | null | undefined) {
if (typeof pid !== "number" || !Number.isInteger(pid) || pid <= 0) return false;
try {
process.kill(pid, 0);
return true;
} catch (error) {
const code = (error as NodeJS.ErrnoException | undefined)?.code;
if (code === "EPERM") return true;
if (code === "ESRCH") return false;
return false;
}
}
function truncateDisplayId(value: string | null | undefined, max = 128) {
if (!value) return null;
return value.length > max ? value.slice(0, max) : value;
@@ -1326,6 +1348,156 @@ export function heartbeatService(db: Db) {
});
}
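// Run events carry a per-run sequence number; the next seq is max(seq) + 1 for that run.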
async function nextRunEventSeq(runId: string) {
const [row] = await db
.select({ maxSeq: sql<number | null>`max(${heartbeatRunEvents.seq})` })
.from(heartbeatRunEvents)
.where(eq(heartbeatRunEvents.runId, runId));
return Number(row?.maxSeq ?? 0) + 1;
}
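// Record the spawned child's pid and start time so the reaper can probe liveness even after the in-memory handle is lost (e.g. across a server restart).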
async function persistRunProcessMetadata(
runId: string,
meta: { pid: number; startedAt: string },
) {
const startedAt = new Date(meta.startedAt);
return db
.update(heartbeatRuns)
.set({
processPid: meta.pid,
processStartedAt: Number.isNaN(startedAt.getTime()) ? new Date() : startedAt,
updatedAt: new Date(),
})
.where(eq(heartbeatRuns.id, runId))
.returning()
.then((rows) => rows[0] ?? null);
}
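// Fresh activity proves the detached child is still working; clear the warning, but only while the run is still "running" and flagged detached.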
async function clearDetachedRunWarning(runId: string) {
const updated = await db
.update(heartbeatRuns)
.set({
error: null,
errorCode: null,
updatedAt: new Date(),
})
.where(and(eq(heartbeatRuns.id, runId), eq(heartbeatRuns.status, "running"), eq(heartbeatRuns.errorCode, DETACHED_PROCESS_ERROR_CODE)))
.returning()
.then((rows) => rows[0] ?? null);
if (!updated) return null;
await appendRunEvent(updated, await nextRunEventSeq(updated.id), {
eventType: "lifecycle",
stream: "system",
level: "info",
message: "Detached child process reported activity; cleared detached warning",
});
return updated;
}
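// When an orphaned child is confirmed dead, queue exactly one automatic retry that reuses the failed run's context snapshot.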
async function enqueueProcessLossRetry(
run: typeof heartbeatRuns.$inferSelect,
agent: typeof agents.$inferSelect,
now: Date,
) {
const contextSnapshot = parseObject(run.contextSnapshot);
const issueId = readNonEmptyString(contextSnapshot.issueId);
const taskKey = deriveTaskKey(contextSnapshot, null);
const sessionBefore = await resolveSessionBeforeForWakeup(agent, taskKey);
const retryContextSnapshot = {
...contextSnapshot,
retryOfRunId: run.id,
wakeReason: "process_lost_retry",
retryReason: "process_lost",
};
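// Insert the wakeup request and its retry run atomically, then cross-link them.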
const queued = await db.transaction(async (tx) => {
const wakeupRequest = await tx
.insert(agentWakeupRequests)
.values({
companyId: run.companyId,
agentId: run.agentId,
source: "automation",
triggerDetail: "system",
reason: "process_lost_retry",
payload: {
...(issueId ? { issueId } : {}),
retryOfRunId: run.id,
},
status: "queued",
requestedByActorType: "system",
requestedByActorId: null,
updatedAt: now,
})
.returning()
.then((rows) => rows[0]);
const retryRun = await tx
.insert(heartbeatRuns)
.values({
companyId: run.companyId,
agentId: run.agentId,
invocationSource: "automation",
triggerDetail: "system",
status: "queued",
wakeupRequestId: wakeupRequest.id,
contextSnapshot: retryContextSnapshot,
sessionIdBefore: sessionBefore,
retryOfRunId: run.id,
processLossRetryCount: (run.processLossRetryCount ?? 0) + 1,
updatedAt: now,
})
.returning()
.then((rows) => rows[0]);
await tx
.update(agentWakeupRequests)
.set({
runId: retryRun.id,
updatedAt: now,
})
.where(eq(agentWakeupRequests.id, wakeupRequest.id));
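// Hand the issue's execution lock from the dead run to the retry, but only if the dead run still holds it.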
if (issueId) {
await tx
.update(issues)
.set({
executionRunId: retryRun.id,
executionAgentNameKey: normalizeAgentNameKey(agent.name),
executionLockedAt: now,
updatedAt: now,
})
.where(and(eq(issues.id, issueId), eq(issues.companyId, run.companyId), eq(issues.executionRunId, run.id)));
}
return retryRun;
});
publishLiveEvent({
companyId: queued.companyId,
type: "heartbeat.run.queued",
payload: {
runId: queued.id,
agentId: queued.agentId,
invocationSource: queued.invocationSource,
triggerDetail: queued.triggerDetail,
wakeupRequestId: queued.wakeupRequestId,
},
});
await appendRunEvent(queued, 1, {
eventType: "lifecycle",
stream: "system",
level: "warn",
message: "Queued automatic retry after orphaned child process was confirmed dead",
payload: {
retryOfRunId: run.id,
},
});
return queued;
}
function parseHeartbeatPolicy(agent: typeof agents.$inferSelect) {
const runtimeConfig = parseObject(agent.runtimeConfig);
const heartbeat = parseObject(runtimeConfig.heartbeat);
@@ -1453,13 +1625,17 @@ export function heartbeatService(db: Db) {
// Find all runs stuck in "running" state (queued runs are legitimately waiting; resumeQueuedRuns handles them)
const activeRuns = await db
.select({
run: heartbeatRuns,
adapterType: agents.adapterType,
})
.from(heartbeatRuns)
.innerJoin(agents, eq(heartbeatRuns.agentId, agents.id))
.where(eq(heartbeatRuns.status, "running"));
const reaped: string[] = [];
for (const { run, adapterType } of activeRuns) {
if (runningProcesses.has(run.id) || activeRunExecutions.has(run.id)) continue;
// Apply staleness threshold to avoid false positives
@@ -1468,25 +1644,69 @@ export function heartbeatService(db: Db) {
if (now.getTime() - refTime < staleThresholdMs) continue;
}
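// Three cases from here: child still alive (flag the run as detached but keep it running), child dead with a recorded pid (fail and retry once), no recorded pid (fail as before).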
const tracksLocalChild = isTrackedLocalChildProcessAdapter(adapterType);
if (tracksLocalChild && run.processPid && isProcessAlive(run.processPid)) {
if (run.errorCode !== DETACHED_PROCESS_ERROR_CODE) {
const detachedMessage = `Lost in-memory process handle, but child pid ${run.processPid} is still alive`;
const detachedRun = await setRunStatus(run.id, "running", {
error: detachedMessage,
errorCode: DETACHED_PROCESS_ERROR_CODE,
});
if (detachedRun) {
await appendRunEvent(detachedRun, await nextRunEventSeq(detachedRun.id), {
eventType: "lifecycle",
stream: "system",
level: "warn",
message: detachedMessage,
payload: {
processPid: run.processPid,
},
});
}
}
continue;
}
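// Retry at most once per original run; processLossRetryCount guards against retry loops.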
const shouldRetry = tracksLocalChild && !!run.processPid && (run.processLossRetryCount ?? 0) < 1;
const baseMessage = run.processPid
? `Process lost -- child pid ${run.processPid} is no longer running`
: "Process lost -- server may have restarted";
let finalizedRun = await setRunStatus(run.id, "failed", {
error: shouldRetry ? `${baseMessage}; retrying once` : baseMessage,
errorCode: "process_lost",
finishedAt: now,
});
await setWakeupStatus(run.wakeupRequestId, "failed", {
finishedAt: now,
error: "Process lost -- server may have restarted",
error: shouldRetry ? `${baseMessage}; retrying once` : baseMessage,
});
if (!finalizedRun) finalizedRun = await getRun(run.id);
if (!finalizedRun) continue;
let retriedRun: typeof heartbeatRuns.$inferSelect | null = null;
if (shouldRetry) {
const agent = await getAgent(run.agentId);
if (agent) {
retriedRun = await enqueueProcessLossRetry(finalizedRun, agent, now);
}
} else {
await releaseIssueExecutionAndPromote(finalizedRun);
}
await appendRunEvent(finalizedRun, await nextRunEventSeq(finalizedRun.id), {
eventType: "lifecycle",
stream: "system",
level: "error",
message: shouldRetry
? `${baseMessage}; queued retry ${retriedRun?.id ?? ""}`.trim()
: baseMessage,
payload: {
...(run.processPid ? { processPid: run.processPid } : {}),
...(retriedRun ? { retryRunId: retriedRun.id } : {}),
},
});
await finalizeAgentStatus(run.agentId, "failed");
await startNextQueuedRunForAgent(run.agentId);
runningProcesses.delete(run.id);
@@ -2152,6 +2372,9 @@ export function heartbeatService(db: Db) {
context,
onLog,
onMeta: onAdapterMeta,
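// New hook: persist the child's pid/startedAt the moment the adapter spawns it.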
onSpawn: async (meta) => {
await persistRunProcessMetadata(run.id, meta);
},
authToken: authToken ?? undefined,
});
const adapterManagedRuntimeServices = adapterResult.runtimeServices
@@ -3403,6 +3626,8 @@ export function heartbeatService(db: Db) {
wakeup: enqueueWakeup,
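// Adapter activity doubles as proof of life for a detached run (see clearDetachedRunWarning above).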
reportRunActivity: clearDetachedRunWarning,
reapOrphanedRuns,
resumeQueuedRuns,