diff --git a/src/app/api/public/health/route.ts b/src/app/api/public/health/route.ts
index 6e3eb0e..f9e0337 100644
--- a/src/app/api/public/health/route.ts
+++ b/src/app/api/public/health/route.ts
@@ -1,4 +1,5 @@
 import { NextRequest, NextResponse } from 'next/server';
+import { timingSafeEqual } from 'node:crypto';
 import { env } from '@/lib/env';
@@ -19,8 +20,20 @@ import { env } from '@/lib/env';
 export function GET(req: NextRequest): Response {
   const expected = env.WEBSITE_INTAKE_SECRET;
   const provided = req.headers.get('x-intake-secret');
+  // Use timingSafeEqual rather than a `===` comparison — string equality
+  // is not constant-time and lets a remote attacker enumerate the secret
+  // byte-by-byte via response-time differences.
   const matched =
-    expected && provided && provided.length === expected.length && provided === expected;
+    !!expected &&
+    !!provided &&
+    provided.length === expected.length &&
+    (() => {
+      try {
+        return timingSafeEqual(Buffer.from(provided), Buffer.from(expected));
+      } catch {
+        return false;
+      }
+    })();
 
   if (!matched) {
     return NextResponse.json(
diff --git a/src/app/api/v1/me/route.ts b/src/app/api/v1/me/route.ts
index 5d9f89c..bf5406a 100644
--- a/src/app/api/v1/me/route.ts
+++ b/src/app/api/v1/me/route.ts
@@ -62,13 +62,20 @@ export const PATCH = withAuth(async (req, ctx: AuthContext) => {
   if (body.phone !== undefined) updates.phone = body.phone;
   if (body.avatarUrl !== undefined) updates.avatarUrl = body.avatarUrl;
   if (body.preferences !== undefined) {
-    const merged = {
-      ...((profile.preferences as Record<string, unknown>) ?? {}),
-      ...body.preferences,
-    };
-    // Hard cap on the merged JSONB to defend against historical rows
-    // bloated by the previous .passthrough() schema. 8 KB is generous
-    // — current legitimate keys are 3 booleans/strings.
+    // Allow-list — only retain keys defined in the strict schema. Pre-
+    // strict rows may carry extra keys from when the schema was
+    // .passthrough(); the merge prunes them so legacy bloat doesn't
+    // accumulate forever, and a future schema regression that tries
+    // to ship arbitrary keys still gets dropped here at write time.
+    const ALLOWED_PREF_KEYS = new Set(['dark_mode', 'locale', 'timezone']);
+    const existing = (profile.preferences as Record<string, unknown>) ?? {};
+    const merged = Object.fromEntries(
+      Object.entries({ ...existing, ...body.preferences }).filter(([k]) =>
+        ALLOWED_PREF_KEYS.has(k),
+      ),
+    );
+    // Hard cap on the merged JSONB — defense in depth against any
+    // future schema growth that might re-introduce free-form keys.
     const serialized = JSON.stringify(merged);
     if (Buffer.byteLength(serialized, 'utf8') > 8 * 1024) {
       throw new ValidationError('preferences exceeds 8KB');
diff --git a/src/app/api/webhooks/documenso/route.ts b/src/app/api/webhooks/documenso/route.ts
index 1f8d5ea..0f5a0f0 100644
--- a/src/app/api/webhooks/documenso/route.ts
+++ b/src/app/api/webhooks/documenso/route.ts
@@ -104,6 +104,13 @@ export async function POST(req: NextRequest): Promise<Response> {
     return NextResponse.json({ ok: true }, { status: 200 });
   }
 
+  // Every handler accepts an optional `portId` and, without one, refuses
+  // to mutate when the lookup is ambiguous across multiple ports. Forward
+  // the secret-resolved portId everywhere — not just the expired path —
+  // so signed/completed/opened/rejected/cancelled events can't flip a
+  // foreign-tenant document via documensoId reuse.
+  const portScope = matchedPortId ? { portId: matchedPortId } : {};
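+  // e.g. with matchedPortId = 'port-a' (illustrative value), every handler
+  // payload below gains `portId: 'port-a'`; when no per-port secret matched,
+  // the spread adds nothing and each handler falls back to its ambiguity guard.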
+
   try {
     switch (event) {
       case 'DOCUMENT_SIGNED':
@@ -118,6 +125,7 @@ export async function POST(req: NextRequest): Promise<Response> {
             documentId: documensoId,
             recipientEmail: r.email,
             signatureHash: `${signatureHash}:signed:${r.email}`,
+            ...portScope,
           });
         }
         break;
@@ -138,13 +146,14 @@ export async function POST(req: NextRequest): Promise<Response> {
             documentId: documensoId,
             recipientEmail: r.email,
             signatureHash: `${signatureHash}:opened:${r.email}`,
+            ...portScope,
           });
         }
         break;
       }
 
       case 'DOCUMENT_COMPLETED':
-        await handleDocumentCompleted({ documentId: documensoId });
+        await handleDocumentCompleted({ documentId: documensoId, ...portScope });
         break;
 
       case 'DOCUMENT_REJECTED': {
@@ -153,21 +162,17 @@ export async function POST(req: NextRequest): Promise<Response> {
           documentId: documensoId,
           recipientEmail: rejecting?.email,
           signatureHash,
+          ...portScope,
         });
         break;
       }
 
       case 'DOCUMENT_CANCELLED':
-        await handleDocumentCancelled({ documentId: documensoId, signatureHash });
+        await handleDocumentCancelled({ documentId: documensoId, signatureHash, ...portScope });
         break;
 
       case 'DOCUMENT_EXPIRED':
-        // Forward the matched portId so cross-port documenso-id reuse
-        // can't flip the wrong port's document.
-        await handleDocumentExpired({
-          documentId: documensoId,
-          ...(matchedPortId ? { portId: matchedPortId } : {}),
-        });
+        await handleDocumentExpired({ documentId: documensoId, ...portScope });
         break;
 
       default:
diff --git a/src/lib/db/migrations/0041_role_permissions_edit_keys.sql b/src/lib/db/migrations/0041_role_permissions_edit_keys.sql
index 5a57afd..dcbab88 100644
--- a/src/lib/db/migrations/0041_role_permissions_edit_keys.sql
+++ b/src/lib/db/migrations/0041_role_permissions_edit_keys.sql
@@ -13,6 +13,13 @@
 -- jsonb_set with create_missing=true (the default) creates the key when
 -- it's absent, and the WHERE guard below skips rows that already carry it,
 -- so re-runs are idempotent and the migration is safe against a partial run.
+--
+-- Note: per-port overrides live in `port_role_overrides.permissions`
+-- and are PARTIAL — they only contain the keys a port flipped from the
+-- base role. The deepMerge resolver fills in `documents.edit` from the
+-- base role for any port that didn't override it, so we deliberately do
+-- NOT touch `port_role_overrides` here. Backfilling there would synthesize
+-- override entries that the operator never intended.
 UPDATE roles
 SET permissions = jsonb_set(
@@ -33,26 +40,3 @@ SET permissions = jsonb_set(
 )
 WHERE permissions->'files' IS NOT NULL
   AND NOT (permissions->'files' ? 'edit');
-
--- Same backfill on per-port overrides (`port_role_overrides.permissions`)
--- so an override that flipped a sibling permission stays consistent.
-
-UPDATE port_role_overrides
-SET permissions = jsonb_set(
-    permissions,
-    '{documents,edit}',
-    COALESCE(permissions->'documents'->'create', 'false'::jsonb),
-    true
-)
-WHERE permissions->'documents' IS NOT NULL
-  AND NOT (permissions->'documents' ? 'edit');
-
-UPDATE port_role_overrides
-SET permissions = jsonb_set(
-    permissions,
-    '{files,edit}',
-    COALESCE(permissions->'files'->'upload', 'false'::jsonb),
-    true
-)
-WHERE permissions->'files' IS NOT NULL
-  AND NOT (permissions->'files' ? 'edit');
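-- A sketch of the override resolution the note above leans on. `deepMerge`
-- and the permission shapes here are assumptions drawn from the comment,
-- not the real resolver:
--
--   base role:     {"documents": {"create": true,  "edit": true}}
--   port override: {"documents": {"create": false}}            (partial)
--   deepMerge(base, override)
--     -> {"documents": {"create": false, "edit": true}}
--
-- `documents.edit` resolves from the base role, so partial override rows
-- never need the backfilled key.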
diff --git a/src/lib/db/migrations/0042_missing_fk_constraints.sql b/src/lib/db/migrations/0042_missing_fk_constraints.sql
index 5567058..15a3d28 100644
--- a/src/lib/db/migrations/0042_missing_fk_constraints.sql
+++ b/src/lib/db/migrations/0042_missing_fk_constraints.sql
@@ -4,8 +4,11 @@
 -- a service that writes interest_id='nonexistent' faces no DB rejection
 -- and downstream null-tolerant joins silently misbehave.
 --
--- All adds are NOT VALID-friendly (we use IF NOT EXISTS via DO blocks);
--- the migration is idempotent so re-running it is safe.
+-- All adds are idempotent (DO blocks swallow duplicate_object) and use
+-- the NOT VALID + VALIDATE pattern so the brief table-lock phase is
+-- decoupled from the slow row-scan validation. If validation fails for
+-- a constraint the migration aborts before later constraints land — a
+-- prod operator can clean the dirty row(s) and re-run.
 --
 -- Cascade rule:
 --   nullable column → ON DELETE SET NULL (orphan tolerance)
@@ -14,77 +17,106 @@
 --
 -- Refs: docs/audit-comprehensive-2026-05-05.md HIGH §10 (auditor-C3 Issue 1).
 
--- documents
+-- ─── Pre-cleanup: invoices.billing_entity_id non-empty ──────────────────────
+-- The previous schema declared notNull().default(''), so historical rows
+-- may carry empty strings. The CHECK constraint added below would reject
+-- the migration on a dirty table. Backfill from clientName when present
+-- so the resolver downstream can still find a usable handle, otherwise
+-- fall back to the row id (which gives the operator a unique marker to
+-- audit later). This UPDATE is a no-op on a clean DB.
+
+UPDATE invoices
+SET billing_entity_id = COALESCE(NULLIF(client_name, ''), id)
+WHERE billing_entity_id = '';
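-- A follow-up audit sketch (illustrative, not part of the migration): rows
-- that had neither a usable billing_entity_id nor a client_name were tagged
-- with their own id above, so they can be listed later with a self-match:
--
--   SELECT id, billing_entity_type, client_name
--   FROM invoices
--   WHERE billing_entity_id = id;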
+
+-- ─── documents ─────────────────────────────────────────────────────────────
+
 DO $$ BEGIN
   ALTER TABLE documents
     ADD CONSTRAINT documents_interest_id_fkey
-      FOREIGN KEY (interest_id) REFERENCES interests(id) ON DELETE SET NULL;
+      FOREIGN KEY (interest_id) REFERENCES interests(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE documents VALIDATE CONSTRAINT documents_interest_id_fkey;
 
 DO $$ BEGIN
   ALTER TABLE documents
     ADD CONSTRAINT documents_yacht_id_fkey
-      FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL;
+      FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE documents VALIDATE CONSTRAINT documents_yacht_id_fkey;
 
 DO $$ BEGIN
   ALTER TABLE documents
     ADD CONSTRAINT documents_company_id_fkey
-      FOREIGN KEY (company_id) REFERENCES companies(id) ON DELETE SET NULL;
+      FOREIGN KEY (company_id) REFERENCES companies(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE documents VALIDATE CONSTRAINT documents_company_id_fkey;
 
 DO $$ BEGIN
   ALTER TABLE documents
     ADD CONSTRAINT documents_reservation_id_fkey
-      FOREIGN KEY (reservation_id) REFERENCES berth_reservations(id) ON DELETE SET NULL;
+      FOREIGN KEY (reservation_id) REFERENCES berth_reservations(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE documents VALIDATE CONSTRAINT documents_reservation_id_fkey;
+
+-- ─── files ─────────────────────────────────────────────────────────────────
 
--- files
 DO $$ BEGIN
   ALTER TABLE files
     ADD CONSTRAINT files_yacht_id_fkey
-      FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL;
+      FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE files VALIDATE CONSTRAINT files_yacht_id_fkey;
 
 DO $$ BEGIN
   ALTER TABLE files
     ADD CONSTRAINT files_company_id_fkey
-      FOREIGN KEY (company_id) REFERENCES companies(id) ON DELETE SET NULL;
+      FOREIGN KEY (company_id) REFERENCES companies(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE files VALIDATE CONSTRAINT files_company_id_fkey;
+
+-- ─── interests ─────────────────────────────────────────────────────────────
 
--- interests (yacht_id is wired via relations only)
 DO $$ BEGIN
   ALTER TABLE interests
     ADD CONSTRAINT interests_yacht_id_fkey
-      FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL;
+      FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE interests VALIDATE CONSTRAINT interests_yacht_id_fkey;
+
+-- ─── reminders ─────────────────────────────────────────────────────────────
 
--- reminders
 DO $$ BEGIN
   ALTER TABLE reminders
     ADD CONSTRAINT reminders_interest_id_fkey
-      FOREIGN KEY (interest_id) REFERENCES interests(id) ON DELETE SET NULL;
+      FOREIGN KEY (interest_id) REFERENCES interests(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE reminders VALIDATE CONSTRAINT reminders_interest_id_fkey;
 
 DO $$ BEGIN
   ALTER TABLE reminders
     ADD CONSTRAINT reminders_berth_id_fkey
-      FOREIGN KEY (berth_id) REFERENCES berths(id) ON DELETE SET NULL;
+      FOREIGN KEY (berth_id) REFERENCES berths(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE reminders VALIDATE CONSTRAINT reminders_berth_id_fkey;
+
+-- ─── berth_waiting_list ────────────────────────────────────────────────────
 
--- berth_waiting_list
 DO $$ BEGIN
   ALTER TABLE berth_waiting_list
     ADD CONSTRAINT berth_waiting_list_yacht_id_fkey
-      FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL;
+      FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE berth_waiting_list VALIDATE CONSTRAINT berth_waiting_list_yacht_id_fkey;
+
+-- ─── form_submissions ──────────────────────────────────────────────────────
 
--- form_submissions
 DO $$ BEGIN
   ALTER TABLE form_submissions
     ADD CONSTRAINT form_submissions_interest_id_fkey
-      FOREIGN KEY (interest_id) REFERENCES interests(id) ON DELETE SET NULL;
+      FOREIGN KEY (interest_id) REFERENCES interests(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE form_submissions VALIDATE CONSTRAINT form_submissions_interest_id_fkey;
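-- An operator-recovery sketch (illustrative, not part of the migration): if
-- a VALIDATE CONSTRAINT step above aborts on dirty data, the orphans can be
-- listed and, since every FK column in this section is nullable, nulled out
-- before re-running. Shown for documents.interest_id; adapt the table and
-- column per failing constraint:
--
--   SELECT d.id, d.interest_id
--   FROM documents d
--   LEFT JOIN interests i ON i.id = d.interest_id
--   WHERE d.interest_id IS NOT NULL AND i.id IS NULL;
--
--   UPDATE documents SET interest_id = NULL
--   WHERE interest_id IS NOT NULL
--     AND NOT EXISTS (SELECT 1 FROM interests i WHERE i.id = documents.interest_id);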
 
 -- ─── Polymorphic CHECK round 2 ──────────────────────────────────────────────
 -- 0036 covered yachts.current_owner_type and invoices.billing_entity_type.
@@ -93,26 +125,27 @@ EXCEPTION WHEN duplicate_object THEN NULL; END $$;
 
 DO $$ BEGIN
   ALTER TABLE yacht_ownership_history
    ADD CONSTRAINT yacht_ownership_history_owner_type_chk
-      CHECK (owner_type IN ('client', 'company'));
+      CHECK (owner_type IN ('client', 'company')) NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE yacht_ownership_history VALIDATE CONSTRAINT yacht_ownership_history_owner_type_chk;
 
 DO $$ BEGIN
   ALTER TABLE document_sends
     ADD CONSTRAINT document_sends_document_kind_chk
-      CHECK (document_kind IN ('berth_pdf', 'brochure'));
+      CHECK (document_kind IN ('berth_pdf', 'brochure')) NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE document_sends VALIDATE CONSTRAINT document_sends_document_kind_chk;
 
 -- ─── invoices.billing_entity_id sanity check ───────────────────────────────
--- The schema declared notNull() with default('') which combined with the
--- 0036 type CHECK lets a row insert with billing_entity_type='client' and
--- billing_entity_id='' — the polymorphic resolver looks up the empty
--- string and returns null with no DB-level signal.
+-- The pre-cleanup at the top of this migration backfilled empty strings
+-- to clientName (or row id), so the VALIDATE step is now safe.
 DO $$ BEGIN
   ALTER TABLE invoices
     ADD CONSTRAINT invoices_billing_entity_id_nonempty_chk
-      CHECK (billing_entity_id <> '');
+      CHECK (billing_entity_id <> '') NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE invoices VALIDATE CONSTRAINT invoices_billing_entity_id_nonempty_chk;
 
 -- ─── clients.merged_into_client_id self-FK ─────────────────────────────────
 -- Already nullable; populated when a client is soft-merged into another.
@@ -120,5 +153,6 @@ EXCEPTION WHEN duplicate_object THEN NULL; END $$;
 DO $$ BEGIN
   ALTER TABLE clients
     ADD CONSTRAINT clients_merged_into_client_id_fkey
-      FOREIGN KEY (merged_into_client_id) REFERENCES clients(id) ON DELETE SET NULL;
+      FOREIGN KEY (merged_into_client_id) REFERENCES clients(id) ON DELETE SET NULL NOT VALID;
 EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+ALTER TABLE clients VALIDATE CONSTRAINT clients_merged_into_client_id_fkey;
diff --git a/src/lib/services/documenso-webhook.ts b/src/lib/services/documenso-webhook.ts
index 181d85e..573982c 100644
--- a/src/lib/services/documenso-webhook.ts
+++ b/src/lib/services/documenso-webhook.ts
@@ -3,8 +3,16 @@ import { timingSafeEqual } from 'crypto';
 // Documenso (v1.13 + 2.x) authenticates outbound webhooks by sending the
 // configured secret in plaintext via the `X-Documenso-Secret` header.
 // There is no HMAC. Compare the provided value timing-safely to the env secret.
+//
+// An empty `expected` MUST always reject — without this guard,
+// timingSafeEqual(0-bytes, 0-bytes) returns true, so a dev environment
+// with a blank DOCUMENSO_WEBHOOK_SECRET would accept any request whose
+// `X-Documenso-Secret` was also empty/missing. Same for blank per-port
+// secret rows in `system_settings` (the per-port writer should never
+// store an empty string but defense-in-depth here is cheap).
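+//
+// Call-shape sketch (hypothetical wiring; `portSecret` stands in for the
+// env value or a per-port `system_settings` row resolved by the route):
+//
+//   const ok = verifyDocumensoSecret(
+//     req.headers.get('x-documenso-secret') ?? '',
+//     portSecret ?? '',
+//   );
+//   if (!ok) return NextResponse.json({ error: 'unauthorized' }, { status: 401 });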
 export function verifyDocumensoSecret(provided: string, expected: string): boolean {
-  if (!provided || provided.length !== expected.length) return false;
+  if (!provided || !expected) return false;
+  if (provided.length !== expected.length) return false;
   try {
     return timingSafeEqual(Buffer.from(provided), Buffer.from(expected));
   } catch {
diff --git a/src/lib/services/document-reminders.ts b/src/lib/services/document-reminders.ts
index 8e67328..e7aa816 100644
--- a/src/lib/services/document-reminders.ts
+++ b/src/lib/services/document-reminders.ts
@@ -206,7 +206,20 @@ export async function processReminderQueue(portId: string): Promise {
       fileId: documents.fileId,
     })
     .from(documents)
-    .leftJoin(documentTemplates, eq(documentTemplates.templateType, documents.documentType))
+    // CRITICAL: scope the join to the same port — `documentTemplates.templateType`
+    // is not unique across ports, so a leftJoin without `portId` produces a
+    // cartesian explosion (one output row per template-type match across
+    // every port). The downstream loop fires `documensoRemind` per row,
+    // which means the same signer in port A would receive N reminders on
+    // a single cron tick (once per port that defined a template of the
+    // same type). Audit follow-up after Tier 3 ship.
+    .leftJoin(
+      documentTemplates,
+      and(
+        eq(documentTemplates.templateType, documents.documentType),
+        eq(documentTemplates.portId, portId),
+      ),
+    )
     .where(
       and(
         eq(documents.portId, portId),
diff --git a/src/lib/services/documents.service.ts b/src/lib/services/documents.service.ts
index 2e1fef2..dad58d3 100644
--- a/src/lib/services/documents.service.ts
+++ b/src/lib/services/documents.service.ts
@@ -780,18 +780,59 @@ export async function listDocumentEvents(documentId: string, portId: string) {
 
 // ─── Webhook Handlers ─────────────────────────────────────────────────────────
 
+/**
+ * Shared port-scoped lookup for inbound Documenso webhooks. Two ports
+ * sharing a Documenso instance — or migrating between instances with
+ * documentId reuse — would otherwise let `findFirst` return whichever
+ * row sorts first across tenants. When the route resolves a portId from
+ * the matched per-port webhook secret, it threads it through here;
+ * otherwise we fall back to a port-agnostic `findMany` and refuse to
+ * mutate when the lookup is ambiguous (the guard formerly inlined in
+ * handleDocumentExpired).
+ *
+ * Returns null when no document matches OR when the lookup is ambiguous
+ * across multiple ports without a resolved portId. Callers must treat
+ * null as "drop the event" (the cron sweep / next webhook will catch up
+ * once the data is consistent).
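+ *
+ * @example
+ * // Sketch of the intended call shape (ids illustrative); the route passes
+ * // its secret-resolved port, or undefined when no per-port secret matched:
+ * const doc = await resolveWebhookDocument('documenso-doc-123', matchedPortId ?? undefined);
+ * if (!doc) return; // unknown id, or ambiguous across ports without a resolved portId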
+ */
+async function resolveWebhookDocument(
+  documensoId: string,
+  portId: string | undefined,
+): Promise<typeof documents.$inferSelect | null> {
+  if (portId) {
+    const doc = await db.query.documents.findFirst({
+      where: and(eq(documents.documensoId, documensoId), eq(documents.portId, portId)),
+    });
+    if (!doc) {
+      logger.warn({ documensoId, portId }, 'Document not found for webhook (port-scoped)');
+      return null;
+    }
+    return doc;
+  }
+  const matches = await db.query.documents.findMany({
+    where: eq(documents.documensoId, documensoId),
+  });
+  if (matches.length === 0) {
+    logger.warn({ documensoId }, 'Document not found for webhook');
+    return null;
+  }
+  if (matches.length > 1) {
+    logger.error(
+      { documensoId, matchCount: matches.length, ports: matches.map((m) => m.portId) },
+      'Documenso webhook ambiguous across multiple ports — refusing to mutate',
+    );
+    return null;
+  }
+  return matches[0]!;
+}
+
 export async function handleRecipientSigned(eventData: {
   documentId: string;
   recipientEmail: string;
   signatureHash?: string;
+  portId?: string;
 }) {
-  const doc = await db.query.documents.findFirst({
-    where: eq(documents.documensoId, eventData.documentId),
-  });
-  if (!doc) {
-    logger.warn({ documensoId: eventData.documentId }, 'Document not found for webhook');
-    return;
-  }
+  const doc = await resolveWebhookDocument(eventData.documentId, eventData.portId);
+  if (!doc) return;
 
   // Update signer status
   const [signer] = await db
@@ -826,7 +867,7 @@ export async function handleRecipientSigned(eventData: {
     await db
       .update(documents)
       .set({ status: 'partially_signed', updatedAt: new Date() })
-      .where(eq(documents.id, doc.id));
+      .where(and(eq(documents.id, doc.id), eq(documents.portId, doc.portId)));
   }
 
   await db.insert(documentEvents).values({
@@ -843,14 +884,9 @@
   });
 }
 
-export async function handleDocumentCompleted(eventData: { documentId: string }) {
-  const doc = await db.query.documents.findFirst({
-    where: eq(documents.documensoId, eventData.documentId),
-  });
-  if (!doc) {
-    logger.warn({ documensoId: eventData.documentId }, 'Document not found for webhook');
-    return;
-  }
+export async function handleDocumentCompleted(eventData: { documentId: string; portId?: string }) {
+  const doc = await resolveWebhookDocument(eventData.documentId, eventData.portId);
+  if (!doc) return;
 
   // BR-022: Download signed PDF and store in MinIO
   const port = await db.query.ports.findFirst({ where: eq(ports.id, doc.portId) });
@@ -980,38 +1016,8 @@
 }
 
 export async function handleDocumentExpired(eventData: { documentId: string; portId?: string }) {
-  // Port-scoped lookup when the caller resolved a portId from the
-  // webhook signature. Two ports holding the same Documenso instance
-  // (or migrating between instances with id reuse) would otherwise
-  // share a documensoId across tenants, and findFirst would return
-  // whichever row sorted first — flipping a foreign-port document.
-  const matches = await db.query.documents.findMany({
-    where: eventData.portId
-      ? and(eq(documents.documensoId, eventData.documentId), eq(documents.portId, eventData.portId))
-      : eq(documents.documensoId, eventData.documentId),
-  });
-
-  if (matches.length === 0) {
-    logger.warn({ documensoId: eventData.documentId }, 'Document not found for webhook');
-    return;
-  }
-
-  if (matches.length > 1 && !eventData.portId) {
-    // Cross-tenant ambiguity. Refuse to mutate without a resolved port —
-    // safer to drop the event (the cron expiry sweep will catch up) than
-    // flip the wrong document.
-    logger.error(
-      {
-        documensoId: eventData.documentId,
-        matchCount: matches.length,
-        ports: matches.map((m) => m.portId),
-      },
-      'Document expired webhook ambiguous across multiple ports — refusing to mutate',
-    );
-    return;
-  }
-
-  const doc = matches[0]!;
+  const doc = await resolveWebhookDocument(eventData.documentId, eventData.portId);
+  if (!doc) return;
 
   await db
     .update(documents)
@@ -1038,14 +1044,10 @@ export async function handleDocumentOpened(eventData: {
   documentId: string;
   recipientEmail: string;
   signatureHash?: string;
+  portId?: string;
 }) {
-  const doc = await db.query.documents.findFirst({
-    where: eq(documents.documensoId, eventData.documentId),
-  });
-  if (!doc) {
-    logger.warn({ documensoId: eventData.documentId }, 'Document not found for webhook');
-    return;
-  }
+  const doc = await resolveWebhookDocument(eventData.documentId, eventData.portId);
+  if (!doc) return;
 
   const [signer] = await db
     .select()
    .from
@@ -1075,14 +1077,10 @@ export async function handleDocumentRejected(eventData: {
   documentId: string;
   recipientEmail?: string;
   signatureHash?: string;
+  portId?: string;
 }) {
-  const doc = await db.query.documents.findFirst({
-    where: eq(documents.documensoId, eventData.documentId),
-  });
-  if (!doc) {
-    logger.warn({ documensoId: eventData.documentId }, 'Document not found for webhook');
-    return;
-  }
+  const doc = await resolveWebhookDocument(eventData.documentId, eventData.portId);
+  if (!doc) return;
 
   let signerId: string | null = null;
   if (eventData.recipientEmail) {
@@ -1102,13 +1100,13 @@
   await db
     .update(documents)
     .set({ status: 'rejected', updatedAt: new Date() })
-    .where(eq(documents.id, doc.id));
+    .where(and(eq(documents.id, doc.id), eq(documents.portId, doc.portId)));
 
   if (doc.interestId && doc.documentType === 'eoi') {
     await db
       .update(interests)
       .set({ eoiStatus: 'rejected', updatedAt: new Date() })
-      .where(eq(interests.id, doc.interestId));
+      .where(and(eq(interests.id, doc.interestId), eq(interests.portId, doc.portId)));
   }
 
   await db.insert(documentEvents).values({
@@ -1128,25 +1126,21 @@ export async function handleDocumentCancelled(eventData: {
   documentId: string;
   signatureHash?: string;
+  portId?: string;
 }) {
-  const doc = await db.query.documents.findFirst({
-    where: eq(documents.documensoId, eventData.documentId),
-  });
-  if (!doc) {
-    logger.warn({ documensoId: eventData.documentId }, 'Document not found for webhook');
-    return;
-  }
+  const doc = await resolveWebhookDocument(eventData.documentId, eventData.portId);
+  if (!doc) return;
 
   await db
     .update(documents)
     .set({ status: 'cancelled', updatedAt: new Date() })
-    .where(eq(documents.id, doc.id));
+    .where(and(eq(documents.id, doc.id), eq(documents.portId, doc.portId)));
 
   if (doc.interestId && doc.documentType === 'eoi') {
     await db
      .update(interests)
       .set({ eoiStatus: 'cancelled', updatedAt: new Date() })
-      .where(eq(interests.id, doc.interestId));
+      .where(and(eq(interests.id, doc.interestId), eq(interests.portId, doc.portId)));
   }
 
   await db.insert(documentEvents).values({
diff --git a/src/lib/services/port-config.ts b/src/lib/services/port-config.ts
index cf590bf..e407ab1 100644
--- a/src/lib/services/port-config.ts
+++ b/src/lib/services/port-config.ts
@@ -225,9 +225,13 @@ export async function listDocumensoWebhookSecrets(): Promise {
   logger.info({ signal }, 'Shutdown signal received; closing connections');
 
-  // Stop accepting new HTTP connections, then drain in-flight ones.
+  // Order matters: close Socket.io first so it stops accepting new
+  // sockets and emits disconnect events while the HTTP server is still
+  // up to flush them. `httpServer.close` only stops new connections;
+  // it waits for keep-alive HTTP connections and Socket.io long-polling
+  // requests to drain on their own, so without an explicit io.close()
+  // upfront the polls hold the server past the compose stop_grace_period
+  // and the process gets SIGKILL'd mid-frame.
+  await closeSocketServer().catch((err) => logger.warn({ err }, 'closeSocketServer error'));
+
+  // Then drain the HTTP layer.
   await new Promise<void>((resolve) => {
     httpServer.close((err) => { if (err) logger.warn({ err }, 'httpServer.close emitted error'); resolve(); });
-    // Hard timeout — `httpServer.close` waits for ALL keep-alive sockets
-    // to drain on their own, which can stretch much longer than the
-    // compose stop_grace_period. 25s leaves headroom under a 30s grace.
+    // Hard timeout — 25s leaves headroom under a 30s compose grace
+    // period before SIGKILL would arrive anyway.
     setTimeout(() => resolve(), 25_000).unref();
   });
 
-  await closeSocketServer().catch((err) => logger.warn({ err }, 'closeSocketServer error'));
-
   try { redis.disconnect(); } catch (err) {