fix(audit-tier-1): timeouts, lifecycle, per-port Documenso, FK constraints
Closes the second wave of HIGH-priority audit findings:

* fetchWithTimeout helper (new src/lib/fetch-with-timeout.ts) wraps
  Documenso, OCR, currency, Umami, IMAP, etc. — a hung upstream can no
  longer pin a worker concurrency slot indefinitely. The OpenAI client
  passes timeout: 30_000; ImapFlow gets socket / greeting / connection
  timeouts.
* SIGTERM / SIGINT handler in src/server.ts drains in-flight HTTP,
  closes Socket.io, and disconnects Redis before exit; compose
  stop_grace_period bumped to 30s. Adds a closeSocketServer() helper.
* env.ts gains zod-validated PORT and MULTI_NODE_DEPLOYMENT, and
  filesystem.ts now reads from env (a typo can no longer silently
  disable the multi-node guard).
* Per-port Documenso template + recipient IDs land in system_settings
  with env fallback (PortDocumensoConfig now exposes eoiTemplateId,
  clientRecipientId, developerRecipientId, approvalRecipientId).
  document-templates.ts uses the per-port config and threads portId
  into documensoGenerateFromTemplate().
* Migration 0042 wires the eleven HIGH-tier missing FK constraints
  (documents / files / interests / reminders / berth_waiting_list /
  form_submissions) plus polymorphic CHECK round 2
  (yacht_ownership_history.owner_type, document_sends.document_kind),
  a non-empty CHECK on invoices.billing_entity_id, and a
  clients.merged_into_client_id self-FK. Drizzle schema columns are
  updated to .references(...) where possible, so the misleading
  "FK wired in relations.ts" comments are gone.

Test status: 1168/1168 vitest, tsc clean.

Refs: docs/audit-comprehensive-2026-05-05.md HIGH §§5,6,7,8,9,10 + MED §§14,15,16,18.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
@@ -46,6 +46,10 @@ services:
       interval: 15s
       timeout: 5s
       retries: 3
+    # Give the SIGTERM handler in src/server.ts time to drain in-flight
+    # HTTP requests, close Socket.io, and disconnect Redis before Docker
+    # SIGKILLs the process. The internal hard timeout is 25s.
+    stop_grace_period: 30s
     restart: unless-stopped
     networks:
       - internal
@@ -58,6 +62,9 @@ services:
         condition: service_healthy
       redis:
         condition: service_healthy
+    # Match the app: BullMQ jobs need time to finish or be released back
+    # to the queue when worker.ts handles SIGTERM.
+    stop_grace_period: 30s
     restart: unless-stopped
     networks:
       - internal

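The compose comment above leans on worker.ts handling SIGTERM, which is outside this diff. A minimal sketch of what that drain could look like, assuming a BullMQ Worker; the worker name and connection details are illustrative, not the repo's actual worker.ts:

```ts
import { Worker } from 'bullmq';

// Hypothetical stand-in for the workers that worker.ts actually builds.
const maintenanceWorker = new Worker(
  'maintenance',
  async () => {
    // ... job processing ...
  },
  { connection: { host: 'redis', port: 6379 } },
);

for (const sig of ['SIGTERM', 'SIGINT'] as const) {
  process.once(sig, () => {
    // worker.close() stops fetching new jobs and waits for in-flight ones
    // to finish; anything still running when Docker finally SIGKILLs is
    // later recovered by BullMQ's stalled-job check and re-queued.
    void maintenanceWorker.close().finally(() => process.exit(0));
  });
}
```
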
124 src/lib/db/migrations/0042_missing_fk_constraints.sql Normal file
@@ -0,0 +1,124 @@
+-- Audit-final v3 — wire the FK columns currently exposed via Drizzle
+-- relations() but missing actual Postgres constraints. relations() only
+-- configures relational query JOINs; it does NOT install constraints, so
+-- a service that writes interest_id='nonexistent' faces no DB rejection
+-- and downstream null-tolerant joins silently misbehave.
+--
+-- All adds are NOT VALID-friendly (we use IF NOT EXISTS via DO blocks);
+-- the migration is idempotent so re-running it is safe.
+--
+-- Cascade rule:
+--   nullable column → ON DELETE SET NULL (orphan tolerance)
+--   notNull column  → ON DELETE RESTRICT (force callers to clean up)
+-- All audit-listed columns happen to be nullable so they get SET NULL.
+--
+-- Refs: docs/audit-comprehensive-2026-05-05.md HIGH §10 (auditor-C3 Issue 1).
+
+-- documents
+DO $$ BEGIN
+  ALTER TABLE documents
+    ADD CONSTRAINT documents_interest_id_fkey
+    FOREIGN KEY (interest_id) REFERENCES interests(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+DO $$ BEGIN
+  ALTER TABLE documents
+    ADD CONSTRAINT documents_yacht_id_fkey
+    FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+DO $$ BEGIN
+  ALTER TABLE documents
+    ADD CONSTRAINT documents_company_id_fkey
+    FOREIGN KEY (company_id) REFERENCES companies(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+DO $$ BEGIN
+  ALTER TABLE documents
+    ADD CONSTRAINT documents_reservation_id_fkey
+    FOREIGN KEY (reservation_id) REFERENCES berth_reservations(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+-- files
+DO $$ BEGIN
+  ALTER TABLE files
+    ADD CONSTRAINT files_yacht_id_fkey
+    FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+DO $$ BEGIN
+  ALTER TABLE files
+    ADD CONSTRAINT files_company_id_fkey
+    FOREIGN KEY (company_id) REFERENCES companies(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+-- interests (yacht_id is wired via relations only)
+DO $$ BEGIN
+  ALTER TABLE interests
+    ADD CONSTRAINT interests_yacht_id_fkey
+    FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+-- reminders
+DO $$ BEGIN
+  ALTER TABLE reminders
+    ADD CONSTRAINT reminders_interest_id_fkey
+    FOREIGN KEY (interest_id) REFERENCES interests(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+DO $$ BEGIN
+  ALTER TABLE reminders
+    ADD CONSTRAINT reminders_berth_id_fkey
+    FOREIGN KEY (berth_id) REFERENCES berths(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+-- berth_waiting_list
+DO $$ BEGIN
+  ALTER TABLE berth_waiting_list
+    ADD CONSTRAINT berth_waiting_list_yacht_id_fkey
+    FOREIGN KEY (yacht_id) REFERENCES yachts(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+-- form_submissions
+DO $$ BEGIN
+  ALTER TABLE form_submissions
+    ADD CONSTRAINT form_submissions_interest_id_fkey
+    FOREIGN KEY (interest_id) REFERENCES interests(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+-- ─── Polymorphic CHECK round 2 ──────────────────────────────────────────────
+-- 0036 covered yachts.current_owner_type and invoices.billing_entity_type.
+-- These two discriminators were missed and remain free-text.
+
+DO $$ BEGIN
+  ALTER TABLE yacht_ownership_history
+    ADD CONSTRAINT yacht_ownership_history_owner_type_chk
+    CHECK (owner_type IN ('client', 'company'));
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+DO $$ BEGIN
+  ALTER TABLE document_sends
+    ADD CONSTRAINT document_sends_document_kind_chk
+    CHECK (document_kind IN ('berth_pdf', 'brochure'));
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+-- ─── invoices.billing_entity_id sanity check ───────────────────────────────
+-- The schema declared notNull() with default('') which combined with the
+-- 0036 type CHECK lets a row insert with billing_entity_type='client' and
+-- billing_entity_id='' — the polymorphic resolver looks up the empty
+-- string and returns null with no DB-level signal.
+
+DO $$ BEGIN
+  ALTER TABLE invoices
+    ADD CONSTRAINT invoices_billing_entity_id_nonempty_chk
+    CHECK (billing_entity_id <> '');
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;
+
+-- ─── clients.merged_into_client_id self-FK ─────────────────────────────────
+-- Already nullable; populated when a client is soft-merged into another.
+
+DO $$ BEGIN
+  ALTER TABLE clients
+    ADD CONSTRAINT clients_merged_into_client_id_fkey
+    FOREIGN KEY (merged_into_client_id) REFERENCES clients(id) ON DELETE SET NULL;
+EXCEPTION WHEN duplicate_object THEN NULL; END $$;

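What the new constraints buy at runtime: a dangling reference is now rejected at the database instead of silently persisting. A sketch using the repo's drizzle `db` and `documents` schema objects; the IDs and the schema import path are illustrative, and the error shape is assumed from node-postgres:

```ts
import { db } from '@/lib/db';
import { documents } from '@/lib/db/schema/documents';

// Hypothetical write with a dangling interest_id. Before 0042 this row
// persisted and downstream null-tolerant joins quietly dropped it; now
// Postgres rejects it with a foreign_key_violation.
try {
  await db.insert(documents).values({
    portId: 'port-1',          // illustrative IDs only
    interestId: 'nonexistent', // no matching interests.id row
    documentType: 'eoi',
    title: 'EOI',
  });
} catch (err) {
  // SQLSTATE 23503 = foreign_key_violation (error shape assumed from
  // node-postgres; drizzle may wrap it depending on version).
  if ((err as { code?: string }).code === '23503') {
    // resolve or create the interest first, then retry
  }
}
```
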
@@ -295,6 +295,13 @@
       "when": 1778400000000,
       "tag": "0041_role_permissions_edit_keys",
       "breakpoints": true
     },
+    {
+      "idx": 42,
+      "version": "7",
+      "when": 1778500000000,
+      "tag": "0042_missing_fk_constraints",
+      "breakpoints": true
+    }
   ]
 }

@@ -13,6 +13,7 @@ import {
 } from 'drizzle-orm/pg-core';
 import { ports } from './ports';
 import { clients } from './clients';
+import { yachts } from './yachts';
 
 export const berths = pgTable(
   'berths',
@@ -158,7 +159,7 @@ export const berthWaitingList = pgTable(
     clientId: text('client_id')
       .notNull()
       .references(() => clients.id, { onDelete: 'cascade' }),
-    yachtId: text('yacht_id'), // FK added via relation; nullable (waiting for this yacht)
+    yachtId: text('yacht_id').references(() => yachts.id, { onDelete: 'set null' }),
     position: integer('position').notNull(),
     priority: text('priority').notNull().default('normal'), // normal, high
     notifyPref: text('notify_pref').default('email'), // email, in_app, both

@@ -34,7 +34,10 @@ export const clients = pgTable(
     /** When this client was merged into another (the "loser" of a dedup
      * merge), this points at the surviving client. Used by the
      * /admin/duplicates review queue to redirect any stragglers, and by
-     * the unmerge flow to restore. Null for live clients. */
+     * the unmerge flow to restore. Null for live clients. The Postgres
+     * self-FK is installed via migration 0042; Drizzle's table builder
+     * doesn't accept self-references in the column factory so the
+     * constraint isn't reflected here in `.references(...)`. */
     mergedIntoClientId: text('merged_into_client_id'),
     createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
     updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),

@@ -12,6 +12,10 @@ import {
 import { sql } from 'drizzle-orm';
 import { ports } from './ports';
 import { clients } from './clients';
+import { yachts } from './yachts';
+import { companies } from './companies';
+import { interests } from './interests';
+import { berthReservations } from './reservations';
 
 export const files = pgTable(
   'files',
@@ -23,8 +27,8 @@ export const files = pgTable(
       .notNull()
       .references(() => ports.id),
     clientId: text('client_id').references(() => clients.id),
-    yachtId: text('yacht_id'), // FK wired in relations.ts
-    companyId: text('company_id'), // FK wired in relations.ts
+    yachtId: text('yacht_id').references(() => yachts.id, { onDelete: 'set null' }),
+    companyId: text('company_id').references(() => companies.id, { onDelete: 'set null' }),
     filename: text('filename').notNull(),
     originalName: text('original_name').notNull(),
     mimeType: text('mime_type'),
@@ -52,11 +56,13 @@ export const documents = pgTable(
     portId: text('port_id')
       .notNull()
       .references(() => ports.id),
-    interestId: text('interest_id'), // references interests.id
+    interestId: text('interest_id').references(() => interests.id, { onDelete: 'set null' }),
     clientId: text('client_id').references(() => clients.id),
-    yachtId: text('yacht_id'), // FK wired in relations.ts
-    companyId: text('company_id'), // FK wired in relations.ts
-    reservationId: text('reservation_id'), // FK wired in relations.ts
+    yachtId: text('yacht_id').references(() => yachts.id, { onDelete: 'set null' }),
+    companyId: text('company_id').references(() => companies.id, { onDelete: 'set null' }),
+    reservationId: text('reservation_id').references(() => berthReservations.id, {
+      onDelete: 'set null',
+    }),
     documentType: text('document_type').notNull(), // eoi, contract, nda, reservation_agreement, other
     title: text('title').notNull(),
     status: text('status').notNull().default('draft'), // draft, sent, partially_signed, completed, expired, cancelled
@@ -220,7 +226,7 @@ export const formSubmissions = pgTable(
       .notNull()
       .references(() => formTemplates.id),
     clientId: text('client_id').references(() => clients.id),
-    interestId: text('interest_id'), // references interests.id
+    interestId: text('interest_id').references(() => interests.id, { onDelete: 'set null' }),
     token: text('token').notNull().unique(),
     prefilledData: jsonb('prefilled_data').default({}),
     submittedData: jsonb('submitted_data'),

@@ -13,6 +13,7 @@ import { sql } from 'drizzle-orm';
 import { ports } from './ports';
 import { clients } from './clients';
 import { berths } from './berths';
+import { yachts } from './yachts';
 
 // Pipeline stages: open, details_sent, in_communication, eoi_sent, eoi_signed, deposit_10pct, contract_sent, contract_signed, completed
 
@@ -28,7 +29,7 @@ export const interests = pgTable(
     clientId: text('client_id')
       .notNull()
       .references(() => clients.id),
-    yachtId: text('yacht_id'), // FK added via relation; nullable until pipeline leaves 'open'
+    yachtId: text('yacht_id').references(() => yachts.id, { onDelete: 'set null' }),
     pipelineStage: text('pipeline_stage').notNull().default('open'),
     leadCategory: text('lead_category'), // general_interest, specific_qualified, hot_lead
     source: text('source'), // website, manual, referral, broker

@@ -1,21 +1,17 @@
-import {
-  pgTable,
-  text,
-  boolean,
-  timestamp,
-  jsonb,
-  index,
-  uniqueIndex,
-} from 'drizzle-orm/pg-core';
+import { pgTable, text, boolean, timestamp, jsonb, index, uniqueIndex } from 'drizzle-orm/pg-core';
 import { sql } from 'drizzle-orm';
 import { ports } from './ports';
 import { clients } from './clients';
 import { files } from './documents';
 import { interests } from './interests';
 import { berths } from './berths';
 
 export const reminders = pgTable(
   'reminders',
   {
-    id: text('id').primaryKey().$defaultFn(() => crypto.randomUUID()),
+    id: text('id')
+      .primaryKey()
+      .$defaultFn(() => crypto.randomUUID()),
     portId: text('port_id')
       .notNull()
       .references(() => ports.id),
@@ -27,8 +23,8 @@ export const reminders = pgTable(
     assignedTo: text('assigned_to'), // user ID
     createdBy: text('created_by').notNull(),
     clientId: text('client_id').references(() => clients.id),
-    interestId: text('interest_id'), // references interests.id
-    berthId: text('berth_id'), // references berths.id
+    interestId: text('interest_id').references(() => interests.id, { onDelete: 'set null' }),
+    berthId: text('berth_id').references(() => berths.id, { onDelete: 'set null' }),
     autoGenerated: boolean('auto_generated').notNull().default(false),
     googleCalendarEventId: text('google_calendar_event_id'),
     googleCalendarSynced: boolean('google_calendar_synced').notNull().default(false),
@@ -40,16 +36,18 @@ export const reminders = pgTable(
   (table) => [
     index('idx_reminders_port').on(table.portId),
     index('idx_reminders_assigned').on(table.assignedTo, table.status),
-    index('idx_reminders_due').on(table.portId, table.dueAt).where(
-      sql`${table.status} IN ('pending', 'snoozed')`
-    ),
+    index('idx_reminders_due')
+      .on(table.portId, table.dueAt)
+      .where(sql`${table.status} IN ('pending', 'snoozed')`),
   ],
 );
 
 export const googleCalendarTokens = pgTable(
   'google_calendar_tokens',
   {
-    id: text('id').primaryKey().$defaultFn(() => crypto.randomUUID()),
+    id: text('id')
+      .primaryKey()
+      .$defaultFn(() => crypto.randomUUID()),
     userId: text('user_id').notNull().unique(),
     accessToken: text('access_token').notNull(), // encrypted
     refreshToken: text('refresh_token').notNull(), // encrypted
@@ -67,7 +65,9 @@ export const googleCalendarTokens = pgTable(
 export const googleCalendarCache = pgTable(
   'google_calendar_cache',
   {
-    id: text('id').primaryKey().$defaultFn(() => crypto.randomUUID()),
+    id: text('id')
+      .primaryKey()
+      .$defaultFn(() => crypto.randomUUID()),
     userId: text('user_id').notNull(),
     eventId: text('event_id').notNull(), // Google Calendar event ID
     title: text('title').notNull(),
@@ -88,7 +88,9 @@ export const googleCalendarCache = pgTable(
 export const notifications = pgTable(
   'notifications',
   {
-    id: text('id').primaryKey().$defaultFn(() => crypto.randomUUID()),
+    id: text('id')
+      .primaryKey()
+      .$defaultFn(() => crypto.randomUUID()),
     portId: text('port_id')
       .notNull()
       .references(() => ports.id),
@@ -114,7 +116,9 @@ export const notifications = pgTable(
 export const scheduledReports = pgTable(
   'scheduled_reports',
   {
-    id: text('id').primaryKey().$defaultFn(() => crypto.randomUUID()),
+    id: text('id')
+      .primaryKey()
+      .$defaultFn(() => crypto.randomUUID()),
     portId: text('port_id')
       .notNull()
       .references(() => ports.id),
@@ -135,7 +139,9 @@ export const scheduledReports = pgTable(
 export const reportRecipients = pgTable(
   'report_recipients',
   {
-    id: text('id').primaryKey().$defaultFn(() => crypto.randomUUID()),
+    id: text('id')
+      .primaryKey()
+      .$defaultFn(() => crypto.randomUUID()),
     reportId: text('report_id')
       .notNull()
       .references(() => scheduledReports.id, { onDelete: 'cascade' }),
@@ -151,7 +157,9 @@ export const reportRecipients = pgTable(
 export const generatedReports = pgTable(
   'generated_reports',
   {
-    id: text('id').primaryKey().$defaultFn(() => crypto.randomUUID()),
+    id: text('id')
+      .primaryKey()
+      .$defaultFn(() => crypto.randomUUID()),
     portId: text('port_id')
       .notNull()
       .references(() => ports.id),
@@ -171,9 +179,9 @@ export const generatedReports = pgTable(
   (table) => [
     index('idx_gr_port_created').on(table.portId, table.createdAt),
     index('idx_gr_port_status').on(table.portId, table.status),
-    index('idx_gr_scheduled').on(table.scheduledReportId).where(
-      sql`${table.scheduledReportId} IS NOT NULL`
-    ),
+    index('idx_gr_scheduled')
+      .on(table.scheduledReportId)
+      .where(sql`${table.scheduledReportId} IS NOT NULL`),
   ],
 );

@@ -65,6 +65,22 @@ const envSchema = z.object({
   PUBLIC_SITE_URL: z.string().url(),
   NODE_ENV: z.enum(['development', 'production', 'test']).default('development'),
   LOG_LEVEL: z.enum(['fatal', 'error', 'warn', 'info', 'debug', 'trace']).default('info'),
+  /**
+   * HTTP listener port. zod-coerced from PORT so a typo (`PORT=foo`) hard-
+   * fails at boot rather than silently listening on an ephemeral port.
+   */
+  PORT: z.coerce.number().int().positive().default(3000),
+  /**
+   * When true, the filesystem storage backend refuses to start (per
+   * src/lib/storage/filesystem.ts:192). Reading via the zod schema means
+   * a typo on the env var hard-fails at boot rather than silently
+   * disabling the multi-node guard. Per CLAUDE.md, multi-node deploys
+   * MUST use the s3-compatible backend.
+   */
+  MULTI_NODE_DEPLOYMENT: z
+    .enum(['true', 'false'])
+    .default('false')
+    .transform((v) => v === 'true'),
 });
 
 export type Env = z.infer<typeof envSchema>;

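A sketch of the failure modes these two fields close off. This is a reduced schema with only the new keys (the real envSchema in src/lib/env.ts has many more fields), shown with zod's safeParse so each case stands alone:

```ts
import { z } from 'zod';

// Reduced sketch of the two new keys added to envSchema above.
const envSchema = z.object({
  PORT: z.coerce.number().int().positive().default(3000),
  MULTI_NODE_DEPLOYMENT: z
    .enum(['true', 'false'])
    .default('false')
    .transform((v) => v === 'true'),
});

envSchema.safeParse({ PORT: 'foo' }).success;                   // false: 'foo' coerces to NaN, .int() rejects it
envSchema.safeParse({ MULTI_NODE_DEPLOYMENT: 'ture' }).success; // false: the enum rejects the typo
envSchema.parse({}).MULTI_NODE_DEPLOYMENT;                      // false: typed boolean, defaulted
```
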
70 src/lib/fetch-with-timeout.ts Normal file
@@ -0,0 +1,70 @@
+/**
+ * Fetch with a hard wall-clock timeout. Wraps `globalThis.fetch` with an
+ * AbortController so a hung upstream cannot pin a worker concurrency slot
+ * indefinitely (per docs/audit-comprehensive-2026-05-05.md HIGH §§5–6 and
+ * MED §13 — Documenso, OCR, and IMAP all needed this).
+ *
+ * - Default timeout is 30s; pass `timeoutMs` to override.
+ * - When the caller already supplies an AbortSignal via `init.signal`, both
+ *   sources can abort the request — first one to fire wins.
+ * - On timeout the rejection is a `DOMException('TimeoutError')` from
+ *   AbortController; callers can introspect via `err.name === 'AbortError'`
+ *   plus the timeout flag we attach.
+ */
+export interface FetchWithTimeoutOptions extends RequestInit {
+  /** Hard timeout in ms. Default 30_000. */
+  timeoutMs?: number;
+}
+
+export class FetchTimeoutError extends Error {
+  override readonly name = 'FetchTimeoutError';
+  constructor(
+    public readonly url: string,
+    public readonly timeoutMs: number,
+  ) {
+    super(`Request to ${url} exceeded ${timeoutMs}ms timeout`);
+  }
+}
+
+export async function fetchWithTimeout(
+  url: string,
+  init: FetchWithTimeoutOptions = {},
+): Promise<Response> {
+  const { timeoutMs = 30_000, signal: callerSignal, ...rest } = init;
+  const controller = new AbortController();
+
+  // Compose: if the caller already passed a signal, abort our controller
+  // when theirs aborts. We can't reuse theirs directly because we still
+  // own the timeout lifecycle.
+  if (callerSignal) {
+    if (callerSignal.aborted) {
+      controller.abort(callerSignal.reason);
+    } else {
+      callerSignal.addEventListener('abort', () => controller.abort(callerSignal.reason), {
+        once: true,
+      });
+    }
+  }
+
+  const timeoutId = setTimeout(() => {
+    controller.abort(new FetchTimeoutError(url, timeoutMs));
+  }, timeoutMs);
+
+  try {
+    return await fetch(url, { ...rest, signal: controller.signal });
+  } catch (err) {
+    // If we hit our own timeout, surface a typed error instead of the
+    // generic AbortError so call sites can branch on it.
+    if (
+      err instanceof Error &&
+      (err.name === 'AbortError' || err.name === 'TimeoutError') &&
+      controller.signal.aborted
+    ) {
+      const reason = controller.signal.reason;
+      if (reason instanceof FetchTimeoutError) throw reason;
+    }
+    throw err;
+  } finally {
+    clearTimeout(timeoutId);
+  }
+}

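A usage sketch of the helper above. `fetchHealth` is a hypothetical call site, not part of this commit; it shows the two knobs the file exports, `timeoutMs` and the typed `FetchTimeoutError`:

```ts
import { fetchWithTimeout, FetchTimeoutError } from '@/lib/fetch-with-timeout';

// Tighter-than-default bound for a health probe.
async function fetchHealth(baseUrl: string): Promise<boolean> {
  try {
    const res = await fetchWithTimeout(`${baseUrl}/api/v1/health`, { timeoutMs: 10_000 });
    return res.ok;
  } catch (err) {
    // On timeout the helper re-throws its typed FetchTimeoutError, so
    // "upstream hung" is distinguishable from DNS/socket failures.
    if (err instanceof FetchTimeoutError) return false;
    throw err;
  }
}
```
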
@@ -2,6 +2,7 @@ import { db } from '@/lib/db';
 import { currencyRates } from '@/lib/db/schema/system';
 import { eq, and } from 'drizzle-orm';
 import { logger } from '@/lib/logger';
+import { fetchWithTimeout } from '@/lib/fetch-with-timeout';
 
 export async function getRate(from: string, to: string): Promise<number | null> {
   if (from === to) return 1;
@@ -23,7 +24,7 @@ export async function convert(
 
 export async function refreshRates(): Promise<void> {
   try {
-    const res = await fetch('https://api.frankfurter.dev/v1/latest?base=USD');
+    const res = await fetchWithTimeout('https://api.frankfurter.dev/v1/latest?base=USD');
     if (!res.ok) throw new Error(`Frankfurter API error: ${res.status}`);
     const data = await res.json();
     const rates = data.rates as Record<string, number>;

@@ -1,6 +1,7 @@
 import { env } from '@/lib/env';
 import { logger } from '@/lib/logger';
 import { getPortDocumensoConfig, type DocumensoApiVersion } from '@/lib/services/port-config';
+import { fetchWithTimeout } from '@/lib/fetch-with-timeout';
 
 interface DocumensoCreds {
   baseUrl: string;
@@ -26,7 +27,7 @@ async function documensoFetch(
   portId?: string,
 ): Promise<unknown> {
   const { baseUrl, apiKey } = await resolveCreds(portId);
-  const res = await fetch(`${baseUrl}${path}`, {
+  const res = await fetchWithTimeout(`${baseUrl}${path}`, {
     ...options,
     headers: {
       Authorization: `Bearer ${apiKey}`,
@@ -241,7 +242,7 @@ export async function sendReminder(
 
 export async function downloadSignedPdf(docId: string, portId?: string): Promise<Buffer> {
   const { baseUrl, apiKey } = await resolveCreds(portId);
-  const res = await fetch(`${baseUrl}/api/v1/documents/${docId}/download`, {
+  const res = await fetchWithTimeout(`${baseUrl}/api/v1/documents/${docId}/download`, {
     headers: { Authorization: `Bearer ${apiKey}` },
   });
 
@@ -261,7 +262,7 @@ export async function checkDocumensoHealth(
 ): Promise<{ ok: boolean; status?: number; error?: string }> {
   try {
     const { baseUrl, apiKey } = await resolveCreds(portId);
-    const res = await fetch(`${baseUrl}/api/v1/health`, {
+    const res = await fetchWithTimeout(`${baseUrl}/api/v1/health`, {
       headers: { Authorization: `Bearer ${apiKey}` },
     });
     return { ok: res.ok, status: res.status };
@@ -355,7 +356,7 @@ export async function placeFields(
   // Note: v2 endpoint shape (envelopeId/recipientId types) must be
   // confirmed against a live Documenso 2.x instance - see PR11 realapi
   // suite. Spec risk register flags this drift as the top v2 risk.
-  const res = await fetch(`${baseUrl}/api/v2/envelope/field/create-many`, {
+  const res = await fetchWithTimeout(`${baseUrl}/api/v2/envelope/field/create-many`, {
     method: 'POST',
     headers: {
       Authorization: `Bearer ${apiKey}`,
@@ -387,7 +388,7 @@ export async function placeFields(
   // 1000 ms; 4xx responses (validation errors) fail-fast.
   let lastError: { status: number; body: string } | null = null;
   for (let attempt = 0; attempt < 3; attempt += 1) {
-    const res = await fetch(`${baseUrl}/api/v1/documents/${docId}/fields`, {
+    const res = await fetchWithTimeout(`${baseUrl}/api/v1/documents/${docId}/fields`, {
      method: 'POST',
      headers: {
        Authorization: `Bearer ${apiKey}`,
@@ -470,7 +471,7 @@ export function computeDefaultSignatureLayout(
 export async function voidDocument(docId: string, portId?: string): Promise<void> {
   const { baseUrl, apiKey, apiVersion } = await resolveCreds(portId);
   const path = apiVersion === 'v2' ? `/api/v2/envelope/${docId}` : `/api/v1/documents/${docId}`;
-  const res = await fetch(`${baseUrl}${path}`, {
+  const res = await fetchWithTimeout(`${baseUrl}${path}`, {
     method: 'DELETE',
     headers: { Authorization: `Bearer ${apiKey}` },
   });

@@ -25,6 +25,7 @@ import {
   generateDocumentFromTemplate as documensoGenerateFromTemplate,
 } from '@/lib/services/documenso-client';
 import { buildDocumensoPayload, getPortEoiSigners } from '@/lib/services/documenso-payload';
+import { getPortDocumensoConfig } from '@/lib/services/port-config';
 import { generateEoiPdfFromTemplate } from '@/lib/pdf/fill-eoi-form';
 import { MERGE_FIELDS, type MergeFieldCatalog } from '@/lib/templates/merge-fields';
 import { buildEoiContext } from '@/lib/services/eoi-context';
@@ -882,12 +883,17 @@ async function generateAndSignViaDocumensoTemplate(
 
   const eoiContext = await buildEoiContext(context.interestId, portId);
   const signers = await getPortEoiSigners(portId);
+  // Per-port Documenso template + recipient IDs (with env fallback). Each
+  // tenant pointing at its own Documenso instance has different numeric
+  // template + recipient IDs, so a global env-only setup limits the
+  // platform to one Documenso instance per CRM process.
+  const docCfg = await getPortDocumensoConfig(portId);
 
   const payload = buildDocumensoPayload(eoiContext, {
     interestId: context.interestId,
-    clientRecipientId: env.DOCUMENSO_CLIENT_RECIPIENT_ID,
-    developerRecipientId: env.DOCUMENSO_DEVELOPER_RECIPIENT_ID,
-    approvalRecipientId: env.DOCUMENSO_APPROVAL_RECIPIENT_ID,
+    clientRecipientId: docCfg.clientRecipientId,
+    developerRecipientId: docCfg.developerRecipientId,
+    approvalRecipientId: docCfg.approvalRecipientId,
     developerName: signers.developer.name,
     developerEmail: signers.developer.email,
     approverName: signers.approver.name,
@@ -896,8 +902,9 @@ async function generateAndSignViaDocumensoTemplate(
   });
 
   const documensoDoc = await documensoGenerateFromTemplate(
-    env.DOCUMENSO_TEMPLATE_ID_EOI,
+    docCfg.eoiTemplateId,
     payload as unknown as Record<string, unknown>,
+    portId,
   );
 
   // Record a documents row referencing the Documenso document. No local file -

@@ -265,6 +265,13 @@ export async function syncInbox(accountId: string): Promise<void> {
       pass: creds.password,
     },
     logger: false,
+    // Without these, a slow-streaming UID can hang the maintenance worker
+    // indefinitely (worker concurrency 1 → entire BullMQ maintenance queue
+    // stalls). 60s socket / 30s greeting / 30s connect is generous for any
+    // working server but bounds the hang.
+    socketTimeout: 60_000,
+    greetingTimeout: 30_000,
+    connectionTimeout: 30_000,
   });
 
   try {

@@ -7,6 +7,9 @@
 import OpenAI from 'openai';
 
 import { logger } from '@/lib/logger';
+import { fetchWithTimeout } from '@/lib/fetch-with-timeout';
+
+const OCR_TIMEOUT_MS = 30_000;
 
 export interface ParsedReceiptLineItem {
   description: string;
@@ -73,7 +76,10 @@ function safeParse(content: string): ParsedReceipt {
 }
 
 async function runOpenAi({ imageBuffer, mimeType, apiKey, model }: RunArgs): Promise<OcrRunResult> {
-  const client = new OpenAI({ apiKey });
+  // Default OpenAI client has no timeout — a hung request would hold a Bull
+  // documents-worker concurrency slot until the OS reset it (~15 min). The
+  // 30s cap matches the cap on the (newer) email-draft worker fetch.
+  const client = new OpenAI({ apiKey, timeout: OCR_TIMEOUT_MS });
   const base64 = imageBuffer.toString('base64');
   const response = await client.chat.completions.create({
     model,
@@ -106,7 +112,8 @@ async function runOpenAi({ imageBuffer, mimeType, apiKey, model }: RunArgs): Pro
 
 async function runClaude({ imageBuffer, mimeType, apiKey, model }: RunArgs): Promise<OcrRunResult> {
   const base64 = imageBuffer.toString('base64');
-  const res = await fetch('https://api.anthropic.com/v1/messages', {
+  const res = await fetchWithTimeout('https://api.anthropic.com/v1/messages', {
+    timeoutMs: OCR_TIMEOUT_MS,
     method: 'POST',
     headers: {
       'Content-Type': 'application/json',

@@ -30,6 +30,12 @@ export const SETTING_KEYS = {
   documensoApiKeyOverride: 'documenso_api_key_override',
   documensoApiVersionOverride: 'documenso_api_version_override',
   documensoEoiTemplateId: 'documenso_eoi_template_id',
+  // Documenso template recipient slot IDs are per-Documenso-instance
+  // numeric values, so they have to follow the per-port template config.
+  // Falling back to env keeps single-tenant deploys working.
+  documensoClientRecipientId: 'documenso_client_recipient_id',
+  documensoDeveloperRecipientId: 'documenso_developer_recipient_id',
+  documensoApprovalRecipientId: 'documenso_approval_recipient_id',
   eoiDefaultPathway: 'eoi_default_pathway',
 
   // Branding
@@ -136,16 +142,41 @@ export interface PortDocumensoConfig {
   apiUrl: string;
   apiKey: string;
   apiVersion: DocumensoApiVersion;
-  eoiTemplateId: string | null;
+  eoiTemplateId: number;
   defaultPathway: EoiPathway;
+  /** Documenso template recipient slot IDs (per-instance numeric). */
+  clientRecipientId: number;
+  developerRecipientId: number;
+  approvalRecipientId: number;
 }
 
+function toIntOrNull(raw: unknown): number | null {
+  if (typeof raw === 'number' && Number.isFinite(raw)) return raw;
+  if (typeof raw === 'string' && raw.trim()) {
+    const n = Number(raw);
+    return Number.isFinite(n) ? n : null;
+  }
+  return null;
+}
+
 export async function getPortDocumensoConfig(portId: string): Promise<PortDocumensoConfig> {
-  const [apiUrl, apiKey, apiVersion, eoiTemplateId, defaultPathway] = await Promise.all([
+  const [
+    apiUrl,
+    apiKey,
+    apiVersion,
+    eoiTemplateId,
+    clientRecipientId,
+    developerRecipientId,
+    approvalRecipientId,
+    defaultPathway,
+  ] = await Promise.all([
     readSetting<string>(SETTING_KEYS.documensoApiUrlOverride, portId),
     readSetting<string>(SETTING_KEYS.documensoApiKeyOverride, portId),
     readSetting<DocumensoApiVersion>(SETTING_KEYS.documensoApiVersionOverride, portId),
-    readSetting<string>(SETTING_KEYS.documensoEoiTemplateId, portId),
+    readSetting<string | number>(SETTING_KEYS.documensoEoiTemplateId, portId),
+    readSetting<string | number>(SETTING_KEYS.documensoClientRecipientId, portId),
+    readSetting<string | number>(SETTING_KEYS.documensoDeveloperRecipientId, portId),
+    readSetting<string | number>(SETTING_KEYS.documensoApprovalRecipientId, portId),
     readSetting<EoiPathway>(SETTING_KEYS.eoiDefaultPathway, portId),
   ]);
 
@@ -153,7 +184,10 @@ export async function getPortDocumensoConfig(portId: string): Promise<PortDocume
     apiUrl: apiUrl ?? env.DOCUMENSO_API_URL,
     apiKey: apiKey ?? env.DOCUMENSO_API_KEY,
     apiVersion: apiVersion ?? env.DOCUMENSO_API_VERSION,
-    eoiTemplateId: eoiTemplateId ?? null,
+    eoiTemplateId: toIntOrNull(eoiTemplateId) ?? env.DOCUMENSO_TEMPLATE_ID_EOI,
+    clientRecipientId: toIntOrNull(clientRecipientId) ?? env.DOCUMENSO_CLIENT_RECIPIENT_ID,
+    developerRecipientId: toIntOrNull(developerRecipientId) ?? env.DOCUMENSO_DEVELOPER_RECIPIENT_ID,
+    approvalRecipientId: toIntOrNull(approvalRecipientId) ?? env.DOCUMENSO_APPROVAL_RECIPIENT_ID,
     defaultPathway: defaultPathway ?? 'documenso-template',
   };
 }

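A sketch of the resulting fallback behavior; 'port-123' and the override value are illustrative:

```ts
import { getPortDocumensoConfig } from '@/lib/services/port-config';

// With no documenso_* rows in system_settings for this port, every field
// falls back to env, so a single-tenant deploy behaves exactly as before.
const cfg = await getPortDocumensoConfig('port-123');
cfg.eoiTemplateId;     // env.DOCUMENSO_TEMPLATE_ID_EOI
cfg.clientRecipientId; // env.DOCUMENSO_CLIENT_RECIPIENT_ID

// A per-port override row (key 'documenso_eoi_template_id', value '42' or 42)
// wins instead; toIntOrNull() normalizes both string and number settings.
```
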
@@ -21,6 +21,7 @@ import { and, eq, inArray } from 'drizzle-orm';
 import { db } from '@/lib/db';
 import { systemSettings } from '@/lib/db/schema/system';
 import { rangeToBounds, type DateRange } from '@/lib/analytics/range';
+import { fetchWithTimeout } from '@/lib/fetch-with-timeout';
 
 // ─── Settings access ────────────────────────────────────────────────────────
 
@@ -80,7 +81,7 @@ const jwtCache = new Map<string, CachedJwt>();
 const JWT_TTL_MS = 55 * 60 * 1000; // 55 min - Umami JWTs default to 1h
 
 async function loginAndCache(apiUrl: string, username: string, password: string): Promise<string> {
-  const res = await fetch(`${apiUrl}/api/auth/login`, {
+  const res = await fetchWithTimeout(`${apiUrl}/api/auth/login`, {
     method: 'POST',
     headers: { 'Content-Type': 'application/json', accept: 'application/json' },
     body: JSON.stringify({ username, password }),
@@ -122,7 +123,7 @@ async function umamiFetch<T>(
     url.searchParams.set(k, String(v));
   }
 
-  const res = await fetch(url, {
+  const res = await fetchWithTimeout(url.toString(), {
     headers: {
       Authorization: `Bearer ${bearer}`,
       accept: 'application/json',

@@ -170,6 +170,17 @@ export function getIO(): Server<ClientToServerEvents, ServerToClientEvents> {
   return io;
 }
 
+/**
+ * Idempotent shutdown for graceful-stop on SIGTERM/SIGINT — closes the
+ * Socket.io server and (if present) its Redis adapter pub/sub clients so
+ * the process can exit cleanly.
+ */
+export async function closeSocketServer(): Promise<void> {
+  if (!io) return;
+  await new Promise<void>((resolve) => io!.close(() => resolve()));
+  io = null;
+}
+
 /**
  * Emit an event to a specific room. Used by service layer after mutations.
  */

@@ -189,7 +189,12 @@ export class FilesystemBackend implements StorageBackend {
 
   /** Throws if multi-node mode is set or the root isn't writable. */
   static async create(cfg: FilesystemConfig): Promise<FilesystemBackend> {
-    if (process.env.MULTI_NODE_DEPLOYMENT === 'true') {
+    // Read from the zod-validated env, not raw process.env — a typo
+    // (MULTI_NODE_DEPLOY=true, MULTINODE_DEPLOYMENT=true) used to silently
+    // pass the string-equality check, leaving the multi-node guard
+    // disabled. The schema in src/lib/env.ts now coerces the value and
+    // rejects unknown shapes at boot.
+    if (env.MULTI_NODE_DEPLOYMENT) {
       throw new Error(
         'FilesystemBackend cannot start when MULTI_NODE_DEPLOYMENT=true. ' +
           'Use an S3-compatible backend for multi-node deployments.',

@@ -9,18 +9,45 @@
  * → dist/worker.js) and this file only handles Next.js + Socket.io.
  */
 
-import { createServer } from 'node:http';
+import { createServer, type Server as HttpServer } from 'node:http';
 
 import next from 'next';
 
-import { initSocketServer } from '@/lib/socket/server';
+import { initSocketServer, closeSocketServer } from '@/lib/socket/server';
 import { logger } from '@/lib/logger';
+import { env } from '@/lib/env';
+import { redis } from '@/lib/redis';
 
-const dev = process.env.NODE_ENV !== 'production';
-const port = parseInt(process.env.PORT ?? '3000', 10);
+const dev = env.NODE_ENV !== 'production';
+
+async function gracefulShutdown(signal: string, httpServer: HttpServer): Promise<void> {
+  logger.info({ signal }, 'Shutdown signal received; closing connections');
+
+  // Stop accepting new HTTP connections, then drain in-flight ones.
+  await new Promise<void>((resolve) => {
+    httpServer.close((err) => {
+      if (err) logger.warn({ err }, 'httpServer.close emitted error');
+      resolve();
+    });
+    // Hard timeout — `httpServer.close` waits for ALL keep-alive sockets
+    // to drain on their own, which can stretch much longer than the
+    // compose stop_grace_period. 25s leaves headroom under a 30s grace.
+    setTimeout(() => resolve(), 25_000).unref();
+  });
+
+  await closeSocketServer().catch((err) => logger.warn({ err }, 'closeSocketServer error'));
+
+  try {
+    redis.disconnect();
+  } catch (err) {
+    logger.warn({ err }, 'redis.disconnect error');
+  }
+
+  logger.info({ signal }, 'Shutdown complete');
+}
 
 async function main(): Promise<void> {
-  const app = next({ dev, port });
+  const app = next({ dev, port: env.PORT });
   const handle = app.getRequestHandler();
 
   await app.prepare();
@@ -49,9 +76,20 @@ async function main(): Promise<void> {
     void [emailWorker, documentsWorker, notificationsWorker, importWorker, exportWorker];
   }
 
-  httpServer.listen(port, () => {
-    logger.info({ port, env: process.env.NODE_ENV }, 'Port Nimara CRM server listening');
+  httpServer.listen(env.PORT, () => {
+    logger.info({ port: env.PORT, env: env.NODE_ENV }, 'Port Nimara CRM server listening');
   });
 
+  // Graceful stop on container restart / deploy. Without this, every
+  // `docker compose up -d` rolling restart drops in-flight uploads, EOI
+  // generation, Documenso requests, and Socket.io frames mid-statement.
+  // Match docker-compose `stop_grace_period: 30s` (or longer) so the
+  // 25s drain inside gracefulShutdown can complete before SIGKILL.
+  for (const sig of ['SIGTERM', 'SIGINT'] as const) {
+    process.once(sig, () => {
+      void gracefulShutdown(sig, httpServer).finally(() => process.exit(0));
+    });
+  }
 }
 
 main().catch((err) => {

@@ -36,8 +36,11 @@ function configurePort(version: 'v1' | 'v2'): void {
     apiUrl: 'https://documenso.test',
     apiKey: 'sk_test',
     apiVersion: version,
-    eoiTemplateId: null,
+    eoiTemplateId: 8,
     defaultPathway: 'documenso-template',
+    clientRecipientId: 192,
+    developerRecipientId: 193,
+    approvalRecipientId: 194,
   });
 }

@@ -14,8 +14,20 @@ import { mkdtemp, rm, mkdir, symlink } from 'node:fs/promises';
 import * as path from 'node:path';
 import { tmpdir } from 'node:os';
 
-import { afterEach, beforeAll, beforeEach, describe, expect, it } from 'vitest';
+import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from 'vitest';
 
+// Stub the env module BEFORE importing the backend so the
+// MULTI_NODE_DEPLOYMENT toggle works — env is now read from the zod
+// schema once at module load, not from process.env at runtime.
+vi.mock('@/lib/env', async () => {
+  const actual = await vi.importActual<typeof import('@/lib/env')>('@/lib/env');
+  return {
+    ...actual,
+    env: { ...actual.env, MULTI_NODE_DEPLOYMENT: false },
+  };
+});
+
+import { env } from '@/lib/env';
 import {
   FilesystemBackend,
   signProxyToken,
@@ -125,8 +137,12 @@ describe('FilesystemBackend realpath check', () => {
   });
 
   it('refuses to start when MULTI_NODE_DEPLOYMENT=true', async () => {
-    const prev = process.env.MULTI_NODE_DEPLOYMENT;
-    process.env.MULTI_NODE_DEPLOYMENT = 'true';
+    const prev = env.MULTI_NODE_DEPLOYMENT;
+    // The backend reads env.MULTI_NODE_DEPLOYMENT (zod-validated, set
+    // once at module load). Mutate the in-memory env for the duration of
+    // this case — the surrounding vi.mock() above keeps every other env
+    // field intact.
+    (env as unknown as { MULTI_NODE_DEPLOYMENT: boolean }).MULTI_NODE_DEPLOYMENT = true;
     try {
       const tmp = await mkdtemp(path.join(tmpdir(), 'pn-storage-mn-'));
       await expect(
@@ -134,8 +150,7 @@ describe('FilesystemBackend realpath check', () => {
       ).rejects.toThrow(/MULTI_NODE_DEPLOYMENT/);
       await rm(tmp, { recursive: true, force: true });
     } finally {
-      if (prev === undefined) delete process.env.MULTI_NODE_DEPLOYMENT;
-      else process.env.MULTI_NODE_DEPLOYMENT = prev;
+      (env as unknown as { MULTI_NODE_DEPLOYMENT: boolean }).MULTI_NODE_DEPLOYMENT = prev;
     }
   });