fix(audit): reliability HIGHs — smart-restore re-link, TOCTOU lock, bulk wrong-interest, ext-EOI tx, bulk idempotency

R2-H1: smart-restore's berth_released auto-reversal was a no-op while
the wizard claimed success. Now uses the persisted interestId from
the decision detail to re-insert the interest_berths link and flip
the berth status back to under_offer. Verifies the interest still
exists and isn't archived before re-linking.

R2-H2: smart-archive berth status update had a TOCTOU race — read
outside tx, write inside without a lock. Now selects-for-update the
berths row inside the tx and re-checks status against the locked row
before flipping to available, preventing concurrent archive+sale
from un-selling a berth.

R2-H3: bulk-archive's berth→interest lookup fell back to
dossier.interests[0]?.interestId ?? '', which sent empty-string
interestIds that silently matched zero rows. Dossier now exposes
linkedInterestIds[] per berth (authoritative interest_berths join);
bulk + single-client wizard both use it and skip berths with no
linked interest. Affected:
- src/lib/services/client-archive-dossier.service.ts (DossierBerth)
- src/app/api/v1/clients/bulk/route.ts
- src/components/clients/smart-archive-dialog.tsx

R2-H4: external-EOI ran storage upload + 4 DB writes outside a
transaction. Now wraps file/document/event/interest writes in a
single tx; storage upload stays before the tx (S3 isn't
transactional); an orphaned object on tx failure is acceptable.

R2-H5: bulk archive double-submit treated already-archived clients as
per-row failures. Bulk callback now early-returns success when the
dossier shows archivedAt is set, making the endpoint idempotent.

1175/1175 vitest passing.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Matt Ciaccio
2026-05-06 22:11:00 +02:00
parent 588f8bc43c
commit 94331bd6ec
6 changed files with 208 additions and 115 deletions

View File

@@ -90,6 +90,13 @@ export const POST = withAuth(async (req, ctx) => {
// a per-client reason supplied via reasonsByClientId; the bulk- // a per-client reason supplied via reasonsByClientId; the bulk-
// archive wizard captures these one at a time before submitting. // archive wizard captures these one at a time before submitting.
const dossier = await getClientArchiveDossier(id, ctx.portId); const dossier = await getClientArchiveDossier(id, ctx.portId);
// Idempotent: if a previous request already archived this client
// (e.g. a network retry / double-click), treat it as success
// rather than letting `archiveClientWithDecisions` throw a
// ConflictError that runBulk will surface as a per-row failure.
if (dossier.client.archivedAt) {
return;
}
const perClientReason = reasonsByClientId[id]; const perClientReason = reasonsByClientId[id];
if (dossier.stakeLevel === 'high' && !perClientReason) { if (dossier.stakeLevel === 'high' && !perClientReason) {
throw new Error( throw new Error(
@@ -103,20 +110,32 @@ export const POST = withAuth(async (req, ctx) => {
(d) => d.status === 'completed' || d.status === 'signed', (d) => d.status === 'completed' || d.status === 'signed',
); );
const reason = perClientReason ?? 'Bulk archive (low-stakes auto-mode)'; const reason = perClientReason ?? 'Bulk archive (low-stakes auto-mode)';
// Pick the berth's first linked interest from the dossier
// (authoritative interest_berths join). Berths with no linked
// interest for this client are dropped — emitting an empty
// interestId causes the delete to silently match zero rows
// (audit R2-H3).
const berthDecisions = dossier.berths
.map((b) => {
const interestId = b.linkedInterestIds[0];
if (!interestId) return null;
return {
berthId: b.berthId,
interestId,
action: b.status === 'sold' ? ('retain' as const) : ('release' as const),
};
})
.filter(
(x): x is { berthId: string; interestId: string; action: 'retain' | 'release' } =>
x !== null,
);
const result = await archiveClientWithDecisions({ const result = await archiveClientWithDecisions({
dossier, dossier,
decisions: { decisions: {
reason, reason,
acknowledgedSignedDocuments: hasSignedDocs, acknowledgedSignedDocuments: hasSignedDocs,
berthDecisions: dossier.berths.map((b) => ({ berthDecisions,
berthId: b.berthId,
interestId:
dossier.interests.find((i) => i.primaryBerthMooring === b.mooringNumber)
?.interestId ??
dossier.interests[0]?.interestId ??
'',
action: b.status === 'sold' ? 'retain' : 'release',
})),
yachtDecisions: dossier.yachts.map((y) => ({ yachtId: y.yachtId, action: 'retain' })), yachtDecisions: dossier.yachts.map((y) => ({ yachtId: y.yachtId, action: 'retain' })),
reservationDecisions: dossier.reservations.map((r) => ({ reservationDecisions: dossier.reservations.map((r) => ({
reservationId: r.reservationId, reservationId: r.reservationId,

View File

@@ -23,6 +23,7 @@ interface DossierBerth {
berthId: string; berthId: string;
mooringNumber: string; mooringNumber: string;
status: string; status: string;
linkedInterestIds: string[];
otherInterests: Array<{ otherInterests: Array<{
interestId: string; interestId: string;
clientId: string | null; clientId: string | null;
@@ -155,17 +156,23 @@ export function SmartArchiveDialog({ open, onOpenChange, clientId, clientName, o
const archiveMutation = useMutation({ const archiveMutation = useMutation({
mutationFn: () => { mutationFn: () => {
if (!dossier) throw new Error('No dossier'); if (!dossier) throw new Error('No dossier');
const berthDec = dossier.berths.map((b) => ({ // Pick the first linked interest for this berth from the
berthId: b.berthId, // authoritative dossier join. Berths with no linked interest for
// The interestId for this berthuse the first interest in the // this client are skipped — sending an empty interestId would
// dossier that has this berth as its primary. Fallback to the // make the server-side delete silently match zero rows.
// first interest at all (the API only needs the link reference). const berthDec = dossier.berths
interestId: .map((b) => {
dossier.interests.find((i) => i.primaryBerthMooring === b.mooringNumber)?.interestId ?? const interestId = b.linkedInterestIds[0];
dossier.interests[0]?.interestId ?? if (!interestId) return null;
'', return {
action: berthDecisions[b.berthId] ?? 'retain', berthId: b.berthId,
})); interestId,
action: berthDecisions[b.berthId] ?? 'retain',
};
})
.filter(
(x): x is { berthId: string; interestId: string; action: BerthAction } => x !== null,
);
return apiFetch<{ data: { releasedBerths: Array<{ mooringNumber: string }> } }>( return apiFetch<{ data: { releasedBerths: Array<{ mooringNumber: string }> } }>(
`/api/v1/clients/${clientId}/archive`, `/api/v1/clients/${clientId}/archive`,
{ {

View File

@@ -47,6 +47,11 @@ export interface DossierBerth {
berthId: string; berthId: string;
mooringNumber: string; mooringNumber: string;
status: string; // 'available' | 'under_offer' | 'sold' status: string; // 'available' | 'under_offer' | 'sold'
/** Every interest of THIS client that links to the berth. The bulk
* wizard uses this to pick the right interestId per berth instead of
* guessing by primary-mooring (which fails when multiple interests
* share a primary or when none is primary). */
linkedInterestIds: string[];
/** Other interests still actively expressing interest in this berth /** Other interests still actively expressing interest in this berth
* (so the next-in-line notification can list them). */ * (so the next-in-line notification can list them). */
otherInterests: Array<{ otherInterests: Array<{
@@ -267,10 +272,18 @@ export async function getClientArchiveDossier(
.orderBy(desc(interests.updatedAt)) .orderBy(desc(interests.updatedAt))
.limit(10); .limit(10);
// Every linked interest belonging to THIS client (multiple
// interests can share a berth — primary flag is at most one per
// interest, not per berth).
const linkedInterestIds = Array.from(
new Set(interestBerthRows.filter((r) => r.berthId === berthId).map((r) => r.interestId)),
);
dossierBerths.push({ dossierBerths.push({
berthId, berthId,
mooringNumber: berth.mooringNumber, mooringNumber: berth.mooringNumber,
status: berth.berthStatus, status: berth.berthStatus,
linkedInterestIds,
otherInterests: others.map((o) => ({ otherInterests: others.map((o) => ({
interestId: o.interestId, interestId: o.interestId,
clientId: o.clientId, clientId: o.clientId,

View File

@@ -177,6 +177,19 @@ export async function archiveClientWithDecisions(args: {
const berth = dossier.berths.find((b) => b.berthId === d.berthId); const berth = dossier.berths.find((b) => b.berthId === d.berthId);
if (!berth) continue; if (!berth) continue;
if (d.action === 'release') { if (d.action === 'release') {
// Lock the berth row so a concurrent sale can't flip the status
// between our read of dossier.berths (outside the tx) and our
// write below. Without this lock, A archives client X while B
// sells berth A1 to client Y — A's pre-tx read says
// status='under_offer', B commits status='sold', A's update
// would flip it back to 'available'.
const [locked] = await tx
.select({ status: berths.status })
.from(berths)
.where(eq(berths.id, d.berthId))
.for('update');
const lockedStatus = locked?.status ?? berth.status;
// Drop the interest_berths link for this client's interest. Other // Drop the interest_berths link for this client's interest. Other
// interests on the berth survive (so the next-in-line notification // interests on the berth survive (so the next-in-line notification
// can fire). // can fire).
@@ -186,9 +199,10 @@ export async function archiveClientWithDecisions(args: {
and(eq(interestBerths.berthId, d.berthId), eq(interestBerths.interestId, d.interestId)), and(eq(interestBerths.berthId, d.berthId), eq(interestBerths.interestId, d.interestId)),
); );
// If no remaining interestBerths row marks this berth as // If no remaining interestBerths row marks this berth as
// is_specific_interest, set the berth status back to available // is_specific_interest, set the berth status back to available.
// (sold berths are immutable from this flow per design). // Sold berths are immutable from this flow — also re-checked
if (berth.status !== 'sold') { // against the freshly-locked row, not the pre-tx dossier read.
if (lockedStatus !== 'sold') {
const [stillUnderOffer] = await tx const [stillUnderOffer] = await tx
.select({ count: sql<number>`count(*)::int` }) .select({ count: sql<number>`count(*)::int` })
.from(interestBerths) .from(interestBerths)

View File

@@ -20,7 +20,7 @@ import { and, eq, isNull, ne, sql } from 'drizzle-orm';
import { db } from '@/lib/db'; import { db } from '@/lib/db';
import { clients } from '@/lib/db/schema/clients'; import { clients } from '@/lib/db/schema/clients';
import { interests } from '@/lib/db/schema/interests'; import { interests, interestBerths } from '@/lib/db/schema/interests';
import { berths } from '@/lib/db/schema/berths'; import { berths } from '@/lib/db/schema/berths';
import { yachts } from '@/lib/db/schema/yachts'; import { yachts } from '@/lib/db/schema/yachts';
import { portalUsers } from '@/lib/db/schema/portal'; import { portalUsers } from '@/lib/db/schema/portal';
@@ -42,6 +42,10 @@ export interface RestoreReversal {
label: string; label: string;
/** Why this is being shown the way it is (e.g. "berth still available"). */ /** Why this is being shown the way it is (e.g. "berth still available"). */
reason: string; reason: string;
/** Carries the persisted decision detail through to applyReversal so we
* can re-link berths to their original interest, restore yacht owners,
* etc. without re-parsing meta.decisions. */
detail?: Record<string, unknown>;
} }
export interface RestoreDossier { export interface RestoreDossier {
@@ -126,6 +130,7 @@ export async function getRestoreDossier(clientId: string, portId: string): Promi
refId: d.refId, refId: d.refId,
label: `Berth ${b.mooringNumber}`, label: `Berth ${b.mooringNumber}`,
reason: 'still available — re-attaching to the restored client', reason: 'still available — re-attaching to the restored client',
detail: d.detail,
}); });
} else if (b.status === 'sold') { } else if (b.status === 'sold') {
locked.push({ locked.push({
@@ -144,6 +149,7 @@ export async function getRestoreDossier(clientId: string, portId: string): Promi
refId: d.refId, refId: d.refId,
label: `Berth ${b.mooringNumber}`, label: `Berth ${b.mooringNumber}`,
reason: 'currently under offer to another client — re-attach as a competing interest?', reason: 'currently under offer to another client — re-attach as a competing interest?',
detail: d.detail,
}); });
} }
break; break;
@@ -357,19 +363,43 @@ async function applyReversal(
clientId: string, clientId: string,
): Promise<void> { ): Promise<void> {
switch (r.kind) { switch (r.kind) {
case 'berth_released': case 'berth_released': {
// Re-attach to whichever interest of the restored client originally // Re-link the berth to whichever interest originally owned it
// owned the link. We don't know that interest id from the reversal // (persisted in d.detail.interestId at archive time). We verify
// alone, so we pick the most recent active interest on the same // the interest still belongs to the restored client and isn't
// berth from this client; if none exists we skip (the berth is // archived — defensive in case the operator deleted the interest
// now genuinely orphaned for this client). // separately while the client was archived.
// For v1, leave the berth available — operator can re-attach const interestId = (r.detail?.interestId as string | undefined) ?? null;
// manually via the interest-berths UI. The restore wizard surfaces if (!interestId) break;
// this case as auto-reversible only when the berth is still free, const [iv] = await tx
// so the operator can immediately add it back. .select({ id: interests.id, archivedAt: interests.archivedAt })
// (The system MARKS the berth as eligible for re-link; full .from(interests)
// automation would require persisting the original interestId.) .where(and(eq(interests.id, interestId), eq(interests.clientId, clientId)))
.limit(1);
if (!iv || iv.archivedAt) break;
// Idempotent re-insert: the unique index on (interestId, berthId)
// means a duplicate is a no-op via onConflictDoNothing.
await tx
.insert(interestBerths)
.values({
interestId,
berthId: r.refId,
isPrimary: false,
isSpecificInterest: true,
isInEoiBundle: false,
})
.onConflictDoNothing();
// Flip berth status back to under_offer so the public map reflects
// the re-link. Only when berth is currently 'available' (sold
// berths are immutable; under_offer to another client is handled
// via the prompt branch which the operator may opt into).
await tx
.update(berths)
.set({ status: 'under_offer' })
.where(and(eq(berths.id, r.refId), eq(berths.status, 'available')));
break; break;
}
case 'yacht_transferred': { case 'yacht_transferred': {
// Transfer back to the restored client. // Transfer back to the restored client.
await tx await tx

View File

@@ -64,6 +64,10 @@ export async function uploadExternallySignedEoi(input: ExternalEoiInput) {
const fileId = crypto.randomUUID(); const fileId = crypto.randomUUID();
const storagePath = buildStoragePath(port.slug, 'eoi-signed', documentId, fileId, 'pdf'); const storagePath = buildStoragePath(port.slug, 'eoi-signed', documentId, fileId, 'pdf');
// Upload to storage FIRST so we have a stable key for the DB rows,
// then commit all four DB writes in one transaction. If the tx fails
// the storage object becomes orphaned (S3 isn't transactional) but
// the DB stays clean — orphan reaper handles those.
await ( await (
await getStorageBackend() await getStorageBackend()
).put(storagePath, fileData.buffer, { ).put(storagePath, fileData.buffer, {
@@ -71,95 +75,101 @@ export async function uploadExternallySignedEoi(input: ExternalEoiInput) {
sizeBytes: fileData.size, sizeBytes: fileData.size,
}); });
const [fileRecord] = await db
.insert(files)
.values({
portId,
clientId: interest.clientId,
filename: fileData.originalName,
originalName: fileData.originalName,
mimeType: 'application/pdf',
sizeBytes: String(fileData.size),
storagePath,
storageBucket: env.MINIO_BUCKET,
category: 'eoi',
uploadedBy: meta.userId,
})
.returning();
if (!fileRecord) {
throw new CodedError('INSERT_RETURNING_EMPTY', {
internalMessage: 'External EOI file insert returned no row',
});
}
const title = const title =
input.title ?? `External EOI — ${(input.signedAt ?? new Date()).toISOString().slice(0, 10)}`; input.title ?? `External EOI — ${(input.signedAt ?? new Date()).toISOString().slice(0, 10)}`;
const [doc] = await db const result = await db.transaction(async (tx) => {
.insert(documents) const [fileRecord] = await tx
.values({ .insert(files)
id: documentId, .values({
portId, portId,
interestId, clientId: interest.clientId,
clientId: interest.clientId, filename: fileData.originalName,
yachtId: interest.yachtId, originalName: fileData.originalName,
documentType: 'eoi', mimeType: 'application/pdf',
title, sizeBytes: String(fileData.size),
status: 'completed', storagePath,
isManualUpload: true, storageBucket: env.MINIO_BUCKET,
signedFileId: fileRecord.id, category: 'eoi',
notes: input.notes ?? null, uploadedBy: meta.userId,
createdBy: meta.userId, })
}) .returning();
.returning(); if (!fileRecord) {
if (!doc) { throw new CodedError('INSERT_RETURNING_EMPTY', {
throw new CodedError('INSERT_RETURNING_EMPTY', { internalMessage: 'External EOI file insert returned no row',
internalMessage: 'External EOI document insert returned no row', });
}); }
}
await db.insert(documentEvents).values({ const [doc] = await tx
documentId: doc.id, .insert(documents)
eventType: 'completed', .values({
eventData: { id: documentId,
isManualUpload: true, portId,
external: true, interestId,
signerNames: input.signerNames ?? [], clientId: interest.clientId,
signedAt: (input.signedAt ?? new Date()).toISOString(), yachtId: interest.yachtId,
fileId: fileRecord.id, documentType: 'eoi',
}, title,
status: 'completed',
isManualUpload: true,
signedFileId: fileRecord.id,
notes: input.notes ?? null,
createdBy: meta.userId,
})
.returning();
if (!doc) {
throw new CodedError('INSERT_RETURNING_EMPTY', {
internalMessage: 'External EOI document insert returned no row',
});
}
await tx.insert(documentEvents).values({
documentId: doc.id,
eventType: 'completed',
eventData: {
isManualUpload: true,
external: true,
signerNames: input.signerNames ?? [],
signedAt: (input.signedAt ?? new Date()).toISOString(),
fileId: fileRecord.id,
},
});
// Advance the interest stage to eoi_signed (no-op if already past it).
// We bypass canTransitionStage explicitly because the operator just
// brought concrete proof that the EOI is signed — that's higher
// confidence than a normal forward-jump.
if (
interest.pipelineStage === 'open' ||
interest.pipelineStage === 'details_sent' ||
interest.pipelineStage === 'in_communication' ||
interest.pipelineStage === 'eoi_sent'
) {
await tx
.update(interests)
.set({
pipelineStage: 'eoi_signed',
eoiStatus: 'signed',
dateEoiSigned: input.signedAt ?? new Date(),
updatedAt: new Date(),
})
.where(eq(interests.id, interestId));
} else {
// Past eoi_signed — just record the document, don't touch stage.
await tx.update(interests).set({ updatedAt: new Date() }).where(eq(interests.id, interestId));
}
return { documentId: doc.id, fileId: fileRecord.id };
}); });
// Advance the interest stage to eoi_signed (no-op if already past it). const { documentId: docId, fileId: fId } = result;
// We bypass canTransitionStage explicitly because the operator just
// brought concrete proof that the EOI is signed — that's higher
// confidence than a normal forward-jump.
if (
interest.pipelineStage === 'open' ||
interest.pipelineStage === 'details_sent' ||
interest.pipelineStage === 'in_communication' ||
interest.pipelineStage === 'eoi_sent'
) {
await db
.update(interests)
.set({
pipelineStage: 'eoi_signed',
eoiStatus: 'signed',
dateEoiSigned: input.signedAt ?? new Date(),
updatedAt: new Date(),
})
.where(eq(interests.id, interestId));
} else {
// Past eoi_signed — just record the document, don't touch stage.
await db.update(interests).set({ updatedAt: new Date() }).where(eq(interests.id, interestId));
}
void createAuditLog({ void createAuditLog({
portId, portId,
userId: meta.userId, userId: meta.userId,
action: 'create', action: 'create',
entityType: 'document', entityType: 'document',
entityId: doc.id, entityId: docId,
metadata: { metadata: {
kind: 'external_eoi_upload', kind: 'external_eoi_upload',
interestId, interestId,
@@ -172,7 +182,7 @@ export async function uploadExternallySignedEoi(input: ExternalEoiInput) {
userAgent: meta.userAgent, userAgent: meta.userAgent,
}); });
emitToRoom(`port:${portId}`, 'document:completed', { documentId: doc.id }); emitToRoom(`port:${portId}`, 'document:completed', { documentId: docId });
return { documentId: doc.id, fileId: fileRecord.id }; return { documentId: docId, fileId: fId };
} }