fix(audit): reliability HIGHs — smart-restore re-link, TOCTOU lock, bulk wrong-interest, ext-EOI tx, bulk idempotency
R2-H1: smart-restore's berth_released auto-reversal was a no-op while the wizard claimed success. It now uses the persisted interestId from the decision detail to re-insert the interest_berths link and flip the berth status back to under_offer, and verifies the interest still exists and isn't archived before re-linking.

R2-H2: smart-archive's berth status update had a TOCTOU race — read outside the tx, write inside it without a lock. It now selects-for-update the berths row inside the tx and re-checks status against the locked row before flipping to available, preventing a concurrent archive + sale from un-selling a berth.

R2-H3: bulk-archive's berth→interest lookup fell back to dossier.interests[0]?.interestId ?? '', which sent empty-string interestIds that silently matched zero rows. The dossier now exposes linkedInterestIds[] per berth (from the authoritative interest_berths join); the bulk and single-client wizards both use it and skip berths with no linked interest.

Affected:
- src/lib/services/client-archive-dossier.service.ts (DossierBerth)
- src/app/api/v1/clients/bulk/route.ts
- src/components/clients/smart-archive-dialog.tsx

R2-H4: external-EOI upload ran the storage upload plus four DB writes outside a transaction. The file/document/event/interest writes are now wrapped in a single tx; the storage upload stays before the tx (S3 isn't transactional), so an orphaned object on tx failure is acceptable.

R2-H5: bulk archive double-submit treated already-archived clients as per-row failures. The bulk callback now early-returns success when the dossier shows archivedAt is set, making the endpoint idempotent.

1175/1175 vitest passing.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
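Before the diff, a minimal sketch of the R2-H2 lock-and-recheck pattern for reviewers. It assumes the drizzle-orm APIs and schema modules already imported in the hunks below; the releaseBerth wrapper name is illustrative only, and the real code additionally counts remaining is_specific_interest links (the stillUnderOffer guard) before flipping the status — that check is omitted here for brevity.

    // Sketch only — illustrates the SELECT ... FOR UPDATE re-check used by R2-H2.
    import { and, eq } from 'drizzle-orm';
    import { db } from '@/lib/db';
    import { berths } from '@/lib/db/schema/berths';
    import { interestBerths } from '@/lib/db/schema/interests';

    // Hypothetical wrapper: release one berth decision inside a transaction,
    // trusting only the row we locked, never a pre-tx dossier read.
    async function releaseBerth(berthId: string, interestId: string): Promise<void> {
      await db.transaction(async (tx) => {
        // Lock the berth row; a concurrent sale now blocks until we commit.
        const [locked] = await tx
          .select({ status: berths.status })
          .from(berths)
          .where(eq(berths.id, berthId))
          .for('update');

        // Drop this client's interest_berths link; other interests survive.
        await tx
          .delete(interestBerths)
          .where(and(eq(interestBerths.berthId, berthId), eq(interestBerths.interestId, interestId)));

        // Re-check against the locked row: if another tx sold the berth after
        // the dossier was read, we must not flip it back to 'available'.
        if (locked && locked.status !== 'sold') {
          await tx.update(berths).set({ status: 'available' }).where(eq(berths.id, berthId));
        }
      });
    }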
@@ -90,6 +90,13 @@ export const POST = withAuth(async (req, ctx) => {
// a per-client reason supplied via reasonsByClientId; the bulk-
// archive wizard captures these one at a time before submitting.
const dossier = await getClientArchiveDossier(id, ctx.portId);
// Idempotent: if a previous request already archived this client
// (e.g. a network retry / double-click), treat it as success
// rather than letting `archiveClientWithDecisions` throw a
// ConflictError that runBulk will surface as a per-row failure.
if (dossier.client.archivedAt) {
return;
}
const perClientReason = reasonsByClientId[id];
if (dossier.stakeLevel === 'high' && !perClientReason) {
throw new Error(
@@ -103,20 +110,32 @@ export const POST = withAuth(async (req, ctx) => {
(d) => d.status === 'completed' || d.status === 'signed',
);
const reason = perClientReason ?? 'Bulk archive (low-stakes auto-mode)';
// Pick the berth's first linked interest from the dossier
// (authoritative interest_berths join). Berths with no linked
// interest for this client are dropped — emitting an empty
// interestId causes the delete to silently match zero rows
// (audit R2-H3).
const berthDecisions = dossier.berths
.map((b) => {
const interestId = b.linkedInterestIds[0];
if (!interestId) return null;
return {
berthId: b.berthId,
interestId,
action: b.status === 'sold' ? ('retain' as const) : ('release' as const),
};
})
.filter(
(x): x is { berthId: string; interestId: string; action: 'retain' | 'release' } =>
x !== null,
);

const result = await archiveClientWithDecisions({
dossier,
decisions: {
reason,
acknowledgedSignedDocuments: hasSignedDocs,
berthDecisions: dossier.berths.map((b) => ({
berthId: b.berthId,
interestId:
dossier.interests.find((i) => i.primaryBerthMooring === b.mooringNumber)
?.interestId ??
dossier.interests[0]?.interestId ??
'',
action: b.status === 'sold' ? 'retain' : 'release',
})),
berthDecisions,
yachtDecisions: dossier.yachts.map((y) => ({ yachtId: y.yachtId, action: 'retain' })),
reservationDecisions: dossier.reservations.map((r) => ({
reservationId: r.reservationId,
@@ -23,6 +23,7 @@ interface DossierBerth {
berthId: string;
mooringNumber: string;
status: string;
linkedInterestIds: string[];
otherInterests: Array<{
interestId: string;
clientId: string | null;
@@ -155,17 +156,23 @@ export function SmartArchiveDialog({ open, onOpenChange, clientId, clientName, o
const archiveMutation = useMutation({
mutationFn: () => {
if (!dossier) throw new Error('No dossier');
const berthDec = dossier.berths.map((b) => ({
berthId: b.berthId,
// The interestId for this berth — use the first interest in the
// dossier that has this berth as its primary. Fallback to the
// first interest at all (the API only needs the link reference).
interestId:
dossier.interests.find((i) => i.primaryBerthMooring === b.mooringNumber)?.interestId ??
dossier.interests[0]?.interestId ??
'',
action: berthDecisions[b.berthId] ?? 'retain',
}));
// Pick the first linked interest for this berth from the
// authoritative dossier join. Berths with no linked interest for
// this client are skipped — sending an empty interestId would
// make the server-side delete silently match zero rows.
const berthDec = dossier.berths
.map((b) => {
const interestId = b.linkedInterestIds[0];
if (!interestId) return null;
return {
berthId: b.berthId,
interestId,
action: berthDecisions[b.berthId] ?? 'retain',
};
})
.filter(
(x): x is { berthId: string; interestId: string; action: BerthAction } => x !== null,
);
return apiFetch<{ data: { releasedBerths: Array<{ mooringNumber: string }> } }>(
`/api/v1/clients/${clientId}/archive`,
{
@@ -47,6 +47,11 @@ export interface DossierBerth {
berthId: string;
mooringNumber: string;
status: string; // 'available' | 'under_offer' | 'sold'
/** Every interest of THIS client that links to the berth. The bulk
* wizard uses this to pick the right interestId per berth instead of
* guessing by primary-mooring (which fails when multiple interests
* share a primary or when none is primary). */
linkedInterestIds: string[];
/** Other interests still actively expressing interest in this berth
* (so the next-in-line notification can list them). */
otherInterests: Array<{
@@ -267,10 +272,18 @@ export async function getClientArchiveDossier(
.orderBy(desc(interests.updatedAt))
.limit(10);

// Every linked interest belonging to THIS client (multiple
// interests can share a berth — primary flag is at most one per
// interest, not per berth).
const linkedInterestIds = Array.from(
new Set(interestBerthRows.filter((r) => r.berthId === berthId).map((r) => r.interestId)),
);

dossierBerths.push({
berthId,
mooringNumber: berth.mooringNumber,
status: berth.berthStatus,
linkedInterestIds,
otherInterests: others.map((o) => ({
interestId: o.interestId,
clientId: o.clientId,
@@ -177,6 +177,19 @@ export async function archiveClientWithDecisions(args: {
const berth = dossier.berths.find((b) => b.berthId === d.berthId);
if (!berth) continue;
if (d.action === 'release') {
// Lock the berth row so a concurrent sale can't flip the status
// between our read of dossier.berths (outside the tx) and our
// write below. Without this lock, A archives client X while B
// sells berth A1 to client Y — A's pre-tx read says
// status='under_offer', B commits status='sold', A's update
// would flip it back to 'available'.
const [locked] = await tx
.select({ status: berths.status })
.from(berths)
.where(eq(berths.id, d.berthId))
.for('update');
const lockedStatus = locked?.status ?? berth.status;

// Drop the interest_berths link for this client's interest. Other
// interests on the berth survive (so the next-in-line notification
// can fire).
@@ -186,9 +199,10 @@ export async function archiveClientWithDecisions(args: {
and(eq(interestBerths.berthId, d.berthId), eq(interestBerths.interestId, d.interestId)),
);
// If no remaining interestBerths row marks this berth as
// is_specific_interest, set the berth status back to available
// (sold berths are immutable from this flow per design).
if (berth.status !== 'sold') {
// is_specific_interest, set the berth status back to available.
// Sold berths are immutable from this flow — also re-checked
// against the freshly-locked row, not the pre-tx dossier read.
if (lockedStatus !== 'sold') {
const [stillUnderOffer] = await tx
.select({ count: sql<number>`count(*)::int` })
.from(interestBerths)
@@ -20,7 +20,7 @@ import { and, eq, isNull, ne, sql } from 'drizzle-orm';

import { db } from '@/lib/db';
import { clients } from '@/lib/db/schema/clients';
import { interests } from '@/lib/db/schema/interests';
import { interests, interestBerths } from '@/lib/db/schema/interests';
import { berths } from '@/lib/db/schema/berths';
import { yachts } from '@/lib/db/schema/yachts';
import { portalUsers } from '@/lib/db/schema/portal';
@@ -42,6 +42,10 @@ export interface RestoreReversal {
label: string;
/** Why this is being shown the way it is (e.g. "berth still available"). */
reason: string;
/** Carries the persisted decision detail through to applyReversal so we
* can re-link berths to their original interest, restore yacht owners,
* etc. without re-parsing meta.decisions. */
detail?: Record<string, unknown>;
}

export interface RestoreDossier {
@@ -126,6 +130,7 @@ export async function getRestoreDossier(clientId: string, portId: string): Promi
refId: d.refId,
label: `Berth ${b.mooringNumber}`,
reason: 'still available — re-attaching to the restored client',
detail: d.detail,
});
} else if (b.status === 'sold') {
locked.push({
@@ -144,6 +149,7 @@ export async function getRestoreDossier(clientId: string, portId: string): Promi
refId: d.refId,
label: `Berth ${b.mooringNumber}`,
reason: 'currently under offer to another client — re-attach as a competing interest?',
detail: d.detail,
});
}
break;
@@ -357,19 +363,43 @@ async function applyReversal(
clientId: string,
): Promise<void> {
switch (r.kind) {
case 'berth_released':
// Re-attach to whichever interest of the restored client originally
// owned the link. We don't know that interest id from the reversal
// alone, so we pick the most recent active interest on the same
// berth from this client; if none exists we skip (the berth is
// now genuinely orphaned for this client).
// For v1, leave the berth available — operator can re-attach
// manually via the interest-berths UI. The restore wizard surfaces
// this case as auto-reversible only when the berth is still free,
// so the operator can immediately add it back.
// (The system MARKS the berth as eligible for re-link; full
// automation would require persisting the original interestId.)
case 'berth_released': {
// Re-link the berth to whichever interest originally owned it
// (persisted in d.detail.interestId at archive time). We verify
// the interest still belongs to the restored client and isn't
// archived — defensive in case the operator deleted the interest
// separately while the client was archived.
const interestId = (r.detail?.interestId as string | undefined) ?? null;
if (!interestId) break;
const [iv] = await tx
.select({ id: interests.id, archivedAt: interests.archivedAt })
.from(interests)
.where(and(eq(interests.id, interestId), eq(interests.clientId, clientId)))
.limit(1);
if (!iv || iv.archivedAt) break;

// Idempotent re-insert: the unique index on (interestId, berthId)
// means a duplicate is a no-op via onConflictDoNothing.
await tx
.insert(interestBerths)
.values({
interestId,
berthId: r.refId,
isPrimary: false,
isSpecificInterest: true,
isInEoiBundle: false,
})
.onConflictDoNothing();
// Flip berth status back to under_offer so the public map reflects
// the re-link. Only when berth is currently 'available' (sold
// berths are immutable; under_offer to another client is handled
// via the prompt branch which the operator may opt into).
await tx
.update(berths)
.set({ status: 'under_offer' })
.where(and(eq(berths.id, r.refId), eq(berths.status, 'available')));
break;
}
case 'yacht_transferred': {
// Transfer back to the restored client.
await tx
@@ -64,6 +64,10 @@ export async function uploadExternallySignedEoi(input: ExternalEoiInput) {
const fileId = crypto.randomUUID();
const storagePath = buildStoragePath(port.slug, 'eoi-signed', documentId, fileId, 'pdf');

// Upload to storage FIRST so we have a stable key for the DB rows,
// then commit all four DB writes in one transaction. If the tx fails
// the storage object becomes orphaned (S3 isn't transactional) but
// the DB stays clean — orphan reaper handles those.
await (
await getStorageBackend()
).put(storagePath, fileData.buffer, {
@@ -71,95 +75,101 @@ export async function uploadExternallySignedEoi(input: ExternalEoiInput) {
sizeBytes: fileData.size,
});

const [fileRecord] = await db
.insert(files)
.values({
portId,
clientId: interest.clientId,
filename: fileData.originalName,
originalName: fileData.originalName,
mimeType: 'application/pdf',
sizeBytes: String(fileData.size),
storagePath,
storageBucket: env.MINIO_BUCKET,
category: 'eoi',
uploadedBy: meta.userId,
})
.returning();
if (!fileRecord) {
throw new CodedError('INSERT_RETURNING_EMPTY', {
internalMessage: 'External EOI file insert returned no row',
});
}

const title =
input.title ?? `External EOI — ${(input.signedAt ?? new Date()).toISOString().slice(0, 10)}`;

const [doc] = await db
.insert(documents)
.values({
id: documentId,
portId,
interestId,
clientId: interest.clientId,
yachtId: interest.yachtId,
documentType: 'eoi',
title,
status: 'completed',
isManualUpload: true,
signedFileId: fileRecord.id,
notes: input.notes ?? null,
createdBy: meta.userId,
})
.returning();
if (!doc) {
throw new CodedError('INSERT_RETURNING_EMPTY', {
internalMessage: 'External EOI document insert returned no row',
});
}
const result = await db.transaction(async (tx) => {
const [fileRecord] = await tx
.insert(files)
.values({
portId,
clientId: interest.clientId,
filename: fileData.originalName,
originalName: fileData.originalName,
mimeType: 'application/pdf',
sizeBytes: String(fileData.size),
storagePath,
storageBucket: env.MINIO_BUCKET,
category: 'eoi',
uploadedBy: meta.userId,
})
.returning();
if (!fileRecord) {
throw new CodedError('INSERT_RETURNING_EMPTY', {
internalMessage: 'External EOI file insert returned no row',
});
}

await db.insert(documentEvents).values({
documentId: doc.id,
eventType: 'completed',
eventData: {
isManualUpload: true,
external: true,
signerNames: input.signerNames ?? [],
signedAt: (input.signedAt ?? new Date()).toISOString(),
fileId: fileRecord.id,
},
const [doc] = await tx
.insert(documents)
.values({
id: documentId,
portId,
interestId,
clientId: interest.clientId,
yachtId: interest.yachtId,
documentType: 'eoi',
title,
status: 'completed',
isManualUpload: true,
signedFileId: fileRecord.id,
notes: input.notes ?? null,
createdBy: meta.userId,
})
.returning();
if (!doc) {
throw new CodedError('INSERT_RETURNING_EMPTY', {
internalMessage: 'External EOI document insert returned no row',
});
}

await tx.insert(documentEvents).values({
documentId: doc.id,
eventType: 'completed',
eventData: {
isManualUpload: true,
external: true,
signerNames: input.signerNames ?? [],
signedAt: (input.signedAt ?? new Date()).toISOString(),
fileId: fileRecord.id,
},
});

// Advance the interest stage to eoi_signed (no-op if already past it).
// We bypass canTransitionStage explicitly because the operator just
// brought concrete proof that the EOI is signed — that's higher
// confidence than a normal forward-jump.
if (
interest.pipelineStage === 'open' ||
interest.pipelineStage === 'details_sent' ||
interest.pipelineStage === 'in_communication' ||
interest.pipelineStage === 'eoi_sent'
) {
await tx
.update(interests)
.set({
pipelineStage: 'eoi_signed',
eoiStatus: 'signed',
dateEoiSigned: input.signedAt ?? new Date(),
updatedAt: new Date(),
})
.where(eq(interests.id, interestId));
} else {
// Past eoi_signed — just record the document, don't touch stage.
await tx.update(interests).set({ updatedAt: new Date() }).where(eq(interests.id, interestId));
}

return { documentId: doc.id, fileId: fileRecord.id };
});
// Advance the interest stage to eoi_signed (no-op if already past it).
// We bypass canTransitionStage explicitly because the operator just
// brought concrete proof that the EOI is signed — that's higher
// confidence than a normal forward-jump.
if (
interest.pipelineStage === 'open' ||
interest.pipelineStage === 'details_sent' ||
interest.pipelineStage === 'in_communication' ||
interest.pipelineStage === 'eoi_sent'
) {
await db
.update(interests)
.set({
pipelineStage: 'eoi_signed',
eoiStatus: 'signed',
dateEoiSigned: input.signedAt ?? new Date(),
updatedAt: new Date(),
})
.where(eq(interests.id, interestId));
} else {
// Past eoi_signed — just record the document, don't touch stage.
await db.update(interests).set({ updatedAt: new Date() }).where(eq(interests.id, interestId));
}
const { documentId: docId, fileId: fId } = result;

void createAuditLog({
portId,
userId: meta.userId,
action: 'create',
entityType: 'document',
entityId: doc.id,
entityId: docId,
metadata: {
kind: 'external_eoi_upload',
interestId,
@@ -172,7 +182,7 @@ export async function uploadExternallySignedEoi(input: ExternalEoiInput) {
userAgent: meta.userAgent,
});

emitToRoom(`port:${portId}`, 'document:completed', { documentId: doc.id });
emitToRoom(`port:${portId}`, 'document:completed', { documentId: docId });

return { documentId: doc.id, fileId: fileRecord.id };
return { documentId: docId, fileId: fId };
}