fix(audit): post-review hardening across phases 0-7
Fixes 15 of 17 findings from the consolidated audit (3 reviewer agents
on the previously-shipped phase commits). The remaining two are
nice-to-have follow-ups, deferred.

Critical (data integrity / security):

- Public berths API: closed-deal junction rows no longer flip a berth
  to "Under Offer" - filter on `interests.outcome IS NULL` so
  won/lost/cancelled don't pollute public-map status. Both list +
  single-mooring routes.
- Recommender heat: cancelled outcomes now count as fall-throughs (SQL
  was `LIKE 'lost%'`, which silently dropped them, leaving
  cancelled-only berths stuck in tier A).
- Filesystem presignDownload returns an absolute URL (origin from
  APP_URL) so emailed download links resolve from external mail
  clients.
- Magic-byte verification on the presigned-PUT path: both per-berth
  PDFs and brochures stream the first 5 bytes via the storage backend
  and reject + delete on `%PDF-` mismatch (was only enforced when the
  server saw the buffer; presign-PUT was wide open).
- Replay-protection TTL aligned to the token's own expiry (was a fixed
  30 min, but send-out tokens live 24 h). Floor 60 s, ceiling 25 days.
- Brochures: unique partial index on (port_id) WHERE is_default=true,
  plus the 0032 migration. Closes the read-then-write race in the
  create/update transactions.

Important:

- Recommender SQL: defense-in-depth `i.port_id = $portId` filter on
  the aggregates CTE.
- berth-pdf service: per-berth pg_advisory_xact_lock around the
  version-number SELECT + insert. Storage key is now UUID-based so
  concurrent uploads can't collide on blob paths. Replaces
  `nextVersionNumber` with the tx-bound variant.
- berth-pdf apply: rejects with ConflictError when parse_results
  contain a mooring-mismatch warning unless the caller passes
  `confirmMooringMismatch: true` (the force-reconfirm gate was
  UI-only).
- Send-out body: HTML-escape the brochure filename in the
  download-link fallback (XSS guard).
- parseDecimalWithUnit rejects negative numbers.
- listClients: DISTINCT ON for primary-contact resolution bounds the
  contact-row count to ~2 per client.

Defensive:

- verifyProxyToken rejects NaN/Infinity expiries via Number.isFinite.
- Replaced sql ANY() with inArray() in interest-berths.

Tests: 1145 -> 1163 passing.

Deferred: bulk-send rate limit (no bulk endpoint today); markdown
italic regex breaking links with asterisks (cosmetic).

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
@@ -77,6 +77,10 @@ export async function GET(
             eq(interestBerths.berthId, berth.id),
             eq(interestBerths.isSpecificInterest, true),
             isNull(interests.archivedAt),
+            // Closed deals (won/lost/cancelled) don't promote to "Under
+            // Offer" - won flows through berths.status='sold' handled in
+            // derivePublicStatus; lost/cancelled means back on the market.
+            isNull(interests.outcome),
           ),
         )
         .limit(1),
@@ -85,6 +85,11 @@ export async function GET(request: Request): Promise<Response> {
           inArray(interestBerths.berthId, berthIds),
           eq(interestBerths.isSpecificInterest, true),
           isNull(interests.archivedAt),
+          // Don't promote a berth to "Under Offer" when the only specific-
+          // interest link is a closed deal. `won` flips happen via
+          // berths.status='sold' (handled in derivePublicStatus). Lost/
+          // cancelled outcomes mean the berth is back on the market.
+          isNull(interests.outcome),
         ),
       ),
   ]);
@@ -28,7 +28,13 @@ import { verifyProxyToken } from '@/lib/storage/filesystem';
 export const runtime = 'nodejs';
 export const dynamic = 'force-dynamic';
 
-const REPLAY_TTL_SECONDS = 60 * 30; // 30min — longer than any presign expiry default.
+// Replay-protection TTL must outlive the token itself, otherwise the
+// dedup key expires and the same token can be redeemed twice. We pin it
+// to the token's own expiry (clamped to a 25-day ceiling so a forged
+// far-future token can't pollute Redis indefinitely). Send-out emails
+// mint 24-hour tokens so the typical TTL is 24h + a small buffer.
+const REPLAY_TTL_FLOOR_SECONDS = 60; // never below 60s (post-expiry tail).
+const REPLAY_TTL_CEILING_SECONDS = 25 * 24 * 60 * 60; // 25 days.
 
 export async function GET(
   _req: NextRequest,
@@ -51,11 +57,16 @@ export async function GET(
   }
   const { payload } = result;
 
-  // Single-use enforcement. SET NX with a TTL longer than the token itself.
-  // Using the body half of the token as the dedup key (signature included
+  // Single-use enforcement. SET NX with a TTL pinned to the token's own
+  // expiry so the dedup window never closes before the token does. Using
+  // the body half of the token as the dedup key (signature included
   // would also work but body is enough — a reused token has the same body).
   const replayKey = `storage:proxy:seen:${token.split('.')[0]}`;
-  const setOk = await redis.set(replayKey, '1', 'EX', REPLAY_TTL_SECONDS, 'NX');
+  const remainingSeconds = Math.max(
+    REPLAY_TTL_FLOOR_SECONDS,
+    Math.min(REPLAY_TTL_CEILING_SECONDS, payload.e - Math.floor(Date.now() / 1000) + 60),
+  );
+  const setOk = await redis.set(replayKey, '1', 'EX', remainingSeconds, 'NX');
   if (setOk !== 'OK') {
     logger.warn({ key: payload.k }, 'Storage proxy token replay rejected');
     return NextResponse.json({ error: 'Token already used' }, { status: 403 });
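The clamp in the hunk above, pulled out as a standalone sketch (the function name `clampReplayTtl` is illustrative, not from the codebase):

function clampReplayTtl(tokenExpirySec: number, nowMs: number = Date.now()): number {
  // Remaining token lifetime plus a 60s post-expiry tail, floored at 60s
  // and capped at 25 days to match the constants above.
  const remaining = tokenExpirySec - Math.floor(nowMs / 1000) + 60;
  return Math.max(60, Math.min(25 * 24 * 60 * 60, remaining));
}

// A fresh 24h send-out token dedups for ~86,460s (24h + 60s tail); a
// token redeemed one second before expiry still holds the key for the
// 60s floor; a forged far-future expiry is capped at 25 days.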
@@ -16,9 +16,16 @@ import { updateSalesEmailConfigSchema } from '@/lib/validators/sales-email-confi
  * GET /api/v1/admin/email/sales-config
  *
  * Returns the redacted view of the sales-email config. Per §14.10
- * "Permission escalation: who configures SMTP credentials?", reps cannot
- * see the decrypted password — the response only carries `*PassIsSet`
- * boolean markers.
+ * "Permission escalation: who configures SMTP credentials?",
+ * reps can't see the decrypted password — the response only carries
+ * `*PassIsSet` boolean markers via `redactSalesConfigForResponse`.
  *
+ * Today this endpoint is admin-only because it's consumed only by the
+ * admin UI panel (`src/components/admin/sales-email-config-card.tsx`).
+ * A future rep-facing surface that needs the from-address or body
+ * templates can split into a separate `/email/sales-config/preview`
+ * endpoint scoped to `email.view` — keeping the admin endpoint locked
+ * to `manage_settings` avoids accidentally widening secret-adjacent
+ * surfaces (e.g. the SMTP host name itself can be a leak vector).
  */
 export const GET = withAuth(
   withPermission('admin', 'manage_settings', async (_req, ctx) => {
@@ -0,0 +1 @@
+CREATE UNIQUE INDEX "idx_brochures_one_default_per_port" ON "brochures" USING btree ("port_id") WHERE "brochures"."is_default" = true AND "brochures"."archived_at" IS NULL;
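The race this index backstops, as a hedged sketch: the service's actual demote-then-insert transaction is not part of this hunk, and `setDefaultBrochure` plus the 23505 handling are illustrative only.

async function setDefaultBrochure(portId: string, brochureId: string): Promise<void> {
  try {
    await db.transaction(async (tx) => {
      // Without the partial index, two concurrent callers can both pass
      // the read-then-write check, demote the old default, and commit
      // two is_default=true rows. The index turns the loser's commit
      // into a unique violation instead.
      await tx.execute(sql`
        UPDATE brochures SET is_default = false
        WHERE port_id = ${portId} AND is_default = true AND archived_at IS NULL`);
      await tx.execute(sql`UPDATE brochures SET is_default = true WHERE id = ${brochureId}`);
    });
  } catch (err) {
    if ((err as { code?: string }).code === '23505') {
      // unique_violation from idx_brochures_one_default_per_port.
      throw new Error('Default brochure changed concurrently; retry.');
    }
    throw err;
  }
}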
src/lib/db/migrations/meta/0032_snapshot.json (new file, 11483 lines; diff suppressed because it is too large)
@@ -225,6 +225,13 @@
       "when": 1777944191753,
       "tag": "0031_brochures_and_document_sends",
       "breakpoints": true
     },
+    {
+      "idx": 32,
+      "version": "7",
+      "when": 1777946048910,
+      "tag": "0032_brochures_one_default_per_port_and_storage_fixes",
+      "breakpoints": true
+    }
   ]
 }
@@ -1,4 +1,13 @@
-import { pgTable, text, boolean, integer, timestamp, index } from 'drizzle-orm/pg-core';
+import {
+  pgTable,
+  text,
+  boolean,
+  integer,
+  timestamp,
+  index,
+  uniqueIndex,
+} from 'drizzle-orm/pg-core';
+import { sql } from 'drizzle-orm';
 
 import { ports } from './ports';
 import { clients } from './clients';
@@ -28,7 +37,17 @@ export const brochures = pgTable(
     createdBy: text('created_by').notNull(),
     createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
   },
-  (table) => [index('idx_brochures_port').on(table.portId)],
+  (table) => [
+    index('idx_brochures_port').on(table.portId),
+    // At most one default brochure per port (excluding archived rows).
+    // Service-layer "demote prior default then insert" is correct in the
+    // single-writer case, but two concurrent createBrochure(isDefault:true)
+    // calls without this index race past the read-then-write check and
+    // both win.
+    uniqueIndex('idx_brochures_one_default_per_port')
+      .on(table.portId)
+      .where(sql`${table.isDefault} = true AND ${table.archivedAt} IS NULL`),
+  ],
 );
 
 /**
@@ -20,12 +20,18 @@ import type { NocoDbRow } from '@/lib/dedup/nocodb-source';
  */
 export function parseDecimalWithUnit(raw: unknown): number | null {
   if (raw == null) return null;
-  if (typeof raw === 'number') return Number.isFinite(raw) ? raw : null;
+  if (typeof raw === 'number') {
+    // Berth dimensions / capacities / prices are all non-negative; treat
+    // a negative number as malformed input rather than silently importing
+    // it (a "-50ft" length would otherwise nuke the recommender's
+    // feasibility filter).
+    return Number.isFinite(raw) && raw >= 0 ? raw : null;
+  }
   if (typeof raw !== 'string') return null;
-  const m = /^\s*(-?\d+(?:\.\d+)?)\s*(?:ft|feet|m|metres|meters|kw|v|usd|\$)?\s*$/i.exec(raw);
+  const m = /^\s*(\d+(?:\.\d+)?)\s*(?:ft|feet|m|metres|meters|kw|v|usd|\$)?\s*$/i.exec(raw);
   if (!m) return null;
   const n = parseFloat(m[1]!);
-  return Number.isFinite(n) ? n : null;
+  return Number.isFinite(n) && n >= 0 ? n : null;
 }
 
 /** Round to 2 decimals to match NocoDB's `precision: 2` decimal columns. */
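Expected behavior after this change, as a quick sketch (the values follow directly from the regex and guards above):

parseDecimalWithUnit('50 ft');    // 50
parseDecimalWithUnit('12.5m');    // 12.5
parseDecimalWithUnit('1200 usd'); // 1200
parseDecimalWithUnit('-50ft');    // null: the regex no longer admits '-'
parseDecimalWithUnit(-50);        // null: negative-number guard
parseDecimalWithUnit(NaN);        // null: Number.isFinite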
@@ -11,7 +11,9 @@
  * - Generate signed download URLs for the version list.
  */
 
-import { and, desc, eq, isNull, max } from 'drizzle-orm';
+import { createHash } from 'node:crypto';
+
+import { and, desc, eq, isNull, max, sql } from 'drizzle-orm';
 
 import { db } from '@/lib/db';
 import { berths, berthPdfVersions } from '@/lib/db/schema/berths';
@@ -178,17 +180,26 @@ export async function uploadBerthPdf(args: UploadBerthPdfArgs): Promise<UploadBe
   const maxMb = await getMaxUploadMb(berthRow.portId);
   const maxBytes = maxMb * 1024 * 1024;
 
-  // 2. Compute next version number. Using a serializable transaction so two
-  //    concurrent uploads can't both pick `v3` (the unique index would catch
-  //    it but we'd rather return a clean error than a 23505).
-  const versionNumber = await nextVersionNumber(args.berthId);
+  // 2. Per-berth advisory lock prevents two concurrent uploads from both
+  //    computing version `v3` and racing to write blobs (the unique index
+  //    on (berth_id, version_number) would catch the second insert, but
+  //    only AFTER its blob is already in storage — leaving an orphan).
+  //    The lock is scoped to a transaction wrapping the version-number
+  //    read AND the blob write, so concurrent uploads serialize cleanly.
+  //    NB: hash the UUID into a 32-bit int for pg_advisory_xact_lock(int).
+  const berthLockKey = hashBerthIdToInt(args.berthId);
 
   // 3. Magic bytes + size when we have the buffer in hand.
   const backend = await getStorageBackend();
   const buffer = args.buffer;
+  // UUID-based storage key path so two concurrent uploads can't collide
+  // on the same blob path (the version_number suffix used to be in the
+  // key but is now a separate DB column allocated under the per-berth
+  // advisory lock — see step 4).
+  let versionNumber = 1;
   let storageKey =
     args.storageKey ??
-    `berths/${args.berthId}/v${versionNumber}/${sanitizeFileName(args.fileName)}`;
+    `berths/${args.berthId}/${crypto.randomUUID()}/${sanitizeFileName(args.fileName)}`;
   let sizeBytes = args.fileSizeBytes ?? buffer?.length ?? 0;
   let sha256 = args.sha256 ?? '';
@@ -211,7 +222,7 @@ export async function uploadBerthPdf(args: UploadBerthPdfArgs): Promise<UploadBe
     sizeBytes = written.sizeBytes;
     sha256 = written.sha256;
   } else if (args.storageKey) {
-    // Browser uploaded directly via presigned URL — verify via HEAD.
+    // Browser uploaded directly via presigned URL — verify via HEAD + magic bytes.
     const head = await backend.head(args.storageKey);
     if (!head) {
       throw new ValidationError('Uploaded object not found at expected storage key.');
@@ -232,6 +243,16 @@ export async function uploadBerthPdf(args: UploadBerthPdfArgs): Promise<UploadBe
         `Uploaded object content-type is ${head.contentType}; expected application/pdf.`,
       );
     }
+    // Magic-byte check on the presign path (§14.6 critical) - browser-
+    // uploaded objects could be anything until we read the bytes. Stream
+    // just the first 5 bytes; abort early on mismatch and delete the blob.
+    const probeBytes = await readFirstBytes(backend, args.storageKey, 5);
+    if (!isPdfMagic(probeBytes)) {
+      await backend.delete(args.storageKey).catch(() => undefined);
+      throw new ValidationError(
+        'Uploaded file failed PDF magic-byte check (does not start with %PDF-).',
+      );
+    }
     sizeBytes = head.sizeBytes;
     sha256 = args.sha256 ?? '';
     storageKey = args.storageKey;
@@ -239,9 +260,13 @@ export async function uploadBerthPdf(args: UploadBerthPdfArgs): Promise<UploadBe
     throw new ValidationError('Either buffer or storageKey is required.');
   }
 
-  // 4. Insert version row + bump current pointer in one transaction.
+  // 4. Take the per-berth advisory lock, compute version_number under
+  //    the lock, insert + bump pointer. All inside a single transaction
+  //    so the lock + writes commit atomically.
   const versionId = crypto.randomUUID();
   await db.transaction(async (tx) => {
+    await tx.execute(sql`SELECT pg_advisory_xact_lock(${berthLockKey})`);
+    versionNumber = await nextVersionNumberTx(tx, args.berthId);
     await tx.insert(berthPdfVersions).values({
       id: versionId,
       berthId: args.berthId,
@@ -267,14 +292,57 @@ export async function uploadBerthPdf(args: UploadBerthPdfArgs): Promise<UploadBe
   return { versionId, storageKey, versionNumber, fileSizeBytes: sizeBytes, contentSha256: sha256 };
 }
 
-async function nextVersionNumber(berthId: string): Promise<number> {
-  const [row] = await db
+/** Tx-bound variant — same SELECT MAX(...) but inside the caller's transaction. */
+async function nextVersionNumberTx(
+  tx: Parameters<Parameters<typeof db.transaction>[0]>[0],
+  berthId: string,
+): Promise<number> {
+  const [row] = await tx
     .select({ max: max(berthPdfVersions.versionNumber) })
     .from(berthPdfVersions)
    .where(eq(berthPdfVersions.berthId, berthId));
   return (row?.max ?? 0) + 1;
 }
 
+/**
+ * Hash a UUID berthId into a 32-bit signed integer for pg_advisory_xact_lock.
+ * Uses the first 4 bytes of sha256 reinterpreted as int32 — collisions are
+ * theoretically possible but the lock is per-berth so a collision just
+ * means two different berths' uploads serialize through the same key,
+ * which is harmless (correctness preserved, slight contention only).
+ */
+function hashBerthIdToInt(berthId: string): number {
+  const h = createHash('sha256').update(berthId).digest();
+  // Read as signed 32-bit big-endian; pg_advisory_xact_lock(int) signature.
+  return h.readInt32BE(0);
+}
+
+/**
+ * Stream just the first `n` bytes of a stored object so the magic-byte
+ * check on the presigned-PUT path can run without buffering the whole
+ * file. Returns a Buffer of up to `n` bytes (less if the file is shorter).
+ */
+async function readFirstBytes(
+  backend: Awaited<ReturnType<typeof getStorageBackend>>,
+  key: string,
+  n: number,
+): Promise<Buffer> {
+  const stream = await backend.get(key);
+  const chunks: Buffer[] = [];
+  let total = 0;
+  for await (const chunk of stream as AsyncIterable<Buffer | string>) {
+    const buf = typeof chunk === 'string' ? Buffer.from(chunk) : chunk;
+    chunks.push(buf);
+    total += buf.length;
+    if (total >= n) break;
+  }
+  // Best-effort dispose - some streams are still readable after iteration.
+  if (typeof (stream as { destroy?: () => void }).destroy === 'function') {
+    (stream as unknown as { destroy: () => void }).destroy();
+  }
+  return Buffer.concat(chunks).subarray(0, n);
+}
+
 function sanitizeFileName(raw: string): string {
   // Preserve the extension; replace spaces / disallowed chars with '_' so the
   // result satisfies the storage-key validation regex.
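`isPdfMagic` itself is not shown in this hunk; a sketch consistent with the inline brochure-side check later in this commit (the probe must be exactly the five header bytes of '%PDF-'):

function isPdfMagic(probe: Buffer): boolean {
  // '%PDF-' is 0x25 0x50 0x44 0x46 0x2D, the first five bytes of every
  // well-formed PDF header (e.g. '%PDF-1.7').
  return probe.length >= 5 && probe.toString('utf8', 0, 5) === '%PDF-';
}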
@@ -361,11 +429,18 @@ export async function reconcilePdfWithBerth(
  * caller passes the canonical `ExtractedBerthFields` keys; anything outside
  * `APPLIABLE_FIELDS` is silently dropped to keep this endpoint a hard
  * allowlist.
+ *
+ * Mooring-mismatch gate (§14.6 critical): when the version's stored
+ * `parseResults.warnings` contains a mooring-mismatch warning, the apply
+ * is rejected unless the caller passes `confirmMooringMismatch: true`.
+ * This is the service-side enforcement of the "force re-confirm" rule —
+ * UI confirmation alone is not enough.
  */
 export async function applyParseResults(
   berthId: string,
   versionId: string,
   fieldsToApply: Partial<ExtractedBerthFields>,
+  opts: { confirmMooringMismatch?: boolean } = {},
 ): Promise<{ updatedFields: Array<keyof ExtractedBerthFields> }> {
   const berthRow = await db.query.berths.findFirst({ where: eq(berths.id, berthId) });
   if (!berthRow) throw new NotFoundError('Berth');
@@ -374,6 +449,17 @@ export async function applyParseResults(
   });
   if (!versionRow) throw new NotFoundError('Berth PDF version');
 
+  // §14.6 mooring-mismatch gate.
+  const priorParse = versionRow.parseResults as { warnings?: string[] } | null;
+  const hasMooringMismatch = (priorParse?.warnings ?? []).some(
+    (w) => /uploading to/i.test(w) && /berth/i.test(w),
+  );
+  if (hasMooringMismatch && !opts.confirmMooringMismatch) {
+    throw new ConflictError(
+      'PDF mooring mismatch with target berth. Pass confirmMooringMismatch=true to override.',
+    );
+  }
+
   const update: Record<string, unknown> = {};
   const applied: Array<keyof ExtractedBerthFields> = [];
   for (const key of APPLIABLE_FIELDS) {
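Caller-side shape of the new gate, as a sketch (the route-handler wiring is not part of this diff):

try {
  await applyParseResults(berthId, versionId, fieldsToApply);
} catch (err) {
  if (err instanceof ConflictError) {
    // Surface the mismatch to the user; on explicit re-confirmation,
    // retry with the override flag. UI confirmation alone no longer
    // bypasses the service-side check.
    await applyParseResults(berthId, versionId, fieldsToApply, {
      confirmMooringMismatch: true,
    });
  } else {
    throw err;
  }
}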
@@ -450,7 +450,7 @@ export async function recommendBerths(args: RecommendBerthsArgs): Promise<Recomm
       f.id AS berth_id,
       COUNT(*) FILTER (WHERE i.archived_at IS NULL AND i.outcome IS NULL) AS active_interest_count,
       COUNT(*) FILTER (
-        WHERE i.outcome IS NOT NULL AND i.outcome::text LIKE 'lost%'
+        WHERE i.outcome IS NOT NULL AND (i.outcome::text LIKE 'lost%' OR i.outcome = 'cancelled')
       ) AS lost_count,
       COALESCE(
         MAX(CASE i.pipeline_stage
@@ -467,7 +467,7 @@ export async function recommendBerths(args: RecommendBerthsArgs): Promise<Recomm
         0
       ) AS max_active_stage,
       MAX(i.outcome_at) FILTER (
-        WHERE i.outcome IS NOT NULL AND i.outcome::text LIKE 'lost%'
+        WHERE i.outcome IS NOT NULL AND (i.outcome::text LIKE 'lost%' OR i.outcome = 'cancelled')
       ) AS latest_fallthrough_at,
       COALESCE(
         MAX(CASE i.pipeline_stage
@@ -480,14 +480,14 @@ export async function recommendBerths(args: RecommendBerthsArgs): Promise<Recomm
           WHEN 'contract_sent' THEN 7
           WHEN 'contract_signed' THEN 8
           ELSE 0 END
-        ) FILTER (WHERE i.outcome IS NOT NULL AND i.outcome::text LIKE 'lost%'),
+        ) FILTER (WHERE i.outcome IS NOT NULL AND (i.outcome::text LIKE 'lost%' OR i.outcome = 'cancelled')),
         0
       ) AS fallthrough_max_stage,
       COUNT(*) AS total_interest_count,
       COUNT(*) FILTER (WHERE i.eoi_status = 'signed') AS eoi_signed_count
     FROM feasible f
     LEFT JOIN interest_berths ib ON ib.berth_id = f.id
-    LEFT JOIN interests i ON i.id = ib.interest_id
+    LEFT JOIN interests i ON i.id = ib.interest_id AND i.port_id = ${args.portId}
     GROUP BY f.id
   )
   SELECT
@@ -238,6 +238,29 @@ export async function registerBrochureVersion(
     );
     throw new ValidationError('Uploaded object size does not match metadata');
   }
+  // Magic-byte check (§14.6 critical) - the presign path doesn't see the
+  // bytes until upload completes. Read the first 5 bytes; abort + delete
+  // on mismatch so a malicious uploader can't smuggle a non-PDF that the
+  // CRM would later email as `application/pdf`.
+  const stream = await storage.get(input.storageKey);
+  const chunks: Buffer[] = [];
+  let total = 0;
+  for await (const chunk of stream as AsyncIterable<Buffer | string>) {
+    const buf = typeof chunk === 'string' ? Buffer.from(chunk) : chunk;
+    chunks.push(buf);
+    total += buf.length;
+    if (total >= 5) break;
+  }
+  if (typeof (stream as { destroy?: () => void }).destroy === 'function') {
+    (stream as unknown as { destroy: () => void }).destroy();
+  }
+  const probe = Buffer.concat(chunks).subarray(0, 5);
+  if (probe.length < 5 || probe.toString('utf8', 0, 5) !== '%PDF-') {
+    await storage.delete(input.storageKey).catch(() => undefined);
+    throw new ValidationError(
+      'Uploaded file failed PDF magic-byte check (does not start with %PDF-).',
+    );
+  }
 
   // Determine the next version number for this brochure.
   const existing = await db.query.brochureVersions.findMany({
@@ -1,4 +1,4 @@
-import { and, count, desc, eq, ilike, inArray, isNull } from 'drizzle-orm';
+import { and, count, desc, eq, ilike, inArray, isNull, sql } from 'drizzle-orm';
 
 import { db } from '@/lib/db';
 import {
@@ -139,22 +139,29 @@ export async function listClients(portId: string, query: ListClientsInput) {
           ),
         )
         .groupBy(interests.clientId),
-      // Pull every contact row for the page; the per-client primary
-      // resolution happens in the post-fetch loop below. Cheaper than
-      // running a DISTINCT-ON query per channel and keeps the picker
-      // logic (is_primary desc, then most recent created_at) in one
-      // place.
-      db
-        .select({
-          clientId: clientContacts.clientId,
-          channel: clientContacts.channel,
-          value: clientContacts.value,
-          isPrimary: clientContacts.isPrimary,
-          createdAt: clientContacts.createdAt,
-        })
-        .from(clientContacts)
-        .where(inArray(clientContacts.clientId, ids))
-        .orderBy(desc(clientContacts.isPrimary), desc(clientContacts.createdAt)),
+      // Pull at most ONE contact per (client_id, channel) for the page.
+      // DISTINCT ON sorted by `is_primary DESC, created_at DESC` keeps
+      // the picker logic identical to the in-memory version it replaced
+      // while bounding the row count to ~2 per client (one email, one
+      // phone) regardless of how many contacts the client has.
+      db.execute<{
+        clientId: string;
+        channel: string;
+        value: string;
+        isPrimary: boolean;
+        createdAt: Date;
+      }>(sql`
+        SELECT DISTINCT ON (client_id, channel)
+          client_id AS "clientId",
+          channel,
+          value,
+          is_primary AS "isPrimary",
+          created_at AS "createdAt"
+        FROM client_contacts
+        WHERE client_id = ANY(${ids})
+          AND channel IN ('email', 'phone')
+        ORDER BY client_id, channel, is_primary DESC, created_at DESC
+      `),
     ],
   );
@@ -172,16 +179,23 @@ export async function listClients(portId: string, query: ListClientsInput) {
     }
   }
 
-  // Pick the per-client primary (or, failing that, most-recent) email
-  // and phone. contactRows is pre-sorted is_primary desc, created_at desc.
+  // Pick the per-client primary email + phone. The SQL DISTINCT ON
+  // returns at most one row per (clientId, channel); the result is
+  // already the picker's "is_primary desc, created_at desc" choice.
   const primaryEmailMap = new Map<string, string>();
   const primaryPhoneMap = new Map<string, string>();
-  for (const c of contactRows) {
-    if (c.channel === 'email' && !primaryEmailMap.has(c.clientId)) {
-      primaryEmailMap.set(c.clientId, c.value);
-    } else if (c.channel === 'phone' && !primaryPhoneMap.has(c.clientId)) {
-      primaryPhoneMap.set(c.clientId, c.value);
-    }
+  type ContactRow = {
+    clientId: string;
+    channel: string;
+    value: string;
+    isPrimary: boolean;
+    createdAt: Date;
+  };
+  const contactRowList: ContactRow[] =
+    (contactRows as { rows?: ContactRow[] }).rows ?? (contactRows as unknown as ContactRow[]);
+  for (const c of contactRowList) {
+    if (c.channel === 'email') primaryEmailMap.set(c.clientId, c.value);
+    else if (c.channel === 'phone') primaryPhoneMap.set(c.clientId, c.value);
   }
 
   return {
@@ -280,8 +280,17 @@ async function streamAttachmentOrLink(
     expirySeconds: 24 * 60 * 60,
     filename: attachment.fileName,
   });
+  // HTML-escape the filename: brochure filenames are admin-supplied and
+  // could in theory carry markup (e.g. `"><script>...`). Even a benign
+  // ampersand or angle bracket would break the rendered link otherwise.
+  const safeFileName = attachment.fileName
+    .replace(/&/g, '&amp;')
+    .replace(/</g, '&lt;')
+    .replace(/>/g, '&gt;')
+    .replace(/"/g, '&quot;')
+    .replace(/'/g, '&#39;');
   const html = `<p>The file is large enough that we're sending it as a download link rather than an attachment:</p>
-<p><a href="${url}" target="_blank" rel="noopener noreferrer">Download ${attachment.fileName}</a> (link expires in 24 hours)</p>`;
+<p><a href="${url}" target="_blank" rel="noopener noreferrer">Download ${safeFileName}</a> (link expires in 24 hours)</p>`;
   return { deliveredAsAttachment: false, bodySuffixHtml: html };
 }
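Behavior sketch for the escape chain above ('&' must be replaced first, otherwise the '&' inside the entities introduced by the later replacements would get double-escaped; the filename is illustrative):

const fileName = 'Port "North" <v2> & final.pdf';
// After the replace chain:
// Port &quot;North&quot; &lt;v2&gt; &amp; final.pdf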
@@ -16,7 +16,7 @@
  * - is_in_eoi_bundle : covered by the interest's EOI signature.
  */
 
-import { and, desc, eq, sql } from 'drizzle-orm';
+import { and, desc, eq, inArray } from 'drizzle-orm';
 
 import { db } from '@/lib/db';
 import { interestBerths, type InterestBerth } from '@/lib/db/schema/interests';
@@ -83,7 +83,7 @@ export async function getPrimaryBerthsForInterests(
     })
     .from(interestBerths)
     .innerJoin(berths, eq(berths.id, interestBerths.berthId))
-    .where(sql`${interestBerths.interestId} = ANY(${interestIds})`)
+    .where(inArray(interestBerths.interestId, interestIds))
     .orderBy(desc(interestBerths.isPrimary), desc(interestBerths.addedAt));
 
   const out = new Map<string, PrimaryBerthRef>();
@@ -126,7 +126,11 @@ export function verifyProxyToken(
     return { ok: false, reason: 'malformed-payload' };
   }
 
-  if (typeof payload.e !== 'number' || payload.e * 1000 < Date.now()) {
+  // `Number.isFinite` catches NaN / ±Infinity that a tampered token could
+  // otherwise smuggle past the `< Date.now()` comparison (NaN compares
+  // false against any number, which would treat the token as eternally
+  // valid). Reject non-finite expiries outright.
+  if (!Number.isFinite(payload.e) || payload.e * 1000 < Date.now()) {
     return { ok: false, reason: 'expired' };
   }
   try {
@@ -279,8 +283,12 @@ export class FilesystemBackend implements StorageBackend {
       { k: key, e: expiresAtSec, n: randomUUID(), f: opts.filename, c: opts.contentType },
       this.hmacSecret,
     );
+    // ABSOLUTE URL: send-out emails interpolate this verbatim into the
+    // recipient's inbox. A relative path is unreachable from a mail
+    // client. APP_URL strips any trailing slash to keep the join clean.
+    const origin = env.APP_URL.replace(/\/$/, '');
     return {
-      url: `/api/storage/${token}`,
+      url: `${origin}/api/storage/${token}`,
       expiresAt: new Date(expiresAtSec * 1000),
     };
   }
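Join behavior of the new absolute URL, as a sketch (the APP_URL value is illustrative):

const origin = 'https://crm.example.com/'.replace(/\/$/, ''); // trailing slash stripped
const url = `${origin}/api/storage/${token}`;
// 'https://crm.example.com/api/storage/<token>': clickable from any mail
// client, unlike the previous relative '/api/storage/<token>'.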