feat(website-intake): dual-write endpoint + migration chain repair

Adds website_submissions table + shared-secret POST endpoint so the
marketing site can dual-write inquiries alongside its NocoDB write.
Race-safe via INSERT ... ON CONFLICT, idempotent on submission_id,
refuses every request when WEBSITE_INTAKE_SECRET is unset. Also
repairs pre-existing 0020/0021/0022 prevId collision (renumbered +
journal re-sorted) so db:generate works again. 11 unit tests.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Matt Ciaccio
2026-05-04 22:52:33 +02:00
parent c612bbdfd9
commit 49d34e00c8
16 changed files with 11556 additions and 28 deletions

View File

@@ -0,0 +1,177 @@
import { NextRequest, NextResponse } from 'next/server';
import { createHash, timingSafeEqual } from 'node:crypto';
import { z } from 'zod';
import { eq } from 'drizzle-orm';
import { db } from '@/lib/db';
import { ports } from '@/lib/db/schema/ports';
import { websiteSubmissions } from '@/lib/db/schema/website-submissions';
import { env } from '@/lib/env';
import { logger } from '@/lib/logger';
import { checkRateLimit, rateLimiters } from '@/lib/rate-limit';
/**
* POST /api/public/website-inquiries
*
* Capture endpoint for the marketing website's dual-write. The website
* server (`/server/api/register.ts`, `/server/api/contact.ts`) calls this
* AFTER its existing NocoDB write succeeds, sending the same payload as a
* server-to-server fire-and-forget POST. The CRM stores the raw payload
* in `website_submissions` for later analysis / promotion to entities.
*
* Auth: shared-secret in `X-Webhook-Secret` header, timing-safe compared
* against `WEBSITE_INTAKE_SECRET`. If the env var is unset on this
* instance, the endpoint refuses every request with 503 - the correct
* posture for dev/staging that hasn't been wired up yet.
*
* Idempotency: payload carries a `submission_id` UUID. The unique index
* on `website_submissions.submission_id` makes redelivery a no-op; the
* handler returns 200 + the existing record's id instead of erroring.
*
* No emails / no `interests` rows are created here. The endpoint's job is
* pure data capture. A separate "promote" step (future) will turn captured
* submissions into proper `clients` + `interests` rows once we trust the
* pipeline.
*/
/** Submission categories the website can dual-write. */
const SUBMISSION_KINDS = ['berth_inquiry', 'residence_inquiry', 'contact_form'] as const;

/**
 * Validates the dual-write body the marketing website POSTs to this
 * endpoint. Anything that fails this schema is rejected with a 400.
 */
const SubmissionSchema = z.object({
  // UUID minted by the website per submission — the idempotency key.
  submission_id: z.string().uuid(),
  kind: z.enum(SUBMISSION_KINDS),
  // Raw form payload, stored verbatim for later promotion to entities.
  payload: z.record(z.unknown()),
  legacy_nocodb_id: z.string().optional(),
  // port-nimara is currently the only port with a public marketing site,
  // hence the default; future ports can override per-submission.
  port_slug: z.string().default('port-nimara'),
});
/**
 * Constant-time check of the caller-supplied `X-Webhook-Secret` header
 * against the configured `WEBSITE_INTAKE_SECRET`.
 *
 * Both values are reduced to fixed-length SHA-256 digests before the
 * compare, so `timingSafeEqual` always receives equal-length (32-byte)
 * buffers regardless of input lengths. This replaces the previous manual
 * pad/concat/subarray scheme and its trailing length-equality check:
 * digesting makes the comparison cost independent of either string's
 * length and leaves no non-constant-time step in the path.
 *
 * @param header - raw header value, or null when the header was absent.
 * @returns true only when a secret is configured AND the header matches it.
 */
function verifySecret(header: string | null): boolean {
  const expected = env.WEBSITE_INTAKE_SECRET;
  // Unset secret or missing header can never authenticate.
  if (!expected || !header) return false;
  const headerDigest = createHash('sha256').update(header).digest();
  const expectedDigest = createHash('sha256').update(expected).digest();
  return timingSafeEqual(headerDigest, expectedDigest);
}
export async function POST(req: NextRequest) {
// Refuse outright if the CRM hasn't been wired up - safer than letting
// unauthenticated traffic in just because the env var was forgotten.
if (!env.WEBSITE_INTAKE_SECRET) {
return NextResponse.json(
{ error: 'Website intake is not configured on this server.' },
{ status: 503 },
);
}
// Auth gate - shared secret in header, timing-safe compare.
const secretHeader = req.headers.get('x-webhook-secret');
if (!verifySecret(secretHeader)) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 });
}
// Rate limit. All website-side traffic shares the website's egress IP,
// so we use a dedicated bucket sized to accommodate normal traffic
// (500/hr) rather than the 5/hr publicForm bucket meant for individual
// human submissions. The shared-secret header is the real abuse
// boundary; this limiter is just a backstop if the secret ever leaks.
const ip = req.headers.get('x-forwarded-for')?.split(',')[0]?.trim() ?? 'unknown';
const rl = await checkRateLimit(ip, rateLimiters.websiteIntake);
if (!rl.allowed) {
const retryAfter = Math.max(1, Math.ceil((rl.resetAt - Date.now()) / 1000));
return NextResponse.json(
{ error: 'Rate limit exceeded' },
{ status: 429, headers: { 'Retry-After': String(retryAfter) } },
);
}
// Parse + validate body. Reject anything that doesn't conform — the
// website is a known caller; a malformed payload signals tampering.
let parsed;
try {
const body = await req.json();
parsed = SubmissionSchema.parse(body);
} catch (err) {
return NextResponse.json(
{ error: 'Invalid payload', details: err instanceof Error ? err.message : 'parse error' },
{ status: 400 },
);
}
// Resolve port. We require the slug to exist; can't capture submissions
// for a port the CRM doesn't know about.
const [port] = await db
.select({ id: ports.id })
.from(ports)
.where(eq(ports.slug, parsed.port_slug))
.limit(1);
if (!port) {
// Don't echo the input slug back in the error - generic message is
// sufficient and avoids the input-reflection pattern that complicates
// log-injection / audit reviews. The slug is logged server-side
// for debugging.
logger.warn(
{ portSlug: parsed.port_slug, submissionId: parsed.submission_id },
'website-inquiry rejected: unknown port',
);
return NextResponse.json({ error: 'Unknown port' }, { status: 400 });
}
// Idempotent insert. Two parallel requests carrying the same submission_id
// could both pass any pre-check, so we don't pre-check at all - the unique
// index on submission_id is the source of truth, and `onConflictDoNothing`
// keeps the second request's INSERT from raising 23505. When the conflict
// hits, `returning()` yields zero rows and we look up the existing row to
// return its id, mirroring the first-delivery shape so the website never
// sees a difference between fresh and dup.
const insertResult = await db
.insert(websiteSubmissions)
.values({
portId: port.id,
submissionId: parsed.submission_id,
kind: parsed.kind,
payload: parsed.payload,
legacyNocodbId: parsed.legacy_nocodb_id ?? null,
sourceIp: ip,
userAgent: req.headers.get('user-agent') ?? null,
})
.onConflictDoNothing({ target: websiteSubmissions.submissionId })
.returning({ id: websiteSubmissions.id });
if (insertResult[0]) {
logger.info(
{
submissionId: parsed.submission_id,
kind: parsed.kind,
portSlug: parsed.port_slug,
legacyNocodbId: parsed.legacy_nocodb_id,
},
'website inquiry captured',
);
return NextResponse.json({ id: insertResult[0].id, deduped: false });
}
// Conflict path: row already exists. Fetch its id so the response shape
// stays identical regardless of which request "won" the race.
const existing = await db
.select({ id: websiteSubmissions.id })
.from(websiteSubmissions)
.where(eq(websiteSubmissions.submissionId, parsed.submission_id))
.limit(1);
if (existing[0]) {
return NextResponse.json({ id: existing[0].id, deduped: true });
}
// Should be unreachable - the conflict means a row exists, so the lookup
// above should always find it. If it doesn't (e.g. simultaneous DELETE),
// surface a 500 explicitly rather than silently 200ing a missing id.
logger.error(
{ submissionId: parsed.submission_id },
'website-inquiry conflict but row not found on lookup',
);
return NextResponse.json({ error: 'Insert failed' }, { status: 500 });
}