feat(audit): wider coverage — sensitive views, cron, jobs, portal abuse

Builds on the audit infra split (severity/source) by emitting events
from every place a security or operations review would want to see:

Sensitive data views (severity=warning):
- GDPR export download URL issued
- Audit log page opened (watch-the-watchers; first page only)
- CSV export of expenses
- Webhook secret regenerated

Authentication abuse (severity=warning, source=auth):
- Portal sign-in: success + failed-credentials + portal-disabled
- Portal password reset: unknown email + portal-disabled + bad token
- Portal activation: bad/expired token

Inbound webhook hardening:
- Documenso webhook requests with an invalid X-Documenso-Secret now
  write a webhook_failed audit event in addition to the existing warn
  log (previously they were only logged, leaving no audit trail)

Background work (source=cron / job):
- New attachWorkerAudit() helper wires every BullMQ worker to emit
  job_failed (severity=error) on .on('failed') and cron_run on
  .on('completed') for any job whose name matches the recurring
  scheduler list. Applied across all 10 workers.

All 1175 vitest tests passing (1175/1175).

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Matt Ciaccio
2026-05-06 20:44:38 +02:00
parent d2171ea79b
commit 9890d065f8
17 changed files with 261 additions and 0 deletions

View File

@@ -8,6 +8,7 @@ import { searchAuditLogs } from '@/lib/services/audit-search.service';
import { db } from '@/lib/db';
import { user } from '@/lib/db/schema/users';
import { errorResponse } from '@/lib/errors';
import { createAuditLog } from '@/lib/audit';
const auditQuerySchema = z.object({
limit: z.coerce.number().int().min(1).max(200).default(50),
@@ -67,6 +68,34 @@ export const GET = withAuth(
actor: r.userId ? (userMap.get(r.userId) ?? null) : null,
}));
// Watch-the-watchers: record that an operator opened the audit log
// page. Only fire on the first page (no cursor) so paginating
// through doesn't spam the log; use 'view' at warning severity so
// the entry stands out in the inspector.
if (!cursor) {
void createAuditLog({
userId: ctx.userId,
portId: ctx.portId,
action: 'view',
entityType: 'audit_log',
entityId: 'list',
metadata: {
filters: {
entityType: query.entityType,
action: query.action,
severity: query.severity,
source: query.source,
userId: query.userId,
entityId: query.entityId,
search: query.search,
},
},
ipAddress: ctx.ipAddress,
userAgent: ctx.userAgent,
severity: 'warning',
});
}
return NextResponse.json({
data,
pagination: {

View File

@@ -3,10 +3,16 @@ import { NextResponse } from 'next/server';
import { withAuth, withPermission, withRateLimit } from '@/lib/api/helpers';
import { errorResponse } from '@/lib/errors';
import { getExportDownloadUrl } from '@/lib/services/gdpr-export.service';
import { createAuditLog } from '@/lib/audit';
/**
* Returns a fresh signed URL for an existing GDPR export. Staff use this
* from the admin UI; the email path embeds its own signed URL.
*
* Every call writes a `view` audit row at 'warning' severity — GDPR
* exports contain the entire personal data of a client and a fresh
* presigned URL would let the operator download it; we want a clear
* trail of who pulled what when.
*/
export const GET = withAuth(
withPermission(
@@ -15,6 +21,19 @@ export const GET = withAuth(
withRateLimit('exports', async (req, ctx, params) => {
try {
const url = await getExportDownloadUrl(params.exportId!, ctx.portId);
void createAuditLog({
userId: ctx.userId,
portId: ctx.portId,
action: 'view',
entityType: 'gdpr_export',
entityId: params.exportId!,
metadata: { clientId: params.id ?? null, urlIssued: true },
ipAddress: ctx.ipAddress,
userAgent: ctx.userAgent,
severity: 'warning',
});
return NextResponse.json({ data: { url } });
} catch (error) {
return errorResponse(error);

View File

@@ -4,6 +4,7 @@ import { withAuth, withPermission } from '@/lib/api/helpers';
import { errorResponse } from '@/lib/errors';
import { exportCsv } from '@/lib/services/expense-export';
import { listExpensesSchema } from '@/lib/validators/expenses';
import { createAuditLog } from '@/lib/audit';
export const POST = withAuth(
withPermission('expenses', 'view', async (req, ctx) => {
@@ -12,6 +13,18 @@ export const POST = withAuth(
const query = listExpensesSchema.parse(body);
const csv = await exportCsv(ctx.portId, query);
void createAuditLog({
userId: ctx.userId,
portId: ctx.portId,
action: 'send',
entityType: 'expense_export',
entityId: 'csv',
metadata: { format: 'csv', filterCount: Object.keys(query).length, byteSize: csv.length },
ipAddress: ctx.ipAddress,
userAgent: ctx.userAgent,
severity: 'warning',
});
return new NextResponse(csv, {
status: 200,
headers: {

View File

@@ -13,6 +13,7 @@ import {
handleDocumentCancelled,
} from '@/lib/services/documents.service';
import { logger } from '@/lib/logger';
import { createAuditLog } from '@/lib/audit';
// BR-024: Dedup via signatureHash unique index on documentEvents
// Always return 200 from webhook (webhook best practice)
@@ -66,6 +67,21 @@ export async function POST(req: NextRequest): Promise<NextResponse> {
}
if (!matched) {
logger.warn({ providedLen: providedSecret.length }, 'Invalid Documenso webhook secret');
void createAuditLog({
userId: null,
portId: null,
action: 'webhook_failed',
entityType: 'webhook_inbound',
entityId: 'documenso',
metadata: {
reason: 'invalid_secret',
providedLen: providedSecret.length,
},
ipAddress: req.headers.get('x-forwarded-for')?.split(',')[0]?.trim() ?? '',
userAgent: req.headers.get('user-agent') ?? '',
severity: 'warning',
source: 'webhook',
});
return NextResponse.json({ ok: false, error: 'Invalid secret' }, { status: 200 });
}