import { Worker, type Job, type ConnectionOptions } from 'bullmq';

import { env } from '@/lib/env';
import { logger } from '@/lib/logger';
import { QUEUE_CONFIGS } from '@/lib/queue';

/**
 * v1 of bulk operations runs synchronously through per-entity bulk
 * endpoints (see `/api/v1/interests/bulk`) — a per-row loop, capped at
 * the page size (100). The synchronous path gives the user instant
 * feedback and a per-row failure list, which the queue can't.
 *
 * This worker remains here for genuinely-async cases (CSV imports,
 * port-wide migrations, bulk emails to >100 recipients) where the
 * caller polls for completion. Currently no producer enqueues to this
 * queue — add producers as those use cases surface.
 */
export const bulkWorker = new Worker(
  'bulk',
  // Placeholder processor: only logs the job. Real handlers go here once
  // producers start enqueueing to the 'bulk' queue (see comment above).
  async (job: Job) => {
    logger.info({ jobId: job.id, jobName: job.name }, 'Processing bulk job');
  },
  {
    // NOTE(review): the `as` cast suggests `url` may not exist on this
    // bullmq version's ConnectionOptions — if it does, prefer a typed
    // declaration (`const connection: ConnectionOptions = …`) over `as`.
    connection: { url: env.REDIS_URL } as ConnectionOptions,
    concurrency: QUEUE_CONFIGS.bulk.concurrency,
  },
);

// Per-job failure. `job` can be undefined when the failure is not tied to
// a specific job (e.g. lock renewal loss), hence the optional chaining.
bulkWorker.on('failed', (job, err) => {
  logger.error({ jobId: job?.id, jobName: job?.name, err }, 'Bulk job failed');
});

// Worker-level errors (e.g. Redis connectivity). BullMQ recommends always
// attaching an 'error' listener: a Node 'error' event with no listener is
// raised as an uncaught exception and crashes the process.
bulkWorker.on('error', (err) => {
  logger.error({ err }, 'Bulk worker error');
});