Files
pn-new-crm/tests/integration/dedup/client-merge.test.ts
Matt Ciaccio 6e3d910c76 refactor(interests): migrate callers to interest_berths junction + drop berth_id
Phase 2b of the berth-recommender refactor (plan §3.4). Every caller of
the legacy `interests.berth_id` column now reads and writes through the
`interest_berths` junction via the helper service introduced in Phase 2a;
the column itself is dropped in a final migration.

Service-layer changes
- interests.service: the `?berthId=X` filter becomes an EXISTS against the
  junction; list enrichment uses `getPrimaryBerthsForInterests`; create/update/
  linkBerth/unlinkBerth all dispatch through the junction helpers, with
  createInterest's row insert + junction write sharing a single transaction
  (see the sketch after this list).
- clients / dashboard / report-generators / search: leftJoin chains pivot
  through `interest_berths` filtered by `is_primary=true`.
- eoi-context / document-templates / berth-rules-engine / portal /
  record-export / queue worker: read primary via `getPrimaryBerth(...)`.
- interest-scoring: `berthLinked` is now derived from whether any junction row
  exists for the interest (also covered in the sketch below).
- dedup/migration-apply + public interest route: write a primary junction
  row alongside the interest insert when a berth is provided.
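
For reference, a minimal sketch of the junction-backed read patterns described
above, assuming Drizzle's query builder, string ids, and only the tables and
columns visible in the test below; the real Phase 2a helpers
(`getPrimaryBerthsForInterests`, `getPrimaryBerth`) wrap this and may be shaped
differently:

// Hypothetical sketch only; everything beyond interests/interestBerths is illustrative.
import { and, eq, exists } from 'drizzle-orm';
import { db } from '@/lib/db';
import { interests, interestBerths } from '@/lib/db/schema/interests';

// `?berthId=X` filter: EXISTS against the junction instead of the dropped interests.berth_id.
export function listInterestsForBerth(berthId: string) {
  return db
    .select()
    .from(interests)
    .where(
      exists(
        db
          .select({ id: interestBerths.interestId })
          .from(interestBerths)
          .where(
            and(
              eq(interestBerths.interestId, interests.id),
              eq(interestBerths.berthId, berthId),
            ),
          ),
      ),
    );
}

// berthLinked-style check: any junction row at all counts as linked.
export async function isBerthLinked(interestId: string) {
  const rows = await db
    .select({ id: interestBerths.berthId })
    .from(interestBerths)
    .where(eq(interestBerths.interestId, interestId))
    .limit(1);
  return rows.length > 0;
}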

API contract preserved: list/detail responses still emit `berthId` and
`berthMooringNumber`, derived from the primary junction row, so frontend
consumers (interest-form, interest-detail-header) need no changes.
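
Illustration of how the derived fields can be kept on list queries; the
`berths` import path and the `mooringNumber` column are assumptions made for
the sketch, not things this commit confirms:

import { and, eq, getTableColumns } from 'drizzle-orm';
import { db } from '@/lib/db';
import { interests, interestBerths } from '@/lib/db/schema/interests';
import { berths } from '@/lib/db/schema/berths'; // assumed path

// List rows still carry berthId / berthMooringNumber, now derived from the
// primary junction row (both null when no berth is linked).
export function listInterestRows(portId: string) {
  return db
    .select({
      ...getTableColumns(interests),
      berthId: interestBerths.berthId,
      berthMooringNumber: berths.mooringNumber, // column name assumed
    })
    .from(interests)
    .leftJoin(
      interestBerths,
      and(eq(interestBerths.interestId, interests.id), eq(interestBerths.isPrimary, true)),
    )
    .leftJoin(berths, eq(berths.id, interestBerths.berthId))
    .where(eq(interests.portId, portId));
}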

Schema + migration
- Drop `interestsRelations.berth` and `idx_interests_berth`.
- Replace `berthsRelations.interests` with `interestBerths` (sketched after
  this list).
- Migration 0029_puzzling_romulus drops `interests.berth_id` + the index.
- Tests that previously inserted `interests.berthId` now seed a primary
  junction row alongside the interest.
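
A rough sketch of that relation swap (relation shape and import paths are
guesses, not verified against the schema files):

import { relations } from 'drizzle-orm';
import { berths } from '@/lib/db/schema/berths'; // assumed path
import { interestBerths } from '@/lib/db/schema/interests';

// Previously berthsRelations exposed `interests: many(interests)` via interests.berth_id;
// the berth side of the relationship now goes through the junction table.
export const berthsRelations = relations(berths, ({ many }) => ({
  interestBerths: many(interestBerths),
}));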

Verified: vitest, 995 tests passing (1 unrelated pre-existing flake in
maintenance-cleanup.test.ts); tsc clean.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-05 02:41:52 +02:00

192 lines
6.7 KiB
TypeScript

/**
 * Client merge service — end-to-end integration test.
 *
 * Spins up two real clients in a real port via the factory helpers,
 * attaches a few satellites (interest, contact, note),
 * merges them, and asserts everything survived in the right place
 * with the merge log written.
 */
import { describe, expect, it } from 'vitest';
import { eq } from 'drizzle-orm';
import { db } from '@/lib/db';
import { clients, clientContacts, clientNotes, clientMergeLog } from '@/lib/db/schema/clients';
import { interests, interestBerths } from '@/lib/db/schema/interests';
import { mergeClients } from '@/lib/services/client-merge.service';
import { makeClient, makePort, makeBerth } from '../../helpers/factories';

describe('mergeClients', () => {
  it('moves interests and contacts from loser to winner; archives loser; writes merge log', async () => {
    const port = await makePort();
    const winner = await makeClient({
      portId: port.id,
      overrides: { fullName: 'Marcus Laurent' },
    });
    const loser = await makeClient({
      portId: port.id,
      overrides: { fullName: 'Marcus Laurent (dup)' },
    });

    // Attach contact + interest to loser
    await db.insert(clientContacts).values({
      clientId: loser.id,
      channel: 'email',
      value: 'marcus@example.com',
      isPrimary: true,
    });
    await db.insert(clientNotes).values({
      clientId: loser.id,
      authorId: 'test-user',
      content: 'Loser-side note',
    });

    const berth = await makeBerth({ portId: port.id });
    const [legacyInterest] = await db
      .insert(interests)
      .values({
        portId: port.id,
        clientId: loser.id,
        pipelineStage: 'open',
        leadCategory: 'general_interest',
      })
      .returning();
    await db.insert(interestBerths).values({
      interestId: legacyInterest!.id,
      berthId: berth.id,
      isPrimary: true,
      isSpecificInterest: true,
    });

    // ── Merge ─────────────────────────────────────────────────────────────
    const result = await mergeClients({
      winnerId: winner.id,
      loserId: loser.id,
      mergedBy: 'test-user',
    });
    expect(result.movedRows.interests).toBe(1);
    expect(result.movedRows.contacts).toBe(1);
    expect(result.movedRows.notes).toBe(1);

    // ── Loser should be archived with mergedIntoClientId set ──────────────
    const [archivedLoser] = await db.select().from(clients).where(eq(clients.id, loser.id));
    expect(archivedLoser?.archivedAt).not.toBeNull();
    expect(archivedLoser?.mergedIntoClientId).toBe(winner.id);

    // ── All loser-side rows now point at the winner ───────────────────────
    const winnerInterests = await db
      .select()
      .from(interests)
      .where(eq(interests.clientId, winner.id));
    expect(winnerInterests).toHaveLength(1);

    const winnerContacts = await db
      .select()
      .from(clientContacts)
      .where(eq(clientContacts.clientId, winner.id));
    expect(winnerContacts.find((c) => c.value === 'marcus@example.com')).toBeDefined();

    const winnerNotes = await db
      .select()
      .from(clientNotes)
      .where(eq(clientNotes.clientId, winner.id));
    expect(winnerNotes.find((n) => n.content === 'Loser-side note')).toBeDefined();

    // ── Merge log row exists with snapshot ────────────────────────────────
    const [log] = await db
      .select()
      .from(clientMergeLog)
      .where(eq(clientMergeLog.id, result.mergeLogId));
    expect(log?.survivingClientId).toBe(winner.id);
    expect(log?.mergedClientId).toBe(loser.id);
    expect(log?.mergedBy).toBe('test-user');
    expect(log?.mergeDetails).toBeDefined();
  });

  it('refuses to merge a client into itself', async () => {
    const port = await makePort();
    const c = await makeClient({ portId: port.id });
    await expect(mergeClients({ winnerId: c.id, loserId: c.id, mergedBy: 'u' })).rejects.toThrow(
      /itself/i,
    );
  });

  it('refuses to merge across different ports', async () => {
    const portA = await makePort();
    const portB = await makePort();
    const a = await makeClient({ portId: portA.id });
    const b = await makeClient({ portId: portB.id });
    await expect(mergeClients({ winnerId: a.id, loserId: b.id, mergedBy: 'u' })).rejects.toThrow(
      /different ports/i,
    );
  });

  it('refuses to merge a client that has already been merged', async () => {
    const port = await makePort();
    const winner = await makeClient({ portId: port.id });
    const loser = await makeClient({ portId: port.id });
    // First merge succeeds.
    await mergeClients({ winnerId: winner.id, loserId: loser.id, mergedBy: 'u' });
    // Second merge of the same loser should refuse.
    const winner2 = await makeClient({ portId: port.id });
    await expect(
      mergeClients({ winnerId: winner2.id, loserId: loser.id, mergedBy: 'u' }),
    ).rejects.toThrow(/already merged/i);
  });

  it('drops duplicate contact rows during reattach', async () => {
    const port = await makePort();
    const winner = await makeClient({ portId: port.id });
    const loser = await makeClient({ portId: port.id });
    // Both have the same email contact.
    await db.insert(clientContacts).values({
      clientId: winner.id,
      channel: 'email',
      value: 'same@example.com',
      isPrimary: true,
    });
    await db.insert(clientContacts).values({
      clientId: loser.id,
      channel: 'email',
      value: 'same@example.com',
      isPrimary: true,
    });

    const result = await mergeClients({
      winnerId: winner.id,
      loserId: loser.id,
      mergedBy: 'u',
    });
    expect(result.movedRows.contacts).toBe(0); // duplicate dropped

    const winnerEmails = await db
      .select()
      .from(clientContacts)
      .where(eq(clientContacts.clientId, winner.id));
    // Winner kept exactly one copy of the shared email.
    expect(winnerEmails.filter((c) => c.value === 'same@example.com')).toHaveLength(1);
  });

  it('applies fieldChoices to copy loser values onto the winner', async () => {
    const port = await makePort();
    const winner = await makeClient({
      portId: port.id,
      overrides: { fullName: 'Marcus L.' },
    });
    const loser = await makeClient({
      portId: port.id,
      overrides: { fullName: 'Marcus Laurent' },
    });

    await mergeClients({
      winnerId: winner.id,
      loserId: loser.id,
      mergedBy: 'u',
      fieldChoices: { fullName: 'loser' },
    });

    const [updatedWinner] = await db.select().from(clients).where(eq(clients.id, winner.id));
    expect(updatedWinner?.fullName).toBe('Marcus Laurent');
  });
});