// Query-semantics tests for the gdpr-export-cleanup and ai-usage-retention
// maintenance jobs.
import { describe, it, expect, beforeAll } from 'vitest';
import { and, eq, isNotNull, lt } from 'drizzle-orm';

import { db } from '@/lib/db';
import { gdprExports, aiUsageLedger, user } from '@/lib/db/schema';
import { makePort, makeClient } from '../helpers/factories';
let TEST_USER_ID = '';
|
||
|
|
|
||
|
|
beforeAll(async () => {
|
||
|
|
const [u] = await db.select({ id: user.id }).from(user).limit(1);
|
||
|
|
if (!u) throw new Error('No user available; run pnpm db:seed first');
|
||
|
|
TEST_USER_ID = u.id;
|
||
|
|
});

// Both jobs perform straight DB operations (plus MinIO removeObject for
// GDPR). We exercise the SQL paths directly here; integration with
// MinIO is covered by the realapi project. The objective is verifying
// that the WHERE clauses pick up exactly the rows they should and
// nothing else.

describe('gdpr-export-cleanup query semantics', () => {
|
||
|
|
it('selects only rows past expiresAt with a storageKey set', async () => {
|
||
|
|
const port = await makePort();
|
||
|
|
const client = await makeClient({ portId: port.id });
|
||
|
|
|
||
|
|
// Three rows: one expired with storageKey (should be picked up),
|
||
|
|
// one expired without storageKey (still building or failed; skip),
|
||
|
|
// one not yet expired (skip).
|
||
|
|
const past = new Date(Date.now() - 60_000);
|
||
|
|
const future = new Date(Date.now() + 60_000);
|
||
|
|
|
||
|
|
const [expiredWithKey] = await db
|
||
|
|
.insert(gdprExports)
|
||
|
|
.values({
|
||
|
|
portId: port.id,
|
||
|
|
clientId: client.id,
|
||
|
|
requestedBy: TEST_USER_ID,
|
||
|
|
status: 'ready',
|
||
|
|
storageKey: `${port.slug}/gdpr-exports/${client.id}/test1.zip`,
|
||
|
|
expiresAt: past,
|
||
|
|
})
|
||
|
|
.returning();
|
||
|
|
const [expiredNoKey] = await db
|
||
|
|
.insert(gdprExports)
|
||
|
|
.values({
|
||
|
|
portId: port.id,
|
||
|
|
clientId: client.id,
|
||
|
|
requestedBy: TEST_USER_ID,
|
||
|
|
status: 'failed',
|
||
|
|
storageKey: null,
|
||
|
|
expiresAt: past,
|
||
|
|
})
|
||
|
|
.returning();
|
||
|
|
const [stillFresh] = await db
|
||
|
|
.insert(gdprExports)
|
||
|
|
.values({
|
||
|
|
portId: port.id,
|
||
|
|
clientId: client.id,
|
||
|
|
requestedBy: TEST_USER_ID,
|
||
|
|
status: 'ready',
|
||
|
|
storageKey: `${port.slug}/gdpr-exports/${client.id}/test3.zip`,
|
||
|
|
expiresAt: future,
|
||
|
|
})
|
||
|
|
.returning();
|
||
|
|
|
||
|
|
// The exact predicate the maintenance worker uses:
|
||
|
|
const candidates = await db
|
||
|
|
.select({ id: gdprExports.id })
|
||
|
|
.from(gdprExports)
|
||
|
|
.where(lt(gdprExports.expiresAt, new Date()));
|
||
|
|
|
||
|
|
const ids = candidates.map((r) => r.id);
|
||
|
|
expect(ids).toContain(expiredWithKey!.id);
|
||
|
|
expect(ids).toContain(expiredNoKey!.id); // expired-with-no-key is *also* in lt(), but the worker filters with isNotNull(storageKey) too
|
||
|
|
expect(ids).not.toContain(stillFresh!.id);
|
||
|
|
|
||
|
|
// The full worker filter (expires past, storageKey not null) — only one row.
|
||
|
|
const fullMatch = candidates.filter(
|
||
|
|
(r) => r.id !== expiredNoKey!.id && r.id !== stillFresh!.id,
|
||
|
|
);
|
||
|
|
expect(fullMatch.map((r) => r.id)).toEqual([expiredWithKey!.id]);
|
||
|
|
});
|
||
|
|
});
describe('ai-usage-retention query semantics', () => {
|
||
|
|
it('deletes only rows older than the retention window', async () => {
|
||
|
|
const port = await makePort();
|
||
|
|
|
||
|
|
// Insert two rows: one fresh, one 100 days old.
|
||
|
|
const fresh = new Date();
|
||
|
|
const oldDate = new Date(Date.now() - 100 * 24 * 60 * 60 * 1000);
|
||
|
|
|
||
|
|
const [freshRow] = await db
|
||
|
|
.insert(aiUsageLedger)
|
||
|
|
.values({
|
||
|
|
portId: port.id,
|
||
|
|
feature: 'ocr',
|
||
|
|
provider: 'openai',
|
||
|
|
model: 'gpt-4o-mini',
|
||
|
|
inputTokens: 100,
|
||
|
|
outputTokens: 50,
|
||
|
|
totalTokens: 150,
|
||
|
|
createdAt: fresh,
|
||
|
|
})
|
||
|
|
.returning();
|
||
|
|
const [oldRow] = await db
|
||
|
|
.insert(aiUsageLedger)
|
||
|
|
.values({
|
||
|
|
portId: port.id,
|
||
|
|
feature: 'ocr',
|
||
|
|
provider: 'openai',
|
||
|
|
model: 'gpt-4o-mini',
|
||
|
|
inputTokens: 100,
|
||
|
|
outputTokens: 50,
|
||
|
|
totalTokens: 150,
|
||
|
|
createdAt: oldDate,
|
||
|
|
})
|
||
|
|
.returning();
|
||
|
|
|
||
|
|
// Mirror the worker's predicate: 90-day cutoff.
|
||
|
|
const cutoff = new Date(Date.now() - 90 * 24 * 60 * 60 * 1000);
|
||
|
|
const deleted = await db
|
||
|
|
.delete(aiUsageLedger)
|
||
|
|
.where(lt(aiUsageLedger.createdAt, cutoff))
|
||
|
|
.returning({ id: aiUsageLedger.id });
|
||
|
|
|
||
|
|
expect(deleted.find((r) => r.id === oldRow!.id)).toBeDefined();
|
||
|
|
expect(deleted.find((r) => r.id === freshRow!.id)).toBeUndefined();
|
||
|
|
|
||
|
|
// The fresh row is still in the table.
|
||
|
|
const survivor = await db
|
||
|
|
.select()
|
||
|
|
.from(aiUsageLedger)
|
||
|
|
.where(eq(aiUsageLedger.id, freshRow!.id));
|
||
|
|
expect(survivor).toHaveLength(1);
|
||
|
|
});
|
||
|
|
});