Pass tag confidence scores to AI assignment for weighted matching

The AI assignment path was receiving project tags as flat strings, losing
the confidence scores from AI tagging. Now both the GPT path and the
fallback algorithm weight tag matches by confidence — a 0.9 tag matters
more than a 0.5 one.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Matt 2026-02-17 09:29:46 +01:00
parent fc8e58f985
commit a921731c52
3 changed files with 46 additions and 10 deletions

View File

@ -74,10 +74,22 @@ async function runAIAssignmentJob(jobId: string, roundId: string, userId: string
description: true, description: true,
tags: true, tags: true,
teamName: true, teamName: true,
projectTags: {
select: { tag: { select: { name: true } }, confidence: true },
},
_count: { select: { assignments: { where: { roundId } } } }, _count: { select: { assignments: { where: { roundId } } } },
}, },
}) })
// Enrich projects with tag confidence data for AI matching
const projectsWithConfidence = projects.map((p) => ({
...p,
tagConfidences: p.projectTags.map((pt) => ({
name: pt.tag.name,
confidence: pt.confidence,
})),
}))
const existingAssignments = await prisma.assignment.findMany({ const existingAssignments = await prisma.assignment.findMany({
where: { roundId }, where: { roundId },
select: { userId: true, projectId: true }, select: { userId: true, projectId: true },
@ -124,7 +136,7 @@ async function runAIAssignmentJob(jobId: string, roundId: string, userId: string
const result = await generateAIAssignments( const result = await generateAIAssignments(
jurors, jurors,
projects, projectsWithConfidence,
constraints, constraints,
userId, userId,
roundId, roundId,

View File

@ -38,7 +38,7 @@ const ASSIGNMENT_SYSTEM_PROMPT = `You are an expert jury assignment optimizer fo
Match jurors to projects based on expertise alignment, workload balance, and coverage requirements. Match jurors to projects based on expertise alignment, workload balance, and coverage requirements.
## Matching Criteria (Weighted) ## Matching Criteria (Weighted)
- Expertise Match (50%): How well juror tags/expertise align with project topics - Expertise Match (50%): How well juror tags/expertise align with project topics. Project tags include a confidence score (0-1); weight higher-confidence tags more heavily, as they are more reliably assigned. A tag with confidence 0.9 is a strong signal; one with 0.5 is uncertain.
- Workload Balance (30%): Distribute assignments evenly; prefer jurors below capacity - Workload Balance (30%): Distribute assignments evenly; prefer jurors below capacity
- Minimum Target (20%): Prioritize jurors who haven't reached their minimum assignment count - Minimum Target (20%): Prioritize jurors who haven't reached their minimum assignment count
@ -99,6 +99,7 @@ interface ProjectForAssignment {
title: string title: string
description?: string | null description?: string | null
tags: string[] tags: string[]
tagConfidences?: Array<{ name: string; confidence: number }>
teamName?: string | null teamName?: string | null
_count?: { _count?: {
assignments: number assignments: number
@ -539,7 +540,7 @@ export function generateFallbackAssignments(
return { return {
juror, juror,
score: calculateExpertiseScore(juror.expertiseTags, project.tags), score: calculateExpertiseScore(juror.expertiseTags, project.tags, project.tagConfidences),
loadScore: calculateLoadScore(currentLoad, maxLoad), loadScore: calculateLoadScore(currentLoad, maxLoad),
underMinBonus: calculateUnderMinBonus(currentLoad, minTarget), underMinBonus: calculateUnderMinBonus(currentLoad, minTarget),
} }
@ -586,24 +587,44 @@ export function generateFallbackAssignments(
/** /**
* Calculate expertise match score based on tag overlap * Calculate expertise match score based on tag overlap
* When tagConfidences are available, weights matches by confidence
*/ */
function calculateExpertiseScore( function calculateExpertiseScore(
jurorTags: string[], jurorTags: string[],
projectTags: string[] projectTags: string[],
tagConfidences?: Array<{ name: string; confidence: number }>
): number { ): number {
if (jurorTags.length === 0 || projectTags.length === 0) { if (jurorTags.length === 0 || projectTags.length === 0) {
return 0.5 // Neutral score if no tags return 0.5 // Neutral score if no tags
} }
const jurorTagsLower = new Set(jurorTags.map((t) => t.toLowerCase())) const jurorTagsLower = new Set(jurorTags.map((t) => t.toLowerCase()))
// If we have confidence data, use weighted scoring
if (tagConfidences && tagConfidences.length > 0) {
let weightedMatches = 0
let totalWeight = 0
for (const tc of tagConfidences) {
totalWeight += tc.confidence
if (jurorTagsLower.has(tc.name.toLowerCase())) {
weightedMatches += tc.confidence
}
}
if (totalWeight === 0) return 0.5
const weightedRatio = weightedMatches / totalWeight
const hasExpertise = weightedMatches > 0 ? 0.2 : 0
return Math.min(1, weightedRatio * 0.8 + hasExpertise)
}
// Fallback: unweighted matching using flat tags
const matchingTags = projectTags.filter((t) => const matchingTags = projectTags.filter((t) =>
jurorTagsLower.has(t.toLowerCase()) jurorTagsLower.has(t.toLowerCase())
) )
// Score based on percentage of project tags matched
const matchRatio = matchingTags.length / projectTags.length const matchRatio = matchingTags.length / projectTags.length
// Boost for having expertise, even if not all match
const hasExpertise = matchingTags.length > 0 ? 0.2 : 0 const hasExpertise = matchingTags.length > 0 ? 0.2 : 0
return Math.min(1, matchRatio * 0.8 + hasExpertise) return Math.min(1, matchRatio * 0.8 + hasExpertise)

View File

@ -52,7 +52,7 @@ export interface AnonymizedProject {
anonymousId: string anonymousId: string
title: string title: string
description: string | null description: string | null
tags: string[] tags: Array<{ name: string; confidence: number }>
teamName: string | null teamName: string | null
} }
@ -209,6 +209,7 @@ interface ProjectInput {
title: string title: string
description?: string | null description?: string | null
tags: string[] tags: string[]
tagConfidences?: Array<{ name: string; confidence: number }>
teamName?: string | null teamName?: string | null
} }
@ -253,7 +254,9 @@ export function anonymizeForAI(
description: project.description description: project.description
? truncateAndSanitize(project.description, DESCRIPTION_LIMITS.ASSIGNMENT) ? truncateAndSanitize(project.description, DESCRIPTION_LIMITS.ASSIGNMENT)
: null, : null,
tags: project.tags, tags: project.tagConfidences && project.tagConfidences.length > 0
? project.tagConfidences
: project.tags.map((t) => ({ name: t, confidence: 1.0 })),
teamName: project.teamName ? `Team ${index + 1}` : null, teamName: project.teamName ? `Team ${index + 1}` : null,
} }
} }
@ -524,7 +527,7 @@ export function validateAnonymization(data: AnonymizationResult): boolean {
if (!checkText(project.title)) return false if (!checkText(project.title)) return false
if (!checkText(project.description)) return false if (!checkText(project.description)) return false
for (const tag of project.tags) { for (const tag of project.tags) {
if (!checkText(tag)) return false if (!checkText(typeof tag === 'string' ? tag : tag.name)) return false
} }
} }