feat: website analysis pipeline, voice agent, configurator improvements
All checks were successful
Build & Push / build-and-push (push) Successful in 6m2s

- Site analysis: cheerio HTML parsing, inline tech stack detection (~20 CMS/framework/analytics signatures), Google PageSpeed API integration
- Gemini Live voice agent: WebSocket-based real-time voice mode with live transcript, selection chips, and mid-conversation website analysis
- Type/Talk mode toggle with silent capability detection
- Stepped progress animation during brief generation (4 animated steps)
- URL + thoughts fields in Step 2, phone + contact preference in Step 3
- AI prompt improvements: dedicated website analysis section, 30-min call, concrete benefits, industry depth
- Email redesign: branded templates with logo, proper markdown rendering for both client and admin
- French locale support for AI-generated briefs
- Smaller checkmark, compact booking CTA, expanded brief area

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-03-28 13:41:35 +01:00
parent 16cd2a74ee
commit bab45b981e
19 changed files with 2923 additions and 119 deletions

View File

@@ -0,0 +1,73 @@
'use client';
import { useState, useEffect } from 'react';
import { useTranslations } from 'next-intl';
import { Keyboard, Mic } from 'lucide-react';
import { cn } from '@/lib/utils';
// ─── Types ───────────────────────────────────────────────────────────────────
interface ModeToggleProps {
  mode: 'type' | 'talk';
  onChange: (mode: 'type' | 'talk') => void;
}
// ─── Component ───────────────────────────────────────────────────────────────
/**
 * Type/Talk segmented toggle for the configurator.
 *
 * Renders nothing until voice capability is positively detected:
 * WebSocket and getUserMedia must exist, AND a probe POST to
 * /api/gemini-token must report success. Browsers (or deployments)
 * without voice support therefore never see the control.
 */
export default function ModeToggle({ mode, onChange }: ModeToggleProps) {
  const t = useTranslations('configurator');
  const [voiceSupported, setVoiceSupported] = useState(false);
  useEffect(() => {
    // Fix: the token probe is async and can resolve after this component has
    // unmounted (e.g. the user navigates the wizard quickly). Track a
    // cancelled flag so we never call setState on an unmounted component.
    let cancelled = false;
    async function check() {
      if (typeof WebSocket === 'undefined') return;
      if (!navigator.mediaDevices?.getUserMedia) return;
      try {
        // Probe locale is hardcoded to 'en' — only the success flag matters here.
        const res = await fetch('/api/gemini-token', {
          method: 'POST',
          headers: { 'Content-Type': 'application/json' },
          body: JSON.stringify({ locale: 'en' }),
        });
        const data = (await res.json()) as { success: boolean };
        if (data.success && !cancelled) setVoiceSupported(true);
      } catch {
        // silent — toggle stays hidden
      }
    }
    void check();
    return () => {
      cancelled = true;
    };
  }, []);
  if (!voiceSupported) return null;
  return (
    <div className="flex items-center gap-1 rounded-xl bg-surface-low p-1 border border-outline-variant/30">
      <button
        type="button"
        onClick={() => onChange('type')}
        className={cn(
          'flex items-center gap-1.5 px-3 py-1.5 rounded-lg text-xs font-medium transition-all duration-200',
          mode === 'type'
            ? 'bg-white text-on-surface shadow-card'
            : 'text-outline hover:text-on-surface',
        )}
      >
        <Keyboard size={13} />
        {t('mode.type')}
      </button>
      <button
        type="button"
        onClick={() => onChange('talk')}
        className={cn(
          'flex items-center gap-1.5 px-3 py-1.5 rounded-lg text-xs font-medium transition-all duration-200',
          mode === 'talk'
            ? 'bg-white text-on-surface shadow-card'
            : 'text-outline hover:text-on-surface',
        )}
      >
        <Mic size={13} />
        {t('mode.talk')}
      </button>
    </div>
  );
}

View File

@@ -2,7 +2,7 @@
import { useTranslations } from 'next-intl';
import { motion } from 'framer-motion';
import { Calendar, Mail, RotateCcw } from 'lucide-react';
import { Calendar, RotateCcw } from 'lucide-react';
import AnimatedCheckmark from '@/components/icons/AnimatedCheckmark';
import Button from '@/components/ui/Button';
import CalButton from '@/components/ui/CalButton';
@@ -96,22 +96,19 @@ export default function StepComplete({ formData, brief, onReset }: StepCompleteP
variants={containerVariants}
initial="hidden"
animate="visible"
className="flex flex-col gap-6"
className="flex flex-col gap-4"
>
{/* Checkmark + heading */}
<motion.div variants={itemVariants} className="flex flex-col items-center text-center pt-2 pb-1">
<AnimatedCheckmark size={64} color="#006494" />
<motion.div variants={itemVariants} className="flex flex-col items-center text-center pt-1 pb-0">
<AnimatedCheckmark size={40} color="#006494" />
<h3 className="font-serif text-2xl font-semibold tracking-headline text-on-surface mt-4">
<h3 className="font-serif text-xl font-semibold tracking-headline text-on-surface mt-2.5">
{t('complete.title')}
</h3>
<div className="flex items-center gap-2 mt-2">
<Mail size={14} strokeWidth={1.5} className="text-primary flex-shrink-0" />
<p className="text-sm text-outline">
{t('complete.subtitle', { email: displayEmail })}
</p>
</div>
<p className="text-sm text-outline mt-2">
{t('complete.subtitle', { email: displayEmail })}
</p>
</motion.div>
{/* Brief preview */}
@@ -123,31 +120,28 @@ export default function StepComplete({ formData, brief, onReset }: StepCompleteP
<p className="text-xs font-semibold uppercase tracking-label text-outline mb-3">
{t('complete.briefPreview')}
</p>
<div className="space-y-1 max-h-72 overflow-y-auto pr-1 scrollbar-thin">
<div className="space-y-1 max-h-[28rem] overflow-y-auto pr-1 scrollbar-thin">
{renderBrief(brief)}
</div>
</motion.div>
)}
{/* Booking */}
{/* Next step: book a call */}
<motion.div variants={itemVariants}>
<div className="rounded-xl bg-surface-low px-5 py-5 text-center">
<div className="flex justify-center mb-3">
<span className="w-10 h-10 rounded-xl bg-primary/10 flex items-center justify-center">
<Calendar size={18} strokeWidth={1.5} className="text-primary-dark" />
</span>
<div className="flex items-center justify-between gap-4 rounded-lg border border-primary/20 bg-primary/5 px-4 py-3">
<div className="min-w-0">
<p className="text-sm font-semibold text-on-surface">
{t('complete.nextStep')}
</p>
<p className="text-xs text-outline mt-0.5">
{t('complete.bookSubtitle')}
</p>
</div>
<p className="text-sm font-semibold text-on-surface mb-1">
{t('complete.bookTitle')}
</p>
<p className="text-xs text-outline mb-4">
{t('complete.bookSubtitle')}
</p>
<CalButton
className="inline-flex items-center gap-2 px-6 py-2.5 rounded-lg text-sm font-medium text-white transition-all hover:-translate-y-px active:translate-y-0"
className="inline-flex items-center gap-2 px-4 py-2 rounded-lg text-sm font-medium text-white whitespace-nowrap transition-all hover:-translate-y-px active:translate-y-0 flex-shrink-0"
style={{ background: 'linear-gradient(135deg, #006494, #5BA4D9)' }}
>
<Calendar size={16} />
<Calendar size={14} />
{t('complete.bookCall')}
</CalButton>
</div>

View File

@@ -1,7 +1,10 @@
'use client';
import React from 'react';
import { useTranslations } from 'next-intl';
import { motion, AnimatePresence } from 'framer-motion';
import { PhoneInput } from 'react-international-phone';
import 'react-international-phone/style.css';
import { cn } from '@/lib/utils';
import Button from '@/components/ui/Button';
import ProgressBar from './ProgressBar';
@@ -204,6 +207,68 @@ export default function StepContact({
required
autoComplete="email"
/>
{/* Phone field — optional */}
<div className="flex flex-col gap-1.5">
<label
htmlFor="contact-phone"
className="text-xs font-semibold uppercase tracking-label text-outline"
>
{t('fields.phone')}{' '}
<span className="normal-case font-normal">{t('fields.phoneOptional')}</span>
</label>
<PhoneInput
inputProps={{ id: 'contact-phone', autoComplete: 'tel' }}
defaultCountry="fr"
preferredCountries={['fr', 'us', 'gb', 'mc', 'ch']}
forceDialCode={true}
value={formData.phone}
onChange={(phone) => setFormData((prev) => ({ ...prev, phone }))}
style={
{
'--react-international-phone-height': '44px',
'--react-international-phone-border-radius': '12px',
'--react-international-phone-border-color': 'rgb(var(--color-outline-variant) / 0.6)',
'--react-international-phone-background-color': 'rgb(var(--color-surface-high))',
'--react-international-phone-text-color': 'rgb(var(--color-on-surface))',
'--react-international-phone-font-size': '14px',
'--react-international-phone-selected-dropdown-item-background-color': 'rgb(var(--color-surface-low))',
} as React.CSSProperties
}
className="w-full [&_.react-international-phone-input]:flex-1 [&_.react-international-phone-input]:rounded-r-xl [&_.react-international-phone-input]:border-outline-variant/60 [&_.react-international-phone-input]:bg-surface-high [&_.react-international-phone-input]:text-on-surface [&_.react-international-phone-input]:placeholder:text-outline/50 [&_.react-international-phone-input]:focus:ring-2 [&_.react-international-phone-input]:focus:ring-primary [&_.react-international-phone-input]:focus:border-primary [&_.react-international-phone-country-selector-button]:rounded-l-xl [&_.react-international-phone-country-selector-button]:border-outline-variant/60 [&_.react-international-phone-country-selector-button]:bg-surface-high"
/>
</div>
{/* Contact preference selector */}
<div className="flex flex-col gap-1.5">
<span className="text-xs font-semibold uppercase tracking-label text-outline">
{t('fields.contactPreference')}
</span>
<div className="flex gap-2">
{(['email', 'phone', 'whatsapp'] as const).map((method) => {
const labelKey = `fields.contact${method.charAt(0).toUpperCase() + method.slice(1)}` as
| 'fields.contactEmail'
| 'fields.contactPhone'
| 'fields.contactWhatsapp';
const isActive = formData.contactPreference === method;
return (
<button
key={method}
type="button"
onClick={() => setFormData((prev) => ({ ...prev, contactPreference: method }))}
className={cn(
'px-3 py-1.5 rounded-lg border text-xs transition-colors duration-150',
isActive
? 'bg-primary/10 text-primary-dark border-primary/30 font-medium'
: 'bg-white text-outline border-outline-variant/20 hover:border-outline-variant/40',
)}
>
{t(labelKey)}
</button>
);
})}
</div>
</div>
</div>
{/* Error state */}

View File

@@ -1,7 +1,7 @@
'use client';
import { useTranslations } from 'next-intl';
import { motion } from 'framer-motion';
import { motion, AnimatePresence } from 'framer-motion';
import { cn } from '@/lib/utils';
import Button from '@/components/ui/Button';
import Chip from '@/components/ui/Chip';
@@ -100,6 +100,73 @@ export default function StepDetails({ formData, setFormData, onNext, onBack }: S
/>
</div>
{/* Current Website URL */}
<div className="flex flex-col gap-2">
<label
htmlFor="current-site-url"
className="text-xs font-semibold uppercase tracking-label text-outline"
>
{t('fields.currentSiteUrl')}
<span className="ml-1.5 normal-case font-normal text-outline/70">
{t('fields.currentSiteUrlOptional')}
</span>
</label>
<input
id="current-site-url"
type="url"
value={formData.currentSiteUrl}
onChange={(e) =>
setFormData((prev) => ({ ...prev, currentSiteUrl: e.target.value }))
}
placeholder={t('fields.currentSiteUrlPlaceholder')}
autoComplete="url"
className={cn(
'w-full rounded-xl border border-outline-variant/60 bg-surface-high',
'px-4 py-3 text-sm text-on-surface placeholder:text-outline/50',
'focus:outline-none focus:ring-2 focus:ring-primary focus:border-primary',
'transition-colors duration-200',
)}
/>
</div>
{/* Thoughts on current site (conditional) */}
<AnimatePresence>
{formData.currentSiteUrl.trim().length > 0 && (
<motion.div
initial={{ opacity: 0, height: 0 }}
animate={{ opacity: 1, height: 'auto' }}
exit={{ opacity: 0, height: 0 }}
transition={{ duration: 0.3, ease: [0.16, 1, 0.3, 1] }}
className="overflow-hidden"
>
<div className="flex flex-col gap-2 pt-1">
<label
htmlFor="current-site-thoughts"
className="text-xs font-semibold uppercase tracking-label text-outline"
>
{t('fields.currentSiteThoughts')}
</label>
<textarea
id="current-site-thoughts"
value={formData.currentSiteThoughts}
onChange={(e) =>
setFormData((prev) => ({ ...prev, currentSiteThoughts: e.target.value }))
}
placeholder={t('fields.currentSiteThoughtsPlaceholder')}
rows={3}
className={cn(
'w-full resize-none rounded-xl border border-outline-variant/60 bg-surface-high',
'px-4 py-3 text-sm text-on-surface placeholder:text-outline/50',
'focus:outline-none focus:ring-2 focus:ring-primary focus:border-primary',
'transition-colors duration-200',
'leading-relaxed',
)}
/>
</div>
</motion.div>
)}
</AnimatePresence>
{/* Timeline */}
<div className="flex flex-col gap-2.5">
<label className="text-xs font-semibold uppercase tracking-label text-outline">

View File

@@ -0,0 +1,120 @@
'use client';
import { useState, useEffect, useMemo } from 'react';
import { useTranslations } from 'next-intl';
import { motion, AnimatePresence } from 'framer-motion';
import { Check, Loader2 } from 'lucide-react';
// ─── Types ───────────────────────────────────────────────────────────────────
interface GeneratingStep {
  id: string;
  labelKey: string; // configurator.* translation key for the step label
  durationMs: number; // Infinity = never auto-advances (parent swaps the screen)
}
interface StepGeneratingProps {
  hasUrl: boolean; // when true, two extra site-analysis steps are shown
}
// ─── Component ───────────────────────────────────────────────────────────────
/**
 * Staged progress animation shown while the brief is generated.
 * Each step auto-advances on a fixed timer except the final one, which
 * spins until the parent unmounts this screen with the real result.
 */
export default function StepGenerating({ hasUrl }: StepGeneratingProps) {
  const t = useTranslations('configurator');
  // Fix: the original rebuilt this array on every render and listed it in the
  // effect's dependency array, so ANY parent re-render tore down and restarted
  // the active step's timeout — stalling progress indefinitely under frequent
  // re-renders. Memoizing on `hasUrl` keeps the reference stable.
  const steps: GeneratingStep[] = useMemo(
    () => [
      { id: 'preparing', labelKey: 'generatingSteps.preparingBrief', durationMs: 1000 },
      ...(hasUrl
        ? [
            { id: 'analyzing', labelKey: 'generatingSteps.analyzingSite', durationMs: 3000 },
            { id: 'performance', labelKey: 'generatingSteps.runningAudit', durationMs: 5000 },
          ]
        : []),
      { id: 'generating', labelKey: 'generatingSteps.generatingBrief', durationMs: Infinity },
    ],
    [hasUrl],
  );
  const [completedCount, setCompletedCount] = useState(0);
  useEffect(() => {
    const step = steps[completedCount];
    // Final step has durationMs === Infinity and never completes from here.
    if (!step || step.durationMs === Infinity) return;
    const timer = setTimeout(() => {
      setCompletedCount((prev) => Math.min(prev + 1, steps.length - 1));
    }, step.durationMs);
    return () => clearTimeout(timer);
  }, [completedCount, steps]);
  return (
    <div className="flex flex-col items-center gap-8 py-12">
      {/* Large spinner badge */}
      <motion.div
        initial={{ scale: 0.9, opacity: 0 }}
        animate={{ scale: 1, opacity: 1 }}
        transition={{ duration: 0.5, ease: [0.16, 1, 0.3, 1] }}
        className="w-16 h-16 rounded-2xl bg-gradient-to-br from-primary to-primary-dark/80 flex items-center justify-center shadow-lg"
      >
        <motion.div
          animate={{ rotate: 360 }}
          transition={{ duration: 2, repeat: Infinity, ease: 'linear' }}
        >
          <Loader2 size={28} strokeWidth={1.5} className="text-white" />
        </motion.div>
      </motion.div>
      {/* Step checklist: check = done, spinner = active, dot = pending */}
      <div className="flex flex-col gap-3 w-full max-w-xs">
        {steps.map((step, index) => {
          const isCompleted = index < completedCount;
          const isActive = index === completedCount;
          return (
            <motion.div
              key={step.id}
              initial={{ opacity: 0, x: -12 }}
              animate={{ opacity: 1, x: 0 }}
              transition={{ delay: index * 0.1, duration: 0.35, ease: [0.16, 1, 0.3, 1] }}
              className="flex items-center gap-3"
            >
              <div className="flex-shrink-0 w-6 h-6 flex items-center justify-center">
                <AnimatePresence mode="wait">
                  {isCompleted ? (
                    <motion.div
                      key="check"
                      initial={{ scale: 0 }}
                      animate={{ scale: 1 }}
                      className="w-5 h-5 rounded-full bg-primary flex items-center justify-center"
                    >
                      <Check size={12} strokeWidth={3} className="text-white" />
                    </motion.div>
                  ) : isActive ? (
                    <motion.div
                      key="spinner"
                      animate={{ rotate: 360 }}
                      transition={{ duration: 1.2, repeat: Infinity, ease: 'linear' }}
                      className="w-5 h-5 rounded-full border-2 border-primary/30 border-t-primary"
                    />
                  ) : (
                    <div
                      key="pending"
                      className="w-3 h-3 rounded-full bg-outline-variant/30"
                    />
                  )}
                </AnimatePresence>
              </div>
              <span
                className={
                  isCompleted
                    ? 'text-sm font-medium text-on-surface'
                    : isActive
                      ? 'text-sm font-semibold text-primary-dark'
                      : 'text-sm text-outline/40'
                }
              >
                {t(step.labelKey)}
              </span>
            </motion.div>
          );
        })}
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,275 @@
'use client';
import { useEffect, useRef } from 'react';
import { useTranslations } from 'next-intl';
import { motion, AnimatePresence, useMotionValue, useTransform } from 'framer-motion';
import { Mic, MicOff, PhoneOff, Loader2 } from 'lucide-react';
import { cn } from '@/lib/utils';
import Chip from '@/components/ui/Chip';
import { useVoiceAgent, type TranscriptEntry } from './VoiceAgentProvider';
import type { WizardFormData } from './WizardContainer';
// ─── Types ───────────────────────────────────────────────────────────────────
/** Props for the voice-mode conversation UI. */
interface VoiceAgentProps {
  // Used only for the hardcoded start-button label (other strings go through next-intl).
  locale: string;
  // Fired ~1.5 s after the provider reports a completed brief (see effect below).
  onComplete: (brief: string, formData: WizardFormData) => void;
}
// ─── Transcript Bubble ───────────────────────────────────────────────────────
/**
 * One chat bubble in the live transcript: agent messages are left-aligned
 * on the low surface, user messages right-aligned in primary tint.
 */
function TranscriptBubble({ entry }: { entry: TranscriptEntry }) {
  return (
    <motion.div
      initial={{ opacity: 0, y: 6 }}
      animate={{ opacity: 1, y: 0 }}
      transition={{ duration: 0.25, ease: [0.16, 1, 0.3, 1] }}
      className={cn(
        'flex',
        entry.role === 'agent' ? 'justify-start' : 'justify-end',
      )}
    >
      <div
        className={cn(
          'max-w-[85%] rounded-xl px-3 py-2 text-xs leading-relaxed',
          entry.role === 'agent'
            ? 'bg-surface-low text-on-surface'
            : 'bg-primary/10 text-primary-dark',
        )}
      >
        {entry.text}
      </div>
    </motion.div>
  );
}
// ─── Main Component ──────────────────────────────────────────────────────────
/**
 * Voice-mode conversation screen: agent header with connection dot,
 * amplitude-driven orb, live transcript, chips for selections captured so
 * far, and call controls. All conversation state/actions come from the
 * surrounding VoiceAgentProvider via useVoiceAgent().
 */
export default function VoiceAgent({ locale, onComplete }: VoiceAgentProps) {
  const t = useTranslations('configurator');
  const {
    status,
    errorMessage,
    isMicActive,
    toggleMic,
    transcript,
    selections,
    isAnalyzingSite,
    agentAmplitude,
    startConversation,
    endConversation,
    completedBrief,
    completedFormData,
  } = useVoiceAgent();
  const transcriptEndRef = useRef<HTMLDivElement>(null);
  // Auto-scroll transcript to the newest entry whenever it grows.
  useEffect(() => {
    transcriptEndRef.current?.scrollIntoView({ behavior: 'smooth' });
  }, [transcript]);
  // Handle completion: small delay so the user sees/hears the wrap-up
  // before the parent swaps this screen for the brief view.
  useEffect(() => {
    if (completedBrief && completedFormData) {
      const timer = setTimeout(() => {
        onComplete(completedBrief, completedFormData);
      }, 1500);
      return () => clearTimeout(timer);
    }
  }, [completedBrief, completedFormData, onComplete]);
  // Orb animation driven by agent amplitude: the raw number from the provider
  // is mirrored into a MotionValue so scale/glow update without re-rendering.
  const amplitudeValue = useMotionValue(0);
  useEffect(() => {
    amplitudeValue.set(agentAmplitude);
  }, [agentAmplitude, amplitudeValue]);
  const orbScale = useTransform(amplitudeValue, [0, 0.5], [1, 1.18]);
  const orbGlow = useTransform(
    amplitudeValue,
    [0, 0.5],
    ['0px 0px 0px rgba(0,100,148,0)', '0px 0px 30px rgba(0,100,148,0.3)'],
  );
  // Build selection chips from the partial form data captured by tool calls.
  // NOTE(review): the try/catch assumes a missing translation key makes t()
  // throw — confirm against the project's next-intl error-handling config;
  // some setups return a fallback string instead.
  const chipLabels: string[] = [];
  if (selections.services) {
    for (const svc of selections.services) {
      try { chipLabels.push(t(`services.${svc}.title`)); } catch { chipLabels.push(svc); }
    }
  }
  if (selections.aiEnabled && selections.aiTypes) {
    for (const ai of selections.aiTypes) {
      try { chipLabels.push(t(`aiTypes.${ai}.title`)); } catch { chipLabels.push(ai); }
    }
  }
  if (selections.industry) {
    try { chipLabels.push(t(`industries.${selections.industry}`)); } catch { chipLabels.push(selections.industry); }
  }
  if (selections.timeline) {
    try { chipLabels.push(t(`timelines.${selections.timeline}`)); } catch { chipLabels.push(selections.timeline); }
  }
  return (
    <div className="flex flex-col gap-5">
      {/* Agent card header */}
      <div className="flex items-center gap-3 px-1">
        <div className="w-8 h-8 rounded-lg bg-gradient-to-br from-primary to-primary-dark/80 flex items-center justify-center">
          <span className="text-white font-serif text-xs font-bold">L</span>
        </div>
        <div className="flex-1">
          <p className="text-sm font-semibold text-on-surface">{t('voice.agentName')}</p>
          <div className="flex items-center gap-1.5">
            <span
              className={cn(
                'w-1.5 h-1.5 rounded-full',
                status === 'active' ? 'bg-green-500' : status === 'connecting' ? 'bg-amber-400 animate-pulse' : 'bg-outline-variant/50',
              )}
            />
            {/* NOTE(review): 'Connected' / 'Ready' are hardcoded English while
                the connecting state uses t('voice.connecting') — consider
                adding voice.connected / voice.ready translation keys. */}
            <span className="text-[10px] text-outline">
              {status === 'active' ? 'Connected' : status === 'connecting' ? t('voice.connecting') : 'Ready'}
            </span>
          </div>
        </div>
      </div>
      {/* Waveform orb — scale/glow track agent speech amplitude while active */}
      <div className="flex flex-col items-center gap-3 py-4">
        <motion.div
          style={{ scale: status === 'active' ? orbScale : 1, boxShadow: status === 'active' ? orbGlow : 'none' }}
          className={cn(
            'w-20 h-20 rounded-full flex items-center justify-center transition-colors duration-300',
            status === 'active'
              ? 'bg-gradient-to-br from-primary to-primary-dark'
              : status === 'connecting'
                ? 'bg-primary/20'
                : 'bg-surface-low border-2 border-outline-variant/30',
          )}
        >
          {status === 'idle' && (
            <Mic size={28} strokeWidth={1.5} className="text-outline" />
          )}
          {status === 'connecting' && (
            <motion.div animate={{ rotate: 360 }} transition={{ duration: 1.5, repeat: Infinity, ease: 'linear' }}>
              <Loader2 size={28} strokeWidth={1.5} className="text-primary" />
            </motion.div>
          )}
          {status === 'active' && (
            <motion.div
              animate={{ scale: [1, 1.1, 1] }}
              transition={{ duration: 2, repeat: Infinity, ease: 'easeInOut' }}
            >
              <Mic size={28} strokeWidth={1.5} className="text-white" />
            </motion.div>
          )}
        </motion.div>
        {/* Analyzing site badge — shown while the analyze_website tool runs */}
        <AnimatePresence>
          {isAnalyzingSite && (
            <motion.div
              initial={{ opacity: 0, y: -4 }}
              animate={{ opacity: 1, y: 0 }}
              exit={{ opacity: 0, y: -4 }}
              className="flex items-center gap-1.5 px-3 py-1 rounded-full bg-primary/10 text-primary-dark text-xs font-medium"
            >
              <motion.div animate={{ rotate: 360 }} transition={{ duration: 1, repeat: Infinity, ease: 'linear' }}>
                <Loader2 size={11} />
              </motion.div>
              {t('voice.analyzingSite')}
            </motion.div>
          )}
        </AnimatePresence>
        {/* Error message */}
        {errorMessage && (
          <p className="text-xs text-red-600 text-center max-w-xs">{errorMessage}</p>
        )}
      </div>
      {/* Live transcript — only rendered once there is at least one entry */}
      {transcript.length > 0 && (
        <div className="rounded-xl border border-outline-variant/30 bg-surface-high p-3 max-h-40 overflow-y-auto scrollbar-thin">
          <div className="flex flex-col gap-2">
            {transcript.map((entry, i) => (
              <TranscriptBubble key={`${entry.timestamp}-${i}`} entry={entry} />
            ))}
            <div ref={transcriptEndRef} />
          </div>
        </div>
      )}
      {/* Selection chips captured mid-conversation via tool calls */}
      <AnimatePresence>
        {chipLabels.length > 0 && (
          <motion.div
            initial={{ opacity: 0, height: 0 }}
            animate={{ opacity: 1, height: 'auto' }}
            exit={{ opacity: 0, height: 0 }}
            className="overflow-hidden"
          >
            <p className="text-xs font-semibold uppercase tracking-label text-outline mb-2">
              {t('voice.capturedSoFar')}
            </p>
            <div className="flex flex-wrap gap-1.5">
              {chipLabels.map((label, i) => (
                <motion.div
                  key={label}
                  initial={{ opacity: 0, scale: 0.8 }}
                  animate={{ opacity: 1, scale: 1 }}
                  transition={{ delay: i * 0.05, duration: 0.2 }}
                >
                  <Chip active>{label}</Chip>
                </motion.div>
              ))}
            </div>
          </motion.div>
        )}
      </AnimatePresence>
      {/* Controls — which set renders depends entirely on connection status */}
      <div className="flex items-center justify-center gap-3 pt-2">
        {status === 'idle' && (
          <button
            type="button"
            onClick={startConversation}
            className="flex items-center gap-2 px-6 py-3 rounded-xl text-sm font-medium text-white transition-all hover:-translate-y-px active:translate-y-0"
            style={{ background: 'linear-gradient(135deg, #006494, #5BA4D9)' }}
          >
            <Mic size={16} />
            {/* NOTE(review): hardcoded bilingual label instead of t() — confirm intended */}
            {locale === 'fr' ? 'Démarrer la conversation' : 'Start Conversation'}
          </button>
        )}
        {status === 'active' && (
          <>
            <button
              type="button"
              onClick={toggleMic}
              className={cn(
                'w-11 h-11 rounded-full flex items-center justify-center transition-all',
                isMicActive
                  ? 'bg-surface-low text-on-surface hover:bg-outline-variant/30'
                  : 'bg-red-100 text-red-600',
              )}
            >
              {isMicActive ? <Mic size={18} /> : <MicOff size={18} />}
            </button>
            <button
              type="button"
              onClick={endConversation}
              className="flex items-center gap-2 px-4 py-2.5 rounded-xl bg-red-50 text-red-700 text-xs font-medium hover:bg-red-100 transition-colors"
            >
              <PhoneOff size={14} />
              {t('voice.endConversation')}
            </button>
          </>
        )}
        {status === 'connecting' && (
          <p className="text-sm text-outline animate-pulse">{t('voice.connecting')}</p>
        )}
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,432 @@
'use client';
import { createContext, useContext, useState, useRef, useCallback, type ReactNode } from 'react';
import type { WizardFormData } from './WizardContainer';
// ─── Types ───────────────────────────────────────────────────────────────────
/** One line of the live conversation transcript. */
export interface TranscriptEntry {
  role: 'user' | 'agent';
  text: string;
  timestamp: number; // Date.now() when the entry was appended
}
/** Lifecycle of the Gemini Live WebSocket session. */
type ConnectionStatus = 'idle' | 'connecting' | 'active' | 'ending' | 'error';
/** Everything the voice UI can read or trigger, exposed through context. */
interface VoiceAgentContextValue {
  status: ConnectionStatus;
  errorMessage: string | null;
  isMicActive: boolean;
  toggleMic: () => void;
  transcript: TranscriptEntry[];
  // Partial wizard form data accumulated from the agent's tool calls.
  selections: Partial<WizardFormData>;
  isAnalyzingSite: boolean;
  // RMS amplitudes (0..~1) for the user mic and the agent's playback audio.
  userAmplitude: number;
  agentAmplitude: number;
  startConversation: () => Promise<void>;
  endConversation: () => void;
  // Set once the complete_brief tool succeeds; consumers treat both
  // being non-null as "conversation finished".
  completedBrief: string | null;
  completedFormData: WizardFormData | null;
}
// ─── Context ─────────────────────────────────────────────────────────────────
// null until a VoiceAgentProvider mounts; useVoiceAgent() enforces presence.
const VoiceAgentContext = createContext<VoiceAgentContextValue | null>(null);
/** Access the voice-agent context; must be called under a VoiceAgentProvider. */
export function useVoiceAgent() {
  const value = useContext(VoiceAgentContext);
  if (value === null) {
    throw new Error('useVoiceAgent must be used within VoiceAgentProvider');
  }
  return value;
}
// ─── Audio Helpers ───────────────────────────────────────────────────────────
/** Convert 16-bit PCM samples to normalized floats in [-1, 1). */
function int16ToFloat32(int16: Int16Array): Float32Array {
  return Float32Array.from(int16, (sample) => sample / 32768);
}
/** Decode a base64 string into little-endian 16-bit PCM samples. */
function base64ToInt16(base64: string): Int16Array {
  const decoded = atob(base64);
  const raw = Uint8Array.from(decoded, (ch) => ch.charCodeAt(0));
  return new Int16Array(raw.buffer);
}
/** Encode raw bytes as base64 (byte-by-byte to avoid call-stack limits on large buffers). */
function arrayBufferToBase64(buffer: ArrayBuffer): string {
  const view = new Uint8Array(buffer);
  const chars: string[] = [];
  for (const byte of view) {
    chars.push(String.fromCharCode(byte));
  }
  return btoa(chars.join(''));
}
// ─── Audio Worklet Processor Code ────────────────────────────────────────────
// Source for the mic-recorder worklet, registered at runtime via a Blob URL.
// Runs in the AudioWorklet scope: clamps each float sample to [-1, 1],
// converts to 16-bit PCM, and posts a full 2048-sample mono buffer
// (~128 ms at the 16 kHz capture rate used below) to the main thread.
const WORKLET_CODE = `
class AudioRecordingWorklet extends AudioWorkletProcessor {
buffer = new Int16Array(2048);
bufferWriteIndex = 0;
process(inputs) {
if (inputs[0].length) {
const channel0 = inputs[0][0];
for (let i = 0; i < channel0.length; i++) {
const sample = Math.max(-1, Math.min(1, channel0[i]));
this.buffer[this.bufferWriteIndex++] = sample * 32767;
if (this.bufferWriteIndex >= this.buffer.length) {
this.port.postMessage({
event: 'chunk',
data: { int16arrayBuffer: this.buffer.slice(0, this.bufferWriteIndex).buffer },
});
this.bufferWriteIndex = 0;
}
}
}
return true;
}
}
registerProcessor('audio-recorder-worklet', AudioRecordingWorklet);
`;
// ─── Default Form Data (mirror WizardContainer) ─────────────────────────────
// Baseline values for every wizard field; tool-call arguments from the agent
// are spread on top of this when building the final /api/configure payload.
// NOTE(review): must stay in sync with the initial form state in WizardContainer.
const DEFAULT_FORM_DATA: WizardFormData = {
  services: [],
  aiEnabled: false,
  aiTypes: [],
  industry: null,
  scope: '',
  timeline: null,
  name: '',
  company: '',
  email: '',
  phone: '',
  contactPreference: 'email',
  currentSiteUrl: '',
  currentSiteThoughts: '',
};
// ─── Provider Component ──────────────────────────────────────────────────────
/** Props for VoiceAgentProvider. */
interface VoiceAgentProviderProps {
  // Locale string (e.g. 'en' or 'fr') forwarded to the token endpoint
  // and merged into the generated brief's form data.
  locale: string;
  children: ReactNode;
}
export default function VoiceAgentProvider({ locale, children }: VoiceAgentProviderProps) {
const [status, setStatus] = useState<ConnectionStatus>('idle');
const [errorMessage, setErrorMessage] = useState<string | null>(null);
const [isMicActive, setIsMicActive] = useState(true);
const [transcript, setTranscript] = useState<TranscriptEntry[]>([]);
const [selections, setSelections] = useState<Partial<WizardFormData>>({});
const [isAnalyzingSite, setIsAnalyzingSite] = useState(false);
const [userAmplitude, setUserAmplitude] = useState(0);
const [agentAmplitude, setAgentAmplitude] = useState(0);
const [completedBrief, setCompletedBrief] = useState<string | null>(null);
const [completedFormData, setCompletedFormData] = useState<WizardFormData | null>(null);
const wsRef = useRef<WebSocket | null>(null);
const mediaStreamRef = useRef<MediaStream | null>(null);
const audioContextRef = useRef<AudioContext | null>(null);
const playbackContextRef = useRef<AudioContext | null>(null);
const nextStartTimeRef = useRef(0);
const analyserRef = useRef<AnalyserNode | null>(null);
const animFrameRef = useRef<number>(0);
const addTranscript = useCallback((role: 'user' | 'agent', text: string) => {
setTranscript((prev) => [...prev, { role, text, timestamp: Date.now() }]);
}, []);
const trackAmplitude = useCallback(() => {
if (!analyserRef.current) return;
const data = new Uint8Array(analyserRef.current.fftSize);
analyserRef.current.getByteTimeDomainData(data);
let sum = 0;
for (let i = 0; i < data.length; i++) {
const v = (data[i] - 128) / 128;
sum += v * v;
}
setUserAmplitude(Math.sqrt(sum / data.length));
animFrameRef.current = requestAnimationFrame(trackAmplitude);
}, []);
const handleToolCall = useCallback(
async (name: string, args: Record<string, unknown>, callId: string) => {
if (name === 'update_selections') {
setSelections((prev) => ({ ...prev, ...(args as Partial<WizardFormData>) }));
return JSON.stringify({ success: true });
}
if (name === 'analyze_website') {
setIsAnalyzingSite(true);
try {
const res = await fetch('/api/analyze-site', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ url: args.url }),
});
const data = await res.json();
setIsAnalyzingSite(false);
return JSON.stringify(data);
} catch {
setIsAnalyzingSite(false);
return JSON.stringify({ success: false, summary: "I wasn't able to analyze that site." });
}
}
if (name === 'complete_brief') {
try {
const formData = { ...DEFAULT_FORM_DATA, ...(args as Partial<WizardFormData>), locale };
const res = await fetch('/api/configure', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(formData),
});
const data = (await res.json()) as { success: boolean; brief?: string };
if (data.success && data.brief) {
setCompletedBrief(data.brief);
setCompletedFormData(formData as WizardFormData);
}
return JSON.stringify(data);
} catch {
return JSON.stringify({ success: false, error: 'Brief generation failed' });
}
}
return JSON.stringify({ error: `Unknown tool: ${name}` });
},
[locale],
);
const playAudioChunk = useCallback((base64Audio: string) => {
if (!playbackContextRef.current) {
playbackContextRef.current = new AudioContext({ sampleRate: 24000 });
nextStartTimeRef.current = playbackContextRef.current.currentTime;
}
const ctx = playbackContextRef.current;
const int16 = base64ToInt16(base64Audio);
const float32 = int16ToFloat32(int16);
const buffer = ctx.createBuffer(1, float32.length, 24000);
buffer.copyToChannel(new Float32Array(float32), 0);
const source = ctx.createBufferSource();
source.buffer = buffer;
source.connect(ctx.destination);
if (nextStartTimeRef.current < ctx.currentTime) {
nextStartTimeRef.current = ctx.currentTime;
}
source.start(nextStartTimeRef.current);
nextStartTimeRef.current += buffer.duration;
const amplitude = Math.sqrt(float32.reduce((sum, v) => sum + v * v, 0) / float32.length);
setAgentAmplitude(amplitude);
}, []);
// Starts a live voice session: fetches an ephemeral token, captures the mic,
// streams 16 kHz PCM chunks to the Gemini Live API over WebSocket, and plays
// back / transcribes the model's responses. Must be invoked from a user
// gesture (AudioContext creation requires one).
const startConversation = useCallback(async () => {
  setStatus('connecting');
  setErrorMessage(null);
  setTranscript([]);
  setSelections({});
  setCompletedBrief(null);
  setCompletedFormData(null);
  try {
    // 1. Ephemeral API key + per-locale session config from our backend.
    const tokenRes = await fetch('/api/gemini-token', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ locale }),
    });
    const tokenData = await tokenRes.json();
    if (!tokenData.success) throw new Error('Token generation failed');
    const { apiKey, model, config } = tokenData;
    // 2. Mic capture: 16 kHz mono to match the Live API's input PCM format.
    const stream = await navigator.mediaDevices.getUserMedia({
      audio: { sampleRate: 16000, channelCount: 1, echoCancellation: true, noiseSuppression: true },
    });
    mediaStreamRef.current = stream;
    // Create AudioContext for mic capture (must be in user gesture handler)
    const audioCtx = new AudioContext({ sampleRate: 16000 });
    audioContextRef.current = audioCtx;
    const source = audioCtx.createMediaStreamSource(stream);
    const analyser = audioCtx.createAnalyser();
    analyser.fftSize = 256;
    source.connect(analyser);
    analyserRef.current = analyser;
    // 3. Register the inline AudioWorklet that buffers mic PCM into chunks.
    const workletBlob = new Blob([WORKLET_CODE], { type: 'application/javascript' });
    const workletUrl = URL.createObjectURL(workletBlob);
    await audioCtx.audioWorklet.addModule(workletUrl);
    URL.revokeObjectURL(workletUrl);
    const workletNode = new AudioWorkletNode(audioCtx, 'audio-recorder-worklet');
    source.connect(workletNode);
    workletNode.connect(audioCtx.destination);
    // 4. Open WebSocket to Gemini Live API
    const wsUrl = `wss://generativelanguage.googleapis.com/ws/google.ai.generativelanguage.v1beta.GenerativeService.BidiGenerateContent?key=${apiKey}`;
    const ws = new WebSocket(wsUrl);
    wsRef.current = ws;
    ws.onopen = () => {
      // Send setup message with config
      ws.send(JSON.stringify({
        setup: {
          model: `models/${model}`,
          generationConfig: {
            responseModalities: config.responseModalities,
            speechConfig: config.speechConfig,
          },
          systemInstruction: {
            parts: [{ text: config.systemInstruction }],
          },
          tools: config.tools,
        },
      }));
    };
    // Send audio chunks from worklet
    workletNode.port.onmessage = (event) => {
      if (event.data.event === 'chunk' && ws.readyState === WebSocket.OPEN) {
        const base64 = arrayBufferToBase64(event.data.data.int16arrayBuffer);
        ws.send(JSON.stringify({
          realtimeInput: {
            mediaChunks: [{ mimeType: 'audio/pcm;rate=16000', data: base64 }],
          },
        }));
      }
    };
    ws.onmessage = async (event) => {
      const msg = JSON.parse(event.data as string);
      // Setup complete — the session is live; start the amplitude meter loop.
      if (msg.setupComplete) {
        setStatus('active');
        trackAmplitude();
        return;
      }
      // Server content (audio + text)
      if (msg.serverContent) {
        const parts = msg.serverContent.modelTurn?.parts;
        if (parts) {
          for (const part of parts) {
            if (part.inlineData?.mimeType?.startsWith('audio/')) {
              playAudioChunk(part.inlineData.data);
            }
            if (part.text) {
              addTranscript('agent', part.text);
            }
          }
        }
        // Input transcription
        if (msg.serverContent.inputTranscription?.text) {
          addTranscript('user', msg.serverContent.inputTranscription.text);
        }
        // Output transcription
        if (msg.serverContent.outputTranscription?.text) {
          addTranscript('agent', msg.serverContent.outputTranscription.text);
        }
      }
      // Tool call: execute each function call sequentially and reply in one batch.
      if (msg.toolCall) {
        const calls = msg.toolCall.functionCalls;
        if (calls) {
          const responses = [];
          for (const call of calls) {
            const result = await handleToolCall(call.name, call.args ?? {}, call.id);
            responses.push({ id: call.id, name: call.name, response: { result } });
          }
          ws.send(JSON.stringify({ toolResponse: { functionResponses: responses } }));
        }
      }
    };
    ws.onerror = () => {
      setStatus('error');
      setErrorMessage('Connection error. Please try again.');
    };
    ws.onclose = () => {
      // Functional update so we read the CURRENT status, not the value
      // captured when this handler was bound (always 'connecting' then).
      // The old `if (status === 'active')` check was a stale closure and
      // a server-side close could leave the UI stuck in 'active'.
      setStatus((prev) => (prev === 'active' ? 'idle' : prev));
    };
  } catch (error) {
    // Release any partially-acquired resources so the mic does not stay
    // live (and refs do not dangle) after a failed start.
    mediaStreamRef.current?.getTracks().forEach((track) => track.stop());
    mediaStreamRef.current = null;
    if (audioContextRef.current) {
      void audioContextRef.current.close();
      audioContextRef.current = null;
    }
    wsRef.current?.close();
    wsRef.current = null;
    setStatus('error');
    if (error instanceof DOMException && error.name === 'NotAllowedError') {
      setErrorMessage('Microphone access was denied.');
    } else {
      setErrorMessage('Failed to start conversation. Please try again.');
    }
  }
  // `status` removed from deps: onclose now uses a functional update.
}, [locale, trackAmplitude, handleToolCall, playAudioChunk, addTranscript]);
// Tears down the live session: socket, mic stream, both audio graphs,
// amplitude meters, and finally returns the UI to the idle state.
const endConversation = useCallback(() => {
  setStatus('ending');
  cancelAnimationFrame(animFrameRef.current);
  const socket = wsRef.current;
  wsRef.current = null;
  socket?.close();
  const micStream = mediaStreamRef.current;
  mediaStreamRef.current = null;
  micStream?.getTracks().forEach((track) => track.stop());
  const captureCtx = audioContextRef.current;
  audioContextRef.current = null;
  if (captureCtx) {
    // close() returns a promise we deliberately don't await.
    void captureCtx.close();
  }
  const playbackCtx = playbackContextRef.current;
  playbackContextRef.current = null;
  if (playbackCtx) {
    void playbackCtx.close();
  }
  setUserAmplitude(0);
  setAgentAmplitude(0);
  setStatus('idle');
}, []);
// Mutes/unmutes the microphone by flipping `enabled` on the first audio
// track of the captured stream; no-op when no stream is active.
const toggleMic = useCallback(() => {
  const [track] = mediaStreamRef.current?.getAudioTracks() ?? [];
  if (!track) return;
  track.enabled = !track.enabled;
  setIsMicActive(track.enabled);
}, []);
const value: VoiceAgentContextValue = {
status,
errorMessage,
isMicActive,
toggleMic,
transcript,
selections,
isAnalyzingSite,
userAmplitude,
agentAmplitude,
startConversation,
endConversation,
completedBrief,
completedFormData,
};
return (
<VoiceAgentContext.Provider value={value}>
{children}
</VoiceAgentContext.Provider>
);
}

View File

@@ -1,12 +1,16 @@
'use client';
import { useState } from 'react';
import { useTranslations } from 'next-intl';
import { useLocale, useTranslations } from 'next-intl';
import { AnimatePresence, motion } from 'framer-motion';
import StepServices from './StepServices';
import StepDetails from './StepDetails';
import StepContact from './StepContact';
import StepGenerating from './StepGenerating';
import StepComplete from './StepComplete';
import ModeToggle from './ModeToggle';
import VoiceAgent from './VoiceAgent';
import VoiceAgentProvider from './VoiceAgentProvider';
// ─── Types ────────────────────────────────────────────────────────────────────
@@ -20,6 +24,10 @@ export interface WizardFormData {
name: string;
company: string;
email: string;
phone: string;
contactPreference: string;
currentSiteUrl: string;
currentSiteThoughts: string;
}
export interface StepProps {
@@ -66,16 +74,23 @@ const DEFAULT_FORM_DATA: WizardFormData = {
name: '',
company: '',
email: '',
phone: '',
contactPreference: 'email',
currentSiteUrl: '',
currentSiteThoughts: '',
};
export default function WizardContainer() {
const t = useTranslations('configurator');
const locale = useLocale();
const [currentStep, setCurrentStep] = useState<1 | 2 | 3 | 4>(1);
const [direction, setDirection] = useState<1 | -1>(1);
const [formData, setFormData] = useState<WizardFormData>(DEFAULT_FORM_DATA);
const [brief, setBrief] = useState<string>('');
const [isSubmitting, setIsSubmitting] = useState(false);
const [isGenerating, setIsGenerating] = useState(false);
const [submitError, setSubmitError] = useState<string | null>(null);
const [mode, setMode] = useState<'type' | 'talk'>('type');
const goNext = () => {
setDirection(1);
@@ -92,37 +107,50 @@ export default function WizardContainer() {
setFormData(DEFAULT_FORM_DATA);
setBrief('');
setSubmitError(null);
setIsGenerating(false);
setMode('type');
setCurrentStep(1);
};
const handleSubmit = async () => {
setIsSubmitting(true);
setIsGenerating(true);
setSubmitError(null);
try {
const response = await fetch('/api/configure', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(formData),
body: JSON.stringify({ ...formData, locale }),
});
const data = (await response.json()) as { success: boolean; brief?: string; error?: string };
if (!response.ok || !data.success) {
setSubmitError(data.error ?? t('errors.general'));
setIsGenerating(false);
setIsSubmitting(false);
return;
}
setBrief(data.brief ?? '');
setDirection(1);
setIsGenerating(false);
setCurrentStep(4);
} catch {
setSubmitError(t('errors.network'));
setIsGenerating(false);
} finally {
setIsSubmitting(false);
}
};
const handleVoiceComplete = (voiceBrief: string, voiceFormData: WizardFormData) => {
setFormData(voiceFormData);
setBrief(voiceBrief);
setDirection(1);
setCurrentStep(4);
};
const stepVariants = makeVariants(direction);
const sharedProps: StepProps = {
@@ -134,8 +162,28 @@ export default function WizardContainer() {
return (
<div className="relative overflow-hidden">
{!isGenerating && currentStep !== 4 && (
<div className="flex justify-center mb-4">
<ModeToggle mode={mode} onChange={setMode} />
</div>
)}
<AnimatePresence mode="wait" initial={false}>
{currentStep === 1 && (
{mode === 'talk' && !isGenerating && currentStep !== 4 && (
<motion.div
key="voice-mode"
variants={stepVariants}
initial="initial"
animate="animate"
exit="exit"
>
<VoiceAgentProvider locale={locale}>
<VoiceAgent locale={locale} onComplete={handleVoiceComplete} />
</VoiceAgentProvider>
</motion.div>
)}
{mode === 'type' && !isGenerating && currentStep === 1 && (
<motion.div
key="step-1"
variants={stepVariants}
@@ -147,7 +195,7 @@ export default function WizardContainer() {
</motion.div>
)}
{currentStep === 2 && (
{mode === 'type' && !isGenerating && currentStep === 2 && (
<motion.div
key="step-2"
variants={stepVariants}
@@ -159,7 +207,7 @@ export default function WizardContainer() {
</motion.div>
)}
{currentStep === 3 && (
{mode === 'type' && !isGenerating && currentStep === 3 && (
<motion.div
key="step-3"
variants={stepVariants}
@@ -176,7 +224,19 @@ export default function WizardContainer() {
</motion.div>
)}
{currentStep === 4 && (
{isGenerating && (
<motion.div
key="step-generating"
variants={stepVariants}
initial="initial"
animate="animate"
exit="exit"
>
<StepGenerating hasUrl={!!formData.currentSiteUrl.trim()} />
</motion.div>
)}
{!isGenerating && currentStep === 4 && (
<motion.div
key="step-4"
variants={stepVariants}