/*
 * LiveCamera — real-time tennis-stroke analysis page (React + TSX).
 *
 * NOTE(review): this source appears MANGLED — angle-bracket content seems to
 * have been stripped (e.g. `Record = {` with no type arguments, `reduce>(…)`,
 * `icon: }` in SETUP_STEPS, and the render section near the bottom reduced to
 * bare text fragments). Tokens below are preserved byte-for-byte; this edit
 * adds comments only. Restore the original from version control before
 * changing logic.
 *
 * What the visible logic does:
 *  - Starts the device camera via getUserMedia, feeds frames to
 *    @mediapipe/pose (dynamically imported; a window-level
 *    __TEST_MEDIAPIPE_FACTORY__ hook can substitute a test double).
 *  - analyzePoseFrame(): heuristic per-frame classification of strokes
 *    (serve / overhead / forehand / backhand / volley / slice / lob, else
 *    "unknown" when top confidence < 0.5) from landmark geometry (elbow/knee
 *    angles, wrist speed, tilt), plus sub-scores (posture, balance,
 *    technique, footwork, consistency) combined into a weighted overall.
 *  - stabilizeAnalyzedFrame(): smooths the action label over a sliding
 *    window of the last 6 observations with linearly increasing weights.
 *  - appendFrameToSegment()/flushSegment(): aggregates consecutive
 *    same-action frames into ActionSegments — merge gap <= MERGE_GAP_MS
 *    (500ms), max segment SEGMENT_MAX_MS (10s), segments shorter than
 *    MIN_SEGMENT_MS (250ms) are dropped.
 *  - MediaRecorder captures the session (mime fallback: mp4/avc1 ->
 *    webm/vp9 -> webm/vp8 -> plain webm); the blob is base64-encoded and
 *    uploaded via trpc video.upload, then the session is persisted via
 *    trpc analysis.liveSessionSave, which invalidates profile/record/
 *    achievement/rating caches on success.
 *  - drawOverlay(): renders the POSE_CONNECTIONS skeleton and landmark dots
 *    onto the canvas; upper-limb indices 11–16 are highlighted.
 */
import { useAuth } from "@/_core/hooks/useAuth"; import { trpc } from "@/lib/trpc"; import { Badge } from "@/components/ui/badge"; import { Button } from "@/components/ui/button"; import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"; import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle } from "@/components/ui/dialog"; import { Progress } from "@/components/ui/progress"; import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"; import { toast } from "sonner"; import { Activity, Camera, CameraOff, CheckCircle2, FlipHorizontal, Maximize2, Minimize2, Monitor, PlayCircle, RotateCcw, Smartphone, Sparkles, Target, Video, Zap, } from "lucide-react"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; type CameraFacing = "user" | "environment"; type SessionMode = "practice" | "pk"; type ActionType = "forehand" | "backhand" | "serve" | "volley" | "overhead" | "slice" | "lob" | "unknown"; type PoseScore = { overall: number; posture: number; balance: number; technique: number; footwork: number; consistency: number; confidence: number; }; type ActionSegment = { actionType: ActionType; isUnknown: boolean; startMs: number; endMs: number; durationMs: number; confidenceAvg: number; score: number; peakScore: number; frameCount: number; issueSummary: string[]; keyFrames: number[]; clipLabel: string; }; type Point = { x: number; y: number; visibility?: number; }; type TrackingState = { prevTimestamp?: number; prevRightWrist?: Point; prevLeftWrist?: Point; prevHipCenter?: Point; lastAction?: ActionType; }; type AnalyzedFrame = { action: ActionType; confidence: number; score: PoseScore; feedback: string[]; }; type ActionObservation = { action: ActionType; confidence: number; }; const ACTION_META: Record = { forehand: { label: "正手挥拍", tone: "bg-emerald-500/10 text-emerald-700", accent: "bg-emerald-500" }, backhand: { label: "反手挥拍", tone: 
// ACTION_META continues: per-action display label (zh-CN) plus Tailwind tone/accent
// classes. NOTE(review): the Record type arguments look stripped — presumably
// Record<ActionType, { label: string; tone: string; accent: string }>; confirm in VCS.
"bg-sky-500/10 text-sky-700", accent: "bg-sky-500" }, serve: { label: "发球", tone: "bg-amber-500/10 text-amber-700", accent: "bg-amber-500" }, volley: { label: "截击", tone: "bg-indigo-500/10 text-indigo-700", accent: "bg-indigo-500" }, overhead: { label: "高压", tone: "bg-rose-500/10 text-rose-700", accent: "bg-rose-500" }, slice: { label: "切削", tone: "bg-orange-500/10 text-orange-700", accent: "bg-orange-500" }, lob: { label: "挑高球", tone: "bg-fuchsia-500/10 text-fuchsia-700", accent: "bg-fuchsia-500" }, unknown: { label: "未知动作", tone: "bg-slate-500/10 text-slate-700", accent: "bg-slate-500" }, }; const POSE_CONNECTIONS: Array<[number, number]> = [ [11, 12], [11, 13], [13, 15], [12, 14], [14, 16], [11, 23], [12, 24], [23, 24], [23, 25], [24, 26], [25, 27], [26, 28], [15, 17], [16, 18], [15, 19], [16, 20], [17, 19], [18, 20], ]; const SETUP_STEPS = [ { title: "固定设备", desc: "手机或平板保持稳定,避免分析阶段发生晃动", icon: }, { title: "保留全身", desc: "画面尽量覆盖从头到脚,便于识别重心和脚步", icon: }, { title: "确认视角", desc: "后置摄像头优先,横屏更适合完整挥拍追踪", icon: }, { title: "开始分析", desc: "动作会按连续区间自动聚合,最长单段不超过 10 秒", icon: }, ]; const SEGMENT_MAX_MS = 10_000; const MERGE_GAP_MS = 500; const MIN_SEGMENT_MS = 250; function clamp(value: number, min: number, max: number) { return Math.max(min, Math.min(max, value)); } function distance(a?: Point, b?: Point) { if (!a || !b) return 0; const dx = a.x - b.x; const dy = a.y - b.y; return Math.sqrt(dx * dx + dy * dy); } function getAngle(a?: Point, b?: Point, c?: Point) { if (!a || !b || !c) return 0; const radians = Math.atan2(c.y - b.y, c.x - b.x) - Math.atan2(a.y - b.y, a.x - b.x); let angle = Math.abs((radians * 180) / Math.PI); if (angle > 180) angle = 360 - angle; return angle; } function formatDuration(ms: number) { const totalSeconds = Math.max(0, Math.round(ms / 1000)); const minutes = Math.floor(totalSeconds / 60); const seconds = totalSeconds % 60; return `${minutes.toString().padStart(2, "0")}:${seconds.toString().padStart(2, "0")}`; } function isMobileDevice() { if 
// (condition continues) isMobileDevice: UA sniff OR a (max-width: 768px) media
// query, SSR-safe via the `typeof window` guard. pickRecorderMimeType below
// walks the fallback chain mp4/avc1 -> webm/vp9 -> webm/vp8 -> plain webm.
// blobToBase64 strips the data-URL prefix before resolving.
(typeof window === "undefined") return false; return /Android|iPhone|iPad|iPod/i.test(navigator.userAgent) || window.matchMedia("(max-width: 768px)").matches; } function pickRecorderMimeType() { const supported = typeof MediaRecorder !== "undefined" && typeof MediaRecorder.isTypeSupported === "function"; if (supported && MediaRecorder.isTypeSupported("video/mp4;codecs=avc1.42E01E,mp4a.40.2")) { return "video/mp4"; } if (supported && MediaRecorder.isTypeSupported("video/webm;codecs=vp9,opus")) { return "video/webm;codecs=vp9,opus"; } if (supported && MediaRecorder.isTypeSupported("video/webm;codecs=vp8,opus")) { return "video/webm;codecs=vp8,opus"; } return "video/webm"; } function blobToBase64(blob: Blob) { return new Promise((resolve, reject) => { const reader = new FileReader(); reader.onloadend = () => { const result = reader.result; if (typeof result !== "string") { reject(new Error("无法读取录制文件")); return; } const [, base64 = ""] = result.split(","); resolve(base64); }; reader.onerror = () => reject(reader.error || new Error("文件读取失败")); reader.readAsDataURL(blob); }); } function createSegment(action: ActionType, elapsedMs: number, frame: AnalyzedFrame): ActionSegment { return { actionType: action, isUnknown: action === "unknown", startMs: elapsedMs, endMs: elapsedMs, durationMs: 0, confidenceAvg: frame.confidence, score: frame.score.overall, peakScore: frame.score.overall, frameCount: 1, issueSummary: frame.feedback.slice(0, 3), keyFrames: [elapsedMs], clipLabel: `${ACTION_META[action].label} ${formatDuration(elapsedMs)}`, }; } function stabilizeAnalyzedFrame(frame: AnalyzedFrame, history: ActionObservation[]): AnalyzedFrame { const nextHistory = [...history, { action: frame.action, confidence: frame.confidence }].slice(-6); history.splice(0, history.length, ...nextHistory); const weights = nextHistory.map((_, index) => index + 1); const actionScores = nextHistory.reduce>((acc, sample, index) => { const weighted = sample.confidence * weights[index]; 
// (reduce body continues) NOTE(review): `reduce>` is missing its stripped type
// argument — presumably reduce<Record<ActionType, number>>. The winner must beat
// the runner-up by >= 0.2 weighted score, otherwise a sufficiently confident
// current-frame action (>= 0.65, or >= 0.52 when the winner is "unknown") wins.
acc[sample.action] = (acc[sample.action] || 0) + weighted; return acc; }, { forehand: 0, backhand: 0, serve: 0, volley: 0, overhead: 0, slice: 0, lob: 0, unknown: 0, }); const ranked = Object.entries(actionScores).sort((a, b) => b[1] - a[1]) as Array<[ActionType, number]>; const [winner = "unknown", winnerScore = 0] = ranked[0] || []; const [, runnerScore = 0] = ranked[1] || []; const winnerSamples = nextHistory.filter((sample) => sample.action === winner); const averageConfidence = winnerSamples.length > 0 ? winnerSamples.reduce((sum, sample) => sum + sample.confidence, 0) / winnerSamples.length : frame.confidence; const stableAction = winner === "unknown" && frame.action !== "unknown" && frame.confidence >= 0.52 ? frame.action : winnerScore - runnerScore < 0.2 && frame.confidence >= 0.65 ? frame.action : winner; const stableConfidence = stableAction === frame.action ? Math.max(frame.confidence, averageConfidence) : averageConfidence; return { ...frame, action: stableAction, confidence: clamp(stableConfidence, 0, 1), feedback: stableAction === "unknown" ? ["系统正在继续观察,当前窗口内未形成稳定动作特征。", ...frame.feedback].slice(0, 3) : frame.feedback, }; } function analyzePoseFrame(landmarks: Point[], tracking: TrackingState, timestamp: number): AnalyzedFrame { const nose = landmarks[0]; const leftShoulder = landmarks[11]; const rightShoulder = landmarks[12]; const leftElbow = landmarks[13]; const rightElbow = landmarks[14]; const leftWrist = landmarks[15]; const rightWrist = landmarks[16]; const leftHip = landmarks[23]; const rightHip = landmarks[24]; const leftKnee = landmarks[25]; const rightKnee = landmarks[26]; const leftAnkle = landmarks[27]; const rightAnkle = landmarks[28]; const hipCenter = { x: ((leftHip?.x ?? 0.5) + (rightHip?.x ?? 0.5)) / 2, y: ((leftHip?.y ?? 0.7) + (rightHip?.y ?? 0.7)) / 2, }; const dtMs = tracking.prevTimestamp ? 
// dt clamp: at least 16ms between samples when tracking has a previous
// timestamp, 33ms fallback on the first frame; speeds are normalized to
// units/second via (1000 / dtMs). Sub-scores below are linear penalties on
// tilt/offset/angle magnitudes, clamped to [0, 100].
Math.max(16, timestamp - tracking.prevTimestamp) : 33; const rightSpeed = distance(rightWrist, tracking.prevRightWrist) * (1000 / dtMs); const leftSpeed = distance(leftWrist, tracking.prevLeftWrist) * (1000 / dtMs); const hipSpeed = distance(hipCenter, tracking.prevHipCenter) * (1000 / dtMs); const rightVerticalMotion = tracking.prevRightWrist ? tracking.prevRightWrist.y - (rightWrist?.y ?? tracking.prevRightWrist.y) : 0; const shoulderTilt = Math.abs((leftShoulder?.y ?? 0.3) - (rightShoulder?.y ?? 0.3)); const hipTilt = Math.abs((leftHip?.y ?? 0.55) - (rightHip?.y ?? 0.55)); const headOffset = Math.abs((nose?.x ?? 0.5) - (((leftShoulder?.x ?? 0.45) + (rightShoulder?.x ?? 0.55)) / 2)); const kneeBend = ((getAngle(leftHip, leftKnee, leftAnkle) || 165) + (getAngle(rightHip, rightKnee, rightAnkle) || 165)) / 2; const rightElbowAngle = getAngle(rightShoulder, rightElbow, rightWrist) || 145; const leftElbowAngle = getAngle(leftShoulder, leftElbow, leftWrist) || 145; const footSpread = Math.abs((leftAnkle?.x ?? 0.42) - (rightAnkle?.x ?? 0.58)); const visibility = landmarks.reduce((sum, point) => sum + (point.visibility ?? 0.95), 0) / Math.max(1, landmarks.length); const posture = clamp(100 - shoulderTilt * 780 - headOffset * 640, 0, 100); const balance = clamp(100 - hipTilt * 900 - Math.max(0, 0.16 - footSpread) * 260, 0, 100); const footwork = clamp(45 + Math.min(36, hipSpeed * 120) + Math.max(0, 165 - kneeBend) * 0.35, 0, 100); const consistency = clamp(visibility * 100 - Math.abs(rightSpeed - leftSpeed) * 10, 0, 100); const candidates: Array<{ action: ActionType; confidence: number }> = [ { action: "serve", confidence: clamp( (rightWrist && nose && rightWrist.y < nose.y ? 0.45 : 0.1) + (rightElbow && rightShoulder && rightElbow.y < rightShoulder.y ? 
// (candidate list continues) each stroke candidate sums positional cues
// (wrist relative to nose/shoulder/hip center), speed terms, and elbow-angle
// terms into a clamped confidence; the list is sorted and the top candidate
// must reach >= 0.5 confidence, otherwise the frame is labeled "unknown".
0.18 : 0.04) + clamp((rightElbowAngle - 135) / 55, 0, 0.22) + clamp(rightVerticalMotion * 4.5, 0, 0.15), 0, 0.98, ), }, { action: "overhead", confidence: clamp( (rightWrist && rightShoulder && rightWrist.y < rightShoulder.y - 0.1 ? 0.34 : 0.08) + clamp(rightSpeed * 0.08, 0, 0.28) + clamp((rightElbowAngle - 125) / 70, 0, 0.18), 0, 0.92, ), }, { action: "forehand", confidence: clamp( (rightWrist && nose && rightWrist.x > nose.x ? 0.28 : 0.08) + clamp(rightSpeed * 0.12, 0, 0.36) + clamp((rightElbowAngle - 85) / 70, 0, 0.2), 0, 0.94, ), }, { action: "backhand", confidence: clamp( ((leftWrist && nose && leftWrist.x < nose.x) || (rightWrist && nose && rightWrist.x < nose.x) ? 0.28 : 0.08) + clamp(Math.max(leftSpeed, rightSpeed) * 0.1, 0, 0.34) + clamp((leftElbowAngle - 85) / 70, 0, 0.18), 0, 0.92, ), }, { action: "volley", confidence: clamp( (rightWrist && rightShoulder && Math.abs(rightWrist.y - rightShoulder.y) < 0.12 ? 0.3 : 0.08) + clamp((0.22 - Math.abs((rightWrist?.x ?? 0.5) - hipCenter.x)) * 1.5, 0, 0.18) + clamp((1.8 - rightSpeed) * 0.14, 0, 0.18), 0, 0.88, ), }, { action: "slice", confidence: clamp( (rightWrist && rightShoulder && rightWrist.y > rightShoulder.y ? 0.18 : 0.06) + clamp((tracking.prevRightWrist && rightWrist && rightWrist.y > tracking.prevRightWrist.y ? 0.18 : 0.04), 0, 0.18) + clamp(rightSpeed * 0.08, 0, 0.24), 0, 0.82, ), }, { action: "lob", confidence: clamp( (rightWrist && nose && rightWrist.y < nose.y + 0.1 ? 0.22 : 0.08) + clamp(rightVerticalMotion * 4.2, 0, 0.28) + clamp((0.18 - Math.abs((rightWrist?.x ?? 0.5) - hipCenter.x)) * 1.4, 0, 0.18), 0, 0.86, ), }, ]; candidates.sort((a, b) => b.confidence - a.confidence); const topCandidate = candidates[0] ?? { action: "unknown" as ActionType, confidence: 0.2 }; const action = topCandidate.confidence >= 0.5 ? topCandidate.action : "unknown"; const techniqueBase = action === "serve" || action === "overhead" ? clamp(100 - Math.abs(rightElbowAngle - 160) * 0.9, 0, 100) : action === "backhand" ? 
// (ternary continues) technique targets an ideal elbow angle per stroke family
// (160° for serve/overhead, 118° otherwise) and adds a small confidence bonus.
// Overall = 0.22*posture + 0.18*balance + 0.28*technique + 0.16*footwork +
// 0.16*consistency. Feedback strings (zh-CN) are capped at 3 per frame, and
// the tracking state is mutated in place for the next frame's deltas.
clamp(100 - Math.abs(leftElbowAngle - 118) * 0.9, 0, 100) : clamp(100 - Math.abs(rightElbowAngle - 118) * 0.85, 0, 100); const technique = clamp(techniqueBase + topCandidate.confidence * 8, 0, 100); const overall = clamp( posture * 0.22 + balance * 0.18 + technique * 0.28 + footwork * 0.16 + consistency * 0.16, 0, 100, ); const feedback: string[] = []; if (action === "unknown") { feedback.push("当前片段缺少完整挥拍特征,系统已归为未知动作。"); } if (posture < 72) { feedback.push("上体轴线偏移较明显,击球准备时保持头肩稳定。"); } if (balance < 70) { feedback.push("重心波动偏大,建议扩大支撑面并缩短恢复时间。"); } if (footwork < 68) { feedback.push("脚步启动不足,击球前先完成小碎步调整。"); } if ((action === "serve" || action === "overhead") && technique < 75) { feedback.push("抬臂延展不够,击球点再高一些会更完整。"); } if ((action === "forehand" || action === "backhand") && technique < 75) { feedback.push("肘腕角度偏紧,击球点前移并完成收拍。"); } if (feedback.length === 0) { feedback.push("节奏稳定,可以继续累积高质量动作片段。"); } tracking.prevTimestamp = timestamp; tracking.prevRightWrist = rightWrist; tracking.prevLeftWrist = leftWrist; tracking.prevHipCenter = hipCenter; tracking.lastAction = action; return { action, confidence: clamp(topCandidate.confidence, 0, 1), score: { overall: Math.round(overall), posture: Math.round(posture), balance: Math.round(balance), technique: Math.round(technique), footwork: Math.round(footwork), consistency: Math.round(consistency), confidence: Math.round(clamp(topCandidate.confidence * 100, 0, 100)), }, feedback: feedback.slice(0, 3), }; } function drawOverlay(canvas: HTMLCanvasElement | null, landmarks: Point[] | undefined) { const ctx = canvas?.getContext("2d"); if (!canvas || !ctx) return; ctx.clearRect(0, 0, canvas.width, canvas.height); if (!landmarks) return; ctx.strokeStyle = "rgba(25, 211, 155, 0.9)"; ctx.lineWidth = 3; for (const [from, to] of POSE_CONNECTIONS) { const a = landmarks[from]; const b = landmarks[to]; if (!a || !b || (a.visibility ?? 1) < 0.25 || (b.visibility ?? 
// (drawOverlay continues) connections/points below 0.25 visibility are
// skipped; landmark indices 11–16 (shoulders/elbows/wrists) draw larger,
// yellow dots. NOTE(review): ScoreBar's JSX return at the end of this line
// appears stripped — only its expression fragments survive on the next lines.
1) < 0.25) continue; ctx.beginPath(); ctx.moveTo(a.x * canvas.width, a.y * canvas.height); ctx.lineTo(b.x * canvas.width, b.y * canvas.height); ctx.stroke(); } landmarks.forEach((point, index) => { if ((point.visibility ?? 1) < 0.25) return; ctx.fillStyle = index >= 11 && index <= 16 ? "rgba(253, 224, 71, 0.95)" : "rgba(255,255,255,0.88)"; ctx.beginPath(); ctx.arc(point.x * canvas.width, point.y * canvas.height, index >= 11 && index <= 16 ? 5 : 4, 0, Math.PI * 2); ctx.fill(); }); } function ScoreBar({ label, value, accent }: { label: string; value: number; accent?: string }) { return (
{label} {Math.round(value)}
// LiveCamera component begins below: refs for media/pose/recorder/session
// state, trpc mutations (video.upload, analysis.liveSessionSave with cache
// invalidation on success), camera start/stop/switch callbacks, segment
// aggregation, persistSession, and the analysis loop driven by
// requestAnimationFrame. NOTE(review): generic arguments on useRef/useState
// appear stripped throughout.
); } export default function LiveCamera() { useAuth(); const utils = trpc.useUtils(); const mobile = useMemo(() => isMobileDevice(), []); const videoRef = useRef(null); const canvasRef = useRef(null); const streamRef = useRef(null); const poseRef = useRef(null); const recorderRef = useRef(null); const recorderMimeTypeRef = useRef("video/webm"); const recorderChunksRef = useRef([]); const recorderStopPromiseRef = useRef | null>(null); const analyzingRef = useRef(false); const animationRef = useRef(0); const sessionStartedAtRef = useRef(0); const trackingRef = useRef({}); const actionHistoryRef = useRef([]); const currentSegmentRef = useRef(null); const segmentsRef = useRef([]); const frameSamplesRef = useRef([]); const [cameraActive, setCameraActive] = useState(false); const [facing, setFacing] = useState("environment"); const [hasMultipleCameras, setHasMultipleCameras] = useState(false); const [showSetupGuide, setShowSetupGuide] = useState(true); const [setupStep, setSetupStep] = useState(0); const [sessionMode, setSessionMode] = useState("practice"); const [analyzing, setAnalyzing] = useState(false); const [saving, setSaving] = useState(false); const [immersivePreview, setImmersivePreview] = useState(false); const [liveScore, setLiveScore] = useState(null); const [currentAction, setCurrentAction] = useState("unknown"); const [feedback, setFeedback] = useState([]); const [segments, setSegments] = useState([]); const [durationMs, setDurationMs] = useState(0); const uploadMutation = trpc.video.upload.useMutation(); const saveLiveSessionMutation = trpc.analysis.liveSessionSave.useMutation({ onSuccess: () => { utils.profile.stats.invalidate(); utils.analysis.liveSessionList.invalidate(); utils.record.list.invalidate(); utils.achievement.list.invalidate(); utils.rating.current.invalidate(); utils.rating.history.invalidate(); }, }); const liveSessionsQuery = trpc.analysis.liveSessionList.useQuery({ limit: 8 }); const visibleSegments = useMemo( () => 
// visibleSegments: non-unknown segments, newest first; unknownSegments kept
// separately. Effects: detect multiple cameras, re-attach the stream when the
// preview element remounts (immersive toggle), and stop everything on unmount.
segments.filter((segment) => !segment.isUnknown).sort((a, b) => b.startMs - a.startMs), [segments], ); const unknownSegments = useMemo(() => segments.filter((segment) => segment.isUnknown), [segments]); useEffect(() => { navigator.mediaDevices?.enumerateDevices().then((devices) => { const cameras = devices.filter((device) => device.kind === "videoinput"); setHasMultipleCameras(cameras.length > 1); }).catch(() => undefined); }, []); useEffect(() => { if (!cameraActive || !streamRef.current || !videoRef.current) return; if (videoRef.current.srcObject !== streamRef.current) { videoRef.current.srcObject = streamRef.current; void videoRef.current.play().catch(() => undefined); } }, [cameraActive, immersivePreview]); const stopSessionRecorder = useCallback(async () => { const recorder = recorderRef.current; if (!recorder) return null; const stopPromise = recorderStopPromiseRef.current; if (recorder.state !== "inactive") { recorder.stop(); } recorderRef.current = null; recorderStopPromiseRef.current = null; return stopPromise ?? null; }, []); const stopCamera = useCallback(() => { if (animationRef.current) { cancelAnimationFrame(animationRef.current); animationRef.current = 0; } if (poseRef.current?.close) { poseRef.current.close(); poseRef.current = null; } analyzingRef.current = false; setAnalyzing(false); void stopSessionRecorder(); if (streamRef.current) { streamRef.current.getTracks().forEach((track) => track.stop()); streamRef.current = null; } if (videoRef.current) { videoRef.current.srcObject = null; } setCameraActive(false); }, [stopSessionRecorder]); useEffect(() => { return () => { stopCamera(); }; }, [stopCamera]); const startCamera = useCallback(async () => { try { if (streamRef.current) { streamRef.current.getTracks().forEach((track) => track.stop()); } const constraints: MediaStreamConstraints = { video: { facingMode: facing, width: { ideal: mobile ? 1280 : 1920 }, height: { ideal: mobile ? 
// startCamera: lower ideal resolution on mobile, 30fps cap, no audio.
// switchCamera: flip facing, restart after a 250ms settle delay when active.
// flushSegment drops segments under MIN_SEGMENT_MS and finalizes the label,
// keyFrames (last 4, deduped) and issueSummary (first 4) before appending.
720 : 1080 }, frameRate: { ideal: 30, max: 30 }, }, audio: false, }; const stream = await navigator.mediaDevices.getUserMedia(constraints); streamRef.current = stream; if (videoRef.current) { videoRef.current.srcObject = stream; await videoRef.current.play(); } setCameraActive(true); toast.success("摄像头已启动"); } catch (error: any) { toast.error(`摄像头启动失败: ${error?.message || "未知错误"}`); } }, [facing, mobile]); const switchCamera = useCallback(async () => { const nextFacing: CameraFacing = facing === "user" ? "environment" : "user"; setFacing(nextFacing); if (!cameraActive) return; stopCamera(); await new Promise((resolve) => setTimeout(resolve, 250)); await startCamera(); }, [cameraActive, facing, startCamera, stopCamera]); const flushSegment = useCallback((segment: ActionSegment | null) => { if (!segment || segment.durationMs < MIN_SEGMENT_MS) { return; } const finalized: ActionSegment = { ...segment, durationMs: Math.max(segment.durationMs, segment.endMs - segment.startMs), clipLabel: `${ACTION_META[segment.actionType].label} ${formatDuration(segment.startMs)} - ${formatDuration(segment.endMs)}`, keyFrames: Array.from(new Set(segment.keyFrames)).slice(-4), issueSummary: segment.issueSummary.slice(0, 4), }; segmentsRef.current = [...segmentsRef.current, finalized]; setSegments(segmentsRef.current); }, []); const appendFrameToSegment = useCallback((frame: AnalyzedFrame, elapsedMs: number) => { const current = currentSegmentRef.current; if (!current) { currentSegmentRef.current = createSegment(frame.action, elapsedMs, frame); return; } const sameAction = current.actionType === frame.action; const gap = elapsedMs - current.endMs; const nextDuration = elapsedMs - current.startMs; if (sameAction && gap <= MERGE_GAP_MS && nextDuration <= SEGMENT_MAX_MS) { const nextFrameCount = current.frameCount + 1; current.endMs = elapsedMs; current.durationMs = current.endMs - current.startMs; current.frameCount = nextFrameCount; current.confidenceAvg = ((current.confidenceAvg * 
// (running averages continue) confidence/score are incremental means over
// frameCount; otherwise the open segment is flushed and a new one started.
// startSessionRecorder: 1s timeslices, lower bitrate on mobile, and the
// onstop promise resolves the final Blob (mp4 vs webm by recorded mime).
(nextFrameCount - 1)) + frame.confidence) / nextFrameCount; current.score = ((current.score * (nextFrameCount - 1)) + frame.score.overall) / nextFrameCount; current.peakScore = Math.max(current.peakScore, frame.score.overall); current.issueSummary = Array.from(new Set([...current.issueSummary, ...frame.feedback])).slice(0, 4); current.keyFrames = [...current.keyFrames.slice(-3), elapsedMs]; return; } flushSegment(current); currentSegmentRef.current = createSegment(frame.action, elapsedMs, frame); }, [flushSegment]); const startSessionRecorder = useCallback((stream: MediaStream) => { if (typeof MediaRecorder === "undefined") { recorderRef.current = null; recorderStopPromiseRef.current = Promise.resolve(null); return; } recorderChunksRef.current = []; const mimeType = pickRecorderMimeType(); recorderMimeTypeRef.current = mimeType; const recorder = new MediaRecorder(stream, { mimeType, videoBitsPerSecond: mobile ? 1_300_000 : 2_300_000 }); recorderRef.current = recorder; recorder.ondataavailable = (event) => { if (event.data && event.data.size > 0) { recorderChunksRef.current.push(event.data); } }; recorderStopPromiseRef.current = new Promise((resolve) => { recorder.onstop = () => { const type = recorderMimeTypeRef.current.includes("mp4") ? "video/mp4" : "video/webm"; const blob = recorderChunksRef.current.length > 0 ? 
// persistSession: closes the open segment at session end, averages all frame
// sub-scores (falling back to the last live score), picks the dominant action
// by total segment duration, uploads the recording (if any) as base64, then
// saves the session with rounded scores, per-action durations and per-segment
// detail. NOTE(review): the stripped `reduce>` here presumably also took
// Record<ActionType, number> — confirm against VCS.
new Blob(recorderChunksRef.current, { type }) : null; resolve(blob); }; }); recorder.start(1000); }, [mobile]); const persistSession = useCallback(async () => { const endedAt = Date.now(); const sessionDuration = Math.max(0, endedAt - sessionStartedAtRef.current); const currentSegment = currentSegmentRef.current; if (currentSegment) { currentSegment.endMs = sessionDuration; currentSegment.durationMs = currentSegment.endMs - currentSegment.startMs; flushSegment(currentSegment); currentSegmentRef.current = null; } const scoreSamples = frameSamplesRef.current; const finalSegments = [...segmentsRef.current]; const segmentDurations = finalSegments.reduce>((acc, segment) => { acc[segment.actionType] = (acc[segment.actionType] || 0) + segment.durationMs; return acc; }, { forehand: 0, backhand: 0, serve: 0, volley: 0, overhead: 0, slice: 0, lob: 0, unknown: 0, }); const dominantAction = (Object.entries(segmentDurations).sort((a, b) => b[1] - a[1])[0]?.[0] || "unknown") as ActionType; const effectiveSegments = finalSegments.filter((segment) => !segment.isUnknown); const unknownCount = finalSegments.length - effectiveSegments.length; const averageScore = scoreSamples.length > 0 ? scoreSamples.reduce((sum, item) => sum + item.overall, 0) / scoreSamples.length : liveScore?.overall || 0; const averagePosture = scoreSamples.length > 0 ? scoreSamples.reduce((sum, item) => sum + item.posture, 0) / scoreSamples.length : liveScore?.posture || 0; const averageBalance = scoreSamples.length > 0 ? scoreSamples.reduce((sum, item) => sum + item.balance, 0) / scoreSamples.length : liveScore?.balance || 0; const averageTechnique = scoreSamples.length > 0 ? scoreSamples.reduce((sum, item) => sum + item.technique, 0) / scoreSamples.length : liveScore?.technique || 0; const averageFootwork = scoreSamples.length > 0 ? scoreSamples.reduce((sum, item) => sum + item.footwork, 0) / scoreSamples.length : liveScore?.footwork || 0; const averageConsistency = scoreSamples.length > 0 ? 
scoreSamples.reduce((sum, item) => sum + item.consistency, 0) / scoreSamples.length : liveScore?.consistency || 0; const sessionFeedback = Array.from(new Set(finalSegments.flatMap((segment) => segment.issueSummary))).slice(0, 5); let uploadedVideo: { videoId: number; url: string } | null = null; const recordedBlob = await stopSessionRecorder(); if (recordedBlob && recordedBlob.size > 0) { const format = recorderMimeTypeRef.current.includes("mp4") ? "mp4" : "webm"; const fileBase64 = await blobToBase64(recordedBlob); uploadedVideo = await uploadMutation.mutateAsync({ title: `实时分析 ${new Date().toLocaleString("zh-CN", { month: "2-digit", day: "2-digit", hour: "2-digit", minute: "2-digit" })}`, format, fileSize: recordedBlob.size, exerciseType: dominantAction, fileBase64, }); } if (finalSegments.length === 0) { return; } await saveLiveSessionMutation.mutateAsync({ title: `实时分析 ${ACTION_META[dominantAction].label}`, sessionMode, startedAt: sessionStartedAtRef.current, endedAt, durationMs: sessionDuration, dominantAction, overallScore: Math.round(averageScore), postureScore: Math.round(averagePosture), balanceScore: Math.round(averageBalance), techniqueScore: Math.round(averageTechnique), footworkScore: Math.round(averageFootwork), consistencyScore: Math.round(averageConsistency), totalActionCount: effectiveSegments.length, effectiveSegments: effectiveSegments.length, totalSegments: finalSegments.length, unknownSegments: unknownCount, feedback: sessionFeedback, metrics: { actionDurations: segmentDurations, averageConfidence: Math.round((scoreSamples.reduce((sum, item) => sum + item.confidence, 0) / Math.max(1, scoreSamples.length)) * 10) / 10, sampleCount: scoreSamples.length, mobile, }, segments: finalSegments.map((segment) => ({ actionType: segment.actionType, isUnknown: segment.isUnknown, startMs: segment.startMs, endMs: segment.endMs, durationMs: segment.durationMs, confidenceAvg: Number(segment.confidenceAvg.toFixed(4)), score: Math.round(segment.score), peakScore: 
// startAnalysis: resets all per-session refs/state, starts the recorder, then
// loads @mediapipe/pose (or the window-level __TEST_MEDIAPIPE_FACTORY__ test
// double), wires onResults to the analyze -> stabilize -> segment pipeline,
// and pumps frames with requestAnimationFrame once video.readyState >= 2.
Math.round(segment.peakScore), frameCount: segment.frameCount, issueSummary: segment.issueSummary, keyFrames: segment.keyFrames, clipLabel: segment.clipLabel, })), videoId: uploadedVideo?.videoId, videoUrl: uploadedVideo?.url, }); }, [flushSegment, liveScore, mobile, saveLiveSessionMutation, sessionMode, stopSessionRecorder, uploadMutation]); const startAnalysis = useCallback(async () => { if (!cameraActive || !videoRef.current || !streamRef.current) { toast.error("请先启动摄像头"); return; } if (analyzingRef.current || saving) return; analyzingRef.current = true; setAnalyzing(true); setSaving(false); setSegments([]); segmentsRef.current = []; currentSegmentRef.current = null; trackingRef.current = {}; actionHistoryRef.current = []; frameSamplesRef.current = []; sessionStartedAtRef.current = Date.now(); setDurationMs(0); startSessionRecorder(streamRef.current); try { const testFactory = ( window as typeof window & { __TEST_MEDIAPIPE_FACTORY__?: () => Promise<{ Pose: any }>; } ).__TEST_MEDIAPIPE_FACTORY__; const { Pose } = testFactory ? 
await testFactory() : await import("@mediapipe/pose"); const pose = new Pose({ locateFile: (file: string) => `https://cdn.jsdelivr.net/npm/@mediapipe/pose/${file}`, }); poseRef.current = pose; pose.setOptions({ modelComplexity: 1, smoothLandmarks: true, enableSegmentation: false, minDetectionConfidence: 0.5, minTrackingConfidence: 0.5, }); pose.onResults((results: { poseLandmarks?: Point[] }) => { const video = videoRef.current; const canvas = canvasRef.current; if (!video || !canvas) return; if (video.videoWidth > 0 && video.videoHeight > 0) { canvas.width = video.videoWidth; canvas.height = video.videoHeight; } drawOverlay(canvas, results.poseLandmarks); if (!results.poseLandmarks) return; const analyzed = stabilizeAnalyzedFrame( analyzePoseFrame(results.poseLandmarks, trackingRef.current, performance.now()), actionHistoryRef.current, ); const elapsedMs = Date.now() - sessionStartedAtRef.current; appendFrameToSegment(analyzed, elapsedMs); frameSamplesRef.current.push(analyzed.score); setLiveScore(analyzed.score); setCurrentAction(analyzed.action); setFeedback(analyzed.feedback); setDurationMs(elapsedMs); }); const processFrame = async () => { if (!analyzingRef.current || !videoRef.current || !poseRef.current) return; if (videoRef.current.readyState >= 2 || testFactory) { await poseRef.current.send({ image: videoRef.current }); } animationRef.current = requestAnimationFrame(processFrame); }; toast.success("动作识别已启动"); processFrame(); } catch (error: any) { analyzingRef.current = false; setAnalyzing(false); await stopSessionRecorder(); toast.error(`实时分析启动失败: ${error?.message || "未知错误"}`); } }, [appendFrameToSegment, cameraActive, saving, startSessionRecorder, stopSessionRecorder]); const stopAnalysis = useCallback(async () => { if (!analyzingRef.current) return; analyzingRef.current = false; setAnalyzing(false); setSaving(true); if (animationRef.current) { cancelAnimationFrame(animationRef.current); animationRef.current = 0; } try { if (poseRef.current?.close) { 
// stopAnalysis continues: close pose, persist the session, toast, refetch the
// recent-sessions list; `saving` is cleared in finally either way.
// NOTE(review): everything from `return (` below is the component's render
// tree (setup dialog, hero card with live stats, segment list, mobile
// immersive control rail) whose JSX tags appear stripped — only the text and
// expression fragments remain. Do not attempt to hand-repair; restore from VCS.
poseRef.current.close(); poseRef.current = null; } await persistSession(); toast.success("实时分析已保存,并同步写入训练记录"); await liveSessionsQuery.refetch(); } catch (error: any) { toast.error(`保存实时分析失败: ${error?.message || "未知错误"}`); } finally { setSaving(false); } }, [liveSessionsQuery, persistSession]); const handleSetupComplete = useCallback(async () => { setShowSetupGuide(false); await startCamera(); }, [startCamera]); const heroAction = ACTION_META[currentAction]; const previewTitle = analyzing ? `${heroAction.label} 识别中` : cameraActive ? "准备开始实时分析" : "摄像头待启动"; const renderPrimaryActions = (rail = false) => { const buttonClass = rail ? "h-14 w-14 rounded-2xl border border-white/10 bg-white/10 text-white hover:bg-white/20" : "h-11 rounded-2xl px-4"; if (!cameraActive) { return ( ); } return ( <> {hasMultipleCameras ? ( ) : null} {!analyzing ? ( ) : ( )} {!rail ? ( ) : null} ); }; return (
实时分析校准 按顺序确认拍摄位置,后续动作会自动识别并按区间保存。
{SETUP_STEPS.map((step, index) => (
{index < setupStep ? : step.icon}
{step.title}
{step.desc}
))}
{setupStep > 0 ? ( ) : null} {setupStep < SETUP_STEPS.length - 1 ? ( ) : ( )}
自动动作识别 {sessionMode === "practice" ? "练习会话" : "训练 PK"}
实时分析中枢

摄像头启动后默认自动识别正手、反手、发球、截击、高压、切削、挑高球与未知动作,连续片段会自动聚合,并回写训练记录、成就进度和综合评分。

当前动作
{heroAction.label}
识别时长
{formatDuration(durationMs)}
已聚合片段
{segments.length}
{renderPrimaryActions()}
连续动作区间 自动保留非未知动作区间,单段最长 10 秒,方便后续查看和回放。 {visibleSegments.length === 0 ? (
开始分析后,这里会按时间区间显示识别出的动作片段。
) : ( visibleSegments.map((segment) => { const meta = ACTION_META[segment.actionType]; return (
{meta.label} {formatDuration(segment.startMs)} - {formatDuration(segment.endMs)} 时长 {formatDuration(segment.durationMs)}
{segment.issueSummary.join(" · ") || "当前片段节奏稳定"}
片段得分 {Math.round(segment.score)}
置信度 {Math.round(segment.confidenceAvg * 100)}%
); }) )}
{mobile && immersivePreview ? (
{renderPrimaryActions(true)}
) : null}
); }