文件
tennis-training-hub/client/src/pages/LiveCamera.tsx
2026-03-16 23:53:10 +08:00

2763 行
119 KiB
TypeScript
原始文件 Blame 文件历史

此文件含有模棱两可的 Unicode 字符
此文件含有可能会与其他字符混淆的 Unicode 字符。 如果这是您有意为之,可以安全地忽略该警告。 使用 Escape 按钮显示它们。
import { useAuth } from "@/_core/hooks/useAuth";
import { trpc } from "@/lib/trpc";
import {
createMediaSession,
getMediaAssetUrl,
uploadMediaLiveFrame,
} from "@/lib/media";
import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert";
import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle } from "@/components/ui/dialog";
import { Input } from "@/components/ui/input";
import { Progress } from "@/components/ui/progress";
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select";
import { Slider } from "@/components/ui/slider";
import { Switch } from "@/components/ui/switch";
import { formatDateTimeShanghai } from "@/lib/time";
import { toast } from "sonner";
import { applyTrackZoom, type CameraQualityPreset, getLiveAnalysisBitrate, readTrackZoomState, requestCameraStream } from "@/lib/camera";
import {
ACTION_WINDOW_FRAMES,
AVATAR_PRESETS,
createEmptyStabilizedActionMeta,
createStableActionState,
drawLiveCameraOverlay,
getAvatarPreset,
renderLiveCameraOverlayToContext,
resolveAvatarKeyFromPrompt,
stabilizeActionStream,
type AvatarKey,
type AvatarPreset,
type AvatarRenderState,
type FrameActionSample,
type LiveActionType,
type StabilizedActionMeta,
} from "@/lib/liveCamera";
import {
Activity,
Camera,
CameraOff,
CheckCircle2,
ExternalLink,
FlipHorizontal,
Maximize2,
Minus,
Minimize2,
Monitor,
PlayCircle,
Plus,
RotateCcw,
Smartphone,
Sparkles,
Target,
Video,
Zap,
} from "lucide-react";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
// Front ("user") vs rear ("environment") camera selection.
type CameraFacing = "user" | "environment";
// Solo practice session or player-vs-player (PK) session.
type SessionMode = "practice" | "pk";
// Local alias for the shared live action union from "@/lib/liveCamera".
type ActionType = LiveActionType;
// Score breakdown for a frame or session; values are 0-100 after rounding.
type PoseScore = {
  overall: number;
  posture: number;
  balance: number;
  technique: number;
  footwork: number;
  consistency: number;
  confidence: number;
};
// A contiguous run of frames classified as the same action.
type ActionSegment = {
  actionType: ActionType;
  isUnknown: boolean;     // convenience flag: actionType === "unknown"
  startMs: number;        // elapsed ms from session start
  endMs: number;
  durationMs: number;
  confidenceAvg: number;  // 0-1; seeded from the first frame's confidence
  score: number;          // overall score (0-100)
  peakScore: number;      // highest single-frame overall score in the segment
  frameCount: number;
  issueSummary: string[]; // up to three feedback strings
  keyFrames: number[];    // elapsed-ms markers of notable frames
  clipLabel: string;      // e.g. "正手挥拍 01:23" (see createSegment)
};
// Metadata for one uploaded recording segment of this analysis session.
type ArchivedAnalysisVideo = {
  videoId: number;
  url: string;
  sequence: number;     // recording segment order (sorted ascending)
  durationMs: number;
  title: string;
};
// A 2D pose landmark; coordinates appear normalized to [0, 1]
// (fallbacks like 0.5 assume a centered body) — confirm against the pose model.
type Point = {
  x: number;
  y: number;
  visibility?: number;  // 0-1 detection confidence; treated as 0.95 when absent
};
// Mutable state carried between frames to estimate joint velocities.
type TrackingState = {
  prevTimestamp?: number;  // ms timestamp of the previous analyzed frame
  prevRightWrist?: Point;
  prevLeftWrist?: Point;
  prevHipCenter?: Point;
  lastAction?: ActionType; // action assigned to the previous frame
};
// Classification result for one pose frame.
type AnalyzedFrame = {
  action: ActionType;
  confidence: number;      // 0-1
  score: PoseScore;
  feedback: string[];      // up to three coaching messages
};
// Role of this client relative to the account's shared runtime session.
type RuntimeRole = "idle" | "owner" | "viewer";
// Owner-published state mirrored to viewers via runtime heartbeats.
// All fields are optional: a snapshot may be partial.
type RuntimeSnapshot = {
  phase?: "idle" | "analyzing" | "saving" | "safe" | "failed";
  startedAt?: number;          // epoch ms (session start)
  durationMs?: number;
  title?: string;
  sessionMode?: SessionMode;
  qualityPreset?: CameraQualityPreset;
  facingMode?: CameraFacing;
  deviceKind?: "mobile" | "desktop";
  avatarEnabled?: boolean;
  avatarKey?: AvatarKey;
  avatarLabel?: string;
  updatedAt?: number;          // epoch ms when the snapshot was built
  currentAction?: ActionType;  // stabilized action
  rawAction?: ActionType;      // pre-stabilization per-frame action
  feedback?: string[];
  liveScore?: PoseScore | null;
  stabilityMeta?: Partial<StabilizedActionMeta>;
  visibleSegments?: number;    // count of non-unknown segments
  unknownSegments?: number;
  archivedVideoCount?: number;
  recentSegments?: ActionSegment[]; // tail of the segment list (owner sends the last 5)
};
// Server-side record of the account's single active runtime session.
type RuntimeSession = {
  id: number;
  title: string | null;
  sessionMode: SessionMode;
  mediaSessionId: string | null;   // frame-relay media session id, when broadcasting
  status: "idle" | "active" | "ended";
  startedAt: string | null;        // date strings (parsed with `new Date` client-side)
  endedAt: string | null;
  lastHeartbeatAt: string | null;
  snapshot: RuntimeSnapshot | null;
};
// Display metadata per action: Chinese label plus Tailwind classes for the
// badge tone and the accent bar.
const ACTION_META: Record<ActionType, { label: string; tone: string; accent: string }> = {
  forehand: { label: "正手挥拍", tone: "bg-emerald-500/10 text-emerald-700", accent: "bg-emerald-500" },
  backhand: { label: "反手挥拍", tone: "bg-sky-500/10 text-sky-700", accent: "bg-sky-500" },
  serve: { label: "发球", tone: "bg-amber-500/10 text-amber-700", accent: "bg-amber-500" },
  volley: { label: "截击", tone: "bg-indigo-500/10 text-indigo-700", accent: "bg-indigo-500" },
  overhead: { label: "高压", tone: "bg-rose-500/10 text-rose-700", accent: "bg-rose-500" },
  slice: { label: "切削", tone: "bg-orange-500/10 text-orange-700", accent: "bg-orange-500" },
  lob: { label: "挑高球", tone: "bg-fuchsia-500/10 text-fuchsia-700", accent: "bg-fuchsia-500" },
  unknown: { label: "未知动作", tone: "bg-slate-500/10 text-slate-700", accent: "bg-slate-500" },
};
// Content for the four-step camera setup guide: title, tip text, and icon.
const SETUP_STEPS = [
  { title: "固定设备", desc: "手机或平板保持稳定,避免分析阶段发生晃动", icon: <Smartphone className="h-5 w-5" /> },
  { title: "保留全身", desc: "画面尽量覆盖从头到脚,便于识别重心和脚步", icon: <Monitor className="h-5 w-5" /> },
  { title: "确认视角", desc: "后置摄像头优先,横屏更适合完整挥拍追踪", icon: <Camera className="h-5 w-5" /> },
  { title: "开始分析", desc: "动作会先经过 24 帧稳定窗口确认,再按连续区间聚合保存", icon: <Target className="h-5 w-5" /> },
];
// Segment aggregation tuning (milliseconds). NOTE(review): the aggregation
// code that consumes the first three knobs is not visible in this chunk —
// confirm their exact semantics against the segment-building logic.
const SEGMENT_MAX_MS = 10_000;
const MERGE_GAP_MS = 900;
const MIN_SEGMENT_MS = 1_200;
// Rotation interval for the analysis MediaRecorder; presumably each rotation
// yields one archived upload — verify against the recorder rotate timer.
const ANALYSIS_RECORDING_SEGMENT_MS = 60_000;
// User-facing copy for each camera quality preset defined in "@/lib/camera".
const CAMERA_QUALITY_PRESETS: Record<CameraQualityPreset, { label: string; subtitle: string; description: string }> = {
  economy: {
    label: "节省流量",
    subtitle: "540p-720p · 低码率",
    description: "默认模式,优先减少本地录制文件大小与移动网络流量。",
  },
  balanced: {
    label: "均衡模式",
    subtitle: "720p-900p · 中码率",
    description: "兼顾动作识别稳定度与录制体积。",
  },
  clarity: {
    label: "清晰优先",
    subtitle: "720p-1080p · 高码率",
    description: "适合 Wi-Fi 和需要保留更多回放细节的场景。",
  },
};
/** Restrict `value` to the inclusive range [min, max]. */
function clamp(value: number, min: number, max: number) {
  const upperBounded = Math.min(max, value);
  return Math.max(min, upperBounded);
}
/**
 * Euclidean distance between two landmark points.
 * Returns 0 when either point is missing so velocity estimates degrade
 * gracefully on the first frame or when a joint is undetected.
 */
function distance(a?: Point, b?: Point) {
  if (!a || !b) return 0;
  // Math.hypot replaces the hand-rolled sqrt(dx*dx + dy*dy).
  return Math.hypot(a.x - b.x, a.y - b.y);
}
/**
 * Interior angle at vertex `b` (degrees, 0-180) formed by rays b->a and b->c.
 * Returns 0 when any point is missing.
 */
function getAngle(a?: Point, b?: Point, c?: Point) {
  if (!a || !b || !c) return 0;
  const rayToC = Math.atan2(c.y - b.y, c.x - b.x);
  const rayToA = Math.atan2(a.y - b.y, a.x - b.x);
  const degrees = Math.abs(((rayToC - rayToA) * 180) / Math.PI);
  return degrees > 180 ? 360 - degrees : degrees;
}
/** Format a millisecond duration as zero-padded "MM:SS" (negative input clamps to 0). */
function formatDuration(ms: number) {
  const wholeSeconds = Math.max(0, Math.round(ms / 1000));
  const mm = String(Math.floor(wholeSeconds / 60)).padStart(2, "0");
  const ss = String(wholeSeconds % 60).padStart(2, "0");
  return `${mm}:${ss}`;
}
/**
 * Trim a runtime session title and undo common mojibake.
 * Titles whose UTF-8 bytes were mis-decoded as Latin-1 contain telltale
 * characters; when detected we re-interpret the code units as raw bytes,
 * decode them as UTF-8, and keep whichever variant scores better
 * (more CJK characters, fewer artifact characters).
 */
function normalizeRuntimeTitle(value: string | null | undefined) {
  if (typeof value !== "string") return "";
  const cleaned = value.trim();
  if (!cleaned) return "";
  // Characters that rarely appear in legitimate titles but are typical
  // artifacts of UTF-8 text read as Latin-1.
  if (!/[ÃÂÆÐÑØæåçéèêëïîôöûüœŠŽƒ€¦]/.test(cleaned)) {
    return cleaned;
  }
  try {
    const rawBytes = Uint8Array.from(Array.from(cleaned), (char) => char.charCodeAt(0) & 0xff);
    const redecoded = new TextDecoder("utf-8").decode(rawBytes).trim();
    if (!redecoded || redecoded === cleaned) {
      return cleaned;
    }
    const quality = (text: string) => {
      const cjkCount = text.match(/[\u3400-\u9fff]/g)?.length ?? 0;
      const artifactCount = text.match(/[ÃÂÆÐÑØæåçéèêëïîôöûüœŠŽƒ€¦]/g)?.length ?? 0;
      return cjkCount * 2 - artifactCount;
    };
    return quality(redecoded) > quality(cleaned) ? redecoded : cleaned;
  } catch {
    return cleaned;
  }
}
/** Heuristic: UA sniff plus a narrow-viewport media query; false during SSR. */
function isMobileDevice() {
  if (typeof window === "undefined") return false;
  const uaLooksMobile = /Android|iPhone|iPad|iPod/i.test(navigator.userAgent);
  return uaLooksMobile || window.matchMedia("(max-width: 768px)").matches;
}
/**
 * Choose the best MediaRecorder mime type the browser supports,
 * preferring MP4, then VP9 and VP8 WebM, with plain WebM as the fallback
 * (also used when MediaRecorder is unavailable, e.g. during SSR).
 */
function pickRecorderMimeType() {
  const canProbe = typeof MediaRecorder !== "undefined" && typeof MediaRecorder.isTypeSupported === "function";
  if (canProbe) {
    if (MediaRecorder.isTypeSupported("video/mp4;codecs=avc1.42E01E,mp4a.40.2")) {
      return "video/mp4";
    }
    if (MediaRecorder.isTypeSupported("video/webm;codecs=vp9,opus")) {
      return "video/webm;codecs=vp9,opus";
    }
    if (MediaRecorder.isTypeSupported("video/webm;codecs=vp8,opus")) {
      return "video/webm;codecs=vp8,opus";
    }
  }
  return "video/webm";
}
/**
 * Read a Blob as a data URL and resolve with only its base64 payload.
 * Rejects with a descriptive error when the read fails or yields no string.
 */
function blobToBase64(blob: Blob) {
  return new Promise<string>((resolve, reject) => {
    const reader = new FileReader();
    reader.onerror = () => reject(reader.error || new Error("文件读取失败"));
    reader.onloadend = () => {
      const dataUrl = reader.result;
      if (typeof dataUrl !== "string") {
        reject(new Error("无法读取录制文件"));
        return;
      }
      // Strip the "data:<mime>;base64," prefix, keeping the payload only.
      resolve(dataUrl.split(",")[1] ?? "");
    };
    reader.readAsDataURL(blob);
  });
}
/**
 * Start a fresh single-frame segment at `elapsedMs`, seeded from `frame`.
 * Duration is 0 and start/end coincide until later frames extend it.
 */
function createSegment(action: ActionType, elapsedMs: number, frame: AnalyzedFrame): ActionSegment {
  const initialScore = frame.score.overall;
  return {
    actionType: action,
    isUnknown: action === "unknown",
    startMs: elapsedMs,
    endMs: elapsedMs,
    durationMs: 0,
    confidenceAvg: frame.confidence,
    score: initialScore,
    peakScore: initialScore,
    frameCount: 1,
    issueSummary: frame.feedback.slice(0, 3),
    keyFrames: [elapsedMs],
    clipLabel: `${ACTION_META[action].label} ${formatDuration(elapsedMs)}`,
  };
}
// Heuristically classify a single pose frame into a tennis action and produce
// a 0-100 score breakdown plus coaching feedback. Mutates `tracking` so the
// next call can derive wrist/hip velocities. `timestamp` is in milliseconds.
// Landmark indices (0 nose, 11/12 shoulders, 13/14 elbows, 15/16 wrists,
// 23/24 hips, 25/26 knees, 27/28 ankles) match the MediaPipe Pose layout —
// confirm against the pose model actually wired up elsewhere in this file.
function analyzePoseFrame(landmarks: Point[], tracking: TrackingState, timestamp: number): AnalyzedFrame {
  const nose = landmarks[0];
  const leftShoulder = landmarks[11];
  const rightShoulder = landmarks[12];
  const leftElbow = landmarks[13];
  const rightElbow = landmarks[14];
  const leftWrist = landmarks[15];
  const rightWrist = landmarks[16];
  const leftHip = landmarks[23];
  const rightHip = landmarks[24];
  const leftKnee = landmarks[25];
  const rightKnee = landmarks[26];
  const leftAnkle = landmarks[27];
  const rightAnkle = landmarks[28];
  // Hip midpoint; fallbacks assume a roughly centered, mid-frame body.
  const hipCenter = {
    x: ((leftHip?.x ?? 0.5) + (rightHip?.x ?? 0.5)) / 2,
    y: ((leftHip?.y ?? 0.7) + (rightHip?.y ?? 0.7)) / 2,
  };
  // Frame delta clamped to >=16ms; 33ms (~30fps) assumed on the first frame.
  const dtMs = tracking.prevTimestamp ? Math.max(16, timestamp - tracking.prevTimestamp) : 33;
  // Speeds in normalized units per second.
  const rightSpeed = distance(rightWrist, tracking.prevRightWrist) * (1000 / dtMs);
  const leftSpeed = distance(leftWrist, tracking.prevLeftWrist) * (1000 / dtMs);
  const hipSpeed = distance(hipCenter, tracking.prevHipCenter) * (1000 / dtMs);
  // Positive when the right wrist moved upward since the previous frame
  // (y grows downward in image coordinates).
  const rightVerticalMotion = tracking.prevRightWrist ? tracking.prevRightWrist.y - (rightWrist?.y ?? tracking.prevRightWrist.y) : 0;
  const shoulderTilt = Math.abs((leftShoulder?.y ?? 0.3) - (rightShoulder?.y ?? 0.3));
  const hipTilt = Math.abs((leftHip?.y ?? 0.55) - (rightHip?.y ?? 0.55));
  const headOffset = Math.abs((nose?.x ?? 0.5) - (((leftShoulder?.x ?? 0.45) + (rightShoulder?.x ?? 0.55)) / 2));
  // Average knee flexion; 165° (nearly straight) when joints are undetected.
  const kneeBend = ((getAngle(leftHip, leftKnee, leftAnkle) || 165) + (getAngle(rightHip, rightKnee, rightAnkle) || 165)) / 2;
  const rightElbowAngle = getAngle(rightShoulder, rightElbow, rightWrist) || 145;
  const leftElbowAngle = getAngle(leftShoulder, leftElbow, leftWrist) || 145;
  const footSpread = Math.abs((leftAnkle?.x ?? 0.42) - (rightAnkle?.x ?? 0.58));
  const shoulderSpan = Math.abs((rightShoulder?.x ?? 0.56) - (leftShoulder?.x ?? 0.44));
  const wristSpread = Math.abs((rightWrist?.x ?? 0.62) - (leftWrist?.x ?? 0.38));
  const shoulderCenterX = ((leftShoulder?.x ?? 0.45) + (rightShoulder?.x ?? 0.55)) / 2;
  const torsoOffset = Math.abs(shoulderCenterX - hipCenter.x);
  // Horizontal wrist reach relative to the hip center; sign encodes the
  // racket-forward direction for the forehand/backhand heuristics.
  const rightForward = (rightWrist?.x ?? shoulderCenterX) - hipCenter.x;
  const leftForward = hipCenter.x - (leftWrist?.x ?? shoulderCenterX);
  // Positive when the right wrist is above hip level (higher contact point).
  const contactHeight = hipCenter.y - (rightWrist?.y ?? hipCenter.y);
  const visibility =
    landmarks.reduce((sum, point) => sum + (point.visibility ?? 0.95), 0) /
    Math.max(1, landmarks.length);
  // Bail out early when the body is barely visible or the detected shoulder
  // span is implausibly narrow — classification would only produce noise.
  if (visibility < 0.42 || shoulderSpan < 0.08) {
    tracking.prevTimestamp = timestamp;
    tracking.prevRightWrist = rightWrist;
    tracking.prevLeftWrist = leftWrist;
    tracking.prevHipCenter = hipCenter;
    tracking.lastAction = "unknown";
    return {
      action: "unknown",
      confidence: 0.2,
      score: {
        overall: 48,
        posture: 50,
        balance: 48,
        technique: 45,
        footwork: 42,
        consistency: 40,
        confidence: 20,
      },
      feedback: ["当前画面人体可见度不足,请尽量让头肩和双脚都留在画面内。"],
    };
  }
  // Sub-scores: empirically weighted penalties on tilt/offset/speed terms.
  const posture = clamp(100 - shoulderTilt * 780 - headOffset * 640, 0, 100);
  const balance = clamp(100 - hipTilt * 900 - Math.max(0, 0.16 - footSpread) * 260, 0, 100);
  const footwork = clamp(45 + Math.min(36, hipSpeed * 120) + Math.max(0, 165 - kneeBend) * 0.35, 0, 100);
  const consistency = clamp(visibility * 100 - Math.abs(rightSpeed - leftSpeed) * 10, 0, 100);
  // Each candidate's confidence is a sum of feature terms, each individually
  // clamped so no single cue can dominate, then capped per action.
  const candidates: Array<{ action: ActionType; confidence: number }> = [
    // Serve: wrist above the nose, raised elbow, high contact, narrow stance,
    // extended arm, upward wrist motion.
    {
      action: "serve",
      confidence: clamp(
        (rightWrist && nose && rightWrist.y < nose.y ? 0.45 : 0.1) +
        (rightElbow && rightShoulder && rightElbow.y < rightShoulder.y ? 0.18 : 0.04) +
        clamp(contactHeight * 1.4, 0, 0.14) +
        clamp((0.24 - footSpread) * 1.2, 0, 0.08) +
        clamp((rightElbowAngle - 135) / 55, 0, 0.22) +
        clamp(rightVerticalMotion * 4.5, 0, 0.15),
        0,
        0.98,
      ),
    },
    // Overhead: wrist well above the shoulder with a fast, extended arm.
    {
      action: "overhead",
      confidence: clamp(
        (rightWrist && rightShoulder && rightWrist.y < rightShoulder.y - 0.1 ? 0.34 : 0.08) +
        clamp(rightSpeed * 0.08, 0, 0.28) +
        clamp((rightElbowAngle - 125) / 70, 0, 0.18),
        0,
        0.92,
      ),
    },
    // Forehand: right wrist to the right of the nose, forward reach, wide
    // wrist spread, aligned torso, wrist speed, opening elbow.
    {
      action: "forehand",
      confidence: clamp(
        (rightWrist && nose && rightWrist.x > nose.x ? 0.24 : 0.08) +
        (rightForward > 0.11 ? 0.16 : 0.04) +
        clamp((wristSpread - 0.2) * 0.8, 0, 0.16) +
        clamp((0.08 - torsoOffset) * 1.8, 0, 0.08) +
        clamp(rightSpeed * 0.12, 0, 0.28) +
        clamp((rightElbowAngle - 85) / 70, 0, 0.2),
        0,
        0.94,
      ),
    },
    // Backhand: either wrist crossing left of the nose/hip with forward reach.
    {
      action: "backhand",
      confidence: clamp(
        ((leftWrist && nose && leftWrist.x < nose.x) || (rightWrist && nose && rightWrist.x < nose.x) ? 0.2 : 0.06) +
        (leftForward > 0.1 ? 0.16 : 0.04) +
        (rightWrist && hipCenter && rightWrist.x < hipCenter.x ? 0.12 : 0.02) +
        clamp((wristSpread - 0.22) * 0.75, 0, 0.14) +
        clamp(Math.max(leftSpeed, rightSpeed) * 0.1, 0, 0.22) +
        clamp((leftElbowAngle - 85) / 70, 0, 0.18),
        0,
        0.92,
      ),
    },
    // Volley: wrist near shoulder height, compact contact, low wrist speed.
    {
      action: "volley",
      confidence: clamp(
        (rightWrist && rightShoulder && Math.abs(rightWrist.y - rightShoulder.y) < 0.12 ? 0.3 : 0.08) +
        clamp((0.16 - Math.abs(contactHeight - 0.08)) * 1.2, 0, 0.1) +
        clamp((0.22 - Math.abs((rightWrist?.x ?? 0.5) - hipCenter.x)) * 1.5, 0, 0.18) +
        clamp((1.8 - rightSpeed) * 0.14, 0, 0.18),
        0,
        0.88,
      ),
    },
    // Slice: wrist below the shoulder, moving downward with moderate speed.
    {
      action: "slice",
      confidence: clamp(
        (rightWrist && rightShoulder && rightWrist.y > rightShoulder.y ? 0.18 : 0.06) +
        clamp((contactHeight + 0.06) * 0.7, 0, 0.08) +
        clamp((tracking.prevRightWrist && rightWrist && rightWrist.y > tracking.prevRightWrist.y ? 0.18 : 0.04), 0, 0.18) +
        clamp(rightSpeed * 0.08, 0, 0.24),
        0,
        0.82,
      ),
    },
    // Lob: high-ish wrist near the body with strong upward motion.
    {
      action: "lob",
      confidence: clamp(
        (rightWrist && nose && rightWrist.y < nose.y + 0.1 ? 0.22 : 0.08) +
        clamp((0.18 - Math.abs(rightForward)) * 1.2, 0, 0.08) +
        clamp(rightVerticalMotion * 4.2, 0, 0.28) +
        clamp((0.18 - Math.abs((rightWrist?.x ?? 0.5) - hipCenter.x)) * 1.4, 0, 0.18),
        0,
        0.86,
      ),
    },
  ];
  // Highest-confidence candidate wins; below 0.52 the frame stays "unknown".
  candidates.sort((a, b) => b.confidence - a.confidence);
  const topCandidate = candidates[0] ?? { action: "unknown" as ActionType, confidence: 0.2 };
  const action = topCandidate.confidence >= 0.52 ? topCandidate.action : "unknown";
  // Technique compares the dominant elbow angle to an ideal per action family
  // (~160° for serve/overhead, ~118° for groundstrokes), plus a small bonus
  // proportional to classification confidence.
  const techniqueBase =
    action === "serve" || action === "overhead"
      ? clamp(100 - Math.abs(rightElbowAngle - 160) * 0.9, 0, 100)
      : action === "backhand"
      ? clamp(100 - Math.abs(leftElbowAngle - 118) * 0.9, 0, 100)
      : clamp(100 - Math.abs(rightElbowAngle - 118) * 0.85, 0, 100);
  const technique = clamp(techniqueBase + topCandidate.confidence * 8, 0, 100);
  // Weighted blend of the sub-scores (weights sum to 1).
  const overall = clamp(
    posture * 0.22 +
    balance * 0.18 +
    technique * 0.28 +
    footwork * 0.16 +
    consistency * 0.16,
    0,
    100,
  );
  // Collect coaching messages; only the first three are returned.
  const feedback: string[] = [];
  if (action === "unknown") {
    feedback.push("当前片段缺少完整挥拍特征,系统已归为未知动作。");
  }
  if (visibility < 0.65) {
    feedback.push("人体关键点可见度偏低,建议调整机位让双臂和双脚完全入镜。");
  }
  if (posture < 72) {
    feedback.push("上体轴线偏移较明显,击球准备时保持头肩稳定。");
  }
  if (balance < 70) {
    feedback.push("重心波动偏大,建议扩大支撑面并缩短恢复时间。");
  }
  if (footwork < 68) {
    feedback.push("脚步启动不足,击球前先完成小碎步调整。");
  }
  if ((action === "serve" || action === "overhead") && technique < 75) {
    feedback.push("抬臂延展不够,击球点再高一些会更完整。");
  }
  if ((action === "forehand" || action === "backhand") && technique < 75) {
    feedback.push("肘腕角度偏紧,击球点前移并完成收拍。");
  }
  if (feedback.length === 0) {
    feedback.push("节奏稳定,可以继续累积高质量动作片段。");
  }
  // Persist this frame's state for next-frame velocity estimates.
  tracking.prevTimestamp = timestamp;
  tracking.prevRightWrist = rightWrist;
  tracking.prevLeftWrist = leftWrist;
  tracking.prevHipCenter = hipCenter;
  tracking.lastAction = action;
  return {
    action,
    confidence: clamp(topCandidate.confidence, 0, 1),
    score: {
      overall: Math.round(overall),
      posture: Math.round(posture),
      balance: Math.round(balance),
      technique: Math.round(technique),
      footwork: Math.round(footwork),
      consistency: Math.round(consistency),
      confidence: Math.round(clamp(topCandidate.confidence * 100, 0, 100)),
    },
    feedback: feedback.slice(0, 3),
  };
}
/** Labeled horizontal meter (0-100) with an optional accent color class. */
function ScoreBar({ label, value, accent }: { label: string; value: number; accent?: string }) {
  const fillClass = accent || "bg-primary";
  const widthPct = clamp(value, 0, 100);
  return (
    <div className="space-y-1">
      <div className="flex items-center justify-between text-xs">
        <span className="text-muted-foreground">{label}</span>
        <span className="font-medium">{Math.round(value)}</span>
      </div>
      <div className="h-2 rounded-full bg-muted/70">
        <div
          className={`h-full rounded-full transition-all ${fillClass}`}
          style={{ width: `${widthPct}%` }}
        />
      </div>
    </div>
  );
}
/**
 * Bucket a session into a quality band from its overall score, the share of
 * recognized (non-unknown) segments, and the count of effective segments.
 * Returns the band label plus the Tailwind tone classes for its badge.
 */
function getSessionBand(input: { overallScore: number; knownRatio: number; effectiveSegments: number }) {
  const { overallScore, knownRatio, effectiveSegments } = input;
  if (overallScore >= 85 && knownRatio >= 0.72 && effectiveSegments >= 4) {
    return { label: "高质量", tone: "bg-emerald-500/10 text-emerald-700" };
  }
  if (overallScore >= 72 && knownRatio >= 0.55 && effectiveSegments >= 2) {
    return { label: "稳定", tone: "bg-sky-500/10 text-sky-700" };
  }
  return { label: "待加强", tone: "bg-amber-500/10 text-amber-700" };
}
/**
 * Milliseconds elapsed since the last heartbeat, or null when the timestamp
 * is absent or unparseable. Clock skew is clamped so the result is never
 * negative.
 */
function getRuntimeSyncDelayMs(lastHeartbeatAt?: string | null) {
  if (!lastHeartbeatAt) return null;
  const parsedMs = Date.parse(lastHeartbeatAt);
  if (Number.isNaN(parsedMs)) return null;
  return Math.max(0, Date.now() - parsedMs);
}
/** Human-readable sync status for a heartbeat delay (null = not yet synced). */
function formatRuntimeSyncDelay(delayMs: number | null) {
  if (delayMs == null) return "等待同步";
  if (delayMs < 1500) return "同步中";
  if (delayMs >= 10_000) return "同步较慢";
  return `${(delayMs / 1000).toFixed(1)}s 延迟`;
}
export default function LiveCamera() {
const { user } = useAuth();
const utils = trpc.useUtils();
const mobile = useMemo(() => isMobileDevice(), []);
const videoRef = useRef<HTMLVideoElement>(null);
const canvasRef = useRef<HTMLCanvasElement>(null);
const streamRef = useRef<MediaStream | null>(null);
const poseRef = useRef<any>(null);
const compositeCanvasRef = useRef<HTMLCanvasElement | null>(null);
const broadcastSessionIdRef = useRef<string | null>(null);
const viewerSessionIdRef = useRef<string | null>(null);
const viewerRetryTimerRef = useRef<number>(0);
const frameRelayTimerRef = useRef<number>(0);
const frameRelayInFlightRef = useRef(false);
const runtimeIdRef = useRef<number | null>(null);
const heartbeatTimerRef = useRef<number>(0);
const recorderRef = useRef<MediaRecorder | null>(null);
const recorderStreamRef = useRef<MediaStream | null>(null);
const recorderMimeTypeRef = useRef("video/webm");
const recorderChunksRef = useRef<Blob[]>([]);
const recorderStopPromiseRef = useRef<Promise<void> | null>(null);
const recorderSegmentStartedAtRef = useRef<number>(0);
const recorderSequenceRef = useRef(0);
const recorderRotateTimerRef = useRef<number>(0);
const recorderUploadQueueRef = useRef(Promise.resolve());
const archivedVideosRef = useRef<ArchivedAnalysisVideo[]>([]);
const analyzingRef = useRef(false);
const animationRef = useRef<number>(0);
const sessionStartedAtRef = useRef<number>(0);
const trackingRef = useRef<TrackingState>({});
const actionHistoryRef = useRef<FrameActionSample[]>([]);
const stableActionStateRef = useRef(createStableActionState());
const currentSegmentRef = useRef<ActionSegment | null>(null);
const segmentsRef = useRef<ActionSegment[]>([]);
const frameSamplesRef = useRef<PoseScore[]>([]);
const volatilitySamplesRef = useRef<number[]>([]);
const currentActionRef = useRef<ActionType>("unknown");
const rawActionRef = useRef<ActionType>("unknown");
const liveScoreRef = useRef<PoseScore | null>(null);
const feedbackRef = useRef<string[]>([]);
const durationMsRef = useRef(0);
const leaveStatusRef = useRef<"idle" | "analyzing" | "saving" | "safe" | "failed">("idle");
const sessionModeRef = useRef<SessionMode>("practice");
const stabilityMetaRef = useRef<StabilizedActionMeta>(createEmptyStabilizedActionMeta());
const zoomTargetRef = useRef(1);
const avatarRenderRef = useRef<AvatarRenderState>({
enabled: false,
avatarKey: "gorilla",
});
const [cameraActive, setCameraActive] = useState(false);
const [facing, setFacing] = useState<CameraFacing>("environment");
const [hasMultipleCameras, setHasMultipleCameras] = useState(false);
const [showSetupGuide, setShowSetupGuide] = useState(true);
const [setupStep, setSetupStep] = useState(0);
const [sessionMode, setSessionMode] = useState<SessionMode>("practice");
const [analyzing, setAnalyzing] = useState(false);
const [saving, setSaving] = useState(false);
const [leaveStatus, setLeaveStatus] = useState<"idle" | "analyzing" | "saving" | "safe" | "failed">("idle");
const [immersivePreview, setImmersivePreview] = useState(false);
const [liveScore, setLiveScore] = useState<PoseScore | null>(null);
const [currentAction, setCurrentAction] = useState<ActionType>("unknown");
const [rawAction, setRawAction] = useState<ActionType>("unknown");
const [feedback, setFeedback] = useState<string[]>([]);
const [segments, setSegments] = useState<ActionSegment[]>([]);
const [durationMs, setDurationMs] = useState(0);
const [segmentFilter, setSegmentFilter] = useState<ActionType | "all">("all");
const [qualityPreset, setQualityPreset] = useState<CameraQualityPreset>("economy");
const [zoomState, setZoomState] = useState(() => readTrackZoomState(null));
const [stabilityMeta, setStabilityMeta] = useState<StabilizedActionMeta>(() => createEmptyStabilizedActionMeta());
const [avatarEnabled, setAvatarEnabled] = useState(false);
const [avatarKey, setAvatarKey] = useState<AvatarKey>("gorilla");
const [avatarPrompt, setAvatarPrompt] = useState("");
const [archivedVideoCount, setArchivedVideoCount] = useState(0);
const [viewerConnected, setViewerConnected] = useState(false);
const [viewerError, setViewerError] = useState("");
const [viewerFrameVersion, setViewerFrameVersion] = useState(0);
const resolvedAvatarKey = useMemo(
() => resolveAvatarKeyFromPrompt(avatarPrompt, avatarKey),
[avatarKey, avatarPrompt],
);
const uploadMutation = trpc.video.upload.useMutation();
const saveLiveSessionMutation = trpc.analysis.liveSessionSave.useMutation({
onSuccess: () => {
utils.profile.stats.invalidate();
utils.analysis.liveSessionList.invalidate();
utils.video.list.invalidate();
utils.record.list.invalidate();
utils.achievement.list.invalidate();
utils.rating.current.invalidate();
utils.rating.history.invalidate();
},
});
const liveSessionsQuery = trpc.analysis.liveSessionList.useQuery({ limit: 8 });
const runtimeQuery = trpc.analysis.runtimeGet.useQuery(undefined, {
refetchInterval: 1000,
refetchIntervalInBackground: true,
});
const runtimeAcquireMutation = trpc.analysis.runtimeAcquire.useMutation();
const runtimeHeartbeatMutation = trpc.analysis.runtimeHeartbeat.useMutation();
const runtimeReleaseMutation = trpc.analysis.runtimeRelease.useMutation();
const runtimeRole = (runtimeQuery.data?.role ?? "idle") as RuntimeRole;
const runtimeSession = (runtimeQuery.data?.runtimeSession ?? null) as RuntimeSession | null;
const runtimeSnapshot = runtimeSession?.snapshot ?? null;
const normalizedRuntimeTitle = normalizeRuntimeTitle(runtimeSession?.title);
const normalizedSnapshotTitle = normalizeRuntimeTitle(runtimeSnapshot?.title);
useEffect(() => {
avatarRenderRef.current = {
enabled: avatarEnabled,
avatarKey: resolvedAvatarKey,
customLabel: avatarPrompt.trim() || undefined,
};
}, [avatarEnabled, avatarPrompt, resolvedAvatarKey]);
useEffect(() => {
currentActionRef.current = currentAction;
}, [currentAction]);
useEffect(() => {
rawActionRef.current = rawAction;
}, [rawAction]);
useEffect(() => {
liveScoreRef.current = liveScore;
}, [liveScore]);
useEffect(() => {
feedbackRef.current = feedback;
}, [feedback]);
useEffect(() => {
durationMsRef.current = durationMs;
}, [durationMs]);
useEffect(() => {
leaveStatusRef.current = leaveStatus;
}, [leaveStatus]);
useEffect(() => {
if (runtimeRole === "viewer") {
setShowSetupGuide(false);
setSetupStep(0);
}
}, [runtimeRole]);
useEffect(() => {
sessionModeRef.current = sessionMode;
}, [sessionMode]);
useEffect(() => {
stabilityMetaRef.current = stabilityMeta;
}, [stabilityMeta]);
const visibleSegments = useMemo(
() => segments.filter((segment) => !segment.isUnknown).sort((a, b) => b.startMs - a.startMs),
[segments],
);
const unknownSegments = useMemo(() => segments.filter((segment) => segment.isUnknown), [segments]);
const filteredVisibleSegments = useMemo(
() => segmentFilter === "all" ? visibleSegments : visibleSegments.filter((segment) => segment.actionType === segmentFilter),
[segmentFilter, visibleSegments],
);
const viewerRecentSegments = useMemo(
() => (runtimeSnapshot?.recentSegments ?? []).filter((segment) => !segment.isUnknown),
[runtimeSnapshot?.recentSegments],
);
const displayVisibleSegments = runtimeRole === "viewer" ? viewerRecentSegments : visibleSegments;
const displayFilteredSegments = runtimeRole === "viewer"
? (segmentFilter === "all"
? viewerRecentSegments
: viewerRecentSegments.filter((segment) => segment.actionType === segmentFilter))
: filteredVisibleSegments;
const actionStats = useMemo(() => {
const totals = new Map<ActionType, { count: number; durationMs: number; averageScore: number; averageConfidence: number }>();
displayVisibleSegments.forEach((segment) => {
const current = totals.get(segment.actionType) ?? {
count: 0,
durationMs: 0,
averageScore: 0,
averageConfidence: 0,
};
const nextCount = current.count + 1;
totals.set(segment.actionType, {
count: nextCount,
durationMs: current.durationMs + segment.durationMs,
averageScore: ((current.averageScore * current.count) + segment.score) / nextCount,
averageConfidence: ((current.averageConfidence * current.count) + segment.confidenceAvg) / nextCount,
});
});
const totalDuration = Math.max(1, displayVisibleSegments.reduce((sum, segment) => sum + segment.durationMs, 0));
return Array.from(totals.entries())
.map(([actionType, value]) => ({
actionType,
...value,
sharePct: Math.round((value.durationMs / totalDuration) * 100),
}))
.sort((a, b) => b.durationMs - a.durationMs);
}, [displayVisibleSegments]);
const bestSegment = useMemo(
() => displayVisibleSegments.reduce<ActionSegment | null>((best, segment) => {
if (!best) return segment;
return segment.score > best.score ? segment : best;
}, null),
[displayVisibleSegments],
);
const totalDisplaySegments = runtimeRole === "viewer"
? (runtimeSnapshot?.visibleSegments ?? displayVisibleSegments.length) + (runtimeSnapshot?.unknownSegments ?? 0)
: segments.length;
const knownRatio = totalDisplaySegments > 0 ? displayVisibleSegments.length / totalDisplaySegments : 0;
const sessionBand = useMemo(
() => getSessionBand({
overallScore: (runtimeRole === "viewer" ? runtimeSnapshot?.liveScore?.overall : liveScore?.overall) || 0,
knownRatio,
effectiveSegments: displayVisibleSegments.length,
}),
[displayVisibleSegments.length, knownRatio, liveScore?.overall, runtimeRole, runtimeSnapshot?.liveScore?.overall],
);
const refreshRuntimeState = useCallback(async () => {
const result = await runtimeQuery.refetch();
return {
role: (result.data?.role ?? runtimeRole) as RuntimeRole,
runtimeSession: (result.data?.runtimeSession ?? runtimeSession) as RuntimeSession | null,
};
}, [runtimeQuery, runtimeRole, runtimeSession]);
useEffect(() => {
navigator.mediaDevices?.enumerateDevices().then((devices) => {
const cameras = devices.filter((device) => device.kind === "videoinput");
setHasMultipleCameras(cameras.length > 1);
}).catch(() => undefined);
}, []);
useEffect(() => {
if (!cameraActive || !streamRef.current || !videoRef.current) return;
if (videoRef.current.srcObject !== streamRef.current) {
videoRef.current.srcObject = streamRef.current;
void videoRef.current.play().catch(() => undefined);
}
}, [cameraActive, immersivePreview]);
const ensureCompositeCanvas = useCallback(() => {
if (typeof document === "undefined") {
return null;
}
if (!compositeCanvasRef.current) {
compositeCanvasRef.current = document.createElement("canvas");
}
return compositeCanvasRef.current;
}, []);
const renderCompositeFrame = useCallback((landmarks?: Point[]) => {
const video = videoRef.current;
const compositeCanvas = ensureCompositeCanvas();
if (!video || !compositeCanvas || video.videoWidth <= 0 || video.videoHeight <= 0) {
return;
}
if (compositeCanvas.width !== video.videoWidth || compositeCanvas.height !== video.videoHeight) {
compositeCanvas.width = video.videoWidth;
compositeCanvas.height = video.videoHeight;
}
const ctx = compositeCanvas.getContext("2d");
if (!ctx) return;
ctx.clearRect(0, 0, compositeCanvas.width, compositeCanvas.height);
ctx.drawImage(video, 0, 0, compositeCanvas.width, compositeCanvas.height);
renderLiveCameraOverlayToContext(
ctx,
compositeCanvas.width,
compositeCanvas.height,
landmarks,
avatarRenderRef.current,
{ clear: false },
);
}, [ensureCompositeCanvas]);
const queueArchivedVideoUpload = useCallback(async (blob: Blob, sequence: number, durationMs: number) => {
const format = recorderMimeTypeRef.current.includes("mp4") ? "mp4" : "webm";
const title = `实时分析录像 ${formatDateTimeShanghai(new Date(), {
year: undefined,
second: undefined,
})} · 第 ${sequence}`;
recorderUploadQueueRef.current = recorderUploadQueueRef.current
.then(async () => {
const fileBase64 = await blobToBase64(blob);
const uploaded = await uploadMutation.mutateAsync({
title,
format,
fileSize: blob.size,
duration: Math.max(1, Math.round(durationMs / 1000)),
exerciseType: "live_analysis",
fileBase64,
});
const nextVideo: ArchivedAnalysisVideo = {
videoId: uploaded.videoId,
url: uploaded.url,
sequence,
durationMs,
title,
};
archivedVideosRef.current = [...archivedVideosRef.current, nextVideo].sort((a, b) => a.sequence - b.sequence);
setArchivedVideoCount(archivedVideosRef.current.length);
})
.catch((error: any) => {
toast.error(`分析录像第 ${sequence} 段归档失败: ${error?.message || "未知错误"}`);
});
return recorderUploadQueueRef.current;
}, [uploadMutation]);
const stopSessionRecorder = useCallback(async () => {
const recorder = recorderRef.current;
if (recorderRotateTimerRef.current) {
window.clearTimeout(recorderRotateTimerRef.current);
recorderRotateTimerRef.current = 0;
}
if (!recorder) {
await recorderUploadQueueRef.current;
return;
}
const stopPromise = recorderStopPromiseRef.current;
if (recorder.state !== "inactive") {
recorder.stop();
}
await (stopPromise ?? Promise.resolve());
await recorderUploadQueueRef.current;
}, []);
const buildRuntimeSnapshot = useCallback((phase?: RuntimeSnapshot["phase"]): RuntimeSnapshot => ({
phase: phase ?? leaveStatusRef.current,
startedAt: sessionStartedAtRef.current || undefined,
durationMs: durationMsRef.current,
title: normalizedRuntimeTitle || `实时分析 ${ACTION_META[currentActionRef.current].label}`,
sessionMode: sessionModeRef.current,
qualityPreset,
facingMode: facing,
deviceKind: mobile ? "mobile" : "desktop",
avatarEnabled: avatarRenderRef.current.enabled,
avatarKey: avatarRenderRef.current.avatarKey,
avatarLabel: getAvatarPreset(avatarRenderRef.current.avatarKey)?.label || "猩猩",
updatedAt: Date.now(),
currentAction: currentActionRef.current,
rawAction: rawActionRef.current,
feedback: feedbackRef.current,
liveScore: liveScoreRef.current,
stabilityMeta: stabilityMetaRef.current,
visibleSegments: segmentsRef.current.filter((segment) => !segment.isUnknown).length,
unknownSegments: segmentsRef.current.filter((segment) => segment.isUnknown).length,
archivedVideoCount: archivedVideosRef.current.length,
recentSegments: segmentsRef.current.slice(-5),
}), [facing, mobile, normalizedRuntimeTitle, qualityPreset]);
const openSetupGuide = useCallback(async () => {
const latest = await refreshRuntimeState();
if (latest.role === "viewer") {
setShowSetupGuide(false);
toast.error("当前账号已有其他设备正在实时分析,请先切换到同步观看模式");
return;
}
setShowSetupGuide(true);
}, [refreshRuntimeState]);
// Renders one composite frame and uploads it as a JPEG still to the media
// relay. A simple in-flight flag drops overlapping uploads rather than
// queueing them, keeping the relay close to real time.
const uploadLiveFrame = useCallback(async (sessionId: string) => {
  const compositeCanvas = ensureCompositeCanvas();
  if (!compositeCanvas || frameRelayInFlightRef.current) {
    return;
  }
  renderCompositeFrame();
  frameRelayInFlightRef.current = true;
  try {
    // Mobile uses a slightly lower JPEG quality to save upload bandwidth.
    const jpegQuality = mobile ? 0.7 : 0.76;
    const frameBlob = await new Promise<Blob | null>((resolve) => {
      compositeCanvas.toBlob(resolve, "image/jpeg", jpegQuality);
    });
    if (frameBlob) {
      await uploadMediaLiveFrame(sessionId, frameBlob);
    }
  } finally {
    frameRelayInFlightRef.current = false;
  }
}, [ensureCompositeCanvas, mobile, renderCompositeFrame]);
// Begins pushing composite frames for `sessionId` roughly every 900ms,
// replacing any relay loop that was already running.
const startFrameRelayLoop = useCallback((sessionId: string) => {
  broadcastSessionIdRef.current = sessionId;
  const existingTimer = frameRelayTimerRef.current;
  if (existingTimer) {
    window.clearInterval(existingTimer);
    frameRelayTimerRef.current = 0;
  }
  // Push one frame immediately so viewers are not stuck waiting a full tick.
  void uploadLiveFrame(sessionId);
  frameRelayTimerRef.current = window.setInterval(() => {
    void uploadLiveFrame(sessionId);
  }, 900);
}, [uploadLiveFrame]);
// Tears down the broadcast side of sync viewing: forget the session id,
// stop the frame-relay interval, and clear the in-flight upload marker.
const closeBroadcastPeer = useCallback(() => {
  broadcastSessionIdRef.current = null;
  const relayTimer = frameRelayTimerRef.current;
  if (relayTimer) {
    window.clearInterval(relayTimer);
    frameRelayTimerRef.current = 0;
  }
  frameRelayInFlightRef.current = false;
}, []);
// Tears down the viewer side of sync viewing: cancels the frame-refresh
// timer, forgets the watched session, detaches the preview element (only
// when the local camera does not own it), and marks the viewer disconnected.
const closeViewerPeer = useCallback(() => {
  if (viewerRetryTimerRef.current) {
    // This timer is created with window.setInterval (see the viewer-stream
    // effect), so clear it with clearInterval. The previous clearTimeout
    // only worked because timeout/interval IDs share a pool per the HTML
    // spec; matching the creating call makes the intent explicit.
    window.clearInterval(viewerRetryTimerRef.current);
    viewerRetryTimerRef.current = 0;
  }
  viewerSessionIdRef.current = null;
  if (videoRef.current && !cameraActive) {
    videoRef.current.srcObject = null;
  }
  setViewerConnected(false);
}, [cameraActive]);
// Releases this device's claim on the shared live-analysis runtime, sending
// a final snapshot stamped with the given lifecycle phase. Local runtime
// state is always cleared in `finally` — even when the release call fails —
// so the server-side stale timeout can recover ownership.
const releaseRuntime = useCallback(async (phase: RuntimeSnapshot["phase"]) => {
  if (!runtimeIdRef.current) return;
  try {
    await runtimeReleaseMutation.mutateAsync({
      runtimeId: runtimeIdRef.current,
      snapshot: buildRuntimeSnapshot(phase),
    });
  } catch {
    // Ignore runtime release errors and let the server-side stale timeout recover.
  } finally {
    runtimeIdRef.current = null;
    broadcastSessionIdRef.current = null;
    if (heartbeatTimerRef.current) {
      window.clearInterval(heartbeatTimerRef.current);
      heartbeatTimerRef.current = 0;
    }
    // Refetch so the UI reflects the released runtime promptly.
    void runtimeQuery.refetch();
  }
}, [buildRuntimeSnapshot, runtimeQuery, runtimeReleaseMutation]);
const startRuntimeHeartbeatLoop = useCallback((mediaSessionId?: string | null) => {
if (!runtimeIdRef.current) return;
if (typeof mediaSessionId === "string") {
broadcastSessionIdRef.current = mediaSessionId;
}
if (heartbeatTimerRef.current) {
window.clearInterval(heartbeatTimerRef.current);
heartbeatTimerRef.current = 0;
}
const sendHeartbeat = () => {
if (!runtimeIdRef.current) return;
runtimeHeartbeatMutation.mutate({
runtimeId: runtimeIdRef.current,
mediaSessionId: broadcastSessionIdRef.current,
snapshot: buildRuntimeSnapshot(),
});
};
sendHeartbeat();
heartbeatTimerRef.current = window.setInterval(sendHeartbeat, 1000);
}, [buildRuntimeSnapshot, runtimeHeartbeatMutation]);
// Creates a server-side media session for sync viewing and starts relaying
// composite frames into it. Returns the new session id.
// Throws when user info is not ready or the composite canvas is unavailable
// (both messages are surfaced to the caller's toast handler).
const startBroadcastSession = useCallback(async () => {
  if (!user?.id) {
    throw new Error("当前用户信息未就绪");
  }
  const compositeCanvas = ensureCompositeCanvas();
  if (!compositeCanvas) {
    throw new Error("当前浏览器不支持同步观看画面");
  }
  // Paint one frame up front so the first relayed image is not blank.
  renderCompositeFrame();
  const sessionResponse = await createMediaSession({
    userId: String(user.id),
    title: `实时分析同步 ${formatDateTimeShanghai(new Date(), {
      year: undefined,
      second: undefined,
    })}`,
    format: "webm",
    mimeType: "video/webm",
    qualityPreset,
    facingMode: facing,
    deviceKind: mobile ? "mobile" : "desktop",
  });
  const sessionId = sessionResponse.session.id;
  startFrameRelayLoop(sessionId);
  return sessionId;
}, [ensureCompositeCanvas, facing, mobile, qualityPreset, renderCompositeFrame, startFrameRelayLoop, user?.id]);
// Attaches this device to an existing broadcast session. If we are already
// watching the same session, just bump the frame version to force a refresh
// instead of reconnecting.
const startViewerStream = useCallback(async (mediaSessionId: string) => {
  const alreadyWatching = viewerSessionIdRef.current === mediaSessionId && viewerConnected;
  if (alreadyWatching) {
    setViewerFrameVersion(Date.now());
    return;
  }
  closeViewerPeer();
  setViewerError("");
  viewerSessionIdRef.current = mediaSessionId;
  setViewerFrameVersion(Date.now());
}, [closeViewerPeer, viewerConnected]);
// Fully stops the local camera pipeline: cancels the frame loop, closes the
// pose model, stops the recorder (fire-and-forget), releases media tracks,
// detaches the preview, and resets all per-session analysis state.
const stopCamera = useCallback(() => {
  if (animationRef.current) {
    cancelAnimationFrame(animationRef.current);
    animationRef.current = 0;
  }
  if (poseRef.current?.close) {
    poseRef.current.close();
    poseRef.current = null;
  }
  analyzingRef.current = false;
  setAnalyzing(false);
  // Recorder shutdown uploads remaining chunks asynchronously; no need to wait.
  void stopSessionRecorder();
  const localStream = streamRef.current;
  if (streamRef.current) {
    streamRef.current.getTracks().forEach((track) => track.stop());
    streamRef.current = null;
  }
  // Only clear the preview element if it is still showing our own stream
  // (viewer mode may have attached something else to it).
  if (videoRef.current && localStream && videoRef.current.srcObject === localStream) {
    videoRef.current.srcObject = null;
  }
  actionHistoryRef.current = [];
  stableActionStateRef.current = createStableActionState();
  volatilitySamplesRef.current = [];
  setCurrentAction("unknown");
  setRawAction("unknown");
  setStabilityMeta(createEmptyStabilizedActionMeta());
  setZoomState(readTrackZoomState(null));
  archivedVideosRef.current = [];
  recorderSequenceRef.current = 0;
  setArchivedVideoCount(0);
  setCameraActive(false);
}, [stopSessionRecorder]);
// Viewer mode must never hold the local camera; release it on role change.
useEffect(() => {
  if (runtimeRole !== "viewer") return;
  if (cameraActive) {
    stopCamera();
  }
}, [cameraActive, runtimeRole, stopCamera]);
// Keeps viewer mode attached to the broadcaster's media session: connects
// (or reconnects) the frame stream and bumps the still-frame version every
// 900ms so the <img> source re-requests the latest relayed JPEG. When this
// device is not a viewer (or there is no session), viewer state is torn down.
useEffect(() => {
  if (runtimeRole !== "viewer" || !runtimeSession?.mediaSessionId) {
    // Only detach when the local camera is not using the preview element.
    if (!cameraActive) {
      closeViewerPeer();
    }
    setViewerError("");
    return;
  }
  void startViewerStream(runtimeSession.mediaSessionId).catch((error: any) => {
    setViewerError(error?.message || "同步画面连接失败");
  });
  if (viewerRetryTimerRef.current) {
    window.clearInterval(viewerRetryTimerRef.current);
    viewerRetryTimerRef.current = 0;
  }
  // Bumping the frame version cache-busts the still-frame URL.
  viewerRetryTimerRef.current = window.setInterval(() => {
    setViewerFrameVersion(Date.now());
  }, 900);
  return () => {
    if (viewerRetryTimerRef.current) {
      window.clearInterval(viewerRetryTimerRef.current);
      viewerRetryTimerRef.current = 0;
    }
  };
}, [
  cameraActive,
  closeViewerPeer,
  runtimeRole,
  runtimeSession?.mediaSessionId,
  startViewerStream,
]);
// Release camera, broadcast and viewer resources when the page unmounts.
useEffect(() => () => {
  stopCamera();
  closeBroadcastPeer();
  closeViewerPeer();
}, [closeBroadcastPeer, closeViewerPeer, stopCamera]);
// Reads the zoom capability of the active video track and, when a preferred
// zoom differs from the track's current value by more than half a step,
// tries to re-apply it. Falls back to whatever the track reports if the
// constraint is rejected. With no track at all, zoom resets to 1x.
const syncZoomState = useCallback(async (preferredZoom?: number, providedTrack?: MediaStreamTrack | null) => {
  const track = providedTrack || streamRef.current?.getVideoTracks()[0] || null;
  if (!track) {
    zoomTargetRef.current = 1;
    setZoomState(readTrackZoomState(null));
    return;
  }
  let resolved = readTrackZoomState(track);
  if (resolved.supported && preferredZoom != null && Math.abs(preferredZoom - resolved.current) > resolved.step / 2) {
    try {
      resolved = await applyTrackZoom(track, preferredZoom);
    } catch {
      // Applying failed (device refused); trust the track's reported state.
      resolved = readTrackZoomState(track);
    }
  }
  zoomTargetRef.current = resolved.current;
  setZoomState(resolved);
}, []);
// Applies an absolute zoom value to the current video track and mirrors the
// resulting state into React; surfaces a toast when the device refuses.
const updateZoom = useCallback(async (nextZoom: number) => {
  const videoTrack = streamRef.current?.getVideoTracks()[0] || null;
  if (!videoTrack) return;
  try {
    const applied = await applyTrackZoom(videoTrack, nextZoom);
    zoomTargetRef.current = applied.current;
    setZoomState(applied);
  } catch (error: any) {
    toast.error(`镜头缩放调整失败: ${error?.message || "当前设备不支持"}`);
  }
}, []);
// Nudges zoom by one capability step in the given direction, clamped to the
// track's supported range. No-op when the track exposes no zoom control.
const stepZoom = useCallback((direction: -1 | 1) => {
  if (!zoomState.supported) return;
  const target = zoomState.current + zoomState.step * direction;
  void updateZoom(clamp(target, zoomState.min, zoomState.max));
}, [updateZoom, zoomState]);
// Acquires a camera stream with the requested facing/preset, binds it to the
// preview element, and restores the preferred zoom level. Refuses to start
// when this device is only a viewer of another device's session.
const startCamera = useCallback(async (
  nextFacing: CameraFacing = facing,
  preferredZoom = zoomTargetRef.current,
  preset: CameraQualityPreset = qualityPreset,
) => {
  const latest = await refreshRuntimeState();
  if (latest.role === "viewer") {
    toast.error("当前账号已有其他设备正在实时分析,请切换到同步观看模式");
    return;
  }
  try {
    // Stop any previous tracks before requesting a new stream.
    if (streamRef.current) {
      streamRef.current.getTracks().forEach((track) => track.stop());
    }
    const { stream, appliedFacingMode, usedFallback } = await requestCameraStream({
      facingMode: nextFacing,
      isMobile: mobile,
      preset,
    });
    streamRef.current = stream;
    // The browser may have served a different facing mode than requested.
    if (appliedFacingMode !== nextFacing) {
      setFacing(appliedFacingMode);
    }
    setCameraActive(true);
    if (videoRef.current) {
      try {
        videoRef.current.srcObject = stream;
        await videoRef.current.play().catch(() => undefined);
      } catch {
        // Keep the camera session alive even if preview binding is flaky on the current browser.
      }
    }
    await syncZoomState(preferredZoom, stream.getVideoTracks()[0] || null);
    if (usedFallback) {
      toast.info("当前设备已自动切换到兼容摄像头模式");
    }
    toast.success("摄像头已启动");
  } catch (error: any) {
    toast.error(`摄像头启动失败: ${error?.message || "未知错误"}`);
  }
}, [facing, mobile, qualityPreset, refreshRuntimeState, syncZoomState]);
const switchCamera = useCallback(async () => {
const nextFacing: CameraFacing = facing === "user" ? "environment" : "user";
setFacing(nextFacing);
if (!cameraActive) return;
stopCamera();
await new Promise((resolve) => setTimeout(resolve, 250));
await startCamera(nextFacing, zoomTargetRef.current);
}, [cameraActive, facing, startCamera, stopCamera]);
// Persists the chosen quality preset and, when the camera is idle enough
// (active but neither analyzing nor saving), restarts it with the new
// constraints so the change takes effect immediately.
const handleQualityPresetChange = useCallback(async (nextPreset: CameraQualityPreset) => {
  setQualityPreset(nextPreset);
  const canRestartNow = cameraActive && !analyzing && !saving;
  if (canRestartNow) {
    await startCamera(facing, zoomTargetRef.current, nextPreset);
  }
}, [analyzing, cameraActive, facing, saving, startCamera]);
// Finalizes a segment (dedupes key frames, caps issues at 4, builds its clip
// label) and appends it to the committed list. Segments shorter than
// MIN_SEGMENT_MS are discarded as noise.
const flushSegment = useCallback((segment: ActionSegment | null) => {
  if (!segment) return;
  if (segment.durationMs < MIN_SEGMENT_MS) return;
  const uniqueKeyFrames = Array.from(new Set(segment.keyFrames)).slice(-4);
  const sealed: ActionSegment = {
    ...segment,
    durationMs: Math.max(segment.durationMs, segment.endMs - segment.startMs),
    clipLabel: `${ACTION_META[segment.actionType].label} ${formatDuration(segment.startMs)} - ${formatDuration(segment.endMs)}`,
    keyFrames: uniqueKeyFrames,
    issueSummary: segment.issueSummary.slice(0, 4),
  };
  segmentsRef.current = [...segmentsRef.current, sealed];
  setSegments(segmentsRef.current);
}, []);
// Folds one analyzed frame into the in-progress segment. A frame extends the
// current segment while the action matches, the gap since the last frame
// stays within MERGE_GAP_MS, and the segment stays under SEGMENT_MAX_MS;
// otherwise the current segment is flushed and a fresh one starts here.
const appendFrameToSegment = useCallback((frame: AnalyzedFrame, elapsedMs: number) => {
  const current = currentSegmentRef.current;
  if (!current) {
    currentSegmentRef.current = createSegment(frame.action, elapsedMs, frame);
    return;
  }
  const sameAction = current.actionType === frame.action;
  const gap = elapsedMs - current.endMs;
  const nextDuration = elapsedMs - current.startMs;
  if (sameAction && gap <= MERGE_GAP_MS && nextDuration <= SEGMENT_MAX_MS) {
    const nextFrameCount = current.frameCount + 1;
    current.endMs = elapsedMs;
    current.durationMs = current.endMs - current.startMs;
    current.frameCount = nextFrameCount;
    // Running means: fold the new sample into the existing average in place.
    current.confidenceAvg = ((current.confidenceAvg * (nextFrameCount - 1)) + frame.confidence) / nextFrameCount;
    current.score = ((current.score * (nextFrameCount - 1)) + frame.score.overall) / nextFrameCount;
    current.peakScore = Math.max(current.peakScore, frame.score.overall);
    // Cap the issue list at 4 unique entries, key frames at the last 4.
    current.issueSummary = Array.from(new Set([...current.issueSummary, ...frame.feedback])).slice(0, 4);
    current.keyFrames = [...current.keyFrames.slice(-3), elapsedMs];
    return;
  }
  flushSegment(current);
  currentSegmentRef.current = createSegment(frame.action, elapsedMs, frame);
}, [flushSegment]);
// Starts (or restarts) the canvas-based session recorder. Recording is
// captured from the composite canvas in fixed-length segments: a rotation
// timer stops the recorder every ANALYSIS_RECORDING_SEGMENT_MS, the onstop
// handler uploads the finished blob and — while analysis is still running —
// immediately starts the next segment by calling itself again (hence the
// named function expression). Degrades to a no-op recorder on browsers
// without MediaRecorder or canvas.captureStream support.
const startSessionRecorder = useCallback(function startSessionRecorderInternal() {
  if (typeof MediaRecorder === "undefined") {
    recorderRef.current = null;
    recorderStopPromiseRef.current = Promise.resolve();
    return;
  }
  const compositeCanvas = ensureCompositeCanvas();
  if (!compositeCanvas || typeof compositeCanvas.captureStream !== "function") {
    recorderRef.current = null;
    recorderStopPromiseRef.current = Promise.resolve();
    return;
  }
  renderCompositeFrame();
  recorderChunksRef.current = [];
  const mimeType = pickRecorderMimeType();
  recorderMimeTypeRef.current = mimeType;
  // Reuse the capture stream across segment rotations; 24fps on mobile.
  if (!recorderStreamRef.current) {
    recorderStreamRef.current = compositeCanvas.captureStream(mobile ? 24 : 30);
  }
  const recorder = new MediaRecorder(recorderStreamRef.current, {
    mimeType,
    videoBitsPerSecond: getLiveAnalysisBitrate(qualityPreset, mobile),
  });
  recorderRef.current = recorder;
  const sequence = recorderSequenceRef.current + 1;
  recorderSequenceRef.current = sequence;
  recorderSegmentStartedAtRef.current = Date.now();
  recorder.ondataavailable = (event) => {
    if (event.data && event.data.size > 0) {
      recorderChunksRef.current.push(event.data);
    }
  };
  // stopSessionRecorder awaits this promise to know the segment is flushed.
  recorderStopPromiseRef.current = new Promise((resolve) => {
    recorder.onstop = () => {
      const durationMs = Math.max(0, Date.now() - recorderSegmentStartedAtRef.current);
      const type = recorderMimeTypeRef.current.includes("mp4") ? "video/mp4" : "video/webm";
      const blob = recorderChunksRef.current.length > 0 ? new Blob(recorderChunksRef.current, { type }) : null;
      recorderChunksRef.current = [];
      recorderRef.current = null;
      recorderStopPromiseRef.current = null;
      if (blob && blob.size > 0 && durationMs > 0) {
        void queueArchivedVideoUpload(blob, sequence, durationMs);
      }
      if (analyzingRef.current) {
        // Analysis continues: roll straight into the next segment.
        startSessionRecorderInternal();
      } else if (recorderStreamRef.current) {
        // Analysis ended: release the capture stream.
        recorderStreamRef.current.getTracks().forEach((track) => track.stop());
        recorderStreamRef.current = null;
      }
      resolve();
    };
  });
  recorder.start();
  // Rotate this segment after the configured duration.
  recorderRotateTimerRef.current = window.setTimeout(() => {
    if (recorder.state === "recording") {
      recorder.stop();
    }
  }, ANALYSIS_RECORDING_SEGMENT_MS);
}, [ensureCompositeCanvas, mobile, qualityPreset, queueArchivedVideoUpload, renderCompositeFrame]);
// Finalizes and persists the live session: closes the open segment at the
// session boundary, aggregates per-frame scores into session-level averages,
// waits for the recorder so all archived clips are uploaded, then saves the
// full payload (segments, metrics, primary archived video) via tRPC.
const persistSession = useCallback(async () => {
  const endedAt = Date.now();
  const sessionDuration = Math.max(0, endedAt - sessionStartedAtRef.current);
  const currentSegment = currentSegmentRef.current;
  if (currentSegment) {
    // Close the trailing segment at the end of the session.
    currentSegment.endMs = sessionDuration;
    currentSegment.durationMs = currentSegment.endMs - currentSegment.startMs;
    flushSegment(currentSegment);
    currentSegmentRef.current = null;
  }
  const scoreSamples = frameSamplesRef.current;
  const finalSegments = [...segmentsRef.current];
  // Total time spent in each action type across all segments.
  const segmentDurations = finalSegments.reduce<Record<ActionType, number>>((acc, segment) => {
    acc[segment.actionType] = (acc[segment.actionType] || 0) + segment.durationMs;
    return acc;
  }, {
    forehand: 0,
    backhand: 0,
    serve: 0,
    volley: 0,
    overhead: 0,
    slice: 0,
    lob: 0,
    unknown: 0,
  });
  const dominantAction = (Object.entries(segmentDurations).sort((a, b) => b[1] - a[1])[0]?.[0] || "unknown") as ActionType;
  const effectiveSegments = finalSegments.filter((segment) => !segment.isUnknown);
  const unknownCount = finalSegments.length - effectiveSegments.length;
  // Every average falls back to the last live score when no frame samples
  // were captured (e.g. an extremely short session).
  const averageScore = scoreSamples.length > 0
    ? scoreSamples.reduce((sum, item) => sum + item.overall, 0) / scoreSamples.length
    : liveScore?.overall || 0;
  const averagePosture = scoreSamples.length > 0 ? scoreSamples.reduce((sum, item) => sum + item.posture, 0) / scoreSamples.length : liveScore?.posture || 0;
  const averageBalance = scoreSamples.length > 0 ? scoreSamples.reduce((sum, item) => sum + item.balance, 0) / scoreSamples.length : liveScore?.balance || 0;
  const averageTechnique = scoreSamples.length > 0 ? scoreSamples.reduce((sum, item) => sum + item.technique, 0) / scoreSamples.length : liveScore?.technique || 0;
  const averageFootwork = scoreSamples.length > 0 ? scoreSamples.reduce((sum, item) => sum + item.footwork, 0) / scoreSamples.length : liveScore?.footwork || 0;
  const averageConsistency = scoreSamples.length > 0 ? scoreSamples.reduce((sum, item) => sum + item.consistency, 0) / scoreSamples.length : liveScore?.consistency || 0;
  const sessionFeedback = Array.from(new Set(finalSegments.flatMap((segment) => segment.issueSummary))).slice(0, 5);
  const averageRawVolatility = volatilitySamplesRef.current.length > 0
    ? volatilitySamplesRef.current.reduce((sum, value) => sum + value, 0) / volatilitySamplesRef.current.length
    : 0;
  const avatarState = avatarRenderRef.current;
  // Ensure the final recording segment is flushed and uploads have settled
  // before reading the archived-video list.
  await stopSessionRecorder();
  const archivedVideos = [...archivedVideosRef.current].sort((a, b) => a.sequence - b.sequence);
  const primaryArchivedVideo = archivedVideos[0] ?? null;
  await saveLiveSessionMutation.mutateAsync({
    title: `实时分析 ${ACTION_META[dominantAction].label}`,
    sessionMode,
    startedAt: sessionStartedAtRef.current,
    endedAt,
    durationMs: sessionDuration,
    dominantAction,
    overallScore: Math.round(averageScore),
    postureScore: Math.round(averagePosture),
    balanceScore: Math.round(averageBalance),
    techniqueScore: Math.round(averageTechnique),
    footworkScore: Math.round(averageFootwork),
    consistencyScore: Math.round(averageConsistency),
    totalActionCount: effectiveSegments.length,
    effectiveSegments: effectiveSegments.length,
    totalSegments: finalSegments.length,
    unknownSegments: unknownCount,
    feedback: sessionFeedback,
    metrics: {
      actionDurations: segmentDurations,
      stabilizedActionDurations: segmentDurations,
      averageConfidence: Math.round((scoreSamples.reduce((sum, item) => sum + item.confidence, 0) / Math.max(1, scoreSamples.length)) * 10) / 10,
      sampleCount: scoreSamples.length,
      stableWindowFrames: ACTION_WINDOW_FRAMES,
      actionSwitchCount: stableActionStateRef.current.switchCount,
      rawActionVolatility: Number(averageRawVolatility.toFixed(4)),
      avatarEnabled: avatarState.enabled,
      avatarKey: avatarState.enabled ? avatarState.avatarKey : null,
      autoRecordingEnabled: true,
      autoRecordingSegmentMs: ANALYSIS_RECORDING_SEGMENT_MS,
      archivedVideos,
      mobile,
    },
    segments: finalSegments.map((segment) => ({
      actionType: segment.actionType,
      isUnknown: segment.isUnknown,
      startMs: segment.startMs,
      endMs: segment.endMs,
      durationMs: segment.durationMs,
      confidenceAvg: Number(segment.confidenceAvg.toFixed(4)),
      score: Math.round(segment.score),
      peakScore: Math.round(segment.peakScore),
      frameCount: segment.frameCount,
      issueSummary: segment.issueSummary,
      keyFrames: segment.keyFrames,
      clipLabel: segment.clipLabel,
    })),
    videoId: primaryArchivedVideo?.videoId,
    videoUrl: primaryArchivedVideo?.url,
  });
}, [flushSegment, liveScore, mobile, saveLiveSessionMutation, sessionMode, stopSessionRecorder]);
// Entry point for a live-analysis run. Flow:
//  1. Re-check runtime ownership (another device may already be analyzing).
//  2. Acquire the server-side runtime lock; bail out as a viewer on loss.
//  3. Reset all per-session state, start the segment recorder, open the
//     broadcast media session, and start the heartbeat loop.
//  4. Load MediaPipe Pose (or a test-injected stub) and wire its results
//     into overlay drawing, action stabilization, and per-frame scoring.
//  5. Drive pose inference from requestAnimationFrame until stopped.
// Any failure rolls back everything acquired so far.
const startAnalysis = useCallback(async () => {
  const latest = await refreshRuntimeState();
  if (!cameraActive || !videoRef.current || !streamRef.current) {
    toast.error("请先启动摄像头");
    return;
  }
  if (analyzingRef.current || saving) return;
  if (latest.role === "viewer") {
    toast.error("当前设备处于同步观看模式,不能重复开启分析");
    return;
  }
  try {
    const title = `实时分析 ${ACTION_META[currentActionRef.current].label}`;
    const runtime = await runtimeAcquireMutation.mutateAsync({
      title,
      sessionMode,
    });
    if (runtime.role === "viewer") {
      // Lost the race for the runtime lock: stay in viewer mode.
      runtimeIdRef.current = null;
      toast.error("同一账号已有其他设备正在实时分析,本机已切换为同步观看模式");
      await runtimeQuery.refetch();
      return;
    }
    runtimeIdRef.current = runtime.runtimeSession?.id ?? null;
    setViewerError("");
    analyzingRef.current = true;
    setAnalyzing(true);
    setSaving(false);
    setLeaveStatus("analyzing");
    // Reset every per-session accumulator before the first frame arrives.
    setSegments([]);
    segmentsRef.current = [];
    currentSegmentRef.current = null;
    trackingRef.current = {};
    actionHistoryRef.current = [];
    stableActionStateRef.current = createStableActionState();
    frameSamplesRef.current = [];
    volatilitySamplesRef.current = [];
    archivedVideosRef.current = [];
    recorderSequenceRef.current = 0;
    setArchivedVideoCount(0);
    sessionStartedAtRef.current = Date.now();
    setCurrentAction("unknown");
    setRawAction("unknown");
    setLiveScore(null);
    setFeedback([]);
    setStabilityMeta(createEmptyStabilizedActionMeta());
    setDurationMs(0);
    startSessionRecorder();
    const mediaSessionId = await startBroadcastSession();
    startRuntimeHeartbeatLoop(mediaSessionId);
    // Tests can inject a fake MediaPipe implementation via this global hook.
    const testFactory = (
      window as typeof window & {
        __TEST_MEDIAPIPE_FACTORY__?: () => Promise<{ Pose: any }>;
      }
    ).__TEST_MEDIAPIPE_FACTORY__;
    const { Pose } = testFactory ? await testFactory() : await import("@mediapipe/pose");
    const pose = new Pose({
      locateFile: (file: string) => `https://cdn.jsdelivr.net/npm/@mediapipe/pose/${file}`,
    });
    poseRef.current = pose;
    pose.setOptions({
      modelComplexity: 1,
      smoothLandmarks: true,
      enableSegmentation: false,
      minDetectionConfidence: 0.5,
      minTrackingConfidence: 0.5,
    });
    pose.onResults((results: { poseLandmarks?: Point[] }) => {
      const video = videoRef.current;
      const canvas = canvasRef.current;
      if (!video || !canvas) return;
      // Keep the overlay canvas in lockstep with the video resolution.
      if (video.videoWidth > 0 && video.videoHeight > 0) {
        canvas.width = video.videoWidth;
        canvas.height = video.videoHeight;
      }
      drawLiveCameraOverlay(canvas, results.poseLandmarks, avatarRenderRef.current);
      renderCompositeFrame(results.poseLandmarks);
      if (!results.poseLandmarks) return;
      const frameTimestamp = performance.now();
      const analyzed = analyzePoseFrame(results.poseLandmarks, trackingRef.current, frameTimestamp);
      // Debounce raw per-frame classifications into a stable action stream.
      const nextStabilityMeta = stabilizeActionStream(
        {
          action: analyzed.action,
          confidence: analyzed.confidence,
          timestamp: frameTimestamp,
        },
        actionHistoryRef.current,
        stableActionStateRef.current,
      );
      const elapsedMs = Date.now() - sessionStartedAtRef.current;
      const stabilityLabel = nextStabilityMeta.pendingAction ?? nextStabilityMeta.windowAction;
      // Prefix coaching feedback with the stabilizer's current decision:
      // confirming a switch, warming up the window, or holding steady.
      const stabilityFeedback = nextStabilityMeta.pending && stabilityLabel !== "unknown"
        ? [`正在确认 ${ACTION_META[stabilityLabel].label},需要持续约 0.7 秒后再切换。`, ...analyzed.feedback]
        : nextStabilityMeta.stableAction === "unknown"
          ? ["系统正在积累 24 帧动作窗口,当前先作为观察片段处理。", ...analyzed.feedback]
          : analyzed.action !== nextStabilityMeta.stableAction
            ? [`原始候选为 ${ACTION_META[analyzed.action].label},当前保持 ${ACTION_META[nextStabilityMeta.stableAction].label}`, ...analyzed.feedback]
            : analyzed.feedback;
      const displayedScore: PoseScore = {
        ...analyzed.score,
        confidence: Math.round(nextStabilityMeta.stableConfidence * 100),
      };
      const stabilizedFrame: AnalyzedFrame = {
        ...analyzed,
        action: nextStabilityMeta.stableAction,
        confidence: nextStabilityMeta.stableConfidence,
        score: displayedScore,
        feedback: stabilityFeedback.slice(0, 3),
      };
      appendFrameToSegment(stabilizedFrame, elapsedMs);
      frameSamplesRef.current.push(displayedScore);
      volatilitySamplesRef.current.push(nextStabilityMeta.rawVolatility);
      setLiveScore(displayedScore);
      setCurrentAction(nextStabilityMeta.stableAction);
      setRawAction(analyzed.action);
      setStabilityMeta(nextStabilityMeta);
      setFeedback(stabilizedFrame.feedback);
      setDurationMs(elapsedMs);
    });
    // Pump frames into the pose model on each animation frame while active.
    const processFrame = async () => {
      if (!analyzingRef.current || !videoRef.current || !poseRef.current) return;
      if (videoRef.current.readyState >= 2 || testFactory) {
        await poseRef.current.send({ image: videoRef.current });
      }
      animationRef.current = requestAnimationFrame(processFrame);
    };
    toast.success("动作识别已启动");
    processFrame();
  } catch (error: any) {
    // Roll back everything acquired before the failure.
    analyzingRef.current = false;
    setAnalyzing(false);
    setLeaveStatus("idle");
    await stopSessionRecorder();
    closeBroadcastPeer();
    await releaseRuntime("failed");
    toast.error(`实时分析启动失败: ${error?.message || "未知错误"}`);
  }
}, [
  appendFrameToSegment,
  cameraActive,
  closeBroadcastPeer,
  refreshRuntimeState,
  releaseRuntime,
  runtimeAcquireMutation,
  runtimeQuery,
  saving,
  sessionMode,
  startBroadcastSession,
  startRuntimeHeartbeatLoop,
  startSessionRecorder,
  stopSessionRecorder,
]);
// Stops an active analysis run and persists it. `releasePhase` records how
// the run ended ("safe" vs "failed") for the final runtime snapshot; the
// broadcast and the runtime lock are always released in the finally block
// regardless of whether persistence succeeded.
const stopAnalysis = useCallback(async () => {
  if (!analyzingRef.current) return;
  analyzingRef.current = false;
  setAnalyzing(false);
  setSaving(true);
  setLeaveStatus("saving");
  let releasePhase: RuntimeSnapshot["phase"] = "safe";
  if (animationRef.current) {
    cancelAnimationFrame(animationRef.current);
    animationRef.current = 0;
  }
  try {
    if (poseRef.current?.close) {
      poseRef.current.close();
      poseRef.current = null;
    }
    await persistSession();
    setLeaveStatus("safe");
    releasePhase = "safe";
    toast.success(`实时分析已保存,并同步写入训练记录${archivedVideosRef.current.length > 0 ? `;已归档 ${archivedVideosRef.current.length} 段分析录像` : ""}`);
    await liveSessionsQuery.refetch();
  } catch (error: any) {
    setLeaveStatus("failed");
    releasePhase = "failed";
    toast.error(`保存实时分析失败: ${error?.message || "未知错误"}`);
  } finally {
    closeBroadcastPeer();
    await releaseRuntime(releasePhase);
    setSaving(false);
  }
}, [closeBroadcastPeer, liveSessionsQuery, persistSession, releaseRuntime]);
// Warn before the tab closes while analysis data is still being captured or
// persisted, so an accidental navigation cannot lose the session.
useEffect(() => {
  if (!analyzing && !saving) return;
  const warnBeforeUnload = (event: BeforeUnloadEvent) => {
    event.preventDefault();
    event.returnValue = "实时分析数据仍在处理中,请先等待保存完成。";
    return event.returnValue;
  };
  window.addEventListener("beforeunload", warnBeforeUnload);
  return () => window.removeEventListener("beforeunload", warnBeforeUnload);
}, [analyzing, saving]);
// Closes the setup dialog and starts the camera — unless another device has
// claimed the live-analysis runtime in the meantime, in which case only an
// error toast is shown.
const handleSetupComplete = useCallback(async () => {
  const runtime = await refreshRuntimeState();
  setShowSetupGuide(false);
  if (runtime.role === "viewer") {
    toast.error("当前账号已有其他设备正在实时分析,请切换到同步观看模式");
    return;
  }
  await startCamera(facing, zoomTargetRef.current, qualityPreset);
}, [facing, qualityPreset, refreshRuntimeState, startCamera]);
// Derived display values for the render below. In viewer mode every field
// mirrors the remote broadcaster's runtime snapshot (with fallbacks);
// otherwise the local component state is shown directly.
const displayLeaveStatus = runtimeRole === "viewer" ? (runtimeSnapshot?.phase ?? "idle") : leaveStatus;
const displayAction = runtimeRole === "viewer" ? (runtimeSnapshot?.currentAction ?? "unknown") : currentAction;
const displayRawAction = runtimeRole === "viewer" ? (runtimeSnapshot?.rawAction ?? "unknown") : rawAction;
const displayScore = runtimeRole === "viewer" ? (runtimeSnapshot?.liveScore ?? null) : liveScore;
const displayFeedback = runtimeRole === "viewer" ? (runtimeSnapshot?.feedback ?? []) : feedback;
const displayDurationMs = runtimeRole === "viewer" ? (runtimeSnapshot?.durationMs ?? 0) : durationMs;
// Merge over empty defaults so a partial remote stability meta stays usable.
const displayStabilityMeta = runtimeRole === "viewer"
  ? {
    ...createEmptyStabilizedActionMeta(),
    ...runtimeSnapshot?.stabilityMeta,
  }
  : stabilityMeta;
const displaySessionMode = runtimeRole === "viewer"
  ? (runtimeSnapshot?.sessionMode ?? runtimeSession?.sessionMode ?? sessionMode)
  : sessionMode;
const displayQualityPreset = runtimeRole === "viewer"
  ? (runtimeSnapshot?.qualityPreset ?? qualityPreset)
  : qualityPreset;
const displayFacing = runtimeRole === "viewer"
  ? (runtimeSnapshot?.facingMode ?? facing)
  : facing;
const displayDeviceKind = runtimeRole === "viewer"
  ? (runtimeSnapshot?.deviceKind ?? (mobile ? "mobile" : "desktop"))
  : (mobile ? "mobile" : "desktop");
const displayAvatarEnabled = runtimeRole === "viewer"
  ? Boolean(runtimeSnapshot?.avatarEnabled)
  : avatarEnabled;
const displayAvatarKey = runtimeRole === "viewer"
  ? ((runtimeSnapshot?.avatarKey as AvatarKey | undefined) ?? resolvedAvatarKey)
  : resolvedAvatarKey;
const displayAvatarPreset = getAvatarPreset(displayAvatarKey);
const displayAvatarLabel = runtimeRole === "viewer"
  ? (runtimeSnapshot?.avatarLabel ?? displayAvatarPreset?.label ?? "猩猩")
  : (displayAvatarPreset?.label || "猩猩");
const runtimeSyncDelayMs = runtimeRole === "viewer" ? getRuntimeSyncDelayMs(runtimeSession?.lastHeartbeatAt) : null;
const runtimeSyncLabel = runtimeRole === "viewer" ? formatRuntimeSyncDelay(runtimeSyncDelayMs) : "";
const displayRuntimeTitle = runtimeRole === "viewer"
  ? (normalizedSnapshotTitle || normalizedRuntimeTitle || "其他设备实时分析")
  : (normalizedRuntimeTitle || `实时分析 ${ACTION_META[currentAction].label}`);
// Cache-busted URL of the broadcaster's latest relayed still frame.
const viewerFrameSrc = runtimeRole === "viewer" && runtimeSession?.mediaSessionId
  ? getMediaAssetUrl(`/assets/sessions/${runtimeSession.mediaSessionId}/live-frame.jpg?ts=${viewerFrameVersion || runtimeSnapshot?.updatedAt || Date.now()}`)
  : "";
const hasVideoFeed = cameraActive || viewerConnected;
const heroAction = ACTION_META[displayAction];
const rawActionMeta = ACTION_META[displayRawAction];
const pendingActionMeta = displayStabilityMeta.pendingAction ? ACTION_META[displayStabilityMeta.pendingAction] : null;
const resolvedAvatarPreset = getAvatarPreset(resolvedAvatarKey);
const resolvedAvatarLabel = resolvedAvatarPreset?.label || "猩猩";
const animalAvatarPresets = AVATAR_PRESETS.filter((preset) => preset.category === "animal");
const fullBodyAvatarPresets = AVATAR_PRESETS.filter((preset) => preset.category === "full-body-3d");
// Preview header text, ordered: viewer sync state > analysis state > camera state.
const previewTitle = runtimeRole === "viewer"
  ? viewerConnected
    ? `${runtimeSyncLabel} · 服务端同步中`
    : "正在获取服务端同步画面"
  : analyzing
    ? displayStabilityMeta.pending && pendingActionMeta
      ? `${pendingActionMeta.label} 切换确认中`
      : `${heroAction.label} 识别中`
    : cameraActive
      ? "准备开始实时分析"
      : "摄像头待启动";
const viewerModeLabel = normalizedRuntimeTitle || "其他设备正在实时分析";
// Renders the primary action buttons. `rail` switches between the compact
// icon-only vertical rail layout and the labelled toolbar layout. The button
// set depends on role and state: viewer → sync controls; camera off → start;
// camera on → switch lens / start-stop analysis / reopen setup.
const renderPrimaryActions = (rail = false) => {
  const buttonClass = rail
    ? "h-14 w-14 rounded-2xl border border-white/10 bg-white/10 text-white hover:bg-white/20"
    : "h-11 rounded-2xl px-4";
  // Viewer role: only offer (re)connecting to the broadcaster's stream.
  if (runtimeRole === "viewer") {
    return (
      <>
        <Button
          variant={rail ? "secondary" : "default"}
          className={buttonClass}
          onClick={() => {
            if (runtimeSession?.mediaSessionId) {
              void startViewerStream(runtimeSession.mediaSessionId).catch((error: any) => {
                toast.error(`同步观看连接失败: ${error?.message || "未知错误"}`);
              });
            }
          }}
          disabled={!runtimeSession?.mediaSessionId}
        >
          <Monitor className={rail ? "h-5 w-5" : "mr-2 h-4 w-4"} />
          {!rail && (viewerConnected ? "重新同步" : "同步观看")}
        </Button>
        {!rail ? (
          <Button variant="outline" className={buttonClass} disabled>
            <CameraOff className="mr-2 h-4 w-4" />
          </Button>
        ) : null}
      </>
    );
  }
  // Camera not running yet: single entry point through the setup guide.
  if (!cameraActive) {
    return (
      <Button
        data-testid={rail ? undefined : "live-camera-toolbar-start-button"}
        className={buttonClass}
        onClick={() => void openSetupGuide()}
      >
        <Camera className={rail ? "h-5 w-5" : "mr-2 h-4 w-4"} />
        {!rail && "启动摄像头"}
      </Button>
    );
  }
  // Camera running: lens switch (when available), analysis toggle, setup.
  return (
    <>
      {hasMultipleCameras ? (
        <Button variant={rail ? "secondary" : "outline"} className={buttonClass} onClick={() => void switchCamera()}>
          <FlipHorizontal className={rail ? "h-5 w-5" : "mr-2 h-4 w-4"} />
          {!rail && "切换镜头"}
        </Button>
      ) : null}
      {!analyzing ? (
        <Button
          data-testid="live-camera-analyze-button"
          className={buttonClass}
          onClick={() => void startAnalysis()}
          disabled={saving}
        >
          <Zap className={rail ? "h-5 w-5" : "mr-2 h-4 w-4"} />
          {!rail && "开始分析"}
        </Button>
      ) : (
        <Button variant="destructive" className={buttonClass} onClick={() => void stopAnalysis()} disabled={saving}>
          <Activity className={rail ? "h-5 w-5" : "mr-2 h-4 w-4"} />
          {!rail && "结束分析"}
        </Button>
      )}
      {!rail ? (
        <Button variant="outline" className={buttonClass} onClick={() => setShowSetupGuide(true)} disabled={analyzing || saving}>
          <RotateCcw className="mr-2 h-4 w-4" />
        </Button>
      ) : null}
    </>
  );
};
// Floating zoom control overlaid on the preview. Shows the current zoom
// factor (or "自动" when the track exposes no zoom) with +/- step buttons;
// both buttons are disabled on tracks without zoom support.
const renderZoomOverlay = () => (
  <div className="absolute right-3 bottom-3 z-20 flex items-center gap-2 rounded-2xl border border-white/10 bg-black/65 px-2 py-2 text-white shadow-lg">
    <Button
      type="button"
      size="icon"
      variant="secondary"
      onClick={() => stepZoom(-1)}
      disabled={!zoomState.supported}
      className="h-10 w-10 rounded-xl border border-white/10 bg-white/10 text-white hover:bg-white/20 disabled:opacity-40"
    >
      <Minus className="h-4 w-4" />
    </Button>
    <div className="min-w-[78px] text-center">
      <div className="text-[10px] uppercase tracking-[0.16em] text-white/50"></div>
      <div className="mt-1 text-sm font-semibold">{zoomState.supported ? `${zoomState.current.toFixed(1)}x` : "自动"}</div>
    </div>
    <Button
      type="button"
      size="icon"
      variant="secondary"
      onClick={() => stepZoom(1)}
      disabled={!zoomState.supported}
      className="h-10 w-10 rounded-xl border border-white/10 bg-white/10 text-white hover:bg-white/20 disabled:opacity-40"
    >
      <Plus className="h-4 w-4" />
    </Button>
  </div>
);
// Renders one selectable showcase card for a full-body 3D avatar preset.
// Clicking the card body selects the preset AND force-enables the avatar
// overlay; the footer link (only when `preset.modelUrl` exists) opens the
// source VRM model in a new tab.
// NOTE(review): several JSX text nodes here look truncated ("3D ", "使",
// "VRM ") — the file carries ambiguous/CJK Unicode per the repo warning;
// confirm the original labels survived encoding before shipping edits here.
const renderAvatarShowcaseCard = (preset: AvatarPreset) => {
  // Highlight the card matching the currently resolved avatar selection.
  const active = resolvedAvatarKey === preset.key;
  return (
    <div
      key={preset.key}
      className={`overflow-hidden rounded-[22px] border transition ${
        active
          ? "border-primary/50 bg-primary/5 shadow-lg shadow-primary/10"
          : "border-border/60 bg-background hover:border-primary/30 hover:bg-muted/40"
      }`}
    >
      <button
        type="button"
        onClick={() => {
          // Selecting a showcase card both picks the preset and turns the avatar on.
          setAvatarKey(preset.key);
          setAvatarEnabled(true);
        }}
        className="group block w-full text-left"
      >
        <div className="relative aspect-[4/5] overflow-hidden bg-[radial-gradient(circle_at_top,_rgba(255,255,255,0.9),_rgba(226,232,240,0.18)_38%,_rgba(15,23,42,0.92))]">
          {/* Preset key → image slug; any unmatched key falls back to "jenny". */}
          <img
            src={`/avatars/opensource3d/${
              preset.key === "beachKing"
                ? "beach-king"
                : preset.key === "sportTv"
                  ? "sport-tv"
                  : preset.key === "juanita3d"
                    ? "juanita"
                    : "jenny"
            }.webp`}
            alt={preset.label}
            className="h-full w-full object-contain p-3 transition duration-300 group-hover:scale-[1.03]"
            loading="lazy"
          />
          {/* Bottom fade so the overlay badges stay readable on bright art. */}
          <div className="pointer-events-none absolute inset-x-0 bottom-0 h-20 bg-gradient-to-t from-slate-950 via-slate-950/55 to-transparent" />
          <div className="absolute left-3 top-3">
            <Badge className="border-white/10 bg-black/60 text-white hover:bg-black/60">3D </Badge>
          </div>
          {/* "in use" badge only on the currently active preset. */}
          {active ? (
            <div className="absolute right-3 top-3">
              <Badge className="border-primary/20 bg-primary text-primary-foreground hover:bg-primary">使</Badge>
            </div>
          ) : null}
        </div>
        <div className="space-y-3 p-4">
          <div className="flex items-start justify-between gap-3">
            <div>
              <div className="text-sm font-semibold">{preset.label}</div>
              <div className="mt-1 text-xs text-muted-foreground">{preset.collection} · {preset.license}</div>
            </div>
          </div>
          <p className="text-xs leading-5 text-muted-foreground">{preset.description}</p>
        </div>
      </button>
      {/* Footer: external link to the original VRM model, when provided. */}
      <div className="flex items-center justify-between gap-3 border-t border-border/60 px-4 py-3">
        <div className="text-[11px] uppercase tracking-[0.16em] text-muted-foreground">VRM </div>
        {preset.modelUrl ? (
          <a
            href={preset.modelUrl}
            target="_blank"
            rel="noreferrer"
            className="inline-flex items-center gap-1 text-xs font-medium text-primary"
          >
            <ExternalLink className="h-3.5 w-3.5" />
          </a>
        ) : null}
      </div>
    </div>
  );
};
return (
<div className="space-y-4 mobile-safe-bottom">
<Dialog open={showSetupGuide} onOpenChange={setShowSetupGuide}>
<DialogContent className="max-w-md">
<DialogHeader>
<DialogTitle className="flex items-center gap-2">
<Camera className="h-5 w-5 text-primary" />
</DialogTitle>
<DialogDescription>
</DialogDescription>
</DialogHeader>
<div className="space-y-3 py-2">
{SETUP_STEPS.map((step, index) => (
<div
key={step.title}
className={`flex gap-3 rounded-2xl border px-4 py-3 ${
index === setupStep ? "border-primary/40 bg-primary/5" : index < setupStep ? "border-emerald-200 bg-emerald-50" : "border-border/60 bg-muted/30"
}`}
>
<div className={`flex h-10 w-10 shrink-0 items-center justify-center rounded-full ${
index < setupStep ? "bg-emerald-100 text-emerald-700" : index === setupStep ? "bg-primary/10 text-primary" : "bg-muted text-muted-foreground"
}`}>
{index < setupStep ? <CheckCircle2 className="h-5 w-5" /> : step.icon}
</div>
<div>
<div className="text-sm font-medium">{step.title}</div>
<div className="mt-1 text-xs leading-5 text-muted-foreground">{step.desc}</div>
</div>
</div>
))}
</div>
<DialogFooter className="flex gap-2">
{setupStep > 0 ? (
<Button variant="outline" onClick={() => setSetupStep((value) => value - 1)}></Button>
) : null}
{setupStep < SETUP_STEPS.length - 1 ? (
<Button onClick={() => setSetupStep((value) => value + 1)}></Button>
) : (
<Button onClick={() => void handleSetupComplete()}>
</Button>
)}
</DialogFooter>
</DialogContent>
</Dialog>
{displayLeaveStatus === "analyzing" ? (
<Alert>
<Activity className="h-4 w-4" />
<AlertTitle></AlertTitle>
<AlertDescription>
{runtimeRole === "viewer"
? "持有端仍在采集和识别动作数据,本页会按会话心跳持续同步视频与动作信息。"
: "当前仍在采集和识别动作数据,请先不要关闭浏览器或切走页面。"}
</AlertDescription>
</Alert>
) : null}
{displayLeaveStatus === "saving" ? (
<Alert>
<Activity className="h-4 w-4" />
<AlertTitle></AlertTitle>
<AlertDescription>
{runtimeRole === "viewer"
? "持有端正在提交录像、动作区间和训练记录;本页会同步保存状态,可以稍后再刷新查看。"
: "实时分析录像、动作区间和训练记录正在提交,请暂时停留当前页面;保存完成后会提示你可以离开。"}
</AlertDescription>
</Alert>
) : null}
{displayLeaveStatus === "safe" ? (
<Alert>
<CheckCircle2 className="h-4 w-4" />
<AlertTitle></AlertTitle>
<AlertDescription>
{runtimeRole === "viewer"
? "持有端分析数据已经提交完成;本页显示的是同步结果,你现在可以离开,不会影响已保存的数据。"
: "当前分析数据已经提交完成。现在可以关闭浏览器、返回上一页,或切换到其他页面,不会影响已保存的数据。"}
</AlertDescription>
</Alert>
) : null}
{displayLeaveStatus === "failed" ? (
<Alert>
<Activity className="h-4 w-4" />
<AlertTitle></AlertTitle>
<AlertDescription>
{runtimeRole === "viewer"
? "持有端当前会话还没有完整写入,本页会继续显示最后一次同步状态。"
: "当前会话还没有完整写入,请先留在本页并重新尝试结束分析或检查网络状态。"}
</AlertDescription>
</Alert>
) : null}
{runtimeRole === "viewer" ? (
<Alert>
<Monitor className="h-4 w-4" />
<AlertTitle></AlertTitle>
<AlertDescription>
{viewerModeLabel} media 1
</AlertDescription>
</Alert>
) : null}
{viewerError ? (
<Alert>
<Activity className="h-4 w-4" />
<AlertTitle></AlertTitle>
<AlertDescription>{viewerError}</AlertDescription>
</Alert>
) : null}
<section className="rounded-[28px] border border-border/60 bg-[radial-gradient(circle_at_top_left,_rgba(249,115,22,0.16),_transparent_32%),linear-gradient(135deg,rgba(12,18,24,0.98),rgba(26,31,43,0.96))] p-5 text-white shadow-xl shadow-black/10 md:p-7">
<div className="flex flex-col gap-4 lg:flex-row lg:items-end lg:justify-between">
<div className="space-y-3">
<div className="flex flex-wrap items-center gap-2">
<Badge className="gap-1.5 border-white/10 bg-white/10 text-white hover:bg-white/10">
<Sparkles className="h-3.5 w-3.5" />
24
</Badge>
<Badge className="gap-1.5 border-white/10 bg-white/10 text-white hover:bg-white/10">
<Video className="h-3.5 w-3.5" />
+
</Badge>
<Badge className="gap-1.5 border-white/10 bg-white/10 text-white hover:bg-white/10">
<PlayCircle className="h-3.5 w-3.5" />
60
</Badge>
<Badge className="gap-1.5 border-white/10 bg-white/10 text-white hover:bg-white/10">
<Camera className="h-3.5 w-3.5" />
{displayAvatarEnabled ? `虚拟形象 ${displayAvatarLabel}` : "骨架叠加"}
</Badge>
<Badge className="gap-1.5 border-white/10 bg-white/10 text-white hover:bg-white/10">
<PlayCircle className="h-3.5 w-3.5" />
{displaySessionMode === "practice" ? "练习会话" : "训练 PK"}
</Badge>
<Badge className="gap-1.5 border-white/10 bg-white/10 text-white hover:bg-white/10">
<Video className="h-3.5 w-3.5" />
{CAMERA_QUALITY_PRESETS[displayQualityPreset].label}
</Badge>
{runtimeRole === "viewer" ? (
<Badge className="gap-1.5 border-white/10 bg-white/10 text-white hover:bg-white/10" data-testid="live-camera-viewer-delay-badge">
<Monitor className="h-3.5 w-3.5" />
{runtimeSyncLabel}
</Badge>
) : null}
</div>
<div>
<h1 className="text-3xl font-semibold tracking-tight">{displayRuntimeTitle}</h1>
<p className="mt-2 max-w-2xl text-sm leading-6 text-white/70">
{runtimeRole === "viewer"
? `当前正在通过服务端中转同步 ${displayDeviceKind === "mobile" ? "移动端" : "桌面端"} ${displayFacing === "environment" ? "后置/主摄视角" : "前置视角"} 画面。同步画面、动作、评分、最近区间、虚拟形象和会话状态会自动跟随持有端刷新,允许少量网络延迟。`
: "摄像头启动后会持续识别正手、反手、发球、截击、高压、切削、挑高球与未知动作。系统会用 24 帧时间窗口统一动作,再把稳定动作写入片段、训练记录与评分;分析过程中会自动录制“视频画面 + 骨架/关键点叠层”的合成回放,并按 60 秒分段归档进视频库。开启虚拟形象后,画面中的人体可切换为 10 个轻量动物替身,或 4 个免费的全身 3D Avatar 示例覆盖显示。"}
</p>
</div>
</div>
<div className="grid grid-cols-2 gap-2 rounded-2xl border border-white/10 bg-white/5 p-2 text-center text-xs text-white/75 sm:w-[420px]">
<div className="rounded-xl bg-black/15 px-3 py-3">
<div className="text-[11px] uppercase tracking-[0.18em] text-white/45"></div>
<div className="mt-2 text-sm font-semibold text-white">{heroAction.label}</div>
</div>
<div className="rounded-xl bg-black/15 px-3 py-3">
<div className="text-[11px] uppercase tracking-[0.18em] text-white/45"></div>
<div className="mt-2 text-sm font-semibold text-white">{rawActionMeta.label}</div>
</div>
<div className="rounded-xl bg-black/15 px-3 py-3">
<div className="text-[11px] uppercase tracking-[0.18em] text-white/45"></div>
<div className="mt-2 text-lg font-semibold text-white">{formatDuration(displayDurationMs)}</div>
</div>
<div className="rounded-xl bg-black/15 px-3 py-3">
<div className="text-[11px] uppercase tracking-[0.18em] text-white/45"></div>
<div className="mt-2 text-lg font-semibold text-white">{displayStabilityMeta.windowFrames}/{ACTION_WINDOW_FRAMES}</div>
</div>
</div>
</div>
</section>
<div className="grid gap-4 xl:grid-cols-[minmax(0,1.65fr)_minmax(360px,0.95fr)]">
<section className="space-y-4">
<Card className="overflow-hidden border-0 shadow-lg">
<CardContent className="p-0">
<div className="relative aspect-[16/10] overflow-hidden bg-black sm:aspect-video">
<video
ref={videoRef}
className={`absolute inset-0 h-full w-full object-contain ${immersivePreview || runtimeRole === "viewer" ? "opacity-0" : ""}`}
playsInline
muted
autoPlay
/>
{runtimeRole === "viewer" && viewerFrameSrc ? (
<img
key={viewerFrameSrc}
src={viewerFrameSrc}
alt="同步中的实时分析画面"
className="absolute inset-0 h-full w-full object-contain"
onLoad={() => {
setViewerConnected(true);
setViewerError("");
}}
onError={() => {
setViewerConnected(false);
setViewerError("持有端正在上传同步画面,正在自动重试...");
}}
/>
) : null}
<canvas
ref={canvasRef}
className={`pointer-events-none absolute inset-0 h-full w-full object-contain ${runtimeRole === "viewer" ? "hidden" : analyzing ? "" : "opacity-70"}`}
/>
{!hasVideoFeed ? (
<div className="absolute inset-0 flex flex-col items-center justify-center gap-4 bg-[radial-gradient(circle_at_center,_rgba(249,115,22,0.12),_rgba(0,0,0,0.78))] px-6 text-center text-white/75">
<CameraOff className="h-14 w-14" />
<div className="space-y-1">
<div className="text-xl font-medium">{runtimeRole === "viewer" ? "等待同步画面" : "摄像头未启动"}</div>
<div className="text-sm text-white/60">
{runtimeRole === "viewer" ? `${viewerModeLabel},当前设备只能观看同步内容。` : "先完成拍摄校准,再开启自动动作识别。"}
</div>
</div>
{runtimeRole === "viewer" ? (
<Button
data-testid="live-camera-viewer-button"
onClick={() => {
if (runtimeSession?.mediaSessionId) {
void startViewerStream(runtimeSession.mediaSessionId).catch((error: any) => {
toast.error(`同步观看连接失败: ${error?.message || "未知错误"}`);
});
}
}}
className="rounded-2xl"
disabled={!runtimeSession?.mediaSessionId}
>
<Monitor className="mr-2 h-4 w-4" />
{viewerConnected ? "刷新同步" : "获取同步画面"}
</Button>
) : (
<Button data-testid="live-camera-start-button" onClick={() => void openSetupGuide()} className="rounded-2xl">
<Camera className="mr-2 h-4 w-4" />
</Button>
)}
</div>
) : null}
<div className="pointer-events-none absolute left-3 top-3 flex flex-wrap gap-2">
<Badge className="gap-1.5 bg-black/60 text-white shadow-sm">
<Activity className="h-3.5 w-3.5" />
{previewTitle}
</Badge>
<Badge className="gap-1.5 bg-black/60 text-white shadow-sm">
<Target className="h-3.5 w-3.5" />
{displayVisibleSegments.length}
</Badge>
{displayAvatarEnabled ? (
<Badge className="gap-1.5 bg-black/60 text-white shadow-sm">
<Sparkles className="h-3.5 w-3.5" />
{displayAvatarLabel}
</Badge>
) : null}
</div>
{mobile ? (
<Button
type="button"
size="icon"
variant="secondary"
onClick={() => setImmersivePreview(true)}
className="absolute right-3 top-3 z-20 h-11 w-11 rounded-full border border-white/10 bg-black/60 text-white shadow-lg hover:bg-black/75"
>
<Maximize2 className="h-4 w-4" />
</Button>
) : null}
{cameraActive && zoomState.supported && runtimeRole !== "viewer" ? renderZoomOverlay() : null}
{(hasVideoFeed || saving) ? (
<div className="absolute bottom-3 left-3 right-20 rounded-[24px] border border-white/10 bg-black/65 px-3 py-3 text-white shadow-lg backdrop-blur-sm sm:right-[112px]">
<div className="grid gap-2 sm:grid-cols-2">
<div>
<div className="text-[10px] uppercase tracking-[0.18em] text-white/45"></div>
<div className="mt-1 text-sm font-semibold">{heroAction.label}</div>
<div className="mt-1 text-xs text-white/60"> {rawActionMeta.label}</div>
</div>
<div>
<div className="text-[10px] uppercase tracking-[0.18em] text-white/45"></div>
<div className="mt-1 text-sm font-semibold">
{displayStabilityMeta.windowFrames}/{ACTION_WINDOW_FRAMES} · {Math.round(displayStabilityMeta.windowShare * 100)}%
</div>
<div className="mt-1 text-xs text-white/60">
{saving
? "正在保存会话..."
: displayStabilityMeta.pending && pendingActionMeta
? `切换确认中 · ${pendingActionMeta.label} · ${Math.max(0, displayStabilityMeta.candidateMs / 1000).toFixed(1)}s`
: `已稳定 ${Math.max(0, displayStabilityMeta.stableMs / 1000).toFixed(1)}s · 波动 ${Math.round(displayStabilityMeta.rawVolatility * 100)}%`}
</div>
</div>
</div>
</div>
) : null}
</div>
<div className="border-t border-border/60 bg-card/80 p-4">
<div className="grid gap-3 md:grid-cols-[180px_minmax(0,1fr)]">
<Select
value={displaySessionMode}
onValueChange={(value) => setSessionMode(value as SessionMode)}
disabled={analyzing || saving || runtimeRole === "viewer"}
>
<SelectTrigger className="h-12 rounded-2xl border-border/60">
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="practice"></SelectItem>
<SelectItem value="pk"> PK</SelectItem>
</SelectContent>
</Select>
<div className="flex flex-wrap gap-2">
{renderPrimaryActions()}
</div>
</div>
<div className="mt-4 grid gap-3 rounded-[24px] border border-border/60 bg-muted/15 p-4 md:grid-cols-3">
{runtimeRole === "viewer" ? (
<div className="rounded-2xl border border-border/60 bg-background/90 p-4 md:col-span-3" data-testid="live-camera-viewer-sync-card">
<div className="flex flex-wrap items-start justify-between gap-3">
<div>
<div className="text-[11px] uppercase tracking-[0.16em] text-muted-foreground"></div>
<div className="mt-2 text-lg font-semibold">{displayRuntimeTitle}</div>
<div className="mt-2 grid gap-2 text-xs text-muted-foreground sm:grid-cols-2">
<div>{displayDeviceKind === "mobile" ? "移动端" : "桌面端"}</div>
<div>{displayFacing === "environment" ? "后置 / 主摄" : "前置"}</div>
<div>{CAMERA_QUALITY_PRESETS[displayQualityPreset].label}</div>
<div>{displayAvatarEnabled ? displayAvatarLabel : "未开启"}</div>
</div>
</div>
<div className="min-w-[150px] rounded-2xl border border-border/60 bg-muted/20 px-4 py-3 text-sm">
<div className="text-[11px] uppercase tracking-[0.16em] text-muted-foreground"></div>
<div className="mt-2 font-semibold">{runtimeSyncLabel}</div>
<div className="mt-1 text-xs text-muted-foreground">
{runtimeSession?.lastHeartbeatAt ? formatDateTimeShanghai(runtimeSession.lastHeartbeatAt) : "等待首个心跳"}
</div>
</div>
</div>
</div>
) : null}
<div className="rounded-2xl border border-border/60 bg-background/90 p-4">
<div className="text-[11px] uppercase tracking-[0.16em] text-muted-foreground"></div>
<div className="mt-2 text-lg font-semibold"> 60 </div>
<div className="mt-2 text-xs leading-5 text-muted-foreground">
线
</div>
</div>
<div className="rounded-2xl border border-border/60 bg-background/90 p-4">
<div className="text-[11px] uppercase tracking-[0.16em] text-muted-foreground"></div>
<div className="mt-2 text-lg font-semibold">{runtimeRole === "viewer" ? (runtimeSnapshot?.archivedVideoCount ?? 0) : archivedVideoCount}</div>
<div className="mt-2 text-xs leading-5 text-muted-foreground">
</div>
</div>
<div className="rounded-2xl border border-border/60 bg-background/90 p-4">
<div className="text-[11px] uppercase tracking-[0.16em] text-muted-foreground"></div>
<div className="mt-2 text-lg font-semibold"></div>
<div className="mt-2 text-xs leading-5 text-muted-foreground">
使
</div>
</div>
</div>
<div className="mt-4 grid gap-3 rounded-[24px] border border-border/60 bg-muted/20 p-4 lg:grid-cols-[minmax(0,1.1fr)_180px_220px]">
<div className="space-y-3">
<div className="flex items-center justify-between gap-3">
<div>
<div className="text-sm font-medium"></div>
<div className="mt-1 text-xs text-muted-foreground">
使 10 4 3D Avatar
</div>
</div>
<Switch
checked={avatarEnabled}
onCheckedChange={setAvatarEnabled}
disabled={runtimeRole === "viewer" || (!cameraActive && !analyzing)}
data-testid="live-camera-avatar-switch"
/>
</div>
<div className="text-xs text-muted-foreground">
{resolvedAvatarLabel}
{avatarPrompt.trim() ? ` · 输入 ${avatarPrompt.trim()}` : " · 可输入猩猩、狐狸、熊猫、兔子,或 BeachKing、Juanita 等别名自动映射"}
</div>
</div>
<div>
<div className="mb-2 text-xs uppercase tracking-[0.18em] text-muted-foreground"></div>
<Select value={avatarKey} onValueChange={(value) => setAvatarKey(value as AvatarKey)} disabled={runtimeRole === "viewer"}>
<SelectTrigger className="h-12 rounded-2xl border-border/60">
<SelectValue />
</SelectTrigger>
<SelectContent>
{animalAvatarPresets.map((preset) => (
<SelectItem key={preset.key} value={preset.key}>{preset.label}</SelectItem>
))}
{fullBodyAvatarPresets.map((preset) => (
<SelectItem key={preset.key} value={preset.key}>{preset.label} · 3D</SelectItem>
))}
</SelectContent>
</Select>
</div>
<div>
<div className="mb-2 text-xs uppercase tracking-[0.18em] text-muted-foreground"></div>
<Input
value={avatarPrompt}
onChange={(event) => setAvatarPrompt(event.target.value)}
placeholder="例如 狐狸 / panda coach / BeachKing / Juanita"
className="h-12 rounded-2xl border-border/60"
disabled={runtimeRole === "viewer"}
/>
</div>
</div>
<div className="mt-4 rounded-[24px] border border-border/60 bg-background/80 p-4">
<div className="flex flex-wrap items-center justify-between gap-3">
<div>
<div className="text-sm font-medium"> 3D </div>
<div className="mt-1 text-xs text-muted-foreground">
4 Open Source Avatars CC0 VRM/three-vrm沿
</div>
</div>
<Badge variant="secondary" className="rounded-full px-3 py-1 text-xs">CC0 · Open Source Avatars</Badge>
</div>
<div className="mt-4 grid gap-3 sm:grid-cols-2 xl:grid-cols-4">
{fullBodyAvatarPresets.map(renderAvatarShowcaseCard)}
</div>
</div>
</div>
</CardContent>
</Card>
<Card className="border-0 shadow-sm">
<CardHeader className="pb-3">
<CardTitle className="text-base"></CardTitle>
<CardDescription>使 24 </CardDescription>
</CardHeader>
<CardContent className="space-y-4">
<div className="grid gap-3 lg:grid-cols-3">
{Object.entries(CAMERA_QUALITY_PRESETS).map(([key, preset]) => {
const active = qualityPreset === key;
const disabled = analyzing || saving || runtimeRole === "viewer";
return (
<button
key={key}
type="button"
onClick={() => void handleQualityPresetChange(key as CameraQualityPreset)}
disabled={disabled}
className={`rounded-2xl border px-4 py-4 text-left transition ${
active
? "border-primary/60 bg-primary/5 shadow-sm"
: "border-border/60 hover:border-primary/30 hover:bg-muted/50"
} ${disabled ? "cursor-not-allowed opacity-60" : ""}`}
>
<div className="text-sm font-semibold">{preset.label}</div>
<div className="mt-1 text-xs text-muted-foreground">{preset.subtitle}</div>
<p className="mt-3 text-sm leading-6 text-muted-foreground">{preset.description}</p>
</button>
);
})}
</div>
<div className="grid gap-3 md:grid-cols-2">
<div className="rounded-2xl border border-border/60 bg-muted/25 p-4">
<div className="text-sm font-medium"></div>
<div className="mt-2 text-sm text-muted-foreground">
{CAMERA_QUALITY_PRESETS[qualityPreset].subtitle} ·
</div>
</div>
<div className="rounded-2xl border border-border/60 bg-muted/25 p-4">
<div className="text-sm font-medium"> / </div>
<div className="mt-2 text-sm text-muted-foreground">
{zoomState.supported
? `当前 ${zoomState.current.toFixed(1)}x,可在分析过程中直接微调取景;焦点模式为 ${zoomState.focusMode}`
: "当前设备或浏览器未开放镜头缩放能力,仍会保持自动对焦。Chrome 安卓和部分后置摄像头通常支持此能力。"}
</div>
</div>
</div>
{zoomState.supported ? (
<div className="rounded-2xl border border-border/60 bg-muted/20 p-4">
<Slider
value={[zoomState.current]}
min={zoomState.min}
max={zoomState.max}
step={zoomState.step}
onValueChange={(value) => {
if (typeof value[0] === "number") {
void updateZoom(value[0]);
}
}}
/>
<div className="mt-3 flex items-center justify-between text-xs text-muted-foreground">
<span>{zoomState.min.toFixed(1)}x</span>
<span> 1.0x-1.5x </span>
<span>{zoomState.max.toFixed(1)}x</span>
</div>
</div>
) : null}
</CardContent>
</Card>
<Card className="border-0 shadow-sm">
<CardHeader className="pb-3">
<CardTitle className="text-base"></CardTitle>
<CardDescription>
10 便
</CardDescription>
</CardHeader>
<CardContent className="space-y-3">
{actionStats.length > 0 ? (
<div className="flex flex-wrap gap-2">
<Button
variant={segmentFilter === "all" ? "default" : "outline"}
size="sm"
onClick={() => setSegmentFilter("all")}
>
</Button>
{actionStats.map((item) => (
<Button
key={item.actionType}
variant={segmentFilter === item.actionType ? "default" : "outline"}
size="sm"
onClick={() => setSegmentFilter(item.actionType)}
>
{ACTION_META[item.actionType].label} · {item.count}
</Button>
))}
</div>
) : null}
{displayFilteredSegments.length === 0 ? (
<div className="rounded-2xl border border-dashed border-border/60 px-4 py-8 text-center text-sm text-muted-foreground">
{runtimeRole === "viewer" ? "当前会同步最近识别到的动作片段,持有端开始分析后会自动刷新。" : "开始分析后,这里会按时间区间显示识别出的动作片段。"}
</div>
) : (
displayFilteredSegments.map((segment) => {
const meta = ACTION_META[segment.actionType];
return (
<div key={`${segment.actionType}-${segment.startMs}`} className="rounded-2xl border border-border/60 bg-muted/25 p-4">
<div className="flex flex-col gap-3 sm:flex-row sm:items-start sm:justify-between">
<div className="space-y-2">
<div className="flex flex-wrap items-center gap-2">
<Badge className={meta.tone}>{meta.label}</Badge>
<Badge variant="outline">{formatDuration(segment.startMs)} - {formatDuration(segment.endMs)}</Badge>
<Badge variant="outline"> {formatDuration(segment.durationMs)}</Badge>
<Badge variant="outline"> {segment.keyFrames.length}</Badge>
</div>
<div className="text-sm text-muted-foreground">{segment.issueSummary.join(" · ") || "当前片段节奏稳定"}</div>
</div>
<div className="min-w-[120px] text-sm">
<div className="flex items-center justify-between">
<span className="text-muted-foreground"></span>
<span className="font-semibold">{Math.round(segment.score)}</span>
</div>
<div className="mt-2 flex items-center justify-between">
<span className="text-muted-foreground"></span>
<span className="font-semibold">{Math.round(segment.confidenceAvg * 100)}%</span>
</div>
</div>
</div>
</div>
);
})
)}
</CardContent>
</Card>
</section>
<aside className="space-y-4">
<Card className="border-0 shadow-sm">
<CardHeader className="pb-3">
<CardTitle className="text-base"></CardTitle>
</CardHeader>
<CardContent className="space-y-4">
{displayScore ? (
<>
<div className="rounded-3xl border border-border/60 bg-muted/20 p-5 text-center">
<div className="text-xs uppercase tracking-[0.18em] text-muted-foreground"></div>
<div data-testid="live-camera-score-overall" className="mt-3 text-5xl font-semibold tracking-tight">
{displayScore.overall}
</div>
<div className="mt-3 flex items-center justify-center gap-2">
<Badge className={heroAction.tone}>{heroAction.label}</Badge>
<Badge variant="outline"> {displayScore.confidence}%</Badge>
<Badge className={sessionBand.tone}>{sessionBand.label}</Badge>
</div>
</div>
<div className="space-y-3">
<ScoreBar label="姿态" value={displayScore.posture} accent="bg-emerald-500" />
<ScoreBar label="平衡" value={displayScore.balance} accent="bg-sky-500" />
<ScoreBar label="技术" value={displayScore.technique} accent="bg-amber-500" />
<ScoreBar label="脚步" value={displayScore.footwork} accent="bg-indigo-500" />
<ScoreBar label="连贯性" value={displayScore.consistency} accent="bg-rose-500" />
</div>
</>
) : (
<div className="rounded-2xl border border-dashed border-border/60 px-4 py-8 text-center text-sm text-muted-foreground">
</div>
)}
</CardContent>
</Card>
<Card className="border-0 shadow-sm">
<CardHeader className="pb-3">
<CardTitle className="text-base"></CardTitle>
<CardDescription></CardDescription>
</CardHeader>
<CardContent className="space-y-3">
{actionStats.length === 0 ? (
<div className="rounded-2xl border border-dashed border-border/60 px-4 py-8 text-center text-sm text-muted-foreground">
</div>
) : (
actionStats.map((item) => (
<div key={item.actionType} className="space-y-2 rounded-2xl border border-border/60 bg-muted/20 p-4">
<div className="flex items-center justify-between gap-3">
<div className="flex items-center gap-2">
<Badge className={ACTION_META[item.actionType].tone}>{ACTION_META[item.actionType].label}</Badge>
<span className="text-xs text-muted-foreground">{item.count} </span>
</div>
<div className="text-xs text-muted-foreground">
{Math.round(item.averageScore)} · {Math.round(item.averageConfidence * 100)}%
</div>
</div>
<Progress value={item.sharePct} className="h-2" />
<div className="flex items-center justify-between text-xs text-muted-foreground">
<span> {formatDuration(item.durationMs)}</span>
<span> {item.sharePct}%</span>
</div>
</div>
))
)}
</CardContent>
</Card>
<Card className="border-0 shadow-sm">
<CardHeader className="pb-3">
<CardTitle className="text-base"></CardTitle>
</CardHeader>
<CardContent className="space-y-2">
<div className="rounded-2xl border border-border/60 bg-muted/20 p-4">
<div className="flex items-center justify-between text-sm">
<span></span>
<Badge className={heroAction.tone}>{heroAction.label}</Badge>
</div>
<div className="mt-3 grid grid-cols-2 gap-2 text-xs text-muted-foreground">
<div> {rawActionMeta.label}</div>
<div> {displayStabilityMeta.windowFrames}/{ACTION_WINDOW_FRAMES}</div>
<div> {Math.round(displayStabilityMeta.windowShare * 100)}%</div>
<div> {displayStabilityMeta.switchCount} </div>
</div>
<Progress value={displayStabilityMeta.windowProgress * 100} className="mt-3 h-2" />
<div className="mt-2 text-xs text-muted-foreground">
{displayStabilityMeta.pending && pendingActionMeta
? `当前正在确认 ${pendingActionMeta.label},确认后才会切段入库。`
: "当前区间只会按稳定动作聚合,短时抖动不会直接切换动作。"}
</div>
</div>
{displayFeedback.length > 0 ? displayFeedback.map((item) => (
<div key={item} className="rounded-2xl border border-border/60 bg-muted/25 px-4 py-3 text-sm">
{item}
</div>
)) : (
<div className="rounded-2xl border border-dashed border-border/60 px-4 py-8 text-center text-sm text-muted-foreground">
</div>
)}
<div className="rounded-2xl border border-border/60 bg-muted/20 p-4">
<div className="flex items-center justify-between text-sm">
<span></span>
<span className="font-medium">
{totalDisplaySegments > 0 ? `${Math.round(((runtimeRole === "viewer" ? (runtimeSnapshot?.unknownSegments ?? 0) : unknownSegments.length) / totalDisplaySegments) * 100)}%` : "0%"}
</span>
</div>
<Progress
value={totalDisplaySegments > 0 ? (((runtimeRole === "viewer" ? (runtimeSnapshot?.unknownSegments ?? 0) : unknownSegments.length) / totalDisplaySegments) * 100) : 0}
className="mt-3 h-2"
/>
</div>
<div className="rounded-2xl border border-border/60 bg-muted/20 p-4">
<div className="flex items-center justify-between text-sm">
<span></span>
<span className="font-medium">{Math.round(knownRatio * 100)}%</span>
</div>
<Progress value={knownRatio * 100} className="mt-3 h-2" />
<div className="mt-3 grid grid-cols-2 gap-2 text-xs text-muted-foreground">
<div> {bestSegment ? `${Math.round(bestSegment.score)}` : "暂无"}</div>
<div> {actionStats[0] ? ACTION_META[actionStats[0].actionType].label : "未知"}</div>
</div>
</div>
</CardContent>
</Card>
<Card className="border-0 shadow-sm">
<CardHeader className="pb-3">
<CardTitle className="text-base"></CardTitle>
</CardHeader>
<CardContent className="space-y-3">
{(liveSessionsQuery.data ?? []).length === 0 ? (
<div className="rounded-2xl border border-dashed border-border/60 px-4 py-8 text-center text-sm text-muted-foreground">
</div>
) : (
(liveSessionsQuery.data ?? []).map((session: any) => (
<div key={session.id} className="rounded-2xl border border-border/60 bg-muted/20 p-4">
<div className="flex items-start justify-between gap-3">
<div>
<div className="font-medium">{session.title}</div>
<div className="mt-1 text-xs text-muted-foreground">
{formatDateTimeShanghai(session.createdAt)}
</div>
</div>
<Badge className={ACTION_META[(session.dominantAction as ActionType) || "unknown"].tone}>
{ACTION_META[(session.dominantAction as ActionType) || "unknown"].label}
</Badge>
</div>
<div className="mt-3 grid grid-cols-3 gap-2 text-xs text-muted-foreground">
<div> {Math.round(session.overallScore || 0)}</div>
<div> {session.effectiveSegments || 0}</div>
<div> {formatDuration(session.durationMs || 0)}</div>
</div>
{session.videoUrl ? (
<div className="mt-3">
<Button
variant="outline"
size="sm"
onClick={() => window.open(session.videoUrl, "_blank", "noopener,noreferrer")}
>
</Button>
</div>
) : null}
</div>
))
)}
</CardContent>
</Card>
</aside>
</div>
{mobile && immersivePreview ? (
<div className="fixed inset-0 z-[80] bg-black/95 px-3 py-4 mobile-safe-top mobile-safe-bottom mobile-safe-inline">
<div className="grid h-full grid-cols-[minmax(0,1fr)_72px] gap-3">
<div className="relative min-h-0 overflow-hidden rounded-[32px] border border-white/10 bg-black shadow-2xl shadow-black/40">
<video ref={videoRef} className="absolute inset-0 h-full w-full object-contain" playsInline muted autoPlay />
<canvas ref={canvasRef} className={`pointer-events-none absolute inset-0 h-full w-full object-contain ${runtimeRole === "viewer" ? "hidden" : ""}`} />
<div className="pointer-events-none absolute left-3 top-3 flex flex-wrap gap-2">
<Badge className="gap-1.5 bg-black/60 text-white shadow-sm">
<Sparkles className="h-3.5 w-3.5" />
{heroAction.label}
</Badge>
{displayAvatarEnabled ? (
<Badge className="gap-1.5 bg-black/60 text-white shadow-sm">
<Camera className="h-3.5 w-3.5" />
{displayAvatarLabel}
</Badge>
) : null}
<Badge className="gap-1.5 bg-black/60 text-white shadow-sm">
<Target className="h-3.5 w-3.5" />
</Badge>
</div>
<div className="absolute bottom-3 left-3 right-3 rounded-[24px] border border-white/10 bg-black/65 px-3 py-3 text-white shadow-lg backdrop-blur-sm">
<div className="grid grid-cols-2 gap-2 text-xs">
<div>
<div className="uppercase tracking-[0.18em] text-white/45"></div>
<div className="mt-1 text-sm font-semibold">{heroAction.label}</div>
</div>
<div>
<div className="uppercase tracking-[0.18em] text-white/45"></div>
<div className="mt-1 text-sm font-semibold">{rawActionMeta.label}</div>
</div>
<div>
<div className="uppercase tracking-[0.18em] text-white/45"></div>
<div className="mt-1">{displayStabilityMeta.windowFrames}/{ACTION_WINDOW_FRAMES}</div>
</div>
<div>
<div className="uppercase tracking-[0.18em] text-white/45"></div>
<div className="mt-1">
{displayStabilityMeta.pending && pendingActionMeta ? `确认 ${pendingActionMeta.label}` : runtimeRole === "viewer" ? "同步观看中" : "稳定跟踪中"}
</div>
</div>
</div>
</div>
<Button
type="button"
size="icon"
variant="secondary"
onClick={() => setImmersivePreview(false)}
className="absolute right-3 top-3 z-20 h-11 w-11 rounded-full border border-white/10 bg-black/60 text-white shadow-lg hover:bg-black/75"
>
<Minimize2 className="h-4 w-4" />
</Button>
{cameraActive && zoomState.supported && runtimeRole !== "viewer" ? renderZoomOverlay() : null}
</div>
<div className="flex flex-col items-center justify-center gap-3">
{renderPrimaryActions(true)}
</div>
</div>
</div>
) : null}
</div>
);
}