Improve live analysis stability and video clip drafting

这个提交包含在:
cryptocommuniums-afk
2026-03-15 02:11:34 +08:00
父节点 edc66ea5bc
当前提交 815f96d4e8
修改 9 个文件,包含 570 行新增、56 行删除

查看文件

@@ -16,11 +16,19 @@
实时分析页现在采用“识别 + 录制 + 落库”一体化流程: 实时分析页现在采用“识别 + 录制 + 落库”一体化流程:
- 浏览器端基于 MediaPipe Pose 自动识别 `forehand / backhand / serve / volley / overhead / slice / lob / unknown` - 浏览器端基于 MediaPipe Pose 自动识别 `forehand / backhand / serve / volley / overhead / slice / lob / unknown`
- 最近 6 帧动作结果会做时序加权稳定化,降低正手/反手/未知动作间的瞬时抖动
- 连续同类动作会自动合并为片段,最长单段不超过 10 秒 - 连续同类动作会自动合并为片段,最长单段不超过 10 秒
- 停止分析后会自动保存动作区间、评分维度、反馈摘要和可选本地录制视频 - 停止分析后会自动保存动作区间、评分维度、反馈摘要和可选本地录制视频
- 实时分析结果会自动回写训练记录、日训练聚合、成就进度与 NTRP 评分链路 - 实时分析结果会自动回写训练记录、日训练聚合、成就进度与 NTRP 评分链路
- 移动端支持竖屏最大化预览,主要操作按钮固定在侧边 - 移动端支持竖屏最大化预览,主要操作按钮固定在侧边
## Video Library And PC Editing
- 视频库支持直接打开 `PC 轻剪辑工作台`
- 轻剪辑支持播放器预览、手动入点/出点、从当前播放位置快速设点
- 分析关键时刻会自动生成建议片段;即使视频 metadata 尚未返回,也会按分析帧数估算时间轴
- 剪辑草稿保存在浏览器本地,可导出 JSON 供后续后台剪辑任务或人工复核使用
## Online Recording ## Online Recording
在线录制模块采用双链路设计: 在线录制模块采用双链路设计:
@@ -124,6 +132,12 @@ set -a && source .env && set +a && pnpm exec drizzle-kit migrate
- `docs/media-architecture.md` - `docs/media-architecture.md`
- `docs/frontend-recording.md` - `docs/frontend-recording.md`
2026-03-15 已在真实环境执行一次重建与 smoke test
- `docker compose up -d --build migrate app app-worker`
- Playwright 复测 `https://te.hao.work/login`、`/checkin`、`/videos`、`/recorder`、`/live-camera`、`/admin`
- 复测后关键链路全部通过,确认线上已切换到最新前端与业务版本
## Documentation Index ## Documentation Index
- `docs/FEATURES.md`: 当前功能特性与能力边界 - `docs/FEATURES.md`: 当前功能特性与能力边界

查看文件

@@ -76,6 +76,11 @@ type AnalyzedFrame = {
feedback: string[]; feedback: string[];
}; };
// One per-frame classification sample: the detected action label plus the
// classifier's confidence score (clamped to [0, 1] downstream). Samples are
// collected into a short sliding window for temporal stabilization.
type ActionObservation = {
action: ActionType;
confidence: number;
};
const ACTION_META: Record<ActionType, { label: string; tone: string; accent: string }> = { const ACTION_META: Record<ActionType, { label: string; tone: string; accent: string }> = {
forehand: { label: "正手挥拍", tone: "bg-emerald-500/10 text-emerald-700", accent: "bg-emerald-500" }, forehand: { label: "正手挥拍", tone: "bg-emerald-500/10 text-emerald-700", accent: "bg-emerald-500" },
backhand: { label: "反手挥拍", tone: "bg-sky-500/10 text-sky-700", accent: "bg-sky-500" }, backhand: { label: "反手挥拍", tone: "bg-sky-500/10 text-sky-700", accent: "bg-sky-500" },
@@ -184,6 +189,55 @@ function createSegment(action: ActionType, elapsedMs: number, frame: AnalyzedFra
}; };
} }
/**
 * Temporally stabilizes a single analyzed frame against the recent history.
 *
 * Keeps a sliding window of the last 6 observations (mutating `history` in
 * place so the caller's ref stays current), takes a recency-weighted vote over
 * action labels, and only lets the instantaneous frame override the vote when
 * its confidence is high enough. Reduces label flicker between e.g.
 * forehand/backhand/unknown.
 */
function stabilizeAnalyzedFrame(frame: AnalyzedFrame, history: ActionObservation[]): AnalyzedFrame {
  // Slide the observation window forward; cap at the 6 most recent samples.
  const recent = [...history, { action: frame.action, confidence: frame.confidence }].slice(-6);
  history.splice(0, history.length, ...recent);

  // Recency-weighted vote: sample at index i contributes confidence * (i + 1),
  // so newer samples count more.
  const actionScores: Record<ActionType, number> = {
    forehand: 0,
    backhand: 0,
    serve: 0,
    volley: 0,
    overhead: 0,
    slice: 0,
    lob: 0,
    unknown: 0,
  };
  recent.forEach((sample, index) => {
    actionScores[sample.action] += sample.confidence * (index + 1);
  });

  const ranked = (Object.entries(actionScores) as Array<[ActionType, number]>).sort(
    (left, right) => right[1] - left[1],
  );
  const [winner = "unknown", winnerScore = 0] = ranked[0] || [];
  const [, runnerScore = 0] = ranked[1] || [];

  // Average confidence over the samples that voted for the winner; fall back
  // to the instantaneous confidence when the winner has no samples.
  const winnerSamples = recent.filter((sample) => sample.action === winner);
  let averageConfidence = frame.confidence;
  if (winnerSamples.length > 0) {
    let total = 0;
    for (const sample of winnerSamples) total += sample.confidence;
    averageConfidence = total / winnerSamples.length;
  }

  // Decide the stabilized label:
  // 1. a confident concrete frame may override an "unknown" vote;
  // 2. a very confident frame may break a near-tie between the top two;
  // 3. otherwise the weighted-vote winner stands.
  let stableAction: ActionType = winner;
  if (winner === "unknown" && frame.action !== "unknown" && frame.confidence >= 0.52) {
    stableAction = frame.action;
  } else if (winnerScore - runnerScore < 0.2 && frame.confidence >= 0.65) {
    stableAction = frame.action;
  }

  // When the frame agrees with the stabilized label, never report a lower
  // confidence than the instantaneous one.
  const stableConfidence = stableAction === frame.action
    ? Math.max(frame.confidence, averageConfidence)
    : averageConfidence;

  return {
    ...frame,
    action: stableAction,
    confidence: clamp(stableConfidence, 0, 1),
    feedback: stableAction === "unknown"
      ? ["系统正在继续观察,当前窗口内未形成稳定动作特征。", ...frame.feedback].slice(0, 3)
      : frame.feedback,
  };
}
function analyzePoseFrame(landmarks: Point[], tracking: TrackingState, timestamp: number): AnalyzedFrame { function analyzePoseFrame(landmarks: Point[], tracking: TrackingState, timestamp: number): AnalyzedFrame {
const nose = landmarks[0]; const nose = landmarks[0];
const leftShoulder = landmarks[11]; const leftShoulder = landmarks[11];
@@ -428,6 +482,7 @@ export default function LiveCamera() {
const animationRef = useRef<number>(0); const animationRef = useRef<number>(0);
const sessionStartedAtRef = useRef<number>(0); const sessionStartedAtRef = useRef<number>(0);
const trackingRef = useRef<TrackingState>({}); const trackingRef = useRef<TrackingState>({});
const actionHistoryRef = useRef<ActionObservation[]>([]);
const currentSegmentRef = useRef<ActionSegment | null>(null); const currentSegmentRef = useRef<ActionSegment | null>(null);
const segmentsRef = useRef<ActionSegment[]>([]); const segmentsRef = useRef<ActionSegment[]>([]);
const frameSamplesRef = useRef<PoseScore[]>([]); const frameSamplesRef = useRef<PoseScore[]>([]);
@@ -746,6 +801,7 @@ export default function LiveCamera() {
segmentsRef.current = []; segmentsRef.current = [];
currentSegmentRef.current = null; currentSegmentRef.current = null;
trackingRef.current = {}; trackingRef.current = {};
actionHistoryRef.current = [];
frameSamplesRef.current = []; frameSamplesRef.current = [];
sessionStartedAtRef.current = Date.now(); sessionStartedAtRef.current = Date.now();
setDurationMs(0); setDurationMs(0);
@@ -785,7 +841,10 @@ export default function LiveCamera() {
drawOverlay(canvas, results.poseLandmarks); drawOverlay(canvas, results.poseLandmarks);
if (!results.poseLandmarks) return; if (!results.poseLandmarks) return;
const analyzed = analyzePoseFrame(results.poseLandmarks, trackingRef.current, performance.now()); const analyzed = stabilizeAnalyzedFrame(
analyzePoseFrame(results.poseLandmarks, trackingRef.current, performance.now()),
actionHistoryRef.current,
);
const elapsedMs = Date.now() - sessionStartedAtRef.current; const elapsedMs = Date.now() - sessionStartedAtRef.current;
appendFrameToSegment(analyzed, elapsedMs); appendFrameToSegment(analyzed, elapsedMs);
frameSamplesRef.current.push(analyzed.score); frameSamplesRef.current.push(analyzed.score);

查看文件

@@ -4,7 +4,7 @@ import { Card, CardContent, CardHeader, CardTitle, CardDescription } from "@/com
import { Badge } from "@/components/ui/badge"; import { Badge } from "@/components/ui/badge";
import { Skeleton } from "@/components/ui/skeleton"; import { Skeleton } from "@/components/ui/skeleton";
import { Button } from "@/components/ui/button"; import { Button } from "@/components/ui/button";
import { Activity, Calendar, CheckCircle2, Clock, TrendingUp, Target } from "lucide-react"; import { Activity, Calendar, CheckCircle2, Clock, TrendingUp, Target, Sparkles } from "lucide-react";
import { import {
ResponsiveContainer, BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip,
LineChart, Line, Legend LineChart, Line, Legend
@@ -95,6 +95,14 @@ export default function Progress() {
<p className="text-2xl font-bold">{analyses?.length || 0}<span className="text-sm font-normal ml-1"></span></p> <p className="text-2xl font-bold">{analyses?.length || 0}<span className="text-sm font-normal ml-1"></span></p>
</CardContent> </CardContent>
</Card> </Card>
<Card className="border-0 shadow-sm">
<CardContent className="pt-4 pb-3">
<div className="flex items-center gap-2 text-xs text-muted-foreground mb-1">
<Sparkles className="h-3 w-3" />
</div>
<p className="text-2xl font-bold">{stats?.recentLiveSessions?.length || 0}<span className="text-sm font-normal ml-1"></span></p>
</CardContent>
</Card>
</div> </div>
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6"> <div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
@@ -183,6 +191,7 @@ export default function Progress() {
<p className="text-xs text-muted-foreground"> <p className="text-xs text-muted-foreground">
{new Date(record.trainingDate || record.createdAt).toLocaleDateString("zh-CN")} {new Date(record.trainingDate || record.createdAt).toLocaleDateString("zh-CN")}
{record.durationMinutes ? ` · ${record.durationMinutes}分钟` : ""} {record.durationMinutes ? ` · ${record.durationMinutes}分钟` : ""}
{record.sourceType ? ` · ${record.sourceType}` : ""}
</p> </p>
</div> </div>
</div> </div>

查看文件

@@ -1,12 +1,39 @@
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { useAuth } from "@/_core/hooks/useAuth"; import { useAuth } from "@/_core/hooks/useAuth";
import { trpc } from "@/lib/trpc"; import { trpc } from "@/lib/trpc";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Badge } from "@/components/ui/badge"; import { Badge } from "@/components/ui/badge";
import { Button } from "@/components/ui/button"; import { Button } from "@/components/ui/button";
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card";
import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle } from "@/components/ui/dialog";
import { Skeleton } from "@/components/ui/skeleton"; import { Skeleton } from "@/components/ui/skeleton";
import { Video, Play, BarChart3, Clock, Zap, ChevronRight, FileVideo } from "lucide-react"; import { Slider } from "@/components/ui/slider";
import { Textarea } from "@/components/ui/textarea";
import { Input } from "@/components/ui/input";
import { toast } from "sonner";
import {
BarChart3,
Clock,
Download,
FileVideo,
Play,
PlayCircle,
Scissors,
Sparkles,
Trash2,
Video,
Zap,
} from "lucide-react";
import { useLocation } from "wouter"; import { useLocation } from "wouter";
// A locally persisted clip-plan entry for the lightweight PC editor.
// Times are in seconds on the video/analysis timeline; `source` records
// whether the range was set by hand or taken from an analysis-suggested
// key moment.
type ClipDraft = {
id: string;
startSec: number;
endSec: number;
label: string;
notes: string;
source: "manual" | "suggested";
};
const statusMap: Record<string, { label: string; color: string }> = { const statusMap: Record<string, { label: string; color: string }> = {
pending: { label: "待分析", color: "bg-yellow-100 text-yellow-700" }, pending: { label: "待分析", color: "bg-yellow-100 text-yellow-700" },
analyzing: { label: "分析中", color: "bg-blue-100 text-blue-700" }, analyzing: { label: "分析中", color: "bg-blue-100 text-blue-700" },
@@ -22,48 +49,192 @@ const exerciseTypeMap: Record<string, string> = {
footwork: "脚步移动", footwork: "脚步移动",
shadow: "影子挥拍", shadow: "影子挥拍",
wall: "墙壁练习", wall: "墙壁练习",
recording: "录制归档",
live_analysis: "实时分析",
}; };
/** Formats a second count as zero-padded "MM:SS"; negative inputs clamp to "00:00". */
function formatSeconds(totalSeconds: number) {
  const whole = Math.max(0, Math.floor(totalSeconds));
  const mm = String(Math.floor(whole / 60)).padStart(2, "0");
  const ss = String(whole % 60).padStart(2, "0");
  return `${mm}:${ss}`;
}
/** Constrains `value` to [min, max] (upper bound applied first, then lower). */
function clamp(value: number, min: number, max: number) {
  const bounded = value > max ? max : value;
  return bounded < min ? min : bounded;
}
/** Builds the localStorage key under which a video's clip drafts are stored. */
function localStorageKey(videoId: number) {
  return "clip-plan:" + videoId;
}
/**
 * Resolves the editing-timeline length in seconds.
 *
 * Preference order: the real <video> duration, then the analysis-reported
 * duration (seconds, then milliseconds), and finally an estimate from the
 * analyzed frame count at an assumed 30 fps (rounded to 0.1 s, floored at
 * 5 s). Returns 0 when nothing is known.
 */
function resolveTimelineDurationSec(analysis: any, durationSec: number) {
  if (durationSec > 0) return durationSec;
  const { durationSec: analyzedSec, durationMs, framesAnalyzed } = analysis ?? {};
  if (typeof analyzedSec === "number" && analyzedSec > 0) return analyzedSec;
  if (typeof durationMs === "number" && durationMs > 0) return durationMs / 1000;
  if (typeof framesAnalyzed === "number" && framesAnalyzed > 0) {
    // Frame-count fallback assumes ~30 fps — TODO confirm against the capture rate.
    return Math.max(5, Math.round((framesAnalyzed / 30) * 10) / 10);
  }
  return 0;
}
/**
 * Derives suggested clip drafts from analysis key moments.
 *
 * Each of the first 6 key moments is mapped onto the timeline by its frame
 * ratio and expanded into a window of roughly 1.5 s before and 2.5 s after,
 * clamped so every clip stays within the timeline and is at least 0.5 s long.
 * Returns an empty list when no timeline or key moments are available.
 */
function buildSuggestedClips(analysis: any, durationSec: number) {
  const timelineDurationSec = resolveTimelineDurationSec(analysis, durationSec);
  const keyMoments = analysis?.keyMoments;
  if (!keyMoments || !Array.isArray(keyMoments) || timelineDurationSec <= 0) {
    const none: ClipDraft[] = [];
    return none;
  }
  const frameTotal = Math.max(analysis.framesAnalyzed || 0, 1);
  const clips: ClipDraft[] = [];
  keyMoments.slice(0, 6).forEach((moment: any, index: number) => {
    // Position the moment on the timeline by its frame ratio.
    const centerSec = clamp(((moment.frame || 0) / frameTotal) * timelineDurationSec, 0, timelineDurationSec);
    const startSec = clamp(centerSec - 1.5, 0, Math.max(0, timelineDurationSec - 0.5));
    const endSec = clamp(centerSec + 2.5, startSec + 0.5, timelineDurationSec);
    clips.push({
      id: `suggested-${index}-${moment.frame || index}`,
      startSec,
      endSec,
      label: moment.description || `建议片段 ${index + 1}`,
      notes: moment.type ? `来源于分析事件:${moment.type}` : "来源于分析关键时刻",
      source: "suggested",
    });
  });
  return clips;
}
/**
 * Serializes `data` as pretty-printed JSON and triggers a browser download
 * under `filename` via a transient object URL and a synthetic anchor click.
 */
function downloadJson(filename: string, data: unknown) {
  const payload = JSON.stringify(data, null, 2);
  const objectUrl = URL.createObjectURL(new Blob([payload], { type: "application/json" }));
  const anchor = document.createElement("a");
  anchor.href = objectUrl;
  anchor.download = filename;
  anchor.click();
  // click() is synchronous, so the object URL can be released immediately.
  URL.revokeObjectURL(objectUrl);
}
export default function Videos() { export default function Videos() {
const { user } = useAuth(); useAuth();
const { data: videos, isLoading } = trpc.video.list.useQuery(); const { data: videos, isLoading } = trpc.video.list.useQuery();
const { data: analyses } = trpc.analysis.list.useQuery(); const { data: analyses } = trpc.analysis.list.useQuery();
const [, setLocation] = useLocation(); const [, setLocation] = useLocation();
const getAnalysis = (videoId: number) => { const previewRef = useRef<HTMLVideoElement>(null);
return analyses?.find((a: any) => a.videoId === videoId); const [editorOpen, setEditorOpen] = useState(false);
const [selectedVideo, setSelectedVideo] = useState<any | null>(null);
const [videoDurationSec, setVideoDurationSec] = useState(0);
const [playbackSec, setPlaybackSec] = useState(0);
const [clipRange, setClipRange] = useState<[number, number]>([0, 5]);
const [clipLabel, setClipLabel] = useState("");
const [clipNotes, setClipNotes] = useState("");
const [clipDrafts, setClipDrafts] = useState<ClipDraft[]>([]);
const getAnalysis = useCallback((videoId: number) => {
return analyses?.find((analysis: any) => analysis.videoId === videoId);
}, [analyses]);
const activeAnalysis = selectedVideo ? getAnalysis(selectedVideo.id) : null;
const timelineDurationSec = useMemo(
() => resolveTimelineDurationSec(activeAnalysis, videoDurationSec),
[activeAnalysis, videoDurationSec],
);
const suggestedClips = useMemo(
() => buildSuggestedClips(activeAnalysis, timelineDurationSec),
[activeAnalysis, timelineDurationSec],
);
useEffect(() => {
if (!editorOpen || timelineDurationSec <= 0) return;
setClipRange((current) => {
const start = clamp(current[0] ?? 0, 0, Math.max(0, timelineDurationSec - 0.5));
const minEnd = clamp(start + 0.5, 0.5, timelineDurationSec);
const end = clamp(current[1] ?? Math.min(timelineDurationSec, 5), minEnd, timelineDurationSec);
if (start === current[0] && end === current[1]) {
return current;
}
return [start, end];
});
}, [editorOpen, timelineDurationSec]);
useEffect(() => {
if (!selectedVideo) return;
try {
const saved = localStorage.getItem(localStorageKey(selectedVideo.id));
if (saved) {
const parsed = JSON.parse(saved) as ClipDraft[];
setClipDrafts(parsed);
return;
}
} catch {
// Ignore corrupted local clip drafts and fall back to suggested clips.
}
setClipDrafts(suggestedClips);
}, [selectedVideo, suggestedClips]);
useEffect(() => {
if (!selectedVideo) return;
localStorage.setItem(localStorageKey(selectedVideo.id), JSON.stringify(clipDrafts));
}, [clipDrafts, selectedVideo]);
const openEditor = useCallback((video: any) => {
setSelectedVideo(video);
setEditorOpen(true);
setVideoDurationSec(0);
setPlaybackSec(0);
setClipLabel("");
setClipNotes("");
setClipRange([0, 5]);
}, []);
const addClip = useCallback((source: "manual" | "suggested", preset?: ClipDraft) => {
const nextStart = preset?.startSec ?? clipRange[0];
const nextEnd = preset?.endSec ?? clipRange[1];
const clip: ClipDraft = {
id: `${Date.now()}-${Math.random().toString(36).slice(2, 8)}`,
startSec: nextStart,
endSec: nextEnd,
label: preset?.label || clipLabel || `片段 ${clipDrafts.length + 1}`,
notes: preset?.notes || clipNotes,
source,
}; };
setClipDrafts((current) => [...current, clip].sort((a, b) => a.startSec - b.startSec));
setClipLabel("");
setClipNotes("");
toast.success("片段已加入轻剪辑草稿");
}, [clipDrafts.length, clipLabel, clipNotes, clipRange]);
if (isLoading) { if (isLoading) {
return ( return (
<div className="space-y-4"> <div className="space-y-4">
<Skeleton className="h-20 w-full" /> <Skeleton className="h-20 w-full" />
{[1, 2, 3].map(i => <Skeleton key={i} className="h-32 w-full" />)} {[1, 2, 3].map((index) => <Skeleton key={index} className="h-32 w-full" />)}
</div> </div>
); );
} }
return ( return (
<div className="space-y-6"> <div className="space-y-6">
<div className="flex items-center justify-between"> <section className="rounded-[28px] border border-border/60 bg-[radial-gradient(circle_at_top_left,_rgba(14,165,233,0.12),_transparent_28%),linear-gradient(180deg,rgba(255,255,255,1),rgba(248,250,252,0.96))] p-5 shadow-sm md:p-6">
<div className="flex flex-col gap-4 lg:flex-row lg:items-end lg:justify-between">
<div> <div>
<h1 className="text-2xl font-bold tracking-tight" data-testid="videos-title"></h1> <h1 className="text-2xl font-semibold tracking-tight" data-testid="videos-title"></h1>
<p className="text-muted-foreground text-sm mt-1"> <p className="mt-2 max-w-2xl text-sm leading-6 text-muted-foreground">
· {videos?.length || 0} /稿
</p> </p>
</div> </div>
<div className="flex flex-wrap gap-2">
<Button data-testid="videos-upload-button" onClick={() => setLocation("/analysis")} className="gap-2"> <Button data-testid="videos-upload-button" onClick={() => setLocation("/analysis")} className="gap-2">
<Video className="h-4 w-4" /> <Video className="h-4 w-4" />
</Button> </Button>
</div> </div>
</div>
</section>
{(!videos || videos.length === 0) ? ( {(!videos || videos.length === 0) ? (
<Card className="border-0 shadow-sm"> <Card className="border-0 shadow-sm">
<CardContent className="py-16 text-center"> <CardContent className="py-16 text-center">
<FileVideo className="h-12 w-12 mx-auto mb-4 text-muted-foreground/30" /> <FileVideo className="mx-auto mb-4 h-12 w-12 text-muted-foreground/30" />
<h3 className="font-semibold text-lg mb-2"></h3> <h3 className="mb-2 text-lg font-semibold"></h3>
<p className="text-muted-foreground text-sm mb-4">AI将自动分析姿势并给出建议</p> <p className="mb-4 text-sm text-muted-foreground"></p>
<Button onClick={() => setLocation("/analysis")} className="gap-2"> <Button onClick={() => setLocation("/analysis")} className="gap-2">
<Video className="h-4 w-4" /> <Video className="h-4 w-4" />
@@ -77,11 +248,10 @@ export default function Videos() {
const status = statusMap[video.analysisStatus] || statusMap.pending; const status = statusMap[video.analysisStatus] || statusMap.pending;
return ( return (
<Card key={video.id} className="border-0 shadow-sm hover:shadow-md transition-shadow" data-testid="video-card"> <Card key={video.id} className="border-0 shadow-sm transition-shadow hover:shadow-md" data-testid="video-card">
<CardContent className="p-4"> <CardContent className="p-4">
<div className="flex items-start gap-4"> <div className="flex items-start gap-4">
{/* Thumbnail / icon */} <div className="flex h-20 w-28 shrink-0 items-center justify-center overflow-hidden rounded-lg bg-black/5">
<div className="h-20 w-28 rounded-lg bg-black/5 flex items-center justify-center shrink-0 overflow-hidden">
{video.url ? ( {video.url ? (
<video src={video.url} className="h-full w-full object-cover" muted preload="metadata" /> <video src={video.url} className="h-full w-full object-cover" muted preload="metadata" />
) : ( ) : (
@@ -89,54 +259,65 @@ export default function Videos() {
)} )}
</div> </div>
{/* Info */} <div className="min-w-0 flex-1">
<div className="flex-1 min-w-0"> <div className="flex flex-col gap-3 lg:flex-row lg:items-start lg:justify-between">
<div className="flex items-start justify-between gap-2">
<div> <div>
<h3 className="font-medium text-sm truncate">{video.title}</h3> <h3 className="truncate text-sm font-medium">{video.title}</h3>
<div className="flex items-center gap-2 mt-1 flex-wrap"> <div className="mt-1 flex flex-wrap items-center gap-2">
<Badge className={`${status.color} border text-xs`}>{status.label}</Badge> <Badge className={`${status.color} border text-xs`}>{status.label}</Badge>
{video.exerciseType && ( {video.exerciseType ? (
<Badge variant="outline" className="text-xs"> <Badge variant="outline" className="text-xs">
{exerciseTypeMap[video.exerciseType] || video.exerciseType} {exerciseTypeMap[video.exerciseType] || video.exerciseType}
</Badge> </Badge>
)} ) : null}
<span className="text-xs text-muted-foreground flex items-center gap-1"> <span className="inline-flex items-center gap-1 text-xs text-muted-foreground">
<Clock className="h-3 w-3" /> <Clock className="h-3 w-3" />
{new Date(video.createdAt).toLocaleDateString("zh-CN")} {new Date(video.createdAt).toLocaleDateString("zh-CN")}
</span> </span>
<span className="text-xs text-muted-foreground"> <span className="text-xs text-muted-foreground">
{(video.fileSize / 1024 / 1024).toFixed(1)}MB {((video.fileSize || 0) / 1024 / 1024).toFixed(1)}MB
</span> </span>
</div> </div>
</div> </div>
<div className="flex flex-wrap gap-2">
{video.url ? (
<Button variant="outline" size="sm" className="gap-2" onClick={() => window.open(video.url, "_blank", "noopener,noreferrer")}>
<PlayCircle className="h-4 w-4" />
</Button>
) : null}
<Button variant="outline" size="sm" className="gap-2" onClick={() => openEditor(video)}>
<Scissors className="h-4 w-4" />
</Button>
</div>
</div> </div>
{/* Analysis summary */} {analysis ? (
{analysis && ( <div className="mt-3 flex flex-wrap items-center gap-4 text-xs">
<div className="flex items-center gap-4 mt-3 text-xs">
<div className="flex items-center gap-1"> <div className="flex items-center gap-1">
<BarChart3 className="h-3 w-3 text-primary" /> <BarChart3 className="h-3 w-3 text-primary" />
<span className="font-medium">{Math.round(analysis.overallScore || 0)}</span> <span className="font-medium">{Math.round(analysis.overallScore || 0)}</span>
</div> </div>
{(analysis.shotCount ?? 0) > 0 && ( {(analysis.shotCount ?? 0) > 0 ? (
<div className="flex items-center gap-1"> <div className="flex items-center gap-1">
<Zap className="h-3 w-3 text-orange-500" /> <Zap className="h-3 w-3 text-orange-500" />
<span>{analysis.shotCount}</span> <span>{analysis.shotCount} </span>
</div> </div>
)} ) : null}
{(analysis.avgSwingSpeed ?? 0) > 0 && ( {(analysis.strokeConsistency ?? 0) > 0 ? (
<div className="flex items-center gap-1"> <div className="text-muted-foreground">
{(analysis.avgSwingSpeed ?? 0).toFixed(1)}
</div>
)}
{(analysis.strokeConsistency ?? 0) > 0 && (
<div className="flex items-center gap-1 text-muted-foreground">
{Math.round(analysis.strokeConsistency ?? 0)}% {Math.round(analysis.strokeConsistency ?? 0)}%
</div> </div>
)} ) : null}
{Array.isArray(analysis.keyMoments) && analysis.keyMoments.length > 0 ? (
<Badge variant="outline" className="gap-1 text-xs">
<Sparkles className="h-3 w-3" />
{analysis.keyMoments.length}
</Badge>
) : null}
</div> </div>
)} ) : null}
</div> </div>
</div> </div>
</CardContent> </CardContent>
@@ -145,6 +326,222 @@ export default function Videos() {
})} })}
</div> </div>
)} )}
<Dialog open={editorOpen} onOpenChange={setEditorOpen}>
<DialogContent className="max-h-[92vh] max-w-5xl overflow-y-auto">
<DialogHeader>
<DialogTitle className="flex items-center gap-2">
<Scissors className="h-5 w-5 text-primary" />
PC
</DialogTitle>
<DialogDescription>
/稿 JSON
</DialogDescription>
</DialogHeader>
{selectedVideo ? (
<div className="grid gap-4 xl:grid-cols-[minmax(0,1.35fr)_minmax(320px,0.9fr)]">
<section className="space-y-4">
<div className="overflow-hidden rounded-3xl border border-border/60 bg-black">
<video
ref={previewRef}
src={selectedVideo.url}
className="aspect-video w-full object-contain"
controls
playsInline
onLoadedMetadata={(event) => {
const duration = event.currentTarget.duration || 0;
setVideoDurationSec(duration);
setClipRange([0, Math.min(duration, 5)]);
}}
onTimeUpdate={(event) => setPlaybackSec(event.currentTarget.currentTime || 0)}
/>
</div>
<Card className="border-0 shadow-sm">
<CardHeader className="pb-3">
<CardTitle className="text-base"></CardTitle>
<CardDescription></CardDescription>
</CardHeader>
<CardContent className="space-y-4">
<div className="grid gap-3 md:grid-cols-3">
<div className="rounded-2xl border border-border/60 bg-muted/20 p-4">
<div className="text-xs uppercase tracking-[0.16em] text-muted-foreground"></div>
<div className="mt-2 text-lg font-semibold">{formatSeconds(playbackSec)}</div>
</div>
<div className="rounded-2xl border border-border/60 bg-muted/20 p-4">
<div className="text-xs uppercase tracking-[0.16em] text-muted-foreground"></div>
<div className="mt-2 text-lg font-semibold">{formatSeconds(clipRange[0])}</div>
</div>
<div className="rounded-2xl border border-border/60 bg-muted/20 p-4">
<div className="text-xs uppercase tracking-[0.16em] text-muted-foreground"></div>
<div className="mt-2 text-lg font-semibold">{formatSeconds(clipRange[1])}</div>
</div>
</div>
{timelineDurationSec > 0 ? (
<Slider
value={clipRange}
min={0}
max={timelineDurationSec}
step={0.1}
onValueChange={(value) => {
if (value.length === 2) {
setClipRange([value[0] || 0, value[1] || Math.max(0.5, timelineDurationSec)]);
}
}}
/>
) : null}
<div className="flex flex-wrap gap-2">
<Button
variant="outline"
size="sm"
onClick={() => setClipRange(([_, end]) => [clamp(playbackSec, 0, Math.max(0, end - 0.5)), end])}
>
</Button>
<Button
variant="outline"
size="sm"
onClick={() => setClipRange(([start]) => [start, clamp(playbackSec, start + 0.5, timelineDurationSec || playbackSec + 0.5)])}
>
</Button>
<Button
variant="outline"
size="sm"
onClick={() => {
if (previewRef.current) previewRef.current.currentTime = clipRange[0];
}}
>
</Button>
</div>
<div className="grid gap-3 md:grid-cols-2">
<Input
value={clipLabel}
onChange={(event) => setClipLabel(event.target.value)}
placeholder="片段名称,例如:正手节奏稳定段"
className="h-11 rounded-2xl"
/>
<Button onClick={() => addClip("manual")} className="h-11 rounded-2xl gap-2">
<Scissors className="h-4 w-4" />
稿
</Button>
</div>
<Textarea
value={clipNotes}
onChange={(event) => setClipNotes(event.target.value)}
placeholder="记录这个片段为什么要保留,或后续想怎么讲解"
className="min-h-24 rounded-2xl"
/>
</CardContent>
</Card>
</section>
<aside className="space-y-4">
<Card className="border-0 shadow-sm">
<CardHeader className="pb-3">
<CardTitle className="text-base"></CardTitle>
<CardDescription>稿</CardDescription>
</CardHeader>
<CardContent className="space-y-3">
{suggestedClips.length === 0 ? (
<div className="rounded-2xl border border-dashed border-border/60 px-4 py-8 text-center text-sm text-muted-foreground">
</div>
) : (
suggestedClips.map((clip: ClipDraft) => (
<div key={clip.id} className="rounded-2xl border border-border/60 bg-muted/20 p-4">
<div className="font-medium">{clip.label}</div>
<div className="mt-1 text-xs text-muted-foreground">
{formatSeconds(clip.startSec)} - {formatSeconds(clip.endSec)}
</div>
<div className="mt-2 text-sm text-muted-foreground">{clip.notes}</div>
<div className="mt-3 flex gap-2">
<Button
variant="outline"
size="sm"
onClick={() => {
setClipRange([clip.startSec, clip.endSec]);
if (previewRef.current) previewRef.current.currentTime = clip.startSec;
}}
>
</Button>
<Button size="sm" onClick={() => addClip("suggested", clip)}>稿</Button>
</div>
</div>
))
)}
</CardContent>
</Card>
<Card className="border-0 shadow-sm">
<CardHeader className="pb-3">
<CardTitle className="text-base">稿</CardTitle>
<CardDescription>稿使</CardDescription>
</CardHeader>
<CardContent className="space-y-3">
{clipDrafts.length === 0 ? (
<div className="rounded-2xl border border-dashed border-border/60 px-4 py-8 text-center text-sm text-muted-foreground">
稿
</div>
) : (
clipDrafts.map((clip: ClipDraft) => (
<div key={clip.id} className="rounded-2xl border border-border/60 bg-muted/20 p-4">
<div className="flex items-start justify-between gap-3">
<div>
<div className="flex items-center gap-2">
<span className="font-medium">{clip.label}</span>
<Badge variant="outline">{clip.source === "manual" ? "手动" : "建议"}</Badge>
</div>
<div className="mt-1 text-xs text-muted-foreground">
{formatSeconds(clip.startSec)} - {formatSeconds(clip.endSec)}
</div>
</div>
<Button
variant="ghost"
size="icon"
onClick={() => setClipDrafts((current) => current.filter((item) => item.id !== clip.id))}
>
<Trash2 className="h-4 w-4" />
</Button>
</div>
{clip.notes ? <div className="mt-2 text-sm text-muted-foreground">{clip.notes}</div> : null}
</div>
))
)}
</CardContent>
</Card>
</aside>
</div>
) : null}
<DialogFooter className="flex gap-2">
<Button
variant="outline"
onClick={() => {
if (!selectedVideo) return;
downloadJson(`${selectedVideo.title}-clip-plan.json`, {
videoId: selectedVideo.id,
title: selectedVideo.title,
url: selectedVideo.url,
clipDrafts,
exportedAt: new Date().toISOString(),
});
}}
className="gap-2"
>
<Download className="h-4 w-4" />
稿
</Button>
<Button variant="outline" onClick={() => setEditorOpen(false)}></Button>
</DialogFooter>
</DialogContent>
</Dialog>
</div> </div>
); );
} }

查看文件

@@ -19,6 +19,7 @@
- 视频上传分析:上传 `webm/mp4` 视频进入视频库并触发分析流程 - 视频上传分析:上传 `webm/mp4` 视频进入视频库并触发分析流程
- 实时摄像头分析:浏览器端调用 MediaPipe,自动识别 `forehand/backhand/serve/volley/overhead/slice/lob/unknown` - 实时摄像头分析:浏览器端调用 MediaPipe,自动识别 `forehand/backhand/serve/volley/overhead/slice/lob/unknown`
- 识别稳定化:最近 6 帧动作结果会做时序加权和 winner/runner-up 比较,降低动作标签抖动
- 连续动作片段:自动聚合连续同类动作区间,单段不超过 10 秒,并保存得分、置信度与反馈摘要 - 连续动作片段:自动聚合连续同类动作区间,单段不超过 10 秒,并保存得分、置信度与反馈摘要
- 实时分析录制:分析阶段可同步保留浏览器端本地录制视频,停止分析后自动登记到系统 - 实时分析录制:分析阶段可同步保留浏览器端本地录制视频,停止分析后自动登记到系统
- 训练数据回写:实时分析与录制数据自动写入训练记录、日训练聚合、成就系统和 NTRP 评分 - 训练数据回写:实时分析与录制数据自动写入训练记录、日训练聚合、成就系统和 NTRP 评分
@@ -26,6 +27,7 @@
- 多模态图片输入:上传关键帧后会转换为公网可访问的绝对 URL,再提交给视觉模型 - 多模态图片输入:上传关键帧后会转换为公网可访问的绝对 URL,再提交给视觉模型
- 视觉标准图库:内置网球公网参考图,可直接发起视觉识别测试并保存结果 - 视觉标准图库:内置网球公网参考图,可直接发起视觉识别测试并保存结果
- 视频库:集中展示录制结果、上传结果和分析摘要 - 视频库:集中展示录制结果、上传结果和分析摘要
- PC 轻剪辑:视频库内可直接打开轻剪辑工作台,支持预览、设定入点/出点、建议片段和草稿导出
### 在线录制与媒体链路 ### 在线录制与媒体链路
@@ -67,7 +69,7 @@
- 仪表盘、训练、视频、录制、分析等模块一致的布局结构 - 仪表盘、训练、视频、录制、分析等模块一致的布局结构
- 全局任务中心:桌面侧边栏和移动端头部都可查看后台任务 - 全局任务中心:桌面侧边栏和移动端头部都可查看后台任务
- Admin 视觉测试页:`H1` 这类 admin 用户可查看全部视觉测试数据 - Admin 视觉测试页:`H1` 这类 admin 用户可查看全部视觉测试数据
- 为后续 PC 粗剪时间线预留媒体域与文档规范 - 视频库内置轻剪辑工作台,可在桌面端快速完成粗剪草稿、建议片段复核和导出
## 架构能力 ## 架构能力
@@ -87,7 +89,7 @@
- 浏览器录制兼容目标以 Chrome 为主 - 浏览器录制兼容目标以 Chrome 为主
- 当前 WebRTC 重点是浏览器到服务端的实时上行,不是多观众直播分发 - 当前 WebRTC 重点是浏览器到服务端的实时上行,不是多观众直播分发
- 当前 PC 剪辑仍处于基础媒体域准备阶段,未交付完整多轨编辑器 - 当前 PC 剪辑已交付轻量草稿工作台,但未交付完整多轨编辑器、批量转码和最终成片渲染
- 当前存储策略为本地卷优先,未接入对象存储归档 - 当前存储策略为本地卷优先,未接入对象存储归档
- 当前 `.env` 配置的视觉网关若忽略 `LLM_VISION_MODEL`,系统会回退到文本纠正;代码已支持独立视觉模型配置,但上游网关能力仍需单独确认 - 当前 `.env` 配置的视觉网关若忽略 `LLM_VISION_MODEL`,系统会回退到文本纠正;代码已支持独立视觉模型配置,但上游网关能力仍需单独确认
- 当前实时动作识别仍基于姿态启发式分类,不是专门训练的动作识别模型 - 当前实时动作识别仍基于姿态启发式分类,不是专门训练的动作识别模型
@@ -103,7 +105,7 @@
### PC 轻剪与训练回放 ### PC 轻剪与训练回放
- 交付单轨时间线粗剪:入点、出点、片段删除、关键帧封面和 marker 跳转 - 在当前轻剪辑工作台基础上继续交付单轨时间线粗剪:片段拖拽、片段删除、关键帧封面和 marker 跳转
- 增加“剪辑计划”实体,允许把自动 marker、手动 marker 和 AI 建议片段一起保存 - 增加“剪辑计划”实体,允许把自动 marker、手动 marker 和 AI 建议片段一起保存
- 提供双栏回放模式:左侧原视频,右侧姿态轨迹、节奏评分和文字纠正同步滚动 - 提供双栏回放模式:左侧原视频,右侧姿态轨迹、节奏评分和文字纠正同步滚动
- 支持从视频库直接发起导出任务,在后台生成训练集锦或问题片段合集 - 支持从视频库直接发起导出任务,在后台生成训练集锦或问题片段合集

查看文件

@@ -24,6 +24,7 @@
- 视觉模型 per-request model override 能力 - 视觉模型 per-request model override 能力
- 视觉标准图库路由与 admin/H1 全量可见性逻辑 - 视觉标准图库路由与 admin/H1 全量可见性逻辑
- 媒体工具函数,例如录制时长格式化与码率选择 - 媒体工具函数,例如录制时长格式化与码率选择
- 实时分析动作片段保存、成就回写和 NTRP 刷新入队逻辑
### 3. Go 媒体服务测试 ### 3. Go 媒体服务测试
@@ -48,6 +49,7 @@
这样可以自动验证前端主流程,而不依赖真实摄像头权限和真实 WebRTC 网络环境。 这样可以自动验证前端主流程,而不依赖真实摄像头权限和真实 WebRTC 网络环境。
当前 E2E 已覆盖新的后台任务流、实时分析入口、录制焦点视图和任务中心依赖的接口 mock。 当前 E2E 已覆盖新的后台任务流、实时分析入口、录制焦点视图和任务中心依赖的接口 mock。
当前 E2E 还覆盖视频库轻剪辑工作台,包括建议片段渲染、轻剪辑入口和草稿导出入口。
首次在新库或新 schema 上执行前,先跑: 首次在新库或新 schema 上执行前,先跑:
@@ -123,6 +125,18 @@ pnpm test:llm
- 打开 `https://te.hao.work/live-camera` - 打开 `https://te.hao.work/live-camera`
- 确认没有 `pageerror` 或首屏 `console.error` - 确认没有 `pageerror` 或首屏 `console.error`
真实站点 Playwright smoke script 可直接复用:
```bash
xvfb-run -a bash -lc 'cd /root/.codex/skills/playwright-skill && node run.js /tmp/playwright-te-smoke.js'
```
2026-03-15 已实际完成一次真实环境联调:
- 初次 smoke 发现 `https://te.hao.work/checkin` 仍显示旧版“每日打卡 / 训练打卡”,确认现网落后于仓库代码
- 执行 `docker compose up -d --build migrate app app-worker` 后再次 smoke
- 复测 `login / checkin / videos / recorder / live-camera / admin` 全部通过,且未捕获 `pageerror` / `console.error`
## Local browser prerequisites ## Local browser prerequisites
首次运行 Playwright 前执行: 首次运行 Playwright 前执行:

查看文件

@@ -1,12 +1,12 @@
# Verified Features # Verified Features
本文档记录当前已经通过自动化验证或构建验证的项目。更新时间:2026-03-15 01:39 CST。 本文档记录当前已经通过自动化验证或构建验证的项目。更新时间:2026-03-15 02:09 CST。
## 最新完整验证记录 ## 最新完整验证记录
- 通过命令:`pnpm verify` - 通过命令:`pnpm verify`
- 验证时间:2026-03-15 01:38 - 01:39 CST - 验证时间:2026-03-15 02:09 - 02:10 CST
- 结果摘要:`pnpm check` 通过,`pnpm test` 通过(95/95),`pnpm test:go` 通过,`pnpm build` 通过,`pnpm test:e2e` 通过(6/6) - 结果摘要:`pnpm check` 通过,`pnpm test` 通过(95/95),`pnpm test:go` 通过,`pnpm build` 通过,`pnpm test:e2e` 通过(7/7)
- 数据库状态:已执行 `set -a && source .env && set +a && pnpm exec drizzle-kit migrate`,`0007_grounded_live_ops` 已成功应用 - 数据库状态:已执行 `set -a && source .env && set +a && pnpm exec drizzle-kit migrate`,`0007_grounded_live_ops` 已成功应用
## 生产部署联测 ## 生产部署联测
@@ -14,6 +14,7 @@
| 项目 | 验证方式 | 状态 | | 项目 | 验证方式 | 状态 |
|------|----------|------| |------|----------|------|
| `https://te.hao.work/` HTTPS 访问 | `curl -I https://te.hao.work/` | 通过 | | `https://te.hao.work/` HTTPS 访问 | `curl -I https://te.hao.work/` | 通过 |
| `https://te.hao.work/checkin` 成就系统路由 | Playwright 登录后检查“成就系统” | 通过 |
| `https://te.hao.work/logs` 日志页访问 | `curl -I https://te.hao.work/logs` | 通过 | | `https://te.hao.work/logs` 日志页访问 | `curl -I https://te.hao.work/logs` | 通过 |
| `https://te.hao.work/vision-lab` 视觉测试页访问 | `curl -I https://te.hao.work/vision-lab` | 通过 | | `https://te.hao.work/vision-lab` 视觉测试页访问 | `curl -I https://te.hao.work/vision-lab` | 通过 |
| `http://te.hao.work:8302/` 4 位端口访问 | `curl -I http://te.hao.work:8302/` | 通过 | | `http://te.hao.work:8302/` 4 位端口访问 | `curl -I http://te.hao.work:8302/` | 通过 |
@@ -22,11 +23,13 @@
| 新用户邀请码校验 | Playwright 验证无邀请码被拦截、正确邀请码 `CA2026` 可创建新账号 | 通过 | | 新用户邀请码校验 | Playwright 验证无邀请码被拦截、正确邀请码 `CA2026` 可创建新账号 | 通过 |
| 日志页访问 | Playwright 以 `H1` 登录并访问 `/logs` | 通过 | | 日志页访问 | Playwright 以 `H1` 登录并访问 `/logs` | 通过 |
| 生产训练 / 实时分析 / 录制 / 视频库页面加载 | Playwright 访问 `/training``/live-camera``/recorder``/videos` | 通过 | | 生产训练 / 实时分析 / 录制 / 视频库页面加载 | Playwright 访问 `/training``/live-camera``/recorder``/videos` | 通过 |
| 生产视频库轻剪辑入口 | 本地 `pnpm test:e2e` + 真实站点 `/videos` smoke | 通过 |
| 生产训练计划后台任务提交 | Playwright 点击训练计划生成按钮并收到后台任务反馈 | 通过 | | 生产训练计划后台任务提交 | Playwright 点击训练计划生成按钮并收到后台任务反馈 | 通过 |
| 生产移动端录制焦点视图 | Playwright 移动端视口打开 `/recorder` 并验证焦点入口与操作壳层 | 通过 | | 生产移动端录制焦点视图 | Playwright 移动端视口打开 `/recorder` 并验证焦点入口与操作壳层 | 通过 |
| 生产前端运行时异常检查 | Playwright `pageerror` / `console.error` 检查 | 通过 | | 生产前端运行时异常检查 | Playwright `pageerror` / `console.error` 检查 | 通过 |
| 媒体健康检查 | `curl http://127.0.0.1:8081/media/health` | 通过 | | 媒体健康检查 | `curl http://127.0.0.1:8081/media/health` | 通过 |
| compose 自包含服务 | `docker compose ps -a``app` / `app-worker` / `db` / `media` / `media-worker` 正常运行,`migrate` 成功退出 | 通过 | | compose 自包含服务 | `docker compose ps -a``app` / `app-worker` / `db` / `media` / `media-worker` 正常运行,`migrate` 成功退出 | 通过 |
| 生产版本追平 | `docker compose up -d --build migrate app app-worker` 后复测 `login / checkin / videos / recorder / live-camera / admin` | 通过 |
## 构建与编译通过 ## 构建与编译通过
@@ -75,6 +78,7 @@
| 仪表盘 | 认证后主标题与入口按钮渲染 | 通过 | | 仪表盘 | 认证后主标题与入口按钮渲染 | 通过 |
| 训练计划 | 训练计划页加载与生成入口可见 | 通过 | | 训练计划 | 训练计划页加载与生成入口可见 | 通过 |
| 视频库 | 视频卡片渲染 | 通过 | | 视频库 | 视频卡片渲染 | 通过 |
| 视频库轻剪辑 | 打开轻剪辑工作台、显示建议片段、展示导出草稿入口 | 通过 |
| 实时分析 | 摄像头启动入口渲染 | 通过 | | 实时分析 | 摄像头启动入口渲染 | 通过 |
| 实时分析打分 | 启动分析后出现实时评分结果 | 通过 | | 实时分析打分 | 启动分析后出现实时评分结果 | 通过 |
| 在线录制 | 启动摄像头、开始录制、手动标记、结束归档 | 通过 | | 在线录制 | 启动摄像头、开始录制、手动标记、结束归档 | 通过 |

查看文件

@@ -35,6 +35,16 @@ test("videos page renders video library items", async ({ page }) => {
await expect(page.getByTestId("video-card")).toHaveCount(1); await expect(page.getByTestId("video-card")).toHaveCount(1);
}); });
// Opens the lightweight clip editor from the video library page and verifies
// that the workbench title, the suggested-segment entry, and the draft-export
// control all render after the editor is launched.
test("videos page opens lightweight clip editor", async ({ page }) => {
  await installAppMocks(page, { authenticated: true });
  await page.goto("/videos");

  // Launch the clip-editor workbench from the library card action.
  const openEditorButton = page.getByRole("button", { name: "轻剪辑" });
  await openEditorButton.click();

  // Locators are lazy, so naming them up front does not change when
  // the DOM is queried — each check still resolves at expect() time.
  const workbenchTitle = page.getByText("PC 轻剪辑工作台");
  const suggestedSegment = page.locator("text=建议保留:正手启动").first();
  const exportDraftButton = page.getByRole("button", { name: "导出草稿" });

  await expect(workbenchTitle).toBeVisible();
  await expect(suggestedSegment).toBeVisible();
  await expect(exportDraftButton).toBeVisible();
});
test("live camera page exposes camera startup controls", async ({ page }) => { test("live camera page exposes camera startup controls", async ({ page }) => {
await installAppMocks(page, { authenticated: true }); await installAppMocks(page, { authenticated: true });

查看文件

@@ -581,6 +581,11 @@ export async function installAppMocks(
shotCount: 16, shotCount: 16,
avgSwingSpeed: 6.2, avgSwingSpeed: 6.2,
strokeConsistency: 82, strokeConsistency: 82,
framesAnalyzed: 180,
keyMoments: [
{ frame: 45, type: "shot", description: "建议保留:正手启动" },
{ frame: 110, type: "shot", description: "建议保留:击球后收拍" },
],
createdAt: nowIso(), createdAt: nowIso(),
}, },
], ],