feat(annotation): support annotation tasks for audio and video data types

- Add constant definitions for the audio and video data types
- Implement built-in configurations for audio and video annotation templates
- Extend the frontend components to filter annotation templates by data type
- Refactor the backend editor service to handle building audio and video tasks
- Update the database initialization script to include the audio and video annotation templates
- Add preview URL mapping logic for the audio and video data types
Date: 2026-01-26 23:54:40 +08:00
Parent: 47295e8cdf
Commit: 977a930c97
5 changed files with 461 additions and 59 deletions
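
The hunks below import DataType from annotation.model alongside AnnotationTemplate, but annotation.model itself is not among the hunks shown. As a minimal sketch of the data type constants described in the first bullet of the commit message — assuming an enum with string-valued members, since only the member names TEXT, IMAGE, AUDIO, and VIDEO are confirmed by the diff:

// Hypothetical sketch of the data type constants in annotation.model.
// Only the member names are confirmed by the diff below; the enum form
// and the string values are assumptions.
export enum DataType {
  TEXT = "TEXT",
  IMAGE = "IMAGE",
  AUDIO = "AUDIO",
  VIDEO = "VIDEO",
}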


@@ -12,7 +12,7 @@ import {
   createAnnotationTaskUsingPost,
   queryAnnotationTemplatesUsingGet,
 } from "../annotation.api";
-import type { AnnotationTemplate } from "../annotation.model";
+import { DataType, type AnnotationTemplate } from "../annotation.model";
 import TemplateConfigurationTreeEditor from "../components/TemplateConfigurationTreeEditor";
 const DEFAULT_SEGMENTATION_ENABLED = true;
@@ -20,6 +20,22 @@ const SEGMENTATION_OPTIONS = [
   { label: "需要切片段", value: true },
   { label: "不需要切片段", value: false },
 ];
+const resolveTemplateDataType = (datasetType?: DatasetType) => {
+  switch (datasetType) {
+    case DatasetType.TEXT:
+      return DataType.TEXT;
+    case DatasetType.IMAGE:
+      return DataType.IMAGE;
+    case DatasetType.AUDIO:
+      return DataType.AUDIO;
+    case DatasetType.VIDEO:
+      return DataType.VIDEO;
+    default:
+      return undefined;
+  }
+};
+const resolveDefaultTemplate = (items: AnnotationTemplate[]) =>
+  items.find((template) => template.builtIn) || items[0];
 export default function AnnotationTaskCreate() {
   const navigate = useNavigate();
@@ -48,9 +64,17 @@ export default function AnnotationTaskCreate() {
     }
   };
-  const fetchTemplates = async () => {
+  const fetchTemplates = async (dataType?: string) => {
+    if (!dataType) {
+      setTemplates([]);
+      return;
+    }
     try {
-      const response = await queryAnnotationTemplatesUsingGet({ page: 1, size: 200 });
+      const response = await queryAnnotationTemplatesUsingGet({
+        page: 1,
+        size: 200,
+        dataType,
+      });
       if (response.code === 200 && response.data) {
         setTemplates(response.data.content || []);
       } else {
@@ -64,9 +88,39 @@ export default function AnnotationTaskCreate() {
   useEffect(() => {
     fetchDatasets();
-    fetchTemplates();
   }, []);
+  useEffect(() => {
+    if (!selectedDataset) {
+      setTemplates([]);
+      form.setFieldsValue({ templateId: undefined });
+      setLabelConfig("");
+      return;
+    }
+    const dataType = resolveTemplateDataType(selectedDataset.datasetType);
+    fetchTemplates(dataType);
+  }, [form, selectedDataset]);
+  useEffect(() => {
+    if (configMode !== "template" || !selectedDataset) {
+      return;
+    }
+    if (templates.length === 0) {
+      form.setFieldsValue({ templateId: undefined });
+      setLabelConfig("");
+      return;
+    }
+    const currentTemplateId = form.getFieldValue("templateId");
+    const currentTemplate = templates.find((template) => template.id === currentTemplateId);
+    if (currentTemplate) {
+      return;
+    }
+    const defaultTemplate = resolveDefaultTemplate(templates);
+    if (defaultTemplate) {
+      form.setFieldsValue({ templateId: defaultTemplate.id });
+      setLabelConfig(defaultTemplate.labelConfig || "");
+    }
+  }, [configMode, form, selectedDataset, templates]);
   const handleTemplateSelect = (value?: string) => {
     if (!value) {
       setLabelConfig("");
@@ -171,6 +225,8 @@ export default function AnnotationTaskCreate() {
             }))}
             onChange={(value) => {
               setSelectedDatasetId(value);
+              form.setFieldsValue({ templateId: undefined });
+              setLabelConfig("");
               const dataset = datasets.find((item) => item.id === value);
               if (dataset?.datasetType === DatasetType.TEXT) {
                 const currentValue = form.getFieldValue("segmentationEnabled");
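
The preview URL mapping mentioned in the last bullet of the commit message sits in one of the other changed files and is not shown in the hunks above. Purely as an illustration of the idea, with every field name and the helper itself assumed rather than taken from the project, such a mapping keyed by data type could look like:

// Hypothetical illustration only: pick the dataset field that carries the
// preview URL for a given data type. All names here are assumptions.
const PREVIEW_FIELD_BY_DATA_TYPE: Record<DataType, string> = {
  [DataType.TEXT]: "textUrl",
  [DataType.IMAGE]: "imageUrl",
  [DataType.AUDIO]: "audioUrl",
  [DataType.VIDEO]: "videoUrl",
};

const resolvePreviewField = (dataType: DataType) => PREVIEW_FIELD_BY_DATA_TYPE[dataType];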