feat(refactor): implement the new rendering service architecture

- Add RenderTask
2025-09-12 14:41:58 +08:00
parent c36e838d4f
commit d770d84927
22 changed files with 1987 additions and 170 deletions

entity/effects/__init__.py (new file)

@@ -0,0 +1,25 @@
from .base import EffectProcessor, EffectRegistry
from .camera_shot import CameraShotEffect
from .speed import SpeedEffect
from .zoom import ZoomEffect
from .skip import SkipEffect
from .tail import TailEffect
# Register all built-in effect processors
registry = EffectRegistry()
registry.register('cameraShot', CameraShotEffect)
registry.register('ospeed', SpeedEffect)
registry.register('zoom', ZoomEffect)
registry.register('skip', SkipEffect)
registry.register('tail', TailEffect)
__all__ = [
'EffectProcessor',
'EffectRegistry',
'registry',
'CameraShotEffect',
'SpeedEffect',
'ZoomEffect',
'SkipEffect',
'TailEffect'
]
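A minimal usage sketch of the registry exported above, assuming the package is importable as entity.effects (the effect-string format follows parse_effect_string in base.py):

from entity.effects import registry

# look up a processor by its registered name and raw parameter string
name, params = registry.parse_effect_string("ospeed:0.5")
processor = registry.get_processor(name, params)
if processor is not None and processor.validate_params():
    filters, out_label = processor.generate_filter_args("[0:v]", effect_index=0)
    # filters -> ['[0:v]setpts=0.5*PTS[v_eff0]'], out_label -> '[v_eff0]'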

entity/effects/base.py (new file)

@@ -0,0 +1,94 @@
from abc import ABC, abstractmethod
from typing import Dict, List, Type, Any, Optional
import json
import logging
logger = logging.getLogger(__name__)
class EffectProcessor(ABC):
"""效果处理器抽象基类"""
def __init__(self, params: str = "", ext_data: Optional[Dict[str, Any]] = None):
self.params = params
self.ext_data = ext_data or {}
self.frame_rate = 25  # default frame rate
@abstractmethod
def validate_params(self) -> bool:
"""验证参数是否有效"""
pass
@abstractmethod
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""
Generate the FFmpeg filter arguments.
Args:
video_input: input video stream label (e.g. "[0:v]", "[v_eff1]")
effect_index: effect index, used to build a unique output label
Returns:
tuple: (filter_args_list, output_stream_identifier)
"""
pass
@abstractmethod
def get_effect_name(self) -> str:
"""获取效果名称"""
pass
def parse_params(self) -> List[str]:
"""解析参数字符串为列表"""
if not self.params:
return []
return self.params.split(',')
def get_pos_json(self) -> Dict[str, Any]:
"""获取位置JSON数据"""
pos_json_str = self.ext_data.get('posJson', '{}')
try:
return json.loads(pos_json_str) if pos_json_str != '{}' else {}
except Exception as e:
logger.warning(f"Failed to parse posJson: {e}")
return {}
class EffectRegistry:
"""效果处理器注册表"""
def __init__(self):
self._processors: Dict[str, Type[EffectProcessor]] = {}
def register(self, name: str, processor_class: Type[EffectProcessor]):
"""注册效果处理器"""
if not issubclass(processor_class, EffectProcessor):
raise ValueError(f"{processor_class} must be a subclass of EffectProcessor")
self._processors[name] = processor_class
logger.debug(f"Registered effect processor: {name}")
def get_processor(self, effect_name: str, params: str = "", ext_data: Optional[Dict[str, Any]] = None) -> Optional[EffectProcessor]:
"""获取效果处理器实例"""
if effect_name not in self._processors:
logger.warning(f"Unknown effect: {effect_name}")
return None
processor_class = self._processors[effect_name]
return processor_class(params, ext_data)
def list_effects(self) -> List[str]:
"""列出所有注册的效果"""
return list(self._processors.keys())
def parse_effect_string(self, effect_string: str) -> tuple[str, str]:
"""
Parse an effect string.
Args:
effect_string: effect string in the form "effect_name:params"
Returns:
tuple: (effect_name, params)
"""
if ':' in effect_string:
parts = effect_string.split(':', 1)  # keep any ':' inside the params part intact
return parts[0], parts[1] if len(parts) > 1 else ""
return effect_string, ""
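As a sketch of how a new processor would plug into this base class and registry, here is a hypothetical "mirror" effect built on FFmpeg's hflip filter:

from typing import List
from entity.effects import registry
from entity.effects.base import EffectProcessor

class MirrorEffect(EffectProcessor):
    """Hypothetical effect: horizontally mirrors the video."""
    def validate_params(self) -> bool:
        return True  # no parameters to validate
    def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
        output_stream = f"[v_eff{effect_index}]"
        return [f"{video_input}hflip{output_stream}"], output_stream
    def get_effect_name(self) -> str:
        return "mirror"

registry.register("mirror", MirrorEffect)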

entity/effects/camera_shot.py (new file)

@@ -0,0 +1,79 @@
from typing import List, Dict, Any
from .base import EffectProcessor
class CameraShotEffect(EffectProcessor):
"""相机镜头效果处理器"""
def validate_params(self) -> bool:
"""验证参数:start_time,duration,rotate_deg"""
params = self.parse_params()
if not params:
return True  # fall back to the defaults
# parameter format: "start_time,duration,rotate_deg"
if len(params) > 3:
return False
try:
for i, param in enumerate(params):
if param == '':
continue
if i == 2: # rotate_deg
int(param)
else: # start_time, duration
float(param)
return True
except ValueError:
return False
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""生成相机镜头效果的滤镜参数"""
if not self.validate_params():
return [], video_input
params = self.parse_params()
# default values
start = 3.0
duration = 1.0
rotate_deg = 0
if len(params) >= 1 and params[0] != '':
start = float(params[0])
if len(params) >= 2 and params[1] != '':
duration = float(params[1])
if len(params) >= 3 and params[2] != '':
rotate_deg = int(params[2])
filter_args = []
# output stream labels
start_out_str = "[eff_s]"
mid_out_str = "[eff_m]"
end_out_str = "[eff_e]"
final_output = f"[v_eff{effect_index}]"
# split the video stream into three branches
filter_args.append(f"{video_input}split=3{start_out_str}{mid_out_str}{end_out_str}")
# keep the frames before the freeze point
filter_args.append(f"{start_out_str}select=lt(n\\,{int(start * self.frame_rate)}){start_out_str}")
# keep the frames after the freeze point
filter_args.append(f"{end_out_str}select=gt(n\\,{int(start * self.frame_rate)}){end_out_str}")
# pick the single freeze frame and clone it for the requested duration
filter_args.append(f"{mid_out_str}select=eq(n\\,{int(start * self.frame_rate)}){mid_out_str}")
filter_args.append(f"{mid_out_str}tpad=start_mode=clone:start_duration={duration:.4f}{mid_out_str}")
# rotate the frozen frame if requested
if rotate_deg != 0:
filter_args.append(f"{mid_out_str}rotate=PI*{rotate_deg}/180{mid_out_str}")
# concatenate the three branches and regenerate timestamps
filter_args.append(f"{start_out_str}{mid_out_str}{end_out_str}concat=n=3:v=1:a=0,setpts=N/{self.frame_rate}/TB{final_output}")
return filter_args, final_output
def get_effect_name(self) -> str:
return "cameraShot"

entity/effects/skip.py (new file)

@@ -0,0 +1,38 @@
from typing import List
from .base import EffectProcessor
class SkipEffect(EffectProcessor):
"""跳过开头效果处理器"""
def validate_params(self) -> bool:
"""验证参数:跳过的秒数"""
if not self.params:
return True  # skip nothing by default
try:
skip_seconds = float(self.params)
return skip_seconds >= 0
except ValueError:
return False
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""生成跳过开头效果的滤镜参数"""
if not self.validate_params():
return [], video_input
if not self.params:
return [], video_input
skip_seconds = float(self.params)
if skip_seconds <= 0:
return [], video_input
output_stream = f"[v_eff{effect_index}]"
# use the trim filter to drop the beginning of the video
filter_args = [f"{video_input}trim=start={skip_seconds}{output_stream}"]
return filter_args, output_stream
def get_effect_name(self) -> str:
return "skip"

entity/effects/speed.py (new file)

@@ -0,0 +1,35 @@
from typing import List
from .base import EffectProcessor
class SpeedEffect(EffectProcessor):
"""视频变速效果处理器"""
def validate_params(self) -> bool:
"""验证参数:速度倍数"""
if not self.params:
return True  # no speed change by default
try:
speed = float(self.params)
return speed > 0
except ValueError:
return False
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""生成变速效果的滤镜参数"""
if not self.validate_params():
return [], video_input
if not self.params or self.params == "1":
return [], video_input  # no speed change needed
speed = float(self.params)
output_stream = f"[v_eff{effect_index}]"
# scale the presentation timestamps with setpts: a factor below 1 speeds the video up, above 1 slows it down
filter_args = [f"{video_input}setpts={speed}*PTS{output_stream}"]
return filter_args, output_stream
def get_effect_name(self) -> str:
return "ospeed"

entity/effects/tail.py (new file)

@@ -0,0 +1,42 @@
from typing import List
from .base import EffectProcessor
class TailEffect(EffectProcessor):
"""保留末尾效果处理器"""
def validate_params(self) -> bool:
"""验证参数:保留的秒数"""
if not self.params:
return True  # keep the full video by default
try:
tail_seconds = float(self.params)
return tail_seconds >= 0
except ValueError:
return False
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""生成保留末尾效果的滤镜参数"""
if not self.validate_params():
return [], video_input
if not self.params:
return [], video_input
tail_seconds = float(self.params)
if tail_seconds <= 0:
return [], video_input
output_stream = f"[v_eff{effect_index}]"
# reverse + trim + reverse to keep exactly the last N seconds
filter_args = [
f"{video_input}reverse[v_rev{effect_index}]",
f"[v_rev{effect_index}]trim=duration={tail_seconds}[v_trim{effect_index}]",
f"[v_trim{effect_index}]reverse{output_stream}"
]
return filter_args, output_stream
def get_effect_name(self) -> str:
return "tail"

entity/effects/zoom.py (new file)

@@ -0,0 +1,89 @@
from typing import List
import json
from .base import EffectProcessor
class ZoomEffect(EffectProcessor):
"""缩放效果处理器"""
def validate_params(self) -> bool:
"""验证参数:start_time,zoom_factor,duration"""
params = self.parse_params()
if len(params) < 3:
return False
try:
start_time = float(params[0])
zoom_factor = float(params[1])
duration = float(params[2])
return (start_time >= 0 and
zoom_factor > 0 and
duration >= 0)
except (ValueError, IndexError):
return False
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""生成缩放效果的滤镜参数"""
if not self.validate_params():
return [], video_input
params = self.parse_params()
start_time = float(params[0])
zoom_factor = float(params[1])
duration = float(params[2])
if zoom_factor == 1:
return [], video_input  # no zoom needed
output_stream = f"[v_eff{effect_index}]"
# zoom center point
center_x, center_y = self._get_zoom_center()
filter_args = []
if duration == 0:
# static zoom (for the whole video duration)
x_expr = f"({center_x})-(ow*zoom)/2"
y_expr = f"({center_y})-(oh*zoom)/2"
filter_args.append(
f"{video_input}trim=start={start_time},zoompan=z={zoom_factor}:x={x_expr}:y={y_expr}:d=1{output_stream}"
)
else:
# dynamic zoom (only within the given time range)
zoom_expr = f"if(between(t\\,{start_time}\\,{start_time + duration})\\,{zoom_factor}\\,1)"
x_expr = f"({center_x})-(ow*zoom)/2"
y_expr = f"({center_y})-(oh*zoom)/2"
filter_args.append(
f"{video_input}zoompan=z={zoom_expr}:x={x_expr}:y={y_expr}:d=1{output_stream}"
)
return filter_args, output_stream
def _get_zoom_center(self) -> tuple[str, str]:
"""获取缩放中心点坐标表达式"""
# default: frame center
center_x = "iw/2"
center_y = "ih/2"
pos_json = self.get_pos_json()
if pos_json:
_f_x = pos_json.get('ltX', 0)
_f_x2 = pos_json.get('rbX', 0)
_f_y = pos_json.get('ltY', 0)
_f_y2 = pos_json.get('rbY', 0)
_v_w = pos_json.get('imgWidth', 1)
_v_h = pos_json.get('imgHeight', 1)
if _v_w > 0 and _v_h > 0:
# center point in the posJson coordinate system
center_x_ratio = (_f_x + _f_x2) / (2 * _v_w)
center_y_ratio = (_f_y + _f_y2) / (2 * _v_h)
# convert to the video coordinate system
center_x = f"iw*{center_x_ratio:.6f}"
center_y = f"ih*{center_y_ratio:.6f}"
return center_x, center_y
def get_effect_name(self) -> str:
return "zoom"


@@ -0,0 +1,281 @@
import json
import os
import time
from typing import List, Optional
from config.settings import get_ffmpeg_config
from entity.render_task import RenderTask, TaskType
from entity.effects import registry as effect_registry
from util.exceptions import FFmpegError
from util.ffmpeg import probe_video_info, probe_video_audio
import logging
logger = logging.getLogger(__name__)
class FFmpegCommandBuilder:
"""FFmpeg命令构建器"""
def __init__(self, task: RenderTask):
self.task = task
self.config = get_ffmpeg_config()
def build_command(self) -> List[str]:
"""构建FFmpeg命令"""
self.task.update_task_type()
if self.task.task_type == TaskType.COPY:
return self._build_copy_command()
elif self.task.task_type == TaskType.CONCAT:
return self._build_concat_command()
elif self.task.task_type == TaskType.ENCODE:
return self._build_encode_command()
else:
raise FFmpegError(f"Unsupported task type: {self.task.task_type}")
def _build_copy_command(self) -> List[str]:
"""构建复制命令"""
if len(self.task.input_files) == 1:
input_file = self.task.input_files[0]
if input_file == self.task.output_file:
return []  # nothing to do
return [
"ffmpeg", "-y", "-hide_banner",
"-i", self.task.input_files[0],
"-c", "copy",
self.task.output_file
]
def _build_concat_command(self) -> List[str]:
"""构建拼接命令"""
args = ["ffmpeg", "-y", "-hide_banner"]
input_args = []
output_args = [*self.config.default_args]
filter_args = []
if len(self.task.input_files) == 1:
# single file
file = self.task.input_files[0]
input_args.extend(["-i", file])
self.task.mute = not probe_video_audio(file)
else:
# multiple files: use the concat demuxer
tmp_file = f"tmp_concat_{time.time()}.txt"
with open(tmp_file, "w", encoding="utf-8") as f:
for input_file in self.task.input_files:
f.write(f"file '{input_file}'\n")
input_args.extend(["-f", "concat", "-safe", "0", "-i", tmp_file])
self.task.mute = not probe_video_audio(tmp_file, "concat")
# map the video stream and stream-copy it
output_args.extend(["-map", "0:v", "-c:v", "copy"])
# audio handling
audio_output_str = self._handle_audio_concat(input_args, filter_args)
if audio_output_str:
output_args.extend(["-map", audio_output_str])
output_args.extend(self.config.audio_args)
# Annex B handling
if self.task.annexb:
output_args.extend(["-bsf:v", self._get_mp4toannexb_filter()])
output_args.extend(["-bsf:a", "setts=pts=DTS"])
output_args.extend(["-f", "mpegts"])
else:
output_args.extend(["-f", "mp4"])
filter_complex = ["-filter_complex", ";".join(filter_args)] if filter_args else []
return args + input_args + filter_complex + output_args + [self.task.output_file]
def _build_encode_command(self) -> List[str]:
"""构建编码命令"""
args = ["ffmpeg", "-y", "-hide_banner"]
input_args = []
filter_args = []
output_args = [
*self.config.video_args,
*self.config.audio_args,
*self.config.encoder_args,
*self.config.default_args
]
# Annex B handling
if self.task.annexb:
output_args.extend(["-bsf:v", self._get_mp4toannexb_filter()])
output_args.extend(["-reset_timestamps", "1"])
# input files
for input_file in self.task.input_files:
input_args.extend(["-i", input_file])
# video stream processing
video_output_str = "[0:v]"
effect_index = 0
# center crop
if self.task.center_cut == 1:
video_output_str, effect_index = self._add_center_cut(filter_args, video_output_str, effect_index)
# zoom crop
if self.task.zoom_cut == 1 and self.task.resolution:
video_output_str, effect_index = self._add_zoom_cut(filter_args, video_output_str, effect_index)
# effects
video_output_str, effect_index = self._add_effects(filter_args, video_output_str, effect_index)
# resolution scaling
if self.task.resolution:
filter_args.append(f"{video_output_str}scale={self.task.resolution.replace('x', ':')}[v]")
video_output_str = "[v]"
# LUTs
for lut in self.task.luts:
filter_args.append(f"{video_output_str}lut3d=file={lut}{video_output_str}")
# overlays
video_output_str = self._add_overlays(input_args, filter_args, video_output_str)
# subtitles
for subtitle in self.task.subtitles:
filter_args.append(f"{video_output_str}ass={subtitle}[v]")
video_output_str = "[v]"
# map the video stream
output_args.extend(["-map", video_output_str])
output_args.extend(["-r", str(self.task.frame_rate)])
output_args.extend(["-fps_mode", "cfr"])
# audio handling
audio_output_str = self._handle_audio_encode(input_args, filter_args)
if audio_output_str:
output_args.extend(["-map", audio_output_str])
filter_complex = ["-filter_complex", ";".join(filter_args)] if filter_args else []
return args + input_args + filter_complex + output_args + [self.task.output_file]
def _add_center_cut(self, filter_args: List[str], video_input: str, effect_index: int) -> tuple[str, int]:
"""添加中心裁剪"""
pos_json = self.task.ext_data.get('posJson', '{}')
try:
pos_data = json.loads(pos_json) if pos_json != '{}' else {}
except:
pos_data = {}
_v_w = pos_data.get('imgWidth', 1)
_f_x = pos_data.get('ltX', 0)
_f_x2 = pos_data.get('rbX', 0)
_x = f'{float((_f_x2 + _f_x)/(2 * _v_w)):.4f}*iw-ih*ih/(2*iw)'
filter_args.append(f"{video_input}crop=x={_x}:y=0:w=ih*ih/iw:h=ih[v_cut{effect_index}]")
return f"[v_cut{effect_index}]", effect_index + 1
def _add_zoom_cut(self, filter_args: List[str], video_input: str, effect_index: int) -> tuple[str, int]:
"""添加缩放裁剪"""
# 获取输入视频尺寸
input_file = self.task.input_files[0]
_iw, _ih, _ = probe_video_info(input_file)
_w, _h = self.task.resolution.split('x', 1)
pos_json = self.task.ext_data.get('posJson', '{}')
try:
pos_data = json.loads(pos_json) if pos_json != '{}' else {}
except Exception:
pos_data = {}
_v_w = pos_data.get('imgWidth', 1)
_v_h = pos_data.get('imgHeight', 1)
_f_x = pos_data.get('ltX', 0)
_f_x2 = pos_data.get('rbX', 0)
_f_y = pos_data.get('ltY', 0)
_f_y2 = pos_data.get('rbY', 0)
_x = min(max(0, int((_f_x + _f_x2) / 2 - int(_w) / 2)), _iw - int(_w))
_y = min(max(0, int((_f_y + _f_y2) / 2 - int(_h) / 2)), _ih - int(_h))
filter_args.append(f"{video_input}crop=x={_x}:y={_y}:w={_w}:h={_h}[vz_cut{effect_index}]")
return f"[vz_cut{effect_index}]", effect_index + 1
def _add_effects(self, filter_args: List[str], video_input: str, effect_index: int) -> tuple[str, int]:
"""添加效果处理"""
current_input = video_input
for effect_str in self.task.effects:
effect_name, params = effect_registry.parse_effect_string(effect_str)
processor = effect_registry.get_processor(effect_name, params, self.task.ext_data)
if processor:
processor.frame_rate = self.task.frame_rate
effect_filters, output_stream = processor.generate_filter_args(current_input, effect_index)
if effect_filters:
filter_args.extend(effect_filters)
current_input = output_stream
effect_index += 1
return current_input, effect_index
def _add_overlays(self, input_args: List[str], filter_args: List[str], video_input: str) -> str:
"""添加覆盖层"""
current_input = video_input
for overlay in self.task.overlays:
input_index = input_args.count("-i") // 2 # 每个输入占两个参数 -i filename
input_args.extend(["-i", overlay])
if self.config.old_ffmpeg:
filter_args.append(f"{current_input}[{input_index}:v]scale2ref=iw:ih[v]")
else:
filter_args.append(f"{current_input}[{input_index}:v]scale=rw:rh[v]")
filter_args.append(f"[v][{input_index}:v]overlay=1:eof_action=endall[v]")
current_input = "[v]"
return current_input
def _handle_audio_concat(self, input_args: List[str], filter_args: List[str]) -> Optional[str]:
"""处理concat模式的音频"""
audio_output_str = ""
if self.task.mute:
input_index = input_args.count("-i") // 2
input_args.extend(["-f", "lavfi", "-i", "anullsrc=cl=stereo:r=48000"])
audio_output_str = f"[{input_index}:a]"
else:
audio_output_str = "[0:a]"
for audio in self.task.audios:
input_index = input_args.count("-i") // 2
input_args.extend(["-i", audio.replace("\\", "/")])
filter_args.append(f"{audio_output_str}[{input_index}:a]amix=duration=shortest:dropout_transition=0:normalize=0[a]")
audio_output_str = "[a]"
return audio_output_str.strip("[]") if audio_output_str else None
def _handle_audio_encode(self, input_args: List[str], filter_args: List[str]) -> Optional[str]:
"""处理encode模式的音频"""
audio_output_str = ""
if self.task.mute:
input_index = input_args.count("-i") // 2
input_args.extend(["-f", "lavfi", "-i", "anullsrc=cl=stereo:r=48000"])
filter_args.append(f"[{input_index}:a]acopy[a]")
audio_output_str = "[a]"
else:
audio_output_str = "[0:a]"
for audio in self.task.audios:
input_index = input_args.count("-i") // 2
input_args.extend(["-i", audio.replace("\\", "/")])
filter_args.append(f"{audio_output_str}[{input_index}:a]amix=duration=shortest:dropout_transition=0:normalize=0[a]")
audio_output_str = "[a]"
return audio_output_str if audio_output_str else None
def _get_mp4toannexb_filter(self) -> str:
"""获取mp4toannexb滤镜"""
encoder_args_str = " ".join(self.config.encoder_args).lower()
if "hevc" in encoder_args_str:
return "hevc_mp4toannexb"
return "h264_mp4toannexb"

entity/render_task.py (new file)

@@ -0,0 +1,146 @@
import os
import uuid
from typing import List, Optional, Dict, Any
from dataclasses import dataclass, field
from enum import Enum
from config.settings import get_ffmpeg_config
from util.exceptions import TaskValidationError, EffectError
from entity.effects import registry as effect_registry
class TaskType(Enum):
COPY = "copy"
CONCAT = "concat"
ENCODE = "encode"
@dataclass
class RenderTask:
"""渲染任务数据类,只包含任务数据,不包含处理逻辑"""
input_files: List[str] = field(default_factory=list)
output_file: str = ""
task_type: TaskType = TaskType.COPY
# video parameters
resolution: Optional[str] = None
frame_rate: int = 25
speed: float = 1.0
mute: bool = True
annexb: bool = False
# crop parameters
zoom_cut: Optional[int] = None
center_cut: Optional[int] = None
# resource lists
subtitles: List[str] = field(default_factory=list)
luts: List[str] = field(default_factory=list)
audios: List[str] = field(default_factory=list)
overlays: List[str] = field(default_factory=list)
effects: List[str] = field(default_factory=list)
# extension data
ext_data: Dict[str, Any] = field(default_factory=dict)
def __post_init__(self):
"""初始化后处理"""
# 检测annexb格式
for input_file in self.input_files:
if isinstance(input_file, str) and input_file.endswith(".ts"):
self.annexb = True
break
# auto-generate the output file name
if not self.output_file:
self._generate_output_filename()
def _generate_output_filename(self):
"""生成输出文件名"""
if self.annexb:
self.output_file = f"rand_{uuid.uuid4()}.ts"
else:
self.output_file = f"rand_{uuid.uuid4()}.mp4"
def add_input_file(self, file_path: str):
"""添加输入文件"""
self.input_files.append(file_path)
if file_path.endswith(".ts"):
self.annexb = True
def add_overlay(self, *overlays: str):
"""添加覆盖层"""
for overlay in overlays:
if overlay.endswith('.ass'):
self.subtitles.append(overlay)
else:
self.overlays.append(overlay)
def add_audios(self, *audios: str):
"""添加音频"""
self.audios.extend(audios)
def add_lut(self, *luts: str):
"""添加LUT"""
self.luts.extend(luts)
def add_effect(self, *effects: str):
"""添加效果"""
self.effects.extend(effects)
def validate(self) -> bool:
"""验证任务参数"""
if not self.input_files:
raise TaskValidationError("No input files specified")
# validate every configured effect
for effect_str in self.effects:
effect_name, params = effect_registry.parse_effect_string(effect_str)
processor = effect_registry.get_processor(effect_name, params, self.ext_data)
if processor and not processor.validate_params():
raise EffectError(f"Invalid parameters for effect {effect_name}: {params}", effect_name, params)
return True
def can_copy(self) -> bool:
"""检查是否可以直接复制"""
return (len(self.luts) == 0 and
len(self.overlays) == 0 and
len(self.subtitles) == 0 and
len(self.effects) == 0 and
self.speed == 1 and
len(self.audios) == 0 and
len(self.input_files) == 1 and
self.zoom_cut is None and
self.center_cut is None)
def can_concat(self) -> bool:
"""检查是否可以使用concat模式"""
return (len(self.luts) == 0 and
len(self.overlays) == 0 and
len(self.subtitles) == 0 and
len(self.effects) == 0 and
self.speed == 1 and
self.zoom_cut is None and
self.center_cut is None)
def determine_task_type(self) -> TaskType:
"""自动确定任务类型"""
if self.can_copy():
return TaskType.COPY
elif self.can_concat():
return TaskType.CONCAT
else:
return TaskType.ENCODE
def update_task_type(self):
"""更新任务类型"""
self.task_type = self.determine_task_type()
def need_processing(self) -> bool:
"""检查是否需要处理"""
if self.annexb:
return True
return not self.can_copy()
def get_output_extension(self) -> str:
"""获取输出文件扩展名"""
return ".ts" if self.annexb else ".mp4"