1 Commit

Commit d770d84927
feat(refactor): implement the new render service architecture
- Add RenderTask
2025-09-12 14:41:58 +08:00
54 changed files with 3478 additions and 5823 deletions


@@ -1,65 +1,13 @@
# ===================
# API configuration
# ===================
API_ENDPOINT=http://127.0.0.1:18084/api
TEMPLATE_DIR=template/
API_ENDPOINT=https://zhentuai.com/task/v1
ACCESS_KEY=TEST_ACCESS_KEY
WORKER_ID=1
# ===================
# Directories
# ===================
TEMP_DIR=tmp/
# ===================
# Concurrency and scheduling
# ===================
#MAX_CONCURRENCY=4 # maximum number of concurrent tasks
#HEARTBEAT_INTERVAL=5 # heartbeat interval (seconds)
#LEASE_EXTENSION_THRESHOLD=60 # lease renewal threshold (seconds): how far before expiry to renew
#LEASE_EXTENSION_DURATION=300 # lease renewal duration (seconds)
# ===================
# Capabilities
# ===================
# Supported task types, comma separated; all types are supported by default
#CAPABILITIES=RENDER_SEGMENT_VIDEO,PREPARE_JOB_AUDIO,PACKAGE_SEGMENT_TS,FINALIZE_MP4
# ===================
# Timeouts
# ===================
#FFMPEG_TIMEOUT=3600 # FFmpeg execution timeout (seconds)
#DOWNLOAD_TIMEOUT=300 # download timeout (seconds)
#UPLOAD_TIMEOUT=600 # upload timeout (seconds)
# ===================
# Hardware acceleration and multi-GPU
# ===================
# Hardware acceleration type: none, qsv, cuda
HW_ACCEL=none
# GPU device list (comma-separated device indexes)
# Unset: auto-detect all devices
# Single-device example: GPU_DEVICES=0
# Multi-device example: GPU_DEVICES=0,1,2
#GPU_DEVICES=0,1
# ===================
# Asset cache
# ===================
#CACHE_ENABLED=true # enable the asset cache
#CACHE_DIR= # cache directory, defaults to TEMP_DIR/cache
#CACHE_MAX_SIZE_GB=0 # maximum cache size (GB); 0 means unlimited
# ===================
# URL mapping (intranet download acceleration)
# ===================
# Format: src1|dst1,src2|dst2
#HTTP_REPLACE_MAP="https://cdcdn.zhentuai.com|http://192.168.10.254:9000"
# ===================
# Upload configuration
# ===================
# Upload method: HTTP by default, rclone optional
#UPLOAD_METHOD=rclone
#RCLONE_CONFIG_FILE= # path to the rclone config file
#RCLONE_REPLACE_MAP="https://oss.example.com|alioss://bucket"
#REDIRECT_TO_URL=https://renderworker-deuvulkhes.cn-shanghai.fcapp.run/
# QSV
ENCODER_ARGS="-c:v h264_qsv -global_quality 28 -look_ahead 1"
# NVENC
#ENCODER_ARGS="-c:v h264_nvenc -cq:v 24 -preset:v p7 -tune:v hq -profile:v high"
# HEVC
#VIDEO_ARGS="-profile:v main
UPLOAD_METHOD="rclone"
RCLONE_REPLACE_MAP="https://oss.zhentuai.com|alioss://frametour-assets,https://frametour-assets.oss-cn-shanghai.aliyuncs.com|alioss://frametour-assets"
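
The two *_REPLACE_MAP values above follow the src1|dst1,src2|dst2 format documented in the URL-mapping section. A minimal sketch of how a worker could parse and apply such a mapping (the helper names are illustrative, not part of this repository):

import os

def parse_replace_map(value: str) -> list[tuple[str, str]]:
    # "src1|dst1,src2|dst2" -> [(src1, dst1), (src2, dst2)], skipping malformed entries
    pairs = []
    for entry in value.split(","):
        entry = entry.strip()
        if "|" not in entry:
            continue
        src, dst = entry.split("|", 1)
        pairs.append((src, dst))
    return pairs

def apply_replace_map(url: str, pairs: list[tuple[str, str]]) -> str:
    # Rewrite the URL with the first matching prefix mapping, otherwise return it unchanged
    for src, dst in pairs:
        if url.startswith(src):
            return dst + url[len(src):]
    return url

pairs = parse_replace_map(os.getenv("RCLONE_REPLACE_MAP", ""))
print(apply_replace_map("https://oss.zhentuai.com/a/b.mp4", pairs))  # -> alioss://frametour-assets/a/b.mp4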

4
.gitignore vendored

@@ -31,6 +31,4 @@ target/
.venv
venv/
cython_debug/
.env
.serena
.claude
.env

42
app.py Normal file

@@ -0,0 +1,42 @@
import time
import flask
import config
import biz.task
from services import DefaultTemplateService
from telemetry import init_opentelemetry
from util import api
# Use the new service architecture
template_service = DefaultTemplateService()
template_service.load_local_templates()
import logging
LOGGER = logging.getLogger(__name__)
init_opentelemetry(batch=False)
app = flask.Flask(__name__)
@app.get('/health/check')
def health_check():
return api.sync_center()
@app.post('/')
def do_nothing():
return "NOOP"
@app.post('/<task_id>')
def do_task(task_id):
task_info = api.get_task_info(task_id)
local_template_info = template_service.get_template(task_info.get("templateId"))
template_info = api.get_template_info(task_info.get("templateId"))
if local_template_info:
if local_template_info.get("updateTime") != template_info.get("updateTime"):
template_service.download_template(task_info.get("templateId"))
biz.task.start_task(task_info)
return "OK"
if __name__ == '__main__':
app.run(host="0.0.0.0", port=9998)
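
For reference, the two routes registered above can be exercised as follows (a sketch assuming the worker runs locally on the default port from app.run; TASK_ID is a placeholder and requests is used only for illustration):

import requests

BASE = "http://127.0.0.1:9998"  # assumed local deployment

# Health check: proxies util.api.sync_center()
print(requests.get(f"{BASE}/health/check").text)

# Ask the worker to process a task; it fetches the task and template info itself
TASK_ID = "example-task-id"  # placeholder
print(requests.post(f"{BASE}/{TASK_ID}").text)  # "OK" once the task has been started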

185
biz/ffmpeg.py Normal file

@@ -0,0 +1,185 @@
import json
import os.path
import time
from concurrent.futures import ThreadPoolExecutor
from opentelemetry.trace import Status, StatusCode
# Use the new architecture components while keeping compatibility with the legacy FfmpegTask
from entity.ffmpeg import FfmpegTask
from entity.render_task import RenderTask, TaskType
from services import DefaultRenderService
import logging
from util import ffmpeg, oss
from util.ffmpeg import fade_out_audio
from telemetry import get_tracer
logger = logging.getLogger('biz/ffmpeg')
_render_service = None
def _get_render_service():
"""获取渲染服务实例"""
global _render_service
if _render_service is None:
_render_service = DefaultRenderService()
return _render_service
def parse_ffmpeg_task(task_info, template_info):
tracer = get_tracer(__name__)
with tracer.start_as_current_span("parse_ffmpeg_task") as span:
tasks = []
# Middle segments
task_params_str = task_info.get("taskParams", "{}")
span.set_attribute("task_params", task_params_str)
task_params: dict = json.loads(task_params_str)
task_params_orig = json.loads(task_params_str)
# Count how many times each only_if placeholder is used
only_if_usage_count = {}
with tracer.start_as_current_span("parse_ffmpeg_task.download_all") as sub_span:
with ThreadPoolExecutor(max_workers=8) as executor:
param_list: list[dict]
for param_list in task_params.values():
for param in param_list:
url = param.get("url")
if url.startswith("http"):
_, fn = os.path.split(url)
executor.submit(oss.download_from_oss, url, fn, True)
executor.shutdown(wait=True)
for part in template_info.get("video_parts"):
source, ext_data = parse_video(part.get('source'), task_params, template_info)
if not source:
logger.warning("no video found for part: " + str(part))
continue
only_if = part.get('only_if', '')
if only_if:
only_if_usage_count[only_if] = only_if_usage_count.get(only_if, 0) + 1
required_count = only_if_usage_count.get(only_if)
if not check_placeholder_exist_with_count(only_if, task_params_orig, required_count):
logger.info("because only_if exist, placeholder: %s insufficient (need %d), skip part: %s", only_if, required_count, part)
continue
sub_ffmpeg_task = FfmpegTask(source)
sub_ffmpeg_task.resolution = template_info.get("video_size", "")
sub_ffmpeg_task.annexb = True
sub_ffmpeg_task.ext_data = ext_data or {}
sub_ffmpeg_task.frame_rate = template_info.get("frame_rate", 25)
sub_ffmpeg_task.center_cut = part.get("crop_mode", None)
sub_ffmpeg_task.zoom_cut = part.get("zoom_cut", None)
for effect in part.get('effects', []):
sub_ffmpeg_task.add_effect(effect)
for lut in part.get('luts', []):
sub_ffmpeg_task.add_lut(os.path.join(template_info.get("local_path"), lut).replace("\\", "/"))
for audio in part.get('audios', []):
sub_ffmpeg_task.add_audios(os.path.join(template_info.get("local_path"), audio))
for overlay in part.get('overlays', []):
sub_ffmpeg_task.add_overlay(os.path.join(template_info.get("local_path"), overlay))
tasks.append(sub_ffmpeg_task)
output_file = "out_" + str(time.time()) + ".mp4"
task = FfmpegTask(tasks, output_file=output_file)
task.resolution = template_info.get("video_size", "")
overall = template_info.get("overall_template")
task.center_cut = template_info.get("crop_mode", None)
task.zoom_cut = template_info.get("zoom_cut", None)
task.frame_rate = template_info.get("frame_rate", 25)
# if overall.get('source', ''):
# source, ext_data = parse_video(overall.get('source'), task_params, template_info)
# task.add_inputs(source)
# task.ext_data = ext_data or {}
for effect in overall.get('effects', []):
task.add_effect(effect)
for lut in overall.get('luts', []):
task.add_lut(os.path.join(template_info.get("local_path"), lut).replace("\\", "/"))
for audio in overall.get('audios', []):
task.add_audios(os.path.join(template_info.get("local_path"), audio))
for overlay in overall.get('overlays', []):
task.add_overlay(os.path.join(template_info.get("local_path"), overlay))
return task
def parse_video(source, task_params, template_info):
if source.startswith('PLACEHOLDER_'):
placeholder_id = source.replace('PLACEHOLDER_', '')
new_sources = task_params.get(placeholder_id, [])
_pick_source = {}
if type(new_sources) is list:
if len(new_sources) == 0:
logger.debug("no video found for placeholder: " + placeholder_id)
return None, _pick_source
else:
_pick_source = new_sources.pop(0)
new_sources = _pick_source.get("url")
if new_sources.startswith("http"):
_, source_name = os.path.split(new_sources)
oss.download_from_oss(new_sources, source_name, True)
return source_name, _pick_source
return new_sources, _pick_source
return os.path.join(template_info.get("local_path"), source), None
def check_placeholder_exist(placeholder_id, task_params):
if placeholder_id in task_params:
new_sources = task_params.get(placeholder_id, [])
if type(new_sources) is list:
if len(new_sources) == 0:
return False
else:
return True
return True
return False
def check_placeholder_exist_with_count(placeholder_id, task_params, required_count=1):
"""检查占位符是否存在足够数量的片段"""
if placeholder_id in task_params:
new_sources = task_params.get(placeholder_id, [])
if type(new_sources) is list:
return len(new_sources) >= required_count
return required_count <= 1
return False
def start_ffmpeg_task(ffmpeg_task):
"""启动FFmpeg任务 - 使用新的渲染服务"""
tracer = get_tracer(__name__)
with tracer.start_as_current_span("start_ffmpeg_task") as span:
try:
# Use the new render service
render_service = _get_render_service()
result = render_service.render(ffmpeg_task)
if result:
span.set_status(Status(StatusCode.OK))
else:
span.set_status(Status(StatusCode.ERROR))
return result
except Exception as e:
span.set_status(Status(StatusCode.ERROR))
logger.error(f"FFmpeg task failed: {e}", exc_info=True)
return False
def clear_task_tmp_file(ffmpeg_task):
for task in ffmpeg_task.analyze_input_render_tasks():
clear_task_tmp_file(task)
try:
if os.getenv("TEMPLATE_DIR") not in ffmpeg_task.get_output_file():
os.remove(ffmpeg_task.get_output_file())
logger.info("delete tmp file: " + ffmpeg_task.get_output_file())
else:
logger.info("skip delete template file: " + ffmpeg_task.get_output_file())
except OSError:
logger.warning("delete tmp file failed: " + ffmpeg_task.get_output_file())
return False
return True
def probe_video_info(ffmpeg_task):
"""获取视频长度宽度和时长 - 使用新的渲染服务"""
render_service = _get_render_service()
return render_service.get_video_info(ffmpeg_task.get_output_file())
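
Taken together, the helpers in this module are driven roughly like this (a sketch; task_info and template_info stand for the dicts returned by the API client):

# Hypothetical driver showing how the pieces above fit together
ffmpeg_task = parse_ffmpeg_task(task_info, template_info)  # downloads assets, builds the FfmpegTask tree
ok = start_ffmpeg_task(ffmpeg_task)                        # renders via DefaultRenderService
if ok:
    info = probe_video_info(ffmpeg_task)                   # width/height/duration of the output
clear_task_tmp_file(ffmpeg_task)                           # recursively removes intermediate files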

0
biz/render.py Normal file

55
biz/task.py Normal file

@@ -0,0 +1,55 @@
import json
import logging
from opentelemetry.trace import Status, StatusCode
# Use the new service architecture
from services import DefaultTaskService, DefaultRenderService, DefaultTemplateService
from telemetry import get_tracer
logger = logging.getLogger(__name__)
# Service instances (created once, singleton style)
_render_service = None
_template_service = None
_task_service = None
def _get_services():
"""获取服务实例(懒加载)"""
global _render_service, _template_service, _task_service
if _render_service is None:
_render_service = DefaultRenderService()
if _template_service is None:
_template_service = DefaultTemplateService()
_template_service.load_local_templates() # load local templates
if _task_service is None:
_task_service = DefaultTaskService(_render_service, _template_service)
return _task_service, _render_service, _template_service
def start_task(task_info):
"""启动任务处理(保持向后兼容的接口)"""
tracer = get_tracer(__name__)
with tracer.start_as_current_span("start_task_legacy") as span:
try:
task_service, _, _ = _get_services()
# Process the task via the new task service
result = task_service.process_task(task_info)
if result:
span.set_status(Status(StatusCode.OK))
logger.info("Task completed successfully: %s", task_info.get("id"))
else:
span.set_status(Status(StatusCode.ERROR))
logger.error("Task failed: %s", task_info.get("id"))
return None # keep the original return value format
except Exception as e:
span.set_status(Status(StatusCode.ERROR))
logger.error("Task processing failed: %s", e, exc_info=True)
return None

19
config/__init__.py Normal file

@@ -0,0 +1,19 @@
import datetime
import logging
from logging.handlers import TimedRotatingFileHandler
from dotenv import load_dotenv
# Import the new configuration system while keeping backward compatibility
from .settings import get_config, get_ffmpeg_config, get_api_config, get_storage_config, get_server_config
load_dotenv()
logging.basicConfig(level=logging.INFO)
root_logger = logging.getLogger()
rf_handler = TimedRotatingFileHandler('all_log.log', when='midnight')
rf_handler.setFormatter(logging.Formatter("[%(asctime)s][%(name)s]%(levelname)s - %(message)s"))
rf_handler.setLevel(logging.DEBUG)
f_handler = TimedRotatingFileHandler('error.log', when='midnight')
f_handler.setLevel(logging.ERROR)
f_handler.setFormatter(logging.Formatter("[%(asctime)s][%(name)s][:%(lineno)d]%(levelname)s - - %(message)s"))
root_logger.addHandler(rf_handler)
root_logger.addHandler(f_handler)

139
config/settings.py Normal file

@@ -0,0 +1,139 @@
import os
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
import logging
from dotenv import load_dotenv
load_dotenv()
@dataclass
class FFmpegConfig:
"""FFmpeg相关配置"""
encoder_args: List[str]
video_args: List[str]
audio_args: List[str]
default_args: List[str]
old_ffmpeg: bool = False
re_encode_video_args: Optional[List[str]] = None
re_encode_encoder_args: Optional[List[str]] = None
@classmethod
def from_env(cls) -> 'FFmpegConfig':
encoder_args = os.getenv("ENCODER_ARGS", "-c:v h264").split(" ")
video_args = os.getenv("VIDEO_ARGS", "-profile:v high -level:v 4").split(" ")
audio_args = ["-c:a", "aac", "-b:a", "128k", "-ar", "48000", "-ac", "2"]
default_args = ["-shortest"]
re_encode_video_args = None
if os.getenv("RE_ENCODE_VIDEO_ARGS"):
re_encode_video_args = os.getenv("RE_ENCODE_VIDEO_ARGS").split(" ")
re_encode_encoder_args = None
if os.getenv("RE_ENCODE_ENCODER_ARGS"):
re_encode_encoder_args = os.getenv("RE_ENCODE_ENCODER_ARGS").split(" ")
return cls(
encoder_args=encoder_args,
video_args=video_args,
audio_args=audio_args,
default_args=default_args,
old_ffmpeg=bool(os.getenv("OLD_FFMPEG", False)),
re_encode_video_args=re_encode_video_args,
re_encode_encoder_args=re_encode_encoder_args
)
@dataclass
class APIConfig:
"""API相关配置"""
endpoint: str
access_key: str
timeout: int = 10
redirect_to_url: Optional[str] = None
@classmethod
def from_env(cls) -> 'APIConfig':
endpoint = os.getenv('API_ENDPOINT', '')
if not endpoint:
raise ValueError("API_ENDPOINT environment variable is required")
access_key = os.getenv('ACCESS_KEY', '')
if not access_key:
raise ValueError("ACCESS_KEY environment variable is required")
return cls(
endpoint=endpoint,
access_key=access_key,
timeout=int(os.getenv('API_TIMEOUT', '10')),
redirect_to_url=os.getenv("REDIRECT_TO_URL") or None
)
@dataclass
class StorageConfig:
"""存储相关配置"""
template_dir: str
@classmethod
def from_env(cls) -> 'StorageConfig':
template_dir = os.getenv('TEMPLATE_DIR', './template')
return cls(template_dir=template_dir)
@dataclass
class ServerConfig:
"""服务器相关配置"""
host: str = "0.0.0.0"
port: int = 9998
debug: bool = False
@classmethod
def from_env(cls) -> 'ServerConfig':
return cls(
host=os.getenv('HOST', '0.0.0.0'),
port=int(os.getenv('PORT', '9998')),
debug=bool(os.getenv('DEBUG', False))
)
@dataclass
class AppConfig:
"""应用总配置"""
ffmpeg: FFmpegConfig
api: APIConfig
storage: StorageConfig
server: ServerConfig
@classmethod
def from_env(cls) -> 'AppConfig':
return cls(
ffmpeg=FFmpegConfig.from_env(),
api=APIConfig.from_env(),
storage=StorageConfig.from_env(),
server=ServerConfig.from_env()
)
# Global configuration instance
_config: Optional[AppConfig] = None
def get_config() -> AppConfig:
"""Return the global configuration instance"""
global _config
if _config is None:
_config = AppConfig.from_env()
return _config
def reload_config() -> AppConfig:
"""重新加载配置"""
global _config
_config = AppConfig.from_env()
return _config
# Backward-compatible accessors for the configuration sections
def get_ffmpeg_config() -> FFmpegConfig:
return get_config().ffmpeg
def get_api_config() -> APIConfig:
return get_config().api
def get_storage_config() -> StorageConfig:
return get_config().storage
def get_server_config() -> ServerConfig:
return get_config().server
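
A short usage sketch for the accessors defined above (assumes API_ENDPOINT and ACCESS_KEY are set, otherwise APIConfig.from_env raises ValueError):

from config.settings import get_config, get_ffmpeg_config

cfg = get_config()                      # built lazily from the environment and cached
print(cfg.api.endpoint, cfg.server.port)

ff = get_ffmpeg_config()                # FFmpeg section of the same cached instance
cmd = ["ffmpeg", "-i", "in.mp4", *ff.encoder_args, *ff.video_args, *ff.audio_args, "out.mp4"]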


@@ -1,99 +1,9 @@
# -*- coding: utf-8 -*-
"""
Constant definitions
v2 constants for the Render Worker v2 API.
"""
# Software version
SOFTWARE_VERSION = '2.0.0'
# Supported task types
TASK_TYPES = (
'RENDER_SEGMENT_VIDEO',
'COMPOSE_TRANSITION',
'PREPARE_JOB_AUDIO',
'PACKAGE_SEGMENT_TS',
'FINALIZE_MP4',
SUPPORT_FEATURE = (
'simple_render_algo',
'gpu_accelerate',
'hevc_encode',
'rapid_download',
'rclone_upload',
'custom_re_encode',
)
# Default capabilities
DEFAULT_CAPABILITIES = list(TASK_TYPES)
# Supported transition types (map to FFmpeg xfade parameters)
TRANSITION_TYPES = (
'fade', # crossfade (default)
'dissolve', # dissolve
'wipeleft', # wipe left
'wiperight', # wipe right
'wipeup', # wipe up
'wipedown', # wipe down
'slideleft', # slide left
'slideright', # slide right
'slideup', # slide up
'slidedown', # slide down
)
# Supported effect types
EFFECT_TYPES = (
'cameraShot', # camera freeze effect: freeze the frame at a given point in time
'zoom', # zoom effect (reserved)
'blur', # blur effect (reserved)
)
# Hardware acceleration types
HW_ACCEL_NONE = 'none' # pure software encode/decode
HW_ACCEL_QSV = 'qsv' # Intel Quick Sync Video (integrated/discrete GPU)
HW_ACCEL_CUDA = 'cuda' # NVIDIA NVENC/NVDEC
HW_ACCEL_TYPES = (HW_ACCEL_NONE, HW_ACCEL_QSV, HW_ACCEL_CUDA)
# Unified video encode parameters (software encoding, from the integration doc)
VIDEO_ENCODE_PARAMS = {
'codec': 'libx264',
'preset': 'medium',
'profile': 'main',
'level': '4.0',
'crf': '23',
'pix_fmt': 'yuv420p',
}
# QSV hardware-accelerated video encode parameters (Intel Quick Sync)
VIDEO_ENCODE_PARAMS_QSV = {
'codec': 'h264_qsv',
'preset': 'medium', # QSV supports: veryfast, faster, fast, medium, slow, slower, veryslow
'profile': 'main',
'level': '4.0',
'global_quality': '23', # QSV uses global_quality instead of crf (1-51, lower is better quality)
'look_ahead': '1', # enable look-ahead analysis for better quality
'pix_fmt': 'nv12', # QSV hardware surface format
}
# CUDA hardware-accelerated video encode parameters (NVIDIA NVENC)
VIDEO_ENCODE_PARAMS_CUDA = {
'codec': 'h264_nvenc',
'preset': 'p4', # NVENC presets p1-p7 (p1 fastest, p7 slowest/highest quality), p4 ≈ medium
'profile': 'main',
'level': '4.0',
'rc': 'vbr', # rate control mode: variable bitrate
'cq': '23', # constant-quality value (0-51)
'pix_fmt': 'yuv420p', # NVENC input format (converted automatically)
}
# Unified audio encode parameters
AUDIO_ENCODE_PARAMS = {
'codec': 'aac',
'bitrate': '128k',
'sample_rate': '48000',
'channels': '2',
}
# Error codes
ERROR_CODES = {
'E_INPUT_UNAVAILABLE': 'asset not accessible',
'E_FFMPEG_FAILED': 'FFmpeg execution failed',
'E_UPLOAD_FAILED': 'upload failed',
'E_SPEC_INVALID': 'invalid render spec',
'E_TIMEOUT': 'execution timed out',
'E_UNKNOWN': 'unknown error',
}
SOFTWARE_VERSION = '0.0.5'
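
The removed VIDEO_ENCODE_PARAMS_* dicts map one-to-one onto the HW_ACCEL_* constants above; a sketch of how a caller could have selected the matching parameter set (the helper is illustrative, not part of the repository):

def select_encode_params(hw_accel: str) -> dict:
    # Pick the encode parameter set matching the configured hardware acceleration
    if hw_accel == HW_ACCEL_QSV:
        return VIDEO_ENCODE_PARAMS_QSV    # h264_qsv, global_quality instead of crf
    if hw_accel == HW_ACCEL_CUDA:
        return VIDEO_ENCODE_PARAMS_CUDA   # h264_nvenc, cq + vbr rate control
    return VIDEO_ENCODE_PARAMS            # libx264 software fallback

params = select_encode_params(HW_ACCEL_QSV)
args = ["-c:v", params["codec"], "-preset", params["preset"], "-pix_fmt", params["pix_fmt"]]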


@@ -1,12 +0,0 @@
# -*- coding: utf-8 -*-
"""
Core abstraction layer
Contains the task handler abstract base class and other core interface definitions.
"""
from core.handler import TaskHandler
__all__ = [
'TaskHandler',
]


@@ -1,79 +0,0 @@
# -*- coding: utf-8 -*-
"""
Task handler abstract base class
Defines the interface contract for task handlers.
"""
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from domain.task import Task, TaskType
from domain.result import TaskResult
class TaskHandler(ABC):
"""
Task handler abstract base class
Every task handler must inherit from this class and implement the abstract methods.
"""
@abstractmethod
def handle(self, task: 'Task') -> 'TaskResult':
"""
Main entry point for processing a task
Args:
task: the task entity
Returns:
TaskResult: the task result (success or failure)
"""
pass
@abstractmethod
def get_supported_type(self) -> 'TaskType':
"""
Return the task type supported by this handler
Returns:
TaskType: the supported task type enum value
"""
pass
def before_handle(self, task: 'Task') -> None:
"""
Pre-processing hook (optional override)
Used for preparation before the task runs, e.g. logging or resource checks.
Args:
task: the task entity
"""
pass
def after_handle(self, task: 'Task', result: 'TaskResult') -> None:
"""
Post-processing hook (optional override)
Used for cleanup after the task runs, e.g. releasing resources or recording statistics.
Args:
task: the task entity
result: the task result
"""
pass
def validate_task(self, task: 'Task') -> bool:
"""
Validate the task (optional override)
Args:
task: the task entity
Returns:
bool: whether the task is valid
"""
return True


@@ -1,24 +0,0 @@
# -*- coding: utf-8 -*-
"""
Domain model layer
Contains the core data structures: task entities, results, configuration, etc.
"""
from domain.task import Task, TaskType, TaskStatus, RenderSpec, OutputSpec, AudioSpec, AudioProfile
from domain.result import TaskResult, ErrorCode, RETRY_CONFIG
from domain.config import WorkerConfig
__all__ = [
'Task',
'TaskType',
'TaskStatus',
'RenderSpec',
'OutputSpec',
'AudioSpec',
'AudioProfile',
'TaskResult',
'ErrorCode',
'RETRY_CONFIG',
'WorkerConfig',
]


@@ -1,183 +0,0 @@
# -*- coding: utf-8 -*-
"""
Worker configuration model
Defines the runtime configuration parameters for a Worker.
"""
import logging
import os
from dataclasses import dataclass, field
from typing import List, Optional
from constant import HW_ACCEL_NONE, HW_ACCEL_QSV, HW_ACCEL_CUDA, HW_ACCEL_TYPES
logger = logging.getLogger(__name__)
# Task types supported by default
DEFAULT_CAPABILITIES = [
"RENDER_SEGMENT_VIDEO",
"PREPARE_JOB_AUDIO",
"PACKAGE_SEGMENT_TS",
"FINALIZE_MP4"
]
@dataclass
class WorkerConfig:
"""
Worker configuration
Holds every configuration parameter the Worker needs at runtime.
"""
# API configuration
api_endpoint: str
access_key: str
worker_id: str
# Concurrency control
max_concurrency: int = 4
# Heartbeat configuration
heartbeat_interval: int = 5 # seconds
# Lease configuration
lease_extension_threshold: int = 60 # seconds, how far before expiry to renew
lease_extension_duration: int = 300 # seconds, duration of each renewal
# Directory configuration
temp_dir: str = "/tmp/render_worker"
# Capability configuration
capabilities: List[str] = field(default_factory=lambda: DEFAULT_CAPABILITIES.copy())
# FFmpeg configuration
ffmpeg_timeout: int = 3600 # seconds, FFmpeg execution timeout
# Download/upload configuration
download_timeout: int = 300 # seconds, download timeout
upload_timeout: int = 600 # seconds, upload timeout
# Hardware acceleration configuration
hw_accel: str = HW_ACCEL_NONE # acceleration type: none, qsv, cuda
# GPU device configuration (multi-GPU scheduling)
gpu_devices: List[int] = field(default_factory=list) # an empty list means use the default device
# Asset cache configuration
cache_enabled: bool = True # whether the asset cache is enabled
cache_dir: str = "" # cache directory, defaults to temp_dir/cache
cache_max_size_gb: float = 0 # maximum cache size (GB), 0 means unlimited
@classmethod
def from_env(cls) -> 'WorkerConfig':
"""Build the configuration from environment variables"""
# API endpoint; the V2 variant takes precedence
api_endpoint = os.getenv('API_ENDPOINT_V2') or os.getenv('API_ENDPOINT', '')
if not api_endpoint:
raise ValueError("API_ENDPOINT_V2 or API_ENDPOINT environment variable is required")
# Access key
access_key = os.getenv('ACCESS_KEY', '')
if not access_key:
raise ValueError("ACCESS_KEY environment variable is required")
# Worker ID
worker_id = os.getenv('WORKER_ID', '100001')
# Concurrency
max_concurrency = int(os.getenv('MAX_CONCURRENCY', '4'))
# Heartbeat interval
heartbeat_interval = int(os.getenv('HEARTBEAT_INTERVAL', '5'))
# Lease configuration
lease_extension_threshold = int(os.getenv('LEASE_EXTENSION_THRESHOLD', '60'))
lease_extension_duration = int(os.getenv('LEASE_EXTENSION_DURATION', '300'))
# Temporary directory
temp_dir = os.getenv('TEMP_DIR', os.getenv('TEMP', '/tmp/render_worker'))
# Capability list
capabilities_str = os.getenv('CAPABILITIES', '')
if capabilities_str:
capabilities = [c.strip() for c in capabilities_str.split(',') if c.strip()]
else:
capabilities = DEFAULT_CAPABILITIES.copy()
# FFmpeg timeout
ffmpeg_timeout = int(os.getenv('FFMPEG_TIMEOUT', '3600'))
# Download/upload timeouts
download_timeout = int(os.getenv('DOWNLOAD_TIMEOUT', '300'))
upload_timeout = int(os.getenv('UPLOAD_TIMEOUT', '600'))
# Hardware acceleration configuration
hw_accel = os.getenv('HW_ACCEL', HW_ACCEL_NONE).lower()
if hw_accel not in HW_ACCEL_TYPES:
hw_accel = HW_ACCEL_NONE
# GPU device list (for multi-GPU scheduling)
gpu_devices_str = os.getenv('GPU_DEVICES', '')
gpu_devices: List[int] = []
if gpu_devices_str:
try:
gpu_devices = [int(d.strip()) for d in gpu_devices_str.split(',') if d.strip()]
except ValueError:
logger.warning(f"Invalid GPU_DEVICES value: {gpu_devices_str}, using auto-detect")
gpu_devices = []
# Asset cache configuration
cache_enabled = os.getenv('CACHE_ENABLED', 'true').lower() in ('true', '1', 'yes')
cache_dir = os.getenv('CACHE_DIR', '') # an empty string means use the default path
cache_max_size_gb = float(os.getenv('CACHE_MAX_SIZE_GB', '0'))
return cls(
api_endpoint=api_endpoint,
access_key=access_key,
worker_id=worker_id,
max_concurrency=max_concurrency,
heartbeat_interval=heartbeat_interval,
lease_extension_threshold=lease_extension_threshold,
lease_extension_duration=lease_extension_duration,
temp_dir=temp_dir,
capabilities=capabilities,
ffmpeg_timeout=ffmpeg_timeout,
download_timeout=download_timeout,
upload_timeout=upload_timeout,
hw_accel=hw_accel,
gpu_devices=gpu_devices,
cache_enabled=cache_enabled,
cache_dir=cache_dir if cache_dir else os.path.join(temp_dir, 'cache'),
cache_max_size_gb=cache_max_size_gb
)
def get_work_dir_path(self, task_id: str) -> str:
"""Return the working directory path for a task"""
return os.path.join(self.temp_dir, f"task_{task_id}")
def ensure_temp_dir(self) -> None:
"""Make sure the temporary directory exists"""
os.makedirs(self.temp_dir, exist_ok=True)
def is_hw_accel_enabled(self) -> bool:
"""Whether hardware acceleration is enabled"""
return self.hw_accel != HW_ACCEL_NONE
def is_qsv(self) -> bool:
"""Whether QSV hardware acceleration is used"""
return self.hw_accel == HW_ACCEL_QSV
def is_cuda(self) -> bool:
"""Whether CUDA hardware acceleration is used"""
return self.hw_accel == HW_ACCEL_CUDA
def has_multi_gpu(self) -> bool:
"""Whether more than one GPU is configured"""
return len(self.gpu_devices) > 1
def get_gpu_devices(self) -> List[int]:
"""Return a copy of the GPU device list"""
return self.gpu_devices.copy()
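
For reference, the removed WorkerConfig was consumed roughly like this (a sketch; the environment values are placeholders):

import os

os.environ.setdefault("API_ENDPOINT", "https://example.invalid/task/v1")  # placeholder
os.environ.setdefault("ACCESS_KEY", "TEST_ACCESS_KEY")                    # placeholder

cfg = WorkerConfig.from_env()
cfg.ensure_temp_dir()
work_dir = cfg.get_work_dir_path("12345")          # <temp_dir>/task_12345
if cfg.is_hw_accel_enabled() and cfg.has_multi_gpu():
    devices = cfg.get_gpu_devices()                # e.g. [0, 1] when GPU_DEVICES=0,1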


@@ -1,31 +0,0 @@
# -*- coding: utf-8 -*-
"""
GPU device model
Defines the data structure for a GPU device.
"""
from dataclasses import dataclass
from typing import Optional
@dataclass
class GPUDevice:
"""
GPU device information
Attributes:
index: device index (matches the GPU ID shown by nvidia-smi)
name: device name (e.g. "NVIDIA GeForce RTX 3090")
memory_total: total memory (MB), optional
available: whether the device is usable
"""
index: int
name: str
memory_total: Optional[int] = None
available: bool = True
def __str__(self) -> str:
status = "available" if self.available else "unavailable"
mem_info = f", {self.memory_total}MB" if self.memory_total else ""
return f"GPU[{self.index}]: {self.name}{mem_info} ({status})"


@@ -1,105 +0,0 @@
# -*- coding: utf-8 -*-
"""
Task result model
Defines error codes, retry configuration, and the task result data structure.
"""
from enum import Enum
from dataclasses import dataclass
from typing import Optional, Dict, Any, List
class ErrorCode(Enum):
"""Error code enum"""
E_INPUT_UNAVAILABLE = "E_INPUT_UNAVAILABLE" # asset not accessible / 404
E_FFMPEG_FAILED = "E_FFMPEG_FAILED" # FFmpeg execution failed
E_UPLOAD_FAILED = "E_UPLOAD_FAILED" # upload failed
E_SPEC_INVALID = "E_SPEC_INVALID" # invalid renderSpec
E_TIMEOUT = "E_TIMEOUT" # execution timed out
E_UNKNOWN = "E_UNKNOWN" # unknown error
# Retry configuration
RETRY_CONFIG: Dict[ErrorCode, Dict[str, Any]] = {
ErrorCode.E_INPUT_UNAVAILABLE: {
'max_retries': 3,
'backoff': [1, 2, 5] # retry intervals (seconds)
},
ErrorCode.E_FFMPEG_FAILED: {
'max_retries': 2,
'backoff': [1, 3]
},
ErrorCode.E_UPLOAD_FAILED: {
'max_retries': 3,
'backoff': [1, 2, 5]
},
ErrorCode.E_SPEC_INVALID: {
'max_retries': 0, # never retried
'backoff': []
},
ErrorCode.E_TIMEOUT: {
'max_retries': 2,
'backoff': [5, 10]
},
ErrorCode.E_UNKNOWN: {
'max_retries': 1,
'backoff': [2]
},
}
@dataclass
class TaskResult:
"""
Task result
Wraps the outcome of a task run: either success data or failure information.
"""
success: bool
data: Optional[Dict[str, Any]] = None
error_code: Optional[ErrorCode] = None
error_message: Optional[str] = None
@classmethod
def ok(cls, data: Dict[str, Any]) -> 'TaskResult':
"""Create a success result"""
return cls(success=True, data=data)
@classmethod
def fail(cls, error_code: ErrorCode, error_message: str) -> 'TaskResult':
"""Create a failure result"""
return cls(
success=False,
error_code=error_code,
error_message=error_message
)
def to_report_dict(self) -> Dict[str, Any]:
"""
Convert to the reporting format
Used when reporting results back to the API.
"""
if self.success:
return {'result': self.data}
else:
return {
'errorCode': self.error_code.value if self.error_code else 'E_UNKNOWN',
'errorMessage': self.error_message or 'Unknown error'
}
def can_retry(self) -> bool:
"""Whether the task can be retried"""
if self.success:
return False
if not self.error_code:
return True
config = RETRY_CONFIG.get(self.error_code, {})
return config.get('max_retries', 0) > 0
def get_retry_config(self) -> Dict[str, Any]:
"""Return the retry configuration for this error"""
if not self.error_code:
return {'max_retries': 1, 'backoff': [2]}
return RETRY_CONFIG.get(self.error_code, {'max_retries': 1, 'backoff': [2]})
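
A usage sketch for the removed TaskResult model (the output URL is a placeholder):

result = TaskResult.ok({"outputUrl": "https://example.invalid/out.mp4"})  # placeholder URL
print(result.to_report_dict())                     # {'result': {'outputUrl': ...}}

failure = TaskResult.fail(ErrorCode.E_UPLOAD_FAILED, "upload timed out")
if failure.can_retry():
    retry = failure.get_retry_config()             # {'max_retries': 3, 'backoff': [1, 2, 5]}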


@@ -1,554 +0,0 @@
# -*- coding: utf-8 -*-
"""
Task domain model
Defines task types, the task entity, render specs, output specs and related data structures.
"""
from enum import Enum
from dataclasses import dataclass, field
from typing import Dict, Any, Optional, List
from datetime import datetime
from urllib.parse import urlparse, unquote
import os
# Supported image extensions
IMAGE_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.webp', '.bmp', '.gif'}
class TaskType(Enum):
"""Task type enum"""
RENDER_SEGMENT_VIDEO = "RENDER_SEGMENT_VIDEO" # render a video segment
COMPOSE_TRANSITION = "COMPOSE_TRANSITION" # compose a transition
PREPARE_JOB_AUDIO = "PREPARE_JOB_AUDIO" # produce the job-level audio
PACKAGE_SEGMENT_TS = "PACKAGE_SEGMENT_TS" # package a TS segment
FINALIZE_MP4 = "FINALIZE_MP4" # produce the final MP4
# Supported transition types (map to FFmpeg xfade parameters)
TRANSITION_TYPES = {
'fade': 'fade', # crossfade (default)
'dissolve': 'dissolve', # dissolve
'wipeleft': 'wipeleft', # wipe left
'wiperight': 'wiperight', # wipe right
'wipeup': 'wipeup', # wipe up
'wipedown': 'wipedown', # wipe down
'slideleft': 'slideleft', # slide left
'slideright': 'slideright', # slide right
'slideup': 'slideup', # slide up
'slidedown': 'slidedown', # slide down
}
# Supported effect types
EFFECT_TYPES = {
'cameraShot', # camera freeze effect
'zoom', # zoom effect (reserved)
'blur', # blur effect (reserved)
}
class TaskStatus(Enum):
"""Task status enum"""
PENDING = "PENDING"
RUNNING = "RUNNING"
SUCCESS = "SUCCESS"
FAILED = "FAILED"
@dataclass
class TransitionConfig:
"""
转场配置
用于 RENDER_SEGMENT_VIDEO 任务的入场/出场转场配置。
"""
type: str = "fade" # 转场类型
duration_ms: int = 500 # 转场时长(毫秒)
@classmethod
def from_dict(cls, data: Optional[Dict]) -> Optional['TransitionConfig']:
"""从字典创建 TransitionConfig"""
if not data:
return None
trans_type = data.get('type', 'fade')
# 验证转场类型是否支持
if trans_type not in TRANSITION_TYPES:
trans_type = 'fade'
return cls(
type=trans_type,
duration_ms=int(data.get('durationMs', 500))
)
def get_overlap_ms(self) -> int:
"""获取 overlap 时长(单边,为转场时长的一半)"""
return self.duration_ms // 2
def get_ffmpeg_transition(self) -> str:
"""获取 FFmpeg xfade 参数"""
return TRANSITION_TYPES.get(self.type, 'fade')
@dataclass
class Effect:
"""
特效配置
格式:type:params
例如:cameraShot:3,1 表示在第3秒定格1秒
"""
effect_type: str # 效果类型
params: str = "" # 参数字符串
@classmethod
def from_string(cls, effect_str: str) -> Optional['Effect']:
"""
从字符串解析 Effect
格式:type:params 或 type(无参数时)
"""
if not effect_str:
return None
parts = effect_str.split(':', 1)
effect_type = parts[0].strip()
if effect_type not in EFFECT_TYPES:
return None
params = parts[1].strip() if len(parts) > 1 else ""
return cls(effect_type=effect_type, params=params)
@classmethod
def parse_effects(cls, effects_str: Optional[str]) -> List['Effect']:
"""
解析效果字符串
格式:effect1|effect2|effect3
例如:cameraShot:3,1|blur:5
"""
if not effects_str:
return []
effects = []
for part in effects_str.split('|'):
effect = cls.from_string(part.strip())
if effect:
effects.append(effect)
return effects
def get_camera_shot_params(self) -> tuple:
"""
获取 cameraShot 效果参数
Returns:
(start_sec, duration_sec): 开始时间和持续时间(秒)
"""
if self.effect_type != 'cameraShot':
return (0, 0)
if not self.params:
return (3, 1) # 默认值
parts = self.params.split(',')
try:
start = int(parts[0]) if len(parts) >= 1 else 3
duration = int(parts[1]) if len(parts) >= 2 else 1
return (start, duration)
except ValueError:
return (3, 1)
@dataclass
class RenderSpec:
"""
渲染规格
用于 RENDER_SEGMENT_VIDEO 任务,定义视频渲染参数。
"""
crop_enable: bool = False
crop_size: Optional[str] = None
speed: str = "1.0"
lut_url: Optional[str] = None
overlay_url: Optional[str] = None
effects: Optional[str] = None
zoom_cut: bool = False
video_crop: Optional[str] = None
face_pos: Optional[str] = None
transitions: Optional[str] = None
# 转场配置(PRD v2 新增)
transition_in: Optional[TransitionConfig] = None # 入场转场
transition_out: Optional[TransitionConfig] = None # 出场转场
@classmethod
def from_dict(cls, data: Optional[Dict]) -> 'RenderSpec':
"""从字典创建 RenderSpec"""
if not data:
return cls()
return cls(
crop_enable=data.get('cropEnable', False),
crop_size=data.get('cropSize'),
speed=str(data.get('speed', '1.0')),
lut_url=data.get('lutUrl'),
overlay_url=data.get('overlayUrl'),
effects=data.get('effects'),
zoom_cut=data.get('zoomCut', False),
video_crop=data.get('videoCrop'),
face_pos=data.get('facePos'),
transitions=data.get('transitions'),
transition_in=TransitionConfig.from_dict(data.get('transitionIn')),
transition_out=TransitionConfig.from_dict(data.get('transitionOut'))
)
def has_transition_in(self) -> bool:
"""是否有入场转场"""
return self.transition_in is not None and self.transition_in.duration_ms > 0
def has_transition_out(self) -> bool:
"""是否有出场转场"""
return self.transition_out is not None and self.transition_out.duration_ms > 0
def get_overlap_head_ms(self) -> int:
"""获取头部 overlap 时长(毫秒)"""
if self.has_transition_in():
return self.transition_in.get_overlap_ms()
return 0
def get_overlap_tail_ms(self) -> int:
"""获取尾部 overlap 时长(毫秒)"""
if self.has_transition_out():
return self.transition_out.get_overlap_ms()
return 0
def get_effects(self) -> List['Effect']:
"""获取解析后的特效列表"""
return Effect.parse_effects(self.effects)
@dataclass
class OutputSpec:
"""
输出规格
用于 RENDER_SEGMENT_VIDEO 任务,定义视频输出参数。
"""
width: int = 1080
height: int = 1920
fps: int = 30
bitrate: int = 4000000
codec: str = "h264"
@classmethod
def from_dict(cls, data: Optional[Dict]) -> 'OutputSpec':
"""从字典创建 OutputSpec"""
if not data:
return cls()
return cls(
width=data.get('width', 1080),
height=data.get('height', 1920),
fps=data.get('fps', 30),
bitrate=data.get('bitrate', 4000000),
codec=data.get('codec', 'h264')
)
@dataclass
class AudioSpec:
"""
音频规格
用于 PREPARE_JOB_AUDIO 任务中的片段叠加音效。
"""
audio_url: Optional[str] = None
volume: float = 1.0
fade_in_ms: int = 10
fade_out_ms: int = 10
start_ms: int = 0
delay_ms: int = 0
loop_enable: bool = False
@classmethod
def from_dict(cls, data: Optional[Dict]) -> Optional['AudioSpec']:
"""从字典创建 AudioSpec"""
if not data:
return None
return cls(
audio_url=data.get('audioUrl'),
volume=float(data.get('volume', 1.0)),
fade_in_ms=int(data.get('fadeInMs', 10)),
fade_out_ms=int(data.get('fadeOutMs', 10)),
start_ms=int(data.get('startMs', 0)),
delay_ms=int(data.get('delayMs', 0)),
loop_enable=data.get('loopEnable', False)
)
@dataclass
class AudioProfile:
"""
音频配置
用于 PREPARE_JOB_AUDIO 任务的全局音频参数。
"""
sample_rate: int = 48000
channels: int = 2
codec: str = "aac"
@classmethod
def from_dict(cls, data: Optional[Dict]) -> 'AudioProfile':
"""从字典创建 AudioProfile"""
if not data:
return cls()
return cls(
sample_rate=data.get('sampleRate', 48000),
channels=data.get('channels', 2),
codec=data.get('codec', 'aac')
)
@dataclass
class Task:
"""
任务实体
表示一个待执行的渲染任务。
"""
task_id: str
task_type: TaskType
priority: int
lease_expire_time: datetime
payload: Dict[str, Any]
@classmethod
def from_dict(cls, data: Dict) -> 'Task':
"""从 API 响应字典创建 Task"""
lease_time_str = data.get('leaseExpireTime', '')
# 解析 ISO 8601 时间格式
if lease_time_str:
if lease_time_str.endswith('Z'):
lease_time_str = lease_time_str[:-1] + '+00:00'
try:
lease_expire_time = datetime.fromisoformat(lease_time_str)
except ValueError:
# 解析失败时使用当前时间 + 5分钟
lease_expire_time = datetime.now()
else:
lease_expire_time = datetime.now()
return cls(
task_id=str(data['taskId']),
task_type=TaskType(data['taskType']),
priority=data.get('priority', 0),
lease_expire_time=lease_expire_time,
payload=data.get('payload', {})
)
def get_job_id(self) -> str:
"""获取作业 ID"""
return str(self.payload.get('jobId', ''))
def get_segment_id(self) -> Optional[str]:
"""获取片段 ID(如果有)"""
segment_id = self.payload.get('segmentId')
return str(segment_id) if segment_id else None
def get_plan_segment_index(self) -> int:
"""获取计划片段索引"""
return int(self.payload.get('planSegmentIndex', 0))
def get_duration_ms(self) -> int:
"""获取时长(毫秒)"""
return int(self.payload.get('durationMs', 5000))
def get_material_url(self) -> Optional[str]:
"""
获取素材 URL
优先使用 boundMaterialUrl(实际可下载的 HTTP URL),
如果不存在则回退到 sourceRef(可能是 slot 引用)。
Returns:
素材 URL,如果都不存在返回 None
"""
return self.payload.get('boundMaterialUrl') or self.payload.get('sourceRef')
def get_source_ref(self) -> Optional[str]:
"""获取素材源引用(slot 标识符,如 device:xxx)"""
return self.payload.get('sourceRef')
def get_bound_material_url(self) -> Optional[str]:
"""获取绑定的素材 URL(实际可下载的 HTTP URL)"""
return self.payload.get('boundMaterialUrl')
def get_material_type(self) -> str:
"""
获取素材类型
优先使用服务端下发的 materialType 字段,
如果不存在则根据 URL 后缀自动推断。
Returns:
素材类型:"video""image"
"""
# 优先使用服务端下发的类型
material_type = self.payload.get('materialType')
if material_type in ('video', 'image'):
return material_type
# 降级:根据 URL 后缀推断
material_url = self.get_material_url()
if material_url:
parsed = urlparse(material_url)
path = unquote(parsed.path)
_, ext = os.path.splitext(path)
if ext.lower() in IMAGE_EXTENSIONS:
return 'image'
# 默认视频类型
return 'video'
def is_image_material(self) -> bool:
"""判断素材是否为图片类型"""
return self.get_material_type() == 'image'
def get_render_spec(self) -> RenderSpec:
"""获取渲染规格"""
return RenderSpec.from_dict(self.payload.get('renderSpec'))
def get_output_spec(self) -> OutputSpec:
"""获取输出规格"""
return OutputSpec.from_dict(self.payload.get('output'))
def get_transition_type(self) -> Optional[str]:
"""获取转场类型(来自 TaskPayload 顶层)"""
return self.payload.get('transitionType')
def get_transition_ms(self) -> int:
"""获取转场时长(毫秒,来自 TaskPayload 顶层)"""
return int(self.payload.get('transitionMs', 0))
def has_transition(self) -> bool:
"""是否有转场效果"""
return self.get_transition_ms() > 0
def get_overlap_tail_ms(self) -> int:
"""
获取尾部 overlap 时长(毫秒)
转场发生在当前片段与下一片段之间,当前片段需要在尾部多渲染 overlap 帧。
overlap = transitionMs / 2
"""
return self.get_transition_ms() // 2
def get_transition_in_type(self) -> Optional[str]:
"""获取入场转场类型(来自前一片段的出场转场)"""
return self.payload.get('transitionInType')
def get_transition_in_ms(self) -> int:
"""获取入场转场时长(毫秒)"""
return int(self.payload.get('transitionInMs', 0))
def get_transition_out_type(self) -> Optional[str]:
"""获取出场转场类型(当前片段的转场配置)"""
return self.payload.get('transitionOutType')
def get_transition_out_ms(self) -> int:
"""获取出场转场时长(毫秒)"""
return int(self.payload.get('transitionOutMs', 0))
def has_transition_in(self) -> bool:
"""是否有入场转场"""
return self.get_transition_in_ms() > 0
def has_transition_out(self) -> bool:
"""是否有出场转场"""
return self.get_transition_out_ms() > 0
def get_overlap_head_ms(self) -> int:
"""
获取头部 overlap 时长(毫秒)
入场转场来自前一个片段,当前片段需要在头部多渲染 overlap 帧。
overlap = transitionInMs / 2
"""
return self.get_transition_in_ms() // 2
def get_overlap_tail_ms_v2(self) -> int:
"""
获取尾部 overlap 时长(毫秒)- 使用新的字段名
出场转场用于当前片段与下一片段之间,当前片段需要在尾部多渲染 overlap 帧。
overlap = transitionOutMs / 2
"""
return self.get_transition_out_ms() // 2
def get_bgm_url(self) -> Optional[str]:
"""获取 BGM URL"""
return self.payload.get('bgmUrl')
def get_total_duration_ms(self) -> int:
"""获取总时长(毫秒)"""
return int(self.payload.get('totalDurationMs', 0))
def get_segments(self) -> List[Dict]:
"""获取片段列表"""
return self.payload.get('segments', [])
def get_audio_profile(self) -> AudioProfile:
"""获取音频配置"""
return AudioProfile.from_dict(self.payload.get('audioProfile'))
def get_video_url(self) -> Optional[str]:
"""获取视频 URL(用于 PACKAGE_SEGMENT_TS)"""
return self.payload.get('videoUrl')
def get_audio_url(self) -> Optional[str]:
"""获取音频 URL(用于 PACKAGE_SEGMENT_TS)"""
return self.payload.get('audioUrl')
def get_start_time_ms(self) -> int:
"""获取开始时间(毫秒)"""
return int(self.payload.get('startTimeMs', 0))
def get_m3u8_url(self) -> Optional[str]:
"""获取 m3u8 URL(用于 FINALIZE_MP4)"""
return self.payload.get('m3u8Url')
def get_ts_list(self) -> List[str]:
"""获取 TS 列表(用于 FINALIZE_MP4)"""
return self.payload.get('tsList', [])
# ========== COMPOSE_TRANSITION 相关方法 ==========
def get_transition_id(self) -> Optional[str]:
"""获取转场 ID(用于 COMPOSE_TRANSITION)"""
return self.payload.get('transitionId')
def get_prev_segment(self) -> Optional[Dict]:
"""获取前一个片段信息(用于 COMPOSE_TRANSITION)"""
return self.payload.get('prevSegment')
def get_next_segment(self) -> Optional[Dict]:
"""获取后一个片段信息(用于 COMPOSE_TRANSITION)"""
return self.payload.get('nextSegment')
def get_transition_config(self) -> Optional[TransitionConfig]:
"""获取转场配置(用于 COMPOSE_TRANSITION)"""
return TransitionConfig.from_dict(self.payload.get('transition'))
# ========== PACKAGE_SEGMENT_TS 转场相关方法 ==========
def is_transition_segment(self) -> bool:
"""是否为转场分片(用于 PACKAGE_SEGMENT_TS)"""
return self.payload.get('isTransitionSegment', False)
def should_trim_head(self) -> bool:
"""是否需要裁剪头部 overlap(用于 PACKAGE_SEGMENT_TS)"""
return self.payload.get('trimHead', False)
def should_trim_tail(self) -> bool:
"""是否需要裁剪尾部 overlap(用于 PACKAGE_SEGMENT_TS)"""
return self.payload.get('trimTail', False)
def get_trim_head_ms(self) -> int:
"""获取头部裁剪时长(毫秒)"""
return int(self.payload.get('trimHeadMs', 0))
def get_trim_tail_ms(self) -> int:
"""获取尾部裁剪时长(毫秒)"""
return int(self.payload.get('trimTailMs', 0))
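
A sketch of how the removed Task entity and its spec accessors fit together (the payload values are illustrative and follow the camelCase keys parsed above):

raw = {
    "taskId": 42,
    "taskType": "RENDER_SEGMENT_VIDEO",
    "leaseExpireTime": "2025-09-12T07:00:00Z",
    "payload": {
        "jobId": 7,
        "durationMs": 5000,
        "renderSpec": {"speed": "1.25", "effects": "cameraShot:3,1",
                       "transitionOut": {"type": "fade", "durationMs": 500}},
        "output": {"width": 1080, "height": 1920, "fps": 30},
    },
}

task = Task.from_dict(raw)
spec = task.get_render_spec()
for effect in spec.get_effects():           # [Effect(effect_type='cameraShot', params='3,1')]
    print(effect.get_camera_shot_params())  # (3, 1)
out = task.get_output_spec()                # OutputSpec(width=1080, height=1920, fps=30, ...)
overlap = spec.get_overlap_tail_ms()        # 250, half of the 500 ms fade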


@@ -0,0 +1,25 @@
from .base import EffectProcessor, EffectRegistry
from .camera_shot import CameraShotEffect
from .speed import SpeedEffect
from .zoom import ZoomEffect
from .skip import SkipEffect
from .tail import TailEffect
# Register all effect processors
registry = EffectRegistry()
registry.register('cameraShot', CameraShotEffect)
registry.register('ospeed', SpeedEffect)
registry.register('zoom', ZoomEffect)
registry.register('skip', SkipEffect)
registry.register('tail', TailEffect)
__all__ = [
'EffectProcessor',
'EffectRegistry',
'registry',
'CameraShotEffect',
'SpeedEffect',
'ZoomEffect',
'SkipEffect',
'TailEffect'
]
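
A usage sketch for the registry declared above (the effect string is illustrative):

from entity.effects import registry

name, params = registry.parse_effect_string("zoom:2,1.5,3")        # ("zoom", "2,1.5,3")
processor = registry.get_processor(name, params, ext_data={"posJson": "{}"})
if processor and processor.validate_params():
    filter_args, out_stream = processor.generate_filter_args("[0:v]", effect_index=1)
    # filter_args: filter_complex fragments; out_stream: e.g. "[v_eff1]"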

94
entity/effects/base.py Normal file

@@ -0,0 +1,94 @@
from abc import ABC, abstractmethod
from typing import Dict, List, Type, Any, Optional
import json
import logging
logger = logging.getLogger(__name__)
class EffectProcessor(ABC):
"""Abstract base class for effect processors"""
def __init__(self, params: str = "", ext_data: Optional[Dict[str, Any]] = None):
self.params = params
self.ext_data = ext_data or {}
self.frame_rate = 25 # default frame rate
@abstractmethod
def validate_params(self) -> bool:
"""Validate whether the parameters are valid"""
pass
@abstractmethod
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""
Generate FFmpeg filter arguments
Args:
video_input: input video stream label (e.g. "[0:v]", "[v_eff1]")
effect_index: effect index, used to build a unique output label
Returns:
tuple: (filter_args_list, output_stream_identifier)
"""
pass
@abstractmethod
def get_effect_name(self) -> str:
"""Return the effect name"""
pass
def parse_params(self) -> List[str]:
"""Parse the parameter string into a list"""
if not self.params:
return []
return self.params.split(',')
def get_pos_json(self) -> Dict[str, Any]:
"""Return the position JSON data"""
pos_json_str = self.ext_data.get('posJson', '{}')
try:
return json.loads(pos_json_str) if pos_json_str != '{}' else {}
except Exception as e:
logger.warning(f"Failed to parse posJson: {e}")
return {}
class EffectRegistry:
"""Registry of effect processors"""
def __init__(self):
self._processors: Dict[str, Type[EffectProcessor]] = {}
def register(self, name: str, processor_class: Type[EffectProcessor]):
"""Register an effect processor"""
if not issubclass(processor_class, EffectProcessor):
raise ValueError(f"{processor_class} must be a subclass of EffectProcessor")
self._processors[name] = processor_class
logger.debug(f"Registered effect processor: {name}")
def get_processor(self, effect_name: str, params: str = "", ext_data: Optional[Dict[str, Any]] = None) -> Optional[EffectProcessor]:
"""Return an effect processor instance"""
if effect_name not in self._processors:
logger.warning(f"Unknown effect: {effect_name}")
return None
processor_class = self._processors[effect_name]
return processor_class(params, ext_data)
def list_effects(self) -> List[str]:
"""List all registered effects"""
return list(self._processors.keys())
def parse_effect_string(self, effect_string: str) -> tuple[str, str]:
"""
Parse an effect string
Args:
effect_string: effect string in the form "effect_name:params"
Returns:
tuple: (effect_name, params)
"""
if ':' in effect_string:
parts = effect_string.split(':', 2)
return parts[0], parts[1] if len(parts) > 1 else ""
return effect_string, ""


@@ -0,0 +1,79 @@
from typing import List, Dict, Any
from .base import EffectProcessor
class CameraShotEffect(EffectProcessor):
"""相机镜头效果处理器"""
def validate_params(self) -> bool:
"""验证参数:start_time,duration,rotate_deg"""
params = self.parse_params()
if not params:
return True # 使用默认参数
# 参数格式: "start_time,duration,rotate_deg"
if len(params) > 3:
return False
try:
for i, param in enumerate(params):
if param == '':
continue
if i == 2: # rotate_deg
int(param)
else: # start_time, duration
float(param)
return True
except ValueError:
return False
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""生成相机镜头效果的滤镜参数"""
if not self.validate_params():
return [], video_input
params = self.parse_params()
# 设置默认值
start = 3.0
duration = 1.0
rotate_deg = 0
if len(params) >= 1 and params[0] != '':
start = float(params[0])
if len(params) >= 2 and params[1] != '':
duration = float(params[1])
if len(params) >= 3 and params[2] != '':
rotate_deg = int(params[2])
filter_args = []
# 生成输出流标识符
start_out_str = "[eff_s]"
mid_out_str = "[eff_m]"
end_out_str = "[eff_e]"
final_output = f"[v_eff{effect_index}]"
# 分割视频流为三部分
filter_args.append(f"{video_input}split=3{start_out_str}{mid_out_str}{end_out_str}")
# 选择开始部分帧
filter_args.append(f"{start_out_str}select=lt(n\\,{int(start * self.frame_rate)}){start_out_str}")
# 选择结束部分帧
filter_args.append(f"{end_out_str}select=gt(n\\,{int(start * self.frame_rate)}){end_out_str}")
# 选择中间特定帧并扩展
filter_args.append(f"{mid_out_str}select=eq(n\\,{int(start * self.frame_rate)}){mid_out_str}")
filter_args.append(f"{mid_out_str}tpad=start_mode=clone:start_duration={duration:.4f}{mid_out_str}")
# 如果需要旋转
if rotate_deg != 0:
filter_args.append(f"{mid_out_str}rotate=PI*{rotate_deg}/180{mid_out_str}")
# 连接三部分
filter_args.append(f"{start_out_str}{mid_out_str}{end_out_str}concat=n=3:v=1:a=0,setpts=N/{self.frame_rate}/TB{final_output}")
return filter_args, final_output
def get_effect_name(self) -> str:
return "cameraShot"

38
entity/effects/skip.py Normal file

@@ -0,0 +1,38 @@
from typing import List
from .base import EffectProcessor
class SkipEffect(EffectProcessor):
"""跳过开头效果处理器"""
def validate_params(self) -> bool:
"""验证参数:跳过的秒数"""
if not self.params:
return True # 默认不跳过
try:
skip_seconds = float(self.params)
return skip_seconds >= 0
except ValueError:
return False
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""生成跳过开头效果的滤镜参数"""
if not self.validate_params():
return [], video_input
if not self.params:
return [], video_input
skip_seconds = float(self.params)
if skip_seconds <= 0:
return [], video_input
output_stream = f"[v_eff{effect_index}]"
# 使用trim滤镜跳过开头
filter_args = [f"{video_input}trim=start={skip_seconds}{output_stream}"]
return filter_args, output_stream
def get_effect_name(self) -> str:
return "skip"

35
entity/effects/speed.py Normal file

@@ -0,0 +1,35 @@
from typing import List
from .base import EffectProcessor
class SpeedEffect(EffectProcessor):
"""视频变速效果处理器"""
def validate_params(self) -> bool:
"""验证参数:速度倍数"""
if not self.params:
return True # 默认不变速
try:
speed = float(self.params)
return speed > 0
except ValueError:
return False
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""生成变速效果的滤镜参数"""
if not self.validate_params():
return [], video_input
if not self.params or self.params == "1":
return [], video_input # 不需要变速
speed = float(self.params)
output_stream = f"[v_eff{effect_index}]"
# 使用setpts进行变速
filter_args = [f"{video_input}setpts={speed}*PTS{output_stream}"]
return filter_args, output_stream
def get_effect_name(self) -> str:
return "ospeed"

42
entity/effects/tail.py Normal file

@@ -0,0 +1,42 @@
from typing import List
from .base import EffectProcessor
class TailEffect(EffectProcessor):
"""保留末尾效果处理器"""
def validate_params(self) -> bool:
"""验证参数:保留的秒数"""
if not self.params:
return True # 默认不截取
try:
tail_seconds = float(self.params)
return tail_seconds >= 0
except ValueError:
return False
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""生成保留末尾效果的滤镜参数"""
if not self.validate_params():
return [], video_input
if not self.params:
return [], video_input
tail_seconds = float(self.params)
if tail_seconds <= 0:
return [], video_input
output_stream = f"[v_eff{effect_index}]"
# 使用reverse+trim+reverse的方法来精确获取最后N秒
filter_args = [
f"{video_input}reverse[v_rev{effect_index}]",
f"[v_rev{effect_index}]trim=duration={tail_seconds}[v_trim{effect_index}]",
f"[v_trim{effect_index}]reverse{output_stream}"
]
return filter_args, output_stream
def get_effect_name(self) -> str:
return "tail"

89
entity/effects/zoom.py Normal file

@@ -0,0 +1,89 @@
from typing import List
import json
from .base import EffectProcessor
class ZoomEffect(EffectProcessor):
"""缩放效果处理器"""
def validate_params(self) -> bool:
"""验证参数:start_time,zoom_factor,duration"""
params = self.parse_params()
if len(params) < 3:
return False
try:
start_time = float(params[0])
zoom_factor = float(params[1])
duration = float(params[2])
return (start_time >= 0 and
zoom_factor > 0 and
duration >= 0)
except (ValueError, IndexError):
return False
def generate_filter_args(self, video_input: str, effect_index: int) -> tuple[List[str], str]:
"""生成缩放效果的滤镜参数"""
if not self.validate_params():
return [], video_input
params = self.parse_params()
start_time = float(params[0])
zoom_factor = float(params[1])
duration = float(params[2])
if zoom_factor == 1:
return [], video_input # 不需要缩放
output_stream = f"[v_eff{effect_index}]"
# 获取缩放中心点
center_x, center_y = self._get_zoom_center()
filter_args = []
if duration == 0:
# 静态缩放(整个视频时长)
x_expr = f"({center_x})-(ow*zoom)/2"
y_expr = f"({center_y})-(oh*zoom)/2"
filter_args.append(
f"{video_input}trim=start={start_time},zoompan=z={zoom_factor}:x={x_expr}:y={y_expr}:d=1{output_stream}"
)
else:
# 动态缩放(指定时间段内)
zoom_expr = f"if(between(t\\,{start_time}\\,{start_time + duration})\\,{zoom_factor}\\,1)"
x_expr = f"({center_x})-(ow*zoom)/2"
y_expr = f"({center_y})-(oh*zoom)/2"
filter_args.append(
f"{video_input}zoompan=z={zoom_expr}:x={x_expr}:y={y_expr}:d=1{output_stream}"
)
return filter_args, output_stream
def _get_zoom_center(self) -> tuple[str, str]:
"""获取缩放中心点坐标表达式"""
# 默认中心点
center_x = "iw/2"
center_y = "ih/2"
pos_json = self.get_pos_json()
if pos_json:
_f_x = pos_json.get('ltX', 0)
_f_x2 = pos_json.get('rbX', 0)
_f_y = pos_json.get('ltY', 0)
_f_y2 = pos_json.get('rbY', 0)
_v_w = pos_json.get('imgWidth', 1)
_v_h = pos_json.get('imgHeight', 1)
if _v_w > 0 and _v_h > 0:
# 计算坐标系统中的中心点
center_x_ratio = (_f_x + _f_x2) / (2 * _v_w)
center_y_ratio = (_f_y + _f_y2) / (2 * _v_h)
# 转换为视频坐标系统
center_x = f"iw*{center_x_ratio:.6f}"
center_y = f"ih*{center_y_ratio:.6f}"
return center_x, center_y
def get_effect_name(self) -> str:
return "zoom"

507
entity/ffmpeg.py Normal file

@@ -0,0 +1,507 @@
import json
import os
import time
import uuid
from typing import Any
DEFAULT_ARGS = ("-shortest",)
ENCODER_ARGS = ("-c:v", "h264", ) if not os.getenv("ENCODER_ARGS", False) else os.getenv("ENCODER_ARGS", "").split(" ")
VIDEO_ARGS = ("-profile:v", "high", "-level:v", "4", ) if not os.getenv("VIDEO_ARGS", False) else os.getenv("VIDEO_ARGS", "").split(" ")
AUDIO_ARGS = ("-c:a", "aac", "-b:a", "128k", "-ar", "48000", "-ac", "2", )
MUTE_AUDIO_INPUT = ("-f", "lavfi", "-i", "anullsrc=cl=stereo:r=48000", )
def get_mp4toannexb_filter():
"""
Determine which mp4toannexb filter to use based on ENCODER_ARGS.
Returns 'hevc_mp4toannexb' if ENCODER_ARGS contains 'hevc', otherwise 'h264_mp4toannexb'.
"""
encoder_args_str = os.getenv("ENCODER_ARGS", "").lower()
if "hevc" in encoder_args_str:
return "hevc_mp4toannexb"
return "h264_mp4toannexb"
class FfmpegTask(object):
effects: list[str]
def __init__(self, input_file, task_type='copy', output_file=''):
self.annexb = False
if type(input_file) is str:
if input_file.endswith(".ts"):
self.annexb = True
self.input_file = [input_file]
elif type(input_file) is list:
self.input_file = input_file
else:
self.input_file = []
self.zoom_cut = None
self.center_cut = None
self.ext_data = {}
self.task_type = task_type
self.output_file = output_file
self.mute = True
self.speed = 1
self.frame_rate = 25
self.resolution = None
self.subtitles = []
self.luts = []
self.audios = []
self.overlays = []
self.effects = []
def __repr__(self):
_str = f'FfmpegTask(input_file={self.input_file}, task_type={self.task_type}'
if len(self.luts) > 0:
_str += f', luts={self.luts}'
if len(self.audios) > 0:
_str += f', audios={self.audios}'
if len(self.overlays) > 0:
_str += f', overlays={self.overlays}'
if self.annexb:
_str += f', annexb={self.annexb}'
if self.effects:
_str += f', effects={self.effects}'
if self.mute:
_str += f', mute={self.mute}'
_str += f', center_cut={self.center_cut}'
return _str + ')'
def analyze_input_render_tasks(self):
for i in self.input_file:
if type(i) is str:
continue
elif isinstance(i, FfmpegTask):
if i.need_run():
yield i
def need_run(self):
"""
判断是否需要运行
:rtype: bool
:return:
"""
if self.annexb:
return True
# TODO: copy from url
return not self.check_can_copy()
def add_inputs(self, *inputs):
self.input_file.extend(inputs)
def add_overlay(self, *overlays):
for overlay in overlays:
if str(overlay).endswith('.ass'):
self.subtitles.append(overlay)
else:
self.overlays.append(overlay)
self.correct_task_type()
def add_audios(self, *audios):
self.audios.extend(audios)
self.correct_task_type()
self.check_audio_track()
def add_lut(self, *luts):
self.luts.extend(luts)
self.correct_task_type()
def add_effect(self, *effects):
self.effects.extend(effects)
self.correct_task_type()
def get_output_file(self):
if self.task_type == 'copy':
return self.input_file[0]
if self.output_file == '':
self.set_output_file()
return self.output_file
def correct_task_type(self):
if self.check_can_copy():
self.task_type = 'copy'
elif self.check_can_concat():
self.task_type = 'concat'
else:
self.task_type = 'encode'
def check_can_concat(self):
if len(self.luts) > 0:
return False
if len(self.overlays) > 0:
return False
if len(self.subtitles) > 0:
return False
if len(self.effects) > 0:
return False
if self.speed != 1:
return False
if self.zoom_cut is not None:
return False
if self.center_cut is not None:
return False
return True
def check_can_copy(self):
if len(self.luts) > 0:
return False
if len(self.overlays) > 0:
return False
if len(self.subtitles) > 0:
return False
if len(self.effects) > 0:
return False
if self.speed != 1:
return False
if len(self.audios) >= 1:
return False
if len(self.input_file) > 1:
return False
if self.zoom_cut is not None:
return False
if self.center_cut is not None:
return False
return True
def check_audio_track(self):
...
def get_ffmpeg_args(self):
args = ['-y', '-hide_banner']
if self.task_type == 'encode':
input_args = []
filter_args = []
output_args = [*VIDEO_ARGS, *AUDIO_ARGS, *ENCODER_ARGS, *DEFAULT_ARGS]
if self.annexb:
output_args.append("-bsf:v")
output_args.append(get_mp4toannexb_filter())
output_args.append("-reset_timestamps")
output_args.append("1")
video_output_str = "[0:v]"
audio_output_str = ""
audio_track_index = 0
effect_index = 0
for input_file in self.input_file:
input_args.append("-i")
if type(input_file) is str:
input_args.append(input_file)
elif isinstance(input_file, FfmpegTask):
input_args.append(input_file.get_output_file())
if self.center_cut == 1:
pos_json_str = self.ext_data.get('posJson', '{}')
try:
pos_json = json.loads(pos_json_str)
except Exception as e:
pos_json = {}
_v_w = pos_json.get('imgWidth', 1)
_f_x = pos_json.get('ltX', 0)
_f_x2 = pos_json.get('rbX', 0)
_x = f'{float((_f_x2 + _f_x)/(2 * _v_w)) :.4f}*iw-ih*ih/(2*iw)'
filter_args.append(f"{video_output_str}crop=x={_x}:y=0:w=ih*ih/iw:h=ih[v_cut{effect_index}]")
video_output_str = f"[v_cut{effect_index}]"
effect_index += 1
if self.zoom_cut == 1 and self.resolution:
_input = None
for input_file in self.input_file:
if type(input_file) is str:
_input = input_file
break
elif isinstance(input_file, FfmpegTask):
_input = input_file.get_output_file()
break
if _input:
from util.ffmpeg import probe_video_info
_iw, _ih, _ = probe_video_info(_input)
_w, _h = self.resolution.split('x', 1)
pos_json_str = self.ext_data.get('posJson', '{}')
try:
pos_json = json.loads(pos_json_str)
except Exception as e:
pos_json = {}
_v_w = pos_json.get('imgWidth', 1)
_v_h = pos_json.get('imgHeight', 1)
_f_x = pos_json.get('ltX', 0)
_f_x2 = pos_json.get('rbX', 0)
_f_y = pos_json.get('ltY', 0)
_f_y2 = pos_json.get('rbY', 0)
_x = min(max(0, int((_f_x + _f_x2) / 2 - int(_w) / 2)), _iw - int(_w))
_y = min(max(0, int((_f_y + _f_y2) / 2 - int(_h) / 2)), _ih - int(_h))
filter_args.append(f"{video_output_str}crop=x={_x}:y={_y}:w={_w}:h={_h}[vz_cut{effect_index}]")
video_output_str = f"[vz_cut{effect_index}]"
effect_index += 1
for effect in self.effects:
if effect.startswith("cameraShot:"):
param = effect.split(":", 2)[1]
if param == '':
param = "3,1,0"
_split = param.split(",")
start = 3
duration = 1
rotate_deg = 0
if len(_split) >= 3:
if _split[2] == '':
rotate_deg = 0
else:
rotate_deg = int(_split[2])
if len(_split) >= 2:
duration = float(_split[1])
if len(_split) >= 1:
start = float(_split[0])
_start_out_str = "[eff_s]"
_mid_out_str = "[eff_m]"
_end_out_str = "[eff_e]"
filter_args.append(f"{video_output_str}split=3{_start_out_str}{_mid_out_str}{_end_out_str}")
filter_args.append(f"{_start_out_str}select=lt(n\\,{int(start * self.frame_rate)}){_start_out_str}")
filter_args.append(f"{_end_out_str}select=gt(n\\,{int(start * self.frame_rate)}){_end_out_str}")
filter_args.append(f"{_mid_out_str}select=eq(n\\,{int(start * self.frame_rate)}){_mid_out_str}")
filter_args.append(
f"{_mid_out_str}tpad=start_mode=clone:start_duration={duration:.4f}{_mid_out_str}")
if rotate_deg != 0:
filter_args.append(f"{_mid_out_str}rotate=PI*{rotate_deg}/180{_mid_out_str}")
# filter_args.append(f"{video_output_str}trim=start=0:end={start+duration},tpad=stop_mode=clone:stop_duration={duration},setpts=PTS-STARTPTS{_start_out_str}")
# filter_args.append(f"tpad=start_mode=clone:start_duration={duration},setpts=PTS-STARTPTS{_start_out_str}")
# filter_args.append(f"{_end_out_str}trim=start={start}{_end_out_str}")
video_output_str = f"[v_eff{effect_index}]"
# filter_args.append(f"{_end_out_str}{_start_out_str}overlay=eof_action=pass{video_output_str}")
filter_args.append(f"{_start_out_str}{_mid_out_str}{_end_out_str}concat=n=3:v=1:a=0,setpts=N/{self.frame_rate}/TB{video_output_str}")
effect_index += 1
elif effect.startswith("ospeed:"):
param = effect.split(":", 2)[1]
if param == '':
param = "1"
if param != "1":
# 视频变速
effect_index += 1
filter_args.append(f"{video_output_str}setpts={param}*PTS[v_eff{effect_index}]")
video_output_str = f"[v_eff{effect_index}]"
elif effect.startswith("zoom:"):
param = effect.split(":", 2)[1]
if param == '':
continue
_split = param.split(",")
if len(_split) < 3:
continue
try:
start_time = float(_split[0])
zoom_factor = float(_split[1])
duration = float(_split[2])
if start_time < 0:
start_time = 0
if duration < 0:
duration = 0
if zoom_factor <= 0:
zoom_factor = 1
except (ValueError, IndexError):
start_time = 0
duration = 0
zoom_factor = 1
if zoom_factor == 1:
continue
effect_index += 1
# 获取缩放中心点(从pos_json或使用默认中心)
center_x = "iw/2"
center_y = "ih/2"
pos_json_str = self.ext_data.get('posJson', '{}')
try:
pos_json = json.loads(pos_json_str) if pos_json_str != '{}' else {}
if pos_json:
_f_x = pos_json.get('ltX', 0)
_f_x2 = pos_json.get('rbX', 0)
_f_y = pos_json.get('ltY', 0)
_f_y2 = pos_json.get('rbY', 0)
_v_w = pos_json.get('imgWidth', 1)
_v_h = pos_json.get('imgHeight', 1)
if _v_w > 0 and _v_h > 0:
# 计算坐标系统中的中心点
center_x_ratio = (_f_x + _f_x2) / (2 * _v_w)
center_y_ratio = (_f_y + _f_y2) / (2 * _v_h)
# 转换为视频坐标系统
center_x = f"iw*{center_x_ratio:.6f}"
center_y = f"ih*{center_y_ratio:.6f}"
except Exception as e:
# 解析失败使用默认中心
pass
if duration == 0:
# 静态缩放(整个视频时长)
x_expr = f"({center_x})-(ow*zoom)/2"
y_expr = f"({center_y})-(oh*zoom)/2"
filter_args.append(f"{video_output_str}trim=start={start_time},zoompan=z={zoom_factor}:x={x_expr}:y={y_expr}:d=1[v_eff{effect_index}]")
else:
# 动态缩放(指定时间段内)
zoom_expr = f"if(between(t\\,{start_time}\\,{start_time + duration})\\,{zoom_factor}\\,1)"
x_expr = f"({center_x})-(ow*zoom)/2"
y_expr = f"({center_y})-(oh*zoom)/2"
filter_args.append(f"{video_output_str}zoompan=z={zoom_expr}:x={x_expr}:y={y_expr}:d=1[v_eff{effect_index}]")
video_output_str = f"[v_eff{effect_index}]"
elif effect.startswith("skip:"):
param = effect.split(":", 2)[1]
if param == '':
param = "0"
skip_seconds = float(param)
if skip_seconds > 0:
effect_index += 1
filter_args.append(f"{video_output_str}trim=start={skip_seconds}[v_eff{effect_index}]")
video_output_str = f"[v_eff{effect_index}]"
elif effect.startswith("tail:"):
param = effect.split(":", 2)[1]
if param == '':
param = "0"
tail_seconds = float(param)
if tail_seconds > 0:
effect_index += 1
# 首先获取视频总时长,然后计算开始时间
# 使用reverse+trim+reverse的方法来精确获取最后N秒
filter_args.append(f"{video_output_str}reverse[v_rev{effect_index}]")
filter_args.append(f"[v_rev{effect_index}]trim=duration={tail_seconds}[v_trim{effect_index}]")
filter_args.append(f"[v_trim{effect_index}]reverse[v_eff{effect_index}]")
video_output_str = f"[v_eff{effect_index}]"
...
if self.resolution:
filter_args.append(f"{video_output_str}scale={self.resolution.replace('x', ':')}[v]")
video_output_str = "[v]"
for lut in self.luts:
filter_args.append(f"{video_output_str}lut3d=file={lut}{video_output_str}")
for overlay in self.overlays:
input_index = input_args.count("-i")
input_args.append("-i")
input_args.append(overlay)
if os.getenv("OLD_FFMPEG"):
filter_args.append(f"{video_output_str}[{input_index}:v]scale2ref=iw:ih[v]")
else:
filter_args.append(f"{video_output_str}[{input_index}:v]scale=rw:rh[v]")
filter_args.append(f"[v][{input_index}:v]overlay=1:eof_action=endall[v]")
video_output_str = "[v]"
for subtitle in self.subtitles:
filter_args.append(f"{video_output_str}ass={subtitle}[v]")
video_output_str = "[v]"
output_args.append("-map")
output_args.append(video_output_str)
output_args.append("-r")
output_args.append(f"{self.frame_rate}")
output_args.append("-fps_mode")
output_args.append("cfr")
if self.mute:
input_index = input_args.count("-i")
input_args += MUTE_AUDIO_INPUT
filter_args.append(f"[{input_index}:a]acopy[a]")
audio_track_index += 1
audio_output_str = "[a]"
else:
audio_output_str = "[0:a]"
audio_track_index += 1
for audio in self.audios:
input_index = input_args.count("-i")
input_args.append("-i")
input_args.append(audio.replace("\\", "/"))
audio_track_index += 1
filter_args.append(f"{audio_output_str}[{input_index}:a]amix=duration=shortest:dropout_transition=0:normalize=0[a]")
audio_output_str = "[a]"
if audio_output_str:
output_args.append("-map")
output_args.append(audio_output_str)
_filter_args = [] if len(filter_args) == 0 else ["-filter_complex", ";".join(filter_args)]
return args + input_args + _filter_args + output_args + [self.get_output_file()]
elif self.task_type == 'concat':
# 无法通过 annexb 合并的
input_args = []
output_args = [*DEFAULT_ARGS]
filter_args = []
audio_output_str = ""
audio_track_index = 0
# output_args
if len(self.input_file) == 1:
_file = self.input_file[0]
from util.ffmpeg import probe_video_audio
if type(_file) is str:
input_args += ["-i", _file]
self.mute = not probe_video_audio(_file)
elif isinstance(_file, FfmpegTask):
input_args += ["-i", _file.get_output_file()]
self.mute = not probe_video_audio(_file.get_output_file())
else:
_tmp_file = "tmp_concat_" + str(time.time()) + ".txt"
from util.ffmpeg import probe_video_audio
with open(_tmp_file, "w", encoding="utf-8") as f:
for input_file in self.input_file:
if type(input_file) is str:
f.write("file '" + input_file + "'\n")
elif isinstance(input_file, FfmpegTask):
f.write("file '" + input_file.get_output_file() + "'\n")
input_args += ["-f", "concat", "-safe", "0", "-i", _tmp_file]
self.mute = not probe_video_audio(_tmp_file, "concat")
output_args.append("-map")
output_args.append("0:v")
output_args.append("-c:v")
output_args.append("copy")
if self.mute:
input_index = input_args.count("-i")
input_args += MUTE_AUDIO_INPUT
audio_output_str = f"[{input_index}:a]"
audio_track_index += 1
else:
audio_output_str = "[0:a]"
audio_track_index += 1
for audio in self.audios:
input_index = input_args.count("-i")
input_args.append("-i")
input_args.append(audio.replace("\\", "/"))
audio_track_index += 1
filter_args.append(f"{audio_output_str}[{input_index}:a]amix=duration=shortest:dropout_transition=0:normalize=0[a]")
audio_output_str = "[a]"
if audio_output_str:
output_args.append("-map")
if audio_track_index <= 1:
output_args.append(audio_output_str[1:-1])
else:
output_args.append(audio_output_str)
output_args += AUDIO_ARGS
if self.annexb:
output_args.append("-bsf:v")
output_args.append(get_mp4toannexb_filter())
output_args.append("-bsf:a")
output_args.append("setts=pts=DTS")
output_args.append("-f")
output_args.append("mpegts" if self.annexb else "mp4")
_filter_args = [] if len(filter_args) == 0 else ["-filter_complex", ";".join(filter_args)]
return args + input_args + _filter_args + output_args + [self.get_output_file()]
elif self.task_type == 'copy':
if len(self.input_file) == 1:
if type(self.input_file[0]) is str:
if self.input_file[0] == self.get_output_file():
return []
return args + ["-i", self.input_file[0]] + ["-c", "copy", self.get_output_file()]
return []
def set_output_file(self, file=None):
if file is None:
if self.output_file == '':
if self.annexb:
self.output_file = "rand_" + str(uuid.uuid4()) + ".ts"
else:
self.output_file = "rand_" + str(uuid.uuid4()) + ".mp4"
else:
if isinstance(file, FfmpegTask):
if file == self:
return
self.output_file = file.get_output_file()
if type(file) is str:
self.output_file = file
def check_annexb(self):
for input_file in self.input_file:
if type(input_file) is str:
if self.task_type == 'encode':
return self.annexb
elif self.task_type == 'concat':
return False
elif self.task_type == 'copy':
return self.annexb
else:
return False
elif isinstance(input_file, FfmpegTask):
if not input_file.check_annexb():
return False
return True


@@ -0,0 +1,281 @@
import json
import os
import time
from typing import List, Optional
from config.settings import get_ffmpeg_config
from entity.render_task import RenderTask, TaskType
from entity.effects import registry as effect_registry
from util.exceptions import FFmpegError
from util.ffmpeg import probe_video_info, probe_video_audio
import logging
logger = logging.getLogger(__name__)
class FFmpegCommandBuilder:
"""FFmpeg命令构建器"""
def __init__(self, task: RenderTask):
self.task = task
self.config = get_ffmpeg_config()
def build_command(self) -> List[str]:
"""构建FFmpeg命令"""
self.task.update_task_type()
if self.task.task_type == TaskType.COPY:
return self._build_copy_command()
elif self.task.task_type == TaskType.CONCAT:
return self._build_concat_command()
elif self.task.task_type == TaskType.ENCODE:
return self._build_encode_command()
else:
raise FFmpegError(f"Unsupported task type: {self.task.task_type}")
def _build_copy_command(self) -> List[str]:
"""构建复制命令"""
if len(self.task.input_files) == 1:
input_file = self.task.input_files[0]
if input_file == self.task.output_file:
return [] # 不需要处理
return [
"ffmpeg", "-y", "-hide_banner",
"-i", self.task.input_files[0],
"-c", "copy",
self.task.output_file
]
def _build_concat_command(self) -> List[str]:
"""构建拼接命令"""
args = ["ffmpeg", "-y", "-hide_banner"]
input_args = []
output_args = [*self.config.default_args]
filter_args = []
if len(self.task.input_files) == 1:
# 单个文件
file = self.task.input_files[0]
input_args.extend(["-i", file])
self.task.mute = not probe_video_audio(file)
else:
# 多个文件使用concat协议
tmp_file = f"tmp_concat_{time.time()}.txt"
with open(tmp_file, "w", encoding="utf-8") as f:
for input_file in self.task.input_files:
f.write(f"file '{input_file}'\n")
input_args.extend(["-f", "concat", "-safe", "0", "-i", tmp_file])
self.task.mute = not probe_video_audio(tmp_file, "concat")
# 视频流映射
output_args.extend(["-map", "0:v", "-c:v", "copy"])
# 音频处理
audio_output_str = self._handle_audio_concat(input_args, filter_args)
if audio_output_str:
output_args.extend(["-map", audio_output_str])
output_args.extend(self.config.audio_args)
# annexb处理
if self.task.annexb:
output_args.extend(["-bsf:v", self._get_mp4toannexb_filter()])
output_args.extend(["-bsf:a", "setts=pts=DTS"])
output_args.extend(["-f", "mpegts"])
else:
output_args.extend(["-f", "mp4"])
filter_complex = ["-filter_complex", ";".join(filter_args)] if filter_args else []
return args + input_args + filter_complex + output_args + [self.task.output_file]
def _build_encode_command(self) -> List[str]:
"""构建编码命令"""
args = ["ffmpeg", "-y", "-hide_banner"]
input_args = []
filter_args = []
output_args = [
*self.config.video_args,
*self.config.audio_args,
*self.config.encoder_args,
*self.config.default_args
]
# annexb处理
if self.task.annexb:
output_args.extend(["-bsf:v", self._get_mp4toannexb_filter()])
output_args.extend(["-reset_timestamps", "1"])
# 处理输入文件
for input_file in self.task.input_files:
input_args.extend(["-i", input_file])
# 处理视频流
video_output_str = "[0:v]"
effect_index = 0
# 处理中心裁剪
if self.task.center_cut == 1:
video_output_str, effect_index = self._add_center_cut(filter_args, video_output_str, effect_index)
# 处理缩放裁剪
if self.task.zoom_cut == 1 and self.task.resolution:
video_output_str, effect_index = self._add_zoom_cut(filter_args, video_output_str, effect_index)
# 处理效果
video_output_str, effect_index = self._add_effects(filter_args, video_output_str, effect_index)
# 处理分辨率
if self.task.resolution:
filter_args.append(f"{video_output_str}scale={self.task.resolution.replace('x', ':')}[v]")
video_output_str = "[v]"
# 处理LUT
for lut in self.task.luts:
filter_args.append(f"{video_output_str}lut3d=file={lut}{video_output_str}")
# 处理覆盖层
video_output_str = self._add_overlays(input_args, filter_args, video_output_str)
# 处理字幕
for subtitle in self.task.subtitles:
filter_args.append(f"{video_output_str}ass={subtitle}[v]")
video_output_str = "[v]"
# 映射视频流
output_args.extend(["-map", video_output_str])
output_args.extend(["-r", str(self.task.frame_rate)])
output_args.extend(["-fps_mode", "cfr"])
# 处理音频
audio_output_str = self._handle_audio_encode(input_args, filter_args)
if audio_output_str:
output_args.extend(["-map", audio_output_str])
filter_complex = ["-filter_complex", ";".join(filter_args)] if filter_args else []
return args + input_args + filter_complex + output_args + [self.task.output_file]
def _add_center_cut(self, filter_args: List[str], video_input: str, effect_index: int) -> tuple[str, int]:
"""添加中心裁剪"""
pos_json = self.task.ext_data.get('posJson', '{}')
try:
pos_data = json.loads(pos_json) if pos_json != '{}' else {}
except Exception:
pos_data = {}
_v_w = pos_data.get('imgWidth', 1)
_f_x = pos_data.get('ltX', 0)
_f_x2 = pos_data.get('rbX', 0)
_x = f'{float((_f_x2 + _f_x)/(2 * _v_w)):.4f}*iw-ih*ih/(2*iw)'
filter_args.append(f"{video_input}crop=x={_x}:y=0:w=ih*ih/iw:h=ih[v_cut{effect_index}]")
return f"[v_cut{effect_index}]", effect_index + 1
def _add_zoom_cut(self, filter_args: List[str], video_input: str, effect_index: int) -> tuple[str, int]:
"""添加缩放裁剪"""
# 获取输入视频尺寸
input_file = self.task.input_files[0]
_iw, _ih, _ = probe_video_info(input_file)
_w, _h = self.task.resolution.split('x', 1)
pos_json = self.task.ext_data.get('posJson', '{}')
try:
pos_data = json.loads(pos_json) if pos_json != '{}' else {}
except Exception:
pos_data = {}
_v_w = pos_data.get('imgWidth', 1)
_v_h = pos_data.get('imgHeight', 1)
_f_x = pos_data.get('ltX', 0)
_f_x2 = pos_data.get('rbX', 0)
_f_y = pos_data.get('ltY', 0)
_f_y2 = pos_data.get('rbY', 0)
_x = min(max(0, int((_f_x + _f_x2) / 2 - int(_w) / 2)), _iw - int(_w))
_y = min(max(0, int((_f_y + _f_y2) / 2 - int(_h) / 2)), _ih - int(_h))
filter_args.append(f"{video_input}crop=x={_x}:y={_y}:w={_w}:h={_h}[vz_cut{effect_index}]")
return f"[vz_cut{effect_index}]", effect_index + 1
def _add_effects(self, filter_args: List[str], video_input: str, effect_index: int) -> tuple[str, int]:
"""添加效果处理"""
current_input = video_input
for effect_str in self.task.effects:
effect_name, params = effect_registry.parse_effect_string(effect_str)
processor = effect_registry.get_processor(effect_name, params, self.task.ext_data)
if processor:
processor.frame_rate = self.task.frame_rate
effect_filters, output_stream = processor.generate_filter_args(current_input, effect_index)
if effect_filters:
filter_args.extend(effect_filters)
current_input = output_stream
effect_index += 1
return current_input, effect_index
def _add_overlays(self, input_args: List[str], filter_args: List[str], video_input: str) -> str:
"""Add overlay layers on top of the current video stream"""
current_input = video_input
for overlay in self.task.overlays:
# count("-i") equals the number of inputs added so far, which is also the index of the next input
input_index = input_args.count("-i")
input_args.extend(["-i", overlay])
if self.config.old_ffmpeg:
filter_args.append(f"{current_input}[{input_index}:v]scale2ref=iw:ih[v]")
else:
filter_args.append(f"{current_input}[{input_index}:v]scale=rw:rh[v]")
filter_args.append(f"[v][{input_index}:v]overlay=1:eof_action=endall[v]")
current_input = "[v]"
return current_input
def _handle_audio_concat(self, input_args: List[str], filter_args: List[str]) -> Optional[str]:
"""Build the audio mapping for concat mode"""
if self.task.mute:
# count("-i") equals the number of inputs added so far, i.e. the index the silent source will get
input_index = input_args.count("-i")
input_args.extend(["-f", "lavfi", "-i", "anullsrc=cl=stereo:r=48000"])
audio_output_str = f"[{input_index}:a]"
else:
audio_output_str = "[0:a]"
for audio in self.task.audios:
input_index = input_args.count("-i")
input_args.extend(["-i", audio.replace("\\", "/")])
filter_args.append(f"{audio_output_str}[{input_index}:a]amix=duration=shortest:dropout_transition=0:normalize=0[a]")
audio_output_str = "[a]"
if not audio_output_str:
return None
# A plain input stream such as [0:a] is mapped without brackets; the amix output label [a] must keep them
return audio_output_str if audio_output_str == "[a]" else audio_output_str.strip("[]")
def _handle_audio_encode(self, input_args: List[str], filter_args: List[str]) -> Optional[str]:
"""Build the audio mapping for encode mode"""
if self.task.mute:
# count("-i") equals the number of inputs added so far, i.e. the index the silent source will get
input_index = input_args.count("-i")
input_args.extend(["-f", "lavfi", "-i", "anullsrc=cl=stereo:r=48000"])
filter_args.append(f"[{input_index}:a]acopy[a]")
audio_output_str = "[a]"
else:
audio_output_str = "[0:a]"
for audio in self.task.audios:
input_index = input_args.count("-i")
input_args.extend(["-i", audio.replace("\\", "/")])
filter_args.append(f"{audio_output_str}[{input_index}:a]amix=duration=shortest:dropout_transition=0:normalize=0[a]")
audio_output_str = "[a]"
return audio_output_str or None
def _get_mp4toannexb_filter(self) -> str:
"""获取mp4toannexb滤镜"""
encoder_args_str = " ".join(self.config.encoder_args).lower()
if "hevc" in encoder_args_str:
return "hevc_mp4toannexb"
return "h264_mp4toannexb"

146
entity/render_task.py Normal file

@@ -0,0 +1,146 @@
import os
import uuid
from typing import List, Optional, Dict, Any
from dataclasses import dataclass, field
from enum import Enum
from config.settings import get_ffmpeg_config
from util.exceptions import TaskValidationError, EffectError
from entity.effects import registry as effect_registry
class TaskType(Enum):
COPY = "copy"
CONCAT = "concat"
ENCODE = "encode"
@dataclass
class RenderTask:
"""渲染任务数据类,只包含任务数据,不包含处理逻辑"""
input_files: List[str] = field(default_factory=list)
output_file: str = ""
task_type: TaskType = TaskType.COPY
# 视频参数
resolution: Optional[str] = None
frame_rate: int = 25
speed: float = 1.0
mute: bool = True
annexb: bool = False
# 裁剪参数
zoom_cut: Optional[int] = None
center_cut: Optional[int] = None
# 资源列表
subtitles: List[str] = field(default_factory=list)
luts: List[str] = field(default_factory=list)
audios: List[str] = field(default_factory=list)
overlays: List[str] = field(default_factory=list)
effects: List[str] = field(default_factory=list)
# 扩展数据
ext_data: Dict[str, Any] = field(default_factory=dict)
def __post_init__(self):
"""初始化后处理"""
# 检测annexb格式
for input_file in self.input_files:
if isinstance(input_file, str) and input_file.endswith(".ts"):
self.annexb = True
break
# 自动生成输出文件名
if not self.output_file:
self._generate_output_filename()
def _generate_output_filename(self):
"""生成输出文件名"""
if self.annexb:
self.output_file = f"rand_{uuid.uuid4()}.ts"
else:
self.output_file = f"rand_{uuid.uuid4()}.mp4"
def add_input_file(self, file_path: str):
"""添加输入文件"""
self.input_files.append(file_path)
if file_path.endswith(".ts"):
self.annexb = True
def add_overlay(self, *overlays: str):
"""添加覆盖层"""
for overlay in overlays:
if overlay.endswith('.ass'):
self.subtitles.append(overlay)
else:
self.overlays.append(overlay)
def add_audios(self, *audios: str):
"""添加音频"""
self.audios.extend(audios)
def add_lut(self, *luts: str):
"""添加LUT"""
self.luts.extend(luts)
def add_effect(self, *effects: str):
"""添加效果"""
self.effects.extend(effects)
def validate(self) -> bool:
"""验证任务参数"""
if not self.input_files:
raise TaskValidationError("No input files specified")
# 验证所有效果
for effect_str in self.effects:
effect_name, params = effect_registry.parse_effect_string(effect_str)
processor = effect_registry.get_processor(effect_name, params, self.ext_data)
if processor and not processor.validate_params():
raise EffectError(f"Invalid parameters for effect {effect_name}: {params}", effect_name, params)
return True
def can_copy(self) -> bool:
"""检查是否可以直接复制"""
return (len(self.luts) == 0 and
len(self.overlays) == 0 and
len(self.subtitles) == 0 and
len(self.effects) == 0 and
self.speed == 1 and
len(self.audios) == 0 and
len(self.input_files) == 1 and
self.zoom_cut is None and
self.center_cut is None)
def can_concat(self) -> bool:
"""检查是否可以使用concat模式"""
return (len(self.luts) == 0 and
len(self.overlays) == 0 and
len(self.subtitles) == 0 and
len(self.effects) == 0 and
self.speed == 1 and
self.zoom_cut is None and
self.center_cut is None)
def determine_task_type(self) -> TaskType:
"""自动确定任务类型"""
if self.can_copy():
return TaskType.COPY
elif self.can_concat():
return TaskType.CONCAT
else:
return TaskType.ENCODE
def update_task_type(self):
"""更新任务类型"""
self.task_type = self.determine_task_type()
def need_processing(self) -> bool:
"""检查是否需要处理"""
if self.annexb:
return True
return not self.can_copy()
def get_output_extension(self) -> str:
"""获取输出文件扩展名"""
return ".ts" if self.annexb else ".mp4"


@@ -1,22 +0,0 @@
# -*- coding: utf-8 -*-
"""
任务处理器层
包含各种任务类型的具体处理器实现。
"""
from handlers.base import BaseHandler
from handlers.render_video import RenderSegmentVideoHandler
from handlers.compose_transition import ComposeTransitionHandler
from handlers.prepare_audio import PrepareJobAudioHandler
from handlers.package_ts import PackageSegmentTsHandler
from handlers.finalize_mp4 import FinalizeMp4Handler
__all__ = [
'BaseHandler',
'RenderSegmentVideoHandler',
'ComposeTransitionHandler',
'PrepareJobAudioHandler',
'PackageSegmentTsHandler',
'FinalizeMp4Handler',
]


@@ -1,585 +0,0 @@
# -*- coding: utf-8 -*-
"""
任务处理器基类
提供所有处理器共用的基础功能。
"""
import os
import json
import logging
import shutil
import tempfile
import subprocess
import threading
from abc import ABC
from typing import Optional, List, Dict, Any, Tuple, TYPE_CHECKING
from core.handler import TaskHandler
from domain.task import Task
from domain.result import TaskResult, ErrorCode
from domain.config import WorkerConfig
from services import storage
from services.cache import MaterialCache
from constant import (
HW_ACCEL_NONE, HW_ACCEL_QSV, HW_ACCEL_CUDA,
VIDEO_ENCODE_PARAMS, VIDEO_ENCODE_PARAMS_QSV, VIDEO_ENCODE_PARAMS_CUDA
)
if TYPE_CHECKING:
from services.api_client import APIClientV2
logger = logging.getLogger(__name__)
def get_video_encode_args(hw_accel: str = HW_ACCEL_NONE) -> List[str]:
"""
根据硬件加速配置获取视频编码参数
Args:
hw_accel: 硬件加速类型 (none, qsv, cuda)
Returns:
FFmpeg 视频编码参数列表
"""
if hw_accel == HW_ACCEL_QSV:
params = VIDEO_ENCODE_PARAMS_QSV
return [
'-c:v', params['codec'],
'-preset', params['preset'],
'-profile:v', params['profile'],
'-level', params['level'],
'-global_quality', params['global_quality'],
'-look_ahead', params['look_ahead'],
]
elif hw_accel == HW_ACCEL_CUDA:
params = VIDEO_ENCODE_PARAMS_CUDA
return [
'-c:v', params['codec'],
'-preset', params['preset'],
'-profile:v', params['profile'],
'-level', params['level'],
'-rc', params['rc'],
'-cq', params['cq'],
'-b:v', '0', # 配合 vbr 模式使用 cq
]
else:
# 软件编码(默认)
params = VIDEO_ENCODE_PARAMS
return [
'-c:v', params['codec'],
'-preset', params['preset'],
'-profile:v', params['profile'],
'-level', params['level'],
'-crf', params['crf'],
'-pix_fmt', params['pix_fmt'],
]
def get_hwaccel_decode_args(hw_accel: str = HW_ACCEL_NONE, device_index: Optional[int] = None) -> List[str]:
"""
获取硬件加速解码参数(输入文件之前使用)
Args:
hw_accel: 硬件加速类型 (none, qsv, cuda)
device_index: GPU 设备索引,用于多显卡调度
Returns:
FFmpeg 硬件加速解码参数列表
"""
if hw_accel == HW_ACCEL_CUDA:
# CUDA 硬件加速解码
args = ['-hwaccel', 'cuda']
# 多显卡模式下指定设备
if device_index is not None:
args.extend(['-hwaccel_device', str(device_index)])
args.extend(['-hwaccel_output_format', 'cuda'])
return args
elif hw_accel == HW_ACCEL_QSV:
# QSV 硬件加速解码
args = ['-hwaccel', 'qsv']
# QSV 在 Windows 上使用 -qsv_device
if device_index is not None:
args.extend(['-qsv_device', str(device_index)])
args.extend(['-hwaccel_output_format', 'qsv'])
return args
else:
return []
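# Example (sketch): CUDA decode pinned to GPU 1 would yield
#   get_hwaccel_decode_args(HW_ACCEL_CUDA, 1)
#   -> ['-hwaccel', 'cuda', '-hwaccel_device', '1', '-hwaccel_output_format', 'cuda']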
def get_hwaccel_filter_prefix(hw_accel: str = HW_ACCEL_NONE) -> str:
"""
获取硬件加速滤镜前缀(用于 hwdownload 从 GPU 到 CPU)
注意:由于大多数复杂滤镜(如 lut3d, overlay, crop 等)不支持硬件表面,
我们需要在滤镜链开始时将硬件表面下载到系统内存。
CUDA/QSV hwdownload 只支持 nv12 格式输出,因此需要两步转换:
1. hwdownload,format=nv12 - 从 GPU 下载到 CPU
2. format=yuv420p - 转换为标准格式(确保与 RGBA/YUVA overlay 混合时颜色正确)
Args:
hw_accel: 硬件加速类型
Returns:
需要添加到滤镜链开头的 hwdownload 滤镜字符串
"""
if hw_accel == HW_ACCEL_CUDA:
return 'hwdownload,format=nv12,format=yuv420p,'
elif hw_accel == HW_ACCEL_QSV:
return 'hwdownload,format=nv12,format=yuv420p,'
else:
return ''
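# Example (sketch): prepending the prefix to a software-only filter chain when
# CUDA decode is enabled; the LUT path is a placeholder.
#   get_hwaccel_filter_prefix(HW_ACCEL_CUDA) + "lut3d=file=grade.cube,scale=1920:1080"
#   -> "hwdownload,format=nv12,format=yuv420p,lut3d=file=grade.cube,scale=1920:1080"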
# v2 统一视频编码参数(兼容旧代码,使用软件编码)
VIDEO_ENCODE_ARGS = get_video_encode_args(HW_ACCEL_NONE)
# v2 统一音频编码参数
AUDIO_ENCODE_ARGS = [
'-c:a', 'aac',
'-b:a', '128k',
'-ar', '48000',
'-ac', '2',
]
FFMPEG_LOGLEVEL = 'error'
def subprocess_args(include_stdout: bool = True) -> Dict[str, Any]:
"""
创建跨平台的 subprocess 参数
在 Windows 上使用 Pyinstaller --noconsole 打包时,需要特殊处理以避免弹出命令行窗口。
Args:
include_stdout: 是否包含 stdout 捕获
Returns:
subprocess.run 使用的参数字典
"""
ret: Dict[str, Any] = {}
# Windows 特殊处理
if hasattr(subprocess, 'STARTUPINFO'):
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
ret['startupinfo'] = si
ret['env'] = os.environ
# 重定向 stdin 避免 "handle is invalid" 错误
ret['stdin'] = subprocess.PIPE
if include_stdout:
ret['stdout'] = subprocess.PIPE
return ret
def probe_video_info(video_file: str) -> Tuple[int, int, float]:
"""
探测视频信息(宽度、高度、时长)
Args:
video_file: 视频文件路径
Returns:
(width, height, duration) 元组,失败返回 (0, 0, 0)
"""
try:
result = subprocess.run(
[
'ffprobe', '-v', 'error',
'-select_streams', 'v:0',
'-show_entries', 'stream=width,height:format=duration',
'-of', 'csv=s=x:p=0',
video_file
],
capture_output=True,
timeout=30,
**subprocess_args(False)
)
if result.returncode != 0:
logger.warning(f"ffprobe failed for {video_file}")
return 0, 0, 0
output = result.stdout.decode('utf-8').strip()
if not output:
return 0, 0, 0
lines = output.split('\n')
if len(lines) >= 2:
wh = lines[0].strip()
duration_str = lines[1].strip()
width, height = wh.split('x')
return int(width), int(height), float(duration_str)
return 0, 0, 0
except Exception as e:
logger.warning(f"probe_video_info error: {e}")
return 0, 0, 0
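# Example (sketch): with '-of csv=s=x:p=0' ffprobe prints something like
#   1920x1080
#   73.216000
# which probe_video_info() parses into (1920, 1080, 73.216).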
def probe_duration_json(file_path: str) -> Optional[float]:
"""
使用 ffprobe JSON 输出探测媒体时长
Args:
file_path: 媒体文件路径
Returns:
时长(秒),失败返回 None
"""
try:
result = subprocess.run(
[
'ffprobe', '-v', 'error',
'-show_entries', 'format=duration',
'-of', 'json',
file_path
],
capture_output=True,
timeout=30,
**subprocess_args(False)
)
if result.returncode != 0:
return None
data = json.loads(result.stdout.decode('utf-8'))
duration = data.get('format', {}).get('duration')
return float(duration) if duration else None
except Exception as e:
logger.warning(f"probe_duration_json error: {e}")
return None
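# Example (sketch): the JSON parsed here looks roughly like
#   {"format": {"duration": "12.345000"}}
# so probe_duration_json("clip.mp4") would return 12.345.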
class BaseHandler(TaskHandler, ABC):
"""
任务处理器基类
提供所有处理器共用的基础功能,包括:
- 临时目录管理
- 文件下载/上传
- FFmpeg 命令执行
- GPU 设备管理(多显卡调度)
- 日志记录
"""
# 线程本地存储:用于存储当前线程的 GPU 设备索引
_thread_local = threading.local()
def __init__(self, config: WorkerConfig, api_client: 'APIClientV2'):
"""
初始化处理器
Args:
config: Worker 配置
api_client: API 客户端
"""
self.config = config
self.api_client = api_client
self.material_cache = MaterialCache(
cache_dir=config.cache_dir,
enabled=config.cache_enabled,
max_size_gb=config.cache_max_size_gb
)
# ========== GPU 设备管理 ==========
def set_gpu_device(self, device_index: int) -> None:
"""
设置当前线程的 GPU 设备索引
由 TaskExecutor 在任务执行前调用。
Args:
device_index: GPU 设备索引
"""
self._thread_local.gpu_device = device_index
def get_gpu_device(self) -> Optional[int]:
"""
获取当前线程的 GPU 设备索引
Returns:
GPU 设备索引,未设置则返回 None
"""
return getattr(self._thread_local, 'gpu_device', None)
def clear_gpu_device(self) -> None:
"""
清除当前线程的 GPU 设备索引
由 TaskExecutor 在任务执行后调用。
"""
if hasattr(self._thread_local, 'gpu_device'):
del self._thread_local.gpu_device
# ========== FFmpeg 参数生成 ==========
def get_video_encode_args(self) -> List[str]:
"""
获取当前配置的视频编码参数
Returns:
FFmpeg 视频编码参数列表
"""
return get_video_encode_args(self.config.hw_accel)
def get_hwaccel_decode_args(self) -> List[str]:
"""
获取硬件加速解码参数(支持设备指定)
Returns:
FFmpeg 硬件加速解码参数列表
"""
device_index = self.get_gpu_device()
return get_hwaccel_decode_args(self.config.hw_accel, device_index)
def get_hwaccel_filter_prefix(self) -> str:
"""
获取硬件加速滤镜前缀
Returns:
需要添加到滤镜链开头的 hwdownload 滤镜字符串
"""
return get_hwaccel_filter_prefix(self.config.hw_accel)
def before_handle(self, task: Task) -> None:
"""处理前钩子"""
logger.debug(f"[task:{task.task_id}] Before handle: {task.task_type.value}")
def after_handle(self, task: Task, result: TaskResult) -> None:
"""处理后钩子"""
status = "success" if result.success else "failed"
logger.debug(f"[task:{task.task_id}] After handle: {status}")
def create_work_dir(self, task_id: str = None) -> str:
"""
创建临时工作目录
Args:
task_id: 任务 ID(用于目录命名)
Returns:
工作目录路径
"""
# 确保临时根目录存在
os.makedirs(self.config.temp_dir, exist_ok=True)
# 创建唯一的工作目录
prefix = f"task_{task_id}_" if task_id else "task_"
work_dir = tempfile.mkdtemp(dir=self.config.temp_dir, prefix=prefix)
logger.debug(f"Created work directory: {work_dir}")
return work_dir
def cleanup_work_dir(self, work_dir: str) -> None:
"""
清理临时工作目录
Args:
work_dir: 工作目录路径
"""
if not work_dir or not os.path.exists(work_dir):
return
try:
shutil.rmtree(work_dir)
logger.debug(f"Cleaned up work directory: {work_dir}")
except Exception as e:
logger.warning(f"Failed to cleanup work directory {work_dir}: {e}")
def download_file(self, url: str, dest: str, timeout: int = None, use_cache: bool = True) -> bool:
"""
下载文件(支持缓存)
Args:
url: 文件 URL
dest: 目标路径
timeout: 超时时间(秒)
use_cache: 是否使用缓存(默认 True)
Returns:
是否成功
"""
if timeout is None:
timeout = self.config.download_timeout
try:
if use_cache:
# 使用缓存下载
result = self.material_cache.get_or_download(url, dest, timeout=timeout)
else:
# 直接下载(不走缓存)
result = storage.download_file(url, dest, timeout=timeout)
if result:
file_size = os.path.getsize(dest) if os.path.exists(dest) else 0
logger.debug(f"Downloaded: {url} -> {dest} ({file_size} bytes)")
return result
except Exception as e:
logger.error(f"Download failed: {url} -> {e}")
return False
def upload_file(
self,
task_id: str,
file_type: str,
file_path: str,
file_name: str = None
) -> Optional[str]:
"""
上传文件并返回访问 URL
Args:
task_id: 任务 ID
file_type: 文件类型(video/audio/ts/mp4)
file_path: 本地文件路径
file_name: 文件名(可选)
Returns:
访问 URL,失败返回 None
"""
# 获取上传 URL
upload_info = self.api_client.get_upload_url(task_id, file_type, file_name)
if not upload_info:
logger.error(f"[task:{task_id}] Failed to get upload URL")
return None
upload_url = upload_info.get('uploadUrl')
access_url = upload_info.get('accessUrl')
if not upload_url:
logger.error(f"[task:{task_id}] Invalid upload URL response")
return None
# 上传文件
try:
result = storage.upload_file(upload_url, file_path, timeout=self.config.upload_timeout)
if result:
file_size = os.path.getsize(file_path)
logger.info(f"[task:{task_id}] Uploaded: {file_path} ({file_size} bytes)")
# 将上传成功的文件加入缓存
if access_url:
self.material_cache.add_to_cache(access_url, file_path)
return access_url
else:
logger.error(f"[task:{task_id}] Upload failed: {file_path}")
return None
except Exception as e:
logger.error(f"[task:{task_id}] Upload error: {e}")
return None
def run_ffmpeg(
self,
cmd: List[str],
task_id: str,
timeout: int = None
) -> bool:
"""
执行 FFmpeg 命令
Args:
cmd: FFmpeg 命令参数列表
task_id: 任务 ID(用于日志)
timeout: 超时时间(秒)
Returns:
是否成功
"""
if timeout is None:
timeout = self.config.ffmpeg_timeout
cmd_to_run = list(cmd)
if cmd_to_run and cmd_to_run[0] == 'ffmpeg' and '-loglevel' not in cmd_to_run:
cmd_to_run[1:1] = ['-loglevel', FFMPEG_LOGLEVEL]
# 日志记录命令(限制长度)
cmd_str = ' '.join(cmd_to_run)
if len(cmd_str) > 500:
cmd_str = cmd_str[:500] + '...'
logger.info(f"[task:{task_id}] FFmpeg: {cmd_str}")
try:
run_args = subprocess_args(False)
run_args['stdout'] = subprocess.DEVNULL
run_args['stderr'] = subprocess.PIPE
result = subprocess.run(
cmd_to_run,
timeout=timeout,
**run_args
)
if result.returncode != 0:
stderr = (result.stderr or b'').decode('utf-8', errors='replace')[:1000]
logger.error(f"[task:{task_id}] FFmpeg failed (code={result.returncode}): {stderr}")
return False
return True
except subprocess.TimeoutExpired:
logger.error(f"[task:{task_id}] FFmpeg timeout after {timeout}s")
return False
except Exception as e:
logger.error(f"[task:{task_id}] FFmpeg error: {e}")
return False
def probe_duration(self, file_path: str) -> Optional[float]:
"""
探测媒体文件时长
Args:
file_path: 文件路径
Returns:
时长(秒),失败返回 None
"""
# 首先尝试 JSON 输出方式
duration = probe_duration_json(file_path)
if duration is not None:
return duration
# 回退到旧方式
try:
_, _, duration = probe_video_info(file_path)
return float(duration) if duration else None
except Exception as e:
logger.warning(f"Failed to probe duration: {file_path} -> {e}")
return None
def get_file_size(self, file_path: str) -> int:
"""
获取文件大小
Args:
file_path: 文件路径
Returns:
文件大小(字节)
"""
try:
return os.path.getsize(file_path)
except Exception:
return 0
def ensure_file_exists(self, file_path: str, min_size: int = 0) -> bool:
"""
确保文件存在且大小满足要求
Args:
file_path: 文件路径
min_size: 最小大小(字节)
Returns:
是否满足要求
"""
if not os.path.exists(file_path):
return False
return os.path.getsize(file_path) >= min_size


@@ -1,273 +0,0 @@
# -*- coding: utf-8 -*-
"""
转场合成处理器
处理 COMPOSE_TRANSITION 任务,将相邻两个片段的 overlap 区域进行混合,生成转场效果。
使用 FFmpeg xfade 滤镜实现多种转场效果。
"""
import os
import logging
from typing import List, Optional
from handlers.base import BaseHandler
from domain.task import Task, TaskType, TransitionConfig, TRANSITION_TYPES
from domain.result import TaskResult, ErrorCode
logger = logging.getLogger(__name__)
class ComposeTransitionHandler(BaseHandler):
"""
转场合成处理器
职责:
- 下载前一个片段的视频(含尾部 overlap)
- 下载后一个片段的视频(含头部 overlap)
- 使用 xfade 滤镜合成转场效果
- 上传转场视频产物
关键约束:
- 转场任务必须等待前后两个片段的 RENDER_SEGMENT_VIDEO 都完成后才能执行
- 输出编码参数必须与片段视频一致,确保后续 TS 封装兼容
- 转场视频不含音频轨道(音频由 PREPARE_JOB_AUDIO 统一处理)
"""
def get_supported_type(self) -> TaskType:
return TaskType.COMPOSE_TRANSITION
def handle(self, task: Task) -> TaskResult:
"""处理转场合成任务"""
work_dir = self.create_work_dir(task.task_id)
try:
# 解析参数
transition_id = task.get_transition_id()
prev_segment = task.get_prev_segment()
next_segment = task.get_next_segment()
transition_config = task.get_transition_config()
output_spec = task.get_output_spec()
# 参数验证
if not transition_id:
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
"Missing transitionId"
)
if not prev_segment or not prev_segment.get('videoUrl'):
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
"Missing prevSegment.videoUrl"
)
if not next_segment or not next_segment.get('videoUrl'):
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
"Missing nextSegment.videoUrl"
)
if not transition_config:
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
"Missing transition config"
)
# 获取 overlap 时长
overlap_tail_ms = prev_segment.get('overlapTailMs', 0)
overlap_head_ms = next_segment.get('overlapHeadMs', 0)
transition_duration_ms = transition_config.duration_ms
# 验证 overlap 时长
if overlap_tail_ms <= 0 or overlap_head_ms <= 0:
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
f"Invalid overlap duration: tail={overlap_tail_ms}ms, head={overlap_head_ms}ms"
)
logger.info(
f"[task:{task.task_id}] Composing transition: {transition_config.type}, "
f"duration={transition_duration_ms}ms, "
f"overlap_tail={overlap_tail_ms}ms, overlap_head={overlap_head_ms}ms"
)
# 1. 下载前一个片段视频
prev_video_file = os.path.join(work_dir, 'prev_segment.mp4')
if not self.download_file(prev_segment['videoUrl'], prev_video_file):
return TaskResult.fail(
ErrorCode.E_INPUT_UNAVAILABLE,
f"Failed to download prev segment video: {prev_segment['videoUrl']}"
)
# 2. 下载后一个片段视频
next_video_file = os.path.join(work_dir, 'next_segment.mp4')
if not self.download_file(next_segment['videoUrl'], next_video_file):
return TaskResult.fail(
ErrorCode.E_INPUT_UNAVAILABLE,
f"Failed to download next segment video: {next_segment['videoUrl']}"
)
# 3. 获取前一个片段的实际时长
prev_duration = self.probe_duration(prev_video_file)
if not prev_duration:
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"Failed to probe prev segment duration"
)
# 4. 构建转场合成命令
output_file = os.path.join(work_dir, 'transition.mp4')
cmd = self._build_command(
prev_video_file=prev_video_file,
next_video_file=next_video_file,
output_file=output_file,
prev_duration_sec=prev_duration,
overlap_tail_ms=overlap_tail_ms,
overlap_head_ms=overlap_head_ms,
transition_config=transition_config,
output_spec=output_spec
)
# 5. 执行 FFmpeg
if not self.run_ffmpeg(cmd, task.task_id):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"FFmpeg transition composition failed"
)
# 6. 验证输出文件
if not self.ensure_file_exists(output_file, min_size=1024):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"Transition output file is missing or too small"
)
# 7. 获取实际时长
actual_duration = self.probe_duration(output_file)
actual_duration_ms = int(actual_duration * 1000) if actual_duration else transition_duration_ms
# 8. 上传产物
transition_video_url = self.upload_file(task.task_id, 'video', output_file)
if not transition_video_url:
return TaskResult.fail(
ErrorCode.E_UPLOAD_FAILED,
"Failed to upload transition video"
)
return TaskResult.ok({
'transitionVideoUrl': transition_video_url,
'actualDurationMs': actual_duration_ms
})
except Exception as e:
logger.error(f"[task:{task.task_id}] Unexpected error: {e}", exc_info=True)
return TaskResult.fail(ErrorCode.E_UNKNOWN, str(e))
finally:
self.cleanup_work_dir(work_dir)
def _build_command(
self,
prev_video_file: str,
next_video_file: str,
output_file: str,
prev_duration_sec: float,
overlap_tail_ms: int,
overlap_head_ms: int,
transition_config: TransitionConfig,
output_spec
) -> List[str]:
"""
构建转场合成命令
使用 xfade 滤镜合成转场效果:
1. 从前一个片段截取尾部 overlap 区域
2. 从后一个片段截取头部 overlap 区域
3. 使用 xfade 进行混合
注意:
- 转场视频时长很短,需要特别处理 GOP 大小
- 确保第一帧是关键帧以便后续 TS 封装
Args:
prev_video_file: 前一个片段视频路径
next_video_file: 后一个片段视频路径
output_file: 输出文件路径
prev_duration_sec: 前一个片段总时长(秒)
overlap_tail_ms: 尾部 overlap 时长(毫秒)
overlap_head_ms: 头部 overlap 时长(毫秒)
transition_config: 转场配置
output_spec: 输出规格
Returns:
FFmpeg 命令参数列表
"""
# 计算时间参数
overlap_tail_sec = overlap_tail_ms / 1000.0
overlap_head_sec = overlap_head_ms / 1000.0
# 前一个片段的尾部 overlap 起始位置
tail_start_sec = prev_duration_sec - overlap_tail_sec
# Transition duration: use the shorter of the two overlap regions as the xfade duration.
# Note: the xfade output length = overlap_tail + overlap_head - duration,
# so with duration = min(tail, head) the blended output lasts max(tail, head).
transition_duration_sec = min(overlap_tail_sec, overlap_head_sec)
# 获取 xfade 转场类型
xfade_transition = transition_config.get_ffmpeg_transition()
# 构建滤镜
# [0:v] trim 截取前一个片段的尾部 overlap
# [1:v] trim 截取后一个片段的头部 overlap
# xfade 混合两段视频
filter_complex = (
f"[0:v]trim=start={tail_start_sec},setpts=PTS-STARTPTS[v0];"
f"[1:v]trim=end={overlap_head_sec},setpts=PTS-STARTPTS[v1];"
f"[v0][v1]xfade=transition={xfade_transition}:duration={transition_duration_sec}:offset=0[outv]"
)
cmd = [
'ffmpeg', '-y', '-hide_banner',
'-i', prev_video_file,
'-i', next_video_file,
'-filter_complex', filter_complex,
'-map', '[outv]',
]
# 编码参数(根据硬件加速配置动态获取)
cmd.extend(self.get_video_encode_args())
# 帧率
fps = output_spec.fps
# 计算输出视频的预估帧数
# xfade 输出时长 ≈ overlap_tail + overlap_head - transition_duration
output_duration_sec = overlap_tail_sec + overlap_head_sec - transition_duration_sec
total_frames = int(output_duration_sec * fps)
# 动态调整 GOP 大小:对于短视频,GOP 不能大于总帧数
# 确保至少有 1 个关键帧(第一帧),最小 GOP = 1
if total_frames <= 1:
gop_size = 1
elif total_frames < fps:
# 短于 1 秒的视频,使用全部帧数作为 GOP(整个视频只有开头一个关键帧)
gop_size = total_frames
else:
# 正常情况,每秒一个关键帧(比标准的 2 秒更密集,适合短视频)
gop_size = fps
cmd.extend(['-r', str(fps)])
cmd.extend(['-g', str(gop_size)])
cmd.extend(['-keyint_min', str(min(gop_size, fps // 2 or 1))])
# 强制第一帧为关键帧
cmd.extend(['-force_key_frames', 'expr:eq(n,0)'])
# 无音频
cmd.append('-an')
# 输出文件
cmd.append(output_file)
return cmd
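# Worked example (sketch, placeholder numbers): with a 6.0 s previous segment,
# overlap_tail_ms = overlap_head_ms = 500 and fps = 25:
#   tail_start_sec          = 6.0 - 0.5 = 5.5
#   transition_duration_sec = min(0.5, 0.5) = 0.5
#   output_duration_sec     = 0.5 + 0.5 - 0.5 = 0.5  -> total_frames = 12
#   gop_size                = 12 (fewer frames than one second, so GOP = total frames)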


@@ -1,190 +0,0 @@
# -*- coding: utf-8 -*-
"""
最终 MP4 合并处理器
处理 FINALIZE_MP4 任务,将所有 TS 分片合并为最终可下载的 MP4 文件。
"""
import os
import logging
from typing import List
from handlers.base import BaseHandler
from domain.task import Task, TaskType
from domain.result import TaskResult, ErrorCode
logger = logging.getLogger(__name__)
class FinalizeMp4Handler(BaseHandler):
"""
最终 MP4 合并处理器
职责:
- 下载所有 TS 分片
- 使用 concat demuxer 合并
- 产出最终 MP4(remux,不重编码)
- 上传 MP4 产物
关键约束:
- 优先使用 remux(复制流,不重新编码)
- 使用 aac_adtstoasc bitstream filter 处理音频
"""
def get_supported_type(self) -> TaskType:
return TaskType.FINALIZE_MP4
def handle(self, task: Task) -> TaskResult:
"""处理 MP4 合并任务"""
work_dir = self.create_work_dir(task.task_id)
try:
# 获取 TS 列表
ts_list = task.get_ts_list()
m3u8_url = task.get_m3u8_url()
if not ts_list and not m3u8_url:
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
"Missing tsList or m3u8Url"
)
output_file = os.path.join(work_dir, 'final.mp4')
if ts_list:
# 方式1:使用 TS 列表
result = self._process_ts_list(task, work_dir, ts_list, output_file)
else:
# 方式2:使用 m3u8 URL
result = self._process_m3u8(task, work_dir, m3u8_url, output_file)
if not result.success:
return result
# 验证输出文件
if not self.ensure_file_exists(output_file, min_size=4096):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"MP4 output file is missing or too small"
)
# 获取文件大小
file_size = self.get_file_size(output_file)
# 上传产物
mp4_url = self.upload_file(task.task_id, 'mp4', output_file)
if not mp4_url:
return TaskResult.fail(
ErrorCode.E_UPLOAD_FAILED,
"Failed to upload MP4"
)
return TaskResult.ok({
'mp4Url': mp4_url,
'fileSizeBytes': file_size
})
except Exception as e:
logger.error(f"[task:{task.task_id}] Unexpected error: {e}", exc_info=True)
return TaskResult.fail(ErrorCode.E_UNKNOWN, str(e))
finally:
self.cleanup_work_dir(work_dir)
def _process_ts_list(
self,
task: Task,
work_dir: str,
ts_list: List[str],
output_file: str
) -> TaskResult:
"""
使用 TS 列表处理
Args:
task: 任务实体
work_dir: 工作目录
ts_list: TS URL 列表
output_file: 输出文件路径
Returns:
TaskResult
"""
# 1. 下载所有 TS 分片
ts_files = []
for i, ts_url in enumerate(ts_list):
ts_file = os.path.join(work_dir, f'seg_{i}.ts')
if not self.download_file(ts_url, ts_file):
return TaskResult.fail(
ErrorCode.E_INPUT_UNAVAILABLE,
f"Failed to download TS segment {i}: {ts_url}"
)
ts_files.append(ts_file)
logger.info(f"[task:{task.task_id}] Downloaded {len(ts_files)} TS segments")
# 2. 创建 concat 文件列表
concat_file = os.path.join(work_dir, 'concat.txt')
with open(concat_file, 'w', encoding='utf-8') as f:
for ts_file in ts_files:
# FFmpeg concat 路径相对于 concat.txt 所在目录,只需写文件名
ts_filename = os.path.basename(ts_file)
f.write(f"file '{ts_filename}'\n")
# 3. 构建合并命令(remux,不重编码)
cmd = [
'ffmpeg', '-y', '-hide_banner',
'-f', 'concat',
'-safe', '0',
'-i', concat_file,
'-c', 'copy', # 复制流,不重编码
'-bsf:a', 'aac_adtstoasc', # 音频 bitstream filter
output_file
]
# 4. 执行 FFmpeg
if not self.run_ffmpeg(cmd, task.task_id):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"MP4 concatenation failed"
)
return TaskResult.ok({})
def _process_m3u8(
self,
task: Task,
work_dir: str,
m3u8_url: str,
output_file: str
) -> TaskResult:
"""
使用 m3u8 URL 处理
Args:
task: 任务实体
work_dir: 工作目录
m3u8_url: m3u8 URL
output_file: 输出文件路径
Returns:
TaskResult
"""
# 构建命令
cmd = [
'ffmpeg', '-y', '-hide_banner',
'-protocol_whitelist', 'file,http,https,tcp,tls',
'-i', m3u8_url,
'-c', 'copy',
'-bsf:a', 'aac_adtstoasc',
output_file
]
# 执行 FFmpeg
if not self.run_ffmpeg(cmd, task.task_id):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"MP4 conversion from m3u8 failed"
)
return TaskResult.ok({})
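# Example (sketch) of the concat list and remux command produced above, with
# placeholder segment names:
#   concat.txt:
#     file 'seg_0.ts'
#     file 'seg_1.ts'
#   ffmpeg -y -hide_banner -f concat -safe 0 -i concat.txt \
#          -c copy -bsf:a aac_adtstoasc final.mp4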


@@ -1,304 +0,0 @@
# -*- coding: utf-8 -*-
"""
TS 分片封装处理器
处理 PACKAGE_SEGMENT_TS 任务,将视频片段和对应时间区间的音频封装为 TS 分片。
支持转场相关的 overlap 裁剪和转场分片封装。
"""
import os
import logging
from typing import List, Optional
from handlers.base import BaseHandler, VIDEO_ENCODE_ARGS
from domain.task import Task, TaskType
from domain.result import TaskResult, ErrorCode
logger = logging.getLogger(__name__)
class PackageSegmentTsHandler(BaseHandler):
"""
TS 分片封装处理器
职责:
- 下载视频片段
- 下载全局音频
- 截取对应时间区间的音频
- 封装为 TS 分片
- 上传 TS 产物
关键约束:
- TS 必须包含音视频同轨
- 使用 output_ts_offset 保证时间戳连续
- 输出 extinfDurationSec 供 m3u8 使用
转场相关:
- 普通片段 TS:需要裁剪掉 overlap 区域(已被转场分片使用)
- 转场分片 TS:直接封装转场视频产物,无需裁剪
- 无转场时:走原有逻辑,不做裁剪
精确裁剪:
- 当需要裁剪 overlap 区域时,必须使用重编码方式(-vf trim)才能精确切割
- 使用 -c copy 只能从关键帧切割,会导致不精确
"""
def get_supported_type(self) -> TaskType:
return TaskType.PACKAGE_SEGMENT_TS
def handle(self, task: Task) -> TaskResult:
"""处理 TS 封装任务"""
work_dir = self.create_work_dir(task.task_id)
try:
# 解析参数
video_url = task.get_video_url()
audio_url = task.get_audio_url()
start_time_ms = task.get_start_time_ms()
duration_ms = task.get_duration_ms()
output_spec = task.get_output_spec()
# 转场相关参数
is_transition_segment = task.is_transition_segment()
trim_head = task.should_trim_head()
trim_tail = task.should_trim_tail()
trim_head_ms = task.get_trim_head_ms()
trim_tail_ms = task.get_trim_tail_ms()
if not video_url:
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
"Missing videoUrl"
)
if not audio_url:
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
"Missing audioUrl"
)
# 计算时间参数
start_sec = start_time_ms / 1000.0
duration_sec = duration_ms / 1000.0
# 1. 下载视频片段
video_file = os.path.join(work_dir, 'video.mp4')
if not self.download_file(video_url, video_file):
return TaskResult.fail(
ErrorCode.E_INPUT_UNAVAILABLE,
f"Failed to download video: {video_url}"
)
# 2. 下载全局音频
audio_file = os.path.join(work_dir, 'audio.aac')
if not self.download_file(audio_url, audio_file):
return TaskResult.fail(
ErrorCode.E_INPUT_UNAVAILABLE,
f"Failed to download audio: {audio_url}"
)
# 3. 判断是否需要精确裁剪视频
needs_video_trim = not is_transition_segment and (
(trim_head and trim_head_ms > 0) or
(trim_tail and trim_tail_ms > 0)
)
# 4. 如果需要裁剪,先重编码裁剪视频
processed_video_file = video_file
if needs_video_trim:
processed_video_file = os.path.join(work_dir, 'trimmed_video.mp4')
trim_cmd = self._build_trim_command(
video_file=video_file,
output_file=processed_video_file,
trim_head_ms=trim_head_ms if trim_head else 0,
trim_tail_ms=trim_tail_ms if trim_tail else 0,
output_spec=output_spec
)
logger.info(f"[task:{task.task_id}] Trimming video: head={trim_head_ms}ms, tail={trim_tail_ms}ms")
if not self.run_ffmpeg(trim_cmd, task.task_id):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"Video trim failed"
)
if not self.ensure_file_exists(processed_video_file, min_size=1024):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"Trimmed video file is missing or too small"
)
# 5. 构建 TS 封装命令
output_file = os.path.join(work_dir, 'segment.ts')
cmd = self._build_package_command(
video_file=processed_video_file,
audio_file=audio_file,
output_file=output_file,
start_sec=start_sec,
duration_sec=duration_sec
)
# 6. 执行 FFmpeg
if not self.run_ffmpeg(cmd, task.task_id):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"TS packaging failed"
)
# 7. 验证输出文件
if not self.ensure_file_exists(output_file, min_size=1024):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"TS output file is missing or too small"
)
# 8. 获取实际时长(用于 EXTINF)
actual_duration = self.probe_duration(output_file)
extinf_duration = actual_duration if actual_duration else duration_sec
# 9. 上传产物
ts_url = self.upload_file(task.task_id, 'ts', output_file)
if not ts_url:
return TaskResult.fail(
ErrorCode.E_UPLOAD_FAILED,
"Failed to upload TS"
)
return TaskResult.ok({
'tsUrl': ts_url,
'extinfDurationSec': extinf_duration
})
except Exception as e:
logger.error(f"[task:{task.task_id}] Unexpected error: {e}", exc_info=True)
return TaskResult.fail(ErrorCode.E_UNKNOWN, str(e))
finally:
self.cleanup_work_dir(work_dir)
def _build_trim_command(
self,
video_file: str,
output_file: str,
trim_head_ms: int,
trim_tail_ms: int,
output_spec
) -> List[str]:
"""
构建视频精确裁剪命令(重编码方式)
使用 trim 滤镜进行精确帧级裁剪,而非 -ss/-t 参数的关键帧裁剪。
Args:
video_file: 输入视频路径
output_file: 输出视频路径
trim_head_ms: 头部裁剪时长(毫秒)
trim_tail_ms: 尾部裁剪时长(毫秒)
output_spec: 输出规格
Returns:
FFmpeg 命令参数列表
"""
# 获取原视频时长
original_duration = self.probe_duration(video_file)
if not original_duration:
original_duration = 10.0  # fallback when probing fails, keeps the trim window valid
trim_head_sec = trim_head_ms / 1000.0
trim_tail_sec = trim_tail_ms / 1000.0
# 计算裁剪后的起止时间
start_time = trim_head_sec
end_time = original_duration - trim_tail_sec
# 构建 trim 滤镜
vf_filter = f"trim=start={start_time}:end={end_time},setpts=PTS-STARTPTS"
cmd = [
'ffmpeg', '-y', '-hide_banner',
'-i', video_file,
'-vf', vf_filter,
]
# 编码参数
cmd.extend(VIDEO_ENCODE_ARGS)
# 帧率
fps = output_spec.fps
cmd.extend(['-r', str(fps)])
# 计算输出视频帧数,动态调整 GOP
output_duration_sec = end_time - start_time
total_frames = int(output_duration_sec * fps)
# 动态 GOP:短视频使用较小的 GOP
if total_frames <= 1:
gop_size = 1
elif total_frames < fps:
gop_size = total_frames
else:
gop_size = fps # 每秒一个关键帧
cmd.extend(['-g', str(gop_size)])
cmd.extend(['-keyint_min', str(min(gop_size, fps // 2 or 1))])
# 强制第一帧为关键帧
cmd.extend(['-force_key_frames', 'expr:eq(n,0)'])
# 无音频(音频单独处理)
cmd.append('-an')
cmd.append(output_file)
return cmd
def _build_package_command(
self,
video_file: str,
audio_file: str,
output_file: str,
start_sec: float,
duration_sec: float
) -> List[str]:
"""
构建 TS 封装命令
将视频和对应时间区间的音频封装为 TS 分片。
视频使用 copy 模式(已经过精确裁剪或无需裁剪)。
Args:
video_file: 视频文件路径(已处理)
audio_file: 音频文件路径
output_file: 输出文件路径
start_sec: 音频开始时间(秒)
duration_sec: 音频时长(秒)
Returns:
FFmpeg 命令参数列表
"""
cmd = [
'ffmpeg', '-y', '-hide_banner',
# 视频输入
'-i', video_file,
# 音频输入(从 start_sec 开始截取 duration_sec)
'-ss', str(start_sec),
'-t', str(duration_sec),
'-i', audio_file,
# 映射流
'-map', '0:v:0', # 使用第一个输入的视频流
'-map', '1:a:0', # 使用第二个输入的音频流
# 复制编码(视频已处理,无需重编码)
'-c:v', 'copy',
'-c:a', 'copy',
# 关键:时间戳偏移,保证整体连续
'-output_ts_offset', str(start_sec),
# 复用参数
'-muxdelay', '0',
'-muxpreload', '0',
# 输出格式
'-f', 'mpegts',
output_file
]
return cmd
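# Example (sketch): for a segment starting at 12.0 s with a 5.0 s duration the
# command boils down to (placeholder file names):
#   ffmpeg -y -hide_banner -i video.mp4 -ss 12.0 -t 5.0 -i audio.aac \
#          -map 0:v:0 -map 1:a:0 -c:v copy -c:a copy \
#          -output_ts_offset 12.0 -muxdelay 0 -muxpreload 0 -f mpegts segment.ts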


@@ -1,251 +0,0 @@
# -*- coding: utf-8 -*-
"""
全局音频准备处理器
处理 PREPARE_JOB_AUDIO 任务,生成整个视频的连续音频轨道。
"""
import os
import logging
from typing import List, Dict, Optional
from handlers.base import BaseHandler, AUDIO_ENCODE_ARGS
from domain.task import Task, TaskType, AudioSpec, AudioProfile
from domain.result import TaskResult, ErrorCode
logger = logging.getLogger(__name__)
class PrepareJobAudioHandler(BaseHandler):
"""
全局音频准备处理器
职责:
- 下载全局 BGM
- 下载各片段叠加音效
- 构建复杂混音命令
- 执行混音
- 上传音频产物
关键约束:
- 全局 BGM 连续生成一次,贯穿整个时长
- 禁止使用 amix normalize=1
- 只对叠加音轨做极短淡入淡出(5-20ms)
- 不对 BGM 做边界 fade
"""
def get_supported_type(self) -> TaskType:
return TaskType.PREPARE_JOB_AUDIO
def handle(self, task: Task) -> TaskResult:
"""处理音频准备任务"""
work_dir = self.create_work_dir(task.task_id)
try:
# 解析参数
total_duration_ms = task.get_total_duration_ms()
if total_duration_ms <= 0:
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
"Invalid totalDurationMs"
)
total_duration_sec = total_duration_ms / 1000.0
audio_profile = task.get_audio_profile()
bgm_url = task.get_bgm_url()
segments = task.get_segments()
# 1. 下载 BGM(如有)
bgm_file = None
if bgm_url:
bgm_file = os.path.join(work_dir, 'bgm.mp3')
if not self.download_file(bgm_url, bgm_file):
logger.warning(f"[task:{task.task_id}] Failed to download BGM")
bgm_file = None
# 2. 下载叠加音效
sfx_files = []
for i, seg in enumerate(segments):
audio_spec_data = seg.get('audioSpecJson')
if audio_spec_data:
audio_spec = AudioSpec.from_dict(audio_spec_data)
if audio_spec and audio_spec.audio_url:
sfx_file = os.path.join(work_dir, f'sfx_{i}.mp3')
if self.download_file(audio_spec.audio_url, sfx_file):
sfx_files.append({
'file': sfx_file,
'spec': audio_spec,
'segment': seg
})
else:
logger.warning(f"[task:{task.task_id}] Failed to download SFX {i}")
# 3. 构建音频混音命令
output_file = os.path.join(work_dir, 'audio_full.aac')
cmd = self._build_audio_command(
bgm_file=bgm_file,
sfx_files=sfx_files,
output_file=output_file,
total_duration_sec=total_duration_sec,
audio_profile=audio_profile
)
# 4. 执行 FFmpeg
if not self.run_ffmpeg(cmd, task.task_id):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"Audio mixing failed"
)
# 5. 验证输出文件
if not self.ensure_file_exists(output_file, min_size=1024):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"Audio output file is missing or too small"
)
# 6. 上传产物
audio_url = self.upload_file(task.task_id, 'audio', output_file)
if not audio_url:
return TaskResult.fail(
ErrorCode.E_UPLOAD_FAILED,
"Failed to upload audio"
)
return TaskResult.ok({
'audioUrl': audio_url
})
except Exception as e:
logger.error(f"[task:{task.task_id}] Unexpected error: {e}", exc_info=True)
return TaskResult.fail(ErrorCode.E_UNKNOWN, str(e))
finally:
self.cleanup_work_dir(work_dir)
def _build_audio_command(
self,
bgm_file: Optional[str],
sfx_files: List[Dict],
output_file: str,
total_duration_sec: float,
audio_profile: AudioProfile
) -> List[str]:
"""
构建音频混音命令
Args:
bgm_file: BGM 文件路径(可选)
sfx_files: 叠加音效列表
output_file: 输出文件路径
total_duration_sec: 总时长(秒)
audio_profile: 音频配置
Returns:
FFmpeg 命令参数列表
"""
sample_rate = audio_profile.sample_rate
channels = audio_profile.channels
# 情况1:无 BGM 也无叠加音效 -> 生成静音
if not bgm_file and not sfx_files:
return [
'ffmpeg', '-y', '-hide_banner',
'-f', 'lavfi',
'-i', f'anullsrc=r={sample_rate}:cl=stereo',
'-t', str(total_duration_sec),
'-c:a', 'aac', '-b:a', '128k',
output_file
]
# 情况2:仅 BGM,无叠加音效
if not sfx_files:
return [
'ffmpeg', '-y', '-hide_banner',
'-i', bgm_file,
'-t', str(total_duration_sec),
'-c:a', 'aac', '-b:a', '128k',
'-ar', str(sample_rate), '-ac', str(channels),
output_file
]
# 情况3:BGM + 叠加音效 -> 复杂滤镜
inputs = []
if bgm_file:
inputs.extend(['-i', bgm_file])
for sfx in sfx_files:
inputs.extend(['-i', sfx['file']])
filter_parts = []
input_idx = 0
# BGM 处理(或生成静音底轨)
if bgm_file:
filter_parts.append(
f"[0:a]atrim=0:{total_duration_sec},asetpts=PTS-STARTPTS,"
f"apad=whole_dur={total_duration_sec}[bgm]"
)
input_idx = 1
else:
filter_parts.append(
f"anullsrc=r={sample_rate}:cl=stereo,"
f"atrim=0:{total_duration_sec}[bgm]"
)
input_idx = 0
# 叠加音效处理
sfx_labels = []
for i, sfx in enumerate(sfx_files):
idx = input_idx + i
spec = sfx['spec']
seg = sfx['segment']
# 计算时间参数
start_time_ms = seg.get('startTimeMs', 0)
duration_ms = seg.get('durationMs', 5000)
delay_ms = start_time_ms + spec.delay_ms
delay_sec = delay_ms / 1000.0
duration_sec = duration_ms / 1000.0
# 淡入淡出参数(极短,5-20ms)
fade_in_sec = spec.fade_in_ms / 1000.0
fade_out_sec = spec.fade_out_ms / 1000.0
# 音量
volume = spec.volume
label = f"sfx{i}"
sfx_labels.append(f"[{label}]")
# 构建滤镜:延迟 + 淡入淡出 + 音量
# 注意:只对叠加音轨做淡入淡出,不对 BGM 做
sfx_filter = (
f"[{idx}:a]"
f"adelay={int(delay_ms)}|{int(delay_ms)},"
f"afade=t=in:st={delay_sec}:d={fade_in_sec},"
f"afade=t=out:st={delay_sec + duration_sec - fade_out_sec}:d={fade_out_sec},"
f"volume={volume}"
f"[{label}]"
)
filter_parts.append(sfx_filter)
# 混音(关键:normalize=0,禁止归一化)
# dropout_transition=0 表示输入结束时不做渐变
mix_inputs = "[bgm]" + "".join(sfx_labels)
num_inputs = 1 + len(sfx_files)
filter_parts.append(
f"{mix_inputs}amix=inputs={num_inputs}:duration=first:"
f"dropout_transition=0:normalize=0[out]"
)
filter_complex = ';'.join(filter_parts)
cmd = ['ffmpeg', '-y', '-hide_banner'] + inputs + [
'-filter_complex', filter_complex,
'-map', '[out]',
'-c:a', 'aac', '-b:a', '128k',
'-ar', str(sample_rate), '-ac', str(channels),
output_file
]
return cmd
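# Example (sketch) of the generated filter graph for one BGM plus one overlay
# SFX (startTimeMs=3000, durationMs=4000, volume=0.8, 10 ms fades, 20 s total):
#   [0:a]atrim=0:20.0,asetpts=PTS-STARTPTS,apad=whole_dur=20.0[bgm];
#   [1:a]adelay=3000|3000,afade=t=in:st=3.0:d=0.01,
#        afade=t=out:st=6.99:d=0.01,volume=0.8[sfx0];
#   [bgm][sfx0]amix=inputs=2:duration=first:dropout_transition=0:normalize=0[out]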


@@ -1,726 +0,0 @@
# -*- coding: utf-8 -*-
"""
视频片段渲染处理器
处理 RENDER_SEGMENT_VIDEO 任务,将原素材渲染为符合输出规格的视频片段。
支持转场 overlap 区域的帧冻结生成。
"""
import os
import logging
from typing import List, Optional, Tuple
from urllib.parse import urlparse, unquote
from handlers.base import BaseHandler
from domain.task import Task, TaskType, RenderSpec, OutputSpec, Effect, IMAGE_EXTENSIONS
from domain.result import TaskResult, ErrorCode
logger = logging.getLogger(__name__)
def _get_extension_from_url(url: str) -> str:
"""从 URL 提取文件扩展名"""
parsed = urlparse(url)
path = unquote(parsed.path)
_, ext = os.path.splitext(path)
return ext.lower() if ext else ''
class RenderSegmentVideoHandler(BaseHandler):
"""
视频片段渲染处理器
职责:
- 下载素材文件
- 下载 LUT 文件(如有)
- 下载叠加层(如有)
- 构建 FFmpeg 渲染命令
- 执行渲染(支持帧冻结生成 overlap 区域)
- 上传产物
"""
def get_supported_type(self) -> TaskType:
return TaskType.RENDER_SEGMENT_VIDEO
def handle(self, task: Task) -> TaskResult:
"""处理视频渲染任务"""
work_dir = self.create_work_dir(task.task_id)
try:
# 解析参数
material_url = task.get_material_url()
if not material_url:
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
"Missing material URL (boundMaterialUrl or sourceRef)"
)
# 检查 URL 格式:必须是 HTTP 或 HTTPS 协议
if not material_url.startswith(('http://', 'https://')):
source_ref = task.get_source_ref()
bound_url = task.get_bound_material_url()
logger.error(
f"[task:{task.task_id}] Invalid material URL format: '{material_url}'. "
f"boundMaterialUrl={bound_url}, sourceRef={source_ref}. "
f"Server should provide boundMaterialUrl with HTTP/HTTPS URL."
)
return TaskResult.fail(
ErrorCode.E_SPEC_INVALID,
f"Invalid material URL: '{material_url}' is not a valid HTTP/HTTPS URL. "
f"Server must provide boundMaterialUrl."
)
render_spec = task.get_render_spec()
output_spec = task.get_output_spec()
duration_ms = task.get_duration_ms()
# 1. 检测素材类型并确定输入文件扩展名
is_image = task.is_image_material()
if is_image:
# 图片素材:根据 URL 确定扩展名
ext = _get_extension_from_url(material_url)
if not ext or ext not in IMAGE_EXTENSIONS:
ext = '.jpg' # 默认扩展名
input_file = os.path.join(work_dir, f'input{ext}')
else:
input_file = os.path.join(work_dir, 'input.mp4')
# 2. 下载素材
if not self.download_file(material_url, input_file):
return TaskResult.fail(
ErrorCode.E_INPUT_UNAVAILABLE,
f"Failed to download material: {material_url}"
)
# 3. 图片素材转换为视频
if is_image:
video_input_file = os.path.join(work_dir, 'input_video.mp4')
if not self._convert_image_to_video(
image_file=input_file,
output_file=video_input_file,
duration_ms=duration_ms,
output_spec=output_spec,
render_spec=render_spec,
task_id=task.task_id
):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"Failed to convert image to video"
)
# 使用转换后的视频作为输入
input_file = video_input_file
logger.info(f"[task:{task.task_id}] Image converted to video successfully")
# 4. 下载 LUT(如有)
lut_file = None
if render_spec.lut_url:
lut_file = os.path.join(work_dir, 'lut.cube')
if not self.download_file(render_spec.lut_url, lut_file):
logger.warning(f"[task:{task.task_id}] Failed to download LUT, continuing without it")
lut_file = None
# 5. 下载叠加层(如有)
overlay_file = None
if render_spec.overlay_url:
# 根据 URL 后缀确定文件扩展名
url_lower = render_spec.overlay_url.lower()
if url_lower.endswith('.jpg') or url_lower.endswith('.jpeg'):
ext = '.jpg'
elif url_lower.endswith('.mov'):
ext = '.mov'
else:
ext = '.png' # 默认
overlay_file = os.path.join(work_dir, f'overlay{ext}')
if not self.download_file(render_spec.overlay_url, overlay_file):
logger.warning(f"[task:{task.task_id}] Failed to download overlay, continuing without it")
overlay_file = None
# 6. 探测源视频时长(仅对视频素材)
# 用于检测时长不足并通过冻结最后一帧补足
source_duration_sec = None
if not is_image:
source_duration = self.probe_duration(input_file)
if source_duration:
source_duration_sec = source_duration
speed = float(render_spec.speed) if render_spec.speed else 1.0
if speed > 0:
# 计算变速后的有效时长
effective_duration_sec = source_duration_sec / speed
required_duration_sec = duration_ms / 1000.0
# 如果源视频时长不足,记录日志
if effective_duration_sec < required_duration_sec:
shortage_sec = required_duration_sec - effective_duration_sec
logger.warning(
f"[task:{task.task_id}] Source video duration insufficient: "
f"effective={effective_duration_sec:.2f}s (speed={speed}), "
f"required={required_duration_sec:.2f}s, "
f"will freeze last frame for {shortage_sec:.2f}s"
)
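# Illustrative example: a 6.0 s source played back at speed=2.0 has an
# effective duration of 3.0 s; if the segment requires 5.0 s, the filter
# chain built below freezes the last frame for the remaining 2.0 s via tpad.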
# 7. 计算 overlap 时长(用于转场帧冻结)
# 头部 overlap: 来自前一片段的出场转场
overlap_head_ms = task.get_overlap_head_ms()
# 尾部 overlap: 当前片段的出场转场
overlap_tail_ms = task.get_overlap_tail_ms_v2()
# 8. 构建 FFmpeg 命令
output_file = os.path.join(work_dir, 'output.mp4')
cmd = self._build_command(
input_file=input_file,
output_file=output_file,
render_spec=render_spec,
output_spec=output_spec,
duration_ms=duration_ms,
lut_file=lut_file,
overlay_file=overlay_file,
overlap_head_ms=overlap_head_ms,
overlap_tail_ms=overlap_tail_ms,
source_duration_sec=source_duration_sec
)
# 9. 执行 FFmpeg
if not self.run_ffmpeg(cmd, task.task_id):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"FFmpeg rendering failed"
)
# 10. 验证输出文件
if not self.ensure_file_exists(output_file, min_size=4096):
return TaskResult.fail(
ErrorCode.E_FFMPEG_FAILED,
"Output file is missing or too small"
)
# 11. 获取实际时长
actual_duration = self.probe_duration(output_file)
actual_duration_ms = int(actual_duration * 1000) if actual_duration else duration_ms
# 12. 上传产物
video_url = self.upload_file(task.task_id, 'video', output_file)
if not video_url:
return TaskResult.fail(
ErrorCode.E_UPLOAD_FAILED,
"Failed to upload video"
)
# 13. 构建结果(包含 overlap 信息)
result_data = {
'videoUrl': video_url,
'actualDurationMs': actual_duration_ms,
'overlapHeadMs': overlap_head_ms,
'overlapTailMs': overlap_tail_ms
}
return TaskResult.ok(result_data)
except Exception as e:
logger.error(f"[task:{task.task_id}] Unexpected error: {e}", exc_info=True)
return TaskResult.fail(ErrorCode.E_UNKNOWN, str(e))
finally:
self.cleanup_work_dir(work_dir)
def _convert_image_to_video(
self,
image_file: str,
output_file: str,
duration_ms: int,
output_spec: OutputSpec,
render_spec: RenderSpec,
task_id: str
) -> bool:
"""
将图片转换为视频
使用 FFmpeg 将静态图片转换为指定时长的视频,
同时应用缩放填充和变速处理。
Args:
image_file: 输入图片文件路径
output_file: 输出视频文件路径
duration_ms: 目标时长(毫秒)
output_spec: 输出规格
render_spec: 渲染规格
task_id: 任务 ID(用于日志)
Returns:
是否成功
"""
width = output_spec.width
height = output_spec.height
fps = output_spec.fps
# 计算实际时长(考虑变速)
speed = float(render_spec.speed) if render_spec.speed else 1.0
if speed <= 0:
speed = 1.0
# 变速后的实际播放时长
actual_duration_sec = (duration_ms / 1000.0) / speed
# 构建 FFmpeg 命令
cmd = [
'ffmpeg', '-y', '-hide_banner',
'-loop', '1', # 循环输入图片
'-i', image_file,
'-t', str(actual_duration_sec), # 输出时长
]
# 构建滤镜:缩放填充到目标尺寸
filters = []
# 裁切处理(与视频相同逻辑)
if render_spec.crop_enable and render_spec.face_pos:
try:
fx, fy = map(float, render_spec.face_pos.split(','))
target_ratio = width / height
filters.append(
f"crop='min(iw,ih*{target_ratio})':'min(ih,iw/{target_ratio})':"
f"'(iw-min(iw,ih*{target_ratio}))*{fx}':"
f"'(ih-min(ih,iw/{target_ratio}))*{fy}'"
)
except (ValueError, ZeroDivisionError):
logger.warning(f"[task:{task_id}] Invalid face position: {render_spec.face_pos}")
elif render_spec.zoom_cut:
target_ratio = width / height
filters.append(
f"crop='min(iw,ih*{target_ratio})':'min(ih,iw/{target_ratio})'"
)
# 缩放填充
filters.append(
f"scale={width}:{height}:force_original_aspect_ratio=decrease,"
f"pad={width}:{height}:(ow-iw)/2:(oh-ih)/2:black"
)
# 格式转换(确保兼容性)
filters.append("format=yuv420p")
cmd.extend(['-vf', ','.join(filters)])
# 计算总帧数,动态调整 GOP
total_frames = int(actual_duration_sec * fps)
if total_frames <= 1:
gop_size = 1
elif total_frames < fps:
gop_size = total_frames
else:
gop_size = fps * 2 # 正常情况,2 秒一个关键帧
# 编码参数
cmd.extend([
'-c:v', 'libx264',
'-preset', 'fast',
'-crf', '18',
'-r', str(fps),
'-g', str(gop_size),
'-keyint_min', str(min(gop_size, fps // 2 or 1)),
'-force_key_frames', 'expr:eq(n,0)',
'-an', # 无音频
output_file
])
logger.info(f"[task:{task_id}] Converting image to video: {actual_duration_sec:.2f}s at {fps}fps")
return self.run_ffmpeg(cmd, task_id)
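# Resulting command for a hypothetical 1080x1920@30 target and a 3 s segment
# at speed=1.0 (illustrative only, no crop configured):
#   ffmpeg -y -hide_banner -loop 1 -i input.jpg -t 3.0 \
#     -vf "scale=1080:1920:force_original_aspect_ratio=decrease,pad=1080:1920:(ow-iw)/2:(oh-ih)/2:black,format=yuv420p" \
#     -c:v libx264 -preset fast -crf 18 -r 30 -g 60 -keyint_min 15 \
#     -force_key_frames expr:eq(n,0) -an input_video.mp4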
def _build_command(
self,
input_file: str,
output_file: str,
render_spec: RenderSpec,
output_spec: OutputSpec,
duration_ms: int,
lut_file: Optional[str] = None,
overlay_file: Optional[str] = None,
overlap_head_ms: int = 0,
overlap_tail_ms: int = 0,
source_duration_sec: Optional[float] = None
) -> List[str]:
"""
构建 FFmpeg 渲染命令
Args:
input_file: 输入文件路径
output_file: 输出文件路径
render_spec: 渲染规格
output_spec: 输出规格
duration_ms: 目标时长(毫秒)
lut_file: LUT 文件路径(可选)
overlay_file: 叠加层文件路径(可选)
overlap_head_ms: 头部 overlap 时长(毫秒)
overlap_tail_ms: 尾部 overlap 时长(毫秒)
source_duration_sec: 源视频实际时长(秒),用于检测时长不足
Returns:
FFmpeg 命令参数列表
"""
cmd = ['ffmpeg', '-y', '-hide_banner']
# 硬件加速解码参数(在输入文件之前)
hwaccel_args = self.get_hwaccel_decode_args()
if hwaccel_args:
cmd.extend(hwaccel_args)
# 输入文件
cmd.extend(['-i', input_file])
# 叠加层输入
if overlay_file:
cmd.extend(['-i', overlay_file])
# 构建视频滤镜链
filters = self._build_video_filters(
render_spec=render_spec,
output_spec=output_spec,
duration_ms=duration_ms,
lut_file=lut_file,
overlay_file=overlay_file,
overlap_head_ms=overlap_head_ms,
overlap_tail_ms=overlap_tail_ms,
source_duration_sec=source_duration_sec
)
# 应用滤镜
# 检测是否为 filter_complex 格式(包含分号或方括号标签)
is_filter_complex = ';' in filters or (filters.startswith('[') and ']' in filters)
if is_filter_complex or overlay_file:
# 使用 filter_complex 处理
cmd.extend(['-filter_complex', filters])
elif filters:
cmd.extend(['-vf', filters])
# 编码参数(根据硬件加速配置动态获取)
cmd.extend(self.get_video_encode_args())
# 帧率
fps = output_spec.fps
cmd.extend(['-r', str(fps)])
# 时长(包含 overlap 区域)
total_duration_ms = duration_ms + overlap_head_ms + overlap_tail_ms
duration_sec = total_duration_ms / 1000.0
cmd.extend(['-t', str(duration_sec)])
# 动态调整 GOP 大小:对于短视频,GOP 不能大于总帧数
total_frames = int(duration_sec * fps)
if total_frames <= 1:
gop_size = 1
elif total_frames < fps:
# 短于 1 秒的视频,使用全部帧数作为 GOP(整个视频只有开头一个关键帧)
gop_size = total_frames
else:
# 正常情况,2 秒一个关键帧
gop_size = fps * 2
cmd.extend(['-g', str(gop_size)])
cmd.extend(['-keyint_min', str(min(gop_size, fps // 2 or 1))])
# 强制第一帧为关键帧
cmd.extend(['-force_key_frames', 'expr:eq(n,0)'])
# 无音频(视频片段不包含音频)
cmd.append('-an')
# 输出文件
cmd.append(output_file)
return cmd
def _build_video_filters(
self,
render_spec: RenderSpec,
output_spec: OutputSpec,
duration_ms: int,
lut_file: Optional[str] = None,
overlay_file: Optional[str] = None,
overlap_head_ms: int = 0,
overlap_tail_ms: int = 0,
source_duration_sec: Optional[float] = None
) -> str:
"""
构建视频滤镜链
Args:
render_spec: 渲染规格
output_spec: 输出规格
duration_ms: 目标时长(毫秒)
lut_file: LUT 文件路径
overlay_file: 叠加层文件路径(支持图片 png/jpg 和视频 mov)
overlap_head_ms: 头部 overlap 时长(毫秒)
overlap_tail_ms: 尾部 overlap 时长(毫秒)
source_duration_sec: 源视频实际时长(秒),用于检测时长不足
Returns:
滤镜字符串
"""
filters = []
width = output_spec.width
height = output_spec.height
fps = output_spec.fps
# 判断 overlay 类型
has_overlay = overlay_file is not None
is_video_overlay = has_overlay and overlay_file.lower().endswith('.mov')
# 解析 effects
effects = render_spec.get_effects()
has_camera_shot = any(e.effect_type == 'cameraShot' for e in effects)
# 硬件加速时需要先 hwdownload(将 GPU 表面下载到系统内存)
hwaccel_prefix = self.get_hwaccel_filter_prefix()
if hwaccel_prefix:
# 去掉末尾的逗号,作为第一个滤镜
filters.append(hwaccel_prefix.rstrip(','))
# 1. 变速处理
speed = float(render_spec.speed) if render_spec.speed else 1.0
if speed != 1.0 and speed > 0:
# setpts 公式:PTS / speed
pts_factor = 1.0 / speed
filters.append(f"setpts={pts_factor}*PTS")
# 2. LUT 调色
if lut_file:
# 路径中的反斜杠需要转换,冒号需要转义(FFmpeg filter语法中冒号是特殊字符)
lut_path = lut_file.replace('\\', '/').replace(':', r'\:')
filters.append(f"lut3d='{lut_path}'")
# 3. 裁切处理
if render_spec.crop_enable and render_spec.face_pos:
# 根据人脸位置进行智能裁切
try:
fx, fy = map(float, render_spec.face_pos.split(','))
# 计算裁切区域(保持输出比例)
target_ratio = width / height
# 假设裁切到目标比例
filters.append(
f"crop='min(iw,ih*{target_ratio})':'min(ih,iw/{target_ratio})':"
f"'(iw-min(iw,ih*{target_ratio}))*{fx}':"
f"'(ih-min(ih,iw/{target_ratio}))*{fy}'"
)
except (ValueError, ZeroDivisionError):
logger.warning(f"Invalid face position: {render_spec.face_pos}")
elif render_spec.zoom_cut:
# 中心缩放裁切
target_ratio = width / height
filters.append(
f"crop='min(iw,ih*{target_ratio})':'min(ih,iw/{target_ratio})'"
)
# 4. 缩放和填充
scale_filter = (
f"scale={width}:{height}:force_original_aspect_ratio=decrease,"
f"pad={width}:{height}:(ow-iw)/2:(oh-ih)/2:black"
)
filters.append(scale_filter)
# 5. 特效处理(cameraShot 需要特殊处理)
if has_camera_shot:
# cameraShot 需要使用 filter_complex 格式
return self._build_filter_complex_with_effects(
base_filters=filters,
effects=effects,
fps=fps,
width=width,
height=height,
has_overlay=has_overlay,
is_video_overlay=is_video_overlay,
overlap_head_ms=overlap_head_ms,
overlap_tail_ms=overlap_tail_ms,
use_hwdownload=bool(hwaccel_prefix),
duration_ms=duration_ms,
render_spec=render_spec,
source_duration_sec=source_duration_sec
)
# 6. 帧冻结(tpad)- 用于转场 overlap 区域和时长不足补足
# 注意:tpad 必须在缩放之后应用
tpad_parts = []
# 计算是否需要额外的尾部冻结(源视频时长不足)
extra_tail_freeze_sec = 0.0
if source_duration_sec is not None:
speed = float(render_spec.speed) if render_spec.speed else 1.0
if speed > 0:
# 计算变速后的有效时长
effective_duration_sec = source_duration_sec / speed
required_duration_sec = duration_ms / 1000.0
# 如果源视频时长不足,需要冻结最后一帧来补足
if effective_duration_sec < required_duration_sec:
extra_tail_freeze_sec = required_duration_sec - effective_duration_sec
if overlap_head_ms > 0:
# 头部冻结:将第一帧冻结指定时长
head_duration_sec = overlap_head_ms / 1000.0
tpad_parts.append(f"start_mode=clone:start_duration={head_duration_sec}")
# 尾部冻结:合并 overlap 和时长不足的冻结
total_tail_freeze_sec = (overlap_tail_ms / 1000.0) + extra_tail_freeze_sec
if total_tail_freeze_sec > 0:
# 将最后一帧冻结指定时长
tpad_parts.append(f"stop_mode=clone:stop_duration={total_tail_freeze_sec}")
if tpad_parts:
filters.append(f"tpad={':'.join(tpad_parts)}")
# 7. 构建最终滤镜
if has_overlay:
# 使用 filter_complex 格式
base_filters = ','.join(filters) if filters else 'copy'
overlay_scale = f"scale={width}:{height}"
# 视频 overlay 使用 eof_action=pass(结束后消失),图片 overlay 使用默认行为(保持显示)
overlay_params = 'eof_action=pass' if is_video_overlay else ''
overlay_filter = f"overlay=0:0:{overlay_params}" if overlay_params else 'overlay=0:0'
# 视频 overlay 需要在末尾统一颜色范围,避免 overlay 结束后 range 从 tv 变为 pc
range_fix = ',format=yuv420p,setrange=tv' if is_video_overlay else ''
return f"[0:v]{base_filters}[base];[1:v]{overlay_scale}[overlay];[base][overlay]{overlay_filter}{range_fix}"
else:
return ','.join(filters) if filters else ''
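# Illustrative output for a 1080x1920 target with a PNG overlay and a 0.5 s
# tail overlap (no LUT, no crop, speed=1.0; line breaks added for readability):
#   [0:v]scale=1080:1920:force_original_aspect_ratio=decrease,
#        pad=1080:1920:(ow-iw)/2:(oh-ih)/2:black,
#        tpad=stop_mode=clone:stop_duration=0.5[base];
#   [1:v]scale=1080:1920[overlay];
#   [base][overlay]overlay=0:0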
def _build_filter_complex_with_effects(
self,
base_filters: List[str],
effects: List[Effect],
fps: int,
width: int,
height: int,
has_overlay: bool = False,
is_video_overlay: bool = False,
overlap_head_ms: int = 0,
overlap_tail_ms: int = 0,
use_hwdownload: bool = False,
duration_ms: int = 0,
render_spec: Optional[RenderSpec] = None,
source_duration_sec: Optional[float] = None
) -> str:
"""
构建包含特效的 filter_complex 滤镜图
cameraShot 效果需要使用 split/freezeframes/concat 滤镜组合。
Args:
base_filters: 基础滤镜列表
effects: 特效列表
fps: 帧率
width: 输出宽度
height: 输出高度
has_overlay: 是否有叠加层
is_video_overlay: 叠加层是否为视频格式(如 .mov)
overlap_head_ms: 头部 overlap 时长
overlap_tail_ms: 尾部 overlap 时长
use_hwdownload: 是否使用了硬件加速解码(已在 base_filters 中包含 hwdownload)
duration_ms: 目标时长(毫秒)
render_spec: 渲染规格(用于获取变速参数)
source_duration_sec: 源视频实际时长(秒),用于检测时长不足
Returns:
filter_complex 格式的滤镜字符串
"""
filter_parts = []
# 基础滤镜链
base_chain = ','.join(base_filters) if base_filters else 'copy'
# 当前输出标签
current_output = '[v_base]'
filter_parts.append(f"[0:v]{base_chain}{current_output}")
# 处理每个特效
effect_idx = 0
for effect in effects:
if effect.effect_type == 'cameraShot':
start_sec, duration_sec = effect.get_camera_shot_params()
if start_sec <= 0 or duration_sec <= 0:
continue
# cameraShot 实现(定格效果):
# 1. fps + split 分割
# 2. 第一路:trim(0, start) + tpad冻结duration秒
# 3. 第二路:trim(start, end)
# 4. concat 拼接
split_out_a = f'[eff{effect_idx}_a]'
split_out_b = f'[eff{effect_idx}_b]'
frozen_out = f'[eff{effect_idx}_frozen]'
rest_out = f'[eff{effect_idx}_rest]'
effect_output = f'[v_eff{effect_idx}]'
# fps + split
filter_parts.append(
f"{current_output}fps=fps={fps},split{split_out_a}{split_out_b}"
)
# 第一路:trim(0, start) + tpad冻结
# tpad=stop_mode=clone 将最后一帧冻结指定时长
filter_parts.append(
f"{split_out_a}trim=start=0:end={start_sec},setpts=PTS-STARTPTS,"
f"tpad=stop_mode=clone:stop_duration={duration_sec}{frozen_out}"
)
# 第二路:trim 从 start 开始
filter_parts.append(
f"{split_out_b}trim=start={start_sec},setpts=PTS-STARTPTS{rest_out}"
)
# concat 拼接
filter_parts.append(
f"{frozen_out}{rest_out}concat=n=2:v=1:a=0{effect_output}"
)
current_output = effect_output
effect_idx += 1
# 帧冻结(tpad)- 用于转场 overlap 区域和时长不足补足
tpad_parts = []
# 计算是否需要额外的尾部冻结(源视频时长不足)
extra_tail_freeze_sec = 0.0
if source_duration_sec is not None and render_spec is not None and duration_ms > 0:
speed = float(render_spec.speed) if render_spec.speed else 1.0
if speed > 0:
# 计算变速后的有效时长
effective_duration_sec = source_duration_sec / speed
required_duration_sec = duration_ms / 1000.0
# 如果源视频时长不足,需要冻结最后一帧来补足
if effective_duration_sec < required_duration_sec:
extra_tail_freeze_sec = required_duration_sec - effective_duration_sec
if overlap_head_ms > 0:
head_duration_sec = overlap_head_ms / 1000.0
tpad_parts.append(f"start_mode=clone:start_duration={head_duration_sec}")
# 尾部冻结:合并 overlap 和时长不足的冻结
total_tail_freeze_sec = (overlap_tail_ms / 1000.0) + extra_tail_freeze_sec
if total_tail_freeze_sec > 0:
tpad_parts.append(f"stop_mode=clone:stop_duration={total_tail_freeze_sec}")
if tpad_parts:
tpad_output = '[v_tpad]'
filter_parts.append(f"{current_output}tpad={':'.join(tpad_parts)}{tpad_output}")
current_output = tpad_output
# 最终输出
if has_overlay:
# 叠加层处理
# 视频 overlay 使用 eof_action=pass(结束后消失),图片 overlay 使用默认行为(保持显示)
overlay_params = 'eof_action=pass' if is_video_overlay else ''
overlay_filter = f"overlay=0:0:{overlay_params}" if overlay_params else 'overlay=0:0'
overlay_scale = f"scale={width}:{height}"
overlay_output = '[v_overlay]'
# 视频 overlay 需要在末尾统一颜色范围,避免 overlay 结束后 range 从 tv 变为 pc
range_fix = ',format=yuv420p,setrange=tv' if is_video_overlay else ''
filter_parts.append(f"[1:v]{overlay_scale}{overlay_output}")
filter_parts.append(f"{current_output}{overlay_output}{overlay_filter}{range_fix}")
else:
# 移除最后一个标签,直接输出
# 将最后一个滤镜的输出标签替换为空(直接输出)
if filter_parts:
last_filter = filter_parts[-1]
# 移除末尾的输出标签
if last_filter.endswith(current_output):
filter_parts[-1] = last_filter[:-len(current_output)]
return ';'.join(filter_parts)
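# Illustrative graph for a single cameraShot that freezes the frame at t=2 s
# for 1 s (30 fps, no overlay, no overlap; the scale/pad chain is abbreviated):
#   [0:v]scale=...,pad=...[v_base];
#   [v_base]fps=fps=30,split[eff0_a][eff0_b];
#   [eff0_a]trim=start=0:end=2,setpts=PTS-STARTPTS,tpad=stop_mode=clone:stop_duration=1[eff0_frozen];
#   [eff0_b]trim=start=2,setpts=PTS-STARTPTS[eff0_rest];
#   [eff0_frozen][eff0_rest]concat=n=2:v=1:a=0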

268
index.py
View File

@@ -1,228 +1,54 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
RenderWorker v2 入口
支持 v2 API 协议的渲染 Worker,处理以下任务类型:
- RENDER_SEGMENT_VIDEO: 渲染视频片段
- PREPARE_JOB_AUDIO: 生成全局音频
- PACKAGE_SEGMENT_TS: 封装 TS 分片
- FINALIZE_MP4: 产出最终 MP4
使用方法:
python index.py
环境变量:
API_ENDPOINT_V2: v2 API 端点(或使用 API_ENDPOINT)
ACCESS_KEY: Worker 认证密钥
WORKER_ID: Worker ID(默认 100001)
MAX_CONCURRENCY: 最大并发数(默认 4)
HEARTBEAT_INTERVAL: 心跳间隔秒数(默认 5)
TEMP_DIR: 临时文件目录
"""
from time import sleep
import sys
import time
import signal
import logging
import config
import biz.task
from telemetry import init_opentelemetry
from services import DefaultTemplateService
from util import api
import os
from logging.handlers import RotatingFileHandler
import glob
from dotenv import load_dotenv
# 使用新的服务架构
template_service = DefaultTemplateService()
template_service.load_local_templates()
from domain.config import WorkerConfig
from services.api_client import APIClientV2
from services.task_executor import TaskExecutor
from constant import SOFTWARE_VERSION
# Check for redownload parameter
if 'redownload' in sys.argv:
print("Redownloading all templates...")
for template_name in template_service.templates.keys():
print(f"Redownloading template: {template_name}")
template_service.download_template(template_name)
print("All templates redownloaded successfully!")
sys.exit(0)
import logging
# 日志配置
def setup_logging():
"""配置日志系统,输出到控制台和文件"""
# 日志格式
log_format = '[%(asctime)s] [%(levelname)s] [%(name)s] %(message)s'
date_format = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(log_format, date_format)
LOGGER = logging.getLogger(__name__)
init_opentelemetry()
# 获取根logger
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
# 清除已有的handlers(避免重复)
root_logger.handlers.clear()
# 1. 控制台handler(只输出WARNING及以上级别)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.WARNING)
console_handler.setFormatter(formatter)
root_logger.addHandler(console_handler)
# 确保日志文件所在目录存在
log_dir = os.path.dirname(os.path.abspath(__file__))
# 2. 所有日志文件handler(all_log.log)
all_log_path = os.path.join(log_dir, 'all_log.log')
all_log_handler = RotatingFileHandler(
all_log_path,
maxBytes=10*1024*1024, # 10MB
backupCount=5,
encoding='utf-8'
)
all_log_handler.setLevel(logging.DEBUG) # 记录所有级别
all_log_handler.setFormatter(formatter)
root_logger.addHandler(all_log_handler)
# 3. 错误日志文件handler(error.log)
error_log_path = os.path.join(log_dir, 'error.log')
error_log_handler = RotatingFileHandler(
error_log_path,
maxBytes=10*1024*1024, # 10MB
backupCount=5,
encoding='utf-8'
)
error_log_handler.setLevel(logging.ERROR) # 只记录ERROR及以上
error_log_handler.setFormatter(formatter)
root_logger.addHandler(error_log_handler)
# 初始化日志系统
setup_logging()
logger = logging.getLogger('worker')
class WorkerV2:
"""
v2 渲染 Worker 主类
负责:
- 配置加载
- API 客户端初始化
- 任务执行器管理
- 主循环运行
- 优雅退出处理
"""
def __init__(self):
"""初始化 Worker"""
# 加载配置
while True:
# print(get_sys_info())
print("waiting for task...")
try:
task_list = api.sync_center()
except Exception as e:
LOGGER.error("sync_center error", exc_info=e)
sleep(5)
continue
if len(task_list) == 0:
# 删除当前目录下残留的 .mp4、.ts 文件以及 tmp_concat*.txt 临时文件
for file_globs in ['*.mp4', '*.ts', 'tmp_concat*.txt']:
for file_path in glob.glob(file_globs):
try:
os.remove(file_path)
print(f"Deleted file: {file_path}")
except Exception as e:
LOGGER.error(f"Error deleting file {file_path}", exc_info=e)
sleep(5)
for task in task_list:
print("start task:", task)
try:
self.config = WorkerConfig.from_env()
except ValueError as e:
logger.error(f"Configuration error: {e}")
sys.exit(1)
# 初始化 API 客户端
self.api_client = APIClientV2(self.config)
# 初始化任务执行器
self.task_executor = TaskExecutor(self.config, self.api_client)
# 运行状态
self.running = True
# 确保临时目录存在
self.config.ensure_temp_dir()
# 注册信号处理器
self._setup_signal_handlers()
def _setup_signal_handlers(self):
"""设置信号处理器"""
# Windows 不支持 SIGTERM
signal.signal(signal.SIGINT, self._signal_handler)
if hasattr(signal, 'SIGTERM'):
signal.signal(signal.SIGTERM, self._signal_handler)
def _signal_handler(self, signum, frame):
"""
信号处理,优雅退出
Args:
signum: 信号编号
frame: 当前栈帧
"""
signal_name = signal.Signals(signum).name
logger.info(f"Received signal {signal_name}, initiating shutdown...")
self.running = False
def run(self):
"""主循环"""
logger.info("=" * 60)
logger.info("RenderWorker v2 Starting")
logger.info("=" * 60)
logger.info(f"Worker ID: {self.config.worker_id}")
logger.info(f"API Endpoint: {self.config.api_endpoint}")
logger.info(f"Max Concurrency: {self.config.max_concurrency}")
logger.info(f"Heartbeat Interval: {self.config.heartbeat_interval}s")
logger.info(f"Capabilities: {', '.join(self.config.capabilities)}")
logger.info(f"Temp Directory: {self.config.temp_dir}")
logger.info("=" * 60)
consecutive_errors = 0
max_consecutive_errors = 10
while self.running:
try:
# 心跳同步并拉取任务
current_task_ids = self.task_executor.get_current_task_ids()
tasks = self.api_client.sync(current_task_ids)
# 提交新任务
for task in tasks:
if self.task_executor.submit_task(task):
logger.info(f"Submitted task: {task.task_id} ({task.task_type.value})")
# 重置错误计数
consecutive_errors = 0
# 等待下次心跳
time.sleep(self.config.heartbeat_interval)
except KeyboardInterrupt:
logger.info("Keyboard interrupt received")
self.running = False
except Exception as e:
consecutive_errors += 1
logger.error(f"Worker loop error ({consecutive_errors}/{max_consecutive_errors}): {e}", exc_info=True)
# 连续错误过多,增加等待时间
if consecutive_errors >= max_consecutive_errors:
logger.error("Too many consecutive errors, waiting 30 seconds...")
time.sleep(30)
consecutive_errors = 0
else:
time.sleep(5)
# 优雅关闭
self._shutdown()
def _shutdown(self):
"""优雅关闭"""
logger.info("Shutting down...")
# 等待当前任务完成
current_count = self.task_executor.get_current_task_count()
if current_count > 0:
logger.info(f"Waiting for {current_count} running task(s) to complete...")
# 关闭执行器
self.task_executor.shutdown(wait=True)
# 关闭 API 客户端
self.api_client.close()
logger.info("Worker stopped")
def main():
"""主函数"""
# 加载 .env 文件(如果存在)
load_dotenv()
logger.info(f"RenderWorker v{SOFTWARE_VERSION}")
# 创建并运行 Worker
worker = WorkerV2()
worker.run()
if __name__ == '__main__':
main()
biz.task.start_task(task)
except Exception as e:
LOGGER.error("task_start error", exc_info=e)

View File

@@ -1,18 +1,12 @@
# -*- coding: utf-8 -*-
"""
服务层
包含 API 客户端、任务执行器、租约服务、存储服务等组件。
"""
from services.api_client import APIClientV2
from services.lease_service import LeaseService
from services.task_executor import TaskExecutor
from services import storage
from .render_service import RenderService, DefaultRenderService
from .task_service import TaskService, DefaultTaskService
from .template_service import TemplateService, DefaultTemplateService
__all__ = [
'APIClientV2',
'LeaseService',
'TaskExecutor',
'storage',
]
'RenderService',
'DefaultRenderService',
'TaskService',
'DefaultTaskService',
'TemplateService',
'DefaultTemplateService'
]

View File

@@ -1,417 +0,0 @@
# -*- coding: utf-8 -*-
"""
v2 API 客户端
实现与渲染服务端 v2 接口的通信。
"""
import logging
import subprocess
import time
import requests
from typing import Dict, List, Optional, Any
from domain.task import Task
from domain.config import WorkerConfig
from util.system import get_hw_accel_info_str
logger = logging.getLogger(__name__)
class APIClientV2:
"""
v2 API 客户端
负责与渲染服务端的所有 HTTP 通信。
"""
SYSTEM_INFO_TTL_SECONDS = 30
def __init__(self, config: WorkerConfig):
"""
初始化 API 客户端
Args:
config: Worker 配置
"""
self.config = config
self.base_url = config.api_endpoint.rstrip('/')
self.access_key = config.access_key
self.worker_id = config.worker_id
self.session = requests.Session()
self._ffmpeg_version: Optional[str] = None
self._codec_info: Optional[str] = None
self._hw_accel_info: Optional[str] = None
self._gpu_info: Optional[str] = None
self._gpu_info_checked = False
self._static_system_info: Optional[Dict[str, Any]] = None
self._system_info_cache: Optional[Dict[str, Any]] = None
self._system_info_cache_ts = 0.0
# 设置默认请求头
self.session.headers.update({
'Content-Type': 'application/json',
'Accept': 'application/json'
})
def sync(self, current_task_ids: List[str]) -> List[Task]:
"""
心跳同步并拉取任务
Args:
current_task_ids: 当前正在执行的任务 ID 列表
Returns:
List[Task]: 新分配的任务列表
"""
url = f"{self.base_url}/render/v2/worker/sync"
# 将 task_id 转换为整数(服务端期望 []int64)
task_ids_int = [int(tid) for tid in current_task_ids if tid.isdigit()]
payload = {
'accessKey': self.access_key,
'workerId': self.worker_id,
'capabilities': self.config.capabilities,
'maxConcurrency': self.config.max_concurrency,
'currentTaskCount': len(current_task_ids),
'currentTaskIds': task_ids_int,
'ffmpegVersion': self._get_ffmpeg_version(),
'codecInfo': self._get_codec_info(),
'systemInfo': self._get_system_info()
}
try:
resp = self.session.post(url, json=payload, timeout=10)
resp.raise_for_status()
data = resp.json()
if data.get('code') != 200:
logger.warning(f"Sync failed: {data.get('message')}")
return []
# 解析任务列表
tasks = []
for task_data in data.get('data', {}).get('tasks') or []:
try:
task = Task.from_dict(task_data)
tasks.append(task)
except Exception as e:
logger.error(f"Failed to parse task: {e}")
if tasks:
logger.info(f"Received {len(tasks)} new tasks")
return tasks
except requests.exceptions.Timeout:
logger.warning("Sync timeout")
return []
except requests.exceptions.RequestException as e:
logger.error(f"Sync request error: {e}")
return []
except Exception as e:
logger.error(f"Sync error: {e}")
return []
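# Expected response shape, as consumed above (illustrative; field names taken
# from the parsing code, the real payload may carry additional fields):
#   {"code": 200, "message": "ok", "data": {"tasks": [ {...}, {...} ]}}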
def report_start(self, task_id: str) -> bool:
"""
报告任务开始
Args:
task_id: 任务 ID
Returns:
bool: 是否成功
"""
url = f"{self.base_url}/render/v2/task/{task_id}/start"
try:
resp = self.session.post(
url,
json={'workerId': self.worker_id},
timeout=10
)
if resp.status_code == 200:
logger.debug(f"[task:{task_id}] Start reported")
return True
else:
logger.warning(f"[task:{task_id}] Report start failed: {resp.status_code}")
return False
except Exception as e:
logger.error(f"[task:{task_id}] Report start error: {e}")
return False
def report_success(self, task_id: str, result: Dict[str, Any]) -> bool:
"""
报告任务成功
Args:
task_id: 任务 ID
result: 任务结果数据
Returns:
bool: 是否成功
"""
url = f"{self.base_url}/render/v2/task/{task_id}/success"
try:
resp = self.session.post(
url,
json={
'workerId': self.worker_id,
'result': result
},
timeout=10
)
if resp.status_code == 200:
logger.debug(f"[task:{task_id}] Success reported")
return True
else:
logger.warning(f"[task:{task_id}] Report success failed: {resp.status_code}")
return False
except Exception as e:
logger.error(f"[task:{task_id}] Report success error: {e}")
return False
def report_fail(self, task_id: str, error_code: str, error_message: str) -> bool:
"""
报告任务失败
Args:
task_id: 任务 ID
error_code: 错误码
error_message: 错误信息
Returns:
bool: 是否成功
"""
url = f"{self.base_url}/render/v2/task/{task_id}/fail"
try:
resp = self.session.post(
url,
json={
'workerId': self.worker_id,
'errorCode': error_code,
'errorMessage': error_message[:1000] # 限制长度
},
timeout=10
)
if resp.status_code == 200:
logger.debug(f"[task:{task_id}] Failure reported")
return True
else:
logger.warning(f"[task:{task_id}] Report fail failed: {resp.status_code}")
return False
except Exception as e:
logger.error(f"[task:{task_id}] Report fail error: {e}")
return False
def get_upload_url(self, task_id: str, file_type: str, file_name: str = None) -> Optional[Dict[str, str]]:
"""
获取上传 URL
Args:
task_id: 任务 ID
file_type: 文件类型(video/audio/ts/mp4)
file_name: 文件名(可选)
Returns:
Dict 包含 uploadUrl 和 accessUrl,失败返回 None
"""
url = f"{self.base_url}/render/v2/task/{task_id}/uploadUrl"
payload = {'fileType': file_type}
if file_name:
payload['fileName'] = file_name
try:
resp = self.session.post(url, json=payload, timeout=10)
if resp.status_code == 200:
data = resp.json()
if data.get('code') == 200:
return data.get('data')
logger.warning(f"[task:{task_id}] Get upload URL failed: {resp.status_code}")
return None
except Exception as e:
logger.error(f"[task:{task_id}] Get upload URL error: {e}")
return None
def extend_lease(self, task_id: str, extension: int = None) -> bool:
"""
延长租约
Args:
task_id: 任务 ID
extension: 延长秒数(默认使用配置值)
Returns:
bool: 是否成功
"""
if extension is None:
extension = self.config.lease_extension_duration
url = f"{self.base_url}/render/v2/task/{task_id}/extend-lease"
try:
resp = self.session.post(
url,
params={
'workerId': self.worker_id,
'extension': extension
},
timeout=10
)
if resp.status_code == 200:
logger.debug(f"[task:{task_id}] Lease extended by {extension}s")
return True
else:
logger.warning(f"[task:{task_id}] Extend lease failed: {resp.status_code}")
return False
except Exception as e:
logger.error(f"[task:{task_id}] Extend lease error: {e}")
return False
def get_task_info(self, task_id: str) -> Optional[Dict]:
"""
获取任务详情
Args:
task_id: 任务 ID
Returns:
任务详情字典,失败返回 None
"""
url = f"{self.base_url}/render/v2/task/{task_id}"
try:
resp = self.session.get(url, timeout=10)
if resp.status_code == 200:
data = resp.json()
if data.get('code') == 200:
return data.get('data')
return None
except Exception as e:
logger.error(f"[task:{task_id}] Get task info error: {e}")
return None
def _get_ffmpeg_version(self) -> str:
"""获取 FFmpeg 版本"""
if self._ffmpeg_version is not None:
return self._ffmpeg_version
try:
result = subprocess.run(
['ffmpeg', '-version'],
capture_output=True,
text=True,
timeout=5
)
first_line = result.stdout.split('\n')[0]
if 'version' in first_line:
parts = first_line.split()
for i, part in enumerate(parts):
if part == 'version' and i + 1 < len(parts):
self._ffmpeg_version = parts[i + 1]
return self._ffmpeg_version
self._ffmpeg_version = 'unknown'
return self._ffmpeg_version
except Exception:
self._ffmpeg_version = 'unknown'
return self._ffmpeg_version
def _get_codec_info(self) -> str:
"""获取支持的编解码器信息"""
if self._codec_info is not None:
return self._codec_info
try:
result = subprocess.run(
['ffmpeg', '-codecs'],
capture_output=True,
text=True,
timeout=5
)
# 检查常用编解码器
codecs = []
output = result.stdout
if 'libx264' in output:
codecs.append('libx264')
if 'libx265' in output or 'hevc' in output:
codecs.append('libx265')
if 'aac' in output:
codecs.append('aac')
if 'libfdk_aac' in output:
codecs.append('libfdk_aac')
self._codec_info = ', '.join(codecs) if codecs else 'unknown'
return self._codec_info
except Exception:
self._codec_info = 'unknown'
return self._codec_info
def _get_system_info(self) -> Dict[str, Any]:
"""获取系统信息"""
try:
now = time.monotonic()
if (
self._system_info_cache
and now - self._system_info_cache_ts < self.SYSTEM_INFO_TTL_SECONDS
):
return self._system_info_cache
import platform
import psutil
if self._hw_accel_info is None:
self._hw_accel_info = get_hw_accel_info_str()
if self._static_system_info is None:
self._static_system_info = {
'os': platform.system(),
'cpu': f"{psutil.cpu_count()} cores",
'memory': f"{psutil.virtual_memory().total // (1024**3)}GB",
'hwAccelConfig': self.config.hw_accel, # 当前配置的硬件加速
'hwAccelSupport': self._hw_accel_info, # 系统支持的硬件加速
}
info = dict(self._static_system_info)
info.update({
'cpuUsage': f"{psutil.cpu_percent()}%",
'memoryAvailable': f"{psutil.virtual_memory().available // (1024**3)}GB",
})
# 尝试获取 GPU 信息
gpu_info = self._get_gpu_info()
if gpu_info:
info['gpu'] = gpu_info
self._system_info_cache = info
self._system_info_cache_ts = now
return info
except Exception:
return {}
def _get_gpu_info(self) -> Optional[str]:
"""获取 GPU 信息"""
if self._gpu_info_checked:
return self._gpu_info
self._gpu_info_checked = True
try:
result = subprocess.run(
['nvidia-smi', '--query-gpu=name', '--format=csv,noheader'],
capture_output=True,
text=True,
timeout=5
)
if result.returncode == 0:
gpu_name = result.stdout.strip().split('\n')[0]
self._gpu_info = gpu_name
except Exception:
self._gpu_info = None
return self._gpu_info
def close(self):
"""关闭会话"""
self.session.close()

View File

@@ -1,513 +0,0 @@
# -*- coding: utf-8 -*-
"""
素材缓存服务
提供素材下载缓存功能,避免相同素材重复下载。
"""
import json
import os
import hashlib
import logging
import shutil
import time
import uuid
from typing import Optional, Tuple
from urllib.parse import urlparse, unquote
import psutil
from services import storage
logger = logging.getLogger(__name__)
def _extract_cache_key(url: str) -> str:
"""
从 URL 提取缓存键
去除签名等查询参数,保留路径作为唯一标识。
Args:
url: 完整的素材 URL
Returns:
缓存键(URL 路径的 MD5 哈希)
"""
parsed = urlparse(url)
# 使用 scheme + host + path 作为唯一标识(忽略签名等查询参数)
cache_key_source = f"{parsed.scheme}://{parsed.netloc}{unquote(parsed.path)}"
return hashlib.md5(cache_key_source.encode('utf-8')).hexdigest()
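# Example: two signed URLs for the same object,
#   https://oss.example.com/assets/a.mp4?Expires=1&Signature=abc
#   https://oss.example.com/assets/a.mp4?Expires=2&Signature=def
# both map to md5("https://oss.example.com/assets/a.mp4"), so re-signed links
# reuse the same cache entry (hostname is illustrative).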
def _get_file_extension(url: str) -> str:
"""
从 URL 提取文件扩展名
Args:
url: 素材 URL
Returns:
文件扩展名(如 .mp4, .png),无法识别时返回空字符串
"""
parsed = urlparse(url)
path = unquote(parsed.path)
_, ext = os.path.splitext(path)
return ext.lower() if ext else ''
class MaterialCache:
"""
素材缓存管理器
负责素材文件的缓存存储和检索。
"""
LOCK_TIMEOUT_SEC = 30.0
LOCK_POLL_INTERVAL_SEC = 0.1
LOCK_STALE_SECONDS = 24 * 60 * 60
def __init__(self, cache_dir: str, enabled: bool = True, max_size_gb: float = 0):
"""
初始化缓存管理器
Args:
cache_dir: 缓存目录路径
enabled: 是否启用缓存
max_size_gb: 最大缓存大小(GB),0 表示不限制
"""
self.cache_dir = cache_dir
self.enabled = enabled
self.max_size_bytes = int(max_size_gb * 1024 * 1024 * 1024) if max_size_gb > 0 else 0
if self.enabled:
os.makedirs(self.cache_dir, exist_ok=True)
logger.info(f"Material cache initialized: {cache_dir}")
def get_cache_path(self, url: str) -> str:
"""
获取素材的缓存文件路径
Args:
url: 素材 URL
Returns:
缓存文件的完整路径
"""
cache_key = _extract_cache_key(url)
ext = _get_file_extension(url)
filename = f"{cache_key}{ext}"
return os.path.join(self.cache_dir, filename)
def _get_lock_path(self, cache_key: str) -> str:
"""获取缓存锁文件路径"""
assert self.cache_dir
return os.path.join(self.cache_dir, f"{cache_key}.lock")
def _write_lock_metadata(self, lock_fd: int, lock_path: str) -> bool:
"""写入锁元数据,失败则清理锁文件"""
try:
try:
process_start_time = psutil.Process(os.getpid()).create_time()
except Exception as e:
process_start_time = None
logger.warning(f"Cache lock process start time error: {e}")
metadata = {
'pid': os.getpid(),
'process_start_time': process_start_time,
'created_at': time.time()
}
with os.fdopen(lock_fd, 'w', encoding='utf-8') as lock_file:
json.dump(metadata, lock_file)
return True
except Exception as e:
try:
os.close(lock_fd)
except Exception:
pass
self._remove_lock_file(lock_path, f"write metadata failed: {e}")
return False
def _read_lock_metadata(self, lock_path: str) -> Optional[dict]:
"""读取锁元数据,失败返回 None(兼容历史空锁文件)"""
try:
with open(lock_path, 'r', encoding='utf-8') as lock_file:
data = json.load(lock_file)
return data if isinstance(data, dict) else None
except Exception:
return None
def _is_process_alive(self, pid: int, expected_start_time: Optional[float]) -> bool:
"""判断进程是否存活并校验启动时间(防止 PID 复用)"""
try:
process = psutil.Process(pid)
if expected_start_time is None:
return process.is_running()
actual_start_time = process.create_time()
return abs(actual_start_time - expected_start_time) < 1.0
except psutil.NoSuchProcess:
return False
except Exception as e:
logger.warning(f"Cache lock process check error: {e}")
return True
def _is_lock_stale(self, lock_path: str) -> bool:
"""判断锁是否过期(进程已退出或超过最大存活时长)"""
if not os.path.exists(lock_path):
return False
now = time.time()
metadata = self._read_lock_metadata(lock_path)
if metadata:
created_at = metadata.get('created_at')
if isinstance(created_at, (int, float)) and now - created_at > self.LOCK_STALE_SECONDS:
return True
pid = metadata.get('pid')
pid_value = int(pid) if isinstance(pid, int) or (isinstance(pid, str) and pid.isdigit()) else None
expected_start_time = metadata.get('process_start_time')
expected_start_time_value = (
expected_start_time if isinstance(expected_start_time, (int, float)) else None
)
if pid_value is not None and not self._is_process_alive(pid_value, expected_start_time_value):
return True
return self._is_lock_stale_by_mtime(lock_path, now)
return self._is_lock_stale_by_mtime(lock_path, now)
def _is_lock_stale_by_mtime(self, lock_path: str, now: float) -> bool:
"""基于文件时间判断锁是否过期"""
try:
mtime = os.path.getmtime(lock_path)
return now - mtime > self.LOCK_STALE_SECONDS
except Exception as e:
logger.warning(f"Cache lock stat error: {e}")
return False
def _remove_lock_file(self, lock_path: str, reason: str = "") -> bool:
"""删除锁文件"""
try:
os.remove(lock_path)
if reason:
logger.info(f"Cache lock removed: {lock_path} ({reason})")
return True
except FileNotFoundError:
return True
except Exception as e:
logger.warning(f"Cache lock remove error: {e}")
return False
def _acquire_lock(self, cache_key: str) -> Optional[str]:
"""获取缓存锁(跨进程安全)"""
if not self.enabled:
return None
lock_path = self._get_lock_path(cache_key)
deadline = time.monotonic() + self.LOCK_TIMEOUT_SEC
while True:
try:
fd = os.open(lock_path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
if not self._write_lock_metadata(fd, lock_path):
return None
return lock_path
except FileExistsError:
if self._is_lock_stale(lock_path):
removed = self._remove_lock_file(lock_path, "stale lock")
if removed:
continue
if time.monotonic() >= deadline:
logger.warning(f"Cache lock timeout: {lock_path}")
return None
time.sleep(self.LOCK_POLL_INTERVAL_SEC)
except Exception as e:
logger.warning(f"Cache lock error: {e}")
return None
def _release_lock(self, lock_path: Optional[str]) -> None:
"""释放缓存锁"""
if not lock_path:
return
self._remove_lock_file(lock_path)
def is_cached(self, url: str) -> Tuple[bool, str]:
"""
检查素材是否已缓存
Args:
url: 素材 URL
Returns:
(是否已缓存, 缓存文件路径)
"""
if not self.enabled:
return False, ''
cache_path = self.get_cache_path(url)
exists = os.path.exists(cache_path) and os.path.getsize(cache_path) > 0
return exists, cache_path
def get_or_download(
self,
url: str,
dest: str,
timeout: int = 300,
max_retries: int = 5
) -> bool:
"""
从缓存获取素材,若未缓存则下载并缓存
Args:
url: 素材 URL
dest: 目标文件路径(任务工作目录中的路径)
timeout: 下载超时时间(秒)
max_retries: 最大重试次数
Returns:
是否成功
"""
# 确保目标目录存在
dest_dir = os.path.dirname(dest)
if dest_dir:
os.makedirs(dest_dir, exist_ok=True)
# 缓存未启用时直接下载
if not self.enabled:
return storage.download_file(url, dest, max_retries=max_retries, timeout=timeout)
cache_key = _extract_cache_key(url)
lock_path = self._acquire_lock(cache_key)
if not lock_path:
logger.warning(f"Cache lock unavailable, downloading without cache: {url[:80]}...")
return storage.download_file(url, dest, max_retries=max_retries, timeout=timeout)
try:
cache_path = self.get_cache_path(url)
cached = os.path.exists(cache_path) and os.path.getsize(cache_path) > 0
if cached:
# 命中缓存,复制到目标路径
try:
shutil.copy2(cache_path, dest)
# 更新访问时间(用于 LRU 清理)
os.utime(cache_path, None)
file_size = os.path.getsize(dest)
logger.info(f"Cache hit: {url[:80]}... -> {dest} ({file_size} bytes)")
return True
except Exception as e:
logger.warning(f"Failed to copy from cache: {e}, will re-download")
# 缓存复制失败,删除可能损坏的缓存文件
try:
os.remove(cache_path)
except Exception:
pass
# 未命中缓存,下载到缓存目录
logger.debug(f"Cache miss: {url[:80]}...")
# 先下载到临时文件(唯一文件名,避免并发覆盖)
temp_cache_path = os.path.join(
self.cache_dir,
f"{cache_key}.{uuid.uuid4().hex}.downloading"
)
try:
if not storage.download_file(url, temp_cache_path, max_retries=max_retries, timeout=timeout):
# 下载失败,清理临时文件
if os.path.exists(temp_cache_path):
os.remove(temp_cache_path)
return False
if not os.path.exists(temp_cache_path) or os.path.getsize(temp_cache_path) <= 0:
if os.path.exists(temp_cache_path):
os.remove(temp_cache_path)
return False
# 下载成功,原子替换缓存文件
os.replace(temp_cache_path, cache_path)
# 复制到目标路径
shutil.copy2(cache_path, dest)
file_size = os.path.getsize(dest)
logger.info(f"Downloaded and cached: {url[:80]}... ({file_size} bytes)")
# 检查是否需要清理缓存
if self.max_size_bytes > 0:
self._cleanup_if_needed()
return True
except Exception as e:
logger.error(f"Cache download error: {e}")
# 清理临时文件
if os.path.exists(temp_cache_path):
try:
os.remove(temp_cache_path)
except Exception:
pass
return False
finally:
self._release_lock(lock_path)
def add_to_cache(self, url: str, source_path: str) -> bool:
"""
将本地文件添加到缓存
Args:
url: 对应的 URL(用于生成缓存键)
source_path: 本地文件路径
Returns:
是否成功
"""
if not self.enabled:
return False
if not os.path.exists(source_path):
logger.warning(f"Source file not found for cache: {source_path}")
return False
cache_key = _extract_cache_key(url)
lock_path = self._acquire_lock(cache_key)
if not lock_path:
logger.warning(f"Cache lock unavailable for adding: {url[:80]}...")
return False
try:
cache_path = self.get_cache_path(url)
# 先复制到临时文件
temp_cache_path = os.path.join(
self.cache_dir,
f"{cache_key}.{uuid.uuid4().hex}.adding"
)
shutil.copy2(source_path, temp_cache_path)
# 原子替换
os.replace(temp_cache_path, cache_path)
# 更新访问时间
os.utime(cache_path, None)
logger.info(f"Added to cache: {url[:80]}... <- {source_path}")
# 检查清理
if self.max_size_bytes > 0:
self._cleanup_if_needed()
return True
except Exception as e:
logger.error(f"Failed to add to cache: {e}")
if 'temp_cache_path' in locals() and os.path.exists(temp_cache_path):
try:
os.remove(temp_cache_path)
except Exception:
pass
return False
finally:
self._release_lock(lock_path)
def _cleanup_if_needed(self) -> None:
"""
检查并清理缓存(LRU 策略)
当缓存大小超过限制时,删除最久未访问的文件。
"""
if self.max_size_bytes <= 0:
return
try:
# 获取所有缓存文件及其信息
cache_files = []
total_size = 0
for filename in os.listdir(self.cache_dir):
if filename.endswith('.downloading') or filename.endswith('.lock'):
continue
file_path = os.path.join(self.cache_dir, filename)
if os.path.isfile(file_path):
stat = os.stat(file_path)
cache_files.append({
'path': file_path,
'size': stat.st_size,
'atime': stat.st_atime
})
total_size += stat.st_size
# 如果未超过限制,无需清理
if total_size <= self.max_size_bytes:
return
# 按访问时间排序(最久未访问的在前)
cache_files.sort(key=lambda x: x['atime'])
# 删除文件直到低于限制的 80%
target_size = int(self.max_size_bytes * 0.8)
deleted_count = 0
for file_info in cache_files:
if total_size <= target_size:
break
# 从文件名提取 cache_key,检查是否有锁(说明正在被使用)
filename = os.path.basename(file_info['path'])
cache_key = os.path.splitext(filename)[0]
lock_path = self._get_lock_path(cache_key)
if os.path.exists(lock_path):
if self._is_lock_stale(lock_path):
self._remove_lock_file(lock_path, "cleanup stale lock")
else:
# 该文件正在被其他任务使用,跳过删除
logger.debug(f"Cache cleanup: skipping locked file {filename}")
continue
try:
os.remove(file_info['path'])
total_size -= file_info['size']
deleted_count += 1
except Exception as e:
logger.warning(f"Failed to delete cache file: {e}")
if deleted_count > 0:
logger.info(f"Cache cleanup: deleted {deleted_count} files, current size: {total_size / (1024*1024*1024):.2f} GB")
except Exception as e:
logger.warning(f"Cache cleanup error: {e}")
def clear(self) -> None:
"""清空所有缓存"""
if not self.enabled:
return
try:
if os.path.exists(self.cache_dir):
shutil.rmtree(self.cache_dir)
os.makedirs(self.cache_dir, exist_ok=True)
logger.info("Cache cleared")
except Exception as e:
logger.error(f"Failed to clear cache: {e}")
def get_stats(self) -> dict:
"""
获取缓存统计信息
Returns:
包含缓存统计的字典
"""
if not self.enabled or not os.path.exists(self.cache_dir):
return {'enabled': False, 'file_count': 0, 'total_size_mb': 0}
file_count = 0
total_size = 0
for filename in os.listdir(self.cache_dir):
if filename.endswith('.downloading') or filename.endswith('.lock'):
continue
file_path = os.path.join(self.cache_dir, filename)
if os.path.isfile(file_path):
file_count += 1
total_size += os.path.getsize(file_path)
return {
'enabled': True,
'cache_dir': self.cache_dir,
'file_count': file_count,
'total_size_mb': round(total_size / (1024 * 1024), 2),
'max_size_gb': self.max_size_bytes / (1024 * 1024 * 1024) if self.max_size_bytes > 0 else 0
}

View File

@@ -1,164 +0,0 @@
# -*- coding: utf-8 -*-
"""
GPU 调度器
提供多 GPU 设备的轮询调度功能。
"""
import logging
import threading
from typing import List, Optional
from domain.config import WorkerConfig
from domain.gpu import GPUDevice
from util.system import get_all_gpu_info, validate_gpu_device
from constant import HW_ACCEL_CUDA, HW_ACCEL_QSV
logger = logging.getLogger(__name__)
class GPUScheduler:
"""
GPU 调度器
实现多 GPU 设备的轮询(Round Robin)调度。
线程安全,支持并发任务执行。
使用方式:
scheduler = GPUScheduler(config)
# 在任务执行时
device_index = scheduler.acquire()
try:
# 执行任务
pass
finally:
scheduler.release(device_index)
"""
def __init__(self, config: WorkerConfig):
"""
初始化调度器
Args:
config: Worker 配置
"""
self._config = config
self._devices: List[GPUDevice] = []
self._next_index: int = 0
self._lock = threading.Lock()
self._enabled = False
# 初始化设备列表
self._init_devices()
def _init_devices(self) -> None:
"""初始化 GPU 设备列表"""
# 仅在启用硬件加速时才初始化
if self._config.hw_accel not in (HW_ACCEL_CUDA, HW_ACCEL_QSV):
logger.info("Hardware acceleration not enabled, GPU scheduler disabled")
return
configured_devices = self._config.gpu_devices
if configured_devices:
# 使用配置指定的设备
self._devices = self._validate_configured_devices(configured_devices)
else:
# 自动检测所有设备
self._devices = self._auto_detect_devices()
if self._devices:
self._enabled = True
device_info = ', '.join(str(d) for d in self._devices)
logger.info(f"GPU scheduler initialized with {len(self._devices)} device(s): {device_info}")
else:
logger.warning("No GPU devices available, scheduler disabled")
def _validate_configured_devices(self, indices: List[int]) -> List[GPUDevice]:
"""
验证配置的设备列表
Args:
indices: 配置的设备索引列表
Returns:
验证通过的设备列表
"""
devices = []
for index in indices:
if validate_gpu_device(index):
devices.append(GPUDevice(
index=index,
name=f"GPU-{index}",
available=True
))
else:
logger.warning(f"GPU device {index} is not available, skipping")
return devices
def _auto_detect_devices(self) -> List[GPUDevice]:
"""
自动检测所有可用 GPU
Returns:
检测到的设备列表
"""
all_devices = get_all_gpu_info()
# 过滤不可用设备
return [d for d in all_devices if d.available]
@property
def enabled(self) -> bool:
"""调度器是否启用"""
return self._enabled
@property
def device_count(self) -> int:
"""设备数量"""
return len(self._devices)
def acquire(self) -> Optional[int]:
"""
获取下一个可用的 GPU 设备(轮询调度)
Returns:
GPU 设备索引,如果调度器未启用或无设备则返回 None
"""
if not self._enabled or not self._devices:
return None
with self._lock:
device = self._devices[self._next_index]
self._next_index = (self._next_index + 1) % len(self._devices)
logger.debug(f"Acquired GPU device: {device.index}")
return device.index
def release(self, device_index: Optional[int]) -> None:
"""
释放 GPU 设备
当前实现为无状态轮询,此方法仅用于日志记录。
Args:
device_index: 设备索引
"""
if device_index is not None:
logger.debug(f"Released GPU device: {device_index}")
def get_status(self) -> dict:
"""
获取调度器状态信息
Returns:
状态字典
"""
return {
'enabled': self._enabled,
'device_count': len(self._devices),
'devices': [
{'index': d.index, 'name': d.name, 'available': d.available}
for d in self._devices
],
'hw_accel': self._config.hw_accel,
}
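# Illustrative behaviour with two configured devices (indices 0 and 1):
# successive acquire() calls return 0, 1, 0, 1, ...; release() only logs and
# keeps no per-device state.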

View File

@@ -1,110 +0,0 @@
# -*- coding: utf-8 -*-
"""
租约续期服务
后台线程定期为正在执行的任务续期租约。
"""
import logging
import threading
import time
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from services.api_client import APIClientV2
logger = logging.getLogger(__name__)
class LeaseService:
"""
租约续期服务
在后台线程中定期调用 API 延长任务租约,
防止长时间任务因租约过期被回收。
"""
def __init__(
self,
api_client: 'APIClientV2',
task_id: str,
interval: int = 60,
extension: int = 300
):
"""
初始化租约服务
Args:
api_client: API 客户端
task_id: 任务 ID
interval: 续期间隔(秒),默认 60 秒
extension: 每次续期时长(秒),默认 300 秒
"""
self.api_client = api_client
self.task_id = task_id
self.interval = interval
self.extension = extension
self.running = False
self.thread: threading.Thread = None
self._stop_event = threading.Event()
def start(self):
"""启动租约续期线程"""
if self.running:
logger.warning(f"[task:{self.task_id}] Lease service already running")
return
self.running = True
self._stop_event.clear()
self.thread = threading.Thread(
target=self._run,
name=f"LeaseService-{self.task_id}",
daemon=True
)
self.thread.start()
logger.debug(f"[task:{self.task_id}] Lease service started (interval={self.interval}s)")
def stop(self):
"""停止租约续期线程"""
if not self.running:
return
self.running = False
self._stop_event.set()
if self.thread and self.thread.is_alive():
self.thread.join(timeout=5)
logger.debug(f"[task:{self.task_id}] Lease service stopped")
def _run(self):
"""续期线程主循环"""
while self.running:
# 等待指定间隔或收到停止信号
if self._stop_event.wait(timeout=self.interval):
# 收到停止信号
break
if self.running:
self._extend_lease()
def _extend_lease(self):
"""执行租约续期"""
try:
success = self.api_client.extend_lease(self.task_id, self.extension)
if success:
logger.debug(f"[task:{self.task_id}] Lease extended by {self.extension}s")
else:
logger.warning(f"[task:{self.task_id}] Failed to extend lease")
except Exception as e:
logger.warning(f"[task:{self.task_id}] Lease extension error: {e}")
def __enter__(self):
"""上下文管理器入口"""
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""上下文管理器出口"""
self.stop()
return False
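# Typical usage (illustrative): keep the lease alive while a long task runs.
#   with LeaseService(api_client, task.task_id, interval=60, extension=300):
#       handler.handle(task)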

237
services/render_service.py Normal file
View File

@@ -0,0 +1,237 @@
import subprocess
import os
import logging
from abc import ABC, abstractmethod
from typing import Optional, Union
from opentelemetry.trace import Status, StatusCode
from entity.render_task import RenderTask
from entity.ffmpeg_command_builder import FFmpegCommandBuilder
from util.exceptions import RenderError, FFmpegError
from util.ffmpeg import probe_video_info, fade_out_audio, handle_ffmpeg_output, subprocess_args
from telemetry import get_tracer
logger = logging.getLogger(__name__)
def _convert_ffmpeg_task_to_render_task(ffmpeg_task):
"""将旧的FfmpegTask转换为新的RenderTask"""
from entity.render_task import RenderTask, TaskType
# 获取输入文件
input_files = []
for inp in ffmpeg_task.input_file:
if hasattr(inp, 'get_output_file'):
input_files.append(inp.get_output_file())
else:
input_files.append(str(inp))
# 确定任务类型
task_type = TaskType.COPY
if ffmpeg_task.task_type == 'concat':
task_type = TaskType.CONCAT
elif ffmpeg_task.task_type == 'encode':
task_type = TaskType.ENCODE
# 创建新任务
render_task = RenderTask(
input_files=input_files,
output_file=ffmpeg_task.output_file,
task_type=task_type,
resolution=ffmpeg_task.resolution,
frame_rate=ffmpeg_task.frame_rate,
annexb=ffmpeg_task.annexb,
center_cut=ffmpeg_task.center_cut,
zoom_cut=ffmpeg_task.zoom_cut,
ext_data=getattr(ffmpeg_task, 'ext_data', {})
)
# 复制各种资源
render_task.effects = getattr(ffmpeg_task, 'effects', [])
render_task.luts = getattr(ffmpeg_task, 'luts', [])
render_task.audios = getattr(ffmpeg_task, 'audios', [])
render_task.overlays = getattr(ffmpeg_task, 'overlays', [])
render_task.subtitles = getattr(ffmpeg_task, 'subtitles', [])
return render_task
class RenderService(ABC):
"""渲染服务抽象接口"""
@abstractmethod
def render(self, task: Union[RenderTask, 'FfmpegTask']) -> bool:
"""
执行渲染任务
Args:
task: 渲染任务
Returns:
bool: 渲染是否成功
"""
pass
@abstractmethod
def get_video_info(self, file_path: str) -> tuple[int, int, float]:
"""
获取视频信息
Args:
file_path: 视频文件路径
Returns:
tuple: (width, height, duration)
"""
pass
@abstractmethod
def fade_out_audio(self, file_path: str, duration: float, fade_seconds: float = 2.0) -> str:
"""
音频淡出处理
Args:
file_path: 音频文件路径
duration: 音频总时长
fade_seconds: 淡出时长
Returns:
str: 处理后的文件路径
"""
pass
class DefaultRenderService(RenderService):
"""默认渲染服务实现"""
def render(self, task: Union[RenderTask, 'FfmpegTask']) -> bool:
"""执行渲染任务"""
# 兼容旧的FfmpegTask
if hasattr(task, 'get_ffmpeg_args'): # 这是FfmpegTask
# 使用旧的方式执行
return self._render_legacy_ffmpeg_task(task)
tracer = get_tracer(__name__)
with tracer.start_as_current_span("render_task") as span:
try:
# 验证任务
task.validate()
span.set_attribute("task.type", task.task_type.value)
span.set_attribute("task.input_files", len(task.input_files))
span.set_attribute("task.output_file", task.output_file)
# 检查是否需要处理
if not task.need_processing():
if len(task.input_files) == 1:
task.output_file = task.input_files[0]
span.set_status(Status(StatusCode.OK))
return True
# 构建FFmpeg命令
builder = FFmpegCommandBuilder(task)
ffmpeg_args = builder.build_command()
if not ffmpeg_args:
# 不需要处理,直接返回
if len(task.input_files) == 1:
task.output_file = task.input_files[0]
span.set_status(Status(StatusCode.OK))
return True
# 执行FFmpeg命令
return self._execute_ffmpeg(ffmpeg_args, span)
except Exception as e:
span.set_status(Status(StatusCode.ERROR))
logger.error(f"Render failed: {e}", exc_info=True)
raise RenderError(f"Render failed: {e}") from e
def _execute_ffmpeg(self, args: list[str], span) -> bool:
"""执行FFmpeg命令"""
span.set_attribute("ffmpeg.args", " ".join(args))
logger.info("Executing FFmpeg: %s", " ".join(args))
try:
# 执行FFmpeg进程
process = subprocess.run(
["ffmpeg", "-progress", "-", "-loglevel", "error"] + args[1:],
stderr=subprocess.PIPE,
**subprocess_args(True)
)
span.set_attribute("ffmpeg.return_code", process.returncode)
# 处理输出
if process.stdout:
output = handle_ffmpeg_output(process.stdout)
span.set_attribute("ffmpeg.output", output)
logger.info("FFmpeg output: %s", output)
# 检查返回码
if process.returncode != 0:
error_msg = process.stderr.decode() if process.stderr else "Unknown error"
span.set_attribute("ffmpeg.error", error_msg)
span.set_status(Status(StatusCode.ERROR))
logger.error("FFmpeg failed with return code %d: %s", process.returncode, error_msg)
raise FFmpegError(
f"FFmpeg execution failed",
command=args,
return_code=process.returncode,
stderr=error_msg
)
# 检查输出文件
output_file = args[-1] # 输出文件总是最后一个参数
if not os.path.exists(output_file):
span.set_status(Status(StatusCode.ERROR))
raise RenderError(f"Output file not created: {output_file}")
# 检查文件大小
file_size = os.path.getsize(output_file)
span.set_attribute("output.file_size", file_size)
if file_size < 4096: # 文件过小
span.set_status(Status(StatusCode.ERROR))
raise RenderError(f"Output file too small: {file_size} bytes")
span.set_status(Status(StatusCode.OK))
logger.info("FFmpeg execution completed successfully")
return True
except subprocess.SubprocessError as e:
span.set_status(Status(StatusCode.ERROR))
logger.error("Subprocess error: %s", e)
raise FFmpegError(f"Subprocess error: {e}") from e
def get_video_info(self, file_path: str) -> tuple[int, int, float]:
"""获取视频信息"""
return probe_video_info(file_path)
def fade_out_audio(self, file_path: str, duration: float, fade_seconds: float = 2.0) -> str:
"""音频淡出处理"""
return fade_out_audio(file_path, duration, fade_seconds)
def _render_legacy_ffmpeg_task(self, ffmpeg_task) -> bool:
"""兼容处理旧的FfmpegTask"""
tracer = get_tracer(__name__)
with tracer.start_as_current_span("render_legacy_ffmpeg_task") as span:
try:
# 处理依赖任务
for sub_task in ffmpeg_task.analyze_input_render_tasks():
if not self.render(sub_task):
span.set_status(Status(StatusCode.ERROR))
return False
# 获取FFmpeg参数
ffmpeg_args = ffmpeg_task.get_ffmpeg_args()
if not ffmpeg_args:
# 不需要处理,直接返回
span.set_status(Status(StatusCode.OK))
return True
# 执行FFmpeg命令
return self._execute_ffmpeg(ffmpeg_args, span)
except Exception as e:
span.set_status(Status(StatusCode.ERROR))
logger.error(f"Legacy FFmpeg task render failed: {e}", exc_info=True)
raise RenderError(f"Legacy render failed: {e}") from e

View File

@@ -1,239 +0,0 @@
# -*- coding: utf-8 -*-
"""
存储服务
提供文件上传/下载功能,支持 OSS 签名 URL 和 HTTP_REPLACE_MAP 环境变量。
"""
import os
import logging
import subprocess
from typing import Optional
from urllib.parse import unquote
import requests
logger = logging.getLogger(__name__)
# 文件扩展名到 Content-Type 的映射
_CONTENT_TYPE_MAP = {
'.mp4': 'video/mp4',
'.aac': 'audio/aac',
'.ts': 'video/mp2t',
'.m4a': 'audio/mp4',
}
def _get_content_type(file_path: str) -> str:
"""
根据文件扩展名获取 Content-Type
Args:
file_path: 文件路径
Returns:
Content-Type 字符串
"""
ext = os.path.splitext(file_path)[1].lower()
return _CONTENT_TYPE_MAP.get(ext, 'application/octet-stream')
def _apply_http_replace_map(url: str) -> str:
"""
应用 HTTP_REPLACE_MAP 环境变量替换 URL
Args:
url: 原始 URL
Returns:
替换后的 URL
"""
replace_map = os.getenv("HTTP_REPLACE_MAP", "")
if not replace_map:
return url
new_url = url
replace_list = [i.split("|", 1) for i in replace_map.split(",") if "|" in i]
for src, dst in replace_list:
new_url = new_url.replace(src, dst)
if new_url != url:
logger.debug(f"HTTP_REPLACE_MAP: {url} -> {new_url}")
return new_url
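# Example: with HTTP_REPLACE_MAP="https://cdn.example.com|http://10.0.0.2:9000"
# a download of https://cdn.example.com/v/a.mp4 is fetched from
# http://10.0.0.2:9000/v/a.mp4 instead (values are illustrative).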
def upload_file(url: str, file_path: str, max_retries: int = 5, timeout: int = 60) -> bool:
"""
使用签名 URL 上传文件到 OSS
Args:
url: 签名 URL
file_path: 本地文件路径
max_retries: 最大重试次数
timeout: 超时时间(秒)
Returns:
是否成功
"""
if not os.path.exists(file_path):
logger.error(f"File not found: {file_path}")
return False
file_size = os.path.getsize(file_path)
logger.info(f"Uploading: {file_path} ({file_size} bytes)")
# 检查是否使用 rclone 上传
if os.getenv("UPLOAD_METHOD") == "rclone":
logger.info(f"Uploading to: {url}")
result = _upload_with_rclone(url, file_path)
if result:
return True
# rclone 失败时回退到 HTTP
# 应用 HTTP_REPLACE_MAP 替换 URL
http_url = _apply_http_replace_map(url)
content_type = _get_content_type(file_path)
logger.info(f"Uploading to: {http_url} (Content-Type: {content_type})")
retries = 0
while retries < max_retries:
try:
with open(file_path, 'rb') as f:
with requests.put(
http_url,
data=f,
stream=True,
timeout=timeout,
headers={"Content-Type": content_type}
) as response:
response.raise_for_status()
logger.info(f"Upload succeeded: {file_path}")
return True
except requests.exceptions.Timeout:
retries += 1
logger.warning(f"Upload timed out. Retrying {retries}/{max_retries}...")
except requests.exceptions.RequestException as e:
retries += 1
logger.warning(f"Upload failed ({e}). Retrying {retries}/{max_retries}...")
logger.error(f"Upload failed after {max_retries} retries: {file_path}")
return False
def _upload_with_rclone(url: str, file_path: str) -> bool:
"""
使用 rclone 上传文件
Args:
url: 目标 URL
file_path: 本地文件路径
Returns:
是否成功
"""
replace_map = os.getenv("RCLONE_REPLACE_MAP", "")
if not replace_map:
return False
config_file = os.getenv("RCLONE_CONFIG_FILE", "")
# 替换 URL
new_url = url
replace_list = [i.split("|", 1) for i in replace_map.split(",") if "|" in i]
for src, dst in replace_list:
new_url = new_url.replace(src, dst)
new_url = new_url.split("?", 1)[0] # 移除查询参数
new_url = unquote(new_url) # 解码 URL 编码的字符(如 %2F -> /)
if new_url == url:
return False
cmd = [
"rclone",
"copyto",
"--no-check-dest",
"--ignore-existing",
"--multi-thread-chunk-size",
"8M",
"--multi-thread-streams",
"8",
]
if config_file:
cmd.extend(["--config", config_file])
cmd.extend([file_path, new_url])
logger.debug(f"rclone command: {' '.join(cmd)}")
result = subprocess.run(cmd, capture_output=True, text=True)
if result.returncode == 0:
logger.info(f"rclone upload succeeded: {file_path}")
return True
stderr = (result.stderr or '').strip()
stderr = stderr[:500] if stderr else ""
logger.warning(f"rclone upload failed (code={result.returncode}): {file_path} {stderr}")
return False
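# Example (illustrative values): with
#   RCLONE_REPLACE_MAP="https://oss.example.com|alioss://my-bucket"
# the signed URL https://oss.example.com/out/v.mp4?Expires=...&Signature=...
# becomes the rclone target alioss://my-bucket/out/v.mp4 (query string stripped
# and percent-encoding decoded before the copy).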
def download_file(
url: str,
file_path: str,
max_retries: int = 5,
timeout: int = 30,
skip_if_exist: bool = False
) -> bool:
"""
使用签名 URL 下载文件
Args:
url: 签名 URL
file_path: 本地文件路径
max_retries: 最大重试次数
timeout: 超时时间(秒)
skip_if_exist: 如果文件存在则跳过
Returns:
是否成功
"""
# 如果文件已存在且跳过
if skip_if_exist and os.path.exists(file_path):
logger.debug(f"File exists, skipping download: {file_path}")
return True
logger.info(f"Downloading: {url}")
# 确保目标目录存在
file_dir = os.path.dirname(file_path)
if file_dir:
os.makedirs(file_dir, exist_ok=True)
# 应用 HTTP_REPLACE_MAP 替换 URL
http_url = _apply_http_replace_map(url)
retries = 0
while retries < max_retries:
try:
with requests.get(http_url, timeout=timeout, stream=True) as response:
response.raise_for_status()
with open(file_path, 'wb') as f:
for chunk in response.iter_content(chunk_size=8192):
if chunk:
f.write(chunk)
file_size = os.path.getsize(file_path)
logger.info(f"Download succeeded: {file_path} ({file_size} bytes)")
return True
except requests.exceptions.Timeout:
retries += 1
logger.warning(f"Download timed out. Retrying {retries}/{max_retries}...")
except requests.exceptions.RequestException as e:
retries += 1
logger.warning(f"Download failed ({e}). Retrying {retries}/{max_retries}...")
logger.error(f"Download failed after {max_retries} retries: {url}")
return False
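A worked example of the HTTP_REPLACE_MAP rewrite used by download_file() and, as a fallback, upload_file(); the host names and mapping below are illustrative assumptions, not values taken from this repository.
# Illustrative only: map a public CDN host to an intranet mirror before transferring.
# HTTP_REPLACE_MAP="https://cdn.example.com|http://10.0.0.2:9000"
signed_url = "https://cdn.example.com/assets/clip.mp4?Expires=1700000000&Signature=abc"
# _apply_http_replace_map(signed_url)
#   -> "http://10.0.0.2:9000/assets/clip.mp4?Expires=1700000000&Signature=abc"
# Only the prefix is rewritten; the query string (the signature) is preserved, so
# download_file(signed_url, "tmp/clip.mp4", skip_if_exist=True) fetches from the mirror.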

View File

@@ -1,278 +0,0 @@
# -*- coding: utf-8 -*-
"""
任务执行器
管理任务的并发执行,协调处理器、租约服务等组件。
"""
import logging
import threading
from concurrent.futures import ThreadPoolExecutor, Future
from typing import Dict, Optional, TYPE_CHECKING
from domain.task import Task, TaskType
from domain.result import TaskResult, ErrorCode
# 需要 GPU 加速的任务类型
GPU_REQUIRED_TASK_TYPES = {
TaskType.RENDER_SEGMENT_VIDEO,
TaskType.COMPOSE_TRANSITION,
}
from domain.config import WorkerConfig
from core.handler import TaskHandler
from services.lease_service import LeaseService
from services.gpu_scheduler import GPUScheduler
if TYPE_CHECKING:
from services.api_client import APIClientV2
logger = logging.getLogger(__name__)
class TaskExecutor:
"""
任务执行器
负责任务的并发调度和执行,包括:
- 注册和管理任务处理器
- 维护任务执行状态
- 协调租约续期
- 上报执行结果
"""
def __init__(self, config: WorkerConfig, api_client: 'APIClientV2'):
"""
初始化任务执行器
Args:
config: Worker 配置
api_client: API 客户端
"""
self.config = config
self.api_client = api_client
# 任务处理器注册表
self.handlers: Dict[TaskType, TaskHandler] = {}
# 当前任务跟踪
self.current_tasks: Dict[str, Task] = {}
self.current_futures: Dict[str, Future] = {}
# 线程池
self.executor = ThreadPoolExecutor(
max_workers=config.max_concurrency,
thread_name_prefix="TaskWorker"
)
# 线程安全锁
self.lock = threading.Lock()
# GPU 调度器(如果启用硬件加速)
self.gpu_scheduler = GPUScheduler(config)
if self.gpu_scheduler.enabled:
logger.info(f"GPU scheduler enabled with {self.gpu_scheduler.device_count} device(s)")
# 注册处理器
self._register_handlers()
def _register_handlers(self):
"""注册所有任务处理器"""
# 延迟导入以避免循环依赖
from handlers.render_video import RenderSegmentVideoHandler
from handlers.compose_transition import ComposeTransitionHandler
from handlers.prepare_audio import PrepareJobAudioHandler
from handlers.package_ts import PackageSegmentTsHandler
from handlers.finalize_mp4 import FinalizeMp4Handler
handlers = [
RenderSegmentVideoHandler(self.config, self.api_client),
ComposeTransitionHandler(self.config, self.api_client),
PrepareJobAudioHandler(self.config, self.api_client),
PackageSegmentTsHandler(self.config, self.api_client),
FinalizeMp4Handler(self.config, self.api_client),
]
for handler in handlers:
task_type = handler.get_supported_type()
self.handlers[task_type] = handler
logger.debug(f"Registered handler for {task_type.value}")
def get_current_task_ids(self) -> list:
"""
获取当前正在执行的任务 ID 列表
Returns:
任务 ID 列表
"""
with self.lock:
return list(self.current_tasks.keys())
def get_current_task_count(self) -> int:
"""
获取当前正在执行的任务数量
Returns:
任务数量
"""
with self.lock:
return len(self.current_tasks)
def can_accept_task(self) -> bool:
"""
检查是否可以接受新任务
Returns:
是否可以接受
"""
return self.get_current_task_count() < self.config.max_concurrency
def submit_task(self, task: Task) -> bool:
"""
提交任务到线程池
Args:
task: 任务实体
Returns:
是否提交成功
"""
with self.lock:
# 检查任务是否已在执行
if task.task_id in self.current_tasks:
logger.warning(f"[task:{task.task_id}] Task already running, skipping")
return False
# 检查并发上限
if len(self.current_tasks) >= self.config.max_concurrency:
logger.info(
f"[task:{task.task_id}] Max concurrency reached "
f"({self.config.max_concurrency}), rejecting task"
)
return False
# 检查是否有对应的处理器
if task.task_type not in self.handlers:
logger.error(f"[task:{task.task_id}] No handler for type: {task.task_type.value}")
return False
# 记录任务
self.current_tasks[task.task_id] = task
# 提交到线程池
future = self.executor.submit(self._process_task, task)
self.current_futures[task.task_id] = future
logger.info(f"[task:{task.task_id}] Submitted ({task.task_type.value})")
return True
def _process_task(self, task: Task):
"""
处理单个任务(在线程池中执行)
Args:
task: 任务实体
"""
task_id = task.task_id
logger.info(f"[task:{task_id}] Starting {task.task_type.value}")
# 启动租约续期服务
lease_service = LeaseService(
self.api_client,
task_id,
interval=self.config.lease_extension_threshold,
extension=self.config.lease_extension_duration
)
lease_service.start()
# 获取 GPU 设备(仅对需要 GPU 的任务类型)
device_index = None
needs_gpu = task.task_type in GPU_REQUIRED_TASK_TYPES
if needs_gpu and self.gpu_scheduler.enabled:
device_index = self.gpu_scheduler.acquire()
if device_index is not None:
logger.info(f"[task:{task_id}] Assigned to GPU device {device_index}")
# 获取处理器(需要在设置 GPU 设备前获取)
handler = self.handlers.get(task.task_type)
try:
# 报告任务开始
self.api_client.report_start(task_id)
if not handler:
raise ValueError(f"No handler for task type: {task.task_type}")
# 设置 GPU 设备(线程本地存储)
if device_index is not None:
handler.set_gpu_device(device_index)
# 执行前钩子
handler.before_handle(task)
# 执行任务
result = handler.handle(task)
# 执行后钩子
handler.after_handle(task, result)
# 上报结果
if result.success:
self.api_client.report_success(task_id, result.data)
logger.info(f"[task:{task_id}] Completed successfully")
else:
error_code = result.error_code.value if result.error_code else 'E_UNKNOWN'
self.api_client.report_fail(task_id, error_code, result.error_message or '')
logger.error(f"[task:{task_id}] Failed: {result.error_message}")
except Exception as e:
logger.error(f"[task:{task_id}] Exception: {e}", exc_info=True)
self.api_client.report_fail(task_id, 'E_UNKNOWN', str(e))
finally:
# 清除 GPU 设备设置
if handler:
handler.clear_gpu_device()
# 释放 GPU 设备(仅当实际分配了设备时)
if device_index is not None:
self.gpu_scheduler.release(device_index)
# 停止租约续期
lease_service.stop()
# 从当前任务中移除
with self.lock:
self.current_tasks.pop(task_id, None)
self.current_futures.pop(task_id, None)
def shutdown(self, wait: bool = True):
"""
关闭执行器
Args:
wait: 是否等待所有任务完成
"""
logger.info("Shutting down task executor...")
# 关闭线程池
self.executor.shutdown(wait=wait)
# 清理状态
with self.lock:
self.current_tasks.clear()
self.current_futures.clear()
logger.info("Task executor shutdown complete")
def get_handler(self, task_type: TaskType) -> Optional[TaskHandler]:
"""
获取指定类型的处理器
Args:
task_type: 任务类型
Returns:
处理器实例,不存在则返回 None
"""
return self.handlers.get(task_type)
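A minimal sketch of how this (now removed) executor was driven; the construction of WorkerConfig, APIClientV2 and the task list is assumed for illustration and is not shown in this diff.
# Hypothetical wiring; only the TaskExecutor calls mirror the class above.
config = WorkerConfig()                      # assumed defaults, e.g. max_concurrency=4
api_client = APIClientV2(config)             # assumed constructor
executor = TaskExecutor(config, api_client)
for task in fetch_leased_tasks():            # hypothetical polling helper yielding Task objects
    if executor.can_accept_task():
        executor.submit_task(task)           # executed by _process_task in the thread pool
executor.shutdown(wait=True)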

289
services/task_service.py Normal file
View File

@@ -0,0 +1,289 @@
import json
import logging
import os
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from typing import Dict, Any, List, Optional
from opentelemetry.trace import Status, StatusCode
from entity.render_task import RenderTask
from services.render_service import RenderService
from services.template_service import TemplateService
from util.exceptions import TaskError, TaskValidationError
from util import api, oss
from telemetry import get_tracer
logger = logging.getLogger(__name__)
class TaskService(ABC):
"""任务服务抽象接口"""
@abstractmethod
def process_task(self, task_info: Dict[str, Any]) -> bool:
"""
处理任务
Args:
task_info: 任务信息
Returns:
bool: 处理是否成功
"""
pass
@abstractmethod
def create_render_task(self, task_info: Dict[str, Any], template_info: Dict[str, Any]) -> RenderTask:
"""
创建渲染任务
Args:
task_info: 任务信息
template_info: 模板信息
Returns:
RenderTask: 渲染任务对象
"""
pass
class DefaultTaskService(TaskService):
"""默认任务服务实现"""
def __init__(self, render_service: RenderService, template_service: TemplateService):
self.render_service = render_service
self.template_service = template_service
def process_task(self, task_info: Dict[str, Any]) -> bool:
"""处理任务"""
tracer = get_tracer(__name__)
with tracer.start_as_current_span("process_task") as span:
try:
# 标准化任务信息
task_info = api.normalize_task(task_info)
span.set_attribute("task.id", task_info.get("id", "unknown"))
span.set_attribute("task.template_id", task_info.get("templateId", "unknown"))
# 获取模板信息
template_id = task_info.get("templateId")
template_info = self.template_service.get_template(template_id)
if not template_info:
raise TaskError(f"Template not found: {template_id}")
# 报告任务开始
api.report_task_start(task_info)
# 创建渲染任务
render_task = self.create_render_task(task_info, template_info)
# 执行渲染
success = self.render_service.render(render_task)
if not success:
span.set_status(Status(StatusCode.ERROR))
api.report_task_failed(task_info, "Render failed")
return False
# 获取视频信息
width, height, duration = self.render_service.get_video_info(render_task.output_file)
span.set_attribute("video.width", width)
span.set_attribute("video.height", height)
span.set_attribute("video.duration", duration)
# 音频淡出
new_file = self.render_service.fade_out_audio(render_task.output_file, duration)
render_task.output_file = new_file
# 上传文件 - 创建一个兼容对象
class TaskCompat:
def __init__(self, output_file):
self.output_file = output_file
def get_output_file(self):
return self.output_file
task_compat = TaskCompat(render_task.output_file)
upload_success = api.upload_task_file(task_info, task_compat)
if not upload_success:
span.set_status(Status(StatusCode.ERROR))
api.report_task_failed(task_info, "Upload failed")
return False
# 清理临时文件
self._cleanup_temp_files(render_task)
# 报告任务成功
api.report_task_success(task_info, videoInfo={
"width": width,
"height": height,
"duration": duration
})
span.set_status(Status(StatusCode.OK))
return True
except Exception as e:
span.set_status(Status(StatusCode.ERROR))
logger.error(f"Task processing failed: {e}", exc_info=True)
api.report_task_failed(task_info, str(e))
return False
def create_render_task(self, task_info: Dict[str, Any], template_info: Dict[str, Any]) -> RenderTask:
"""创建渲染任务"""
tracer = get_tracer(__name__)
with tracer.start_as_current_span("create_render_task") as span:
# 解析任务参数
task_params_str = task_info.get("taskParams", "{}")
span.set_attribute("task_params", task_params_str)
try:
task_params = json.loads(task_params_str)
task_params_orig = json.loads(task_params_str)
except json.JSONDecodeError as e:
raise TaskValidationError(f"Invalid task params JSON: {e}")
# 并行下载资源
self._download_resources(task_params)
# 创建子任务列表
sub_tasks = []
only_if_usage_count = {}
for part in template_info.get("video_parts", []):
source, ext_data = self._parse_video_source(
part.get('source'), task_params, template_info
)
if not source:
logger.warning("No video found for part: %s", part)
continue
# 检查only_if条件
only_if = part.get('only_if', '')
if only_if:
only_if_usage_count[only_if] = only_if_usage_count.get(only_if, 0) + 1
required_count = only_if_usage_count[only_if]
if not self._check_placeholder_exist_with_count(only_if, task_params_orig, required_count):
logger.info("Skipping part due to only_if condition: %s (need %d)", only_if, required_count)
continue
# 创建子任务
sub_task = self._create_sub_task(part, source, ext_data, template_info)
sub_tasks.append(sub_task)
# 创建主任务
output_file = f"out_{task_info.get('id', 'unknown')}.mp4"
main_task = RenderTask(
input_files=[task.output_file for task in sub_tasks],
output_file=output_file,
resolution=template_info.get("video_size", ""),
frame_rate=template_info.get("frame_rate", 25),
center_cut=template_info.get("crop_mode"),
zoom_cut=template_info.get("zoom_cut")
)
# 应用整体模板设置
overall_template = template_info.get("overall_template", {})
self._apply_template_settings(main_task, overall_template, template_info)
# 设置扩展数据
main_task.ext_data = task_info
span.set_attribute("render_task.sub_tasks", len(sub_tasks))
span.set_attribute("render_task.effects", len(main_task.effects))
return main_task
def _download_resources(self, task_params: Dict[str, Any]):
"""并行下载资源"""
with ThreadPoolExecutor(max_workers=8) as executor:
for param_list in task_params.values():
if isinstance(param_list, list):
for param in param_list:
url = param.get("url", "")
if url.startswith("http"):
_, filename = os.path.split(url)
executor.submit(oss.download_from_oss, url, filename, True)
def _parse_video_source(self, source: str, task_params: Dict[str, Any],
template_info: Dict[str, Any]) -> tuple[Optional[str], Dict[str, Any]]:
"""解析视频源"""
if source.startswith('PLACEHOLDER_'):
placeholder_id = source.replace('PLACEHOLDER_', '')
new_sources = task_params.get(placeholder_id, [])
pick_source = {}
if isinstance(new_sources, list):
if len(new_sources) == 0:
logger.debug("No video found for placeholder: %s", placeholder_id)
return None, pick_source
else:
pick_source = new_sources.pop(0)
new_sources = pick_source.get("url", "")
if new_sources.startswith("http"):
_, source_name = os.path.split(new_sources)
oss.download_from_oss(new_sources, source_name, True)
return source_name, pick_source
return new_sources, pick_source
return os.path.join(template_info.get("local_path", ""), source), {}
def _check_placeholder_exist_with_count(self, placeholder_id: str, task_params: Dict[str, Any],
required_count: int = 1) -> bool:
"""检查占位符是否存在足够数量的片段"""
if placeholder_id in task_params:
new_sources = task_params.get(placeholder_id, [])
if isinstance(new_sources, list):
return len(new_sources) >= required_count
return required_count <= 1
return False
def _create_sub_task(self, part: Dict[str, Any], source: str, ext_data: Dict[str, Any],
template_info: Dict[str, Any]) -> RenderTask:
"""创建子任务"""
sub_task = RenderTask(
input_files=[source],
resolution=template_info.get("video_size", ""),
frame_rate=template_info.get("frame_rate", 25),
annexb=True,
center_cut=part.get("crop_mode"),
zoom_cut=part.get("zoom_cut"),
ext_data=ext_data
)
# 应用部分模板设置
self._apply_template_settings(sub_task, part, template_info)
return sub_task
def _apply_template_settings(self, task: RenderTask, template_part: Dict[str, Any],
template_info: Dict[str, Any]):
"""应用模板设置到任务"""
# 添加效果
for effect in template_part.get('effects', []):
task.add_effect(effect)
# 添加LUT
for lut in template_part.get('luts', []):
full_path = os.path.join(template_info.get("local_path", ""), lut)
task.add_lut(full_path.replace("\\", "/"))
# 添加音频
for audio in template_part.get('audios', []):
full_path = os.path.join(template_info.get("local_path", ""), audio)
task.add_audios(full_path)
# 添加覆盖层
for overlay in template_part.get('overlays', []):
full_path = os.path.join(template_info.get("local_path", ""), overlay)
task.add_overlay(full_path)
def _cleanup_temp_files(self, task: RenderTask):
"""清理临时文件"""
try:
template_dir = os.getenv("TEMPLATE_DIR", "")
if template_dir and template_dir not in task.output_file:
if os.path.exists(task.output_file):
os.remove(task.output_file)
logger.info("Cleaned up temp file: %s", task.output_file)
else:
logger.info("Skipped cleanup of template file: %s", task.output_file)
except OSError as e:
logger.warning("Failed to cleanup temp file %s: %s", task.output_file, e)

View File

@@ -0,0 +1,266 @@
import json
import os
import logging
from abc import ABC, abstractmethod
from typing import Dict, Any, Optional
from opentelemetry.trace import Status, StatusCode
from util.exceptions import TemplateError, TemplateNotFoundError, TemplateValidationError
from util import api, oss
from config.settings import get_storage_config
from telemetry import get_tracer
logger = logging.getLogger(__name__)
class TemplateService(ABC):
"""模板服务抽象接口"""
@abstractmethod
def get_template(self, template_id: str) -> Optional[Dict[str, Any]]:
"""
获取模板信息
Args:
template_id: 模板ID
Returns:
Dict[str, Any]: 模板信息,如果不存在则返回None
"""
pass
@abstractmethod
def load_local_templates(self):
"""加载本地模板"""
pass
@abstractmethod
def download_template(self, template_id: str) -> bool:
"""
下载模板
Args:
template_id: 模板ID
Returns:
bool: 下载是否成功
"""
pass
@abstractmethod
def validate_template(self, template_info: Dict[str, Any]) -> bool:
"""
验证模板
Args:
template_info: 模板信息
Returns:
bool: 验证是否通过
"""
pass
class DefaultTemplateService(TemplateService):
"""默认模板服务实现"""
def __init__(self):
self.templates: Dict[str, Dict[str, Any]] = {}
self.storage_config = get_storage_config()
def get_template(self, template_id: str) -> Optional[Dict[str, Any]]:
"""获取模板信息"""
if template_id not in self.templates:
# 尝试下载模板
if not self.download_template(template_id):
return None
return self.templates.get(template_id)
def load_local_templates(self):
"""加载本地模板"""
template_dir = self.storage_config.template_dir
if not os.path.exists(template_dir):
logger.warning("Template directory does not exist: %s", template_dir)
return
for template_name in os.listdir(template_dir):
if template_name.startswith("_") or template_name.startswith("."):
continue
target_path = os.path.join(template_dir, template_name)
if os.path.isdir(target_path):
try:
self._load_template(template_name, target_path)
except Exception as e:
logger.error("Failed to load template %s: %s", template_name, e)
def download_template(self, template_id: str) -> bool:
"""下载模板"""
tracer = get_tracer(__name__)
with tracer.start_as_current_span("download_template") as span:
try:
span.set_attribute("template.id", template_id)
# 获取远程模板信息
template_info = api.get_template_info(template_id)
if template_info is None:
logger.warning("Failed to get template info: %s", template_id)
return False
local_path = template_info.get('local_path')
if not local_path:
local_path = os.path.join(self.storage_config.template_dir, str(template_id))
template_info['local_path'] = local_path
# 创建本地目录
if not os.path.isdir(local_path):
os.makedirs(local_path)
# 下载模板资源
overall_template = template_info.get('overall_template', {})
video_parts = template_info.get('video_parts', [])
self._download_template_assets(overall_template, template_info)
for video_part in video_parts:
self._download_template_assets(video_part, template_info)
# 保存模板定义文件
template_file = os.path.join(local_path, 'template.json')
with open(template_file, 'w', encoding='utf-8') as f:
json.dump(template_info, f, ensure_ascii=False, indent=2)
# 加载到内存
self._load_template(template_id, local_path)
span.set_status(Status(StatusCode.OK))
logger.info("Template downloaded successfully: %s", template_id)
return True
except Exception as e:
span.set_status(Status(StatusCode.ERROR))
logger.error("Failed to download template %s: %s", template_id, e)
return False
def validate_template(self, template_info: Dict[str, Any]) -> bool:
"""验证模板"""
try:
local_path = template_info.get("local_path")
if not local_path:
raise TemplateValidationError("Template missing local_path")
# 验证视频部分
for video_part in template_info.get("video_parts", []):
self._validate_template_part(video_part, local_path)
# 验证整体模板
overall_template = template_info.get("overall_template", {})
if overall_template:
self._validate_template_part(overall_template, local_path)
return True
except TemplateValidationError:
raise
except Exception as e:
raise TemplateValidationError(f"Template validation failed: {e}")
def _load_template(self, template_name: str, local_path: str):
"""加载单个模板"""
logger.info("Loading template: %s (%s)", template_name, local_path)
template_def_file = os.path.join(local_path, "template.json")
if not os.path.exists(template_def_file):
raise TemplateNotFoundError(f"Template definition file not found: {template_def_file}")
try:
with open(template_def_file, 'r', encoding='utf-8') as f:
template_info = json.load(f)
except json.JSONDecodeError as e:
raise TemplateError(f"Invalid template JSON: {e}")
template_info["local_path"] = local_path
try:
self.validate_template(template_info)
self.templates[template_name] = template_info
logger.info("Template loaded successfully: %s", template_name)
except TemplateValidationError as e:
logger.error("Template validation failed for %s: %s. Attempting to re-download.", template_name, e)
# 模板验证失败,尝试重新下载
if self.download_template(template_name):
logger.info("Template re-downloaded successfully: %s", template_name)
else:
logger.error("Failed to re-download template: %s", template_name)
raise
def _download_template_assets(self, template_part: Dict[str, Any], template_info: Dict[str, Any]):
"""下载模板资源"""
local_path = template_info['local_path']
# 下载源文件
if 'source' in template_part:
source = template_part['source']
if isinstance(source, str) and source.startswith("http"):
_, filename = os.path.split(source)
new_file_path = os.path.join(local_path, filename)
oss.download_from_oss(source, new_file_path)
if filename.endswith(".mp4"):
from util.ffmpeg import re_encode_and_annexb
new_file_path = re_encode_and_annexb(new_file_path)
template_part['source'] = os.path.relpath(new_file_path, local_path)
# 下载覆盖层
if 'overlays' in template_part:
for i, overlay in enumerate(template_part['overlays']):
if isinstance(overlay, str) and overlay.startswith("http"):
_, filename = os.path.split(overlay)
oss.download_from_oss(overlay, os.path.join(local_path, filename))
template_part['overlays'][i] = filename
# 下载LUT
if 'luts' in template_part:
for i, lut in enumerate(template_part['luts']):
if isinstance(lut, str) and lut.startswith("http"):
_, filename = os.path.split(lut)
oss.download_from_oss(lut, os.path.join(local_path, filename))
template_part['luts'][i] = filename
# 下载音频
if 'audios' in template_part:
for i, audio in enumerate(template_part['audios']):
if isinstance(audio, str) and audio.startswith("http"):
_, filename = os.path.split(audio)
oss.download_from_oss(audio, os.path.join(local_path, filename))
template_part['audios'][i] = filename
def _validate_template_part(self, template_part: Dict[str, Any], base_dir: str):
"""验证模板部分"""
# 验证源文件
source_file = template_part.get("source", "")
if source_file and not source_file.startswith("http") and not source_file.startswith("PLACEHOLDER_"):
if not os.path.isabs(source_file):
source_file = os.path.join(base_dir, source_file)
if not os.path.exists(source_file):
raise TemplateValidationError(f"Source file not found: {source_file}")
# 验证音频文件
for audio in template_part.get("audios", []):
if not os.path.isabs(audio):
audio = os.path.join(base_dir, audio)
if not os.path.exists(audio):
raise TemplateValidationError(f"Audio file not found: {audio}")
# 验证LUT文件
for lut in template_part.get("luts", []):
if not os.path.isabs(lut):
lut = os.path.join(base_dir, lut)
if not os.path.exists(lut):
raise TemplateValidationError(f"LUT file not found: {lut}")
# 验证覆盖层文件
for overlay in template_part.get("overlays", []):
if not os.path.isabs(overlay):
overlay = os.path.join(base_dir, overlay)
if not os.path.exists(overlay):
raise TemplateValidationError(f"Overlay file not found: {overlay}")

37
telemetry/__init__.py Normal file
View File

@@ -0,0 +1,37 @@
import os
from constant import SOFTWARE_VERSION
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter as OTLPSpanHttpExporter
from opentelemetry.sdk.resources import DEPLOYMENT_ENVIRONMENT, HOST_NAME, Resource, SERVICE_NAME, SERVICE_VERSION
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, SimpleSpanProcessor
from opentelemetry.instrumentation.threading import ThreadingInstrumentor
ThreadingInstrumentor().instrument()
def get_tracer(name):
return trace.get_tracer(name)
# 初始化 OpenTelemetry
def init_opentelemetry(batch=True):
# 设置服务名、主机名
resource = Resource(attributes={
SERVICE_NAME: "RENDER_WORKER",
SERVICE_VERSION: SOFTWARE_VERSION,
DEPLOYMENT_ENVIRONMENT: "Python",
HOST_NAME: os.getenv("ACCESS_KEY"),
})
# 使用HTTP协议上报
if batch:
span_processor = BatchSpanProcessor(OTLPSpanHttpExporter(
endpoint="https://oltp.jerryyan.top/v1/traces",
))
else:
span_processor = SimpleSpanProcessor(OTLPSpanHttpExporter(
endpoint="https://oltp.jerryyan.top/v1/traces",
))
trace_provider = TracerProvider(resource=resource, active_span_processor=span_processor)
trace.set_tracer_provider(trace_provider)
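A short usage sketch of the helpers above; the span name and attribute are arbitrary.
from telemetry import init_opentelemetry, get_tracer

init_opentelemetry(batch=True)               # BatchSpanProcessor suits long-running workers
tracer = get_tracer(__name__)
with tracer.start_as_current_span("demo.operation") as span:
    span.set_attribute("demo.key", "value")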

1
template/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
**/*

69
template/__init__.py Normal file
View File

@@ -0,0 +1,69 @@
import json
import os
import logging
from telemetry import get_tracer
from util import api, oss
from services.template_service import DefaultTemplateService
logger = logging.getLogger("template")
# 全局模板服务实例
_template_service = None
def _get_template_service():
"""获取模板服务实例"""
global _template_service
if _template_service is None:
_template_service = DefaultTemplateService()
return _template_service
# 向后兼容的全局变量和函数
TEMPLATES = {}
def _update_templates_dict():
"""更新全局TEMPLATES字典以保持向后兼容"""
service = _get_template_service()
TEMPLATES.clear()
TEMPLATES.update(service.templates)
def check_local_template(local_name):
"""向后兼容函数"""
service = _get_template_service()
template_def = service.templates.get(local_name)
if template_def:
try:
service.validate_template(template_def)
except Exception as e:
logger.error(f"Template validation failed: {e}")
raise
def load_template(template_name, local_path):
"""向后兼容函数"""
service = _get_template_service()
service._load_template(template_name, local_path)
_update_templates_dict()
def load_local_template():
"""加载本地模板(向后兼容函数)"""
service = _get_template_service()
service.load_local_templates()
_update_templates_dict()
def get_template_def(template_id):
"""获取模板定义(向后兼容函数)"""
service = _get_template_service()
template = service.get_template(template_id)
_update_templates_dict()
return template
def download_template(template_id):
"""下载模板(向后兼容函数)"""
service = _get_template_service()
success = service.download_template(template_id)
_update_templates_dict()
return success
def analyze_template(template_id):
"""分析模板(占位符函数)"""
pass
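A sketch of how existing call sites keep working through this compatibility layer; the template id is illustrative.
import template

template.load_local_template()                # fills the legacy TEMPLATES dict
tpl = template.get_template_def("tpl-demo")   # downloads on cache miss, then returns the definition
if tpl:
    print(tpl["video_size"], len(tpl.get("video_parts", [])))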

View File

@@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-
import os
from services.cache import MaterialCache, _extract_cache_key
def test_cache_lock_acquire_release(tmp_path):
cache = MaterialCache(cache_dir=str(tmp_path), enabled=True, max_size_gb=0)
cache_key = _extract_cache_key("https://example.com/path/file.mp4?token=abc")
lock_path = cache._acquire_lock(cache_key)
assert lock_path
assert os.path.exists(lock_path)
cache._release_lock(lock_path)
assert not os.path.exists(lock_path)

View File

@@ -1,15 +0,0 @@
# -*- coding: utf-8 -*-
"""
工具模块
提供系统信息采集等工具函数。
"""
from util.system import get_sys_info, get_capabilities, get_gpu_info, get_ffmpeg_version
__all__ = [
'get_sys_info',
'get_capabilities',
'get_gpu_info',
'get_ffmpeg_version',
]

261
util/api.py Normal file
View File

@@ -0,0 +1,261 @@
import json
import logging
import os
import threading
import requests
from opentelemetry.trace import Status, StatusCode
import util.system
from telemetry import get_tracer
from util import oss
session = requests.Session()
logger = logging.getLogger(__name__)
def normalize_task(task_info):
...
return task_info
def sync_center():
"""
通过接口获取任务
:return: 任务列表
"""
from services import DefaultTemplateService
template_service = DefaultTemplateService()
try:
response = session.post(os.getenv('API_ENDPOINT') + "/sync", json={
'accessKey': os.getenv('ACCESS_KEY'),
'clientStatus': util.system.get_sys_info(),
'templateList': [{'id': t.get('id', ''), 'updateTime': t.get('updateTime', '')} for t in
template_service.templates.values()]
}, timeout=10)
response.raise_for_status()
except requests.RequestException as e:
logger.error("请求失败!", e)
return []
data = response.json()
logger.debug("获取任务结果:【%s", data)
if data.get('code', 0) == 200:
templates = data.get('data', {}).get('templates', [])
tasks = data.get('data', {}).get('tasks', [])
else:
tasks = []
templates = []
logger.warning("获取任务失败")
if os.getenv("REDIRECT_TO_URL", False) != False:
for task in tasks:
_sess = requests.Session()
logger.info("重定向任务【%s】至配置的地址:%s", task.get("id"), os.getenv("REDIRECT_TO_URL"))
url = f"{os.getenv('REDIRECT_TO_URL')}{task.get('id')}"
threading.Thread(target=requests.post, args=(url,)).start()
return []
for template in templates:
template_id = template.get('id', '')
if template_id:
logger.info("更新模板:【%s", template_id)
template_service.download_template(template_id)
return tasks
def get_template_info(template_id):
"""
通过接口获取模板信息
:rtype: Template
:param template_id: 模板id
:type template_id: str
:return: 模板信息
"""
tracer = get_tracer(__name__)
with tracer.start_as_current_span("get_template_info"):
with tracer.start_as_current_span("get_template_info.request") as req_span:
try:
req_span.set_attribute("http.method", "POST")
req_span.set_attribute("http.url", '{0}/template/{1}'.format(os.getenv('API_ENDPOINT'), template_id))
response = session.post('{0}/template/{1}'.format(os.getenv('API_ENDPOINT'), template_id), json={
'accessKey': os.getenv('ACCESS_KEY'),
}, timeout=10)
req_span.set_attribute("http.status_code", response.status_code)
req_span.set_attribute("http.response", response.text)
response.raise_for_status()
except requests.RequestException as e:
req_span.set_attribute("api.error", str(e))
logger.error("请求失败!", e)
return None
data = response.json()
logger.debug("获取模板信息结果:【%s", data)
remote_template_info = data.get('data', {})
if not remote_template_info:
logger.warning("获取模板信息结果为空", data)
return None
template = {
'id': template_id,
'updateTime': remote_template_info.get('updateTime', template_id),
'scenic_name': remote_template_info.get('scenicName', '景区'),
'name': remote_template_info.get('name', '模版'),
'video_size': remote_template_info.get('resolution', '1920x1080'),
'frame_rate': 25,
'overall_duration': 30,
'video_parts': [
]
}
def _template_normalizer(template_info):
_template = {}
_placeholder_type = template_info.get('isPlaceholder', -1)
if _placeholder_type == 0:
# 固定视频
_template['source'] = template_info.get('sourceUrl', '')
elif _placeholder_type == 1:
# 占位符
_template['source'] = "PLACEHOLDER_" + template_info.get('sourceUrl', '')
_template['mute'] = template_info.get('mute', True)
_template['crop_mode'] = template_info.get('cropEnable', None)
_template['zoom_cut'] = template_info.get('zoomCut', None)
else:
_template['source'] = None
_overlays = template_info.get('overlays', '')
if _overlays:
_template['overlays'] = _overlays.split(",")
_audios = template_info.get('audios', '')
if _audios:
_template['audios'] = _audios.split(",")
_luts = template_info.get('luts', '')
if _luts:
_template['luts'] = _luts.split(",")
_only_if = template_info.get('onlyIf', '')
if _only_if:
_template['only_if'] = _only_if
_effects = template_info.get('effects', '')
if _effects:
_template['effects'] = _effects.split("|")
return _template
# outer template definition
overall_template = _template_normalizer(remote_template_info)
template['overall_template'] = overall_template
# inter template definition
inter_template_list = remote_template_info.get('children', [])
for children_template in inter_template_list:
parts = _template_normalizer(children_template)
template['video_parts'].append(parts)
template['local_path'] = os.path.join(os.getenv('TEMPLATE_DIR'), str(template_id))
with get_tracer("api").start_as_current_span("get_template_info.template") as res_span:
res_span.set_attribute("normalized.response", json.dumps(template))
return template
def report_task_success(task_info, **kwargs):
tracer = get_tracer(__name__)
with tracer.start_as_current_span("report_task_success"):
with tracer.start_as_current_span("report_task_success.request") as req_span:
try:
req_span.set_attribute("http.method", "POST")
req_span.set_attribute("http.url",
'{0}/{1}/success'.format(os.getenv('API_ENDPOINT'), task_info.get("id")))
response = session.post('{0}/{1}/success'.format(os.getenv('API_ENDPOINT'), task_info.get("id")), json={
'accessKey': os.getenv('ACCESS_KEY'),
**kwargs
}, timeout=10)
req_span.set_attribute("http.status_code", response.status_code)
req_span.set_attribute("http.response", response.text)
response.raise_for_status()
req_span.set_status(Status(StatusCode.OK))
except requests.RequestException as e:
req_span.set_attribute("api.error", str(e))
logger.error("请求失败!", e)
return None
def report_task_start(task_info):
tracer = get_tracer(__name__)
with tracer.start_as_current_span("report_task_start"):
with tracer.start_as_current_span("report_task_start.request") as req_span:
try:
req_span.set_attribute("http.method", "POST")
req_span.set_attribute("http.url",
'{0}/{1}/start'.format(os.getenv('API_ENDPOINT'), task_info.get("id")))
response = session.post('{0}/{1}/start'.format(os.getenv('API_ENDPOINT'), task_info.get("id")), json={
'accessKey': os.getenv('ACCESS_KEY'),
}, timeout=10)
req_span.set_attribute("http.status_code", response.status_code)
req_span.set_attribute("http.response", response.text)
response.raise_for_status()
req_span.set_status(Status(StatusCode.OK))
except requests.RequestException as e:
req_span.set_attribute("api.error", str(e))
logger.error("请求失败!", e)
return None
def report_task_failed(task_info, reason=''):
tracer = get_tracer(__name__)
with tracer.start_as_current_span("report_task_failed") as span:
span.set_attribute("task_id", task_info.get("id"))
span.set_attribute("reason", reason)
with tracer.start_as_current_span("report_task_failed.request") as req_span:
try:
req_span.set_attribute("http.method", "POST")
req_span.set_attribute("http.url",
'{0}/{1}/fail'.format(os.getenv('API_ENDPOINT'), task_info.get("id")))
response = session.post('{0}/{1}/fail'.format(os.getenv('API_ENDPOINT'), task_info.get("id")), json={
'accessKey': os.getenv('ACCESS_KEY'),
'reason': reason
}, timeout=10)
req_span.set_attribute("http.status_code", response.status_code)
req_span.set_attribute("http.response", response.text)
response.raise_for_status()
req_span.set_status(Status(StatusCode.OK))
except requests.RequestException as e:
req_span.set_attribute("api.error", str(e))
req_span.set_status(Status(StatusCode.ERROR))
logger.error("请求失败!", e)
return None
def upload_task_file(task_info, ffmpeg_task):
tracer = get_tracer(__name__)
with get_tracer("api").start_as_current_span("upload_task_file") as span:
logger.info("开始上传文件: %s", task_info.get("id"))
span.set_attribute("file.id", task_info.get("id"))
with tracer.start_as_current_span("upload_task_file.request_upload_url") as req_span:
try:
req_span.set_attribute("http.method", "POST")
req_span.set_attribute("http.url",
'{0}/{1}/uploadUrl'.format(os.getenv('API_ENDPOINT'), task_info.get("id")))
response = session.post('{0}/{1}/uploadUrl'.format(os.getenv('API_ENDPOINT'), task_info.get("id")),
json={
'accessKey': os.getenv('ACCESS_KEY'),
}, timeout=10)
req_span.set_attribute("http.status_code", response.status_code)
req_span.set_attribute("http.response", response.text)
response.raise_for_status()
req_span.set_status(Status(StatusCode.OK))
except requests.RequestException as e:
span.set_attribute("api.error", str(e))
req_span.set_status(Status(StatusCode.ERROR))
logger.error("请求失败!", e)
return False
data = response.json()
url = data.get('data', "")
logger.info("开始上传文件: %s%s", task_info.get("id"), url)
return oss.upload_to_oss(url, ffmpeg_task.get_output_file())
def get_task_info(task_id):
try:
response = session.get(os.getenv('API_ENDPOINT') + "/" + task_id + "/info", params={
'accessKey': os.getenv('ACCESS_KEY'),
}, timeout=10)
response.raise_for_status()
except requests.RequestException as e:
logger.error("请求失败: %s", e)
return None
data = response.json()
logger.debug("获取任务结果:%s", data)
if data.get('code', 0) == 200:
return data.get('data', {})
return None
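For reference, the response envelope that sync_center() and get_task_info() expect from the API; all values are illustrative.
# Illustrative /sync response; the field names mirror what the code above reads.
EXAMPLE_SYNC_RESPONSE = {
    "code": 200,
    "data": {
        "templates": [{"id": "tpl-demo", "updateTime": "2025-09-12 14:00:00"}],
        "tasks": [{"id": "12345", "templateId": "tpl-demo", "taskParams": "{}"}],
    },
}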

72
util/exceptions.py Normal file
View File

@@ -0,0 +1,72 @@
class RenderWorkerError(Exception):
"""RenderWorker基础异常类"""
def __init__(self, message: str, error_code: str = None):
super().__init__(message)
self.message = message
self.error_code = error_code or self.__class__.__name__
class ConfigurationError(RenderWorkerError):
"""配置错误"""
pass
class TemplateError(RenderWorkerError):
"""模板相关错误"""
pass
class TemplateNotFoundError(TemplateError):
"""模板未找到错误"""
pass
class TemplateValidationError(TemplateError):
"""模板验证错误"""
pass
class TaskError(RenderWorkerError):
"""任务处理错误"""
pass
class TaskValidationError(TaskError):
"""任务参数验证错误"""
pass
class RenderError(RenderWorkerError):
"""渲染处理错误"""
pass
class FFmpegError(RenderError):
"""FFmpeg执行错误"""
def __init__(self, message: str, command: list = None, return_code: int = None, stderr: str = None):
super().__init__(message)
self.command = command
self.return_code = return_code
self.stderr = stderr
class EffectError(RenderError):
"""效果处理错误"""
def __init__(self, message: str, effect_name: str = None, effect_params: str = None):
super().__init__(message)
self.effect_name = effect_name
self.effect_params = effect_params
class StorageError(RenderWorkerError):
"""存储相关错误"""
pass
class APIError(RenderWorkerError):
"""API调用错误"""
def __init__(self, message: str, status_code: int = None, response_body: str = None):
super().__init__(message)
self.status_code = status_code
self.response_body = response_body
class ResourceError(RenderWorkerError):
"""资源相关错误"""
pass
class ResourceNotFoundError(ResourceError):
"""资源未找到错误"""
pass
class DownloadError(ResourceError):
"""下载错误"""
pass
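A short sketch of how the structured fields on these exceptions can be used; the values are placeholders.
from util.exceptions import FFmpegError, RenderWorkerError

try:
    raise FFmpegError("encode failed", command=["ffmpeg", "-i", "in.mp4"], return_code=1)
except RenderWorkerError as e:
    # error_code defaults to the class name, so handlers can report it without string matching
    print(e.error_code, getattr(e, "return_code", None))   # -> FFmpegError 1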

255
util/ffmpeg.py Normal file
View File

@@ -0,0 +1,255 @@
import json
import logging
import os
import subprocess
from datetime import datetime
from typing import Optional, IO
from opentelemetry.trace import Status, StatusCode
from entity.ffmpeg import FfmpegTask, ENCODER_ARGS, VIDEO_ARGS, AUDIO_ARGS, MUTE_AUDIO_INPUT, get_mp4toannexb_filter
from telemetry import get_tracer
logger = logging.getLogger(__name__)
def re_encode_and_annexb(file):
with get_tracer("ffmpeg").start_as_current_span("re_encode_and_annexb") as span:
span.set_attribute("file.path", file)
if not os.path.exists(file):
span.set_status(Status(StatusCode.ERROR))
return file
logger.info("ReEncodeAndAnnexb: %s", file)
has_audio = bool(probe_video_audio(file))
# 优先使用RE_ENCODE_VIDEO_ARGS环境变量,其次使用默认的VIDEO_ARGS
if os.getenv("RE_ENCODE_VIDEO_ARGS", False):
_video_args = tuple(os.getenv("RE_ENCODE_VIDEO_ARGS", "").split(" "))
else:
_video_args = VIDEO_ARGS
# 优先使用RE_ENCODE_ENCODER_ARGS环境变量,其次使用默认的ENCODER_ARGS
if os.getenv("RE_ENCODE_ENCODER_ARGS", False):
_encoder_args = tuple(os.getenv("RE_ENCODE_ENCODER_ARGS", "").split(" "))
else:
_encoder_args = ENCODER_ARGS
ffmpeg_process = subprocess.run(["ffmpeg", "-y", "-hide_banner", "-i", file,
*(() if has_audio else MUTE_AUDIO_INPUT),
"-fps_mode", "cfr",
"-map", "0:v", "-map", "0:a" if has_audio else "1:a",
*_video_args, "-bsf:v", get_mp4toannexb_filter(),
*AUDIO_ARGS, "-bsf:a", "setts=pts=DTS",
*_encoder_args, "-shortest", "-fflags", "+genpts",
"-f", "mpegts", file + ".ts"])
logger.info(" ".join(ffmpeg_process.args))
span.set_attribute("ffmpeg.args", json.dumps(ffmpeg_process.args))
logger.info("ReEncodeAndAnnexb: %s, returned: %s", file, ffmpeg_process.returncode)
span.set_attribute("ffmpeg.code", ffmpeg_process.returncode)
if ffmpeg_process.returncode == 0:
span.set_status(Status(StatusCode.OK))
span.set_attribute("file.size", os.path.getsize(file+".ts"))
# os.remove(file)
return file+".ts"
else:
span.set_status(Status(StatusCode.ERROR))
return file
def start_render(ffmpeg_task: FfmpegTask):
tracer = get_tracer(__name__)
with tracer.start_as_current_span("start_render") as span:
span.set_attribute("ffmpeg.task", str(ffmpeg_task))
if not ffmpeg_task.need_run():
ffmpeg_task.set_output_file(ffmpeg_task.input_file[0])
span.set_status(Status(StatusCode.OK))
return True
ffmpeg_args = ffmpeg_task.get_ffmpeg_args()
if len(ffmpeg_args) == 0:
ffmpeg_task.set_output_file(ffmpeg_task.input_file[0])
span.set_status(Status(StatusCode.OK))
return True
ffmpeg_process = subprocess.run(["ffmpeg", "-progress", "-", "-loglevel", "error", *ffmpeg_args], stderr=subprocess.PIPE, **subprocess_args(True))
span.set_attribute("ffmpeg.args", json.dumps(ffmpeg_process.args))
logger.info(" ".join(ffmpeg_process.args))
ffmpeg_final_out = handle_ffmpeg_output(ffmpeg_process.stdout)
span.set_attribute("ffmpeg.out", ffmpeg_final_out)
logger.info("FINISH TASK, OUTPUT IS %s", ffmpeg_final_out)
code = ffmpeg_process.returncode
span.set_attribute("ffmpeg.code", code)
if code != 0:
span.set_attribute("ffmpeg.err", str(ffmpeg_process.stderr))
span.set_status(Status(StatusCode.ERROR, "FFMPEG异常退出"))
logger.error("FFMPEG ERROR: %s", ffmpeg_process.stderr)
return False
span.set_attribute("ffmpeg.out_file", ffmpeg_task.output_file)
try:
file_size = os.path.getsize(ffmpeg_task.output_file)
span.set_attribute("file.size", file_size)
if file_size < 4096:
span.set_status(Status(StatusCode.ERROR, "输出文件过小"))
logger.error("FFMPEG ERROR: OUTPUT FILE IS TOO SMALL")
return False
except OSError as e:
span.set_attribute("file.size", 0)
span.set_attribute("file.error", e.strerror)
span.set_status(Status(StatusCode.ERROR, "输出文件不存在"))
logger.error("FFMPEG ERROR: OUTPUT FILE NOT FOUND")
return False
span.set_status(Status(StatusCode.OK))
return True
def handle_ffmpeg_output(stdout: Optional[bytes]) -> str:
out_time = "0:0:0.0"
if stdout is None:
print("[!]STDOUT is null")
return out_time
speed = "0"
for line in stdout.split(b"\n"):
if line == b"":
break
if line.strip() == b"progress=end":
# 处理完毕
break
if line.startswith(b"out_time="):
out_time = line.replace(b"out_time=", b"").decode().strip()
if line.startswith(b"speed="):
speed = line.replace(b"speed=", b"").decode().strip()
print("[ ]Speed:", out_time, "@", speed)
return out_time+"@"+speed
def duration_str_to_float(duration_str: str) -> float:
_duration = datetime.strptime(duration_str, "%H:%M:%S.%f") - datetime(1900, 1, 1)
return _duration.total_seconds()
def probe_video_info(video_file):
tracer = get_tracer(__name__)
with tracer.start_as_current_span("probe_video_info") as span:
span.set_attribute("video.file", video_file)
# 获取宽度和高度
result = subprocess.run(
["ffprobe", '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=width,height:format=duration', '-of',
'csv=s=x:p=0', video_file],
stderr=subprocess.STDOUT,
**subprocess_args(True)
)
span.set_attribute("ffprobe.args", json.dumps(result.args))
span.set_attribute("ffprobe.code", result.returncode)
if result.returncode != 0:
span.set_status(Status(StatusCode.ERROR))
return 0, 0, 0
all_result = result.stdout.decode('utf-8').strip()
span.set_attribute("ffprobe.out", all_result)
if all_result == '':
span.set_status(Status(StatusCode.ERROR))
return 0, 0, 0
span.set_status(Status(StatusCode.OK))
wh, duration = all_result.split('\n')
width, height = wh.strip().split('x')
return int(width), int(height), float(duration)
def probe_video_audio(video_file, type=None):
tracer = get_tracer(__name__)
with tracer.start_as_current_span("probe_video_audio") as span:
span.set_attribute("video.file", video_file)
args = ["ffprobe", "-hide_banner", "-v", "error", "-select_streams", "a", "-show_entries", "stream=index", "-of", "csv=p=0"]
if type == 'concat':
args.append("-safe")
args.append("0")
args.append("-f")
args.append("concat")
args.append(video_file)
logger.info(" ".join(args))
result = subprocess.run(args, stderr=subprocess.STDOUT, **subprocess_args(True))
span.set_attribute("ffprobe.args", json.dumps(result.args))
span.set_attribute("ffprobe.code", result.returncode)
logger.info("probe_video_audio: %s", result.stdout.decode('utf-8').strip())
if result.returncode != 0:
return False
if result.stdout.decode('utf-8').strip() == '':
return False
return True
# 音频淡出2秒
def fade_out_audio(file, duration, fade_out_sec = 2):
if isinstance(duration, str):
try:
duration = float(duration)
except Exception as e:
logger.error("duration is not float: %s", e)
return file
tracer = get_tracer(__name__)
with tracer.start_as_current_span("fade_out_audio") as span:
span.set_attribute("audio.file", file)
if duration <= fade_out_sec:
return file
else:
new_fn = file + "_.mp4"
if os.path.exists(new_fn):
os.remove(new_fn)
logger.info("delete tmp file: " + new_fn)
try:
process = subprocess.run(["ffmpeg", "-i", file, "-c:v", "copy", "-c:a", "aac", "-af", "afade=t=out:st=" + str(duration - fade_out_sec) + ":d=" + str(fade_out_sec), "-y", new_fn], **subprocess_args(True))
span.set_attribute("ffmpeg.args", json.dumps(process.args))
logger.info(" ".join(process.args))
if process.returncode != 0:
span.set_status(Status(StatusCode.ERROR))
logger.error("FFMPEG ERROR: %s", process.stderr)
return file
else:
span.set_status(Status(StatusCode.OK))
return new_fn
except Exception as e:
span.set_status(Status(StatusCode.ERROR))
logger.error("FFMPEG ERROR: %s", e)
return file
# Create a set of arguments which make a ``subprocess.Popen`` (and
# variants) call work with or without Pyinstaller, ``--noconsole`` or
# not, on Windows and Linux. Typical use::
#
# subprocess.call(['program_to_run', 'arg_1'], **subprocess_args())
#
# When calling ``check_output``::
#
# subprocess.check_output(['program_to_run', 'arg_1'],
# **subprocess_args(False))
def subprocess_args(include_stdout=True):
# The following is true only on Windows.
if hasattr(subprocess, 'STARTUPINFO'):
# On Windows, subprocess calls will pop up a command window by default
# when run from Pyinstaller with the ``--noconsole`` option. Avoid this
# distraction.
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# Windows doesn't search the path by default. Pass it an environment so
# it will.
env = os.environ
else:
si = None
env = None
# ``subprocess.check_output`` doesn't allow specifying ``stdout``::
#
# Traceback (most recent call last):
# File "test_subprocess.py", line 58, in <module>
# **subprocess_args(stdout=None))
# File "C:\Python27\lib\subprocess.py", line 567, in check_output
# raise ValueError('stdout argument not allowed, it will be overridden.')
# ValueError: stdout argument not allowed, it will be overridden.
#
# So, add it only if it's needed.
if include_stdout:
ret = {'stdout': subprocess.PIPE}
else:
ret = {}
# On Windows, running this from the binary produced by Pyinstaller
# with the ``--noconsole`` option requires redirecting everything
# (stdin, stdout, stderr) to avoid an OSError exception
# "[Error 6] the handle is invalid."
ret.update({'stdin': subprocess.PIPE,
'startupinfo': si,
'env': env})
return ret
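A minimal sketch combining the probe and fade helpers above; the file name is a placeholder.
# Illustrative use of the helpers in this module.
width, height, duration = probe_video_info("out_12345.mp4")
if duration > 2:
    final_file = fade_out_audio("out_12345.mp4", duration)   # writes out_12345.mp4_.mp4 on success
else:
    final_file = "out_12345.mp4"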

127
util/oss.py Normal file
View File

@@ -0,0 +1,127 @@
import logging
import os
import sys
import requests
from opentelemetry.trace import Status, StatusCode
from telemetry import get_tracer
logger = logging.getLogger(__name__)
def upload_to_oss(url, file_path):
"""
使用签名URL上传文件到OSS
:param str url: 签名URL
:param str file_path: 文件路径
:return bool: 是否成功
"""
tracer = get_tracer(__name__)
with tracer.start_as_current_span("upload_to_oss") as span:
span.set_attribute("file.url", url)
span.set_attribute("file.path", file_path)
span.set_attribute("file.size", os.path.getsize(file_path))
max_retries = 5
retries = 0
if os.getenv("UPLOAD_METHOD") == "rclone":
with tracer.start_as_current_span("rclone_to_oss") as r_span:
replace_map = os.getenv("RCLONE_REPLACE_MAP")
r_span.set_attribute("rclone.replace_map", replace_map)
if replace_map != "":
replace_list = [i.split("|", 1) for i in replace_map.split(",")]
new_url = url
for (_src, _dst) in replace_list:
new_url = new_url.replace(_src, _dst)
new_url = new_url.split("?", 1)[0]
r_span.set_attribute("rclone.target_dir", new_url)
if new_url != url:
result = os.system(f"rclone copyto --no-check-dest --ignore-existing --multi-thread-chunk-size 8M --multi-thread-streams 8 {file_path} {new_url}")
r_span.set_attribute("rclone.result", result)
if result == 0:
span.set_status(Status(StatusCode.OK))
return True
else:
span.set_status(Status(StatusCode.ERROR))
while retries < max_retries:
with tracer.start_as_current_span("upload_to_oss.request") as req_span:
req_span.set_attribute("http.retry_count", retries)
try:
req_span.set_attribute("http.method", "PUT")
req_span.set_attribute("http.url", url)
with open(file_path, 'rb') as f:
response = requests.put(url, data=f, stream=True, timeout=60, headers={"Content-Type": "video/mp4"})
req_span.set_attribute("http.status_code", response.status_code)
req_span.set_attribute("http.response", response.text)
response.raise_for_status()
req_span.set_status(Status(StatusCode.OK))
span.set_status(Status(StatusCode.OK))
return True
except requests.exceptions.Timeout:
req_span.set_attribute("http.error", "Timeout")
req_span.set_status(Status(StatusCode.ERROR))
retries += 1
logger.warning(f"Upload timed out. Retrying {retries}/{max_retries}...")
except Exception as e:
req_span.set_attribute("http.error", str(e))
req_span.set_status(Status(StatusCode.ERROR))
retries += 1
logger.warning(f"Upload failed. Retrying {retries}/{max_retries}...")
span.set_status(Status(StatusCode.ERROR))
return False
def download_from_oss(url, file_path, skip_if_exist=None):
"""
使用签名URL从OSS下载文件
:param skip_if_exist: 如果存在就不下载了
:param str url: 签名URL
:param Union[LiteralString, str, bytes] file_path: 文件路径
:return bool: 是否成功
"""
tracer = get_tracer(__name__)
with tracer.start_as_current_span("download_from_oss") as span:
span.set_attribute("file.url", url)
span.set_attribute("file.path", file_path)
# 如果skip_if_exist为None,则从启动参数中读取
if skip_if_exist is None:
skip_if_exist = 'skip_if_exist' in sys.argv
if skip_if_exist and os.path.exists(file_path):
span.set_attribute("file.exist", True)
span.set_attribute("file.size", os.path.getsize(file_path))
return True
logging.info("download_from_oss: %s", url)
file_dir, file_name = os.path.split(file_path)
if file_dir:
if not os.path.exists(file_dir):
os.makedirs(file_dir)
max_retries = 5
retries = 0
while retries < max_retries:
with tracer.start_as_current_span("download_from_oss.request") as req_span:
req_span.set_attribute("http.retry_count", retries)
try:
req_span.set_attribute("http.method", "GET")
req_span.set_attribute("http.url", url)
response = requests.get(url, timeout=15)  # 设置超时时间
req_span.set_attribute("http.status_code", response.status_code)
response.raise_for_status()
with open(file_path, 'wb') as f:
f.write(response.content)
req_span.set_attribute("file.size", os.path.getsize(file_path))
req_span.set_status(Status(StatusCode.OK))
span.set_status(Status(StatusCode.OK))
return True
except requests.exceptions.Timeout:
req_span.set_attribute("http.error", "Timeout")
req_span.set_status(Status(StatusCode.ERROR))
retries += 1
logger.warning(f"Download timed out. Retrying {retries}/{max_retries}...")
except Exception as e:
req_span.set_attribute("http.error", str(e))
req_span.set_status(Status(StatusCode.ERROR))
retries += 1
logger.warning(f"Download failed. Retrying {retries}/{max_retries}...")
span.set_status(Status(StatusCode.ERROR))
return False
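A worked example of the RCLONE_REPLACE_MAP rewrite performed by upload_to_oss(); the bucket and host below are hypothetical.
# Hypothetical mapping: RCLONE_REPLACE_MAP="https://oss.example.com|alioss://demo-bucket"
signed_url = "https://oss.example.com/videos/out_12345.mp4?Expires=1700000000&Signature=abc"
# After the replacement and dropping the query string, upload_to_oss() shells out to roughly:
#   rclone copyto --no-check-dest --ignore-existing \
#       --multi-thread-chunk-size 8M --multi-thread-streams 8 \
#       out_12345.mp4 alioss://demo-bucket/videos/out_12345.mp4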

View File

@@ -1,345 +1,24 @@
# -*- coding: utf-8 -*-
"""
系统信息工具
提供系统信息采集功能。
"""
import logging
import os
import platform
import subprocess
from typing import Optional, Dict, Any, List
from datetime import datetime
import psutil
from constant import SOFTWARE_VERSION, DEFAULT_CAPABILITIES, HW_ACCEL_NONE, HW_ACCEL_QSV, HW_ACCEL_CUDA
from domain.gpu import GPUDevice
logger = logging.getLogger(__name__)
from constant import SUPPORT_FEATURE, SOFTWARE_VERSION
def get_sys_info():
"""
获取系统信息
Returns:
dict: 系统信息字典
"""
mem = psutil.virtual_memory()
info = {
'os': platform.system(),
'cpu': f"{os.cpu_count()} cores",
'memory': f"{mem.total // (1024**3)}GB",
'cpuUsage': f"{psutil.cpu_percent()}%",
'memoryAvailable': f"{mem.available // (1024**3)}GB",
'platform': platform.system(),
'pythonVersion': platform.python_version(),
'version': SOFTWARE_VERSION,
'client_datetime': datetime.now().isoformat(),
'runtime_version': 'Python ' + platform.python_version(),
'cpu_count': os.cpu_count(),
'cpu_usage': psutil.cpu_percent(),
'memory_total': psutil.virtual_memory().total,
'memory_available': psutil.virtual_memory().available,
'support_feature': SUPPORT_FEATURE
}
# 尝试获取 GPU 信息
gpu_info = get_gpu_info()
if gpu_info:
info['gpu'] = gpu_info
return info
def get_capabilities():
"""
获取 Worker 支持的能力列表
Returns:
list: 能力列表
"""
return DEFAULT_CAPABILITIES.copy()
def get_gpu_info() -> Optional[str]:
"""
尝试获取 GPU 信息
Returns:
str: GPU 信息,失败返回 None
"""
try:
# 尝试使用 nvidia-smi
result = subprocess.run(
['nvidia-smi', '--query-gpu=name', '--format=csv,noheader'],
capture_output=True,
text=True,
timeout=5
)
if result.returncode == 0:
gpu_name = result.stdout.strip().split('\n')[0]
return gpu_name
except Exception:
pass
return None
def get_ffmpeg_version() -> str:
"""
获取 FFmpeg 版本
Returns:
str: FFmpeg 版本号
"""
try:
result = subprocess.run(
['ffmpeg', '-version'],
capture_output=True,
text=True,
timeout=5
)
if result.returncode == 0:
first_line = result.stdout.split('\n')[0]
# 解析版本号,例如 "ffmpeg version 6.0 ..."
parts = first_line.split()
for i, part in enumerate(parts):
if part == 'version' and i + 1 < len(parts):
return parts[i + 1]
except Exception:
pass
return 'unknown'
def check_ffmpeg_encoder(encoder: str) -> bool:
"""
检查 FFmpeg 是否支持指定的编码器
Args:
encoder: 编码器名称,如 'h264_nvenc', 'h264_qsv'
Returns:
bool: 是否支持该编码器
"""
try:
result = subprocess.run(
['ffmpeg', '-hide_banner', '-encoders'],
capture_output=True,
text=True,
timeout=5
)
if result.returncode == 0:
return encoder in result.stdout
except Exception:
pass
return False
def check_ffmpeg_decoder(decoder: str) -> bool:
"""
检查 FFmpeg 是否支持指定的解码器
Args:
decoder: 解码器名称,如 'h264_cuvid', 'h264_qsv'
Returns:
bool: 是否支持该解码器
"""
try:
result = subprocess.run(
['ffmpeg', '-hide_banner', '-decoders'],
capture_output=True,
text=True,
timeout=5
)
if result.returncode == 0:
return decoder in result.stdout
except Exception:
pass
return False
def check_ffmpeg_hwaccel(hwaccel: str) -> bool:
"""
检查 FFmpeg 是否支持指定的硬件加速方法
Args:
hwaccel: 硬件加速方法,如 'cuda', 'qsv', 'dxva2', 'd3d11va'
Returns:
bool: 是否支持该硬件加速方法
"""
try:
result = subprocess.run(
['ffmpeg', '-hide_banner', '-hwaccels'],
capture_output=True,
text=True,
timeout=5
)
if result.returncode == 0:
return hwaccel in result.stdout
except Exception:
pass
return False
def detect_hw_accel_support() -> Dict[str, Any]:
"""
检测系统的硬件加速支持情况
Returns:
dict: 硬件加速支持信息
{
'cuda': {
'available': bool,
'gpu': str or None,
'encoder': bool, # h264_nvenc
'decoder': bool, # h264_cuvid
},
'qsv': {
'available': bool,
'encoder': bool, # h264_qsv
'decoder': bool, # h264_qsv
},
'recommended': str # 推荐的加速方式: 'cuda', 'qsv', 'none'
}
"""
result = {
'cuda': {
'available': False,
'gpu': None,
'encoder': False,
'decoder': False,
},
'qsv': {
'available': False,
'encoder': False,
'decoder': False,
},
'recommended': HW_ACCEL_NONE
}
# 检测 CUDA/NVENC 支持
gpu_info = get_gpu_info()
if gpu_info:
result['cuda']['gpu'] = gpu_info
result['cuda']['available'] = check_ffmpeg_hwaccel('cuda')
result['cuda']['encoder'] = check_ffmpeg_encoder('h264_nvenc')
result['cuda']['decoder'] = check_ffmpeg_decoder('h264_cuvid')
# 检测 QSV 支持
result['qsv']['available'] = check_ffmpeg_hwaccel('qsv')
result['qsv']['encoder'] = check_ffmpeg_encoder('h264_qsv')
result['qsv']['decoder'] = check_ffmpeg_decoder('h264_qsv')
# 推荐硬件加速方式(优先 CUDA,其次 QSV)
if result['cuda']['available'] and result['cuda']['encoder']:
result['recommended'] = HW_ACCEL_CUDA
elif result['qsv']['available'] and result['qsv']['encoder']:
result['recommended'] = HW_ACCEL_QSV
return result
def get_hw_accel_info_str() -> str:
"""
获取硬件加速支持信息的可读字符串
Returns:
str: 硬件加速支持信息描述
"""
support = detect_hw_accel_support()
parts = []
if support['cuda']['available']:
gpu = support['cuda']['gpu'] or 'Unknown GPU'
status = 'encoder+decoder' if support['cuda']['encoder'] and support['cuda']['decoder'] else (
'encoder only' if support['cuda']['encoder'] else 'decoder only' if support['cuda']['decoder'] else 'hwaccel only'
)
parts.append(f"CUDA({gpu}, {status})")
if support['qsv']['available']:
status = 'encoder+decoder' if support['qsv']['encoder'] and support['qsv']['decoder'] else (
'encoder only' if support['qsv']['encoder'] else 'decoder only' if support['qsv']['decoder'] else 'hwaccel only'
)
parts.append(f"QSV({status})")
if not parts:
return "No hardware acceleration available"
return ', '.join(parts) + f" [recommended: {support['recommended']}]"
def get_all_gpu_info() -> List[GPUDevice]:
"""
获取所有 NVIDIA GPU 信息
使用 nvidia-smi 查询所有 GPU 设备。
Returns:
GPU 设备列表,失败返回空列表
"""
try:
result = subprocess.run(
[
'nvidia-smi',
'--query-gpu=index,name,memory.total',
'--format=csv,noheader,nounits'
],
capture_output=True,
text=True,
timeout=10
)
if result.returncode != 0:
return []
devices = []
for line in result.stdout.strip().split('\n'):
if not line.strip():
continue
parts = [p.strip() for p in line.split(',')]
if len(parts) >= 2:
index = int(parts[0])
name = parts[1]
memory = int(parts[2]) if len(parts) >= 3 else None
devices.append(GPUDevice(
index=index,
name=name,
memory_total=memory,
available=True
))
return devices
except Exception as e:
logger.warning(f"Failed to detect GPUs: {e}")
return []
def validate_gpu_device(index: int) -> bool:
"""
验证指定索引的 GPU 设备是否可用
Args:
index: GPU 设备索引
Returns:
设备是否可用
"""
try:
result = subprocess.run(
[
'nvidia-smi',
'-i', str(index),
'--query-gpu=name',
'--format=csv,noheader'
],
capture_output=True,
text=True,
timeout=5
)
return result.returncode == 0 and bool(result.stdout.strip())
except Exception:
return False
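A sketch of how the detection result above could feed encoder selection; the FFmpeg argument sets are illustrative choices, not values from this repository.
# Illustrative mapping from detected acceleration to encoder arguments.
support = detect_hw_accel_support()
if support["recommended"] == HW_ACCEL_CUDA:
    encoder_args = ["-c:v", "h264_nvenc", "-cq:v", "24", "-preset:v", "p5"]
elif support["recommended"] == HW_ACCEL_QSV:
    encoder_args = ["-c:v", "h264_qsv", "-global_quality", "28"]
else:
    encoder_args = ["-c:v", "libx264", "-crf", "23"]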