#!/usr/bin/env bash
# Digital-human generation pipeline:
#   Step 1: LivePortrait animates a source image with a fixed driving video.
#   Step 2: genavatar.py pre-processes the animation into avatar data.
#   Step 3: app.py serves the digital human over WebRTC with edge-tts.
#
# Usage: ./run_train_and_clone.sh "source_image"
# Overridable env: CUDA_VISIBLE_DEVICES, AIOICE_PORT_MIN/MAX, AIOICE_BIND_IP, WEBRTC_NAT_IP

# Enable extended globbing (counterpart of the .bat file's enabledelayedexpansion).
shopt -s extglob

# Require the source-image argument.
if [ $# -eq 0 ]; then
    echo "Error: Insufficient parameters"
    echo
    echo "Usage:"
    echo " ./run_train_and_clone.sh \"source_image\""
    echo
    echo "Examples:"
    echo " ./run_train_and_clone.sh \"LivePortrait-main/assets/examples/source/s9.jpg\""
    exit 1
fi

# Core paths; the driving video is fixed.
SOURCE_IMAGE="$1"
DRIVING_VIDEO="LivePortrait-main/assets/examples/driving/output.mp4"

# Directory containing this script (counterpart of %~dp0), WITH a trailing
# slash so it can be concatenated directly with relative paths below.
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/
cd "$SCRIPT_DIR" || exit 1

# Timestamped identifiers so concurrent/repeated runs never collide.
TIMESTAMP=$(date +%Y%m%d_%H%M%S)
AVATAR_ID="avatar_${TIMESTAMP}"
AVATAR_DIR="${SCRIPT_DIR}data/avatars/${AVATAR_ID}"

# Remove the avatar directory generated by this run when the script stops
# (fires on normal exit, error exits, and Ctrl+C while app.py is serving).
cleanup_avatar_dir() {
    if [ -n "${AVATAR_DIR:-}" ] && [ -d "${AVATAR_DIR}" ]; then
        echo "Cleaning generated avatar directory: ${AVATAR_DIR}"
        rm -rf -- "${AVATAR_DIR}"
    fi
}
trap 'cleanup_avatar_dir' EXIT

# Default environment values (each may be overridden by the caller's environment).
: "${CUDA_VISIBLE_DEVICES:=6,7}"
: "${AIOICE_PORT_MIN:=50000}"
: "${AIOICE_PORT_MAX:=50010}"
: "${AIOICE_BIND_IP:=192.168.22.9}"
: "${WEBRTC_NAT_IP:=183.252.196.135}"
export CUDA_VISIBLE_DEVICES AIOICE_PORT_MIN AIOICE_PORT_MAX AIOICE_BIND_IP WEBRTC_NAT_IP

echo "========================================"
echo "Digital Human Generation Pipeline"
echo "========================================"
echo
echo "Source Image: ${SOURCE_IMAGE}"
echo "Driving Video: ${DRIVING_VIDEO} (Fixed)"
echo "Avatar ID: ${AVATAR_ID}"
echo "Timestamp: ${TIMESTAMP}"
echo "Using env: CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES} AIOICE_BIND_IP=${AIOICE_BIND_IP} WEBRTC_NAT_IP=${WEBRTC_NAT_IP} AIOICE_PORT_MIN=${AIOICE_PORT_MIN} AIOICE_PORT_MAX=${AIOICE_PORT_MAX}"
echo

# Validate both inputs before doing any work.
if [ ! -f "${SOURCE_IMAGE}" ]; then
    echo "Error: Source image not found: ${SOURCE_IMAGE}"
    exit 1
fi
if [ ! -f "${DRIVING_VIDEO}" ]; then
    echo "Error: Driving video not found: ${DRIVING_VIDEO}"
    exit 1
fi

echo "Current Directory: $(pwd)"
echo

# ========================================
# Step 1: Run LivePortrait to generate video
# ========================================
echo
echo "[Step 1/3] Running LivePortrait to generate video..."
echo

# Make the driving-video path absolute (counterpart of DRIVING_VIDEO_ABS in the .bat).
DRIVING_VIDEO_ABS="${DRIVING_VIDEO}"
if [[ ! "${DRIVING_VIDEO_ABS}" =~ ^/ ]]; then
    DRIVING_VIDEO_ABS="${SCRIPT_DIR}${DRIVING_VIDEO}"
fi

echo "Debug: SCRIPT_DIR = ${SCRIPT_DIR}"
echo "Debug: DRIVING_VIDEO_ABS = ${DRIVING_VIDEO_ABS}"
echo "Debug: File exists check:"
if [ ! -f "${DRIVING_VIDEO_ABS}" ]; then
    echo "Error: Driving video file does not exist at: ${DRIVING_VIDEO_ABS}"
    echo
    echo "Checking if the path is correct..."
    echo "Checking: LivePortrait-main/assets/examples/driving/output.mp4"
    if [ -f "LivePortrait-main/assets/examples/driving/output.mp4" ]; then
        echo "File exists with relative path!"
    else
        echo "File NOT found with relative path!"
    fi
    exit 1
fi

# Copy the driving video under a timestamped name inside LivePortrait-main
# (counterpart of TEMP_DRIVING in the .bat) so inference outputs are unique per run.
DRIVE_EXT="${DRIVING_VIDEO_ABS##*.}"
TEMP_DRIVING="temp_driving_${TIMESTAMP}.${DRIVE_EXT}"
TEMP_DRIVING_PATH="${SCRIPT_DIR}LivePortrait-main/${TEMP_DRIVING}"
echo "Creating temporary copy of driving video: ${TEMP_DRIVING}"
echo "From: ${DRIVING_VIDEO_ABS}"
echo "To: ${TEMP_DRIVING_PATH}"
if ! cp "${DRIVING_VIDEO_ABS}" "${TEMP_DRIVING_PATH}"; then
    echo "Error: Failed to copy driving video"
    echo "Check if file exists: ${DRIVING_VIDEO_ABS}"
    cd "${SCRIPT_DIR}" || exit 1
    exit 1
fi

cd "${SCRIPT_DIR}LivePortrait-main" || exit 1

# Strip any "LivePortrait-main/" (or backslash-separated) prefix from the
# source-image path, since inference.py is invoked from inside that directory.
LP_SOURCE="${SOURCE_IMAGE}"
LP_SOURCE=${LP_SOURCE//LivePortrait-main\//}
LP_SOURCE=${LP_SOURCE//LivePortrait-main\\/}

echo "Running LivePortrait with:"
echo " Source: ${LP_SOURCE}"
echo " Driving: ${TEMP_DRIVING}"
echo

# Run LivePortrait inference inside the "live" conda environment.
source "$(conda info --base)"/etc/profile.d/conda.sh
conda activate live
if ! python inference.py -s "${LP_SOURCE}" -d "${TEMP_DRIVING}"; then
    echo "Error: LivePortrait failed!"
    # Remove the temporary driving copy before bailing out.
    if [ -f "${TEMP_DRIVING_PATH}" ]; then
        rm -f -- "${TEMP_DRIVING_PATH}"
    fi
    cd "${SCRIPT_DIR}" || exit 1
    exit 1
fi

# Remove the temporary driving copy now that inference succeeded.
if [ -f "${TEMP_DRIVING_PATH}" ]; then
    rm -f -- "${TEMP_DRIVING_PATH}"
fi
echo "Temporary file deleted."

# Locate the generated animation. LivePortrait names it
# "<source basename>--<driving basename>.mp4" under animations/.
SOURCE_BASENAME=$(basename "${SOURCE_IMAGE}")
SOURCE_BASENAME="${SOURCE_BASENAME%.*}"  # strip the extension (.jpg/.png/...)
EXPECTED_VIDEO="${SOURCE_BASENAME}--temp_driving_${TIMESTAMP}.mp4"
GENERATED_VIDEO=""
if [ -f "${SCRIPT_DIR}LivePortrait-main/animations/${EXPECTED_VIDEO}" ]; then
    GENERATED_VIDEO="${SCRIPT_DIR}LivePortrait-main/animations/${EXPECTED_VIDEO}"
    echo "Found expected face-only video: ${EXPECTED_VIDEO}"
elif [ -n "$(ls "${SCRIPT_DIR}LivePortrait-main/animations/"*--temp_driving_"${TIMESTAMP}".mp4 2>/dev/null | grep -v "_concat" | head -1)" ]; then
    # Newest video from this run that is not the side-by-side "_concat" variant.
    GENERATED_VIDEO=$(ls -t "${SCRIPT_DIR}LivePortrait-main/animations/"*--temp_driving_"${TIMESTAMP}".mp4 2>/dev/null | grep -v "_concat" | head -1)
    echo "Found video: $(basename "${GENERATED_VIDEO}")"
else
    # Last resort: any *--output.mp4 (original driving name) without "_concat".
    FALLBACK_VIDEO=$(ls "${SCRIPT_DIR}LivePortrait-main/animations/"*--output.mp4 2>/dev/null | grep -v "_concat" | head -1)
    if [ -n "${FALLBACK_VIDEO}" ]; then
        GENERATED_VIDEO="${FALLBACK_VIDEO}"
        echo "Found fallback face-only video: $(basename "${GENERATED_VIDEO}")"
    else
        echo "Error: Could not find generated video in animations/"
        echo "Expected: ${EXPECTED_VIDEO}"
        echo "Searching in animations/..."
        ls "${SCRIPT_DIR}LivePortrait-main/animations/" 2>/dev/null
        cd "${SCRIPT_DIR}" || exit 1
        exit 1
    fi
fi

echo
echo "LivePortrait completed!"
echo "Generated Video: ${GENERATED_VIDEO}"
echo
cd "${SCRIPT_DIR}" || exit 1

# ========================================
# Step 2: Run genavatar.py for pre-processing
# ========================================
echo
echo "[Step 2/3] Running genavatar.py for pre-processing..."
echo

cd "${SCRIPT_DIR}wav2lip" || exit 1
echo "Generating avatar data with:"
echo " Video: ${GENERATED_VIDEO}"
echo " Avatar ID: ${AVATAR_ID}"
echo " Image Size: 256"
echo
if ! python genavatar.py --video_path "${GENERATED_VIDEO}" --img_size 256 --avatar_id "${AVATAR_ID}"; then
    echo "Error: genavatar.py failed!"
    cd "${SCRIPT_DIR}" || exit 1
    exit 1
fi

# Remove this run's intermediates from LivePortrait-main (motion pkl + videos).
echo "Cleaning up temporary files in LivePortrait-main..."
cd "${SCRIPT_DIR}LivePortrait-main" || exit 1
find . -maxdepth 1 -name "temp_driving_*.pkl" -type f -delete
echo "Temporary pkl files deleted."
if [ -n "${GENERATED_VIDEO}" ] && [ -f "${GENERATED_VIDEO}" ]; then
    echo "Deleting generated video: ${GENERATED_VIDEO}"
    rm -f -- "${GENERATED_VIDEO}"
    # Also remove the "_concat" variant when present.
    CONCAT_VIDEO="${GENERATED_VIDEO%.mp4}_concat.mp4"
    if [ -f "${CONCAT_VIDEO}" ]; then
        echo "Deleting concat version: $(basename "${CONCAT_VIDEO}")"
        rm -f -- "${CONCAT_VIDEO}"
    fi
fi
echo "Temporary video files deleted."
cd "${SCRIPT_DIR}" || exit 1

echo
echo "Pre-processing completed!"
echo "Avatar data saved to: data/avatars/${AVATAR_ID}"
echo

# ========================================
# Step 3: Run app.py with edgetts for digital human generation
# ========================================
echo
echo "[Step 3/3] Starting app.py with edgetts for digital human generation..."
echo
echo "Starting digital human server..."
echo "Avatar ID: ${AVATAR_ID}"
echo "TTS: edgetts (Edge TTS via local API http://127.0.0.1:1024/tts)"
echo
echo "The server will start and you can access it via web interface."
echo "Press Ctrl+C to stop the server."
echo

# Ensure pyaudio is importable; install it on the fly when missing.
echo "Checking pyaudio installation..."
python -c "import pyaudio" 2>/dev/null || (echo "Installing pyaudio..." && pip install pyaudio)

# Launch app.py.
# Key environment variables:
#   AIOICE_PORT_MIN/MAX : UDP port range aiortc binds; must match the firewall rules
#   AIOICE_BIND_IP      : the single IP aiortc binds (pick the one browsers can route to)
#   WEBRTC_NAT_IP       : substituted into the SDP when browsers reach us via a jump-host IP
#
# Defaults (overridable via the caller's environment):
#   CUDA_VISIBLE_DEVICES=6,7
#   AIOICE_PORT_MIN=50000 AIOICE_PORT_MAX=50010
#   AIOICE_BIND_IP=192.168.22.9 WEBRTC_NAT_IP=183.252.196.135
echo "Using env: CUDA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES} AIOICE_BIND_IP=${AIOICE_BIND_IP} WEBRTC_NAT_IP=${WEBRTC_NAT_IP} AIOICE_PORT_MIN=${AIOICE_PORT_MIN} AIOICE_PORT_MAX=${AIOICE_PORT_MAX}"
CUDA_VISIBLE_DEVICES="${CUDA_VISIBLE_DEVICES}" \
AIOICE_BIND_IP="${AIOICE_BIND_IP}" \
WEBRTC_NAT_IP="${WEBRTC_NAT_IP}" \
AIOICE_PORT_MIN="${AIOICE_PORT_MIN}" \
AIOICE_PORT_MAX="${AIOICE_PORT_MAX}" \
python app.py \
    --avatar_id "${AVATAR_ID}" \
    --tts edgetts \
    --REF_FILE zh-CN-YunxiaNeural \
    --model wav2lip \
    --transport webrtc \
    --listenport 7868

echo
echo "========================================"
echo "Pipeline completed!"
echo "========================================"