From c91c9dca6b11caf3e6375c4574af10fba1c2d062 Mon Sep 17 00:00:00 2001 From: zhihang Date: Sat, 26 Jul 2025 03:03:46 +0000 Subject: [PATCH 1/2] add AvatarChatbot for 24.03-LTS Signed-off-by: zhihang --- AI/image-list.yml | 3 + AI/opea/animation/1.2/24.03-lts/Dockerfile | 41 +++++++ AI/opea/animation/meta.yml | 3 + .../avatarchatbot/1.2/24.03-lts/Dockerfile | 59 ++++++++++ AI/opea/avatarchatbot/README.md | 98 ++++++++++++++++ AI/opea/avatarchatbot/doc/compose.yml | 109 ++++++++++++++++++ AI/opea/avatarchatbot/doc/set_env.sh | 28 +++++ AI/opea/avatarchatbot/meta.yml | 3 + AI/opea/wav2lip/1.2/24.03-lts/Dockerfile | 85 ++++++++++++++ AI/opea/wav2lip/meta.yml | 3 + 10 files changed, 432 insertions(+) create mode 100644 AI/opea/animation/1.2/24.03-lts/Dockerfile create mode 100644 AI/opea/animation/meta.yml create mode 100644 AI/opea/avatarchatbot/1.2/24.03-lts/Dockerfile create mode 100644 AI/opea/avatarchatbot/README.md create mode 100644 AI/opea/avatarchatbot/doc/compose.yml create mode 100644 AI/opea/avatarchatbot/doc/set_env.sh create mode 100644 AI/opea/avatarchatbot/meta.yml create mode 100644 AI/opea/wav2lip/1.2/24.03-lts/Dockerfile create mode 100644 AI/opea/wav2lip/meta.yml diff --git a/AI/image-list.yml b/AI/image-list.yml index da4f69da..38836b2a 100644 --- a/AI/image-list.yml +++ b/AI/image-list.yml @@ -2,12 +2,15 @@ images: onnx: onnx cann: cann dlrm: dlrm + avatarchatbot: opea/avatarchatbot euler-copilot-fast-inference-qwen: euler-copilot-fast-inference-qwen llm: llm llm-server: llm-server llvm-build-deps: llvm-build-deps mindspore: mindspore mlflow: mlflow + wav2lip: opea/wav2lip + animation: opea/animation oneapi-basekit: oneapi-basekit oneapi-runtime: oneapi-runtime asr: opea/asr diff --git a/AI/opea/animation/1.2/24.03-lts/Dockerfile b/AI/opea/animation/1.2/24.03-lts/Dockerfile new file mode 100644 index 00000000..fc324827 --- /dev/null +++ b/AI/opea/animation/1.2/24.03-lts/Dockerfile @@ -0,0 +1,41 @@ +# Copyright (C) 2024 Intel Corporation +# 
SPDX-License-Identifier: Apache-2.0 + +ARG BASE=openeuler/python:3.11.13-oe2403lts +ARG VERSION=v1.2 + +# Use a base image +FROM $BASE + +ARG VERSION + +RUN yum update -y && \ + yum install -y \ + shadow \ + git && \ + yum clean all && \ + rm -rf /var/cache/yum + +RUN useradd -m -s /bin/bash user && \ + mkdir -p /home/user && \ + chown -R user /home/user/ +USER user + +ENV LANG=C.UTF-8 +ARG ARCH=cpu + +WORKDIR /home/user + +ARG GENAICOMPS_REPO=https://github.com/opea-project/GenAIComps.git +RUN git clone -b $VERSION $GENAICOMPS_REPO && \ + cp -r GenAIComps/comps /home/user/comps && \ + rm -rf GenAIComps + +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -r /home/user/comps/animation/src/requirements.txt ; + +ENV PYTHONPATH=$PYTHONPATH:/home/user + +WORKDIR /home/user/comps/animation/src + +ENTRYPOINT ["python3", "opea_animation_microservice.py"] \ No newline at end of file diff --git a/AI/opea/animation/meta.yml b/AI/opea/animation/meta.yml new file mode 100644 index 00000000..ee4b49e9 --- /dev/null +++ b/AI/opea/animation/meta.yml @@ -0,0 +1,3 @@ +1.2-oe2403lts: + path: 1.2/24.03-lts/Dockerfile + arch: x86_64 \ No newline at end of file diff --git a/AI/opea/avatarchatbot/1.2/24.03-lts/Dockerfile b/AI/opea/avatarchatbot/1.2/24.03-lts/Dockerfile new file mode 100644 index 00000000..1a92d19f --- /dev/null +++ b/AI/opea/avatarchatbot/1.2/24.03-lts/Dockerfile @@ -0,0 +1,59 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +ARG BASE=openeuler/python:3.11.13-oe2403lts +ARG VERSION=v1.2 + +# Stage 1: base setup used by other stages +FROM $BASE AS base + +ARG VERSION +# get security updates +RUN yum update -y && \ + yum install -y \ + shadow \ + yum clean all && \ + rm -rf /var/cache/yum + +ENV HOME=/home/user + +RUN useradd -m -s /bin/bash user && \ + mkdir -p $HOME && \ + chown -R user $HOME + +WORKDIR $HOME + + +# Stage 2: latest GenAIComps sources +FROM base AS git + +RUN yum update -y && \ + yum install 
-y git && \ + yum clean all && \ + rm -rf /var/cache/yum + +ARG GENAICOMPS_REPO=https://github.com/opea-project/GenAIComps.git +RUN git clone -b $VERSION $GENAICOMPS_REPO + +ARG GENAIEXAMPLES_REPO=https://github.com/opea-project/GenAIExamples.git +RUN git clone -b $VERSION $GENAIEXAMPLES_REPO + + +# Stage 3: common layer shared by services using GenAIComps +FROM base AS comps-base + +# copy just relevant parts +COPY --from=git $HOME/GenAIComps/comps $HOME/GenAIComps/comps +COPY --from=git $HOME/GenAIComps/*.* $HOME/GenAIComps/LICENSE $HOME/GenAIComps/ +COPY --from=git $HOME/GenAIExamples/AvatarChatbot/avatarchatbot.py $HOME/avatarchatbot.py + +WORKDIR $HOME/GenAIComps +RUN pip install --no-cache-dir --upgrade pip && \ + pip install --no-cache-dir -r $HOME/GenAIComps/requirements.txt +WORKDIR $HOME + +ENV PYTHONPATH=$PYTHONPATH:$HOME/GenAIComps + +USER user + +ENTRYPOINT ["python", "avatarchatbot.py"] \ No newline at end of file diff --git a/AI/opea/avatarchatbot/README.md b/AI/opea/avatarchatbot/README.md new file mode 100644 index 00000000..8f0ebfde --- /dev/null +++ b/AI/opea/avatarchatbot/README.md @@ -0,0 +1,98 @@ +# Quick reference + +- The official OPEA docker images + +- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative) + +- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community) + +# OPEA | openEuler + +Current OPEA docker images are built on the [openEuler](https://repo.openeuler.org/). This repository is free to use and exempted from per-user rate limits. + +OPEA is an open platform project that lets you create open, multi-provider, robust, and composable GenAI solutions that harness the best innovation across the ecosystem. 
+ +The OPEA platform includes: + +- Detailed framework of composable building blocks for state-of-the-art generative AI systems including LLMs, data stores, and prompt engines + +- Architectural blueprints of retrieval-augmented generative AI component stack structure and end-to-end workflows + +- A four-step assessment for grading generative AI systems around performance, features, trustworthiness, and enterprise-grade readiness + +Read more about OPEA at [opea.dev](https://opea.dev/) and explore the OPEA technical documentation at [opea-project.github.io](https://opea-project.github.io/) + +# Supported tags and respective Dockerfile links + +The tag of each AvatarChatbot docker image consists of the version of AvatarChatbot and the version of basic image. The details are as follows: + +| Tags | Currently | Architectures| +|--|--|--| +|[1.2-oe2403lts](https://gitee.com/openeuler/openeuler-docker-images/blob/master/AI/opea/avatarchatbot/1.2/24.03-lts/Dockerfile)| AvatarChatbot 1.2 on openEuler 24.03-LTS | amd64 | + +# Usage + +The AvatarChatbot service can be effortlessly deployed on either Intel Gaudi2 or Intel XEON Scalable Processors. + +Quick Start Deployment Steps: + +1. Set up the environment variables. +2. Run Docker Compose. +3. Consume the AvatarChatbot Service. + +### Quick Start: 1.Setup Environment Variable + +To set up environment variables for deploying AvatarChatbot services, follow these steps: + +1. Set the required environment variables: + + ```bash + # Example: host_ip="192.168.1.1" + export host_ip="External_Public_IP" + # Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1" + export no_proxy="Your_No_Proxy" + export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token" + ``` + +2. If you are in a proxy environment, also set the proxy-related environment variables: + + ```bash + export http_proxy="Your_HTTP_Proxy" + export https_proxy="Your_HTTPs_Proxy" + ``` + +3. 
Set up other environment variables: + + > Get `set_env.sh` here: [set_env.sh](https://gitee.com/openeuler/openeuler-docker-images/tree/master/AI/opea/avatarchatbot/doc/set_env.sh) + + ```bash + # on Xeon + source set_env.sh + ``` + +### Quick Start: 2.Run Docker Compose + +Select the compose.yaml file that matches your hardware. + +CPU example: + +> Get `compose.yml` here: [compose.yml](https://gitee.com/openeuler/openeuler-docker-images/tree/master/AI/opea/avatarchatbot/doc/compose.yml) + +```bash +docker compose -f compose.yml up -d +``` + +It will automatically download the docker image on `docker hub`: + +```bash +docker pull openeuler/avatarchatbot:latest +``` + +### QuickStart: 3.Consume the AvatarChatbot Service + +```bash +curl http://${host_ip}:3009/v1/avatarchatbot \ + -X POST \ + -d @assets/audio/sample_whoareyou.json \ + -H 'Content-Type: application/json' +``` \ No newline at end of file diff --git a/AI/opea/avatarchatbot/doc/compose.yml b/AI/opea/avatarchatbot/doc/compose.yml new file mode 100644 index 00000000..254350c7 --- /dev/null +++ b/AI/opea/avatarchatbot/doc/compose.yml @@ -0,0 +1,109 @@ + +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +services: + whisper-service: + image: openeuler/whisper:${TAG:-latest} + container_name: whisper-service + ports: + - "7066:7066" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + restart: unless-stopped + speecht5-service: + image: openeuler/speecht5:${TAG:-latest} + container_name: speecht5-service + ports: + - "7055:7055" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + restart: unless-stopped + tgi-service: + image: openeuler/text-generation-inference-cpu:2.4.0-oe2403lts + container_name: tgi-service + ports: + - "3006:80" + volumes: + - "./data:/data" + shm_size: 1g + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: 
${https_proxy} + HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN} + healthcheck: + test: ["CMD-SHELL", "curl -f http://${host_ip}:3006/health || exit 1"] + interval: 10s + timeout: 10s + retries: 100 + command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0 + wav2lip-service: + image: openeuler/wav2lip:${TAG:-latest} + container_name: wav2lip-service + ports: + - "7860:7860" + ipc: host + volumes: + - ${PWD}:/outputs + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + DEVICE: ${DEVICE} + INFERENCE_MODE: ${INFERENCE_MODE} + CHECKPOINT_PATH: ${CHECKPOINT_PATH} + FACE: ${FACE} + AUDIO: ${AUDIO} + FACESIZE: ${FACESIZE} + OUTFILE: ${OUTFILE} + GFPGAN_MODEL_VERSION: ${GFPGAN_MODEL_VERSION} + UPSCALE_FACTOR: ${UPSCALE_FACTOR} + FPS: ${FPS} + WAV2LIP_PORT: ${WAV2LIP_PORT} + restart: unless-stopped + animation: + image: openeuler/animation:${TAG:-latest} + container_name: animation-server + ports: + - "3008:9066" + ipc: host + environment: + no_proxy: ${no_proxy} + http_proxy: ${http_proxy} + https_proxy: ${https_proxy} + WAV2LIP_ENDPOINT: ${WAV2LIP_ENDPOINT} + restart: unless-stopped + avatarchatbot-xeon-backend-server: + image: openeuler/avatarchatbot:${TAG:-latest} + container_name: avatarchatbot-xeon-backend-server + depends_on: + - animation + ports: + - "3009:8888" + environment: + - no_proxy=${no_proxy} + - https_proxy=${https_proxy} + - http_proxy=${http_proxy} + - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP} + - MEGA_SERVICE_PORT=${MEGA_SERVICE_PORT} + - WHISPER_SERVER_HOST_IP=${WHISPER_SERVER_HOST_IP} + - WHISPER_SERVER_PORT=${WHISPER_SERVER_PORT} + - LLM_SERVER_HOST_IP=${LLM_SERVER_HOST_IP} + - LLM_SERVER_PORT=${LLM_SERVER_PORT} + - SPEECHT5_SERVER_HOST_IP=${SPEECHT5_SERVER_HOST_IP} + - SPEECHT5_SERVER_PORT=${SPEECHT5_SERVER_PORT} + - ANIMATION_SERVICE_HOST_IP=${ANIMATION_SERVICE_HOST_IP} + - ANIMATION_SERVICE_PORT=${ANIMATION_SERVICE_PORT} + ipc: host + restart: always + +networks: + default: + driver: bridge \ No newline at end 
of file diff --git a/AI/opea/avatarchatbot/doc/set_env.sh b/AI/opea/avatarchatbot/doc/set_env.sh new file mode 100644 index 00000000..e1623680 --- /dev/null +++ b/AI/opea/avatarchatbot/doc/set_env.sh @@ -0,0 +1,28 @@ +export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3 + +export WAV2LIP_ENDPOINT=http://$host_ip:7860 + +export MEGA_SERVICE_HOST_IP=${host_ip} +export WHISPER_SERVER_HOST_IP=${host_ip} +export WHISPER_SERVER_PORT=7066 +export SPEECHT5_SERVER_HOST_IP=${host_ip} +export SPEECHT5_SERVER_PORT=7055 +export LLM_SERVER_HOST_IP=${host_ip} +export LLM_SERVER_PORT=3006 +export ANIMATION_SERVICE_HOST_IP=${host_ip} +export ANIMATION_SERVICE_PORT=3008 + +export MEGA_SERVICE_PORT=8888 + +export DEVICE="cpu" +export WAV2LIP_PORT=7860 +export INFERENCE_MODE='wav2lip_only' +export CHECKPOINT_PATH='/usr/local/lib/python3.11/site-packages/Wav2Lip/checkpoints/wav2lip_gan.pth' +export FACE="assets/img/avatar1.jpg" +# export AUDIO='assets/audio/eg3_ref.wav' # audio file path is optional, will use base64str in the post request as input if is 'None' +export AUDIO='None' +export FACESIZE=96 +export OUTFILE="/outputs/result.mp4" +export GFPGAN_MODEL_VERSION=1.4 # latest version, can roll back to v1.3 if needed +export UPSCALE_FACTOR=1 +export FPS=10 \ No newline at end of file diff --git a/AI/opea/avatarchatbot/meta.yml b/AI/opea/avatarchatbot/meta.yml new file mode 100644 index 00000000..ee4b49e9 --- /dev/null +++ b/AI/opea/avatarchatbot/meta.yml @@ -0,0 +1,3 @@ +1.2-oe2403lts: + path: 1.2/24.03-lts/Dockerfile + arch: x86_64 \ No newline at end of file diff --git a/AI/opea/wav2lip/1.2/24.03-lts/Dockerfile b/AI/opea/wav2lip/1.2/24.03-lts/Dockerfile new file mode 100644 index 00000000..e53dc83b --- /dev/null +++ b/AI/opea/wav2lip/1.2/24.03-lts/Dockerfile @@ -0,0 +1,85 @@ +# Copyright (C) 2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +ARG BASE=openeuler/python:3.11.13-oe2403lts +ARG VERSION=v1.2 + +# Use a base image +FROM $BASE + +ARG VERSION + +# Set environment 
variables +ENV LANG=en_US.UTF-8 +ENV PYTHONPATH=/usr/local/python3.11.13/lib/python3.11/site-packages:/home:/home/user +ENV PYTHON=/usr/bin/python3.11 + +# Install dependencies +RUN yum update -y && \ + yum install -y \ + yasm \ + gcc g++ make cmake \ + pkgconf \ + x264-devel \ + git \ + nasm \ + wget \ + mesa-libGL \ + glib2 && \ + yum clean all && \ + rm -rf /var/cache/yum + +# Install GenAIComps +WORKDIR /home/user +ARG GENAICOMPS_REPO=https://github.com/opea-project/GenAIComps.git +RUN git clone -b $VERSION $GENAICOMPS_REPO && \ + cp -r GenAIComps/comps /home/user/comps && \ + cp -r GenAIComps/comps/third_parties/wav2lip/src/entrypoint.sh \ + /usr/local/bin/entrypoint.sh && \ + rm -rf GenAIComps + +# Install ffmpeg with x264 software codec +RUN git clone https://github.com/FFmpeg/FFmpeg.git /home/user/comps/animation/src/FFmpeg +WORKDIR /home/user/comps/animation/src/FFmpeg +RUN ./configure --enable-gpl --enable-libx264 --enable-cross-compile && \ + make -j$(nproc-1) && \ + make install && \ + hash -r +RUN chmod +x $(which ffmpeg) + +# Upgrade pip +RUN python3 -m pip install --upgrade pip + +# Install Wav2Lip from pip +RUN pip install --no-deps Wav2Lipy +RUN rm /usr/local/python3.11.13/lib/python3.11/site-packages/Wav2Lip/__init__.py && touch /usr/local/python3.11.13/lib/python3.11/site-packages/Wav2Lip/__init__.py +ENV PYTHONPATH="$PYTHONPATH:/usr/local/python3.11.13/lib/python3.11/site-packages/Wav2Lip" + +# Install GFPGAN from pip +RUN pip install --no-deps gfpgan +RUN touch /usr/local/python3.11.13/lib/python3.11/site-packages/gfpgan/__init__.py +ENV PYTHONPATH="$PYTHONPATH:/usr/local/python3.11.13/lib/python3.11/site-packages/gfpgan" + +# Download pre-trained models +WORKDIR /usr/local/python3.11.13/lib/python3.11/site-packages + +# Install pip dependencies +RUN pip install -r /home/user/comps/animation/src/requirements.txt + +# Custom patches +# Modify the degradations.py file to import rgb_to_grayscale from torchvision.transforms.functional +RUN sed -i 
's/from torchvision.transforms.functional_tensor import rgb_to_grayscale/from torchvision.transforms.functional import rgb_to_grayscale/' /usr/local/python3.11.13/lib/python3.11/site-packages/basicsr/data/degradations.py + +# Modify the core.py file to include 'hpu' in the device check +RUN sed -i "s/if 'cpu' not in device and 'cuda' not in device:/if 'cpu' not in device and 'cuda' not in device and 'hpu' not in device:/" /usr/local/python3.11.13/lib/python3.11/site-packages/Wav2Lip/face_detection/detection/core.py + +# To be compatible with librosa==0.10.2, instead of librosa==0.7.0 because the co-dependency numba==0.48 cannot be pip installed +RUN sed -i 's/hp.sample_rate, hp.n_fft/sr=hp.sample_rate, n_fft=hp.n_fft/' /usr/local/python3.11.13/lib/python3.11/site-packages/Wav2Lip/audio.py + +# Set the working directory +WORKDIR /home/user/comps/animation/src/ + +# Define the command to run when the container starts +RUN chmod +x /usr/local/bin/entrypoint.sh +ENV DEVICE="cpu" +ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] \ No newline at end of file diff --git a/AI/opea/wav2lip/meta.yml b/AI/opea/wav2lip/meta.yml new file mode 100644 index 00000000..ee4b49e9 --- /dev/null +++ b/AI/opea/wav2lip/meta.yml @@ -0,0 +1,3 @@ +1.2-oe2403lts: + path: 1.2/24.03-lts/Dockerfile + arch: x86_64 \ No newline at end of file -- Gitee From a85ef1b51980cb49303b83a052ab78d49f4d9c4a Mon Sep 17 00:00:00 2001 From: zhihang Date: Sat, 26 Jul 2025 03:50:08 +0000 Subject: [PATCH 2/2] fix bug Signed-off-by: zhihang --- AI/opea/avatarchatbot/1.2/24.03-lts/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/AI/opea/avatarchatbot/1.2/24.03-lts/Dockerfile b/AI/opea/avatarchatbot/1.2/24.03-lts/Dockerfile index 1a92d19f..4f999722 100644 --- a/AI/opea/avatarchatbot/1.2/24.03-lts/Dockerfile +++ b/AI/opea/avatarchatbot/1.2/24.03-lts/Dockerfile @@ -11,7 +11,7 @@ ARG VERSION # get security updates RUN yum update -y && \ yum install -y \ - shadow \ + shadow && \ yum clean all && \ 
rm -rf /var/cache/yum -- Gitee