From aaf99460f5713938adcf09a7baf87e3dc9baa687 Mon Sep 17 00:00:00 2001
From: zhihang
Date: Sun, 27 Apr 2025 07:43:25 +0000
Subject: [PATCH 1/3] add videoqna

---
 AI/image-list.yml                                  |  10 +-
 AI/opea/chatqna/doc/compose.yml                    |  14 +-
 .../1.0/24.03-lts/Dockerfile                       |  49 ++++++
 AI/opea/dataprep-multimodal-vdms/meta.yml          |   3 +
 .../1.0/24.03-lts/Dockerfile                       |  38 +++++
 AI/opea/embedding-multimodal-clip/meta.yml         |   3 +
 .../lvm-video-llama/1.0/24.03-lts/Dockerfile       |  30 ++++
 AI/opea/lvm-video-llama/meta.yml                   |   3 +
 .../1.0/24.03-lts/Dockerfile                       |  36 +++++
 AI/opea/reranking-videoqna/meta.yml                |   3 +
 .../retriever-vdms/1.0/24.03-lts/Dockerfile        |  55 +++++++
 AI/opea/retriever-vdms/meta.yml                    |   3 +
 .../1.0/24.03-lts/Dockerfile                       |  55 +++++++
 AI/opea/video-llama-lvm-server/meta.yml            |   3 +
 AI/opea/videoqna-ui/1.0/24.03-lts/Dockerfile       |  26 +++
 AI/opea/videoqna-ui/meta.yml                       |   3 +
 AI/opea/videoqna/1.0/24.03-lts/Dockerfile          |  38 +++++
 AI/opea/videoqna/doc/compose.yml                   | 151 ++++++++++++++++++
 AI/opea/videoqna/doc/set_env.sh                    |  21 +++
 AI/opea/videoqna/meta.yml                          |   3 +
 20 files changed, 539 insertions(+), 8 deletions(-)
 create mode 100644 AI/opea/dataprep-multimodal-vdms/1.0/24.03-lts/Dockerfile
 create mode 100644 AI/opea/dataprep-multimodal-vdms/meta.yml
 create mode 100644 AI/opea/embedding-multimodal-clip/1.0/24.03-lts/Dockerfile
 create mode 100644 AI/opea/embedding-multimodal-clip/meta.yml
 create mode 100644 AI/opea/lvm-video-llama/1.0/24.03-lts/Dockerfile
 create mode 100644 AI/opea/lvm-video-llama/meta.yml
 create mode 100644 AI/opea/reranking-videoqna/1.0/24.03-lts/Dockerfile
 create mode 100644 AI/opea/reranking-videoqna/meta.yml
 create mode 100644 AI/opea/retriever-vdms/1.0/24.03-lts/Dockerfile
 create mode 100644 AI/opea/retriever-vdms/meta.yml
 create mode 100644 AI/opea/video-llama-lvm-server/1.0/24.03-lts/Dockerfile
 create mode 100644 AI/opea/video-llama-lvm-server/meta.yml
 create mode 100644 AI/opea/videoqna-ui/1.0/24.03-lts/Dockerfile
 create mode 100644 AI/opea/videoqna-ui/meta.yml
 create mode 100644 AI/opea/videoqna/1.0/24.03-lts/Dockerfile
 create mode 100644 AI/opea/videoqna/doc/compose.yml
 create mode 100644 AI/opea/videoqna/doc/set_env.sh
 create mode 100644 AI/opea/videoqna/meta.yml

diff --git a/AI/image-list.yml b/AI/image-list.yml
index 03abcd39..78769a81 100644
--- a/AI/image-list.yml
+++ b/AI/image-list.yml
@@ -47,4 +47,12 @@ images:
   vllm-ascend: vllm-ascend
   langchain: langchain
   open-webui: open-webui
-  caffe: caffe
\ No newline at end of file
+  caffe: caffe
+  dataprep-multimodal-vdms: opea/dataprep-multimodal-vdms
+  lvm-video-llama: opea/lvm-video-llama
+  video-llama-lvm-server: opea/video-llama-lvm-server
+  videoqna: opea/videoqna
+  videoqna-ui: opea/videoqna-ui
+  embedding-multimodal-clip: opea/embedding-multimodal-clip
+  reranking-videoqna: opea/reranking-videoqna
+  retriever-vdms: opea/retriever-vdms
\ No newline at end of file
diff --git a/AI/opea/chatqna/doc/compose.yml b/AI/opea/chatqna/doc/compose.yml
index c39341a8..50187b77 100644
--- a/AI/opea/chatqna/doc/compose.yml
+++ b/AI/opea/chatqna/doc/compose.yml
@@ -9,7 +9,7 @@ services:
       - "6379:6379"
       - "8001:8001"
   dataprep-redis-service:
-    image: openeuler/dataprep-redis:1.0-oe2403lts
+    image: openeuler/dataprep-redis:latest
     container_name: dataprep-redis-server
     depends_on:
       - redis-vector-db
@@ -39,7 +39,7 @@ services:
       https_proxy: ${https_proxy}
     command: --model-id ${EMBEDDING_MODEL_ID} --auto-truncate
   embedding:
-    image: openeuler/embedding-tei:1.0-oe2403lts
+    image: openeuler/embedding-tei:latest
     container_name: embedding-tei-server
     depends_on:
       - tei-embedding-service
@@ -53,7 +53,7 @@ services:
       TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
     restart: unless-stopped
   retriever:
-    image: openeuler/retriever-redis:1.0-oe2403lts
+    image: openeuler/retriever-redis:latest
     container_name: retriever-redis-server
     depends_on:
       - redis-vector-db
@@ -86,7 +86,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     command: --model-id ${RERANK_MODEL_ID} --auto-truncate
   reranking:
-    image: openeuler/reranking-tei:1.0-oe2403lts
+    image: openeuler/reranking-tei:latest
     container_name: reranking-tei-xeon-server
     depends_on:
      - tei-reranking-service
@@ -119,7 +119,7 @@ services:
      HF_HUB_ENABLE_HF_TRANSFER: 0
    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
   llm:
-    image: openeuler/llm-tgi:1.0-oe2403lts
+    image: openeuler/llm-tgi:latest
     container_name: llm-tgi-server
     depends_on:
       - tgi-service
@@ -136,7 +136,7 @@ services:
       HF_HUB_ENABLE_HF_TRANSFER: 0
     restart: unless-stopped
   chaqna-xeon-backend-server:
-    image: openeuler/chatqna:1.0-oe2403lts
+    image: openeuler/chatqna:latest
     container_name: chatqna-xeon-backend-server
     depends_on:
       - redis-vector-db
@@ -162,7 +162,7 @@ services:
     ipc: host
     restart: always
   chaqna-xeon-ui-server:
-    image: openeuler/chatqna-ui:1.0-oe2403lts
+    image: openeuler/chatqna-ui:latest
     container_name: chatqna-xeon-ui-server
     depends_on:
       - chaqna-xeon-backend-server
diff --git a/AI/opea/dataprep-multimodal-vdms/1.0/24.03-lts/Dockerfile b/AI/opea/dataprep-multimodal-vdms/1.0/24.03-lts/Dockerfile
new file mode 100644
index 00000000..c68cbafd
--- /dev/null
+++ b/AI/opea/dataprep-multimodal-vdms/1.0/24.03-lts/Dockerfile
@@ -0,0 +1,49 @@
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+ENV LANG=C.UTF-8
+
+ARG ARCH="cpu"
+
+RUN yum update -y && \
+    yum install -y \
+    --setopt=install_weak_deps=False \
+    python-pip python \
+    shadow-utils \
+    mesa-libGL \
+    jemalloc-devel \
+    gcc gcc-g++ make \
+    git \
+    cairo-devel \
+    vim
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user /home/user/
+
+USER user
+
+WORKDIR /home/user
+RUN git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout v1.0
+
+RUN cp -r GenAIComps/comps . && \
+    rm -rf GenAIComps
+
+RUN pip install --no-cache-dir --upgrade pip setuptools && \
+    if [ ${ARCH} = "cpu" ]; then pip install torch torchvision --index-url https://download.pytorch.org/whl/cpu; fi && \
+    pip install --no-cache-dir -r /home/user/comps/dataprep/vdms/multimodal_langchain/requirements.txt
+
+ENV PYTHONPATH=/home/user
+
+USER root
+
+RUN mkdir -p /home/user/comps/dataprep/vdms/multimodal_langchain/uploaded_files && chown -R user /home/user/comps/dataprep/vdms/multimodal_langchain
+
+USER user
+
+WORKDIR /home/user/comps/dataprep/vdms/multimodal_langchain
+
+ENTRYPOINT ["python", "ingest_videos.py"]
\ No newline at end of file
diff --git a/AI/opea/dataprep-multimodal-vdms/meta.yml b/AI/opea/dataprep-multimodal-vdms/meta.yml
new file mode 100644
index 00000000..4cae4dd4
--- /dev/null
+++ b/AI/opea/dataprep-multimodal-vdms/meta.yml
@@ -0,0 +1,3 @@
+1.0-oe2403lts:
+  path: 1.0/24.03-lts/Dockerfile
+  arch: x86_64
\ No newline at end of file
diff --git a/AI/opea/embedding-multimodal-clip/1.0/24.03-lts/Dockerfile b/AI/opea/embedding-multimodal-clip/1.0/24.03-lts/Dockerfile
new file mode 100644
index 00000000..91cd98f0
--- /dev/null
+++ b/AI/opea/embedding-multimodal-clip/1.0/24.03-lts/Dockerfile
@@ -0,0 +1,38 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+ARG ARCH="cpu"
+
+RUN yum update -y && \
+    yum install -y \
+    --setopt=install_weak_deps=False \
+    python-pip python \
+    shadow-utils \
+    mesa-libGL \
+    jemalloc-devel \
+    git \
+    vim
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user /home/user/
+
+USER user
+
+WORKDIR /home/user
+RUN git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout v1.0
+
+RUN cp -r GenAIComps/comps . && \
+    rm -rf GenAIComps
+
+RUN pip install --no-cache-dir --upgrade pip && \
+    if [ ${ARCH} = "cpu" ]; then pip install torch torchvision --index-url https://download.pytorch.org/whl/cpu; fi && \
+    pip install --no-cache-dir -r /home/user/comps/embeddings/multimodal_clip/requirements.txt
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user
+
+WORKDIR /home/user/comps/embeddings/multimodal_clip
+
+ENTRYPOINT ["python", "embedding_multimodal.py"]
diff --git a/AI/opea/embedding-multimodal-clip/meta.yml b/AI/opea/embedding-multimodal-clip/meta.yml
new file mode 100644
index 00000000..1df7c49a
--- /dev/null
+++ b/AI/opea/embedding-multimodal-clip/meta.yml
@@ -0,0 +1,3 @@
+1.0-oe2403lts:
+  path: embedding-multimodal-clip/1.0/24.03-lts/Dockerfile
+  arch: x86_64
\ No newline at end of file
diff --git a/AI/opea/lvm-video-llama/1.0/24.03-lts/Dockerfile b/AI/opea/lvm-video-llama/1.0/24.03-lts/Dockerfile
new file mode 100644
index 00000000..ccc5b927
--- /dev/null
+++ b/AI/opea/lvm-video-llama/1.0/24.03-lts/Dockerfile
@@ -0,0 +1,30 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+# Set environment variables
+ENV LANG=en_US.UTF-8
+
+RUN yum update -y && \
+    yum install -y \
+    --setopt=install_weak_deps=False \
+    python-pip python \
+    shadow-utils \
+    git
+
+WORKDIR /home
+
+RUN git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout v1.0
+
+RUN cp -r GenAIComps/comps . && \
+    rm -rf GenAIComps
+
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir -r /home/comps/lvms/video-llama/requirements.txt
+
+ENV PYTHONPATH=$PYTHONPATH:/home
+
+WORKDIR /home/comps/lvms/video-llama
+
+ENTRYPOINT ["python", "lvm.py"]
\ No newline at end of file
diff --git a/AI/opea/lvm-video-llama/meta.yml b/AI/opea/lvm-video-llama/meta.yml
new file mode 100644
index 00000000..4cae4dd4
--- /dev/null
+++ b/AI/opea/lvm-video-llama/meta.yml
@@ -0,0 +1,3 @@
+1.0-oe2403lts:
+  path: 1.0/24.03-lts/Dockerfile
+  arch: x86_64
\ No newline at end of file
diff --git a/AI/opea/reranking-videoqna/1.0/24.03-lts/Dockerfile b/AI/opea/reranking-videoqna/1.0/24.03-lts/Dockerfile
new file mode 100644
index 00000000..96627b0d
--- /dev/null
+++ b/AI/opea/reranking-videoqna/1.0/24.03-lts/Dockerfile
@@ -0,0 +1,36 @@
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+ENV LANG=C.UTF-8
+
+RUN yum update -y && \
+    yum install -y \
+    --setopt=install_weak_deps=False \
+    python-pip python \
+    shadow-utils \
+    git
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user /home/user/
+
+USER user
+
+WORKDIR /home/user
+
+RUN git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout v1.0
+
+RUN cp -r GenAIComps/comps . && \
+    rm -rf GenAIComps
+
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir -r /home/user/comps/reranks/videoqna/requirements.txt
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user
+
+WORKDIR /home/user/comps/reranks/videoqna
+
+ENTRYPOINT ["python", "local_reranking.py"]
\ No newline at end of file
diff --git a/AI/opea/reranking-videoqna/meta.yml b/AI/opea/reranking-videoqna/meta.yml
new file mode 100644
index 00000000..4cae4dd4
--- /dev/null
+++ b/AI/opea/reranking-videoqna/meta.yml
@@ -0,0 +1,3 @@
+1.0-oe2403lts:
+  path: 1.0/24.03-lts/Dockerfile
+  arch: x86_64
\ No newline at end of file
diff --git a/AI/opea/retriever-vdms/1.0/24.03-lts/Dockerfile b/AI/opea/retriever-vdms/1.0/24.03-lts/Dockerfile
new file mode 100644
index 00000000..413cf322
--- /dev/null
+++ b/AI/opea/retriever-vdms/1.0/24.03-lts/Dockerfile
@@ -0,0 +1,55 @@
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+ARG ARCH="cpu"
+
+RUN yum update -y && \
+    yum install -y \
+    --setopt=install_weak_deps=False \
+    python-pip python \
+    shadow-utils \
+    mesa-libGL \
+    jemalloc-devel \
+    iputils \
+    vim \
+    git
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user /home/user/
+
+USER user
+
+WORKDIR /home/user
+
+RUN git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout v1.0
+
+RUN cp -r GenAIComps/comps . && \
+    rm -rf GenAIComps
+
+RUN pip install --no-cache-dir --upgrade pip && \
+    if [ ${ARCH} = "cpu" ]; then pip install torch torchvision --index-url https://download.pytorch.org/whl/cpu; fi && \
+    pip install --no-cache-dir -r /home/user/comps/retrievers/vdms/langchain/requirements.txt
+
+RUN pip install -U langchain
+RUN pip install -U langchain-community
+
+RUN pip install --upgrade huggingface-hub
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user
+
+ENV HUGGINGFACEHUB_API_TOKEN=dummy
+
+ENV USECLIP=0
+
+ENV no_proxy=localhost,127.0.0.1
+
+ENV http_proxy=""
+ENV https_proxy=""
+
+WORKDIR /home/user/comps/retrievers/vdms/langchain
+
+ENTRYPOINT ["python", "retriever_vdms.py"]
diff --git a/AI/opea/retriever-vdms/meta.yml b/AI/opea/retriever-vdms/meta.yml
new file mode 100644
index 00000000..4cae4dd4
--- /dev/null
+++ b/AI/opea/retriever-vdms/meta.yml
@@ -0,0 +1,3 @@
+1.0-oe2403lts:
+  path: 1.0/24.03-lts/Dockerfile
+  arch: x86_64
\ No newline at end of file
diff --git a/AI/opea/video-llama-lvm-server/1.0/24.03-lts/Dockerfile b/AI/opea/video-llama-lvm-server/1.0/24.03-lts/Dockerfile
new file mode 100644
index 00000000..631e9832
--- /dev/null
+++ b/AI/opea/video-llama-lvm-server/1.0/24.03-lts/Dockerfile
@@ -0,0 +1,55 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/python:3.10.17-oe2403lts
+
+ENV LANG=C.UTF-8
+
+RUN yum update -y && \
+    yum install -y \
+    --setopt=install_weak_deps=False \
+    shadow-utils \
+    git \
+    wget
+
+RUN mkdir -p /tmp/git-lfs/git-lfs && \
+    wget -O /tmp/git-lfs/git-lfs-3.6.1.tar.gz https://github.com/git-lfs/git-lfs/releases/download/v3.6.1/git-lfs-linux-amd64-v3.6.1.tar.gz && \
+    tar xvf /tmp/git-lfs/git-lfs-3.6.1.tar.gz -C /tmp/git-lfs/git-lfs --strip-components 1 && \
+    /tmp/git-lfs/git-lfs/install.sh && \
+    rm -rf /tmp/git-lfs
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user:user /home/user/
+
+RUN mkdir /home/user/model && chown user:user -R /home/user/model
+
+USER user
+
+ENV PATH=/usr/local/python3.10/bin:$PATH
+
+WORKDIR /home/user
+RUN git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout v1.0
+
+RUN cp -r GenAIComps/comps . && \
+    rm -rf GenAIComps
+
+WORKDIR /home/user/comps/lvms/video-llama/dependency
+
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir -r /home/user/comps/lvms/video-llama/dependency/requirements.txt
+
+ARG VIDEO_LLAMA_REPO=https://github.com/DAMO-NLP-SG/Video-LLaMA.git
+ARG VIDEO_LLAMA_COMMIT=0adb19e
+RUN tar -xvf video-llama.patch.tar && \
+    git clone ${VIDEO_LLAMA_REPO} Video-LLaMA && \
+    cd Video-LLaMA && git checkout ${VIDEO_LLAMA_COMMIT} && \
+    git apply --whitespace=fix ../video-llama.patch && \
+    mv video_llama ../ && \
+    cd ../ && rm -rf Video-LLaMA
+
+
+ENV PYTHONPATH=/home/user
+
+
+ENTRYPOINT ["bash", "start.sh"]
\ No newline at end of file
diff --git a/AI/opea/video-llama-lvm-server/meta.yml b/AI/opea/video-llama-lvm-server/meta.yml
new file mode 100644
index 00000000..4cae4dd4
--- /dev/null
+++ b/AI/opea/video-llama-lvm-server/meta.yml
@@ -0,0 +1,3 @@
+1.0-oe2403lts:
+  path: 1.0/24.03-lts/Dockerfile
+  arch: x86_64
\ No newline at end of file
diff --git a/AI/opea/videoqna-ui/1.0/24.03-lts/Dockerfile b/AI/opea/videoqna-ui/1.0/24.03-lts/Dockerfile
new file mode 100644
index 00000000..3b486b8f
--- /dev/null
+++ b/AI/opea/videoqna-ui/1.0/24.03-lts/Dockerfile
@@ -0,0 +1,26 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+WORKDIR /app
+
+RUN yum update -y && yum install -y \
+    --setopt=install_weak_deps=False \
+    python python-pip \
+    git \
+    curl
+
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir streamlit
+
+RUN git clone https://github.com/opea-project/GenAIExamples.git && cd GenAIExamples && git checkout v1.0
+
+RUN cp GenAIExamples/VideoQnA/ui/ui.py /app/ui.py && \
+    rm -rf GenAIExamples
+
+EXPOSE 5173
+
+HEALTHCHECK CMD curl --fail http://localhost:5173/_stcore/health
+
+ENTRYPOINT ["streamlit", "run", "ui.py", "--server.port=5173", "--server.address=0.0.0.0"]
diff --git a/AI/opea/videoqna-ui/meta.yml b/AI/opea/videoqna-ui/meta.yml
new file mode 100644
index 00000000..4cae4dd4
--- /dev/null
+++ b/AI/opea/videoqna-ui/meta.yml
@@ -0,0 +1,3 @@
+1.0-oe2403lts:
+  path: 1.0/24.03-lts/Dockerfile
+  arch: x86_64
\ No newline at end of file
diff --git a/AI/opea/videoqna/1.0/24.03-lts/Dockerfile b/AI/opea/videoqna/1.0/24.03-lts/Dockerfile
new file mode 100644
index 00000000..f7e7b116
--- /dev/null
+++ b/AI/opea/videoqna/1.0/24.03-lts/Dockerfile
@@ -0,0 +1,38 @@
+
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+RUN yum update -y && yum install -y \
+    --setopt=install_weak_deps=False \
+    mesa-libGL \
+    jemalloc-devel \
+    python python-pip \
+    git
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user /home/user/
+
+WORKDIR /home/user/
+
+RUN git clone https://github.com/opea-project/GenAIComps.git && cd GenAIComps && git checkout v1.0
+
+WORKDIR /home/user/GenAIComps
+RUN pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir -r /home/user/GenAIComps/requirements.txt
+
+WORKDIR /home/user/
+RUN git clone https://github.com/opea-project/GenAIExamples.git && cd GenAIExamples && git checkout v1.0
+RUN cp GenAIExamples/VideoQnA/videoqna.py .
+RUN rm -rf GenAIExamples
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user/GenAIComps
+
+USER user
+
+WORKDIR /home/user
+
+ENTRYPOINT ["python", "videoqna.py"]
diff --git a/AI/opea/videoqna/doc/compose.yml b/AI/opea/videoqna/doc/compose.yml
new file mode 100644
index 00000000..1fed368f
--- /dev/null
+++ b/AI/opea/videoqna/doc/compose.yml
@@ -0,0 +1,151 @@
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+
+services:
+  vdms-vector-db:
+    image: intellabs/vdms:v2.8.0
+    container_name: vdms-vector-db
+    ports:
+      - "8001:55555"
+  dataprep:
+    image: openeuler/dataprep-multimodal-vdms:latest
+    container_name: dataprep-vdms-server
+    depends_on:
+      - vdms-vector-db
+    ports:
+      - "6007:6007"
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      VDMS_HOST: ${VDMS_HOST}
+      VDMS_PORT: ${VDMS_PORT}
+      INDEX_NAME: ${INDEX_NAME}
+    entrypoint: sh -c 'sleep 15 && python ingest_videos.py'
+    volumes:
+      - /home/$USER/.cache/clip:/home/user/.cache/clip
+      - /home/$USER/.cache/huggingface/hub:/home/user/.cache/huggingface/hub
+  embedding:
+    image: openeuler/embedding-multimodal-clip:latest
+    container_name: embedding-multimodal-server
+    ports:
+      - "6000:6000"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+    volumes:
+      - /home/$USER/.cache/huggingface/hub:/home/user/.cache/huggingface/hub
+    restart: unless-stopped
+  retriever:
+    image: openeuler/retriever-vdms:latest
+    container_name: retriever-vdms-server
+    depends_on:
+      - vdms-vector-db
+    ports:
+      - "7000:7000"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      INDEX_NAME: ${INDEX_NAME}
+      VDMS_HOST: ${VDMS_HOST}
+      VDMS_PORT: ${VDMS_PORT}
+      USECLIP: ${USECLIP}
+    entrypoint: sh -c 'sleep 30 && python retriever_vdms.py'
+    restart: unless-stopped
+    volumes:
+      - /home/$USER/.cache/huggingface/hub:/home/user/.cache/huggingface/hub
+  reranking:
+    image: openeuler/reranking-videoqna:latest
+    container_name: reranking-videoqna-server
+    ports:
+      - "8000:8000"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      CHUNK_DURATION: ${CHUNK_DURATION}
+      FILE_SERVER_ENDPOINT: ${DATAPREP_GET_FILE_ENDPOINT}
+      DATAPREP_GET_VIDEO_LIST_ENDPOINT: ${DATAPREP_GET_VIDEO_LIST_ENDPOINT}
+    restart: unless-stopped
+  lvm-video-llama:
+    image: openeuler/video-llama-lvm-server:latest
+    container_name: video-llama-lvm-server
+    ports:
+      - "9009:9009"
+    ipc: host
+    environment:
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      no_proxy: ${no_proxy}
+      llm_download: ${LLM_DOWNLOAD}
+    volumes:
+      - "/home/$USER/.cache:/home/user/.cache"
+      - video-llama-model:/home/user/model
+    restart: unless-stopped
+  lvm:
+    image: openeuler/lvm-video-llama:latest
+    container_name: lvm-video-llama
+    ports:
+      - "9000:9000"
+    ipc: host
+    environment:
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      no_proxy: ${no_proxy}
+      LVM_ENDPOINT: ${LVM_ENDPOINT}
+    restart: unless-stopped
+    depends_on:
+      - lvm-video-llama
+  videoqna-xeon-backend-server:
+    image: openeuler/videoqna:latest
+    container_name: videoqna-xeon-backend-server
+    depends_on:
+      - vdms-vector-db
+      - dataprep
+      - embedding
+      - retriever
+      - reranking
+      - lvm-video-llama
+      - lvm
+    ports:
+      - "8888:8888"
+    entrypoint: sh -c 'sleep 45 && python videoqna.py'
+    environment:
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      no_proxy: ${no_proxy}
+      MEGA_SERVICE_HOST_IP: ${MEGA_SERVICE_HOST_IP}
+      EMBEDDING_SERVICE_HOST_IP: ${EMBEDDING_SERVICE_HOST_IP}
+      RETRIEVER_SERVICE_HOST_IP: ${RETRIEVER_SERVICE_HOST_IP}
+      RERANK_SERVICE_HOST_IP: ${RERANK_SERVICE_HOST_IP}
+      LVM_SERVICE_HOST_IP: ${LVM_SERVICE_HOST_IP}
+    ipc: host
+    restart: always
+  videoqna-xeon-ui-server:
+    image: openeuler/videoqna-ui:latest
+    container_name: videoqna-xeon-ui-server
+    depends_on:
+      - videoqna-xeon-backend-server
+    ports:
+      - "5173:5173"
+    environment:
+      https_proxy: ${https_proxy}
+      http_proxy: ${http_proxy}
+      no_proxy: ${no_proxy}
+      BACKEND_SERVICE_ENDPOINT: ${BACKEND_SERVICE_ENDPOINT}
+      BACKEND_HEALTH_CHECK_ENDPOINT: ${BACKEND_HEALTH_CHECK_ENDPOINT}
+    ipc: host
+    restart: always
+volumes:
+  video-llama-model:
+    external: true
+networks:
+  default:
+    driver: bridge
\ No newline at end of file
diff --git a/AI/opea/videoqna/doc/set_env.sh b/AI/opea/videoqna/doc/set_env.sh
new file mode 100644
index 00000000..ec95fc62
--- /dev/null
+++ b/AI/opea/videoqna/doc/set_env.sh
@@ -0,0 +1,21 @@
+export no_proxy=${your_no_proxy}
+export http_proxy=${your_http_proxy}
+export https_proxy=${your_https_proxy}
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export EMBEDDING_SERVICE_HOST_IP=${host_ip}
+export RETRIEVER_SERVICE_HOST_IP=${host_ip}
+export RERANK_SERVICE_HOST_IP=${host_ip}
+export LVM_SERVICE_HOST_IP=${host_ip}
+
+export LVM_ENDPOINT="http://${host_ip}:9009"
+export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/videoqna"
+export BACKEND_HEALTH_CHECK_ENDPOINT="http://${host_ip}:8888/v1/health_check"
+export DATAPREP_SERVICE_ENDPOINT="http://${host_ip}:6007/v1/dataprep"
+export DATAPREP_GET_FILE_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get_file"
+export DATAPREP_GET_VIDEO_LIST_ENDPOINT="http://${host_ip}:6007/v1/dataprep/get_videos"
+
+export VDMS_HOST=${host_ip}
+export VDMS_PORT=8001
+export INDEX_NAME="mega-videoqna"
+export LLM_DOWNLOAD="True"
+export USECLIP=1
\ No newline at end of file
diff --git a/AI/opea/videoqna/meta.yml b/AI/opea/videoqna/meta.yml
new file mode 100644
index 00000000..4cae4dd4
--- /dev/null
+++ b/AI/opea/videoqna/meta.yml
@@ -0,0 +1,3 @@
+1.0-oe2403lts:
+  path: 1.0/24.03-lts/Dockerfile
+  arch: x86_64
\ No newline at end of file
--
Gitee

From 3386d609ffc387eafced3e87f9a75af543484f70 Mon Sep 17 00:00:00 2001
From: zhihang
Date: Sun, 27 Apr 2025 07:55:53 +0000
Subject: [PATCH 2/3] add README.md for videoqna

---
 AI/opea/videoqna/README.md | 101 +++++++++++++++++++++++++++++++++++++
 1 file changed, 101 insertions(+)
 create mode 100644 AI/opea/videoqna/README.md

diff --git a/AI/opea/videoqna/README.md b/AI/opea/videoqna/README.md
new file mode 100644
index 00000000..4dd9404b
--- /dev/null
+++ b/AI/opea/videoqna/README.md
@@ -0,0 +1,101 @@
+# Quick reference
+
+- The official OPEA Docker images
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# OPEA | openEuler
+
+Current OPEA Docker images are built on [openEuler](https://repo.openeuler.org/). This repository is free to use and exempted from per-user rate limits.
+
+OPEA is an open platform project that lets you create open, multi-provider, robust, and composable GenAI solutions that harness the best innovation across the ecosystem.
+
+The OPEA platform includes:
+
+- Detailed framework of composable building blocks for state-of-the-art generative AI systems, including LLMs, data stores, and prompt engines
+
+- Architectural blueprints of retrieval-augmented generative AI component stack structure and end-to-end workflows
+
+- A four-step assessment for grading generative AI systems around performance, features, trustworthiness, and enterprise-grade readiness
+
+Read more about OPEA at [opea.dev](https://opea.dev/) and explore the OPEA technical documentation at [opea-project.github.io](https://opea-project.github.io/).
+
+# Supported tags and respective Dockerfile links
+
+The tag of each VideoQnA Docker image combines the VideoQnA version with the base image version, as follows:
+
+| Tags | Currently | Architectures |
+|--|--|--|
+| [1.0-oe2403lts](https://gitee.com/openeuler/openeuler-docker-images/blob/master/AI/opea/videoqna/1.0/24.03-lts/Dockerfile) | VideoQnA 1.0 on openEuler 24.03-LTS | amd64 |
+
+# Usage
+
+VideoQnA is a pipeline that retrieves videos based on a user-provided prompt. It uses only video embeddings to perform vector similarity search in Intel's VDMS vector database, and all operations run on Intel Xeon CPUs. The pipeline supports long-form videos and time-based search.
+
+The VideoQnA service can be effortlessly deployed on Intel Gaudi2, Intel Xeon Scalable processors, and NVIDIA GPUs.
+
+Quick start deployment steps:
+
+1. Set up the environment variables.
+2. Run Docker Compose.
+3. Consume the VideoQnA service.
+
+### Quick Start: 1. Set Up Environment Variables
+
+To set up environment variables for deploying VideoQnA services, follow these steps:
+
+1. Set the required environment variables:
+
+   ```bash
+   # Example: host_ip="192.168.1.1"
+   export host_ip="External_Public_IP"
+   # Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
+   export no_proxy="Your_No_Proxy"
+   export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
+   ```
+
+2. If you are in a proxy environment, also set the proxy-related environment variables:
+
+   ```bash
+   export http_proxy="Your_HTTP_Proxy"
+   export https_proxy="Your_HTTPs_Proxy"
+   ```
+
+3. Set up the other environment variables:
+
+   > Get `set_env.sh` here: [set_env.sh](https://gitee.com/openeuler/openeuler-docker-images/tree/master/AI/opea/videoqna/doc/set_env.sh)
+
+   ```bash
+   # on Xeon
+   source set_env.sh
+   ```
+
+### Quick Start: 2. Run Docker Compose
+
+Select the compose file that matches your hardware.
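+
+Note: `compose.yml` marks the `video-llama-model` volume as `external`, so Docker Compose will not create it automatically. If it does not exist yet, create it once before the first start; the volume name below is taken from the `volumes:` section of `compose.yml`:
+
+```bash
+# one-time setup: create the external volume that stores the Video-LLaMA weights
+docker volume create video-llama-model
+```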
+
+CPU example:
+
+> Get `compose.yml` here: [compose.yml](https://gitee.com/openeuler/openeuler-docker-images/tree/master/AI/opea/videoqna/doc/compose.yml)
+
+```bash
+docker compose -f compose.yml up -d
+```
+
+Docker Compose automatically pulls the required images from Docker Hub; you can also pull them in advance:
+
+```bash
+docker pull openeuler/videoqna:latest
+docker pull openeuler/videoqna-ui:latest
+```
+
+### Quick Start: 3. Consume the VideoQnA Service
+
+```bash
+curl http://${host_ip}:8888/v1/videoqna -H "Content-Type: application/json" -d '{
+    "messages": "What is the man doing?",
+    "stream": "True"
+  }'
+```
\ No newline at end of file
--
Gitee

From 90452169866449c3b053241707aa1d2ae724cc75 Mon Sep 17 00:00:00 2001
From: zhihang
Date: Mon, 28 Apr 2025 01:41:38 +0000
Subject: [PATCH 3/3] fix meta.yaml

---
 AI/opea/embedding-multimodal-clip/meta.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/AI/opea/embedding-multimodal-clip/meta.yml b/AI/opea/embedding-multimodal-clip/meta.yml
index 1df7c49a..55f9e33c 100644
--- a/AI/opea/embedding-multimodal-clip/meta.yml
+++ b/AI/opea/embedding-multimodal-clip/meta.yml
@@ -1,3 +1,3 @@
 1.0-oe2403lts:
-  path: embedding-multimodal-clip/1.0/24.03-lts/Dockerfile
-  arch: x86_64
\ No newline at end of file
+  path: 1.0/24.03-lts/Dockerfile
+  arch: x86_64
--
Gitee