From 1d2da0425530b90f1d226aa38bf45376fabc60a6 Mon Sep 17 00:00:00 2001
From: zhihang
Date: Tue, 27 May 2025 06:16:40 +0000
Subject: [PATCH] add Translation v1.2

---
 .../translation-ui/1.2/24.03-lts/Dockerfile   | 33 +++++++
 AI/opea/translation-ui/meta.yml               |  6 +-
 AI/opea/translation/1.2/24.03-lts/Dockerfile  | 34 +++++++
 AI/opea/translation/README.md                 | 98 +++++++++++++++++++
 AI/opea/translation/doc/compose.yml           | 91 +++++++++++++++++
 AI/opea/translation/doc/set_env.sh            | 16 +++
 AI/opea/translation/meta.yml                  |  6 +-
 7 files changed, 282 insertions(+), 2 deletions(-)
 create mode 100644 AI/opea/translation-ui/1.2/24.03-lts/Dockerfile
 create mode 100644 AI/opea/translation/1.2/24.03-lts/Dockerfile
 create mode 100644 AI/opea/translation/README.md
 create mode 100644 AI/opea/translation/doc/compose.yml
 create mode 100644 AI/opea/translation/doc/set_env.sh

diff --git a/AI/opea/translation-ui/1.2/24.03-lts/Dockerfile b/AI/opea/translation-ui/1.2/24.03-lts/Dockerfile
new file mode 100644
index 0000000..d26baec
--- /dev/null
+++ b/AI/opea/translation-ui/1.2/24.03-lts/Dockerfile
@@ -0,0 +1,33 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# Use openEuler 24.03 LTS as the base image
+FROM openeuler/openeuler:24.03-lts
+
+# Update package manager and install Git and npm
+RUN yum update -y && \
+    yum install -y \
+    git \
+    npm
+
+WORKDIR /home/user
+
+# Copy the front-end code repository
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIExamples.git
+
+RUN cp -r GenAIExamples/Translation/ui/svelte /home/user/svelte
+
+# Set the working directory
+WORKDIR /home/user/svelte
+
+# Install front-end dependencies
+RUN npm install
+
+# Build the front-end application
+RUN npm run build
+
+# Expose the port of the front-end application
+EXPOSE 5173
+
+# Run the front-end application in preview mode
+CMD ["npm", "run", "preview", "--", "--port", "5173", "--host", "0.0.0.0"]
\ No newline at end of file
diff --git a/AI/opea/translation-ui/meta.yml b/AI/opea/translation-ui/meta.yml
index 4cae4dd..0343efe 100644
--- a/AI/opea/translation-ui/meta.yml
+++ b/AI/opea/translation-ui/meta.yml
@@ -1,3 +1,7 @@
 1.0-oe2403lts:
   path: 1.0/24.03-lts/Dockerfile
-  arch: x86_64
\ No newline at end of file
+  arch: x86_64
+
+1.2-oe2403lts:
+  path: 1.2/24.03-lts/Dockerfile
+  arch: x86_64
diff --git a/AI/opea/translation/1.2/24.03-lts/Dockerfile b/AI/opea/translation/1.2/24.03-lts/Dockerfile
new file mode 100644
index 0000000..38b072e
--- /dev/null
+++ b/AI/opea/translation/1.2/24.03-lts/Dockerfile
@@ -0,0 +1,34 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# Use openEuler 24.03 LTS as the base image
+FROM openeuler/openeuler:24.03-lts
+
+# Get security updates and install Python and Git
+RUN yum update -y && \
+    yum install -y \
+    python python-pip \
+    git
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user /home/user
+
+WORKDIR /home/user
+
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIComps.git
+
+WORKDIR /home/user/GenAIComps
+RUN pip install --no-cache-dir --upgrade pip setuptools && \
+    pip install --no-cache-dir -r /home/user/GenAIComps/requirements.txt
+WORKDIR /home/user
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user/GenAIComps
+
+USER user
+
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIExamples.git
+
+RUN cp -r GenAIExamples/Translation/translation.py /home/user/translation.py
+
+ENTRYPOINT ["python", "translation.py"]
\ No newline at end of file
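With the patch applied, both images could be built locally along these lines (a sketch only; the tags mirror the new meta.yml entries, and the builds need network access since each Dockerfile clones its sources at build time):

```bash
docker build -t openeuler/translation:1.2-oe2403lts AI/opea/translation/1.2/24.03-lts
docker build -t openeuler/translation-ui:1.2-oe2403lts AI/opea/translation-ui/1.2/24.03-lts
```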
diff --git a/AI/opea/translation/README.md b/AI/opea/translation/README.md
new file mode 100644
index 0000000..4558c71
--- /dev/null
+++ b/AI/opea/translation/README.md
@@ -0,0 +1,98 @@
+# Quick reference
+
+- The official OPEA docker images
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# OPEA | openEuler
+
+Current OPEA docker images are built on [openEuler](https://repo.openeuler.org/). This repository is free to use and exempted from per-user rate limits.
+
+OPEA is an open platform project that lets you create open, multi-provider, robust, and composable GenAI solutions that harness the best innovation across the ecosystem.
+
+The OPEA platform includes:
+
+- Detailed framework of composable building blocks for state-of-the-art generative AI systems including LLMs, data stores, and prompt engines
+
+- Architectural blueprints of retrieval-augmented generative AI component stack structure and end-to-end workflows
+
+- A four-step assessment for grading generative AI systems around performance, features, trustworthiness, and enterprise-grade readiness
+
+Read more about OPEA at [opea.dev](https://opea.dev/) and explore the OPEA technical documentation at [opea-project.github.io](https://opea-project.github.io/)
+
+# Supported tags and respective Dockerfile links
+
+The tag of each Translation docker image consists of the Translation version and the base image version. The details are as follows:
+
+| Tags | Currently | Architectures |
+|--|--|--|
+|[1.0-oe2403lts](https://gitee.com/openeuler/openeuler-docker-images/blob/master/AI/opea/translation/1.0/24.03-lts/Dockerfile)| Translation 1.0 on openEuler 24.03-LTS | amd64 |
+|[1.2-oe2403lts](https://gitee.com/openeuler/openeuler-docker-images/blob/master/AI/opea/translation/1.2/24.03-lts/Dockerfile)| Translation 1.2 on openEuler 24.03-LTS | amd64 |
+
+# Usage
+
+The Translation service can be deployed on Intel Gaudi2, Intel Xeon Scalable processors, and NVIDIA GPUs.
+
+Quick Start Deployment Steps:
+
+1. Set up the environment variables.
+2. Run Docker Compose.
+3. Consume the Translation Service.
+
+### Quick Start: 1. Set Up Environment Variables
+
+To set up environment variables for deploying Translation services, follow these steps:
+
+1. Set the required environment variables:
+
+   ```bash
+   # Example: host_ip="192.168.1.1"
+   export host_ip="External_Public_IP"
+   # Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
+   export no_proxy="Your_No_Proxy"
+   export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
+   ```
+
+2. If you are in a proxy environment, also set the proxy-related environment variables:
+
+   ```bash
+   export http_proxy="Your_HTTP_Proxy"
+   export https_proxy="Your_HTTPS_Proxy"
+   ```
+
+3. Set up the other environment variables:
+
+   > Get `set_env.sh` here: [set_env.sh](https://gitee.com/openeuler/openeuler-docker-images/tree/master/AI/opea/translation/doc/set_env.sh)
+
+   ```bash
+   # on Xeon
+   source set_env.sh
+   ```
+
+### Quick Start: 2. Run Docker Compose
+
+Select the `compose.yml` file that matches your hardware.
+
+CPU example:
+
+> Get `compose.yml` here: [compose.yml](https://gitee.com/openeuler/openeuler-docker-images/tree/master/AI/opea/translation/doc/compose.yml)
+
+```bash
+docker compose -f compose.yml up -d
+```
+
+Docker Compose will automatically pull the required images from Docker Hub:
+
+```bash
+docker pull openeuler/translation:1.2-oe2403lts
+docker pull openeuler/translation-ui:1.2-oe2403lts
+```
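+
+A quick sanity check after startup (a sketch; the container names below are the defaults from `compose.yml`):
+
+```bash
+# The backend, UI, and nginx containers are all named translation-xeon-*
+docker ps --filter "name=translation-xeon" --format "table {{.Names}}\t{{.Status}}"
+# TGI may still be downloading the model before it starts serving
+docker logs tgi-service 2>&1 | tail
+```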
+
+### Quick Start: 3. Consume the Translation Service
+
+```bash
+curl http://${host_ip}:8888/v1/translation -H "Content-Type: application/json" -d '{
+     "language_from": "Chinese","language_to": "English","source_language": "我爱机器翻译。"}'
+```
\ No newline at end of file
diff --git a/AI/opea/translation/doc/compose.yml b/AI/opea/translation/doc/compose.yml
new file mode 100644
index 0000000..cdae091
--- /dev/null
+++ b/AI/opea/translation/doc/compose.yml
@@ -0,0 +1,91 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  tgi-service:
+    image: openeuler/text-generation-inference-cpu:2.4.0-oe2403lts
+    container_name: tgi-service
+    ports:
+      - "8008:80"
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HF_HUB_DISABLE_PROGRESS_BARS: 1
+      HF_HUB_ENABLE_HF_TRANSFER: 0
+      host_ip: ${host_ip}
+    volumes:
+      - "./data:/data"
+    shm_size: 1g
+    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
+  llm:
+    image: openeuler/llm-textgen:1.2-oe2403lts
+    container_name: llm-textgen-server
+    depends_on:
+      - tgi-service
+    ports:
+      - "9000:9000"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
+      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      HF_HUB_DISABLE_PROGRESS_BARS: 1
+      HF_HUB_ENABLE_HF_TRANSFER: 0
+    restart: unless-stopped
+  translation-xeon-backend-server:
+    image: openeuler/translation:1.2-oe2403lts
+    container_name: translation-xeon-backend-server
+    depends_on:
+      - tgi-service
+      - llm
+    ports:
+      - "8888:8888"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
+      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
+    ipc: host
+    restart: always
+  translation-xeon-ui-server:
+    image: openeuler/translation-ui:1.2-oe2403lts
+    container_name: translation-xeon-ui-server
+    depends_on:
+      - translation-xeon-backend-server
+    ports:
+      - "5173:5173"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - BASE_URL=${BACKEND_SERVICE_ENDPOINT}
+    ipc: host
+    restart: always
+  translation-xeon-nginx-server:
+    image: openeuler/nginx:latest
+    container_name: translation-xeon-nginx-server
+    depends_on:
+      - translation-xeon-backend-server
+      - translation-xeon-ui-server
+    ports:
+      - "${NGINX_PORT:-80}:80"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - FRONTEND_SERVICE_IP=${FRONTEND_SERVICE_IP}
+      - FRONTEND_SERVICE_PORT=${FRONTEND_SERVICE_PORT}
+      - BACKEND_SERVICE_NAME=${BACKEND_SERVICE_NAME}
+      - BACKEND_SERVICE_IP=${BACKEND_SERVICE_IP}
+      - BACKEND_SERVICE_PORT=${BACKEND_SERVICE_PORT}
+    ipc: host
+    restart: always
+networks:
+  default:
+    driver: bridge
\ No newline at end of file
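The nginx service above publishes `"${NGINX_PORT:-80}:80"`, so the host port can be moved without editing the file; a minimal sketch:

```bash
# Serve the UI through nginx on host port 8080 instead of the default 80
export NGINX_PORT=8080
docker compose -f compose.yml up -d
```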
diff --git a/AI/opea/translation/doc/set_env.sh b/AI/opea/translation/doc/set_env.sh
new file mode 100644
index 0000000..91dc83f
--- /dev/null
+++ b/AI/opea/translation/doc/set_env.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+export LLM_MODEL_ID="Intel/neural-chat-7b-v3-3"
+export TGI_LLM_ENDPOINT="http://${host_ip}:8008"
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export LLM_SERVICE_HOST_IP=${host_ip}
+export BACKEND_SERVICE_ENDPOINT="http://${host_ip}:8888/v1/translation"
+export NGINX_PORT=80
+export FRONTEND_SERVICE_IP=${host_ip}
+export FRONTEND_SERVICE_PORT=5173
+export BACKEND_SERVICE_NAME=translation
+export BACKEND_SERVICE_IP=${host_ip}
+export BACKEND_SERVICE_PORT=8888
diff --git a/AI/opea/translation/meta.yml b/AI/opea/translation/meta.yml
index 4cae4dd..0343efe 100644
--- a/AI/opea/translation/meta.yml
+++ b/AI/opea/translation/meta.yml
@@ -1,3 +1,7 @@
 1.0-oe2403lts:
   path: 1.0/24.03-lts/Dockerfile
-  arch: x86_64
\ No newline at end of file
+  arch: x86_64
+
+1.2-oe2403lts:
+  path: 1.2/24.03-lts/Dockerfile
+  arch: x86_64
--
Gitee