diff --git a/AI/image-list.yml b/AI/image-list.yml
index 0b3cf26465c8108866d556563dd79af562d7ba85..6e214bb7063beafbc998f6c65bb1c1349290c5b2 100644
--- a/AI/image-list.yml
+++ b/AI/image-list.yml
@@ -67,4 +67,5 @@ images:
   retriever-vdms: opea/retriever-vdms
   text2sql: opea/text2sql
   text2sql-ui: opea/text2sql-ui
-
+  text2image: opea/text2image
+  text2image-ui: opea/text2image-ui
diff --git a/AI/opea/codegen/README.md b/AI/opea/codegen/README.md
index ac8b8d64878f10e53469a55df31dda5bf1d75264..f0b2d8bfd6c0bdb80f39a5f3e2ea7904cb817e2f 100644
--- a/AI/opea/codegen/README.md
+++ b/AI/opea/codegen/README.md
@@ -114,6 +114,6 @@
    docker pull openeuler/codegen-ui:latest
 2. Access via frontend
 
-   To access the frontend, open the following URL in your browser: http://{host_ip}:5173.
+   To access the frontend, open the following URL in your browser: `http://{host_ip}:5173`.
 
    By default, the UI runs on port 5173 internally.
\ No newline at end of file
diff --git a/AI/opea/text2image-ui/1.2/24.03-lts/Dockerfile b/AI/opea/text2image-ui/1.2/24.03-lts/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..73cda181af5d4b0547997b6b0af9f92dd1fcb48c
--- /dev/null
+++ b/AI/opea/text2image-ui/1.2/24.03-lts/Dockerfile
@@ -0,0 +1,30 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# Use openEuler 24.03 LTS as the base image
+FROM openeuler/openeuler:24.03-lts
+
+# Update package manager and install npm and Git
+RUN yum update -y && \
+    yum install -y \
+    npm \
+    git
+
+WORKDIR /home/user
+
+# Clone the front-end code repository
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIExamples.git
+
+WORKDIR /home/user/GenAIExamples/Text2Image/ui/svelte
+
+# Install front-end dependencies
+RUN npm install
+
+# Build the front-end application
+RUN npm run build
+
+# Expose the port of the front-end application
+EXPOSE 5173
+
+# Run the front-end application in preview mode
+CMD ["npm", "run", "preview", "--", "--port", "5173", "--host", "0.0.0.0"]
\ No newline at end of file
diff --git a/AI/opea/text2image-ui/meta.yml b/AI/opea/text2image-ui/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ee4b49e9afa63ff59b2371473ff499d093b30722
--- /dev/null
+++ b/AI/opea/text2image-ui/meta.yml
@@ -0,0 +1,3 @@
+1.2-oe2403lts:
+  path: 1.2/24.03-lts/Dockerfile
+  arch: x86_64
\ No newline at end of file
diff --git a/AI/opea/text2image/1.2/24.03-lts/Dockerfile b/AI/opea/text2image/1.2/24.03-lts/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..64f596de2e545d5c4c1769ed7b342b410751e0ad
--- /dev/null
+++ b/AI/opea/text2image/1.2/24.03-lts/Dockerfile
@@ -0,0 +1,34 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+# Set environment variables
+ENV LANG=en_US.UTF-8
+
+ARG ARCH="cpu"
+
+RUN yum update -y && \
+    yum install -y \
+    python python-pip \
+    git
+
+WORKDIR /home/user
+
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIComps.git
+
+RUN cp -r GenAIComps/comps /home/user/comps && \
+    rm -rf GenAIComps
+
+RUN pip install --no-cache-dir --upgrade pip setuptools && \
+    if [ ${ARCH} = "cpu" ]; then \
+        pip install --no-cache-dir torch torchvision --index-url https://download.pytorch.org/whl/cpu; fi && \
+    pip install --no-cache-dir -r /home/user/comps/text2image/src/requirements.txt
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user
+
+WORKDIR /home/user/comps/text2image/src
+
+RUN echo python opea_text2image_microservice.py --bf16 >> run.sh
+
+CMD bash run.sh
\ No newline at end of file
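The README added below lists building the images from source as the second deployment option but gives no command for it; here is a minimal sketch using the two Dockerfiles above. The local tags are illustrative (chosen to match `meta.yml`), and the checkout path is an assumption:

```bash
# Build the backend and UI images locally from this repository's Dockerfiles
git clone https://gitee.com/openeuler/openeuler-docker-images.git
cd openeuler-docker-images
docker build -t openeuler/text2image:1.2-oe2403lts \
    -f AI/opea/text2image/1.2/24.03-lts/Dockerfile AI/opea/text2image/1.2/24.03-lts
docker build -t openeuler/text2image-ui:1.2-oe2403lts \
    -f AI/opea/text2image-ui/1.2/24.03-lts/Dockerfile AI/opea/text2image-ui/1.2/24.03-lts
```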
diff --git a/AI/opea/text2image/README.md b/AI/opea/text2image/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e799de7fc19bddfd0e5bd0c988decc3f5a8f4808
--- /dev/null
+++ b/AI/opea/text2image/README.md
@@ -0,0 +1,90 @@
+# Quick reference
+
+- The official OPEA docker images
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# OPEA | openEuler
+
+Current OPEA docker images are built on [openEuler](https://repo.openeuler.org/). This repository is free to use and exempted from per-user rate limits.
+
+OPEA is an open platform project that lets you create open, multi-provider, robust, and composable GenAI solutions that harness the best innovation across the ecosystem.
+
+The OPEA platform includes:
+
+- Detailed framework of composable building blocks for state-of-the-art generative AI systems, including LLMs, data stores, and prompt engines
+
+- Architectural blueprints of retrieval-augmented generative AI component stack structure and end-to-end workflows
+
+- A four-step assessment for grading generative AI systems on performance, features, trustworthiness, and enterprise-grade readiness
+
+Read more about OPEA at [opea.dev](https://opea.dev/) and explore the OPEA technical documentation at [opea-project.github.io](https://opea-project.github.io/).
+
+# Supported tags and respective Dockerfile links
+
+The tag of each Text2Image docker image consists of the Text2Image version and the base image version. The details are as follows:
+
+| Tags | Currently | Architectures |
+|--|--|--|
+|[1.2-oe2403lts](https://gitee.com/openeuler/openeuler-docker-images/blob/master/AI/opea/text2image/1.2/24.03-lts/Dockerfile)| Text2Image 1.2 on openEuler 24.03-LTS | amd64 |
+
+# Usage
+
+The Text2Image service can be effortlessly deployed on either Intel Gaudi2 or Intel Xeon Scalable Processors.
+
+Currently, we support two ways of deploying Text2Image services with docker compose:
+
+1. Start services using the docker images on `docker hub`:
+
+   ```bash
+   docker pull openeuler/text2image:latest
+   docker pull openeuler/text2image-ui:latest
+   ```
+
+2. Start services using the docker images built from source.
+
+### Quick Start: 1. Set up Environment Variables
+
+To set up environment variables for deploying Text2Image services, follow these steps:
+
+1. Set the required environment variables:
+
+   ```bash
+   # Example: host_ip="192.168.1.1"
+   export host_ip="External_Public_IP"
+   # Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
+   export no_proxy="Your_No_Proxy"
+   export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
+   # MODEL and HF_TOKEN are referenced by the compose file below
+   export MODEL="Your_Text2Image_Model_Id"
+   export HF_TOKEN="Your_Huggingface_API_Token"
+   ```
+
+2. If you are in a proxy environment, also set the proxy-related environment variables:
+
+   ```bash
+   export http_proxy="Your_HTTP_Proxy"
+   export https_proxy="Your_HTTPs_Proxy"
+   ```
+
+### Quick Start: 2. Run Docker Compose
+
+> Get `compose.yml` here: [compose.yml](https://gitee.com/openeuler/openeuler-docker-images/tree/master/AI/opea/text2image/doc/compose.yml)
+
+```bash
+docker compose -f compose.yml up -d
+```
+
+It will automatically download the docker images from `docker hub`:
+
+```bash
+docker pull openeuler/text2image:latest
+docker pull openeuler/text2image-ui:latest
+```
+
+### Quick Start: 3. Consume the Text2Image Service
+
+1. Access via frontend
+
+   To access the frontend, open the following URL in your browser: `http://{host_ip}:5173`.
+
+   By default, the UI runs on port 5173 internally.
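+
+2. Access via the backend API
+
+   The text2image backend listens on port 9379 (see `compose.yml` below). The endpoint path and request body shown here follow the upstream GenAIComps text2image component and are an illustrative assumption; adjust them if your deployed version exposes a different API:
+
+   ```bash
+   # Send a prompt to the text2image microservice (endpoint/payload assumed from upstream GenAIComps)
+   curl http://${host_ip}:9379/v1/text2image \
+     -X POST \
+     -H "Content-Type: application/json" \
+     -d '{"prompt": "An astronaut riding a green horse", "num_images_per_prompt": 1}'
+   ```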
\ No newline at end of file
diff --git a/AI/opea/text2image/doc/compose.yml b/AI/opea/text2image/doc/compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6124f15ddefe248caaf9194fdb9cfd24e9262fd2
--- /dev/null
+++ b/AI/opea/text2image/doc/compose.yml
@@ -0,0 +1,53 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  text2image-server:
+    image: openeuler/text2image:1.2-oe2403lts
+    container_name: text2image-server
+    ports:
+      - "9379:9379"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - MODEL=${MODEL}
+      - HF_TOKEN=${HF_TOKEN}
+    ipc: host
+    restart: always
+  text2image-ui-server:
+    image: openeuler/text2image-ui:1.2-oe2403lts
+    container_name: text2image-ui-server
+    depends_on:
+      - text2image-server
+    ports:
+      - "5173:5173"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+    ipc: host
+    restart: always
+  text2image-nginx-server:
+    image: openeuler/nginx:latest
+    container_name: text2image-nginx-server
+    depends_on:
+      - text2image-server
+      - text2image-ui-server
+    ports:
+      - "${NGINX_PORT:-80}:80"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - FRONTEND_SERVICE_IP=text2image-ui-server
+      - FRONTEND_SERVICE_PORT=5173
+      - BACKEND_SERVICE_NAME=text2image
+      - BACKEND_SERVICE_IP=text2image-server
+      - BACKEND_SERVICE_PORT=9379
+    ipc: host
+    restart: always
+
+networks:
+  default:
+    driver: bridge
\ No newline at end of file
diff --git a/AI/opea/text2image/meta.yml b/AI/opea/text2image/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..ee4b49e9afa63ff59b2371473ff499d093b30722
--- /dev/null
+++ b/AI/opea/text2image/meta.yml
@@ -0,0 +1,3 @@
+1.2-oe2403lts:
+  path: 1.2/24.03-lts/Dockerfile
+  arch: x86_64
\ No newline at end of file
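A quick way to verify that the stack defined in `compose.yml` above came up correctly; this is a generic check rather than part of the upstream docs, and the container name is taken from the compose file:

```bash
# List the three containers started by compose.yml and tail the backend logs
docker compose -f compose.yml ps
docker logs -f text2image-server
```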