diff --git a/AI/image-list.yml b/AI/image-list.yml
index 0494e6de17bba4d54df04545d7b2c05c83c5e34f..0b3cf26465c8108866d556563dd79af562d7ba85 100644
--- a/AI/image-list.yml
+++ b/AI/image-list.yml
@@ -19,11 +19,13 @@ images:
   codegen: opea/codegen
   codetrans-ui: opea/codetrans-ui
   codetrans: opea/codetrans
+  dataprep: opea/dataprep
   dataprep-redis: opea/dataprep-redis
   doc-index-retriever: opea/doc-index-retriever
   docsum-ui: opea/docsum-ui
   docsum: opea/docsum
   embedding-tei: opea/embedding-tei
+  embedding: opea/embedding
   faqgen-ui: opea/faqgen-ui
   faqgen: opea/faqgen
   llm-faqgen: opea/llm-faqgen
@@ -43,6 +45,7 @@ images:
   translation: opea/translation
   tts: opea/tts
   web-retriever-chroma: opea/web-retriever-chroma
+  web-retriever: opea/web-retriever
   whisper: opea/whisper
   finetuning: opea/finetuning
   pytorch: pytorch
@@ -60,6 +63,7 @@ images:
   videoqna-ui: opea/videoqna-ui
   embedding-multimodal-clip: opea/embedding-multimodal-clip
   reranking-videoqna: opea/reranking-videoqna
+  reranking: opea/reranking
   retriever-vdms: opea/retriever-vdms
   text2sql: opea/text2sql
   text2sql-ui: opea/text2sql-ui
diff --git a/AI/opea/dataprep-redis/meta.yml b/AI/opea/dataprep-redis/meta.yml
index 0343efe1949d963f2ec255875c881c6161802a33..55f9e33c3012e71d7d18b43ba328c1fc4595cd5f 100644
--- a/AI/opea/dataprep-redis/meta.yml
+++ b/AI/opea/dataprep-redis/meta.yml
@@ -1,7 +1,3 @@
 1.0-oe2403lts:
   path: 1.0/24.03-lts/Dockerfile
   arch: x86_64
-
-1.2-oe2403lts:
-  path: 1.2/24.03-lts/Dockerfile
-  arch: x86_64
diff --git a/AI/opea/dataprep-redis/1.2/24.03-lts/Dockerfile b/AI/opea/dataprep/1.2/24.03-lts/Dockerfile
similarity index 100%
rename from AI/opea/dataprep-redis/1.2/24.03-lts/Dockerfile
rename to AI/opea/dataprep/1.2/24.03-lts/Dockerfile
diff --git a/AI/opea/dataprep/meta.yml b/AI/opea/dataprep/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2d855cefca21cfe09a49183738c014e3571c06a7
--- /dev/null
+++ b/AI/opea/dataprep/meta.yml
@@ -0,0 +1,3 @@
+1.2-oe2403lts:
+  path: 1.2/24.03-lts/Dockerfile
+  arch: x86_64
diff --git a/AI/opea/embedding/1.2/24.03-lts/Dockerfile b/AI/opea/embedding/1.2/24.03-lts/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..8382b400b06dbc4794a2e325f10b8d591375aa20
--- /dev/null
+++ b/AI/opea/embedding/1.2/24.03-lts/Dockerfile
@@ -0,0 +1,27 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+RUN yum update -y && \
+    yum install -y \
+    python python-pip \
+    git
+
+WORKDIR /home/user
+
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIComps.git
+
+RUN cp -r GenAIComps/comps /home/user/comps && \
+    rm -rf GenAIComps
+
+RUN pip install --no-cache-dir --upgrade pip setuptools && \
+    pip install --no-cache-dir -r /home/user/comps/embeddings/src/requirements.txt
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user
+
+WORKDIR /home/user/comps/embeddings/src/
+
+ENV MULTIMODAL_EMBEDDING="false"
+
+# Set MULTIMODAL_EMBEDDING=true at run time to launch the multimodal embedding service instead
+ENTRYPOINT ["sh", "-c", "python $( [ \"$MULTIMODAL_EMBEDDING\" = \"true\" ] && echo 'opea_multimodal_embedding_microservice.py' || echo 'opea_embedding_microservice.py')"]
diff --git a/AI/opea/embedding/meta.yml b/AI/opea/embedding/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1209cda97ec4fa1d54f9517e033a879788939c06
--- /dev/null
+++ b/AI/opea/embedding/meta.yml
@@ -0,0 +1,3 @@
+1.2-oe2403lts:
+  path: 1.2/24.03-lts/Dockerfile
+  arch: x86_64
diff --git a/AI/opea/reranking/1.2/24.03-lts/Dockerfile b/AI/opea/reranking/1.2/24.03-lts/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..079320ab059c04a181a00373c191f00c6ce6b65a
--- /dev/null
+++ b/AI/opea/reranking/1.2/24.03-lts/Dockerfile
@@ -0,0 +1,53 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+ENV LANG=C.UTF-8
+
+ARG ARCH="cpu"
+ARG SERVICE="all"
+
+RUN yum update -y && \
+    yum install -y \
+    python python-pip \
+    git \
+    mesa-libGL \
+    jemalloc-devel
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user /home/user/
+
+USER user
+
+WORKDIR /home/user
+
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIComps.git
+
+RUN cp -r GenAIComps/comps /home/user/comps && \
+    rm -rf GenAIComps
+
+RUN if [ ${ARCH} = "cpu" ]; then \
+    pip install --no-cache-dir torch --index-url https://download.pytorch.org/whl/cpu; \
+fi && \
+if [ ${SERVICE} = "videoqna" ]; then \
+    pip install --no-cache-dir --upgrade pip setuptools && \
+    pip install --no-cache-dir -r /home/user/comps/rerankings/src/requirements_videoqna.txt; \
+elif [ ${SERVICE} = "all" ]; then \
+    git clone https://github.com/IntelLabs/fastRAG.git /home/user/fastRAG && \
+    cd /home/user/fastRAG && \
+    pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir . && \
+    pip install --no-cache-dir .[intel] && \
+    pip install --no-cache-dir -r /home/user/comps/rerankings/src/requirements_videoqna.txt; \
+fi && \
+pip install --no-cache-dir --upgrade pip setuptools && \
+pip install --no-cache-dir -r /home/user/comps/rerankings/src/requirements.txt;
+
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user
+
+WORKDIR /home/user/comps/rerankings/src
+
+ENTRYPOINT ["python", "opea_reranking_microservice.py"]
\ No newline at end of file
diff --git a/AI/opea/reranking/meta.yml b/AI/opea/reranking/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1209cda97ec4fa1d54f9517e033a879788939c06
--- /dev/null
+++ b/AI/opea/reranking/meta.yml
@@ -0,0 +1,3 @@
+1.2-oe2403lts:
+  path: 1.2/24.03-lts/Dockerfile
+  arch: x86_64
diff --git a/AI/opea/searchqna-ui/1.2/24.03-lts/Dockerfile b/AI/opea/searchqna-ui/1.2/24.03-lts/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..105066170a3e25fd1a6905e6514ba3508534fc76
--- /dev/null
+++ b/AI/opea/searchqna-ui/1.2/24.03-lts/Dockerfile
@@ -0,0 +1,31 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# Use openEuler 24.03 LTS as the base image
+FROM openeuler/openeuler:24.03-lts
+
+# Update packages and install npm and Git
+RUN yum update -y && \
+    yum install -y \
+    npm \
+    git
+
+WORKDIR /home/user
+
+# Clone the front-end code repository
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIExamples.git
+
+# Set the working directory
+WORKDIR /home/user/GenAIExamples/SearchQnA/ui/svelte
+
+# Install front-end dependencies
+RUN npm install
+
+# Build the front-end application
+RUN npm run build
+
+# Expose the port of the front-end application
+EXPOSE 5173
+
+# Run the front-end application in preview mode
+CMD ["npm", "run", "preview", "--", "--port", "5173", "--host", "0.0.0.0"]
\ No newline at end of file
diff --git a/AI/opea/searchqna-ui/meta.yml b/AI/opea/searchqna-ui/meta.yml
index bc70b5d79ba44f3b59700a936723478f80cd740c..2d601655937b6361d0d6df3924ee7ddf389fb8bd 100644
--- a/AI/opea/searchqna-ui/meta.yml
+++ b/AI/opea/searchqna-ui/meta.yml
@@ -1,2 +1,5 @@
 1.0-oe2403lts:
   path: 1.0/24.03-lts/Dockerfile
+
+1.2-oe2403lts:
+  path: 1.2/24.03-lts/Dockerfile
diff --git a/AI/opea/searchqna/1.2/24.03-lts/Dockerfile b/AI/opea/searchqna/1.2/24.03-lts/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..1eff7cfd405f20f86fccf5bbe9ec3f84fd8f8388
--- /dev/null
+++ b/AI/opea/searchqna/1.2/24.03-lts/Dockerfile
@@ -0,0 +1,36 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+# Stage 1: base setup used by other stages
+FROM openeuler/openeuler:24.03-lts
+
+# get security updates
+RUN yum update -y && \
+    yum install -y \
+    python python-pip \
+    git
+
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user /home/user
+
+WORKDIR /home/user
+
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIComps.git
+
+WORKDIR /home/user/GenAIComps
+RUN pip install --no-cache-dir --upgrade pip setuptools && \
+    pip install --no-cache-dir -r /home/user/GenAIComps/requirements.txt
+WORKDIR /home/user
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user/GenAIComps
+
+USER user
+
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIExamples.git
+
+RUN cp -r GenAIExamples/SearchQnA/searchqna.py /home/user/searchqna.py && \
+    rm -rf GenAIExamples
+
+ENTRYPOINT ["python", "searchqna.py"]
\ No newline at end of file
diff --git a/AI/opea/searchqna/README.md b/AI/opea/searchqna/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e099ab0b3dc181be2f9b0cb10e9d649baf386538
--- /dev/null
+++ b/AI/opea/searchqna/README.md
@@ -0,0 +1,102 @@
+# Quick reference
+
+- The official OPEA Docker images
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# OPEA | openEuler
+
+Current OPEA Docker images are built on [openEuler](https://repo.openeuler.org/). This repository is free to use and exempt from per-user rate limits.
+
+OPEA is an open platform project that lets you create open, multi-provider, robust, and composable GenAI solutions that harness the best innovation across the ecosystem.
+
+The OPEA platform includes:
+
+- A detailed framework of composable building blocks for state-of-the-art generative AI systems, including LLMs, data stores, and prompt engines
+
+- Architectural blueprints of the retrieval-augmented generative AI component stack and end-to-end workflows
+
+- A four-step assessment for grading generative AI systems on performance, features, trustworthiness, and enterprise-grade readiness
+
+Read more about OPEA at [opea.dev](https://opea.dev/) and explore the OPEA technical documentation at [opea-project.github.io](https://opea-project.github.io/).
+
+# Supported tags and respective Dockerfile links
+
+The tag of each SearchQnA Docker image consists of the SearchQnA version and the base image version. The details are as follows:
+
+| Tags | Description | Architectures |
+|--|--|--|
+|[1.0-oe2403lts](https://gitee.com/openeuler/openeuler-docker-images/blob/master/AI/opea/searchqna/1.0/24.03-lts/Dockerfile)| SearchQnA 1.0 on openEuler 24.03-LTS | amd64 |
+|[1.2-oe2403lts](https://gitee.com/openeuler/openeuler-docker-images/blob/master/AI/opea/searchqna/1.2/24.03-lts/Dockerfile)| SearchQnA 1.2 on openEuler 24.03-LTS | amd64 |
+
+# Usage
+
+The SearchQnA service can be deployed on Intel Gaudi2, Intel Xeon Scalable processors, and NVIDIA GPUs.
+
+Two types of SearchQnA pipelines are supported now: `SearchQnA with Rerank` and `SearchQnA without Rerank`. The `SearchQnA without Rerank` pipeline (including Embedding, Retrieval, and LLM) is offered for Xeon customers who cannot run the rerank service on HPU yet require high performance and accuracy.
+
+Quick Start Deployment Steps:
+
+1. Set up the environment variables.
+2. Run Docker Compose.
+3. Consume the SearchQnA service.
+
+### Quick Start: 1. Set Up Environment Variables
+
+To set up environment variables for deploying SearchQnA services, follow these steps:
+
+1. Set the required environment variables:
+
+   ```bash
+   # Example: host_ip="192.168.1.1"
+   export host_ip="External_Public_IP"
+   # Example: no_proxy="localhost, 127.0.0.1, 192.168.1.1"
+   export no_proxy="Your_No_Proxy"
+   export HUGGINGFACEHUB_API_TOKEN="Your_Huggingface_API_Token"
+   # The web retriever in compose.yml also expects Google Programmable Search credentials
+   export GOOGLE_API_KEY="Your_Google_API_Key"
+   export GOOGLE_CSE_ID="Your_Google_CSE_ID"
+   ```
+
+2. If you are in a proxy environment, also set the proxy-related environment variables:
+
+   ```bash
+   export http_proxy="Your_HTTP_Proxy"
+   export https_proxy="Your_HTTPs_Proxy"
+   ```
+
+3. Set up other environment variables:
+
+   > Get `set_env.sh` here: [set_env.sh](https://gitee.com/openeuler/openeuler-docker-images/tree/master/AI/opea/searchqna/doc/set_env.sh)
+
+   ```bash
+   # on Xeon
+   source set_env.sh
+   ```
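+
+   As a quick sanity check (a sketch, assuming `set_env.sh` was sourced in the current shell), confirm the derived endpoints are set:
+
+   ```bash
+   # These endpoints are exported by set_env.sh above
+   echo "${TEI_EMBEDDING_ENDPOINT} ${TEI_RERANKING_ENDPOINT} ${TGI_LLM_ENDPOINT}"
+   ```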
+
+### Quick Start: 2. Run Docker Compose
+
+Select the compose file that matches your hardware.
+
+CPU example:
+
+> Get `compose.yml` here: [compose.yml](https://gitee.com/openeuler/openeuler-docker-images/tree/master/AI/opea/searchqna/doc/compose.yml)
+
+```bash
+docker compose -f compose.yml up -d
+```
+
+Docker Compose automatically pulls the required images from Docker Hub; you can also pull them manually:
+
+```bash
+docker pull openeuler/searchqna:latest
+docker pull openeuler/searchqna-ui:latest
+```
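+
+After the stack is up, you can verify that every container started; the names reported should match the `container_name` entries in `compose.yml`:
+
+```bash
+# List the SearchQnA containers and their status
+docker compose -f compose.yml ps
+```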
+
+### Quick Start: 3. Consume the SearchQnA Service
+
+```bash
+curl http://${host_ip}:3008/v1/searchqna -H "Content-Type: application/json" -d '{
+     "messages": "What is the latest news? Give me also the source link.",
+     "stream": "True"
+     }'
+```
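+
+The request above streams the answer back. Assuming the service accepts the same payload with streaming disabled (the `stream` field is shown above), a non-streaming variant would be:
+
+```bash
+curl http://${host_ip}:3008/v1/searchqna \
+  -H "Content-Type: application/json" \
+  -d '{"messages": "What is the latest news? Give me also the source link.", "stream": "False"}'
+```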
\ No newline at end of file
diff --git a/AI/opea/searchqna/doc/compose.yml b/AI/opea/searchqna/doc/compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..03d286db95813f814b4e60b827eebc8ee9eacd4d
--- /dev/null
+++ b/AI/opea/searchqna/doc/compose.yml
@@ -0,0 +1,165 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+services:
+  tei-embedding-service:
+    image: openeuler/text-embeddings-inference-cpu:1.5.0-oe2403lts
+    entrypoint: /bin/sh -c "yum update -y && yum install -y curl && text-embeddings-router --json-output --model-id ${EMBEDDING_MODEL_ID} --auto-truncate"
+    container_name: tei-embedding-server
+    ports:
+      - "3001:80"
+    volumes:
+      - "./data:/data"
+    shm_size: 1g
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      host_ip: ${host_ip}
+  embedding:
+    image: openeuler/embedding:1.2-oe2403lts
+    container_name: embedding-server
+    depends_on:
+      - tei-embedding-service
+    ports:
+      - "3002:6000"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      LOGFLAG: ${LOGFLAG}
+    restart: unless-stopped
+  web-retriever:
+    image: openeuler/web-retriever:1.2-oe2403lts
+    container_name: web-retriever-server
+    ports:
+      - "3003:7077"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      TEI_EMBEDDING_ENDPOINT: ${TEI_EMBEDDING_ENDPOINT}
+      GOOGLE_API_KEY: ${GOOGLE_API_KEY}
+      GOOGLE_CSE_ID: ${GOOGLE_CSE_ID}
+      LOGFLAG: ${LOGFLAG}
+    restart: unless-stopped
+  tei-reranking-service:
+    image: openeuler/text-embeddings-inference-cpu:1.5.0-oe2403lts
+    entrypoint: /bin/sh -c "yum update -y && yum install -y curl && text-embeddings-router --json-output --model-id ${RERANK_MODEL_ID} --auto-truncate"
+    container_name: tei-reranking-server
+    ports:
+      - "3004:80"
+    volumes:
+      - "./data:/data"
+    shm_size: 1g
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      host_ip: ${host_ip}
+  reranking:
+    image: openeuler/reranking:1.2-oe2403lts
+    container_name: reranking-tei-xeon-server
+    depends_on:
+      - tei-reranking-service
+    ports:
+      - "3005:8000"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      RERANK_TYPE: ${RERANK_TYPE}
+      TEI_RERANKING_ENDPOINT: ${TEI_RERANKING_ENDPOINT}
+      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      LOGFLAG: ${LOGFLAG}
+    restart: unless-stopped
+  tgi-service:
+    image: openeuler/text-generation-inference-cpu:2.4.0-oe2403lts
+    container_name: tgi-service
+    ports:
+      - "3006:80"
+    volumes:
+      - "./data:/data"
+    shm_size: 1g
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      HF_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      host_ip: ${host_ip}
+    healthcheck:
+      test: ["CMD-SHELL", "curl -f http://$host_ip:3006/health || exit 1"]
+      interval: 10s
+      timeout: 10s
+      retries: 100
+    command: --model-id ${LLM_MODEL_ID} --cuda-graphs 0
+  llm:
+    image: openeuler/llm-textgen:1.2-oe2403lts
+    container_name: llm-textgen-server
+    depends_on:
+      - tgi-service
+    ports:
+      - "3007:9000"
+    ipc: host
+    environment:
+      no_proxy: ${no_proxy}
+      http_proxy: ${http_proxy}
+      https_proxy: ${https_proxy}
+      LLM_ENDPOINT: ${TGI_LLM_ENDPOINT}
+      LLM_MODEL_ID: ${LLM_MODEL_ID}
+      HUGGINGFACEHUB_API_TOKEN: ${HUGGINGFACEHUB_API_TOKEN}
+      LOGFLAG: ${LOGFLAG}
+    restart: unless-stopped
+  searchqna-xeon-backend-server:
+    image: openeuler/searchqna:1.2-oe2403lts
+    container_name: searchqna-xeon-backend-server
+    depends_on:
+      - tei-embedding-service
+      - embedding
+      - web-retriever
+      - tei-reranking-service
+      - reranking
+      - tgi-service
+      - llm
+    ports:
+      - "3008:8888"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - MEGA_SERVICE_HOST_IP=${MEGA_SERVICE_HOST_IP}
+      - EMBEDDING_SERVICE_HOST_IP=${EMBEDDING_SERVICE_HOST_IP}
+      - WEB_RETRIEVER_SERVICE_HOST_IP=${WEB_RETRIEVER_SERVICE_HOST_IP}
+      - RERANK_SERVICE_HOST_IP=${RERANK_SERVICE_HOST_IP}
+      - LLM_SERVICE_HOST_IP=${LLM_SERVICE_HOST_IP}
+      - EMBEDDING_SERVICE_PORT=${EMBEDDING_SERVICE_PORT}
+      - WEB_RETRIEVER_SERVICE_PORT=${WEB_RETRIEVER_SERVICE_PORT}
+      - RERANK_SERVICE_PORT=${RERANK_SERVICE_PORT}
+      - LLM_SERVICE_PORT=${LLM_SERVICE_PORT}
+      - LOGFLAG=${LOGFLAG}
+    ipc: host
+    restart: always
+  searchqna-xeon-ui-server:
+    image: openeuler/searchqna-ui:1.2-oe2403lts
+    container_name: searchqna-xeon-ui-server
+    depends_on:
+      - searchqna-xeon-backend-server
+    ports:
+      - "5173:5173"
+    environment:
+      - no_proxy=${no_proxy}
+      - https_proxy=${https_proxy}
+      - http_proxy=${http_proxy}
+      - BACKEND_BASE_URL=${BACKEND_SERVICE_ENDPOINT}
+    ipc: host
+    restart: always
+
+
+networks:
+  default:
+    driver: bridge
diff --git a/AI/opea/searchqna/doc/set_env.sh b/AI/opea/searchqna/doc/set_env.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8e60e0ce083fdb1dc6cb08fa13c62e08e0b36495
--- /dev/null
+++ b/AI/opea/searchqna/doc/set_env.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+export EMBEDDING_MODEL_ID=BAAI/bge-base-en-v1.5
+export TEI_EMBEDDING_ENDPOINT=http://${host_ip}:3001
+export RERANK_MODEL_ID=BAAI/bge-reranker-base
+export TEI_RERANKING_ENDPOINT=http://${host_ip}:3004
+
+export TGI_LLM_ENDPOINT=http://${host_ip}:3006
+export LLM_MODEL_ID=Intel/neural-chat-7b-v3-3
+
+export MEGA_SERVICE_HOST_IP=${host_ip}
+export EMBEDDING_SERVICE_HOST_IP=${host_ip}
+export WEB_RETRIEVER_SERVICE_HOST_IP=${host_ip}
+export RERANK_SERVICE_HOST_IP=${host_ip}
+export LLM_SERVICE_HOST_IP=${host_ip}
+
+export EMBEDDING_SERVICE_PORT=3002
+export WEB_RETRIEVER_SERVICE_PORT=3003
+export RERANK_SERVICE_PORT=3005
+export LLM_SERVICE_PORT=3007
diff --git a/AI/opea/searchqna/meta.yml b/AI/opea/searchqna/meta.yml
index bc70b5d79ba44f3b59700a936723478f80cd740c..2d601655937b6361d0d6df3924ee7ddf389fb8bd 100644
--- a/AI/opea/searchqna/meta.yml
+++ b/AI/opea/searchqna/meta.yml
@@ -1,2 +1,5 @@
 1.0-oe2403lts:
   path: 1.0/24.03-lts/Dockerfile
+
+1.2-oe2403lts:
+  path: 1.2/24.03-lts/Dockerfile
diff --git a/AI/opea/web-retriever/1.2/24.03-lts/Dockerfile b/AI/opea/web-retriever/1.2/24.03-lts/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..6579e58ce065d451988c863b75e9463ff303f8d5
--- /dev/null
+++ b/AI/opea/web-retriever/1.2/24.03-lts/Dockerfile
@@ -0,0 +1,40 @@
+# Copyright (C) 2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+FROM openeuler/openeuler:24.03-lts
+
+ARG ARCH="cpu" # Set this to "cpu" or "gpu"
+
+RUN yum update -y && \
+    yum install -y \
+    python python-pip \
+    mesa-libGL \
+    jemalloc-devel \
+    curl \
+    git
+
+RUN useradd -m -s /bin/bash user && \
+    mkdir -p /home/user && \
+    chown -R user /home/user/
+
+USER user
+
+WORKDIR /home/user
+
+RUN git clone -b v1.2 https://github.com/opea-project/GenAIComps.git
+
+RUN cp -r GenAIComps/comps /home/user/comps && \
+    rm -rf GenAIComps
+
+RUN pip install --no-cache-dir --upgrade pip setuptools && \
+    if [ ${ARCH} = "cpu" ]; then \
+        pip install --no-cache-dir --extra-index-url https://download.pytorch.org/whl/cpu -r /home/user/comps/web_retrievers/src/requirements.txt; \
+    else \
+        pip install --no-cache-dir -r /home/user/comps/web_retrievers/src/requirements.txt; \
+    fi
+
+ENV PYTHONPATH=$PYTHONPATH:/home/user
+
+WORKDIR /home/user/comps/web_retrievers/src
+
+ENTRYPOINT ["python", "opea_web_retrievers_microservice.py"]
\ No newline at end of file
diff --git a/AI/opea/web-retriever/meta.yml b/AI/opea/web-retriever/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2d855cefca21cfe09a49183738c014e3571c06a7
--- /dev/null
+++ b/AI/opea/web-retriever/meta.yml
@@ -0,0 +1,3 @@
+1.2-oe2403lts:
+  path: 1.2/24.03-lts/Dockerfile
+  arch: x86_64