diff --git a/AI/vllm-cpu/0.8.5/22.03-lts-sp4/Dockerfile b/AI/vllm-cpu/0.8.5/22.03-lts-sp4/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..8662e9dafe5fb6b8d9c72a0ec15b43f7a0a97b17
--- /dev/null
+++ b/AI/vllm-cpu/0.8.5/22.03-lts-sp4/Dockerfile
@@ -0,0 +1,34 @@
+# vLLM v0.8.5 CPU-only inference server on openEuler 22.03 LTS SP4.
+ARG BASE=openeuler/openeuler:22.03-lts-sp4
+ARG VERSION=v0.8.5
+
+FROM ${BASE}
+
+# Re-declare: an ARG before FROM is only visible in FROM lines.
+ARG VERSION
+
+# gcc-toolset-12 supplies the newer C/C++ toolchain needed to compile vLLM's CPU backend.
+RUN yum update -y && \
+    yum install -y make gcc-toolset-12-gcc gcc-toolset-12-gcc-c++ python python-pip python3-devel git vim wget net-tools numactl-devel && \
+    rm -rf /var/cache/yum
+
+ENV PATH=/opt/openEuler/gcc-toolset-12/root/usr/bin/:$PATH
+ENV LD_LIBRARY_PATH=/opt/openEuler/gcc-toolset-12/root/usr/lib64/:$LD_LIBRARY_PATH
+
+WORKDIR /workspace
+
+# Shallow clone of the release tag only; full history is not needed for the build.
+RUN git clone --depth 1 -b ${VERSION} https://github.com/vllm-project/vllm.git
+
+WORKDIR /workspace/vllm
+
+# Build prerequisites; --no-cache-dir keeps the pip cache out of the image layer.
+RUN pip install --no-cache-dir "cmake>=3.26" wheel packaging ninja "setuptools-scm>=8" numpy
+
+# Build and install the CPU backend; CPU-only torch wheels come from the PyTorch index.
+RUN VLLM_TARGET_DEVICE="cpu" pip install --no-cache-dir -e . --extra-index-url https://download.pytorch.org/whl/cpu
+
+# Documentation only: default port of the OpenAI-compatible API server.
+EXPOSE 8000
+
+ENTRYPOINT ["python", "-m", "vllm.entrypoints.openai.api_server"]
diff --git a/AI/vllm-cpu/0.9.0/22.03-lts-sp4/Dockerfile b/AI/vllm-cpu/0.9.0/22.03-lts-sp4/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..6f7b0be22561e61d328456a85245b6d04acdab1a
--- /dev/null
+++ b/AI/vllm-cpu/0.9.0/22.03-lts-sp4/Dockerfile
@@ -0,0 +1,34 @@
+# vLLM v0.9.0 CPU-only inference server on openEuler 22.03 LTS SP4.
+ARG BASE=openeuler/openeuler:22.03-lts-sp4
+ARG VERSION=v0.9.0
+
+FROM ${BASE}
+
+# Re-declare: an ARG before FROM is only visible in FROM lines.
+ARG VERSION
+
+# gcc-toolset-12 supplies the newer C/C++ toolchain needed to compile vLLM's CPU backend.
+RUN yum update -y && \
+    yum install -y make gcc-toolset-12-gcc gcc-toolset-12-gcc-c++ python python-pip python3-devel git vim wget net-tools numactl-devel && \
+    rm -rf /var/cache/yum
+
+ENV PATH=/opt/openEuler/gcc-toolset-12/root/usr/bin/:$PATH
+ENV LD_LIBRARY_PATH=/opt/openEuler/gcc-toolset-12/root/usr/lib64/:$LD_LIBRARY_PATH
+
+WORKDIR /workspace
+
+# Shallow clone of the release tag only; full history is not needed for the build.
+RUN git clone --depth 1 -b ${VERSION} https://github.com/vllm-project/vllm.git
+
+WORKDIR /workspace/vllm
+
+# Build prerequisites on ONE line (the original patch split this RUN without a
+# continuation backslash, which breaks both the patch and the build).
+RUN pip install --no-cache-dir "cmake>=3.26" wheel packaging ninja "setuptools-scm>=8" numpy
+
+# Build and install the CPU backend; CPU-only torch wheels come from the PyTorch index.
+RUN VLLM_TARGET_DEVICE="cpu" pip install --no-cache-dir -e . --extra-index-url https://download.pytorch.org/whl/cpu
+
+# Documentation only: default port of the OpenAI-compatible API server.
+EXPOSE 8000
+
+ENTRYPOINT ["python", "-m", "vllm.entrypoints.openai.api_server"]
diff --git a/AI/vllm-cpu/meta.yml b/AI/vllm-cpu/meta.yml
index 88d4440d7ee4ba8228cff3370c2bef0e167f83b2..d3532d2c0e2591c80b760da8e143ccb08bbd2e1d 100644
--- a/AI/vllm-cpu/meta.yml
+++ b/AI/vllm-cpu/meta.yml
@@ -16,4 +16,12 @@
 
 0.8.4-oe2203sp4:
   path: 0.8.4/22.03-lts-sp4/Dockerfile
-  arch: x86_64
\ No newline at end of file
+  arch: x86_64
+
+# arch kept consistent with the 0.8.4 entry; drop it if aarch64 builds are intended.
+0.8.5-oe2203sp4:
+  path: 0.8.5/22.03-lts-sp4/Dockerfile
+  arch: x86_64
+
+0.9.0-oe2203sp4:
+  path: 0.9.0/22.03-lts-sp4/Dockerfile
+  arch: x86_64