diff --git a/cuda/README.md b/cuda/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c4f6b76f545499e17e0189b82ac7b5a4b37b59fb
--- /dev/null
+++ b/cuda/README.md
@@ -0,0 +1,30 @@
+# cuda
+
+# Quick reference
+
+- CUDA devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build the image and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for details.
+
+2. Run:
+```shell
+docker run -d -it --gpus all openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- cuda12.2.0-cudnn8.9.5.30-oe2203sp2: CUDA 12.2.0 with cuDNN 8.9.5.30, openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
diff --git a/cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..b0129e673f33bbe2482086a60904be6933e42442
--- /dev/null
+++ b/cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,46 @@
+FROM openeuler/openeuler:22.03-lts-sp2
+
+ARG CUDA_URL=https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda_12.2.0_535.54.03_linux.run
+ARG LIBNCCL_URL=https://developer.download.nvidia.cn/compute/cuda/repos/rhel8/x86_64/libnccl-2.18.5-1+cuda12.2.x86_64.rpm
+ARG CUDA_CUPTI_URL=https://developer.download.nvidia.cn/compute/cuda/repos/rhel8/x86_64/cuda-cupti-12-2-12.2.142-1.x86_64.rpm
+ARG CUDNN_LINUX=cudnn-linux-x86_64-8.9.5.30_cuda12-archive
+ARG CUDNN_URL=https://repo.oepkgs.net/openeuler/rpm/openEuler-22.03-LTS-SP2/contrib/Artificial_Intelligence/source/Packages/${CUDNN_LINUX}.tar.xz
+ARG NCCL=nccl_2.19.3-1+cuda12.2_x86_64
+ARG NCCL_URL=https://repo.oepkgs.net/openeuler/rpm/openEuler-22.03-LTS-SP2/contrib/Artificial_Intelligence/source/Packages/nccl_2.19.3-1%2Bcuda12.2_x86_64.txz
+
+ENV PATH="$PATH:/usr/local/cuda/bin"
+ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64"
+
+RUN dnf install wget kmod gcc tar make dkms xz python3 python3-pip -y && \
+    rm -rf /var/cache/dnf
+
+RUN wget ${CUDA_URL} -O ~/cuda-toolkit.run && \
+    sh ~/cuda-toolkit.run --silent --toolkit && \
+    rm -rf ~/cuda-toolkit.run && \
+    find /usr/local/cuda/ -name "*.a" | xargs rm -f
+
+RUN wget ${LIBNCCL_URL} -O ~/libnccl.rpm && \
+    dnf install -y ~/libnccl.rpm && \
+    rm -rf ~/libnccl.rpm && \
+    find /usr/local/cuda/ -name "*.a" | xargs rm -f
+
+RUN wget ${CUDA_CUPTI_URL} -O ~/cuda-cupti.rpm && \
+    dnf install -y ~/cuda-cupti.rpm && \
+    rm -rf ~/cuda-cupti.rpm && \
+    find /usr/local/cuda/ -name "*.a" | xargs rm -f
+
+RUN cd ~/ && wget ${CUDNN_URL} && \
+    tar -xf ${CUDNN_LINUX}.tar.xz && \
+    cp ~/${CUDNN_LINUX}/include/* /usr/local/cuda/include/ && \
+    cp ~/${CUDNN_LINUX}/lib/* /usr/local/cuda/lib64/ && \
+    rm -rf ~/${CUDNN_LINUX}* && \
+    find /usr/local/cuda/ -name "*.a" | xargs rm -f
+
+RUN cd ~/ && wget ${NCCL_URL} && \
+    tar -xf ${NCCL}.txz && \
+    cp ~/${NCCL}/include/* /usr/local/cuda/include/ && \
+    cp -r ~/${NCCL}/lib/* /usr/local/cuda/lib64/ && \
+    rm -rf ~/${NCCL}* && \
+    find /usr/local/cuda/ -name "*.a" | xargs rm -f
+
+WORKDIR /root
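A quick, hedged smoke test for the image built from the Dockerfile above (these commands are illustrative and not part of the change; `nvcc` and `cudnn_version.h` land where the install steps above put them):

```shell
# Verify the CUDA toolkit version shipped in the image.
docker run --rm --gpus all openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
    nvcc --version
# Verify the cuDNN headers copied into /usr/local/cuda/include.
docker run --rm --gpus all openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
    bash -c 'grep -m1 CUDNN_MAJOR /usr/local/cuda/include/cudnn_version.h'
```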
diff --git a/cuda/meta.yml b/cuda/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6939e0591473a814a8913237be36f95d7ce3d19f
--- /dev/null
+++ b/cuda/meta.yml
@@ -0,0 +1,2 @@
+cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
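Each `meta.yml` in this change maps a supported image tag to the Dockerfile that produces it. As a hedged sketch of how that mapping could be consumed (the `yq` dependency, the loop, and any CI wiring are assumptions, not part of this change), a build script might iterate over the file like this:

```shell
# Hypothetical helper, assuming yq v4 is installed; not part of this change.
# Builds and pushes every tag listed in a meta.yml.
repo=openeuler/cuda
for tag in $(yq 'keys | .[]' cuda/meta.yml); do
  dockerfile=$(yq ".[\"$tag\"][0]" cuda/meta.yml)
  docker buildx build -t "$repo:$tag" --platform linux/x86_64 \
      -f "$dockerfile" "$(dirname "$dockerfile")" --push
done
```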
diff --git a/llm/README.md b/llm/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8ef2197b0f2c8f26b85464e99ddabc9c605014ce
--- /dev/null
+++ b/llm/README.md
@@ -0,0 +1,57 @@
+# llm
+
+# Quick reference
+
+- LLM devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build the llama2 int8 images and push:
+```shell
+cd pytorch/pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../llm/llama2-int8-pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/llm:llama2-int8-pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+2. Build the llama2 images and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../llm/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/llm:llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+3. Build the chatglm2 int8 image and push:
+```shell
+cd pytorch/pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../llm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/llm:chatglm2-6b-int8-pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+4. Build the chatglm2 image and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../llm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/llm:chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for details.
+
+5. Run:
+```shell
+docker run -d -it openeuler/llm:llama2-int8-pytorch2.1.0-oe2203sp2
+docker run -d -it --gpus all openeuler/llm:llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+docker run -d -it openeuler/llm:chatglm2-6b-int8-pytorch2.1.0-oe2203sp2
+docker run -d -it --gpus all openeuler/llm:chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- llama2-int8-pytorch2.1.0-oe2203sp2: Llama 2 int8 quantized model, openEuler 22.03 LTS-SP2
+- llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: Llama 2 model, openEuler 22.03 LTS-SP2
+- chatglm2-6b-int8-pytorch2.1.0-oe2203sp2: ChatGLM2-6B int8 quantized model, openEuler 22.03 LTS-SP2
+- chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: ChatGLM2-6B model, openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
diff --git a/llm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile b/llm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..5c24476d37e63f0e7c739a3a4ceea3a834016099
--- /dev/null
+++ b/llm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
@@ -0,0 +1,29 @@
+FROM openeuler/pytorch:pytorch2.1.0-oe2203sp2
+
+RUN dnf install -y git python3 python3-pip cmake make gcc gcc-c++ && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN pip install --no-cache-dir torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cpu
+
+RUN cd ~/ && \
+    git clone https://github.com/li-plus/chatglm.cpp.git && \
+    cd chatglm.cpp && \
+    git checkout v0.3.0 && \
+    cd ~/chatglm.cpp && \
+    git submodule update --init --recursive && \
+    cmake -B build && \
+    cmake --build build -j --config Release && \
+    pip install --no-cache-dir torch tabulate tqdm transformers accelerate sentencepiece
+
+RUN cd ~/ && \
+    pip install --no-cache-dir huggingface_hub && \
+    export HF_ENDPOINT=https://hf-mirror.com && \
+    huggingface-cli download --resume-download THUDM/chatglm2-6b --local-dir chatglm2-6b && \
+    python3 chatglm.cpp/chatglm_cpp/convert.py -i chatglm2-6b -t q8_0 -o chatglm2-6b-int8.bin && \
+    rm -rf chatglm2-6b && \
+    rm -rf /root/.cache/huggingface
+
+WORKDIR /root/chatglm.cpp
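A hedged usage sketch for the int8 image above: chatglm.cpp v0.3.0 builds a `main` binary under `build/bin/`, and the convert step above writes the weights to `/root/chatglm2-6b-int8.bin`, one level above the `/root/chatglm.cpp` workdir. The prompt is purely illustrative:

```shell
# Chat with the int8 ChatGLM2 weights baked into the image.
docker run -it openeuler/llm:chatglm2-6b-int8-pytorch2.1.0-oe2203sp2 \
    ./build/bin/main -m ../chatglm2-6b-int8.bin -p "你好"
```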
diff --git a/llm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/llm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..2f44eba045dd271ce5541e410d0693c69977a196
--- /dev/null
+++ b/llm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,23 @@
+FROM openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+
+RUN dnf install -y git && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN cd ~/ && \
+    git clone https://github.com/THUDM/ChatGLM2-6B.git && \
+    cd ChatGLM2-6B && \
+    pip install --no-cache-dir -r requirements.txt && \
+    sed -i 's/THUDM/\/root/g' web_demo.py && \
+    sed -i 's/THUDM/\/root/g' web_demo2.py && \
+    sed -i 's/THUDM/\/root/g' cli_demo.py && \
+    pip install --no-cache-dir gradio==3.50.0
+
+RUN cd ~/ && \
+    pip install --no-cache-dir huggingface_hub && \
+    export HF_ENDPOINT=https://hf-mirror.com && \
+    huggingface-cli download --resume-download THUDM/chatglm2-6b --local-dir chatglm2-6b
+
+WORKDIR /root/ChatGLM2-6B
diff --git a/llm/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile b/llm/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..5db4f6af18f224c7f1a4aab3e12a6978517eba06
--- /dev/null
+++ b/llm/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
@@ -0,0 +1,21 @@
+FROM openeuler/pytorch:pytorch2.1.0-oe2203sp2
+
+RUN dnf install -y wget git python3 python3-pip cmake make gcc gcc-c++ tar && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN cd ~/ && git clone https://github.com/ggerganov/llama.cpp.git && cd llama.cpp && git checkout b1555
+
+RUN cd ~/llama.cpp && \
+    cmake -B build && cmake --build build --config Release && \
+    pip install --no-cache-dir -r requirements.txt && \
+    wget https://repo.oepkgs.net/openeuler/rpm/openEuler-22.03-LTS-SP2/contrib/Artificial_Intelligence/model/llama/llama.tar.gz && \
+    tar -xf llama.tar.gz && \
+    sed -i '924d' convert.py && \
+    python3 convert.py ./llama/llama-2-7b && \
+    ./build/bin/quantize ./llama/llama-2-7b/ggml-model-f16.gguf ./llama-2-7b-q8_0.gguf q8_0 && \
+    rm -rf llama llama.tar.gz
+
+WORKDIR /root/llama.cpp
diff --git a/llm/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/llm/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..993b7a6e25adf683386295a8ce8bb4e094820afd
--- /dev/null
+++ b/llm/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,16 @@
+FROM openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+
+RUN dnf install -y git python3 python3-pip wget && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+
+RUN cd ~/ && \
+    wget https://repo.oepkgs.net/openeuler/rpm/openEuler-22.03-LTS-SP2/contrib/Artificial_Intelligence/model/llama/llama.tar.gz && \
+    tar -xf llama.tar.gz && rm -rf llama.tar.gz && \
+    cd llama && \
+    pip install --no-cache-dir -e .
+
+WORKDIR /root/llama
diff --git a/llm/meta.yml b/llm/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2fc139c849f25d17e89578be6ddddb1df852a99d
--- /dev/null
+++ b/llm/meta.yml
@@ -0,0 +1,11 @@
+chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - llm/chatglm2-6b-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
+
+chatglm2-6b-int8-pytorch2.1.0-oe2203sp2:
+  - llm/chatglm2-6b-int8-pytorch2.1.0-oe2203sp2/Dockerfile
+
+llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - llm/llama2-pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
+
+llama2-int8-pytorch2.1.0-oe2203sp2:
+  - llm/llama2-int8-pytorch2.1.0-oe2203sp2/Dockerfile
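A hedged usage sketch for the llama2 int8 image above: at tag b1555, llama.cpp's CMake build places the `main` binary under `build/bin/`, and the quantize step leaves `llama-2-7b-q8_0.gguf` in the `/root/llama.cpp` workdir. The prompt and token count are illustrative:

```shell
# Run the quantized Llama 2 weights with llama.cpp's main binary.
docker run -it openeuler/llm:llama2-int8-pytorch2.1.0-oe2203sp2 \
    ./build/bin/main -m ./llama-2-7b-q8_0.gguf -p "Hello," -n 64
```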
diff --git a/pytorch/README.md b/pytorch/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..979e602b42a2dfcb1231967065bb913262b7af53
--- /dev/null
+++ b/pytorch/README.md
@@ -0,0 +1,38 @@
+# pytorch
+
+# Quick reference
+
+- PyTorch devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build the CPU image and push:
+```shell
+cd pytorch/pytorch2.1.0-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+2. Build the GPU images and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for details.
+
+3. Run:
+```shell
+docker run -d -it openeuler/pytorch:pytorch2.1.0-oe2203sp2
+docker run -d -it --gpus all openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- pytorch2.1.0-oe2203sp2: PyTorch 2.1.0 (CPU), openEuler 22.03 LTS-SP2
+- pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: PyTorch 2.1.0 (GPU), openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
diff --git a/pytorch/meta.yml b/pytorch/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..884390cdda44027491f9a28345b0d45b41b120f0
--- /dev/null
+++ b/pytorch/meta.yml
@@ -0,0 +1,5 @@
+pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
+
+pytorch2.1.0-oe2203sp2:
+  - pytorch/pytorch2.1.0-oe2203sp2/Dockerfile
diff --git a/pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..fed90f9e54fef727199c6296031d8999b2a6ab72
--- /dev/null
+++ b/pytorch/pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,9 @@
+FROM openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+
+RUN pip install --no-cache-dir torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 numpy
+
+WORKDIR /root
diff --git a/pytorch/pytorch2.1.0-oe2203sp2/Dockerfile b/pytorch/pytorch2.1.0-oe2203sp2/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..2175fb1648500c933e2ac1a9dea70b81f6110b46
--- /dev/null
+++ b/pytorch/pytorch2.1.0-oe2203sp2/Dockerfile
@@ -0,0 +1,12 @@
+FROM openeuler/openeuler:22.03-lts-sp2
+
+RUN dnf install python3 python3-pip -y && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN pip install --no-cache-dir torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cpu
+RUN pip install --no-cache-dir numpy
+
+WORKDIR /root
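A quick, hedged smoke test for the PyTorch images above (illustrative commands, not part of the change):

```shell
# CPU image: confirm the installed torch version.
docker run --rm openeuler/pytorch:pytorch2.1.0-oe2203sp2 \
    python3 -c "import torch; print(torch.__version__)"
# GPU image: confirm CUDA is visible to torch (requires a GPU on the host).
docker run --rm --gpus all openeuler/pytorch:pytorch2.1.0-cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
    python3 -c "import torch; print(torch.cuda.is_available())"
```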
diff --git a/tensorflow/README.md b/tensorflow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7eeda33fa876621235c45c27a24006014e1f5f8c
--- /dev/null
+++ b/tensorflow/README.md
@@ -0,0 +1,39 @@
+# tensorflow
+
+# Quick reference
+
+- TensorFlow devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build the CPU image and push:
+```shell
+cd tensorflow/tensorflow2.15.0-oe2203sp2/ && docker buildx build -t "openeuler/tensorflow:tensorflow2.15.0-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+2. Build the GPU images and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/tensorflow:tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for details.
+
+3. Run:
+```shell
+docker run -d -it openeuler/tensorflow:tensorflow2.15.0-oe2203sp2
+docker run -d -it --gpus all openeuler/tensorflow:tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- tensorflow2.15.0-oe2203sp2: TensorFlow 2.15.0 (CPU), openEuler 22.03 LTS-SP2
+- tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: TensorFlow 2.15.0 with TensorRT 8.6.1 and CUDA 12.2.0, openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
diff --git a/tensorflow/meta.yml b/tensorflow/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1c37779dfd12b39c386f41fcd1395a4a403d6987
--- /dev/null
+++ b/tensorflow/meta.yml
@@ -0,0 +1,5 @@
+tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
+
+tensorflow2.15.0-oe2203sp2:
+  - tensorflow/tensorflow2.15.0-oe2203sp2/Dockerfile
diff --git a/tensorflow/tensorflow2.15.0-oe2203sp2/Dockerfile b/tensorflow/tensorflow2.15.0-oe2203sp2/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..1310fd1897514caf2dc3b0bcf53a62b8aeb86d32
--- /dev/null
+++ b/tensorflow/tensorflow2.15.0-oe2203sp2/Dockerfile
@@ -0,0 +1,11 @@
+FROM openeuler/openeuler:22.03-lts-sp2
+
+RUN dnf install wget tar xz python3-pip -y && \
+    rm -rf /var/cache/dnf
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN pip install --no-cache-dir tensorflow==2.15.0
+
+WORKDIR /root
diff --git a/tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..7750ed716a36f4fa18559d78fa854d6c44193c76
--- /dev/null
+++ b/tensorflow/tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,8 @@
+FROM openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN pip install --no-cache-dir tensorflow==2.15.0 numpy
+
+WORKDIR /root
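A quick, hedged smoke test for the TensorFlow images above (illustrative commands, not part of the change):

```shell
# CPU image: confirm the installed TensorFlow version.
docker run --rm openeuler/tensorflow:tensorflow2.15.0-oe2203sp2 \
    python3 -c "import tensorflow as tf; print(tf.__version__)"
# GPU image: list the GPUs TensorFlow can see (requires a GPU on the host).
docker run --rm --gpus all openeuler/tensorflow:tensorflow2.15.0-trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
    python3 -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))"
```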
diff --git a/tensorrt/README.md b/tensorrt/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9608b0a2ed4f83b56d6434e5f6ef9bd20c0f91e8
--- /dev/null
+++ b/tensorrt/README.md
@@ -0,0 +1,31 @@
+# tensorrt
+
+# Quick reference
+
+- TensorRT devel image.
+
+- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative)
+
+- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community)
+
+# Build reference
+
+1. Build the images and push:
+```shell
+cd cuda/cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+cd ../../tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/ && docker buildx build -t "openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2" --platform linux/x86_64 . --push
+```
+
+We use `buildx` here to generate multi-arch images; see [Docker Buildx](https://docs.docker.com/buildx/working-with-buildx/) for details.
+
+2. Run:
+```shell
+docker run -d -it --gpus all openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+```
+
+# Supported tags and respective Dockerfile links
+
+- trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2: TensorRT 8.6.1.6, openEuler 22.03 LTS-SP2
+
+## Operating System
+Linux/Unix, x86-64
diff --git a/tensorrt/meta.yml b/tensorrt/meta.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0bb3efc0d6be58033df6fd209e98520510c274a8
--- /dev/null
+++ b/tensorrt/meta.yml
@@ -0,0 +1,2 @@
+trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2:
+  - tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
diff --git a/tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile b/tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..05e67574ed345fa851ccaa863db04a7debca0ea6
--- /dev/null
+++ b/tensorrt/trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2/Dockerfile
@@ -0,0 +1,26 @@
+FROM openeuler/cuda:cuda12.2.0-cudnn8.9.5.30-oe2203sp2
+
+ARG TENSORRT=TensorRT-8.6.1.6
+ARG TENSORRT_TAR_GZ=${TENSORRT}.Linux.x86_64-gnu.cuda-12.0.tar.gz
+ARG TENSORRT_URL=https://repo.oepkgs.net/openeuler/rpm/openEuler-22.03-LTS-SP2/contrib/Artificial_Intelligence/source/Packages/${TENSORRT_TAR_GZ}
+
+ENV LOCAL_DIR=/usr/local
+
+ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${LOCAL_DIR}/${TENSORRT}/lib:${LOCAL_DIR}/cuda/lib64
+
+RUN pip config set global.index-url http://pypi.tuna.tsinghua.edu.cn/simple
+RUN pip config set global.trusted-host pypi.tuna.tsinghua.edu.cn
+
+RUN cd ${LOCAL_DIR} && \
+    wget ${TENSORRT_URL} && \
+    tar -xf ${TENSORRT_TAR_GZ} && \
+    cd ${TENSORRT} && \
+    pip install --no-cache-dir python/tensorrt-8.6.1-cp39-none-linux_x86_64.whl && \
+    pip install --no-cache-dir python/tensorrt_lean-8.6.1-cp39-none-linux_x86_64.whl && \
+    pip install --no-cache-dir python/tensorrt_dispatch-8.6.1-cp39-none-linux_x86_64.whl && \
+    pip install --no-cache-dir uff/uff-0.6.9-py2.py3-none-any.whl && \
+    pip install --no-cache-dir graphsurgeon/graphsurgeon-0.4.6-py2.py3-none-any.whl && \
+    find ${LOCAL_DIR}/${TENSORRT}/ -name "*.a" | xargs rm -f && \
+    rm -r ${LOCAL_DIR}/${TENSORRT_TAR_GZ}
+
+WORKDIR /root
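A quick, hedged smoke test for the TensorRT image above (illustrative commands, not part of the change; the `trtexec` path follows the extraction location in the Dockerfile):

```shell
# Confirm the Python bindings installed from the wheel.
docker run --rm --gpus all openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
    python3 -c "import tensorrt; print(tensorrt.__version__)"
# trtexec ships in the extracted tarball's bin/ directory.
docker run --rm --gpus all openeuler/tensorrt:trt8.6.1-cuda12.2.0-cudnn8.9.5.30-oe2203sp2 \
    /usr/local/TensorRT-8.6.1.6/bin/trtexec --help
```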