diff --git a/AI/cuda/11.8.0-cudnn8.9.0/22.03-lts/Dockerfile b/AI/cuda/11.8.0-cudnn8.9.0/22.03-lts/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..e2f29ea2b91638b566d83ba6f71c107ea7da2158
--- /dev/null
+++ b/AI/cuda/11.8.0-cudnn8.9.0/22.03-lts/Dockerfile
@@ -0,0 +1,49 @@
+# CUDA 11.8.0 + cuDNN 8.9.0 toolkit image on openEuler 22.03-LTS (amd64 / arm64).
+ARG BASE=openeuler/openeuler:22.03
+
+FROM ${BASE} AS cuda-installer
+
+ARG TARGETPLATFORM
+
+ENV PATH=${PATH}:/usr/local/cuda-11.8/bin
+ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda-11.8/lib64
+
+# Tooling needed to download and run the NVIDIA installers; clean the yum
+# cache in the same layer so it is not baked into the image.
+RUN yum update -y && \
+    yum install -y \
+        wget \
+        gcc \
+        make \
+        xz-devel \
+    && yum clean all \
+    && rm -rf /var/cache/yum
+
+# Install the CUDA 11.8 toolkit from the arch-specific runfile, removing the
+# installer in the same layer to keep the image small.
+RUN \
+    if [ "${TARGETPLATFORM}" = "linux/arm64" ]; then \
+        CUDA_URL="https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux_sbsa.run"; \
+    else \
+        CUDA_URL="https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run"; \
+    fi && \
+    wget --quiet ${CUDA_URL} -O ~/cuda_11.8.0_520.61.05_linux.run && \
+    chmod +x ~/cuda_11.8.0_520.61.05_linux.run && \
+    ~/cuda_11.8.0_520.61.05_linux.run --toolkit --silent && \
+    rm -f ~/cuda_11.8.0_520.61.05_linux.run
+
+# Install cuDNN 8.9.0 from the NVIDIA redist tarball. cuDNN 8.x splits its API
+# across several headers (cudnn_version.h, cudnn_ops_infer.h, ...), so copy
+# cudnn*.h rather than cudnn.h only, as NVIDIA's tar-install guide specifies.
+RUN if [ "${TARGETPLATFORM}" = "linux/arm64" ]; then \
+        CUDNN_URL="https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-sbsa/cudnn-linux-sbsa-8.9.0.131_cuda11-archive.tar.xz"; \
+    else \
+        CUDNN_URL="https://developer.download.nvidia.com/compute/cudnn/redist/cudnn/linux-x86_64/cudnn-linux-x86_64-8.9.0.131_cuda11-archive.tar.xz"; \
+    fi && \
+    mkdir -p /tmp/cudnn && \
+    wget --quiet ${CUDNN_URL} -O /tmp/cudnn.tar.xz && \
+    tar -xf /tmp/cudnn.tar.xz -C /tmp/cudnn --strip-components=1 && \
+    cp /tmp/cudnn/include/cudnn*.h /usr/local/cuda-11.8/include && \
+    cp /tmp/cudnn/lib/libcudnn* /usr/local/cuda-11.8/lib64 && \
+    chmod a+r /usr/local/cuda-11.8/include/cudnn*.h /usr/local/cuda-11.8/lib64/libcudnn* && \
+    rm -rf /tmp/cudnn*
diff --git a/AI/cuda/README.md b/AI/cuda/README.md 
new file mode 100644 index 0000000000000000000000000000000000000000..8cfcfd32354140932cf0adedefaac8a82c3cee06 --- /dev/null +++ b/AI/cuda/README.md @@ -0,0 +1,62 @@ +# Quick reference + +- The official CUDA docker image. + +- Maintained by: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative). + +- Where to get help: [openEuler CloudNative SIG](https://gitee.com/openeuler/cloudnative), [openEuler](https://gitee.com/openeuler/community). + +# CUDA | openEuler +Current CUDA docker images are built on the [openEuler](https://repo.openeuler.org/). This repository is free to use and exempted from per-user rate limits. + +A heterogeneous computing platform for general-purpose parallel computing that provides layered APIs and high-level libraries to help you quickly build high-performance computing applications and AI services based on NVIDIA GPUs. + +Learn more on the [CUDA Documentation](https://docs.nvidia.com/cuda/). + +# Supported tags and respective Dockerfile links +The tag of each `cuda` docker image consists of the complete software stack version. The details are as follows +| Tag | Currently | Architectures | +|----------|-------------|------------------| +|[11.8.0-cudnn8.9.0-oe2203lts](https://gitee.com/openeuler/openeuler-docker-images/blob/master/AI/cuda/11.8.0-cudnn8.9.0/22.03-lts/Dockerfile)| CUDA 11.8.0 with cudnn 8.9.0 on openEuler 22.03-LTS | arm64,amd64 | + +# Usage +In this usage, users can select the corresponding `{Tag}` and `container startup options` based on their requirements. + +- Pull the `openeuler/cuda` image from docker + + ```bash + docker pull openeuler/cuda:{Tag} + ``` + +- Start a cuda instance + + ```bash + docker run \ + --name my-cuda \ + --gpus all \ + -it openeuler/cuda:{Tag} bash + ``` + +- Container startup options + + | Option | Description | + |--|--| + | `--name my-cuda` | Names the container `my-cuda`. 
| + | `--gpus all` | The specified container can access all GPU devices, you can also specify a specific GPU, such as --gpus '"device=0,1"' | + | `-it` | Starts the container in interactive mode with a terminal (bash). | + | `openeuler/cuda:{Tag}` | Specifies the Docker image to run, replace `{Tag}` with the specific version or tag of the `openeuler/cuda` image you want to use. | + +- View container running logs + + ```bash + docker logs -f my-cuda + ``` + +- To get an interactive shell + + ```bash + docker exec -it my-cuda /bin/bash + ``` + +# Question and answering +If you have any questions or want to use some special features, please submit an issue or a pull request on [openeuler-docker-images](https://gitee.com/openeuler/openeuler-docker-images). \ No newline at end of file diff --git a/AI/cuda/doc/image-info.yml b/AI/cuda/doc/image-info.yml new file mode 100644 index 0000000000000000000000000000000000000000..e95603e5f95e9f2f68f6708b94db66a52d5c4976 --- /dev/null +++ b/AI/cuda/doc/image-info.yml @@ -0,0 +1,62 @@ +name: cuda +category: ai +description: CUDA(Compute Unified Device Architecture)是由NVIDIA公司开发的并行计算平台和编程模型,它允许开发者利用NVIDIA GPU进行通用计算以加速应用程序。通过CUDA,开发者可以显著提升科学计算、机器学习等领域应用的性能。 +environment: | + 本应用在Docker环境中运行,安装Docker执行如下命令 + ``` + yum install -y docker + ``` +tags: | + CUDA镜像的Tag由其版本信息和基础镜像版本信息组成,详细内容如下 + + | Tag | Currently | Architectures | + |----------|-------------|------------------| + |[11.8.0-cudnn8.9.0-oe2203lts](https://gitee.com/openeuler/openeuler-docker-images/blob/master/AI/cuda/11.8.0-cudnn8.9.0/22.03-lts/Dockerfile)| CUDA 11.8.0 on openEuler 22.03-LTS | amd64, arm64 | + +download: | + 拉取镜像到本地 + ``` + docker pull openeuler/cuda:{Tag} + ``` + +usage: | + - 启动容器 + ``` + docker run \ + --name my-cuda \ + --gpus all \ + -it openeuler/cuda:{Tag} bash + ``` + 用户可根据自身需求选择对应版本的`{Tag}`以及容器启动的其他参数配置。 + + - 参数说明 + | 配置项 | 描述 | + |--|--| + | `--name my-cuda` | 容器名称。| + | `--gpus all` | 指定容器可以访问所有GPU设备,也可指定具体的GPU,如 --gpus 
'"device=0,1"' | + | `-it` | 以交互模式启动容器。 | + | `openeuler/cuda:{Tag}` | 指定要运行的镜像为 `openeuler/cuda`,其中 `{Tag}` 是需要替换的镜像标签。 | + + - 容器测试 + + 查看运行日志 + ``` + docker logs -f my-cuda + ``` + + 使用shell交互 + ``` + docker exec -it my-cuda /bin/bash + ``` + +license: Apache-2.0 license +similar_packages: + - CANN: CANN是华为针对AI场景推出的异构计算架构,对上支持多种AI框架,对下服务AI处理器与编程,发挥承上启下的关键作用,是提升昇腾AI处理器计算效率的关键平台。 +dependency: + - nvcc + - libcudart.so + - libcublas.so + - libcufft.so + - libcurand.so + - libcusparse.so + diff --git a/AI/cuda/doc/picture/logo.png b/AI/cuda/doc/picture/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..14534f63fa19bd2af2082ee20b6a87783923ec7f Binary files /dev/null and b/AI/cuda/doc/picture/logo.png differ diff --git a/AI/cuda/meta.yml b/AI/cuda/meta.yml new file mode 100644 index 0000000000000000000000000000000000000000..1efed1e13d78b809a7c676c6723466efed9ebd59 --- /dev/null +++ b/AI/cuda/meta.yml @@ -0,0 +1,2 @@ +11.8.0-cudnn8.9.0-oe2203lts: + path: 11.8.0-cudnn8.9.0/22.03-lts/Dockerfile \ No newline at end of file diff --git a/AI/image-list.yml b/AI/image-list.yml index ac2fc0db053fbd43f25c7291fad1a1460e557b3c..fed6f44f3d39842a855f8bece6e7c21066bfd2c7 100644 --- a/AI/image-list.yml +++ b/AI/image-list.yml @@ -69,3 +69,4 @@ images: text2sql-ui: opea/text2sql-ui text2image: opea/text2image text2image-ui: opea/text2image-ui + cuda: cuda