From 8d1471cf028791fea4e3ec27c2beac6fe78cf127 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Wed, 26 Mar 2025 10:33:44 +0800 Subject: [PATCH 1/4] add supported env for cv/classification in readme --- cv/classification/acmix/pytorch/README.md | 6 ++++++ cv/classification/acnet/pytorch/README.md | 6 ++++++ cv/classification/alexnet/pytorch/README.md | 6 ++++++ cv/classification/alexnet/tensorflow/README.md | 6 ++++++ cv/classification/byol/pytorch/README.md | 6 ++++++ cv/classification/cbam/pytorch/README.md | 6 ++++++ cv/classification/convnext/pytorch/README.md | 6 ++++++ cv/classification/cspdarknet53/pytorch/README.md | 6 ++++++ cv/classification/densenet/paddlepaddle/README.md | 6 ++++++ cv/classification/densenet/pytorch/README.md | 6 ++++++ cv/classification/dpn107/pytorch/README.md | 6 ++++++ cv/classification/dpn92/pytorch/README.md | 6 ++++++ cv/classification/eca_mobilenet_v2/pytorch/README.md | 6 ++++++ cv/classification/eca_resnet152/pytorch/README.md | 6 ++++++ cv/classification/efficientnet_b0/paddlepaddle/README.md | 6 ++++++ cv/classification/efficientnet_b4/pytorch/README.md | 6 ++++++ cv/classification/fasternet/pytorch/README.md | 6 ++++++ cv/classification/googlenet/paddlepaddle/README.md | 6 ++++++ cv/classification/googlenet/pytorch/README.md | 6 ++++++ cv/classification/inceptionv3/mindspore/README.md | 6 ++++++ cv/classification/inceptionv3/pytorch/README.md | 6 ++++++ cv/classification/inceptionv3/tensorflow/README.md | 6 ++++++ cv/classification/inceptionv4/pytorch/README.md | 6 ++++++ cv/classification/internimage/pytorch/README.md | 6 ++++++ cv/classification/lenet/pytorch/README.md | 6 ++++++ cv/classification/mobilenetv2/pytorch/README.md | 6 ++++++ cv/classification/mobilenetv3/mindspore/README.md | 6 ++++++ cv/classification/mobilenetv3/paddlepaddle/README.md | 6 ++++++ cv/classification/mobilenetv3/pytorch/README.md | 6 ++++++ .../mobilenetv3_large_x1_0/paddlepaddle/README.md | 6 ++++++ cv/classification/mobileone/pytorch/README.md | 6 ++++++ cv/classification/mocov2/pytorch/README.md | 6 ++++++ cv/classification/pp-lcnet/paddlepaddle/README.md | 6 ++++++ cv/classification/repmlp/pytorch/README.md | 6 ++++++ cv/classification/repvgg/paddlepaddle/README.md | 6 ++++++ cv/classification/repvgg/pytorch/README.md | 6 ++++++ cv/classification/repvit/pytorch/README.md | 6 ++++++ cv/classification/res2net50_14w_8s/paddlepaddle/README.md | 6 ++++++ cv/classification/resnest101/pytorch/README.md | 6 ++++++ cv/classification/resnest14/pytorch/README.md | 6 ++++++ cv/classification/resnest269/pytorch/README.md | 6 ++++++ cv/classification/resnest50/paddlepaddle/README.md | 6 ++++++ cv/classification/resnest50/pytorch/README.md | 6 ++++++ cv/classification/resnet101/pytorch/README.md | 6 ++++++ cv/classification/resnet152/pytorch/README.md | 6 ++++++ cv/classification/resnet18/pytorch/README.md | 6 ++++++ cv/classification/resnet50/paddlepaddle/README.md | 6 ++++++ cv/classification/resnet50/pytorch/README.md | 6 ++++++ cv/classification/resnet50/tensorflow/README.md | 6 ++++++ cv/classification/resnext101_32x8d/pytorch/README.md | 6 ++++++ cv/classification/resnext50_32x4d/mindspore/README.md | 6 ++++++ cv/classification/resnext50_32x4d/pytorch/README.md | 6 ++++++ cv/classification/se_resnet50_vd/paddlepaddle/README.md | 6 ++++++ cv/classification/seresnext/pytorch/README.md | 6 ++++++ cv/classification/shufflenetv2/paddlepaddle/README.md | 6 ++++++ cv/classification/shufflenetv2/pytorch/README.md | 6 ++++++ cv/classification/squeezenet/pytorch/README.md | 6 
++++++ cv/classification/swin_transformer/paddlepaddle/README.md | 6 ++++++ cv/classification/swin_transformer/pytorch/README.md | 6 ++++++ cv/classification/vgg/paddlepaddle/README.md | 6 ++++++ cv/classification/vgg/pytorch/README.md | 6 ++++++ cv/classification/vgg/tensorflow/README.md | 6 ++++++ cv/classification/wavemlp/pytorch/README.md | 6 ++++++ cv/classification/wide_resnet101_2/pytorch/README.md | 6 ++++++ cv/classification/xception/paddlepaddle/README.md | 6 ++++++ cv/classification/xception/pytorch/README.md | 6 ++++++ 66 files changed, 396 insertions(+) diff --git a/cv/classification/acmix/pytorch/README.md b/cv/classification/acmix/pytorch/README.md index 724ed82fc..37e5cda4a 100644 --- a/cv/classification/acmix/pytorch/README.md +++ b/cv/classification/acmix/pytorch/README.md @@ -9,6 +9,12 @@ the local feature extraction of convolutions and the global context modeling of improved performance on image recognition tasks with minimal computational overhead compared to pure convolution or attention-based approaches. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/acnet/pytorch/README.md b/cv/classification/acnet/pytorch/README.md index 65a5e8037..7800add37 100755 --- a/cv/classification/acnet/pytorch/README.md +++ b/cv/classification/acnet/pytorch/README.md @@ -9,6 +9,12 @@ be seamlessly integrated into existing architectures, boosting accuracy without training, ACNet converts back to the original architecture, maintaining efficiency. It demonstrates consistent performance improvements across various models on datasets like CIFAR and ImageNet. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/alexnet/pytorch/README.md b/cv/classification/alexnet/pytorch/README.md index 699735310..515abbf3f 100644 --- a/cv/classification/alexnet/pytorch/README.md +++ b/cv/classification/alexnet/pytorch/README.md @@ -10,6 +10,12 @@ principles continue to influence modern neural network architectures in computer classic convolutional neural network architecture. It consists of convolutions, max pooling and dense layers as the basic building blocks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/alexnet/tensorflow/README.md b/cv/classification/alexnet/tensorflow/README.md index 59a8cb476..af5946b33 100644 --- a/cv/classification/alexnet/tensorflow/README.md +++ b/cv/classification/alexnet/tensorflow/README.md @@ -8,6 +8,12 @@ innovations like ReLU activations, dropout regularization, and GPU acceleration. success popularized deep learning and established CNNs as the dominant approach for image recognition. AlexNet's design principles continue to influence modern neural network architectures in computer vision applications. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/byol/pytorch/README.md b/cv/classification/byol/pytorch/README.md index 3b53a8d0b..6d56a91b4 100644 --- a/cv/classification/byol/pytorch/README.md +++ b/cv/classification/byol/pytorch/README.md @@ -8,6 +8,12 @@ through contrasting augmented views of the same image. BYOL's unique approach el achieving state-of-the-art performance in unsupervised learning. It's particularly effective for pre-training models on large datasets before fine-tuning for specific tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/cbam/pytorch/README.md b/cv/classification/cbam/pytorch/README.md index 43d353876..ae09018e6 100644 --- a/cv/classification/cbam/pytorch/README.md +++ b/cv/classification/cbam/pytorch/README.md @@ -8,6 +8,12 @@ significant computational overhead. CBAM helps networks focus on important featu leading to better object recognition and localization. The module is lightweight and can be easily integrated into existing CNN architectures, making it a versatile tool for improving various computer vision tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/convnext/pytorch/README.md b/cv/classification/convnext/pytorch/README.md index aab8f3f66..ef5e7d59c 100644 --- a/cv/classification/convnext/pytorch/README.md +++ b/cv/classification/convnext/pytorch/README.md @@ -9,6 +9,12 @@ modernized ConvNets can match or exceed Transformer-based models in accuracy and Its simplicity and strong performance make it a compelling choice for image classification and other computer vision applications. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/cspdarknet53/pytorch/README.md b/cv/classification/cspdarknet53/pytorch/README.md index c4812e97f..ca220531a 100644 --- a/cv/classification/cspdarknet53/pytorch/README.md +++ b/cv/classification/cspdarknet53/pytorch/README.md @@ -8,6 +8,12 @@ maps across stages. The model achieves better gradient flow and reduces memory u architectures. CspDarknet53 is particularly effective in real-time detection tasks, offering a good balance between accuracy and speed, making it popular in modern object detection frameworks like YOLOv4. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/densenet/paddlepaddle/README.md b/cv/classification/densenet/paddlepaddle/README.md index df4f00024..cde6c1399 100644 --- a/cv/classification/densenet/paddlepaddle/README.md +++ b/cv/classification/densenet/paddlepaddle/README.md @@ -8,6 +8,12 @@ subsequent layers. This dense connectivity pattern improves gradient flow, encou vanishing gradient problems. DenseNet achieves state-of-the-art performance with fewer parameters compared to traditional CNNs, making it efficient for various computer vision tasks like image classification and object detection. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/densenet/pytorch/README.md b/cv/classification/densenet/pytorch/README.md index 454f846ae..a98088034 100755 --- a/cv/classification/densenet/pytorch/README.md +++ b/cv/classification/densenet/pytorch/README.md @@ -8,6 +8,12 @@ subsequent layers. This dense connectivity pattern improves gradient flow, encou vanishing gradient problems. DenseNet achieves state-of-the-art performance with fewer parameters compared to traditional CNNs, making it efficient for various computer vision tasks like image classification and object detection. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/dpn107/pytorch/README.md b/cv/classification/dpn107/pytorch/README.md index 3bf787dee..4ccd50337 100644 --- a/cv/classification/dpn107/pytorch/README.md +++ b/cv/classification/dpn107/pytorch/README.md @@ -8,6 +8,12 @@ preserving important features and another for discovering new ones. DPN107 achie image classification tasks while maintaining computational efficiency. Its unique design makes it particularly effective for complex visual recognition tasks, offering a balance between model accuracy and resource utilization. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/dpn92/pytorch/README.md b/cv/classification/dpn92/pytorch/README.md index 9dc35d694..c31862f79 100644 --- a/cv/classification/dpn92/pytorch/README.md +++ b/cv/classification/dpn92/pytorch/README.md @@ -8,6 +8,12 @@ enables efficient learning of both shared and new features. DPN92 achieves state classification tasks while maintaining computational efficiency. Its unique architecture makes it particularly effective for tasks requiring both feature preservation and discovery. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/eca_mobilenet_v2/pytorch/README.md b/cv/classification/eca_mobilenet_v2/pytorch/README.md index 7a0a34717..24c55affa 100644 --- a/cv/classification/eca_mobilenet_v2/pytorch/README.md +++ b/cv/classification/eca_mobilenet_v2/pytorch/README.md @@ -9,6 +9,12 @@ maintaining computational efficiency, making it suitable for mobile and edge dev accuracy than standard MobileNet V2 with minimal additional parameters, making it ideal for resource-constrained image classification tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/eca_resnet152/pytorch/README.md b/cv/classification/eca_resnet152/pytorch/README.md index 93d955757..3dabe7116 100644 --- a/cv/classification/eca_resnet152/pytorch/README.md +++ b/cv/classification/eca_resnet152/pytorch/README.md @@ -9,6 +9,12 @@ superior accuracy in image classification tasks compared to standard ResNet152, complex visual recognition problems. Its architecture balances performance and efficiency, making it suitable for various computer vision applications. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/efficientnet_b0/paddlepaddle/README.md b/cv/classification/efficientnet_b0/paddlepaddle/README.md index 284168922..5b988747c 100644 --- a/cv/classification/efficientnet_b0/paddlepaddle/README.md +++ b/cv/classification/efficientnet_b0/paddlepaddle/README.md @@ -9,6 +9,12 @@ convolution (MBConv) blocks with squeeze-and-excitation optimization. EfficientN mobile and edge devices, offering high accuracy in image classification tasks while maintaining low computational requirements. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/efficientnet_b4/pytorch/README.md b/cv/classification/efficientnet_b4/pytorch/README.md index 91585de19..7b48c4075 100755 --- a/cv/classification/efficientnet_b4/pytorch/README.md +++ b/cv/classification/efficientnet_b4/pytorch/README.md @@ -8,6 +8,12 @@ superior accuracy compared to smaller EfficientNet variants. The model maintains more complex visual recognition tasks. EfficientNetB4 is particularly effective for high-accuracy image classification scenarios where computational resources are available, offering a good trade-off between performance and efficiency. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/fasternet/pytorch/README.md b/cv/classification/fasternet/pytorch/README.md index 09b0e132a..2d168679b 100644 --- a/cv/classification/fasternet/pytorch/README.md +++ b/cv/classification/fasternet/pytorch/README.md @@ -9,6 +9,12 @@ and speed. Its innovative architecture makes it particularly effective for mobil resources are limited. The model demonstrates state-of-the-art results in various computer vision tasks while maintaining low latency. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/googlenet/paddlepaddle/README.md b/cv/classification/googlenet/paddlepaddle/README.md index 61b77aba6..f69ef4244 100644 --- a/cv/classification/googlenet/paddlepaddle/README.md +++ b/cv/classification/googlenet/paddlepaddle/README.md @@ -8,6 +8,12 @@ extraction at various scales. The network uses 1x1 convolutions for dimensionali efficient. GoogLeNet achieved state-of-the-art performance in image classification tasks while maintaining relatively low computational complexity. Its innovative design has influenced many subsequent CNN architectures in computer vision. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/googlenet/pytorch/README.md b/cv/classification/googlenet/pytorch/README.md index 759dd4d28..a82e40c7b 100755 --- a/cv/classification/googlenet/pytorch/README.md +++ b/cv/classification/googlenet/pytorch/README.md @@ -8,6 +8,12 @@ extraction at various scales. The network uses 1x1 convolutions for dimensionali efficient. GoogLeNet achieved state-of-the-art performance in image classification tasks while maintaining relatively low computational complexity. Its innovative design has influenced many subsequent CNN architectures in computer vision. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/inceptionv3/mindspore/README.md b/cv/classification/inceptionv3/mindspore/README.md index 45c8a0701..d5cd0d63a 100644 --- a/cv/classification/inceptionv3/mindspore/README.md +++ b/cv/classification/inceptionv3/mindspore/README.md @@ -9,6 +9,12 @@ flow and convergence. InceptionV3 achieves state-of-the-art performance in image computational efficiency, making it suitable for various computer vision applications requiring high accuracy and robust feature learning. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V100 | 3.1.0 | 23.09 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/inceptionv3/pytorch/README.md b/cv/classification/inceptionv3/pytorch/README.md index c8f869658..af9be6dc9 100644 --- a/cv/classification/inceptionv3/pytorch/README.md +++ b/cv/classification/inceptionv3/pytorch/README.md @@ -3,6 +3,12 @@ ## Model Description Inception-v3 is a convolutional neural network architecture from the Inception family that makes several improvements including using Label Smoothing, Factorized 7 x 7 convolutions, and the use of an auxiliary classifier to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Install Dependencies diff --git a/cv/classification/inceptionv3/tensorflow/README.md b/cv/classification/inceptionv3/tensorflow/README.md index ae20907e7..8ad1ebff3 100644 --- a/cv/classification/inceptionv3/tensorflow/README.md +++ b/cv/classification/inceptionv3/tensorflow/README.md @@ -4,6 +4,12 @@ InceptionV3 is a convolutional neural network architecture from the Inception family that makes several improvements including using Label Smoothing, Factorized 7 x 7 convolutions, and the use of an auxiliary classifier to propagate label information lower down the network (along with the use of batch normalization for layers in the sidehead). +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Install Dependencies diff --git a/cv/classification/inceptionv4/pytorch/README.md b/cv/classification/inceptionv4/pytorch/README.md index 625b75f18..1efb8d65a 100644 --- a/cv/classification/inceptionv4/pytorch/README.md +++ b/cv/classification/inceptionv4/pytorch/README.md @@ -9,6 +9,12 @@ InceptionV4 demonstrates improved accuracy over its predecessors while maintaini suitable for various computer vision applications. Its design focuses on optimizing network structure for better feature representation and classification performance. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/internimage/pytorch/README.md b/cv/classification/internimage/pytorch/README.md index 6f61e8052..31ae1ee0d 100644 --- a/cv/classification/internimage/pytorch/README.md +++ b/cv/classification/internimage/pytorch/README.md @@ -9,6 +9,12 @@ InternImage demonstrates exceptional scalability and efficiency, making it suita general image recognition to complex autonomous driving perception systems. Its design focuses on balancing model capacity with computational efficiency. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V100 | 3.1.0 | 23.09 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/lenet/pytorch/README.md b/cv/classification/lenet/pytorch/README.md index a6b0bcd6f..4eebd180b 100755 --- a/cv/classification/lenet/pytorch/README.md +++ b/cv/classification/lenet/pytorch/README.md @@ -8,6 +8,12 @@ for modern deep learning. Designed for the MNIST dataset, LeNet demonstrated the recognition tasks. Its simple yet effective architecture inspired subsequent networks like AlexNet and VGG, making it a cornerstone in the evolution of deep learning for computer vision applications. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/mobilenetv2/pytorch/README.md b/cv/classification/mobilenetv2/pytorch/README.md index 3f1723793..4570db566 100644 --- a/cv/classification/mobilenetv2/pytorch/README.md +++ b/cv/classification/mobilenetv2/pytorch/README.md @@ -8,6 +8,12 @@ computational complexity. This architecture maintains high accuracy while signif latency compared to traditional CNNs. MobileNetV2's design focuses on balancing performance and efficiency, making it ideal for real-time applications on resource-constrained devices like smartphones and IoT devices. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/mobilenetv3/mindspore/README.md b/cv/classification/mobilenetv3/mindspore/README.md index 068999ce2..fdb6da777 100644 --- a/cv/classification/mobilenetv3/mindspore/README.md +++ b/cv/classification/mobilenetv3/mindspore/README.md @@ -9,6 +9,12 @@ mobile vision tasks, offering variants for different computational budgets. Its power consumption, making it ideal for real-time applications on resource-constrained devices like smartphones and embedded systems. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V100 | 3.1.0 | 23.09 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/mobilenetv3/paddlepaddle/README.md b/cv/classification/mobilenetv3/paddlepaddle/README.md index a0dd9fba3..cb43ee4ac 100644 --- a/cv/classification/mobilenetv3/paddlepaddle/README.md +++ b/cv/classification/mobilenetv3/paddlepaddle/README.md @@ -9,6 +9,12 @@ mobile vision tasks, offering variants for different computational budgets. Its power consumption, making it ideal for real-time applications on resource-constrained devices like smartphones and embedded systems. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/mobilenetv3/pytorch/README.md b/cv/classification/mobilenetv3/pytorch/README.md index 29c69a342..ec311199a 100644 --- a/cv/classification/mobilenetv3/pytorch/README.md +++ b/cv/classification/mobilenetv3/pytorch/README.md @@ -9,6 +9,12 @@ mobile vision tasks, offering variants for different computational budgets. Its power consumption, making it ideal for real-time applications on resource-constrained devices like smartphones and embedded systems. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/mobilenetv3_large_x1_0/paddlepaddle/README.md b/cv/classification/mobilenetv3_large_x1_0/paddlepaddle/README.md index f76bf582c..6d39c4032 100644 --- a/cv/classification/mobilenetv3_large_x1_0/paddlepaddle/README.md +++ b/cv/classification/mobilenetv3_large_x1_0/paddlepaddle/README.md @@ -9,6 +9,12 @@ accuracy on ImageNet. Its design focuses on reducing latency while maintaining p mobile applications. MobileNetV3_large_x1_0 serves as a general-purpose backbone for various computer vision tasks on resource-constrained devices. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/mobileone/pytorch/README.md b/cv/classification/mobileone/pytorch/README.md index f1770469e..cdcfe7b91 100644 --- a/cv/classification/mobileone/pytorch/README.md +++ b/cv/classification/mobileone/pytorch/README.md @@ -8,6 +8,12 @@ speed on mobile chips. Achieving under 1ms inference time on iPhone 12 with 75.9 outperforms other efficient architectures in both speed and accuracy. It's versatile for tasks like image classification, object detection, and segmentation, making it ideal for mobile deployment. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/mocov2/pytorch/README.md b/cv/classification/mocov2/pytorch/README.md index 9c81bd977..9b2cb9a52 100644 --- a/cv/classification/mocov2/pytorch/README.md +++ b/cv/classification/mocov2/pytorch/README.md @@ -8,6 +8,12 @@ techniques to boost performance without requiring large batch sizes. This approa from unlabeled data, establishing strong baselines for self-supervised learning. MoCoV2 outperforms previous methods like SimCLR while maintaining computational efficiency, making it accessible for various computer vision tasks. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/pp-lcnet/paddlepaddle/README.md b/cv/classification/pp-lcnet/paddlepaddle/README.md index 04f937853..44b2e2e9f 100644 --- a/cv/classification/pp-lcnet/paddlepaddle/README.md +++ b/cv/classification/pp-lcnet/paddlepaddle/README.md @@ -9,6 +9,12 @@ vision applications like object detection and semantic segmentation. PP-LCNet's with minimal computational overhead, making it ideal for resource-constrained environments requiring fast and efficient inference. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/repmlp/pytorch/README.md b/cv/classification/repmlp/pytorch/README.md index fbc158f2d..5255c37e6 100644 --- a/cv/classification/repmlp/pytorch/README.md +++ b/cv/classification/repmlp/pytorch/README.md @@ -9,6 +9,12 @@ these components into pure FC layers for inference, achieving both high accuracy architecture is particularly effective for image recognition tasks, offering a novel approach to balance global and local feature learning. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/repvgg/paddlepaddle/README.md b/cv/classification/repvgg/paddlepaddle/README.md index 0f638266b..ce7e75a38 100644 --- a/cv/classification/repvgg/paddlepaddle/README.md +++ b/cv/classification/repvgg/paddlepaddle/README.md @@ -9,6 +9,12 @@ state-of-the-art performance in image classification tasks while maintaining hig is particularly suitable for applications requiring both high accuracy and fast inference, making it ideal for real-world deployment scenarios. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/repvgg/pytorch/README.md b/cv/classification/repvgg/pytorch/README.md index 2b23b995f..479542dbb 100755 --- a/cv/classification/repvgg/pytorch/README.md +++ b/cv/classification/repvgg/pytorch/README.md @@ -9,6 +9,12 @@ state-of-the-art performance in image classification tasks while maintaining hig is particularly suitable for applications requiring both high accuracy and fast inference, making it ideal for real-world deployment scenarios. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/repvit/pytorch/README.md b/cv/classification/repvit/pytorch/README.md index 05c43e7fe..c124fe284 100644 --- a/cv/classification/repvit/pytorch/README.md +++ b/cv/classification/repvit/pytorch/README.md @@ -8,6 +8,12 @@ latency than lightweight ViTs. RepViT demonstrates state-of-the-art accuracy on inference speeds, making it ideal for resource-constrained applications. Its pure CNN architecture ensures mobile-friendliness, with the largest variant achieving 83.7% accuracy at just 2.3ms latency on an iPhone 12. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/res2net50_14w_8s/paddlepaddle/README.md b/cv/classification/res2net50_14w_8s/paddlepaddle/README.md index c19b2c642..c0b56580d 100644 --- a/cv/classification/res2net50_14w_8s/paddlepaddle/README.md +++ b/cv/classification/res2net50_14w_8s/paddlepaddle/README.md @@ -8,6 +8,12 @@ improving feature representation. The 14w_8s variant uses 14 width and 8 scales, in image classification tasks. This architecture effectively balances model complexity and computational efficiency, making it suitable for various computer vision applications requiring both high accuracy and efficient processing. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnest101/pytorch/README.md b/cv/classification/resnest101/pytorch/README.md index 8177b6a51..acc6a269b 100644 --- a/cv/classification/resnest101/pytorch/README.md +++ b/cv/classification/resnest101/pytorch/README.md @@ -9,6 +9,12 @@ by effectively balancing computational efficiency and model capacity. ResNeSt101 large-scale visual recognition tasks, offering improved accuracy over standard ResNet variants while maintaining efficient training and inference capabilities. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnest14/pytorch/README.md b/cv/classification/resnest14/pytorch/README.md index 230d10c6a..63816609c 100644 --- a/cv/classification/resnest14/pytorch/README.md +++ b/cv/classification/resnest14/pytorch/README.md @@ -9,6 +9,12 @@ complexity and computational efficiency. ResNeSt14's design is particularly suit resources, offering improved accuracy over standard ResNet variants while maintaining fast training and inference capabilities. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnest269/pytorch/README.md b/cv/classification/resnest269/pytorch/README.md index d19cd53da..c32e8d9b1 100644 --- a/cv/classification/resnest269/pytorch/README.md +++ b/cv/classification/resnest269/pytorch/README.md @@ -9,6 +9,12 @@ by effectively balancing computational efficiency and model capacity. ResNeSt269 large-scale visual recognition tasks, offering improved accuracy over standard ResNet variants while maintaining efficient training and inference capabilities. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnest50/paddlepaddle/README.md b/cv/classification/resnest50/paddlepaddle/README.md index 0fae57eef..5299ed9bc 100644 --- a/cv/classification/resnest50/paddlepaddle/README.md +++ b/cv/classification/resnest50/paddlepaddle/README.md @@ -9,6 +9,12 @@ balancing computational efficiency and model capacity. ResNeSt50's design is par visual recognition tasks, offering improved accuracy over standard ResNet variants while maintaining efficient training and inference capabilities. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnest50/pytorch/README.md b/cv/classification/resnest50/pytorch/README.md index 8f3fba24a..e13f9417e 100644 --- a/cv/classification/resnest50/pytorch/README.md +++ b/cv/classification/resnest50/pytorch/README.md @@ -9,6 +9,12 @@ balancing computational efficiency and model capacity. ResNeSt50's design is par visual recognition tasks, offering improved accuracy over standard ResNet variants while maintaining efficient training and inference capabilities. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnet101/pytorch/README.md b/cv/classification/resnet101/pytorch/README.md index ec5c7a7f5..3c386c2a2 100644 --- a/cv/classification/resnet101/pytorch/README.md +++ b/cv/classification/resnet101/pytorch/README.md @@ -9,6 +9,12 @@ ResNet101 achieves state-of-the-art performance in image classification tasks wh efficiency. Its architecture is widely used as a backbone for various computer vision applications, including object detection and segmentation. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnet152/pytorch/README.md b/cv/classification/resnet152/pytorch/README.md index d21818f66..015e911c3 100644 --- a/cv/classification/resnet152/pytorch/README.md +++ b/cv/classification/resnet152/pytorch/README.md @@ -9,6 +9,12 @@ hierarchical features. ResNet152's architecture is particularly effective for la offering improved accuracy over smaller ResNet variants while maintaining computational efficiency through its residual connections. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnet18/pytorch/README.md b/cv/classification/resnet18/pytorch/README.md index e2004f281..c3b6ee976 100644 --- a/cv/classification/resnet18/pytorch/README.md +++ b/cv/classification/resnet18/pytorch/README.md @@ -8,6 +8,12 @@ problems and allowing for better feature learning. ResNet18 achieves strong perf while maintaining computational efficiency. Its compact architecture makes it suitable for applications with limited resources, serving as a backbone for various computer vision tasks like object detection and segmentation. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnet50/paddlepaddle/README.md b/cv/classification/resnet50/paddlepaddle/README.md index 5e029d807..57773e498 100644 --- a/cv/classification/resnet50/paddlepaddle/README.md +++ b/cv/classification/resnet50/paddlepaddle/README.md @@ -8,6 +8,12 @@ gradient problems. This architecture achieved breakthrough performance in image ImageNet competition. ResNet50's efficient design and strong feature extraction capabilities make it widely used in computer vision applications, serving as a backbone for various tasks like object detection and segmentation. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnet50/pytorch/README.md b/cv/classification/resnet50/pytorch/README.md index ce76b5930..53184def9 100644 --- a/cv/classification/resnet50/pytorch/README.md +++ b/cv/classification/resnet50/pytorch/README.md @@ -8,6 +8,12 @@ gradient problems. This architecture achieved breakthrough performance in image ImageNet competition. ResNet50's efficient design and strong feature extraction capabilities make it widely used in computer vision applications, serving as a backbone for various tasks like object detection and segmentation. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnet50/tensorflow/README.md b/cv/classification/resnet50/tensorflow/README.md index d789c558c..120184b5e 100644 --- a/cv/classification/resnet50/tensorflow/README.md +++ b/cv/classification/resnet50/tensorflow/README.md @@ -8,6 +8,12 @@ gradient problems. This architecture achieved breakthrough performance in image ImageNet competition. ResNet50's efficient design and strong feature extraction capabilities make it widely used in computer vision applications, serving as a backbone for various tasks like object detection and segmentation. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnext101_32x8d/pytorch/README.md b/cv/classification/resnext101_32x8d/pytorch/README.md index fa33d3a43..cef26304b 100644 --- a/cv/classification/resnext101_32x8d/pytorch/README.md +++ b/cv/classification/resnext101_32x8d/pytorch/README.md @@ -9,6 +9,12 @@ state-of-the-art performance in image classification tasks by combining the bene multi-branch transformations. Its architecture is particularly effective for large-scale visual recognition tasks, offering improved accuracy over standard ResNet models. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnext50_32x4d/mindspore/README.md b/cv/classification/resnext50_32x4d/mindspore/README.md index 6a1ea169b..6204cffa2 100644 --- a/cv/classification/resnext50_32x4d/mindspore/README.md +++ b/cv/classification/resnext50_32x4d/mindspore/README.md @@ -8,6 +8,12 @@ representation. The 32x4d variant has 32 groups with 4-dimensional transformatio accuracy than ResNet50 with similar computational complexity, making it efficient for image classification tasks. ResNeXt50's design has influenced many subsequent CNN architectures in computer vision. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/resnext50_32x4d/pytorch/README.md b/cv/classification/resnext50_32x4d/pytorch/README.md index 011b82080..821ecf9e0 100644 --- a/cv/classification/resnext50_32x4d/pytorch/README.md +++ b/cv/classification/resnext50_32x4d/pytorch/README.md @@ -8,6 +8,12 @@ representation. The 32x4d variant has 32 groups with 4-dimensional transformatio accuracy than ResNet50 with similar computational complexity, making it efficient for image classification tasks. ResNeXt50's design has influenced many subsequent CNN architectures in computer vision. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/se_resnet50_vd/paddlepaddle/README.md b/cv/classification/se_resnet50_vd/paddlepaddle/README.md index 88ae57427..383eda58e 100644 --- a/cv/classification/se_resnet50_vd/paddlepaddle/README.md +++ b/cv/classification/se_resnet50_vd/paddlepaddle/README.md @@ -8,6 +8,12 @@ variant downsampling preserves more information during feature map reduction. Th than standard ResNet50 while maintaining computational efficiency. SE_ResNet50_vd is particularly effective for image classification tasks, offering improved performance through better feature learning and channel attention mechanisms. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/seresnext/pytorch/README.md b/cv/classification/seresnext/pytorch/README.md index ffc3ffb75..5821fe05e 100644 --- a/cv/classification/seresnext/pytorch/README.md +++ b/cv/classification/seresnext/pytorch/README.md @@ -9,6 +9,12 @@ each block while maintaining computational efficiency. SEResNeXt achieves state- classification tasks by effectively combining multi-branch transformations with channel-wise attention, making it particularly suitable for complex visual recognition problems. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/shufflenetv2/paddlepaddle/README.md b/cv/classification/shufflenetv2/paddlepaddle/README.md index 00d024039..e88e2f7e0 100644 --- a/cv/classification/shufflenetv2/paddlepaddle/README.md +++ b/cv/classification/shufflenetv2/paddlepaddle/README.md @@ -8,6 +8,12 @@ like FLOPs. The model features a channel split operation and optimized channel s accuracy and inference speed. ShuffleNetv2 achieves state-of-the-art performance in mobile image classification tasks while maintaining low computational complexity, making it ideal for resource-constrained applications. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/shufflenetv2/pytorch/README.md b/cv/classification/shufflenetv2/pytorch/README.md index 1f28a20ca..c141e52b3 100644 --- a/cv/classification/shufflenetv2/pytorch/README.md +++ b/cv/classification/shufflenetv2/pytorch/README.md @@ -8,6 +8,12 @@ like FLOPs. The model features a channel split operation and optimized channel s accuracy and inference speed. ShuffleNetv2 achieves state-of-the-art performance in mobile image classification tasks while maintaining low computational complexity, making it ideal for resource-constrained applications. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/squeezenet/pytorch/README.md b/cv/classification/squeezenet/pytorch/README.md index b084df03d..8a9c31f88 100644 --- a/cv/classification/squeezenet/pytorch/README.md +++ b/cv/classification/squeezenet/pytorch/README.md @@ -9,6 +9,12 @@ maintaining good classification performance. SqueezeNet is particularly suitable where model size and computational efficiency are critical, offering a balance between accuracy and resource requirements. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/swin_transformer/paddlepaddle/README.md b/cv/classification/swin_transformer/paddlepaddle/README.md index dda047a8d..1bd8eec92 100644 --- a/cv/classification/swin_transformer/paddlepaddle/README.md +++ b/cv/classification/swin_transformer/paddlepaddle/README.md @@ -9,6 +9,12 @@ suitable for both image classification and dense prediction tasks. Swin Transfor performance in various vision tasks, offering a powerful alternative to traditional convolutional networks with its transformer-based approach. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/swin_transformer/pytorch/README.md b/cv/classification/swin_transformer/pytorch/README.md index d62655a1a..a2994855e 100644 --- a/cv/classification/swin_transformer/pytorch/README.md +++ b/cv/classification/swin_transformer/pytorch/README.md @@ -9,6 +9,12 @@ suitable for both image classification and dense prediction tasks. Swin Transfor performance in various vision tasks, offering a powerful alternative to traditional convolutional networks with its transformer-based approach. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/vgg/paddlepaddle/README.md b/cv/classification/vgg/paddlepaddle/README.md index 44829679e..2ddea9d2a 100644 --- a/cv/classification/vgg/paddlepaddle/README.md +++ b/cv/classification/vgg/paddlepaddle/README.md @@ -8,6 +8,12 @@ includes 16 or 19 weight layers, with VGG16 being the most popular variant. VGG image classification tasks and became a benchmark for subsequent CNN architectures. Its uniform structure and deep design have influenced many modern deep learning models in computer vision. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/vgg/pytorch/README.md b/cv/classification/vgg/pytorch/README.md index 34ce4101f..f9cc2353f 100644 --- a/cv/classification/vgg/pytorch/README.md +++ b/cv/classification/vgg/pytorch/README.md @@ -8,6 +8,12 @@ includes 16 or 19 weight layers, with VGG16 being the most popular variant. VGG image classification tasks and became a benchmark for subsequent CNN architectures. Its uniform structure and deep design have influenced many modern deep learning models in computer vision. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/vgg/tensorflow/README.md b/cv/classification/vgg/tensorflow/README.md index 0a1ec7786..2e6ec1f04 100644 --- a/cv/classification/vgg/tensorflow/README.md +++ b/cv/classification/vgg/tensorflow/README.md @@ -8,6 +8,12 @@ includes 16 or 19 weight layers, with VGG16 being the most popular variant. VGG image classification tasks and became a benchmark for subsequent CNN architectures. Its uniform structure and deep design have influenced many modern deep learning models in computer vision. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/wavemlp/pytorch/README.md b/cv/classification/wavemlp/pytorch/README.md index b9a884a00..6b04581c1 100644 --- a/cv/classification/wavemlp/pytorch/README.md +++ b/cv/classification/wavemlp/pytorch/README.md @@ -8,6 +8,12 @@ in different images. This approach enhances feature aggregation in pure MLP arch CNNs and transformers in tasks like image classification and object detection. Wave-MLP offers efficient computation while maintaining high accuracy, making it suitable for various computer vision applications. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/wide_resnet101_2/pytorch/README.md b/cv/classification/wide_resnet101_2/pytorch/README.md index 495bec852..4616facb5 100644 --- a/cv/classification/wide_resnet101_2/pytorch/README.md +++ b/cv/classification/wide_resnet101_2/pytorch/README.md @@ -8,6 +8,12 @@ This architecture achieves superior performance in image classification tasks by efficient training. Wide_ResNet101_2 demonstrates improved accuracy over standard ResNet variants while maintaining computational efficiency, making it suitable for complex visual recognition tasks requiring high performance. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/xception/paddlepaddle/README.md b/cv/classification/xception/paddlepaddle/README.md index c31dd9ef9..8e95b762d 100644 --- a/cv/classification/xception/paddlepaddle/README.md +++ b/cv/classification/xception/paddlepaddle/README.md @@ -9,6 +9,12 @@ spatial correlations. The architecture achieves state-of-the-art performance in efficient alternative to traditional CNNs. Its design is particularly suitable for applications requiring both high accuracy and computational efficiency. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/classification/xception/pytorch/README.md b/cv/classification/xception/pytorch/README.md index beb31b487..108f6d76d 100755 --- a/cv/classification/xception/pytorch/README.md +++ b/cv/classification/xception/pytorch/README.md @@ -9,6 +9,12 @@ spatial correlations. The architecture achieves state-of-the-art performance in efficient alternative to traditional CNNs. Its design is particularly suitable for applications requiring both high accuracy and computational efficiency. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources -- Gitee From 286da9f8a2aac6032c19097ac9ea2eda4163ceb9 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Wed, 26 Mar 2025 10:42:15 +0800 Subject: [PATCH 2/4] add support env for cv/detection --- cv/detection/atss_mmdet/pytorch/README.md | 6 ++++++ cv/detection/autoassign/pytorch/README.md | 6 ++++++ cv/detection/cascade_rcnn_mmdet/pytorch/README.md | 6 ++++++ cv/detection/centermask2/pytorch/README.md | 6 ++++++ cv/detection/centernet/paddlepaddle/README.md | 6 ++++++ cv/detection/centernet/pytorch/README.md | 6 ++++++ cv/detection/co-detr/pytorch/README.md | 6 ++++++ cv/detection/cornernet_mmdet/pytorch/README.md | 6 ++++++ cv/detection/dcnv2_mmdet/pytorch/README.md | 6 ++++++ cv/detection/detr/paddlepaddle/README.md | 6 ++++++ cv/detection/fasterrcnn/pytorch/README.md | 6 ++++++ cv/detection/fcos/paddlepaddle/README.md | 6 ++++++ cv/detection/fcos/pytorch/README.md | 6 ++++++ cv/detection/mamba_yolo/pytorch/README.md | 6 ++++++ cv/detection/maskrcnn/paddlepaddle/README.md | 6 ++++++ cv/detection/maskrcnn/pytorch/README.md | 6 ++++++ cv/detection/oc_sort/paddlepaddle/README.md | 6 ++++++ cv/detection/oriented_reppoints/pytorch/README.md | 6 ++++++ cv/detection/picodet/paddlepaddle/README.md | 6 ++++++ cv/detection/pp-yoloe/paddlepaddle/README.md | 6 ++++++ cv/detection/pp_yoloe+/paddlepaddle/README.md | 6 ++++++ cv/detection/pvanet/pytorch/README.md | 6 ++++++ cv/detection/reppoints_mmdet/pytorch/README.md | 6 ++++++ cv/detection/retinanet/paddlepaddle/README.md | 6 ++++++ cv/detection/retinanet/pytorch/README.md | 6 ++++++ cv/detection/rt-detr/pytorch/README.md | 6 ++++++ cv/detection/rtmdet/pytorch/README.md | 6 ++++++ cv/detection/solov2/paddlepaddle/README.md | 6 ++++++ 
cv/detection/ssd/mindspore/README.md | 6 ++++++ cv/detection/ssd/paddlepaddle/README.md | 6 ++++++ cv/detection/ssd/pytorch/README.md | 6 ++++++ cv/detection/ssd/tensorflow/README.md | 6 ++++++ cv/detection/yolof/pytorch/README.md | 6 ++++++ cv/detection/yolov10/pytorch/README.md | 6 ++++++ cv/detection/yolov3/paddlepaddle/README.md | 6 ++++++ cv/detection/yolov3/pytorch/README.md | 6 ++++++ cv/detection/yolov3/tensorflow/README.md | 6 ++++++ cv/detection/yolov5/paddlepaddle/README.md | 6 ++++++ cv/detection/yolov5/pytorch/README.md | 6 ++++++ cv/detection/yolov6/pytorch/README.md | 6 ++++++ cv/detection/yolov7/pytorch/README.md | 6 ++++++ cv/detection/yolov8/pytorch/README.md | 6 ++++++ cv/detection/yolov9/pytorch/README.md | 6 ++++++ 43 files changed, 258 insertions(+) diff --git a/cv/detection/atss_mmdet/pytorch/README.md b/cv/detection/atss_mmdet/pytorch/README.md index f5c0fc7b5..b429120ce 100644 --- a/cv/detection/atss_mmdet/pytorch/README.md +++ b/cv/detection/atss_mmdet/pytorch/README.md @@ -9,6 +9,12 @@ optimal sample selection thresholds, eliminating the need for manual tuning. Thi and anchor-free detectors, achieving state-of-the-art performance on benchmarks like COCO without additional computational overhead. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/autoassign/pytorch/README.md b/cv/detection/autoassign/pytorch/README.md index f874fe8bd..473d6fb6d 100755 --- a/cv/detection/autoassign/pytorch/README.md +++ b/cv/detection/autoassign/pytorch/README.md @@ -9,6 +9,12 @@ assignment strategies for each instance. This approach eliminates the need for m appearance-aware detection through automatic sample selection, resulting in improved performance and reduced human intervention in the detection process. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/cascade_rcnn_mmdet/pytorch/README.md b/cv/detection/cascade_rcnn_mmdet/pytorch/README.md index e673a73d2..1675f1b98 100644 --- a/cv/detection/cascade_rcnn_mmdet/pytorch/README.md +++ b/cv/detection/cascade_rcnn_mmdet/pytorch/README.md @@ -8,6 +8,12 @@ stage, addressing the paradox of high-quality detection by minimizing overfittin between training and inference. This architecture achieves state-of-the-art performance on various datasets, including COCO, and can be extended to instance segmentation tasks, outperforming models like Mask R-CNN. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/centermask2/pytorch/README.md b/cv/detection/centermask2/pytorch/README.md index 4d5d8e3ce..4f67b38bf 100644 --- a/cv/detection/centermask2/pytorch/README.md +++ b/cv/detection/centermask2/pytorch/README.md @@ -9,6 +9,12 @@ predictions effectively. The model achieves state-of-the-art performance on COCO training and inference capabilities. 
It's particularly effective for complex scenes with overlapping objects and varying scales. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/centernet/paddlepaddle/README.md b/cv/detection/centernet/paddlepaddle/README.md index afec74dc0..02ab0bf1d 100644 --- a/cv/detection/centernet/paddlepaddle/README.md +++ b/cv/detection/centernet/paddlepaddle/README.md @@ -8,6 +8,12 @@ properties like size and orientation. This approach eliminates the need for anch making it simpler and faster. CenterNet achieves state-of-the-art speed-accuracy trade-offs on benchmarks like COCO and can be extended to 3D detection and pose estimation tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/centernet/pytorch/README.md b/cv/detection/centernet/pytorch/README.md index dd0198cf5..ffe785bf3 100644 --- a/cv/detection/centernet/pytorch/README.md +++ b/cv/detection/centernet/pytorch/README.md @@ -8,6 +8,12 @@ properties like size and orientation. This approach eliminates the need for anch making it simpler and faster. CenterNet achieves state-of-the-art speed-accuracy trade-offs on benchmarks like COCO and can be extended to 3D detection and pose estimation tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/co-detr/pytorch/README.md b/cv/detection/co-detr/pytorch/README.md index b73ec656b..367a8870d 100644 --- a/cv/detection/co-detr/pytorch/README.md +++ b/cv/detection/co-detr/pytorch/README.md @@ -8,6 +8,12 @@ assignments and optimizes decoder attention using customized positive queries. C performance, being the first model to reach 66.0 AP on COCO test-dev with ViT-L. This approach significantly boosts detection accuracy and efficiency while maintaining end-to-end training simplicity. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/cornernet_mmdet/pytorch/README.md b/cv/detection/cornernet_mmdet/pytorch/README.md index ffaf95957..3009eb6c0 100644 --- a/cv/detection/cornernet_mmdet/pytorch/README.md +++ b/cv/detection/cornernet_mmdet/pytorch/README.md @@ -9,6 +9,12 @@ addition to our novel formulation, we introduce corner pooling, a new type of po better localize corners. Experiments show that CornerNet achieves a 42.2% AP on MS COCO, outperforming all existing one-stage detectors. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/dcnv2_mmdet/pytorch/README.md b/cv/detection/dcnv2_mmdet/pytorch/README.md index 8977ca8f0..af5f1238b 100644 --- a/cv/detection/dcnv2_mmdet/pytorch/README.md +++ b/cv/detection/dcnv2_mmdet/pytorch/README.md @@ -9,6 +9,12 @@ comprehensively throughout the network, enabling superior adaptation to object g state-of-the-art performance on object detection and instance segmentation tasks, particularly on benchmarks like COCO, while maintaining computational efficiency. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/detr/paddlepaddle/README.md b/cv/detection/detr/paddlepaddle/README.md index 701b4f8b1..f20fb6ba9 100644 --- a/cv/detection/detr/paddlepaddle/README.md +++ b/cv/detection/detr/paddlepaddle/README.md @@ -8,6 +8,12 @@ anchor boxes and non-maximum suppression. DETR uses a transformer encoder-decode and predict object bounding boxes and classes simultaneously. This end-to-end approach simplifies the detection pipeline while achieving competitive performance on benchmarks like COCO, offering a new paradigm for object detection tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/fasterrcnn/pytorch/README.md b/cv/detection/fasterrcnn/pytorch/README.md index 9b071be03..1c9eae299 100644 --- a/cv/detection/fasterrcnn/pytorch/README.md +++ b/cv/detection/fasterrcnn/pytorch/README.md @@ -8,6 +8,12 @@ cost-free region proposals. This architecture significantly improves detection s predecessors. Faster R-CNN achieves excellent performance on benchmarks like PASCAL VOC and COCO, and serves as the foundation for many winning entries in computer vision competitions. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/fcos/paddlepaddle/README.md b/cv/detection/fcos/paddlepaddle/README.md index bede6c790..cae72bd27 100644 --- a/cv/detection/fcos/paddlepaddle/README.md +++ b/cv/detection/fcos/paddlepaddle/README.md @@ -8,6 +8,12 @@ bounding boxes and class labels. FCOS simplifies the detection pipeline, reduces competitive performance on benchmarks like COCO. Its center-ness branch helps suppress low-quality predictions, making it efficient and effective for various detection tasks. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/fcos/pytorch/README.md b/cv/detection/fcos/pytorch/README.md index 2236e5764..0a8dc6043 100755 --- a/cv/detection/fcos/pytorch/README.md +++ b/cv/detection/fcos/pytorch/README.md @@ -8,6 +8,12 @@ bounding boxes and class labels. FCOS simplifies the detection pipeline, reduces competitive performance on benchmarks like COCO. Its center-ness branch helps suppress low-quality predictions, making it efficient and effective for various detection tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V100 | 3.0.0 | 23.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/mamba_yolo/pytorch/README.md b/cv/detection/mamba_yolo/pytorch/README.md index be6a2484b..d85c08423 100644 --- a/cv/detection/mamba_yolo/pytorch/README.md +++ b/cv/detection/mamba_yolo/pytorch/README.md @@ -6,6 +6,12 @@ Mamba-YOLO is an innovative object detection model that integrates State Space M Look Once) architecture to enhance performance in complex visual tasks. This integration aims to improve the model's ability to capture global dependencies and process long-range information efficiently. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.1.1 | 24.12 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/maskrcnn/paddlepaddle/README.md b/cv/detection/maskrcnn/paddlepaddle/README.md index f98a7cd69..bec9004a8 100644 --- a/cv/detection/maskrcnn/paddlepaddle/README.md +++ b/cv/detection/maskrcnn/paddlepaddle/README.md @@ -8,6 +8,12 @@ segmentation masks for each instance. Mask R-CNN maintains the two-stage archite fully convolutional network for mask prediction. This model achieves state-of-the-art performance on tasks like object detection, instance segmentation, and human pose estimation. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/maskrcnn/pytorch/README.md b/cv/detection/maskrcnn/pytorch/README.md index 9361b58f9..d93a2faae 100644 --- a/cv/detection/maskrcnn/pytorch/README.md +++ b/cv/detection/maskrcnn/pytorch/README.md @@ -8,6 +8,12 @@ segmentation masks for each instance. Mask R-CNN maintains the two-stage archite fully convolutional network for mask prediction. This model achieves state-of-the-art performance on tasks like object detection, instance segmentation, and human pose estimation. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/oc_sort/paddlepaddle/README.md b/cv/detection/oc_sort/paddlepaddle/README.md index 2d8a0559e..3b3cc4bb0 100644 --- a/cv/detection/oc_sort/paddlepaddle/README.md +++ b/cv/detection/oc_sort/paddlepaddle/README.md @@ -9,6 +9,12 @@ observation-centric updates, making it more reliable for object tracking in chal flexible for integration with various detectors and matching modules, offering improved accuracy without compromising speed. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/oriented_reppoints/pytorch/README.md b/cv/detection/oriented_reppoints/pytorch/README.md index da3c445d9..4e6b8c8d0 100644 --- a/cv/detection/oriented_reppoints/pytorch/README.md +++ b/cv/detection/oriented_reppoints/pytorch/README.md @@ -8,6 +8,12 @@ instances, offering more precise detection than traditional bounding box approac oriented conversion functions for accurate classification and localization, along with a quality assessment scheme to handle cluttered backgrounds. It achieves state-of-the-art performance on aerial datasets like DOTA and HRSC2016. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/picodet/paddlepaddle/README.md b/cv/detection/picodet/paddlepaddle/README.md index cee3cff1f..ef89da10d 100644 --- a/cv/detection/picodet/paddlepaddle/README.md +++ b/cv/detection/picodet/paddlepaddle/README.md @@ -8,6 +8,12 @@ end-to-end inference by including post-processing in the network, enabling direc achieves an excellent balance between speed and accuracy, making it ideal for applications requiring real-time detection on resource-constrained devices. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/pp-yoloe/paddlepaddle/README.md b/cv/detection/pp-yoloe/paddlepaddle/README.md index eed132eaa..5b391afaa 100644 --- a/cv/detection/pp-yoloe/paddlepaddle/README.md +++ b/cv/detection/pp-yoloe/paddlepaddle/README.md @@ -9,6 +9,12 @@ Convolution, ensuring compatibility with diverse hardware. It achieves excellent suitable for real-time applications. The model's efficient architecture and optimization techniques make it a top choice for object detection tasks. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/pp_yoloe+/paddlepaddle/README.md b/cv/detection/pp_yoloe+/paddlepaddle/README.md index e724119bc..fc3345330 100644 --- a/cv/detection/pp_yoloe+/paddlepaddle/README.md +++ b/cv/detection/pp_yoloe+/paddlepaddle/README.md @@ -8,6 +8,12 @@ configurable through width and depth multipliers. PP-YOLOE+ maintains hardware c operators while achieving state-of-the-art speed-accuracy trade-offs. Its optimized architecture makes it ideal for real-time applications, offering superior detection performance across various scenarios and hardware platforms. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/pvanet/pytorch/README.md b/cv/detection/pvanet/pytorch/README.md index ef0709db5..627ddb597 100755 --- a/cv/detection/pvanet/pytorch/README.md +++ b/cv/detection/pvanet/pytorch/README.md @@ -8,6 +8,12 @@ channels," incorporating innovations like C.ReLU and Inception structures. PVANe benchmarks with significantly reduced computational requirements compared to heavier networks. Its optimized design makes it suitable for real-time applications where both speed and accuracy are crucial. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/reppoints_mmdet/pytorch/README.md b/cv/detection/reppoints_mmdet/pytorch/README.md index 82370f5b2..2edb05dbd 100644 --- a/cv/detection/reppoints_mmdet/pytorch/README.md +++ b/cv/detection/reppoints_mmdet/pytorch/README.md @@ -8,6 +8,12 @@ that bound objects and indicate semantically significant areas. RepPoints achiev benchmarks while eliminating the need for anchor boxes. Its finer representation enables better object understanding and more accurate detection, particularly for complex shapes and overlapping objects. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/retinanet/paddlepaddle/README.md b/cv/detection/retinanet/paddlepaddle/README.md index 9a007f676..afc895912 100644 --- a/cv/detection/retinanet/paddlepaddle/README.md +++ b/cv/detection/retinanet/paddlepaddle/README.md @@ -8,6 +8,12 @@ efficiently. RetinaNet achieves high accuracy while maintaining competitive spee detection tasks. Its single-stage architecture combines the accuracy of two-stage detectors with the speed of single-stage approaches, offering an excellent balance between performance and efficiency. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/retinanet/pytorch/README.md b/cv/detection/retinanet/pytorch/README.md index 0b0dd12f7..a94c3df12 100644 --- a/cv/detection/retinanet/pytorch/README.md +++ b/cv/detection/retinanet/pytorch/README.md @@ -8,6 +8,12 @@ efficiently. RetinaNet achieves high accuracy while maintaining competitive spee detection tasks. Its single-stage architecture combines the accuracy of two-stage detectors with the speed of single-stage approaches, offering an excellent balance between performance and efficiency. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/rt-detr/pytorch/README.md b/cv/detection/rt-detr/pytorch/README.md index 449d226d6..0e5fa2b51 100644 --- a/cv/detection/rt-detr/pytorch/README.md +++ b/cv/detection/rt-detr/pytorch/README.md @@ -8,6 +8,12 @@ speed. RT-DETR achieves competitive accuracy with significantly faster inference applications requiring real-time performance. The model preserves the end-to-end detection capabilities of DETR while addressing its computational challenges, offering a practical solution for time-sensitive detection tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/rtmdet/pytorch/README.md b/cv/detection/rtmdet/pytorch/README.md index f5feef5d0..12a61aa3b 100644 --- a/cv/detection/rtmdet/pytorch/README.md +++ b/cv/detection/rtmdet/pytorch/README.md @@ -8,6 +8,12 @@ achieves state-of-the-art accuracy with exceptional speed, reaching 300+ FPS on sizes for different applications and excels in tasks like instance segmentation and rotated object detection. Its design provides insights for versatile real-time detection systems. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/solov2/paddlepaddle/README.md b/cv/detection/solov2/paddlepaddle/README.md index 8213cdc6b..1faab97ac 100644 --- a/cv/detection/solov2/paddlepaddle/README.md +++ b/cv/detection/solov2/paddlepaddle/README.md @@ -8,6 +8,12 @@ and faster approach compared to traditional methods. SOLOv2 introduces dynamic c suppression to improve mask quality and processing speed. The model achieves strong performance on instance segmentation tasks while maintaining real-time capabilities, making it suitable for various computer vision applications. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/ssd/mindspore/README.md b/cv/detection/ssd/mindspore/README.md index f114071d8..05ebee898 100755 --- a/cv/detection/ssd/mindspore/README.md +++ b/cv/detection/ssd/mindspore/README.md @@ -7,6 +7,12 @@ class scores in a single forward pass. It uses a set of default boxes at differe multiple feature maps to detect objects of various sizes. SSD combines predictions from different layers to handle objects at different resolutions, offering a good balance between speed and accuracy for real-time detection tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V100 | 3.0.0 | 23.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/ssd/paddlepaddle/README.md b/cv/detection/ssd/paddlepaddle/README.md index 306f9c2cc..48ca88297 100644 --- a/cv/detection/ssd/paddlepaddle/README.md +++ b/cv/detection/ssd/paddlepaddle/README.md @@ -7,6 +7,12 @@ class scores in a single forward pass. It uses a set of default boxes at differe multiple feature maps to detect objects of various sizes. SSD combines predictions from different layers to handle objects at different resolutions, offering a good balance between speed and accuracy for real-time detection tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/ssd/pytorch/README.md b/cv/detection/ssd/pytorch/README.md index 7535f4023..0f918fcfc 100644 --- a/cv/detection/ssd/pytorch/README.md +++ b/cv/detection/ssd/pytorch/README.md @@ -7,6 +7,12 @@ class scores in a single forward pass. It uses a set of default boxes at differe multiple feature maps to detect objects of various sizes. SSD combines predictions from different layers to handle objects at different resolutions, offering a good balance between speed and accuracy for real-time detection tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V100 | 2.2.0 | 22.09 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/ssd/tensorflow/README.md b/cv/detection/ssd/tensorflow/README.md index 921a0966e..1ead9c194 100644 --- a/cv/detection/ssd/tensorflow/README.md +++ b/cv/detection/ssd/tensorflow/README.md @@ -7,6 +7,12 @@ class scores in a single forward pass. It uses a set of default boxes at differe multiple feature maps to detect objects of various sizes. SSD combines predictions from different layers to handle objects at different resolutions, offering a good balance between speed and accuracy for real-time detection tasks. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolof/pytorch/README.md b/cv/detection/yolof/pytorch/README.md index f71b4de5d..e2bc7a935 100755 --- a/cv/detection/yolof/pytorch/README.md +++ b/cv/detection/yolof/pytorch/README.md @@ -8,6 +8,12 @@ multi-level approaches. YOLOF introduces two key components: Dilated Encoder for Uniform Matching for balanced positive samples. The model achieves competitive accuracy with RetinaNet while being 2.5x faster, making it suitable for real-time detection tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolov10/pytorch/README.md b/cv/detection/yolov10/pytorch/README.md index 4392fa5f3..192a1ebab 100644 --- a/cv/detection/yolov10/pytorch/README.md +++ b/cv/detection/yolov10/pytorch/README.md @@ -8,6 +8,12 @@ state-of-the-art performance with reduced computational overhead, offering super various model scales. Built on the Ultralytics framework, it addresses limitations of previous YOLO versions, making it ideal for real-time applications requiring fast and accurate object detection in diverse scenarios. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolov3/paddlepaddle/README.md b/cv/detection/yolov3/paddlepaddle/README.md index b7f6ceadc..7bed2ee17 100644 --- a/cv/detection/yolov3/paddlepaddle/README.md +++ b/cv/detection/yolov3/paddlepaddle/README.md @@ -8,6 +8,12 @@ competitive performance with faster inference times compared to other detectors. pass, making it efficient for real-time applications. The model balances speed and accuracy, making it popular for practical detection tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolov3/pytorch/README.md b/cv/detection/yolov3/pytorch/README.md index 7cdc40ca4..4630f3b1b 100755 --- a/cv/detection/yolov3/pytorch/README.md +++ b/cv/detection/yolov3/pytorch/README.md @@ -8,6 +8,12 @@ competitive performance with faster inference times compared to other detectors. pass, making it efficient for real-time applications. The model balances speed and accuracy, making it popular for practical detection tasks. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolov3/tensorflow/README.md b/cv/detection/yolov3/tensorflow/README.md index cfed87ca2..6d583c616 100644 --- a/cv/detection/yolov3/tensorflow/README.md +++ b/cv/detection/yolov3/tensorflow/README.md @@ -8,6 +8,12 @@ competitive performance with faster inference times compared to other detectors. pass, making it efficient for real-time applications. The model balances speed and accuracy, making it popular for practical detection tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolov5/paddlepaddle/README.md b/cv/detection/yolov5/paddlepaddle/README.md index dc7b48915..003cf1455 100644 --- a/cv/detection/yolov5/paddlepaddle/README.md +++ b/cv/detection/yolov5/paddlepaddle/README.md @@ -7,6 +7,12 @@ accuracy. It features a streamlined design with enhanced data augmentation and a multiple model sizes (n/s/m/l/x) for different performance needs. The model is known for its ease of use, fast training, and efficient inference, making it popular for real-time detection tasks across various applications. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 3.1.1 | 24.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolov5/pytorch/README.md b/cv/detection/yolov5/pytorch/README.md index d44594250..5c04066d9 100644 --- a/cv/detection/yolov5/pytorch/README.md +++ b/cv/detection/yolov5/pytorch/README.md @@ -7,6 +7,12 @@ accuracy. It features a streamlined design with enhanced data augmentation and a multiple model sizes (n/s/m/l/x) for different performance needs. The model is known for its ease of use, fast training, and efficient inference, making it popular for real-time detection tasks across various applications. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolov6/pytorch/README.md b/cv/detection/yolov6/pytorch/README.md index 819900288..2b73b0842 100644 --- a/cv/detection/yolov6/pytorch/README.md +++ b/cv/detection/yolov6/pytorch/README.md @@ -9,6 +9,12 @@ speed. It introduces innovative quantization methods for efficient deployment. T performance compared to other YOLO variants, making it suitable for diverse real-world applications requiring fast and accurate object detection. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolov7/pytorch/README.md b/cv/detection/yolov7/pytorch/README.md index 468570543..e17e5d95e 100644 --- a/cv/detection/yolov7/pytorch/README.md +++ b/cv/detection/yolov7/pytorch/README.md @@ -8,6 +8,12 @@ optimizes model architecture, training strategies, and inference efficiency with model supports various scales for different performance needs and demonstrates exceptional results on COCO benchmarks. Its efficient design makes it suitable for real-world applications requiring fast and accurate object detection. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolov8/pytorch/README.md b/cv/detection/yolov8/pytorch/README.md index 0f209d864..d06d8eff5 100644 --- a/cv/detection/yolov8/pytorch/README.md +++ b/cv/detection/yolov8/pytorch/README.md @@ -8,6 +8,12 @@ multiple tasks including instance segmentation, pose estimation, and image class efficiency and ease of use, making it suitable for real-time applications. It maintains the YOLO tradition of fast inference while delivering superior detection performance across various scenarios. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/cv/detection/yolov9/pytorch/README.md b/cv/detection/yolov9/pytorch/README.md index 2b7c01fac..268dd5e38 100644 --- a/cv/detection/yolov9/pytorch/README.md +++ b/cv/detection/yolov9/pytorch/README.md @@ -8,6 +8,12 @@ introduces innovative features that optimize performance across various hardware tradition of real-time detection while delivering superior results in complex scenarios. Its efficient design makes it suitable for applications requiring fast and accurate object recognition in diverse environments. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources -- Gitee From 79c9e42136e1981719a75433dfa56945cadff4ff Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Wed, 26 Mar 2025 10:46:24 +0800 Subject: [PATCH 3/4] add supported env for nlp/llm --- nlp/llm/Yi-1.5-6B/pytorch/README.md | 6 ++++++ nlp/llm/Yi-6B/pytorch/README.md | 6 ++++++ nlp/llm/Yi-VL-6B/pytorch/README.md | 6 ++++++ nlp/llm/aquila2-34b/pytorch/README.md | 6 ++++++ nlp/llm/baichuan2-7b/pytorch/README.md | 6 ++++++ nlp/llm/bloom-7b1/pytorch/README.md | 6 ++++++ nlp/llm/chatglm-6b/pytorch/README.md | 6 ++++++ nlp/llm/chatglm2-6b-sft/pytorch/README.md | 6 ++++++ nlp/llm/chatglm3-6b/pytorch/README.md | 6 ++++++ nlp/llm/deepseek_moe_7b/pytorch/README.md | 6 ++++++ nlp/llm/glm-4/pytorch/README.md | 6 ++++++ nlp/llm/gpt2-medium-en/paddlepaddle/README.md | 6 ++++++ nlp/llm/llama-7b/pytorch/README.md | 6 ++++++ nlp/llm/llama2-13b/pytorch/README.md | 6 ++++++ nlp/llm/llama2-34b/pytorch/README.md | 6 ++++++ nlp/llm/llama2-7b/pytorch/README.md | 6 ++++++ nlp/llm/llama2-7b_reward_sft/pytorch/README.md | 6 ++++++ nlp/llm/llama2-7b_rlhf/pytorch/README.md | 6 ++++++ nlp/llm/llama2-7b_sft/pytorch/README.md | 6 ++++++ nlp/llm/llama3_8b/pytorch/README.md | 6 ++++++ nlp/llm/llama3_8b_sft/pytorch/README.md | 6 ++++++ nlp/llm/mamba-2/pytorch/README.md | 6 ++++++ nlp/llm/minicpm/pytorch/README.md | 6 ++++++ nlp/llm/mixtral/pytorch/README.md | 6 ++++++ nlp/llm/phi-3/pytorch/README.md | 6 ++++++ nlp/llm/qwen-7b/pytorch/README.md | 6 ++++++ nlp/llm/qwen1.5-14b/pytorch/README.md | 6 ++++++ nlp/llm/qwen1.5-7b/pytorch/README.md | 6 ++++++ nlp/llm/qwen2.5-7b/pytorch/README.md | 6 ++++++ 29 files changed, 174 insertions(+) diff --git a/nlp/llm/Yi-1.5-6B/pytorch/README.md b/nlp/llm/Yi-1.5-6B/pytorch/README.md index 9f1b47113..1d0d8e255 100644 --- a/nlp/llm/Yi-1.5-6B/pytorch/README.md +++ b/nlp/llm/Yi-1.5-6B/pytorch/README.md @@ -7,6 +7,12 @@ Targeted as a bilingual language model and trained on 3T multilingual corpus, th strongest LLM worldwide, showing promise in language understanding, commonsense reasoning, reading comprehension, and more. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/Yi-6B/pytorch/README.md b/nlp/llm/Yi-6B/pytorch/README.md index 60a7b1b65..5710553cd 100644 --- a/nlp/llm/Yi-6B/pytorch/README.md +++ b/nlp/llm/Yi-6B/pytorch/README.md @@ -4,6 +4,12 @@ The Yi series models are the next generation of open-source large language models trained from scratch by 01.AI. Targeted as a bilingual language model and trained on 3T multilingual corpus, the Yi series models become one of the strongest LLM worldwide, showing promise in language understanding, commonsense reasoning, reading comprehension, and more. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/Yi-VL-6B/pytorch/README.md b/nlp/llm/Yi-VL-6B/pytorch/README.md index dd4f9929b..218c59821 100644 --- a/nlp/llm/Yi-VL-6B/pytorch/README.md +++ b/nlp/llm/Yi-VL-6B/pytorch/README.md @@ -8,6 +8,12 @@ conversations involving both text and images. Supporting English and Chinese, Yi performance in benchmarks like MMMU and CMMMU. Its ability to process high-resolution images (448×448) and engage in detailed visual question answering makes it a powerful tool for AI-driven image-text analysis and dialogue systems. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/aquila2-34b/pytorch/README.md b/nlp/llm/aquila2-34b/pytorch/README.md index 9c95b4742..dbb0d64b0 100644 --- a/nlp/llm/aquila2-34b/pytorch/README.md +++ b/nlp/llm/aquila2-34b/pytorch/README.md @@ -9,6 +9,12 @@ optimizing computational resources. Its architecture enables advanced performanc text generation, summarization, and question answering. The model represents a significant advancement in Chinese language processing, offering improved context understanding and response generation for diverse linguistic tasks. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Configure 4-node environment diff --git a/nlp/llm/baichuan2-7b/pytorch/README.md b/nlp/llm/baichuan2-7b/pytorch/README.md index b863e06db..8a8a3aaf6 100644 --- a/nlp/llm/baichuan2-7b/pytorch/README.md +++ b/nlp/llm/baichuan2-7b/pytorch/README.md @@ -9,6 +9,12 @@ domain-specific applications. Baichuan2-7B's architecture is optimized for effic suitable for both academic research and commercial use. Its open-source nature encourages innovation and development in the field of natural language processing. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/bloom-7b1/pytorch/README.md b/nlp/llm/bloom-7b1/pytorch/README.md index 5cb505315..915d022b6 100755 --- a/nlp/llm/bloom-7b1/pytorch/README.md +++ b/nlp/llm/bloom-7b1/pytorch/README.md @@ -7,6 +7,12 @@ data using industrial-scale computational resources. As such, it is able to outp programming languages that is hardly distinguishable from text written by humans. BLOOM can also be instructed to perform text tasks it hasn't been explicitly trained for, by casting them as text generation tasks. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/chatglm-6b/pytorch/README.md b/nlp/llm/chatglm-6b/pytorch/README.md index ecec64e6c..e9fbced07 100644 --- a/nlp/llm/chatglm-6b/pytorch/README.md +++ b/nlp/llm/chatglm-6b/pytorch/README.md @@ -9,6 +9,12 @@ in generating human-like responses, particularly in Chinese QA scenarios. Its tr corpora enables it to handle diverse conversational contexts while maintaining computational efficiency and accessibility for local deployment. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/chatglm2-6b-sft/pytorch/README.md b/nlp/llm/chatglm2-6b-sft/pytorch/README.md index 83e52f672..75614e360 100644 --- a/nlp/llm/chatglm2-6b-sft/pytorch/README.md +++ b/nlp/llm/chatglm2-6b-sft/pytorch/README.md @@ -8,6 +8,12 @@ fine-tuning with minimal computational resources. Through techniques like model it can operate on GPUs with as little as 7GB of memory. ChatGLM2-6B SFT maintains the original model's bilingual capabilities while offering improved task-specific performance and resource efficiency. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/chatglm3-6b/pytorch/README.md b/nlp/llm/chatglm3-6b/pytorch/README.md index a1ac21347..990047acd 100644 --- a/nlp/llm/chatglm3-6b/pytorch/README.md +++ b/nlp/llm/chatglm3-6b/pytorch/README.md @@ -9,6 +9,12 @@ efficiency and language understanding. ChatGLM3-6B excels in generating coherent particularly in Chinese dialogue scenarios. Its architecture supports various fine-tuning techniques, making it adaptable for diverse applications while maintaining a low deployment threshold for practical implementation. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/deepseek_moe_7b/pytorch/README.md b/nlp/llm/deepseek_moe_7b/pytorch/README.md index 1e6abe9e9..8195d5a26 100644 --- a/nlp/llm/deepseek_moe_7b/pytorch/README.md +++ b/nlp/llm/deepseek_moe_7b/pytorch/README.md @@ -9,6 +9,12 @@ performance, making it suitable for various natural language processing applicat capabilities in language understanding and generation while maintaining a more compact structure compared to its larger counterpart. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/glm-4/pytorch/README.md b/nlp/llm/glm-4/pytorch/README.md index 83b509887..29d4059b1 100644 --- a/nlp/llm/glm-4/pytorch/README.md +++ b/nlp/llm/glm-4/pytorch/README.md @@ -9,6 +9,12 @@ languages and demonstrates exceptional performance in multilingual scenarios. De applications, GLM-4-9B represents a significant advancement in large language model technology with its enhanced capabilities and versatility. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Install Dependencies diff --git a/nlp/llm/gpt2-medium-en/paddlepaddle/README.md b/nlp/llm/gpt2-medium-en/paddlepaddle/README.md index c3e7a5cbb..d8c4e37a3 100644 --- a/nlp/llm/gpt2-medium-en/paddlepaddle/README.md +++ b/nlp/llm/gpt2-medium-en/paddlepaddle/README.md @@ -9,6 +9,12 @@ generation. Pretrained on a vast corpus of English text, it demonstrates strong generating coherent, contextually relevant text. The model's architecture enables it to capture long-range dependencies in text, making it versatile for diverse language processing applications. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/llama-7b/pytorch/README.md b/nlp/llm/llama-7b/pytorch/README.md index f4d6836d9..4b1151da1 100644 --- a/nlp/llm/llama-7b/pytorch/README.md +++ b/nlp/llm/llama-7b/pytorch/README.md @@ -9,6 +9,12 @@ generation, question answering, and sentence completion. Its efficient architect while maintaining computational feasibility, making it a versatile tool for various NLP applications and research in language model development. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V100 | 3.1.0 | 23.09 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/llama2-13b/pytorch/README.md b/nlp/llm/llama2-13b/pytorch/README.md index 1e76ae223..fde31d632 100644 --- a/nlp/llm/llama2-13b/pytorch/README.md +++ b/nlp/llm/llama2-13b/pytorch/README.md @@ -6,6 +6,12 @@ Llama 2 is a large language model released by Meta in 2023, with parameters rang the training corpus of Llama 2 is 40% longer, and the context length has been upgraded from 2048 to 4096, allowing for understanding and generating longer texts. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Configure 2-node environment diff --git a/nlp/llm/llama2-34b/pytorch/README.md b/nlp/llm/llama2-34b/pytorch/README.md index aa3c3bd5e..3e7475585 100644 --- a/nlp/llm/llama2-34b/pytorch/README.md +++ b/nlp/llm/llama2-34b/pytorch/README.md @@ -8,6 +8,12 @@ better understanding of longer texts. This model excels in various natural langu comprehension, and dialogue. Its enhanced architecture and training methodology make it a versatile tool for AI applications, offering state-of-the-art performance in language understanding and generation. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Configure 4-node environment diff --git a/nlp/llm/llama2-7b/pytorch/README.md b/nlp/llm/llama2-7b/pytorch/README.md index fda863dc7..c54c1e4f6 100644 --- a/nlp/llm/llama2-7b/pytorch/README.md +++ b/nlp/llm/llama2-7b/pytorch/README.md @@ -8,6 +8,12 @@ better understanding of longer texts. This model excels in various natural langu comprehension. Its enhanced architecture and training methodology make it a powerful tool for AI applications while maintaining computational efficiency compared to larger models in the Llama-2 series. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/llama2-7b_reward_sft/pytorch/README.md b/nlp/llm/llama2-7b_reward_sft/pytorch/README.md index 7cfaa1525..b4d3827af 100644 --- a/nlp/llm/llama2-7b_reward_sft/pytorch/README.md +++ b/nlp/llm/llama2-7b_reward_sft/pytorch/README.md @@ -9,6 +9,12 @@ excels in understanding and generating coherent, contextually relevant responses efficient training and inference, making it a powerful tool for developing high-quality conversational AI systems while maintaining computational efficiency. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/llama2-7b_rlhf/pytorch/README.md b/nlp/llm/llama2-7b_rlhf/pytorch/README.md index 1b18409e7..1e7834e30 100644 --- a/nlp/llm/llama2-7b_rlhf/pytorch/README.md +++ b/nlp/llm/llama2-7b_rlhf/pytorch/README.md @@ -11,6 +11,12 @@ to larger models. **Notion: You would better to fine-tune this two models, then do RLHF training as below. 
So that can get good training result.** +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/llama2-7b_sft/pytorch/README.md b/nlp/llm/llama2-7b_sft/pytorch/README.md index 4020a1a61..aab9f57ca 100644 --- a/nlp/llm/llama2-7b_sft/pytorch/README.md +++ b/nlp/llm/llama2-7b_sft/pytorch/README.md @@ -9,6 +9,12 @@ making it particularly effective for applications requiring precise language und combines the foundational capabilities of Llama-2 with task-specific optimizations, offering improved performance while maintaining computational efficiency. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/llama3_8b/pytorch/README.md b/nlp/llm/llama3_8b/pytorch/README.md index 1f7a136e8..37f418078 100644 --- a/nlp/llm/llama3_8b/pytorch/README.md +++ b/nlp/llm/llama3_8b/pytorch/README.md @@ -9,6 +9,12 @@ incorporates supervised fine-tuning (SFT) and reinforcement learning with human preferences, ensuring both helpfulness and safety in its responses. Llama3-8B offers state-of-the-art performance in language understanding and generation. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/llama3_8b_sft/pytorch/README.md b/nlp/llm/llama3_8b_sft/pytorch/README.md index f578e7755..4de45c1a5 100644 --- a/nlp/llm/llama3_8b_sft/pytorch/README.md +++ b/nlp/llm/llama3_8b_sft/pytorch/README.md @@ -9,6 +9,12 @@ particularly effective for applications requiring precise language understanding foundational capabilities of Llama3 with task-specific optimizations, offering improved performance while maintaining computational efficiency. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/mamba-2/pytorch/README.md b/nlp/llm/mamba-2/pytorch/README.md index 5a8def877..053fe20a5 100644 --- a/nlp/llm/mamba-2/pytorch/README.md +++ b/nlp/llm/mamba-2/pytorch/README.md @@ -7,6 +7,12 @@ Transformer-based large language models (LLMs). It is the second version of the of its predecessor by offering faster inference, improved scalability for long sequences, and lower computational complexity. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.1.1 | 24.12 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/minicpm/pytorch/README.md b/nlp/llm/minicpm/pytorch/README.md index 7df56801f..ca00046b2 100644 --- a/nlp/llm/minicpm/pytorch/README.md +++ b/nlp/llm/minicpm/pytorch/README.md @@ -9,6 +9,12 @@ Falcon-40B. 
Furthermore, on the MT-Bench, currently the closest benchmark to use many representative open-source large language models, including Llama2-70B-Chat, Vicuna-33B, Mistral-7B-Instruct-v0.1, and Zephyr-7B-alpha. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Install Dependencies diff --git a/nlp/llm/mixtral/pytorch/README.md b/nlp/llm/mixtral/pytorch/README.md index f2b009e62..e0ce2c35a 100644 --- a/nlp/llm/mixtral/pytorch/README.md +++ b/nlp/llm/mixtral/pytorch/README.md @@ -6,6 +6,12 @@ The Mixtral model is a Mixture of Experts (MoE)-based large language model devel company focusing on open-source AI models. Mixtral is designed to achieve high performance while maintaining computational efficiency, making it an excellent choice for real-world applications. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/phi-3/pytorch/README.md b/nlp/llm/phi-3/pytorch/README.md index 1c2264178..86d4d278b 100644 --- a/nlp/llm/phi-3/pytorch/README.md +++ b/nlp/llm/phi-3/pytorch/README.md @@ -9,6 +9,12 @@ class, offering a balance between computational efficiency and capability. Their architecture make them ideal for applications requiring lightweight yet powerful language processing solutions across diverse domains. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Install Dependencies diff --git a/nlp/llm/qwen-7b/pytorch/README.md b/nlp/llm/qwen-7b/pytorch/README.md index 18b476a72..bd4c4eba4 100644 --- a/nlp/llm/qwen-7b/pytorch/README.md +++ b/nlp/llm/qwen-7b/pytorch/README.md @@ -7,6 +7,12 @@ Cloud. Qwen-7B is a Transformer-based large language model, which is pretrained texts, books, codes, etc. Additionally, based on the pretrained Qwen-7B, we release Qwen-7B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/qwen1.5-14b/pytorch/README.md b/nlp/llm/qwen1.5-14b/pytorch/README.md index 140281577..c3b494f35 100644 --- a/nlp/llm/qwen1.5-14b/pytorch/README.md +++ b/nlp/llm/qwen1.5-14b/pytorch/README.md @@ -8,6 +8,12 @@ data. In comparison with the previous released Qwen, the improvements include:8 Chat models;Multilingual support of both base and chat models;Stable support of 32K context length for models of all sizes;No need of trust_remote_code. 
+## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/qwen1.5-7b/pytorch/README.md b/nlp/llm/qwen1.5-7b/pytorch/README.md index bc822f259..ffe8724d8 100644 --- a/nlp/llm/qwen1.5-7b/pytorch/README.md +++ b/nlp/llm/qwen1.5-7b/pytorch/README.md @@ -8,6 +8,12 @@ data. In comparison with the previous released Qwen, the improvements include:8 Chat models;Multilingual support of both base and chat models;Stable support of 32K context length for models of all sizes;No need of trust_remote_code. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources diff --git a/nlp/llm/qwen2.5-7b/pytorch/README.md b/nlp/llm/qwen2.5-7b/pytorch/README.md index 035da33c0..faf3f6157 100644 --- a/nlp/llm/qwen2.5-7b/pytorch/README.md +++ b/nlp/llm/qwen2.5-7b/pytorch/README.md @@ -8,6 +8,12 @@ lengths up to 128K tokens and generates outputs up to 8K tokens. The model excel languages and demonstrates robust performance in instruction following and role-play scenarios. Qwen2.5's optimized architecture and specialized expert models make it a versatile tool for diverse AI applications. +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | + ## Model Preparation ### Prepare Resources -- Gitee From 430d7bbf01c750dd71bf78434a0066ed0addcb97 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Wed, 26 Mar 2025 14:19:45 +0800 Subject: [PATCH 4/4] add release sdk info --- cv/classification/acmix/pytorch/README.md | 1 + cv/classification/acnet/pytorch/README.md | 1 + cv/classification/alexnet/pytorch/README.md | 1 + .../alexnet/tensorflow/README.md | 1 + cv/classification/byol/pytorch/README.md | 1 + cv/classification/cbam/pytorch/README.md | 1 + cv/classification/convnext/pytorch/README.md | 1 + .../cspdarknet53/pytorch/README.md | 1 + .../densenet/paddlepaddle/README.md | 1 + cv/classification/densenet/pytorch/README.md | 1 + cv/classification/dpn107/pytorch/README.md | 1 + cv/classification/dpn92/pytorch/README.md | 1 + .../eca_mobilenet_v2/pytorch/README.md | 1 + .../eca_resnet152/pytorch/README.md | 1 + .../efficientnet_b0/paddlepaddle/README.md | 1 + .../efficientnet_b4/pytorch/README.md | 1 + cv/classification/fasternet/pytorch/README.md | 1 + .../googlenet/paddlepaddle/README.md | 1 + cv/classification/googlenet/pytorch/README.md | 1 + .../inceptionv3/pytorch/README.md | 1 + .../inceptionv3/tensorflow/README.md | 1 + .../inceptionv4/pytorch/README.md | 1 + cv/classification/lenet/pytorch/README.md | 1 + .../mobilenetv2/pytorch/README.md | 1 + .../mobilenetv3/paddlepaddle/README.md | 1 + .../mobilenetv3/pytorch/README.md | 1 + .../paddlepaddle/README.md | 1 + cv/classification/mobileone/pytorch/README.md | 1 + cv/classification/mocov2/pytorch/README.md | 1 + .../pp-lcnet/paddlepaddle/README.md | 1 + cv/classification/repmlp/pytorch/README.md | 1 + .../repvgg/paddlepaddle/README.md | 1 + cv/classification/repvgg/pytorch/README.md | 1 + cv/classification/repvit/pytorch/README.md | 1 +
.../res2net50_14w_8s/paddlepaddle/README.md | 1 + .../resnest101/pytorch/README.md | 1 + cv/classification/resnest14/pytorch/README.md | 1 + .../resnest269/pytorch/README.md | 1 + .../resnest50/paddlepaddle/README.md | 1 + cv/classification/resnest50/pytorch/README.md | 1 + cv/classification/resnet101/pytorch/README.md | 1 + cv/classification/resnet152/pytorch/README.md | 1 + cv/classification/resnet18/pytorch/README.md | 1 + .../resnet50/paddlepaddle/README.md | 1 + cv/classification/resnet50/pytorch/README.md | 1 + .../resnet50/tensorflow/README.md | 1 + .../resnext101_32x8d/pytorch/README.md | 1 + .../resnext50_32x4d/mindspore/README.md | 1 + .../resnext50_32x4d/pytorch/README.md | 1 + .../se_resnet50_vd/paddlepaddle/README.md | 1 + cv/classification/seresnext/pytorch/README.md | 1 + .../shufflenetv2/paddlepaddle/README.md | 1 + .../shufflenetv2/pytorch/README.md | 1 + .../squeezenet/pytorch/README.md | 1 + .../swin_transformer/paddlepaddle/README.md | 1 + .../swin_transformer/pytorch/README.md | 1 + cv/classification/vgg/paddlepaddle/README.md | 1 + cv/classification/vgg/pytorch/README.md | 1 + cv/classification/vgg/tensorflow/README.md | 1 + cv/classification/wavemlp/pytorch/README.md | 1 + .../wide_resnet101_2/pytorch/README.md | 1 + .../xception/paddlepaddle/README.md | 1 + cv/classification/xception/pytorch/README.md | 1 + cv/detection/atss_mmdet/pytorch/README.md | 1 + cv/detection/autoassign/pytorch/README.md | 169 +++++++++--------- .../cascade_rcnn_mmdet/pytorch/README.md | 1 + cv/detection/centermask2/pytorch/README.md | 1 + cv/detection/centernet/paddlepaddle/README.md | 1 + cv/detection/centernet/pytorch/README.md | 1 + cv/detection/co-detr/pytorch/README.md | 1 + .../cornernet_mmdet/pytorch/README.md | 1 + cv/detection/dcnv2_mmdet/pytorch/README.md | 1 + cv/detection/detr/paddlepaddle/README.md | 1 + cv/detection/fasterrcnn/pytorch/README.md | 1 + cv/detection/fcos/paddlepaddle/README.md | 1 + cv/detection/maskrcnn/paddlepaddle/README.md | 1 + cv/detection/maskrcnn/pytorch/README.md | 1 + cv/detection/oc_sort/paddlepaddle/README.md | 1 + .../oriented_reppoints/pytorch/README.md | 1 + cv/detection/picodet/paddlepaddle/README.md | 1 + cv/detection/pp-yoloe/paddlepaddle/README.md | 1 + cv/detection/pp_yoloe+/paddlepaddle/README.md | 1 + cv/detection/pvanet/pytorch/README.md | 1 + .../reppoints_mmdet/pytorch/README.md | 1 + cv/detection/retinanet/paddlepaddle/README.md | 1 + cv/detection/retinanet/pytorch/README.md | 1 + cv/detection/rt-detr/pytorch/README.md | 1 + cv/detection/rtmdet/pytorch/README.md | 1 + cv/detection/solov2/paddlepaddle/README.md | 1 + cv/detection/ssd/paddlepaddle/README.md | 1 + cv/detection/ssd/tensorflow/README.md | 1 + cv/detection/yolof/pytorch/README.md | 1 + cv/detection/yolov10/pytorch/README.md | 1 + cv/detection/yolov3/paddlepaddle/README.md | 1 + cv/detection/yolov3/pytorch/README.md | 1 + cv/detection/yolov3/tensorflow/README.md | 1 + cv/detection/yolov5/pytorch/README.md | 1 + cv/detection/yolov6/pytorch/README.md | 1 + cv/detection/yolov7/pytorch/README.md | 1 + cv/detection/yolov8/pytorch/README.md | 1 + cv/detection/yolov9/pytorch/README.md | 1 + nlp/llm/aquila2-34b/pytorch/README.md | 1 + nlp/llm/baichuan2-7b/pytorch/README.md | 1 + nlp/llm/bloom-7b1/pytorch/README.md | 1 + nlp/llm/chatglm-6b/pytorch/README.md | 1 + nlp/llm/chatglm2-6b-sft/pytorch/README.md | 1 + nlp/llm/chatglm3-6b/pytorch/README.md | 1 + nlp/llm/deepseek_moe_7b/pytorch/README.md | 1 + nlp/llm/glm-4/pytorch/README.md | 1 + nlp/llm/gpt2-medium-en/paddlepaddle/README.md | 1 
+ nlp/llm/llama2-13b/pytorch/README.md | 1 + nlp/llm/llama2-34b/pytorch/README.md | 1 + nlp/llm/llama2-7b/pytorch/README.md | 1 + .../llama2-7b_reward_sft/pytorch/README.md | 1 + nlp/llm/llama2-7b_rlhf/pytorch/README.md | 1 + nlp/llm/llama2-7b_sft/pytorch/README.md | 1 + nlp/llm/llama3_8b/pytorch/README.md | 1 + nlp/llm/llama3_8b_sft/pytorch/README.md | 1 + nlp/llm/minicpm/pytorch/README.md | 1 + nlp/llm/mixtral/pytorch/README.md | 1 + nlp/llm/phi-3/pytorch/README.md | 1 + nlp/llm/qwen-7b/pytorch/README.md | 1 + nlp/llm/qwen1.5-14b/pytorch/README.md | 1 + nlp/llm/qwen1.5-7b/pytorch/README.md | 1 + nlp/llm/qwen2.5-7b/pytorch/README.md | 1 + 125 files changed, 209 insertions(+), 84 deletions(-) diff --git a/cv/classification/acmix/pytorch/README.md b/cv/classification/acmix/pytorch/README.md index 37e5cda4a..68e584912 100644 --- a/cv/classification/acmix/pytorch/README.md +++ b/cv/classification/acmix/pytorch/README.md @@ -14,6 +14,7 @@ attention-based approaches. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/acnet/pytorch/README.md b/cv/classification/acnet/pytorch/README.md index 7800add37..2b8f3fedf 100755 --- a/cv/classification/acnet/pytorch/README.md +++ b/cv/classification/acnet/pytorch/README.md @@ -14,6 +14,7 @@ performance improvements across various models on datasets like CIFAR and ImageN | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.12 | ## Model Preparation diff --git a/cv/classification/alexnet/pytorch/README.md b/cv/classification/alexnet/pytorch/README.md index 515abbf3f..b7eee5b18 100644 --- a/cv/classification/alexnet/pytorch/README.md +++ b/cv/classification/alexnet/pytorch/README.md @@ -15,6 +15,7 @@ basic building blocks. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/alexnet/tensorflow/README.md b/cv/classification/alexnet/tensorflow/README.md index af5946b33..22c026808 100644 --- a/cv/classification/alexnet/tensorflow/README.md +++ b/cv/classification/alexnet/tensorflow/README.md @@ -13,6 +13,7 @@ principles continue to influence modern neural network architectures in computer | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/byol/pytorch/README.md b/cv/classification/byol/pytorch/README.md index 6d56a91b4..db43138c7 100644 --- a/cv/classification/byol/pytorch/README.md +++ b/cv/classification/byol/pytorch/README.md @@ -13,6 +13,7 @@ large datasets before fine-tuning for specific tasks. 
| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/cbam/pytorch/README.md b/cv/classification/cbam/pytorch/README.md index ae09018e6..370bc1a21 100644 --- a/cv/classification/cbam/pytorch/README.md +++ b/cv/classification/cbam/pytorch/README.md @@ -13,6 +13,7 @@ existing CNN architectures, making it a versatile tool for improving various com | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.06 | ## Model Preparation diff --git a/cv/classification/convnext/pytorch/README.md b/cv/classification/convnext/pytorch/README.md index ef5e7d59c..f2ccfdac6 100644 --- a/cv/classification/convnext/pytorch/README.md +++ b/cv/classification/convnext/pytorch/README.md @@ -14,6 +14,7 @@ applications. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/cspdarknet53/pytorch/README.md b/cv/classification/cspdarknet53/pytorch/README.md index ca220531a..1d5535339 100644 --- a/cv/classification/cspdarknet53/pytorch/README.md +++ b/cv/classification/cspdarknet53/pytorch/README.md @@ -13,6 +13,7 @@ accuracy and speed, making it popular in modern object detection frameworks like | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.06 | ## Model Preparation diff --git a/cv/classification/densenet/paddlepaddle/README.md b/cv/classification/densenet/paddlepaddle/README.md index cde6c1399..579500a9a 100644 --- a/cv/classification/densenet/paddlepaddle/README.md +++ b/cv/classification/densenet/paddlepaddle/README.md @@ -13,6 +13,7 @@ traditional CNNs, making it efficient for various computer vision tasks like ima | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/densenet/pytorch/README.md b/cv/classification/densenet/pytorch/README.md index a98088034..8283926e9 100755 --- a/cv/classification/densenet/pytorch/README.md +++ b/cv/classification/densenet/pytorch/README.md @@ -13,6 +13,7 @@ traditional CNNs, making it efficient for various computer vision tasks like ima | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/dpn107/pytorch/README.md b/cv/classification/dpn107/pytorch/README.md index 4ccd50337..84f8bf94a 100644 --- a/cv/classification/dpn107/pytorch/README.md +++ b/cv/classification/dpn107/pytorch/README.md @@ -13,6 +13,7 @@ for complex visual recognition tasks, offering a balance between model accuracy | GPU | [IXUCA 
SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/dpn92/pytorch/README.md b/cv/classification/dpn92/pytorch/README.md index c31862f79..cbfaa7eaf 100644 --- a/cv/classification/dpn92/pytorch/README.md +++ b/cv/classification/dpn92/pytorch/README.md @@ -13,6 +13,7 @@ for tasks requiring both feature preservation and discovery. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/eca_mobilenet_v2/pytorch/README.md b/cv/classification/eca_mobilenet_v2/pytorch/README.md index 24c55affa..25f34b0fe 100644 --- a/cv/classification/eca_mobilenet_v2/pytorch/README.md +++ b/cv/classification/eca_mobilenet_v2/pytorch/README.md @@ -14,6 +14,7 @@ classification tasks. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/eca_resnet152/pytorch/README.md b/cv/classification/eca_resnet152/pytorch/README.md index 3dabe7116..aff3dcb71 100644 --- a/cv/classification/eca_resnet152/pytorch/README.md +++ b/cv/classification/eca_resnet152/pytorch/README.md @@ -14,6 +14,7 @@ various computer vision applications. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/efficientnet_b0/paddlepaddle/README.md b/cv/classification/efficientnet_b0/paddlepaddle/README.md index 5b988747c..c74ec5666 100644 --- a/cv/classification/efficientnet_b0/paddlepaddle/README.md +++ b/cv/classification/efficientnet_b0/paddlepaddle/README.md @@ -14,6 +14,7 @@ requirements. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.12 | ## Model Preparation diff --git a/cv/classification/efficientnet_b4/pytorch/README.md b/cv/classification/efficientnet_b4/pytorch/README.md index 7b48c4075..84952a164 100755 --- a/cv/classification/efficientnet_b4/pytorch/README.md +++ b/cv/classification/efficientnet_b4/pytorch/README.md @@ -13,6 +13,7 @@ scenarios where computational resources are available, offering a good trade-off | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/fasternet/pytorch/README.md b/cv/classification/fasternet/pytorch/README.md index 2d168679b..ff6686ff9 100644 --- a/cv/classification/fasternet/pytorch/README.md +++ b/cv/classification/fasternet/pytorch/README.md @@ -14,6 +14,7 @@ maintaining low latency. 
| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.06 | ## Model Preparation diff --git a/cv/classification/googlenet/paddlepaddle/README.md b/cv/classification/googlenet/paddlepaddle/README.md index f69ef4244..5b7a80239 100644 --- a/cv/classification/googlenet/paddlepaddle/README.md +++ b/cv/classification/googlenet/paddlepaddle/README.md @@ -13,6 +13,7 @@ low computational complexity. Its innovative design has influenced many subseque | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.3.0 | 22.12 | ## Model Preparation diff --git a/cv/classification/googlenet/pytorch/README.md b/cv/classification/googlenet/pytorch/README.md index a82e40c7b..18cc31154 100755 --- a/cv/classification/googlenet/pytorch/README.md +++ b/cv/classification/googlenet/pytorch/README.md @@ -13,6 +13,7 @@ low computational complexity. Its innovative design has influenced many subseque | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/inceptionv3/pytorch/README.md b/cv/classification/inceptionv3/pytorch/README.md index af9be6dc9..10760cce6 100644 --- a/cv/classification/inceptionv3/pytorch/README.md +++ b/cv/classification/inceptionv3/pytorch/README.md @@ -8,6 +8,7 @@ Inception-v3 is a convolutional neural network architecture from the Inception f | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/inceptionv3/tensorflow/README.md b/cv/classification/inceptionv3/tensorflow/README.md index 8ad1ebff3..ee389f1b7 100644 --- a/cv/classification/inceptionv3/tensorflow/README.md +++ b/cv/classification/inceptionv3/tensorflow/README.md @@ -9,6 +9,7 @@ InceptionV3 is a convolutional neural network architecture from the Inception fa | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/inceptionv4/pytorch/README.md b/cv/classification/inceptionv4/pytorch/README.md index 1efb8d65a..b13ee0ec0 100644 --- a/cv/classification/inceptionv4/pytorch/README.md +++ b/cv/classification/inceptionv4/pytorch/README.md @@ -14,6 +14,7 @@ representation and classification performance. 
| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/lenet/pytorch/README.md b/cv/classification/lenet/pytorch/README.md index 4eebd180b..98614a718 100755 --- a/cv/classification/lenet/pytorch/README.md +++ b/cv/classification/lenet/pytorch/README.md @@ -13,6 +13,7 @@ cornerstone in the evolution of deep learning for computer vision applications. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/mobilenetv2/pytorch/README.md b/cv/classification/mobilenetv2/pytorch/README.md index 4570db566..971367ea5 100644 --- a/cv/classification/mobilenetv2/pytorch/README.md +++ b/cv/classification/mobilenetv2/pytorch/README.md @@ -13,6 +13,7 @@ ideal for real-time applications on resource-constrained devices like smartphone | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/mobilenetv3/paddlepaddle/README.md b/cv/classification/mobilenetv3/paddlepaddle/README.md index cb43ee4ac..dc0601cc3 100644 --- a/cv/classification/mobilenetv3/paddlepaddle/README.md +++ b/cv/classification/mobilenetv3/paddlepaddle/README.md @@ -14,6 +14,7 @@ embedded systems. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.3.0 | 22.12 | ## Model Preparation diff --git a/cv/classification/mobilenetv3/pytorch/README.md b/cv/classification/mobilenetv3/pytorch/README.md index ec311199a..90c11c71f 100644 --- a/cv/classification/mobilenetv3/pytorch/README.md +++ b/cv/classification/mobilenetv3/pytorch/README.md @@ -14,6 +14,7 @@ embedded systems. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/mobilenetv3_large_x1_0/paddlepaddle/README.md b/cv/classification/mobilenetv3_large_x1_0/paddlepaddle/README.md index 6d39c4032..223db00a4 100644 --- a/cv/classification/mobilenetv3_large_x1_0/paddlepaddle/README.md +++ b/cv/classification/mobilenetv3_large_x1_0/paddlepaddle/README.md @@ -14,6 +14,7 @@ resource-constrained devices. 
| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/mobileone/pytorch/README.md b/cv/classification/mobileone/pytorch/README.md index cdcfe7b91..c283b31be 100644 --- a/cv/classification/mobileone/pytorch/README.md +++ b/cv/classification/mobileone/pytorch/README.md @@ -13,6 +13,7 @@ classification, object detection, and segmentation, making it ideal for mobile d | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/mocov2/pytorch/README.md b/cv/classification/mocov2/pytorch/README.md index 9b2cb9a52..278c8a4cd 100644 --- a/cv/classification/mocov2/pytorch/README.md +++ b/cv/classification/mocov2/pytorch/README.md @@ -13,6 +13,7 @@ like SimCLR while maintaining computational efficiency, making it accessible for | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/pp-lcnet/paddlepaddle/README.md b/cv/classification/pp-lcnet/paddlepaddle/README.md index 44b2e2e9f..c1ab063eb 100644 --- a/cv/classification/pp-lcnet/paddlepaddle/README.md +++ b/cv/classification/pp-lcnet/paddlepaddle/README.md @@ -14,6 +14,7 @@ inference. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/repmlp/pytorch/README.md b/cv/classification/repmlp/pytorch/README.md index 5255c37e6..bdfb39ffa 100644 --- a/cv/classification/repmlp/pytorch/README.md +++ b/cv/classification/repmlp/pytorch/README.md @@ -14,6 +14,7 @@ local feature learning. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.12 | ## Model Preparation diff --git a/cv/classification/repvgg/paddlepaddle/README.md b/cv/classification/repvgg/paddlepaddle/README.md index ce7e75a38..202589185 100644 --- a/cv/classification/repvgg/paddlepaddle/README.md +++ b/cv/classification/repvgg/paddlepaddle/README.md @@ -14,6 +14,7 @@ real-world deployment scenarios. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.03 | ## Model Preparation diff --git a/cv/classification/repvgg/pytorch/README.md b/cv/classification/repvgg/pytorch/README.md index 479542dbb..3ce02ff59 100755 --- a/cv/classification/repvgg/pytorch/README.md +++ b/cv/classification/repvgg/pytorch/README.md @@ -14,6 +14,7 @@ real-world deployment scenarios. 
| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.03 | ## Model Preparation diff --git a/cv/classification/repvit/pytorch/README.md b/cv/classification/repvit/pytorch/README.md index c124fe284..8f3990c33 100644 --- a/cv/classification/repvit/pytorch/README.md +++ b/cv/classification/repvit/pytorch/README.md @@ -13,6 +13,7 @@ mobile-friendliness, with the largest variant achieving 83.7% accuracy at just 2 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.12 | ## Model Preparation diff --git a/cv/classification/res2net50_14w_8s/paddlepaddle/README.md b/cv/classification/res2net50_14w_8s/paddlepaddle/README.md index c0b56580d..32b396def 100644 --- a/cv/classification/res2net50_14w_8s/paddlepaddle/README.md +++ b/cv/classification/res2net50_14w_8s/paddlepaddle/README.md @@ -13,6 +13,7 @@ making it suitable for various computer vision applications requiring both high | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.12 | ## Model Preparation diff --git a/cv/classification/resnest101/pytorch/README.md b/cv/classification/resnest101/pytorch/README.md index acc6a269b..84f9fc2b5 100644 --- a/cv/classification/resnest101/pytorch/README.md +++ b/cv/classification/resnest101/pytorch/README.md @@ -14,6 +14,7 @@ efficient training and inference capabilities. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/resnest14/pytorch/README.md b/cv/classification/resnest14/pytorch/README.md index 63816609c..a02324aaf 100644 --- a/cv/classification/resnest14/pytorch/README.md +++ b/cv/classification/resnest14/pytorch/README.md @@ -14,6 +14,7 @@ capabilities. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/resnest269/pytorch/README.md b/cv/classification/resnest269/pytorch/README.md index c32e8d9b1..059485c96 100644 --- a/cv/classification/resnest269/pytorch/README.md +++ b/cv/classification/resnest269/pytorch/README.md @@ -14,6 +14,7 @@ efficient training and inference capabilities. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/resnest50/paddlepaddle/README.md b/cv/classification/resnest50/paddlepaddle/README.md index 5299ed9bc..1d2ae4c51 100644 --- a/cv/classification/resnest50/paddlepaddle/README.md +++ b/cv/classification/resnest50/paddlepaddle/README.md @@ -14,6 +14,7 @@ and inference capabilities. 
| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.03 | ## Model Preparation diff --git a/cv/classification/resnest50/pytorch/README.md b/cv/classification/resnest50/pytorch/README.md index e13f9417e..b10372fbf 100644 --- a/cv/classification/resnest50/pytorch/README.md +++ b/cv/classification/resnest50/pytorch/README.md @@ -14,6 +14,7 @@ and inference capabilities. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/resnet101/pytorch/README.md b/cv/classification/resnet101/pytorch/README.md index 3c386c2a2..916b86ff9 100644 --- a/cv/classification/resnet101/pytorch/README.md +++ b/cv/classification/resnet101/pytorch/README.md @@ -14,6 +14,7 @@ detection and segmentation. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/resnet152/pytorch/README.md b/cv/classification/resnet152/pytorch/README.md index 015e911c3..2edecc7ce 100644 --- a/cv/classification/resnet152/pytorch/README.md +++ b/cv/classification/resnet152/pytorch/README.md @@ -14,6 +14,7 @@ connections. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/resnet18/pytorch/README.md b/cv/classification/resnet18/pytorch/README.md index c3b6ee976..5b6be1875 100644 --- a/cv/classification/resnet18/pytorch/README.md +++ b/cv/classification/resnet18/pytorch/README.md @@ -13,6 +13,7 @@ resources, serving as a backbone for various computer vision tasks like object d | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/resnet50/paddlepaddle/README.md b/cv/classification/resnet50/paddlepaddle/README.md index 57773e498..1963a69f5 100644 --- a/cv/classification/resnet50/paddlepaddle/README.md +++ b/cv/classification/resnet50/paddlepaddle/README.md @@ -13,6 +13,7 @@ computer vision applications, serving as a backbone for various tasks like objec | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.3.0 | 22.12 | ## Model Preparation diff --git a/cv/classification/resnet50/pytorch/README.md b/cv/classification/resnet50/pytorch/README.md index 53184def9..bf8583620 100644 --- a/cv/classification/resnet50/pytorch/README.md +++ b/cv/classification/resnet50/pytorch/README.md @@ -13,6 +13,7 @@ computer vision applications, serving as a backbone for various tasks like objec | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | 
Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/resnet50/tensorflow/README.md b/cv/classification/resnet50/tensorflow/README.md index 120184b5e..75e39525a 100644 --- a/cv/classification/resnet50/tensorflow/README.md +++ b/cv/classification/resnet50/tensorflow/README.md @@ -13,6 +13,7 @@ computer vision applications, serving as a backbone for various tasks like objec | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.03 | ## Model Preparation diff --git a/cv/classification/resnext101_32x8d/pytorch/README.md b/cv/classification/resnext101_32x8d/pytorch/README.md index cef26304b..dce3cc2b6 100644 --- a/cv/classification/resnext101_32x8d/pytorch/README.md +++ b/cv/classification/resnext101_32x8d/pytorch/README.md @@ -14,6 +14,7 @@ offering improved accuracy over standard ResNet models. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/resnext50_32x4d/mindspore/README.md b/cv/classification/resnext50_32x4d/mindspore/README.md index 6204cffa2..43309b4e0 100644 --- a/cv/classification/resnext50_32x4d/mindspore/README.md +++ b/cv/classification/resnext50_32x4d/mindspore/README.md @@ -13,6 +13,7 @@ ResNeXt50's design has influenced many subsequent CNN architectures in computer | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/resnext50_32x4d/pytorch/README.md b/cv/classification/resnext50_32x4d/pytorch/README.md index 821ecf9e0..868513e33 100644 --- a/cv/classification/resnext50_32x4d/pytorch/README.md +++ b/cv/classification/resnext50_32x4d/pytorch/README.md @@ -13,6 +13,7 @@ ResNeXt50's design has influenced many subsequent CNN architectures in computer | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/se_resnet50_vd/paddlepaddle/README.md b/cv/classification/se_resnet50_vd/paddlepaddle/README.md index 383eda58e..c8f840a2e 100644 --- a/cv/classification/se_resnet50_vd/paddlepaddle/README.md +++ b/cv/classification/se_resnet50_vd/paddlepaddle/README.md @@ -13,6 +13,7 @@ classification tasks, offering improved performance through better feature learn | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.12 | ## Model Preparation diff --git a/cv/classification/seresnext/pytorch/README.md b/cv/classification/seresnext/pytorch/README.md index 5821fe05e..1ca557c5a 100644 --- a/cv/classification/seresnext/pytorch/README.md +++ b/cv/classification/seresnext/pytorch/README.md @@ -14,6 +14,7 @@ particularly suitable for complex visual recognition problems. 
| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/shufflenetv2/paddlepaddle/README.md b/cv/classification/shufflenetv2/paddlepaddle/README.md index e88e2f7e0..6246c4060 100644 --- a/cv/classification/shufflenetv2/paddlepaddle/README.md +++ b/cv/classification/shufflenetv2/paddlepaddle/README.md @@ -13,6 +13,7 @@ while maintaining low computational complexity, making it ideal for resource-con | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/shufflenetv2/pytorch/README.md b/cv/classification/shufflenetv2/pytorch/README.md index c141e52b3..679e31bf2 100644 --- a/cv/classification/shufflenetv2/pytorch/README.md +++ b/cv/classification/shufflenetv2/pytorch/README.md @@ -13,6 +13,7 @@ while maintaining low computational complexity, making it ideal for resource-con | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/squeezenet/pytorch/README.md b/cv/classification/squeezenet/pytorch/README.md index 8a9c31f88..d4d26fe97 100644 --- a/cv/classification/squeezenet/pytorch/README.md +++ b/cv/classification/squeezenet/pytorch/README.md @@ -14,6 +14,7 @@ requirements. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/swin_transformer/paddlepaddle/README.md b/cv/classification/swin_transformer/paddlepaddle/README.md index 1bd8eec92..00894d79b 100644 --- a/cv/classification/swin_transformer/paddlepaddle/README.md +++ b/cv/classification/swin_transformer/paddlepaddle/README.md @@ -14,6 +14,7 @@ transformer-based approach. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.03 | ## Model Preparation diff --git a/cv/classification/swin_transformer/pytorch/README.md b/cv/classification/swin_transformer/pytorch/README.md index a2994855e..049d43702 100644 --- a/cv/classification/swin_transformer/pytorch/README.md +++ b/cv/classification/swin_transformer/pytorch/README.md @@ -14,6 +14,7 @@ transformer-based approach. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/vgg/paddlepaddle/README.md b/cv/classification/vgg/paddlepaddle/README.md index 2ddea9d2a..19403801b 100644 --- a/cv/classification/vgg/paddlepaddle/README.md +++ b/cv/classification/vgg/paddlepaddle/README.md @@ -13,6 +13,7 @@ design have influenced many modern deep learning models in computer vision. 
| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.3.0 | 22.12 | ## Model Preparation diff --git a/cv/classification/vgg/pytorch/README.md b/cv/classification/vgg/pytorch/README.md index f9cc2353f..62504a868 100644 --- a/cv/classification/vgg/pytorch/README.md +++ b/cv/classification/vgg/pytorch/README.md @@ -13,6 +13,7 @@ design have influenced many modern deep learning models in computer vision. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/vgg/tensorflow/README.md b/cv/classification/vgg/tensorflow/README.md index 2e6ec1f04..5147b80ff 100644 --- a/cv/classification/vgg/tensorflow/README.md +++ b/cv/classification/vgg/tensorflow/README.md @@ -13,6 +13,7 @@ design have influenced many modern deep learning models in computer vision. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.09 | ## Model Preparation diff --git a/cv/classification/wavemlp/pytorch/README.md b/cv/classification/wavemlp/pytorch/README.md index 6b04581c1..10578219d 100644 --- a/cv/classification/wavemlp/pytorch/README.md +++ b/cv/classification/wavemlp/pytorch/README.md @@ -13,6 +13,7 @@ while maintaining high accuracy, making it suitable for various computer vision | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/wide_resnet101_2/pytorch/README.md b/cv/classification/wide_resnet101_2/pytorch/README.md index 4616facb5..bdbd900e4 100644 --- a/cv/classification/wide_resnet101_2/pytorch/README.md +++ b/cv/classification/wide_resnet101_2/pytorch/README.md @@ -13,6 +13,7 @@ computational efficiency, making it suitable for complex visual recognition task | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/classification/xception/paddlepaddle/README.md b/cv/classification/xception/paddlepaddle/README.md index 8e95b762d..cde39ea88 100644 --- a/cv/classification/xception/paddlepaddle/README.md +++ b/cv/classification/xception/paddlepaddle/README.md @@ -14,6 +14,7 @@ accuracy and computational efficiency. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.12 | ## Model Preparation diff --git a/cv/classification/xception/pytorch/README.md b/cv/classification/xception/pytorch/README.md index 108f6d76d..a8293176f 100755 --- a/cv/classification/xception/pytorch/README.md +++ b/cv/classification/xception/pytorch/README.md @@ -14,6 +14,7 @@ accuracy and computational efficiency. 
| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/detection/atss_mmdet/pytorch/README.md b/cv/detection/atss_mmdet/pytorch/README.md index b429120ce..a7af4f929 100644 --- a/cv/detection/atss_mmdet/pytorch/README.md +++ b/cv/detection/atss_mmdet/pytorch/README.md @@ -14,6 +14,7 @@ computational overhead. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.06 | ## Model Preparation diff --git a/cv/detection/autoassign/pytorch/README.md b/cv/detection/autoassign/pytorch/README.md index 473d6fb6d..8769f4370 100755 --- a/cv/detection/autoassign/pytorch/README.md +++ b/cv/detection/autoassign/pytorch/README.md @@ -1,84 +1,85 @@ -# AutoAssign - -## Model Description - -AutoAssign is an anchor-free object detection model that introduces a fully differentiable label assignment mechanism. -It combines Center Weighting and Confidence Weighting to adaptively determine positive and negative samples during -training. Center Weighting adjusts category-specific prior distributions, while Confidence Weighting customizes -assignment strategies for each instance. This approach eliminates the need for manual anchor design and achieves -appearance-aware detection through automatic sample selection, resulting in improved performance and reduced human -intervention in the detection process. - -## Supported Environments - -| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | -|--------|-----------|---------| -| BI-V150 | 4.2.0 | 25.03 | - -## Model Preparation - -### Prepare Resources - -```bash -mkdir -p data -ln -s /path/to/coco/ ./data - -# Prepare resnet50_msra-5891d200.pth, skip this if fast network -mkdir -p /root/.cache/torch/hub/checkpoints/ -wget https://download.openmmlab.com/pretrain/third_party/resnet50_msra-5891d200.pth -O /root/.cache/torch/hub/checkpoints/resnet50_msra-5891d200.pth -``` - -Go to visit [COCO official website](https://cocodataset.org/#download), then select the COCO dataset you want to -download. - -Take coco2017 dataset as an example, specify `/path/to/coco2017` to your COCO path in later training process, the -unzipped dataset path structure sholud look like: - -```bash -coco2017 -├── annotations -│   ├── instances_train2017.json -│   ├── instances_val2017.json -│ └── ... -├── train2017 -│ ├── 000000000009.jpg -│ ├── 000000000025.jpg -│ └── ... -├── val2017 -│ ├── 000000000139.jpg -│ ├── 000000000285.jpg -│ └── ... -├── train2017.txt -├── val2017.txt -└── ... -``` - -### Install Dependencies - -```bash -# Install libGL -## CentOS -yum install -y mesa-libGL -## Ubuntu -apt install -y libgl1-mesa-glx - -# install MMDetection -git clone https://github.com/open-mmlab/mmdetection.git -b v3.3.0 --depth=1 -cd mmdetection -pip install -v -e . 
-``` - -## Model Training - -```bash -# One single GPU -python3 tools/train.py configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py - -# Multiple GPUs on one machine -sed -i 's/python /python3 /g' tools/dist_train.sh -bash tools/dist_train.sh configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py 8 -``` - -## References - -[mmdetection](https://github.com/open-mmlab/mmdetection) +# AutoAssign + +## Model Description + +AutoAssign is an anchor-free object detection model that introduces a fully differentiable label assignment mechanism. +It combines Center Weighting and Confidence Weighting to adaptively determine positive and negative samples during +training. Center Weighting adjusts category-specific prior distributions, while Confidence Weighting customizes +assignment strategies for each instance. This approach eliminates the need for manual anchor design and achieves +appearance-aware detection through automatic sample selection, resulting in improved performance and reduced human +intervention in the detection process. + +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +|--------|-----------|---------| +| BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | + +## Model Preparation + +### Prepare Resources + +```bash +mkdir -p data +ln -s /path/to/coco/ ./data + +# Prepare resnet50_msra-5891d200.pth, skip this if fast network +mkdir -p /root/.cache/torch/hub/checkpoints/ +wget https://download.openmmlab.com/pretrain/third_party/resnet50_msra-5891d200.pth -O /root/.cache/torch/hub/checkpoints/resnet50_msra-5891d200.pth +``` + +Go to visit [COCO official website](https://cocodataset.org/#download), then select the COCO dataset you want to +download. + +Take coco2017 dataset as an example, specify `/path/to/coco2017` to your COCO path in later training process, the +unzipped dataset path structure should look like: + +```bash +coco2017 +├── annotations +│   ├── instances_train2017.json +│   ├── instances_val2017.json +│ └── ... +├── train2017 +│ ├── 000000000009.jpg +│ ├── 000000000025.jpg +│ └── ... +├── val2017 +│ ├── 000000000139.jpg +│ ├── 000000000285.jpg +│ └── ... +├── train2017.txt +├── val2017.txt +└── ... +``` + +### Install Dependencies + +```bash +# Install libGL +## CentOS +yum install -y mesa-libGL +## Ubuntu +apt install -y libgl1-mesa-glx + +# install MMDetection +git clone https://github.com/open-mmlab/mmdetection.git -b v3.3.0 --depth=1 +cd mmdetection +pip install -v -e .
+``` + +## Model Training + +```bash +# One single GPU +python3 tools/train.py configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py + +# Multiple GPUs on one machine +sed -i 's/python /python3 /g' tools/dist_train.sh +bash tools/dist_train.sh configs/autoassign/autoassign_r50-caffe_fpn_1x_coco.py 8 +``` + +## References + +[mmdetection](https://github.com/open-mmlab/mmdetection) diff --git a/cv/detection/cascade_rcnn_mmdet/pytorch/README.md b/cv/detection/cascade_rcnn_mmdet/pytorch/README.md index 1675f1b98..d22c83b4d 100644 --- a/cv/detection/cascade_rcnn_mmdet/pytorch/README.md +++ b/cv/detection/cascade_rcnn_mmdet/pytorch/README.md @@ -13,6 +13,7 @@ COCO, and can be extended to instance segmentation tasks, outperforming models l | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.06 | ## Model Preparation diff --git a/cv/detection/centermask2/pytorch/README.md b/cv/detection/centermask2/pytorch/README.md index 4f67b38bf..3ae78062d 100644 --- a/cv/detection/centermask2/pytorch/README.md +++ b/cv/detection/centermask2/pytorch/README.md @@ -14,6 +14,7 @@ scales. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V150 | 4.1.1 | 24.09 | ## Model Preparation diff --git a/cv/detection/centernet/paddlepaddle/README.md b/cv/detection/centernet/paddlepaddle/README.md index 02ab0bf1d..051c3c6b0 100644 --- a/cv/detection/centernet/paddlepaddle/README.md +++ b/cv/detection/centernet/paddlepaddle/README.md @@ -13,6 +13,7 @@ can be extended to 3D detection and pose estimation tasks. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.03 | ## Model Preparation diff --git a/cv/detection/centernet/pytorch/README.md b/cv/detection/centernet/pytorch/README.md index ffe785bf3..ee6d7c70c 100644 --- a/cv/detection/centernet/pytorch/README.md +++ b/cv/detection/centernet/pytorch/README.md @@ -13,6 +13,7 @@ can be extended to 3D detection and pose estimation tasks. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/detection/co-detr/pytorch/README.md b/cv/detection/co-detr/pytorch/README.md index 367a8870d..eae12ca49 100644 --- a/cv/detection/co-detr/pytorch/README.md +++ b/cv/detection/co-detr/pytorch/README.md @@ -13,6 +13,7 @@ detection accuracy and efficiency while maintaining end-to-end training simplici | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.1.0 | 23.12 | ## Model Preparation diff --git a/cv/detection/cornernet_mmdet/pytorch/README.md b/cv/detection/cornernet_mmdet/pytorch/README.md index 3009eb6c0..4122e33f1 100644 --- a/cv/detection/cornernet_mmdet/pytorch/README.md +++ b/cv/detection/cornernet_mmdet/pytorch/README.md @@ -14,6 +14,7 @@ one-stage detectors. 
| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.06 | ## Model Preparation diff --git a/cv/detection/dcnv2_mmdet/pytorch/README.md b/cv/detection/dcnv2_mmdet/pytorch/README.md index af5f1238b..e001452df 100644 --- a/cv/detection/dcnv2_mmdet/pytorch/README.md +++ b/cv/detection/dcnv2_mmdet/pytorch/README.md @@ -14,6 +14,7 @@ while maintaining computational efficiency. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.06 | ## Model Preparation diff --git a/cv/detection/detr/paddlepaddle/README.md b/cv/detection/detr/paddlepaddle/README.md index f20fb6ba9..d0a1792ba 100644 --- a/cv/detection/detr/paddlepaddle/README.md +++ b/cv/detection/detr/paddlepaddle/README.md @@ -13,6 +13,7 @@ while achieving competitive performance on benchmarks like COCO, offering a new | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.03 | ## Model Preparation diff --git a/cv/detection/fasterrcnn/pytorch/README.md b/cv/detection/fasterrcnn/pytorch/README.md index 1c9eae299..ea068f848 100644 --- a/cv/detection/fasterrcnn/pytorch/README.md +++ b/cv/detection/fasterrcnn/pytorch/README.md @@ -13,6 +13,7 @@ foundation for many winning entries in computer vision competitions. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.2.0 | 22.09 | ## Model Preparation diff --git a/cv/detection/fcos/paddlepaddle/README.md b/cv/detection/fcos/paddlepaddle/README.md index cae72bd27..872373472 100644 --- a/cv/detection/fcos/paddlepaddle/README.md +++ b/cv/detection/fcos/paddlepaddle/README.md @@ -13,6 +13,7 @@ it efficient and effective for various detection tasks. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 3.0.0 | 23.03 | ## Model Preparation diff --git a/cv/detection/maskrcnn/paddlepaddle/README.md b/cv/detection/maskrcnn/paddlepaddle/README.md index bec9004a8..569c30fa3 100644 --- a/cv/detection/maskrcnn/paddlepaddle/README.md +++ b/cv/detection/maskrcnn/paddlepaddle/README.md @@ -13,6 +13,7 @@ detection, instance segmentation, and human pose estimation. | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | |--------|-----------|---------| | BI-V150 | 4.2.0 | 25.03 | +| BI-V100 | 2.3.0 | 22.12 | ## Model Preparation diff --git a/cv/detection/maskrcnn/pytorch/README.md b/cv/detection/maskrcnn/pytorch/README.md index d93a2faae..4e6f11d80 100644 --- a/cv/detection/maskrcnn/pytorch/README.md +++ b/cv/detection/maskrcnn/pytorch/README.md @@ -13,6 +13,7 @@ detection, instance segmentation, and human pose estimation. 
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 2.2.0 | 22.09 |
 
 ## Model Preparation
 
diff --git a/cv/detection/oc_sort/paddlepaddle/README.md b/cv/detection/oc_sort/paddlepaddle/README.md
index 3b3cc4bb0..1b52593d1 100644
--- a/cv/detection/oc_sort/paddlepaddle/README.md
+++ b/cv/detection/oc_sort/paddlepaddle/README.md
@@ -14,6 +14,7 @@ speed.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.1.0 | 23.12 |
 
 ## Model Preparation
 
diff --git a/cv/detection/oriented_reppoints/pytorch/README.md b/cv/detection/oriented_reppoints/pytorch/README.md
index 4e6b8c8d0..4baa32c72 100644
--- a/cv/detection/oriented_reppoints/pytorch/README.md
+++ b/cv/detection/oriented_reppoints/pytorch/README.md
@@ -13,6 +13,7 @@ handle cluttered backgrounds. It achieves state-of-the-art performance on aerial
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.1.0 | 23.12 |
 
 ## Model Preparation
 
diff --git a/cv/detection/picodet/paddlepaddle/README.md b/cv/detection/picodet/paddlepaddle/README.md
index ef89da10d..831cf8508 100644
--- a/cv/detection/picodet/paddlepaddle/README.md
+++ b/cv/detection/picodet/paddlepaddle/README.md
@@ -13,6 +13,7 @@ on resource-constrained devices.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.1.0 | 23.09 |
 
 ## Model Preparation
 
diff --git a/cv/detection/pp-yoloe/paddlepaddle/README.md b/cv/detection/pp-yoloe/paddlepaddle/README.md
index 5b391afaa..f796946c8 100644
--- a/cv/detection/pp-yoloe/paddlepaddle/README.md
+++ b/cv/detection/pp-yoloe/paddlepaddle/README.md
@@ -14,6 +14,7 @@ for object detection tasks.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 2.3.0 | 22.12 |
 
 ## Model Preparation
 
diff --git a/cv/detection/pp_yoloe+/paddlepaddle/README.md b/cv/detection/pp_yoloe+/paddlepaddle/README.md
index fc3345330..1a8d8fcff 100644
--- a/cv/detection/pp_yoloe+/paddlepaddle/README.md
+++ b/cv/detection/pp_yoloe+/paddlepaddle/README.md
@@ -13,6 +13,7 @@ real-time applications, offering superior detection performance across various s
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.1.1 | 24.03 |
 
 ## Model Preparation
 
diff --git a/cv/detection/pvanet/pytorch/README.md b/cv/detection/pvanet/pytorch/README.md
index 627ddb597..5385dd153 100755
--- a/cv/detection/pvanet/pytorch/README.md
+++ b/cv/detection/pvanet/pytorch/README.md
@@ -13,6 +13,7 @@ makes it suitable for real-time applications where both speed and accuracy are c
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 2.2.0 | 22.09 |
 
 ## Model Preparation
 
diff --git a/cv/detection/reppoints_mmdet/pytorch/README.md b/cv/detection/reppoints_mmdet/pytorch/README.md
index 2edb05dbd..10283fb05 100644
--- a/cv/detection/reppoints_mmdet/pytorch/README.md
+++ b/cv/detection/reppoints_mmdet/pytorch/README.md
@@ -13,6 +13,7 @@ more accurate detection, particularly for complex shapes and overlapping objects
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.0.0 | 23.06 |
 
 ## Model Preparation
 
diff --git a/cv/detection/retinanet/paddlepaddle/README.md b/cv/detection/retinanet/paddlepaddle/README.md
index afc895912..d9281fb9a 100644
--- a/cv/detection/retinanet/paddlepaddle/README.md
+++ b/cv/detection/retinanet/paddlepaddle/README.md
@@ -13,6 +13,7 @@ single-stage approaches, offering an excellent balance between performance and e
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.0.0 | 23.03 |
 
 ## Model Preparation
 
diff --git a/cv/detection/retinanet/pytorch/README.md b/cv/detection/retinanet/pytorch/README.md
index a94c3df12..fa7d824eb 100644
--- a/cv/detection/retinanet/pytorch/README.md
+++ b/cv/detection/retinanet/pytorch/README.md
@@ -13,6 +13,7 @@ single-stage approaches, offering an excellent balance between performance and e
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 2.2.0 | 22.09 |
 
 ## Model Preparation
 
diff --git a/cv/detection/rt-detr/pytorch/README.md b/cv/detection/rt-detr/pytorch/README.md
index 0e5fa2b51..eea8b4b61 100644
--- a/cv/detection/rt-detr/pytorch/README.md
+++ b/cv/detection/rt-detr/pytorch/README.md
@@ -13,6 +13,7 @@ addressing its computational challenges, offering a practical solution for time-
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.06 |
 
 ## Model Preparation
 
diff --git a/cv/detection/rtmdet/pytorch/README.md b/cv/detection/rtmdet/pytorch/README.md
index 12a61aa3b..c071d9953 100644
--- a/cv/detection/rtmdet/pytorch/README.md
+++ b/cv/detection/rtmdet/pytorch/README.md
@@ -13,6 +13,7 @@ provides insights for versatile real-time detection systems.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.1.0 | 23.12 |
 
 ## Model Preparation
 
diff --git a/cv/detection/solov2/paddlepaddle/README.md b/cv/detection/solov2/paddlepaddle/README.md
index 1faab97ac..0ee186d75 100644
--- a/cv/detection/solov2/paddlepaddle/README.md
+++ b/cv/detection/solov2/paddlepaddle/README.md
@@ -13,6 +13,7 @@ tasks while maintaining real-time capabilities, making it suitable for various c
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.0.0 | 23.03 |
 
 ## Model Preparation
 
diff --git a/cv/detection/ssd/paddlepaddle/README.md b/cv/detection/ssd/paddlepaddle/README.md
index 48ca88297..70d27ec7e 100644
--- a/cv/detection/ssd/paddlepaddle/README.md
+++ b/cv/detection/ssd/paddlepaddle/README.md
@@ -12,6 +12,7 @@ objects at different resolutions, offering a good balance between speed and accu
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 2.3.0 | 22.12 |
 
 ## Model Preparation
 
diff --git a/cv/detection/ssd/tensorflow/README.md b/cv/detection/ssd/tensorflow/README.md
index 1ead9c194..d317a4c2a 100644
--- a/cv/detection/ssd/tensorflow/README.md
+++ b/cv/detection/ssd/tensorflow/README.md
@@ -12,6 +12,7 @@ objects at different resolutions, offering a good balance between speed and accu
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.0.0 | 23.03 |
 
 ## Model Preparation
 
diff --git a/cv/detection/yolof/pytorch/README.md b/cv/detection/yolof/pytorch/README.md
index e2bc7a935..5f72a8b0b 100755
--- a/cv/detection/yolof/pytorch/README.md
+++ b/cv/detection/yolof/pytorch/README.md
@@ -13,6 +13,7 @@ faster, making it suitable for real-time detection tasks.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 2.2.0 | 22.09 |
 
 ## Model Preparation
 
diff --git a/cv/detection/yolov10/pytorch/README.md b/cv/detection/yolov10/pytorch/README.md
index 192a1ebab..e88348f25 100644
--- a/cv/detection/yolov10/pytorch/README.md
+++ b/cv/detection/yolov10/pytorch/README.md
@@ -13,6 +13,7 @@ ideal for real-time applications requiring fast and accurate object detection in
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.09 |
 
 ## Model Preparation
 
diff --git a/cv/detection/yolov3/paddlepaddle/README.md b/cv/detection/yolov3/paddlepaddle/README.md
index 7bed2ee17..0d3c607be 100644
--- a/cv/detection/yolov3/paddlepaddle/README.md
+++ b/cv/detection/yolov3/paddlepaddle/README.md
@@ -13,6 +13,7 @@ practical detection tasks.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 2.3.0 | 22.12 |
 
 ## Model Preparation
 
diff --git a/cv/detection/yolov3/pytorch/README.md b/cv/detection/yolov3/pytorch/README.md
index 4630f3b1b..a16d7f8fe 100755
--- a/cv/detection/yolov3/pytorch/README.md
+++ b/cv/detection/yolov3/pytorch/README.md
@@ -13,6 +13,7 @@ practical detection tasks.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 2.2.0 | 22.09 |
 
 ## Model Preparation
 
diff --git a/cv/detection/yolov3/tensorflow/README.md b/cv/detection/yolov3/tensorflow/README.md
index 6d583c616..6a75348e5 100644
--- a/cv/detection/yolov3/tensorflow/README.md
+++ b/cv/detection/yolov3/tensorflow/README.md
@@ -13,6 +13,7 @@ practical detection tasks.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.0.0 | 23.03 |
 
 ## Model Preparation
 
diff --git a/cv/detection/yolov5/pytorch/README.md b/cv/detection/yolov5/pytorch/README.md
index 5c04066d9..81bdb144f 100644
--- a/cv/detection/yolov5/pytorch/README.md
+++ b/cv/detection/yolov5/pytorch/README.md
@@ -12,6 +12,7 @@ and efficient inference, making it popular for real-time detection tasks across
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 2.2.0 | 22.09 |
 
 ## Model Preparation
 
diff --git a/cv/detection/yolov6/pytorch/README.md b/cv/detection/yolov6/pytorch/README.md
index 2b73b0842..b5e3f681a 100644
--- a/cv/detection/yolov6/pytorch/README.md
+++ b/cv/detection/yolov6/pytorch/README.md
@@ -14,6 +14,7 @@ accurate object detection.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.0.0 | 23.06 |
 
 ## Model Preparation
 
diff --git a/cv/detection/yolov7/pytorch/README.md b/cv/detection/yolov7/pytorch/README.md
index e17e5d95e..6cce01685 100644
--- a/cv/detection/yolov7/pytorch/README.md
+++ b/cv/detection/yolov7/pytorch/README.md
@@ -13,6 +13,7 @@ Its efficient design makes it suitable for real-world applications requiring fas
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.0.0 | 23.03 |
 
 ## Model Preparation
 
diff --git a/cv/detection/yolov8/pytorch/README.md b/cv/detection/yolov8/pytorch/README.md
index d06d8eff5..c7705d4b9 100644
--- a/cv/detection/yolov8/pytorch/README.md
+++ b/cv/detection/yolov8/pytorch/README.md
@@ -13,6 +13,7 @@ inference while delivering superior detection performance across various scenari
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.0.0 | 23.06 |
 
 ## Model Preparation
 
diff --git a/cv/detection/yolov9/pytorch/README.md b/cv/detection/yolov9/pytorch/README.md
index 268dd5e38..3b6b620fd 100644
--- a/cv/detection/yolov9/pytorch/README.md
+++ b/cv/detection/yolov9/pytorch/README.md
@@ -13,6 +13,7 @@ suitable for applications requiring fast and accurate object recognition in dive
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.06 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/aquila2-34b/pytorch/README.md b/nlp/llm/aquila2-34b/pytorch/README.md
index dbb0d64b0..ef3b0070c 100644
--- a/nlp/llm/aquila2-34b/pytorch/README.md
+++ b/nlp/llm/aquila2-34b/pytorch/README.md
@@ -14,6 +14,7 @@ language processing, offering improved context understanding and response genera
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 3.4.0 | 24.06 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/baichuan2-7b/pytorch/README.md b/nlp/llm/baichuan2-7b/pytorch/README.md
index 8a8a3aaf6..6cae758d1 100644
--- a/nlp/llm/baichuan2-7b/pytorch/README.md
+++ b/nlp/llm/baichuan2-7b/pytorch/README.md
@@ -14,6 +14,7 @@ the field of natural language processing.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 3.4.0 | 24.06 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/bloom-7b1/pytorch/README.md b/nlp/llm/bloom-7b1/pytorch/README.md
index 915d022b6..97db5ac5d 100755
--- a/nlp/llm/bloom-7b1/pytorch/README.md
+++ b/nlp/llm/bloom-7b1/pytorch/README.md
@@ -12,6 +12,7 @@ perform text tasks it hasn't been explicitly trained for, by casting them as tex
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 3.4.0 | 24.06 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/chatglm-6b/pytorch/README.md b/nlp/llm/chatglm-6b/pytorch/README.md
index e9fbced07..c70598672 100644
--- a/nlp/llm/chatglm-6b/pytorch/README.md
+++ b/nlp/llm/chatglm-6b/pytorch/README.md
@@ -14,6 +14,7 @@ accessibility for local deployment.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.1.0 | 23.09 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/chatglm2-6b-sft/pytorch/README.md b/nlp/llm/chatglm2-6b-sft/pytorch/README.md
index 75614e360..46c940b3f 100644
--- a/nlp/llm/chatglm2-6b-sft/pytorch/README.md
+++ b/nlp/llm/chatglm2-6b-sft/pytorch/README.md
@@ -13,6 +13,7 @@ capabilities while offering improved task-specific performance and resource effi
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 3.4.0 | 24.06 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/chatglm3-6b/pytorch/README.md b/nlp/llm/chatglm3-6b/pytorch/README.md
index 990047acd..4569ed7c6 100644
--- a/nlp/llm/chatglm3-6b/pytorch/README.md
+++ b/nlp/llm/chatglm3-6b/pytorch/README.md
@@ -14,6 +14,7 @@ adaptable for diverse applications while maintaining a low deployment threshold
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.09 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/deepseek_moe_7b/pytorch/README.md b/nlp/llm/deepseek_moe_7b/pytorch/README.md
index 8195d5a26..3c2c8d50b 100644
--- a/nlp/llm/deepseek_moe_7b/pytorch/README.md
+++ b/nlp/llm/deepseek_moe_7b/pytorch/README.md
@@ -14,6 +14,7 @@ counterpart.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.12 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/glm-4/pytorch/README.md b/nlp/llm/glm-4/pytorch/README.md
index 29d4059b1..66e24fd8c 100644
--- a/nlp/llm/glm-4/pytorch/README.md
+++ b/nlp/llm/glm-4/pytorch/README.md
@@ -14,6 +14,7 @@ capabilities and versatility.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.2.0 | 25.03 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/gpt2-medium-en/paddlepaddle/README.md b/nlp/llm/gpt2-medium-en/paddlepaddle/README.md
index d8c4e37a3..5307a40b5 100644
--- a/nlp/llm/gpt2-medium-en/paddlepaddle/README.md
+++ b/nlp/llm/gpt2-medium-en/paddlepaddle/README.md
@@ -14,6 +14,7 @@ in text, making it versatile for diverse language processing applications.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.1.0 | 23.09 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/llama2-13b/pytorch/README.md b/nlp/llm/llama2-13b/pytorch/README.md
index fde31d632..9825486fa 100644
--- a/nlp/llm/llama2-13b/pytorch/README.md
+++ b/nlp/llm/llama2-13b/pytorch/README.md
@@ -11,6 +11,7 @@ understanding and generating longer texts.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 3.4.0 | 24.06 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/llama2-34b/pytorch/README.md b/nlp/llm/llama2-34b/pytorch/README.md
index 3e7475585..bf40e2d9a 100644
--- a/nlp/llm/llama2-34b/pytorch/README.md
+++ b/nlp/llm/llama2-34b/pytorch/README.md
@@ -13,6 +13,7 @@ applications, offering state-of-the-art performance in language understanding an
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 3.4.0 | 24.06 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/llama2-7b/pytorch/README.md b/nlp/llm/llama2-7b/pytorch/README.md
index c54c1e4f6..1455bf72d 100644
--- a/nlp/llm/llama2-7b/pytorch/README.md
+++ b/nlp/llm/llama2-7b/pytorch/README.md
@@ -13,6 +13,7 @@ maintaining computational efficiency compared to larger models in the Llama-2 se
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.1.0 | 23.12 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/llama2-7b_reward_sft/pytorch/README.md b/nlp/llm/llama2-7b_reward_sft/pytorch/README.md
index b4d3827af..bfbb89f10 100644
--- a/nlp/llm/llama2-7b_reward_sft/pytorch/README.md
+++ b/nlp/llm/llama2-7b_reward_sft/pytorch/README.md
@@ -14,6 +14,7 @@ maintaining computational efficiency.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.1.1 | 24.03 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/llama2-7b_rlhf/pytorch/README.md b/nlp/llm/llama2-7b_rlhf/pytorch/README.md
index 1e7834e30..3ac2d9895 100644
--- a/nlp/llm/llama2-7b_rlhf/pytorch/README.md
+++ b/nlp/llm/llama2-7b_rlhf/pytorch/README.md
@@ -16,6 +16,7 @@ to larger models.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 3.4.0 | 24.06 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/llama2-7b_sft/pytorch/README.md b/nlp/llm/llama2-7b_sft/pytorch/README.md
index aab9f57ca..db92037f9 100644
--- a/nlp/llm/llama2-7b_sft/pytorch/README.md
+++ b/nlp/llm/llama2-7b_sft/pytorch/README.md
@@ -14,6 +14,7 @@ maintaining computational efficiency.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V100 | 3.1.1 | 24.03 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/llama3_8b/pytorch/README.md b/nlp/llm/llama3_8b/pytorch/README.md
index 37f418078..2638ea20d 100644
--- a/nlp/llm/llama3_8b/pytorch/README.md
+++ b/nlp/llm/llama3_8b/pytorch/README.md
@@ -14,6 +14,7 @@ language understanding and generation.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.09 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/llama3_8b_sft/pytorch/README.md b/nlp/llm/llama3_8b_sft/pytorch/README.md
index 4de45c1a5..75ce851dc 100644
--- a/nlp/llm/llama3_8b_sft/pytorch/README.md
+++ b/nlp/llm/llama3_8b_sft/pytorch/README.md
@@ -14,6 +14,7 @@ computational efficiency.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.12 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/minicpm/pytorch/README.md b/nlp/llm/minicpm/pytorch/README.md
index ca00046b2..ae71d5bad 100644
--- a/nlp/llm/minicpm/pytorch/README.md
+++ b/nlp/llm/minicpm/pytorch/README.md
@@ -14,6 +14,7 @@ and Zephyr-7B-alpha.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.2.0 | 25.03 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/mixtral/pytorch/README.md b/nlp/llm/mixtral/pytorch/README.md
index e0ce2c35a..b30cce7eb 100644
--- a/nlp/llm/mixtral/pytorch/README.md
+++ b/nlp/llm/mixtral/pytorch/README.md
@@ -11,6 +11,7 @@ computational efficiency, making it an excellent choice for real-world applicati
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.12 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/phi-3/pytorch/README.md b/nlp/llm/phi-3/pytorch/README.md
index 86d4d278b..70a8c7f81 100644
--- a/nlp/llm/phi-3/pytorch/README.md
+++ b/nlp/llm/phi-3/pytorch/README.md
@@ -14,6 +14,7 @@ diverse domains.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.2.0 | 25.03 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/qwen-7b/pytorch/README.md b/nlp/llm/qwen-7b/pytorch/README.md
index bd4c4eba4..f31f50d27 100644
--- a/nlp/llm/qwen-7b/pytorch/README.md
+++ b/nlp/llm/qwen-7b/pytorch/README.md
@@ -12,6 +12,7 @@ assistant, which is trained with alignment techniques.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 3.4.0 | 24.06 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/qwen1.5-14b/pytorch/README.md b/nlp/llm/qwen1.5-14b/pytorch/README.md
index c3b494f35..e9be99f0e 100644
--- a/nlp/llm/qwen1.5-14b/pytorch/README.md
+++ b/nlp/llm/qwen1.5-14b/pytorch/README.md
@@ -13,6 +13,7 @@ sizes;No need of trust_remote_code.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.09 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/qwen1.5-7b/pytorch/README.md b/nlp/llm/qwen1.5-7b/pytorch/README.md
index ffe8724d8..d123ab339 100644
--- a/nlp/llm/qwen1.5-7b/pytorch/README.md
+++ b/nlp/llm/qwen1.5-7b/pytorch/README.md
@@ -13,6 +13,7 @@ sizes;No need of trust_remote_code.
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.09 |
 
 ## Model Preparation
 
diff --git a/nlp/llm/qwen2.5-7b/pytorch/README.md b/nlp/llm/qwen2.5-7b/pytorch/README.md
index faf3f6157..b2a150d21 100644
--- a/nlp/llm/qwen2.5-7b/pytorch/README.md
+++ b/nlp/llm/qwen2.5-7b/pytorch/README.md
@@ -13,6 +13,7 @@ architecture and specialized expert models make it a versatile tool for diverse
 | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
 |--------|-----------|---------|
 | BI-V150 | 4.2.0 | 25.03 |
+| BI-V150 | 4.1.1 | 24.12 |
 
 ## Model Preparation
 
-- 
Gitee