diff --git a/ACL_TensorFlow/contrib/cv/OSNet_ID1379_for_ACL/.keep b/ACL_TensorFlow/contrib/cv/OSNet_ID1379_for_ACL/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/.gitignore b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..a150ee4c9d6ccfa82fa878cf086d203fe2264f85
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/.gitignore
@@ -0,0 +1,133 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+.vscode/
+
+model_checkpoint
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/.keep b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/README.md b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..711005ecdf9c67107c0525ba9311a80420143fad
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/README.md
@@ -0,0 +1,221 @@
+- [Basic Information](#basic-information)
+- [Overview](#overview)
+- [Training Environment Setup](#training-environment-setup)
+- [Quick Start](#quick-start)
+- [Training Results](#training-results)
+- [Advanced Reference](#advanced-reference)
+
+# Basic Information
+
+**Publisher: Huawei**
+
+**Application Domain: Computer Vision**
+
+**Version:**
+
+**Modified: 2022.3.16**
+
+**Size:**
+
+**Framework: TensorFlow 1.15.0**
+
+**Model Format: h5**
+
+**Precision: Mixed**
+
+**Processor: Ascend 910**
+
+**Categories: Research**
+
+**Description: Training code for the OSNet network based on the TensorFlow framework**
+
+# Overview
+
+OSNet comes from the CVPR 2019 paper "Omni-Scale Feature Learning for Person Re-Identification". The authors designed a new, lightweight network tailored to the person re-identification (ReID) task, achieving better performance than lightweight networks such as ShuffleNet, MobileNet and SqueezeNet.
+
+- Reference paper:
+
+  https://arxiv.org/pdf/1905.00953.pdf
+
+- Reference implementation:
+
+  https://github.com/Purnay04/a2m_osnet_keras
+
+- Implementation adapted to the Ascend AI processor:
+
+
+  https://gitee.com/dw8023/modelzoo/tree/master/contrib/TensorFlow/Research/cv/OSNet_ID1379_for_TensorFlow
+
+
+
+- To obtain the code for a specific commit_id via Git:
+
+    ```
+    git clone {repository_url}        # clone the repository
+    cd {repository_name}              # enter the repository directory
+    git checkout {branch}             # switch to the corresponding branch
+    git reset --hard {commit_id}      # reset the code to the corresponding commit_id
+    cd {code_path}                    # switch to the model code path; skip this step if the repository contains only this model
+    ```
+
+## Default Configuration
+
+- Training dataset preprocessing (the normalization step is sketched after this section):
+
+  Data augmentation:
+  (1) Randomly crop the image to 256×128 and then rescale it by a factor of 1.25;
+  (2) Random horizontal flip;
+  (3) Random erasing
+  - Input image size: 128×64×3
+  - Input image format: .jpg
+
+- Test dataset preprocessing:
+
+  - Input image size: 128×64×3
+  - Input image format: .jpg
+
+- Training hyperparameters
+
+  - Batch size: 128
+  - Train epochs: 100
+  - Initial learning rate: 0.065
+
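+In addition to the augmentation above, `data.py` normalizes each image with the standard ImageNet channel statistics. A minimal standalone sketch of that step is shown below (helper names are illustrative; `data.py` is the authoritative implementation):
+
+```
+import numpy as np
+
+# ImageNet per-channel mean / std, matching data.py
+IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
+IMAGENET_STD = np.array([0.229, 0.224, 0.225])
+
+def normalize_batch(batch_x):
+    # scale uint8 images to [0, 1], then standardize each channel
+    batch_x = batch_x / 255.
+    return (batch_x - IMAGENET_MEAN) / IMAGENET_STD
+```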
+
+## Supported Features
+
+| Feature | Supported |
+|-------|------|
+| Distributed training | No |
+| Mixed precision | Yes |
+| Data parallelism | No |
+
+## Mixed Precision Training
+
+The Ascend 910 AI processor provides automatic mixed precision: following built-in optimization policies, selected float32 operators in the network are automatically lowered to float16, improving performance and reducing memory usage with very little accuracy loss.
+
+## Enabling Mixed Precision
+
+Mixed precision is enabled by default in the script. The precision_mode setting (as used in train.py) is:
+
+    ```
+    custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add()
+    custom_op.name = "NpuOptimizer"
+    custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+    ```
+
+# Training Environment Setup
+
+1. For hardware setup, see the product documentation "[Driver and Firmware Installation and Upgrade Guide](https://support.huawei.com/enterprise/zh/category/ai-computing-platform-pid-1557196528909)". The firmware and driver matching your CANN version must be installed on the device.
+2. Install Docker on the host and log in to the [Ascend Hub](https://ascendhub.huawei.com/#/detail?name=ascend-tensorflow-arm) to obtain the image.
+
+   The images supported by this model are listed in [Table 1](#zh-cn_topic_0000001074498056_table1519011227314).
+
+   **Table 1** Image list
+
+   | Image name | Image version | Compatible CANN version |
+   |---|---|---|
+   | ascend-tensorflow-arm | 20.2.0 | 20.2 |
+
+# Quick Start
+
+- Dataset preparation
+1. Model training uses the Market-1501 dataset; the OBS link to the dataset is:
+
+   obs://osnet-id1379/dataset/
+## Model Training
+
+- The OBS link to the source code is:
+
+   obs://osnet-id1379/npu/
+- Before launching training, configure the environment variables required by the program.
+
+  For the environment variable configuration, see:
+
+  [Ascend 910 training platform environment variable setup](https://gitee.com/ascend/modelzoo/wikis/Ascend%20910%E8%AE%AD%E7%BB%83%E5%B9%B3%E5%8F%B0%E7%8E%AF%E5%A2%83%E5%8F%98%E9%87%8F%E8%AE%BE%E7%BD%AE?sort_id=3148819)
+
+- Single-device training
+
+  1. Configure training parameters.
+
+     In the script train_full_1p.sh, configure parameters such as batch_size, num_epochs and train_image_dir. Set train_image_dir to your actual dataset path, or pass it as a command-line argument when launching training.
+
+     ```
+     batch_size=128
+     num_epochs=100
+     initial_lr=0.065
+     train_image_dir="../Market-1501-v15.09.15/bounding_box_train"
+     ```
+
+  2. Start training.
+
+     Start single-device training (script: OSNet_ID1379_for_TensorFlow/train_full_1p.sh)
+
+ ```
+ bash train_full_1p.sh
+ ```
+
+# Training Results
+
+- Accuracy comparison
+
+| Accuracy metric | Paper | GPU (measured) | NPU (measured) |
+|---|---|---|---|
+| mAP | xxx | 56.0% | 56.5% |
+
+- Performance comparison (not yet available)
+
+| Performance metric | Paper | GPU (measured) | NPU (measured) |
+|---|---|---|---|
+| FPS | XXX | YYY | ZZZ |
+
+
+# Advanced Reference
+
+## Scripts and Sample Code
+
+```
+├── train.py                //network training code
+├── README.md               //documentation
+├── eval.py                 //accuracy evaluation code
+├── data.py                 //dataset processing code
+├── osnet.py                //model network definition
+├── requirements.txt        //python dependencies for training
+├── train_full_1p.sh        //training launch script
+├── utils                   //utility code for training and evaluation
+│    ├──general.py
+│    ├──ranking.py
+│    ...
+```
+
+## Script Parameters
+
+```
+--train_image_dir        dataset path, default: '/home/dingwei/osnet/dataset/Market-1501-v15.09.15/bounding_box_train/'
+--batch_size             batch size per NPU, default: 128
+--initial_lr             initial learning rate, default: 0.065
+--num_epoch              number of training epochs, default: 100
+
+```
+
+## Training Process
+
+1. Start single-device training with the command described in "Model Training".
+
+2. The reference script saves the trained model to ./osnet.h5.
+
+
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/__init__.py b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ca10a8fe0a73bb167cafcfcb0b1761eb0d92ee6
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/data.py b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/data.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb991cb8b6b15ae579d11e4fc1619d629c6cd687
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/data.py
@@ -0,0 +1,71 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import math
+from tensorflow.keras.utils import Sequence, to_categorical
+from tensorflow.keras.preprocessing.image import load_img
+from sklearn.utils import shuffle as shuffle_tuple
+from utils.general import seq
+
+
+# Here, `x_set` is list of path to the images
+# and `y_set` are the associated classes.
+
+class DataGenerator(Sequence):
+
+ def __init__(self, x_set, y_set, batch_size, num_classes, shuffle=False, augment=False):
+ self.x, self.y = x_set, y_set
+ self.total_num_image = len(x_set)
+ self.batch_size = batch_size
+ self.num_classes = num_classes
+ self.shuffle = shuffle
+ self.augment = augment
+
+ def __len__(self):
+ return math.ceil(len(self.x) / self.batch_size)
+
+ def __getitem__(self, idx):
+
+ batch_x = self.x[idx * self.batch_size: (idx + 1) * self.batch_size]
+ batch_y = self.y[idx * self.batch_size: (idx + 1) * self.batch_size]
+
+ if self.shuffle:
+ batch_x, batch_y = shuffle_tuple(batch_x, batch_y)
+
+ if self.augment:
+            batch_x = np.array([np.asarray(load_img(file_path)).astype(np.uint8) for file_path in batch_x])
+ batch_x = seq.augment_images(batch_x)
+ batch_x = batch_x / 255.
+ else:
+ batch_x = np.array([np.asarray(load_img(file_name)) / 255. for file_name in batch_x])
+
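+        # standardize with the ImageNet per-channel mean and std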
+ batch_x = (batch_x - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
+ batch_y = to_categorical(np.array(batch_y), num_classes=self.num_classes)
+
+ return batch_x, batch_y
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/eval.py b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b578e80f96e136ec6f0ab20722786644238c471
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/eval.py
@@ -0,0 +1,184 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tensorflow.keras.models import load_model, Model
+from tensorflow.keras.optimizers import SGD
+from utils.triplet import generator_batch_test
+from sklearn.preprocessing import normalize
+import os
+import argparse
+import numpy as np
+from osnet import OSNet
+from utils.ranking import cmc, mean_ap
+from utils.rerank import re_ranking
+
+
+def pairwise_distance(mat1, mat2):
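+    # Squared Euclidean distance between every query/gallery pair,
+    # computed as ||a||^2 + ||b||^2 - 2 * a.b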
+ m = mat1.shape[0] # query number
+ n = mat2.shape[0] # gallery number
+ x = np.repeat(np.sum(np.square(mat1), axis=1, keepdims=True), n, axis=1) # mxn
+ y = np.repeat(np.sum(np.square(mat2), axis=1, keepdims=True), m, axis=1) # nxm
+ y = np.transpose(y) # mxn
+ return x + y - 2 * np.dot(mat1, mat2.T)
+
+
+def evaluate(query_features, query_labels, query_cams, gallery_features, gallery_labels,
+ gallery_cams):
+ #query_feature: array, NxD
+ #query_cam: array, 1xN
+ #query_label: array, 1xN
+ #gallery_feature: array, MxD
+ #gallery_cam:array, 1xM
+ #gallery_label array, 1xM
+ distmat = pairwise_distance(query_features, gallery_features)
+
+ print('Applying person re-ranking ...')
+ distmat_qq = pairwise_distance(query_features, query_features)
+ distmat_gg = pairwise_distance(gallery_features, gallery_features)
+ distmat = re_ranking(distmat, distmat_qq, distmat_gg)
+ # Compute mean AP
+ mAP = mean_ap(distmat, query_labels, gallery_labels, query_cams, gallery_cams)
+ print('Final Accuracy accuracy: {:4.1%}'.format(mAP))
+
+ cmc_configs = {
+ 'market1501': dict(separate_camera_set=False,
+ single_gallery_shot=False,
+ first_match_break=True),}
+ cmc_scores = {name: cmc(distmat, query_labels, gallery_labels, query_cams,
+ gallery_cams, **params)
+ for name, params in cmc_configs.items()}
+ cmc_topk = (1, 5, 10)
+ print('CMC Scores:')
+ for k in cmc_topk:
+ print('top-{:<4}{:12.1%}'.format(k, cmc_scores['market1501'][k-1]))
+
+
+def get_data_information(data_root):
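+    # Market-1501 file names look like 0002_c1s1_000451_03.jpg: the prefix before the first
+    # '_' is the person ID, and the digit after 'c' is the camera ID.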
+ img_path_list = []
+ img_name_list = []
+ img_cams_list = []
+ image_names = os.listdir(data_root) #the best way is to use sorted list,i.e., sorted()
+ image_names = sorted(image_names)[:-1]
+ for item in image_names:
+ if item[-4:] == '.jpg':
+ img_path_list.append(os.path.join(data_root, item))
+ img_name_list.append(item.split('_')[0])
+ img_cams_list.append(item.split('c')[1][0])
+ return img_path_list, np.array(img_name_list), np.array(img_cams_list)
+
+
+def main():
+ args = parser.parse_args()
+ # build model to extract features
+ model = OSNet(751).model
+ print("done")
+    model.load_weights("osnet.h5")
+    model.summary()
+ dense_feature = model.get_layer('features').output
+ model_extract_features = Model(inputs=model.input, outputs=dense_feature)
+ model_extract_features.compile(loss=['categorical_crossentropy'], optimizer=SGD(lr=0.1), metrics=['accuracy'])
+
+ # image_path, image_names, image_cams
+ query_dir = os.path.join(args.data_path, 'query')
+ gallery_dir = os.path.join(args.data_path, 'bounding_box_test')
+ query_img_list, query_name_list, query_cams_list = \
+ get_data_information(query_dir)
+ gallery_img_list, gallery_name_list, gallery_cams_list = \
+ get_data_information(gallery_dir)
+
+ # obtain features
+ query_generator = generator_batch_test(query_img_list, args.img_width,
+ args.img_height, args.batch_size, shuffle=False)
+ query_features = model_extract_features.predict(query_generator, verbose=1,
+ steps=len(query_img_list)//args.batch_size if len(query_img_list)%args.batch_size==0 \
+ else len(query_img_list)//args.batch_size+1)
+ query_features = normalize(query_features, norm='l2')
+ assert len(query_img_list) == query_features.shape[0], "something wrong with query samples"
+
+ gallery_generator = generator_batch_test(gallery_img_list, args.img_width, args.img_height,
+ args.batch_size, shuffle=False)
+ gallery_features = model_extract_features.predict(gallery_generator,verbose=1,
+ steps=len(gallery_img_list)//args.batch_size if len(gallery_img_list)%args.batch_size==0 \
+ else len(gallery_img_list)//args.batch_size+1)
+ gallery_features = normalize(gallery_features, norm='l2')
+ assert len(gallery_img_list) == gallery_features.shape[0], "something wrong with gallery samples"
+ #evaluate
+ evaluate(query_features, query_name_list, query_cams_list,
+ gallery_features, gallery_name_list, gallery_cams_list)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description="Test")
+ parser.add_argument('--data_path', help='path to train_image', type=str, default='/home/dingwei/osnet/dataset/Market-1501-v15.09.15')
+    parser.add_argument('--img_width', type=int, default=64)
+    parser.add_argument('--img_height', type=int, default=128)
+    parser.add_argument('--learning_rate', type=float, default=0.01)
+    parser.add_argument('--batch_size', type=int, default=128)
+ parser.add_argument('--USE_Label_Smoothing', type=bool, default=True)
+ main()
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/osnet.py b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/osnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dcb9121ddb3ee403ffd4f97a2459d434b3ab79f
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/osnet.py
@@ -0,0 +1,323 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import tensorflow as tf
+import tensorflow.keras.backend as K
+from tensorflow.keras import backend
+from tensorflow.keras import layers
+from tensorflow.keras import models, Sequential
+from tensorflow.keras import utils as keras_utils
+from tensorflow.keras.layers import Input
+import numpy as np
+from tensorflow.keras.models import Model
+
+
+def get_aggregation_gate(in_filters, reduction=16):
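+    # Unified aggregation gate: global average pooling, a bottleneck Dense + BN + ReLU,
+    # a Dense + sigmoid producing per-channel weights, reshaped to (1, 1, C) so they
+    # broadcast over the spatial dimensions of each stream.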
+ gate = Sequential([
+ layers.GlobalAveragePooling2D(),
+ layers.Dense(in_filters // reduction, use_bias=False),
+ layers.BatchNormalization(),
+ layers.Activation('relu'),
+ layers.Dense(in_filters),
+ layers.Activation('sigmoid'),
+        layers.Reshape((1, 1, -1)) # reshape to (1, 1, C) so the gate broadcasts over H and W
+ ])
+ return gate
+
+
+def construct_fc_layer(feature_dims, input_dim, dropout_p = None, name="features"):
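+    # Embedding head (the layer named "features"): Dense + BN + ReLU blocks, optionally with
+    # dropout; returns None when feature_dims is None or negative, in which case the pooled
+    # backbone output is used directly as the feature vector.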
+ if feature_dims is None or feature_dims < 0:
+ feature_dims = input_dim
+ return None
+
+ if isinstance(feature_dims, int):
+ feature_dims = [feature_dims]
+
+ new_layers = []
+ for dim in feature_dims:
+ new_layers.append(layers.Dense(dim))
+ new_layers.append(layers.BatchNormalization())
+ new_layers.append(layers.Activation('relu'))
+ if dropout_p is not None:
+ new_layers.append(layers.Dropout(dropout_p))
+ input_dim = dim
+ feature_dims = feature_dims[-1]
+
+ return Sequential(new_layers, name=name)
+
+class conv2d_bn(tf.keras.Model):
+ def __init__(self,
+ filters,
+ kernel_size = (3, 3),
+ padding = 'same',
+ strides = (1, 1),
+ activation = 'relu',
+ **kwargs):
+ super(conv2d_bn, self).__init__(**kwargs)
+ self.filters = filters
+ self.kernel_size = kernel_size
+ self.padding = padding
+ self.strides = strides
+ self.activation = activation
+
+ self.bn_axis = -1
+ self.conv = layers.Conv2D(self.filters, self.kernel_size, strides=self.strides, padding = self.padding, use_bias=False)
+ self.bn_norm = layers.BatchNormalization(axis=self.bn_axis)
+
+ def call(self, x):
+ x = self.conv(x)
+ x = self.bn_norm(x)
+ if self.activation is not None:
+ x = layers.Activation(self.activation)(x)
+ return x
+
+class Light_conv3x3_bn(tf.keras.Model):
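+    # Lite 3x3 convolution: a 1x1 pointwise convolution followed by a 3x3 depthwise
+    # convolution, then batch normalization and ReLU.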
+ def __init__(self, filters):
+ super(Light_conv3x3_bn, self).__init__()
+ self.bn_axis = 3
+ self.filters = filters
+
+ self.conv = layers.Conv2D(self.filters, kernel_size=1, strides=1, padding='same', use_bias=False)
+ self.depth_conv = layers.DepthwiseConv2D(kernel_size=3, strides=1, padding='same', use_bias=False)
+ self.bn_norm = layers.BatchNormalization(axis=self.bn_axis)
+ self.activation = layers.Activation('relu')
+
+ def call(self, x):
+ x = self.conv(x)
+ x = self.depth_conv(x)
+ x = self.bn_norm(x)
+ x = self.activation(x)
+ return x
+
+class os_bottleneck(tf.keras.Model):
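+    # Omni-scale residual bottleneck: 1x1 reduction, four parallel streams of one to four
+    # stacked Lite 3x3 convolutions, a gated (channel-weighted) sum of the streams,
+    # a 1x1 expansion, and an identity shortcut (projected by a 1x1 conv when the
+    # channel count changes).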
+ def __init__(self,out_filters, bottleneck_reduction=4, **kwargs):
+ super(os_bottleneck, self).__init__(**kwargs)
+ self.out_filters = out_filters
+ self.mid_filters = self.out_filters // bottleneck_reduction
+
+ self.conv1 = conv2d_bn(self.mid_filters, kernel_size=(1, 1))
+
+ self.conv2a = Light_conv3x3_bn(self.mid_filters)
+ self.conv2b = Sequential([
+ Light_conv3x3_bn(self.mid_filters),
+ Light_conv3x3_bn(self.mid_filters)
+ ])
+ self.conv2c = Sequential([
+ Light_conv3x3_bn(self.mid_filters),
+ Light_conv3x3_bn(self.mid_filters),
+ Light_conv3x3_bn(self.mid_filters)
+ ])
+ self.conv2d = Sequential([
+ Light_conv3x3_bn(self.mid_filters),
+ Light_conv3x3_bn(self.mid_filters),
+ Light_conv3x3_bn(self.mid_filters),
+ Light_conv3x3_bn(self.mid_filters)
+ ])
+
+ self.conv3 = conv2d_bn(self.out_filters, kernel_size=(1, 1), activation=None)
+ self.ident_conv = conv2d_bn(self.out_filters, kernel_size=(1, 1), activation=None)
+ self.gate = get_aggregation_gate(self.mid_filters)
+
+ self.Adder = layers.Add()
+ self.Multiply = layers.Multiply()
+ self.activation = layers.Activation('relu')
+
+ def call(self, x):
+ in_filters = x.shape[-1]
+ identity = x
+
+ x1 = self.conv1(x)
+
+ #CONV2a
+ branch1 = self.conv2a(x1)
+
+ #CONV2b
+ branch2 = self.conv2b(x1)
+
+ #CONV2c
+ branch3 = self.conv2c(x1)
+
+ #CONV2d
+ branch4 = self.conv2d(x1)
+
+ x2 = self.Adder([
+ self.Multiply([branch1, self.gate(branch1)]),
+ self.Multiply([branch2, self.gate(branch2)]),
+ self.Multiply([branch3, self.gate(branch3)]),
+ self.Multiply([branch4, self.gate(branch4)])
+ ])
+
+ x3 = self.conv3(x2)
+
+ if in_filters != self.out_filters:
+ #print("before",identity.shape, in_filters ,self.out_filters)
+ identity = self.ident_conv(identity)
+ #print("after",identity.shape)
+
+ out = self.Adder([identity, x3]) # residual connection, out = x3 + identity in Pytorch
+ out = self.activation(out)
+ return out
+
+
+class OSNet(tf.keras.Model):
+ def __init__(self,
+ classes,
+ include_top = False,
+ input_tensor = None,
+ pooling = None,
+ feature_dim = 512,
+ loss_type = {'xent'},
+ **kwargs):
+
+ super(OSNet, self).__init__()
+
+ self.include_top = include_top
+ self.input_tensor = input_tensor
+ self.pooling = pooling
+ self.feature_dim = feature_dim
+ self.loss_type = loss_type
+
+ #Network component
+
+ self.conv1 = conv2d_bn(64, (7, 7), strides=(2, 2), name="conv1") # conv1: 128x64x64
+ self.maxpool = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same', name="1p") # 1p: 64x32x64
+
+ self.a2 = os_bottleneck(256, name="2a") # 2a: 64x32x256
+ self.b2 = os_bottleneck(256, name="2b") # 2b: 64x32x256
+ self.t2 = conv2d_bn(256, (1, 1), name="2t") # 2t: 64x32x256
+ self.p2 = layers.AveragePooling2D(pool_size=(2, 2), strides=(2, 2), name="2p") # 2p: 32x16x256
+
+ self.a3 = os_bottleneck(384, name="3a") # 3a: 16x8x384
+ self.b3 = os_bottleneck(384, name="3b") # 3b: 16x8x384
+ self.t3 = conv2d_bn(384, (1, 1), name="3t") # 3t: 16x8x384
+ self.p3 = layers.AveragePooling2D(pool_size = (2, 2), strides = (2, 2), name="3p") # 3p: 8x4x384
+
+ self.a4 = os_bottleneck(512, name="4a") # 4a: 8x4x512
+ self.b4 = os_bottleneck(512, name="4b") # 4b: 8x4x512
+ self.t4 = conv2d_bn(512, (1, 1), name="4t") # 4t: 8x4x512
+
+ self.global_avgpool = layers.GlobalAveragePooling2D(name="Avg_pooling")
+ self.aft_dropout = layers.Dropout(0.2)
+ self.fc = construct_fc_layer(self.feature_dim, 512)
+
+ # self.classifier = layers.Dense(self.classes, name="classifier")
+ self.activation = layers.Activation("softmax", name="softmax")
+ self.model = self.build(Input(shape=(128, 64, 3)), classes)
+
+ def build(self, input_tensor, classes, training=True, return_featuremaps = False):
+ """
+ self.img_input = layers.Input(tensor=self.input_tensor, shape=self.input_shape)
+ if self.input_tensor is None:
+ if self.input_shape is None:
+ raise ValueError('neither input_tensor nor input_shape is given')
+ else:
+ if not backend.is_keras_tensor(self.input_tensor):
+ self.img_input = layers.Input(tensor=self.input_tensor, shape=self.input_shape)
+ else:
+ self.img_input = self.input_tensor
+ """
+
+
+ x = self.conv1(input_tensor)
+ x = self.maxpool(x)
+
+ x = self.a2(x)
+ x = self.b2(x)
+ x = self.t2(x)
+ x = self.p2(x)
+
+ x = self.a3(x)
+ x = self.b3(x)
+ x = self.t3(x)
+ x = self.p3(x)
+
+ x = self.a4(x)
+ x = self.b4(x)
+ x = self.t4(x)
+
+ if return_featuremaps:
+ return x
+
+ v = self.global_avgpool(x)
+ v = self.aft_dropout(v)
+
+ if self.fc is not None:
+ v = self.fc(v)
+ # return v
+
+ if not training:
+ return v
+ y = layers.Dense(classes, name="classifier")(v)
+ if self.loss_type == {'xent'}:
+ w = self.activation(y)
+ # return layers.Model(Input(shape=(128, 64, 3)), output=w, name='osnet')
+ return Model(input_tensor, w)
+ elif self.loss_type == 'triplet':
+ return y, v
+
+ else:
+            raise KeyError("Unsupported Loss: {}".format(self.loss_type))
+
+
+if __name__ == "__main__":
+ # from keras.utils.vis_utils import plot_model
+ # model = OSNet(751)
+ # plot_model(model, to_file="model.png", show_shapes=True);
+
+
+ raw_input = (1, 256, 128, 3)
+ model = OSNet(751).model
+ # output = model(Input(shape=(256, 128, 3)), training=True)
+ print("done")
+
+ loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.1)
+ optimizer = tf.keras.optimizers.SGD(lr=0.065, momentum=0.9)
+ model.compile(
+ optimizer = optimizer,
+ loss = loss,
+ metrics = ['accuracy'])
+ # print(output)
+ # model.build(input_shape=raw_input)
+ print(model.summary())
+ # https://stackoverflow.com/questions/62242330/error-when-subclassing-the-model-class-you-should-implement-a-call-method
+
+ names = [layer.name for layer in model.layers]
+ print(names, len(names))
+
+ # tf.keras.utils.plot_model(
+ # model, # here is the trick (for now)
+ # to_file='model.png', dpi=96, # saving
+ # show_shapes=True, show_layer_names=True, # show shapes and layer name
+ # expand_nested=True # will show nested block
+ # )
+
+ # trainable_count = np.sum([K.count_params(w) for w in model.trainable_weights])
+ # print(trainable_count)
+
+
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/requirements.txt b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..00379840ef885bfb36af0942893de75f18de1757
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/requirements.txt
@@ -0,0 +1,4 @@
+imgaug
+tensorflow-gpu
+scikit-learn
+Pillow
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/train.py b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..faf2bc5f178cf70d6f8b9e06d30dafd405f7deed
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/train.py
@@ -0,0 +1,129 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from sklearn.preprocessing import LabelEncoder, normalize
+from sklearn.model_selection import train_test_split
+import tensorflow as tf
+import tensorflow.keras.backend as K
+from tensorflow.keras.callbacks import LearningRateScheduler
+from utils.general import categorical_crossentropy_label_smoothing
+from tensorflow.keras.optimizers import SGD
+from data import DataGenerator
+from osnet import OSNet
+from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
+import argparse
+from npu_bridge.npu_init import *
+# Set training parameters
+image_shape = (128, 64, 3) # h x w x c
+use_label_smoothing = True
+
+
+def parse_args(args):
+ """
+ Parse the arguments.
+ """
+ parser = argparse.ArgumentParser(description='Simple training script for training OSNet.')
+ parser.add_argument('--initial_lr', help='initial learning rate', type=float, default=0.065)
+ parser.add_argument('--batch_size', help='batch size', type=int, default=128)
+ parser.add_argument('--num_epoch', help='total epoch to train', type=int, default=100)
+ parser.add_argument('--train_image_dir', help='path to train_image', type=str, default='/home/dingwei/osnet/dataset/Market-1501-v15.09.15/bounding_box_train')
+ # parser.add_argument('--output_path', help='path to output', type=str, default='/home/dingwei/osnet/osnet_tf/output')
+ print(vars(parser.parse_args(args)))
+ return parser.parse_args(args)
+
+
+def main(args=None):
+ # Npu setting
+ sess_config = tf.ConfigProto()
+ custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add()
+ custom_op.name = "NpuOptimizer"
+ custom_op.parameter_map["precision_mode"].s = tf.compat.as_bytes("allow_mix_precision")
+ custom_op.parameter_map["dynamic_input"].b = True
+ custom_op.parameter_map["dynamic_graph_execute_mode"].s = tf.compat.as_bytes("lazy_recompile")
+    custom_op.parameter_map["use_off_line"].b = True  # must be enabled explicitly to train on the Ascend AI processor
+    sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # remapping must be disabled explicitly
+ sess_config.graph_options.rewrite_options.memory_optimization = RewriterConfig.OFF
+ sess = tf.Session(config=sess_config)
+ K.set_session(sess)
+
+ args = parse_args(args)
+ # Preprocess data
+ train_image_filenames = sorted([filename for filename in os.listdir(args.train_image_dir) if filename.endswith(".jpg")])
+ train_image_paths = [os.path.join(args.train_image_dir, name) for name in train_image_filenames]
+ train_person_ids = [name[:4] for name in train_image_filenames]
+ label_encoder = LabelEncoder()
+ label_encoder.fit(train_person_ids)
+ train_person_ids_encoded = label_encoder.transform(train_person_ids)
+ num_person_ids = len(set(train_person_ids_encoded))
+
+ train_img_paths, val_img_paths, train_person_ids, val_person_ids = train_test_split(
+ train_image_paths, train_person_ids_encoded, test_size=0.1, random_state=2021,
+ stratify=train_person_ids_encoded)
+ print(
+ f"# train images: {len(train_img_paths)}, # val images: {len(val_img_paths)}, # image labels: {num_person_ids}")
+
+    # Construct and show model
+ baseline_model = OSNet(751).model
+ print(baseline_model.summary())
+
+ loss = categorical_crossentropy_label_smoothing if use_label_smoothing else "categorical_crossentropy"
+ # loss = tf.keras.losses.SparseCategoricalCrossentropy()
+ optimizer = SGD(lr=args.initial_lr, momentum=0.9)
+ baseline_model.compile(optimizer=optimizer, loss=loss, metrics=["accuracy"])
+
+ # Set lr decay
+ def scheduler(epoch):
+        # every 30 epochs (after epoch 0), reduce the learning rate to 0.9x its current value
+ if epoch % 30 == 0 and epoch != 0:
+ lr = K.get_value(baseline_model.optimizer.lr)
+ K.set_value(baseline_model.optimizer.lr, lr * 0.9)
+ print("lr changed to {}".format(lr * 0.9))
+ return K.get_value(baseline_model.optimizer.lr)
+ reduce_lr = LearningRateScheduler(scheduler)
+
+ train_generator = DataGenerator(train_img_paths, train_person_ids, batch_size=args.batch_size,
+ num_classes=num_person_ids, shuffle=True, augment=True)
+ val_generator = DataGenerator(val_img_paths, val_person_ids, batch_size=args.batch_size, num_classes=num_person_ids)
+
+ # Train model
+ baseline_model.fit(
+ train_generator,
+ epochs=args.num_epoch,
+ # validation_data=val_generator,
+ callbacks=[reduce_lr],
+ shuffle=True,
+ )
+
+ # Save model
+ baseline_model.save_weights("osnet.h5")
+ print("Training completed and model saved.")
+ sess.close()
+
+if __name__ == "__main__":
+ main()
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/train_full_1p.sh b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/train_full_1p.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8297653997da9bd0d1231b6c4393114d778aa998
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/train_full_1p.sh
@@ -0,0 +1,187 @@
+#!/bin/bash
+
+##########################################################
+######### Lines 3 to 100 must NOT be modified ############
+######### Lines 3 to 100 must NOT be modified ############
+######### Lines 3 to 100 must NOT be modified ############
+##########################################################
+# directory of this shell script
+cur_path=`echo $(cd $(dirname $0);pwd)`
+
+# check whether this script is the performance variant
+perf_flag=`echo $0 | grep performance | wc -l`
+
+# name of the network being executed
+Network=`echo $(cd $(dirname $0);pwd) | awk -F"/" '{print $(NF-1)}'`
+
+export RANK_SIZE=1
+export RANK_ID=0
+export JOB_ID=10087
+
+# initialize path parameters
+data_path='/home/dingwei/osnet/dataset/Market-1501-v15.09.15'
+output_path='/home/dingwei/osnet/osnet_tf/output'
+
+# help message, do not modify
+if [[ $1 == --help || $1 == -h ]];then
+    echo "usage: ./train_full_1p.sh "
+ echo " "
+ echo "parameter explain:
+ --data_path # dataset of training
+ --output_path # output of training
+ --train_steps # max_step for training
+ --train_epochs # max_epoch for training
+ --batch_size # batch size
+ -h/--help show help message
+ "
+ exit 1
+fi
+
+# parse arguments, do not modify
+for para in $*
+do
+ if [[ $para == --data_path* ]];then
+ data_path=`echo ${para#*=}`
+ elif [[ $para == --output_path* ]];then
+ output_path=`echo ${para#*=}`
+ elif [[ $para == --train_steps* ]];then
+ train_steps=`echo ${para#*=}`
+ elif [[ $para == --train_epochs* ]];then
+ train_epochs=`echo ${para#*=}`
+ elif [[ $para == --batch_size* ]];then
+ batch_size=`echo ${para#*=}`
+ fi
+done
+
+# check that data_path was provided, do not modify
+if [[ $data_path == "" ]];then
+    echo "[Error] para \"data_path\" must be configured"
+ exit 1
+fi
+
+# check whether output_path was provided, do not modify
+if [[ $output_path == "" ]];then
+ output_path="./test/output/${ASCEND_DEVICE_ID}"
+fi
+
+# set the console log file name; keep this line, the log file is ${print_log}
+print_log="./test/output/${ASCEND_DEVICE_ID}/train_${ASCEND_DEVICE_ID}.log"
+modelarts_flag=${MODELARTS_MODEL_PATH}
+if [ x"${modelarts_flag}" != x ];
+then
+ echo "running without etp..."
+ print_log_name=`ls /home/ma-user/modelarts/log/ | grep proc-rank`
+ print_log="/home/ma-user/modelarts/log/${print_log_name}"
+fi
+echo "### get your log here : ${print_log}"
+
+CaseName=""
+function get_casename()
+{
+ if [ x"${perf_flag}" = x1 ];
+ then
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'perf'
+ else
+ CaseName=${Network}_bs${batch_size}_${RANK_SIZE}'p'_'acc'
+ fi
+}
+
+# change to the code directory
+cd ${cur_path}/../
+rm -rf ./test/output/${ASCEND_DEVICE_ID}
+mkdir -p ./test/output/${ASCEND_DEVICE_ID}
+
+# record training start time, do not modify
+start_time=$(date +%s)
+##########################################################
+######### Lines 3 to 100 must NOT be modified ############
+######### Lines 3 to 100 must NOT be modified ############
+######### Lines 3 to 100 must NOT be modified ############
+##########################################################
+
+#=========================================================
+#=========================================================
+#======== training command; modify according to your network ============
+#=========================================================
+#=========================================================
+# basic parameters; review and adapt for your model
+# your training dataset is under ${data_path}; use this variable directly
+# your training output directory is under ${output_path}; use this variable directly
+# other basic parameters may be added as needed, but keep batch_size and set it correctly
+batch_size=128
+
+
+if [ x"${modelarts_flag}" != x ];
+then
+ python3.7 ./train.py --train_image_dir=${data_path}/bounding_box_train --num_epoch=3
+ python3.7 ./eval.py --data_path=${data_path}
+else
+ python3.7 ./train.py --train_image_dir=${data_path}/bounding_box_train --num_epoch=1 1>>${print_log} 2>&1
+ python3.7 ./eval.py --data_path=${data_path} 1>>${print_log} 2>&1
+fi
+
+# performance metrics
+StepTime=`grep "ms/step :" ${print_log} | tail -n 10 | awk '{print $NF}' | awk '{sum+=$1} END {print sum/NR}'`
+FPS=`awk 'BEGIN{printf "%.2f\n", '${batch_size}'/'${StepTime}*1000'}'`
+
+# accuracy metrics
+train_accuracy=`grep "Final Accuracy accuracy" ${print_log} | awk '{print $NF}'`
+# extract all printed loss values
+grep "loss :" ${print_log} | awk -F ":" '{print $4}' | awk -F "-" '{print $1}' > ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt
+
+
+###########################################################
+######### Do not modify anything below this line ##########
+######### Do not modify anything below this line ##########
+######### Do not modify anything below this line ##########
+###########################################################
+
+# check whether this run actually used the Ascend NPU
+use_npu_flag=`grep "The model has been compiled on the Ascend AI processor" ${print_log} | wc -l`
+if [ x"${use_npu_flag}" == x0 ];
+then
+ echo "------------------ ERROR NOTICE START ------------------"
+    echo "ERROR: your task has not used the Ascend NPU, please check your NPU migration."
+ echo "------------------ ERROR NOTICE END------------------"
+else
+ echo "------------------ INFO NOTICE START------------------"
+    echo "INFO: your task has used the Ascend NPU, please check your result."
+ echo "------------------ INFO NOTICE END------------------"
+fi
+
+# get the final casename; keep this, the case file name is ${CaseName}
+get_casename
+
+# rename the loss file
+if [ -f ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ];
+then
+ mv ./test/output/${ASCEND_DEVICE_ID}/my_output_loss.txt ./test/output/${ASCEND_DEVICE_ID}/${CaseName}_loss.txt
+fi
+
+# end-to-end training time
+end_time=$(date +%s)
+e2e_time=$(( $end_time - $start_time ))
+
+echo "------------------ Final result ------------------"
+# report FPS / time per step / end-to-end time
+echo "Final Performance images/sec : $FPS"
+echo "Final Performance ms/step : $StepTime"
+echo "E2E Training Duration sec : $e2e_time"
+
+# report training accuracy
+echo "Final Train Accuracy : ${train_accuracy}"
+
+# loss value of the last iteration, do not modify
+ActualLoss=(`awk 'END {print $NF}' $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}_loss.txt`)
+
+# key information is written to ${CaseName}.log, do not modify
+echo "Network = ${Network}" > $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "RankSize = ${RANK_SIZE}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "BatchSize = ${batch_size}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "DeviceType = `uname -m`" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "CaseName = ${CaseName}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualFPS = ${FPS}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainingTime = ${StepTime}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "ActualLoss = ${ActualLoss}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "E2ETrainingTime = ${e2e_time}" >> $cur_path/output/$ASCEND_DEVICE_ID/${CaseName}.log
+echo "TrainAccuracy = ${train_accuracy}" >> $cur_path/test/output/$ASCEND_DEVICE_ID/${CaseName}.log
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/.keep b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/.keep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/__init__.py b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdb22dca64c3c8c63e6a5c2972abbc36d50db629
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/general.py b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/general.py
new file mode 100644
index 0000000000000000000000000000000000000000..070a2182db41fe6be8a0bc78f2249c78e093791a
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/general.py
@@ -0,0 +1,59 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tensorflow.keras.losses import categorical_crossentropy
+
+def categorical_crossentropy_label_smoothing(y_true, y_pred):
+ label_smoothing = 0.1
+ return categorical_crossentropy(y_true, y_pred, label_smoothing=label_smoothing)
+
+from imgaug import augmenters as iaa
+seq = iaa.Sequential([
+ iaa.Fliplr(0.5), # horizontal flips
+ iaa.Crop(percent=(0, 0.1)), # random crops
+ # Small gaussian blur with random sigma between 0 and 0.5.
+ # But we only blur about 50% of all images.
+ iaa.Sometimes(
+ 0.5,
+ iaa.GaussianBlur(sigma=(0, 0.5))
+ ),
+ # Random Erase
+ iaa.Sometimes(
+ 0.5,
+ iaa.Cutout(nb_iterations=1, size=[0.3, 0.4], squared=False)
+ ),
+ # Apply affine transformations to each image.
+ # Scale/zoom them, translate/move them, rotate them and shear them.
+ iaa.Sometimes(
+ 0.3,
+ iaa.Affine(
+ rotate=(-10, 10),
+ shear=(-8, 8)
+ )
+ ),
+], random_order=True, random_state=2021)
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/ranking.py b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/ranking.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3f9eff4dfe96b64e1bb147447fb60ecbc0b2f89
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/ranking.py
@@ -0,0 +1,140 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from collections import defaultdict
+
+import numpy as np
+from sklearn.metrics import average_precision_score
+
+
+
+def _unique_sample(ids_dict, num):
+ mask = np.zeros(num, dtype=np.bool)
+ for _, indices in ids_dict.items():
+ i = np.random.choice(indices)
+ mask[i] = True
+ return mask
+
+
+def cmc(distmat, query_ids=None, gallery_ids=None,
+ query_cams=None, gallery_cams=None, topk=100,
+ separate_camera_set=False,
+ single_gallery_shot=False,
+ first_match_break=False):
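+    # Cumulative Matching Characteristic (CMC): for each query, rank the gallery by distance,
+    # drop same-ID/same-camera entries, and accumulate hits at each rank up to topk.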
+ m, n = distmat.shape
+ # Fill up default values
+ if query_ids is None:
+ query_ids = np.arange(m)
+ if gallery_ids is None:
+ gallery_ids = np.arange(n)
+ if query_cams is None:
+ query_cams = np.zeros(m).astype(np.int32)
+ if gallery_cams is None:
+ gallery_cams = np.ones(n).astype(np.int32)
+ # Ensure numpy array
+ query_ids = np.asarray(query_ids)
+ gallery_ids = np.asarray(gallery_ids)
+ query_cams = np.asarray(query_cams)
+ gallery_cams = np.asarray(gallery_cams)
+ # Sort and find correct matches
+ indices = np.argsort(distmat, axis=1)
+ matches = (gallery_ids[indices] == query_ids[:, np.newaxis])
+ # Compute CMC for each query
+ ret = np.zeros(topk)
+ num_valid_queries = 0
+ for i in range(m):
+ # Filter out the same id and same camera
+ valid = ((gallery_ids[indices[i]] != query_ids[i]) |
+ (gallery_cams[indices[i]] != query_cams[i]))
+ if separate_camera_set:
+ # Filter out samples from same camera
+ valid &= (gallery_cams[indices[i]] != query_cams[i])
+ if not np.any(matches[i, valid]): continue
+ if single_gallery_shot:
+ repeat = 10
+ gids = gallery_ids[indices[i][valid]]
+ inds = np.where(valid)[0]
+ ids_dict = defaultdict(list)
+ for j, x in zip(inds, gids):
+ ids_dict[x].append(j)
+ else:
+ repeat = 1
+ for _ in range(repeat):
+ if single_gallery_shot:
+ # Randomly choose one instance for each id
+ sampled = (valid & _unique_sample(ids_dict, len(valid)))
+ index = np.nonzero(matches[i, sampled])[0]
+ else:
+ index = np.nonzero(matches[i, valid])[0]
+ delta = 1. / (len(index) * repeat)
+ for j, k in enumerate(index):
+ if k - j >= topk: break
+ if first_match_break:
+ ret[k - j] += 1
+ break
+ ret[k - j] += delta
+ num_valid_queries += 1
+ if num_valid_queries == 0:
+ raise RuntimeError("No valid query")
+ return ret.cumsum() / num_valid_queries
+
+
+def mean_ap(distmat, query_ids=None, gallery_ids=None,
+ query_cams=None, gallery_cams=None):
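+    # Mean average precision (mAP): per-query average precision over the ranked gallery,
+    # using the negative distance as the matching score.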
+ m, n = distmat.shape
+ # Fill up default values
+ if query_ids is None:
+ query_ids = np.arange(m)
+ if gallery_ids is None:
+ gallery_ids = np.arange(n)
+ if query_cams is None:
+ query_cams = np.zeros(m).astype(np.int32)
+ if gallery_cams is None:
+ gallery_cams = np.ones(n).astype(np.int32)
+ # Ensure numpy array
+ query_ids = np.asarray(query_ids)
+ gallery_ids = np.asarray(gallery_ids)
+ query_cams = np.asarray(query_cams)
+ gallery_cams = np.asarray(gallery_cams)
+ # Sort and find correct matches
+ indices = np.argsort(distmat, axis=1)
+ matches = (gallery_ids[indices] == query_ids[:, np.newaxis])
+ # Compute AP for each query
+ aps = []
+ for i in range(m):
+ # Filter out the same id and same camera
+ valid = ((gallery_ids[indices[i]] != query_ids[i]) |
+ (gallery_cams[indices[i]] != query_cams[i]))
+ y_true = matches[i, valid]
+ y_score = -distmat[i][indices[i]][valid]
+ if not np.any(y_true): continue
+ aps.append(average_precision_score(y_true, y_score))
+ if len(aps) == 0:
+ raise RuntimeError("No valid query")
+ return np.mean(aps)
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/rerank.py b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/rerank.py
new file mode 100644
index 0000000000000000000000000000000000000000..519ae9c796cca7685315d3764b39e13bc69470a4
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/rerank.py
@@ -0,0 +1,125 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#!/usr/bin/env python2/python3
+# -*- coding: utf-8 -*-
+"""
+Source: https://github.com/zhunzhong07/person-re-ranking
+Created on Mon Jun 26 14:46:56 2017
+@author: luohao
+Modified by Houjing Huang, 2017-12-22.
+- This version accepts distance matrix instead of raw features.
+- The difference of `/` division between python 2 and 3 is handled.
+- numpy.float16 is replaced by numpy.float32 for numerical precision.
+CVPR2017 paper:Zhong Z, Zheng L, Cao D, et al. Re-ranking Person Re-identification with k-reciprocal Encoding[J]. 2017.
+url:http://openaccess.thecvf.com/content_cvpr_2017/papers/Zhong_Re-Ranking_Person_Re-Identification_CVPR_2017_paper.pdf
+Matlab version: https://github.com/zhunzhong07/person-re-ranking
+API
+q_g_dist: query-gallery distance matrix, numpy array, shape [num_query, num_gallery]
+q_q_dist: query-query distance matrix, numpy array, shape [num_query, num_query]
+g_g_dist: gallery-gallery distance matrix, numpy array, shape [num_gallery, num_gallery]
+k1, k2, lambda_value: parameters, the original paper is (k1=20, k2=6, lambda_value=0.3)
+Returns:
+ final_dist: re-ranked distance, numpy array, shape [num_query, num_gallery]
+"""
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import division
+
+__all__ = ['re_ranking']
+
+import numpy as np
+
+
+def re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=20, k2=6, lambda_value=0.3):
+
+ # The following naming, e.g. gallery_num, is different from outer scope.
+ # Don't care about it.
+
+ original_dist = np.concatenate(
+ [np.concatenate([q_q_dist, q_g_dist], axis=1),
+ np.concatenate([q_g_dist.T, g_g_dist], axis=1)],
+ axis=0)
+ original_dist = np.power(original_dist, 2).astype(np.float32)
+ original_dist = np.transpose(1. * original_dist/np.max(original_dist,axis = 0))
+ V = np.zeros_like(original_dist).astype(np.float32)
+ initial_rank = np.argsort(original_dist).astype(np.int32)
+
+ query_num = q_g_dist.shape[0]
+ gallery_num = q_g_dist.shape[0] + q_g_dist.shape[1]
+ all_num = gallery_num
+
+ for i in range(all_num):
+ # k-reciprocal neighbors
+ forward_k_neigh_index = initial_rank[i,:k1+1]
+ backward_k_neigh_index = initial_rank[forward_k_neigh_index,:k1+1]
+ fi = np.where(backward_k_neigh_index==i)[0]
+ k_reciprocal_index = forward_k_neigh_index[fi]
+ k_reciprocal_expansion_index = k_reciprocal_index
+ for j in range(len(k_reciprocal_index)):
+ candidate = k_reciprocal_index[j]
+ candidate_forward_k_neigh_index = initial_rank[candidate,:int(np.around(k1/2.))+1]
+ candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,:int(np.around(k1/2.))+1]
+ fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
+ candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
+ if len(np.intersect1d(candidate_k_reciprocal_index,k_reciprocal_index))> 2./3*len(candidate_k_reciprocal_index):
+ k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index,candidate_k_reciprocal_index)
+
+ k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
+ weight = np.exp(-original_dist[i,k_reciprocal_expansion_index])
+ V[i,k_reciprocal_expansion_index] = 1.*weight/np.sum(weight)
+ original_dist = original_dist[:query_num,]
+ if k2 != 1:
+ V_qe = np.zeros_like(V,dtype=np.float32)
+ for i in range(all_num):
+ V_qe[i,:] = np.mean(V[initial_rank[i,:k2],:],axis=0)
+ V = V_qe
+ del V_qe
+ del initial_rank
+ invIndex = []
+ for i in range(gallery_num):
+ invIndex.append(np.where(V[:,i] != 0)[0])
+
+ jaccard_dist = np.zeros_like(original_dist,dtype = np.float32)
+
+
+ for i in range(query_num):
+ temp_min = np.zeros(shape=[1,gallery_num],dtype=np.float32)
+ indNonZero = np.where(V[i,:] != 0)[0]
+        indImages = [invIndex[ind] for ind in indNonZero]
+ for j in range(len(indNonZero)):
+ temp_min[0,indImages[j]] = temp_min[0,indImages[j]]+ np.minimum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]])
+ jaccard_dist[i] = 1-temp_min/(2.-temp_min)
+
+ final_dist = jaccard_dist*(1-lambda_value) + original_dist*lambda_value
+ del original_dist
+ del V
+ del jaccard_dist
+ final_dist = final_dist[:query_num,query_num:]
+ return final_dist
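+
+# A minimal usage sketch (not part of the original source): `q_feats` and `g_feats`
+# are hypothetical query/gallery feature matrices. Pairwise Euclidean distances are
+# built with plain numpy and passed to `re_ranking` with the paper's default parameters.
+def _re_ranking_example(q_feats, g_feats):
+    def pairwise_dist(a, b):
+        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b, clipped to avoid tiny negative values
+        sq = np.sum(a ** 2, axis=1)[:, None] + np.sum(b ** 2, axis=1)[None, :] - 2. * a.dot(b.T)
+        return np.sqrt(np.clip(sq, 0., None))
+    q_g = pairwise_dist(q_feats, g_feats)
+    q_q = pairwise_dist(q_feats, q_feats)
+    g_g = pairwise_dist(g_feats, g_feats)
+    return re_ranking(q_g, q_q, g_g, k1=20, k2=6, lambda_value=0.3)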
\ No newline at end of file
diff --git a/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/triplet.py b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/triplet.py
new file mode 100644
index 0000000000000000000000000000000000000000..5073703489fa4870d6eded9e4f02d3bcc3ae8c22
--- /dev/null
+++ b/TensorFlow/contrib/cv/OSNet_ID1379_for_TensorFlow/utils/triplet.py
@@ -0,0 +1,358 @@
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import math
+import random
+from sklearn.utils import shuffle as shuffle_tuple
+from tensorflow.keras.utils import Sequence, to_categorical
+from tensorflow.keras.preprocessing.image import load_img
+from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
+from tensorflow.python.keras.applications.resnet_v2 import ResNet50V2
+from tensorflow.keras.layers import Dense, Activation, Lambda, BatchNormalization, Input, concatenate, Embedding
+from tensorflow.keras.models import Model, model_from_json
+from tensorflow.keras import backend as K
+import cv2
+from .general import seq
+
+random.seed(2021)
+
+
+def create_model(image_shape, num_person_ids, show_model_summary=False):
+ anchor_input = Input(image_shape, name="anchor_input")
+ positive_input = Input(image_shape, name="positive_input")
+ negative_input = Input(image_shape, name="negative_input")
+
+ cnn_model = MobileNetV2(input_shape=image_shape, alpha=0.5, include_top=False, pooling="max")
+ cnn_model.trainable = False
+
+ anchor_embedding = cnn_model(anchor_input)
+ positive_embedding = cnn_model(positive_input)
+ negative_embedding = cnn_model(negative_input)
+
+ merged_vector = concatenate([anchor_embedding, positive_embedding, negative_embedding], axis=-1, name="triplet")
+
+ dense_anchor = Dense(num_person_ids)(anchor_embedding)
+ softmax_anchor_output = Activation("softmax", name="softmax")(dense_anchor)
+
+ triplet_model = Model([anchor_input, positive_input, negative_input], [merged_vector, softmax_anchor_output])
+
+ if show_model_summary:
+ triplet_model.summary()
+
+ return triplet_model
+
+
+def create_semi_hard_triplet_model(image_shape, num_person_ids, show_model_summary=False, resnet=True, last_stride_reduce=True, bn=True, center_loss=True, average_pooling=True):
+ if resnet:
+ print("Using model ResNet50V2\n")
+ cnn_model = ResNet50V2(input_shape=image_shape, include_top=False, pooling=("avg" if average_pooling else "max"))
+ if last_stride_reduce:
+ cnn_model.get_layer("conv4_block6_2_conv").strides = (1,1)
+ cnn_model.get_layer("max_pooling2d_2").strides = (1,1)
+ cnn_model = model_from_json(cnn_model.to_json())
+ else:
+ print("Using model MobileNetV2\n")
+ cnn_model = MobileNetV2(input_shape=image_shape, alpha=0.5, include_top=False, pooling=("avg" if average_pooling else "max"))
+ if last_stride_reduce:
+ cnn_model.get_layer("block_13_pad").padding = (1,1)
+ cnn_model.get_layer("block_13_depthwise").strides = (1,1)
+ cnn_model = model_from_json(cnn_model.to_json())
+
+ global_pool = cnn_model.layers[-1].output
+ cnn_model.layers[-1]._name = "triplet"
+
+ if bn:
+ features_bn = BatchNormalization(name="features_bn")(global_pool)
+ dense = Dense(num_person_ids)(features_bn)
+ else:
+ dense = Dense(num_person_ids)(global_pool)
+ softmax_output = Activation("softmax", name="softmax")(dense)
+
+ if center_loss:
+ input_target = Input(shape=(1,))
+ centers = Embedding(num_person_ids, global_pool.shape[-1], name="embedding_center")(input_target)
+ center_loss = Lambda(lambda x: 0.5 * K.sum(K.square((x[0] - x[1])), axis=1, keepdims=True), name="center")((global_pool, centers))
+ triplet_model = Model([cnn_model.input, input_target], [global_pool, softmax_output, center_loss])
+ else:
+ triplet_model = Model(cnn_model.input, [global_pool, softmax_output])
+
+ if show_model_summary:
+ triplet_model.summary()
+
+ return triplet_model
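+
+# A compile sketch (not part of the original source): with center_loss=True the model
+# built above has three outputs: "triplet" embeddings, "softmax" class probabilities and
+# a per-sample "center" loss value. The use of `tensorflow_addons` and the loss weights
+# below are assumptions for illustration, not values taken from this repository.
+def _compile_semi_hard_example(image_shape=(256, 128, 3), num_person_ids=751):
+    import tensorflow_addons as tfa  # assumed to be installed alongside TensorFlow
+    model = create_semi_hard_triplet_model(image_shape, num_person_ids, center_loss=True)
+    model.compile(
+        optimizer="adam",
+        loss=[tfa.losses.TripletSemiHardLoss(margin=0.3),  # takes integer labels + embeddings
+              "categorical_crossentropy",
+              lambda y_true, y_pred: y_pred],              # the "center" output already is a loss value
+        loss_weights=[1.0, 1.0, 5e-4])
+    return model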
+
+####
+# Self-defined triplet loss
+####
+def triplet_loss(y_true, y_pred, alpha=0.3):
+ y_pred = K.l2_normalize(y_pred, axis=1)
+ batch_num = y_pred.shape.as_list()[-1] // 3
+
+ anchor = y_pred[:, 0:batch_num]
+ positive = y_pred[:, batch_num:2*batch_num]
+ negative = y_pred[:, 2*batch_num:3*batch_num]
+
+ pos_dist = K.sum(K.square(anchor - positive), axis=1)
+ neg_dist = K.sum(K.square(anchor - negative), axis=1)
+
+ loss = K.maximum(pos_dist - neg_dist + alpha, 0.0)
+
+ return loss
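+
+# A compile sketch (not part of the original source) for the plain triplet model built by
+# `create_model`: the "triplet" output uses the margin-based loss above (y_true is ignored
+# by `triplet_loss`) and the "softmax" output uses cross-entropy. Shapes, id count and
+# loss weights are illustrative only.
+def _compile_triplet_example(image_shape=(256, 128, 3), num_person_ids=751):
+    model = create_model(image_shape, num_person_ids)
+    model.compile(optimizer="adam",
+                  loss={"triplet": triplet_loss, "softmax": "categorical_crossentropy"},
+                  loss_weights={"triplet": 1.0, "softmax": 1.0})
+    return model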
+
+####
+# Learning-rate schedule from the paper (10-epoch warmup, then step decay)
+####
+def lr_decay_warmup(epoch, initial_rate):
+ epoch += 1
+ if epoch < 11:
+ return 3.5e-4 * epoch / 10
+ elif epoch < 41:
+ return 3.5e-4
+ elif epoch < 71:
+ return 3.5e-5
+ else:
+ return 3.5e-6
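+
+# Usage sketch (not in the original source): the schedule above ignores `initial_rate`
+# and follows the warmup + step decay described in the paper. It plugs into training
+# through the standard Keras LearningRateScheduler callback; `model` and `train_gen`
+# are placeholders supplied by the caller.
+def _lr_schedule_example(model, train_gen, epochs=120):
+    from tensorflow.keras.callbacks import LearningRateScheduler
+    lr_callback = LearningRateScheduler(lr_decay_warmup, verbose=1)
+    return model.fit_generator(train_gen, epochs=epochs, callbacks=[lr_callback])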
+
+
+
+# Train data generator for the plain CNN classifier
+class DataGenerator(Sequence):
+
+    # Here, `x_set` is a list of paths to the images
+    # and `y_set` holds the associated class labels.
+ def __init__(self, x_set, y_set, batch_size, num_classes, shuffle=False, augment=False):
+ self.x, self.y = x_set, y_set
+ self.batch_size = batch_size
+ self.num_classes = num_classes
+ self.shuffle = shuffle
+ self.augment = augment
+
+ def __len__(self):
+ return math.ceil(len(self.x) / self.batch_size)
+
+ def __getitem__(self, idx):
+
+ batch_x = self.x[idx * self.batch_size: (idx + 1) * self.batch_size]
+ batch_y = self.y[idx * self.batch_size: (idx + 1) * self.batch_size]
+
+ if self.shuffle:
+ batch_x, batch_y = shuffle_tuple(batch_x, batch_y)
+
+ if self.augment:
+ batch_x = np.array([np.asarray(load_img(file_path)).astype(np.uint8) for file_path in batch_x]).astype(np.uint8)
+ batch_x = seq.augment_images(batch_x)
+ batch_x = batch_x / 255.
+ else:
+ batch_x = np.array([np.asarray(load_img(file_path)) / 255. for file_path in batch_x])
+
+ batch_x = (batch_x - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
+ batch_y = to_categorical(np.array(batch_y), num_classes=self.num_classes)
+
+ return batch_x, batch_y
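+
+# Usage sketch (not in the original source): `train_paths` is a list of image file
+# paths and `train_labels` the matching integer person ids; `model` is any compiled
+# classifier whose output size equals `num_classes`. Batch size and epochs are illustrative.
+def _classifier_fit_example(model, train_paths, train_labels, num_classes, epochs=10):
+    train_gen = DataGenerator(train_paths, train_labels, batch_size=32,
+                              num_classes=num_classes, shuffle=True, augment=True)
+    return model.fit_generator(train_gen, epochs=epochs)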
+
+# Train data generator for the self-defined triplet loss model
+class DataGeneratorTriplet(Sequence):
+ def __init__(self, x_set, y_set, batch_size, num_classes, shuffle=False, augment=False):
+ self.x, self.y = x_set, y_set
+
+ # Make dict with key -> person_id, value -> list of associated images
+ self.image_to_label = {}
+ for image_path, image_label in zip(self.x, self.y):
+ self.image_to_label.setdefault(image_label, []).append(image_path)
+
+        # Keep only anchor ids that have more than one image
+ self.anchor_filtered = [k for k, v in self.image_to_label.items() if len(v) > 1]
+
+ self.batch_size = batch_size
+ self.num_classes = num_classes
+ self.shuffle = shuffle
+ self.augment = augment
+
+ def __len__(self):
+ return math.ceil(len(self.x) / self.batch_size)
+
+ def __getitem__(self, idx):
+
+ if self.shuffle:
+ random.shuffle(self.anchor_filtered)
+
+ # Get random sample of anchor_ids; amount: batch_size
+ anchor_ids_sampled = random.sample(self.anchor_filtered, k=self.batch_size)
+        # Get candidate negative sample ids
+ negative_id_cands = list(set(self.image_to_label.keys()) - set(anchor_ids_sampled))
+
+ # Get anchor and positive image paths
+ anchor_positive_list = [tuple(random.sample(self.image_to_label[id], k=2)) for id in anchor_ids_sampled]
+ anchor_img_paths, positive_img_paths = zip(*anchor_positive_list)
+
+ # Get negative image_paths
+ negative_id_sampled = random.sample(negative_id_cands, k=self.batch_size)
+ negative_img_paths = [random.choice(self.image_to_label[id]) for id in negative_id_sampled]
+
+ if self.augment:
+ anchor_X_batch = np.array([np.asarray(load_img(file_path)).astype(np.uint8) for file_path in anchor_img_paths]).astype(np.uint8)
+ anchor_X_batch = seq.augment_images(anchor_X_batch)
+
+ positive_X_batch = np.array([np.asarray(load_img(file_path)).astype(np.uint8) for file_path in positive_img_paths]).astype(np.uint8)
+ positive_X_batch = seq.augment_images(positive_X_batch)
+
+ negative_X_batch = np.array([np.asarray(load_img(file_path)).astype(np.uint8) for file_path in negative_img_paths]).astype(np.uint8)
+ negative_X_batch = seq.augment_images(negative_X_batch)
+
+ else:
+ anchor_X_batch = np.array([np.asarray(load_img(file_path)) for file_path in anchor_img_paths])
+ positive_X_batch = np.array([np.asarray(load_img(file_path)) for file_path in positive_img_paths])
+ negative_X_batch = np.array([np.asarray(load_img(file_path)) for file_path in negative_img_paths])
+
+ anchor_X_batch = anchor_X_batch / 255.
+ positive_X_batch = positive_X_batch / 255.
+ negative_X_batch = negative_X_batch / 255.
+
+        # Subtract the ImageNet channel means and divide by the standard deviations
+ anchor_X_batch = (anchor_X_batch - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
+ positive_X_batch = (positive_X_batch - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
+ negative_X_batch = (negative_X_batch - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
+
+ anchor_Y_batch = to_categorical(np.array(anchor_ids_sampled), num_classes=self.num_classes)
+
+ return ([anchor_X_batch, positive_X_batch, negative_X_batch], [anchor_Y_batch, anchor_Y_batch])
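+
+# Usage sketch (not in the original source): the generator yields
+# ([anchor, positive, negative], [one-hot labels, one-hot labels]) batches, matching the
+# two outputs ("triplet", "softmax") of a model compiled as in `_compile_triplet_example`.
+def _triplet_fit_example(compiled_triplet_model, train_paths, train_labels, num_person_ids, epochs=60):
+    gen = DataGeneratorTriplet(train_paths, train_labels, batch_size=16,
+                               num_classes=num_person_ids, shuffle=True, augment=False)
+    return compiled_triplet_model.fit_generator(gen, epochs=epochs)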
+
+
+# Train data generator for the tensorflow-addons semi-hard triplet loss model
+class DataGeneratorHardTriplet(Sequence):
+ def __init__(self, x_set, y_set, person_id_num, image_per_person_id, num_classes, shuffle=False, augment=False, center_loss=True):
+ self.x, self.y = x_set, y_set
+
+ # Make dict with key -> person_id, value -> list of associated images
+ self.image_to_label = {}
+ for image_path, image_label in zip(self.x, self.y):
+ self.image_to_label.setdefault(image_label, []).append(image_path)
+
+        # Keep only ids that have at least `image_per_person_id` images
+ self.y_filtered = [k for k, v in self.image_to_label.items() if len(v) >= image_per_person_id]
+
+ self.person_id_num = person_id_num
+ self.image_per_person_id = image_per_person_id
+ self.num_classes = num_classes
+ self.shuffle = shuffle
+ self.augment = augment
+ self.center_loss = center_loss
+
+ def __len__(self):
+ return math.ceil(len(self.x) / (self.person_id_num * self.image_per_person_id))
+
+ def __getitem__(self, idx):
+
+ if self.shuffle:
+ random.shuffle(self.y_filtered)
+
+ # Get random sample of ids; amount: `person_id_num`
+ person_ids_chosen = random.sample(self.y_filtered, k=self.person_id_num)
+        # For each id, get a random sample of associated images; amount: `image_per_person_id`
+ img_paths_sampled = [random.sample(self.image_to_label[id], k=self.image_per_person_id) for id in person_ids_chosen]
+ img_paths_sampled = [path for paths in img_paths_sampled for path in paths] # Flattening `img_paths_sampled`
+
+        # Repeat each id in `person_ids_chosen` `image_per_person_id` times so the labels align with `img_paths_sampled`
+ label_sampled = [[id] * self.image_per_person_id for id in person_ids_chosen]
+ label_sampled = np.array([label for labels in label_sampled for label in labels]) # Flattening `label_sampled`
+
+ if self.augment:
+ X_batch = np.array([np.asarray(load_img(file_path)).astype(np.uint8) for file_path in img_paths_sampled]).astype(np.uint8)
+ X_batch = seq.augment_images(X_batch)
+ else:
+ X_batch = np.array([np.asarray(load_img(file_path)) for file_path in img_paths_sampled])
+
+ X_batch = X_batch / 255.
+ X_batch = (X_batch - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
+
+ Y_batch = to_categorical(np.array(label_sampled), num_classes=self.num_classes)
+
+ if self.center_loss:
+ return ([X_batch, label_sampled], [label_sampled, Y_batch, label_sampled])
+ else:
+ return (X_batch, [label_sampled, Y_batch])
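+
+# Usage sketch (not in the original source): P x K sampling with `person_id_num`
+# identities per batch and `image_per_person_id` images each, as expected by a semi-hard
+# triplet loss. With center_loss=True the integer labels are also fed to the
+# embedding-center input, matching `_compile_semi_hard_example` above.
+def _hard_triplet_fit_example(compiled_model, train_paths, train_labels, num_classes, epochs=120):
+    gen = DataGeneratorHardTriplet(train_paths, train_labels, person_id_num=16,
+                                   image_per_person_id=4, num_classes=num_classes,
+                                   shuffle=True, augment=True, center_loss=True)
+    return compiled_model.fit_generator(gen, epochs=epochs)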
+
+
+# Test data generator
+class DataGeneratorPredict(Sequence):
+
+ def __init__(self, x_set, batch_size, shuffle=False, augment=False):
+ self.x = x_set
+ self.batch_size = batch_size
+ self.shuffle = shuffle
+ self.augment = augment
+
+ def __len__(self):
+ return math.ceil(len(self.x) / self.batch_size)
+
+ def __getitem__(self, idx):
+
+ batch_x = self.x[idx * self.batch_size: (idx + 1) * self.batch_size]
+
+ if self.augment:
+ batch_x = np.array([np.asarray(load_img(file_path)).astype(np.uint8) for file_path in batch_x]).astype(np.uint8)
+ batch_x = seq.augment_images(batch_x)
+ batch_x = batch_x / 255.
+ else:
+ batch_x = np.array([np.asarray(load_img(file_path)) / 255. for file_path in batch_x])
+
+ batch_x = (batch_x - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
+
+ return batch_x
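+
+# Usage sketch (not in the original source): run a trained embedding model over query
+# and gallery images; the resulting feature matrices can be turned into distance
+# matrices and re-ranked with `utils.rerank.re_ranking`.
+def _extract_features_example(embedding_model, query_paths, gallery_paths, batch_size=64):
+    q_gen = DataGeneratorPredict(query_paths, batch_size)
+    g_gen = DataGeneratorPredict(gallery_paths, batch_size)
+    q_feats = embedding_model.predict_generator(q_gen)
+    g_feats = embedding_model.predict_generator(g_gen)
+    return q_feats, g_feats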
+
+
+def generator_batch_test(img_path_list, img_width, img_height, batch_size=32, shuffle=False):
+ N = len(img_path_list)
+
+ if shuffle:
+ img_path_list = shuffle_tuple(img_path_list)
+
+    batch_index = 0  # index of the current batch
+
+ while True:
+        current_index = (batch_index * batch_size) % N  # first index of the current batch within the epoch
+        if N >= (current_index + batch_size):  # check whether a full batch still fits before the end of the list
+            current_batch_size = batch_size
+            batch_index += 1  # advance to the next batch
+ else:
+ current_batch_size = N - current_index
+ batch_index = 0
+ img_batch_list = img_path_list[current_index:current_index + current_batch_size]
+
+ X_batch = np.zeros((current_batch_size, img_height, img_width, 3))
+ for i, img_path in enumerate(img_batch_list):
+ img = cv2.imread(img_path)
+ if img.shape[:2] != (img_height, img_width):
+ img = cv2.resize(img, (img_width, img_height))
+ img[:, :, [0, 1, 2]] = img[:, :, [2, 1, 0]]
+ X_batch[i, :, :, :] = img
+ # normalization
+ X_batch = X_batch / 255.
+ X_batch = (X_batch - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
+ yield X_batch
\ No newline at end of file