From 00067834043a7ca22d06aac93dbca433b4e652f0 Mon Sep 17 00:00:00 2001
From: chenjianbinC <2312144889@qq.com>
Date: Tue, 15 Nov 2022 15:35:08 +0800
Subject: [PATCH] add ReIDv2

---
 contrib/ReIDv2/README.md    | 218 +++++++++++++++++
 contrib/ReIDv2/data/.keep   |   0
 contrib/ReIDv2/main.py      | 462 ++++++++++++++++++++++++++++++++++++
 contrib/ReIDv2/models/.keep |   0
 contrib/ReIDv2/result/.keep |   0
 5 files changed, 680 insertions(+)
 create mode 100644 contrib/ReIDv2/README.md
 create mode 100644 contrib/ReIDv2/data/.keep
 create mode 100644 contrib/ReIDv2/main.py
 create mode 100644 contrib/ReIDv2/models/.keep
 create mode 100644 contrib/ReIDv2/result/.keep

diff --git a/contrib/ReIDv2/README.md b/contrib/ReIDv2/README.md
new file mode 100644
index 000000000..5b4c82104
--- /dev/null
+++ b/contrib/ReIDv2/README.md
@@ -0,0 +1,218 @@
+# MindXSDK Person Re-identification
+
+## 1 Introduction
+This sample implements end-to-end person re-identification (ReID) on top of the MindX SDK: given reference photos of a person, it retrieves that person's ID in scene images. The main flow is:
+- The program entry point takes two file paths: the scene images to search and the person reference set.
+- For each scene image: the YOLOv3 detection model finds the persons in the image; each detection is cropped and resized, and the ReID model then extracts a feature vector for every detected person.
+- For the reference set: each reference image is resized and passed through the ReID model to extract its feature vector.
+- Retrieval: the feature vectors of the persons in the scene image are compared with the reference feature vectors; each detected person is assigned the most likely ID, and the result is visualized with bounding boxes and text labels.
+
+## 2 Directory structure
+The project is named ReIDv2 and is laid out as follows:
+```
+ReIDv2
+|---- data
+|     |---- gallerySet      // scene images to search
+|     |---- querySet        // reference images of the target persons
+|---- models                // detection / ReID models and config files
+|     |---- yolov3.cfg
+|     |---- coco.names
+|     |---- ReID_pth2onnx.cfg
+|---- result                // output folder
+|---- main.py
+|---- makeYourOwnDataset.py
+|---- README.md
+```
+> Empty folders cannot be committed to Gitee, so please create the result folder, the data folder and its subfolders yourself, following the layout above.
+> Without the result folder, no output can be written.
+## 3 Dependencies
+| Software | Version |
+| :--------: | :------: |
+|Ubuntu|18.04.1 LTS|
+|CANN|5.0.4|
+|MindX SDK|2.0.4|
+|Python|3.9.2|
+|numpy|1.21.0|
+|opencv_python|4.5.2|
+Note that the MindX SDK uses Python 3.9.2. If the matching Python lib cannot be found, install the Python 3.9 development library as root:
+```
+apt-get install libpython3.9
+```
+## 4 Model conversion
+The pipeline first detects the persons in an image with the YOLOv3 model, then extracts each person's feature vector with the ReID model. YOLOv3 is a TensorFlow model and ReID is a PyTorch model, so both must be converted to .om models with the ATC tool.
+
+4.1 Converting YOLOv3:
+
+**Step 1** Download the original YOLOv3 model (.pb file) and its configuration file (.cfg file):
+      [original model download link](https://c7xcode.obs.myhuaweicloud.com/models/YOLOV3_coco_detection_picture_with_postprocess_op/yolov3_tensorflow_1.5.pb)
+      [configuration file download link](https://c7xcode.obs.myhuaweicloud.com/models/YOLOV3_coco_detection_picture_with_postprocess_op/aipp_nv12.cfg)
+
+**Step 2** Place the downloaded YOLOv3 .pb file and .cfg file under "project directory/models".
+
+**Step 3** Convert to a .om model
+Run all of the following under "project directory/models":
+- Set the environment variables (check that install_path is correct):
+```
+export install_path=/usr/local/Ascend/ascend-toolkit/latest
+
+export PATH=/usr/local/python3.9.2/bin:${install_path}/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
+export PYTHONPATH=${install_path}/atc/python/site-packages:$PYTHONPATH
+export LD_LIBRARY_PATH=${install_path}/atc/lib64:${install_path}/acllib/lib64:$LD_LIBRARY_PATH
+export ASCEND_OPP_PATH=${install_path}/opp
+export ASCEND_AICPU_PATH=/usr/local/Ascend/ascend-toolkit/latest/
+```
+- Convert the .pb file to a .om file with ATC:
+```
+atc --model=yolov3_tensorflow_1.5.pb --framework=3 --output=yolov3 --output_type=FP32 --soc_version=Ascend310 --input_shape="input:1,416,416,3" --out_nodes="yolov3/yolov3_head/Conv_6/BiasAdd:0;yolov3/yolov3_head/Conv_14/BiasAdd:0;yolov3/yolov3_head/Conv_22/BiasAdd:0" --log=info --insert_op_conf=aipp_nv12.cfg
+```
+- If the conversion succeeds, the message below is printed and a model file named yolov3.om appears in this directory.
+(You can rename the .om file by changing the --output argument.)
+```
+ATC run success, welcome to the next use.
+```
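+
+To quickly confirm that the converted model is loadable before wiring up the full pipeline, the minimal sketch below uses the same MindX SDK Python API that main.py uses (the device ID is an assumption; adjust it to your setup):
+```python
+from mindx.sdk import base
+
+# if this call returns without an error, the converted .om file is usable
+yolov3 = base.model("./models/yolov3.om", deviceId=0)
+print("yolov3.om loaded")
+```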
+
+4.2 Converting the ReID model
+
+4.2.1 Model overview
+      [ReID paper](https://arxiv.org/pdf/1903.07071.pdf)
+      [ReID code](https://github.com/michuanhaohao/reid-strong-baseline)
+
+4.2.2 Conversion environment requirements
+```
+- framework requirements
+  CANN == 5.0.4
+  torch == 1.5.0
+  torchvision == 0.6.0
+  onnx == 1.7.0
+
+- Python third-party libraries
+  numpy == 1.21.0
+  opencv-python == 4.5.2
+  Pillow == 8.2.0
+  yacs == 0.1.8
+  pytorch-ignite == 0.4.5
+```
+
+4.2.3 Conversion steps
+
+**Step 1** Convert the .pth model to a .onnx model
+
+***1*** Clone the ReID model source code from GitHub. Under "project directory/models", run:
+```
+git clone https://github.com/michuanhaohao/reid-strong-baseline
+```
+This creates "project directory/models/reid-strong-baseline", containing the ReID model source code.
+
+***2*** Download the .pth weight file and place it under "project directory/models".
+File name: market_resnet50_model_120_rank1_945.pth
+      [Google Drive](https://drive.google.com/drive/folders/1hn0sXLZ5yJcxtmuY-ItQfYD7hBtHwt7A)
+      [Huawei Cloud](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/ReID/ReID%E7%9B%B8%E5%85%B3%E6%96%87%E4%BB%B6.rar)
+
+***3*** Download ReID_pth2onnx.py: [download link](https://gitee.com/ascend/modelzoo/blob/master/contrib/ACL_PyTorch/Research/cv/classfication/ReID_for_Pytorch/ReID_pth2onnx.py)
+  Place the script under "project directory/models" and run the command below to generate the .onnx model:
+```
+python3 ReID_pth2onnx.py --config_file='reid-strong-baseline/configs/softmax_triplet_with_center.yml' MODEL.PRETRAIN_CHOICE "('self')" TEST.WEIGHT "('market_resnet50_model_120_rank1_945.pth')"
+```
+> Note that ATC currently supports ONNX operator set version 11.
+
+ReID.onnx now appears under "project directory/models", which completes Step 1.
+If PyTorch cannot be installed in the online environment, you can run the .pth-to-.onnx conversion above on a local machine and then copy the resulting .onnx model to "project directory/models".
+
+
+**Step 2** Convert the .onnx model to a .om model
+
+***1*** Set the environment variables
+> Repeat the "Set the environment variables (check that install_path is correct)" operation from Step 3 of section 4.1.
+
+***2*** Convert the .onnx file to a .om file with ATC:
+```
+atc --framework=5 --model=ReID.onnx --output=ReID --input_format=NCHW --input_shape="image:1,3,256,128" --insert_op_conf=ReID_onnx2om.cfg --log=debug --soc_version=Ascend310
+```
+- If the conversion succeeds, the message below is printed and a model file named ReID.om appears under "project directory/models". (Again, you can rename the .om file by changing the --output argument.)
+```
+ATC run success, welcome to the next use.
+```
+
+After the steps above, yolov3.om and ReID.om are both available under "project directory/models", and model conversion is complete.
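+
+As a quick sanity check of ReID.om, the sketch below extracts a single feature vector with the same API calls that main.py makes (the device ID and the image path are assumptions; any decodable .jpg works):
+```python
+import numpy as np
+import cv2
+from mindx.sdk import base
+from mindx.sdk.base import ImageProcessor, Size
+
+reid = base.model("./models/ReID.om", deviceId=0)          # device 0 is an assumption
+proc = ImageProcessor(0)
+img = proc.decode("data/querySet/0001_01.jpg", base.nv12)  # hypothetical query image
+resized = proc.resize(img, Size(128, 256), base.huaweiu_high_order_filter)
+out = reid.infer([resized.to_tensor()])[0]
+out.to_host()
+feat = np.array(out)
+cv2.normalize(src=feat, dst=feat, norm_type=cv2.NORM_L2)   # L2-normalize, as main.py does
+print(feat.shape)                                          # expect a 2048-dim embedding
+```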
+
+4.3 References
+> Model conversion uses the ATC tool. For more information see: [ATC tool user guide - quick start](https://support.huaweicloud.com/tg-cannApplicationDev330/atlasatc_16_0005.html)
+> YOLOv3 conversion reference: [ATC_yolov3_tensorflow](https://gitee.com/ascend/modelzoo/tree/master/contrib/TensorFlow/Research/cv/yolov3/ATC_yolov3_tensorflow/)
+> ReID conversion reference: [ReID_for_Pytorch](https://gitee.com/ascend/modelzoo/tree/master/contrib/ACL_PyTorch/Research/cv/classfication/ReID_for_Pytorch/#31-pth%E8%BD%AConnx%E6%A8%A1%E5%9E%8B)
+
+## 5 Datasets
+5.1 The Market-1501 dataset
+
+File name: Market-1501-v15.09.15.zip
+      [Google Drive](https://drive.google.com/file/d/0B8-rUzbwVRk0c054eEozWG9COHM/view?resourcekey=0-8nyl7K9_x37HlQm34MmrYQ)
+      [Huawei Cloud](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/ReID/ReID%E7%9B%B8%E5%85%B3%E6%96%87%E4%BB%B6.rar)
+
+5.1.1 Person reference set
+Unzip "Market-1501-v15.09.15.zip", pick the images of the persons you want to search for from "Market-1501-v15.09.15\Market1501\gt_bbox", and place them in "project directory/data/querySet".
+> For best results, query one person at a time and use 2-6 images of that person as the reference set.
+> If you query several persons at once, make sure their clothing styles differ clearly, otherwise false matches become likely.
+> The project extracts a person ID from every reference image name, so the images must follow the naming format
+>> '0001(person ID)_c1(camera ID)s1(sequence ID)_000151(frame ID)_00(bounding box ID).jpg'
+
+5.1.2 Scene image set
+The scene images used here are a subset of Market-1501, taken from the
+[Person Search Demo](https://github.com/songwsx/person_search_demo/tree/master/data/samples).
+They can also be obtained from [Huawei Cloud](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/ReID/ReID%E7%9B%B8%E5%85%B3%E6%96%87%E4%BB%B6.rar).
+Place the obtained images in "project directory/data/gallerySet".
+
+5.2 Building your own dataset
+Note that every image in a self-made dataset must be strictly landscape (the image width must be strictly greater than its height).
+Folders involved:
+> "project directory/data/ownDataset": scene images used to build the person reference set
+> "project directory/data/cropOwnDataset": person crops extracted from those scene images
+
+**Step 1** Split all your scene images into two disjoint parts:
+> one part to build the person reference set (place it under "project directory/data/ownDataset")
+> the other part to search in (place it under "project directory/data/gallerySet")
+
+**Step 2** Run makeYourOwnDataset.py to extract all persons from the scene images under "project directory/data/ownDataset"; the crops are written to "project directory/data/cropOwnDataset":
+```
+python3 makeYourOwnDataset.py --imageFilePath='data/ownDataset' --outputFilePath='data/cropOwnDataset'
+```
+**Step 3** From the crops in "project directory/data/cropOwnDataset", pick the persons you want to query and rename the images following the Market-1501 convention (this naming is mandatory, otherwise the person IDs will be parsed incorrectly):
+> Rename the different images of one person to "xxxx_xx", where the first 4 digits are the person ID and the last 2 digits are the image ID, e.g. the 2nd image of person 1 becomes 0001_02.
+> Place the finished reference images in "project directory/data/querySet".
+
+
+----------------------------------------------------
+
+
+## 6 Testing
+
+6.1 Get the .om models
+```
+See section 4: Model conversion
+```
+6.2 Prepare the datasets
+```
+See section 5: Datasets
+```
+6.3 Set the environment variables
+```
+See section 4: setting the environment variables
+```
+6.4 Run
+```
+python3 main.py --queryFilePath='data/querySet' --galleryFilePath='data/gallerySet' --matchThreshold=0.3
+```
+6.5 Check the results
+```
+After main.py finishes, the results can be found under "project directory/result".
+```
+6.6 Accuracy and performance
+```
+No false or missed detections; the functional test passes. Performance is on par with V1, and the V2 detection results are identical to V1's.
+```
+
+## 7 References
+> Person search: [Person Search Demo](https://github.com/songwsx/person_search_demo)
+
+
+## 8 Q&A
+· When running main.py, "Vpc cropping failed" or "The image height zoom ratio is out of range [1/32, 16]" appears.
+> This happens when a target detected by YOLOv3 is so small that enlarging the crop exceeds the system limit [1/32, 16]. Edit "project directory/models/yolov3.cfg" and moderately increase OBJECTNESS_THRESH to filter out such detections.
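+
+As a hedged illustration (the exact key-value layout of your yolov3.cfg may differ, and the value below is only an example), raising the threshold looks like:
+```
+OBJECTNESS_THRESH=0.5
+```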
\ No newline at end of file
diff --git a/contrib/ReIDv2/data/.keep b/contrib/ReIDv2/data/.keep
new file mode 100644
index 000000000..e69de29bb
diff --git a/contrib/ReIDv2/main.py b/contrib/ReIDv2/main.py
new file mode 100644
index 000000000..6c8a714a9
--- /dev/null
+++ b/contrib/ReIDv2/main.py
@@ -0,0 +1,462 @@
+#!/usr/bin/env python
+# coding=utf-8
+
+"""
+Copyright(C) Huawei Technologies Co.,Ltd. 2012-2021 All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import argparse
+import os
+import queue
+import threading
+import multiprocessing
+
+import cv2
+import numpy as np
+from PIL import Image
+
+from mindx.sdk import base
+from mindx.sdk.base import ImageProcessor, Size, Rect, post, BTensor
+
+INITIAL_MIN_DISTANCE = 99
+
+LINE_THICKNESS = 2
+FONT_SCALE = 1.0
+FIND_COLOR = (0, 255, 0)        # BGR green for matched persons
+NONE_FIND_COLOR = (255, 0, 0)   # BGR blue for unmatched persons
+
+DEFAULT_MATCH_THRESHOLD = 0.3
+FEATURE_RESHAPE_COLUMN = 2048   # length of one ReID feature vector
+ADMM_BETA = 1
+ADMM_ALPHA = -2
+
+MIN_IMAGE_SIZE = 32
+MAX_IMAGE_SIZE = 8192
+MIN_IMAGE_WIDTH = 6
+DEVICE_ID = 1
+YOLOV3_MODEL_PATH = "./models/yolov3.om"
+YOLOV3 = base.model(YOLOV3_MODEL_PATH, deviceId=DEVICE_ID)
+REID_MODEL_PATH = "./models/ReID.om"
+REID = base.model(REID_MODEL_PATH, deviceId=DEVICE_ID)
+LABEL_PATH = "./models/coco.names"   # path to the class label file
+CONFIG_PATH = "./models/yolov3.cfg"  # path to the YOLOv3 post-processing config
+
+imageProcessor1 = ImageProcessor(DEVICE_ID)
+yolov3_post = post.Yolov3PostProcess(config_path=CONFIG_PATH, label_path=LABEL_PATH)
+
+
+def extract_query_feature(querypath):
+    """
+    Extract the features of the query images
+
+    :arg:
+        querypath: the directory of query images
+
+    :return:
+        queryfeatures: the query feature vectors
+        querypid: the corresponding person IDs
+    """
+    queryfeatures = []
+    querypid = []
+    # check the query directory
+    if not os.path.exists(querypath):
+        print('The query file does not exist.')
+        exit()
+    if len(os.listdir(querypath)) == 0:
+        print('The query file is empty.')
+        exit()
+
+    # extract the features of every image in the query directory
+    for root, dirs, files in os.walk(querypath):
+        for file in files:
+            if file.endswith('.jpg'):
+                # store the corresponding pid
+                # the market1501 dataset names its images as
+                # 0001(person id)_c1(camera id)s1(sequence id)_000151(frame id)_00(box id).jpg
+                # for another dataset, modify this line to extract the person label
+                querypid.append(file[:4])
+                imageprocessor = ImageProcessor(DEVICE_ID)
+                filepath = os.path.join(root, file)
+                decodedimg = imageprocessor.decode(filepath, base.nv12)
+                size_cof = Size(128, 256)
+                resizedimg = imageprocessor.resize(decodedimg, size_cof, base.huaweiu_high_order_filter)
+                imgtensor = [resizedimg.to_tensor()]
+                reid_output = REID.infer(imgtensor)
+                reid_output0 = reid_output[0]
+                reid_output0.to_host()
+                queryfeature = np.array(reid_output0)
+                # L2-normalize so that feature distances behave like cosine distances
+                cv2.normalize(src=queryfeature, dst=queryfeature, norm_type=cv2.NORM_L2)
+                queryfeatures.append(queryfeature)
+            else:
+                print('Input image only supports jpg')
+                exit()
+    return queryfeatures, querypid
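+
+
+# Usage sketch (paths hypothetical): the query set is embedded once at startup:
+#     feats, pids = extract_query_feature('data/querySet')
+# each feats[i] is one L2-normalized 2048-dim ReID embedding and pids[i] is the
+# 4-character person ID parsed from the file name.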
+
+
+def get_pipeline_results(filepath):
+    """
+    Run detection and feature extraction on one gallery image
+
+    :arg:
+        filepath: directory of current gallery image
+
+    :return:
+        objectlist: detection boxes from the YOLOv3 post-processing
+        featurelist: ReID features of the cropped detections
+    """
+    try:
+        image = Image.open(filepath)
+        if image.format != 'JPEG':
+            print('Input image only supports jpg')
+            exit()
+        elif image.width < MIN_IMAGE_SIZE or image.width > MAX_IMAGE_SIZE:
+            print('Input image width must in range [32, 8192], curr is {}'.format(image.width))
+            exit()
+        elif image.height < MIN_IMAGE_SIZE or image.height > MAX_IMAGE_SIZE:
+            print('Input image height must in range [32, 8192], curr is {}'.format(image.height))
+            exit()
+    except IOError:
+        print('An IOError occurred while opening {}, maybe your input is not a picture'.format(filepath))
+        exit()
+
+    # pass the resized and the original image geometry to the post-processing,
+    # so that the detection boxes are mapped back to original image coordinates
+    resizeinfo = base.ResizedImageInfo()
+    resizeinfo.heightResize = 416
+    resizeinfo.widthResize = 416
+    resizeinfo.heightOriginal = image.height
+    resizeinfo.widthOriginal = image.width
+
+    decodedimg = imageProcessor1.decode(filepath, base.nv12)
+    size_cof = Size(416, 416)
+    resizedimg = imageProcessor1.resize(decodedimg, size_cof, base.huaweiu_high_order_filter)
+    imgtensor1 = [resizedimg.to_tensor()]
+    yolov3_outputs = YOLOV3.infer(imgtensor1)
+
+    # build the post-processing inputs from the inference outputs
+    inputs = []
+    for x in range(len(yolov3_outputs)):
+        yolov3_outputs[x].to_host()
+        n = np.array(yolov3_outputs[x])
+        tensor = BTensor(n)
+        inputs.append(tensor)
+    yolov3_post_results = yolov3_post.process(inputs, [resizeinfo])
+
+    # crop every detection out of the decoded image and resize it to the ReID input size
+    cropresizevec = []
+    objectlist = []
+    for i in range(len(yolov3_post_results)):
+        for j in range(len(yolov3_post_results[i])):
+            x0 = int(yolov3_post_results[i][j].x0)
+            y0 = int(yolov3_post_results[i][j].y0)
+            x1 = int(yolov3_post_results[i][j].x1)
+            y1 = int(yolov3_post_results[i][j].y1)
+            classname = yolov3_post_results[i][j].className
+            objectlist.append([x0, y0, x1, y1, classname])
+            cropresizevec.append((Rect(x0, y0, x1, y1), Size(128, 256)))
+    yolov3_crop = imageProcessor1.crop_resize(decodedimg, cropresizevec)
+    imgtensor2 = [x.to_tensor() for x in yolov3_crop]
+    featurelist = []
+    for x in range(len(imgtensor2)):
+        reid_output = REID.infer([imgtensor2[x]])
+        reid_output[0].to_host()
+        featurelist.append(np.array(reid_output[0]))
+
+    return objectlist, featurelist
+
+
+def compute_feature_distance(objectlist, featurelist, queryfeatures):
+    """
+    Record the location and features of each person detected in the gallery image,
+    then compute the feature distance between those persons and the query images
+
+    :arg:
+        objectlist: detection boxes from the YOLOv3 post-processing
+        featurelist: ReID features of the cropped detections
+        queryfeatures: the query feature vectors
+
+    :return:
+        detectedpersoninformation: locations of the detected persons in the gallery image
+        galleryfeaturelength: the number of gallery features
+        queryfeaturelength: the number of query features
+        mindistanceindexmatrix: for each query, the index of the closest gallery feature
+        mindistancematrix: for each query, the smallest distance value
+    """
+    # store the information and features of the detected persons
+    detectedpersoninformation = []
+    detectedpersonfeature = []
+
+    filterimagecount = 0
+    persondetectedflag = False
+
+    # keep only the detections classified as person, and store location and features
+    for detecteditemindex in range(0, len(objectlist)):
+        detecteditem = objectlist[detecteditemindex]
+        xlength = int(detecteditem[2]) - int(detecteditem[0])
+        ylength = int(detecteditem[3]) - int(detecteditem[1])
+        # skip boxes that are too small to crop and re-identify reliably
+        if xlength < MIN_IMAGE_SIZE or ylength < MIN_IMAGE_WIDTH:
+            filterimagecount += 1
+            continue
+        if detecteditem[4] == "person":
+            persondetectedflag = True
+            detectedpersoninformation.append({'x0': int(detecteditem[0]), 'x1': int(detecteditem[2]),
+                                              'y0': int(detecteditem[1]), 'y1': int(detecteditem[3])})
+            detectedfeature = featurelist[detecteditemindex]
+            cv2.normalize(src=detectedfeature, dst=detectedfeature, norm_type=cv2.NORM_L2)
+            detectedpersonfeature.append(detectedfeature)
+
+    if not persondetectedflag:
+        return None
+
+    # stack the query features into a (num_queries, 2048) matrix
+    queryfeaturelength = len(queryfeatures)
+    queryfeaturevector1 = np.array(queryfeatures).reshape(queryfeaturelength, FEATURE_RESHAPE_COLUMN)
+
+    # stack the gallery features into a (num_detections, 2048) matrix
+    galleryfeaturelength = len(detectedpersonfeature)
+    detectedpersonfeature = np.array(detectedpersonfeature).reshape(galleryfeaturelength, FEATURE_RESHAPE_COLUMN)
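+
+    # The block below evaluates the pairwise squared Euclidean distances via the identity
+    #   ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q.g
+    # (ADMM_BETA = 1 scales the squared-norm terms, ADMM_ALPHA = -2 the dot product).
+    # Since both feature sets are L2-normalized, this equals 2 - 2*cos(q, g), so the
+    # default match threshold 0.3 roughly corresponds to a cosine similarity of 0.85.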
+    distancematrix = np.tile(np.sum(np.power(queryfeaturevector1, 2), axis=1, keepdims=True),
+                             reps=galleryfeaturelength) + \
+                     np.tile(np.sum(np.power(detectedpersonfeature, 2), axis=1, keepdims=True),
+                             reps=queryfeaturelength).T
+    distancematrix = ADMM_BETA * distancematrix + \
+                     ADMM_ALPHA * np.dot(queryfeaturevector1, detectedpersonfeature.T)
+
+    # find the closest gallery detection for each query image
+    mindistanceindexmatrix = distancematrix.argmin(axis=1)
+    mindistancematrix = distancematrix.min(axis=1)
+
+    return {'detectedpersoninformation': detectedpersoninformation,
+            'galleryfeaturelength': galleryfeaturelength, 'queryfeaturelength': queryfeaturelength,
+            'mindistanceindexmatrix': mindistanceindexmatrix, 'mindistancematrix': mindistancematrix}
+
+
+def label_for_gallery_image(galleryfeaturelength, queryfeaturelength, querypid, mindistanceindexmatrix,
+                            mindistancematrix, matchthreshold):
+    """
+    Label each detected person in the gallery image with the most likely person ID
+
+    :arg:
+        galleryfeaturelength: the number of gallery features
+        queryfeaturelength: the number of query features
+        querypid: the person IDs of the query images
+        mindistanceindexmatrix: for each query, the index of the closest gallery feature
+        mindistancematrix: for each query, the smallest distance value
+        matchthreshold: match threshold
+
+    :return:
+        gallerylabelset: labels for the current gallery image
+    """
+    # a person appears at most once per gallery image, so the pids in gallerylabelset must be unique
+    gallerylabelset = np.full(shape=galleryfeaturelength, fill_value='None')
+    gallerylabeldistance = np.full(shape=galleryfeaturelength, fill_value=INITIAL_MIN_DISTANCE, dtype=float)
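+
+    # Greedy assignment: every query whose best match beats the threshold claims its
+    # closest detection. Conflicts are resolved by distance: a detection keeps the pid
+    # with the smaller distance, and when a pid is re-assigned to a closer detection,
+    # its previous detection is reset to 'None'.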
+    for queryindex in range(0, queryfeaturelength):
+        currentpid = querypid[queryindex]
+        prefergalleryindex = mindistanceindexmatrix[queryindex]
+        preferdistance = mindistancematrix[queryindex]
+        if preferdistance < matchthreshold:
+            pidexistset = np.where(gallerylabelset == currentpid)
+            pidexistindex = pidexistset[0]
+            if len(pidexistindex) == 0:
+                if gallerylabelset[prefergalleryindex] == 'None':
+                    gallerylabelset[prefergalleryindex] = currentpid
+                    gallerylabeldistance[prefergalleryindex] = preferdistance
+                else:
+                    if preferdistance < gallerylabeldistance[prefergalleryindex]:
+                        gallerylabelset[prefergalleryindex] = currentpid
+                        gallerylabeldistance[prefergalleryindex] = preferdistance
+            else:
+                if preferdistance < gallerylabeldistance[pidexistindex]:
+                    gallerylabelset[pidexistindex] = 'None'
+                    gallerylabeldistance[pidexistindex] = INITIAL_MIN_DISTANCE
+                    gallerylabelset[prefergalleryindex] = currentpid
+                    gallerylabeldistance[prefergalleryindex] = preferdistance
+    return gallerylabelset
+
+
+def draw_results(filepath, galleryfeaturelength, detectedpersoninformation, gallerylabelset, file):
+    """
+    Draw and label the detection and re-identification results
+
+    :arg:
+        filepath: directory of current gallery image
+        galleryfeaturelength: the number of gallery features
+        detectedpersoninformation: locations of the detected persons in the gallery image
+        gallerylabelset: labels for the current gallery image
+        file: name of current gallery image
+
+    :return:
+        None
+    """
+    # read the original image and draw the detection results into it
+    image = cv2.imread(filepath)
+
+    for galleryindex in range(0, galleryfeaturelength):
+        # get the location of the detected person in the gallery image
+        locations = detectedpersoninformation[galleryindex]
+        # matched persons are drawn in FIND_COLOR, unmatched ones in NONE_FIND_COLOR
+        if gallerylabelset[galleryindex] == 'None':
+            color = NONE_FIND_COLOR
+        else:
+            color = FIND_COLOR
+        # draw the bounding box and the pid label
+        cv2.rectangle(image, (locations.get('x0'), locations.get('y0')),
+                      (locations.get('x1'), locations.get('y1')), color, LINE_THICKNESS)
+        cv2.putText(image, gallerylabelset[galleryindex], (locations.get('x0'), locations.get('y0')),
+                    cv2.FONT_HERSHEY_SIMPLEX, FONT_SCALE, color, LINE_THICKNESS)
+    cv2.imwrite("./result/result_{}".format(str(file)), image)
+    print("Detect ", file, " successfully.")
+
+
+class DrawThread(threading.Thread):
+    """Consumer thread that draws and saves the results handed over by process_new."""
+
+    def __init__(self, queue_in, queue_out):
+        threading.Thread.__init__(self)
+        self.flag = True
+        self.queue_in = queue_in
+        self.queue_out = queue_out
+
+    def run(self):
+        while self.flag:
+            try:
+                inputs = self.queue_in.get(timeout=1)
+            except queue.Empty:
+                continue
+            draw_results(*inputs)
+            self.queue_out.put(1)
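+
+
+# Note on the design: the main thread runs detection and ReID inference and hands the
+# results to a DrawThread that writes the annotated images, overlapping drawing I/O
+# with inference. The hand-off uses multiprocessing.Manager queues, although for a
+# plain thread the standard library's queue.Queue would work just as well.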
+
+
+def process_new(gallerypath, queryfeatures, querypid, matchthreshold):
+    """
+    Detect and re-identify persons in the gallery images
+
+    :arg:
+        gallerypath: the directory of gallery images
+        queryfeatures: the query feature vectors
+        querypid: the person IDs of the query images
+        matchthreshold: match threshold
+
+    :return:
+        None
+    """
+    if not os.path.exists(gallerypath):
+        print('The gallery file does not exist.')
+        exit()
+    if len(os.listdir(gallerypath)) == 0:
+        print('The gallery file is empty.')
+        exit()
+    outputpath = 'result'
+    if not os.path.exists(outputpath):
+        print('The result file does not exist.')
+        exit()
+
+    # queues used to hand results to the drawing thread and to count finished images
+    queue_result = multiprocessing.Manager().Queue()
+    queue_count = multiprocessing.Manager().Queue()
+
+    thread2 = DrawThread(queue_result, queue_count)
+    thread2.start()
+
+    num_files = 0
+    count = 0
+    for root, dirs, files in os.walk(gallerypath):
+        for file in files:
+            if file.endswith('.jpg'):
+                filepath = os.path.join(root, file)
+                num_files += 1
+                objectlist, featurelist = get_pipeline_results(filepath)
+                metricdirectory = compute_feature_distance(objectlist, featurelist, queryfeatures)
+
+                if not metricdirectory:
+                    print("Cannot detect person for image:", file)
+                    num_files -= 1  # this image produces no drawing job, so don't wait for it
+                    continue
+
+                detectedpersoninformation = metricdirectory.get('detectedpersoninformation')
+                galleryfeaturelength = metricdirectory.get('galleryfeaturelength')
+                queryfeaturelength = metricdirectory.get('queryfeaturelength')
+                mindistanceindexmatrix = metricdirectory.get('mindistanceindexmatrix')
+                mindistancematrix = metricdirectory.get('mindistancematrix')
+
+                gallerylabelset = label_for_gallery_image(galleryfeaturelength, queryfeaturelength, querypid,
+                                                          mindistanceindexmatrix, mindistancematrix, matchthreshold)
+
+                queue_result.put([filepath, galleryfeaturelength, detectedpersoninformation, gallerylabelset, file])
+
+    # wait until the drawing thread has acknowledged every queued image
+    while count != num_files:
+        try:
+            queue_count.get(timeout=1)
+        except queue.Empty:
+            continue
+        count += 1
+
+    thread2.flag = False
+    thread2.join()
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--queryFilePath', type=str, default='data/querySet/', help="Query File Path")
+    parser.add_argument('--galleryFilePath', type=str, default='data/gallerySet/', help="Gallery File Path")
+    parser.add_argument('--matchThreshold', type=float, default=DEFAULT_MATCH_THRESHOLD,
+                        help="Match Threshold for ReID Processing")
+    opt = parser.parse_args()
+    queryFeatureVector, queryPidVector = extract_query_feature(opt.queryFilePath)
+    process_new(opt.galleryFilePath, queryFeatureVector, queryPidVector, opt.matchThreshold)
diff --git a/contrib/ReIDv2/models/.keep b/contrib/ReIDv2/models/.keep
new file mode 100644
index 000000000..e69de29bb
diff --git a/contrib/ReIDv2/result/.keep b/contrib/ReIDv2/result/.keep
new file mode 100644
index 000000000..e69de29bb
--
Gitee