diff --git a/contrib/Burpee_Detection/App_burpee_detection/app_main.py b/contrib/Burpee_Detection/App_burpee_detection/app_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7df0f6ebd9f4f84409b1cf01442aa771cb5d783
--- /dev/null
+++ b/contrib/Burpee_Detection/App_burpee_detection/app_main.py
@@ -0,0 +1,202 @@
+# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import shutil
+import json
+import os
+import sys
+import logging
+import cv2
+
+import MxpiDataType_pb2 as MxpiDataType
+from StreamManagerApi import StreamManagerApi, MxDataInput, StringVector
+
+from qcloud_cos import CosConfig
+from qcloud_cos import CosS3Client
+
+
+class OStream:
+    def __init__(self, file):
+        self.file = file
+
+    def __lshift__(self, obj):
+        self.file.write(str(obj))
+        return self
+
+
+cout = OStream(sys.stdout)
+END_L = '\n'
+
+# The following belongs to the SDK Process
+streamManagerApi = StreamManagerApi()
+# Init stream manager
+ret = streamManagerApi.InitManager()
+if ret != 0:
+    cout << 'Failed to init Stream manager, ret=' << str(ret) << END_L
+    exit()
+# Create streams by pipeline config file
+# Load pipeline
+with open("../pipeline/burpee_detection_p.pipeline", 'rb') as f:
+    pipelineStr = f.read()
+ret = streamManagerApi.CreateMultipleStreams(pipelineStr)
+# Print error message
+if ret != 0:
+    cout << 'Failed to create Stream, ret=' << str(ret) << END_L
+    exit()
+
+# Use the INFO log level in normal operation; switch to DEBUG when debugging,
+# in which case the SDK also prints its communication with the server.
+logging.basicConfig(level=logging.INFO, stream=sys.stdout)
+
+# Set user credentials, including secret_id, secret_key and region. App_id has been
+# removed from CosConfig; carry it in the Bucket parameter instead, i.e. the bucket
+# name has the form BucketName-App_id.
+SECRET_ID = 'your-secret-id'  # placeholder -- replace with your SecretId, see https://console.cloud.tencent.com/cam/capi
+SECRET_KEY = 'your-secret-key'  # placeholder -- replace with your SecretKey, see https://console.cloud.tencent.com/cam/capi
+REGION = 'ap-shanghai'  # replace with the region of your bucket, see https://console.cloud.tencent.com/cos5/bucket
+# The full list of regions supported by COS: https://cloud.tencent.com/document/product/436/6224
+TOKEN = None  # not needed for a permanent key; required for a temporary key, see https://cloud.tencent.com/document/product/436/14048
+SCHEME = 'https'  # protocol used to access COS, http/https; defaults to https and may be omitted
+
+CONFIG = CosConfig(Region=REGION, SecretId=SECRET_ID,
+                   SecretKey=SECRET_KEY, Token=TOKEN, Scheme=SCHEME)
+CLIENT = CosS3Client(CONFIG)
+
+IMG_NUM = 0
+ACTION = ""
+ACTION_CNT = 0
+STATE = 0
+INPUT_COUNT = 0
+ERR_FILE = False
+FPS = 1
+INPUT_PATH = "./input/"
+RESULT_PATH = 'result.txt'
+
+# Release the input
+if os.path.exists(INPUT_PATH):
+    shutil.rmtree(INPUT_PATH)
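+
+# The loop below polls the COS bucket and acts as the mini-program backend.
+# Bucket layout assumed by this script (inferred from the calls that follow):
+#   state/            -- two objects under this prefix signal "reset the counter"
+#   input/imgN.jpg    -- frames uploaded by the WeChat mini program, N = 0, 1, 2, ...
+#   result/result.txt -- the burpee count written back for the front end to poll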
+while True:
+
+    # Check the state of the app
+    RESPONSE = CLIENT.list_objects(Bucket='burpee-1312708737',
+                                   Prefix='state')
+
+    if len(RESPONSE['Contents']) == 2:
+        IMG_NUM = 0
+        ACTION_CNT = 0
+        STATE = 0
+        INPUT_COUNT = 0
+        if os.path.exists(INPUT_PATH):
+            shutil.rmtree(INPUT_PATH)
+        continue
+
+    # Check the number of input images
+    RESPONSE = CLIENT.list_objects(Bucket='burpee-1312708737',
+                                   Prefix='input')
+
+    if len(RESPONSE['Contents']) < IMG_NUM + 2:
+        cout << 'wait for inputs' << END_L
+        continue
+    # Check the target input image
+    RESPONSE = CLIENT.object_exists(Bucket='burpee-1312708737',
+                                    Key='input/img' + str(IMG_NUM) + '.jpg')
+
+    if not RESPONSE:
+        cout << 'no such file' << END_L
+        continue
+
+    # Download the input data
+    if not os.path.exists(INPUT_PATH):
+        os.makedirs("./input/")
+
+    RESPONSE = CLIENT.get_object(Bucket='burpee-1312708737',
+                                 Key='input/img' + str(IMG_NUM) + '.jpg')
+    RESPONSE['Body'].get_stream_to_file('./input/img' + str(IMG_NUM) + '.jpg')
+    cout << 'Get the input successfully' << END_L
+
+    # Input object of streams -- detection target
+    IMG_PATH = os.path.join(INPUT_PATH, 'img' + str(IMG_NUM) + '.jpg')
+
+    DATA_INPUT = MxDataInput()
+    if not os.path.exists(IMG_PATH):
+        cout << 'The image does not exist.' << END_L
+
+    with open(IMG_PATH, 'rb') as f:
+        DATA_INPUT.data = f.read()
+
+    STREAM_NAME = b'detection'
+    IN_PLUGIN_ID = 0
+    # Send data to streams by SendDataWithUniqueId()
+    UNIQUEID = streamManagerApi.SendDataWithUniqueId(STREAM_NAME, IN_PLUGIN_ID, DATA_INPUT)
+
+    if UNIQUEID < 0:
+        cout << 'Failed to send data to stream.' << END_L
+        exit()
+
+    # Get results from streams by GetResultWithUniqueId()
+    INFER_RESULT = streamManagerApi.GetResultWithUniqueId(STREAM_NAME, UNIQUEID, 3000)
+    if INFER_RESULT.errorCode != 0:
+        cout << 'GetResultWithUniqueId error. errorCode=' << INFER_RESULT.errorCode \
+             << ', errorMsg=' << INFER_RESULT.data.decode() << END_L
+        exit()
+
+    # Get object class
+    RESULTS = json.loads(INFER_RESULT.data.decode())
+    IMG = cv2.imread(IMG_PATH)
+    IMG_NUM = IMG_NUM + 1
+
+    BEST_CONFIDENCE = 0
+    KEY = "MxpiObject"
+
+    if KEY not in RESULTS.keys():
+        continue
+
+    # Save the best confidence and its information
+    for BBOX in RESULTS['MxpiObject']:
+        if round(BBOX['classVec'][0]['confidence'], 4) >= BEST_CONFIDENCE:
+            ACTION = BBOX['classVec'][0]['className']
+            BEST_CONFIDENCE = round(BBOX['classVec'][0]['confidence'], 4)
+
+    # State change: crouch -> support -> crouch -> jump completes one burpee
+    if STATE == 0:
+        if ACTION == "crouch":
+            STATE = 1
+    elif STATE == 1:
+        if ACTION == "support":
+            STATE = 2
+    elif STATE == 2:
+        if ACTION == "crouch":
+            STATE = 3
+    elif STATE == 3:
+        if ACTION == "jump":
+            STATE = 0
+            ACTION_CNT = ACTION_CNT + 1
+
+    # Save txt for results
+    FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL
+    if os.path.exists(RESULT_PATH):
+        os.remove(RESULT_PATH)
+    with os.fdopen(os.open('result.txt', FLAGS, 0o755), 'w') as f:
+        f.write(str(ACTION_CNT))
+    # Upload the result file
+    with open('result.txt', 'rb') as fp:
+        RESPONSE = CLIENT.put_object(
+            Bucket='burpee-1312708737',
+            Body=fp,
+            Key='result/result.txt',
+            StorageClass='STANDARD',
+            EnableMD5=False
+        )
+    cout << 'upload the result file successfully!!!' << END_L
+
+# Destroy All Streams
+streamManagerApi.DestroyAllStreams()
diff --git a/contrib/Burpee_Detection/App_burpee_detection/run.sh b/contrib/Burpee_Detection/App_burpee_detection/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fb4e528953f5e6b247a2ae8db986f832b35a00d8
--- /dev/null
+++ b/contrib/Burpee_Detection/App_burpee_detection/run.sh
@@ -0,0 +1,19 @@
+# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# load envs
+source ../envs/env.sh
+
+# running inference process
+python3.9 app_main.py
diff --git a/contrib/Burpee_Detection/Pic_burpee_detection/map_calculate.py b/contrib/Burpee_Detection/Pic_burpee_detection/map_calculate.py
new file mode 100644
index 0000000000000000000000000000000000000000..d28a6fbe1924e37128d264bd18aacde8fd3277a7
--- /dev/null
+++ b/contrib/Burpee_Detection/Pic_burpee_detection/map_calculate.py
@@ -0,0 +1,407 @@
+# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import glob
+import os
+import sys
+import argparse
+import collections
+
+"""
+    0,0 ------> x (width)
+     |
+     |  (Left,Top)
+     |      *_________
+     |      |         |
+     |      |         |
+     y      |_________|
+  (height)            *
+                (Right,Bottom)
+"""
+
+MIN_OVERLAP = 0.5  # default value (defined in the PASCAL VOC2012 challenge)
+TOP_MARGIN = 0.15  # in percentage of the figure height
+BOTTOM_MARGIN = 0.05  # in percentage of the figure height
+
+
+def file_lines_to_list(path):
+    """
+    Convert the lines of a file to a list
+    """
+    # open txt file lines to a list
+    with open(path) as f:
+        content = f.readlines()
+    # remove whitespace characters like `\n` at the end of each line
+    content = [x.strip() for x in content]
+    return content
+
+
+def voc_ap(recall, precision):
+    """
+    Calculate the AP given the recall and precision array
+    1) We calculate a version of the measured
+       precision/recall curve with precision monotonically decreasing
+    2) We calculate the AP as the area
+       under this curve by numerical integration.
+ """ + """ + --- Official matlab code VOC2012--- + m_recall=[0 ; recall ; 1]; + m_precision=[0 ; precision ; 0]; + for j=numeral(m_precision)-1:-1:1 + m_precision(i)=max(m_precision(j),m_precision(j+1)); + end + i=find(m_recall(2:end)~=m_recall(1:end-1))+1; + ap=sum((m_recall(i)-m_recall(i-1)).*m_precision(i)); + """ + recall.insert(0, 0.0) # insert 0.0 at beginning of list + recall.append(1.0) # insert 1.0 at end of list + m_recall = recall[:] + precision.insert(0, 0.0) # insert 0.0 at beginning of list + precision.append(0.0) # insert 0.0 at end of list + m_precision = precision[:] + """ + This part makes the precision monotonically decreasing + (goes from the end to the beginning) + matlab: for i=numeral(m_precision)-1:-1:1 + m_precision(i)=max(m_precision(i),m_precision(i+1)); + """ + + for i in range(len(m_precision) - 2, -1, -1): + m_precision[i] = max(m_precision[i], m_precision[i + 1]) + """ + This part creates a list of indexes where the recall changes + matlab: i=find(m_recall(2:end)~=m_recall(1:end-1))+1; + """ + i_list = [] + for i in range(1, len(m_recall)): + if m_recall[i] != m_recall[i - 1]: + i_list.append(i) # if it was matlab would be i + 1 + """ + The Average Precision (AP) is the area under the curve + (numerical integration) + matlab: ap=sum((m_recall(i)-m_recall(i-1)).*m_precision(i)); + """ + ap = 0.0 + for i in i_list: + ap += ((m_recall[i] - m_recall[i - 1]) * m_precision[i]) + return ap, m_recall, m_precision + + +def is_float_between_0_and_1(value): + """ + check if the number is a float between 0.0 and 1.0 + """ + try: + val = float(value) + if 0.0 < val < 1.0: + return True + else: + return False + except ValueError: + return False + + +def error(msg): + """ + throw error and exit + """ + print(msg) + sys.exit(0) + + +def check_args(args): + """ + check arguments + """ + if not (os.path.exists(args.label_path)): + error("annotation file:{} does not exist.".format(args.label_path)) + + if not (os.path.exists(args.npu_txt_path)): + error("txt path:{} does not exist.".format(args.npu_txt_path)) + + if args.ignore is None: + args.ignore = [] + return args + + +def parse_line(txt_file, lines_list, bounding_boxes, counter_per_class, already_seen_classes): + """ parse line + :param txt_file: + :param lines_list: + :param bounding_boxes: + :param counter_per_class: + :param already_seen_classes: + :return: bounding_boxes, counter_per_class + """ + for line in lines_list: + try: + class_name, left, top, right, bottom = line.split() + except ValueError: + error_msg = "Error: File " + txt_file + " in the wrong format.\n" + error_msg += " Expected: \n" + error_msg += " Received: " + line + error(error_msg) + if class_name in arg.ignore: + continue + bbox = left + " " + top + " " + right + " " + bottom + if class_name == '0': + class_name = 'crouch' + elif class_name == '1': + class_name = 'support' + elif class_name == '2': + class_name = 'jump' + bounding_boxes.append({"class_name": class_name, "bbox": bbox, "used": False}) + counter_per_class[class_name] += 1 + + if class_name not in already_seen_classes: + already_seen_classes.append(class_name) + return bounding_boxes, counter_per_class + + +def get_label_list(file_path): + """ get label list via file paths + :param file_path: label file path + :return: ret + map , include file_bbox, classes, n_classes, counter_per_class + """ + files_list = glob.glob(file_path + '/*.txt') + if len(files_list) == 0: + error("Error: No ground-truth files found!") + files_list.sort() + # dictionary with counter per class + 
counter_per_class = collections.defaultdict(int) + file_bbox = {} + + for txt_file in files_list: + file_id = txt_file.split(".txt", 1)[0] + file_id = os.path.basename(os.path.normpath(file_id)) + # check if there is a correspondent detection-results file + temp_path = os.path.join(file_path, (file_id + ".txt")) + if not os.path.exists(temp_path): + error_msg = "Error. File not found: {}\n".format(temp_path) + error(error_msg) + lines_list = file_lines_to_list(txt_file) + # create ground-truth dictionary + bounding_boxes = [] + already_seen_classes = [] + boxes, counter_per_class = parse_line(txt_file, lines_list, bounding_boxes, counter_per_class, + already_seen_classes) + file_bbox[file_id] = boxes + + classes = list(counter_per_class.keys()) + + # let's sort the classes alphabetically + classes = sorted(classes) + n_classes = len(classes) + ret = dict() + ret['file_bbox'] = file_bbox + ret['classes'] = classes + ret['n_classes'] = n_classes + ret['counter_per_class'] = counter_per_class + return ret + + +def get_predict_list(file_path, gt_classes): + """ get predict list with file paths and class names + :param file_path: predict txt file path + :param gt_classes: class information + :return: class_bbox bbox of every class + """ + dr_files_list = glob.glob(file_path + '/*.txt') + dr_files_list.sort() + class_bbox = {} + for class_index, class_name in enumerate(gt_classes): + bounding_boxes = [] + for txt_file in dr_files_list: + # the first time it checks + # if all the corresponding ground-truth files exist + file_id = os.path.splitext(txt_file)[0] + file_id = os.path.basename(os.path.normpath(file_id)) + lines = file_lines_to_list(txt_file) + for line in lines: + try: + sl = line.split() + tmp_class_name, confidence, left, top, right, bottom = sl + if float(confidence) < float(arg.threshold): + continue + except ValueError: + error_msg = "Error: File " + txt_file + " wrong format.\n" + error_msg += " Expected: \n" + error_msg += " Received: " + line + error(error_msg) + if tmp_class_name == class_name: + bbox = left + " " + top + " " + right + " " + bottom + bounding_boxes.append({"confidence": confidence, "file_id": file_id, "bbox": bbox}) + # sort detection-results by decreasing confidence + bounding_boxes.sort(key=lambda x: float(x['confidence']), reverse=True) + class_bbox[class_name] = bounding_boxes + return class_bbox + + +def calculate_pr(sum_ap, fp, tp, counter_per_class, class_name): + """ + @description: calculate PR + @param sum_ap + @param fp + @param tp + @param counter_per_class + @param class_name + @return ret + map, include sum_AP, text, prec, rec + """ + cumsum = 0 + for idx, val in enumerate(fp): + fp[idx] += cumsum + cumsum += val + cumsum = 0 + for idx, val in enumerate(tp): + tp[idx] += cumsum + cumsum += val + rec = tp[:] + for idx, val in enumerate(tp): + rec[idx] = float(tp[idx]) / counter_per_class[class_name] + prec = tp[:] + for idx, val in enumerate(tp): + prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx]) + + ap, mrec, mprec = voc_ap(rec[:], prec[:]) + sum_ap += ap + text = "{0:.2f}%".format(ap * 100) + " = " + class_name + " AP " + ret = dict() + ret['sum_ap'] = sum_ap + ret['text'] = text + ret['p_rec'] = prec + ret['rec'] = rec + return ret + + +def calculate_ap(output_file, gt_classes, labels, class_bbox, counter_per_class): + """ + Calculate the AP for each class + :param output_file: + :param gt_classes: [80] + :param labels: {file_index:[{"class_name": class_name, "bbox": bbox, "used": False}]} + :param class_bbox: {class_name:[{"confidence": 
confidence, + "file_id": file_id, "bbox": bbox}]} + :param counter_per_class + :return: + """ + sum_ap = 0.0 + if os.path.exists(output_file): + os.remove(output_file) + flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL + writer = os.fdopen(os.open(output_file, flags, 0o755), 'w') + writer.write("# AP and precision/recall per class\n") + count_true_positives = {} + n_classes = len(gt_classes) + for class_index, class_name in enumerate(gt_classes): + count_true_positives[class_name] = 0 + """ + Load detection-results of that class + Assign detection-results to ground-truth objects + """ + dr_data = class_bbox[class_name] + nd = len(dr_data) + tp = [0] * nd # creates an array of zeros of size nd + fp = [0] * nd + for idx, detection in enumerate(dr_data): + file_id = detection["file_id"] + ground_truth_data = labels[file_id] + + ov_max = -1 + gt_match = -1 + # load detected object bounding-box + bb = [float(x) for x in detection["bbox"].split()] + for obj in ground_truth_data: + # look for a class_name match + if obj["class_name"] == class_name: + bbgt = [float(x) for x in obj["bbox"].split()] + bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]), + min(bb[2], bbgt[2]), min(bb[3], bbgt[3])] + iw = bi[2] - bi[0] + 1 + ih = bi[3] - bi[1] + 1 + if iw > 0 and ih > 0: + # compute overlap (IoU) + ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + \ + (bbgt[2] - bbgt[0] + 1) * \ + (bbgt[3] - bbgt[1] + 1) - iw * ih + ov = iw * ih / ua + if ov > ov_max: + ov_max = ov + gt_match = obj + + # set minimum overlap + min_overlap = MIN_OVERLAP + if ov_max >= min_overlap: + if "difficult" not in gt_match: + if not bool(gt_match["used"]): + # true positive + tp[idx] = 1 + gt_match["used"] = True + count_true_positives[class_name] += 1 + else: + # false positive (multiple detection) + fp[idx] = 1 + else: + # false positive + fp[idx] = 1 + # compute precision / recall + ret = calculate_pr(sum_ap, fp, tp, counter_per_class, class_name) + sum_ap = ret.get('sum_ap', "abc") + text = ret.get('text', "abc") + p_rec = ret.get('p_rec', "abc") + rec = ret.get('rec', "abc") + value = sum_ap and text and p_rec and rec + if value: + pass + else: + return + print(text) + rounded_p_rec = ['%.2f' % elem for elem in p_rec] + rounded_rec = ['%.2f' % elem for elem in rec] + writer.write(text + "\n Precision: " + str(rounded_p_rec) + + "\n Recall :" + str(rounded_rec) + "\n\n") + writer.write("\n# m_ap of all classes\n") + m_ap = sum_ap / n_classes + text = "m_ap = {0:.2f}%".format(m_ap * 100) + writer.write(text + "\n") + print(text) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('mAP calculate') + parser.add_argument('-i', '--ignore', nargs='+', type=str, + help="ignore a list of classes.") + parser.add_argument('--label_path', default="../data/labels/test", help='the path of the label files') + parser.add_argument('--npu_txt_path', default="./result_test", help='the path of the predict result') + parser.add_argument('--output_file', default="./result.txt", help='save result file') + parser.add_argument('--threshold', default=0, help='threshold of the object score') + + arg = parser.parse_args() + arg = check_args(arg) + + label_list = get_label_list(arg.label_path) + gt_file_bbox = label_list.get('file_bbox', "abc") + get_classes = label_list.get('classes', "abc") + gt_n_classes = label_list.get('n_classes', "abc") + count_per_class = label_list.get('counter_per_class', "abc") + + key_value = gt_file_bbox and get_classes and gt_n_classes and count_per_class + if key_value: + predict_bbox = 
get_predict_list(arg.npu_txt_path, get_classes) + calculate_ap(arg.output_file, get_classes, gt_file_bbox, predict_bbox, count_per_class) + else: + print("no label exists") + diff --git a/contrib/Burpee_Detection/Pic_burpee_detection/pic_main.py b/contrib/Burpee_Detection/Pic_burpee_detection/pic_main.py new file mode 100644 index 0000000000000000000000000000000000000000..5e40c17a5f5169b16961355ffb55ffe57847b0f0 --- /dev/null +++ b/contrib/Burpee_Detection/Pic_burpee_detection/pic_main.py @@ -0,0 +1,197 @@ +# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import shutil +import json +import os +import sys +import time +import cv2 + +import MxpiDataType_pb2 as MxpiDataType +from StreamManagerApi import StreamManagerApi, MxDataInput, StringVector + + +class OStream: + def __init__(self, file): + self.file = file + + def __lshift__(self, obj): + self.file.write(str(obj)) + return self + + +cout = OStream(sys.stdout) +END_L = '\n' + +# Init the directory of input and output +INPUT_PATH = [".././data/images/test/"] # the path of input + +OUTPUT_PATH = ["./result_test/"] # the output path of txt file + +OUTPUT_PIC_PATH = ["./result_test_pic/"] # the output path of pictures + +for path in INPUT_PATH: + if len(os.listdir(path)) == 0: + cout << 'The folder is empty:' << path << END_L + exit() + +# The following belongs to the SDK Process +streamManagerApi = StreamManagerApi() +# Init stream manager +ret = streamManagerApi.InitManager() +if ret != 0: + cout << 'Failed to init Stream manager, ret=' << str(ret) << END_L + exit() +# Mark start time +start = time.time() +# Create streams by pipeline config file +# Load pipline +with open(".././pipeline/burpee_detection_p.pipeline", 'rb') as f: + PIPELINE_STR = f.read() +ret = streamManagerApi.CreateMultipleStreams(PIPELINE_STR) +# Print error message +if ret != 0: + cout << 'Failed to create Stream, ret=' << str(ret) << END_L + exit() + +DET_IMG_COUNT = 0 # the number of detected pictures + + +for index, path in enumerate(INPUT_PATH): + + RESULT_PATH = OUTPUT_PATH[index] + + # Create the output directory + if os.path.exists(RESULT_PATH) != 1: + os.makedirs(RESULT_PATH) + else: + shutil.rmtree(RESULT_PATH) + os.makedirs(RESULT_PATH) + + if os.path.exists(OUTPUT_PIC_PATH[index]) != 1: + os.makedirs(OUTPUT_PIC_PATH[index]) + else: + shutil.rmtree(OUTPUT_PIC_PATH[index]) + os.makedirs(OUTPUT_PIC_PATH[index]) + + # Input object of streams -- detection target + for item in os.listdir(path): + IMG_PATH = os.path.join(path, item) + cout << 'read file path:' << IMG_PATH << END_L + IMG_NAME = os.path.splitext(item)[0] + IMG_TXT = RESULT_PATH + IMG_NAME + ".txt" + if os.path.exists(IMG_TXT): + os.remove(IMG_TXT) + DATA_INPUT = MxDataInput() + if os.path.exists(IMG_PATH) != 1: + cout << 'The image does not exist:' << IMG_PATH << END_L + continue + with open(IMG_PATH, 'rb') as f: + DATA_INPUT.data = f.read() + STREAM_NAME = b'detection' + IN_PLUGIN_ID = 0 + # Send data to streams by 
SendDataWithUniqueId() + UNIQUE_ID = streamManagerApi.SendDataWithUniqueId(STREAM_NAME, IN_PLUGIN_ID, DATA_INPUT) + + if UNIQUE_ID < 0: + cout << 'Failed to send data to stream.' << END_L + exit() + + # Get results from streams by GetResultWithUniqueId() + INFER_RESULT = streamManagerApi.GetResultWithUniqueId(STREAM_NAME, UNIQUE_ID, 3000) + if INFER_RESULT.errorCode != 0: + cout << 'GetResultWithUniqueId error. errorCode=' << INFER_RESULT.errorCode \ + << ', errorMsg=' << INFER_RESULT.data.decode() << END_L + exit() + + DET_IMG_COUNT = DET_IMG_COUNT + 1 + + # Get ObjectList + RESULTS = json.loads(INFER_RESULT.data.decode()) + + IMG = cv2.imread(IMG_PATH) + BBOXES = [] + best_class = {} + TEXT = "" + BEST_CONFIDENCE = 0 + KEY = "MxpiObject" + if KEY not in RESULTS.keys(): + continue + for BBOX in RESULTS['MxpiObject']: + BBOXES = {'x0': int(BBOX['x0']), + 'x1': int(BBOX['x1']), + 'y0': int(BBOX['y0']), + 'y1': int(BBOX['y1']), + 'confidence': round(BBOX['classVec'][0]['confidence'], 4), + 'text': BBOX['classVec'][0]['className']} + key_value = BBOXES.get('confidence', "abc") + if key_value: + pass + else: + continue + if key_value > BEST_CONFIDENCE: + L1 = [] + # Convert the label as Yolo label + x_center = round((BBOXES['x1'] + BBOXES['x0']) * 0.5 / IMG.shape[1], 6) + y_center = round((BBOXES['y1'] + BBOXES['y0']) * 0.5 / IMG.shape[0], 6) + w_nor = round((BBOXES['x1'] - BBOXES['x0']) / IMG.shape[1], 6) + h_nor = round((BBOXES['y1'] - BBOXES['y0']) / IMG.shape[0], 6) + L1.append(x_center) + L1.append(y_center) + L1.append(w_nor) + L1.append(h_nor) + L1.append(BBOXES['confidence']) + L1.append(BBOXES['text']) + BEST_CONFIDENCE = BBOXES['confidence'] + TEXT = "{}{}".format(str(BBOXES['confidence']), " ") + for CONTENT in BBOXES['text']: + TEXT += CONTENT + best_class = {'x0': int(BBOX['x0']), + 'x1': int(BBOX['x1']), + 'y0': int(BBOX['y0']), + 'y1': int(BBOX['y1']), + 'confidence': round(BBOX['classVec'][0]['confidence'], 4), + 'text': BBOX['classVec'][0]['className']} + # Draw rectangle and txt for visualization + key_value = (best_class.get('x0', "abc") and best_class.get('y0', "abc")) and \ + (best_class.get('x1', "abc") and best_class.get('y1', "abc")) + if key_value: + pass + else: + continue + cv2.putText(IMG, TEXT, (best_class['x0'] + 10, best_class['y0'] + 10), cv2.FONT_HERSHEY_SIMPLEX, 1.0, + (0, 100, 255), 2) + cv2.rectangle(IMG, (best_class['x0'], best_class['y0']), (best_class['x1'], best_class['y1']), + (255, 0, 0), 2) + + # Save picture + originImgFile = OUTPUT_PIC_PATH[index] + IMG_NAME + '.jpg' + cv2.imwrite(originImgFile, IMG) + + # Save txt for results + FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL + with os.fdopen(os.open(IMG_TXT, FLAGS, 0o755), 'w') as f: + CONTENT = '{} {} {} {} {} {}'.format(L1[5], L1[4], L1[0], L1[1], L1[2], L1[3]) + f.write(CONTENT) + f.write('\n') + +end = time.time() +cost_time = end - start +# Mark spend time +cout << 'Image count:' << DET_IMG_COUNT << END_L +cout << 'Spend time:' << cost_time << END_L +cout << 'fps:' << (DET_IMG_COUNT / cost_time) << END_L +# Destroy All Streams +streamManagerApi.DestroyAllStreams() diff --git a/contrib/Burpee_Detection/Pic_burpee_detection/run.sh b/contrib/Burpee_Detection/Pic_burpee_detection/run.sh new file mode 100644 index 0000000000000000000000000000000000000000..3812774fa07855781c70994c013cc2f52138cd62 --- /dev/null +++ b/contrib/Burpee_Detection/Pic_burpee_detection/run.sh @@ -0,0 +1,19 @@ +# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# load envs
+source ../envs/env.sh
+
+# running inference process
+python3.9 pic_main.py
diff --git a/contrib/Burpee_Detection/README.md b/contrib/Burpee_Detection/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4ab2e10899292a025e6c5af82bda936582207b2b
--- /dev/null
+++ b/contrib/Burpee_Detection/README.md
@@ -0,0 +1,256 @@
+# Burpee Exercise Mini Program
+
+## 1 Introduction
+
+The burpee exercise mini program is developed on the MindX SDK; it performs object detection on the Ascend 310 chip and saves the detection results as a video. The project has two main workflows. 1) Video workflow: a live555 server streams the input video over RTSP; the H.264 video is decoded into frames, the frames are resized and run through model inference for burpee detection, the recognized boxes are drawn with cv for visualization and written out as a video, and a text file records the number of burpees completed in the video. 2) Mini-program workflow: the WeChat mini program uploads frames captured by the camera to a Tencent Cloud bucket; the backend downloads the bucket data locally and feeds it into the pipeline, where the images are decoded, resized, and run through model inference for burpee detection; the post-processed results are uploaded back to the Tencent Cloud bucket for the front-end mini program to use.
+
+### 1.1 Supported Products
+
+Ascend 310 (inference)
+
+### 1.2 Supported Versions
+
+This sample works with CANN version [5.1.RC1](https://gitee.com/link?target=https%3A%2F%2Fwww.hiascend.com%2Fsoftware%2Fcann%2Fcommercial) and MindX SDK version [3.0.RC3](https://www.hiascend.com/software/Mindx-sdk).
+
+For preparation before installing the MindX SDK, see the user guide and the [installation tutorial](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/quickStart/1-1安装SDK开发套件.md).
+
+### 1.3 Software Architecture
+
+The mini-program workflow on the MindX SDK: the WeChat mini program uploads frames captured by the camera to a Tencent Cloud bucket; the backend downloads the bucket data locally and feeds it in through the `mxpi_appsrc` plugin. The `mxpi_imagedecoder` plugin decodes the image, the `mxpi_imageresize` plugin resizes it to the input size required by the detection model, and the resized image is passed to the `mxpi_modelinfer` inference plugin to obtain the detection result. The detection result drives the burpee-recognition state machine and updates the burpee count, and finally a txt file recording the count is uploaded to the Tencent Cloud bucket for the mini program to use.
+
+### 1.4 Code Layout
+
+This sample project is named **Burpee_Detection**; the project directory is shown below:
+
+```
+├── envs
+│   └── env.sh                       // base environment variables and the variables needed by atc conversion
+├── readme_img                       // README image assets
+│   ├── app_1.jpg
+│   ├── app_2.jpg
+│   ├── app_3.jpg
+│   ├── app_4.jpg
+│   ├── app_5.jpg
+│   ├── dataset.jpg
+│   ├── video.jpg
+│   ├── app_flow.jpg
+│   ├── video_flow.jpg
+│   ├── dark.jpg
+│   └── dark_res.jpg
+├── model
+│   ├── atc.sh                       // atc conversion script
+├── pipeline
+│   ├── burpee_detection_p.pipeline  // pipeline used for picture recognition
+│   └── burpee_detection_v.pipeline  // pipeline used for video-stream recognition
+├── App_burpee_detection
+│   ├── app_main.py                  // recognition, result saving and performance test
+│   └── run.sh                       // run script
+├── Pic_burpee_detection
+│   ├── map_calculate.py             // mAP calculation (accuracy test)
+│   ├── pic_main.py                  // recognition, result saving and performance test
+│   └── run.sh                       // run script
+├── Video_burpee_detection
+│   ├── video_main.py                // recognition, result saving and performance test
+│   └── run.sh                       // run script
+└── README.md
+```
+
+### 1.5 Flow Charts
+
+Video recognition:
+
+![video-flow](readme_img/video_flow.jpg)
+
+Mini-program backend flow:
+
+![app-flow](readme_img/app_flow.jpg)
+
+### 1.6 Features and Applicable Scenarios
+
+The distinctive feature of this application is the combination of the MindX SDK with a WeChat mini program, and it is not limited to counting burpees: the same pattern applies to other real-time image-recognition mini-program features. With a single person in frame and normal lighting, the mini program counts reliably and accurately, and both accuracy and performance exceed the required levels. A few situations remain unsuitable:
+
+- In very dark scenes accuracy drops sharply, because the frames captured by the front end have low clarity and the SDK's image-decoder plugin only accepts JPEG.
+- The SDK only ships an RTSP pull-stream plugin; to push video between the mini program and the backend, a new push-stream plugin would have to be written.
+- When the person performs burpees facing the camera, the movement may not be detected; users should stand side-on to the camera, since a frontal view makes it hard to judge whether the movement is complete.
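+
+To make the counting logic mentioned in section 1.3 concrete, below is a minimal sketch of the state machine used by the backend, simplified from `app_main.py` (the `actions` input is a hypothetical list of per-frame top-1 class names):
+
+```python
+# Simplified from app_main.py: one burpee = crouch -> support -> crouch -> jump.
+def count_burpees(actions):
+    """actions: per-frame top-1 class names, e.g. ["crouch", "support", ...]."""
+    state, count = 0, 0
+    # expected[state] is the class name that advances the state machine
+    expected = {0: "crouch", 1: "support", 2: "crouch", 3: "jump"}
+    for action in actions:
+        if action == expected[state]:
+            state = (state + 1) % 4
+            if state == 0:  # a full cycle was completed
+                count += 1
+    return count
+
+print(count_burpees(["crouch", "support", "crouch", "jump"]))  # prints 1
+```
+
+Frames whose top-1 class does not match the expected next action simply leave the state unchanged, which makes the counter robust to repeated or skipped frames.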
+## 2 Environment Dependencies
+
+| Software | Version | Description | How to obtain |
+|:---:|:---:|:---:|:---:|
+| MindX SDK | 3.0.RC3 | mxVision package | [link](https://www.hiascend.com/zh/software/mindx-sdk/mxVision/community) |
+| ubuntu | 18.04.1 LTS | operating system | from the Ubuntu website |
+| Ascend-CANN-toolkit | 5.1.RC1 | Ascend-cann-toolkit development kit | [link](https://gitee.com/link?target=https%3A%2F%2Fwww.hiascend.com%2Fsoftware%2Fcann%2Fcommercial) |
+| live555 | 1.09 | converts offline video to RTSP streams | [link](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/Live555离线视频转RTSP说明文档.md) |
+| ffmpeg | 2021-10-14 | converts mp4 video to H.264 video | [link](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/pc端ffmpeg安装教程.md#https://gitee.com/link?target=https%3A%2F%2Fffmpeg.org%2Fdownload.html) |
+| WeChat DevTools | 1.06.2207210 | runs the mini program | [link](https://developers.weixin.qq.com/miniprogram/dev/devtools/download.html) |
+| mini-program code | - | WeChat mini-program source to import | [link](https://burpee.obs.cn-east-3.myhuaweicloud.com:443/%E5%B0%8F%E7%A8%8B%E5%BA%8F%E4%BB%A3%E7%A0%81.zip?AccessKeyId=3M18UT7HRLKP58NPPFUO&Expires=1690270238&Signature=SHjFgSLUrGMPGbYNYyNgS3VmBMw%3D) |
+| bucket folder layout | - | unzip the archive; the folder layout inside is the layout expected in the bucket | [link](https://burpee.obs.cn-east-3.myhuaweicloud.com/%E6%A1%B6%E5%86%85%E6%96%87%E4%BB%B6.zip) |
+| COS Python SDK | - | quick start for the object-storage Python SDK used by the mini program | [link](https://cloud.tencent.com/document/product/436/12269) |
+| model files | - | pt model, onnx model, om model, names file, model config | [link](https://burpee.obs.cn-east-3.myhuaweicloud.com/models.zip) |
+| | - | pt model, onnx model, om model, names file, model config | [link](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/Burpee/models%E5%92%8C%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6.zip) |
+
+The environment variables needed at run time are listed below. They do not need to be set manually before running; they are already written into the scripts under `Burpee_Detection/envs`:
+
+  ```bash
+  export MX_SDK_path=""          # mxVision installation path
+  export Ascend_toolkit_path=""  # CANN installation path
+
+  # MindX SDK environment variables:
+  . /${MX_SDK_path}/set_env.sh
+
+  # CANN environment variables:
+  . /${Ascend_toolkit_path}/set_env.sh
+  ```
+
+Note: replace `${MX_SDK_path}` with your SDK installation path, and `Ascend_toolkit_path` with the path of the ascend-toolkit development kit.
+
+## 3 Model Conversion and Dependency Installation
+
+The model used by this project is a burpee-recognition model. The model files can be downloaded directly (download links in section 2).
+
+### 3.1 Model Conversion
+
+The onnx model is converted to an om model with the ATC model conversion tool; for an introduction to ATC, see the [CANN Community Edition ATC guide](https://support.huaweicloud.com/atctool-cann504alpha2infer/atlasatc_16_0001.html).
+
+Steps:
+
+- **Step 1** Download the `onnx` model and move it to the `Burpee_Detection/model` directory; if you download the `om` model file instead, skip the conversion steps. (Download links in section 2.)
+
+- **Step 2** Move `best.onnx` to the `Burpee_Detection/model` directory (the conversion script expects it under the name `burpee_detection.onnx`), then run `atc.sh` from within the `model` directory:
+
+  ```bash
+  bash atc.sh
+  ```
+
+  This command generates the model file the project needs in the current folder. The output
+
+  ```
+  ATC start working now, please wait for a moment.
+  ATC run success, welcome to the next use.
+  ```
+
+  indicates the command executed successfully.
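+
+Once the `.om` file is in place, the picture pipeline can be smoke-tested with a few lines of Python. This is a condensed sketch of what `pic_main.py` does; the pipeline path and `test.jpg` are example inputs, and the script must run in an environment where the SDK variables above are set:
+
+```python
+# Push one JPEG through the burpee-detection pipeline and print the raw result.
+import json
+from StreamManagerApi import StreamManagerApi, MxDataInput
+
+api = StreamManagerApi()
+assert api.InitManager() == 0
+with open("../pipeline/burpee_detection_p.pipeline", 'rb') as f:
+    assert api.CreateMultipleStreams(f.read()) == 0
+
+data = MxDataInput()
+with open("test.jpg", 'rb') as f:  # any JPEG test frame
+    data.data = f.read()
+unique_id = api.SendDataWithUniqueId(b'detection', 0, data)
+result = api.GetResultWithUniqueId(b'detection', unique_id, 3000)
+if result.errorCode == 0:
+    print(json.loads(result.data.decode()))  # contains "MxpiObject" when something is detected
+api.DestroyAllStreams()
+```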
+### 3.2 Preparation
+
+Install live555 and ffmpeg as listed in the section 2 dependency table, and follow the [Live555 offline-video-to-RTSP guide](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/Live555离线视频转RTSP说明文档.md) to convert the mp4 video to H.264 format. Upload the generated H.264 video to the `live/mediaServer` directory, then edit the `mxpi_rtspsrc0` entry in the `burpee_detection_v.pipeline` file under the `pipeline` directory:
+
+```json
+"mxpi_rtspsrc0": {
+    "props": {
+        "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxx.264", // change to your own server and file name
+    },
+    "factory": "mxpi_rtspsrc",
+    "next": "mxpi_videodecoder0"
+}
+```
+
+## 4 Running and Testing
+
+### 4.1 Running
+
+#### 4.1.1 Video
+
+- **Step 1** Set the environment variables as described in section 2 **Environment Dependencies**.
+
+- **Step 2** Obtain the `om` model file as described in section 3 **Model Conversion** and place it in the `Burpee_Detection/model` directory.
+
+- **Step 3** In `burpee_detection_v.pipeline`, change the `${MX_SDK_path}` part of the `postProcessLibPath` value in `mxpi_modelinfer0` to the MindX SDK installation path.
+
+- **Step 4** Create the RTSP stream as described in section 3.2 **Preparation** so the local video can be pulled over RTSP.
+
+- **Step 5** Run. In the `Burpee_Detection/Video_burpee_detection` directory execute:
+
+```bash
+bash run.sh
+```
+
+The visualized result is saved as the video `video_result.mp4` in the `Burpee_Detection/Video_burpee_detection` directory, and the burpee count is saved as the file `result.txt` in the same directory.
+
+![dataset](readme_img/video.jpg)
+
+#### 4.1.2 Mini Program
+
+- **Step 1** Set up the mini-program backend environment by following steps 1 to 3 of section 4.1.1 **Video**.
+
+![app_6](readme_img/app_6.jpg)
+
+- **Step 2** Run. Enter the `Burpee_Detection/` directory, then in `Burpee_Detection/App_burpee_detection` execute:
+
+```bash
+bash run.sh
+```
+- **Step 3** Download `WeChat DevTools` and sign in; register a mini program on the WeChat Official Platform and obtain an AppID.
+
+![app_1](readme_img/app_1.jpg)
+
+- **Step 4** Click Import, select and open the mini-program code folder (the code can be downloaded; link in section 2). In the compile-mode menu select the `bind` sub-directory under `pages` and choose `bind`. Under `Details - Local settings`, tick `Do not verify legal domains` (legal domains can be configured on the mini-program platform under Development Management - Development Settings), click `Real-device debugging`, and scan the QR code with your phone.
+
+![app_2](readme_img/app_2.jpg)
+![app_3](readme_img/app_3.jpg)
+![app_4](readme_img/app_4.jpg)
+
+- **Step 5** On the mini-program page tap `Start counting`. The mini program captures camera frames every 40 ms (fps = 25) and uploads them to the Tencent Cloud bucket; the backend receives and processes the images.
+
+![app_5](readme_img/app_5.jpg)
+
+- **Step 6** Do burpees in front of the camera. The backend updates the burpee count in real time and sends the result to the bucket, and the mini program refreshes the displayed count every 0.1 s.
+
+- **Step 7** Tap `Stop`. The mini program stops sending images and deletes the uploaded images from the bucket to free space; the backend waits for the next counting session.
+
+
+### 4.2 Performance and Accuracy Test
+
+- **Step 1** Prepare the test dataset and place the `data` directory under `Burpee_Detection`.
+
+![dataset](readme_img/dataset.jpg)
+
+- **Step 2** Open the `pic_main.py` file under `Burpee_Detection/Pic_burpee_detection` and initialize the variables `INPUT_PATH`, `OUTPUT_PATH` and `OUTPUT_PIC_PATH` to `["../data/images/test/"]`, `[".././Pic_burpee_detection/result_test/"]` and `[".././Pic_burpee_detection/result_test_pic/"]` respectively.
+
+
+- **Step 3** Run the `run.sh` script in `Burpee_Detection/Pic_burpee_detection` to recognize the images under `data/images/test` and output the results:
+
+  ```bash
+  bash run.sh
+  ```
+
+  The script saves the SDK inference results as `.txt` files under `result_test`, and the visualized results as `.jpg` files under `result_test_pic`. The output reports `Spend time:`, the time taken to recognize all images, and `fps:`, the computed frame rate:
+
+  ```bash
+  Image count:237
+  Spend time:5.785876989364624
+  fps:40.961811050536376
+  ```
+
+- **Step 4** Run the `map_calculate.py` script in `Burpee_Detection/Pic_burpee_detection` to compute the accuracy:
+
+  ```bash
+  python3.9 map_calculate.py
+  ```
+
+  Test result:
+
+  ```bash
+  98.67% = crouch AP
+  84.93% = jump AP
+  94.91% = support AP
+  m_ap = 92.83%
+  ```
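+
+For reference, the overlap test behind these AP numbers is the pixel-inclusive IoU computed in `map_calculate.py` (a detection counts as a true positive only if its IoU with an unused ground-truth box of the same class is at least `MIN_OVERLAP = 0.5`). As a standalone sketch:
+
+```python
+# Pixel-inclusive IoU as in map_calculate.py; boxes are [left, top, right, bottom].
+def iou(bb, bbgt):
+    iw = min(bb[2], bbgt[2]) - max(bb[0], bbgt[0]) + 1
+    ih = min(bb[3], bbgt[3]) - max(bb[1], bbgt[1]) + 1
+    if iw <= 0 or ih <= 0:
+        return 0.0  # no overlap
+    ua = ((bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1)
+          + (bbgt[2] - bbgt[0] + 1) * (bbgt[3] - bbgt[1] + 1)
+          - iw * ih)  # union = areaA + areaB - intersection
+    return iw * ih / ua
+
+print(iou([0, 0, 9, 9], [5, 0, 14, 9]))  # ~0.33, below the 0.5 TP threshold
+```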
+### 4.3 Special-Case Test
+
+The data are images taken in low light (without a light source):
+
+![dataset](readme_img/dark.jpg)
+
+Test result:
+
+![dataset](readme_img/dark_res.jpg)
+
+When the test image contains no target, a corresponding error is reported.
\ No newline at end of file
diff --git a/contrib/Burpee_Detection/Video_burpee_detection/run.sh b/contrib/Burpee_Detection/Video_burpee_detection/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8b09c7eb9bea4b23abf5d4cd4b1c18f73bc0bdb3
--- /dev/null
+++ b/contrib/Burpee_Detection/Video_burpee_detection/run.sh
@@ -0,0 +1,19 @@
+# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# load envs
+source ../envs/env.sh
+
+# running inference process
+python3.9 video_main.py
diff --git a/contrib/Burpee_Detection/Video_burpee_detection/video_main.py b/contrib/Burpee_Detection/Video_burpee_detection/video_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..12a51b6b673d63c5d3533fb54787a6c3570338df
--- /dev/null
+++ b/contrib/Burpee_Detection/Video_burpee_detection/video_main.py
@@ -0,0 +1,188 @@
+# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import cv2
+import numpy as np
+
+from StreamManagerApi import StreamManagerApi, MxDataInput, StringVector
+import MxpiDataType_pb2 as MxpiDataType
+
+
+class OStream:
+    def __init__(self, file):
+        self.file = file
+
+    def __lshift__(self, obj):
+        self.file.write(str(obj))
+        return self
+
+
+cout = OStream(sys.stdout)
+END_L = '\n'
+
+# The following belongs to the SDK Process
+streamManagerApi = StreamManagerApi()
+# Init stream manager
+ret = streamManagerApi.InitManager()
+if ret != 0:
+    cout << 'Failed to init Stream manager, ret=' << str(ret) << END_L
+    exit()
+# Create streams by pipeline config file
+# Load pipeline
+with open(".././pipeline/burpee_detection_v.pipeline", 'rb') as f:
+    pipelineStr = f.read()
+ret = streamManagerApi.CreateMultipleStreams(pipelineStr)
+# Print error message
+if ret != 0:
+    cout << 'Failed to create Stream, ret=' << str(ret) << END_L
+    exit()
+# Stream name
+STREAM_NAME = b'detection'
+# Obtain the inference result by specifying streamName and keyVec
+# The data that needs to be obtained is searched by the plug-in name
+keys = [b"ReservedFrameInfo", b"mxpi_modelinfer0", b"mxpi_videodecoder0"]
+keyVec = StringVector()
+for key in keys:
+    keyVec.push_back(key)
+
+STATE = 0
+ACTION_CNT = 0
+# Config the output video
+fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+out = cv2.VideoWriter('video_result.mp4', fourcc, 30, (1280, 720))
+
+while True:
+
+    # Get data through GetResult
+    infer_result = streamManagerApi.GetResult(STREAM_NAME, b'appsink0', keyVec)
+
+    # Determine whether the output is empty
+    if infer_result.metadataVec.size() == 0:
+        cout << 'infer_result is null' << END_L
+        continue
+
+    # Frame information structure
+    frameList = MxpiDataType.MxpiFrameInfo()
+    frameList.ParseFromString(infer_result.metadataVec[0].serializedMetadata)
+
+    # Object postprocessor information
+    objectList =
MxpiDataType.MxpiObjectList() + objectList.ParseFromString(infer_result.metadataVec[1].serializedMetadata) + + # Video-decoder information + visionList = MxpiDataType.MxpiVisionList() + visionList.ParseFromString(infer_result.metadataVec[2].serializedMetadata) + vision_data = visionList.visionVec[0].visionData.dataStr + visionInfo = visionList.visionVec[0].visionInfo + + # cv2 func YUV to BGR + YUV_BYTES_NU = 3 + YUV_BYTES_DE = 2 + img_yuv = np.frombuffer(vision_data, np.uint8) + # Reshape + img_bgr = img_yuv.reshape(visionInfo.heightAligned * YUV_BYTES_NU // YUV_BYTES_DE, + visionInfo.widthAligned) + # Color gamut conversion + img = cv2.cvtColor(img_bgr, getattr(cv2, "COLOR_YUV2BGR_NV12")) + + BEST_CONFIDENCE = 0 + TEXT = "" + best_bboxes = {} + + if len(objectList.objectVec) == 0: + continue + + for i in range(len(objectList.objectVec)): + # Get ObjectList + results = objectList.objectVec[i] + # Get the confidence + confidence = round(results.classVec[0].confidence, 4) + # Save the best confidence and its information + if confidence > BEST_CONFIDENCE: + BEST_CONFIDENCE = confidence + best_bboxes = {'x0': int(results.x0), + 'x1': int(results.x1), + 'y0': int(results.y0), + 'y1': int(results.y1), + 'text': results.classVec[0].className} + key_value = best_bboxes.get('text', "abc") + if key_value: + pass + else: + continue + action = key_value + TEXT = "{}{}".format(str(BEST_CONFIDENCE), " ") + + # Draw rectangle and txt for visualization + key_value = best_bboxes.get('text', "abc") + if key_value: + pass + else: + continue + for item in best_bboxes['text']: + TEXT += item + key_value = (best_bboxes.get('x0', "abc") and best_bboxes.get('y0', "abc")) and \ + (best_bboxes.get('x1', "abc") and best_bboxes.get('y1', "abc")) + if key_value: + pass + else: + continue + cv2.putText(img, TEXT, (best_bboxes['x0'] + 10, best_bboxes['y0'] + 10), + cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 1) + cv2.rectangle(img, (best_bboxes['x0'], best_bboxes['y0']), (best_bboxes['x1'], best_bboxes['y1']), + (255, 0, 0), 2) + + # State change + if STATE == 0: + if action == "crouch": + STATE = 1 + elif STATE == 1: + if action == "support": + STATE = 2 + elif STATE == 2: + if action == "crouch": + STATE = 3 + elif STATE == 3: + if action == "jump": + STATE = 0 + ACTION_CNT = ACTION_CNT + 1 + + # Save txt for results + FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL + if os.path.exists("result.txt"): + os.remove("result.txt") + with os.fdopen(os.open('result.txt', FLAGS, 0o755), 'w') as f: + f.write(str(ACTION_CNT)) + + # Save picture + Id = frameList.frameId + RESULT_PIC_PATH = "./result_pic/" + if os.path.exists(RESULT_PIC_PATH) != 1: + os.makedirs(RESULT_PIC_PATH) + ORIGIN_IMG_FILE = './result_pic/image' + '-' + str(Id) + '.jpg' + cv2.imwrite(ORIGIN_IMG_FILE, img) + + # Write the video + out.write(img) + + # Stop detection when it is the lase frame + # Or when the frame id comes to be the number you set + if frameList.isEos or Id > 63: + out.release() + break + +# Destroy All Streams +streamManagerApi.DestroyAllStreams() diff --git a/contrib/Burpee_Detection/envs/env.sh b/contrib/Burpee_Detection/envs/env.sh new file mode 100644 index 0000000000000000000000000000000000000000..bd0667ebf4f58665285170757dc0f73b7adf8448 --- /dev/null +++ b/contrib/Burpee_Detection/envs/env.sh @@ -0,0 +1,23 @@ +# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Variable description
+export MX_SDK_path=""          # mxVision installation path
+export Ascend_toolkit_path=""  # CANN installation path
+
+# MindX SDK environment variables:
+. /${MX_SDK_path}/set_env.sh
+
+# CANN environment variables:
+. /${Ascend_toolkit_path}/set_env.sh
+
diff --git a/contrib/Burpee_Detection/model/atc.sh b/contrib/Burpee_Detection/model/atc.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e7c23da8a7f3ef961b3af8e8f0ad16ca7d9f6345
--- /dev/null
+++ b/contrib/Burpee_Detection/model/atc.sh
@@ -0,0 +1,30 @@
+# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# atc environment
+source ../envs/env.sh
+
+# atc transform model
+atc \
+    --model=./burpee_detection.onnx \
+    --framework=5 \
+    --output=./burpee_detection \
+    --input_format=NCHW \
+    --input_shape="images:1,3,640,640" \
+    --out_nodes="Transpose_213:0;Transpose_262:0;Transpose_311:0" \
+    --enable_small_channel=1 \
+    --insert_op_conf=./aipp_yolov5.cfg \
+    --soc_version=Ascend310 \
+    --log=info
+
diff --git a/contrib/Burpee_Detection/pipeline/burpee_detection_p.pipeline b/contrib/Burpee_Detection/pipeline/burpee_detection_p.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..b4d8910bb150255c0dca7057a4b2b8d24b6a9eec
--- /dev/null
+++ b/contrib/Burpee_Detection/pipeline/burpee_detection_p.pipeline
@@ -0,0 +1,54 @@
+{
+    "detection": {
+        "stream_config": {
+            "deviceId": "0"
+        },
+        "appsrc0": {
+            "props": {
+                "blocksize": "409600"
+            },
+            "factory": "appsrc",
+            "next": "mxpi_imagedecoder0"
+        },
+        "mxpi_imagedecoder0": {
+            "props": {
+                "deviceId": "0"
+            },
+            "factory": "mxpi_imagedecoder",
+            "next": "mxpi_imageresize0"
+        },
+        "mxpi_imageresize0": {
+            "props": {
+                "dataSource": "mxpi_imagedecoder0",
+                "resizeHeight": "640",
+                "resizeWidth": "640",
+                "resizeType": "Resizer_KeepAspectRatio_Fit"
+            },
+            "factory": "mxpi_imageresize",
+            "next": "mxpi_modelinfer0"
+        },
+        "mxpi_modelinfer0": {
+            "props": {
+                "modelPath": ".././model/burpee_detection.om",
+                "postProcessConfigPath": ".././model/yolov5.cfg",
+                "labelPath": ".././model/yolov5.names",
+                "postProcessLibPath": "../.././Ascend/mindx_sdk/mxVision-2.0.4/lib/libMpYOLOv5PostProcessor.so"
+            },
+            "factory": "mxpi_modelinfer",
+            "next": "mxpi_dataserialize0"
+        },
+        "mxpi_dataserialize0": {
+            "props": {
+                "outputDataKeys": "mxpi_modelinfer0"
+            },
+            "factory": "mxpi_dataserialize",
+            "next": "appsink0"
+        },
+        "appsink0": {
+            "props": {
+                "blocksize": "4096000"
+            },
+            "factory": "appsink"
+        }
+    }
+}
diff --git a/contrib/Burpee_Detection/pipeline/burpee_detection_v.pipeline b/contrib/Burpee_Detection/pipeline/burpee_detection_v.pipeline
new file
mode 100644 index 0000000000000000000000000000000000000000..48b4f2b64f0c7a8dcd3542d6cc0db3a1d965a704 --- /dev/null +++ b/contrib/Burpee_Detection/pipeline/burpee_detection_v.pipeline @@ -0,0 +1,59 @@ +{ + "detection": { + "stream_config": { + "deviceId": "0" + }, + "mxpi_rtspsrc0": { + "props": { + "rtspUrl": "rtsp://xxx.xxx.x.x:xxxx/burpee_detection.264", + "fpsMode": "1" + }, + "factory": "mxpi_rtspsrc", + "next": "mxpi_videodecoder0" + }, + "mxpi_videodecoder0": { + "props": { + "deviceId": "0", + "inputVideoFormat": "H264", + "outputImageFormat": "YUV420SP_NV12" + }, + "factory": "mxpi_videodecoder", + "next": "mxpi_imageresize0" + }, + "mxpi_imageresize0": { + "props": { + "dataSource": "mxpi_videodecoder0", + "resizeHeight": "640", + "resizeWidth": "640", + "resizeType": "Resizer_KeepAspectRatio_Fit" + }, + "factory": "mxpi_imageresize", + "next": "mxpi_modelinfer0" + }, + "mxpi_modelinfer0": { + "props": { + "dataSource": "mxpi_imageresize0", + "modelPath": "../model/burpee_detection.om", + "postProcessConfigPath": "../model/yolov5.cfg", + "labelPath": "../model/yolov5.names", + "postProcessLibPath": "../../Ascend/mindx_sdk/mxVision-2.0.4/lib/libMpYOLOv5PostProcessor.so", + "deviceId": "0" + }, + "factory": "mxpi_modelinfer", + "next": "mxpi_dataserialize0" + }, + "mxpi_dataserialize0": { + "props": { + "outputDataKeys": "mxpi_modelinfer0,ReservedFrameInfo" + }, + "factory": "mxpi_dataserialize", + "next": "appsink0" + }, + "appsink0": { + "props": { + "blocksize": "4096000" + }, + "factory": "appsink" + } + } +} diff --git a/contrib/Burpee_Detection/readme_img/app_1.jpg b/contrib/Burpee_Detection/readme_img/app_1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d56cc7976396199691287c08c01c476492d27e3 Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_1.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/app_2.jpg b/contrib/Burpee_Detection/readme_img/app_2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f67c0c31a0d3960c5ba6c61e707eb5dd88d8fe1c Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_2.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/app_3.jpg b/contrib/Burpee_Detection/readme_img/app_3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47980330bc16184f356724d8cb4bd17cc35e0a48 Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_3.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/app_4.jpg b/contrib/Burpee_Detection/readme_img/app_4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2aa4851813a254e5fe5043264939a65a4c5db368 Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_4.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/app_5.jpg b/contrib/Burpee_Detection/readme_img/app_5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d80b6fd43bf17aa6e103d6e83961186fa38bc0b Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_5.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/app_6.jpg b/contrib/Burpee_Detection/readme_img/app_6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b8fc285fc2eaed2b28e576ec1dd42076405caa8 Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_6.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/app_flow.jpg b/contrib/Burpee_Detection/readme_img/app_flow.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4a81113f7dc164f1009a26d216b042b6210012ae Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_flow.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/dark.jpg b/contrib/Burpee_Detection/readme_img/dark.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c07a7ffb0dae6956ae23cf83cce91b2d9fe905e2 Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/dark.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/dark_res.jpg b/contrib/Burpee_Detection/readme_img/dark_res.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c9cf0f5e971529c07fd4da8527dbdc8e485c6b0 Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/dark_res.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/dataset.jpg b/contrib/Burpee_Detection/readme_img/dataset.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0d64809eee663dc6e04126da46578b061a394c6 Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/dataset.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/video.jpg b/contrib/Burpee_Detection/readme_img/video.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3c07c23babd4859fad6418583d7ab55c2c3e552 Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/video.jpg differ diff --git a/contrib/Burpee_Detection/readme_img/video_flow.jpg b/contrib/Burpee_Detection/readme_img/video_flow.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49365d320111ee4a58296c4ff3483d9ab5c74961 Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/video_flow.jpg differ diff --git a/contrib/Overlap-CRNN/README.md b/contrib/Overlap-CRNN/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4d22d4a80886ecf0d6c80c6bb3825c364092628c --- /dev/null +++ b/contrib/Overlap-CRNN/README.md @@ -0,0 +1,316 @@ +# Overlap-CRNN重叠文本识别参考设计 + +## 1 介绍 + +本开发样例使用CRNN完成重叠文本的识别任务,供用户参考。 本系统基于昇腾Ascend310卡。本仓库是重叠文本任务的下游任务,即在完成对重叠文本还原后针对mask识别出文本。 + +### 1.1 支持的产品 + +本系统采用Atlas300-3010作为实验验证的硬件平台,并支持Atlas200RC以及Atlas500的硬件平台.具体产品实物图和硬件参数请参见《Atlas 300 AI加速卡 用户指南(型号 3010)》。由于采用的硬件平台为含有Atlas 300的Atlas 800 AI服务器 (型号3010),而服务器一般需要通过网络访问,因此需要通过笔记本或PC等客户端访问服务器,而且展示界面一般在客户端。 + +### 1.2 支持的版本 + +版本号查询方法,在Ascend产品环境下,运行命令: + +``` +npu-smi info +``` + + + +### 1.3 软件方案介绍 + +软件方案主要为还原文本的识别系统,子系统功能具体描述请参考 表1.1 系统方案各子系统功能描述。重叠文本识别子系统可以实现识别还原后图像的mask的输入,然后通过等比例放缩等操作最后对图像进行识别,本方案选择使用crnn作为重叠文本识别模型。系统方案中各模块功能如表1.2 所示。 + +表1.1 系统方案各子系统功能描述: + +| 序号 | 子系统 | 功能描述 | +| :--: | :----------------: | :----------------------------------------------------------: | +| 1 | 重叠文本识别子系统 | 重叠文本识别子系统将上一个子系统得到的结果,进行等比例放缩操作,放缩的大小与模型的输入大小有关,之后将结果送入到文字识别模型进行文字识别,并将识别结果进行输出。 | + +表1.2 系统方案中各模块功能: + +| 序号 | 子系统 | 功能描述 | +| :--: | :--------: | :----------------------------------------------------------: | +| 1 | 输入图像 | 将图像(JPG格式)通过pillow库读入。 | +| 2 | 输入字典 | 将字典(txt格式)通过本地代码输入到pipeline中。 | +| 3 | 图像解码 | 通过PILLOW第三方库对图像解码。 | +| 4 | 图像放缩 | 文字识别模型的输入为固定维度,所以需要放射变换的结果进行等比例放缩。 | +| 5 | 文字识别 | 在图像放缩后,将缓存区数据送入文字识别模型。本方案选用crnn进行文本识别 | +| 6 | 结果可视化 | 通过pillow库可视化单张图像的识别。 | + + + +### 1.4 代码目录结构与说明 + +eg:本sample工程名称为Overlap-CRNN,工程目录如下图所示: + +```pytnon +├── crnn_single_infer.py #单张图片推理 +├── crnn_infer.py #精度测试 +├── README.md +├── ch_sim_en_digit_symble.txt #字典 +├── models #不同类型的模型文件 +│ ├── air_model +│ │ ├── crnn.air +│ └── ckpt_model +│ │ ├── crnn.ckpt +│ └── om_model +│ │ ├── crnn.om +├── dataset #测试数据集 +│ ├── img +│ ├── map_record.json +│ └── 
annotation.json +``` + +### 1.5 技术实现流程图 + +实现流程图如下图所示: + +![image-20221201214655261](../Overlap-CRNN/流程图.png) + + + +### 1.6 特性及适用场景 + +本案例中的 CRNN模型适用于英文的灰度图像的识别,并可以返回测试图像的word-based的精度值。 + +本模型在以下几种情况去噪效果良好:图像中文字清晰可见、排版工整、字符大小适中等。 + +在以下几种情况去噪效果不太好:图像中文字模糊、排版随意、字符较小等。 + + + +## 2 环境依赖 + +请列出环境依赖软件和版本。 + +推荐系统为ubuntu 18.04或centos 7.6,环境依赖软件和版本如下表: + +| 软件名称 | 版本 | +| ------------------- | ----------- | +| mindspore | 1.8.1 | +| MindX SDK | 3.0RC2 | +| Ascend-CANN-toolkit | 5.1.RC2 | +| ubuntu | 18.04.1 LTS | +| python | 3.9.2 | +| cv2 | 4.5.5.64 | +| numpy | 1.23.1 | +| pillow | 9.1.0 | + +在编译运行项目前,需要设置环境变量: + +- 环境变量介绍 + +``` +. ${sdk_path}/set_env.sh +. ${ascend_toolkit_path}/set_env.sh +``` + + + +## 3 模型训练 +模型均在GPU下训练得到,如果需要使用本仓库提供的模型进行推理或模型转换,请务必参照GPU所需的参数设置,然后将模型按照提供的文件夹目录放至即可。 + +相关模型的下载链接如下:[下载链接](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/Overlap-CRNN/models.zip) + +**步骤1** 从昇腾社区的modelzoo中下载官方CRNN模型代码,并按安装官方文档中的步骤完成训练:[下载链接](https://www.hiascend.com/zh/software/modelzoo/models/detail/C/c4945b2fc8aa47f6af9b4f2870e41062/1) + + + +1. **CRNN_for_MindSpore_1.2_code/default_config.yaml** + + ```yaml + model_version: "V2" # V2可以在GPU和Ascend上训练 + label_dict: "PATH/TO/ch_sim_en_digit_symble.txt" # 使用自己的字典的路径 + max_text_length: 12 + image_width: 112 + class_num: 6703 + blank: 6702 + train_dataset_path: "" # 训练数据集路径 + train_eval_dataset: "synth" # 名称使用synth + train_eval_dataset_path: "" # 测试数据路径 + ``` + +2. **CRNN_for_MindSpore_1.2_code/src/dataset.py** + + 将第41行的: + + ```python + letters = [letter for letter in config1.label_dict] + ``` + + 修改为: + + ```python + letters = [] + with open(config1.label_dict, 'r') as f: + for line in f: + letter = line.strip('\n') + letters.append(letter) + f.close() + ``` + + + + 将`CaptchaDataset`函数更换为: + + ```python + class CaptchaDataset: + """ + create train or evaluation dataset for crnn + + Args: + img_root_dir(str): root path of images + max_text_length(int): max number of digits in images. + device_target(str): platform of training, support Ascend and GPU. 
+ """ + + def __init__(self, img_root_dir, is_training=True, config=config1): + if not os.path.exists(img_root_dir): + raise RuntimeError( + "the input image dir {} is invalid!".format(img_root_dir)) + self.img_root_dir = img_root_dir + if is_training: + self.imgslist = os.path.join(self.img_root_dir, + 'annotation_train.txt') + else: + self.imgslist = os.path.join(self.img_root_dir, + 'annotation_test.txt') + self.img_names = {} + self.img_list = [] + with open(self.imgslist, 'r') as f: + for line in f: + img_name, img_label = line.strip('\n').split('\t') + self.img_list.append(img_name) + self.img_names[img_name] = str(img_label) + f.close() + self.max_text_length = config.max_text_length + self.blank = config.blank + self.class_num = config.class_num + self.sample_num = len(self.img_list) + self.batch_size = config.batch_size + print("There are totally {} samples".format(self.sample_num)) + + def __len__(self): + return self.sample_num + + def __getitem__(self, item): + img_name = self.img_list[item] + try: + im = Image.open(os.path.join(self.img_root_dir, img_name)) + except IOError: + print("%s is a corrupted image" % img_name) + return self[item + 1] + im = im.convert("RGB") + r, g, b = im.split() + im = Image.merge("RGB", (b, g, r)) + image = np.array(im) + if not check_image_is_valid(image): + print("%s is a corrupted image" % img_name) + return self[item + 1] + + text = self.img_names[img_name] + + label_unexpanded = text_to_labels(text) + label = np.full(self.max_text_length, self.blank) + if self.max_text_length < len(label_unexpanded): + label_len = self.max_text_length + else: + label_len = len(label_unexpanded) + for j in range(label_len): + label[j] = label_unexpanded[j] + return image, label + ``` + + + +3. **CRNN_for_MindSpore_1.2_code/src/metric.py** + + 将第18行的字典 + + ```python + label_dict = "abcdefghijklmnopqrstuvwxyz0123456789" + ``` + + 修改为( `dict_path `为自行准备的字典 `ch_sim_en_digit_symble.txt `,可在本仓库下找到): + + ``` + label_dict = [] + with open("[dict_path]", 'r') as f: + for line in f: + letter = line.strip('\n') + label_dict.append(letter) + f.close() + ``` + +**步骤2** 训练得到ckpt模型文件后,进入CRNN_for_MindSpore_1.2_code文件夹下执行命令(修改`ckpt_file`和`air_file_name`参数为自己的路径): + ``` + python export.py --ckpt_file [ckpt_file] --file_name [air_file_name] --file_format AIR + ``` + + + + +## 4 模型推理 + +**步骤1** 将生成的AIR模型转移到推理服务器,放至在Overlap-CRNN/models/air_model路径下。 +``` +cd ./Overlap-CRNN +mkdir models +cd models +mkdir air_model +``` + +**步骤2** 进入推理服务器执行命令(修改`air_model_path`和`output_model_path`参数为自己的路径): + ``` + atc --model=[air_model_path] --framework=1 --output=[output_model_path] --soc_version=Ascend310 --output_type=FP32 --op_select_implmode=high_precision --input_shape="input:1,3,32,112" + ``` +模型转换工具(ATC)相关介绍如下:[ATC介绍](https://support.huawei.com/enterprise/zh/doc/EDOC1100234054) + +相关模型的下载链接如下:[模型下载](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/Overlap-CRNN/models.zip) + +**步骤3** 执行该命令会在当前目录下生成项目需要的模型文件`[output_model].om`。执行后终端输出为: + + ``` + ATC start working now, please wait for a moment. + ATC run success, welcome to the next use. 
+ ``` + +表示命令执行成功。 + +**步骤4** 将任意一张jpg格式的图片存到当前目录下(./Overlap-CRNN),命名为test.jpg。 + +**步骤5** 按照模型转换获取om模型,放置在Overlap-CRNN/models/om_model/路径下。若未自行转换模型,使用的是仓库提供的模型,则无需修改相关文件,否则修改`crnn_single_infer.py`中相关配置,将`MODEL_PATH`对象的路径改成实际的om模型的路径;`IMAGE_PATH`对象的路径改成实际的测试图片的路径;`SAVE_PATH`对象设置成需要保存可视化图像的路径。 + +相关参数在Overlap-CRNN/crnn_single_infer.py下: +``` +MODEL_PATH = "./models/om_model/crnn.om" +IMAGE_PATH = "./test.jpg" +SAVE_PATH = "./show.jpg" +``` + + +**步骤6** 在命令行输入 如下命令运行整个工程: + +``` +python crnn_single_infer.py +``` + +**步骤7** 运行结束输出`show.jpg` + + + +## 5 测试精度 + +**步骤1** 在Overlap-CRNN/dataset/路径下准备相同格式的数据集(已提供测试用的数据集,按照文件目录放至即可:[下载链接](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/Overlap-CRNN/dataset.zip)) + +**步骤2** 在命令行输入 如下命令运行整个工程: + +``` +python crnn_infer.py +``` + +模型在测试集上的精度达标,最终模型的的acc为89.17%,满足精度要求(acc≥80%)。 + +![image-20221202155839483](../Overlap-CRNN/测试结果.png) \ No newline at end of file diff --git a/contrib/Overlap-CRNN/Ubuntu-Regular.ttf b/contrib/Overlap-CRNN/Ubuntu-Regular.ttf new file mode 100644 index 0000000000000000000000000000000000000000..2001d6ec68f3022580a87e7ca69b092ff1c520de Binary files /dev/null and b/contrib/Overlap-CRNN/Ubuntu-Regular.ttf differ diff --git a/contrib/Overlap-CRNN/ch_sim_en_digit_symble.txt b/contrib/Overlap-CRNN/ch_sim_en_digit_symble.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b2972ffc79a968fea9454a64c2fe2a3d17afe3a --- /dev/null +++ b/contrib/Overlap-CRNN/ch_sim_en_digit_symble.txt @@ -0,0 +1,6702 @@ +0 +1 +2 +3 +4 +5 +6 +7 +8 +9 +a +b +c +d +e +f +g +h +i +j +k +l +m +n +o +p +q +r +s +t +u +v +w +x +y +z +! +" +” +# +$ +% +& +' +ˈ +’ +( +( +) +) +* ++ +, +, +- +. +/ +: +: +; +; +< +《 += +> +》 +? +? +@ +[ +\ +] +_ +` +~ +| +{ +} +! +。 +“ +^ +æ +— +· +一 +丁 +七 +万 +丈 +三 +上 +下 +不 +与 +丐 +丑 +专 +且 +丕 +世 +丘 +丙 +业 +丛 +东 +丝 +丞 +丢 +两 +严 +丧 +个 +丫 +中 +丰 +串 +临 +丸 +丹 +为 +主 +丽 +举 +乃 +久 +么 +义 +之 +乌 +乍 +乎 +乏 +乐 +乒 +乓 +乔 +乖 +乘 +乙 +乜 +九 +乞 +也 +习 +乡 +书 +乩 +买 +乱 +乳 +乾 +了 +予 +争 +事 +二 +亍 +于 +亏 +云 +互 +亓 +五 +井 +亘 +亚 +些 +亟 +亡 +亢 +交 +亥 +亦 +产 +亨 +亩 +享 +京 +亭 +亮 +亲 +亳 +亵 +人 +亿 +什 +仁 +仂 +仃 +仄 +仅 +仆 +仇 +仉 +今 +介 +仍 +从 +仑 +仓 +仔 +仕 +他 +仗 +付 +仙 +仞 +仟 +仡 +代 +令 +以 +仨 +仪 +仫 +们 +仰 +仲 +仳 +仵 +件 +价 +任 +份 +仿 +企 +伉 +伊 +伍 +伎 +伏 +伐 +休 +众 +优 +伙 +会 +伛 +伞 +伟 +传 +伢 +伤 +伦 +伧 +伪 +伫 +伯 +估 +伴 +伶 +伸 +伺 +似 +伽 +佃 +但 +位 +低 +住 +佐 +佑 +体 +何 +佗 +佘 +余 +佚 +佛 +作 +佝 +佞 +佟 +你 +佣 +佤 +佥 +佩 +佬 +佯 +佰 +佳 +佴 +佶 +佻 +佼 +佾 +使 +侃 +侄 +侈 +侉 +例 +侍 +侏 +侑 +侔 +侗 +供 +依 +侠 +侣 +侥 +侦 +侧 +侨 +侩 +侪 +侬 +侮 +侯 +侵 +便 +促 +俄 +俅 +俊 +俎 +俏 +俐 +俑 +俗 +俘 +俚 +俜 +保 +俞 +俟 +信 +俣 +俦 +俨 +俩 +俪 +俭 +修 +俯 +俱 +俳 +俸 +俺 +俾 +倌 +倍 +倏 +倒 +倔 +倘 +候 +倚 +倜 +借 +倡 +倥 +倦 +倨 +倩 +倪 +倬 +倭 +债 +值 +倾 +偃 +假 +偈 +偌 +偎 +偏 +偕 +做 +停 +健 +偬 +偶 +偷 +偻 +偾 +偿 +傀 +傅 +傈 +傍 +傣 +傥 +傧 +储 +傩 +催 +傲 +傻 +像 +僖 +僚 +僦 +僧 +僬 +僭 +僮 +僳 +僵 +僻 +儆 +儇 +儋 +儒 +儡 +儿 +兀 +允 +元 +兄 +充 +兆 +先 +光 +克 +免 +兑 +兔 +兕 +兖 +党 +兜 +兢 +入 +全 +八 +公 +六 +兮 +兰 +共 +关 +兴 +兵 +其 +具 +典 +兹 +养 +兼 +兽 +冀 +冁 +内 +冈 +冉 +册 +再 +冒 +冕 +冗 +写 +军 +农 +冠 +冢 +冤 +冥 +冬 +冯 +冰 +冱 +冲 +决 +况 +冶 +冷 +冻 +冼 +冽 +净 +凄 +准 +凇 +凉 +凋 +凌 +减 +凑 +凛 +凝 +几 +凡 +凤 +凫 +凭 +凯 +凰 +凳 +凶 +凸 +凹 +出 +击 +凼 +函 +凿 +刀 +刁 +刃 +分 +切 +刈 +刊 +刍 +刎 +刑 +划 +刖 +列 +刘 +则 +刚 +创 +初 +删 +判 +刨 +利 +别 +刭 +刮 +到 +刳 +制 +刷 +券 +刹 +刺 +刻 +刽 +刿 +剀 +剁 +剂 +剃 +削 +剌 +前 +剐 +剑 +剔 +剖 +剜 +剞 +剡 +剥 +剧 +剩 +剪 +副 +割 +剽 +剿 +劁 +劂 +劈 +劐 +劓 +力 +劝 +办 +功 +加 +务 +劢 +劣 +动 +助 +努 +劫 +劬 +劭 +励 +劲 +劳 +劾 +势 +勃 +勇 +勉 +勋 +勐 +勒 +勖 +勘 +募 +勤 +勰 +勺 +勾 +勿 +匀 +包 +匆 +匈 +匍 +匏 +匐 +匕 +化 +北 +匙 +匝 +匠 +匡 +匣 +匦 +匪 +匮 +匹 +区 +医 +匾 +匿 +十 +千 +卅 +升 +午 +卉 +半 +华 +协 +卑 +卒 +卓 +单 +卖 +南 +博 +卜 +卞 +卟 +占 +卡 +卢 +卣 +卤 +卦 +卧 +卫 +卮 +卯 +印 +危 +即 +却 +卵 +卷 +卸 +卺 +卿 +厂 +厄 +厅 +历 +厉 +压 +厌 +厍 +厕 +厘 +厚 +厝 +原 +厢 +厣 +厥 +厦 
+厨 +厩 +厮 +去 +县 +叁 +参 +又 +叉 +及 +友 +双 +反 +发 +叔 +取 +受 +变 +叙 +叛 +叟 +叠 +口 +古 +句 +另 +叨 +叩 +只 +叫 +召 +叭 +叮 +可 +台 +叱 +史 +右 +叵 +叶 +号 +司 +叹 +叻 +叼 +叽 +吁 +吃 +各 +吆 +合 +吉 +吊 +同 +名 +后 +吏 +吐 +向 +吒 +吓 +吕 +吗 +君 +吝 +吞 +吟 +吠 +吡 +吣 +否 +吧 +吨 +吩 +含 +听 +吭 +吮 +启 +吱 +吲 +吴 +吵 +吸 +吹 +吻 +吼 +吾 +呀 +呃 +呆 +呈 +告 +呋 +呐 +呓 +呔 +呕 +呖 +呗 +员 +呙 +呛 +呜 +呢 +呤 +呦 +周 +呱 +呲 +味 +呵 +呶 +呷 +呸 +呻 +呼 +命 +咀 +咂 +咄 +咆 +咋 +和 +咎 +咏 +咐 +咒 +咔 +咕 +咖 +咙 +咚 +咛 +咝 +咣 +咤 +咦 +咧 +咨 +咩 +咪 +咫 +咬 +咭 +咯 +咱 +咳 +咴 +咸 +咻 +咽 +咿 +哀 +品 +哂 +哄 +哆 +哇 +哈 +哉 +哌 +响 +哎 +哏 +哐 +哑 +哓 +哔 +哕 +哗 +哙 +哚 +哝 +哞 +哟 +哥 +哦 +哧 +哨 +哩 +哪 +哭 +哮 +哲 +哳 +哺 +哼 +哽 +哿 +唁 +唆 +唇 +唉 +唏 +唐 +唑 +唔 +唛 +唠 +唢 +唣 +唤 +唧 +唪 +唬 +售 +唯 +唱 +唳 +唷 +唼 +唾 +唿 +啁 +啃 +啄 +商 +啉 +啊 +啐 +啕 +啖 +啜 +啡 +啤 +啥 +啦 +啧 +啪 +啬 +啭 +啮 +啵 +啶 +啷 +啸 +啻 +啼 +啾 +喀 +喁 +喂 +喃 +善 +喇 +喈 +喉 +喊 +喋 +喏 +喑 +喔 +喘 +喙 +喜 +喝 +喟 +喧 +喱 +喳 +喵 +喷 +喹 +喻 +喽 +喾 +嗄 +嗅 +嗉 +嗌 +嗍 +嗑 +嗒 +嗓 +嗔 +嗖 +嗜 +嗝 +嗟 +嗡 +嗣 +嗤 +嗥 +嗦 +嗨 +嗪 +嗫 +嗬 +嗯 +嗲 +嗳 +嗵 +嗷 +嗽 +嗾 +嘀 +嘁 +嘈 +嘉 +嘌 +嘎 +嘏 +嘘 +嘛 +嘞 +嘟 +嘣 +嘤 +嘧 +嘬 +嘭 +嘱 +嘲 +嘴 +嘶 +嘹 +嘻 +嘿 +噌 +噍 +噎 +噔 +噗 +噙 +噜 +噢 +噤 +器 +噩 +噪 +噫 +噬 +噱 +噶 +噻 +噼 +嚅 +嚆 +嚎 +嚏 +嚓 +嚣 +嚯 +嚷 +嚼 +囊 +囔 +囚 +四 +囝 +回 +囟 +因 +囡 +团 +囤 +囫 +园 +困 +囱 +围 +囵 +囹 +固 +国 +图 +囿 +圃 +圄 +圆 +圈 +圉 +圊 +圜 +土 +圣 +在 +圩 +圪 +圬 +圭 +圮 +圯 +地 +圳 +圹 +场 +圻 +圾 +址 +坂 +均 +坊 +坌 +坍 +坎 +坏 +坐 +坑 +块 +坚 +坛 +坜 +坝 +坞 +坟 +坠 +坡 +坤 +坦 +坨 +坩 +坪 +坫 +坭 +坯 +坳 +坷 +坻 +坼 +垂 +垃 +垄 +垅 +垆 +型 +垌 +垒 +垓 +垛 +垠 +垡 +垢 +垣 +垤 +垦 +垧 +垩 +垫 +垭 +垮 +垲 +垸 +埂 +埃 +埋 +城 +埏 +埒 +埔 +埕 +埘 +埙 +埚 +埝 +域 +埠 +埤 +埭 +埯 +埴 +埸 +培 +基 +埽 +堂 +堆 +堇 +堋 +堍 +堑 +堕 +堙 +堞 +堠 +堡 +堤 +堪 +堰 +堵 +塄 +塌 +塍 +塑 +塔 +塘 +塞 +塥 +填 +塬 +塾 +墀 +墁 +境 +墅 +墉 +墒 +墓 +墙 +增 +墟 +墨 +墩 +墼 +壁 +壅 +壑 +壕 +壤 +士 +壬 +壮 +声 +壳 +壶 +壹 +处 +备 +复 +夏 +夔 +夕 +外 +夙 +多 +夜 +够 +夤 +夥 +大 +天 +太 +夫 +夭 +央 +夯 +失 +头 +夷 +夸 +夹 +夺 +夼 +奁 +奂 +奄 +奇 +奈 +奉 +奋 +奎 +奏 +契 +奔 +奕 +奖 +套 +奘 +奚 +奠 +奢 +奥 +女 +奴 +奶 +奸 +她 +好 +妁 +如 +妃 +妄 +妆 +妇 +妈 +妊 +妍 +妒 +妓 +妖 +妗 +妙 +妞 +妣 +妤 +妥 +妨 +妩 +妪 +妫 +妮 +妯 +妲 +妹 +妻 +妾 +姆 +姊 +始 +姐 +姑 +姒 +姓 +委 +姗 +姘 +姚 +姜 +姝 +姣 +姥 +姨 +姬 +姹 +姻 +姿 +威 +娃 +娄 +娅 +娆 +娇 +娈 +娉 +娌 +娑 +娓 +娘 +娜 +娟 +娠 +娣 +娥 +娩 +娱 +娲 +娴 +娶 +娼 +婀 +婆 +婉 +婊 +婕 +婚 +婢 +婧 +婪 +婴 +婵 +婶 +婷 +婺 +婿 +媒 +媚 +媛 +媪 +媲 +媳 +媵 +媸 +媾 +嫁 +嫂 +嫉 +嫌 +嫒 +嫔 +嫖 +嫘 +嫜 +嫠 +嫡 +嫣 +嫦 +嫩 +嫫 +嫱 +嬉 +嬖 +嬗 +嬴 +嬷 +孀 +子 +孑 +孓 +孔 +孕 +字 +存 +孙 +孚 +孛 +孜 +孝 +孟 +孢 +季 +孤 +孥 +学 +孩 +孪 +孬 +孰 +孱 +孳 +孵 +孺 +孽 +宁 +它 +宄 +宅 +宇 +守 +安 +宋 +完 +宏 +宓 +宕 +宗 +官 +宙 +定 +宛 +宜 +宝 +实 +宠 +审 +客 +宣 +室 +宥 +宦 +宪 +宫 +宰 +害 +宴 +宵 +家 +宸 +容 +宽 +宾 +宿 +寂 +寄 +寅 +密 +寇 +富 +寐 +寒 +寓 +寝 +寞 +察 +寡 +寤 +寥 +寨 +寮 +寰 +寸 +对 +寺 +寻 +导 +寿 +封 +射 +将 +尉 +尊 +小 +少 +尔 +尕 +尖 +尘 +尚 +尜 +尝 +尤 +尥 +尧 +尬 +就 +尴 +尸 +尹 +尺 +尻 +尼 +尽 +尾 +尿 +局 +屁 +层 +居 +屈 +屉 +届 +屋 +屎 +屏 +屐 +屑 +展 +屙 +属 +屠 +屡 +屣 +履 +屦 +屯 +山 +屹 +屺 +屿 +岁 +岂 +岈 +岌 +岍 +岐 +岑 +岔 +岖 +岗 +岘 +岙 +岚 +岛 +岢 +岣 +岩 +岫 +岬 +岭 +岱 +岳 +岵 +岷 +岸 +岿 +峁 +峄 +峋 +峒 +峙 +峡 +峤 +峥 +峦 +峨 +峪 +峭 +峰 +峻 +崂 +崃 +崆 +崇 +崎 +崔 +崖 +崛 +崞 +崤 +崦 +崧 +崩 +崭 +崮 +崴 +崽 +嵇 +嵊 +嵋 +嵌 +嵘 +嵛 +嵝 +嵩 +嵫 +嵬 +嵯 +嵴 +嶂 +嶙 +嶝 +嶷 +巅 +巍 +川 +州 +巡 +巢 +工 +左 +巧 +巨 +巩 +巫 +差 +巯 +己 +已 +巳 +巴 +巷 +巽 +巾 +币 +市 +布 +帅 +帆 +师 +希 +帏 +帐 +帑 +帔 +帕 +帖 +帘 +帙 +帚 +帛 +帜 +帝 +带 +帧 +席 +帮 +帱 +帷 +常 +帻 +帼 +帽 +幂 +幄 +幅 +幌 +幔 +幕 +幛 +幞 +幡 +幢 +干 +平 +年 +并 +幸 +幺 +幻 +幼 +幽 +广 +庀 +庄 +庆 +庇 +床 +庋 +序 +庐 +庑 +库 +应 +底 +庖 +店 +庙 +庚 +府 +庞 +废 +庠 +庥 +度 +座 +庭 +庳 +庵 +庶 +康 +庸 +庹 +庾 +廉 +廊 +廒 +廓 +廖 +廛 +廨 +廪 +延 +廷 +建 +廿 +开 +弁 +异 +弃 +弄 +弈 +弊 +弋 +式 +弑 +弓 +引 +弗 +弘 +弛 +弟 +张 +弥 +弦 +弧 +弩 +弭 +弯 +弱 +弹 +强 +弼 +彀 +归 +当 +录 +彖 +彗 +彘 +彝 +形 +彤 +彦 +彩 +彪 +彬 +彭 +彰 +影 +彳 +彷 +役 +彻 +彼 +往 +征 +徂 +径 +待 +徇 +很 +徉 +徊 +律 +後 +徐 +徒 +徕 +得 +徘 +徙 +徜 +御 +徨 +循 +徭 +微 +徵 +德 +徼 +徽 +心 +必 +忆 +忉 +忌 +忍 +忏 +忐 +忑 +忒 +忖 +志 +忘 +忙 +忝 +忠 +忡 +忤 +忧 +忪 +快 +忭 +忮 +忱 +念 +忸 +忻 +忽 +忾 +忿 +怀 +态 +怂 +怃 +怄 +怅 +怆 +怊 +怍 +怎 +怏 +怒 +怔 +怕 +怖 +怙 +怛 +怜 +思 +怠 +怡 +急 +怦 +性 +怨 +怩 +怪 +怫 +怯 +怵 +总 +怼 +怿 +恁 +恂 +恃 +恋 +恍 +恐 +恒 +恕 +恙 +恚 +恝 +恢 +恣 +恤 +恧 +恨 +恩 +恪 +恫 +恬 +恭 +息 +恰 +恳 +恶 +恸 +恹 +恺 +恻 +恼 +恽 
+恿 +悃 +悄 +悉 +悌 +悍 +悒 +悔 +悖 +悚 +悛 +悝 +悟 +悠 +患 +悦 +您 +悫 +悬 +悭 +悯 +悱 +悲 +悴 +悸 +悻 +悼 +情 +惆 +惊 +惋 +惑 +惕 +惘 +惚 +惜 +惝 +惟 +惠 +惦 +惧 +惨 +惩 +惫 +惬 +惭 +惮 +惯 +惰 +想 +惴 +惶 +惹 +惺 +愀 +愁 +愆 +愈 +愉 +愍 +愎 +意 +愕 +愚 +感 +愠 +愣 +愤 +愦 +愧 +愫 +愿 +慈 +慊 +慌 +慎 +慑 +慕 +慝 +慢 +慧 +慨 +慰 +慵 +慷 +憋 +憎 +憔 +憝 +憧 +憨 +憩 +憬 +憷 +憾 +懂 +懈 +懊 +懋 +懑 +懒 +懦 +懵 +懿 +戆 +戈 +戊 +戋 +戌 +戍 +戎 +戏 +成 +我 +戒 +戕 +或 +戗 +战 +戚 +戛 +戟 +戡 +戢 +戥 +截 +戬 +戮 +戳 +戴 +户 +戽 +戾 +房 +所 +扁 +扃 +扇 +扈 +扉 +手 +扌 +才 +扎 +扑 +扒 +打 +扔 +托 +扛 +扣 +扦 +执 +扩 +扪 +扫 +扬 +扭 +扮 +扯 +扰 +扳 +扶 +批 +扼 +找 +承 +技 +抄 +抉 +把 +抑 +抒 +抓 +投 +抖 +抗 +折 +抚 +抛 +抟 +抠 +抡 +抢 +护 +报 +抨 +披 +抬 +抱 +抵 +抹 +抻 +押 +抽 +抿 +拂 +拄 +担 +拆 +拇 +拈 +拉 +拊 +拌 +拍 +拎 +拐 +拒 +拓 +拔 +拖 +拗 +拘 +拙 +拚 +招 +拜 +拟 +拢 +拣 +拥 +拦 +拧 +拨 +择 +括 +拭 +拮 +拯 +拱 +拳 +拴 +拶 +拷 +拼 +拽 +拾 +拿 +持 +挂 +指 +挈 +按 +挎 +挑 +挖 +挚 +挛 +挝 +挞 +挟 +挠 +挡 +挢 +挣 +挤 +挥 +挨 +挪 +挫 +振 +挲 +挹 +挺 +挽 +捂 +捃 +捅 +捆 +捉 +捋 +捌 +捍 +捎 +捏 +捐 +捕 +捞 +损 +捡 +换 +捣 +捧 +捩 +捭 +据 +捱 +捶 +捷 +捺 +捻 +掀 +掂 +掇 +授 +掉 +掊 +掌 +掎 +掏 +掐 +排 +掖 +掘 +掠 +探 +掣 +接 +控 +推 +掩 +措 +掬 +掭 +掮 +掰 +掳 +掴 +掷 +掸 +掺 +掼 +掾 +揄 +揆 +揉 +揍 +揎 +描 +提 +插 +揖 +揞 +揠 +握 +揣 +揩 +揪 +揭 +援 +揶 +揸 +揽 +揿 +搀 +搁 +搂 +搅 +搋 +搌 +搏 +搐 +搓 +搔 +搛 +搜 +搞 +搠 +搡 +搦 +搪 +搬 +搭 +搴 +携 +搽 +摁 +摄 +摅 +摆 +摇 +摈 +摊 +摒 +摔 +摘 +摞 +摧 +摩 +摭 +摸 +摹 +摺 +撂 +撄 +撅 +撇 +撑 +撒 +撕 +撖 +撙 +撞 +撤 +撩 +撬 +播 +撮 +撰 +撵 +撷 +撸 +撺 +撼 +擀 +擂 +擅 +操 +擎 +擐 +擒 +擘 +擞 +擢 +擤 +擦 +攀 +攉 +攒 +攘 +攥 +攫 +攮 +支 +收 +攸 +改 +攻 +放 +政 +故 +效 +敉 +敌 +敏 +救 +敕 +敖 +教 +敛 +敝 +敞 +敢 +散 +敦 +敫 +敬 +数 +敲 +整 +敷 +文 +斋 +斌 +斐 +斑 +斓 +斗 +料 +斛 +斜 +斟 +斡 +斤 +斥 +斧 +斩 +斫 +断 +斯 +新 +方 +於 +施 +旁 +旃 +旄 +旅 +旆 +旋 +旌 +旎 +族 +旒 +旖 +旗 +无 +既 +日 +旦 +旧 +旨 +早 +旬 +旭 +旮 +旯 +旰 +旱 +时 +旷 +旺 +昀 +昂 +昃 +昆 +昊 +昌 +明 +昏 +易 +昔 +昕 +昙 +昝 +星 +映 +春 +昧 +昨 +昭 +是 +昱 +昴 +昵 +昶 +昼 +显 +晁 +晃 +晋 +晌 +晏 +晒 +晓 +晔 +晕 +晖 +晗 +晚 +晟 +晡 +晤 +晦 +晨 +普 +景 +晰 +晴 +晶 +晷 +智 +晾 +暂 +暄 +暇 +暌 +暑 +暖 +暗 +暝 +暧 +暨 +暮 +暴 +暹 +暾 +曙 +曛 +曜 +曝 +曦 +曩 +曰 +曲 +曳 +更 +曷 +曹 +曼 +曾 +替 +最 +月 +有 +朊 +朋 +服 +朐 +朔 +朕 +朗 +望 +朝 +期 +朦 +木 +未 +末 +本 +札 +术 +朱 +朴 +朵 +机 +朽 +杀 +杂 +权 +杆 +杈 +杉 +杌 +李 +杏 +材 +村 +杓 +杖 +杜 +杞 +束 +杠 +条 +来 +杨 +杪 +杭 +杯 +杰 +杲 +杳 +杵 +杷 +杼 +松 +板 +极 +构 +枇 +枉 +枋 +析 +枕 +林 +枘 +枚 +果 +枝 +枞 +枢 +枣 +枥 +枧 +枨 +枪 +枫 +枭 +枯 +枰 +枳 +枵 +架 +枷 +枸 +柁 +柃 +柄 +柏 +某 +柑 +柒 +染 +柔 +柘 +柙 +柚 +柜 +柝 +柞 +柠 +柢 +查 +柩 +柬 +柯 +柰 +柱 +柳 +柴 +柽 +柿 +栀 +栅 +标 +栈 +栉 +栊 +栋 +栌 +栎 +栏 +树 +栓 +栖 +栗 +栝 +校 +栩 +株 +栲 +栳 +样 +核 +根 +格 +栽 +栾 +桀 +桁 +桂 +桃 +桄 +桅 +框 +案 +桉 +桊 +桌 +桎 +桐 +桑 +桓 +桔 +桕 +桠 +桡 +桢 +档 +桤 +桥 +桦 +桧 +桨 +桩 +桫 +桴 +桶 +桷 +梁 +梃 +梅 +梆 +梏 +梓 +梗 +梢 +梦 +梧 +梨 +梭 +梯 +械 +梳 +梵 +检 +棂 +棉 +棋 +棍 +棒 +棕 +棘 +棚 +棠 +棣 +森 +棰 +棱 +棵 +棹 +棺 +棼 +椁 +椅 +椋 +植 +椎 +椐 +椒 +椟 +椠 +椤 +椭 +椰 +椴 +椹 +椽 +椿 +楂 +楔 +楗 +楚 +楝 +楞 +楠 +楣 +楦 +楫 +楮 +楷 +楸 +楹 +楼 +榀 +概 +榄 +榆 +榇 +榈 +榉 +榍 +榔 +榕 +榛 +榜 +榧 +榨 +榫 +榭 +榱 +榴 +榷 +榻 +槁 +槊 +槌 +槎 +槐 +槔 +槛 +槟 +槠 +槭 +槲 +槽 +槿 +樊 +樗 +樘 +樟 +模 +樨 +横 +樯 +樱 +樵 +樽 +樾 +橄 +橇 +橐 +橘 +橙 +橛 +橡 +橥 +橱 +橹 +橼 +檀 +檄 +檎 +檐 +檑 +檗 +檠 +檩 +檫 +檬 +欠 +次 +欢 +欣 +欤 +欧 +欲 +欷 +欺 +款 +歃 +歆 +歇 +歉 +歌 +歙 +止 +正 +此 +步 +武 +歧 +歪 +歹 +死 +歼 +殁 +殂 +殃 +殄 +殆 +殇 +殉 +殊 +残 +殍 +殒 +殓 +殖 +殚 +殛 +殡 +殪 +殳 +殴 +段 +殷 +殿 +毁 +毂 +毅 +毋 +母 +每 +毒 +毓 +比 +毕 +毖 +毗 +毙 +毛 +毡 +毪 +毫 +毯 +毳 +毵 +毹 +毽 +氅 +氆 +氇 +氍 +氏 +氐 +民 +氓 +气 +氕 +氖 +氘 +氙 +氚 +氛 +氟 +氡 +氢 +氤 +氦 +氧 +氨 +氩 +氪 +氮 +氯 +氰 +氲 +水 +永 +氽 +汀 +汁 +求 +汆 +汇 +汉 +汊 +汐 +汔 +汕 +汗 +汛 +汜 +汝 +汞 +江 +池 +污 +汤 +汨 +汩 +汪 +汰 +汲 +汴 +汶 +汹 +汽 +汾 +沁 +沂 +沃 +沅 +沆 +沈 +沉 +沌 +沏 +沐 +沓 +沔 +沙 +沛 +沟 +没 +沣 +沤 +沥 +沦 +沧 +沩 +沪 +沫 +沭 +沮 +沱 +河 +沸 +油 +治 +沼 +沽 +沾 +沿 +泄 +泅 +泉 +泊 +泌 +泐 +泓 +泔 +法 +泖 +泗 +泛 +泞 +泠 +泡 +波 +泣 +泥 +注 +泪 +泫 +泮 +泯 +泰 +泱 +泳 +泵 +泷 +泸 +泺 +泻 +泼 +泽 +泾 +洁 +洄 +洇 +洋 +洌 +洎 +洒 +洗 +洙 +洚 +洛 +洞 +津 +洧 +洪 +洫 +洮 +洱 +洲 +洳 +洵 +洹 +活 +洼 +洽 +派 +流 +浃 +浅 +浆 +浇 +浈 +浊 +测 +浍 +济 +浏 +浑 +浒 +浓 +浔 +浙 +浚 +浜 +浞 +浠 +浣 +浦 +浩 +浪 +浮 +浯 +浴 +海 +浸 +浼 +涂 +涅 +消 +涉 +涌 +涎 +涑 +涓 +涔 +涕 +涛 +涝 +涞 +涟 +涠 +涡 +涣 +涤 +润 +涧 +涨 +涩 +涪 +涫 +涮 +涯 +液 +涵 +涸 +涿 +淀 +淄 +淅 +淆 +淇 +淋 +淌 +淑 +淖 +淘 +淙 +淝 +淞 +淠 +淡 +淤 +淦 +淫 
+淬 +淮 +深 +淳 +混 +淹 +添 +淼 +清 +渊 +渌 +渍 +渎 +渐 +渑 +渔 +渖 +渗 +渚 +渝 +渠 +渡 +渣 +渤 +渥 +温 +渫 +渭 +港 +渲 +渴 +游 +渺 +湃 +湄 +湍 +湎 +湔 +湖 +湘 +湛 +湟 +湫 +湮 +湾 +湿 +溃 +溅 +溆 +溉 +溏 +源 +溘 +溜 +溟 +溢 +溥 +溧 +溪 +溯 +溱 +溲 +溴 +溶 +溷 +溺 +溻 +溽 +滁 +滂 +滇 +滋 +滏 +滑 +滓 +滔 +滕 +滗 +滚 +滞 +滟 +滠 +满 +滢 +滤 +滥 +滦 +滨 +滩 +滴 +滹 +漂 +漆 +漉 +漏 +漓 +演 +漕 +漠 +漤 +漩 +漪 +漫 +漭 +漯 +漱 +漳 +漶 +漾 +潆 +潇 +潋 +潍 +潘 +潜 +潞 +潢 +潦 +潭 +潮 +潲 +潴 +潸 +潺 +潼 +澄 +澈 +澉 +澌 +澍 +澎 +澜 +澡 +澧 +澳 +澶 +澹 +激 +濂 +濉 +濑 +濒 +濞 +濠 +濡 +濮 +濯 +瀑 +瀚 +瀛 +瀣 +瀵 +瀹 +灌 +灏 +灞 +火 +灭 +灯 +灰 +灵 +灶 +灸 +灼 +灾 +灿 +炀 +炅 +炉 +炊 +炎 +炒 +炔 +炕 +炖 +炙 +炜 +炝 +炫 +炬 +炭 +炮 +炯 +炱 +炳 +炷 +炸 +点 +炻 +炼 +炽 +烀 +烁 +烂 +烃 +烈 +烊 +烘 +烙 +烛 +烟 +烤 +烦 +烧 +烨 +烩 +烫 +烬 +热 +烯 +烷 +烹 +烽 +焉 +焊 +焐 +焓 +焕 +焖 +焘 +焙 +焚 +焦 +焯 +焰 +焱 +然 +煅 +煊 +煌 +煎 +煜 +煞 +煤 +煦 +照 +煨 +煮 +煲 +煳 +煸 +煺 +煽 +熄 +熊 +熏 +熔 +熘 +熙 +熟 +熠 +熨 +熬 +熵 +熹 +燃 +燎 +燔 +燕 +燠 +燥 +燧 +燮 +燹 +爆 +爝 +爨 +爪 +爬 +爰 +爱 +爵 +父 +爷 +爸 +爹 +爻 +爽 +爿 +片 +版 +牌 +牍 +牒 +牖 +牙 +牛 +牝 +牟 +牡 +牢 +牦 +牧 +物 +牮 +牯 +牲 +牵 +特 +牺 +牾 +犀 +犁 +犄 +犊 +犋 +犍 +犏 +犒 +犟 +犬 +犯 +犰 +犴 +状 +犷 +犸 +犹 +狁 +狂 +狃 +狄 +狈 +狍 +狎 +狐 +狒 +狗 +狙 +狞 +狠 +狡 +狨 +狩 +独 +狭 +狮 +狯 +狰 +狱 +狲 +狳 +狴 +狷 +狸 +狺 +狻 +狼 +猁 +猃 +猊 +猎 +猕 +猖 +猗 +猛 +猜 +猝 +猞 +猡 +猢 +猥 +猩 +猪 +猫 +猬 +献 +猱 +猴 +猷 +猸 +猹 +猾 +猿 +獍 +獐 +獒 +獗 +獠 +獬 +獭 +獯 +獾 +玄 +率 +玉 +王 +玎 +玑 +玖 +玛 +玢 +玩 +玫 +玮 +环 +现 +玲 +玳 +玷 +玺 +玻 +珀 +珂 +珈 +珉 +珊 +珍 +珏 +珐 +珑 +珙 +珞 +珠 +珥 +珧 +珩 +班 +珲 +球 +琅 +理 +琉 +琏 +琐 +琚 +琛 +琢 +琥 +琦 +琨 +琪 +琬 +琮 +琰 +琳 +琴 +琵 +琶 +琼 +瑁 +瑕 +瑗 +瑙 +瑚 +瑛 +瑜 +瑞 +瑟 +瑭 +瑰 +瑶 +瑾 +璀 +璁 +璃 +璇 +璋 +璎 +璐 +璜 +璞 +璧 +璨 +璩 +瓒 +瓜 +瓞 +瓠 +瓢 +瓣 +瓤 +瓦 +瓮 +瓯 +瓴 +瓶 +瓷 +瓿 +甄 +甏 +甑 +甓 +甘 +甙 +甚 +甜 +生 +甥 +用 +甩 +甫 +甬 +甭 +田 +由 +甲 +申 +电 +男 +甸 +町 +画 +甾 +畀 +畅 +畈 +畋 +界 +畎 +畏 +畔 +留 +畚 +畛 +畜 +略 +畦 +番 +畲 +畴 +畸 +畹 +畿 +疃 +疆 +疋 +疏 +疑 +疔 +疖 +疗 +疙 +疚 +疝 +疟 +疠 +疡 +疣 +疤 +疥 +疫 +疬 +疮 +疯 +疰 +疱 +疲 +疳 +疴 +疵 +疸 +疹 +疼 +疽 +疾 +痂 +痃 +痄 +病 +症 +痈 +痉 +痊 +痍 +痒 +痔 +痕 +痘 +痛 +痞 +痢 +痣 +痤 +痦 +痧 +痨 +痪 +痫 +痰 +痱 +痴 +痹 +痼 +痿 +瘀 +瘁 +瘃 +瘅 +瘊 +瘌 +瘐 +瘗 +瘘 +瘙 +瘛 +瘟 +瘠 +瘢 +瘤 +瘥 +瘦 +瘩 +瘪 +瘫 +瘭 +瘰 +瘳 +瘴 +瘵 +瘸 +瘼 +瘾 +瘿 +癀 +癃 +癌 +癍 +癔 +癖 +癜 +癞 +癣 +癫 +癯 +癸 +登 +白 +百 +皂 +的 +皆 +皇 +皈 +皋 +皎 +皑 +皓 +皖 +皙 +皤 +皮 +皱 +皲 +皴 +皿 +盂 +盅 +盆 +盈 +益 +盍 +盎 +盏 +盐 +监 +盒 +盔 +盖 +盗 +盘 +盛 +盟 +盥 +目 +盯 +盱 +盲 +直 +相 +盹 +盼 +盾 +省 +眄 +眇 +眈 +眉 +看 +眍 +眙 +眚 +真 +眠 +眢 +眦 +眨 +眩 +眭 +眯 +眵 +眶 +眷 +眸 +眺 +眼 +着 +睁 +睃 +睇 +睐 +睑 +睚 +睛 +睡 +睢 +督 +睥 +睦 +睨 +睫 +睬 +睹 +睽 +睾 +睿 +瞀 +瞄 +瞅 +瞌 +瞍 +瞎 +瞑 +瞒 +瞟 +瞠 +瞢 +瞥 +瞧 +瞩 +瞪 +瞬 +瞰 +瞳 +瞵 +瞻 +瞽 +瞿 +矍 +矗 +矛 +矜 +矢 +矣 +知 +矧 +矩 +矫 +矬 +短 +矮 +石 +矶 +矸 +矽 +矾 +矿 +砀 +码 +砂 +砉 +砌 +砍 +砑 +砒 +研 +砖 +砗 +砘 +砚 +砜 +砝 +砟 +砣 +砥 +砧 +砭 +砰 +破 +砷 +砸 +砹 +砺 +砻 +砼 +砾 +础 +硅 +硇 +硌 +硎 +硐 +硒 +硕 +硖 +硗 +硝 +硪 +硫 +硬 +硭 +确 +硷 +硼 +碇 +碉 +碌 +碍 +碎 +碑 +碓 +碗 +碘 +碚 +碛 +碜 +碟 +碡 +碣 +碥 +碧 +碰 +碱 +碲 +碳 +碴 +碹 +碾 +磁 +磅 +磉 +磊 +磋 +磐 +磔 +磕 +磙 +磨 +磬 +磲 +磴 +磷 +磺 +礁 +礅 +礓 +礞 +礤 +礴 +示 +礻 +礼 +社 +祀 +祁 +祆 +祈 +祉 +祓 +祖 +祗 +祚 +祛 +祜 +祝 +神 +祟 +祠 +祢 +祥 +祧 +票 +祭 +祯 +祷 +祸 +祺 +禀 +禁 +禄 +禅 +禊 +福 +禚 +禧 +禳 +禹 +禺 +离 +禽 +禾 +秀 +私 +秃 +秆 +秉 +秋 +种 +科 +秒 +秕 +秘 +租 +秣 +秤 +秦 +秧 +秩 +秫 +秭 +积 +称 +秸 +移 +秽 +稀 +稂 +稆 +程 +稍 +税 +稔 +稗 +稚 +稞 +稠 +稣 +稳 +稷 +稻 +稼 +稽 +稿 +穆 +穑 +穗 +穰 +穴 +究 +穷 +穸 +穹 +空 +穿 +窀 +突 +窃 +窄 +窈 +窍 +窑 +窒 +窕 +窖 +窗 +窘 +窜 +窝 +窟 +窠 +窥 +窦 +窨 +窬 +窭 +窳 +窿 +立 +竖 +站 +竞 +竟 +章 +竣 +童 +竦 +竭 +端 +竹 +竺 +竽 +竿 +笃 +笄 +笆 +笈 +笊 +笋 +笏 +笑 +笔 +笕 +笙 +笛 +笞 +笠 +笤 +笥 +符 +笨 +笪 +笫 +第 +笮 +笱 +笳 +笸 +笺 +笼 +笾 +筅 +筇 +等 +筋 +筌 +筏 +筐 +筑 +筒 +答 +策 +筘 +筚 +筛 +筝 +筠 +筢 +筮 +筱 +筲 +筵 +筷 +筹 +筻 +签 +简 +箅 +箍 +箐 +箔 +箕 +算 +箜 +管 +箢 +箦 +箧 +箨 +箩 +箪 +箫 +箬 +箭 +箱 +箴 +箸 +篁 +篆 +篇 +篌 +篑 +篓 +篙 +篚 +篝 +篡 +篥 +篦 +篪 +篮 +篱 +篷 +篼 +篾 +簇 +簋 +簌 +簏 +簖 +簟 +簦 +簧 +簪 +簸 +簿 +籀 +籁 +籍 +米 +籴 +类 +籼 +籽 +粉 +粑 +粒 +粕 +粗 +粘 +粜 +粝 +粞 +粟 +粤 +粥 +粪 +粮 +粱 +粲 +粳 +粹 +粼 +粽 +精 +糁 +糅 +糇 +糈 +糊 +糌 +糍 +糕 +糖 +糗 +糙 +糜 +糟 +糠 +糨 +糯 +系 +紊 +素 +索 +紧 +紫 +累 +絮 +絷 +綦 +綮 +縻 +繁 +繇 +纂 +纛 +纠 +纡 +红 +纣 +纤 +纥 +约 +级 +纨 +纩 +纪 +纫 +纬 +纭 +纯 +纰 +纱 +纲 +纳 +纵 +纶 +纷 +纸 +纹 +纺 +纽 +纾 +线 +绀 +绁 +绂 +练 +组 +绅 +细 +织 +终 +绉 +绊 
+绋 +绌 +绍 +绎 +经 +绐 +绑 +绒 +结 +绔 +绕 +绗 +绘 +给 +绚 +绛 +络 +绝 +绞 +统 +绠 +绡 +绢 +绣 +绥 +绦 +继 +绨 +绩 +绪 +绫 +续 +绮 +绯 +绰 +绲 +绳 +维 +绵 +绶 +绷 +绸 +绺 +绻 +综 +绽 +绾 +绿 +缀 +缁 +缂 +缃 +缄 +缅 +缆 +缇 +缈 +缉 +缌 +缎 +缏 +缑 +缒 +缓 +缔 +缕 +编 +缗 +缘 +缙 +缚 +缛 +缜 +缝 +缟 +缠 +缡 +缢 +缣 +缤 +缥 +缦 +缧 +缨 +缩 +缪 +缫 +缬 +缭 +缮 +缯 +缰 +缱 +缲 +缳 +缴 +缵 +缶 +缸 +缺 +罂 +罄 +罅 +罐 +网 +罔 +罕 +罗 +罘 +罚 +罟 +罡 +罢 +罨 +罩 +罪 +置 +罱 +署 +罴 +罹 +罾 +羁 +羊 +羌 +美 +羔 +羚 +羝 +羞 +羟 +羡 +群 +羧 +羯 +羰 +羲 +羸 +羹 +羼 +羽 +羿 +翁 +翅 +翊 +翌 +翎 +翔 +翕 +翘 +翟 +翠 +翡 +翥 +翦 +翩 +翮 +翰 +翱 +翳 +翻 +翼 +耀 +老 +考 +耄 +者 +耆 +耋 +而 +耍 +耐 +耒 +耔 +耕 +耖 +耗 +耘 +耙 +耜 +耠 +耢 +耥 +耦 +耧 +耨 +耩 +耪 +耱 +耳 +耵 +耶 +耷 +耸 +耻 +耽 +耿 +聂 +聃 +聆 +聊 +聋 +职 +聍 +聒 +联 +聘 +聚 +聩 +聪 +聱 +聿 +肃 +肄 +肆 +肇 +肉 +肋 +肌 +肓 +肖 +肘 +肚 +肛 +肝 +肟 +肠 +股 +肢 +肤 +肥 +肩 +肪 +肫 +肭 +肮 +肯 +肱 +育 +肴 +肷 +肺 +肼 +肽 +肾 +肿 +胀 +胁 +胂 +胃 +胄 +胆 +背 +胍 +胎 +胖 +胗 +胙 +胚 +胛 +胜 +胝 +胞 +胡 +胤 +胥 +胧 +胨 +胩 +胪 +胫 +胬 +胭 +胯 +胰 +胱 +胲 +胳 +胴 +胶 +胸 +胺 +胼 +能 +脂 +脆 +脉 +脊 +脍 +脎 +脏 +脐 +脑 +脒 +脓 +脔 +脖 +脘 +脚 +脞 +脬 +脯 +脱 +脲 +脶 +脸 +脾 +腆 +腈 +腊 +腋 +腌 +腐 +腑 +腓 +腔 +腕 +腙 +腚 +腠 +腥 +腧 +腩 +腭 +腮 +腰 +腱 +腴 +腹 +腺 +腻 +腼 +腽 +腾 +腿 +膀 +膂 +膈 +膊 +膏 +膑 +膘 +膛 +膜 +膝 +膦 +膨 +膪 +膳 +膺 +膻 +臀 +臁 +臂 +臃 +臆 +臊 +臌 +臣 +臧 +自 +臬 +臭 +至 +致 +臻 +臼 +臾 +舀 +舁 +舂 +舄 +舅 +舆 +舌 +舍 +舐 +舒 +舔 +舛 +舜 +舞 +舟 +舡 +舢 +舣 +舨 +航 +舫 +般 +舰 +舱 +舳 +舴 +舵 +舶 +舷 +舸 +船 +舻 +舾 +艄 +艇 +艋 +艘 +艚 +艟 +艨 +艮 +良 +艰 +色 +艳 +艴 +艺 +艽 +艾 +艿 +节 +芄 +芈 +芊 +芋 +芍 +芎 +芏 +芑 +芒 +芗 +芘 +芙 +芜 +芝 +芟 +芡 +芥 +芦 +芨 +芩 +芪 +芫 +芬 +芭 +芮 +芯 +芰 +花 +芳 +芴 +芷 +芸 +芹 +芽 +芾 +苁 +苄 +苇 +苈 +苊 +苋 +苌 +苍 +苎 +苏 +苑 +苒 +苓 +苔 +苕 +苗 +苘 +苛 +苜 +苞 +苟 +苠 +苡 +苣 +苤 +若 +苦 +苫 +苯 +英 +苴 +苷 +苹 +苻 +茁 +茂 +范 +茄 +茅 +茆 +茈 +茉 +茌 +茎 +茏 +茑 +茔 +茕 +茗 +茚 +茛 +茜 +茧 +茨 +茫 +茬 +茭 +茯 +茱 +茳 +茴 +茵 +茶 +茸 +茹 +茼 +荀 +荃 +荆 +荇 +草 +荏 +荐 +荑 +荒 +荔 +荚 +荛 +荜 +荞 +荟 +荠 +荡 +荣 +荤 +荥 +荦 +荧 +荨 +荩 +荪 +荫 +荬 +荭 +药 +荷 +荸 +荻 +荼 +荽 +莅 +莆 +莉 +莎 +莒 +莓 +莘 +莛 +莜 +莞 +莠 +莨 +莩 +莪 +莫 +莰 +莱 +莲 +莳 +莴 +莶 +获 +莸 +莹 +莺 +莼 +莽 +菀 +菁 +菅 +菇 +菊 +菌 +菏 +菔 +菖 +菘 +菜 +菝 +菟 +菠 +菡 +菥 +菩 +菪 +菰 +菱 +菲 +菹 +菽 +萁 +萃 +萄 +萋 +萌 +萍 +萎 +萏 +萑 +萘 +萜 +萝 +萤 +营 +萦 +萧 +萨 +萱 +萸 +萼 +落 +葆 +葑 +著 +葚 +葛 +葜 +葡 +董 +葩 +葫 +葬 +葭 +葱 +葳 +葵 +葶 +葸 +葺 +蒂 +蒇 +蒈 +蒉 +蒋 +蒌 +蒎 +蒗 +蒙 +蒜 +蒡 +蒯 +蒲 +蒴 +蒸 +蒹 +蒺 +蒽 +蒿 +蓁 +蓄 +蓉 +蓊 +蓍 +蓐 +蓑 +蓓 +蓖 +蓝 +蓟 +蓠 +蓣 +蓥 +蓦 +蓬 +蓰 +蓼 +蓿 +蔌 +蔑 +蔓 +蔗 +蔚 +蔟 +蔡 +蔫 +蔬 +蔷 +蔸 +蔹 +蔺 +蔻 +蔼 +蔽 +蕃 +蕈 +蕉 +蕊 +蕖 +蕙 +蕞 +蕤 +蕨 +蕲 +蕴 +蕹 +蕺 +蕻 +蕾 +薄 +薅 +薇 +薏 +薛 +薜 +薤 +薨 +薪 +薮 +薯 +薰 +薷 +薹 +藁 +藉 +藏 +藐 +藓 +藕 +藜 +藤 +藩 +藻 +藿 +蘅 +蘑 +蘖 +蘧 +蘩 +蘸 +蘼 +虎 +虏 +虐 +虑 +虔 +虚 +虞 +虢 +虫 +虬 +虮 +虱 +虹 +虺 +虻 +虼 +虽 +虾 +虿 +蚀 +蚁 +蚂 +蚊 +蚋 +蚌 +蚍 +蚓 +蚕 +蚜 +蚝 +蚣 +蚤 +蚧 +蚨 +蚩 +蚬 +蚯 +蚰 +蚱 +蚴 +蚶 +蚺 +蛀 +蛄 +蛆 +蛇 +蛉 +蛊 +蛋 +蛎 +蛏 +蛐 +蛑 +蛔 +蛘 +蛙 +蛛 +蛞 +蛟 +蛤 +蛩 +蛭 +蛮 +蛰 +蛱 +蛲 +蛳 +蛴 +蛸 +蛹 +蛾 +蜀 +蜂 +蜃 +蜇 +蜈 +蜉 +蜊 +蜍 +蜒 +蜓 +蜕 +蜗 +蜘 +蜚 +蜜 +蜞 +蜡 +蜢 +蜣 +蜥 +蜩 +蜮 +蜱 +蜴 +蜷 +蜻 +蜾 +蜿 +蝇 +蝈 +蝉 +蝌 +蝎 +蝓 +蝗 +蝙 +蝠 +蝣 +蝤 +蝥 +蝮 +蝰 +蝴 +蝶 +蝻 +蝼 +蝽 +蝾 +螂 +螃 +螅 +螈 +螋 +融 +螗 +螟 +螨 +螫 +螬 +螭 +螯 +螳 +螵 +螺 +螽 +蟀 +蟆 +蟊 +蟋 +蟑 +蟒 +蟛 +蟠 +蟥 +蟪 +蟮 +蟹 +蟾 +蠃 +蠊 +蠓 +蠕 +蠖 +蠡 +蠢 +蠲 +蠹 +蠼 +血 +衄 +衅 +行 +衍 +衔 +街 +衙 +衡 +衢 +衣 +补 +表 +衩 +衫 +衬 +衮 +衰 +衲 +衷 +衽 +衾 +衿 +袁 +袂 +袄 +袅 +袈 +袋 +袍 +袒 +袖 +袜 +袢 +袤 +被 +袭 +袱 +袼 +裁 +裂 +装 +裆 +裉 +裎 +裒 +裔 +裕 +裘 +裙 +裟 +裢 +裣 +裤 +裥 +裨 +裰 +裱 +裳 +裴 +裸 +裹 +裼 +裾 +褂 +褊 +褐 +褒 +褓 +褙 +褚 +褛 +褡 +褥 +褪 +褫 +褰 +褴 +褶 +襁 +襄 +襞 +襟 +襦 +襻 +西 +要 +覃 +覆 +见 +观 +规 +觅 +视 +觇 +览 +觉 +觊 +觋 +觌 +觎 +觏 +觐 +觑 +角 +觖 +觚 +觜 +觞 +解 +觥 +触 +觫 +觯 +觳 +言 +訇 +訾 +詈 +詹 +誉 +誊 +誓 +謇 +警 +譬 +计 +订 +讣 +认 +讥 +讦 +讧 +讨 +让 +讪 +讫 +训 +议 +讯 +记 +讲 +讳 +讴 +讵 +讶 +讷 +许 +讹 +论 +讼 +讽 +设 +访 +诀 +证 +诂 +诃 +评 +诅 +识 +诈 +诉 +诊 +诋 +诌 +词 +诎 +诏 +译 +诒 +诓 +诔 +试 +诖 +诗 +诘 +诙 +诚 +诛 +诜 +话 +诞 +诟 +诠 +诡 +询 +诣 +诤 +该 +详 +诧 +诨 +诩 +诫 +诬 +语 +诮 +误 +诰 +诱 +诲 +诳 +说 +诵 +请 +诸 +诹 +诺 +读 +诼 +诽 +课 +诿 +谀 +谁 +谂 +调 +谄 +谅 +谆 +谇 +谈 +谊 +谋 +谌 +谍 +谎 +谏 +谐 +谑 +谒 +谓 +谔 +谕 +谖 +谗 +谙 +谚 +谛 +谜 +谝 +谟 +谠 +谡 +谢 +谣 +谤 +谥 +谦 +谧 +谨 +谩 +谪 +谫 +谬 +谭 +谮 +谯 +谰 +谱 +谲 +谳 +谴 +谵 +谶 +谷 +豁 +豆 +豇 +豉 +豌 +豕 +豚 +象 +豢 +豪 +豫 +豳 +豸 +豹 +豺 
+貂 +貅 +貉 +貊 +貌 +貔 +貘 +贝 +贞 +负 +贡 +财 +责 +贤 +败 +账 +货 +质 +贩 +贪 +贫 +贬 +购 +贮 +贯 +贰 +贱 +贲 +贳 +贴 +贵 +贶 +贷 +贸 +费 +贺 +贻 +贼 +贽 +贾 +贿 +赀 +赁 +赂 +赃 +资 +赅 +赆 +赇 +赈 +赉 +赊 +赋 +赌 +赍 +赎 +赏 +赐 +赓 +赔 +赖 +赘 +赙 +赚 +赛 +赜 +赝 +赞 +赠 +赡 +赢 +赣 +赤 +赦 +赧 +赫 +赭 +走 +赳 +赴 +赵 +赶 +起 +趁 +趄 +超 +越 +趋 +趑 +趔 +趟 +趣 +趱 +足 +趴 +趵 +趸 +趺 +趼 +趾 +趿 +跃 +跄 +跆 +跋 +跌 +跎 +跏 +跑 +跖 +跗 +跚 +跛 +距 +跞 +跟 +跣 +跤 +跨 +跪 +跬 +路 +跳 +践 +跷 +跸 +跹 +跺 +跻 +跽 +踅 +踉 +踊 +踌 +踏 +踔 +踝 +踞 +踟 +踢 +踣 +踩 +踪 +踬 +踮 +踯 +踱 +踵 +踹 +踺 +踽 +蹀 +蹁 +蹂 +蹄 +蹇 +蹈 +蹉 +蹊 +蹋 +蹑 +蹒 +蹙 +蹦 +蹩 +蹬 +蹭 +蹯 +蹰 +蹲 +蹴 +蹶 +蹼 +蹿 +躁 +躅 +躇 +躏 +躐 +躔 +躜 +躞 +身 +躬 +躯 +躲 +躺 +车 +轧 +轨 +轩 +轫 +转 +轭 +轮 +软 +轰 +轱 +轲 +轳 +轴 +轵 +轶 +轷 +轸 +轺 +轻 +轼 +载 +轾 +轿 +辁 +辂 +较 +辄 +辅 +辆 +辇 +辈 +辉 +辊 +辋 +辍 +辎 +辏 +辐 +辑 +输 +辔 +辕 +辖 +辗 +辘 +辙 +辚 +辛 +辜 +辞 +辟 +辣 +辨 +辩 +辫 +辰 +辱 +边 +辽 +达 +迁 +迂 +迄 +迅 +过 +迈 +迎 +运 +近 +迓 +返 +迕 +还 +这 +进 +远 +违 +连 +迟 +迢 +迤 +迥 +迦 +迨 +迩 +迪 +迫 +迭 +迮 +述 +迷 +迸 +迹 +追 +退 +送 +适 +逃 +逄 +逅 +逆 +选 +逊 +逋 +逍 +透 +逐 +逑 +递 +途 +逖 +逗 +通 +逛 +逝 +逞 +速 +造 +逡 +逢 +逦 +逭 +逮 +逯 +逵 +逶 +逸 +逻 +逼 +逾 +遁 +遂 +遄 +遇 +遍 +遏 +遐 +遑 +遒 +道 +遗 +遘 +遛 +遢 +遣 +遥 +遨 +遭 +遮 +遴 +遵 +遽 +避 +邀 +邂 +邃 +邈 +邋 +邑 +邓 +邕 +邗 +邙 +邛 +邝 +邡 +邢 +那 +邦 +邪 +邬 +邮 +邯 +邰 +邱 +邳 +邴 +邵 +邶 +邸 +邹 +邺 +邻 +邾 +郁 +郄 +郅 +郇 +郊 +郎 +郏 +郐 +郑 +郓 +郗 +郛 +郜 +郝 +郡 +郢 +郦 +郧 +部 +郫 +郭 +郯 +郴 +郸 +都 +郾 +鄂 +鄄 +鄙 +鄞 +鄢 +鄣 +鄯 +鄱 +鄹 +酃 +酆 +酉 +酊 +酋 +酌 +配 +酎 +酏 +酐 +酒 +酗 +酚 +酝 +酞 +酡 +酢 +酣 +酤 +酥 +酩 +酪 +酬 +酮 +酯 +酰 +酱 +酲 +酴 +酵 +酶 +酷 +酸 +酹 +酽 +酾 +酿 +醅 +醇 +醉 +醋 +醌 +醍 +醐 +醑 +醒 +醚 +醛 +醢 +醪 +醭 +醮 +醯 +醴 +醵 +醺 +采 +釉 +释 +里 +重 +野 +量 +金 +釜 +鉴 +銎 +銮 +鋈 +錾 +鍪 +鎏 +鏊 +鏖 +鐾 +鑫 +钆 +钇 +针 +钉 +钊 +钋 +钌 +钍 +钎 +钏 +钐 +钒 +钓 +钔 +钕 +钗 +钙 +钚 +钛 +钜 +钝 +钞 +钟 +钠 +钡 +钢 +钣 +钤 +钥 +钦 +钧 +钨 +钩 +钪 +钫 +钬 +钭 +钮 +钯 +钰 +钱 +钲 +钳 +钴 +钵 +钷 +钹 +钺 +钻 +钼 +钽 +钾 +钿 +铀 +铁 +铂 +铃 +铄 +铅 +铆 +铈 +铉 +铊 +铋 +铌 +铍 +铎 +铐 +铑 +铒 +铕 +铗 +铘 +铙 +铛 +铜 +铝 +铞 +铟 +铠 +铡 +铢 +铣 +铤 +铥 +铧 +铨 +铩 +铪 +铫 +铬 +铭 +铮 +铯 +铰 +铱 +铲 +铳 +铴 +铵 +银 +铷 +铸 +铹 +铺 +铼 +铽 +链 +铿 +销 +锁 +锂 +锃 +锄 +锅 +锆 +锇 +锈 +锉 +锊 +锋 +锌 +锎 +锏 +锐 +锑 +锒 +锓 +锔 +锕 +锖 +锗 +锘 +错 +锚 +锛 +锝 +锞 +锟 +锡 +锢 +锣 +锤 +锥 +锦 +锨 +锩 +锪 +锫 +锬 +锭 +键 +锯 +锰 +锱 +锲 +锴 +锵 +锶 +锷 +锸 +锹 +锺 +锻 +锾 +锿 +镀 +镁 +镂 +镄 +镅 +镆 +镇 +镉 +镊 +镌 +镍 +镎 +镏 +镐 +镑 +镒 +镓 +镔 +镖 +镗 +镘 +镛 +镜 +镝 +镞 +镡 +镢 +镣 +镤 +镥 +镦 +镧 +镨 +镩 +镪 +镫 +镬 +镭 +镯 +镰 +镱 +镲 +镳 +镶 +长 +门 +闩 +闪 +闫 +闭 +问 +闯 +闰 +闱 +闲 +闳 +间 +闵 +闶 +闷 +闸 +闹 +闺 +闻 +闼 +闽 +闾 +阀 +阁 +阂 +阃 +阄 +阅 +阆 +阈 +阉 +阊 +阋 +阌 +阍 +阎 +阏 +阐 +阑 +阒 +阔 +阕 +阖 +阗 +阙 +阚 +阜 +队 +阡 +阢 +阪 +阮 +阱 +防 +阳 +阴 +阵 +阶 +阻 +阼 +阽 +阿 +陀 +陂 +附 +际 +陆 +陇 +陈 +陉 +陋 +陌 +降 +限 +陔 +陕 +陛 +陟 +陡 +院 +除 +陧 +陨 +险 +陪 +陬 +陲 +陴 +陵 +陶 +陷 +隅 +隆 +隈 +隋 +隍 +随 +隐 +隔 +隗 +隘 +隙 +障 +隧 +隰 +隳 +隶 +隼 +隽 +难 +雀 +雁 +雄 +雅 +集 +雇 +雉 +雌 +雍 +雎 +雏 +雒 +雕 +雠 +雨 +雩 +雪 +雯 +雳 +零 +雷 +雹 +雾 +需 +霁 +霄 +霆 +震 +霈 +霉 +霍 +霎 +霏 +霓 +霖 +霜 +霞 +霪 +霭 +霰 +露 +霸 +霹 +霾 +青 +靓 +靖 +静 +靛 +非 +靠 +靡 +面 +靥 +革 +靳 +靴 +靶 +靼 +鞅 +鞋 +鞍 +鞑 +鞒 +鞘 +鞠 +鞣 +鞫 +鞭 +鞯 +鞲 +鞴 +韦 +韧 +韩 +韪 +韫 +韬 +韭 +音 +韵 +韶 +页 +顶 +顷 +顸 +项 +顺 +须 +顼 +顽 +顾 +顿 +颀 +颁 +颂 +颃 +预 +颅 +领 +颇 +颈 +颉 +颊 +颌 +颍 +颏 +颐 +频 +颓 +颔 +颖 +颗 +题 +颚 +颛 +颜 +额 +颞 +颟 +颠 +颡 +颢 +颤 +颥 +颦 +颧 +风 +飑 +飒 +飓 +飕 +飘 +飙 +飚 +飞 +食 +飧 +飨 +餍 +餐 +餮 +饔 +饕 +饥 +饧 +饨 +饩 +饪 +饫 +饬 +饭 +饮 +饯 +饰 +饱 +饲 +饴 +饵 +饶 +饷 +饺 +饼 +饽 +饿 +馀 +馁 +馄 +馅 +馆 +馇 +馈 +馊 +馋 +馍 +馏 +馐 +馑 +馒 +馓 +馔 +馕 +首 +馗 +馘 +香 +馥 +馨 +马 +驭 +驮 +驯 +驰 +驱 +驳 +驴 +驵 +驶 +驷 +驸 +驹 +驺 +驻 +驼 +驽 +驾 +驿 +骀 +骁 +骂 +骄 +骅 +骆 +骇 +骈 +骊 +骋 +验 +骏 +骐 +骑 +骒 +骓 +骖 +骗 +骘 +骚 +骛 +骜 +骝 +骞 +骟 +骠 +骡 +骢 +骣 +骤 +骥 +骧 +骨 +骰 +骶 +骷 +骸 +骺 +骼 +髀 +髁 +髂 +髅 +髋 +髌 +髑 +髓 +高 +髡 +髦 +髫 +髭 +髯 +髹 +髻 +鬃 +鬈 +鬏 +鬓 +鬟 +鬣 +鬯 +鬲 +鬻 +鬼 +魁 +魂 +魃 +魄 +魅 +魇 +魈 +魉 +魍 +魏 +魑 +魔 +鱼 +鱿 +鲁 +鲂 +鲅 +鲆 +鲇 +鲈 +鲋 +鲍 +鲎 +鲐 +鲑 +鲔 +鲚 +鲛 +鲜 +鲞 +鲟 +鲠 +鲡 +鲢 +鲣 +鲤 +鲥 +鲦 +鲧 +鲨 +鲩 +鲫 +鲭 +鲮 +鲰 +鲱 +鲲 +鲳 +鲴 +鲵 +鲷 +鲸 +鲺 +鲻 +鲼 +鲽 +鳃 +鳄 +鳅 +鳆 +鳇 +鳊 +鳌 +鳍 +鳎 +鳏 +鳐 +鳓 +鳔 +鳕 +鳖 +鳗 +鳘 +鳙 +鳜 +鳝 +鳞 +鳟 +鳢 +鸟 +鸠 +鸡 +鸢 +鸣 +鸥 +鸦 +鸨 +鸩 +鸪 +鸫 +鸬 +鸭 +鸯 +鸱 +鸲 +鸳 +鸵 +鸶 +鸷 +鸸 +鸹 +鸺 +鸽 +鸾 +鸿 +鹁 +鹂 +鹃 +鹄 
+鹅 +鹆 +鹇 +鹈 +鹉 +鹊 +鹋 +鹌 +鹎 +鹏 +鹑 +鹕 +鹗 +鹘 +鹚 +鹛 +鹜 +鹞 +鹣 +鹤 +鹦 +鹧 +鹨 +鹩 +鹪 +鹫 +鹬 +鹭 +鹰 +鹱 +鹳 +鹿 +麂 +麇 +麈 +麋 +麒 +麓 +麝 +麟 +麦 +麸 +麻 +麽 +麾 +黄 +黉 +黍 +黎 +黏 +黑 +黔 +默 +黛 +黜 +黝 +黟 +黠 +黢 +黥 +黧 +黩 +黯 +黹 +黻 +黼 +黾 +鼋 +鼍 +鼎 +鼐 +鼓 +鼗 +鼙 +鼠 +鼢 +鼬 +鼯 +鼷 +鼹 +鼻 +鼾 +齐 +齑 +齿 +龀 +龃 +龄 +龅 +龆 +龇 +龈 +龉 +龊 +龋 +龌 +龙 +龚 +龛 +龟 +龠 +埌 +係 + \ No newline at end of file diff --git a/contrib/Overlap-CRNN/crnn_infer.py b/contrib/Overlap-CRNN/crnn_infer.py new file mode 100644 index 0000000000000000000000000000000000000000..6867f53bbdd56d7ca5e4991d24697c838fc82efd --- /dev/null +++ b/contrib/Overlap-CRNN/crnn_infer.py @@ -0,0 +1,171 @@ +# Copyright 2022 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os + +import mindx.sdk as sdk +from mindx.sdk.base import Tensor + +import numpy as np +import cv2 +from PIL import Image + +MODEL_PATH = "./models/om_model/crnn.om" +LABEL_DICT_PATH = "./ch_sim_en_digit_symble.txt" +IMAGE_PATH = "./dataset" +DEVICE_ID = 0 + +BLANK = 6702 + + +def infer(): + crnn_model = sdk.model(MODEL_PATH, DEVICE_ID) + imgs, labels = json_data_loader() + results = [] + for i, img in enumerate(imgs): + img_path = img + label = labels[i] + image_tensor_list = load_img_data(img_path) + output = crnn_model.infer(image_tensor_list) + output[0].to_host() + output[0] = np.array(output[0]) + result = ctc_post_process(y_pred=output[0], blank=BLANK) + result = result[0] + results.append(result) + get_acc(results, labels) + + +def load_img_data(image_name): + image_name = os.path.join(IMAGE_PATH, image_name) + im = Image.open(image_name) + + # rgb->bgr + im = im.convert("RGB") + r, g, b = im.split() + im = Image.merge("RGB", (b, g, r)) + + # resize + im = im.resize((112, 32)) # (w,h) + im = np.array(im) + + # normalize + mean = np.array([127.5, 127.5, 127.5], dtype=np.float32) + std = np.array([127.5, 127.5, 127.5], dtype=np.float32) + img = im.copy().astype(np.float32) + mean = np.float64(mean.reshape(1, -1)) + stdinv = 1 / np.float64(std.reshape(1, -1)) + cv2.subtract(img, mean, img) # inplace + cv2.multiply(img, stdinv, img) # inplace + + # HWC-> CHW + img = img.transpose(2, 0, 1) + resize_img = img + + # add batch dim + resize_img = np.expand_dims(resize_img, 0) + + resize_img = np.ascontiguousarray(resize_img) + image_tensor = Tensor(resize_img) # 推理前需要转换为tensor的List,使用Tensor类来构建。 + image_tensor.to_device(DEVICE_ID) # !!!!!重要,需要转移至device侧,该函数单独执行 + image_tensor_list = [image_tensor] # 推理前需要转换为tensor的List + return image_tensor_list + + +def arr2char(inputs): + string = "" + for num in inputs: + if num < BLANK: + string += LABEL_DICT[num] + return string + + +def ctc_post_process(y_pred, blank): + indices = [] + seq_len, batch_size, _ = y_pred.shape + indices = y_pred.argmax(axis=2) + lens = [seq_len] * batch_size + pred_labels = [] + for i in range(batch_size): + idx = indices[:, i] + last_idx = blank + pred_label = [] + for j in range(lens[i]): + cur_idx = idx[j] + if cur_idx not in [last_idx, blank]: + pred_label.append(cur_idx) + last_idx = cur_idx + 
pred_labels.append(pred_label)
+    # 将解码得到的数字标签序列转换为字符串结果
+    str_results = []
+    for pred in pred_labels:
+        pred = arr2char(pred)
+        str_results.append(pred)
+    return str_results
+
+
+def json_data_loader():
+    annotation_path = os.path.join(IMAGE_PATH, 'annotation.json')
+    imgs = []
+    texts = []
+    with open(annotation_path, 'r') as r_annotation:
+        datas = json.loads(r_annotation.read())
+    print("total samples:", len(datas))
+
+    for _, data in enumerate(datas):
+        for _, text_data in enumerate(data['texts']):
+            imgs.append(text_data['mask'])
+            texts.append(text_data['label'])
+    return imgs, texts
+
+
+def data_loader():
+    annotation_path = os.path.join(IMAGE_PATH, 'mask_annotation.txt')
+    imgs = []
+    texts = []
+    # 使用 with 语句读取标注文件,避免文件句柄泄漏
+    with open(annotation_path, 'r') as r_annotation:
+        datas = r_annotation.read().splitlines()
+
+    for data in datas:
+        img, text = data.split('\t')
+        imgs.append(img)
+        texts.append(text)
+    return imgs, texts
+
+
+def get_acc(pred_labels, gt_labels):
+    true_num = 0
+    total_num = len(pred_labels)
+    for num in range(total_num):
+        if pred_labels[num].lower() == gt_labels[num].lower():
+            true_num += 1
+        else:
+            print("pred_label:{}, gt_label:{}".format(pred_labels[num].lower(),
+                                                      gt_labels[num].lower()))
+    print("==============================")
+    print("精度测试结果如下:")
+    print("total number:", total_num)
+    print("true number:", true_num)
+    print("accuracy_rate %.2f" % (true_num / total_num * 100) + '%')
+    print("==============================")
+
+
+try:
+    # 使用 with 语句按行读入字典,得到字符列表
+    with open(LABEL_DICT_PATH, 'r') as f:
+        LABEL_DICT = f.read().splitlines()
+    print('label len:', len(LABEL_DICT))
+    infer()
+
+except Exception as e:
+    print(e)
diff --git a/contrib/Overlap-CRNN/crnn_single_infer.py b/contrib/Overlap-CRNN/crnn_single_infer.py
new file mode 100644
index 0000000000000000000000000000000000000000..aed2646550233a600038d0ed91e15b5f085f74c5
--- /dev/null
+++ b/contrib/Overlap-CRNN/crnn_single_infer.py
@@ -0,0 +1,155 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
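+
+# 脚本整体流程(概述):加载 om 模型,对单张测试图片做 RGB->BGR 通道交换、
+# 缩放到 112x32、均值/方差归一化等预处理后推理,经 CTC 贪心解码得到文本,
+# 最后将识别结果绘制在图片下方并保存为 show.jpg。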
+ +import os + +import mindx.sdk as sdk +from mindx.sdk.base import Tensor + +import numpy as np +import cv2 +from PIL import Image, ImageDraw, ImageFont + +MODEL_PATH = "./models/om_model/crnn.om" +LABEL_DICT_PATH = "./ch_sim_en_digit_symble.txt" +IMAGE_PATH = "./test.jpg" +SAVE_PATH = "./show.jpg" +font = ImageFont.truetype(font='./Ubuntu-Regular.ttf', size=20) +DEVICE_ID = 0 +BLANK = 6702 + + +def infer(): + if not os.path.exists(MODEL_PATH): + print("The input model path is empty!!!") + print("plz place the model in ./Overlap-CRNN/models/om_model/") + exit() + crnn_model = sdk.model(MODEL_PATH, DEVICE_ID) + if not os.path.exists(IMAGE_PATH): + print("The input image path is empty!!!") + print("plz place the image in ./Overlap-CRNN/") + exit() + img = load_img_data(IMAGE_PATH) + output = crnn_model.infer(img) + output[0].to_host() + output[0] = np.array(output[0]) + result = ctc_post_process(y_pred=output[0], blank=BLANK) + result = result[0] + img_show(IMAGE_PATH, result) + print("predict text: ", result) + + +def load_img_data(image_name): + if cv2.imread(image_name) is None: + print("=============!Error!================") + print("The input image is empty, plz check out!") + print("====================================") + exit() + else: + im = Image.open(image_name) + # rgb->bgr + im = im.convert("RGB") + r, g, b = im.split() + im = Image.merge("RGB", (b, g, r)) + + # resize + im = im.resize((112, 32)) # (w,h) + im = np.array(im) + + # normalize + mean = np.array([127.5, 127.5, 127.5], dtype=np.float32) + std = np.array([127.5, 127.5, 127.5], dtype=np.float32) + img = im.copy().astype(np.float32) + mean = np.float64(mean.reshape(1, -1)) + stdinv = 1 / np.float64(std.reshape(1, -1)) + cv2.subtract(img, mean, img) # inplace + cv2.multiply(img, stdinv, img) # inplace + + # HWC-> CHW + img = img.transpose(2, 0, 1) + resize_img = img + + # add batch dim + resize_img = np.expand_dims(resize_img, 0) + + resize_img = np.ascontiguousarray(resize_img) + image_tensor = Tensor(resize_img) # 推理前需要转换为tensor的List,使用Tensor类来构建。 + image_tensor.to_device(DEVICE_ID) # !!!!!重要,需要转移至device侧,该函数单独执行 + image_tensor_list = [image_tensor] # 推理前需要转换为tensor的List + return image_tensor_list + + +def resize(img, height, width): + img = sdk.dvpp.resize(img, height, width) + img = img.get_tensor() + return img + + +def arr2char(inputs): + string = "" + for num in inputs: + if num < BLANK: + string += LABEL_DICT[num] + return string + + +def ctc_post_process(y_pred, blank): + indices = [] + seq_len, batch_size, _ = y_pred.shape + indices = y_pred.argmax(axis=2) + lens = [seq_len] * batch_size + pred_labels = [] + for i in range(batch_size): + idx = indices[:, i] + last_idx = blank + pred_label = [] + for j in range(lens[i]): + cur_idx = idx[j] + if cur_idx not in [last_idx, blank]: + pred_label.append(cur_idx) + last_idx = cur_idx + pred_labels.append(pred_label) + str_results = [] + for pred in pred_labels: + pred = arr2char(pred) + str_results.append(pred) + return str_results + + +def img_show(img, pred): + img = Image.open(img) + canvas = Image.new('RGB', (img.size[0], int(img.size[1] * 1.5)), + (255, 255, 255)) + draw = ImageDraw.Draw(canvas) + label_size = draw.textsize(pred, font) + text_origin = np.array( + [int((img.size[0] - label_size[0]) / 2), img.size[1] + 1]) + draw.text(text_origin, pred, fill='red', font=font) + canvas.paste(img, (0, 0)) + canvas.save(SAVE_PATH) + canvas.show() + + +try: + LABEL_DICT = "" + if not os.path.exists(LABEL_DICT_PATH): + print("The input dictionary path is empty!!!") + 
print("plz place the dictionary in ./Overlap-CRNN/")
+        exit()
+    # 使用 with 语句按行读入字典,得到字符列表(每行一个字符)
+    with open(LABEL_DICT_PATH, 'r') as r_dict:
+        LABEL_DICT = r_dict.read().splitlines()
+    infer()
+
+except Exception as e:
+    print(e)
diff --git "a/contrib/Overlap-CRNN/\346\265\201\347\250\213\345\233\276.png" "b/contrib/Overlap-CRNN/\346\265\201\347\250\213\345\233\276.png"
new file mode 100644
index 0000000000000000000000000000000000000000..b624fb4a3f6f51fd6ac2fd9633811d5e25eca5cd
Binary files /dev/null and "b/contrib/Overlap-CRNN/\346\265\201\347\250\213\345\233\276.png" differ
diff --git "a/contrib/Overlap-CRNN/\346\265\213\350\257\225\347\273\223\346\236\234.png" "b/contrib/Overlap-CRNN/\346\265\213\350\257\225\347\273\223\346\236\234.png"
new file mode 100644
index 0000000000000000000000000000000000000000..4b3f205ecf47025cee11ebf5564fdf04978a2dbf
Binary files /dev/null and "b/contrib/Overlap-CRNN/\346\265\213\350\257\225\347\273\223\346\236\234.png" differ
diff --git a/contrib/VCOD_SLTNet/README.md b/contrib/VCOD_SLTNet/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9bf7f6114f31e937e072678189662d3cabed8ff9
--- /dev/null
+++ b/contrib/VCOD_SLTNet/README.md
@@ -0,0 +1,276 @@
+# 视频伪装物体检测
+
+## 1 介绍
+
+基于 MindX SDK 实现 SLT-Net 模型的推理,在 MoCA-Mask 数据集上 Sm 指标大于 0.6。输入连续几帧伪装物体的视频序列,输出伪装物体掩膜 Mask 图。
+
+
+### 1.1 支持的产品
+
+支持昇腾310芯片
+
+
+### 1.2 支持的版本
+
+支持的SDK版本:SDK3.0 RC2
+
+版本号查询方法,在Atlas产品环境下,运行命令:
+
+```
+npu-smi info
+```
+
+
+### 1.3 软件方案介绍
+
+本方案中,先通过 `torch2onnx.py` 脚本将 PyTorch 版本的伪装视频物体检测模型 SLT-Net 转换为 onnx 模型;然后通过 `inference.py` 脚本调用昇腾 om 模型,对输入视频帧进行图像处理,最终生成视频伪装物体的掩膜 Mask 图。
+
+
+### 1.4 代码目录结构与说明
+
+本sample工程名称为 VCOD_SLTNet,工程目录如下所示:
+
+```
+──VCOD_SLTNet
+    ├── flowchart.jpeg
+    ├── inference.py  # 推理文件
+    ├── torch2onnx.py # 模型转换脚本
+    └── README.md
+```
+
+
+### 1.5 技术实现流程图
+
+![Flowchart](./flowchart.jpeg)
+
+图1 视频伪装物体检测流程图
+
+
+### 1.6 特性及适用场景
+
+对于伪装视频数据的分割任务均适用,输入视频需要转换为图片序列输入到模型中,具体可以参考 MoCA 数据格式与目录结构(如下所示),详见 [SLT-Net](https://xueliancheng.github.io/SLT-Net-project/) 与 [MoCA 数据集主页](https://www.robots.ox.ac.uk/~vgg/data/MoCA/)。
+
+
+```
+--data
+    └── TestDataset_per_sq          # 测试数据集
+        ├── flower_crab_spider_1    # 不同场景
+            ├── GT                  # Ground Truth
+                ├── 00000.png
+                ├── .....
+            └── Imgs                # 输入图片序列
+                ├── 00000.jpg
+                ├── .....
+        ......
+
+```
+
+
+## 2 环境依赖
+
+环境依赖软件和版本如下表:
+
+| 软件名称 | 版本 |
+| -------- | ------ |
+| MindX SDK | mxVision-3.0.RC2 |
+| Python | 3.9.2 |
+| CANN | 5.1RC2 |
+| PyTorch | 1.12.1 |
+| numpy | 1.21.5 |
+| imageio | 2.22.3 |
+| Pillow | 9.3.0 |
+| cv2 | 4.5.5 |
+| timm | 0.4.12 |
+| tqdm | 4.64.1 |
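+
+在编译运行项目前,同样需要设置 MindX SDK 与 CANN 的环境变量(与本仓库其他样例相同;以下路径为示例,请按实际安装位置替换):
+
+```
+. ${sdk_path}/set_env.sh
+. ${ascend_toolkit_path}/set_env.sh
+```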
+
+
+## 3. 数据准备
+
+### 3.1 准备相关文件
+
+1、SLT-Net代码包准备
+
+点击访问 [SLT-Net](https://github.com/XuelianCheng/SLT-Net) 并下载 SLT-Net-master.zip 代码压缩包,上传服务器并解压得到“SLT-Net-master”目录及文件;
+
+2、SLT-Net模型文件准备
+
+方法一:通过访问 [SLT-Net 模型官方链接](https://drive.google.com/file/d/1_u4dEdxM4AKuuh6EcWHAlo8EtR7e8q5v/view) 下载模型压缩包(注意,需要访问 Google Drive),解压后将 `Net_epoch_MoCA_short_term_pseudo.pth` 模型拷贝至 `SLT-Net-master` 目录下;
+
+方法二:下载 [models.zip 备份模型压缩包](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/sltnet/models.zip) 并解压获得 `sltnet.pth`、`sltnet.onnx`、`sltnet.om` 三个模型文件,将 `sltnet.pth` 模型拷贝至 `SLT-Net-master` 目录下;
+
+3、数据集准备
+
+通过访问 [MoCA 官方链接](https://xueliancheng.github.io/SLT-Net-project/) 下载 `MoCA_Video` 数据集,或者通过 [数据集备份链接](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/sltnet/MoCA_Video.zip) 下载 `MoCA_Video.zip` 数据集压缩包并解压;
+
+
+### 3.2 模型转换
+
+1、SLT-Net代码预处理
+
+进入 `SLT-Net-master/lib` 目录下,对 `__init__.py`、`short_term_model.py`、`pvtv2_afterTEM.py` 三个文件做以下修改:
+
+1)将 `__init__.py` 文件中长期模型的 import 注释掉:
+
+```
+from .short_term_model import VideoModel as VideoModel_pvtv2
+# from .long_term_model import VideoModel as VideoModel_long_term
+```
+
+注:因为长期模型依赖 CUDA,并且需要在 CUDA 平台进行编译,而本项目基于 MindX SDK 实现,因此使用短期模型。并且,短期模型的评价指标满足预期。
+
+
+2)修改 `short_term_model.py` 文件中的如下代码:
+
+将 `forward` 函数的输入处理修改为:
+
+```
+def forward(self, x):
+    image1, image2, image3 = x[:, :3], x[:, 3:6], x[:, 6:]  # 替换之前的 image1, image2, image3 = x[0],x[1],x[2]
+    fmap1=self.backbone.feat_net(image1)
+    fmap2=self.backbone.feat_net(image2)
+    fmap3=self.backbone.feat_net(image3)
+```
+
+将 `ImageModel` 的初始化修改为(固定指定图片大小):
+
+```
+    def __init__(self, args):
+        super(ImageModel, self).__init__()
+        self.args = args
+        # self.backbone = Network(pvtv2_pretrained=self.args.pvtv2_pretrained, imgsize=self.args.trainsize)
+        self.backbone = Network(pvtv2_pretrained=self.args.pvtv2_pretrained, imgsize=352)  # 指定图片大小
+
+    ....
+
+        # self.backbone = Network(pvtv2_pretrained=False, imgsize=self.args.trainsize)
+        self.backbone = Network(pvtv2_pretrained=False, imgsize=352)  # 指定图片大小
+        if self.args.pretrained_cod10k is not None:
+            self.load_backbone(self.args.pretrained_cod10k )
+```
+
+并删除其中的以下代码:
+
+```
+if self.args.pretrained_cod10k is not None:
+    self.load_backbone(self.args.pretrained_cod10k )
+```
+
+
+3)将 `pvtv2_afterTEM.py` 文件中 mmseg/mmcv 相关的 import 注释掉:
+
+```
+from timm.models import create_model
+#from mmseg.models import build_segmentor
+#from mmcv import ConfigDict
+import pdb
+```
+
+
+修改“SLT-Net-master/mypath.py”文件如下:
+
+```
+elif dataset == 'MoCA':
+    return './dataset/MoCA-Mask/'    # 将此处路径修改指定为“MoCA_Video”目录的相对路径
+```
+
+
+可参考已经完成修改的 [SLT_Net_MindXsdk_torch](https://github.com/shuowang-ai/SLT_Net_MindXsdk_torch),也可直接使用该项目进行下面的 onnx 模型转换操作,替代以上步骤。
+
+
+2、模型转换
+
+步骤一、pth模型转onnx模型
+
+将 `VCOD_SLTNet` 代码包中的 `torch2onnx.py` 脚本拷贝至 `SLT-Net-master` 目录下,并在 `SLT-Net-master` 目录下执行以下命令将 pth 模型转换成 onnx 模型:
+
+```
+python torch2onnx.py --pth_path ${pth模型文件路径} --onnx_path ./sltnet.onnx
+```
+
+参数说明:
+
+pth_path:pth模型文件名称及所在路径
+
+onnx_path:生成输出的onnx模型文件
+
+
+注意,timm 的版本为 `0.4.12`,其他版本可能有兼容性问题。
+
+
+步骤二、简化onnx文件(可选操作)
+
+```
+python -m onnxsim --input-shape="1,9,352,352" --dynamic-input-shape sltnet.onnx sltnet_sim.onnx
+```
+
+步骤三、onnx模型转om模型
+
+```
+atc --framework=5 --model=sltnet.onnx --output=sltnet --input_shape="image:1,9,352,352" --soc_version=Ascend310 --log=error
+```
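+
+转换完成后,可用如下最小脚本自检 om 模型能否被正常加载(示意代码,模型路径与 device id 为示例值,请按实际情况修改):
+
+```python
+from mindx.sdk.base import Model
+
+# 加载转换得到的 om 模型;第二个参数 0 为 device id
+model = Model("./sltnet.om", 0)
+print("om model loaded")
+```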
+
+注意:
+
+1. 若想使用转换好的onnx模型或om模型,可通过下载 [models.zip备份模型压缩包](https://mindx.sdk.obs.cn-north-4.myhuaweicloud.com/mindxsdk-referenceapps%20/contrib/sltnet/models.zip) 解压获得转换好的 onnx 模型或 om 模型。
+
+2. pth模型转onnx模型,onnx模型转om模型,均可能花费约1小时,视不同运行环境而定。如无报错,请耐心等待。
+
+
+## 4. 运行推理
+
+
+使用如下命令,运行 `inference.py` 脚本:
+
+```
+python inference.py --datapath ${MoCA_Video数据集路径} --save_root ./results/ --om_path ./sltnet.om --testsize 352 --device_id 0
+```
+
+参数说明:
+
+datapath:下载数据以后,目录中 `TestDataset_per_sq` 的上一级目录
+
+save_root:结果保存路径
+
+om_path:om 模型路径
+
+testsize:图片 resize 的大小,当前固定为 352
+
+device_id:设备编号
+
+
+注意,该脚本无需放入修改的 SLT-Net 目录,在任意位置均可执行,只需设置好上述参数即可。
+
+
+运行输出如下:
+
+```
+  0%|          | 0/713 [00:00<?, ?it/s]>  ./results/arctic_fox/Pred/00000.png
+  0%|▏         | 1/713 [00:00<10:31,  1.13it/s]>  ./results/arctic_fox/Pred/00005.png
+  0%|▎         | 2/713 [00:01<09:01,  1.31it/s]>  ./results/arctic_fox/Pred/00010.png
+  0%|▍         | 3/713 [00:02<08:30,  1.39it/s]>  ./results/arctic_fox/Pred/00015.png
+  1%|▌         | 4/713 [00:02<08:13,  1.44it/s]>  ./results/arctic_fox/Pred/00020.png
+```
+
+将展示剩余运行时间以及生成图片的路径。
+
+
+## 5. 精度评估
+
+点击访问 [SLT_Net_MindXsdk_torch](https://github.com/shuowang-ai/SLT_Net_MindXsdk_torch) 并下载 `SLT_Net_MindXsdk_torch-master.zip` 代码压缩包,上传服务器并解压获得 `SLT_Net_MindXsdk_torch-master` 目录及相关文件;
+
+进入 `SLT_Net_MindXsdk_torch-master` 目录,修改 `eval_python/run_eval.py` 脚本中的 `gt_dir` 为本地的 `MoCA_Video/TestDataset_per_sq/` 目录的绝对路径,`pred_dir` 为预测结果目录的绝对路径,并执行以下命令进行精度评估:
+
+```
+python eval_python/run_eval.py
+```
+
+完成评估后的结果如下:
+
+```
+{'Smeasure': 0.6539, 'wFmeasure': 0.3245, 'MAE': 0.0161, 'adpEm': 0.6329, 'meanEm': 0.7229, 'maxEm': 0.7554, 'adpFm': 0.3025, 'meanFm': 0.3577, 'maxFm': 0.3738}
+```
+
+评测结果高于交付所要求的 Smeasure 0.6 的指标。
+
+注:评估还可参考基于 [MATLAB](https://github.com/XuelianCheng/SLT-Net/tree/master/eval) 的 SLT-Net 评测代码,或基于 Python 的 [PySODEvalToolkit](https://github.com/lartpang/PySODEvalToolkit) 评测代码。
diff --git a/contrib/VCOD_SLTNet/flowchart.jpeg b/contrib/VCOD_SLTNet/flowchart.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..c1ce730a6d3765594a6a09e9ae00338c4252e4e2
Binary files /dev/null and b/contrib/VCOD_SLTNet/flowchart.jpeg differ
diff --git a/contrib/VCOD_SLTNet/inference.py b/contrib/VCOD_SLTNet/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7be46b8d449c0226a3234f883877d4fe7ea924a
--- /dev/null
+++ b/contrib/VCOD_SLTNet/inference.py
@@ -0,0 +1,145 @@
+# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
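+
+# 脚本整体流程(概述):按场景读取连续三帧图像,缩放至 352x352 并做
+# ImageNet 均值/方差归一化,在通道维拼接为 1x9x352x352 的输入张量送入
+# om 模型推理;输出经 Sigmoid 与双线性插值还原到原图大小后保存为掩膜图。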
+
+import os
+import sys
+import argparse
+from glob import glob
+from tqdm import tqdm
+import numpy as np
+import imageio
+import cv2
+from PIL import Image
+import mindspore as ms
+import mindspore.nn as nn
+import mindspore.ops as ops
+from mindx.sdk.base import Tensor, Model
+
+
+class TestDataset:
+    def __init__(self, datapath, testsize):
+        self.testsize = testsize
+        self.image_list = []
+        self.gt_list = []
+        self.extra_info = []
+
+        img_format = '*.jpg'
+        data_root = os.path.join(datapath, 'TestDataset_per_sq')
+
+        self.mean = [0.485, 0.456, 0.406]
+        self.std = [0.229, 0.224, 0.225]
+
+        for scene_i in os.listdir(os.path.join(data_root)):
+            images_i = sorted(glob(os.path.join(data_root, scene_i, 'Imgs', img_format)))
+            gt_list = sorted(glob(os.path.join(data_root, scene_i, 'GT', '*.png')))
+
+            # 以滑动窗口方式取连续三帧作为一组输入
+            for ii in range(len(images_i) - 2):
+                self.extra_info += [(scene_i, ii)]
+                self.gt_list += [gt_list[ii]]
+                self.image_list += [[images_i[ii],
+                                     images_i[ii + 1],
+                                     images_i[ii + 2]]]
+
+        self.index = 0
+        self.size = len(self.gt_list)
+
+    def __len__(self):
+        return self.size
+
+    @staticmethod
+    def rgb_loader(path):
+        image_bgr = cv2.imread(path)
+        image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
+        return image_rgb
+
+    @staticmethod
+    def binary_loader(path):
+        with open(path, 'rb') as f:
+            img = Image.open(f)
+            return img.convert('L')
+
+    def load_data(self):
+        imgs = []
+        names = []
+
+        for idx in range(len(self.image_list[self.index])):
+            imgs += [self.rgb_loader(self.image_list[self.index][idx])]
+            names += [self.image_list[self.index][idx].split('/')[-1]]
+
+            imgs[idx] = cv2.resize(imgs[idx], (self.testsize, self.testsize))
+            imgs[idx] = np.array([imgs[idx]])
+            imgs[idx] = imgs[idx].transpose(0, 3, 1, 2).astype(np.float32) / 255.0
+            imgs[idx] = (imgs[idx] - np.asarray(self.mean)[None, :, None, None]) / \
+                np.asarray(self.std)[None, :, None, None]
+
+        scenes = self.image_list[self.index][0].split('/')[-3]
+        gt_i = self.binary_loader(self.gt_list[self.index])
+
+        self.index += 1
+        self.index = self.index % self.size
+
+        return {'imgs': imgs, 'gt': gt_i, 'names': names, 'scenes': scenes}
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--datapath', type=str, default='./data/')
+    parser.add_argument('--save_root', type=str, default='./results/')
+    parser.add_argument('--om_path', type=str, default='sltnet.om')
+    parser.add_argument('--testsize', type=int, default=352)
+    parser.add_argument('--device_id', type=int, default=0)
+    opt = parser.parse_args()
+
+    if not opt.om_path.endswith('om'):
+        print("Please check the correctness of om file:", opt.om_path)
+        sys.exit()
+
+    if 'TestDataset_per_sq' not in os.listdir(opt.datapath):
+        print("Please check the correctness of dataset path:", opt.datapath)
+        sys.exit()
+
+    test_loader = TestDataset(datapath=opt.datapath, testsize=opt.testsize)
+
+    model = Model(opt.om_path, opt.device_id)
+
+    for i in tqdm(range(test_loader.size)):
+        dataset = test_loader.load_data()
+        images, gt, name, scene = dataset.get('imgs'), dataset.get('gt'), \
+            dataset.get('names'), dataset.get('scenes')
+        gt = np.asarray(gt, np.float32)
+        save_path = opt.save_root + scene + '/Pred/'
+        if not os.path.exists(save_path):
+            os.makedirs(save_path)
+
+        # 三帧在通道维拼接为 1x9xHxW 的模型输入
+        model_in = np.concatenate(images, axis=1)
+        model_in = np.ascontiguousarray(model_in, dtype=np.float32)
+        model_in = Tensor(model_in)
+        model_in.to_device(opt.device_id)
+        out = model.infer(model_in)
+        out = out[0]
+        out.to_host()
+        res = np.array(out)
+
+        res = 
ms.Tensor(res) + res = ops.Sigmoid()(res) + res = nn.ResizeBilinear()(res, (gt.shape[0], gt.shape[1])) + res = (res - res.min()) / (res.max() - res.min() + 1e-8) * 255 + res = res.astype('uint8') + res = res.asnumpy().squeeze() + + name = name[0].replace('jpg', 'png') + fp = save_path + name + imageio.imwrite(fp, res) + print('> ', fp) diff --git a/contrib/VCOD_SLTNet/torch2onnx.py b/contrib/VCOD_SLTNet/torch2onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..47321b2c5e063b8171f988098913db06e8b96408 --- /dev/null +++ b/contrib/VCOD_SLTNet/torch2onnx.py @@ -0,0 +1,36 @@ +# Copyright(C) 2022. Huawei Technologies Co.,Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import torch +from lib import VideoModel_pvtv2 as Network + + +parser = argparse.ArgumentParser() +parser.add_argument('--pth_path', type=str, default='./snapshot/Net_epoch_MoCA_short_term_pseudo.pth') +parser.add_argument('--onnx_path', type=str, default='./sltnet.onnx') + +opt = parser.parse_args() + +model = Network(opt) + +model.load_state_dict(torch.load(opt.pth_path, map_location=torch.device('cpu'))) +model.eval() + +input_names = ["image"] +output_names = ["pred"] +dynamic_axes = {'image': {0: '-1'}, 'pred': {0: '-1'}} +dummy_input = torch.randn(1, 9, 352, 352) +torch.onnx.export(model, dummy_input, opt.onnx_path, input_names=input_names, \ + dynamic_axes=dynamic_axes, output_names=output_names, opset_version=11, verbose=True) diff --git a/text.txt b/text.txt deleted file mode 100644 index 58c9bdf9d017fcd178dc8c073cbfcbb7ff240d6c..0000000000000000000000000000000000000000 --- a/text.txt +++ /dev/null @@ -1 +0,0 @@ -111