diff --git a/contrib/Burpee_Detection/.idea/.gitignore b/contrib/Burpee_Detection/.idea/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..26d33521af10bcc7fd8cea344038eaaeb78d0ef5
--- /dev/null
+++ b/contrib/Burpee_Detection/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/contrib/Burpee_Detection/.idea/Burpee_Detection.iml b/contrib/Burpee_Detection/.idea/Burpee_Detection.iml
new file mode 100644
index 0000000000000000000000000000000000000000..8b8c395472a5a6b3598af42086e590417ace9933
--- /dev/null
+++ b/contrib/Burpee_Detection/.idea/Burpee_Detection.iml
@@ -0,0 +1,12 @@
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/contrib/Burpee_Detection/.idea/inspectionProfiles/Project_Default.xml b/contrib/Burpee_Detection/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 0000000000000000000000000000000000000000..49c79fe47d8b97728bb91c5624b6d68582acaa69
--- /dev/null
+++ b/contrib/Burpee_Detection/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,19 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/contrib/Burpee_Detection/.idea/inspectionProfiles/profiles_settings.xml b/contrib/Burpee_Detection/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 0000000000000000000000000000000000000000..105ce2da2d6447d11dfe32bfb846c3d5b199fc99
--- /dev/null
+++ b/contrib/Burpee_Detection/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
+
\ No newline at end of file
diff --git a/contrib/Burpee_Detection/.idea/misc.xml b/contrib/Burpee_Detection/.idea/misc.xml
new file mode 100644
index 0000000000000000000000000000000000000000..8404982a1bfa42ac4a3204a53c6c73100c26c3e2
--- /dev/null
+++ b/contrib/Burpee_Detection/.idea/misc.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/contrib/Burpee_Detection/.idea/modules.xml b/contrib/Burpee_Detection/.idea/modules.xml
new file mode 100644
index 0000000000000000000000000000000000000000..97ad35e7acc49bc27f11909b60f774bf251b6b24
--- /dev/null
+++ b/contrib/Burpee_Detection/.idea/modules.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/contrib/Burpee_Detection/App_burpee_detection/App_main.py b/contrib/Burpee_Detection/App_burpee_detection/App_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2a33952ea5b10c2e30a73079e17b126ea74cafb
--- /dev/null
+++ b/contrib/Burpee_Detection/App_burpee_detection/App_main.py
@@ -0,0 +1,205 @@
+# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import shutil
+import json
+import os
+import sys
+import logging
+import cv2
+# import time
+
+import MxpiDataType_pb2 as MxpiDataType
+from StreamManagerApi import StreamManagerApi, MxDataInput, StringVector
+
+from qcloud_cos import CosConfig
+from qcloud_cos import CosS3Client
+
+
+class ostream:
+ def __init__(self, file):
+ self.file = file
+
+ def __lshift__(self, obj):
+ self.file.write(str(obj))
+ return self
+
+
+cout = ostream(sys.stdout)
+endl = '\n'  # newline escape — '/n' was a literal slash + 'n', so output never line-broke
+
+# The following belongs to the SDK Process
+streamManagerApi = StreamManagerApi()
+# Init stream manager
+ret = streamManagerApi.InitManager()
+if ret != 0:
+ cout << 'Failed to init Stream manager, ret=' << str(ret) << endl
+ exit()
+# Mark start time
+# start = time.time()
+# Create streams by pipeline config file
+# load pipline
+with open("../pipeline/burpee_detection_p.pipeline", 'rb') as f:
+ pipelineStr = f.read()
+ret = streamManagerApi.CreateMultipleStreams(pipelineStr)
+# Print error message
+if ret != 0:
+ cout << 'Failed to create Stream, ret=' << str(ret) << endl
+ exit()
+
+# 正常情况日志级别使用INFO,需要定位时可以修改为DEBUG,此时SDK会打印和服务端的通信信息
+logging.basicConfig(level=logging.INFO, stream=sys.stdout)
+
+# 1. 设置用户属性, 包括 secret_id, secret_key, region等。App_id 已在CosConfig中移除,请在参数 Bucket 中带上 App_id。Bucket 由 BucketName-App_id 组成
+SECRET_ID = os.environ.get('COS_SECRET_ID')  # SecretId from environment; never commit credentials to source control
+SECRET_KEY = os.environ.get('COS_SECRET_KEY')  # SecretKey from environment; the previously hard-coded keys must be revoked/rotated
+REGION = 'ap-shanghai' # 替换为用户的 region,已创建桶归属的region可在https://console.cloud.tencent.com/cos5/bucket查看
+# COS支持的所有region列表参见https://cloud.tencent.com/document/product/436/6224
+TOKEN = None # 如果使用永久密钥不需填入token,若使用临时密钥需填入,临时密钥生成和使用见https://cloud.tencent.com/document/product/436/14048
+SCHEME = 'https' # 指定使用 http/https 协议来访问 COS,默认为 https,可不填
+
+CONFIG = CosConfig(Region=REGION, SecretId=SECRET_ID,
+ SecretKey=SECRET_KEY, Token=TOKEN, Scheme=SCHEME)
+CLIENT = CosS3Client(CONFIG)
+
+IMG_NUM = 0
+ACTION = ""
+ACTION_CNT = 0
+STATE = 0
+INPUT_COUNT = 0
+ERR_FILE = False
+FPS = 1
+INPUT_PATH = "./input/"
+RESULT_PATH = 'result.txt'
+
+# Release the input
+if os.path.exists(INPUT_PATH):
+ shutil.rmtree(INPUT_PATH)
+
+while True:
+
+ # Check the state of app
+ RESPONSE = CLIENT.list_objects(Bucket='burpee-1312708737',
+ Prefix='state')
+
+ if len(RESPONSE['Contents']) == 2:
+ IMG_NUM = 0
+ ACTION_CNT = 0
+ STATE = 0
+ INPUT_COUNT = 0
+ if os.path.exists(INPUT_PATH):
+ shutil.rmtree(INPUT_PATH)
+ continue
+
+ # Check the number of input images
+ RESPONSE = CLIENT.list_objects(Bucket='burpee-1312708737',
+ Prefix='input')
+
+ if len(RESPONSE['Contents']) < IMG_NUM + 2:
+ cout << 'wait for inputs' << endl
+ continue
+ # Check the target input image
+ RESPONSE = CLIENT.object_exists(Bucket='burpee-1312708737',
+ Key='input/img' + str(IMG_NUM) + '.jpg')
+
+ if not RESPONSE:
+ cout << 'no such file' << endl
+ continue
+
+ # Download the data of input
+ if os.path.exists(INPUT_PATH) != 1:
+ os.makedirs("./input/")
+
+ RESPONSE = CLIENT.get_object(Bucket='burpee-1312708737',
+ Key='input/img' + str(IMG_NUM) + '.jpg')
+    RESPONSE['Body'].get_stream_to_file('./input/img' + str(IMG_NUM) + '.jpg')  # save under INPUT_PATH ('./input/'), not filesystem root
+ cout << 'Get the input successfully' << endl
+
+ # Input object of streams -- detection target
+ IMG_PATH = os.path.join(INPUT_PATH, 'img' + str(IMG_NUM) + '.jpg')
+
+ DATA_INPUT = MxDataInput()
+ if os.path.exists(IMG_PATH) != 1:
+ cout << 'The image does not exist.' << endl
+
+ with open(IMG_PATH, 'rb') as f:
+ DATA_INPUT.data = f.read()
+
+ STREAM_NAME = b'detection'
+ IN_PLUGIN_ID = 0
+ # Send data to streams by SendDataWithUniqueId()
+ UNIQUEID = streamManagerApi.SendDataWithUniqueId(STREAM_NAME, IN_PLUGIN_ID, DATA_INPUT)
+
+ if UNIQUEID < 0:
+ cout << 'Failed to send data to stream.' << endl
+ exit()
+
+ # Get results from streams by GetResultWithUniqueId()
+ INFER_RESULT = streamManagerApi.GetResultWithUniqueId(STREAM_NAME, UNIQUEID, 3000)
+ if INFER_RESULT.errorCode != 0:
+ cout << 'GetResultWithUniqueId error. errorCode=' << INFER_RESULT.errorCode \
+ << ', errorMsg=' << INFER_RESULT.data.decode() << endl
+ exit()
+
+ # Get Object class
+ RESULTS = json.loads(INFER_RESULT.data.decode())
+ IMG = cv2.imread(IMG_PATH)
+ IMG_NUM = IMG_NUM + 1
+
+ BEST_CONFIDENCE = 0
+ KEY = "MxpiObject"
+
+ if KEY not in RESULTS.keys():
+ continue
+
+ # Save the best confidence and its information
+ for BBOX in RESULTS['MxpiObject']:
+ if round(BBOX['classVec'][0]['confidence'], 4) >= BEST_CONFIDENCE:
+ ACTION = BBOX['classVec'][0]['className']
+ BEST_CONFIDENCE = round(BBOX['classVec'][0]['confidence'], 4)
+
+ # State change
+ if STATE == 0:
+ if ACTION == "crouch":
+ STATE = 1
+ elif STATE == 1:
+ if ACTION == "support":
+ STATE = 2
+ elif STATE == 2:
+ if ACTION == "crouch":
+ STATE = 3
+ elif STATE == 3:
+ if ACTION == "jump":
+ STATE = 0
+ ACTION_CNT = ACTION_CNT + 1
+
+ # Save txt for results
+ FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL
+ if os.path.exists(RESULT_PATH):
+ os.remove(RESULT_PATH)
+ with os.fdopen(os.open('result.txt', FLAGS, 0o755), 'w') as f:
+ f.write(str(ACTION_CNT))
+ # Upload the result file
+ with open('result.txt', 'rb') as fp:
+ RESPONSE = CLIENT.put_object(
+ Bucket='burpee-1312708737',
+ Body=fp,
+ Key='result/result.txt',
+ StorageClass='STANDARD',
+ EnableMD5=False
+ )
+ cout << 'upload the result file successfully!!!' << endl
+
+# Destroy All Streams
+streamManagerApi.DestroyAllStreams()
diff --git a/contrib/Burpee_Detection/App_burpee_detection/run.sh b/contrib/Burpee_Detection/App_burpee_detection/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a5ff5ca62c5a567b455701f67b60d42bb4ca00cd
--- /dev/null
+++ b/contrib/Burpee_Detection/App_burpee_detection/run.sh
@@ -0,0 +1,19 @@
+# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# load envs
+source ../envs/env.sh
+
+# running inference process
+python3 App_main.py  # "python3.9.2" is a version number, not an interpreter executable name
diff --git a/contrib/Burpee_Detection/Pic_burpee_detection/Pic_main.py b/contrib/Burpee_Detection/Pic_burpee_detection/Pic_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c78562e62c78d0847ce246f281842965eea75f0
--- /dev/null
+++ b/contrib/Burpee_Detection/Pic_burpee_detection/Pic_main.py
@@ -0,0 +1,179 @@
+# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import shutil
+import json
+import os
+import time
+import cv2
+import sys
+
+import MxpiDataType_pb2 as MxpiDataType
+from StreamManagerApi import StreamManagerApi, MxDataInput, StringVector
+
+
+class ostream:
+ def __init__(self, file):
+ self.file = file
+
+ def __lshift__(self, obj):
+ self.file.write(str(obj))
+ return self
+
+
+cout = ostream(sys.stdout)
+endl = '\n'  # newline escape — '/n' was a literal slash + 'n', so output never line-broke
+
+# The following belongs to the SDK Process
+streamManagerApi = StreamManagerApi()
+# Init stream manager
+ret = streamManagerApi.InitManager()
+if ret != 0:
+ cout << 'Failed to init Stream manager, ret=' << str(ret) << endl
+ exit()
+# Mark start time
+start = time.time()
+# Create streams by pipeline config file
+# Load pipline
+with open("../pipeline/burpee_detection_p.pipeline", 'rb') as f:
+ PIPELINE_STR = f.read()
+ret = streamManagerApi.CreateMultipleStreams(PIPELINE_STR)
+# Print error message
+if ret != 0:
+ cout << 'Failed to create Stream, ret=' << str(ret) << endl
+ exit()
+
+DET_IMG_COUNT = 0 # the number of detected pictures
+
+# Init the directory of input and output
+INPUT_PATH = ["../data/images/test/"] # the path of input
+
+OUTPUT_PATH = ["./result_test/"] # the output path of txt file
+
+OUTPUT_PIC_PATH = ["./result_test_pic/"] # the output path of pictures
+
+for index, path in enumerate(INPUT_PATH):
+
+ RESULT_PATH = OUTPUT_PATH[index]
+
+ # Create the output directory
+ if os.path.exists(RESULT_PATH) != 1:
+ os.makedirs(RESULT_PATH)
+ else:
+ shutil.rmtree(RESULT_PATH)
+ os.makedirs(RESULT_PATH)
+
+ if os.path.exists(OUTPUT_PIC_PATH[index]) != 1:
+ os.makedirs(OUTPUT_PIC_PATH[index])
+ else:
+ shutil.rmtree(OUTPUT_PIC_PATH[index])
+ os.makedirs(OUTPUT_PIC_PATH[index])
+
+ # Input object of streams -- detection target
+ for item in os.listdir(path):
+ IMG_PATH = os.path.join(path, item)
+ cout << 'read file path:' << IMG_PATH << endl
+ IMG_NAME = os.path.splitext(item)[0]
+ IMG_TXT = RESULT_PATH + IMG_NAME + ".txt"
+ if os.path.exists(IMG_TXT):
+ os.remove(IMG_TXT)
+ DATA_INPUT = MxDataInput()
+ if os.path.exists(IMG_PATH) != 1:
+ cout << 'The image does not exist.' << endl
+ continue
+ with open(IMG_PATH, 'rb') as f:
+ DATA_INPUT.data = f.read()
+ STREAM_NAME = b'detection'
+ IN_PLUGIN_ID = 0
+ # Send data to streams by SendDataWithUniqueId()
+ UNIQUE_ID = streamManagerApi.SendDataWithUniqueId(STREAM_NAME, IN_PLUGIN_ID, DATA_INPUT)
+
+ if UNIQUE_ID < 0:
+ cout << 'Failed to send data to stream.' << endl
+ exit()
+
+ # Get results from streams by GetResultWithUniqueId()
+ INFER_RESULT = streamManagerApi.GetResultWithUniqueId(STREAM_NAME, UNIQUE_ID, 3000)
+ if INFER_RESULT.errorCode != 0:
+ cout << 'GetResultWithUniqueId error. errorCode=' << INFER_RESULT.errorCode \
+ << ', errorMsg=' << INFER_RESULT.data.decode() << endl
+ exit()
+
+ DET_IMG_COUNT = DET_IMG_COUNT + 1
+
+ # Get ObjectList
+ RESULTS = json.loads(INFER_RESULT.data.decode())
+
+ IMG = cv2.imread(IMG_PATH)
+ BBOXES = []
+ BEST_CONFIDENCE = 0
+ KEY = "MxpiObject"
+ if KEY not in RESULTS.keys():
+ continue
+ for BBOX in RESULTS['MxpiObject']:
+ BBOXES = {'x0': int(BBOX['x0']),
+ 'x1': int(BBOX['x1']),
+ 'y0': int(BBOX['y0']),
+ 'y1': int(BBOX['y1']),
+ 'confidence': round(BBOX['classVec'][0]['confidence'], 4),
+ 'text': BBOX['classVec'][0]['className']}
+
+ if BBOXES['confidence'] > BEST_CONFIDENCE:
+ L1 = []
+ # Convert the label as Yolo label
+ x_center = round((BBOXES['x1'] + BBOXES['x0']) * 0.5 / IMG.shape[1], 6)
+ y_center = round((BBOXES['y1'] + BBOXES['y0']) * 0.5 / IMG.shape[0], 6)
+ w_nor = round((BBOXES['x1'] - BBOXES['x0']) / IMG.shape[1], 6)
+ h_nor = round((BBOXES['y1'] - BBOXES['y0']) / IMG.shape[0], 6)
+ L1.append(x_center)
+ L1.append(y_center)
+ L1.append(w_nor)
+ L1.append(h_nor)
+ L1.append(BBOXES['confidence'])
+ L1.append(BBOXES['text'])
+ BEST_CONFIDENCE = BBOXES['confidence']
+ TEXT = "{}{}".format(str(BBOXES['confidence']), " ")
+ for CONTENT in BBOXES['text']:
+ TEXT += CONTENT
+ best_class = {'x0': int(BBOX['x0']),
+ 'x1': int(BBOX['x1']),
+ 'y0': int(BBOX['y0']),
+ 'y1': int(BBOX['y1']),
+ 'confidence': round(BBOX['classVec'][0]['confidence'], 4),
+ 'text': BBOX['classVec'][0]['className']}
+ # Draw rectangle and txt for visualization
+ cv2.putText(IMG, TEXT, (best_class['x0'] + 10, best_class['y0'] + 10), cv2.FONT_HERSHEY_SIMPLEX, 1.0,
+ (0, 100, 255), 2)
+ cv2.rectangle(IMG, (best_class['x0'], best_class['y0']), (best_class['x1'], best_class['y1']),
+ (255, 0, 0), 2)
+
+ # Save picture
+ originImgFile = OUTPUT_PIC_PATH[index] + IMG_NAME + '.jpg'
+ cv2.imwrite(originImgFile, IMG)
+
+ # Save txt for results
+ FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL
+ with os.fdopen(os.open(IMG_TXT, FLAGS, 0o755), 'w') as f:
+ CONTENT = '{} {} {} {} {} {}'.format(L1[5], L1[4], L1[0], L1[1], L1[2], L1[3])
+ f.write(CONTENT)
+ f.write('\n')
+
+end = time.time()
+cost_time = end - start
+# Mark spend time
+cout << 'Image count:' << DET_IMG_COUNT << endl
+cout << 'Spend time:' << cost_time << endl
+cout << 'fps:' << (DET_IMG_COUNT / cost_time) << endl
+# Destroy All Streams
+streamManagerApi.DestroyAllStreams()
diff --git a/contrib/Burpee_Detection/Pic_burpee_detection/map_calculate.py b/contrib/Burpee_Detection/Pic_burpee_detection/map_calculate.py
new file mode 100644
index 0000000000000000000000000000000000000000..3167e5dc9db5c64c8bdcceae145f20cf0f926906
--- /dev/null
+++ b/contrib/Burpee_Detection/Pic_burpee_detection/map_calculate.py
@@ -0,0 +1,397 @@
+"""
+ Copyright 2020 Huawei Technologies Co., Ltd
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ Typical usage example:
+"""
+
+import glob
+import os
+import sys
+import argparse
+import collections
+
+"""
+ 0,0 ------> x (width)
+ |
+ | (Left,Top)
+ | *_________
+ | | |
+ | |
+ y |_________|
+ (height) *
+ (Right,Bottom)
+"""
+
+MIN_OVERLAP = 0.5 # default value (defined in the PASCAL VOC2012 challenge)
+TOP_MARGIN = 0.15 # in percentage of the figure height
+BOTTOM_MARGIN = 0.05 # in percentage of the figure height
+
+
+def file_lines_to_list(path):
+ """
+ Convert the lines of a file to a list
+ """
+ # open txt file lines to a list
+ with open(path) as f:
+ content = f.readlines()
+ # remove whitespace characters like `\n` at the end of each line
+ content = [x.strip() for x in content]
+ return content
+
+
+def voc_ap(recall, precision):
+ """
+ Calculate the AP given the recall and precision array
+ 1) We calculate a version of the measured
+ precision/recall curve with precision monotonically decreasing
+ 2) We calculate the AP as the area
+ under this curve by numerical integration.
+ """
+ """
+ --- Official matlab code VOC2012---
+ m_recall=[0 ; recall ; 1];
+ m_precision=[0 ; precision ; 0];
+ for j=numeral(m_precision)-1:-1:1
+ m_precision(i)=max(m_precision(j),m_precision(j+1));
+ end
+ i=find(m_recall(2:end)~=m_recall(1:end-1))+1;
+ ap=sum((m_recall(i)-m_recall(i-1)).*m_precision(i));
+ """
+ recall.insert(0, 0.0) # insert 0.0 at beginning of list
+ recall.append(1.0) # insert 1.0 at end of list
+ m_recall = recall[:]
+ precision.insert(0, 0.0) # insert 0.0 at beginning of list
+ precision.append(0.0) # insert 0.0 at end of list
+ m_precision = precision[:]
+ """
+ This part makes the precision monotonically decreasing
+ (goes from the end to the beginning)
+ matlab: for i=numeral(m_precision)-1:-1:1
+ m_precision(i)=max(m_precision(i),m_precision(i+1));
+ """
+
+ for i in range(len(m_precision) - 2, -1, -1):
+ m_precision[i] = max(m_precision[i], m_precision[i + 1])
+ """
+ This part creates a list of indexes where the recall changes
+ matlab: i=find(m_recall(2:end)~=m_recall(1:end-1))+1;
+ """
+ i_list = []
+ for i in range(1, len(m_recall)):
+ if m_recall[i] != m_recall[i - 1]:
+ i_list.append(i) # if it was matlab would be i + 1
+ """
+ The Average Precision (AP) is the area under the curve
+ (numerical integration)
+ matlab: ap=sum((m_recall(i)-m_recall(i-1)).*m_precision(i));
+ """
+ ap = 0.0
+ for i in i_list:
+ ap += ((m_recall[i] - m_recall[i - 1]) * m_precision[i])
+ return ap, m_recall, m_precision
+
+
+def is_float_between_0_and_1(value):
+ """
+ check if the number is a float between 0.0 and 1.0
+ """
+ try:
+ val = float(value)
+ if 0.0 < val < 1.0:
+ return True
+ else:
+ return False
+ except ValueError:
+ return False
+
+
+def error(msg):
+ """
+ throw error and exit
+ """
+ print(msg)
+ sys.exit(0)
+
+
+def check_args(args):
+ """
+ check arguments
+ """
+ if not (os.path.exists(args.label_path)):
+ error("annotation file:{} does not exist.".format(args.label_path))
+
+ if not (os.path.exists(args.npu_txt_path)):
+ error("txt path:{} does not exist.".format(args.npu_txt_path))
+
+ if args.ignore is None:
+ args.ignore = []
+ return args
+
+
+def parse_line(txt_file, lines_list, bounding_boxes, counter_per_class, already_seen_classes):
+ """ parse line
+ :param txt_file:
+ :param lines_list:
+ :param bounding_boxes:
+ :param counter_per_class:
+ :param already_seen_classes:
+ :return: bounding_boxes, counter_per_class
+ """
+ for line in lines_list:
+ try:
+ class_name, left, top, right, bottom = line.split()
+ except ValueError:
+ error_msg = "Error: File " + txt_file + " in the wrong format.\n"
+ error_msg += " Expected: \n"
+ error_msg += " Received: " + line
+ error(error_msg)
+ if class_name in arg.ignore:
+ continue
+ bbox = left + " " + top + " " + right + " " + bottom
+ if class_name == '0':
+ class_name = 'crouch'
+ elif class_name == '1':
+ class_name = 'support'
+ elif class_name == '2':
+ class_name = 'jump'
+ bounding_boxes.append({"class_name": class_name, "bbox": bbox, "used": False})
+ counter_per_class[class_name] += 1
+
+ if class_name not in already_seen_classes:
+ already_seen_classes.append(class_name)
+ return bounding_boxes, counter_per_class
+
+
+def get_label_list(file_path):
+ """ get label list via file paths
+ :param file_path: label file path
+ :return: ret
+ map , include file_bbox, classes, n_classes, counter_per_class
+ """
+ files_list = glob.glob(file_path + '/*.txt')
+ if len(files_list) == 0:
+ error("Error: No ground-truth files found!")
+ files_list.sort()
+ # dictionary with counter per class
+ counter_per_class = collections.defaultdict(int)
+ file_bbox = {}
+
+ for txt_file in files_list:
+ file_id = txt_file.split(".txt", 1)[0]
+ file_id = os.path.basename(os.path.normpath(file_id))
+ # check if there is a correspondent detection-results file
+ temp_path = os.path.join(file_path, (file_id + ".txt"))
+ if not os.path.exists(temp_path):
+ error_msg = "Error. File not found: {}\n".format(temp_path)
+ error(error_msg)
+ lines_list = file_lines_to_list(txt_file)
+ # create ground-truth dictionary
+ bounding_boxes = []
+ already_seen_classes = []
+ boxes, counter_per_class = parse_line(txt_file, lines_list, bounding_boxes, counter_per_class,
+ already_seen_classes)
+ file_bbox[file_id] = boxes
+
+ classes = list(counter_per_class.keys())
+
+ # let's sort the classes alphabetically
+ classes = sorted(classes)
+ n_classes = len(classes)
+ ret = dict()
+ ret['file_bbox'] = file_bbox
+ ret['classes'] = classes
+ ret['n_classes'] = n_classes
+ ret['counter_per_class'] = counter_per_class
+ return ret
+
+
+def get_predict_list(file_path, gt_classes):
+ """ get predict list with file paths and class names
+ :param file_path: predict txt file path
+ :param gt_classes: class information
+ :return: class_bbox bbox of every class
+ """
+ dr_files_list = glob.glob(file_path + '/*.txt')
+ dr_files_list.sort()
+ class_bbox = {}
+ for class_index, class_name in enumerate(gt_classes):
+ bounding_boxes = []
+ for txt_file in dr_files_list:
+ # the first time it checks
+ # if all the corresponding ground-truth files exist
+ file_id = os.path.splitext(txt_file)[0]
+ file_id = os.path.basename(os.path.normpath(file_id))
+ lines = file_lines_to_list(txt_file)
+ for line in lines:
+ try:
+ sl = line.split()
+ tmp_class_name, confidence, left, top, right, bottom = sl
+ if float(confidence) < float(arg.threshold):
+ continue
+ except ValueError:
+ error_msg = "Error: File " + txt_file + " wrong format.\n"
+ error_msg += " Expected: \n"
+ error_msg += " Received: " + line
+ error(error_msg)
+ if tmp_class_name == class_name:
+ bbox = left + " " + top + " " + right + " " + bottom
+ bounding_boxes.append({"confidence": confidence, "file_id": file_id, "bbox": bbox})
+ # sort detection-results by decreasing confidence
+ bounding_boxes.sort(key=lambda x: float(x['confidence']), reverse=True)
+ class_bbox[class_name] = bounding_boxes
+ return class_bbox
+
+
+def calculate_pr(sum_ap, fp, tp, counter_per_class, class_name):
+ """
+ @description: calculate PR
+ @param sum_ap
+ @param fp
+ @param tp
+ @param counter_per_class
+ @param class_name
+ @return ret
+ map, include sum_AP, text, prec, rec
+ """
+ cumsum = 0
+ for idx, val in enumerate(fp):
+ fp[idx] += cumsum
+ cumsum += val
+ cumsum = 0
+ for idx, val in enumerate(tp):
+ tp[idx] += cumsum
+ cumsum += val
+ rec = tp[:]
+ for idx, val in enumerate(tp):
+ rec[idx] = float(tp[idx]) / counter_per_class[class_name]
+ prec = tp[:]
+ for idx, val in enumerate(tp):
+ prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
+
+ ap, mrec, mprec = voc_ap(rec[:], prec[:])
+ sum_ap += ap
+ text = "{0:.2f}%".format(ap * 100) + " = " + class_name + " AP "
+ ret = dict()
+ ret['sum_AP'] = sum_ap
+ ret['text'] = text
+ ret['prec'] = prec
+ ret['rec'] = rec
+ return ret
+
+
+def calculate_ap(output_file, gt_classes, labels, class_bbox, counter_per_class):
+ """
+ Calculate the AP for each class
+ :param output_file:
+ :param gt_classes: [80]
+ :param labels: {file_index:[{"class_name": class_name, "bbox": bbox, "used": False}]}
+ :param class_bbox: {class_name:[{"confidence": confidence,
+ "file_id": file_id, "bbox": bbox}]}
+ :return:
+ """
+ sum_ap = 0.0
+ FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL
+ writer = os.fdopen(os.open('result.txt', FLAGS, 0o755), 'w')
+ writer.write("# AP and precision/recall per class\n")
+ count_true_positives = {}
+ n_classes = len(gt_classes)
+ for class_index, class_name in enumerate(gt_classes):
+ count_true_positives[class_name] = 0
+ """
+ Load detection-results of that class
+ Assign detection-results to ground-truth objects
+ """
+ dr_data = class_bbox[class_name]
+ nd = len(dr_data)
+ tp = [0] * nd # creates an array of zeros of size nd
+ fp = [0] * nd
+ for idx, detection in enumerate(dr_data):
+ file_id = detection["file_id"]
+ ground_truth_data = labels[file_id]
+
+ ovmax = -1
+ gt_match = -1
+ # load detected object bounding-box
+ bb = [float(x) for x in detection["bbox"].split()]
+ for obj in ground_truth_data:
+ # look for a class_name match
+ if obj["class_name"] == class_name:
+ bbgt = [float(x) for x in obj["bbox"].split()]
+ bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]),
+ min(bb[2], bbgt[2]), min(bb[3], bbgt[3])]
+ iw = bi[2] - bi[0] + 1
+ ih = bi[3] - bi[1] + 1
+ if iw > 0 and ih > 0:
+ # compute overlap (IoU)
+ ua = (bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) + \
+ (bbgt[2] - bbgt[0] + 1) * \
+ (bbgt[3] - bbgt[1] + 1) - iw * ih
+ ov = iw * ih / ua
+ if ov > ovmax:
+ ovmax = ov
+ gt_match = obj
+
+ # set minimum overlap
+ min_overlap = MIN_OVERLAP
+ if ovmax >= min_overlap:
+ if "difficult" not in gt_match:
+ if not bool(gt_match["used"]):
+ # true positive
+ tp[idx] = 1
+ gt_match["used"] = True
+ count_true_positives[class_name] += 1
+ else:
+ # false positive (multiple detection)
+ fp[idx] = 1
+ else:
+ # false positive
+ fp[idx] = 1
+ # compute precision / recall
+ ret = calculate_pr(sum_ap, fp, tp, counter_per_class, class_name)
+        sum_ap = ret['sum_AP']  # must match the key written by calculate_pr() ('sum_AP'); 'sum_ap' raised KeyError
+ text = ret['text']
+ prec = ret['prec']
+ rec = ret['rec']
+ print(text)
+ rounded_prec = ['%.2f' % elem for elem in prec]
+ rounded_rec = ['%.2f' % elem for elem in rec]
+ writer.write(text + "\n Precision: " + str(rounded_prec) +
+ "\n Recall :" + str(rounded_rec) + "\n\n")
+ writer.write("\n# m_ap of all classes\n")
+ m_ap = sum_ap / n_classes
+ text = "m_ap = {0:.2f}%".format(m_ap * 100)
+ writer.write(text + "\n")
+ print(text)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser('mAP calculate')
+ parser.add_argument('-i', '--ignore', nargs='+', type=str,
+ help="ignore a list of classes.")
+ parser.add_argument('--label_path', default="../data/labels/test", help='the path of the label files')
+ parser.add_argument('--npu_txt_path', default="./result_test", help='the path of the predict result')
+ parser.add_argument('--output_file', default="./performance_test.txt", help='save result file')
+ parser.add_argument('--threshold', default=0, help='threshold of the object score')
+
+ arg = parser.parse_args()
+ arg = check_args(arg)
+
+ label_list = get_label_list(arg.label_path)
+ gt_file_bbox = label_list['file_bbox']
+ get_classes = label_list['classes']
+ gt_n_classes = label_list['n_classes']
+ count_per_class = label_list['counter_per_class']
+
+ predict_bbox = get_predict_list(arg.npu_txt_path, get_classes)
+ calculate_ap(arg.output_file, get_classes, gt_file_bbox, predict_bbox, count_per_class)
diff --git a/contrib/Burpee_Detection/Pic_burpee_detection/run.sh b/contrib/Burpee_Detection/Pic_burpee_detection/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0e0c76548d093c00741862c84795eb302805bac0
--- /dev/null
+++ b/contrib/Burpee_Detection/Pic_burpee_detection/run.sh
@@ -0,0 +1,19 @@
+# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# load envs
+source ../envs/env.sh
+
+# running inference process
+python3 Pic_main.py  # "python3.9.2" is a version number, not an interpreter executable name
diff --git a/contrib/Burpee_Detection/README.md b/contrib/Burpee_Detection/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..656f9cb81746784f9ab980d6056622e7e76ebb5b
--- /dev/null
+++ b/contrib/Burpee_Detection/README.md
@@ -0,0 +1,244 @@
+# 波比跳运动小程序
+
+## 1 介绍
+
+波比跳运动小程序基于 MindX SDK 开发,在 Ascend 310 芯片上进行目标检测,将检测结果保存成视频。项目主要流程:1)视频流程:通过 live555 服务器进行拉流输入视频,然后进行视频解码将 H.264 格式的视频解码为图片,图片缩放后经过模型推理进行波比跳检测,识别结果经过后处理后利用 cv 可视化识别框,以视频的形式输出,同时生成文本文件记录视频中完成的波比跳个数。2)小程序流程:通过微信小程序开发者将摄像头截取的图片数据上传至腾讯云桶中,然后后端将桶中数据下载至本地并将数据输入至流水线内,接着进行图片解码和缩放,最后经过模型推理进行波比跳检测,识别结果经过后处理后上传至腾讯云桶中,为前端小程序使用。
+
+### 1.1 支持的产品
+
+昇腾 310(推理)
+
+### 1.2 支持的版本
+
+本样例配套的 CANN 版本为 [5.0.4](https://gitee.com/link?target=https%3A%2F%2Fwww.hiascend.com%2Fsoftware%2Fcann%2Fcommercial),MindX SDK 版本为 [2.0.4](https://www.hiascend.com/software/Mindx-sdk)。
+
+MindX SDK 安装前准备可参考《用户指南》,[安装教程](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/quickStart/1-1安装SDK开发套件.md)
+
+### 1.3 软件方案介绍
+
+基于 MindX SDK 的波比跳运动小程序业务流程为:通过微信小程序开发者将摄像头截取的图片数据上传至腾讯云桶中,然后后端将桶中数据下载至本地并经`mxpi_appsrc`拉流插件输入,然后使用图片解码插件`mxpi_imagedecoder`将图片解码,再通过图像缩放插件`mxpi_imageresize`将图像缩放至满足检测模型要求的输入图像大小要求,缩放后的图像输入模型推理插件`mxpi_modelinfer`得到检测结果,根据检测结果改变波比跳识别的状态机状态,并更新波比跳识别个数,最后上传记录波比跳识别个数的txt文件到腾讯云桶中,以供小程序使用。
+
+### 1.4 代码目录结构与说明
+
+本 Sample 工程名称为 **Burpee_Detection**,工程目录如下图所示:
+
+```
+├── envs
+│ ├── atc_env.sh //atc转换需要的环境变量
+│ └── env.sh //基础环境变量
+├── readme_img //ReadMe图片资源
+│ ├── dataset.png
+│ ├── video.png
+│ ├── map.png
+│ ├── fps.png
+│ ├── app_flow.png
+│ └── video_flow.png
+├── data
+│ ├── images //数据集
+| | ├── big //大型图片数据集
+| | ├── dark //低光照环境数据集
+| | ├── empty //空数据集
+| | ├── multi //多人环境数据集
+| | ├── png //PNG数据集
+| | ├── forward //正面角度数据集
+| | └── test //测试集
+| └── labels
+| | └── test //测试集标签
+├── model
+│ ├── aipp_yolov5.cfg //atc转换时需要的aipp配置文件
+│ ├── atc.sh //atc运行脚本
+│ ├── yolov5.cfg //om模型后处理配置文件
+│ └── yolov5.names //om模型识别类别文件
+├── pipeline
+│ ├── burpee_detection_p.pipeline //图片识别使用的pipeline文件
+│ └── burpee_detection_v.pipeline //视频流识别使用的pipeline文件
+├── App_burpee_detection
+│ ├── App_main.py //识别,保存结果,并进行性能测试
+| └── run.sh //运行脚本
+├── Pic_burpee_detection
+│ ├── map_calculate.py //mAP计算(精度计算)
+│ ├── Pic_main.py //识别,保存结果,并进行性能测试
+| └── run.sh //运行脚本
+├── Video_burpee_detection
+│ ├── Video_main.py //识别,保存结果,并进行性能测试
+| └── run.sh //运行脚本
+└── README.md
+```
+
+### 1.5 技术实现流程图
+视频识别:
+
+
+
+小程序应用后端流程:
+
+
+
+## 2 环境依赖
+
+| 软件名称 | 版本 | 说明 | 获取方式 |
+| ------------------- | ----------- | ----------------------------- | ------------------------------------------------------------ |
+| MindX SDK | 2.0.4 | mxVision软件包 | [链接](https://gitee.com/link?target=https%3A%2F%2Fwww.hiascend.com%2Fsoftware%2Fmindx-sdk%2Fmxvision) |
+| ubuntu | 18.04.1 LTS | 操作系统 | Ubuntu官网获取 |
+| Ascend-CANN-toolkit | 5.0.4 | Ascend-cann-toolkit开发套件包 | [链接](https://gitee.com/link?target=https%3A%2F%2Fwww.hiascend.com%2Fsoftware%2Fcann%2Fcommercial) |
+
+在运行项目需要的环境变量如下,运行前不需要特别设置,环境依赖已经写入脚本中,脚本在`Burpee_Detection/envs`目录下:
+
+```bash
+# 基础环境变量——env.sh
+export MX_SDK_HOME="${SDK安装路径}/mxVision-2.0.4"
+export LD_LIBRARY_PATH="${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${LD_LIBRARY_PATH}"
+export PYTHONPATH="${MX_SDK_HOME}/python:${PYTHONPATH}"
+
+# ATC工具环境变量——atc_env.sh
+export install_path=/home/HwHiAiUser/Ascend/ascend-toolkit/latest
+export PATH=/usr/local/python3.7.5/bin:${install_path}/x86_64-linux/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
+export PYTHONPATH=${install_path}/x86_64-linux/atc/python/site-packages:${install_path}/x86_64-linux/atc/python/site-packages/auto_tune.egg/auto_tune:${install_path}/x86_64-linux/atc/python/site-packages/schedule_search.egg:$PYTHONPATH
+export LD_LIBRARY_PATH=${install_path}/x86_64-linux/atc/lib64:$LD_LIBRARY_PATH
+export ASCEND_OPP_PATH=${install_path}/opp
+```
+
+注:其中`${SDK安装路径}`替换为用户的SDK安装路径;`install_path`替换为ascend-toolkit开发套件包所在路径。`LD_LIBRARY_PATH`用以加载开发套件包中lib库。
+
+## 3 模型转换以及依赖安装
+
+本项目使用的模型是波比跳识别的模型。模型文件可以直接下载。
+
+### 3.1 模型转换
+
+使用模型转换工具 ATC 将 onnx 模型转换为 om 模型,模型转换工具相关介绍参考链接:[CANN 社区版(5.0.4)ATC 模型转换](https://support.huaweicloud.com/atctool-cann504alpha2infer/atlasatc_16_0001.html) 。
+
+步骤如下:
+
+- **步骤1** 下载`onnx`模型,请移动至`Burpee_Detection/model`目录下;若下载`om`模型文件,请跳过模型转换步骤。
+
+- **步骤2** 将`best.onnx`文件移动至`Burpee_Detection/model`目录下,然后运行model目录下的`atc.sh`
+
+ ```bash
+  cd model && bash atc.sh
+ ```
+
+ 执行该命令后会在当前文件夹下生成项目需要的模型文件
+
+ ```
+ ATC start working now, please wait for a moment.
+ ATC run success, welcome to the next use.
+ ```
+
+ 表示命令执行成功。
+
+### 3.2 准备
+
+按照第3小节**软件依赖**安装 live555 和 ffmpeg,按照 [Live555离线视频转RTSP说明文档](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/Live555离线视频转RTSP说明文档.md)将 mp4 视频转换为 H.264 格式。并将生成的 H.264 格式的视频上传到`live/mediaServer`目录下,然后修改`pipeline`目录下的`burpee_detection_v.pipeline`文件中`mxpi_rtspsrc0`的内容。
+
+```json
+"mxpi_rtspsrc0": {
+ "props": {
+ "rtspUrl":"rtsp://xxx.xxx.xxx.xxx:xxxx/xxx.264", // 修改为自己所使用的的服务器和文件名
+ },
+ "factory": "mxpi_rtspsrc",
+ "next": "mxpi_videodecoder0"
+}
+```
+
+## 4 运行与测试
+
+### 4.1 运行
+
+#### 4.1.1 视频
+
+- **步骤1** 按照第 2 小节 **环境依赖** 中的步骤设置环境变量。
+
+- **步骤2** 按照第 3 小节 **模型转换** 中的步骤获得 `om` 模型文件,放置在 `Burpee_Detection/model` 目录下。
+
+- **步骤3** 修改`burpee_detection_v.pipeline`中`mxpi_modelinfer0`中`postProcessLibPath`的值`${SDK安装路径}`为 MindX SDK 的安装路径
+
+- **步骤4** 按照 3 小节 **准备** 中的步骤创建rtsp流以实现本地视频的rtsp拉流操作。
+
+- **步骤5** 运行。在 `Burpee_Detection/Video_burpee_detection` 目录下执行命令:
+
+```bash
+bash run.sh
+```
+
+运行可视化结果会以`video_result.mp4`视频形式保存在`Burpee_Detection/Video_burpee_detection`目录下
+波比跳识别个数会以`result.txt`文件形式保存在`Burpee_Detection/Video_burpee_detection`目录下
+
+
+
+#### 4.1.2 小程序
+
+- **步骤1** 按照第 4 小节 **视频** 中的**步骤1**到**步骤3**搭建小程序后端环境。
+
+- **步骤2** 运行。进入 `Burpee_Detection/` 目录,在 `Burpee_Detection/App_burpee_detection` 目录下执行命令:
+
+```bash
+bash run.sh
+```
+- **步骤3** 下载`微信开发者工具`并登录,在微信公众平台注册小程序并获取AppID
+
+
+- 
+
+
+- **步骤4** 点击导入,选择小程序代码文件夹并打开(代码可下载),点击编译模式选中`pages/bind/bind`,点击`详情-本地设置`,选中不效验合法域名后(可在小程序公众平台开发管理-开发设置中,配置合法域名),点击`真机调试`,手机扫描二维码
+
+
+-  -  - 
+
+
+- **步骤5** 进入微信小程序页面,点击`开始计数`,小程序将摄像头以40ms(fps=25)的速率拍摄照片,并上传至腾讯云桶内,后台接收图片并处理
+-
+- 
+-
+- **步骤6** 人物在摄像头前进行波比跳,后台实时更新波比跳个数并将结果发送至桶内,小程序端以0.1s的速率刷新页面展示的个数
+
+- **步骤7** 点击`结束`,小程序停止发送图像并清理上传至桶内的图片释放内存,后端等待小程序下次开始计数
+
+
+
+### 4.2 性能与精度测试
+
+- **步骤1** 下载测试数据集,并将`data`目录放在`Burpee_Detection`目录下
+
+ 
+
+- **步骤2** 打开`Burpee_Detection/Pic_burpee_detection`目录下`Pic_main.py`文件,将变量 `PATH` ,`Result_PATH` ,`Result_Pic_PATH`分别初始化为 `["../data/images/test/"]`,`["./result_test/"]`,`["./result_test_pic/"]`
+
+
+- **步骤3** 在`Burpee_Detection/Pic_burpee_detection`目录下运行`run.sh`脚本,对`data/images/test`目录下的图片进行识别并输出结果
+
+ ```bash
+ bash run.sh
+ ```
+
+  运行脚本后会生成经过 SDK 后的推理结果,保留在`result_test`目录下以`.txt`格式保存。
+ 结果可视化效果保留在`result_test_pic`目录下以`.jpg`格式保存
+
+ 运行结果中会有`Spend time:`是识别所有图片所用的时间,`fps:`计算得到的帧数
+
+ 
+
+- **步骤4** 在`Burpee_Detection/Pic_burpee_detection`目录下运行`map_calculate.py`脚本,计算精度。
+
+ ```bash
+  python3.9 map_calculate.py
+ ```
+
+ 测试结果
+
+ 
+
+ ## 5 软件依赖以及资源链接
+
+ 推理中涉及到第三方软件依赖如下表所示。
+
+ | 依赖软件 | 版本 | 说明 | 使用教程 |
+ |------------| ---------- | ------------------------------------------ | ------------------------------------------------------------ |
+ | live555 | 1.09 | 实现视频转 rtsp 进行推流 | [链接](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/Live555离线视频转RTSP说明文档.md) |
+ | ffmpeg | 2021-10-14 | 实现 mp4 格式视频转为 H.264 格式视频 | [链接](https://gitee.com/ascend/mindxsdk-referenceapps/blob/master/docs/参考资料/pc端ffmpeg安装教程.md#https://gitee.com/link?target=https%3A%2F%2Fffmpeg.org%2Fdownload.html) |
+ | 微信开发者工具 |1.06.2207210| 实现小程序的使用 | [链接](https://developers.weixin.qq.com/miniprogram/dev/devtools/download.html) |
+ | 小程序导入代码 |- | 微信小程序代码 | [链接](https://burpee.obs.cn-east-3.myhuaweicloud.com:443/%E5%B0%8F%E7%A8%8B%E5%BA%8F%E4%BB%A3%E7%A0%81.zip?AccessKeyId=3M18UT7HRLKP58NPPFUO&Expires=1690270238&Signature=SHjFgSLUrGMPGbYNYyNgS3VmBMw%3D) |
+ | 模型文件 | - | pt 模型文件,onnx 模型文件,om 模型文件 | [链接](https://burpee.obs.cn-east-3.myhuaweicloud.com:443/models.zip?AccessKeyId=LMDYAAERYH5FWMJIHJOM&Expires=1689495724&Signature=f3y/GUL8po3menuj5Dsfa%2B21kPE%3D) |
+ | 测试数据集 | | 由237张图片构成,Yolo 格式 | [链接](https://burpee.obs.cn-east-3.myhuaweicloud.com:443/data.zip?AccessKeyId=3M18UT7HRLKP58NPPFUO&Expires=1690267377&Signature=ihE8HZ2et2BysOCsyIvZVAgfrm0%3D) |
+
\ No newline at end of file
diff --git a/contrib/Burpee_Detection/Video_burpee_detection/Video_main.py b/contrib/Burpee_Detection/Video_burpee_detection/Video_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..770a92323f19ea96d63aeba18d7d9f0c679500ba
--- /dev/null
+++ b/contrib/Burpee_Detection/Video_burpee_detection/Video_main.py
@@ -0,0 +1,171 @@
+# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import cv2
+import numpy as np
+
+from StreamManagerApi import StreamManagerApi, MxDataInput, StringVector
+import MxpiDataType_pb2 as MxpiDataType
+
+
+class ostream:
+ def __init__(self, file):
+ self.file = file
+
+ def __lshift__(self, obj):
+ self.file.write(str(obj))
+ return self
+
+
+cout = ostream(sys.stdout)
+endl = '/n'
+
+# The following belongs to the SDK Process
+streamManagerApi = StreamManagerApi()
+# Init stream manager
+ret = streamManagerApi.InitManager()
+if ret != 0:
+ cout << 'Failed to init Stream manager, ret=' << str(ret) << endl
+ exit()
+# Mark start time
+# start = time.time()
+# Create streams by pipeline config file
+# load pipline
+with open("../pipeline/burpee_detection_p.pipeline", 'rb') as f:
+ pipelineStr = f.read()
+ret = streamManagerApi.CreateMultipleStreams(pipelineStr)
+# Print error message
+if ret != 0:
+ cout << 'Failed to create Stream, ret=' << str(ret) << endl
+ exit()
+# Stream name
+STREAM_NAME = b'detection'
+# Obtain the inference result by specifying streamName and keyVec
+# The data that needs to be obtained is searched by the plug-in name
+keys = [b"ReservedFrameInfo", b"mxpi_modelinfer0", b"mxpi_videodecoder0"]
+keyVec = StringVector()
+for key in keys:
+ keyVec.push_back(key)
+
+STATE = 0
+ACTION_CNT = 0
+# Config the output video
+fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+out = cv2.VideoWriter('video_result.mp4', fourcc, 30, (1280, 720))
+
+while True:
+
+ # Get data through GetResult
+ infer_result = streamManagerApi.GetResult(STREAM_NAME, b'appsink0', keyVec)
+
+ # Determine whether the output is empty
+ if infer_result.metadataVec.size() == 0:
+ cout << 'infer_result is null' << endl
+ continue
+
+ # Frame information structure
+ frameList = MxpiDataType.MxpiFrameInfo()
+ frameList.ParseFromString(infer_result.metadataVec[0].serializedMetadata)
+
+ # Object postprocessor information
+ objectList = MxpiDataType.MxpiObjectList()
+ objectList.ParseFromString(infer_result.metadataVec[1].serializedMetadata)
+
+ # Video-decoder information
+ visionList = MxpiDataType.MxpiVisionList()
+ visionList.ParseFromString(infer_result.metadataVec[2].serializedMetadata)
+ vision_data = visionList.visionVec[0].visionData.dataStr
+ visionInfo = visionList.visionVec[0].visionInfo
+
+ # cv2 func YUV to BGR
+ YUV_BYTES_NU = 3
+ YUV_BYTES_DE = 2
+ img_yuv = np.frombuffer(vision_data, np.uint8)
+ # Reshape
+ img_bgr = img_yuv.reshape(visionInfo.heightAligned * YUV_BYTES_NU // YUV_BYTES_DE,
+ visionInfo.widthAligned)
+ # Color gamut conversion
+ img = cv2.cvtColor(img_bgr, getattr(cv2, "COLOR_YUV2BGR_NV12"))
+
+ BEST_CONFIDENCE = 0
+
+ if len(objectList.objectVec) == 0:
+ continue
+
+ for i in range(len(objectList.objectVec)):
+ # Get ObjectList
+ results = objectList.objectVec[i]
+ # Get the confidence
+ confidence = round(results.classVec[0].confidence, 4)
+ # Save the best confidence and its information
+ if confidence > BEST_CONFIDENCE:
+ BEST_CONFIDENCE = confidence
+ best_bboxes = {'x0': int(results.x0),
+ 'x1': int(results.x1),
+ 'y0': int(results.y0),
+ 'y1': int(results.y1),
+ 'text': results.classVec[0].className}
+ action = best_bboxes['text']
+ TEXT = "{}{}".format(str(BEST_CONFIDENCE), " ")
+
+ # Draw rectangle and txt for visualization
+ for item in best_bboxes['text']:
+ TEXT += item
+ cv2.putText(img, TEXT, (best_bboxes['x0'] + 10, best_bboxes['y0'] + 10),
+ cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 1)
+ cv2.rectangle(img, (best_bboxes['x0'], best_bboxes['y0']), (best_bboxes['x1'], best_bboxes['y1']),
+ (255, 0, 0), 2)
+
+ # State change
+ if STATE == 0:
+ if action == "crouch":
+ STATE = 1
+ elif STATE == 1:
+ if action == "support":
+ STATE = 2
+ elif STATE == 2:
+ if action == "crouch":
+ STATE = 3
+ elif STATE == 3:
+ if action == "jump":
+ STATE = 0
+ ACTION_CNT = ACTION_CNT + 1
+
+ # Save txt for results
+ FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL
+ if os.path.exists("result.txt"):
+ os.remove("result.txt")
+ with os.fdopen(os.open('result.txt', FLAGS, 0o755), 'w') as f:
+ f.write(str(ACTION_CNT))
+
+ # Save picture
+ Id = frameList.frameId
+ RESULT_PIC_PATH = "./result_pic/"
+ if os.path.exists(RESULT_PIC_PATH) != 1:
+ os.makedirs("./result_pic/")
+ origin_img_file = './result_pic/image' + '-' + str(Id) + '.jpg'
+ cv2.imwrite(origin_img_file, img)
+
+ # Write the video
+ out.write(img)
+
+ # Stop detection when it is the lase frame
+ # Or when the frame id comes to be the number you set
+ if frameList.isEos or Id > 63:
+ out.release()
+ break
+
+# Destroy All Streams
+streamManagerApi.DestroyAllStreams()
diff --git a/contrib/Burpee_Detection/Video_burpee_detection/run.sh b/contrib/Burpee_Detection/Video_burpee_detection/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..aab4daeefea553d4a0dbf51ed09ddf6c55cf0ba6
--- /dev/null
+++ b/contrib/Burpee_Detection/Video_burpee_detection/run.sh
@@ -0,0 +1,19 @@
+# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# load envs
+source ../envs/env.sh
+
+# running inference process
+# fixed: "python3.9.2" is not a valid interpreter executable name
+# (CPython installs expose "python3.9"), so the original command could not run.
+python3.9 Video_main.py
diff --git a/contrib/Burpee_Detection/envs/atc_env.sh b/contrib/Burpee_Detection/envs/atc_env.sh
new file mode 100644
index 0000000000000000000000000000000000000000..fc800a908c17a2140231793f6ccaff6dc51eb071
--- /dev/null
+++ b/contrib/Burpee_Detection/envs/atc_env.sh
@@ -0,0 +1,24 @@
+# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Environment variables required to convert a model file into an .om model with ATC.
+
+# set ATC environment variables
+export install_path=/home/HwHiAiUser/Ascend/ascend-toolkit/latest
+export PATH=/usr/local/python3.7.5/bin:${install_path}/x86_64-linux/atc/ccec_compiler/bin:${install_path}/atc/bin:$PATH
+# fixed: a ':' separator was missing between "schedule_search.egg" and
+# "$PYTHONPATH", which fused the last entry with the pre-existing path.
+export PYTHONPATH=${install_path}/x86_64-linux/atc/python/site-packages:${install_path}/x86_64-linux/atc/python/site-packages/auto_tune.egg/auto_tune:${install_path}/x86_64-linux/atc/python/site-packages/schedule_search.egg:$PYTHONPATH
+export LD_LIBRARY_PATH=${install_path}/x86_64-linux/atc/lib64:$LD_LIBRARY_PATH
+export ASCEND_OPP_PATH=${install_path}/opp
+# exported successfully
+echo "insert success!"
diff --git a/contrib/Burpee_Detection/envs/env.sh b/contrib/Burpee_Detection/envs/env.sh
new file mode 100644
index 0000000000000000000000000000000000000000..3b8bc98d1f9a9f16ee78168c37c1eccec92a5e66
--- /dev/null
+++ b/contrib/Burpee_Detection/envs/env.sh
@@ -0,0 +1,17 @@
+# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+export MX_SDK_HOME="/home/HwHiAiUser/Ascend/mindx_sdk/mxVision_2.0.4/linux-x86_64/mxVision-2.0.4"
+export LD_LIBRARY_PATH="${MX_SDK_HOME}/lib:${MX_SDK_HOME}/opensource/lib:${LD_LIBRARY_PATH}"
+export PYTHONPATH="${MX_SDK_HOME}/python:${PYTHONPATH}"
diff --git a/contrib/Burpee_Detection/model/aipp_yolov5.cfg b/contrib/Burpee_Detection/model/aipp_yolov5.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..ad450a5e698982ef36c6de1ef51453ae6e7b66c4
--- /dev/null
+++ b/contrib/Burpee_Detection/model/aipp_yolov5.cfg
@@ -0,0 +1,26 @@
+aipp_op {
+ aipp_mode : static
+ related_input_rank : 0
+ input_format : YUV420SP_U8
+
+ src_image_size_w : 640
+ src_image_size_h : 640
+ crop : false
+ csc_switch : true
+ rbuv_swap_switch : false
+ matrix_r0c0 : 256
+ matrix_r0c1 : 0
+ matrix_r0c2 : 359
+ matrix_r1c0 : 256
+ matrix_r1c1 : -88
+ matrix_r1c2 : -183
+ matrix_r2c0 : 256
+ matrix_r2c1 : 454
+ matrix_r2c2 : 0
+ input_bias_0 : 0
+ input_bias_1 : 128
+ input_bias_2 : 128
+ var_reci_chn_0 : 0.0039216
+ var_reci_chn_1 : 0.0039216
+ var_reci_chn_2 : 0.0039216
+}
diff --git a/contrib/Burpee_Detection/model/atc.sh b/contrib/Burpee_Detection/model/atc.sh
new file mode 100644
index 0000000000000000000000000000000000000000..5047a1dfc690d0ba9a101426b276a41d000d51bb
--- /dev/null
+++ b/contrib/Burpee_Detection/model/atc.sh
@@ -0,0 +1,31 @@
+# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# atc environment
+source ../envs/env.sh
+source ../envs/atc_env.sh
+
+# atc transform model
+atc \
+ --model=./burpee_detection.onnx \
+ --framework=5 \
+ --output=./burpee_detection \
+ --input_format=NCHW \
+ --input_shape="images:1,3,640,640" \
+ --out_nodes="Transpose_213:0;Transpose_262:0;Transpose_311:0" \
+ --enable_small_channel=1 \
+ --insert_op_conf=./aipp_yolov5.cfg \
+ --soc_version=Ascend310 \
+ --log=info
+
diff --git a/contrib/Burpee_Detection/model/yolov5.cfg b/contrib/Burpee_Detection/model/yolov5.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..269770ae50d9e51ee6d3ed41a01793f9bf3e3346
--- /dev/null
+++ b/contrib/Burpee_Detection/model/yolov5.cfg
@@ -0,0 +1,10 @@
+CLASS_NUM=3
+BIASES_NUM=18
+BIASES=10,13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326
+SCORE_THRESH=0.6
+OBJECTNESS_THRESH=0.5
+IOU_THRESH=0.6
+YOLO_TYPE=3
+ANCHOR_DIM=3
+MODEL_TYPE=1
+YOLO_VERSION=5
diff --git a/contrib/Burpee_Detection/model/yolov5.names b/contrib/Burpee_Detection/model/yolov5.names
new file mode 100644
index 0000000000000000000000000000000000000000..8f332ae26b594f47f1f6b8a8eac62f2efcbad46a
--- /dev/null
+++ b/contrib/Burpee_Detection/model/yolov5.names
@@ -0,0 +1,3 @@
+support
+jump
+crouch
diff --git a/contrib/Burpee_Detection/pipeline/burpee_detection_p.pipeline b/contrib/Burpee_Detection/pipeline/burpee_detection_p.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..a5b3c8e62676923bada4b1689dd47698fb4ca4c8
--- /dev/null
+++ b/contrib/Burpee_Detection/pipeline/burpee_detection_p.pipeline
@@ -0,0 +1,54 @@
+{
+ "detection": {
+ "stream_config": {
+ "deviceId": "0"
+ },
+ "appsrc0": {
+ "props": {
+ "blocksize": "409600"
+ },
+ "factory": "appsrc",
+ "next": "mxpi_imagedecoder0"
+ },
+ "mxpi_imagedecoder0": {
+ "props": {
+ "deviceId": "0"
+ },
+ "factory": "mxpi_imagedecoder",
+ "next": "mxpi_imageresize0"
+ },
+ "mxpi_imageresize0": {
+ "props": {
+ "dataSource": "mxpi_imagedecoder0",
+ "resizeHeight": "640",
+ "resizeWidth": "640",
+ "resizeType": "Resizer_KeepAspectRatio_Fit"
+ },
+ "factory": "mxpi_imageresize",
+ "next": "mxpi_modelinfer0"
+ },
+ "mxpi_modelinfer0": {
+ "props": {
+ "modelPath": "../model/burpee_detection.om",
+ "postProcessConfigPath": "../model/yolov5.cfg",
+ "labelPath": "../model/yolov5.names",
+ "postProcessLibPath": "/home/HwHiAiUser/Ascend/mindx_sdk/mxVision_2.0.4/linux-x86_64/mxVision-2.0.4/lib/libMpYOLOv5PostProcessor.so"
+ },
+ "factory": "mxpi_modelinfer",
+ "next": "mxpi_dataserialize0"
+ },
+ "mxpi_dataserialize0": {
+ "props": {
+ "outputDataKeys": "mxpi_modelinfer0"
+ },
+ "factory": "mxpi_dataserialize",
+ "next": "appsink0"
+ },
+ "appsink0": {
+ "props": {
+ "blocksize": "4096000"
+ },
+ "factory": "appsink"
+ }
+ }
+}
diff --git a/contrib/Burpee_Detection/pipeline/burpee_detection_v.pipeline b/contrib/Burpee_Detection/pipeline/burpee_detection_v.pipeline
new file mode 100644
index 0000000000000000000000000000000000000000..d80926d36e6778861fe3a86463a67c8cf69a356b
--- /dev/null
+++ b/contrib/Burpee_Detection/pipeline/burpee_detection_v.pipeline
@@ -0,0 +1,59 @@
+{
+ "detection": {
+ "stream_config": {
+ "deviceId": "0"
+ },
+ "mxpi_rtspsrc0": {
+ "props": {
+ "rtspUrl": "rtsp://192.168.0.5:8554/burpee_detection.264",
+ "fpsMode": "1"
+ },
+ "factory": "mxpi_rtspsrc",
+ "next": "mxpi_videodecoder0"
+ },
+ "mxpi_videodecoder0": {
+ "props": {
+ "deviceId": "0",
+ "inputVideoFormat": "H264",
+ "outputImageFormat": "YUV420SP_NV12"
+ },
+ "factory": "mxpi_videodecoder",
+ "next": "mxpi_imageresize0"
+ },
+ "mxpi_imageresize0": {
+ "props": {
+ "dataSource": "mxpi_videodecoder0",
+ "resizeHeight": "640",
+ "resizeWidth": "640",
+ "resizeType": "Resizer_KeepAspectRatio_Fit"
+ },
+ "factory": "mxpi_imageresize",
+ "next": "mxpi_modelinfer0"
+ },
+ "mxpi_modelinfer0": {
+ "props": {
+ "dataSource": "mxpi_imageresize0",
+ "modelPath": "../model/burpee_detection.om",
+ "postProcessConfigPath": "../model/yolov5.cfg",
+ "labelPath": "../model/yolov5.names",
+ "postProcessLibPath": "/home/HwHiAiUser/Ascend/mindx_sdk/mxVision_2.0.4/linux-x86_64/mxVision-2.0.4/lib/libMpYOLOv5PostProcessor.so",
+ "deviceId": "0"
+ },
+ "factory": "mxpi_modelinfer",
+ "next": "mxpi_dataserialize0"
+ },
+ "mxpi_dataserialize0": {
+ "props": {
+ "outputDataKeys": "mxpi_modelinfer0,ReservedFrameInfo"
+ },
+ "factory": "mxpi_dataserialize",
+ "next": "appsink0"
+ },
+ "appsink0": {
+ "props": {
+ "blocksize": "4096000"
+ },
+ "factory": "appsink"
+ }
+ }
+}
diff --git a/contrib/Burpee_Detection/readme_img/app_1.png b/contrib/Burpee_Detection/readme_img/app_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..9d56cc7976396199691287c08c01c476492d27e3
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_1.png differ
diff --git a/contrib/Burpee_Detection/readme_img/app_2.png b/contrib/Burpee_Detection/readme_img/app_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..f67c0c31a0d3960c5ba6c61e707eb5dd88d8fe1c
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_2.png differ
diff --git a/contrib/Burpee_Detection/readme_img/app_3.png b/contrib/Burpee_Detection/readme_img/app_3.png
new file mode 100644
index 0000000000000000000000000000000000000000..47980330bc16184f356724d8cb4bd17cc35e0a48
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_3.png differ
diff --git a/contrib/Burpee_Detection/readme_img/app_4.png b/contrib/Burpee_Detection/readme_img/app_4.png
new file mode 100644
index 0000000000000000000000000000000000000000..2aa4851813a254e5fe5043264939a65a4c5db368
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_4.png differ
diff --git a/contrib/Burpee_Detection/readme_img/app_5.png b/contrib/Burpee_Detection/readme_img/app_5.png
new file mode 100644
index 0000000000000000000000000000000000000000..7d80b6fd43bf17aa6e103d6e83961186fa38bc0b
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_5.png differ
diff --git a/contrib/Burpee_Detection/readme_img/app_flow.png b/contrib/Burpee_Detection/readme_img/app_flow.png
new file mode 100644
index 0000000000000000000000000000000000000000..4a81113f7dc164f1009a26d216b042b6210012ae
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/app_flow.png differ
diff --git a/contrib/Burpee_Detection/readme_img/dataset.png b/contrib/Burpee_Detection/readme_img/dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..601a64a6e9093d4d4372014df0f0c341798d477f
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/dataset.png differ
diff --git a/contrib/Burpee_Detection/readme_img/fps.png b/contrib/Burpee_Detection/readme_img/fps.png
new file mode 100644
index 0000000000000000000000000000000000000000..30f1e13556fc078f3c3014bb61c8599f4d05af01
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/fps.png differ
diff --git a/contrib/Burpee_Detection/readme_img/map.png b/contrib/Burpee_Detection/readme_img/map.png
new file mode 100644
index 0000000000000000000000000000000000000000..1e2ff1ae91e486efc4512ffeda9ca5427d1e7062
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/map.png differ
diff --git a/contrib/Burpee_Detection/readme_img/video.png b/contrib/Burpee_Detection/readme_img/video.png
new file mode 100644
index 0000000000000000000000000000000000000000..c3c07c23babd4859fad6418583d7ab55c2c3e552
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/video.png differ
diff --git a/contrib/Burpee_Detection/readme_img/video_flow.png b/contrib/Burpee_Detection/readme_img/video_flow.png
new file mode 100644
index 0000000000000000000000000000000000000000..49365d320111ee4a58296c4ff3483d9ab5c74961
Binary files /dev/null and b/contrib/Burpee_Detection/readme_img/video_flow.png differ