diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/README.md b/MindIE/MindIE-Torch/built-in/audio/Paraformer/README.md new file mode 100644 index 0000000000000000000000000000000000000000..5efeea18b60efa1f49de2f677ff2ca6b61b1f3bb --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/README.md @@ -0,0 +1,258 @@ +# Paraformer模型-推理指导 + +- [概述](#概述) +- [推理环境准备](#推理环境准备) +- [快速上手](#快速上手) + - [获取源码](#获取源码) + - [模型推理](#模型推理) + +# 概述 + +该工程使用mindietorch部署Paraformer语音识别模型,同时该工程还适配了VAD音频切分模型以及PUNC标点符号模型,三个模型可组成VAD+Paraformer+PUNC的pipeline,实现对于长音频的识别 + +- 模型路径: + ```bash + https://modelscope.cn/models/iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch + ``` + +- 参考实现: + ```bash + https://github.com/modelscope/FunASR + ``` + +# 推理环境准备 + +- 该模型需要以下插件与驱动 + + **表 1** 版本配套表 + + | 配套 | 版本 | 环境准备指导 | + | ------ | ------- | ------------ | + | Python | 3.10.13 | - | + | torch | 2.1.0+cpu | - | + | torch_audio | 2.1.0+cpu | - | + | CANN | 8.0.RC3 | - | + | MindIE | 1.0.RC3 | - | + +# 快速上手 +## 获取源码 + +1. 安装mindie包 + + ```bash + # 安装mindie + chmod +x ./Ascend-mindie_xxx.run + ./Ascend-mindie_xxx.run --install + source /usr/local/Ascend/mindie/set_env.sh + ``` + +2. 获取Funasr源码 + + ``` + git clone https://github.com/modelscope/FunASR.git + cd ./FunASR + git reset fdac68e1d09645c48adf540d6091b194bac71075 --hard + cd .. + ``` + +3. 修改Funasr的源码,将patch应用到代码中(若patch应用失败,则需要手动进行修改) + ``` + cd ./FunASR + git apply ../mindie.patch --ignore-whitespace + cd .. + ``` + + (可选)若为Atlas 800I A2服务器,可以使用如下命令将Attention替换为Flash Attention,可以提升Paraformer模型的性能 + ``` + cd ./FunASR + git apply ../mindie_fa.patch --ignore-whitespace + cd .. + ``` + +4. 获取模型文件 + + 将[Paraformer](https://modelscope.cn/models/iic/speech_paraformer-large-vad-punc_asr_nat-zh-cn-16k-common-vocab8404-pytorch/files)的模型文件下载到本地,并保存在./model文件夹下 + + 将[vad](https://modelscope.cn/models/iic/speech_fsmn_vad_zh-cn-16k-common-pytorch/files)的模型文件下载到本地,并保存在./model_vad文件夹下 + + 将[punc](https://modelscope.cn/models/iic/punc_ct-transformer_cn-en-common-vocab471067-large/files)的模型文件下载到本地,并保存在./model_punc文件夹下 + + 目录结构如下所示 + + ``` + Paraformer + ├── FunASR + └── model + └── model.pt + └── config.yaml + └── ... + └── model_vad + └── model.pt + └── config.yaml + └── ... + └── model_punc + └── model.pt + └── config.yaml + └── ... + ``` + +5. 安装Funasr的依赖 + ``` + apt install ffmpeg + pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cpu + pip install jieba omegaconf kaldiio librosa tqdm hydra-core six attrs psutil tornado + ``` + +6. 安装配套版本的torch_npu,同时参考[昇腾文档](https://www.hiascend.com/document/detail/zh/mindie/10RC3/mindietorch/Torchdev/mindie_torch0018.html)兼容mindie和torch_npu + +7. (可选) 若要进行精度或性能测试,可下载数据集[AISHELL-1](https://www.aishelltech.com/kysjcp)并保存于任意路径 + +# 模型编译 +1. 
(Optional) Model serialization
+   On aarch64 CPUs, compiling the encoder and decoder may fail with "RuntimeError: could not create a primitive descriptor for a matmul primitive". In that case, create a new Python environment (conda is recommended) and install torch 2.2.1 and its dependencies:
+   ```
+   pip install torch==2.2.1 torchvision==0.17.1 torchaudio==2.2.1 --index-url https://download.pytorch.org/whl/cpu
+   pip install omegaconf kaldiio librosa tqdm hydra-core six
+   ```
+
+   Then run the following script to serialize the encoder and decoder:
+   ```bash
+   python trace_encoder_decoder.py \
+       --model ./model \
+       --traced_encoder ./compiled_model/traced_encoder.pt \
+       --traced_decoder ./compiled_model/traced_decoder.pt
+   ```
+   Parameters:
+   - --model: path of the pretrained model
+   - --traced_encoder: path to save the traced encoder
+   - --traced_decoder: path to save the traced decoder
+
+   This step only produces the serialized (traced) encoder and decoder; switch back to the original environment for the model compilation below.
+
+
+2. Model compilation
+   Run the following command to compile the models (if the compiled models are to be saved under the compiled_model directory, create it first with mkdir compiled_model):
+   ```bash
+   python compile.py \
+       --model ./model \
+       --model_vad ./model_vad \
+       --model_punc ./model_punc \
+       --compiled_encoder ./compiled_model/compiled_encoder.pt \
+       --compiled_decoder ./compiled_model/compiled_decoder.pt \
+       --compiled_cif ./compiled_model/compiled_cif.pt \
+       --compiled_cif_timestamp ./compiled_model/compiled_cif_timestamp.pt \
+       --compiled_vad ./compiled_model/compiled_vad.pt \
+       --compiled_punc ./compiled_model/compiled_punc.pt \
+       --traced_encoder ./compiled_model/traced_encoder.pt \
+       --traced_decoder ./compiled_model/traced_decoder.pt \
+       --soc_version Ascendxxx
+   ```
+
+   Parameters:
+   - --model: path of the pretrained model
+   - --model_vad: path of the pretrained VAD model; set to None if the VAD model is not used
+   - --model_punc: path of the pretrained PUNC model; set to None if the PUNC model is not used
+   - --compiled_encoder: path to save the compiled encoder
+   - --compiled_decoder: path to save the compiled decoder
+   - --compiled_cif: path to save the compiled cif function
+   - --compiled_cif_timestamp: path to save the compiled cif_timestamp function
+   - --compiled_vad: path to save the compiled VAD model
+   - --compiled_punc: path to save the compiled PUNC model
+   - --traced_encoder: path of the pre-traced encoder; can be omitted if the optional serialization in step 1 was skipped
+   - --traced_decoder: path of the pre-traced decoder; can be omitted if the optional serialization in step 1 was skipped
+   - --soc_version: Ascend SoC version in the form Ascendxxx (query xxx with npu-smi info)
+
+
+## 模型推理
+1. Set the upper limit of the mindie memory pool to 12 GB by exporting the following environment variables:
+   ```
+   export INF_NAN_MODE_ENABLE=0
+   export TORCH_AIE_NPU_CACHE_MAX_SIZE=12
+   ```
+
+2. Sample test
+   Run the following command to test audio samples. The script runs the full VAD+Paraformer+PUNC pipeline and reads one audio file at a time; the audio can be of arbitrary length.
+   ```bash
+   python test.py \
+       --model ./model \
+       --model_vad ./model_vad \
+       --model_punc ./model_punc \
+       --compiled_encoder ./compiled_model/compiled_encoder.pt \
+       --compiled_decoder ./compiled_model/compiled_decoder.pt \
+       --compiled_cif ./compiled_model/compiled_cif.pt \
+       --compiled_cif_timestamp ./compiled_model/compiled_cif_timestamp.pt \
+       --compiled_punc ./compiled_model/compiled_punc.pt \
+       --compiled_vad ./compiled_model/compiled_vad.pt \
+       --paraformer_batch_size 16 \
+       --sample_path ./model/example \
+       --soc_version Ascendxxx
+   ```
+
+   Parameters:
+   - --model: path of the pretrained model
+   - --model_vad: path of the pretrained VAD model
+   - --model_punc: path of the pretrained PUNC model
+   - --compiled_encoder: path of the compiled encoder
+   - --compiled_decoder: path of the compiled decoder
+   - --compiled_cif: path of the compiled cif function
+   - --compiled_cif_timestamp: path of the compiled cif_timestamp function
+   - --compiled_punc: path of the compiled PUNC model
+   - --compiled_vad: path of the compiled VAD model
+   - --paraformer_batch_size: batch size used by the Paraformer model
+   - --sample_path: path of a test audio file or of a folder; if a folder is given, all audio files under it are processed
+   - --soc_version: Ascend SoC version in the form Ascendxxx (query xxx with npu-smi info)
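+
+   Besides the test.py script, the compiled pipeline can also be driven from Python. The sketch below is illustrative only: it assumes the MindieAutoModel constructor accepts the same keyword arguments as the command-line flags above (see mindie_auto_model.py in this directory), and the audio file name is a placeholder.
+   ```python
+   import mindietorch
+   from mindie_auto_model import MindieAutoModel
+
+   mindietorch.set_device(0)
+   # Build the VAD + Paraformer + PUNC pipeline from the pretrained and compiled models.
+   model = MindieAutoModel(
+       model="./model", vad_model="./model_vad", punc_model="./model_punc",
+       compiled_encoder="./compiled_model/compiled_encoder.pt",
+       compiled_decoder="./compiled_model/compiled_decoder.pt",
+       compiled_cif="./compiled_model/compiled_cif.pt",
+       compiled_cif_timestamp="./compiled_model/compiled_cif_timestamp.pt",
+       compiled_vad="./compiled_model/compiled_vad.pt",
+       compiled_punc="./compiled_model/compiled_punc.pt",
+       paraformer_batch_size=16,
+   )
+   # generate() runs VAD segmentation, batched Paraformer recognition and punctuation
+   # restoration, and returns the recognition results together with timing statistics.
+   results, time_stats = model.generate(input="./model/example/asr_example.wav")  # hypothetical sample file
+   print(results[0]["text"])
+   ```
+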
+3. Performance test
+   Run the following command to benchmark the Paraformer model alone. The batch_size argument controls the maximum number of audio files processed at once (for example, with 64 the script reads 64 audio files from sample_path and combines them into one input). Note that very long audio files may exceed the NPU memory.
+   ```
+   python test_performance.py \
+       --model ./model \
+       --compiled_encoder ./compiled_model/compiled_encoder.pt \
+       --compiled_decoder ./compiled_model/compiled_decoder.pt \
+       --compiled_cif ./compiled_model/compiled_cif.pt \
+       --compiled_cif_timestamp ./compiled_model/compiled_cif_timestamp.pt \
+       --batch_size 64 \
+       --result_path ./aishell_test_result.txt \
+       --sample_path /path/to/AISHELL-1/wav/test \
+       --soc_version Ascendxxx
+   ```
+
+   Parameters:
+   - --model: path of the pretrained model
+   - --compiled_encoder: path of the compiled encoder
+   - --compiled_decoder: path of the compiled decoder
+   - --compiled_cif: path of the compiled cif function
+   - --compiled_cif_timestamp: path of the compiled cif_timestamp function
+   - --batch_size: batch size used by the Paraformer model
+   - --sample_path: path of the AISHELL-1 test set audio; all audio files under this path are searched recursively
+   - --result_path: path to save the inference results of the test audio
+   - --soc_version: Ascend SoC version in the form Ascendxxx (query xxx with npu-smi info)
+
+
+4. Accuracy test
+
+   Install nltk, which is used to compare the recognized text against the reference transcripts:
+   ```
+   pip install nltk
+   ```
+
+   Run the performance test in step 3 first; the results it saves to result_path are then used for the accuracy check:
+   ```
+   python test_accuracy.py \
+       --result_path ./aishell_test_result.txt \
+       --ref_path /path/to/AISHELL-1/transcript/aishell_transcript_v0.8.txt
+   ```
+
+   Parameters:
+   - --result_path: path of the saved inference results of the test audio
+   - --ref_path: path of the ground-truth transcripts of the AISHELL-1 test set
+
+
+## 模型精度及性能
+
+The reference performance and accuracy on Atlas 310I pro and Atlas 800I A2 are listed below (the performance numbers are pure Paraformer inference performance, not end-to-end pipeline performance).
+
+| NPU            | batch_size | rtf_avg | cer    |
+|----------------|------------|---------|--------|
+| Atlas 310I pro | 16         | 217.175 | 0.0198 |
+| Atlas 800I A2  | 64         | 461.775 | 0.0198 |
\ No newline at end of file
diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/compile.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/compile.py
new file mode 100644
index 0000000000000000000000000000000000000000..2287b61da3338a06d4dc5c5d276370c3132fbc77
--- /dev/null
+++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/compile.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright(C) 2024. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
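+
+# Usage note (illustrative; the authoritative argument list is the argparse
+# definition below): this script compiles the VAD, PUNC and Paraformer sub-models
+# with mindietorch, e.g.
+#   python compile.py --model ./model --model_vad ./model_vad --model_punc ./model_punc \
+#       --soc_version Ascendxxx
+# The compiled TorchScript modules are written to the paths given by the
+# --compiled_* arguments (./compiled_model/ by default).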
+ +import argparse + +import torch +import mindietorch + +from mindie_auto_model import MindieAutoModel + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model", default="./model", + help="path of pretrained model") + parser.add_argument("--model_vad", default="./model_vad", + help="path of pretrained vad model") + parser.add_argument("--model_punc", default="./model_punc", + help="path of pretrained punc model") + parser.add_argument("--compiled_encoder", default="./compiled_model/compiled_encoder.pt", + help="path to save compiled encoder") + parser.add_argument("--compiled_decoder", default="./compiled_model/compiled_decoder.pt", + help="path to save compiled decoder") + parser.add_argument("--compiled_cif", default="./compiled_model/compiled_cif.pt", + help="path to save compiled cif function") + parser.add_argument("--compiled_cif_timestamp", default="./compiled_model/compiled_cif_timestamp.pt", + help="path to save compiled cif timestamp function") + parser.add_argument("--compiled_punc", default="./compiled_model/compiled_punc.pt", + help="path to save compiled punc model") + parser.add_argument("--compiled_vad", default="./compiled_model/compiled_vad.pt", + help="path to save compiled punc model") + parser.add_argument("--traced_encoder", default=None, + help="path to save traced encoder model") + parser.add_argument("--traced_decoder", default=None, + help="path to save traced decoder model") + parser.add_argument("--soc_version", required=True, type=str, + help="soc version of Ascend") + args = parser.parse_args() + + mindietorch.set_device(0) + + # use mindietorch to compile sub-models in Paraformer + print("Begin compiling sub-models.") + MindieAutoModel.export_model(model=args.model_vad, compiled_path=args.compiled_vad, + compile_type="vad", soc_version=args.soc_version) + MindieAutoModel.export_model(model=args.model_punc, compiled_path=args.compiled_punc, + compile_type="punc", soc_version=args.soc_version) + MindieAutoModel.export_model(model=args.model, compiled_encoder=args.compiled_encoder, + compiled_decoder=args.compiled_decoder, compiled_cif=args.compiled_cif, + compiled_cif_timestamp=args.compiled_cif_timestamp, + traced_encoder=args.traced_encoder, traced_decoder=args.traced_decoder, + cif_interval=200, cif_timestamp_interval=500, + compile_type="paraformer", soc_version=args.soc_version) + print("Finish compiling sub-models.") \ No newline at end of file diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie.patch b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie.patch new file mode 100644 index 0000000000000000000000000000000000000000..a485f5eb8643e4fc1ea20341b07768a5fda8c572 --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie.patch @@ -0,0 +1,749 @@ +diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py +index 01e6aaf6..0a80f1c0 100644 +--- a/funasr/auto/auto_model.py ++++ b/funasr/auto/auto_model.py +@@ -171,7 +171,8 @@ class AutoModel: + self.spk_kwargs = spk_kwargs + self.model_path = kwargs.get("model_path") + +- def build_model(self, **kwargs): ++ @staticmethod ++ def build_model(**kwargs): + assert "model" in kwargs + if "model_conf" not in kwargs: + logging.info("download models from model hub: {}".format(kwargs.get("hub", "ms"))) +@@ -277,9 +278,9 @@ class AutoModel: + asr_result_list = [] + num_samples = len(data_list) + disable_pbar = self.kwargs.get("disable_pbar", False) +- pbar = ( +- tqdm(colour="blue", total=num_samples, dynamic_ncols=True) if not 
disable_pbar else None +- ) ++ # pbar = ( ++ # tqdm(colour="blue", total=num_samples, dynamic_ncols=True) if not disable_pbar else None ++ # ) + time_speech_total = 0.0 + time_escape_total = 0.0 + for beg_idx in range(0, num_samples, batch_size): +@@ -311,27 +312,32 @@ class AutoModel: + speed_stats["batch_size"] = f"{len(results)}" + speed_stats["rtf"] = f"{(time_escape) / batch_data_time:0.3f}" + description = f"{speed_stats}, " +- if pbar: +- pbar.update(1) +- pbar.set_description(description) ++ # if pbar: ++ # pbar.update(1) ++ # pbar.set_description(description) + time_speech_total += batch_data_time + time_escape_total += time_escape + +- if pbar: +- # pbar.update(1) +- pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}") ++ # if pbar: ++ # # pbar.update(1) ++ # pbar.set_description(f"rtf_avg: {time_escape_total/time_speech_total:0.3f}") + torch.cuda.empty_cache() + return asr_result_list + + def inference_with_vad(self, input, input_len=None, **cfg): + kwargs = self.kwargs ++ time_stats = {"input_speech_time": 0.0, "end_to_end_time": 0.0, "vad_time" : 0.0, ++ "paraformer_time": 0.0, "punc_time": 0.0} + # step.1: compute the vad model ++ print("Start using VAD model to segment input audios.") + deep_update(self.vad_kwargs, cfg) + beg_vad = time.time() + res = self.inference( + input, input_len=input_len, model=self.vad_model, kwargs=self.vad_kwargs, **cfg + ) + end_vad = time.time() ++ time_stats["vad_time"] = end_vad - beg_vad ++ print("Finish segmenting audios within {:.3f} seconds.".format(time_stats["vad_time"])) + + # FIX(gcf): concat the vad clips for sense vocie model for better aed + if kwargs.get("merge_vad", False): +@@ -352,12 +358,13 @@ class AutoModel: + time_speech_total_all_samples = 1e-6 + + beg_total = time.time() +- pbar_total = ( +- tqdm(colour="red", total=len(res), dynamic_ncols=True) +- if not kwargs.get("disable_pbar", False) +- else None +- ) ++ # pbar_total = ( ++ # tqdm(colour="red", total=len(res), dynamic_ncols=True) ++ # if not kwargs.get("disable_pbar", False) ++ # else None ++ # ) + for i in range(len(res)): ++ print("Begin processing audio with Paraformer and PUNC model.") + key = res[i]["key"] + vadsegments = res[i]["value"] + input_i = data_list[i] +@@ -366,7 +373,7 @@ class AutoModel: + speech_lengths = len(speech) + n = len(vadsegments) + data_with_index = [(vadsegments[i], i) for i in range(n)] +- sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0]) ++ sorted_data = sorted(data_with_index, key=lambda x: x[0][1] - x[0][0], reverse=True) + results_sorted = [] + + if not len(sorted_data): +@@ -385,26 +392,15 @@ class AutoModel: + # pbar_sample = tqdm(colour="blue", total=n, dynamic_ncols=True) + + all_segments = [] +- max_len_in_batch = 0 +- end_idx = 1 +- for j, _ in enumerate(range(0, n)): +- # pbar_sample.update(1) +- sample_length = sorted_data[j][0][1] - sorted_data[j][0][0] +- potential_batch_length = max(max_len_in_batch, sample_length) * (j + 1 - beg_idx) +- # batch_size_ms_cum += sorted_data[j][0][1] - sorted_data[j][0][0] +- if ( +- j < n - 1 +- and sample_length < batch_size_threshold_ms +- and potential_batch_length < batch_size +- ): +- max_len_in_batch = max(max_len_in_batch, sample_length) +- end_idx += 1 +- continue ++ batch_segments = kwargs["paraformer_batch_size"] ++ loop_num = n // batch_segments if n % batch_segments == 0 else n // batch_segments + 1 ++ end_idx = batch_segments + ++ for j in range(loop_num): + speech_j, speech_lengths_j = slice_padding_audio_samples( + speech, 
speech_lengths, sorted_data[beg_idx:end_idx] + ) +- results = self.inference( ++ results, meta_data = self.inference_with_asr( + speech_j, input_len=None, model=model, kwargs=kwargs, **cfg + ) + if self.spk_model is not None: +@@ -425,8 +421,7 @@ class AutoModel: + ) + results[_b]["spk_embedding"] = spk_res[0]["spk_embedding"] + beg_idx = end_idx +- end_idx += 1 +- max_len_in_batch = sample_length ++ end_idx += batch_segments + if len(results) < 1: + continue + results_sorted.extend(results) +@@ -478,6 +473,13 @@ class AutoModel: + if not len(result["text"].strip()): + continue + return_raw_text = kwargs.get("return_raw_text", False) ++ ++ end_paraformer = time.time() ++ time_stats["paraformer_time"] = time_stats["paraformer_time"] + end_paraformer - beg_asr_total ++ print("\tFinish recognizing audio using Paraformer within {:.3f} seconds, " ++ "which contains {} segments and batch_size is {}." ++ .format(time_stats["paraformer_time"], n, batch_segments)) ++ + # step.3 compute punc model + raw_text = None + if self.punc_model is not None: +@@ -489,6 +491,9 @@ class AutoModel: + if return_raw_text: + result["raw_text"] = raw_text + result["text"] = punc_res[0]["text"] ++ end_punc = time.time() ++ time_stats["punc_time"] = time_stats["punc_time"] + end_punc - end_paraformer ++ print("\tFinish adding punctuation using PUNC model within {:.3f} seconds.".format(time_stats["punc_time"])) + + # speaker embedding cluster after resorted + if self.spk_model is not None and kwargs.get("return_spk_res", True): +@@ -567,20 +572,24 @@ class AutoModel: + results_ret_list.append(result) + end_asr_total = time.time() + time_escape_total_per_sample = end_asr_total - beg_asr_total +- if pbar_total: +- pbar_total.update(1) +- pbar_total.set_description( +- f"rtf_avg: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, " +- f"time_speech: {time_speech_total_per_sample: 0.3f}, " +- f"time_escape: {time_escape_total_per_sample:0.3f}" +- ) +- +- # end_total = time.time() +- # time_escape_total_all_samples = end_total - beg_total ++ print("Finish processing audio which is {:.3f} seconds. " ++ "Time consumption of Paraformer and PUNC is {:.3f} seconds." 
++ .format(time_speech_total_per_sample, time_escape_total_per_sample)) ++ # if pbar_total: ++ # pbar_total.update(1) ++ # pbar_total.set_description( ++ # f"rtf_avg: {time_escape_total_per_sample / time_speech_total_per_sample:0.3f}, " ++ # f"time_speech: {time_speech_total_per_sample: 0.3f}, " ++ # f"time_escape: {time_escape_total_per_sample:0.3f}" ++ # ) ++ ++ end_total = time.time() ++ time_stats["end_to_end_time"] = end_total - beg_vad ++ time_stats["input_speech_time"] = time_speech_total_all_samples + # print(f"rtf_avg_all: {time_escape_total_all_samples / time_speech_total_all_samples:0.3f}, " + # f"time_speech_all: {time_speech_total_all_samples: 0.3f}, " + # f"time_escape_all: {time_escape_total_all_samples:0.3f}") +- return results_ret_list ++ return results_ret_list, time_stats + + def export(self, input=None, **cfg): + """ +diff --git a/funasr/frontends/wav_frontend.py b/funasr/frontends/wav_frontend.py +index a4002df5..e62f3baf 100644 +--- a/funasr/frontends/wav_frontend.py ++++ b/funasr/frontends/wav_frontend.py +@@ -134,7 +134,7 @@ class WavFrontend(nn.Module): + mat = kaldi.fbank( + waveform, + num_mel_bins=self.n_mels, +- frame_length=self.frame_length, ++ frame_length=min(self.frame_length, waveform_length/self.fs*1000), + frame_shift=self.frame_shift, + dither=self.dither, + energy_floor=0.0, +@@ -282,30 +282,42 @@ class WavFrontendOnline(nn.Module): + Apply lfr with data + """ + +- LFR_inputs = [] +- # inputs = torch.vstack((inputs_lfr_cache, inputs)) +- T = inputs.shape[0] # include the right context +- T_lfr = int( +- np.ceil((T - (lfr_m - 1) // 2) / lfr_n) +- ) # minus the right context: (lfr_m - 1) // 2 +- splice_idx = T_lfr +- for i in range(T_lfr): +- if lfr_m <= T - i * lfr_n: +- LFR_inputs.append((inputs[i * lfr_n : i * lfr_n + lfr_m]).view(1, -1)) +- else: # process last LFR frame +- if is_final: +- num_padding = lfr_m - (T - i * lfr_n) +- frame = (inputs[i * lfr_n :]).view(-1) +- for _ in range(num_padding): +- frame = torch.hstack((frame, inputs[-1])) +- LFR_inputs.append(frame) +- else: +- # update splice_idx and break the circle +- splice_idx = i +- break +- splice_idx = min(T - 1, splice_idx * lfr_n) ++ T, D = inputs.shape ++ if T == 0: ++ return torch.empty(0, D * lfr_m), inputs, 0 ++ ++ # Calculate the number of LFR frames ++ T_lfr = (T - lfr_m) // lfr_n + 1 ++ if T_lfr <= 0: ++ T_lfr = 0 ++ required_length = (T_lfr - 1) * lfr_n + lfr_m ++ ++ # Handle padding ++ if required_length > T: ++ if is_final: ++ pad_amount = required_length - T ++ padding_frame = inputs[-1].unsqueeze(0).expand(pad_amount, D) ++ inputs_padded = torch.cat([inputs, padding_frame], dim=0) ++ else: ++ T_lfr -= 1 ++ required_length = (T_lfr - 1) * lfr_n + lfr_m ++ inputs_padded = inputs[:required_length] ++ else: ++ inputs_padded = inputs[:required_length] ++ ++ # Transpose to [D, T_speed] ++ inputs_padded = inputs_padded.transpose(0, 1) ++ # Apply unfold ++ frames = inputs_padded.unfold(1, lfr_m, lfr_n) # Shape: [D, T_lfr, lfr_m] ++ # Permute to [T_lfr, lfr_m, D] ++ frames = frames.permute(1, 2, 0) ++ # Flatten frames ++ LFR_outputs = frames.contiguous().view(T_lfr, -1) ++ # Update splice_idx and cache ++ splice_idx = T_lfr * lfr_n ++ splice_idx = min(splice_idx, T) ++ + lfr_splice_cache = inputs[splice_idx:, :] +- LFR_outputs = torch.vstack(LFR_inputs) + return LFR_outputs.type(torch.float32), lfr_splice_cache, splice_idx + + @staticmethod +diff --git a/funasr/models/bicif_paraformer/cif_predictor.py b/funasr/models/bicif_paraformer/cif_predictor.py +index ca98cdc2..796b24dc 
100644 +--- a/funasr/models/bicif_paraformer/cif_predictor.py ++++ b/funasr/models/bicif_paraformer/cif_predictor.py +@@ -412,11 +412,12 @@ class CifPredictorV3Export(torch.nn.Module): + + mask = mask.squeeze(-1) + hidden, alphas, token_num = self.tail_process_fn(hidden, alphas, mask=mask) ++ return hidden, alphas, token_num + acoustic_embeds, cif_peak = cif_export(hidden, alphas, self.threshold) + + return acoustic_embeds, token_num, alphas, cif_peak + +- def get_upsample_timestmap(self, hidden, mask=None, token_num=None): ++ def get_upsample_timestamp(self, hidden, mask=None, token_num=None): + h = hidden + b = hidden.shape[0] + context = h.transpose(1, 2) +@@ -437,6 +438,7 @@ class CifPredictorV3Export(torch.nn.Module): + alphas2 = alphas2.squeeze(-1) + _token_num = alphas2.sum(-1) + alphas2 *= (token_num / _token_num)[:, None].repeat(1, alphas2.size(1)) ++ return alphas2 + # upsampled alphas and cif_peak + us_alphas = alphas2 + us_cif_peak = cif_wo_hidden_export(us_alphas, self.threshold - 1e-4) +diff --git a/funasr/models/fsmn_vad_streaming/encoder.py b/funasr/models/fsmn_vad_streaming/encoder.py +index 6668c5d5..6b7c80fc 100755 +--- a/funasr/models/fsmn_vad_streaming/encoder.py ++++ b/funasr/models/fsmn_vad_streaming/encoder.py +@@ -231,7 +231,7 @@ class FSMN(nn.Module): + pass + + def forward( +- self, input: torch.Tensor, cache: Dict[str, torch.Tensor] ++ self, input: torch.Tensor, cache: Dict[str, torch.Tensor] = {} + ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: + """ + Args: +diff --git a/funasr/models/fsmn_vad_streaming/model.py b/funasr/models/fsmn_vad_streaming/model.py +index 04689bed..9db5bfd0 100644 +--- a/funasr/models/fsmn_vad_streaming/model.py ++++ b/funasr/models/fsmn_vad_streaming/model.py +@@ -18,19 +18,21 @@ from funasr.utils.datadir_writer import DatadirWriter + from funasr.utils.load_utils import load_audio_text_image_video, extract_fbank + + ++# 表示VAD状态机的状态,包括起点未检测、语音片段中、终点已检测 + class VadStateMachine(Enum): + kVadInStateStartPointNotDetected = 1 + kVadInStateInSpeechSegment = 2 + kVadInStateEndPointDetected = 3 + + ++# 表示每一帧的状态,是语音还是静音 + class FrameState(Enum): + kFrameStateInvalid = -1 + kFrameStateSpeech = 1 + kFrameStateSil = 0 + + +-# final voice/unvoice state per frame ++# 表示音频状态的变化,如从静音到语音,从语音到静音等(final voice/unvoice state per frame) + class AudioChangeState(Enum): + kChangeStateSpeech2Speech = 0 + kChangeStateSpeech2Sil = 1 +@@ -40,6 +42,7 @@ class AudioChangeState(Enum): + kChangeStateInvalid = 5 + + ++# 表示VAD的检测模式,支持单一和多重语音检测模式 + class VadDetectMode(Enum): + kVadSingleUtteranceDetectMode = 0 + kVadMutipleUtteranceDetectMode = 1 +@@ -299,6 +302,9 @@ class FsmnVADStreaming(nn.Module): + self.encoder = encoder + self.encoder_conf = encoder_conf + ++ self.ten_sil_pdf_ids = torch.tensor([0]) ++ self.tol_idx = 0 ++ + def ResetDetection(self, cache: dict = {}): + cache["stats"].continous_silence_frame_count = 0 + cache["stats"].latest_confirmed_speech_frame = 0 +@@ -323,32 +329,44 @@ class FsmnVADStreaming(nn.Module): + cache["stats"].scores = cache["stats"].scores[:, real_drop_frames:, :] + + def ComputeDecibel(self, cache: dict = {}) -> None: +- frame_sample_length = int(self.vad_opts.frame_length_ms * self.vad_opts.sample_rate / 1000) +- frame_shift_length = int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000) ++ frame_sample_length = int(self.vad_opts.frame_length_ms * self.vad_opts.sample_rate / 1000) # 每帧的样本长度 ++ frame_shift_length = int(self.vad_opts.frame_in_ms * self.vad_opts.sample_rate / 1000) # 帧移的样本长度 ++ waveform = 
cache["stats"].waveform[0] ++ # 如果缓存中的"data_buf_all"为空,初始化并将当前波形作为数据缓冲区 + if cache["stats"].data_buf_all is None: +- cache["stats"].data_buf_all = cache["stats"].waveform[ +- 0 +- ] # cache["stats"].data_buf is pointed to cache["stats"].waveform[0] ++ cache["stats"].data_buf_all = waveform + cache["stats"].data_buf = cache["stats"].data_buf_all ++ cache["stats"].prev_waveform_length = 0 # 初始化,用于记录之前处理的波形长度 + else: +- cache["stats"].data_buf_all = torch.cat( +- (cache["stats"].data_buf_all, cache["stats"].waveform[0]) +- ) +- for offset in range( +- 0, cache["stats"].waveform.shape[1] - frame_sample_length + 1, frame_shift_length +- ): +- cache["stats"].decibel.append( +- 10 +- * math.log10( +- (cache["stats"].waveform[0][offset : offset + frame_sample_length]) +- .square() +- .sum() +- + 0.000001 +- ) +- ) ++ # 如果"data_buf_all"不为空,将新的波形数据拼接到已有的缓冲区 ++ cache["stats"].data_buf_all = torch.cat((cache["stats"].data_buf_all, waveform)) ++ cache["stats"].data_buf = cache["stats"].data_buf_all ++ ++ total_waveform_length = cache["stats"].data_buf_all.shape[0] ++ num_total_frames = (total_waveform_length - frame_sample_length) ++ num_existing_frames = len(cache["stats"].decibel) # 已有的帧数 ++ num_new_frames = num_total_frames - num_existing_frames # 需要处理的新帧数 ++ ++ if num_new_frames > 0: ++ # 计算新帧起始索引 ++ start_index = num_existing_frames * frame_shift_length ++ end_index = start_index + num_new_frames * frame_shift_length + frame_sample_length - frame_shift_length ++ ++ # 获取波形数据 ++ processing_waveform = cache["stats"].data_buf_all[start_index:end_index] ++ ++ # 使用unfold将波形数据切分为帧,形状为[num_new_frames, frame_sample_length] ++ frames = processing_waveform.unfold(0, frame_sample_length, frame_shift_length) ++ frame_energies = frames.pow(2).sum(dim=1) + 1e-6 # 形状为[num_new_frames] ++ decibels = 10 * torch.log10(frame_energies) # 计算分贝值 ++ cache["stats"].decibel.extend(decibels.tolist()) # 结果添加到cache["stats"].decibel ++ ++ # 更新prev_waveform_length,指向已处理的波形位置 ++ cache["stats"].prev_waveform_length = start_index + num_new_frames * frame_shift_length ++ + + def ComputeScores(self, feats: torch.Tensor, cache: dict = {}) -> None: +- scores = self.encoder(feats, cache=cache["encoder"]).to("cpu") # return B * T * D ++ scores = self.encoder(feats).to("cpu") # return B * T * D + assert ( + scores.shape[1] == feats.shape[1] + ), "The shape between feats and scores does not match" +@@ -498,13 +516,14 @@ class FsmnVADStreaming(nn.Module): + return vad_latency + + def GetFrameState(self, t: int, cache: dict = {}): +- frame_state = FrameState.kFrameStateInvalid ++ frame_state = FrameState.kFrameStateInvalid # 初始化当前帧的状态为无效状态 + cur_decibel = cache["stats"].decibel[t] + cur_snr = cur_decibel - cache["stats"].noise_average_decibel + # for each frame, calc log posterior probability of each state ++ # 判断当前帧的分贝值是否小于预设的分贝阈值,如果是,则认为是静音帧 + if cur_decibel < self.vad_opts.decibel_thres: +- frame_state = FrameState.kFrameStateSil +- self.DetectOneFrame(frame_state, t, False, cache=cache) ++ frame_state = FrameState.kFrameStateSil # 设置帧状态为静音 ++ self.DetectOneFrame(frame_state, t, False, cache=cache) # 检测静音帧 + return frame_state + + sum_score = 0.0 +@@ -512,14 +531,18 @@ class FsmnVADStreaming(nn.Module): + assert len(cache["stats"].sil_pdf_ids) == self.vad_opts.silence_pdf_num + if len(cache["stats"].sil_pdf_ids) > 0: + assert len(cache["stats"].scores) == 1 # 只支持batch_size = 1的测试 +- sil_pdf_scores = [ +- cache["stats"].scores[0][t][sil_pdf_id] for sil_pdf_id in cache["stats"].sil_pdf_ids +- ] ++ ++ scores_tensor = 
cache["stats"].scores[0][t] ++ sil_pdf_scores = scores_tensor[self.ten_sil_pdf_ids].tolist() # 使用张量索引直接获取sil_pdf_scores ++ ++ # 计算噪声的概率,使用对数概率并乘以语音与噪声的比例 + sum_score = sum(sil_pdf_scores) + noise_prob = math.log(sum_score) * self.vad_opts.speech_2_noise_ratio + total_score = 1.0 + sum_score = total_score - sum_score + speech_prob = math.log(sum_score) ++ ++ # 如果需要输出帧的概率,则将噪声和语音概率保存到缓存中 + if self.vad_opts.output_frame_probs: + frame_prob = E2EVadFrameProb() + frame_prob.noise_prob = noise_prob +@@ -527,16 +550,22 @@ class FsmnVADStreaming(nn.Module): + frame_prob.score = sum_score + frame_prob.frame_id = t + cache["stats"].frame_probs.append(frame_prob) ++ ++ # 判断当前帧是否为语音帧,基于语音和噪声的概率以及设定的阈值 + if math.exp(speech_prob) >= math.exp(noise_prob) + cache["stats"].speech_noise_thres: ++ # 如果信噪比和分贝值都超过了阈值,则认为是语音帧 + if cur_snr >= self.vad_opts.snr_thres and cur_decibel >= self.vad_opts.decibel_thres: +- frame_state = FrameState.kFrameStateSpeech ++ frame_state = FrameState.kFrameStateSpeech # 设置帧状态为语音 + else: +- frame_state = FrameState.kFrameStateSil ++ frame_state = FrameState.kFrameStateSil # 设置帧状态为静音 ++ # 如果语音概率低于噪声概率,直接将帧状态设置为静音 + else: + frame_state = FrameState.kFrameStateSil ++ # 更新噪声的平均分贝值,用于后续帧的信噪比计算 + if cache["stats"].noise_average_decibel < -99.9: + cache["stats"].noise_average_decibel = cur_decibel + else: ++ # 平滑更新噪声的平均分贝值,基于一定数量的帧 + cache["stats"].noise_average_decibel = ( + cur_decibel + + cache["stats"].noise_average_decibel +@@ -556,21 +585,26 @@ class FsmnVADStreaming(nn.Module): + # if len(cache) == 0: + # self.AllResetDetection() + # self.waveform = waveform # compute decibel for each frame +- cache["stats"].waveform = waveform +- is_streaming_input = kwargs.get("is_streaming_input", True) ++ cache["stats"].waveform = waveform # 将输入的音频波形存入缓存的统计部分 ++ is_streaming_input = kwargs.get("is_streaming_input", True) # 是否为流式输入,默认是流式模式 ++ # 计算当前音频的分贝值,并更新缓存中的相关统计数据 + self.ComputeDecibel(cache=cache) ++ # 根据提取的音频特征计算得分 + self.ComputeScores(feats, cache=cache) ++ # 如果当前音频段不是最后一个段,则检测常规帧 + if not is_final: + self.DetectCommonFrames(cache=cache) + else: + self.DetectLastFrames(cache=cache) + segments = [] ++ # 遍历每个批次的特征数据,当前只支持batch_size=1 + for batch_num in range(0, feats.shape[0]): # only support batch_size = 1 now + segment_batch = [] + if len(cache["stats"].output_data_buf) > 0: + for i in range( + cache["stats"].output_data_buf_offset, len(cache["stats"].output_data_buf) + ): ++ # 流式输入情况 + if ( + is_streaming_input + ): # in this case, return [beg, -1], [], [-1, end], [beg, end] +@@ -594,22 +628,24 @@ class FsmnVADStreaming(nn.Module): + end_ms = -1 + cache["stats"].next_seg = False + segment = [start_ms, end_ms] +- ++ # 非流式输入情况 + else: # in this case, return [beg, end] +- ++ # 如果当前段没有起始或者结束点,并且不是最后一个段,则跳过 + if not is_final and ( + not cache["stats"].output_data_buf[i].contain_seg_start_point + or not cache["stats"].output_data_buf[i].contain_seg_end_point + ): + continue ++ # 获取当前段的起始和结束时间 + segment = [ + cache["stats"].output_data_buf[i].start_ms, + cache["stats"].output_data_buf[i].end_ms, + ] ++ # 更新缓存中的偏移量 + cache["stats"].output_data_buf_offset += 1 # need update this parameter + +- segment_batch.append(segment) +- ++ segment_batch.append(segment) # 将当前段加入批次段列表 ++ # 如果当前批次中有有效的段,加入到总体段列表中 + if segment_batch: + segments.append(segment_batch) + # if is_final: +@@ -655,24 +691,28 @@ class FsmnVADStreaming(nn.Module): + cache: dict = {}, + **kwargs, + ): +- ++ # 初始化缓存,如果缓存为空,初始化缓存以进行推理 + if len(cache) == 0: + self.init_cache(cache, **kwargs) + + meta_data = {} ++ # 获取分块大小,默认为60000 + 
chunk_size = kwargs.get("chunk_size", 60000) # 50ms +- chunk_stride_samples = int(chunk_size * frontend.fs / 1000) ++ chunk_stride_samples = int(chunk_size * frontend.fs / 1000) # 计算每个分块的步长,以采样率frontend.fs为基础,计算出每块的音频样本数 + + time1 = time.perf_counter() ++ # 判断是否为流式输入,依据chunk_size决定是否开启流式模式 + is_streaming_input = ( + kwargs.get("is_streaming_input", False) + if chunk_size >= 15000 + else kwargs.get("is_streaming_input", True) + ) ++ # 判断是否为最终块,流式输入时由"is_streaming_input"决定,非流式时总是"True" + is_final = ( + kwargs.get("is_final", False) if is_streaming_input else kwargs.get("is_final", True) + ) +- cfg = {"is_final": is_final, "is_streaming_input": is_streaming_input} ++ cfg = {"is_final": is_final, "is_streaming_input": is_streaming_input} # 传递推理配置参数,包括是否为流式输入和是否为最终块 ++ # 加载音频数据,同时支持加载文本、图像和视频,默认是"sound"类型 + audio_sample_list = load_audio_text_image_video( + data_in, + fs=frontend.fs, +@@ -685,16 +725,18 @@ class FsmnVADStreaming(nn.Module): + is_streaming_input = cfg["is_streaming_input"] + time2 = time.perf_counter() + meta_data["load_data"] = f"{time2 - time1:0.3f}" +- assert len(audio_sample_list) == 1, "batch_size must be set 1" +- ++ self.ten_sil_pdf_ids = torch.tensor(cache["stats"].sil_pdf_ids) ++ assert len(audio_sample_list) == 1, "batch_size must be set 1" # 确保一次只能处理一个音频样本 ++ # 将当前缓存中的音频样本与新的音频样本拼接在一起 + audio_sample = torch.cat((cache["prev_samples"], audio_sample_list[0])) + +- n = int(len(audio_sample) // chunk_stride_samples + int(_is_final)) +- m = int(len(audio_sample) % chunk_stride_samples * (1 - int(_is_final))) ++ n = int(len(audio_sample) // chunk_stride_samples + int(_is_final)) # 根据音频样本的长度和块大小计算分块数 ++ m = int(len(audio_sample) % chunk_stride_samples * (1 - int(_is_final))) # 计算最后一个块的剩余长度 + segments = [] ++ # 遍历每个分块进行推理 + for i in range(n): +- kwargs["is_final"] = _is_final and i == n - 1 +- audio_sample_i = audio_sample[i * chunk_stride_samples : (i + 1) * chunk_stride_samples] ++ kwargs["is_final"] = _is_final and i == n - 1 # 对每一个分块,确定是否为最后一块 ++ audio_sample_i = audio_sample[i * chunk_stride_samples : (i + 1) * chunk_stride_samples] # 取出当前分块的音频样本 + + # extract fbank feats + speech, speech_lengths = extract_fbank( +@@ -719,20 +761,20 @@ class FsmnVADStreaming(nn.Module): + "cache": cache, + "is_streaming_input": is_streaming_input, + } +- segments_i = self.forward(**batch) ++ segments_i = self.forward(**batch) # 前向传播 + if len(segments_i) > 0: + segments.extend(*segments_i) +- ++ # 如果当前分块是最后一块,重新初始化缓存 + cache["prev_samples"] = audio_sample[:-m] + if _is_final: + self.init_cache(cache) +- ++ # 如果输出目录存在,初始化文件写入器 + ibest_writer = None + if kwargs.get("output_dir") is not None: + if not hasattr(self, "writer"): + self.writer = DatadirWriter(kwargs.get("output_dir")) + ibest_writer = self.writer[f"{1}best_recog"] +- ++ # 最终结果列表 + results = [] + result_i = {"key": key[0], "value": segments} + # if "MODELSCOPE_ENVIRONMENT" in os.environ and os.environ["MODELSCOPE_ENVIRONMENT"] == "eas": +@@ -755,34 +797,35 @@ class FsmnVADStreaming(nn.Module): + def DetectCommonFrames(self, cache: dict = {}) -> int: + if cache["stats"].vad_state_machine == VadStateMachine.kVadInStateEndPointDetected: + return 0 ++ frame_states = [FrameState.kFrameStateInvalid] * self.vad_opts.nn_eval_block_size ++ # 批量计算多个帧的状态 + for i in range(self.vad_opts.nn_eval_block_size - 1, -1, -1): +- frame_state = FrameState.kFrameStateInvalid +- frame_state = self.GetFrameState( ++ frame_states[i] = self.GetFrameState( + cache["stats"].frm_cnt - 1 - i - cache["stats"].last_drop_frames, cache=cache + ) +- 
self.DetectOneFrame(frame_state, cache["stats"].frm_cnt - 1 - i, False, cache=cache) ++ for i in range(self.vad_opts.nn_eval_block_size - 1, -1, -1): ++ self.DetectOneFrame(frame_states[i], cache["stats"].frm_cnt - 1 - i, False, cache=cache) + + return 0 + + def DetectLastFrames(self, cache: dict = {}) -> int: +- if cache["stats"].vad_state_machine == VadStateMachine.kVadInStateEndPointDetected: +- return 0 ++ frame_states = [FrameState.kFrameStateInvalid] * self.vad_opts.nn_eval_block_size + for i in range(self.vad_opts.nn_eval_block_size - 1, -1, -1): +- frame_state = FrameState.kFrameStateInvalid +- frame_state = self.GetFrameState( ++ frame_states[i] = self.GetFrameState( + cache["stats"].frm_cnt - 1 - i - cache["stats"].last_drop_frames, cache=cache + ) ++ for i in range(self.vad_opts.nn_eval_block_size - 1, -1, -1): + if i != 0: +- self.DetectOneFrame(frame_state, cache["stats"].frm_cnt - 1 - i, False, cache=cache) ++ self.DetectOneFrame(frame_states[i], cache["stats"].frm_cnt - 1 - i, False, cache=cache) + else: +- self.DetectOneFrame(frame_state, cache["stats"].frm_cnt - 1, True, cache=cache) +- +- return 0 ++ self.DetectOneFrame(frame_states[i], cache["stats"].frm_cnt - 1, True, cache=cache) + + def DetectOneFrame( + self, cur_frm_state: FrameState, cur_frm_idx: int, is_final_frame: bool, cache: dict = {} + ) -> None: +- tmp_cur_frm_state = FrameState.kFrameStateInvalid ++ tmp_cur_frm_state = FrameState.kFrameStateInvalid # 初始化当前帧的临时状态为无效状态 ++ ++ # 根据当前帧的状态确定临时状态 + if cur_frm_state == FrameState.kFrameStateSpeech: + if math.fabs(1.0) > self.vad_opts.fe_prior_thres: + tmp_cur_frm_state = FrameState.kFrameStateSpeech +@@ -793,7 +836,8 @@ class FsmnVADStreaming(nn.Module): + state_change = cache["windows_detector"].DetectOneFrame( + tmp_cur_frm_state, cur_frm_idx, cache=cache + ) +- frm_shift_in_ms = self.vad_opts.frame_in_ms ++ frm_shift_in_ms = self.vad_opts.frame_in_ms # 获取帧移的时间 ++ # 检测状态转换:从静音到语音 + if AudioChangeState.kChangeStateSil2Speech == state_change: + silence_frame_count = cache["stats"].continous_silence_frame_count + cache["stats"].continous_silence_frame_count = 0 +@@ -873,10 +917,12 @@ class FsmnVADStreaming(nn.Module): + self.OnVoiceEnd(0, True, False, cache=cache) + cache["stats"].vad_state_machine = VadStateMachine.kVadInStateEndPointDetected + else: +- if cur_frm_idx >= self.LatencyFrmNumAtStartPoint(cache=cache): ++ lfasp = self.LatencyFrmNumAtStartPoint(cache=cache) ++ if cur_frm_idx >= lfasp: + self.OnSilenceDetected( +- cur_frm_idx - self.LatencyFrmNumAtStartPoint(cache=cache), cache=cache ++ cur_frm_idx - lfasp, cache=cache + ) ++ # 如果处于语音段中,检查是否超时或静音段是否过长 + elif cache["stats"].vad_state_machine == VadStateMachine.kVadInStateInSpeechSegment: + if ( + cache["stats"].continous_silence_frame_count * frm_shift_in_ms +diff --git a/funasr/models/sanm/encoder.py b/funasr/models/sanm/encoder.py +index b2a442bd..4b244353 100644 +--- a/funasr/models/sanm/encoder.py ++++ b/funasr/models/sanm/encoder.py +@@ -16,6 +16,7 @@ import torch.nn.functional as F + import numpy as np + from funasr.train_utils.device_funcs import to_device + from funasr.models.transformer.utils.nets_utils import make_pad_mask ++from funasr.utils.torch_function import sequence_mask + from funasr.models.sanm.attention import MultiHeadedAttention, MultiHeadedAttentionSANM + from funasr.models.transformer.embedding import ( + SinusoidalPositionEncoder, +@@ -355,6 +356,8 @@ class SANMEncoder(nn.Module): + self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch + 
self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf + ++ self.make_pad_mask = sequence_mask() ++ + def output_size(self) -> int: + return self._output_size + +@@ -374,7 +377,7 @@ class SANMEncoder(nn.Module): + Returns: + position embedded tensor and mask + """ +- masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device) ++ masks = (self.make_pad_mask(ilens)[:, None, :]).to(xs_pad.device) + xs_pad = xs_pad * self.output_size() ** 0.5 + if self.embed is None: + xs_pad = xs_pad +diff --git a/funasr/models/transformer/utils/repeat.py b/funasr/models/transformer/utils/repeat.py +index a44c1a01..0935d854 100644 +--- a/funasr/models/transformer/utils/repeat.py ++++ b/funasr/models/transformer/utils/repeat.py +@@ -28,8 +28,9 @@ class MultiSequential(torch.nn.Sequential): + """Repeat.""" + _probs = torch.empty(len(self)).uniform_() + for idx, m in enumerate(self): +- if not self.training or (_probs[idx] >= self.layer_drop_rate): +- args = m(*args) ++ # if not self.training or (_probs[idx] >= self.layer_drop_rate): ++ # args = m(*args) ++ args = m(*args) + return args + + diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_auto_model.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_auto_model.py new file mode 100644 index 0000000000000000000000000000000000000000..7f6d8293036171494cb8db875ad66da7341963b7 --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_auto_model.py @@ -0,0 +1,225 @@ +import sys +sys.path.append("./FunASR") + +import torch +import time +import logging +from tqdm import tqdm + +from mindie_paraformer import MindieBiCifParaformer +from mindie_encoder_decoder import MindieEncoder, MindieDecoder +from mindie_punc import MindiePunc, MindieCTTransformer +from mindie_cif import MindieCifTimestamp, MindieCif +from mindie_vad import MindieVAD +from funasr.auto.auto_model import AutoModel, download_model, tables, deep_update, \ + load_pretrained_model, prepare_data_iterator + + +class MindieAutoModel(AutoModel): + def __init__(self, **kwargs): + log_level = getattr(logging, kwargs.get("log_level", "INFO").upper()) + logging.basicConfig(level=log_level) + + if not kwargs.get("disable_log", True): + tables.print() + + kwargs["compile_type"] = "paraformer" + model, kwargs = self.build_model_with_mindie(**kwargs) + + # if vad_model is not None, build vad model else None + vad_model = kwargs.get("vad_model", None) + vad_kwargs = {} if kwargs.get("vad_kwargs", {}) is None else kwargs.get("vad_kwargs", {}) + if vad_model is not None: + logging.info("Building VAD model.") + vad_kwargs["model"] = vad_model + vad_kwargs["model_revision"] = kwargs.get("vad_model_revision", "master") + vad_model, vad_kwargs = self.build_model(**vad_kwargs) + vad_kwargs["device"] = "npu" + compiled_vad = torch.jit.load(kwargs["compiled_vad"]) + vad_model.encoder = compiled_vad + + # if punc_model is not None, build punc model else None + punc_model = kwargs.get("punc_model", None) + punc_kwargs = {} if kwargs.get("punc_kwargs", {}) is None else kwargs.get("punc_kwargs", {}) + if punc_model is not None: + logging.info("Building punc model.") + punc_kwargs["model"] = punc_model + punc_kwargs["model_revision"] = kwargs.get("punc_model_revision", "master") + punc_kwargs["device"] = "cpu" + punc_kwargs["compile_type"] = "punc" + punc_kwargs["compiled_punc"] = kwargs["compiled_punc"] + punc_model, punc_kwargs = self.build_model_with_mindie(**punc_kwargs) + + self.kwargs = kwargs + self.model = model + self.vad_model = vad_model + self.vad_kwargs = vad_kwargs + 
self.punc_model = punc_model + self.punc_kwargs = punc_kwargs + self.spk_model = None + self.spk_kwargs = {} + self.model_path = kwargs.get("model_path") + + def generate(self, input, input_len=None, **cfg): + if self.vad_model is None: + return self.inference_with_asr(input, input_len=input_len, **cfg) + + else: + return self.inference_with_vad(input, input_len=input_len, **cfg) + + @staticmethod + def export_model(**kwargs): + model, kwargs = AutoModel.build_model(**kwargs) + + if kwargs["compile_type"] == "punc": + punc = MindiePunc(model) + MindiePunc.export(punc, kwargs["compiled_path"], kwargs["soc_version"]) + elif kwargs["compile_type"] == "vad": + vad = MindieVAD(model) + MindieVAD.export(vad, kwargs["compiled_path"], kwargs["soc_version"]) + else: + import copy + from funasr.models.bicif_paraformer.export_meta import export_rebuild_model + + kwargs_new = copy.deepcopy(kwargs) + kwargs_new['onnx'] = False + kwargs_new["max_seq_len"] = 512 + del kwargs_new["model"] + model = export_rebuild_model(model, **kwargs_new) + + encoder = MindieEncoder(model) + MindieEncoder.export_ts(encoder, kwargs["compiled_encoder"], kwargs["soc_version"], kwargs["traced_encoder"]) + + decoder = MindieDecoder(model) + MindieDecoder.export_ts(decoder, kwargs["compiled_decoder"], kwargs["soc_version"], kwargs["traced_decoder"]) + + mindie_cif = MindieCif(model.predictor.threshold, kwargs["cif_interval"]) + mindie_cif.export_ts(kwargs["compiled_cif"], kwargs["soc_version"]) + + mindie_cif_timestamp = MindieCifTimestamp(model.predictor.threshold - 1e-4, kwargs["cif_timestamp_interval"]) + mindie_cif_timestamp.export_ts(kwargs["compiled_cif_timestamp"], kwargs["soc_version"]) + + def build_model_with_mindie(self, **kwargs): + assert "model" in kwargs + if "model_conf" not in kwargs: + logging.info("download models from model hub: {}".format(kwargs.get("hub", "ms"))) + kwargs = download_model(**kwargs) + + torch.set_num_threads(kwargs.get("ncpu", 4)) + + # build tokenizer + tokenizer = kwargs.get("tokenizer", None) + if tokenizer is not None: + tokenizer_class = tables.tokenizer_classes.get(tokenizer) + tokenizer = tokenizer_class(**kwargs.get("tokenizer_conf", {})) + kwargs["token_list"] = ( + tokenizer.token_list if hasattr(tokenizer, "token_list") else None + ) + kwargs["token_list"] = ( + tokenizer.get_vocab() if hasattr(tokenizer, "get_vocab") else kwargs["token_list"] + ) + vocab_size = len(kwargs["token_list"]) if kwargs["token_list"] is not None else -1 + if vocab_size == -1 and hasattr(tokenizer, "get_vocab_size"): + vocab_size = tokenizer.get_vocab_size() + else: + vocab_size = -1 + kwargs["tokenizer"] = tokenizer + + # build frontend + frontend = kwargs.get("frontend", None) + kwargs["input_size"] = None + if frontend is not None: + frontend_class = tables.frontend_classes.get(frontend) + frontend = frontend_class(**kwargs.get("frontend_conf", {})) + kwargs["input_size"] = ( + frontend.output_size() if hasattr(frontend, "output_size") else None + ) + kwargs["frontend"] = frontend + + # build model + model_conf = {} + deep_update(model_conf, kwargs.get("model_conf", {})) + deep_update(model_conf, kwargs) + + if kwargs["compile_type"] == "punc": + model = MindieCTTransformer(**model_conf, vocab_size=vocab_size) + else: + model = MindieBiCifParaformer(**model_conf, vocab_size=vocab_size) + + # init_param + init_param = kwargs.get("init_param", None) + logging.info(f"Loading pretrained params from {init_param}") + load_pretrained_model( + model=model, + path=init_param, + 
ignore_init_mismatch=kwargs.get("ignore_init_mismatch", True), + oss_bucket=kwargs.get("oss_bucket", None), + scope_map=kwargs.get("scope_map", []), + excludes=kwargs.get("excludes", None), + ) + + return model, kwargs + + def inference_with_asr(self, input, input_len=None, model=None, kwargs=None, key=None, display_pbar=False, **cfg): + kwargs = self.kwargs if kwargs is None else kwargs + deep_update(kwargs, cfg) + model = self.model if model is None else model + model.eval() + + batch_size = kwargs.get("batch_size", 1) + + key_list, data_list = prepare_data_iterator( + input, input_len=input_len, data_type=kwargs.get("data_type", None), key=key + ) + + time_stats = {"rtf_avg": 0.0, "input_speech_time": 0.0, "end_to_end_time": 0.0, "pure_infer_time": 0.0, + "load_data": 0.0, "encoder": 0.0, "predictor": 0.0, "decoder": 0.0, + "predictor_timestamp": 0.0, "post_process": 0.0} + asr_result_list = [] + num_samples = len(data_list) + + if display_pbar: + pbar = tqdm(colour="blue", total=num_samples, dynamic_ncols=True) + + for beg_idx in range(0, num_samples, batch_size): + end_idx = min(num_samples, beg_idx + batch_size) + data_batch = data_list[beg_idx:end_idx] + key_batch = key_list[beg_idx:end_idx] + batch = {"data_in": data_batch, "key": key_batch} + + if (end_idx - beg_idx) == 1 and kwargs.get("data_type", None) == "fbank": # fbank + batch["data_in"] = data_batch[0] + batch["data_lengths"] = input_len + + with torch.no_grad(): + time1 = time.perf_counter() + res = model.inference_with_npu(**batch, **kwargs) + time2 = time.perf_counter() + if isinstance(res, (list, tuple)): + results = res[0] if len(res) > 0 else [{"text": ""}] + meta_data = res[1] if len(res) > 1 else {} + + asr_result_list.extend(results) + + # batch_data_time = time_per_frame_s * data_batch_i["speech_lengths"].sum().item() + batch_data_time = meta_data.get("batch_data_time", -1) + time_escape = time2 - time1 + + time_stats["load_data"] += meta_data.get("load_data", 0.0) + time_stats["encoder"] += meta_data.get("encoder", 0.0) + time_stats["predictor"] += meta_data.get("calc_predictor", 0.0) + time_stats["decoder"] += meta_data.get("decoder", 0.0) + time_stats["predictor_timestamp"] += meta_data.get("calc_predictor_timestamp", 0.0) + time_stats["post_process"] += meta_data.get("post_process", 0.0) + time_stats["end_to_end_time"] += time_escape + + time_stats["input_speech_time"] += batch_data_time + + time_stats["pure_infer_time"] = time_stats["end_to_end_time"] - time_stats["load_data"] + time_stats["rtf_avg"] = time_stats["input_speech_time"] / time_stats["pure_infer_time"] + + if display_pbar: + pbar.update(batch_size) + pbar.set_description("rtf_avg:{:.3f}".format(time_stats["rtf_avg"])) + + return asr_result_list, time_stats \ No newline at end of file diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_cif.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_cif.py new file mode 100644 index 0000000000000000000000000000000000000000..1782973ed3799301ff90cb29bc3d54a1b3ef08db --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_cif.py @@ -0,0 +1,172 @@ +import sys +sys.path.append("./FunASR") + +import torch +import mindietorch + +from mindie_paraformer import precision_eval + + +def cif(hidden, alphas, integrate, frame, threshold): + batch_size, len_time, hidden_size = hidden.size() + + # intermediate vars along time + list_fires = [] + list_frames = [] + + constant = torch.ones([batch_size], device=hidden.device) + for t in range(len_time): + alpha = alphas[:, t] + 
distribution_completion = constant - integrate + + integrate += alpha + list_fires.append(integrate) + + fire_place = integrate >= threshold + integrate = torch.where( + fire_place, integrate - constant, integrate + ) + cur = torch.where(fire_place, distribution_completion, alpha) + remainds = alpha - cur + + frame += cur[:, None] * hidden[:, t, :] + list_frames.append(frame) + frame = torch.where( + fire_place[:, None].repeat(1, hidden_size), remainds[:, None] * hidden[:, t, :], frame + ) + + fires = torch.stack(list_fires, 1) + frames = torch.stack(list_frames, 1) + + return fires, frames, integrate, frame + + +def cif_wo_hidden(alphas, integrate, threshold): + batch_size, len_time = alphas.size() + + list_fires = [] + + constant = torch.ones([batch_size], device=alphas.device) * threshold + + for t in range(len_time): + alpha = alphas[:, t] + + integrate += alpha + list_fires.append(integrate) + + fire_place = integrate >= threshold + integrate = torch.where( + fire_place, + integrate - constant, + integrate, + ) + + fire_list = [] + for i in range(0, len(list_fires), 500): + batch = list_fires[i:i + 500] + fire = torch.stack(batch, 1) + fire_list.append(fire) + + fires = torch.cat(fire_list, 1) + return fires, integrate + + +class MindieCif(torch.nn.Module): + def __init__(self, threshold, seq_len): + super().__init__() + self.threshold = threshold + self.seq_len = seq_len + + def forward(self, hidden, alphas, integrate, frame): + fires, frames, integrate_new, frame_new = cif(hidden, alphas, integrate, frame, self.threshold) + + frame = torch.index_select(frames[0, :, :], 0, torch.nonzero(fires[0, :] >= self.threshold).squeeze(1)) + + return frame, integrate_new, frame_new + + def export_ts(self, path="./compiled_cif.pt", soc_version="Ascendxxx"): + print("Begin tracing cif function.") + + input_shape1 = (1, self.seq_len, 512) + input_shape2 = (1, self.seq_len) + input_shape3 = (1, ) + input_shape4 = (1, 512) + + hidden = torch.randn(input_shape1, dtype=torch.float32) + alphas = torch.randn(input_shape2, dtype=torch.float32) + integrate = torch.randn(input_shape3, dtype=torch.float32) + frame = torch.randn(input_shape4, dtype=torch.float32) + compile_inputs = [mindietorch.Input(shape = input_shape1, dtype = torch.float32), + mindietorch.Input(shape = input_shape2, dtype = torch.float32), + mindietorch.Input(shape = input_shape3, dtype = torch.float32), + mindietorch.Input(shape = input_shape4, dtype = torch.float32)] + + export_model = torch.jit.trace(self, example_inputs=(hidden, alphas, integrate, frame)) + print("Finish tracing cif function.") + + compiled_model = mindietorch.compile( + export_model, + inputs = compile_inputs, + precision_policy = mindietorch.PrecisionPolicy.PREF_FP16, + default_buffer_size_vec = [1, 10, 10], + soc_version = soc_version, + ir = "ts" + ) + compiled_model.save(path) + print("Finish compiling cif function, compiled model is saved in {}.".format(path)) + # compiled_model = torch.jit.load(path) + + print("Start checking the percision of cif function.") + sample_hidden = torch.randn(input_shape1, dtype=torch.float32) + sample_alphas = torch.randn(input_shape2, dtype=torch.float32) + sample_integrate = torch.randn(input_shape3, dtype=torch.float32) + sample_frame = torch.randn(input_shape4, dtype=torch.float32) + mrt_res = compiled_model(sample_hidden.to("npu"), sample_alphas.to("npu"), + sample_integrate.to("npu"), sample_frame.to("npu")) + ref_res = self.forward(sample_hidden, sample_alphas, sample_integrate, sample_frame) + precision_eval(mrt_res, 
ref_res) + + +class MindieCifTimestamp(torch.nn.Module): + def __init__(self, threshold, seq_len): + super().__init__() + self.threshold = threshold + self.seq_len = seq_len + + def forward(self, us_alphas, integrate): + us_peaks, integrate_new = cif_wo_hidden(us_alphas, integrate, self.threshold) + + return us_peaks, integrate_new + + def export_ts(self, path="./compiled_cif_timestamp.ts", soc_version="Ascend310P3"): + print("Begin tracing cif_timestamp function.") + + input_shape1 = (1, self.seq_len) + input_shape2 = (1, ) + + us_alphas = torch.randn(input_shape1, dtype=torch.float32) + integrate = torch.randn(input_shape2, dtype=torch.float32) + compile_inputs = [mindietorch.Input(shape = input_shape1, dtype = torch.float32), + mindietorch.Input(shape = input_shape2, dtype = torch.float32)] + + export_model = torch.jit.trace(self, example_inputs=(us_alphas, integrate)) + print("Finish tracing cif_timestamp function.") + + compiled_model = mindietorch.compile( + export_model, + inputs = compile_inputs, + precision_policy = mindietorch.PrecisionPolicy.PREF_FP16, + default_buffer_size_vec = [1, 10], + soc_version = soc_version, + ir = "ts" + ) + compiled_model.save(path) + print("Finish compiling cif_timestamp function, compiled model is saved in {}.".format(path)) + # compiled_model = torch.jit.load(path) + + print("Start checking the percision of cif_timestamp function.") + sample_input1 = torch.randn(input_shape1, dtype=torch.float32) + sample_input2 = torch.randn(input_shape2, dtype=torch.float32) + mrt_res = compiled_model(sample_input1.to("npu"), sample_input2.to("npu")) + ref_res = self.forward(sample_input1, sample_input2) + precision_eval(mrt_res, ref_res) \ No newline at end of file diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_encoder_decoder.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_encoder_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..39ca45313dd746f389fb8a11d2d01f6b9f557798 --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_encoder_decoder.py @@ -0,0 +1,124 @@ +import sys +import os +sys.path.append("./FunASR") + +import torch +import mindietorch + + +class MindieEncoder(torch.nn.Module): + def __init__(self, model): + super().__init__() + self.model = model.eval() + + def forward(self, speech, speech_length): + batch = {"speech": speech, "speech_lengths": speech_length} + enc, enc_len = self.model.encoder(**batch) + mask = self.model.make_pad_mask(enc_len)[:, None, :] + hidden, alphas, pre_token_length = self.model.predictor(enc, mask) + return enc, hidden, alphas, pre_token_length + + @staticmethod + def export_ts(encoder, path="./compiled_encoder.pt", soc_version="Ascendxxx", traced_path=None): + print("Begin tracing encoder.") + + input_shape = (2, 50, 560) + min_shape = (-1, -1, 560) + max_shape = (-1, -1, 560) + + if traced_path is not None and os.path.exists(traced_path): + export_model = torch.load(traced_path) + print("Load existing traced encoder from {}".format(traced_path)) + else: + input_speech = torch.randn(input_shape, dtype=torch.float32) + input_speech_lens = torch.tensor([50, 25], dtype=torch.int32) + + export_model = torch.jit.trace(encoder, example_inputs=(input_speech, input_speech_lens)) + print("Finish tracing encoder.") + + compile_inputs = [mindietorch.Input(min_shape = min_shape, max_shape = max_shape, dtype = torch.float32), + mindietorch.Input(min_shape = (-1, ), max_shape = (-1, ), dtype = torch.int32)] + + compiled_model = mindietorch.compile( + 
export_model, + inputs = compile_inputs, + precision_policy = mindietorch.PrecisionPolicy.PREF_FP16, + default_buffer_size_vec = [400, 1, 400, 1], + soc_version = soc_version, + ir = "ts" + ) + compiled_model.save(path) + print("Finish compiling encoder, compiled model is saved in {}.".format(path)) + # compiled_model = torch.jit.load(path) + + print("Start checking the percision of encoder.") + sample_speech = torch.randn((4, 100, 560), dtype=torch.float32) + sample_speech_lens = torch.tensor([100, 50, 100, 25], dtype=torch.int32) + _ = compiled_model(sample_speech.to("npu"), sample_speech_lens.to("npu")) + print("Finish checking encoder.") + + +class MindieDecoder(torch.nn.Module): + def __init__(self, model): + super().__init__() + self.model = model.eval() + + def forward(self, encoder_out, encoder_out_lens, sematic_embeds, pre_token_length): + decoder_outs = self.model.decoder(encoder_out, encoder_out_lens, sematic_embeds, pre_token_length) + decoder_out = decoder_outs[0] + decoder_out = torch.log_softmax(decoder_out, dim=-1) + + encoder_out_mask = self.model.make_pad_mask(encoder_out_lens)[:, None, :] + + us_alphas = self.model.predictor.get_upsample_timestamp(encoder_out, encoder_out_mask, pre_token_length) + + return decoder_out, us_alphas + + @staticmethod + def export_ts(decoder, path="./compiled_decoder.pt", soc_version="Ascendxxx", traced_path=None): + print("Begin tracing decoder.") + + input_shape1 = (2, 939, 512) + min_shape1 = (-1, -1, 512) + max_shape1 = (-1, -1, 512) + + input_shape2 = (2, 261, 512) + min_shape2 = (-1, -1, 512) + max_shape2 = (-1, -1, 512) + + if traced_path is not None and os.path.exists(traced_path): + export_model = torch.load(traced_path) + print("Load existing traced decoder from {}".format(traced_path)) + else: + encoder_out = torch.randn(input_shape1, dtype=torch.float32) + encoder_out_lens = torch.tensor([939, 500], dtype=torch.int32) + sematic_embeds = torch.randn(input_shape2, dtype=torch.float32) + sematic_embeds_lens = torch.tensor([261, 100], dtype=torch.int32) + + export_model = torch.jit.trace(decoder, example_inputs=(encoder_out, encoder_out_lens, sematic_embeds, sematic_embeds_lens)) + print("Finish tracing decoder.") + + compile_inputs = [mindietorch.Input(min_shape = min_shape1, max_shape = max_shape1, dtype = torch.float32), + mindietorch.Input(min_shape = (-1, ), max_shape = (-1, ), dtype = torch.int32), + mindietorch.Input(min_shape = min_shape2, max_shape = max_shape2, dtype = torch.float32), + mindietorch.Input(min_shape = (-1, ), max_shape = (-1, ), dtype = torch.int32)] + + compiled_model = mindietorch.compile( + export_model, + inputs = compile_inputs, + precision_policy = mindietorch.PrecisionPolicy.PREF_FP16, + default_buffer_size_vec = [800, 10], + soc_version = soc_version, + ir = "ts" + ) + compiled_model.save(path) + print("Finish compiling decoder, compiled model is saved in {}.".format(path)) + # compiled_model = torch.jit.load(path) + + print("Start checking the percision of decoder.") + sample_encoder = torch.randn((4, 150, 512), dtype=torch.float32) + sample_encoder_lens = torch.tensor([150, 100, 150, 50], dtype=torch.int32) + sample_sematic = torch.randn((4, 50, 512), dtype=torch.float32) + sample_sematic_lens = torch.tensor([50, 30, 50, 10], dtype=torch.int32) + _ = compiled_model(sample_encoder.to("npu"), sample_encoder_lens.to("npu"), sample_sematic.to("npu"), sample_sematic_lens.to("npu")) + print("Finish checking decoder.") \ No newline at end of file diff --git 
a/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_fa.patch b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_fa.patch new file mode 100644 index 0000000000000000000000000000000000000000..6e251497187eda7791337f1561024a0081e8bf7e --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_fa.patch @@ -0,0 +1,121 @@ +diff --git a/funasr/models/sanm/attention.py b/funasr/models/sanm/attention.py +index c7e8a8e0..9bfccb78 100644 +--- a/funasr/models/sanm/attention.py ++++ b/funasr/models/sanm/attention.py +@@ -365,76 +365,26 @@ class MultiHeadedAttentionSANMExport(nn.Module): + + def forward(self, x, mask): + mask_3d_btd, mask_4d_bhlt = mask +- q_h, k_h, v_h, v = self.forward_qkv(x) ++ q_h, k_h, v_h, v = self.forward_qkv(x) # [b, s, h, d] + fsmn_memory = self.forward_fsmn(v, mask_3d_btd) +- q_h = q_h * self.d_k ** (-0.5) +- scores = torch.matmul(q_h, k_h.transpose(-2, -1)) +- att_outs = self.forward_attention(v_h, scores, mask_4d_bhlt) +- return att_outs + fsmn_memory + +- def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: +- new_x_shape = x.size()[:-1] + (self.h, self.d_k) +- x = x.view(new_x_shape) +- return x.permute(0, 2, 1, 3) ++ scale = self.d_k ** (-0.5) + +- def forward_qkv(self, x): +- q_k_v = self.linear_q_k_v(x) +- q, k, v = torch.split(q_k_v, int(self.h * self.d_k), dim=-1) +- q_h = self.transpose_for_scores(q) +- k_h = self.transpose_for_scores(k) +- v_h = self.transpose_for_scores(v) +- return q_h, k_h, v_h, v ++ seq_len = mask_4d_bhlt.size(-1) ++ attn_mask = mask_4d_bhlt != 0 ++ attn_mask = attn_mask.expand(-1, -1, seq_len, seq_len) ++ ++ context_layer = torch.ops.aie.flash_attention(q_h, k_h, v_h, num_head=self.h, attn_mask=attn_mask, pse=None, scale=scale, layout="BSND", type="PFA") + +- def forward_fsmn(self, inputs, mask): +- # b, t, d = inputs.size() +- # mask = torch.reshape(mask, (b, -1, 1)) +- inputs = inputs * mask +- x = inputs.transpose(1, 2) +- x = self.pad_fn(x) +- x = self.fsmn_block(x) +- x = x.transpose(1, 2) +- x = x + inputs +- x = x * mask +- return x +- +- def forward_attention(self, value, scores, mask): +- scores = scores + mask +- +- self.attn = torch.softmax(scores, dim=-1) +- context_layer = torch.matmul(self.attn, value) # (batch, head, time1, d_k) +- +- context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) +- return self.linear_out(context_layer) # (batch, time1, d_model) +- +- +-class MultiHeadedAttentionSANMExport(nn.Module): +- def __init__(self, model): +- super().__init__() +- self.d_k = model.d_k +- self.h = model.h +- self.linear_out = model.linear_out +- self.linear_q_k_v = model.linear_q_k_v +- self.fsmn_block = model.fsmn_block +- self.pad_fn = model.pad_fn +- +- self.attn = None +- self.all_head_size = self.h * self.d_k +- +- def forward(self, x, mask): +- mask_3d_btd, mask_4d_bhlt = mask +- q_h, k_h, v_h, v = self.forward_qkv(x) +- fsmn_memory = self.forward_fsmn(v, mask_3d_btd) +- q_h = q_h * self.d_k ** (-0.5) +- scores = torch.matmul(q_h, k_h.transpose(-2, -1)) +- att_outs = self.forward_attention(v_h, scores, mask_4d_bhlt) ++ att_outs = self.linear_out(context_layer) + return att_outs + fsmn_memory + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.h, self.d_k) + x = x.view(new_x_shape) +- return x.permute(0, 2, 1, 3) ++ return x + + def forward_qkv(self, x): + q_k_v = self.linear_q_k_v(x) +@@ 
-760,14 +710,26 @@ class MultiHeadedAttentionCrossAttExport(nn.Module): + self.all_head_size = self.h * self.d_k + + def forward(self, x, memory, memory_mask, ret_attn=False): +- q, k, v = self.forward_qkv(x, memory) +- scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) +- return self.forward_attention(v, scores, memory_mask, ret_attn) ++ q, k, v = self.forward_qkv(x, memory) # [b, s, h, d] ++ ++ scale = 1 / math.sqrt(self.d_k) ++ ++ seq_len = q.size(1) ++ attn_mask = memory_mask != 0 ++ attn_mask = attn_mask.expand(-1, -1, seq_len, -1) ++ ++ context_layer = torch.ops.aie.flash_attention(q, k, v, num_head=self.h, attn_mask=attn_mask, pse=None, scale=scale, layout="BSND", type="FA") ++ ++ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) ++ context_layer = context_layer.view(new_context_layer_shape) ++ att_outs = self.linear_out(context_layer) ++ ++ return att_outs + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.h, self.d_k) + x = x.view(new_x_shape) +- return x.permute(0, 2, 1, 3) ++ return x + + def forward_qkv(self, x, memory): + q = self.linear_q(x) diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_paraformer.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_paraformer.py new file mode 100644 index 0000000000000000000000000000000000000000..1d0b73cf890094e17516fd0eac6f19c5ceb3bcc1 --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_paraformer.py @@ -0,0 +1,233 @@ +import sys +sys.path.append("./FunASR") + +import copy +import time +import torch +import torch.nn.functional as F + +from funasr.models.bicif_paraformer.model import BiCifParaformer, load_audio_text_image_video, \ + extract_fbank, Hypothesis, ts_prediction_lfr6_standard, postprocess_utils + + +COSINE_THRESHOLD = 0.999 +def cosine_similarity(gt_tensor, pred_tensor): + gt_tensor = gt_tensor.flatten().to(torch.float32) + pred_tensor = pred_tensor.flatten().to(torch.float32) + if torch.sum(gt_tensor) == 0.0 or torch.sum(pred_tensor) == 0.0: + if torch.allclose(gt_tensor, pred_tensor, atol=1e-4, rtol=1e-4, equal_nan=True): + return 1.0 + res = torch.nn.functional.cosine_similarity(gt_tensor, pred_tensor, dim=0, eps=1e-6) + res = res.cpu().detach().item() + return res + + +def precision_eval(mrt_res, ref_res): + if not isinstance(mrt_res, (list, tuple)): + mrt_res = [mrt_res, ] + if not isinstance(ref_res, (list, tuple)): + ref_res = [ref_res, ] + + com_res = True + for j, a in zip(mrt_res, ref_res): + res = cosine_similarity(j.to("cpu"), a) + print(res) + if res < COSINE_THRESHOLD: + com_res = False + + if com_res: + print("Compare success! NPU model have the same output with CPU model!") + else: + print("Compare failed! 
Outputs of NPU model are not the same with CPU model!") + + +class MindieBiCifParaformer(BiCifParaformer): + def __init__( + self, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + + self.mindie_encoder = torch.jit.load(kwargs["compiled_encoder"]) + self.mindie_decoder = torch.jit.load(kwargs["compiled_decoder"]) + self.mindie_cif = torch.jit.load(kwargs["compiled_cif"]) + self.mindie_cif_timestamp = torch.jit.load(kwargs["compiled_cif_timestamp"]) + + def inference_with_npu( + self, + data_in, + data_lengths=None, + key: list = None, + tokenizer=None, + frontend=None, + **kwargs, + ): + # Step1: load input data + time1 = time.perf_counter() + meta_data = {} + + is_use_ctc = kwargs.get("decoding_ctc_weight", 0.0) > 0.00001 and self.ctc != None + is_use_lm = ( + kwargs.get("lm_weight", 0.0) > 0.00001 and kwargs.get("lm_file", None) is not None + ) + if self.beam_search is None and (is_use_lm or is_use_ctc): + self.init_beam_search(**kwargs) + self.nbest = kwargs.get("nbest", 1) + audio_sample_list = load_audio_text_image_video( + data_in, fs=frontend.fs, audio_fs=kwargs.get("fs", 16000) + ) + + speech, speech_lengths = extract_fbank( + audio_sample_list, data_type=kwargs.get("data_type", "sound"), frontend=frontend + ) + meta_data["batch_data_time"] = ( + speech_lengths.sum().item() * frontend.frame_shift * frontend.lfr_n / 1000 + ) + speech = speech.to("npu") + speech_lengths = speech_lengths.to("npu") + + time2 = time.perf_counter() + meta_data["load_data"] = time2 - time1 + + # Step2: run with compiled encoder + encoder_out, hidden, alphas, pre_token_length = self.mindie_encoder(speech, speech_lengths) + encoder_out_lens = speech_lengths + + hidden = hidden.to(kwargs["mindie_device"]) + alphas = alphas.to(kwargs["mindie_device"]) + pre_token_length = pre_token_length.to(kwargs["mindie_device"]) + + pre_token_length = pre_token_length.round().to(torch.int32) + time3 = time.perf_counter() + meta_data["encoder"] = time3 - time2 + + + # Step3: divide dynamic loop into multiple smaller loops for calculation + # each with a number of iterations based on kwargs["cif_interval"] + batch_size, len_time, hidden_size = hidden.size() + loop_num = len_time // kwargs["cif_interval"] + 1 + padding_len = loop_num * kwargs["cif_interval"] + padding_size = padding_len - len_time + padded_hidden = F.pad(hidden, (0, 0, 0, padding_size), "constant", 0) + padded_alphas = F.pad(alphas, (0, padding_size), "constant", 0) + + len_labels = torch.round(alphas.sum(-1)).int() + max_label_len = len_labels.max() + + frames_batch = [] + for b in range(batch_size): + frames_list = [] + integrate = torch.zeros([1, ]).to("npu") + frame = torch.zeros([1, hidden_size]).to("npu") + for i in range(loop_num): + cur_hidden = padded_hidden[b : b + 1, i * kwargs["cif_interval"] : (i + 1) * kwargs["cif_interval"], :] + cur_alphas = padded_alphas[b : b + 1, i * kwargs["cif_interval"] : (i + 1) * kwargs["cif_interval"]] + cur_frames, integrate, frame = self.mindie_cif(cur_hidden.to("npu"), cur_alphas.to("npu"), integrate, frame) + frames_list.append(cur_frames.to(kwargs["mindie_device"])) + frame = torch.cat(frames_list, 0) + pad_frame = torch.zeros([max_label_len - frame.size(0), hidden_size], device=hidden.device) + frames_batch.append(torch.cat([frame, pad_frame], 0)) + + acoustic_embeds = torch.stack(frames_batch, 0) + token_num_int = torch.max(pre_token_length) + pre_acoustic_embeds = acoustic_embeds[:, :token_num_int, :] + + if torch.max(pre_token_length) < 1: + return [] + time4 = time.perf_counter() + 
meta_data["calc_predictor"] = time4 - time3 + + + # Step4: run with compiled decoder + decoder_out, us_alphas = self.mindie_decoder(encoder_out, encoder_out_lens, + pre_acoustic_embeds.contiguous().to("npu"), pre_token_length.contiguous().to("npu")) + us_alphas = us_alphas.to(kwargs["mindie_device"]) + time5 = time.perf_counter() + meta_data["decoder"] = time5 - time4 + + + # Step5: divide dynamic loop into multiple smaller loops for calculation + # each with a number of iterations based on kwargs["cif_timestamp_interval"] + len_alphas = us_alphas.shape[1] + loop_num = len_alphas // kwargs["cif_timestamp_interval"] + 1 + padding_len = loop_num * kwargs["cif_timestamp_interval"] + padding_size = padding_len - len_alphas + padded_alphas = F.pad(us_alphas, (0, padding_size), "constant", 0) + + peak_batch = [] + for b in range(batch_size): + peak_list = [] + integrate_alphas = torch.zeros([1]).to("npu") + for i in range(loop_num): + cur_alphas = padded_alphas[b:b+1, i * kwargs["cif_timestamp_interval"] : (i + 1) * kwargs["cif_timestamp_interval"]] + peak, integrate_alphas = self.mindie_cif_timestamp(cur_alphas.to("npu"), integrate_alphas) + peak_list.append(peak.to(kwargs["mindie_device"])) + us_peak = torch.cat(peak_list, 1)[:, :len_alphas] + peak_batch.append(us_peak) + us_peaks = torch.cat(peak_batch, 0) + + time6 = time.perf_counter() + meta_data["calc_predictor_timestamp"] = time6 - time5 + + + # Step6: post process + decoder_out = decoder_out.to(kwargs["mindie_device"]) + us_alphas = us_alphas.to(kwargs["mindie_device"]) + us_peaks = us_peaks.to(kwargs["mindie_device"]) + encoder_out_lens = encoder_out_lens.to(kwargs["mindie_device"]) + results = [] + b, n, d = decoder_out.size() + for i in range(b): + am_scores = decoder_out[i, : pre_token_length[i], :] + + yseq = am_scores.argmax(dim=-1) + score = am_scores.max(dim=-1)[0] + score = torch.sum(score, dim=-1) + + # pad with mask tokens to ensure compatibility with sos/eos tokens + yseq = torch.tensor([self.sos] + yseq.tolist() + [self.eos], device=yseq.device) + + nbest_hyps = [Hypothesis(yseq=yseq, score=score)] + + for nbest_idx, hyp in enumerate(nbest_hyps): + # remove sos/eos and get results + last_pos = -1 + if isinstance(hyp.yseq, list): + token_int = hyp.yseq[1:last_pos] + else: + token_int = hyp.yseq[1:last_pos].tolist() + + # remove blank symbol id, which is assumed to be 0 + token_int = list( + filter( + lambda x: x != self.eos and x != self.sos and x != self.blank_id, token_int + ) + ) + + # Change integer-ids to tokens + token = tokenizer.ids2tokens(token_int) + + _, timestamp = ts_prediction_lfr6_standard( + us_alphas[i][: encoder_out_lens[i] * 3], + us_peaks[i][: encoder_out_lens[i] * 3], + copy.copy(token), + vad_offset=kwargs.get("begin_time", 0), + ) + + text_postprocessed, time_stamp_postprocessed, word_lists = ( + postprocess_utils.sentence_postprocess(token, timestamp) + ) + + result_i = { + "key": key[i], + "text": text_postprocessed, + "timestamp": time_stamp_postprocessed, + } + + results.append(result_i) + + time7 = time.perf_counter() + meta_data["post_process"] = time7 - time6 + + return results, meta_data diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_punc.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_punc.py new file mode 100644 index 0000000000000000000000000000000000000000..25b1898b641a2300f1ac45a909f7cd9a3597cd74 --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_punc.py @@ -0,0 +1,195 @@ +import sys +sys.path.append("./FunASR") + +import torch +import 
mindietorch + +from mindie_paraformer import precision_eval +from funasr.utils.load_utils import load_audio_text_image_video +from funasr.models.ct_transformer.model import CTTransformer +from funasr.models.ct_transformer.utils import split_to_mini_sentence, split_words + + +class MindiePunc(torch.nn.Module): + def __init__(self, model): + super().__init__() + self.model = model.eval() + + def forward(self, text, text_lengths): + y, _ = self.model.punc_forward(text, text_lengths) + _, indices = y.view(-1, y.shape[-1]).topk(1, dim=1) + punctuations = torch.squeeze(indices, dim=1) + + return punctuations + + @staticmethod + def export(punc, path="./compiled_punc.pt", soc_version="Ascendxxx"): + print("Begin tracing punc model.") + + input_shape = (1, 20) + min_shape = (1, -1) + max_shape = (1, -1) + input_speech = torch.randint(1, 10, input_shape, dtype=torch.int32) + input_speech_lengths = torch.tensor([20, ], dtype=torch.int32) + compile_inputs = [mindietorch.Input(min_shape = min_shape, max_shape = max_shape, dtype = torch.int32), + mindietorch.Input(min_shape = (1, ), max_shape = (1, ), dtype = torch.int32)] + + export_model = torch.jit.trace(punc, example_inputs=(input_speech, input_speech_lengths)) + print("Finish tracing punc model.") + + compiled_model = mindietorch.compile( + export_model, + inputs = compile_inputs, + precision_policy = mindietorch.PrecisionPolicy.PREF_FP16, + default_buffer_size_vec = [10, ], + soc_version = soc_version, + ir = "ts" + ) + compiled_model.save(path) + print("Finish compiling punc model, compiled model is saved in {}.".format(path)) + # compiled_model = torch.jit.load(path) + + print("Start checking the percision of punc model.") + sample_speech = torch.randint(1, 10, (1, 10), dtype=torch.int32) + sample_speech_lengths = torch.tensor([10, ], dtype=torch.int32) + mrt_res = compiled_model(sample_speech.to("npu"), sample_speech_lengths.to("npu")) + ref_res = punc(sample_speech, sample_speech_lengths) + precision_eval(mrt_res, ref_res) + + +class MindieCTTransformer(CTTransformer): + def __init__( + self, + *args, + **kwargs, + ): + super().__init__(*args, **kwargs) + + self.mindie_punc = torch.jit.load(kwargs["compiled_punc"]) + + def inference( + self, + data_in, + data_lengths=None, + key: list = None, + tokenizer=None, + frontend=None, + **kwargs, + ): + assert len(data_in) == 1 + text = load_audio_text_image_video(data_in, data_type=kwargs.get("kwargs", "text"))[0] + + split_size = kwargs.get("split_size", 20) + + tokens = split_words(text, jieba_usr_dict=self.jieba_usr_dict) + tokens_int = tokenizer.encode(tokens) + + mini_sentences = split_to_mini_sentence(tokens, split_size) + mini_sentences_id = split_to_mini_sentence(tokens_int, split_size) + assert len(mini_sentences) == len(mini_sentences_id) + + mini_sentences_id = [torch.unsqueeze(torch.tensor(id, dtype=torch.int32), 0) for id in mini_sentences_id] + + cache_sent = [] + cache_sent_id = torch.tensor([[]], dtype=torch.int32) + new_mini_sentence = "" + cache_pop_trigger_limit = 200 + results = [] + meta_data = {} + + for mini_sentence_i in range(len(mini_sentences)): + mini_sentence = mini_sentences[mini_sentence_i] + mini_sentence_id = mini_sentences_id[mini_sentence_i] + mini_sentence = cache_sent + mini_sentence + mini_sentence_id = torch.cat([cache_sent_id, mini_sentence_id], dim=1) + + text = mini_sentence_id.to("npu") + text_lengths = torch.tensor([text.shape[1], ], dtype=torch.int32).to("npu") + punctuations = self.mindie_punc(text, text_lengths) + punctuations = punctuations.to("cpu") + 
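+            # The compiled punc model predicts one punctuation id per token of the current
+            # mini-sentence; the logic below keeps the tokens after the last sentence-ending
+            # mark ("。"/"?") as a cache so they are re-punctuated together with the next chunk.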
+ assert punctuations.size()[0] == len(mini_sentence) + + # Search for the last Period/QuestionMark as cache + if mini_sentence_i < len(mini_sentences) - 1: + sentenceEnd = -1 + last_comma_index = -1 + for i in range(len(punctuations) - 2, 1, -1): + if ( + self.punc_list[punctuations[i]] == "。" + or self.punc_list[punctuations[i]] == "?" + ): + sentenceEnd = i + break + if last_comma_index < 0 and self.punc_list[punctuations[i]] == ",": + last_comma_index = i + + if ( + sentenceEnd < 0 + and len(mini_sentence) > cache_pop_trigger_limit + and last_comma_index >= 0 + ): + # The sentence it too long, cut off at a comma. + sentenceEnd = last_comma_index + punctuations[sentenceEnd] = self.sentence_end_id + cache_sent = mini_sentence[sentenceEnd + 1 :] + cache_sent_id = mini_sentence_id[:, sentenceEnd + 1 :] + mini_sentence = mini_sentence[0 : sentenceEnd + 1] + punctuations = punctuations[0 : sentenceEnd + 1] + + words_with_punc = [] + for i in range(len(mini_sentence)): + if ( + i == 0 + or self.punc_list[punctuations[i - 1]] == "。" + or self.punc_list[punctuations[i - 1]] == "?" + ) and len(mini_sentence[i][0].encode()) == 1: + mini_sentence[i] = mini_sentence[i].capitalize() + if i == 0: + if len(mini_sentence[i][0].encode()) == 1: + mini_sentence[i] = " " + mini_sentence[i] + if i > 0: + if ( + len(mini_sentence[i][0].encode()) == 1 + and len(mini_sentence[i - 1][0].encode()) == 1 + ): + mini_sentence[i] = " " + mini_sentence[i] + words_with_punc.append(mini_sentence[i]) + if self.punc_list[punctuations[i]] != "_": + punc_res = self.punc_list[punctuations[i]] + if len(mini_sentence[i][0].encode()) == 1: + if punc_res == ",": + punc_res = "," + elif punc_res == "。": + punc_res = "." + elif punc_res == "?": + punc_res = "?" + words_with_punc.append(punc_res) + new_mini_sentence += "".join(words_with_punc) + # Add Period for the end of the sentence + new_mini_sentence_out = new_mini_sentence + if mini_sentence_i == len(mini_sentences) - 1: + if new_mini_sentence[-1] == "," or new_mini_sentence[-1] == "、": + new_mini_sentence_out = new_mini_sentence[:-1] + "。" + elif new_mini_sentence[-1] == ",": + new_mini_sentence_out = new_mini_sentence[:-1] + "." + elif ( + new_mini_sentence[-1] != "。" + and new_mini_sentence[-1] != "?" + and len(new_mini_sentence[-1].encode()) != 1 + ): + new_mini_sentence_out = new_mini_sentence + "。" + if len(punctuations): + punctuations[-1] = 2 + elif ( + new_mini_sentence[-1] != "." + and new_mini_sentence[-1] != "?" + and len(new_mini_sentence[-1].encode()) == 1 + ): + new_mini_sentence_out = new_mini_sentence + "." 
+ if len(punctuations): + punctuations[-1] = 2 + + result_i = {"key": key[0], "text": new_mini_sentence_out} + results.append(result_i) + return results, meta_data diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_vad.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_vad.py new file mode 100644 index 0000000000000000000000000000000000000000..753548aff1d95d88208936936137468b5380007c --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/mindie_vad.py @@ -0,0 +1,51 @@ +import sys +import os +sys.path.append("./FunASR") + +import torch +import mindietorch + +from mindie_paraformer import precision_eval + + +class MindieVAD(torch.nn.Module): + def __init__(self, model): + super().__init__() + model.encoder.eval() + for para in model.encoder.parameters(): + para.requires_grad = False + self.model = model + + def forward(self, feat): + result = self.model.encoder(feat, {}) + return result + + @staticmethod + def export(vad, path="./compiled_vad.pt", soc_version="Ascendxxx"): + print("Begin tracing vad model.") + input_shape = (1, 5996, 400) + min_shape = (1, -1, 400) + max_shape = (1, -1, 400) + input_feat = torch.randn(input_shape, dtype=torch.float32) + compile_inputs = [mindietorch.Input(min_shape = min_shape, max_shape = max_shape, dtype = torch.float32)] + + export_model = torch.jit.trace(vad, input_feat) + print("Finish tracing vad model.") + + compiled_model = mindietorch.compile( + export_model, + inputs = compile_inputs, + precision_policy = mindietorch.PrecisionPolicy.PREF_FP32, + default_buffer_size_vec = [50, ], + soc_version = soc_version, + ir = "ts" + ) + compiled_model.save(path) + print("Finish compiling vad model, compiled model is saved in {}.".format(path)) + # compiled_model = torch.jit.load(path) + + print("Start checking the percision of vad model.") + sample_feat = torch.randn(input_shape, dtype=torch.float32) + mrt_res = compiled_model(sample_feat.to("npu")) + ref_res = vad(sample_feat) + precision_eval(mrt_res, ref_res) diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/test.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/test.py new file mode 100644 index 0000000000000000000000000000000000000000..ae1f2a40af512b7a032867a16d119024ebc22d33 --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/test.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright(C) 2024. Huawei Technologies Co.,Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
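+# End-to-end sample test: collects .wav files from --sample_path, builds the
+# VAD + Paraformer + PUNC pipeline from the compiled MindIE models, warms it up once,
+# and then prints the recognized text and per-stage time consumption for each audio file.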
+ +import os +import argparse + +import torch +import torch_npu + +import mindietorch + +from mindie_auto_model import MindieAutoModel + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model", default="./model", + help="path of pretrained model") + parser.add_argument("--model_vad", default="./model_vad", + help="path of pretrained vad model") + parser.add_argument("--model_punc", default="./model_punc", + help="path of pretrained punc model") + parser.add_argument("--compiled_encoder", default="./compiled_model/compiled_encoder.pt", + help="path to save compiled encoder") + parser.add_argument("--compiled_decoder", default="./compiled_model/compiled_decoder.pt", + help="path to save compiled decoder") + parser.add_argument("--compiled_cif", default="./compiled_model/compiled_cif.pt", + help="path to save compiled cif function") + parser.add_argument("--compiled_cif_timestamp", default="./compiled_model/compiled_cif_timestamp.pt", + help="path to save compiled cif timestamp function") + parser.add_argument("--compiled_punc", default="./compiled_model/compiled_punc.ts", + help="path to save compiled punc model") + parser.add_argument("--compiled_vad", default="./compiled_model/compiled_vad.ts", + help="path to save compiled vad model") + parser.add_argument("--paraformer_batch_size", default=16, type=int, + help="batch size of paraformer model") + parser.add_argument("--sample_path", default="./audio/", + help="directory or path of sample audio") + parser.add_argument("--soc_version", default="Ascendxxx", type=str, + help="soc version of Ascend") + args = parser.parse_args() + + mindietorch.set_device(0) + + valid_extensions = ['.wav'] + audio_files = [] + + if os.path.isfile(args.sample_path): + if any(args.sample_path.endswith(ext) for ext in valid_extensions): + audio_files.append(args.sample_path) + elif os.path.isdir(args.sample_path): + for root, dirs, files in os.walk(args.sample_path): + for file in files: + if any(file.endswith(ext) for ext in valid_extensions): + audio_files.append(os.path.join(root, file)) + + if len(audio_files) == 0: + print("There is no valid wav file in sample_dir.") + else: + # initialize auto model + model = MindieAutoModel(model=args.model, vad_model=args.model_vad, punc_model=args.model_punc, + compiled_encoder=args.compiled_encoder, compiled_decoder=args.compiled_decoder, + compiled_cif=args.compiled_cif, compiled_cif_timestamp=args.compiled_cif_timestamp, + compiled_punc=args.compiled_punc, compiled_vad=args.compiled_vad, + paraformer_batch_size=args.paraformer_batch_size, + cif_interval=200, cif_timestamp_interval=500) + + if "910" in args.soc_version: + model.kwargs["mindie_device"] = "npu" + else: + model.kwargs["mindie_device"] = "cpu" + + # warm up + print("Begin warming up.") + _ = model.generate(input=audio_files[0]) + print("Finish warming up.") + + # iterate over sample_dir + for wav_file in audio_files: + print("\nBegin evaluating {}.".format(wav_file)) + + res, time_stats = model.generate(input=wav_file) + print("Model output: {}".format(res[0]["text"])) + print("Time comsumption:") + print(" ".join(f"{key}: {value:.3f}s" for key, value in time_stats.items())) \ No newline at end of file diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/test_accuracy.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/test_accuracy.py new file mode 100644 index 0000000000000000000000000000000000000000..4af01ede69396ef4760e6a0e0ed8caf3a9b55699 --- /dev/null +++ 
b/MindIE/MindIE-Torch/built-in/audio/Paraformer/test_accuracy.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright(C) 2024. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import argparse
+from tqdm import tqdm
+
+import torch
+import torch_npu
+import mindietorch
+
+from mindie_auto_model import MindieAutoModel
+from nltk.metrics.distance import edit_distance
+
+
+def load_txt(file_name):
+    result = {}
+
+    with open(file_name, "r") as file:
+        for line in file:
+            parts = line.strip().split(maxsplit=1)
+
+            if len(parts) == 2:
+                result[parts[0]] = parts[1]
+
+    return result
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--result_path", default="./aishell_test_result.txt", type=str,
+                        help="path of the saved infer result")
+    parser.add_argument("--ref_path", default="/path/to/AISHELL-1/transcript/aishell_transcript_v0.8.txt",
+                        type=str, help="path of the AISHELL-1 reference transcript")
+    args = parser.parse_args()
+
+    infer_result = load_txt(args.result_path)
+    ref_result = load_txt(args.ref_path)
+
+    infer_list = []
+    refer_list = []
+    for key, value in infer_result.items():
+        if key in ref_result:
+            infer_list.append(value.replace(" ", ""))
+            refer_list.append(ref_result[key].replace(" ", ""))
+
+    cer_total = 0
+    step = 0
+    for infer, refer in tqdm(zip(infer_list, refer_list)):
+        infer = [i for i in infer]
+        refer = [r for r in refer]
+        cer_total += edit_distance(infer, refer) / len(refer)
+        step += 1
+
+    cer = cer_total / step
+    accuracy = 1 - cer
+    print("character-error-rate: {:.4f}, accuracy: {:.4f}".format(cer, accuracy))
\ No newline at end of file
diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/test_performance.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/test_performance.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d3cc39f5f83e92e3d96aa185fe3fdebee8cb178
--- /dev/null
+++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/test_performance.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright(C) 2024. Huawei Technologies Co.,Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
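+# Performance test on an AISHELL-1 style directory: gathers .wav files (skipping files
+# smaller than 1KB), runs ASR-only batched inference with the compiled models, reports
+# the average RTF and per-stage timing, and writes "<key> <text>" lines to --result_path
+# so that test_accuracy.py can compute the character error rate against the transcript.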
+ +import os +import argparse + +import torch +import torch_npu +import mindietorch + +from mindie_auto_model import MindieAutoModel + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model", default="./model", + help="path of pretrained model") + parser.add_argument("--compiled_encoder", default="./compiled_model/compiled_encoder.pt", + help="path to save compiled encoder") + parser.add_argument("--compiled_decoder", default="./compiled_model/compiled_decoder.pt", + help="path to save compiled decoder") + parser.add_argument("--compiled_cif", default="./compiled_model/compiled_cif.pt", + help="path to save compiled cif function") + parser.add_argument("--compiled_cif_timestamp", default="./compiled_model/compiled_cif_timestamp.pt", + help="path to save compiled cif timestamp function") + parser.add_argument("--batch_size", default=64, type=int, + help="batch size of paraformer model") + parser.add_argument("--sample_path", default="/path/to/AISHELL-1", type=str, + help="directory or path of sample audio") + parser.add_argument("--result_path", default="./aishell_test_result.txt", type=str, + help="path to save infer result") + parser.add_argument("--soc_version", default="Ascend310P3", type=str, + help="soc version of Ascend") + args = parser.parse_args() + + mindietorch.set_device(0) + + valid_extensions = ['.wav'] + audio_files = [] + + if os.path.isfile(args.sample_path): + if any(args.sample_path.endswith(ext) for ext in valid_extensions): + audio_files.append(args.sample_path) + elif os.path.isdir(args.sample_path): + for root, dirs, files in os.walk(args.sample_path): + for file in files: + if any(file.endswith(ext) for ext in valid_extensions): + audio_files.append(os.path.join(root, file)) + + # filter out wav files which is smaller than 1KB + audio_files = [file for file in audio_files if os.path.getsize(file) >= 1024] + + if len(audio_files) == 0: + print("There is no valid wav file in sample_dir.") + else: + # initialize auto model + model = MindieAutoModel(model=args.model, + compiled_encoder=args.compiled_encoder, compiled_decoder=args.compiled_decoder, + compiled_cif=args.compiled_cif, compiled_cif_timestamp=args.compiled_cif_timestamp, + batch_size=args.batch_size, + cif_interval=200, cif_timestamp_interval=500) + + if "910" in args.soc_version: + model.kwargs["mindie_device"] = "npu" + else: + model.kwargs["mindie_device"] = "cpu" + + # warm up + print("Begin warming up.") + for i in range(3): + _ = model.inference_with_asr(input=audio_files[0]) + print("Finish warming up") + + # iterate over sample_dir + print("Begin evaluating.") + + results, time_stats = model.inference_with_asr(input=audio_files, display_pbar=True) + print("Average RTX: {:.3f}".format(time_stats["rtf_avg"])) + print("Time comsumption:") + print(" ".join(f"{key}: {value:.3f}s" for key, value in time_stats.items() if key != "rtf_avg")) + + with open(args.result_path, "w") as f: + for res in results: + f.write("{} {}\n".format(res["key"], res["text"])) \ No newline at end of file diff --git a/MindIE/MindIE-Torch/built-in/audio/Paraformer/trace_encoder_decoder.py b/MindIE/MindIE-Torch/built-in/audio/Paraformer/trace_encoder_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..91a7d3af0100ed58526afffefb3ea9b43c324607 --- /dev/null +++ b/MindIE/MindIE-Torch/built-in/audio/Paraformer/trace_encoder_decoder.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright(C) 2024. Huawei Technologies Co.,Ltd. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import torch +from torch import library + +import sys +sys.path.append("./FunASR") + +from funasr.auto.auto_model import AutoModel + + +torch.library.define("aie::flash_attention", "(Tensor query, Tensor key, Tensor value, int num_head, " + "Tensor? attn_mask=None, Tensor? pse=None, float scale=1.0, str layout='BSH', str type='PFA') -> Tensor") + +@torch.library.impl('aie::flash_attention', "cpu") +def flash_attention_wrapper(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, num_head: int, + attn_mask: torch.Tensor = None, pse: torch.Tensor = None, scale: float = 1.0, + layout: str = 'BSH', type: str = 'PFA') -> torch.Tensor: + return query + +class ParaformerEncoder(torch.nn.Module): + def __init__(self, model): + super().__init__() + self.model = model.eval() + + def forward(self, speech, speech_length): + batch = {"speech": speech, "speech_lengths": speech_length} + enc, enc_len = self.model.encoder(**batch) + mask = self.model.make_pad_mask(enc_len)[:, None, :] + hidden, alphas, pre_token_length = self.model.predictor(enc, mask) + return enc, hidden, alphas, pre_token_length + + def trace_model(encoder, path="./traced_encoder.pt"): + print("Begin trace encoder!") + + input_shape = (2, 50, 560) + input_speech = torch.randn(input_shape, dtype=torch.float32) + input_speech_lens = torch.tensor([50, 25], dtype=torch.int32) + + trace_model = torch.jit.trace(encoder, example_inputs=(input_speech, input_speech_lens)) + trace_model.save(path) + print("Finish trace encoder") + + +class ParaformerDecoder(torch.nn.Module): + def __init__(self, model): + super().__init__() + self.model = model.eval() + + def forward(self, encoder_out, encoder_out_lens, sematic_embeds, pre_token_length): + decoder_outs = self.model.decoder(encoder_out, encoder_out_lens, sematic_embeds, pre_token_length) + decoder_out = decoder_outs[0] + decoder_out = torch.log_softmax(decoder_out, dim=-1) + + encoder_out_mask = self.model.make_pad_mask(encoder_out_lens)[:, None, :] + + us_alphas = self.model.predictor.get_upsample_timestamp(encoder_out, encoder_out_mask, pre_token_length) + + return decoder_out, us_alphas + + def trace_model(decoder, path="./traced_decoder.pt"): + print("Begin trace decoder!") + + input_shape1 = (2, 939, 512) + input_shape2 = (2, 261, 512) + + encoder_out = torch.randn(input_shape1, dtype=torch.float32) + encoder_out_lens = torch.tensor([939, 500], dtype=torch.int32) + sematic_embeds = torch.randn(input_shape2, dtype=torch.float32) + sematic_embeds_lens = torch.tensor([261, 100], dtype=torch.int32) + + trace_model = torch.jit.trace(decoder, example_inputs=(encoder_out, encoder_out_lens, sematic_embeds, sematic_embeds_lens)) + trace_model.save(path) + print("Finish trace decoder") + + +class AutoModelParaformer(AutoModel): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + @staticmethod + def trace(**kwargs): + model, kwargs = AutoModel.build_model(**kwargs) + + import copy + from 
funasr.models.bicif_paraformer.export_meta import export_rebuild_model + + kwargs_new = copy.deepcopy(kwargs) + kwargs_new['onnx'] = False + kwargs_new["max_seq_len"] = 512 + del kwargs_new["model"] + model = export_rebuild_model(model, **kwargs_new) + + encoder = ParaformerEncoder(model) + ParaformerEncoder.trace_model(encoder, kwargs["traced_encoder"]) + + decoder = ParaformerDecoder(model) + ParaformerDecoder.trace_model(decoder, kwargs["traced_decoder"]) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--model", default="./model", + help="path of pretrained model") + parser.add_argument("--traced_encoder", default="./compiled_model/traced_encoder.pt", + help="path to save compiled decoder") + parser.add_argument("--traced_decoder", default="./compiled_model/traced_decoder.pt", + help="path to save compiled decoder") + args = parser.parse_args() + + AutoModelParaformer.trace(model=args.model, traced_encoder=args.traced_encoder, traced_decoder=args.traced_decoder) \ No newline at end of file