import pyaudio
import pvporcupine
import os
import io
import sys
import logging
import base64
import sounddevice as sd
import numpy as np
import pvcobra
import time
import wave
from tencentcloud.asr.v20190614 import asr_client, models
from tencentcloud.common import credential
import json
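# Third-party dependencies (assumed pip package names, not pinned here):
#   pyaudio, sounddevice, numpy, pvporcupine, pvcobra, tencentcloud-sdk-python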
logging.basicConfig(
    # filename = os.path.join(expanduser('~'), 'peoplebank.log'),
    level=logging.DEBUG,
    format="[%(asctime)s] - %(levelname)s - %(lineno)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S"
)
# Picovoice access key
ACCESS_KEY = os.environ.get('ACCESS_KEY')
print('access key:', ACCESS_KEY)
# Tencent Cloud configuration
SECRET_ID = os.environ.get('TENCENT_SECRET_ID')
SECRET_KEY = os.environ.get('TENCENT_SECRET_KEY')
REGION = "ap-beijing"
# Default audio input device index (can be changed via choose_device())
DEVICE_INDEX = 0
# Wake-word model(s)
KEYWORD_PATH = ['models/暖宝宝_zh_mac_v3_0_0.ppn']
# Wake-word sensitivity per keyword
KEYWORD_SENSITIVITIES = [0.5]
# Porcupine model parameters for Chinese wake words
MODEL_PATH = 'models/porcupine_params_zh.pv'
# Sample rate required by Porcupine
SAMPLE_RATE = 16000
# Mono audio
CHANNELS = 1
# Samples per Porcupine frame
FRAME_LENGTH = 512
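# At 16 kHz, one 512-sample frame is 32 ms of audio; Porcupine and Cobra both
# consume audio in frames of exactly this size.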
# Select the recording device (sounddevice default is a [input, output] pair)
sd.default.device = [DEVICE_INDEX, 1]
# Open the recording stream
audio_stream = sd.InputStream(samplerate=SAMPLE_RATE, channels=CHANNELS, dtype='int16')
audio_stream.start()
# Initialize Cobra (voice activity detection)
cobra = pvcobra.create(access_key=ACCESS_KEY)
# Initialize Porcupine (wake-word detection)
porcupine = pvporcupine.create(
    access_key=ACCESS_KEY,
    keyword_paths=KEYWORD_PATH,
    sensitivities=KEYWORD_SENSITIVITIES,
    model_path=MODEL_PATH
)
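# Optional sanity check: pvporcupine and pvcobra expose the frame length and sample
# rate they expect as attributes; the constants above are assumed to match them.
assert porcupine.sample_rate == SAMPLE_RATE and porcupine.frame_length == FRAME_LENGTH
assert cobra.sample_rate == SAMPLE_RATE and cobra.frame_length == FRAME_LENGTH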
# Read one frame of audio from the input stream
def get_next_audio_frame():
    audio_frame, _ = audio_stream.read(FRAME_LENGTH)
    # Flatten to a 1-D int16 array, as Porcupine and Cobra expect
    pcm_data = audio_frame.flatten()
    return pcm_data
# Record audio for a fixed duration (seconds)
def record_audio(duration=5):
    print("开始录音...")
    audio_data = sd.rec(int(SAMPLE_RATE * duration), samplerate=SAMPLE_RATE, channels=CHANNELS, dtype='int16')
    sd.wait()  # Wait for the recording to finish
    return audio_data
# Wrap raw audio samples in an in-memory WAV container
def save_audio_to_wav(audio_data):
    # Do not close the BytesIO here, the caller still needs to read from it
    byte_io = io.BytesIO()
    with wave.open(byte_io, 'wb') as wf:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(2)  # 16-bit samples
        wf.setframerate(SAMPLE_RATE)
        wf.writeframes(audio_data.tobytes())
    byte_io.seek(0)
    return byte_io
# Call Tencent Cloud ASR to transcribe the recorded speech
def transcribe_audio_tencent(audio_file):
    cred = credential.Credential(SECRET_ID, SECRET_KEY)
    client = asr_client.AsrClient(cred, REGION)
    audio_data = audio_file.read()
    audio_base64 = base64.b64encode(audio_data).decode('utf-8')
    req = models.SentenceRecognitionRequest()
    params = {
        "EngSerViceType": "16k_zh",
        "SourceType": 1,
        "VoiceFormat": "wav",
        "Data": audio_base64,
    }
    req.from_json_string(json.dumps(params))
    resp = client.SentenceRecognition(req)
    print("识别结果:", resp.Result)
def start_agent():
    # Alternative: open the input stream with PyAudio instead of sounddevice
    # audio = pyaudio.PyAudio()
    # stream = audio.open(format=pyaudio.paInt16,
    #                     channels=1,
    #                     rate=porcupine.sample_rate,
    #                     input=True,
    #                     frames_per_buffer=porcupine.frame_length)
    logging.debug('等待唤醒词...')
    try:
        while True:
            # Read one frame from the audio stream
            audio_frame = get_next_audio_frame()
            result = porcupine.process(audio_frame)
            print('result:', result)
            if result >= 0:
                # Wake word detected
                while True:
                    # Check whether someone is currently speaking
                    audio_frame = get_next_audio_frame()
                    voice_probability = cobra.process(audio_frame)
                    if voice_probability >= 0.3:
                        # Voice detected: record, transcribe, then go back to waiting for the wake word
                        audio_data = record_audio(duration=5)
                        audio_file = save_audio_to_wav(audio_data)
                        transcribe_audio_tencent(audio_file)
                        break
                    # else:
                    #     print("没有语音活动,继续监听...")
                    #     time.sleep(1)
    except KeyboardInterrupt:
        print('停止运行.')
        # Clean up resources
        audio_stream.stop()
        audio_stream.close()
        porcupine.delete()
        cobra.delete()
# List input devices and let the user pick one
def choose_device():
    global DEVICE_INDEX, audio_stream
    # Enumerate audio input devices with PyAudio
    audio = pyaudio.PyAudio()
    for i in range(audio.get_device_count()):
        device_info = audio.get_device_info_by_index(i)
        if device_info['maxInputChannels'] > 0:
            print(f"Device {i}: {device_info['name']}")
    audio.terminate()
    print(40 * '*')
    print('>>请输入麦克风编号:', end='', flush=True)
    DEVICE_INDEX = int(sys.stdin.readline().strip())
    # Reopen the input stream on the chosen device; otherwise the selection has no effect,
    # because the module-level stream was already opened with the default index
    audio_stream.stop()
    audio_stream.close()
    sd.default.device = [DEVICE_INDEX, 1]
    audio_stream = sd.InputStream(samplerate=SAMPLE_RATE, channels=CHANNELS, dtype='int16')
    audio_stream.start()
if __name__ == '__main__':
    choose_device()
    start_agent()
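# Example invocation (assumed shell setup; the script filename below is hypothetical):
#   export ACCESS_KEY=<picovoice-access-key>
#   export TENCENT_SECRET_ID=<tencent-secret-id>
#   export TENCENT_SECRET_KEY=<tencent-secret-key>
#   python wake_word_agent.py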