前端实现:Web 获取麦克风权限并录制音频(需求:AI 对话问答)
下载插件
npm i recordrtc
文档:https://recordrtc.org/RecordRTC.html
相关方法整理
获取设备
getvice() {if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {var Mic = [];// 弹框获取麦克风navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {navigator.mediaDevices.enumerateDevices().then((devices) => {devices.forEach((device) => {if (device.kind === "audioinput") {// 麦克风if (device.deviceId != "default" &&device.deviceId != "communications") {this.Mic.push(device);//获取到的麦克风设备}}});});// 只是为了获取麦克风权限,获取以后立马关闭stream.getTracks().forEach((track) => track.stop());console.log(this.Mic, 22);});}},
开始录音
// 开始录音startRecord() {var that = this;// this.voiceStatus = true;// mediaDevices可提供对相机和麦克风等媒体输入设备的连接访问window.navigator.mediaDevices.getUserMedia({ audio: { deviceId: this.Mic[0].deviceId } })//设置{ audio: true}会选择电脑默认设置的麦克风设备.then((stream) => {this.stream = stream;this.getVoice();this.recorder = RecordRTC(stream, {type: "audio",//类型mimeType: "audio/wav",//音频格式recorderType: RecordRTC.StereoAudioRecorder,desiredSampRate: 16000,//频率numberOfAudioChannels: 2, // 单声道timeSlice: 1000,// bufferSize: 4096, // 缓存大小// ondataavailable: this.sendData,//有音频时的回调,一般及时传输websocket有用checkForInactiveTracks: true,});this.recorder.startRecording();}).catch(function (err) {console.log(err);console.log("当前浏览器不支持开启麦克风!");// that.voiceStatus = false;});},
停止录音
// 结束录音stopRecord() {// this.voiceStatus = false;if (this.recorder != null) {let recorder = this.recorder;// var internalRecorder = recorder.getInternalRecorder();// console.log("停止录音回调internalRecorder", internalRecorder);//停止录音recorder.stopRecording(() => {// var BB = new Blob([recorder.getBlob()], {// type: "audio/wav; codecs=opus",// });let blob = recorder.getBlob();//获取blobvar audioURL = window.URL.createObjectURL(blob);// let a = document.createElement("a");// a.href = audioURL;// a.download = "测试";// a.click();// 将录音文件 以文件对象形式传给后端var form = new FormData();form.append("upfile", blob);console.log("form", form, blob);// 释放这个临时的对象urlwindow.URL.revokeObjectURL(audioURL);});let stream = this.stream;clearInterval(this.timer1);RMSList = [//记录音频大小,用于音频波动样式绘制用0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 0, 0, 0,];//结束媒体stream.getAudioTracks().forEach((track) => track.stop());}},
获取音量大小
// 获取音量值大小getVoice() {const audioContext = new (window.AudioContext ||window.webkitAudioContext)();// 将麦克风的声音输入这个对象const mediaStreamSource = audioContext.createMediaStreamSource(this.stream);// 创建分析节点const analyserNode = audioContext.createAnalyser();// 连接节点mediaStreamSource.connect(analyserNode);// 可以实时听到麦克风采集的声音// analyserNode.connect(audioContext.destination)// 获取音量数据const dataArray = new Uint8Array(analyserNode.frequencyBinCount);function getVolume() {analyserNode.getByteFrequencyData(dataArray);let sum = 0;for (let i = 0; i < dataArray.length; i++) {sum += dataArray[i];}// 计算平均音量const averageVolume = sum / dataArray.length;return averageVolume;}// 每隔一段时间获取一次音量this.timer1 = setInterval(() => {const volume = getVolume();console.log("音量:", Math.round(volume));RMSList.unshift(Math.round(volume));RMSList.pop();// 在这里可以根据需要进行相应的处理}, 100);},
以上用于记录,后续补充文章,整理AI对话搭建成果