Preface
I recently implemented a record-and-upload feature with speech recognition, converting the recorded speech into text.
I. What is js-audio-recorder?
js-audio-recorder is a third-party JavaScript recording library that can be used in a Vue project to record audio, play it back, and export the data as WAV or PCM.
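Install it from npm first (a minimal setup step; yarn add js-audio-recorder works just as well):
npm install js-audio-recorder --save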
II. Usage steps
1. Import the libraries
The code is as follows (example; typically the project entry file main.js):
import Vue from 'vue'
import App from './App.vue'
import router from './router'
import store from './store'
import ElementUI from 'element-ui'
import 'element-ui/lib/theme-chalk/index.css'
import axios from 'axios'
Vue.use(ElementUI)
Vue.config.productionTip = false
// expose axios on every component instance as this.$axios
Vue.prototype.$axios = axios
new Vue({
router,
store,
render: h => h(App)
}).$mount('#app')
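js-audio-recorder itself does not need any global registration in main.js; it is imported directly inside the component that does the recording (see the complete code below). A minimal sketch of creating and starting a recorder, using the same options as the component below:

import Recorder from "js-audio-recorder";

// 16-bit samples, 16 kHz sample rate, mono (the same options used in the complete code)
const recorder = new Recorder({
  sampleBits: 16,
  sampleRate: 16000,
  numChannels: 1,
});

// ask the browser for microphone permission, then start recording
Recorder.getPermission().then(() => {
  recorder.start();
});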
2. Complete code
<template>
<div class="hea1" style="padding: 20px;">
<h3>Record and Upload</h3>
<div style="font-size: 14px">
<h3>Recording duration: {{ recorder && recorder.duration.toFixed(4) }}</h3>
<el-button type="primary" @click="handleStart">Start recording</el-button>
<el-button type="info" @click="handlePause">Pause recording</el-button>
<el-button type="success" @click="handleResume">Resume recording</el-button>
<el-button type="warning" @click="handleStop">Stop recording</el-button>
<br><br>
<h3>
Playback duration: {{
recorder &&
(playTime > recorder.duration
? recorder.duration.toFixed(4)
: playTime.toFixed(4))
}}
</h3>
<audio ref="audios" :src="audio" controls></audio>
<br />
<div class="bo">
<!-- <el-button type="primary" @click="handlePlay">Play recording</el-button>
<el-button type="info" @click="handlePausePlay">Pause playback</el-button>
<el-button type="success" @click="handleResumePlay">Resume playback</el-button>
<el-button type="warning" @click="handleStopPlay">Stop playback</el-button> -->
<el-button type="error" @click="handleDestroy">销毁录音</el-button>
<el-button type="info" @click="downWAV">下载WAV</el-button>
<el-button type="info" @click="downPCM">下载pcm</el-button>
<el-button type="primary" @click="uploadRecord">上传</el-button>
<el-button type="success" @click="getResults">识别结果</el-button>
</div>
<br />
<br />
<textarea
name=""
id="textarea"
style="width: 100%; height: 100px"
cols="30"
rows="10"
v-model="result"
/>
</div>
</div>
</template>
<script>
import Recorder from "js-audio-recorder";
//import MyRecorder from '../js/MyRecorder'
import { v4 as uuidv4 } from "uuid";
//const baseURL = "http://10.34.23.133:8899/asr/non_real";
export default {
data() {
return {
recorder: null,
playTime: 0,
timer: null,
src: null,
sessionId: "",
audio: "",
data: "",
result: "",
};
},
created() {},
mounted() {
this.dataRecorder = this.data;
},
methods: {
// start recording
handleStart() {
this.sessionId = uuidv4(); // new session id for this recording
console.log(this.sessionId);
this.recorder = new Recorder({
sampleBits: 16, // sample bits, 8 or 16 are supported, default is 16
sampleRate: 16000, // sample rate: 11025, 16000, 22050, 24000, 44100 or 48000; otherwise the browser default is used (Chrome defaults to 48000)
numChannels: 1, // number of channels, 1 or 2, default is 1
// compiling: false, // convert while recording (effective in 0.x, being added in 1.x), default is false
});
console.log(this.recorder)
Recorder.getPermission().then(
() => {
console.log("开始录音");
this.recorder.start(); // 开始录音
},
(error) => {
this.$message({
message: "请先允许该网页使用麦克风",
type: "info",
});
console.log(`${error.name} : ${error.message}`);
}
);
},
handlePause() {
console.log("pause recording");
this.recorder.pause(); // pause recording
},
handleResume() {
console.log("resume recording");
this.recorder.resume(); // resume recording
},
handleStop() {
console.log("stop recording");
this.recorder.stop(); // stop recording
this.handlePlay(); // build the playable audio blob right away
},
handlePlay() {
console.log("play recording");
console.log(this.recorder);
const bl = this.recorder.getWAVBlob();
const r = new FileReader();
r.readAsArrayBuffer(bl);
r.onload = (e) => {
const buffer = e.target.result;
const b = this.addWavHeader(buffer, 16000, 16, 1);
this.audio = window.URL.createObjectURL(b);
};
console.log(this.$refs.audios);
// this.recorder.play(); // play through the library instead of the <audio> element
// track the playback time
this.timer = setInterval(() => {
try {
this.playTime = this.recorder.getPlayTime();
} catch (error) {
this.timer = null;
}
}, 100);
},
addWavHeader(samples, sampleRateTmp, sampleBits, channelCount) {
const dataLength = samples.byteLength;
/* new ArrayBuffer with 44 extra bytes reserved for the WAV header */
const buffer = new ArrayBuffer(44 + dataLength);
/* wrap it in a DataView so the header fields can be written byte by byte */
const view = new DataView(buffer);
let offset = 0;
/* ChunkID, 4 bytes, RIFF resource exchange file identifier */
this.writeString(view, offset, 'RIFF'); offset += 4;
/* ChunkSize, 4 bytes, bytes from the next field to the end of the file, i.e. file size - 8 */
view.setUint32(offset, 36 + dataLength, true); offset += 4;
/* Format, 4 bytes, WAV file flag */
this.writeString(view, offset, 'WAVE'); offset += 4;
/* Subchunk1 ID, 4 bytes, format chunk marker */
this.writeString(view, offset, 'fmt '); offset += 4;
/* Subchunk1 Size, 4 bytes, size of the fmt chunk, normally 0x10 = 16 */
view.setUint32(offset, 16, true); offset += 4;
/* Audio Format, 2 bytes, 1 = PCM (uncompressed) */
view.setUint16(offset, 1, true); offset += 2;
/* Num Channels, 2 bytes, number of channels */
view.setUint16(offset, channelCount, true); offset += 2;
/* SampleRate, 4 bytes, samples per second for each channel */
view.setUint32(offset, sampleRateTmp, true); offset += 4;
/* ByteRate, 4 bytes, average bytes per second = sampleRate * channels * bitsPerSample / 8 */
view.setUint32(offset, sampleRateTmp * channelCount * (sampleBits / 8), true); offset += 4;
/* BlockAlign, 2 bytes, bytes per sample frame = channels * bitsPerSample / 8 */
view.setUint16(offset, channelCount * (sampleBits / 8), true); offset += 2;
/* BitsPerSample, 2 bytes, bits per sample */
view.setUint16(offset, sampleBits, true); offset += 2;
/* Subchunk2 ID, 4 bytes, data chunk marker */
this.writeString(view, offset, 'data'); offset += 4;
/* Subchunk2 Size, 4 bytes, total size of the sample data, i.e. file size - 44 */
view.setUint32(offset, dataLength, true); offset += 4;
if (sampleBits === 16) {
this.floatTo16BitPCM(view, samples);
} else if (sampleBits === 8) {
this.floatTo8BitPCM(view, samples);
} else {
this.floatTo32BitPCM(view, samples);
}
return new Blob([view], { type: 'audio/wav' });
},
writeString(view, offset, string) {
for (let i = 0; i < string.length; i += 1) {
view.setUint8(offset + i, string.charCodeAt(i));
}
},
floatTo32BitPCM(output, input) {
const oinput = new Int32Array(input);
let newoffset = 44;
for (let i = 0; i < oinput.length; i += 1, newoffset += 4) {
output.setInt32(newoffset, oinput[i], true);
}
},
floatTo16BitPCM(output, input) {
const oinput = new Int16Array(input);
let newoffset = 44;
for (let i = 0; i < oinput.length; i += 1, newoffset += 2) {
output.setInt16(newoffset, oinput[i], true);
}
},
floatTo8BitPCM(output, input) {
const oinput = new Int8Array(input);
let newoffset = 44;
for (let i = 0; i < oinput.length; i += 1, newoffset += 1) {
output.setInt8(newoffset, oinput[i], true);
}
},
handlePausePlay() {
console.log("pause playback");
this.recorder.pausePlay(); // pause playback
// freeze the displayed playback time
this.playTime = this.recorder.getPlayTime();
this.timer = null;
},
handleResumePlay() {
console.log("恢复播放");
this.recorder.resumePlay(); // 恢复播放
// 播放时长
this.timer = setInterval(() => {
try {
this.playTime = this.recorder.getPlayTime();
} catch (error) {
this.timer = null;
}
}, 100);
},
handleStopPlay() {
console.log("停止播放");
this.recorder.stopPlay(); // 停止播放
// 播放时长
this.playTime = this.recorder.getPlayTime();
this.timer = null;
},
handleDestroy() {
console.log("销毁实例");
this.recorder.destroy(); // 毁实例
this.timer = null;
},
downWAV() {
this.recorder.downloadWAV("recording"); // download the recording as a WAV file
},
downPCM() {
this.recorder.downloadPCM("recording"); // download the raw PCM data
},
uploadRecord() {
if (this.recorder == null || this.recorder.duration === 0) {
this.$message({
message: "Please record something first",
type: "error",
});
return false;
}
this.recorder.pause(); // pause recording
this.timer = null;
console.log("upload recording"); // upload the recording
const formData = new FormData();
const blob = this.recorder.getWAVBlob(); // get the audio data as a WAV blob
console.log(blob);
// After getting the blob, wrap it in a File so the upload carries a file name,
// which this project's upload API expects; the raw blob could also be appended directly.
const newBlob = new Blob([blob], { type: "audio/wav" });
const fileOfBlob = new File([newBlob], new Date().getTime() + ".wav");
console.log(fileOfBlob);
formData.append("file", fileOfBlob);
console.log(formData);
const url = window.URL.createObjectURL(fileOfBlob);
this.src = url;
console.log(url);
this.$axios({
url:'https://10.45.192.42:8899/asr/non_real/putAudio?asrType=non_real&sessionId=' + this.sessionId ,
//url: this.baseURL + '/putAudio?asrType=non_real&sessionId=' + this.sessionID,
method: "post",
headers: {
"Content-Type": "multipart/form-data",
},
data: formData,
}).then(function (res) {
if (res.data == 200) {
alert("上传完成可以识别");
} else {
alert("上传失败请重试");
}
});
},
// fetch the recognition result
getResults() {
console.log("get recognition result");
this.$axios({
url:"https://10.85.134.230:8899/asr/non_real/getResult",
method: "get",
params: {
sessionId: this.sessionId,
}
}).then((response) => (this.result = response.data));
},
},
};
</script>
<style scoped>
.hea1 {
line-height: 20px;
}
.bo {
display: flex;
justify-content: space-between;
flex:1
}
</style>
Summary
On the front end, js-audio-recorder wraps recording nicely, so calling its ready-made API is fairly convenient. Integrating with the back end, sending the recorded audio to the server and getting the recognition result back, still involves plenty of difficulties; the key is to look things up thoroughly and communicate actively with the back-end team.