// Java 获取 PCM 音频流并通过 WebSocket 发送给前端，同时接收前端发送的 ByteBuffer 音频流并解码播放，实现语音对讲。
// 文章来源: https://www.toymoban.com/news/detail-512793.html
package com.example.audiodemo.component;
import cn.hutool.core.util.ObjectUtil;
import lombok.SneakyThrows;
import org.java_websocket.client.WebSocketClient;
import org.java_websocket.drafts.Draft_6455;
import org.java_websocket.handshake.ServerHandshake;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.stereotype.Component;
import javax.sound.sampled.*;
import javax.websocket.server.ServerEndpoint;
import java.net.URI;
import java.nio.ByteBuffer;
import java.util.concurrent.CompletableFuture;
@Component
public class AudioServer {

    protected final Logger logger = LoggerFactory.getLogger(this.getClass());

    /** PCM format shared by capture and playback: 44.1 kHz, 16-bit, mono, signed, little-endian. */
    private static final AudioFormat PCM_FORMAT = new AudioFormat(44100, 16, 1, true, false);

    /** Capture buffer size in bytes per read/send cycle. */
    private static final int BUFFER_SIZE = 1024;

    /**
     * Creates and connects the audio websocket client: microphone PCM is streamed
     * to the server, and binary frames received from the server are played back
     * on a single, long-lived {@link SourceDataLine}.
     *
     * @return the connected client, or {@code null} if initialisation failed
     */
    @Bean
    public WebSocketClient webSocketClient() {
        try {
            // One playback line for the whole session. The original code opened a
            // fresh SourceDataLine per incoming message and never closed it — a
            // resource leak that eventually exhausts the audio device.
            SourceDataLine playbackLine = AudioSystem.getSourceDataLine(PCM_FORMAT);
            playbackLine.open(PCM_FORMAT);
            playbackLine.start();

            WebSocketClient webSocketClient = new WebSocketClient(
                    new URI("ws://" + "172.16.20.69:8080" + "/websocket/audio/123"), new Draft_6455()) {
                @Override
                public void onOpen(ServerHandshake handshakedata) {
                    logger.info("[websocket] 连接成功");
                }

                @Override
                public void onMessage(String s) {
                    // Text frames are not used by this endpoint.
                }

                @Override
                public void onMessage(ByteBuffer message) {
                    if (ObjectUtil.isEmpty(message) || !message.hasRemaining()) {
                        return;
                    }
                    // Copy only the valid region. message.array() throws on
                    // read-only buffers and may expose bytes beyond the payload.
                    byte[] bytes = new byte[message.remaining()];
                    message.get(bytes);
                    logger.info("播放中");
                    // write() blocks until the line consumes the data, so no
                    // artificial sleep is needed; run async so the socket reader
                    // thread is never stalled by playback.
                    CompletableFuture.runAsync(() -> playbackLine.write(bytes, 0, bytes.length));
                }

                @Override
                public void onClose(int code, String reason, boolean remote) {
                    logger.info("[websocket] 退出连接");
                    playbackLine.stop();
                    playbackLine.close();
                }

                @Override
                public void onError(Exception ex) {
                    // Log with the full stack trace, not just getMessage().
                    logger.error("[websocket] 连接错误", ex);
                }
            };
            webSocketClient.connect();

            startCapture(webSocketClient);
            return webSocketClient;
        } catch (Exception e) {
            logger.error("failed to initialise audio websocket client", e);
        }
        return null;
    }

    /**
     * Opens the microphone and streams captured PCM to the server on a daemon thread.
     *
     * @param client the websocket client to send audio frames through
     * @throws LineUnavailableException if the capture line cannot be opened
     */
    private void startCapture(WebSocketClient client) throws LineUnavailableException {
        TargetDataLine targetDataLine = AudioSystem.getTargetDataLine(PCM_FORMAT);
        targetDataLine.open(PCM_FORMAT);
        targetDataLine.start();
        logger.info("录音中");
        Thread capture = new Thread(() -> {
            byte[] buffer = new byte[BUFFER_SIZE];
            int read;
            // read() blocks until data is available; -1 signals the line closed.
            while ((read = targetDataLine.read(buffer, 0, buffer.length)) != -1) {
                // Guard against sending before the handshake completes (the
                // original could throw WebsocketNotConnectedException here).
                if (read > 0 && client.isOpen()) {
                    // Send only the bytes actually captured, in a private copy so
                    // the next read() cannot clobber a frame still queued to send.
                    client.send(java.util.Arrays.copyOf(buffer, read));
                }
            }
            targetDataLine.stop();
            targetDataLine.close();
        }, "audio-capture");
        capture.setDaemon(true);
        capture.start();
    }
}
// java获取pcm音频流参考以下链接内容: 《Java编程：实时获取麦克风数据流并进行播放》(浅玩电子的博客, CSDN)。
// 文章来源地址: https://www.toymoban.com/news/detail-512793.html