有些时候我们需要将手机上的视频投到ubuntu上并进行处理,scrcpy就是一个非常低延时并且可以wifi共享的工具,本文主要针对其源码进行修改,实现ubuntu上实时分割手机画面,分为安装编译源(大坑)、修改源码、修改分割推理代码三部分。
scrcpy的使用和wifi连接可参考
Ubuntu安卓手机投屏_zekdot的博客-CSDN博客
源码阅读和前两个部分我是参考这篇博客的,但中间还是不少坑:Scrcpy源码的阅读及在Ubuntu上的实现(二)——获取手机屏幕yuv数据_郑烯烃快去学习的博客-CSDN博客
一、安装编译源码
后面的源码修改有两个重要文件需要修改:stream.c和decoder.c,前者是用来把yuv流推到深度学习那边去的,后者是为了从scrcpy中把流推出来。如果你按照其他博客下载,即使跌跌撞撞编译成功,但发现没有这两个文件,太狗了,我这边找到最近的有这两个文件的是scrcpy1.22版本,千万别下错了!!!。需要下载的文件有两个:scrcpy源码和server
源码链接:https://github.com/Genymobile/scrcpy/tree/v1.22
server链接:Release scrcpy v1.22 · Genymobile/scrcpy · GitHub
下载完成后,解压scrcpy,然后用meson build生成build文件夹,把scrcpy-server放进build/app里面,此时运行ninja -Cbuild。如果报错消息是server没生成,去看看build/app里面有没有可执行文件,如果有那就编译结束了,./scrcpy运行即可
如果此时报错出现什么版本不一致或者其他错,进到/usr/local/share/scrcpy中把server往这里面拷贝一份。
至此,第一步编译运行源码完成。
二、修改源码
源码最重要的就是输出yuv流并通过本地端口广播出去,进入app/src下。修改decoder.c和stream.c以及对应的h文件,需要修改的部分如下:
decoder.c-----输出yuv流
#include "decoder.h"
#include <zmq.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "events.h"
#include "video_buffer.h"
#include "trait/frame_sink.h"
#include "util/log.h"
/* ----- modification added by zengyb ----- */
static bool
push_frame_to_sinks(struct decoder *decoder, const AVFrame *frame) {
//声明结构体
myframe yuv_frame;
//初始化长宽
yuv_frame.width= decoder->codec_ctx->width;
yuv_frame.height=decoder->codec_ctx->height;
//读取数据
int offest=0;
for(int i=0;i<yuv_frame.height;i++)
{
memcpy(yuv_frame.data_Y+offest,frame->data[0]+frame->linesize[0]*i,yuv_frame.width);
//指向下一行
offest += yuv_frame.width;
}
offest=0;
for(int i=0;i<yuv_frame.height/2;i++)
{
memcpy(yuv_frame.data_U+offest,frame->data[1]+frame->linesize[1]*i,yuv_frame.width/2);
offest += yuv_frame.width/2;
}
offest=0;
for(int i=0;i<yuv_frame.height/2;i++)
{
memcpy(yuv_frame.data_V+offest,frame->data[2]+frame->linesize[2]*i,yuv_frame.width/2);
offest += yuv_frame.width/2;
}
// printf("a");
int aa = zmq_send(response,&yuv_frame,sizeof(yuv_frame),ZMQ_DONTWAIT);
if (aa == -1) {
printf("zmq_send error: %s\n", zmq_strerror(errno));
}
// printf("%d",aa);
for (unsigned i = 0; i < decoder->sink_count; ++i) {
struct sc_frame_sink *sink = decoder->sinks[i];
if (!sink->ops->push(sink, frame)) {
LOGE("Could not send frame to sink %d", i);
return false;
}
}
return true;
}
decoder.h-------声明结构体和socket全局变量
// Maximum number of luma (Y) samples per frame; sized for a 1024x448
// phone stream. Frames with width*height beyond this would overflow the
// fixed buffers below.
#define yuv_size 448*1024
// Frame shipped over ZeroMQ: two native int32 header fields followed by
// fixed-capacity Y/U/V planes (YUV420: U and V are each quarter size).
// Because the arrays are fixed-size, each plane sits at a constant byte
// offset in the serialized message regardless of the actual frame size.
typedef struct{
int width;
int height;
uint8_t data_Y[yuv_size];
uint8_t data_U[yuv_size/4];
uint8_t data_V[yuv_size/4];
}myframe;
// ZeroMQ PUB socket, created and bound in stream.c before the demux loop;
// used by decoder.c to publish frames.
extern void *response;
stream.c----初始化socket端口,for循环前初始,最终end的时候删除
###先声明变量
// Definition of the ZeroMQ PUB socket handle declared `extern` in decoder.h.
void *response = NULL;
####
    // Create the ZeroMQ context and PUB socket on which decoder.c
    // (push_frame_to_sinks) publishes raw YUV frames; bound to a local TCP
    // port so external processes (e.g. the Python script) can subscribe.
    void *context = zmq_ctx_new ();
    response = zmq_socket (context, ZMQ_PUB);
    int rc = zmq_bind(response, "tcp://127.0.0.1:5555");
    // printf("%d",rc);
    // printf("111");
    // NOTE(review): assert() is compiled out under NDEBUG, so a bind
    // failure would go unnoticed in release builds — confirm intent.
    assert (rc == 0);

    // Demux loop: pull H.264 packets from the server socket and push them
    // to the decoder until end of stream or an unrecoverable error.
    for (;;) {
        bool ok = stream_recv_packet(stream, packet);
        if (!ok) {
            // end of stream
            break;
        }

        ok = stream_push_packet(stream, packet);
        av_packet_unref(packet);
        if (!ok) {
            // cannot process packet (error already logged)
            break;
        }
    }

    LOGD("End of frames");

    // Release any partially assembled packet left over from the loop.
    if (stream->pending) {
        av_packet_free(&stream->pending);
    }

    av_packet_free(&packet);

finally_close_parser:
    av_parser_close(stream->parser);
finally_close_sinks:
    stream_close_sinks(stream);
finally_free_codec_ctx:
    avcodec_free_context(&stream->codec_ctx);
end:
    // Tear down the ZeroMQ socket and context created above.
    if (response) {
        zmq_close(response);
    }
    if (context) {
        zmq_ctx_term(context);
    }
    stream->cbs->on_eos(stream, stream->cbs_userdata);
    return 0;
}
至此,源码修改完成,yuv流输出到本地端口,下一步让我们用起来吧!!!
三、订阅与深度学习实时分割
我先给一份实例直接订阅imshow的,如果需要融合到其他深度学习比如检测分割之类的程序基于此进行修改即可
import struct

import zmq
import numpy as np
import cv2

# Plane capacity of the C-side `myframe` struct (see decoder.h): data_Y
# holds yuv_size bytes and data_U/data_V hold yuv_size/4 bytes each, so
# every plane starts at a FIXED offset in the message, independent of the
# actual frame size.
YUV_CAPACITY = 448 * 1024
Y_OFFSET = 8  # two native int32 header fields: width, height
U_OFFSET = Y_OFFSET + YUV_CAPACITY
V_OFFSET = U_OFFSET + YUV_CAPACITY // 4

# Subscribe to the YUV stream published by the patched scrcpy.
context = zmq.Context()
socket = context.socket(zmq.SUB)
# Keep only the newest frame: without this, messages queue up whenever
# processing is slower than the stream and the display lags further and
# further behind. Must be set before connect().
socket.setsockopt(zmq.CONFLATE, 1)
socket.connect("tcp://127.0.0.1:5555")
socket.setsockopt_string(zmq.SUBSCRIBE, '')

while True:
    # Receive one serialized myframe struct.
    msg = socket.recv()
    # Read the actual frame size from the header instead of hard-coding it.
    # (The hard-coded slicing at 8+width*height only coincided with the
    # struct's fixed plane offsets when width*height == 448*1024 exactly.)
    width, height = struct.unpack_from('ii', msg, 0)
    chroma_count = (width // 2) * (height // 2)
    # Extract the tightly packed Y/U/V planes (U and V are quarter size).
    y_data = np.frombuffer(msg, np.uint8, width * height, Y_OFFSET).reshape(height, width)
    u_data = np.frombuffer(msg, np.uint8, chroma_count, U_OFFSET).reshape(height // 2, width // 2)
    v_data = np.frombuffer(msg, np.uint8, chroma_count, V_OFFSET).reshape(height // 2, width // 2)
    # Upsample chroma to full resolution, merge, and convert YUV -> BGR.
    yuv_image = cv2.merge([y_data,
                           cv2.resize(u_data, (width, height)),
                           cv2.resize(v_data, (width, height))])
    bgr_image = cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR)
    cv2.imwrite("./1.jpg", bgr_image)  # debug snapshot; remove for more speed
    # Show the live video.
    cv2.imshow('Video', bgr_image)
    # Wait 1 ms; quit the loop if 'q' is pressed.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Close all OpenCV windows.
cv2.destroyAllWindows()
然后给一份完整的用unet做分割的代码
elif mode == "video":
    # NOTE(review): this branch assumes `zmq`, `np`, `cv2`, `time`,
    # `PIL.Image` (as `Image`) and a `unet` model instance are defined
    # earlier in the script — confirm against the full file.
    # Create a ZeroMQ context.
    context = zmq.Context()
    # Create a SUB socket and connect to the scrcpy YUV publisher.
    socket = context.socket(zmq.SUB)
    socket.connect("tcp://127.0.0.1:5555")
    socket.setsockopt_string(zmq.SUBSCRIBE, '')
    width = 1024  # set your frame width here
    height = 448  # set your frame height here
    frame_counter = 0
    fps = 0.0
    while True:
        t1 = time.time()
        # Receive one serialized myframe struct from the socket.
        yuv_frame = socket.recv()
        frame_counter += 1
        if frame_counter % 10 != 0:  # Only process every 10th frame
            continue
        # Convert the raw bytes into numpy plane arrays: 8-byte header
        # (int32 width + int32 height) followed by Y, U and V planes.
        y_data = np.frombuffer(yuv_frame[8:8+width*height], dtype=np.uint8).reshape(height, width)
        u_data = np.frombuffer(yuv_frame[8+width*height:8+width*height*5//4], dtype=np.uint8).reshape(height//2, width//2)
        v_data = np.frombuffer(yuv_frame[8+width*height*5//4:8+width*height*3//2], dtype=np.uint8).reshape(height//2, width//2)
        # Upsample chroma to full size, merge the planes, convert to BGR.
        yuv_image = cv2.merge([y_data, cv2.resize(u_data, (width, height)), cv2.resize(v_data, (width, height))]).astype(np.uint8)
        frame = cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR)
        frame1 = frame  # NOTE(review): frame1 appears unused
        frame = Image.fromarray(np.uint8(frame))
        # # timing of detection alone
        # t2 = time.time()
        # print(t2-t1)
        # Run U-Net segmentation and display the result.
        frame = np.array(unet.detect_image(frame))
        cv2.imshow("video", frame)
        # Wait 1 ms; quit the loop if 'q' is pressed.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        t2 = time.time()
        print(t2-t1)
    print("Video Detection Done!")
    # if video_save_path!="":
    #     print("Save processed video to the path :" + video_save_path)
    #     out.release()
    cv2.destroyAllWindows()
记得一定要搁几帧读一下socket,不然推理无法实时,速度太慢,到时候socket里面堵塞太多东西了会巨卡------这里卡太久了,一步步debug才发现的
这里跑的是voc数据集的unet分割,可见才0.3s的延时,凑合凑合就够用了!!!
error1:
sdl2-config found: NO need '>= 2.0.5'
Run-time dependency sdl2 found: NO (tried pkgconfig and config-tool)
app/meson.build:94:4: ERROR: Dependency "sdl2" not found, tried pkgconfig and config-tool
solution:sudo apt-get install libsdl2-dev
error2:ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
solution:sudo apt-get install openjdk-8-jdk
到了这里,关于scrcpy实现手机屏幕与ubuntu共享、推流、实时分割(wifi环境下)的文章就介绍完了。如果您还想了解更多内容,请在右上角搜索TOY模板网以前的文章或继续浏览下面的相关文章,希望大家以后多多支持TOY模板网!