这是两个不同的任务,需要分别实现。
任务一:基于ffmpeg库接收rtsp视频数据,并利用udp将以RTP协议将数据发送。
以下是基于ffmpeg库接收rtsp视频数据,然后使用udp发送RTP协议数据的示例代码:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "libavformat/avformat.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#define MAX_PKT_SIZE 1500
#define H264_PAYLOAD_TYPE 96
struct sockaddr_in servaddr;
int sockfd;
int init_udp_socket(){
sockfd = socket(AF_INET, SOCK_DGRAM, 0);
if(sockfd < 0){
printf("Could not create socket\n");
exit(-1);
}
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_port = htons(5000); // 设置RTP传输的端口号
if(inet_aton("127.0.0.1", &servaddr.sin_addr) == 0){ //设置接收端IP地址
printf("IP address conversion error\n");
exit(-1);
}
return 0;
}
int send_pkt(unsigned char* buf, int len, int rtptime){
unsigned char packet[MAX_PKT_SIZE];
memset(packet, 0, MAX_PKT_SIZE);
memcpy(packet, buf, len);
packet[1] = packet[1] | 0x80; //设置RTP版本号
packet[1] = packet[1] | H264_PAYLOAD_TYPE;
packet[2] = (rtptime >> 24) & 0xFF;
packet[3] = (rtptime >> 16) & 0xFF;
packet[4] = (rtptime >> 8) & 0xFF;
packet[5] = rtptime & 0xFF;
// 发送RTP包
if(sendto(sockfd, packet, len, 0, (struct sockaddr*)&servaddr, sizeof(servaddr)) < 0) {
printf("Packet sending error\n");
exit(-1);
}
return 0;
}
int main(int argc, char* argv[])
{
AVFormatContext* ifmt_ctx = NULL;
AVCodecContext* codec_ctx = NULL;
AVCodec* codec = NULL;
AVPacket* pkt = NULL;
int video_stream_index = -1, ret;
int64_t last_pts = AV_NOPTS_VALUE;
int64_t last_time = 0;
int rtp_time=0;
/* Initialize AVFormatContext */
av_register_all();
avformat_network_init();
AVDictionary *options = NULL;
av_dict_set(&options, "rtsp_transport", "tcp", 0);
/* Open input media file */
if ((ret = avformat_open_input(&ifmt_ctx, "rtsp://xxx.xxx.xxx.xxx:xxx/live.sdp", NULL, &options)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
/* Get stream information */
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
/* Find the first video stream */
for (int i = 0; i < ifmt_ctx->nb_streams; i++) {
if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_index = i;
break;
}
}
if (video_stream_index == -1) {
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream\n");
return AVERROR_STREAM_NOT_FOUND;
}
/* Get codec parameters */
codec = avcodec_find_decoder(ifmt_ctx->streams[video_stream_index]->codecpar->codec_id);
if (codec == NULL) {
av_log(NULL, AV_LOG_ERROR, "Cannot find decoder\n");
return AVERROR_DECODER_NOT_FOUND;
}
codec_ctx = avcodec_alloc_context3(codec);
if (codec_ctx == NULL) {
av_log(NULL, AV_LOG_ERROR, "Cannot allocate codec context\n");
return AVERROR(ENOMEM);
}
if ((ret = avcodec_parameters_to_context(codec_ctx, ifmt_ctx->streams[video_stream_index]->codecpar)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot initialize codec context\n");
return ret;
}
if ((ret = avcodec_open2(codec_ctx, codec, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open codec\n");
return ret;
}
/* Allocate packet */
pkt = av_packet_alloc();
if (pkt == NULL) {
av_log(NULL, AV_LOG_ERROR, "Cannot allocate packet\n");
return AVERROR(ENOMEM);
}
init_udp_socket();
while (1) {
if ((ret = av_read_frame(ifmt_ctx, pkt)) < 0)
break;
if (pkt->stream_index == video_stream_index) {
if ((ret = avcodec_send_packet(codec_ctx, pkt)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot send packet to decoder\n");
break;
}
while (1) {
AVFrame* frame = av_frame_alloc();
if (frame == NULL) {
av_log(NULL, AV_LOG_ERROR, "Cannot allocate frame\n");
return AVERROR(ENOMEM);
}
ret = avcodec_receive_frame(codec_ctx, frame);
if (ret < 0) {
av_frame_free(&frame);
break;
}
int delayed_frames = (pkt->pts - last_pts) * av_q2d(ifmt_ctx->streams[video_stream_index]->time_base) * 1000;
int64_t cur_time = av_gettime_relative() / 1000;
if (last_time + delayed_frames > cur_time) {
av_frame_free(&frame);
av_usleep((last_time + delayed_frames - cur_time) * 1000);
cur_time = av_gettime_relative() / 1000;
}
last_pts = pkt->pts;
last_time = cur_time;
uint8_t *buffer = NULL;
int buffer_size;
ret = av_image_alloc(buffer, buffer_size, codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt, 1);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot allocate image\n");
return ret;
}
ret = av_image_copy_to_buffer(buffer, buffer_size, frame->data, frame->linesize,
codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, 1);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error copying frame to buffer\n");
return ret;
}
int size = ret;
send_pkt(buffer, size, rtp_time);
rtp_time += 90000 / 25;
av_free(buffer);
av_frame_free(&frame);
}
}
}
av_packet_free(&pkt);
avcodec_free_context(&codec_ctx);
avformat_close_input(&ifmt_ctx);
return 0;
}
任务二:利用c++利用udp接收RTP数据包,并基于ffmpeg库播放视频。
以下是利用c++利用udp接收RTP数据包,并可以基于ffmpeg库播放视频的示例代码:
```cpp
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <unistd.h>
#define MAX_PKT_SIZE 1500
#define H264_PAYLOAD_TYPE 96
struct sockaddr_in servaddr;
int sockfd;
int init_udp_socket(){
sockfd = socket(AF_INET, SOCK_DGRAM, 0);
if(sockfd < 0){
printf("Could not create socket\n");
exit(-1);
}
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_port = htons(5000); // 设置RTP传输的端口号
servaddr.sin_addr.s_addr = htons(INADDR_ANY); // 接收任意地址
if(bind(sockfd, (struct sockaddr*)&servaddr, sizeof(servaddr)) < 0){
printf("Could not bind socket\n");
exit(-1);
}
return 0;
}
int recv_pkt(unsigned char* buf, int max_len){
int length = 0;
struct sockaddr_in recvaddr;
socklen_t recvlen = sizeof(recvaddr);
length = recvfrom(sockfd, buf, max_len, 0, (struct sockaddr*)&recvaddr, &recvlen);
if(length < 0){
printf("Packet receiving error\n");
exit(-1);
}
return length;
}
int main(int argc, char* argv[])
{
AVCodecContext* codec_ctx = NULL;
AVCodec* codec = NULL;
AVPacket* pkt = NULL;
int ret;
int64_t last_pts = AV_NOPTS_VALUE;
int64_t last_time = 0;
/* Initialize AVFormatContext */
av_register_all();
/* Get codec parameters */
codec = avcodec_find_decoder(AV_CODEC_ID_H264);
if (codec == NULL) {
av_log(NULL, AV_LOG_ERROR, "Cannot find decoder\n");
return AVERROR_DECODER_NOT_FOUND;
}
codec_ctx = avcodec_alloc_context3(codec);
if (codec_ctx == NULL) {
av_log(NULL, AV_LOG_ERROR, "Cannot allocate codec context\n");
return AVERROR(ENOMEM);
}
if ((ret = avcodec_open2(codec_ctx, codec, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open codec\n");
return ret;
}
/* Allocate packet */
pkt = av_packet_alloc();
if (pkt == NULL) {
av_log(NULL, AV_LOG_ERROR, "Cannot allocate packet\n");
return AVERROR(ENOMEM);
}
init_udp_socket();
/* Initialize frame and buffer */
AVFrame* frame = av_frame_alloc();
if (frame == NULL) {
av_log(NULL, AV_LOG_ERROR, "Cannot allocate frame\n");
return AVERROR(ENOMEM);
}
uint8_t *buffer = NULL;
int buffer_size;
int bpp;
bpp = av_get_bytes_per_pixel(AV_PIX_FMT_RGB24);
buffer_size = codec_ctx->width * codec_ctx->height * bpp;
ret = av_image_alloc(buffer, buffer_size, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24, 1);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot allocate image\n");
return ret;
}
AVFormatContext* ofmt_ctx = NULL;
AVStream* out_stream = NULL;
/* Initialize output context */
if (avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, "output.mp4") < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot allocate output format context\n");
return -1;
}
out_stream = avformat_new_stream(ofmt_ctx, NULL);
avcodec_parameters_from_context(out_stream->codecpar, codec_ctx);
avio_open(&ofmt_ctx->pb, "output.mp4", AVIO_FLAG_WRITE);
avformat_write_header(ofmt_ctx, NULL);
SwsContext* img_convert_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24,
SWS_BICUBIC, NULL, NULL, NULL);
while (1) {
unsigned char buf[MAX_PKT_SIZE];
int pkt_size = recv_pkt(buf, MAX_PKT_SIZE);
if((buf[0] & 0x1F) == H264_PAYLOAD_TYPE){ // 判断是否为H.264视频流
pkt->data = buf + 12; // RTP头为12字节
pkt->size = pkt_size - 12;
/* Send packet to decoder */
if ((ret = avcodec_send_packet(codec_ctx, pkt)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot send packet to decoder\n");
break;
}
while (1) {
ret = avcodec_receive_frame(codec_ctx, frame);
if (ret < 0) {
break;
}
int delayed_frames = (frame->pts - last_pts) * av_q2d(codec_ctx->time_base) * 1000;
int64_t cur_time = av_gettime_relative() / 1000;
if (last_time + delayed_frames > cur_time) {
av_usleep((last_time + delayed_frames - cur_time) * 1000);
cur_time = av_gettime_relative() / 1000;
}
last_pts = frame->pts;
last_time = cur_time;
sws_scale(img_convert_ctx, frame->data, frame->linesize, 0, codec_ctx->height, buffer, frame->width * bpp);
/* Write the frame to the file */
AVPacket out_pkt;
av_init_packet(&out_pkt);
out_pkt.stream_index = out_stream->index;
out_pkt.data = buffer;
out_pkt.size = buffer_size;
av_write_frame(ofmt_ctx, &out_pkt);
}
}
}
avcodec_free_context(&codec_ctx);
av_packet_free(&pkt);
av_frame_free(&frame);
av_free(buffer);
sws_freeContext(img_convert_ctx);
av_write_trailer(ofmt_ctx);
avformat_close_input(&ofmt_ctx);
avformat_free_context(ofmt_ctx);
return 0;
}
需要注意的是,为了使两段代码配套使用,请确保两段代码中RTP端口号相同,并且发送端的IP地址需要设置为接收端的地址。文章来源:https://www.toymoban.com/news/detail-664349.html
另外,上述代码只是示例,并不保证在所有环境中都能正常运行。应根据实际情况,考虑系统架构、网络带宽、传输协议、解码器等因素,综合进行调整和优化。文章来源地址https://www.toymoban.com/news/detail-664349.html
到了这里,关于利用c++基于ffmpeg库接收rtsp视频数据,并利用udp将以RTP协议将数据发送,同时利用udp接收RTP数据包,并基于ffmpeg库播放视频。的文章就介绍完了。如果您还想了解更多内容,请在右上角搜索TOY模板网以前的文章或继续浏览下面的相关文章,希望大家以后多多支持TOY模板网!