ffmpeg Comprehensive Application Example (5): Combining Multiple Videos (Linux Version)


Source: WeChat public account 「编程学习基地」

The purpose of this post is to make it easy to compile and run the multi-video combining demo on Linux.

Original article: ffmpeg综合应用示例(五)——多路视频合并

Environment: Ubuntu 20.04 + ffmpeg-4.4-x86_64

Build

export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/workspace/dengzr/linux-x64/lib:/home/workspace/dengzr/ffmpeg-4.4-x86_64/lib/
g++ -o video_combine video_combine.cpp \
	-I /home/workspace/dengzr/ffmpeg-4.4-x86_64/include/ \
	-I /home/workspace/dengzr/linux-x64/include/ \
	-L /home/workspace/dengzr/ffmpeg-4.4-x86_64/lib/ \
	-L /home/workspace/dengzr/linux-x64/lib \
	-lavfilter -lavformat -lavcodec -lpostproc -lswscale -lswresample -lavutil \
	-lx264 -lx265 -lvpx -lfdk-aac -lvorbisenc -lvorbis \
	-lva -lva-drm -lva-x11 -ldrm -lmfx -lOpenCL -lX11 -lXext -lSDL2 \
	-lz -lm -lpthread -ldl -lstdc++
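If the ffmpeg build ships pkg-config files, a much shorter command along these lines may work instead (a sketch, assuming the .pc files live under the prefix shown; the exhaustive command above pins every transitive dependency by hand):

export PKG_CONFIG_PATH=/home/workspace/dengzr/ffmpeg-4.4-x86_64/lib/pkgconfig:$PKG_CONFIG_PATH
g++ -o video_combine video_combine.cpp $(pkg-config --cflags --libs libavfilter libavformat libavcodec libswscale libswresample libavutil)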

Below is the code as modified to build and run on Linux.

/**
*
* 张晖 Hui Zhang
* zhanghuicuc@gmail.com
* 中国传媒大学/数字电视技术
* Communication University of China / Digital TV Technology
*
* This program combines multiple input videos into a single split-screen
* output, and a different video filter can be applied to each input.
* Audio merging is not supported yet.
*/

#include <stdio.h>
#include <string.h>    // strcat
#include <math.h>      // sqrt
#include <inttypes.h>  // PRIx64 for the channel_layout format string
extern "C"
{
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/mathematics.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
// #include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
};
// #include <crtdbg.h>

/*
FIX: H.264 in some container format (FLV, MP4, MKV etc.) need
"h264_mp4toannexb" bitstream filter (BSF)
*Add SPS,PPS in front of IDR frame
*Add start code ("0,0,0,1") in front of NALU
H.264 in some container (MPEG2TS) don't need this BSF.
*/
//'1': Use H.264 Bitstream Filter   
#define USE_H264BSF 0  

/*
FIX:AAC in some container format (FLV, MP4, MKV etc.) need
"aac_adtstoasc" bitstream filter (BSF)
*/
//'1': Use AAC Bitstream Filter   
#define USE_AACBSF 0  

typedef enum{
	VFX_NULL = 0,
	VFX_EDGE = 1,
	VFX_NEGATE = 2
}VFX;
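// VFX_EDGE and VFX_NEGATE map to the libavfilter "edgedetect" and "negate"
// filters in init_spec_filter(); VFX_NULL passes the video through unchanged.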

typedef enum{
	AFX_NULL = 0,
}AFX;

AVBitStreamFilterContext *aacbsfc = NULL;
AVBitStreamFilterContext *h264bsfc = NULL;
//multiple input
static AVFormatContext **ifmt_ctx;
AVFrame **frame = NULL;
//single output
static AVFormatContext *ofmt_ctx;

typedef struct FilteringContext {
	AVFilterContext *buffersink_ctx;
	AVFilterContext **buffersrc_ctx;
	AVFilterGraph *filter_graph;
} FilteringContext;
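// One FilteringContext per output stream: buffersrc_ctx is an array holding
// one buffer source per input file, all feeding a single buffersink.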
static FilteringContext *filter_ctx;

typedef struct InputFile{
	const char* filenames;
	/*
	* position index
	* 0 - 1 - 2
	* 3 - 4 - 5
	* 6 - 7 - 8
	* ……
	*/
	uint32_t video_idx;
	//scale level, 0 means keep the same
	//uint32_t video_expand;
	uint32_t video_effect;
	uint32_t audio_effect;
} InputFile;
InputFile* inputfiles;

typedef struct GlobalContext{
	//must be a perfect square, such as 4 (2x2) or 9 (3x3)
	uint32_t grid_num;
	uint32_t video_num;
	uint32_t enc_width;
	uint32_t enc_height;
	uint32_t enc_bit_rate;
	InputFile* input_file;
	const char* outfilename;
} GlobalContext;
GlobalContext* global_ctx;

static int global_ctx_config()
{
	int i;
	if (global_ctx->grid_num < global_ctx->video_num)
	{
		av_log(NULL, AV_LOG_ERROR, "Setting a wrong grid_num %d \t The grid_num is smaller than video_num!! \n", global_ctx->grid_num);
		global_ctx->grid_num = global_ctx->video_num;
		//global_ctx->stride = sqrt((double)global_ctx->grid_num);
		av_log(NULL, AV_LOG_ERROR, "Automatically change the grid_num to be same as video_num!! \n");
	}

	//global_ctx->stride = sqrt((double)global_ctx->grid_num);
	
	for (i = 0; i < global_ctx->video_num; i++)
	{
		if (global_ctx->input_file[i].video_idx >= global_ctx->grid_num)
		{
			av_log(NULL, AV_LOG_ERROR, "Invalid video_inx value in the No.%d input\n", global_ctx->input_file[i].video_idx);
			return -1;
		}
	}
	return 0;
}


static int open_input_file(InputFile *input_file)
{
	int ret;
	unsigned int i;
	unsigned int j;

	ifmt_ctx = (AVFormatContext**)av_malloc((global_ctx->video_num)*sizeof(AVFormatContext*));
	for (i = 0; i < global_ctx->video_num; i++)
	{
		*(ifmt_ctx + i) = NULL;
		if ((ret = avformat_open_input((ifmt_ctx + i), input_file[i].filenames, NULL, NULL)) < 0) {
			av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
			return ret;
		}
		if ((ret = avformat_find_stream_info(ifmt_ctx[i], NULL)) < 0) {
			av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
			return ret;
		}
		for (j = 0; j < ifmt_ctx[i]->nb_streams; j++) {
			AVStream *stream;
			AVCodecContext *codec_ctx;
			stream = ifmt_ctx[i]->streams[j];
			codec_ctx = stream->codec;

			if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
				|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
				/* Open decoder */
				ret = avcodec_open2(codec_ctx,
					avcodec_find_decoder(codec_ctx->codec_id), NULL);
				if (ret < 0) {
					av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
					return ret;
				}
			}
		}
		av_dump_format(ifmt_ctx[i], 0, input_file[i].filenames, 0);
	}
	return 0;
}


static int open_output_file(const char *filename)
{
	AVStream *out_stream;
	AVStream *in_stream;
	AVCodecContext *dec_ctx, *enc_ctx;
	AVCodec *encoder;
	int ret;
	unsigned int i;
	ofmt_ctx = NULL;
	avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", filename);
	if (!ofmt_ctx) {
		av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
		return AVERROR_UNKNOWN;
	}
	for (i = 0; i < ifmt_ctx[0]->nb_streams; i++) {
		out_stream = avformat_new_stream(ofmt_ctx, NULL);
		if (!out_stream) {
			av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
			return AVERROR_UNKNOWN;
		}
		in_stream = ifmt_ctx[0]->streams[i];
		out_stream->time_base = in_stream->time_base;

		dec_ctx = in_stream->codec;
		enc_ctx = out_stream->codec;
		if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
			|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {			
			/* In this example, we transcode to same properties (picture size,
			* sample rate etc.). These properties can be changed for output
			* streams easily using filters */
			if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
				/* in this example, we choose transcoding to same codec */
				encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
				enc_ctx->height = global_ctx->enc_height;
				enc_ctx->width = global_ctx->enc_width;
				enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
				/* take first format from list of supported formats */
				enc_ctx->pix_fmt = encoder->pix_fmts[0];
				enc_ctx->me_range = 16;
				enc_ctx->max_qdiff = 4;
				enc_ctx->bit_rate = global_ctx->enc_bit_rate;
				enc_ctx->qcompress = 0.6;
				/* video time_base can be set to whatever is handy and supported by encoder */
				enc_ctx->time_base.num = 1;
				enc_ctx->time_base.den = 25;
				enc_ctx->gop_size = 250;
				enc_ctx->max_b_frames = 3;

				AVDictionary * d = NULL;
				char *k = av_strdup("preset");       // if your strings are already allocated,
				char *v = av_strdup("ultrafast");    // you can avoid copying them like this
				av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
				ret = avcodec_open2(enc_ctx, encoder, &d);
			}
			else {
				encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
				enc_ctx->sample_rate = dec_ctx->sample_rate;
				enc_ctx->channel_layout = dec_ctx->channel_layout;
				enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
				/* take first format from list of supported formats */
				enc_ctx->sample_fmt = encoder->sample_fmts[0];
				AVRational time_base = { 1, enc_ctx->sample_rate };
				enc_ctx->time_base = time_base;
				ret = avcodec_open2(enc_ctx, encoder, NULL);
			}
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
				return ret;
			}       
		}
		else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
			av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
			return AVERROR_INVALIDDATA;
		}
		else {
			/* if this stream must be remuxed */
			ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
				ifmt_ctx[0]->streams[i]->codec);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
				return ret;
			}
		}
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
	}
	av_dump_format(ofmt_ctx, 0, filename, 1);
	if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
		ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
			return ret;
		}
	}
	/* init muxer, write output file header */
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0) {
		av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
		return ret;
	}

#if USE_H264BSF  
	h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif  
#if USE_AACBSF  
	aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif 

	return 0;
}


static int init_filter(FilteringContext* fctx, AVCodecContext **dec_ctx,
	AVCodecContext *enc_ctx, const char *filter_spec)
{
	char args[512];
	char pad_name[10];
	int ret = 0;
	int i;
	AVFilter **buffersrc = (AVFilter**)av_malloc(global_ctx->video_num*sizeof(AVFilter*));
	AVFilter *buffersink = NULL;
	AVFilterContext **buffersrc_ctx = (AVFilterContext**)av_malloc(global_ctx->video_num*sizeof(AVFilterContext*));
	AVFilterContext *buffersink_ctx = NULL;
	AVFilterInOut **outputs = (AVFilterInOut**)av_malloc(global_ctx->video_num*sizeof(AVFilterInOut*));
	AVFilterInOut *inputs = avfilter_inout_alloc();
	AVFilterGraph *filter_graph = avfilter_graph_alloc();

	for (i = 0; i < global_ctx->video_num; i++)
	{
		buffersrc[i] = NULL;
		buffersrc_ctx[i] = NULL;
		outputs[i] = avfilter_inout_alloc();
	}
	if (!outputs || !inputs || !filter_graph) {
		ret = AVERROR(ENOMEM);
		goto end;
	}
	if (dec_ctx[0]->codec_type == AVMEDIA_TYPE_VIDEO) {
		for (i = 0; i < global_ctx->video_num; i++)
		{
			buffersrc[i] = (AVFilter *)avfilter_get_by_name("buffer");
		}
		buffersink = (AVFilter *)avfilter_get_by_name("buffersink");
		if (!buffersrc[0] || !buffersink) {
			av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}

		for (i = 0; i < global_ctx->video_num; i++)
		{
			snprintf(args, sizeof(args),
				"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
				dec_ctx[i]->width, dec_ctx[i]->height, dec_ctx[i]->pix_fmt,
				dec_ctx[i]->time_base.num, dec_ctx[i]->time_base.den,
				dec_ctx[i]->sample_aspect_ratio.num,
				dec_ctx[i]->sample_aspect_ratio.den);
			snprintf(pad_name, sizeof(pad_name), "in%d", i);
			ret = avfilter_graph_create_filter(&(buffersrc_ctx[i]), buffersrc[i], pad_name,
				args, NULL, filter_graph);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
				goto end;
			}
		}

		ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
			NULL, NULL, filter_graph);
		if (ret < 0) {
			av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
			goto end;
		}
		ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
			(uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
			AV_OPT_SEARCH_CHILDREN);
		if (ret < 0) {
			av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
			goto end;
		}
	}
	else if (dec_ctx[0]->codec_type == AVMEDIA_TYPE_AUDIO) {
		for (i = 0; i < global_ctx->video_num; i++)
		{
			buffersrc[i] = (AVFilter *)avfilter_get_by_name("abuffer");
		}
		buffersink = (AVFilter *)avfilter_get_by_name("abuffersink");
		if (!buffersrc[0] || !buffersink) {
			av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}

		for (i = 0; i < global_ctx->video_num; i++)
		{
			if (!dec_ctx[i]->channel_layout)
				dec_ctx[i]->channel_layout =
				av_get_default_channel_layout(dec_ctx[i]->channels);
			snprintf(args, sizeof(args),
				"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
				dec_ctx[i]->time_base.num, dec_ctx[i]->time_base.den, dec_ctx[i]->sample_rate,
				av_get_sample_fmt_name(dec_ctx[i]->sample_fmt),
				dec_ctx[i]->channel_layout);
			snprintf(pad_name, sizeof(pad_name), "in%d", i);
			ret = avfilter_graph_create_filter(&(buffersrc_ctx[i]), buffersrc[i], pad_name,
				args, NULL, filter_graph);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
				goto end;
			}
		}
		ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
			NULL, NULL, filter_graph);
		if (ret < 0) {
			av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
			goto end;
		}
		ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
			(uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
			AV_OPT_SEARCH_CHILDREN);
		if (ret < 0) {
			av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
			goto end;
		}
		ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
			(uint8_t*)&enc_ctx->channel_layout,
			sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
		if (ret < 0) {
			av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
			goto end;
		}
		ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
			(uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
			AV_OPT_SEARCH_CHILDREN);
		if (ret < 0) {
			av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
			goto end;
		}
	}
	/* Endpoints for the filter graph. */
	for (i = 0; i < global_ctx->video_num; i++)
	{
		snprintf(pad_name, sizeof(pad_name), "in%d", i);
		outputs[i]->name = av_strdup(pad_name);
		outputs[i]->filter_ctx = buffersrc_ctx[i];
		outputs[i]->pad_idx = 0;
		if (i == global_ctx->video_num - 1)
			outputs[i]->next = NULL;
		else
			outputs[i]->next = outputs[i + 1];
	}

	inputs->name = av_strdup("out");
	inputs->filter_ctx = buffersink_ctx;
	inputs->pad_idx = 0;
	inputs->next = NULL;
	if (!outputs[0]->name || !inputs->name) {
		ret = AVERROR(ENOMEM);
		goto end;
	}
	if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
		&inputs, outputs, NULL)) < 0)
		goto end;
	if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
		goto end;
	/* Fill FilteringContext */
	fctx->buffersrc_ctx = buffersrc_ctx;
	fctx->buffersink_ctx = buffersink_ctx;
	fctx->filter_graph = filter_graph;
end:
	avfilter_inout_free(&inputs);
	av_free(buffersrc);
	//	av_free(buffersrc_ctx);
	avfilter_inout_free(outputs);
	av_free(outputs);

	return ret;
}


static int init_spec_filter(void)
{
	char filter_spec[512];
	char spec_temp[128];
	unsigned int i;
	unsigned int j;
	unsigned int k;
	unsigned int x_coor;
	unsigned int y_coor;
	AVCodecContext** dec_ctx_array;
	int	stream_num = ifmt_ctx[0]->nb_streams;
	int stride = (int)sqrt((long double)global_ctx->grid_num);
	int ret;
	filter_ctx = (FilteringContext *)av_malloc_array(stream_num, sizeof(*filter_ctx));
	dec_ctx_array = (AVCodecContext**)av_malloc(global_ctx->video_num*sizeof(AVCodecContext));

	if (!filter_ctx || !dec_ctx_array)
		return AVERROR(ENOMEM);
	for (i = 0; i < stream_num; i++) {
		filter_ctx[i].buffersrc_ctx = NULL;
		filter_ctx[i].buffersink_ctx = NULL;
		filter_ctx[i].filter_graph = NULL;
		for (j = 0; j < global_ctx->video_num; j++)
			dec_ctx_array[j] = ifmt_ctx[j]->streams[i]->codec;
		if (!(ifmt_ctx[0]->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
			|| ifmt_ctx[0]->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
			continue;
		if (ifmt_ctx[0]->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			if (global_ctx->grid_num == 1)
				snprintf(filter_spec, sizeof(filter_spec), "null");
			else
			{
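				/* For the 2x2 demo in main() (grid_num = 4, 640x360 output), the code
				 * below builds a filter_spec roughly like this (an illustrative sketch,
				 * with the overlay position expressions written out evaluated):
				 *   color=c=black@1:s=640x360[x0];
				 *   [in0]edgedetect[ine0];[ine0]scale=w=320:h=180[inn0];[x0][inn0]overlay=0:0[x1];
				 *   [in1]null[ine1];[ine1]scale=w=320:h=180[inn1];[x1][inn1]overlay=320:0[x2];
				 *   ...
				 *   [x4]null[out]
				 */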

				snprintf(filter_spec, sizeof(filter_spec), "color=c=black@1:s=%dx%d[x0];", global_ctx->enc_width, global_ctx->enc_height);
				k = 1;
				for (j = 0; j < global_ctx->video_num; j++)
				{
					switch (global_ctx->input_file[j].video_effect)
					{
					case VFX_EDGE:
						snprintf(spec_temp, sizeof(spec_temp), "[in%d]edgedetect[ine%d];", j, j);
						break;
					case VFX_NEGATE:
						snprintf(spec_temp, sizeof(spec_temp), "[in%d]negate[ine%d];", j, j);
						break;
					case VFX_NULL:
					default:
						snprintf(spec_temp, sizeof(spec_temp), "[in%d]null[ine%d];", j, j);
						break;
					}
					strcat(filter_spec, spec_temp);
					x_coor = global_ctx->input_file[j].video_idx % stride;
					y_coor = global_ctx->input_file[j].video_idx / stride;
					snprintf(spec_temp, sizeof(spec_temp), "[ine%d]scale=w=%d:h=%d[inn%d];[x%d][inn%d]overlay=%d*%d/%d:%d*%d/%d[x%d];", j, global_ctx->enc_width / stride, global_ctx->enc_height / stride, j, k - 1, j, global_ctx->enc_width, x_coor, stride, global_ctx->enc_height, y_coor, stride, k);
					k++;
					strcat(filter_spec, spec_temp);
				}
				snprintf(spec_temp, sizeof(spec_temp), "[x%d]null[out]", k - 1);
				strcat(filter_spec, spec_temp);
			}
		}
		else
		{
			if (global_ctx->video_num == 1)
				snprintf(filter_spec, sizeof(filter_spec), "anull");
			else{
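				/* For four inputs this builds "[in0][in1][in2][in3]amix=inputs=4[out]",
				 * mixing all of the input audio streams into one. */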
				snprintf(filter_spec, sizeof(filter_spec), "");
				for (j = 0; j < global_ctx->video_num; j++)
				{
					snprintf(spec_temp, sizeof(spec_temp), "[in%d]", j);
					strcat(filter_spec, spec_temp);
				}
				snprintf(spec_temp, sizeof(spec_temp), "amix=inputs=%d[out]", global_ctx->video_num);
				strcat(filter_spec, spec_temp);
			}
		}

		ret = init_filter(&filter_ctx[i], dec_ctx_array,
			ofmt_ctx->streams[i]->codec, filter_spec);
		if (ret)
			return ret;
	}
	av_free(dec_ctx_array);
	return 0;
}

int videocombine(GlobalContext* video_ctx)
{
	int ret;
	int tmp = 0;
	int got_frame_num = 0;
	unsigned int stream_index;

	AVPacket packet;
	AVPacket enc_pkt;
	AVFrame* picref;
	enum AVMediaType mediatype;
	int read_frame_done = 0;
	int flush_now = 0;
	int framecnt = 0;
	int i, j;
	int got_frame;
	int enc_got_frame = 0;
	int(*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
	int(*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *);

	global_ctx = video_ctx;
	global_ctx_config();

	frame = (AVFrame**)av_malloc(global_ctx->video_num*sizeof(AVFrame*));
	picref = av_frame_alloc();
	av_register_all();
	avfilter_register_all();

	if ((ret = open_input_file(global_ctx->input_file)) < 0)
		goto end;
	if ((ret = open_output_file(global_ctx->outfilename)) < 0)
		goto end;
	if ((ret = init_spec_filter()) < 0)
		goto end;

	while (1) {
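		/* Round-robin over the inputs: decode one frame from each input and feed
		 * it to that input's buffer source, then drain the buffer sink below and
		 * encode/mux whatever the filter graph has produced. */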
		for (i = 0; i < global_ctx->video_num; i++)
		{
			if (read_frame_done < 0)
			{
				flush_now = 1;
				goto flush;
			}			
			while ((read_frame_done=av_read_frame(ifmt_ctx[i], &packet)) >= 0)
			{
				stream_index = packet.stream_index;
				mediatype = ifmt_ctx[i]->streams[stream_index]->codec->codec_type;
				if (mediatype == AVMEDIA_TYPE_VIDEO || mediatype == AVMEDIA_TYPE_AUDIO)
				{
					frame[i] = av_frame_alloc();
					if (!(frame[i]))
					{
						ret = AVERROR(ENOMEM);
						goto end;
					}
					av_packet_rescale_ts(&packet,
						ifmt_ctx[i]->streams[stream_index]->time_base,
						ifmt_ctx[i]->streams[stream_index]->codec->time_base);
					dec_func = (mediatype == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 : avcodec_decode_audio4;
					ret = dec_func(ifmt_ctx[i]->streams[stream_index]->codec, frame[i], &got_frame, &packet);
					if (ret < 0)
					{
						av_frame_free(&frame[i]);
						av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
						goto end;
					}
					if (got_frame) {
						frame[i]->pts = av_frame_get_best_effort_timestamp(frame[i]);
						ret = av_buffersrc_add_frame(filter_ctx[stream_index].buffersrc_ctx[i], frame[i]);
						if (ret < 0) {
							av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
							goto end;
						}
					}
					else
					{
						av_frame_free(&(frame[i]));
					}
				}
				av_free_packet(&packet);
				if (got_frame)
				{
					got_frame = 0;
					break;
				}
			}
		}

		while (1) {
			ret = av_buffersink_get_frame_flags(filter_ctx[stream_index].buffersink_ctx, picref, 0);
			if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			{
				ret = 0;
				break;
			}
			if (ret < 0)
				goto end;

			if (picref) {								
				enc_pkt.data = NULL;
				enc_pkt.size = 0;
				av_init_packet(&enc_pkt);
				enc_func = (mediatype == AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
				ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
					picref, &enc_got_frame);	
				if (ret < 0)
				{
					av_log(NULL, AV_LOG_ERROR, "Encoding failed\n");
					goto end;
				}
				if (enc_got_frame == 1){
					framecnt++;
					enc_pkt.stream_index = stream_index;
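					/* Timestamps are synthesized from the input frame rate and a running
					 * frame counter rather than taken from the filtered frames, which
					 * assumes constant-frame-rate video inputs. */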

					//Write PTS
					AVRational time_base = ofmt_ctx->streams[stream_index]->time_base;//{ 1, 1000 };
					AVRational r_framerate1 = ifmt_ctx[0]->streams[stream_index]->r_frame_rate;// { 50, 2 };
					AVRational time_base_q = { 1, AV_TIME_BASE };
					//Duration between two successive frames, in AV_TIME_BASE units (microseconds)
					int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));
					//Parameters
					//enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
					enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
					enc_pkt.dts = enc_pkt.pts;
					enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
					enc_pkt.pos = -1;

#if USE_H264BSF
					av_bitstream_filter_filter(h264bsfc, ofmt_ctx->streams[stream_index]->codec, NULL, &enc_pkt.data, &enc_pkt.size, enc_pkt.data, enc_pkt.size, 0);
#endif
#if USE_AACBSF
					av_bitstream_filter_filter(aacbsfc, ofmt_ctx->streams[stream_index]->codec, NULL, &enc_pkt.data, &enc_pkt.size, enc_pkt.data, enc_pkt.size, 0);
#endif

					ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
					av_log(NULL, AV_LOG_INFO, "write frame %d\n", framecnt);
					av_free_packet(&enc_pkt);
				}
				av_frame_unref(picref);
			}	
		}
	}

flush:
	/* flush filters and encoders */
	for (i = 0; i < ifmt_ctx[0]->nb_streams; i++) {
		stream_index = i;

		/* flush encoder */
		if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
			AV_CODEC_CAP_DELAY))
			continue;
		while (1) {
			enc_pkt.data = NULL;
			enc_pkt.size = 0;
			av_init_packet(&enc_pkt);
			enc_func = (ifmt_ctx[0]->streams[stream_index]->codec->codec_type == AVMEDIA_TYPE_VIDEO) ?
			avcodec_encode_video2 : avcodec_encode_audio2;
			ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
				NULL, &enc_got_frame);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Encoding failed\n");
				goto end;
			}
			if (!enc_got_frame){
				ret = 0;
				break;
			}
			printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size);

			//Write PTS
			AVRational time_base = ofmt_ctx->streams[stream_index]->time_base;//{ 1, 1000 };
			AVRational r_framerate1 = ifmt_ctx[0]->streams[stream_index]->r_frame_rate;// { 50, 2 };
			AVRational time_base_q = { 1, AV_TIME_BASE };
			//Duration between two successive frames, in AV_TIME_BASE units (microseconds)
			int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));
			//Parameters
			enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
			enc_pkt.dts = enc_pkt.pts;
			enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base);

			/* Convert PTS/DTS */
			enc_pkt.pos = -1;
			framecnt++;
			ofmt_ctx->duration = enc_pkt.duration * framecnt;

			/* mux encoded frame */
			ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
		}
	}

	av_write_trailer(ofmt_ctx);

#if USE_H264BSF  
	av_bitstream_filter_close(h264bsfc);
#endif  
#if USE_AACBSF  
	av_bitstream_filter_close(aacbsfc);
#endif  

end:
	av_free_packet(&packet);
	for (i = 0; i < global_ctx->video_num; i++)
	{
		av_frame_free(&(frame[i]));
		for (j = 0; j < ofmt_ctx->nb_streams; j++) {
			avcodec_close(ifmt_ctx[i]->streams[j]->codec);
		}
	}
	av_free(frame);
	av_free(picref);
	for (i = 0; i<ofmt_ctx->nb_streams; i++)
	{
		if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
			avcodec_close(ofmt_ctx->streams[i]->codec);
		av_free(filter_ctx[i].buffersrc_ctx);
		if (filter_ctx && filter_ctx[i].filter_graph)
			avfilter_graph_free(&filter_ctx[i].filter_graph);
	}

	av_free(filter_ctx);
	for (i = 0; i < global_ctx->video_num; i++)
		avformat_close_input(&(ifmt_ctx[i]));
	if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	av_free(ifmt_ctx);

	if (ret < 0)
		av_log(NULL, AV_LOG_ERROR, "Error occurred\n");
	return (ret ? 1 : 0);
}


int main(int argc, char **argv)
{
	//test 2x2
	inputfiles = (InputFile*)av_malloc_array(4, sizeof(InputFile));
	if (!inputfiles)
		return AVERROR(ENOMEM);

	
	inputfiles[0].filenames = "in1.flv";
	//inputfiles[0].video_expand = 0;
	inputfiles[0].video_idx = 0;
	inputfiles[0].video_effect = VFX_EDGE;
	inputfiles[0].audio_effect = AFX_NULL;
	inputfiles[1].filenames = "in2.flv";
	//inputfiles[1].video_expand = 0;
	inputfiles[1].video_idx = 1;
	inputfiles[1].video_effect = VFX_NULL;
	inputfiles[1].audio_effect = AFX_NULL;
	inputfiles[2].filenames = "in3.flv";
	//inputfiles[2].video_expand = 0;
	inputfiles[2].video_idx = 2;
	inputfiles[2].video_effect = VFX_NULL;
	inputfiles[2].audio_effect = AFX_NULL;
	inputfiles[3].filenames = "in4.flv";
	//inputfiles[3].video_expand = 0;
	inputfiles[3].video_idx = 3;
	inputfiles[3].video_effect = VFX_NEGATE;
	inputfiles[3].audio_effect = AFX_NULL;

	global_ctx = (GlobalContext*)av_malloc(sizeof(GlobalContext));
	global_ctx->video_num = 4;
	global_ctx->grid_num = 4;
	global_ctx->enc_bit_rate = 500000;
	global_ctx->enc_height = 360;
	global_ctx->enc_width = 640;
	global_ctx->outfilename = "combined.flv";
	global_ctx->input_file = inputfiles;

	return videocombine(global_ctx);
}
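With in1.flv through in4.flv in the working directory, the demo lays the four decoded inputs out on a 2x2 grid (edge detection on the first tile, color negation on the fourth) and writes a 640x360, 500 kbit/s FLV named combined.flv.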
