Created
May 8, 2020 04:03
-
-
Save prabindh/152daa6bf0ad61c2b7fee77366d5c345 to your computer and use it in GitHub Desktop.
Programmatically read video packets and decode them using the FFmpeg API
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Modified from https://gist.github.com/cedricve/a7f6817c18eeb19a0716ed0ca0e037c4 | |
// Open the initial context variables that are needed | |
SwsContext *img_convert_ctx; | |
AVFormatContext* format_ctx = avformat_alloc_context(); | |
AVCodecContext* codec_ctx = NULL; | |
int video_stream_index; | |
std::ofstream output_file; | |
int64_t last_time = 0, first_time= 0; | |
std::cout << std::endl << "Starting video thread" << std::endl; | |
//open TCP | |
if (avformat_open_input(&format_ctx, "Listening path or full path to MP4", | |
NULL, NULL) != 0) { | |
return EXIT_FAILURE; | |
} | |
if (avformat_find_stream_info(format_ctx, NULL) < 0) { | |
return EXIT_FAILURE; | |
} | |
//search video stream | |
for (int i = 0; i < format_ctx->nb_streams; i++) { | |
if (format_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) | |
video_stream_index = i; | |
} | |
AVPacket packet; | |
av_init_packet(&packet); | |
//open output file | |
AVFormatContext* output_ctx = avformat_alloc_context(); | |
AVStream* stream = NULL; | |
int cnt = 0; | |
//start reading packets from stream and write them to file | |
av_read_play(format_ctx); //play TCP | |
// Get the codec | |
AVCodec *codec = NULL; | |
codec = avcodec_find_decoder(AV_CODEC_ID_H264); | |
if (!codec) { | |
exit(1); | |
} | |
// Add this to allocate the context by codec | |
codec_ctx = avcodec_alloc_context3(codec); | |
avcodec_get_context_defaults3(codec_ctx, codec); | |
avcodec_copy_context(codec_ctx, format_ctx->streams[video_stream_index]->codec); | |
if (avcodec_open2(codec_ctx, codec, NULL) < 0) | |
{ | |
std::cout << "avcodec_open2 failed for video"; | |
exit(1); | |
} | |
img_convert_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, | |
codec_ctx->pix_fmt, codec_ctx->width, codec_ctx->height, AV_PIX_FMT_RGB24, | |
SWS_BICUBIC, NULL, NULL, NULL); | |
int size = avpicture_get_size(AV_PIX_FMT_YUV420P, codec_ctx->width, | |
codec_ctx->height); | |
uint8_t* picture_buffer = (uint8_t*)(av_malloc(size)); | |
AVFrame* picture = av_frame_alloc(); | |
AVFrame* picture_rgb = av_frame_alloc(); | |
int size2 = avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width, | |
codec_ctx->height); | |
uint8_t* picture_buffer_2 = (uint8_t*)(av_malloc(size2)); | |
avpicture_fill((AVPicture *)picture, picture_buffer, AV_PIX_FMT_YUV420P, | |
codec_ctx->width, codec_ctx->height); | |
avpicture_fill((AVPicture *)picture_rgb, picture_buffer_2, AV_PIX_FMT_RGB24, | |
codec_ctx->width, codec_ctx->height); | |
double timeBase = av_q2d(format_ctx->streams[video_stream_index]->time_base); | |
std::cout << "This is the timebase: " << timeBase << std::endl; | |
// Main read/decode loop. NOTE: the loop body is truncated in this view —
// the closing brace and the per-packet cleanup are not visible here.
while (av_read_frame(format_ctx, &packet) >= 0 && !bQuit) //&& cnt < 1000) { //read ~ 1000 frames | |
{ | |
if (packet.stream_index == video_stream_index) | |
{ | |
//packet is video | |
// Lazily create the output stream on the first video packet, mirroring the
// input stream's codec parameters (legacy AVStream::codec API).
if (stream == NULL) { //create stream in file | |
stream = avformat_new_stream(output_ctx, | |
format_ctx->streams[video_stream_index]->codec->codec); | |
avcodec_copy_context(stream->codec, | |
format_ctx->streams[video_stream_index]->codec); | |
stream->sample_aspect_ratio = | |
format_ctx->streams[video_stream_index]->codec->sample_aspect_ratio; | |
} | |
// 'check' is set nonzero by the decoder when a full frame was produced.
int check = 0; | |
// Retarget the packet at the output stream before any remux write.
// NOTE(review): stream->id is presumably 0 here — confirm it matches the
// output stream's index, not the input's.
packet.stream_index = stream->id; | |
// Deprecated API: avcodec_decode_video2 was replaced by the
// avcodec_send_packet / avcodec_receive_frame pair in newer FFmpeg.
int result = avcodec_decode_video2(codec_ctx, picture, &check, &packet); | |
if (result <= 0) | |
{ | |
std::cout << "No video decode result" << std::endl; | |
// NOTE(review): this 'continue' skips whatever per-packet cleanup
// (av_free_packet/av_packet_unref) sits at the unseen end of the loop —
// likely a packet memory leak on every decode error; verify downstream.
continue; | |
} | |
// Convert the packet's decode timestamp to seconds via the stream time base.
last_time = (packet.dts * timeBase); |
Author
prabindh
commented
May 8, 2020
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment