Created May 12, 2015 at 08:11.
Save zyxar/17cf3168ab7902cacd66 to your computer and use it in GitHub Desktop.
rtsp_muxer
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review it, open the file in an editor that reveals hidden Unicode characters. Learn more about bidirectional Unicode characters.
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>

// FFmpeg is a C library; disable C++ name mangling for its headers.
extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/avstring.h>
}

/* Parameters of the generated test video. */
#define FRAME_RATE 15
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
/* Paint a synthetic, time-varying YUV420P test pattern into `pict`.
 * `frame_index` drives the animation; `width`/`height` are the luma
 * dimensions (chroma planes are half resolution in each direction). */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    /* The encoder may still hold an internal reference to a frame we
     * previously handed it; make sure this one is safe to modify. */
    if (av_frame_make_writable(pict) < 0)
        exit(1);
    const int t = frame_index;
    /* Y plane: diagonal gradient drifting over time. */
    uint8_t* luma = pict->data[0];
    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x)
            luma[x] = x + y + t * 3;
        luma += pict->linesize[0];
    }
    /* Cb and Cr planes: independent gradients, half resolution. */
    uint8_t* cb = pict->data[1];
    uint8_t* cr = pict->data[2];
    for (int y = 0; y < height / 2; ++y) {
        for (int x = 0; x < width / 2; ++x) {
            cb[x] = 128 + y + t * 2;
            cr[x] = 64 + x + t * 5;
        }
        cb += pict->linesize[1];
        cr += pict->linesize[2];
    }
}
class OutputStream { | |
public: | |
OutputStream(char const*); | |
virtual ~OutputStream(); | |
void addVideoStream(enum AVCodecID codecId); | |
int writeVideoFrame(); | |
private: | |
AVStream* m_videoStream; | |
// AVStream* m_audioStream; | |
AVFormatContext* m_context; | |
int64_t m_pts; | |
std::string m_url; | |
AVFrame* m_frame; | |
void alloc_picture(enum AVPixelFormat pix_fmt, int width, int height); | |
}; | |
/* Build the output context for an RTSP destination.
 * `url` is the RTSP endpoint, e.g. "rtsp://host:port/app/stream.sdp".
 * Aborts (assert) if the context or the "rtsp" muxer is unavailable. */
OutputStream::OutputStream(char const* url)
    : m_videoStream (NULL)  // set by addVideoStream()
    , m_context (NULL)
    , m_pts (0)             // BUGFIX: was never initialized -> garbage frame pts
    , m_url (url)
    , m_frame (NULL)        // allocated by alloc_picture()
{
    AVFormatContext* oc = avformat_alloc_context();
    assert (oc);
    // Select the RTSP muxer; oformat must be set before writing the header.
    oc->oformat = av_guess_format("rtsp", url, NULL);
    assert (oc->oformat);
    av_strlcpy(oc->filename, url, sizeof(oc->filename));
    m_context = oc;
}
/* Finalize the stream and release everything acquired during the object's
 * lifetime.  The trailer/codec teardown is guarded so destruction is safe
 * even when addVideoStream() was never called (no header was written). */
OutputStream::~OutputStream()
{
    if (m_videoStream) {
        // Only valid after avformat_write_header() succeeded in addVideoStream().
        av_write_trailer(m_context);
        avcodec_close(m_videoStream->codec);
    }
    av_frame_free(&m_frame);          // no-op on NULL
    avformat_free_context(m_context); // frees the streams it owns as well
}
void OutputStream::addVideoStream(enum AVCodecID codecId) | |
{ | |
m_context->oformat->video_codec = codecId; | |
AVCodecContext *c; | |
//AVCodec *codec = avcodec_find_encoder(codecId); | |
//assert (codec); | |
m_videoStream = avformat_new_stream(m_context, NULL); | |
assert (m_videoStream); | |
c = m_videoStream->codec; | |
c->codec_id = AV_CODEC_ID_H264; | |
c->codec_type = AVMEDIA_TYPE_VIDEO; | |
/* Put sample parameters. */ | |
c->bit_rate = 400000; | |
/* Resolution must be a multiple of two. */ | |
c->width = 640; | |
c->height = 480; | |
m_videoStream->time_base = (AVRational){ 1, FRAME_RATE }; | |
c->time_base = m_videoStream->time_base; | |
c->gop_size = 12; /* emit one intra frame every twelve frames at most */ | |
c->pix_fmt = STREAM_PIX_FMT; | |
if (m_context->oformat->flags & AVFMT_GLOBALHEADER) | |
c->flags |= CODEC_FLAG_GLOBAL_HEADER; | |
/* open the codec */ | |
//assert (avcodec_open2(c, NULL, NULL) >= 0); | |
/* Allocate the encoded raw picture. */ | |
alloc_picture(c->pix_fmt, c->width, c->height); | |
av_dump_format(m_context, 0, m_url.c_str(), 1); | |
avformat_write_header(m_context, NULL); | |
} | |
void OutputStream::alloc_picture(enum AVPixelFormat pix_fmt, int width, int height) | |
{ | |
int ret; | |
m_frame = av_frame_alloc(); | |
assert (m_frame); | |
m_frame->format = pix_fmt; | |
m_frame->width = width; | |
m_frame->height = height; | |
/* allocate the buffers for the frame data */ | |
ret = av_frame_get_buffer(m_frame, 32); | |
assert (ret >= 0); | |
} | |
int OutputStream::writeVideoFrame() | |
{ | |
int ret; | |
AVCodecContext *c; | |
int got_packet = 0; | |
c = m_videoStream->codec; | |
{ | |
fill_yuv_image(m_frame, m_pts, c->width, c->height); | |
m_frame->pts = m_pts++; | |
} | |
AVPacket pkt = { 0 }; | |
av_init_packet(&pkt); | |
pkt.data = reinterpret_cast<uint8_t*>(m_frame); | |
pkt.size = sizeof(AVFrame); | |
pkt.pts = m_pts; | |
// if (got_packet) { | |
av_packet_rescale_ts(&pkt, c->time_base, m_videoStream->time_base); | |
pkt.stream_index = m_videoStream->index; | |
/* Write the compressed frame to the media file. */ | |
ret = av_interleaved_write_frame(m_context, &pkt); | |
//} | |
assert (ret == 0); | |
return 0; // (m_frame || got_packet) ? 0 : 1; | |
} | |
int main(int argc, char const *argv[]) | |
{ | |
av_register_all(); | |
avformat_network_init(); | |
// av_log_set_level(AV_LOG_DEBUG); | |
char const* url = "rtsp://localhost:1935/live/aka.sdp"; | |
OutputStream* videoStream = new OutputStream(url); | |
videoStream->addVideoStream(AV_CODEC_ID_H264); | |
while (!videoStream->writeVideoFrame()) {} | |
delete videoStream; | |
return 0; | |
} |
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.