Created
May 1, 2014 13:33
-
-
Save mastertheknife/5a5683a43fa66719282a to your computer and use it in GitHub Desktop.
Experimental zms code
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* | |
* ZoneMinder MPEG class implementation, $Date$, $Revision$ | |
* Copyright (C) 2001-2008 Philip Coombes | |
* | |
* This program is free software; you can redistribute it and/or | |
* modify it under the terms of the GNU General Public License | |
* as published by the Free Software Foundation; either version 2 | |
* of the License, or (at your option) any later version. | |
* | |
* This program is distributed in the hope that it will be useful, | |
* but WITHOUT ANY WARRANTY; without even the implied warranty of | |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
* GNU General Public License for more details. | |
* | |
* You should have received a copy of the GNU General Public License | |
* along with this program; if not, write to the Free Software | |
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
*/ | |
#include <stdlib.h> | |
#include <string.h> | |
#include "zm.h" | |
#include "zm_rgb.h" | |
#include "zm_mpeg.h" | |
// Encoder backend selector, chosen per-container in SetupCodec():
//   0 = plain libavcodec encoder, 1 = libx264 (mpegts),
//   2 = libtheora (ogg), 3 = libvpx/VP8 (webm).
// NOTE(review): file-scope globals, so two concurrent VideoStream instances
// with different containers would race on these — confirm single-stream use.
int use_x264 = 1;
// Muxer selector: 0 = ffmpeg muxing via avio, 1 = raw write to a FILE*.
int muxing_type = 0;
#if HAVE_LIBAVCODEC
// One-shot guard so av_register_all() runs only once per process.
bool VideoStream::initialised = false;
/* Map of container short names (as passed to av_guess_format()) to the MIME
 * type reported to streaming clients; consulted by VideoStream::MimeType(). */
VideoStream::MimeData VideoStream::mime_data[] = {
    { "asf", "video/x-ms-asf" },
    { "swf", "application/x-shockwave-flash" },
    { "flv", "video/x-flv" },
    { "mp4", "video/mp4" },
    { "mov", "video/quicktime" },  /* was "move": libav's quicktime muxer is "mov" */
    { "h264", "video/h264" },      /* was misspelled "vieo/h264" */
    { "3gp", "video/3gpp" },
    { "ogg", "video/ogg" },
    { "webm", "video/webm" }       /* was "web": the muxer short name is "webm" */
};
void VideoStream::Initialise()
{
    /* Register all libavformat muxers and codecs; must run once before any
     * other libav* call. The static 'initialised' flag makes this one-shot. */
    av_register_all();
    initialised = true;
}
void VideoStream::SetupFormat( const char *p_filename, const char *p_format ) | |
{ | |
filename = p_filename; | |
format = p_format; | |
/* auto detect the output format from the name. default is mpeg. */ | |
of = av_guess_format( format, NULL, NULL); | |
if ( !of ) | |
{ | |
Warning( "Could not deduce output format from file extension: using mpeg" ); | |
of = av_guess_format("mpeg", NULL, NULL); | |
} | |
if ( !of ) | |
{ | |
Fatal( "Could not find suitable output format" ); | |
} | |
/* allocate the output format context */ | |
ofc = avformat_alloc_context(); | |
if ( !ofc ) | |
{ | |
Panic( "Memory error" ); | |
} | |
ofc->oformat = of; | |
snprintf( ofc->filename, sizeof(ofc->filename), "%s", filename ); | |
} | |
void VideoStream::SetupCodec( int colours, int subpixelorder, int width, int height, int bitrate, double frame_rate ) | |
{ | |
this->frame_rate = frame_rate; | |
/* ffmpeg format matching */ | |
switch(colours) { | |
case ZM_COLOUR_RGB24: | |
{ | |
if(subpixelorder == ZM_SUBPIX_ORDER_BGR) { | |
/* BGR subpixel order */ | |
pf = PIX_FMT_BGR24; | |
} else { | |
/* Assume RGB subpixel order */ | |
pf = PIX_FMT_RGB24; | |
} | |
break; | |
} | |
case ZM_COLOUR_RGB32: | |
{ | |
if(subpixelorder == ZM_SUBPIX_ORDER_ARGB) { | |
/* ARGB subpixel order */ | |
pf = PIX_FMT_ARGB; | |
} else if(subpixelorder == ZM_SUBPIX_ORDER_ABGR) { | |
/* ABGR subpixel order */ | |
pf = PIX_FMT_ABGR; | |
} else if(subpixelorder == ZM_SUBPIX_ORDER_BGRA) { | |
/* BGRA subpixel order */ | |
pf = PIX_FMT_BGRA; | |
} else { | |
/* Assume RGBA subpixel order */ | |
pf = PIX_FMT_RGBA; | |
} | |
break; | |
} | |
case ZM_COLOUR_GRAY8: | |
pf = PIX_FMT_GRAY8; | |
break; | |
default: | |
Panic("Unexpected colours: %d",colours); | |
break; | |
} | |
/* add the video streams using the default format codecs | |
and initialize the codecs */ | |
/* Print some debugging information | |
AVCodec* tmpavcodec; | |
tmpavcodec = avcodec_find_encoder(ofc->oformat->video_codec); | |
Debug(2,"Default codec for format %s (%s) is %s (%s)",ofc->oformat->name,ofc->oformat->long_name,tmpavcodec->name,tmpavcodec->long_name); | |
*/ | |
//ofc->preload = (int)(1.0*AV_TIME_BASE); // Deprecated | |
ofc->max_delay = (int)(0.5*AV_TIME_BASE); | |
ost = NULL; | |
if (of->video_codec != CODEC_ID_NONE) | |
{ | |
/* Create the video stream */ | |
ost = av_new_stream(ofc, 0); | |
if (!ost) | |
{ | |
Panic( "Could not alloc stream" ); | |
} | |
AVCodecContext *c = ost->codec; | |
c->codec_id = of->video_codec; | |
c->codec_type = AVMEDIA_TYPE_VIDEO; | |
unsigned int bitratekbits = (bitrate/1000); | |
Debug(3,"Bitrate (kbits): %u kbits",bitratekbits); | |
/* hacky format selection */ | |
if(strcmp(ofc->oformat->name,"mpegts") == 0) { | |
use_x264 = 1; | |
} else if(strcmp(ofc->oformat->name,"ogg") == 0) { | |
use_x264 = 2; | |
} else if(strcmp(ofc->oformat->name,"webm") == 0) { | |
use_x264 = 3; | |
} else { | |
use_x264 = 0; | |
} | |
if(use_x264 == 1) { | |
#if HAVE_LIBX264 | |
/* Needed for the muxer */ | |
c->codec_id = CODEC_ID_H264; | |
c->pix_fmt = PIX_FMT_YUV420P; | |
/* Set the default parameters */ | |
x264_param_default(&x264params); | |
/* Enable the veryfast preset and the zerolatency and stillimage tunes */ | |
if(x264_param_default_preset(&x264params,"superfast","stillimage+zerolatency") != 0) { | |
Error("Failed setting x264 preset and tune"); | |
} | |
if(x264_param_apply_profile(&x264params, "baseline") != 0) { | |
Error("Failed setting x264 profile"); | |
} | |
x264params.i_width = width; | |
x264params.i_height = height; | |
x264params.i_csp = X264_CSP_I420; | |
x264params.rc.i_rc_method = X264_RC_CRF; | |
x264params.rc.i_bitrate = bitratekbits; | |
x264params.rc.i_vbv_max_bitrate = bitratekbits; | |
x264params.rc.i_vbv_buffer_size = bitratekbits * 2; | |
x264params.rc.i_qp_step = 10; /* Help maintain bitrate by allowing drastic quality changes */ | |
/* MPEG-TS stream */ | |
x264params.i_timebase_num = 1; | |
x264params.i_timebase_den = 90000; | |
/* Enable b-frames */ | |
x264params.i_bframe = 2; | |
x264params.i_bframe_adaptive = 1; | |
x264params.b_annexb = 1; | |
x264params.b_repeat_headers = 1; | |
x264params.b_vfr_input = 1; | |
/* TODO: Setup error handler */ | |
x264params.i_log_level = X264_LOG_DEBUG; | |
#else | |
Panic("libx264 is required"); | |
#endif | |
} else if (use_x264 == 2) { | |
#if HAVE_LIBTHEORA | |
/* Needed for the muxer */ | |
c->codec_id = CODEC_ID_THEORA; | |
c->pix_fmt = PIX_FMT_YUV420P; | |
/* Set the default parameters */ | |
th_info_init(&theorainfo); | |
th_comment_init(&theoracomment); | |
theorainfo.frame_width = width; | |
theorainfo.frame_height = height; | |
theorainfo.pic_width = width; | |
theorainfo.pic_height = height; | |
theorainfo.pic_x = 0; | |
theorainfo.pic_y = 0; | |
theorainfo.colorspace = TH_CS_UNSPECIFIED; | |
theorainfo.pixel_fmt = TH_PF_420; | |
theorainfo.target_bitrate = bitrate; | |
theorainfo.aspect_numerator = 1; | |
theorainfo.aspect_denominator = 1; | |
theorainfo.fps_numerator = frame_rate; | |
theorainfo.fps_denominator = 1; | |
/* Allocate encoder */ | |
theoraenc = th_encode_alloc(&theorainfo); | |
if(theoraenc == NULL) { | |
Fatal("Failing allocating theora encoder"); | |
} | |
unsigned int tempint; | |
/* Set encoding speed to 80% speed */ | |
tempint = 0; | |
th_encode_ctl(theoraenc, TH_ENCCTL_GET_SPLEVEL_MAX, &tempint, sizeof(tempint)); | |
Debug(3,"Theora maximum speed level: %d",tempint); | |
th_encode_ctl(theoraenc, TH_ENCCTL_SET_SPLEVEL, &tempint, sizeof(tempint)); | |
#else | |
Panic("libtheora is required"); | |
#endif | |
} else if (use_x264 == 3) { | |
#if HAVE_LIBVPX | |
/* Needed for the muxer */ | |
c->codec_id = CODEC_ID_VP8; | |
c->pix_fmt = PIX_FMT_YUV420P; | |
Debug(3,"VPX Version: %s",vpx_codec_version_str()); | |
/* Get the default interface */ | |
vp8intf = vpx_codec_vp8_cx(); | |
Debug(3,"VPX Interface: %s",vpx_codec_iface_name(vp8intf)); | |
/* Set the default parameters */ | |
vpx_codec_enc_config_default(vp8intf, &vp8cfg, 0); | |
vp8cfg.g_w = width; | |
vp8cfg.g_h = height; | |
vp8cfg.g_profile = 0; | |
vp8cfg.g_pass = VPX_RC_ONE_PASS; | |
vp8cfg.g_timebase.num = 1; | |
vp8cfg.g_timebase.den = 1000; | |
vp8cfg.kf_mode = VPX_KF_AUTO; | |
vp8cfg.rc_end_usage = VPX_CBR; | |
vp8cfg.rc_target_bitrate = bitratekbits; | |
vp8cfg.g_lag_in_frames = 0; | |
#else | |
Panic("libvpx is required"); | |
#endif | |
} else if (use_x264 == 0) { | |
#if HAVE_LIBAVCODEC | |
/* put sample parameters */ | |
c->bit_rate = (bitrate/1000); | |
c->pix_fmt = PIX_FMT_YUV420P; | |
/* resolution must be a multiple of two */ | |
c->width = width; | |
c->height = height; | |
/* time base: this is the fundamental unit of time (in seconds) in terms | |
of which frame timestamps are represented. for fixed-fps content, | |
timebase should be 1/framerate and timestamp increments should be | |
identically 1. */ | |
//c->time_base.den = (int)(frame_rate*100); | |
//c->time_base.num = 100; | |
c->time_base.den = frame_rate; | |
c->time_base.num = 1; | |
//c->gop_size = frame_rate/2; /* emit one intra frame every half second or so */ | |
c->gop_size = frame_rate; | |
// some formats want stream headers to be seperate | |
if(ofc->oformat->flags & AVFMT_GLOBALHEADER) | |
c->flags |= CODEC_FLAG_GLOBAL_HEADER; | |
} | |
#endif | |
} else { | |
Panic("No default format defined in the output format"); | |
} | |
} | |
void VideoStream::SetParameters() | |
{ | |
/* set the output parameters (must be done even if no | |
parameters). */ | |
if ( av_set_parameters(ofc, NULL) < 0 ) | |
{ | |
Panic( "Invalid output format parameters" ); | |
} | |
av_dump_format(ofc, 0, filename, 1); | |
} | |
const char *VideoStream::MimeType() const | |
{ | |
for ( int i = 0; i < sizeof(mime_data)/sizeof(*mime_data); i++ ) | |
{ | |
if ( strcmp( format, mime_data[i].format ) == 0 ) | |
{ | |
return( mime_data[i].mime_type ); | |
} | |
} | |
const char *mime_type = of->mime_type; | |
if ( !mime_type ) | |
{ | |
mime_type = "video/mpeg"; | |
Warning( "Unable to determine mime type for '%s' format, using '%s' as default", format, mime_type ); | |
} | |
return( mime_type ); | |
} | |
void VideoStream::OpenStream() | |
{ | |
pts = 0; | |
/* now that all the parameters are set, we can open the | |
video codecs and allocate the necessary encode buffers */ | |
if ( ost ) | |
{ | |
AVCodecContext *c = ost->codec; | |
if(use_x264 == 1) { | |
#if HAVE_LIBX264 | |
x264enc = x264_encoder_open(&x264params); | |
if(x264enc == NULL) { | |
Panic("Failed opening x264 encoder"); | |
} | |
Debug(3,"x264 maximum delayed frames: %d",x264_encoder_maximum_delayed_frames(x264enc)); | |
x264_picture_alloc(&x264pic1, X264_CSP_I420, x264params.i_width, x264params.i_height); | |
x264_picture_alloc(&x264pic2, X264_CSP_I420, x264params.i_width, x264params.i_height); | |
#else | |
Panic("libx264 is required"); | |
#endif | |
} else if (use_x264 == 2) { | |
#if HAVE_LIBTHEORA | |
if(strcmp(ofc->oformat->name,"ogg") == 0) { | |
/* Allocate space for the headers */ | |
theoraheaders_buf = (uint8_t*)malloc(16384); | |
theoraheaders_bufsize = 16384; | |
} | |
#else | |
Panic("libtheora is required"); | |
#endif | |
} else if ( use_x264 == 3) { | |
#if HAVE_LIBVPX | |
if(vpx_codec_enc_init(&vp8enc,vp8intf,&vp8cfg,0)) { | |
Panic("Failed opening libvpx encoder"); | |
} | |
if(vpx_img_alloc(&vp8img1,VPX_IMG_FMT_I420,vp8cfg.g_w,vp8cfg.g_h,1) == NULL) { | |
Panic("Failed allocating VPX image"); | |
} | |
#else | |
Panic("libvpx is required"); | |
#endif | |
} else { | |
/* find the video encoder */ | |
AVCodec *codec = avcodec_find_encoder(c->codec_id); | |
if ( !codec ) | |
{ | |
Panic( "codec not found" ); | |
} | |
/* open the codec */ | |
if ( avcodec_open(c, codec) < 0 ) | |
{ | |
Panic( "Could not open codec" ); | |
} | |
} | |
int size = avpicture_get_size( c->pix_fmt, c->width, c->height); | |
opicture_buf = (uint8_t *)av_malloc(size); | |
if ( !opicture_buf ) | |
{ | |
av_free(opicture); | |
Panic( "Could not allocate opicture" ); | |
} | |
size = avpicture_get_size( pf, c->width, c->height); | |
tmp_opicture_buf = (uint8_t *)av_malloc(size); | |
if (!tmp_opicture_buf) | |
{ | |
av_free( tmp_opicture ); | |
Panic( "Could not allocate temporary opicture" ); | |
} | |
/* allocate the encoded raw picture */ | |
opicture = avcodec_alloc_frame(); | |
if ( !opicture ) | |
{ | |
Panic( "Could not allocate opicture" ); | |
} | |
/* if the output format is not identical to the input format, then a temporary | |
picture is needed too. It is then converted to the required | |
output format */ | |
tmp_opicture = NULL; | |
if ( c->pix_fmt != pf ) | |
{ | |
tmp_opicture = avcodec_alloc_frame(); | |
if ( !tmp_opicture ) | |
{ | |
Panic( "Could not allocate temporary opicture" ); | |
} | |
} | |
} | |
if (muxing_type == 0) { | |
/* FFMPEG muxing */ | |
/* open the output file, if needed */ | |
if ( !(of->flags & AVFMT_NOFILE) ) | |
{ | |
if ( avio_open(&ofc->pb, filename, URL_WRONLY) < 0 ) | |
{ | |
Fatal( "Could not open '%s'", filename ); | |
} | |
} | |
} else if (muxing_type == 1) { | |
/* Open the output file, if needed */ | |
if(memcmp(filename,"pipe:",5) != 0) { | |
output_fd = fopen(filename, "w+b"); | |
if(output_fd == NULL) { | |
Fatal( "Could not open '%s'", filename ); | |
} | |
} else { | |
output_fd = filename[5]; | |
} | |
} | |
video_outbuf = NULL; | |
if ( !(ofc->oformat->flags & AVFMT_RAWPICTURE) ) | |
{ | |
/* allocate output buffer */ | |
/* XXX: API change will be done */ | |
video_outbuf_size = 524288; /* 512KB */ | |
video_outbuf = (uint8_t *)av_malloc(video_outbuf_size); | |
} | |
} | |
/* Build a stream writer for 'filename': 'container' is the container short
 * name handed to av_guess_format(); frame geometry/colour parameters describe
 * the frames later passed to EncodeFrame().
 * FIX: the original called SetupFormat( filename, format ), reading the
 * 'format' MEMBER which is still uninitialised at this point — the container
 * parameter is what was intended. */
VideoStream::VideoStream( const char *filename, const char* codec, const char *container, int bitrate, double frame_rate, int colours, int subpixelorder, int width, int height )
{
    if ( !initialised )
    {
        Initialise();
    }
    SetupFormat( filename, container );
    SetupCodec( colours, subpixelorder, width, height, bitrate, frame_rate );
    SetParameters();
    /* NOTE(review): the 'codec' parameter is currently unused — the encoder
     * is derived from the container in SetupCodec(); confirm intent. */
}
/* Tear down: close the codec, free frames and buffers, flush the muxer
 * trailer and close the output target.
 * NOTE(review): this path assumes the plain libavcodec encoder. The
 * x264/theora/vpx handles (x264enc/theoraenc/vp8enc) opened in OpenStream()
 * are never closed here and appear to leak — confirm. */
VideoStream::~VideoStream()
{
	/* close each codec */
	if (ost)
	{
		/* NOTE(review): avcodec_close() is called even when the codec was
		 * never opened via avcodec_open() (x264/theora/vpx paths) — verify
		 * this is safe on the ffmpeg version in use. */
		avcodec_close(ost->codec);
		/* NOTE(review): opicture->data[] was last set by avpicture_fill() in
		 * EncodeFrame(), so data[0] may point at the caller's buffer or an
		 * encoder-owned plane rather than opicture_buf — verify this does
		 * not free foreign memory (and that opicture_buf itself is freed). */
		av_free(opicture->data[0]);
		av_free(opicture);
		if (tmp_opicture)
		{
			av_free(tmp_opicture->data[0]);
			av_free(tmp_opicture);
		}
		av_free(video_outbuf);
	}
	/* write the trailer, if any */
	av_write_trailer(ofc);
	/* free the streams */
	for( int i = 0; i < ofc->nb_streams; i++)
	{
		av_freep(&ofc->streams[i]);
	}
	if (muxing_type == 0) {
		/* FFMPEG muxing */
		if (!(of->flags & AVFMT_NOFILE))
		{
			/* close the output file */
			avio_close(ofc->pb);
		}
	} else if (muxing_type == 1) {
		/* Raw muxing: flush, and close the stream unless it wraps an
		 * inherited "pipe:" file descriptor */
		fflush(output_fd);
		if(memcmp(filename,"pipe:",5) != 0) {
			fclose(output_fd);
		}
		output_fd = NULL;
	}
	/* free the format context */
	avformat_free_context(ofc);
}
double VideoStream::EncodeFrame( const uint8_t *buffer, int buffer_size, bool add_timestamp, unsigned int timestamp ) | |
{ | |
static unsigned int frame_count = 0; | |
#ifdef HAVE_LIBSWSCALE | |
static struct SwsContext *img_convert_ctx = 0; | |
#endif // HAVE_LIBSWSCALE | |
uint8_t* dest_buf; | |
int ret = 0; | |
AVPacket pkt; | |
Debug(9,"Last PTS: %u",pts); | |
Debug(9,"Timestamp: %d",timestamp); | |
if(strcmp(ofc->oformat->name,"mpegts") == 0) { | |
/* Scale the PTS to 90KHz */ | |
pts = timestamp * 90; | |
} else { | |
pts = timestamp; | |
} | |
Debug(7,"Using PTS: %u",pts); | |
/* | |
if (ost) | |
{ | |
pts = (double)ost->pts.val * ost->time_base.num / ost->time_base.den; | |
} | |
*/ | |
if (use_x264 == 1) { | |
dest_buf = x264pic1.img.plane[0]; | |
} else if (use_x264 == 3) { | |
dest_buf = vp8img1.planes[0]; | |
} else { | |
dest_buf = opicture_buf; | |
} | |
AVCodecContext *c = ost->codec; | |
if ( c->pix_fmt != pf ) | |
{ | |
Debug(6,"Converting image"); | |
#ifdef HAVE_LIBSWSCALE | |
if ( !img_convert_ctx ) | |
{ | |
img_convert_ctx = sws_getCachedContext( NULL, c->width, c->height, pf, c->width, c->height, c->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL ); | |
if ( !img_convert_ctx ) | |
Panic( "Unable to initialise image scaling context" ); | |
} | |
/* Fill the source */ | |
avpicture_fill( (AVPicture *)tmp_opicture, (uint8_t*)buffer, pf, c->width, c->height ); | |
/* Fill the destination */ | |
avpicture_fill( (AVPicture *)opicture, dest_buf, PIX_FMT_YUV420P, c->width, c->height ); | |
sws_scale( img_convert_ctx, tmp_opicture->data, tmp_opicture->linesize, 0, c->height, opicture->data, opicture->linesize ); | |
#else // HAVE_LIBSWSCALE | |
Fatal("swscale is required for MPEG mode"); | |
#endif // HAVE_LIBSWSCALE | |
} | |
else | |
{ | |
avpicture_fill( (AVPicture *)opicture, (uint8_t*)buffer, PIX_FMT_YUV420P, c->width, c->height ); | |
} | |
if (use_x264 == 1) { | |
x264_nal_t* nals; | |
int i_nals; | |
if(frame_count == 0) { | |
/* Write the headers */ | |
av_write_header(ofc); | |
} | |
/* Set PTS */ | |
x264pic1.i_pts = pts; | |
int frame_size = x264_encoder_encode(x264enc, &nals, &i_nals, &x264pic1, &x264pic2); | |
if (frame_size > 0) { | |
Debug(9,"x264 encode returned %u bytes",frame_size); | |
av_init_packet( &pkt ); | |
pkt.stream_index = ost->index; | |
pkt.data = nals[0].p_payload; | |
pkt.size = frame_size; | |
pkt.pts = x264pic2.i_pts; | |
pkt.dts = x264pic2.i_dts; | |
pts = x264pic2.i_pts; | |
Debug(8,"x264 PTS: %u DTS: %u Duration: %d Size: %u",pkt.pts,pkt.dts,pkt.duration,pkt.size); | |
if(x264pic2.b_keyframe) | |
pkt.flags |= AV_PKT_FLAG_KEY; | |
ret = av_write_frame(ofc, &pkt); | |
} else if(frame_size == 0) { | |
Debug(7,"x264 encode returned zero. Delayed frames: %d",x264_encoder_delayed_frames(x264enc)); | |
} else { | |
Error("x264 encode failed: %d",frame_size); | |
} | |
} else if (use_x264 == 2) { | |
/* No way to input the PTS into the encoder */ | |
/* If this is the first frame, send all headers */ | |
if(frame_count == 0) { | |
uint8_t *theoraheaders_iter = theoraheaders_buf; | |
unsigned int headersize; | |
if(strcmp(ofc->oformat->name,"ogg") != 0) { | |
/* Write the headers */ | |
av_write_header(ofc); | |
} | |
while(th_encode_flushheader(theoraenc, &theoracomment, &oggpacket1) > 0) { | |
//Debug(7,"Got theora header packet (%lu) of size: %u",oggpacket1.packetno,oggpacket1.bytes); | |
Debug(7,"Theora header packet size: %u",oggpacket1.bytes); | |
/* Collect the headers */ | |
if(strcmp(ofc->oformat->name,"ogg") == 0) { | |
/* Safety check against oversized headers */ | |
if(oggpacket1.bytes+2 > (theoraheaders_bufsize - (theoraheaders_iter - theoraheaders_buf))) { | |
Panic("Ran out of buffer space!"); | |
} | |
/* Byte swap the size and put it in the start */ | |
*theoraheaders_iter++ = oggpacket1.bytes >> 8; | |
*theoraheaders_iter++ |= oggpacket1.bytes & 0xFF; | |
memcpy(theoraheaders_iter, oggpacket1.packet, oggpacket1.bytes); | |
theoraheaders_iter += oggpacket1.bytes; | |
} else { | |
/* Send the header packet */ | |
av_init_packet( &pkt ); | |
pkt.stream_index = ost->index; | |
pkt.data = oggpacket1.packet; | |
pkt.size = oggpacket1.bytes; | |
pkt.pts = 0; | |
pkt.dts = 0; | |
ret = av_write_frame(ofc, &pkt); | |
if(ret != 0) { | |
Error( "Error %d while writing theora header: %s", ret, strerror( errno ) ); | |
} | |
} | |
} | |
if(strcmp(ofc->oformat->name,"ogg") == 0) { | |
headersize = theoraheaders_iter - theoraheaders_buf; | |
/* Place the headers in the place ffmpeg expects for ogg use */ | |
c->extradata = theoraheaders_buf; | |
c->extradata_size = headersize; | |
Debug(7,"Theora total headers size is %u bytes",headersize); | |
/* Write the headers */ | |
av_write_header(ofc); | |
} | |
} | |
/* Setup the image buffer */ | |
theoraimg[0].width = c->width; | |
theoraimg[0].height = c->height; | |
theoraimg[0].data = opicture->data[0]; | |
theoraimg[0].stride = opicture->linesize[0]; | |
theoraimg[1].width = c->width >> 1; | |
theoraimg[1].height = c->height >> 1; | |
theoraimg[1].data = opicture->data[1]; | |
theoraimg[1].stride = opicture->linesize[1]; | |
theoraimg[2].width = c->width >> 1; | |
theoraimg[2].height = c->height >> 1; | |
theoraimg[2].data = opicture->data[2]; | |
theoraimg[2].stride = opicture->linesize[2]; | |
ret = th_encode_ycbcr_in(theoraenc, theoraimg); | |
if(ret != 0) { | |
Error( "Error %d while encoding frame: %u", ret, frame_count++ ); | |
return 0; | |
} | |
ret = th_encode_packetout(theoraenc, 0, &oggpacket1); | |
if(ret > 0) { | |
Debug(9,"Theora encode return value: %u",ret); | |
av_init_packet( &pkt ); | |
pkt.stream_index = ost->index; | |
pkt.data = oggpacket1.packet; | |
pkt.size = oggpacket1.bytes; | |
pkt.pts = oggpacket1.granulepos; | |
pkt.dts = oggpacket1.granulepos; | |
pts = oggpacket1.granulepos; | |
Debug(8,"Theora PTS: %u DTS: %u Duration: %d Size: %u",pkt.pts,pkt.dts,pkt.duration,pkt.size); | |
if(th_packet_isheader(&oggpacket1)) | |
pkt.flags |= AV_PKT_FLAG_KEY; | |
ret = av_write_frame(ofc, &pkt); | |
} else if(ret == 0) { | |
Debug(7,"Theora encode returned zero."); | |
} else { | |
Error("Theora encode failed: %d",ret); | |
} | |
} else if (use_x264 == 3) { | |
vpx_codec_pts_t vp8pts; | |
vpx_codec_iter_t vp8iter = NULL; | |
const vpx_codec_cx_pkt_t* vp8pkt; | |
if(frame_count == 0) { | |
/* Write the headers */ | |
av_write_header(ofc); | |
} | |
/* Set PTS */ | |
vp8pts = pts; | |
vp8res = vpx_codec_encode(&vp8enc, &vp8img1, vp8pts, 1000, 0, VPX_DL_REALTIME); | |
if (vp8res != VPX_CODEC_OK) { | |
Error( "Error %d while encoding frame: %u", ret, frame_count++ ); | |
return 0; | |
} | |
while( (vp8pkt = vpx_codec_get_cx_data(&vp8enc, &vp8iter)) ) { | |
if (vp8pkt->kind == VPX_CODEC_CX_FRAME_PKT) { | |
pkt.stream_index = ost->index; | |
pkt.data = (uint8_t*)vp8pkt->data.frame.buf; | |
pkt.size = vp8pkt->data.frame.sz; | |
pkt.duration = vp8pkt->data.frame.duration; | |
pkt.pts = vp8pkt->data.frame.pts; | |
pkt.dts = vp8pkt->data.frame.pts; | |
pts = vp8pkt->data.frame.pts; | |
Debug(8,"VP8 PTS: %u DTS: %u Duration: %d Size: %u",pkt.pts,pkt.dts,pkt.duration,pkt.size); | |
if(vp8pkt->data.frame.flags & VPX_FRAME_IS_KEY) | |
pkt.flags |= AV_PKT_FLAG_KEY; | |
ret = av_write_frame(ofc, &pkt); | |
} else { | |
Error("Received unknown packet %d while encoding frame: %u", vp8pkt->kind, frame_count); | |
return 0; | |
} | |
} | |
} else { | |
if ( ofc->oformat->flags & AVFMT_RAWPICTURE ) | |
{ | |
av_init_packet( &pkt ); | |
/* Set PTS */ | |
pkt.pts = pts; | |
pkt.flags |= AV_PKT_FLAG_KEY; | |
pkt.stream_index = ost->index; | |
pkt.data = (uint8_t *)opicture; | |
pkt.size = sizeof(AVPicture); | |
ret = av_write_frame(ofc, &pkt); | |
} | |
else | |
{ | |
int out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, opicture); | |
if ( out_size > 0 ) | |
{ | |
av_init_packet( &pkt ); | |
pkt.pts = av_rescale_q( c->coded_frame->pts, c->time_base, ost->time_base ); | |
if(c->coded_frame->key_frame) | |
pkt.flags |= AV_PKT_FLAG_KEY; | |
pkt.stream_index = ost->index; | |
pkt.data = video_outbuf; | |
pkt.size = out_size; | |
ret = av_write_frame( ofc, &pkt ); | |
} | |
} | |
} | |
if ( ret != 0 ) | |
{ | |
Error( "Error %d while writing video frame %u: %s", ret, frame_count, strerror( errno ) ); | |
} | |
/* Send a flush packet */ | |
if(ofc->oformat->flags & AVFMT_ALLOW_FLUSH) { | |
av_write_frame(ofc, NULL); | |
} | |
fflush(stdout); | |
frame_count++; | |
return( pts ); | |
} | |
#endif // HAVE_LIBAVCODEC |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* | |
* ZoneMinder MPEG Interface, $Date$, $Revision$ | |
* Copyright (C) 2001-2008 Philip Coombes | |
* | |
* This program is free software; you can redistribute it and/or | |
* modify it under the terms of the GNU General Public License | |
* as published by the Free Software Foundation; either version 2 | |
* of the License, or (at your option) any later version. | |
* | |
* This program is distributed in the hope that it will be useful, | |
* but WITHOUT ANY WARRANTY; without even the implied warranty of | |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
* GNU General Public License for more details. | |
* | |
* You should have received a copy of the GNU General Public License | |
* along with this program; if not, write to the Free Software | |
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | |
*/ | |
#ifndef ZM_MPEG_H | |
#define ZM_MPEG_H | |
#include <cstdio>
#include "zm_ffmpeg.h"
/* libx264 */ | |
#if HAVE_LIBX264 | |
#ifdef __cplusplus | |
extern "C" { | |
#endif | |
#include <x264.h> | |
#ifdef __cplusplus | |
} | |
#endif | |
#endif | |
/* libtheora */ | |
#if HAVE_LIBTHEORA | |
#include <ogg/ogg.h> | |
#include <theora/theoraenc.h> | |
#endif | |
/* libvpx */ | |
#if HAVE_LIBVPX | |
#define VPX_CODEC_DISABLE_COMPAT 1 | |
#include "vpx/vpx_encoder.h" | |
#include "vpx/vp8cx.h" | |
#endif | |
#if HAVE_LIBAVCODEC | |
class VideoStream | |
{ | |
protected: | |
struct MimeData | |
{ | |
const char *format; | |
const char *mime_type; | |
}; | |
protected: | |
static bool initialised; | |
static struct MimeData mime_data[]; | |
protected: | |
const char *filename; | |
const char *format; | |
double frame_rate; | |
uint64_t pts; | |
uint8_t *video_outbuf; | |
int video_outbuf_size; | |
enum PixelFormat pf; | |
AVOutputFormat *of; | |
AVFormatContext *ofc; | |
AVStream *ost; | |
AVFrame *opicture; | |
uint8_t* opicture_buf; | |
AVFrame *tmp_opicture; | |
uint8_t* tmp_opicture_buf; | |
#if HAVE_LIBX264 | |
x264_t* x264enc; | |
x264_param_t x264params; | |
x264_picture_t x264pic1; | |
x264_picture_t x264pic2; | |
#endif | |
#if HAVE_LIBVPX | |
vpx_codec_ctx_t vp8enc; | |
vpx_codec_enc_cfg_t vp8cfg; | |
vpx_codec_iface_t* vp8intf; | |
vpx_image_t vp8img1; | |
vpx_codec_err_t vp8res; | |
#endif | |
#if HAVE_LIBTHEORA | |
th_enc_ctx* theoraenc; | |
th_info theorainfo; | |
th_comment theoracomment; | |
th_ycbcr_buffer theoraimg; | |
uint8_t* theoraheaders_buf; | |
unsigned int theoraheaders_bufsize; | |
ogg_packet oggpacket1; | |
#endif | |
protected: | |
static void Initialise(); | |
void SetupFormat( const char *p_filename, const char *format ); | |
void SetupCodec( int colours, int subpixelorder, int width, int height, int bitrate, double frame_rate ); | |
void SetParameters(); | |
public: | |
VideoStream( const char *filename, const char *format, int bitrate, double frame_rate, int colours, int subpixelorder, int width, int height ); | |
~VideoStream(); | |
const char *MimeType() const; | |
void OpenStream(); | |
double EncodeFrame( const uint8_t *buffer, int buffer_size, bool add_timestamp=false, unsigned int timestamp=0 ); | |
}; | |
#endif // HAVE_LIBAVCODEC | |
#endif // ZM_MPEG_H |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment