#define __USE_GNU
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h> // PRIx64, used when formatting channel layouts below
#include <string.h>
#include "erl_nif.h"
#include "frog_draw.h"
#ifdef __linux__
__asm__(".symver memcpy,memcpy@GLIBC_2.2.5");
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/channel_layout.h> // av_get_channel_layout*() used below
static ErlNifResourceType* decode_resource;
static ErlNifResourceType* encode_resource;

#define YUV_GRAY 127

// #define USE_DIRTY_NIFS
#ifdef USE_DIRTY_NIFS
#ifdef ERL_NIF_DIRTY_SCHEDULER_SUPPORT
#define REALLY_USE_DIRTY_NIFS
#endif
#endif

struct Worker {
    int initialized;
    int argc;
    ERL_NIF_TERM argv[5];
    char thread_name[16];
    ErlNifTid tid;
    int thread_exit;
    ErlNifMutex *mutex;
    ErlNifCond *cond;
    ErlNifEnv *env;
    ERL_NIF_TERM ref;
    ERL_NIF_TERM self;
    ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
};

typedef struct FilterContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilterContext;

struct Decoder {
    struct Worker w;
    AVCodec *decoder;
    AVCodecContext *ctx;
    struct SwsContext* downscale_ctx;
    struct AVFrame *raw_frame;
    int description_length;
    char *description;
    char filter[1024];
    struct FilterContext* vgraph;
};

struct Encoder {
    struct Worker w;
    AVCodec *encoder;
    AVCodecContext *ctx;
    struct FilterContext* agraph;
    struct FilterContext* vgraph;
    int description_length;
    char *description;
    int width;
    int height;
    // For audio
    int input_channels;
    int64_t input_channel_layout;
    int input_sample_rate;
    int input_sample_fmt;
    int input_bps;
    // For video
    int in_width;
    int in_height;
    int deinterlace;
    char filter[1024];
    int dts_shift;
};

static const int description_length = 1024;
static ErlNifMutex *av_open_mutex;

static ERL_NIF_TERM avcodec_async_decode(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
static ERL_NIF_TERM avcodec_async_encode(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]);
int init_graph(FilterContext *filt_ctx, AVCodecContext *ctx, enum AVMediaType codec_type, const char *filter_spec, const char *input_desc);

ERL_NIF_TERM
error_reply(ErlNifEnv* env, char *reason) {
    return enif_make_tuple2(env, enif_make_atom(env,"error"), enif_make_atom(env,reason));
}
// This runs in a separate worker thread
void *worker_loop(void *args) {
    struct Worker *w = (struct Worker *)args;
    // pthread_setname_np had to be disabled: it is unavailable/broken on the SLES builds
    // if(strlen(w->thread_name) > 0) {
    // #ifdef __APPLE__
    //     pthread_setname_np(w->thread_name);
    // #else
    //     pthread_setname_np(pthread_self(), w->thread_name);
    // #endif
    // }
    while(1) {
        if(w->thread_exit) {
            enif_thread_exit(NULL);
            // return NULL;
        }
        // Lock the mutex first to protect against a torn argc assignment
        enif_mutex_lock(w->mutex);
        while(!w->argc && !w->thread_exit) {
            // No task queued: release the mutex and wait on the condition.
            // fprintf(stderr, "%s:%d %u wait\r\n", __FILE__, __LINE__, pthread_self()); fflush(stderr);
            enif_cond_wait(w->cond, w->mutex);
            // fprintf(stderr, "%s:%d %u wait done. argc: %d, exit: %d\r\n", __FILE__, __LINE__, pthread_self(), w->argc, w->thread_exit); fflush(stderr);
            // The while loop guards against spurious wake-ups from cond_wait
        }
        if(w->thread_exit) {
            enif_mutex_unlock(w->mutex);
            return NULL;
        }
        int argc = w->argc;
        ERL_NIF_TERM *argv = w->argv;
        w->argc = 0;
        ErlNifEnv *env = w->env;
        w->env = NULL;
        ERL_NIF_TERM ref = w->ref;
        ERL_NIF_TERM remote_self = w->self;
        enif_mutex_unlock(w->mutex);
        ERL_NIF_TERM result = w->fptr(env, argc, argv);
        ErlNifPid self;
        if(enif_get_local_pid(env, remote_self, &self)) {
            enif_send(NULL, &self, env, enif_make_tuple2(env, ref, result));
        }
        enif_clear_env(env);
        enif_free_env(env);
    }
}
void init_worker(ErlNifEnv *env, struct Worker *w, ERL_NIF_TERM (*fptr)(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])) {
    if(w->mutex) return;
    fprintf(stderr, "Init thread worker\r\n");
    w->cond = enif_cond_create("avworker_cond");
    w->mutex = enif_mutex_create("avworker_mutex");
    ErlNifPid self;
    // Create the pid term in the caller's env (w->env is still NULL at this point)
    w->self = enif_make_pid(env, enif_self(env, &self));
    w->fptr = fptr;
    enif_thread_create("avworker_thread", &w->tid, worker_loop, w, NULL);
}

void stop_worker(struct Worker *w) {
    if(!w->mutex) return;
    enif_mutex_lock(w->mutex);
    w->thread_exit = 1;
    enif_cond_broadcast(w->cond);
    enif_mutex_unlock(w->mutex);
    enif_thread_join(w->tid, NULL);
    // enif_mutex_destroy(w->mutex);
    enif_cond_destroy(w->cond);
    w->mutex = NULL;
    w->cond = NULL;
}
static ERL_NIF_TERM schedule_task(ErlNifEnv *env, struct Worker *w, int argc, const ERL_NIF_TERM argv[]) {
    enif_mutex_lock(w->mutex);
    w->env = enif_alloc_env();
    int i;
    for(i = 0; i < argc; i++) {
        w->argv[i] = enif_make_copy(w->env, argv[i]);
    }
    w->argc = argc;
    ERL_NIF_TERM ref = enif_make_ref(env);
    w->ref = enif_make_copy(w->env, ref);
    enif_cond_broadcast(w->cond);
    enif_mutex_unlock(w->mutex);
    return enif_make_tuple2(env, enif_make_atom(env, "ref"), ref);
}
static ERL_NIF_TERM close0_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) {
    struct Decoder *dec;
    struct Encoder *e;
    if(argc < 1) return enif_make_badarg(env);
    if(enif_get_resource(env, argv[0], decode_resource, (void **)&dec)) {
        stop_worker(&dec->w);
        return enif_make_atom(env, "ok");
    }
    if(enif_get_resource(env, argv[0], encode_resource, (void **)&e)) {
        stop_worker(&e->w);
        return enif_make_atom(env, "ok");
    }
    return enif_make_atom(env, "false");
}
/* $$$$$$$\ $$\ $$$$$$\ $$\ $$\
$$ __$$\ $$ | \_$$ _| \__| $$ |
$$ | $$ | $$$$$$\ $$$$$$$\ $$$$$$\ $$$$$$$ | $$$$$$\ $$$$$$\ $$ | $$$$$$$\ $$\ $$$$$$\
$$ | $$ |$$ __$$\ $$ _____|$$ __$$\ $$ __$$ |$$ __$$\ $$ __$$\ $$ | $$ __$$\ $$ |\_$$ _|
$$ | $$ |$$$$$$$$ |$$ / $$ / $$ |$$ / $$ |$$$$$$$$ |$$ | \__| $$ | $$ | $$ |$$ | $$ |
$$ | $$ |$$ ____|$$ | $$ | $$ |$$ | $$ |$$ ____|$$ | $$ | $$ | $$ |$$ | $$ |$$\
$$$$$$$ |\$$$$$$$\ \$$$$$$$\ \$$$$$$ |\$$$$$$$ |\$$$$$$$\ $$ | $$$$$$\ $$ | $$ |$$ | \$$$$ |
\_______/ \_______| \_______| \______/ \_______| \_______|\__| \______|\__| \__|\__| \____/
*/
void *decoder_loop(void *);

static ERL_NIF_TERM
avcodec_init_decoder(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    if(argc < 2) return enif_make_badarg(env);
    ErlNifBinary config;
    ErlNifBinary codec_name;
    ErlNifBinary decoder_filter;
    int is_decode_filter = 0;
    if(!enif_inspect_binary(env, argv[0], &config)) return error_reply(env, "first_arg_must_be_binary");
    if(!enif_inspect_binary(env, argv[1], &codec_name)) return error_reply(env, "second_arg_must_be_codec");
    AVCodec *decoder = avcodec_find_decoder_by_name((const char *)codec_name.data);
    if(!decoder) return error_reply(env, "failed_to_find_decoder");
    if(decoder->type == AVMEDIA_TYPE_VIDEO && argc >= 3) {
        if(!enif_inspect_binary(env, argv[2], &decoder_filter)) return error_reply(env, "arg_3_must_be_binary");
        is_decode_filter = 1;
    }
    AVCodecContext *decoder_ctx = avcodec_alloc_context3(decoder);
    // decoder_ctx->lowres = 0;
    // decoder_ctx->idct_algo = FF_IDCT_LIBMPEG2MMX;
    decoder_ctx->flags2 |= CODEC_FLAG2_FAST;
    decoder_ctx->skip_frame = AVDISCARD_DEFAULT;
    decoder_ctx->skip_idct = AVDISCARD_DEFAULT;
    decoder_ctx->skip_loop_filter = AVDISCARD_DEFAULT;
    if(config.size > 0) {
        if(decoder->id == AV_CODEC_ID_PCM_ALAW || decoder->id == AV_CODEC_ID_PCM_MULAW) {
            decoder_ctx->channels = *(uint8_t *)config.data;
        } else {
            decoder_ctx->extradata_size = config.size;
            decoder_ctx->extradata = av_mallocz(config.size);
            memcpy(decoder_ctx->extradata, (const char *)config.data, config.size);
        }
    } else {
        decoder_ctx->extradata_size = 0;
        decoder_ctx->extradata = NULL;
    }
    enif_mutex_lock(av_open_mutex);
    if(avcodec_open2(decoder_ctx, decoder, NULL) < 0) {
        enif_mutex_unlock(av_open_mutex);
        av_free(decoder_ctx);
        return error_reply(env, "failed_to_open_decoder");
    }
    enif_mutex_unlock(av_open_mutex);
    struct Decoder *dec;
    int len = sizeof(struct Decoder) + description_length + 1;
    dec = (struct Decoder *)enif_alloc_resource(decode_resource, len);
    memset(dec, 0, len);
    dec->description_length = description_length;
    dec->decoder = decoder;
    dec->ctx = decoder_ctx;
    dec->description = (char *)dec + sizeof(struct Decoder);
    dec->downscale_ctx = NULL;
    dec->raw_frame = av_frame_alloc();
    if(is_decode_filter) {
        // Leave room for the NUL terminator (the resource was zeroed above)
        strncpy(dec->filter, (const char *)decoder_filter.data, sizeof(dec->filter) - 1);
    }
    if(decoder->type == AVMEDIA_TYPE_VIDEO) {
        snprintf(dec->description, dec->description_length, "avcodec,decoder,codec=%s,src=%dx%d,dst=%dx%d",
            decoder->name, dec->ctx->width, dec->ctx->height, 0, 0);
    } else {
        char chan_layout[50];
        if(decoder_ctx->channel_layout) {
            av_get_channel_layout_string(chan_layout, sizeof(chan_layout), -1, decoder_ctx->channel_layout);
        } else {
            sprintf(chan_layout, "none");
        }
        snprintf(dec->description, dec->description_length, "avcodec,decoder,codec=%s,sample_fmt=%s,channel_layout=%s",
            decoder->name, av_get_sample_fmt_name(decoder_ctx->sample_fmt), chan_layout);
    }
    snprintf(dec->w.thread_name, 16, "avdecoder");
#ifndef REALLY_USE_DIRTY_NIFS
    init_worker(env, &dec->w, avcodec_async_decode);
#endif
    ERL_NIF_TERM dec_term = enif_make_resource_binary(env, dec, dec->description, strlen(dec->description));
    enif_release_resource(dec);
    return enif_make_tuple2(env,
        enif_make_atom(env, "ok"),
        dec_term
    );
}
/* $$$$$$$\ $$\
$$ __$$\ $$ |
$$ | $$ | $$$$$$\ $$$$$$$\ $$$$$$\ $$$$$$$ | $$$$$$\
$$ | $$ |$$ __$$\ $$ _____|$$ __$$\ $$ __$$ |$$ __$$\
$$ | $$ |$$$$$$$$ |$$ / $$ / $$ |$$ / $$ |$$$$$$$$ |
$$ | $$ |$$ ____|$$ | $$ | $$ |$$ | $$ |$$ ____|
$$$$$$$ |\$$$$$$$\ \$$$$$$$\ \$$$$$$ |\$$$$$$$ |\$$$$$$$\
\_______/ \_______| \_______| \______/ \_______| \_______|
*/
static ERL_NIF_TERM
decode_video(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    if(argc != 4) return error_reply(env, "need_4_args");
    struct Decoder *dec;
    ErlNifBinary h264;
    ErlNifBinary yuv;
    char err_buf[256];
    if(!enif_get_resource(env, argv[0], decode_resource, (void **)&dec)) {
        return error_reply(env, "arg_0_must_be_decoder");
    }
    if(!enif_inspect_binary(env, argv[1], &h264)) {
        return error_reply(env, "arg_1_must_be_binary");
    }
    ErlNifSInt64 dts, pts;
    if(!enif_get_int64(env, argv[2], &dts)) {
        return error_reply(env, "arg_2_must_be_int_dts");
    }
    if(!enif_get_int64(env, argv[3], &pts)) {
        return error_reply(env, "arg_3_must_be_int_pts");
    }
    AVPacket in_pkt;
    av_init_packet(&in_pkt);
    in_pkt.data = h264.data;
    in_pkt.size = h264.size;
    in_pkt.dts = dts;
    in_pkt.pts = pts;
    av_frame_unref(dec->raw_frame);
    int decoded = 0;
    if(avcodec_decode_video2(dec->ctx, dec->raw_frame, &decoded, &in_pkt) < 0) {
        return error_reply(env, "couldnt_decode");
    }
    if(!decoded) {
        return error_reply(env, "not_avdecoded");
    }
    ERL_NIF_TERM reply;
    ErlNifSInt64 out_pts;
    ERL_NIF_TERM out_frames[20];
    int out_count = 0;
    if(dec->ctx->width <= 0) return error_reply(env, "bad_width");
    if(dec->ctx->height <= 0) return error_reply(env, "bad_height");
    dec->raw_frame->pts = av_frame_get_best_effort_timestamp(dec->raw_frame);
    // snprintf(dec->description, dec->description_length, "avcodec decoder(%dx%d) -> (%dx%d)", dec->ctx->width, dec->ctx->height, 0, 0);
    if(dec->raw_frame->width <= 0 || dec->raw_frame->width >= 5000 ||
        dec->raw_frame->height <= 0 || dec->raw_frame->height >= 5000 ||
        dec->ctx->width <= 0 || dec->ctx->width >= 5000 ||
        dec->ctx->height <= 0 || dec->ctx->height >= 5000
    ) {
        reply = enif_make_tuple2(env, enif_make_atom(env, "error"),
            enif_make_tuple7(env,
                enif_make_atom(env, "dimensions"),
                enif_make_int(env, dec->raw_frame->width), enif_make_int(env, dec->raw_frame->height),
                enif_make_int(env, dec->ctx->width), enif_make_int(env, dec->ctx->height),
                enif_make_int(env, 0), enif_make_int(env, 0))
        );
        return reply;
    }
    AVFrame *filter_frame;
    int is_filtered = 1;
    if(strlen(dec->filter) > 0) {
        int ret;
        if(dec->vgraph == NULL) {
            av_log(NULL, AV_LOG_INFO, "\rInit decode filter:(%s)\r\n", dec->filter);
            dec->vgraph = av_malloc(sizeof(*(dec->vgraph)));
            char input_desc[1024];
            snprintf(input_desc, sizeof(input_desc),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec->ctx->width, dec->ctx->height, dec->ctx->pix_fmt,
                dec->ctx->time_base.num, dec->ctx->time_base.den,
                dec->ctx->sample_aspect_ratio.num,
                dec->ctx->sample_aspect_ratio.den);
            if(init_graph(dec->vgraph, dec->ctx, AVMEDIA_TYPE_VIDEO, dec->filter, input_desc) < 0) {
                av_log(NULL, AV_LOG_INFO, "\rinit_graph for decode fail\r\n");
                is_filtered = 0;
                goto END_FILTER;
            }
        }
        if((ret = av_buffersrc_add_frame_flags(dec->vgraph->buffersrc_ctx, dec->raw_frame, AV_BUFFERSRC_FLAG_PUSH)) < 0) {
            av_log(NULL, AV_LOG_INFO, "\rav_buffersrc_add_frame for decode fail: %d\r\n", ret);
            is_filtered = 0;
            goto END_FILTER;
        }
        ErlNifBinary yuv;
        while(1) {
            filter_frame = av_frame_alloc();
            if(!filter_frame) {
                return error_reply(env, "ENOMEM");
            }
            if((ret = av_buffersink_get_frame(dec->vgraph->buffersink_ctx, filter_frame)) < 0) {
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    av_frame_free(&filter_frame);
                    ret = 0;
                    break;
                }
                av_strerror(ret, err_buf, sizeof(err_buf));
                av_log(NULL, AV_LOG_INFO, "\rav_buffersink_get_frame for decode fail: %d - %s\r\n", ret, err_buf);
                is_filtered = 0;
                av_frame_free(&filter_frame);
                break;
            }
            int width = filter_frame->width;
            int height = filter_frame->height;
            int buffer_size = av_image_get_buffer_size(filter_frame->format, width, height, 1);
            if(!enif_alloc_binary(buffer_size, &yuv)) {
                av_frame_free(&filter_frame);
                return enif_make_atom(env, "failed_to_allocate_binary");
            }
            av_image_copy_to_buffer(yuv.data, buffer_size, (const uint8_t **)(filter_frame->data),
                filter_frame->linesize, filter_frame->format, filter_frame->width, filter_frame->height, 1);
            out_pts = filter_frame->pts == AV_NOPTS_VALUE ? dts : filter_frame->pts;
            av_frame_free(&filter_frame);
            out_frames[out_count] = enif_make_tuple5(env,
                enif_make_atom(env, "yuv"),
                enif_make_binary(env, &yuv),
                enif_make_int64(env, out_pts),
                enif_make_int(env, width),
                enif_make_int(env, height)
            );
            out_count++;
            // Don't overflow the fixed-size out_frames array
            if(out_count >= (int)(sizeof(out_frames)/sizeof(out_frames[0]))) break;
        }
END_FILTER:
        ;
    } else {
        is_filtered = 0;
    }
    int width, height;
    if(is_filtered) {
        if(out_count == 0) {
            reply = enif_make_atom(env, "undefined");
        } else if(out_count == 1) {
            reply = out_frames[0];
        } else {
            reply = enif_make_tuple2(env,
                enif_make_atom(env, "ok"),
                enif_make_list_from_array(env, out_frames, out_count)
            );
        }
    } else {
        width = dec->ctx->width;
        height = dec->ctx->height;
        int buffer_size = av_image_get_buffer_size(dec->ctx->pix_fmt, width, height, 1);
        if(!enif_alloc_binary(buffer_size, &yuv)) {
            return enif_make_atom(env, "failed_to_allocate_binary");
        }
        av_image_copy_to_buffer(yuv.data, buffer_size, (const uint8_t **)(dec->raw_frame->data),
            dec->raw_frame->linesize, dec->ctx->pix_fmt, dec->ctx->width, dec->ctx->height, 1);
        out_pts = dec->raw_frame->pts == AV_NOPTS_VALUE ? dts : dec->raw_frame->pts;
        reply = enif_make_tuple5(env,
            enif_make_atom(env, "yuv"),
            enif_make_binary(env, &yuv),
            enif_make_int64(env, out_pts),
            enif_make_int(env, width),
            enif_make_int(env, height)
        );
    }
    return reply;
}
static ERL_NIF_TERM
decode_audio(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    if(argc != 4) return error_reply(env, "need_4_args");
    struct Decoder *dec;
    ErlNifBinary encoded;
    ErlNifBinary pcm;
    if(!enif_get_resource(env, argv[0], decode_resource, (void **)&dec)) {
        return error_reply(env, "arg_0_must_be_decoder");
    }
    if(!enif_inspect_binary(env, argv[1], &encoded)) {
        return error_reply(env, "arg_1_must_be_binary");
    }
    ErlNifSInt64 dts, pts;
    if(!enif_get_int64(env, argv[2], &dts)) {
        return error_reply(env, "arg_2_must_be_int_dts");
    }
    if(!enif_get_int64(env, argv[3], &pts)) {
        return error_reply(env, "arg_3_must_be_int_pts");
    }
    AVPacket in_pkt;
    av_init_packet(&in_pkt);
    in_pkt.data = encoded.data;
    in_pkt.size = encoded.size;
    in_pkt.dts = dts;
    in_pkt.pts = pts;
    // AVPacket out_pkt;
    av_frame_unref(dec->raw_frame);
    int decoded = 0;
    if(avcodec_decode_audio4(dec->ctx, dec->raw_frame, &decoded, &in_pkt) < 0) {
        return error_reply(env, "couldnt_decode");
    }
    if(!decoded) {
        return error_reply(env, "not_avdecoded");
    }
    ERL_NIF_TERM reply;
    int ch, plane_size;
    int data_size = av_samples_get_buffer_size(&plane_size, dec->ctx->channels,
        dec->raw_frame->nb_samples,
        dec->ctx->sample_fmt, 1);
    int planar = av_sample_fmt_is_planar(dec->ctx->sample_fmt);
    if(data_size < 0) {
        return enif_make_atom(env, "undefined");
    }
    if(!enif_alloc_binary(data_size, &pcm)) {
        return enif_make_atom(env, "failed_to_allocate_binary");
    }
    memcpy(pcm.data, dec->raw_frame->extended_data[0], plane_size);
    // For planar audio, each channel has a separate data pointer
    if(planar && dec->ctx->channels > 1) {
        uint8_t *out = ((uint8_t *)pcm.data) + plane_size;
        for (ch = 1; ch < dec->ctx->channels; ch++) {
            memcpy(out, dec->raw_frame->extended_data[ch], plane_size);
            out += plane_size;
        }
    }
    reply = enif_make_tuple3(env,
        enif_make_atom(env, "pcm"),
        enif_make_binary(env, &pcm),
        enif_make_int64(env, dec->raw_frame->pts == AV_NOPTS_VALUE ? dts : dec->raw_frame->pts)
    );
    return reply;
}

static ERL_NIF_TERM
avcodec_async_decode(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    struct Decoder *dec = NULL;
    enif_get_resource(env, argv[0], decode_resource, (void **)&dec);
    return dec->decoder->type == AVMEDIA_TYPE_VIDEO ? decode_video(env, argc, argv) : decode_audio(env, argc, argv);
}
static ERL_NIF_TERM
avcodec_decode(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    if(argc != 4) {
        return error_reply(env, "need_4_args");
    }
    struct Decoder *dec;
    if(!enif_get_resource(env, argv[0], decode_resource, (void **)&dec)) {
        return error_reply(env, "arg_0_must_be_decoder");
    }
    if(!enif_is_binary(env, argv[1])) {
        return error_reply(env, "arg_1_must_be_binary");
    }
    if(dec->decoder->type == AVMEDIA_TYPE_VIDEO || dec->decoder->type == AVMEDIA_TYPE_AUDIO) {
        return schedule_task(env, &dec->w, argc, argv);
    } else {
        return error_reply(env, "can_decode_video_or_audio");
    }
}

static ERL_NIF_TERM
avcodec_draw_char(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    if(argc != 6) return error_reply(env, "need_6_args");
    ErlNifBinary yuv;
    int x;
    int y;
    int w;
    int h;
    ErlNifBinary ch_image;
    if(!enif_inspect_binary(env, argv[0], &yuv)) return error_reply(env, "1_arg_must_be_binary");
    if(!enif_get_int(env, argv[1], &x)) return error_reply(env, "2_arg_must_be_int");
    if(!enif_get_int(env, argv[2], &y)) return error_reply(env, "3_arg_must_be_int");
    if(!enif_get_int(env, argv[3], &w)) return error_reply(env, "4_arg_must_be_int");
    if(!enif_get_int(env, argv[4], &h)) return error_reply(env, "5_arg_must_be_int");
    if(!enif_inspect_binary(env, argv[5], &ch_image)) return error_reply(env, "6_arg_must_be_binary");
    if(ch_image.size != 64) return error_reply(env, "6_arg_must_have_size_8x8_bytes");
    frog_draw_8x8((uint64_t*)ch_image.data, x, y, yuv.data, w, h);
    return enif_make_atom(env, "ok");
}
/* $$$$$$$$\ $$\ $$$$$$\ $$\ $$\
$$ _____| $$ | \_$$ _| \__| $$ |
$$ | $$$$$$$\ $$$$$$$\ $$$$$$\ $$$$$$$ | $$$$$$\ $$$$$$\ $$ | $$$$$$$\ $$\ $$$$$$\
$$$$$\ $$ __$$\ $$ _____|$$ __$$\ $$ __$$ |$$ __$$\ $$ __$$\ $$ | $$ __$$\ $$ |\_$$ _|
$$ __| $$ | $$ |$$ / $$ / $$ |$$ / $$ |$$$$$$$$ |$$ | \__| $$ | $$ | $$ |$$ | $$ |
$$ | $$ | $$ |$$ | $$ | $$ |$$ | $$ |$$ ____|$$ | $$ | $$ | $$ |$$ | $$ |$$\
$$$$$$$$\ $$ | $$ |\$$$$$$$\ \$$$$$$ |\$$$$$$$ |\$$$$$$$\ $$ | $$$$$$\ $$ | $$ |$$ | \$$$$ |
\________|\__| \__| \_______| \______/ \_______| \_______|\__| \______|\__| \__|\__| \____/
*/
int init_audio_graph(char *input_desc, struct Encoder *);
int init_video_graph(char *input_desc, struct Encoder *, char *logo);

/*
 * Init encoders */
static ERL_NIF_TERM
avcodec_init_encoder(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    struct Encoder *enc;
    if(argc < 2) return error_reply(env, "need_2_args");
    ErlNifBinary codec_name;
    if(!enif_inspect_binary(env, argv[0], &codec_name)) return error_reply(env, "first_arg_must_be_codec");
    AVCodec *encoder = avcodec_find_encoder_by_name((const char *)codec_name.data);
    if(!encoder) return error_reply(env, "no_such_codec");
    AVCodecContext *encoder_ctx = avcodec_alloc_context3(encoder);
    if(encoder->type == AVMEDIA_TYPE_VIDEO) {
        encoder_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
        encoder_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
        encoder_ctx->time_base = (AVRational){1,1};
        encoder_ctx->sample_aspect_ratio = (AVRational){1,1};
        encoder_ctx->width = encoder_ctx->height = -1;
        if(!strcmp((const char *)codec_name.data, "mjpeg")) {
            encoder_ctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
            encoder_ctx->time_base = (AVRational){1,50};
        }
    }
    if(encoder->type == AVMEDIA_TYPE_AUDIO) {
        encoder_ctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
        encoder_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
        encoder_ctx->time_base = (AVRational){1,1};
    }
    AVDictionary *options = NULL;
    ERL_NIF_TERM head, tail, list;
    list = argv[1];
    int64_t input_channel_layout = -1;
    int input_sample_rate = -1;
    int input_sample_fmt = -1;
    int in_width = 0, in_height = 0;
    int in_sarw = 1, in_sarh = 1; // Sample aspect ratio
    // From the scale filter documentation:
    // interl=-1 selects interlace-aware scaling depending on whether
    // the source frames are flagged as interlaced or not.
    int deinterlace = 0;
    char logo[1024] = "";
    // Parse the options proplist coming from Erlang
    while(enif_get_list_cell(env, list, &head, &tail)) {
        int arity = 0;
        const ERL_NIF_TERM* kv;
        if(!enif_get_tuple(env, head, &arity, &kv)) return error_reply(env, "options_are_proplist");
        if(arity != 2) return error_reply(env, "options_are_proplist_2");
        char k[256];
        char v[1024];
        memset(v, 0, sizeof(v));
        ErlNifBinary v_;
        if(!enif_get_atom(env, kv[0], k, sizeof(k), ERL_NIF_LATIN1)) return error_reply(env, "only_atom_keys");
        if(!enif_inspect_binary(env, kv[1], &v_)) {
            list = tail;
            continue;
        }
        strncpy(v, (const char *)v_.data, v_.size > 255 ? 255 : v_.size);
        if(!strcmp("input_channel_layout", k)) {
            input_channel_layout = av_get_channel_layout(v);
        } else if(!strcmp("input_sample_rate", k)) {
            input_sample_rate = atoi(v);
        } else if(!strcmp("input_sample_fmt", k)) {
            input_sample_fmt = av_get_sample_fmt(v);
        } else if(!strcmp("width", k)) {
            encoder_ctx->width = atoi(v);
        } else if(!strcmp("height", k)) {
            encoder_ctx->height = atoi(v);
        } else if(!strcmp("in_width", k)) {
            in_width = atoi(v);
        } else if(!strcmp("in_height", k)) {
            in_height = atoi(v);
        } else if(!strcmp("sarw", k)) {
            in_sarw = atoi(v);
        } else if(!strcmp("sarh", k)) {
            in_sarh = atoi(v);
        } else if(!strcmp("logo", k)) {
            strncpy(logo, v, sizeof(logo) - 1);
        } else if(!strcmp("deinterlace", k)) {
            deinterlace = atoi(v);
        } else {
            if(!strcmp("b", k)) {
                encoder_ctx->bit_rate = atoi(v);
            }
            if(!strcmp("channels", k)) {
                encoder_ctx->channels = atoi(v);
            }
            if(!strcmp("sample_rate", k)) {
                encoder_ctx->sample_rate = atoi(v);
                encoder_ctx->time_base = (AVRational){1,encoder_ctx->sample_rate};
            }
            av_dict_set(&options, k, v, 0);
        }
        list = tail;
    }
    if(encoder->type == AVMEDIA_TYPE_VIDEO && (encoder_ctx->width <= 0 || encoder_ctx->height <= 0)) {
        av_log(NULL, AV_LOG_INFO, "\rwidth_height_must_be_positive\r\n");
        av_free(encoder_ctx);
        return error_reply(env, "width_height_must_be_positive");
    }
    // avcodec_open
    enif_mutex_lock(av_open_mutex);
    if(avcodec_open2(encoder_ctx, encoder, &options) < 0) {
        enif_mutex_unlock(av_open_mutex);
        av_free(encoder_ctx);
        return error_reply(env, "failed_open_codec");
    }
    enif_mutex_unlock(av_open_mutex);
    int len = sizeof(struct Encoder) + description_length + 1;
    enc = (struct Encoder *)enif_alloc_resource(encode_resource, len);
    memset(enc, 0, len);
    enc->description_length = description_length;
    enc->encoder = encoder;
    enc->ctx = encoder_ctx;
    enc->description = (char *)enc + sizeof(struct Encoder);
    snprintf(enc->description, enc->description_length, "avcodec %s encoder(%dx%d)", codec_name.data, enc->ctx->width, enc->ctx->height);
    ErlNifBinary config;
    if(!enif_alloc_binary(encoder_ctx->extradata_size, &config)) {
        return enif_make_atom(env, "failed_to_allocate_binary");
    }
    memcpy(config.data, encoder_ctx->extradata, encoder_ctx->extradata_size);
    char input_desc[1024];
    if(encoder->type == AVMEDIA_TYPE_AUDIO) {
        enc->input_channels = av_get_channel_layout_nb_channels(input_channel_layout);
        enc->input_channel_layout = input_channel_layout;
        enc->input_sample_fmt = input_sample_fmt;
        enc->input_sample_rate = input_sample_rate;
        enc->input_bps = enc->input_channels*av_get_bytes_per_sample(input_sample_fmt);
        AVRational time_base = {1,input_sample_rate};
        snprintf(input_desc, sizeof(input_desc),
            "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
            time_base.num, time_base.den, input_sample_rate,
            av_get_sample_fmt_name(input_sample_fmt), input_channel_layout);
        // init audio graph
        int res = init_audio_graph(input_desc, enc);
        if(res < 0) {
            char areason[1024];
            sprintf(areason, "failed_to_open_audio_graph(%s): %d", input_desc, res);
            return error_reply(env, areason);
        }
    } else if(encoder->type == AVMEDIA_TYPE_VIDEO) {
        enc->in_width = in_width;
        enc->in_height = in_height;
        enc->deinterlace = deinterlace;
        snprintf(input_desc, sizeof(input_desc),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
            in_width, in_height, AV_PIX_FMT_YUV420P, 1, 90000, in_sarw, in_sarh);
        int res = init_video_graph(input_desc, enc, logo);
        if(res < 0) {
            char reason[1024];
            sprintf(reason, "failed_to_open_video_graph(%s): %d", input_desc, res);
            return error_reply(env, reason);
        }
    }
    snprintf(enc->w.thread_name, 16, "avencoder");
#ifndef REALLY_USE_DIRTY_NIFS
    init_worker(env, &enc->w, avcodec_async_encode);
#endif
    ERL_NIF_TERM enc_term = enif_make_resource_binary(env, enc, enc->description, strlen(enc->description));
    enif_release_resource(enc);
    return enif_make_tuple3(env,
        enif_make_atom(env, "ok"),
        enif_make_binary(env, &config),
        enc_term
    );
}
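/*
 * Illustrative options proplist as passed from Erlang (keys are atoms, values
 * are binaries, matching the parsing loop above; these particular numbers are
 * made up):
 *
 *   init_encoder0(<<"libx264",0>>, [{width,<<"640">>}, {height,<<"360">>},
 *                                   {in_width,<<"1280">>}, {in_height,<<"720">>},
 *                                   {b,<<"700000">>}, {deinterlace,<<"1">>}])
 *
 * Keys handled explicitly (width, height, in_width, in_height, sarw, sarh,
 * logo, deinterlace, input_*) configure the filter graph; everything else is
 * forwarded to the AVDictionary consumed by avcodec_open2().
 */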
/*
 * Init filter graph
 */
int init_graph(FilterContext *filt_ctx, AVCodecContext *ctx, enum AVMediaType codec_type, const char *filter_spec, const char *input_desc) {
    // Output format constraints are taken from the codec context;
    // the same values are used on the decode path.
    const enum AVSampleFormat sample_fmts[] = { ctx->sample_fmt, -1 };
    const enum AVPixelFormat pix_fmt = ctx->pix_fmt;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilter *buffersrc = NULL;
    AVFilter *buffersink = NULL;
    AVFilterGraph *filter_graph = avfilter_graph_alloc();
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    int ret = 0;
    int rcode = 0;
    if(!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        av_log(NULL, AV_LOG_INFO, "\rENOMEM on enter init_graph\r\n");
        goto end;
    }
    if(codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if(!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_INFO, "\rfiltering source or sink element not found\r\n");
            ret = -6;
            goto end;
        }
        /* buffer audio source: the decoded frames from the decoder will be inserted here. */
        if((rcode = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                input_desc, NULL, filter_graph)) < 0) {
            av_log(NULL, AV_LOG_INFO, "\rCannot create audio buffer source: 0x%x\r\n", rcode);
            ret = -2;
            goto end;
            // error("Cannot create audio buffer source");
        }
        /* buffer audio sink: to terminate the filter chain. */
        if(avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph) < 0) {
            av_log(NULL, AV_LOG_INFO, "\rCannot create audio buffer sink\r\n");
            // error("Cannot create audio buffer sink");
            ret = -3;
            goto end;
        }
        if(av_opt_set_int_list(buffersink_ctx, "sample_fmts", sample_fmts, -1,
                AV_OPT_SEARCH_CHILDREN) < 0) {
            av_log(NULL, AV_LOG_INFO, "\rCannot set output sample format\n");
            ret = -9;
            goto end;
        }
    } else if(codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if(!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_INFO, "\rfiltering source or sink element not found\r\n");
            ret = -6;
            goto end;
        }
        /* buffer video source */
        if((rcode = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                input_desc, NULL, filter_graph)) < 0) {
            av_log(NULL, AV_LOG_INFO, "\rCannot create video buffer source: 0x%x\r\n", rcode);
            ret = -2;
            goto end;
        }
        /* buffer video sink */
        if(avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph) < 0) {
            av_log(NULL, AV_LOG_INFO, "\rCannot create video buffer sink\r\n");
            ret = -3;
            goto end;
        }
        if(av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&pix_fmt, sizeof(pix_fmt), AV_OPT_SEARCH_CHILDREN)) {
            av_log(NULL, AV_LOG_INFO, "\rCannot set output pixel format\n");
            ret = -9;
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    if (avfilter_graph_parse_ptr(filter_graph, filter_spec, &inputs, &outputs, NULL) < 0) {
        av_log(NULL, AV_LOG_INFO, "Cannot graph parse\n");
        ret = -4;
        goto end;
    }
    if (avfilter_graph_config(filter_graph, NULL) < 0) {
        av_log(NULL, AV_LOG_INFO, "Cannot graph config\n");
        ret = -5;
        goto end;
    }
    if(ctx->frame_size) {
        av_buffersink_set_frame_size(buffersink_ctx, ctx->frame_size);
    }
    filt_ctx->buffersrc_ctx = buffersrc_ctx;
    filt_ctx->buffersink_ctx = buffersink_ctx;
    filt_ctx->filter_graph = filter_graph;
end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}
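/*
 * Illustrative arguments for init_graph(), matching the formats built by the
 * callers in this file (the concrete numbers are made up):
 *
 *   input_desc  = "video_size=1280x720:pix_fmt=0:time_base=1/90000:pixel_aspect=1/1"
 *                 (pix_fmt=0 is AV_PIX_FMT_YUV420P)
 *   filter_spec = "scale=w=640:h=360:interl=-1"
 *
 * The resulting graph is "in" (buffer source) -> parsed filter_spec -> "out"
 * (buffersink), which is why the endpoints above are named "in" and "out".
 */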
/*
 * Init audio graph
 */
int init_audio_graph(char *input_desc, struct Encoder *enc) {
    const char *filter_desc = "anull";
    FilterContext *audio = av_malloc(sizeof(*audio));
    int ret = init_graph(audio, enc->ctx, AVMEDIA_TYPE_AUDIO, filter_desc, input_desc);
    enc->agraph = audio;
    return ret;
}

/*
 * Init video graph
 */
int init_video_graph(char *input_desc, struct Encoder *enc, char *logo) {
    //
    // Parse the logo parameter, syntax: path@overlay_param
    //
    char logo_name[512] = "", logo_param[512] = "";
    const char delim[2] = "@";
    char *token, *save_ptr;
    int n;
    for(n = 0; ; n++, logo = NULL) {
        token = strtok_r(logo, delim, &save_ptr);
        if(token == NULL)
            break;
        if(n == 0) {
            strncpy(logo_name, token, sizeof(logo_name) - 1);
        } else if(n == 1) {
            strncpy(logo_param, token, sizeof(logo_param) - 1);
        } else {
            break;
        }
    }
    char filter_desc[2048];
    char scale_desc[1024];
    char yadif_desc[20] = ""; // deinterlace
    if(enc->deinterlace) {
        // mode=0    The interlacing mode to adopt: output one frame for each frame.
        // parity=-1 The picture field parity assumed for the input: auto-detect.
        // deint=0   Which frames to deinterlace: all frames.
        snprintf(yadif_desc, sizeof(yadif_desc), "yadif=0:-1:0,");
    }
    snprintf(scale_desc, sizeof(scale_desc), "scale=w=%d:h=%d:interl=-1",
        enc->ctx->width, enc->ctx->height);
    //
    // If a logo was given, add the overlay filter
    //
    if(strlen(logo_name) > 0 && strlen(logo_param) > 0) {
        snprintf(filter_desc, sizeof(filter_desc), "movie=\'%s\'[watermark];[in][watermark] overlay=%s,%s%s [out]",
            logo_name, logo_param, yadif_desc, scale_desc);
    } else {
        sprintf(filter_desc, "%s", scale_desc);
    }
    av_log(NULL, AV_LOG_INFO, "\rInit video graph with filter:%s\n", filter_desc);
    FilterContext *video = av_malloc(sizeof(*video));
    int ret = init_graph(video, enc->ctx, AVMEDIA_TYPE_VIDEO, filter_desc, input_desc);
    enc->vgraph = video;
    return ret;
}
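/*
 * Example logo value (hypothetical path, not from this repo):
 *
 *   "/opt/flussonic/logo.png@10:10"
 *
 * which the code above expands into the filter chain
 *
 *   movie='/opt/flussonic/logo.png'[watermark];[in][watermark] overlay=10:10,scale=... [out]
 *
 * (with "yadif=0:-1:0," inserted before the scale when deinterlacing is on).
 */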
/* $$$$$$$$\ $$\
$$ _____| $$ |
$$ | $$$$$$$\ $$$$$$$\ $$$$$$\ $$$$$$$ | $$$$$$\
$$$$$\ $$ __$$\ $$ _____|$$ __$$\ $$ __$$ |$$ __$$\
$$ __| $$ | $$ |$$ / $$ / $$ |$$ / $$ |$$$$$$$$ |
$$ | $$ | $$ |$$ | $$ | $$ |$$ | $$ |$$ ____|
$$$$$$$$\ $$ | $$ |\$$$$$$$\ \$$$$$$ |\$$$$$$$ |\$$$$$$$\
\________|\__| \__| \_______| \______/ \_______| \_______|
*/
static ERL_NIF_TERM
encode_video(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    if(argc < 3 || argc > 4) return error_reply(env, "wrong_number_of_args");
    struct Encoder *enc;
    ErlNifBinary yuv;
    ErlNifSInt64 pts = AV_NOPTS_VALUE;
    int keyframe = 0;
    if(!enif_get_resource(env, argv[0], encode_resource, (void **)&enc)) {
        return error_reply(env, "arg_0_must_be_encoder");
    }
    if(!enif_inspect_binary(env, argv[1], &yuv)) return error_reply(env, "arg_1_must_be_binary");
    if(!enif_get_int64(env, argv[2], &pts)) return error_reply(env, "arg_2_must_be_pts");
    // if(yuv.size != enc->ctx->width*enc->ctx->height*3/2) {
    //     return error_reply(env, "invalid_yuv_size");
    //     av_log(NULL, AV_LOG_INFO, "\rwarning invalid yuv size %zd != %d\r\n", yuv.size, enc->ctx->width*enc->ctx->height*3/2);
    // }
    if(argc >= 4 && !enif_get_int(env, argv[3], &keyframe)) return error_reply(env, "arg_4_must_be_int");
    AVFrame *frame = av_frame_alloc();
    av_frame_unref(frame);
    frame->width = enc->in_width;
    frame->height = enc->in_height;
    frame->format = AV_PIX_FMT_YUV420P;
    frame->pts = pts;
    frame->key_frame = keyframe;
    av_image_fill_arrays(frame->data, frame->linesize, yuv.data, frame->format, frame->width, frame->height, 1);
    if(av_buffersrc_add_frame_flags(enc->vgraph->buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_PUSH) < 0) {
        av_frame_free(&frame);
        av_log(NULL, AV_LOG_INFO, "\rfailed_push_to_vgraph\r\n");
        return error_reply(env, "failed_push_to_vgraph");
    }
    ERL_NIF_TERM outframes[20];
    int out_count = 0;
    AVFrame *filt_frame;
    int ret;
    while(1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            return error_reply(env, "ENOMEM");
        }
        ret = av_buffersink_get_frame(enc->vgraph->buffersink_ctx, filt_frame);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                ret = 0;
            }
            av_frame_free(&filt_frame);
            break;
        }
        AVPacket pkt;
        ErlNifBinary h264;
        av_init_packet(&pkt);
        if(!enif_alloc_binary(enc->ctx->width*enc->ctx->height, &h264)) {
            av_log(NULL, AV_LOG_INFO, "\rfailed_to_allocate_binary\r\n");
            return enif_make_atom(env, "failed_to_allocate_binary");
        }
        pkt.data = h264.data;
        pkt.size = h264.size;
        int could_encode = 0;
        if(avcodec_encode_video2(enc->ctx, &pkt, filt_frame, &could_encode) != 0) {
            av_log(NULL, AV_LOG_INFO, "\ravcodec_encode_video2 error\r\n");
            av_frame_free(&filt_frame);
            return enif_make_atom(env, "undefined");
        }
        av_frame_free(&filt_frame);
        if(!could_encode) {
            enif_release_binary(&h264);
            continue;
        }
        if(!enif_realloc_binary(&h264, pkt.size)) {
            av_log(NULL, AV_LOG_INFO, "\rfailed_to_allocate_binary\r\n");
            return enif_make_atom(env, "failed_to_allocate_binary");
        }
        if(pkt.dts < 0) {
            enc->dts_shift = -pkt.dts;
        }
        outframes[out_count] = enif_make_tuple5(env,
            enif_make_atom(env, "ok"),
            pkt.flags & AV_PKT_FLAG_KEY ? enif_make_atom(env, "keyframe") : enif_make_atom(env, "frame"),
            enif_make_int64(env, pkt.dts + enc->dts_shift),
            enif_make_int64(env, pkt.pts + enc->dts_shift),
            enif_make_binary(env, &h264)
        );
        out_count++;
        // Don't overflow the fixed-size outframes array
        if(out_count >= (int)(sizeof(outframes)/sizeof(outframes[0]))) break;
    }
    if(out_count == 0) {
        return enif_make_atom(env, "undefined");
    } else if(out_count == 1) {
        return outframes[0];
    } else {
        return enif_make_tuple2(env,
            enif_make_atom(env, "ok"),
            enif_make_list_from_array(env, outframes, out_count)
        );
    }
}
static ERL_NIF_TERM
encode_audio(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    if(argc < 3) return error_reply(env, "wrong_number_of_args");
    struct Encoder *enc;
    ErlNifBinary pcm;
    ErlNifSInt64 pts = AV_NOPTS_VALUE;
    if(!enif_get_resource(env, argv[0], encode_resource, (void **)&enc)) {
        return error_reply(env, "arg_0_must_be_encoder");
    }
    if(!enif_inspect_binary(env, argv[1], &pcm)) return error_reply(env, "arg_1_must_be_binary");
    if(!enif_get_int64(env, argv[2], &pts)) return error_reply(env, "arg_2_must_be_pts");
    AVFrame *raw_audio = av_frame_alloc();
    av_frame_unref(raw_audio);
    raw_audio->data[0] = pcm.data;
    raw_audio->nb_samples = pcm.size / enc->input_bps;
    raw_audio->channels = enc->input_channels;
    raw_audio->channel_layout = enc->input_channel_layout;
    raw_audio->sample_rate = enc->input_sample_rate;
    raw_audio->format = enc->input_sample_fmt;
    raw_audio->pts = pts;
    av_samples_fill_arrays(raw_audio->extended_data, raw_audio->linesize, pcm.data, raw_audio->channels,
        raw_audio->nb_samples, raw_audio->format, 1);
    if (av_buffersrc_add_frame_flags(enc->agraph->buffersrc_ctx, raw_audio, AV_BUFFERSRC_FLAG_PUSH) < 0) {
        av_frame_free(&raw_audio);
        return error_reply(env, "failed_push_to_agraph");
    }
    av_frame_free(&raw_audio);
    ERL_NIF_TERM outframes[20];
    int out_count = 0;
    AVFrame *samplesref;
    int ret = 0;
    while (1) {
        samplesref = av_frame_alloc();
        ret = av_buffersink_get_frame(enc->agraph->buffersink_ctx, samplesref);
        if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_frame_free(&samplesref);
            break;
        }
        if(ret < 0) {
            av_frame_free(&samplesref);
            return error_reply(env, "failed_to_run_agraph");
        }
        samplesref->pts = av_rescale_q(samplesref->pts,
            enc->agraph->buffersink_ctx->inputs[0]->time_base,
            enc->ctx->time_base);
        // fprintf(stderr, "rescale %lld to %lld. Timebase1: %d/%d, tb2: %d/%d\r\n",
        //     samplesref->pts, filtered_frame->pts,
        //     enc->agraph->buffersink_ctx->inputs[0]->time_base.num, enc->agraph->buffersink_ctx->inputs[0]->time_base.den,
        //     enc->ctx->time_base.num, enc->ctx->time_base.den);
        AVPacket pkt;
        av_init_packet(&pkt);
        ErlNifBinary aac;
        enif_alloc_binary(4096, &aac);
        pkt.data = aac.data;
        pkt.size = aac.size;
        pkt.dts = samplesref->pts;
        int could_encode = 0;
        if(avcodec_encode_audio2(enc->ctx, &pkt, samplesref, &could_encode) < 0) {
            av_frame_free(&samplesref);
            return error_reply(env, "Failed to encode aac");
        }
        // Free the frame before checking could_encode, so it is not leaked
        // when the encoder buffers the input without emitting a packet
        av_frame_free(&samplesref);
        if(!could_encode) {
            enif_release_binary(&aac);
            break;
        }
        if(!enif_realloc_binary(&aac, pkt.size)) {
            return enif_make_atom(env, "failed_to_allocate_binary");
        }
        outframes[out_count] = enif_make_tuple2(env, enif_make_int64(env, pkt.dts), enif_make_binary(env, &aac));
        out_count++;
        // Don't overflow the fixed-size outframes array
        if(out_count >= (int)(sizeof(outframes)/sizeof(outframes[0]))) break;
    }
    if(out_count > 0) {
        return enif_make_tuple2(env,
            enif_make_atom(env, "ok"),
            enif_make_list_from_array(env, outframes, out_count)
        );
    }
    return enif_make_atom(env, "false");
}

static ERL_NIF_TERM
avcodec_async_encode(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    struct Encoder *enc;
    enif_get_resource(env, argv[0], encode_resource, (void **)&enc);
    return enc->encoder->type == AVMEDIA_TYPE_VIDEO ? encode_video(env, argc, argv) : encode_audio(env, argc, argv);
}
static ERL_NIF_TERM
avcodec_encode(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {
    if(argc < 3) return enif_make_badarg(env);
    struct Encoder *enc;
    if(!enif_get_resource(env, argv[0], encode_resource, (void **)&enc)) {
        return enif_make_badarg(env);
    }
    if(enc->encoder->type == AVMEDIA_TYPE_VIDEO || enc->encoder->type == AVMEDIA_TYPE_AUDIO) {
        return schedule_task(env, &enc->w, argc, argv);
    } else {
        return error_reply(env, "can_encode_video_or_audio");
    }
}

/* $$\ $$\ $$\
$$$\ $$$ |\__|
$$$$\ $$$$ |$$\ $$$$$$$\ $$$$$$$\
$$\$$\$$ $$ |$$ |$$ _____|$$ _____|
$$ \$$$ $$ |$$ |\$$$$$$\ $$ /
$$ |\$ /$$ |$$ | \____$$\ $$ |
$$ | \_/ $$ |$$ |$$$$$$$ |\$$$$$$$\
\__| \__|\__|\_______/ \_______|
*/
static void
decode_destructor(ErlNifEnv* env, void* obj)
{
    struct Decoder *dec = (struct Decoder *)obj;
    stop_worker(&dec->w);
    if(dec->ctx) {
        enif_mutex_lock(av_open_mutex);
        avcodec_close(dec->ctx);
        enif_mutex_unlock(av_open_mutex);
        av_free(dec->ctx);
        dec->ctx = NULL;
    }
    if(dec->downscale_ctx) {
        sws_freeContext(dec->downscale_ctx);
    }
    if(dec->vgraph) {
        avfilter_graph_free(&dec->vgraph->filter_graph);
        av_free(dec->vgraph);
    }
    av_frame_free(&dec->raw_frame);
}

static void
encode_destructor(ErlNifEnv* env, void* obj)
{
    struct Encoder *enc = (struct Encoder *)obj;
    stop_worker(&enc->w);
    if(enc->ctx) {
        enif_mutex_lock(av_open_mutex);
        avcodec_close(enc->ctx);
        enif_mutex_unlock(av_open_mutex);
        av_free(enc->ctx);
    }
    if(enc->vgraph) {
        avfilter_graph_free(&enc->vgraph->filter_graph);
        av_free(enc->vgraph);
    }
    if(enc->agraph) {
        avfilter_graph_free(&enc->agraph->filter_graph);
        av_free(enc->agraph);
    }
}

static int
load(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info) {
    if(!decode_resource) {
        decode_resource = enif_open_resource_type(env, NULL, "decode_resource", decode_destructor, ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER, NULL);
    }
    if(!encode_resource) {
        encode_resource = enif_open_resource_type(env, NULL, "encode_resource", encode_destructor, ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER, NULL);
    }
    // av_register_all();
    avcodec_register_all();
    avfilter_register_all();
    // av_log_set_level(AV_LOG_DEBUG);
    av_open_mutex = enif_mutex_create("av_open_mutex");
    return 0;
}

static int
upgrade(ErlNifEnv* env, void** priv, void** old_priv, ERL_NIF_TERM load_info) { return 0; }

static int
reload(ErlNifEnv* env, void** priv, ERL_NIF_TERM load_info) { return 0; }

static void
unload(ErlNifEnv* env, void* priv) {
    // if(av_open_mutex) {
    //     enif_mutex_destroy(av_open_mutex);
    //     av_open_mutex = NULL;
    // }
    return;
}

static ERL_NIF_TERM
avcodec_nif_loaded(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    return enif_make_atom(env, "true");
}

static ErlNifFunc avcodec_funcs[] =
{
    {"nif_loaded", 0, avcodec_nif_loaded},
    {"init_decoder0", 2, avcodec_init_decoder},
    {"init_decoder0", 3, avcodec_init_decoder},
    {"init_encoder0", 2, avcodec_init_encoder},
#ifdef REALLY_USE_DIRTY_NIFS
    {"decode0_nif", 4, avcodec_async_decode, ERL_NIF_DIRTY_JOB_CPU_BOUND},
    {"encode0_nif", 4, avcodec_async_encode, ERL_NIF_DIRTY_JOB_CPU_BOUND},
#else
    {"decode0_nif", 4, avcodec_decode},
    {"encode0_nif", 4, avcodec_encode},
#endif
    {"draw_char0_nif", 6, avcodec_draw_char},
    {"close0_nif", 1, close0_nif}
};

ERL_NIF_INIT(avcodec, avcodec_funcs, load, reload, upgrade, unload)
-module(avcodec).
-on_load(load_nif/0).

-export([nif_loaded/0, load_nif/0]).
-export([init_decoder/1, init_decoder/2, init_decoder/3, decode/2, decode_stream/2]).
-export([init_encoder/1, encode/2, add_pcm_options/2]).
-export([init_thumbnailer/1, thumbnail/2]).
-export([init_decoder0/2, init_encoder0/2]).
-export([draw/3]).
-export([configure_stream/2, init_stream/2, feed_stream/2, parse_configuration/2, parse_options/1]).
-export([init_output_worker/3, encoder_loop/2]).
-export([close/1]).

-include_lib("erlmedia/include/video_frame.hrl").
-include_lib("erlmedia/include/media_info.hrl").
-include_lib("erlmedia/include/ac3.hrl").

-type decoder() :: any().
-type encoder() :: any().
-type transcoder() :: any().
% -type yuv() :: binary().

load_nif() ->
    Path = case code:lib_dir(ffmpeg, priv) of
        P when is_list(P) -> P;
        _ -> "./priv"
    end,
    Load = erlang:load_nif(Path ++ "/avcodec", 0),
    case Load of
        ok -> ok;
        Error -> error_logger:error_msg("avcodec nif load error: ~p\n", [Error])
    end,
    ok.

nif_loaded() ->
    false.
-spec init_decoder(stream_info(), binary()) -> {ok, decoder(), stream_info()}.
init_decoder(#stream_info{codec = Codec0, config = Config0, content = Content, params = Params} = SI, Spec) ->
    Codec = case Codec0 of
        mp2v -> mpeg2video;
        mp2a -> mp2;
        pcma -> pcm_alaw;
        pcmu -> pcm_mulaw;
        _ -> Codec0
    end,
    Config = if
        is_binary(Config0) -> Config0;
        Codec0 == pcma orelse Codec0 == pcmu ->
            Channels = case Params of
                #audio_params{channels = mono} -> 1;
                #audio_params{channels = stereo} -> 2;
                #audio_params{channels = Chans_} when is_integer(Chans_) -> Chans_;
                #audio_params{channels = undefined} -> 1
            end,
            <<Channels:8>>;
        true ->
            <<>>
    end,
    SI1 = if
        is_binary(Config0) -> video_frame:config_to_stream_info(SI, #video_frame{flavor = config, codec = Codec0, body = Config});
        true -> SI
    end,
    case init_decoder0(Config, <<(atom_to_binary(Codec,latin1))/binary, 0>>, Spec) of
        {ok, Dec} ->
            case Content of
                video -> {ok, Dec, SI1};
                audio -> {ok, Dec, add_pcm_options(SI1, Dec)}
            end;
        {error, _} = E ->
            E
    end.

-spec init_decoder(stream_info()) -> {ok, decoder(), stream_info()}.
init_decoder(SI) ->
    init_decoder(SI, <<0>>).

-spec init_decoder(stream_info(), non_neg_integer(), non_neg_integer()) -> {ok, decoder(), stream_info()}.
init_decoder(#stream_info{codec = Codec, config = Config, params = Params} = SI, Width, Height) when is_binary(Config) ->
    case init_decoder0(Config, <<(atom_to_binary(Codec,latin1))/binary, 0>>, <<(iolist_to_binary(io_lib:format("scale=w=~w:h=~w:interl=-1", [Width, Height])))/binary, 0>>) of
        {ok, Dec} ->
            {ok, Dec, SI#stream_info{params = Params#video_params{width = Width, height = Height}}};
        {error, _} = E ->
            E
    end;

init_decoder(#stream_info{} = SI, Width, Height) ->
    init_decoder(SI#stream_info{config = <<>>}, Width, Height).

add_pcm_options(#stream_info{params = #audio_params{channels = Channels, channel_layout = Layout, sample_fmt = SampleFmt} = Params} = SI, ADec) ->
    % lager:info("adec: ~p", [description(ADec)]),
    Opts = lists:flatmap(fun
        (<<"channel_layout=none">>) when Channels == 1 -> [{channel_layout,<<"mono">>}];
        (<<"channel_layout=none">>) -> [{channel_layout,<<"stereo">>},{channels,2}];
        (<<"channel_layout=",P/binary>>) -> [{channel_layout,P}];
        (<<"sample_fmt=",P/binary>>) -> [{sample_fmt,P}];
        (_) -> []
    end, binary:split(description(ADec), <<",">>, [global])),
    Or = fun
        (undefined,K,L) -> proplists:get_value(K,L);
        (V,_,_) when is_binary(V) -> V;
        (V,_,_) when is_atom(V) -> atom_to_binary(V,latin1);
        (V,channels,_) when is_integer(V) -> V
    end,
    Params1 = Params#audio_params{
        channel_layout = Or(Layout,channel_layout,Opts),
        channels = Or(Channels,channels,Opts),
        sample_fmt = Or(SampleFmt,sample_fmt,Opts)
    },
    SI#stream_info{params = Params1}.
-spec decode(#video_frame{}, decoder()) -> [#video_frame{}] | {error, any()}.
decode(#video_frame{flavor = Fl, content = Content, body = Body, dts = InDTS, pts = InPTS} = Frame, Decoder) when Fl == keyframe orelse Fl == frame ->
    ToFrame = fun
        ({yuv, YUV, OutPTS, W, H}) when is_binary(YUV) ->
            Frame#video_frame{codec = yuv, body = YUV, next_id = {W,H}, mpegts = undefined, dts = OutPTS, pts = OutPTS};
        ({pcm, PCM, OutPTS}) when is_binary(PCM) andalso Content == audio ->
            Frame#video_frame{codec = pcm, body = PCM, mpegts = undefined, dts = OutPTS, pts = OutPTS}
    end,
    case decode0(Decoder, Body, round(InDTS), round(InPTS)) of
        {ok, Output} ->
            [ToFrame(Out) || Out <- Output];
        undefined ->
            [];
        false ->
            {error, false};
        {error, Error} ->
            lager:info("failed to decode: ~p", [Error]),
            {error, Error};
        Tuple when is_tuple(Tuple) ->
            [ToFrame(Tuple)]
    end;

decode(_, _Decoder) ->
    {error, not_keyframe}.

draw(Str, {X, Y}, #video_frame{codec = yuv, body = YUV, next_id = {W,H}})
        when is_binary(Str), is_integer(X), is_integer(Y) ->
    lists:foldl(fun(Char, Idx) ->
        CharImage = draw_alphabet:char_mask(Char),
        draw_char0(YUV, {X+Idx*8, Y}, {W, H}, CharImage),
        Idx+1
    end, 0, binary_to_list(Str)),
    ok.
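%% Illustrative call (the frame fields are as produced by decode/2 above, and
%% draw_alphabet:char_mask/1 is expected to return the 64-byte 8x8 mask that
%% the draw_char0_nif arity-6 NIF checks for):
%%
%%   ok = avcodec:draw(<<"CAM1">>, {8, 8},
%%            #video_frame{codec = yuv, body = YUV, next_id = {W, H}})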
-record(thumbnailer, {
    codec = h264,
    n = 0,
    decoder,
    encoder
}).

init_thumbnailer(Config) when byte_size(Config) > 5 ->
    init_thumbnailer(#stream_info{codec = h264, content = video, config = Config});

init_thumbnailer(#stream_info{content = video} = SI) ->
    {ok, Decoder, #stream_info{params = #video_params{width = W0, height = H0}, options = Opts}} = avcodec:init_decoder(SI),
    W = integer_to_binary(proplists:get_value(pixel_width, Opts, W0)),
    H = integer_to_binary(proplists:get_value(pixel_height, Opts, H0)),
    B = 150, % kBytes
    {ok, _, Enc} = init_encoder0(<<"mjpeg", 0>>, [{width,W},{in_width,W},{height,H},{in_height,H},{b, integer_to_binary(B*8*1024)}, {sample_rate, integer_to_binary(1)}]),
    {ok, #thumbnailer{decoder = Decoder, encoder = Enc}}.

thumbnail(#video_frame{} = Frame, Thumbnailer) ->
    thumbnail([Frame], Thumbnailer);

thumbnail([#video_frame{}|_] = Frames, Config) when is_binary(Config) ->
    {ok, Thumb} = init_thumbnailer(Config),
    case thumbnail(Frames, Thumb) of
        {ok, Jpeg, Thumb2} ->
            close(Thumb2),
            {ok, Jpeg};
        Else ->
            close(Thumb),
            Else
    end;

thumbnail([#video_frame{codec = Codec, track_id = Tid}|_] = Frames0, #thumbnailer{codec = Codec} = T) ->
    Frames = [F || #video_frame{track_id = Tr} = F <- Frames0, Tr == Tid],
    thumbnail0(Frames, T).

thumbnail0([Frame|Frames], #thumbnailer{decoder = Decoder, encoder = Enc, n = N} = T) ->
    T1 = erlang:now(),
    case decode(Frame, Decoder) of
        [#video_frame{codec = yuv, body = YUV}|_] ->
            T2 = erlang:now(),
            {ok, _, _, _, JPEG} = encode0(Enc, YUV, N, 1),
            T3 = erlang:now(),
            lager:debug("thumbnail. decode: ~Bus, yuv2jpeg: ~Bus", [timer:now_diff(T2,T1), timer:now_diff(T3,T2)]),
            {ok, JPEG, T#thumbnailer{n = N + 1}};
        {error, _} ->
            thumbnail0(Frames, T);
        [] ->
            thumbnail0(Frames, T)
    end;

thumbnail0([], _T) ->
    undefined.

-record(enc, {
    codec,
    config,
    stream_info,
    encoder
}).
-spec init_encoder(stream_info()) -> {ok, encoder(), stream_info()}. | |
init_encoder(#stream_info{codec = Codec, bitrate = Bitrate0, content = Content, params = Params, options = SOptions} = SI) -> | |
Codec1 = case Codec of | |
h264 -> <<"libx264">>; | |
jpeg -> <<"mjpeg">>; | |
_ -> atom_to_binary(Codec,latin1) | |
end, | |
Bitrate = if | |
Bitrate0 == undefined andalso Content == audio -> 64; | |
Bitrate0 == undefined andalso Content == video -> 700; | |
is_integer(Bitrate0) -> Bitrate0; | |
true -> error({invalid_bitrate,Bitrate0,SI}) | |
end, | |
TranscoderOptions = proplists:get_value(transcoder_options, SOptions, []), | |
GopDuration = 3, | |
DefaultOptions = case proplists:get_value(crf, TranscoderOptions) of | |
undefined -> [{b, integer_to_binary(Bitrate*1000)}]; | |
_ -> [] | |
end ++ case Codec of | |
h264 -> | |
case proplists:get_value(tune, TranscoderOptions) of | |
undefined -> [{'rc-lookahead', integer_to_binary(GopDuration*25)}]; | |
<<"zerolatency">> -> [] | |
end ++ | |
case proplists:get_value(crf, TranscoderOptions) of | |
undefined -> [{qmin, <<"26">>},{qmax, <<"50">>}]; | |
_ -> [] | |
end ++ | |
[ | |
{g, integer_to_binary(GopDuration*25)}, | |
{bufsize, integer_to_binary(Bitrate*1000*GopDuration)}, | |
{maxrate, integer_to_binary(round(Bitrate*1000*1.3))}, | |
{bt, integer_to_binary(round(Bitrate*1000*0.7))}, | |
{preset, <<"veryfast">>}, | |
{threads, <<"2">>}, | |
{profile, <<"main">>}, | |
{level, <<"3.1">>}]; | |
_ -> | |
[] | |
end ++ case Params of | |
#video_params{width = W, height = H} -> | |
case proplists:get_value(source, SOptions, SI) of | |
#stream_info{params = #video_params{width = IW0, height = IH0}, options = SrcOpts} -> | |
IW = proplists:get_value(pixel_width,SrcOpts,IW0), | |
IH = proplists:get_value(pixel_height,SrcOpts,IH0), | |
SAR = case {proplists:get_value(sar_width,SrcOpts),proplists:get_value(sar_height,SrcOpts)} of | |
{undefined,undefined} -> []; | |
{SarW,SarH} -> [{sar,<<(integer_to_binary(SarW))/binary, ":", (integer_to_binary(SarH))/binary>>}, | |
{sarw,integer_to_binary(SarW)},{sarh,integer_to_binary(SarH)}] | |
end, | |
[{in_width,integer_to_binary(IW)}, | |
{in_height,integer_to_binary(IH)}, | |
{width,integer_to_binary(IW0)}, | |
{height,integer_to_binary(IH0)}, | |
{logo, proplists:get_value(logo, TranscoderOptions)}%, | |
%{deinterlace, integer_to_binary(proplists:get_value(deinterlace, TranscoderOptions, -1))} | |
] ++ SAR; | |
%case {IW,IH} of | |
% {W,H} -> []; | |
% _ -> [{input_width,IW},{input_height,IH}] | |
%end; | |
_ -> | |
[] | |
end ++ [{width,integer_to_binary(W)},{height,integer_to_binary(H)}]; | |
#audio_params{channels = Ch, sample_rate = SR} -> | |
[{channels,integer_to_binary(Ch)},{sample_rate,integer_to_binary(SR)}] ++ | |
case proplists:get_value(source, SOptions) of | |
undefined -> []; | |
#stream_info{} = Source -> audio_stream_options(Source) | |
end; | |
_ -> [] | |
end, | |
Whitelist = [width,height,input_width,input_height], | |
Options0 = [{K,V} || {K,V} <- TranscoderOptions, is_binary(V) orelse lists:member(K,Whitelist)], | |
Options1 = Options0 ++ [{K,V} || {K,V} <- DefaultOptions, not lists:keymember(K,1,Options0)], | |
Options = Options1, | |
lager:info("Init ~s encoder with options: ~p, user: ~p", [Codec1, Options, TranscoderOptions]), | |
    case init_encoder0(<<Codec1/binary, 0>>, Options) of
        {ok, <<>>, _} when Codec == h264 ->
            {error, failed_to_init_h264_encoder};
        {ok, RawConfig, Encoder} when Codec == h264 ->
            NALs = [N || N <- binary:split(RawConfig, [<<0,0,0,1>>,<<0,0,1>>], [global]), size(N) > 0],
            Config = h264:video_config(NALs),
            SI1 = video_frame:config_to_stream_info(SI, Config),
            {ok, #enc{codec = Codec, encoder = Encoder, config = Config, stream_info = SI1}, SI1};
        {ok, <<>>, Encoder} ->
            {ok, #enc{codec = Codec, encoder = Encoder, stream_info = SI}, SI};
        {ok, RawConfig, Encoder} ->
            Config = #video_frame{content = Content, codec = Codec, flavor = config, body = RawConfig, dts = 0, pts = 0},
            SI1 = video_frame:config_to_stream_info(SI, Config),
            {ok, #enc{codec = Codec, encoder = Encoder, config = Config, stream_info = SI1}, SI1};
        {error, Error} ->
            {error, Error}
    end.
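%% A minimal usage sketch for init_encoder/1. The field values are
%% illustrative assumptions, not taken from a real stream:
%%
%%   SI = #stream_info{codec = h264, content = video, bitrate = 700,
%%                     params = #video_params{width = 640, height = 360},
%%                     options = [{transcoder_options, [{preset, <<"fast">>}]}]},
%%   {ok, #enc{} = Enc, SI1} = init_encoder(SI).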
audio_stream_options(#stream_info{params = #audio_params{sample_rate = SR, channel_layout = Layout, sample_fmt = SampleFmt}}) ->
    [{input_channel_layout, Layout}, {input_sample_fmt, SampleFmt}, {input_sample_rate, integer_to_binary(SR)}].
-spec encode(#video_frame{}, encoder()) -> [#video_frame{}] | {error, any()}.

% encode(#video_frame{codec = pcm, flavor = frame, body = PCM, dts = InDTS, next_id = #pcm{} = Info} = F, #enc{codec = Codec, encoder = Encoder}) ->
%   #pcm{rate = Rate, samples = Samples, channel_layout = ChannelLayout, sample_fmt = SampleFmt, bps = BytesPerSample} = Info,
%   ok;

encode(#video_frame{codec = InCodec, flavor = Flavor, dts = InDTS, body = Body} = F,
       #enc{codec = Codec, encoder = Encoder, stream_info =
            #stream_info{content = Content, params = Params}})
  when (Flavor == keyframe orelse Flavor == frame) andalso (InCodec == yuv orelse InCodec == pcm) ->
    Rate = case Params of
        #audio_params{sample_rate = SR} -> SR;
        _ -> undefined
    end,
    InDTS1 = case Content of
        audio -> round(InDTS*Rate / 1000);
        video -> round(InDTS)
    end,
    KFlag = case Flavor of
        keyframe -> 1;
        frame -> 0
    end,
    case encode0(Encoder, Body, InDTS1, KFlag) of
        undefined ->
            [];
        false ->
            [];
        {ok, <<>>} ->
            % nothing was produced for this input frame
            [];
        {ok, OutFlavor, OutDTS, OutPTS, Bin} when Codec == h264 ->
            NALs = [N || N <- binary:split(Bin, [<<0,0,0,1>>,<<0,0,1>>], [global]), size(N) > 0],
            % Types = [T || <<_:3, T:5, _/binary>> <- NALs],
            % OutFlavor = case lists:member(5, Types) of
            %   true -> keyframe;
            %   _ -> frame
            % end,
            H264 = iolist_to_binary([ [<<(size(NAL)):32>>, NAL] || NAL <- NALs]),
            [F#video_frame{codec = Codec, flavor = OutFlavor, body = H264, dts = OutDTS, pts = OutPTS, next_id = undefined}];
        {ok, OutFlavor, OutDTS, OutPTS, Bin} ->
            [F#video_frame{codec = Codec, flavor = OutFlavor, body = Bin, dts = OutDTS, pts = OutPTS, next_id = undefined}];
        {ok, _OutPTS, Bin} when Codec =/= h264 ->
            [F#video_frame{codec = Codec, body = Bin, next_id = undefined}];
        {ok, Packets} when is_list(Packets) andalso Content == audio ->
            [F#video_frame{codec = Codec, body = Bin, next_id = undefined, dts = PTS*1000 / Rate, pts = PTS*1000 / Rate} || {PTS, Bin} <- Packets];
        {ok, Packets} when is_list(Packets) andalso Content == video andalso Codec == h264 ->
            lists:map(fun({ok, OutFlavor, OutDTS, OutPTS, Bin}) ->
                NALs = [N || N <- binary:split(Bin, [<<0,0,0,1>>,<<0,0,1>>], [global]), size(N) > 0],
                H264 = iolist_to_binary([ [<<(size(NAL)):32>>, NAL] || NAL <- NALs]),
                F#video_frame{codec = Codec, flavor = OutFlavor, body = H264, dts = OutDTS, pts = OutPTS, next_id = undefined}
            end, Packets);
        {ok, Packets} when is_list(Packets) andalso Content == video ->
            [F#video_frame{codec = Codec, flavor = OutFlavor, body = Bin, next_id = undefined, dts = OutDTS, pts = OutPTS} || {ok, OutFlavor, OutDTS, OutPTS, Bin} <- Packets];
        {error, E} ->
            lager:error("Error ~p in encoder ~p", [E, Codec]),
            {error, E}
    end.
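%% The h264 branches above all repeat the same Annex-B to length-prefixed
%% (MP4/FLV style) conversion. A minimal sketch of that step in isolation,
%% assuming 4-byte length prefixes as used above (annexb_to_sized is an
%% illustrative name, not part of this module's API):
%%
%%   annexb_to_sized(Bin) ->
%%       NALs = [N || N <- binary:split(Bin, [<<0,0,0,1>>,<<0,0,1>>], [global]),
%%                    size(N) > 0],
%%       iolist_to_binary([[<<(size(N)):32>>, N] || N <- NALs]).
%%
%% e.g. annexb_to_sized(<<0,0,0,1, 16#65, 1,2,3>>) -> <<0,0,0,4, 16#65, 1,2,3>>.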
parse_options(S) when is_binary(S) -> parse_options(binary_to_list(S));
parse_options([]) -> [];
parse_options([{_,_}|_] = S) -> S;
parse_options(Line) ->
    Opts = string:tokens(Line, " "),
    ToI = fun
        ("copy") -> copy;
        (S) -> case re:run(S, "(\\d+)k", [{capture,all_but_first,list}]) of
            {match, [S_]} -> list_to_integer(S_)*1000;
            _ -> list_to_integer(S)
        end
    end,
    lists:map(fun(O) ->
        [K,V] = string:tokens(O, "="),
        case K of
            "vb" -> {video_bitrate, ToI(V)};
            "ab" -> {audio_bitrate, ToI(V)};
            _ -> {list_to_atom(K), list_to_binary(V)}
        end
    end, Opts).
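%% Example (a sketch; "500k" style suffixes are scaled by 1000 and "copy"
%% passes through as an atom):
%%   parse_options("vb=500k ab=64k size=640x360")
%%     -> [{video_bitrate,500000},{audio_bitrate,64000},{size,<<"640x360">>}]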
-type transcoder_option() ::
    {video_bitrate, non_neg_integer() | copy} |
    {audio_bitrate, non_neg_integer()} |
    {atom(), binary()}.
-spec parse_configuration(InputMI::media_info(), [transcoder_option()]) -> {ok, media_info()}.
parse_configuration(#media_info{streams = IStreams0} = InputMI, Options0) ->
    Options = parse_options(Options0),
    IStreams = [S || #stream_info{content = C} = S <- IStreams0, C == audio orelse C == video],
    VStreams = parse_video_config(Options, lists:keyfind(video, #stream_info.content, IStreams), undefined),
    % bitrates above 10000 are treated as bit/s and converted to kbit/s
    ABitrate = case proplists:get_value(audio_bitrate, Options) of
        Num1 when is_integer(Num1), Num1 > 10000 -> Num1 div 1000;
        Num1 -> Num1
    end,
    ACodec = proplists:get_value(audio_codec, Options, aac),
    AStreams = lists:flatmap(fun
        (#stream_info{content = video}) ->
            [];
        (#stream_info{content = audio, codec = AC, options = SOpts, track_id = T, params = #audio_params{} = AParams} = SI) ->
            [SI#stream_info{bitrate = ABitrate, codec = case ACodec of copy -> AC; _ -> ACodec end,
                params = AParams#audio_params{
                    channels = case AParams#audio_params.channels of
                        1 -> 1;
                        _ -> 2
                    end
                },
                options = SOpts ++ [{source_track_id,T}]}];
        (#stream_info{} = SI) ->
            [SI]
    end, IStreams),
    OStreams1 = VStreams ++ AStreams,
    OStreams2 = lists:zipwith(fun(N,S) -> S#stream_info{track_id = N} end, lists:seq(1, length(OStreams1)), OStreams1),
    OutputMI = InputMI#media_info{streams = OStreams2},
    {ok, OutputMI}.
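%% Usage sketch (values illustrative): transcode video to 700 kbit/s H.264 at
%% 640x360 and audio to 64 kbit/s; a "vb=700k ab=64k size=640x360" string is
%% accepted as well, via parse_options/1:
%%   {ok, OutputMI} = parse_configuration(InputMI,
%%       [{video_bitrate,700000},{audio_bitrate,64000},{size,<<"640x360">>}]).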
parse_video_config([{audio_bitrate,_}|Options], Input, Output) ->
    parse_video_config(Options, Input, Output);
parse_video_config([], _Input, undefined) -> [];
parse_video_config([], _Input, Output) -> [Output];
parse_video_config([{video_bitrate, _}|_] = Options, Input, #stream_info{} = Output) ->
    [Output|parse_video_config(Options, Input, undefined)];
parse_video_config([{video_bitrate, copy}|Options], #stream_info{options = Opts, track_id = T} = I, undefined) ->
    parse_video_config(Options, I, I#stream_info{options = Opts ++ [{source_track_id,T},{copy,true}]});
parse_video_config([{video_bitrate, VB}|Options], #stream_info{track_id = T, options = Opts} = I, undefined) ->
    parse_video_config(Options, I, I#stream_info{bitrate = VB div 1000, codec = h264, options = Opts ++ [{source_track_id,T}]});
parse_video_config([{size, S}|Options], I, #stream_info{params = #video_params{} = Params} = O) ->
    [W_, H_] = binary:split(S, <<"x">>),
    W = binary_to_integer(W_),
    H = binary_to_integer(H_),
    parse_video_config(Options, I, O#stream_info{params = Params#video_params{width = W, height = H}});
parse_video_config([{K, V}|Options], I, #stream_info{options = Opts} = O) when is_atom(K), is_binary(V) ->
    parse_video_config(Options, I, O#stream_info{options = tc_opt(Opts, [{K,V}])});
parse_video_config([_|Options], I, O) ->
    parse_video_config(Options, I, O).

tc_opt(Options, TCOpts) ->
    TC1 = proplists:get_value(transcoder_options, Options, []),
    lists:keystore(transcoder_options, 1, Options, {transcoder_options, TC1 ++ TCOpts}).
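%% tc_opt/2 appends per-stream transcoder options, creating the
%% transcoder_options entry on first use:
%%   tc_opt([], [{preset,<<"fast">>}])
%%     -> [{transcoder_options,[{preset,<<"fast">>}]}]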
-spec configure_stream(InputMI::media_info(), [transcoder_option()]) -> {ok, transcoder(), media_info()}.
configure_stream(#media_info{options = InputOpts} = InputMI, Options) ->
    {ok, OutputMI} = parse_configuration(InputMI, Options),
    Deinterlace = case proplists:get_value(deinterlace, Options) of
        undefined -> [];
        _ -> [{deinterlace,true}]
    end,
    {ok, TC, OutputMI2} = avcodec:init_stream(InputMI#media_info{options = InputOpts ++ Deinterlace}, OutputMI),
    {ok, TC, OutputMI2}.
-record(stream, {
    media_info,
    buffer,
    inputs = [],
    outputs = []
}).

-record(input, {
    n,
    frame_duration,
    first_pts,
    track,
    decoder,
    stream_info,
    previous
}).
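%% "yadif=0:-1:0" below is the libavfilter deinterlacer configured as: mode 0
%% (one output frame per input frame), parity -1 (auto-detect field order),
%% deint 0 (deinterlace all frames). The trailing 0 byte NUL-terminates the
%% filter string for the C side.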
init_decoder_for_stream(SI, true) ->
    init_decoder(SI, <<"yadif=0:-1:0", 0>>);
init_decoder_for_stream(SI, _) ->
    init_decoder(SI).
-spec init_stream(Input::media_info(), Output::media_info()) -> {ok, transcoder(), media_info()}.
init_stream(#media_info{streams = IStreams, options = InputOpts}, #media_info{streams = OStreams} = OutMI) ->
    Inputs0 = [begin
        {ok, Dec, SI1} = init_decoder_for_stream(SI, proplists:get_value(deinterlace, InputOpts)),
        FrameDuration = if
            Content == video -> 40; % 25 FPS
            Codec == aac -> 1024*1000 / SI1#stream_info.params#audio_params.sample_rate;
            Codec == ac3 -> SI1#stream_info.params#audio_params.samples*1000 / SI1#stream_info.params#audio_params.sample_rate;
            Codec == mp2a -> SI1#stream_info.params#audio_params.samples*1000 / SI1#stream_info.params#audio_params.sample_rate;
            Codec == mp3 -> SI1#stream_info.params#audio_params.samples*1000 / SI1#stream_info.params#audio_params.sample_rate;
            true -> undefined
        end,
        #input{track = Track, decoder = Dec, stream_info = SI1, frame_duration = FrameDuration}
    end || #stream_info{track_id = Track, content = Content, codec = Codec} = SI <- IStreams, Content == video orelse Content == audio],
    % We look at the source_track_id option in case we need stream remapping,
    % i.e. when we want to encode one input video to several outputs.
    % So we may have several outputs sharing one source_track_id.
    Outputs = [begin
        InputTrackId = proplists:get_value(source_track_id, SOptions, Track),
        case proplists:get_value(copy, SOptions) of
            true ->
                {InputTrackId, copy, Track, SI};
            _ ->
                #input{stream_info = InputSI} = lists:keyfind(InputTrackId, #input.track, Inputs0),
                SI2 = SI#stream_info{options = SOptions ++ [{source,InputSI}]},
                MD = lager:md(),
                case proc_lib:start_link(?MODULE, init_output_worker, [self(), SI2, MD]) of
                    {ok, Worker, OutputSI} ->
                        {InputTrackId, Worker, Track, OutputSI};
                    {error, E} ->
                        error({E, SI2})
                end
        end
    end || #stream_info{track_id = Track, options = SOptions} = SI <- OStreams],
    % drop decoders whose track is not referenced by any output
    Inputs1 = [Input || #input{track = Track} = Input <- Inputs0, lists:keyfind(Track, 1, Outputs) =/= false],
    OStreams2 = [begin
        {_, _, _, OutputSI} = lists:keyfind(Track, 3, Outputs),
        OutputSI
    end || #stream_info{track_id = Track} <- OStreams],
    OutMI2 = OutMI#media_info{streams = OStreams2},
    {ok, #stream{inputs = Inputs1, outputs = Outputs, media_info = OutMI2}, OutMI2}.
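%% Frame duration arithmetic above, in milliseconds: an AAC frame always
%% carries 1024 samples, so at 44100 Hz it is 1024*1000/44100 ~ 23.22 ms and
%% at 48000 Hz ~ 21.33 ms. For AC-3/MP2/MP3 the samples-per-frame count comes
%% from the demuxer via #audio_params.samples.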
init_output_worker(Parent, SI2, MD) ->
    lager:md(MD),
    erlang:monitor(process, Parent),
    case init_encoder(SI2) of
        {ok, Encoder, OutputSI} ->
            proc_lib:init_ack({ok, self(), OutputSI}),
            proc_lib:hibernate(?MODULE, encoder_loop, [Parent, Encoder]);
        {error, E} ->
            proc_lib:init_ack({error, E})
    end.
encoder_loop(Parent, Encoder) ->
    receive
        #video_frame{} = Raw ->
            try encode(Raw, Encoder) of
                {error, EncodeError} ->
                    lager:error("Encoder worker is failing with error: ~p", [EncodeError]),
                    error(EncodeError);
                Reply -> Parent ! {self(), Reply}
            catch
                C:E:ST ->
                    lager:error("Encoder worker failed: ~p:~p\n~p", [C, E, ST]),
                    exit(normal)
            end;
        {'DOWN', _, _, Parent, Reason} ->
            lager:info("encoder is exiting due to parent death: ~p", [Reason]),
            exit(normal);
        Else ->
            lager:info("Bad message to worker: ~p", [Else])
    end,
    proc_lib:hibernate(?MODULE, encoder_loop, [Parent, Encoder]).
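%% Protocol sketch, as driven from encode_raw_frames/3 below: the parent sends
%% a raw #video_frame{} to the worker pid and blocks on {WorkerPid, Reply};
%% a monitor on the worker turns a crash during encoding into an error in the
%% caller instead of a deadlock.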
-spec feed_stream(video_frame(), transcoder()) -> {ok, [video_frame()], transcoder()}.
feed_stream(#video_frame{track_id = Track} = F, #stream{outputs = Outputs} = Stream) ->
    case [{Enc, OutTrack} || {T, Enc, OutTrack, _} <- Outputs, T == Track] of
        [{copy, OutTrack}] ->
            buffer_frames([F#video_frame{track_id = OutTrack}], Stream);
        _ ->
            feed_stream0(F, Stream)
    end;
feed_stream([#video_frame{} = F|Frames], #stream{} = Stream) ->
    {ok, Out, Stream2} = feed_stream(F, Stream),
    {ok, OFrames, Stream3} = feed_stream(Frames, Stream2),
    {ok, Out ++ OFrames, Stream3};
feed_stream([], #stream{} = Stream) ->
    {ok, [], Stream}.
feed_stream0(#video_frame{} = F, #stream{} = Stream) ->
    case decode_stream(F, Stream) of
        {ok, RawFrames, Stream1} ->
            % put(last_input, {F#video_frame.codec, round(F#video_frame.dts)}),
            encode_raw_frames(RawFrames, F, Stream1);
        {error, _, Stream1} ->
            {ok, [], Stream1}
    end.
encode_raw_frames([], _, Stream1) ->
    {ok, [], Stream1};
encode_raw_frames([#video_frame{track_id = Track} = Raw0|RawFrames], #video_frame{} = F, #stream{outputs = Outputs} = Stream1) ->
    % case get(frog) of
    %   undefined ->
    %     #stream_info{params = #video_params{width = W, height = H}} =
    %       lists:keyfind(Track, #stream_info.track_id, Stream#stream.media_info#media_info.streams),
    %     {ok, Fr0} = frog:init(W, H, 0.5),
    %     put(frog, Fr0);
    %   _ -> ok
    % end,
    % Zones = frog:detect(Raw0, get(frog)),
    % Raw = frog:highlight(Zones, Raw0),
    %% draw(<<"test 123. !@#$%^&*(){}\\\"<>?+',./=-_;:">>, {10, 10}, Raw0),
    %% draw(<<"abcdefghijklmnopqrstuvxyz">>, {10, 20}, Raw0),
    %% draw(<<"ABCDEFGHIJKLMNOPQRSTUVXYZ">>, {10, 30}, Raw0),
    Raw = Raw0,
    Encoders = [{Enc, OutTrack} || {T, Enc, OutTrack, _} <- Outputs, T == Track],
    [Enc ! Raw || {Enc, _} <- Encoders, is_pid(Enc)],
    OutputFrames = lists:flatmap(fun
        ({copy, OutTrack}) ->
            [F#video_frame{track_id = OutTrack}];
        ({Enc, OutTrack}) ->
            Ref = erlang:monitor(process, Enc),
            Encoded = receive
                {Enc, Encoded_} -> Encoded_;
                {'DOWN', Ref, process, Enc, Reason} -> error({encoder_failed, OutTrack, Reason})
            end,
            erlang:demonitor(Ref, [flush]),
            case Encoded of
                #video_frame{} = Out -> [Out#video_frame{track_id = OutTrack}];
                [] -> [];
                Out when is_list(Out) ->
                    % case F#video_frame.codec of
                    %   h264 ->
                    %     lager:info("Stream: (~p) -> ~p -> ~p", [round(F#video_frame.dts), round(Raw0#video_frame.dts), [round(O#video_frame.dts) || O <- Out]]);
                    %   _ -> ok
                    % end,
                    [OF#video_frame{track_id = OutTrack} || OF <- Out];
                undefined -> [];
                {error, E} -> lager:info("Encoder on track ~p got error ~p", [OutTrack, E]), []
            end
    end, Encoders),
    {ok, OutFrames2, Stream2} = buffer_frames(OutputFrames, Stream1),
    % lager:info("~p ~p(~p) -> ~p -> ~p", [get(last_input), Raw0#video_frame.codec, round(Raw0#video_frame.dts),
    %   [{C, round(D)} || #video_frame{dts = D, codec = C} <- OutputFrames],
    %   [{C, round(D)} || #video_frame{dts = D, codec = C} <- OutFrames2]
    % ]),
    {ok, OutFrames3, Stream3} = encode_raw_frames(RawFrames, F, Stream2),
    {ok, OutFrames2 ++ OutFrames3, Stream3};
encode_raw_frames(#video_frame{} = Raw, #video_frame{} = F, Stream) ->
    encode_raw_frames([Raw], F, Stream).
-spec decode_stream(#video_frame{}, #stream{}) -> {ok, [#video_frame{}], #stream{}} | {error, any(), #stream{}}.
decode_stream(#video_frame{track_id = Track} = F, #stream{inputs = Inputs} = Stream) ->
    case lists:keyfind(Track, #input.track, Inputs) of
        false ->
            {error, no_stream, Stream};
        #input{decoder = Dec, previous = Prev, n = N, frame_duration = Duration, first_pts = FPTS} = Input ->
            case decode(F, Dec) of
                [#video_frame{content = video}|_] = Raw ->
                    {ok, Raw, Stream#stream{inputs = lists:keystore(Track, #input.track, Inputs, Input#input{previous = Raw})}};
                [#video_frame{content = audio, dts = DTS} = Raw] when Prev == undefined ->
                    Silence = Raw#video_frame{body = binary:copy(<<0>>, size(Raw#video_frame.body))},
                    Input1 = Input#input{previous = Silence, n = 1, first_pts = DTS},
                    {ok, [Raw], Stream#stream{inputs = lists:keystore(Track, #input.track, Inputs, Input1)}};
                [#video_frame{content = audio, dts = DTS0} = Raw] ->
                    DTS1 = if
                        is_number(Duration) -> N*Duration + FPTS;
                        true -> DTS0
                    end,
                    Threshold = 10,
                    if
                        Duration == undefined orelse DTS0 - DTS1 =< Threshold*Duration ->
                            Input1 = Input#input{n = N+1},
                            {ok, [Raw#video_frame{dts = DTS1, pts = DTS1}], Stream#stream{inputs = lists:keystore(Track, #input.track, Inputs, Input1)}};
                        DTS0 - DTS1 > Threshold*5*Duration ->
                            lager:info("reset audio sync"),
                            Input1 = Input#input{n = 1, first_pts = DTS0},
                            {ok, [Raw#video_frame{dts = DTS0, pts = DTS0}], Stream#stream{inputs = lists:keystore(Track, #input.track, Inputs, Input1)}};
                        DTS0 - DTS1 > Threshold*Duration ->
                            Output = [Raw#video_frame{dts = DTS1, pts = DTS1},
                                Prev#video_frame{dts = DTS1 + Duration, pts = DTS1 + Duration},
                                Prev#video_frame{dts = DTS1 + 2*Duration, pts = DTS1 + 2*Duration}],
                            lager:info("compensating audio drop with 2 silence frames. dts=~p, waiting for=~p", [round(DTS0), round(DTS1)]),
                            Input1 = Input#input{n = N+3},
                            {ok, Output, Stream#stream{inputs = lists:keystore(Track, #input.track, Inputs, Input1)}};
                        true ->
                            error({strange_timestamps, DTS0, DTS1, N, Duration, FPTS})
                    end;
                [#video_frame{}|_] = Raw ->
                    {ok, Raw, Stream};
                [] ->
                    {ok, [], Stream};
                % {error, _} when Prev =/= undefined ->
                %   lager:debug("Failed to decode frame ~s(~B) ~B, compensating", [F#video_frame.codec, Track, round(F#video_frame.dts)]),
                %   {ok, Prev#video_frame{dts = F#video_frame.dts, pts = F#video_frame.pts}, Stream};
                {error, not_avdecoded} ->
                    {error, no_frame, Stream};
                {error, _E} ->
                    lager:debug("Failed to decode frame ~s(~B) ~B: ~p", [F#video_frame.codec, Track, round(F#video_frame.dts), _E]),
                    {error, failed_to_decode, Stream};
                undefined ->
                    {error, no_frame, Stream}
            end
    end.
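%% Drift arithmetic sketch (Threshold = 10): with 48 kHz AAC, Duration is
%% about 21.33 ms, so a gap of up to ~213 ms (Threshold*Duration) is silently
%% re-stamped to the ideal clock N*Duration + FirstPTS; a gap above ~1067 ms
%% (Threshold*5*Duration) resets the sync; anything in between is padded with
%% two silence frames cloned from the previously decoded frame.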
buffer_frames([], Stream) ->
    {ok, [], Stream};
buffer_frames(OutputFrames, #stream{buffer = undefined} = Stream) ->
    buffer_frames(OutputFrames, Stream#stream{buffer = frame_queue:init()});
buffer_frames(OutputFrames, #stream{buffer = Buffer} = Stream) ->
    {Frames, Buffer1} = frame_queue:push(OutputFrames, Buffer),
    {ok, Frames, Stream#stream{buffer = Buffer1}}.
close(#thumbnailer{decoder = D, encoder = E}) ->
    close(D),
    close(E),
    ok;
close(Port) ->
    close0_nif(Port).

close0_nif(_Port) -> erlang:nif_error(not_implemented).

init_decoder0(_Config, _Codec) -> erlang:nif_error(not_implemented).
init_decoder0(_Config, _Codec, _Spec) -> erlang:nif_error(not_implemented).

description(Decoder) -> Decoder.
decode0_nif(_Decoder, _Encoded, _DTS, _PTS) -> erlang:nif_error(not_implemented).

decode0(Decoder, Encoded, DTS, PTS) ->
    case decode0_nif(Decoder, Encoded, DTS, PTS) of
        {ref, Ref} ->
            receive
                {Ref, Reply} -> Reply
            after
                3000 -> error(decode_timeout)
            end;
        Reply ->
            Reply
    end.
init_encoder0(_Codec, _Opts) -> erlang:nif_error(not_implemented).
encode0_nif(_Encoder, _Body, _DTS, _IsKeyframe) -> erlang:nif_error(not_implemented).

encode0(Encoder, Body, DTS, IsKeyframe) when is_number(DTS) ->
    case encode0_nif(Encoder, Body, round(DTS), IsKeyframe) of
        {ref, Ref} ->
            receive
                {Ref, Reply} -> Reply
            after
                3000 -> error(encode_timeout)
            end;
        Reply ->
            Reply
    end.
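%% Both decode0/4 and encode0/4 follow the same async-NIF pattern: the NIF
%% queues the request on its worker thread and returns {ref, Ref}; the result
%% is delivered to the calling process as {Ref, Reply}. The 3-second receive
%% timeout turns a stuck worker thread into a crash instead of a hang.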
draw_char0_nif(_Body, _X, _Y, _W, _H, _Str) -> erlang:nif_error(not_implemented).

draw_char0(Body, {X, Y}, {W, H}, Str) ->
    draw_char0_nif(Body, X, Y, W, H, Str).