Commit cd991462 authored by Anton Khirnov

lavfi: add error handling to filter_samples().

parent 8d18bc55
......@@ -311,9 +311,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
if (s->next_pts != AV_NOPTS_VALUE)
s->next_pts += nb_samples;
ff_filter_samples(outlink, out_buf);
return 0;
return ff_filter_samples(outlink, out_buf);
}
/**
......@@ -454,31 +452,37 @@ static int request_frame(AVFilterLink *outlink)
return output_frame(outlink, available_samples);
}
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
MixContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int i;
int i, ret = 0;
for (i = 0; i < ctx->nb_inputs; i++)
if (ctx->inputs[i] == inlink)
break;
if (i >= ctx->nb_inputs) {
av_log(ctx, AV_LOG_ERROR, "unknown input link\n");
return;
ret = AVERROR(EINVAL);
goto fail;
}
if (i == 0) {
int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
outlink->time_base);
frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts);
ret = frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts);
if (ret < 0)
goto fail;
}
av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
buf->audio->nb_samples);
ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
buf->audio->nb_samples);
fail:
avfilter_unref_buffer(buf);
return ret;
}
static int init(AVFilterContext *ctx, const char *args)
......
......@@ -136,18 +136,18 @@ static int request_frame(AVFilterLink *link)
avresample_convert(s->avr, (void**)buf->extended_data, buf->linesize[0],
nb_samples, NULL, 0, 0);
buf->pts = s->pts;
ff_filter_samples(link, buf);
return 0;
return ff_filter_samples(link, buf);
}
return ret;
}
static void write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf)
static int write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf)
{
avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data,
buf->linesize[0], buf->audio->nb_samples);
int ret = avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data,
buf->linesize[0], buf->audio->nb_samples);
avfilter_unref_buffer(buf);
return ret;
}
/* get amount of data currently buffered, in samples */
......@@ -156,7 +156,7 @@ static int64_t get_delay(ASyncContext *s)
return avresample_available(s->avr) + avresample_get_delay(s->avr);
}
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
ASyncContext *s = ctx->priv;
......@@ -164,7 +164,7 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
int out_size;
int out_size, ret;
int64_t delta;
/* buffer data until we get the first timestamp */
......@@ -172,14 +172,12 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
if (pts != AV_NOPTS_VALUE) {
s->pts = pts - get_delay(s);
}
write_to_fifo(s, buf);
return;
return write_to_fifo(s, buf);
}
/* now wait for the next timestamp */
if (pts == AV_NOPTS_VALUE) {
write_to_fifo(s, buf);
return;
return write_to_fifo(s, buf);
}
/* when we have two timestamps, compute how many samples would we have
......@@ -202,8 +200,10 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
if (out_size > 0) {
AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
out_size);
if (!buf_out)
return;
if (!buf_out) {
ret = AVERROR(ENOMEM);
goto fail;
}
avresample_read(s->avr, (void**)buf_out->extended_data, out_size);
buf_out->pts = s->pts;
......@@ -212,7 +212,9 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
av_samples_set_silence(buf_out->extended_data, out_size - delta,
delta, nb_channels, buf->format);
}
ff_filter_samples(outlink, buf_out);
ret = ff_filter_samples(outlink, buf_out);
if (ret < 0)
goto fail;
s->got_output = 1;
} else {
av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
......@@ -223,9 +225,13 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
avresample_read(s->avr, NULL, avresample_available(s->avr));
s->pts = pts - avresample_get_delay(s->avr);
avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data,
buf->linesize[0], buf->audio->nb_samples);
ret = avresample_convert(s->avr, NULL, 0, 0, (void**)buf->extended_data,
buf->linesize[0], buf->audio->nb_samples);
fail:
avfilter_unref_buffer(buf);
return ret;
}
AVFilter avfilter_af_asyncts = {
......
......@@ -313,7 +313,7 @@ static int channelmap_query_formats(AVFilterContext *ctx)
return 0;
}
static void channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
......@@ -330,8 +330,10 @@ static void channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *b
if (nch_out > FF_ARRAY_ELEMS(buf->data)) {
uint8_t **new_extended_data =
av_mallocz(nch_out * sizeof(*buf->extended_data));
if (!new_extended_data)
return;
if (!new_extended_data) {
avfilter_unref_buffer(buf);
return AVERROR(ENOMEM);
}
if (buf->extended_data == buf->data) {
buf->extended_data = new_extended_data;
} else {
......@@ -353,7 +355,7 @@ static void channelmap_filter_samples(AVFilterLink *inlink, AVFilterBufferRef *b
memcpy(buf->data, buf->extended_data,
FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0]));
ff_filter_samples(outlink, buf);
return ff_filter_samples(outlink, buf);
}
static int channelmap_config_input(AVFilterLink *inlink)
......
......@@ -110,24 +110,29 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
int i;
int i, ret = 0;
for (i = 0; i < ctx->nb_outputs; i++) {
AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE);
if (!buf_out)
return;
if (!buf_out) {
ret = AVERROR(ENOMEM);
break;
}
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
buf_out->audio->channel_layout =
av_channel_layout_extract_channel(buf->audio->channel_layout, i);
ff_filter_samples(ctx->outputs[i], buf_out);
ret = ff_filter_samples(ctx->outputs[i], buf_out);
if (ret < 0)
break;
}
avfilter_unref_buffer(buf);
return ret;
}
AVFilter avfilter_af_channelsplit = {
......
......@@ -92,7 +92,7 @@ static const AVClass join_class = {
.version = LIBAVUTIL_VERSION_INT,
};
static void filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = link->dst;
JoinContext *s = ctx->priv;
......@@ -104,6 +104,8 @@ static void filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
av_assert0(i < ctx->nb_inputs);
av_assert0(!s->input_frames[i]);
s->input_frames[i] = buf;
return 0;
}
static int parse_maps(AVFilterContext *ctx)
......@@ -468,11 +470,11 @@ static int join_request_frame(AVFilterLink *outlink)
priv->nb_in_buffers = ctx->nb_inputs;
buf->buf->priv = priv;
ff_filter_samples(outlink, buf);
ret = ff_filter_samples(outlink, buf);
memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs);
return 0;
return ret;
fail:
avfilter_unref_buffer(buf);
......
......@@ -157,21 +157,21 @@ static int request_frame(AVFilterLink *outlink)
}
buf->pts = s->next_pts;
ff_filter_samples(outlink, buf);
return 0;
return ff_filter_samples(outlink, buf);
}
return ret;
}
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
AVFilterContext *ctx = inlink->dst;
ResampleContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int ret;
if (s->avr) {
AVFilterBufferRef *buf_out;
int delay, nb_samples, ret;
int delay, nb_samples;
/* maximum possible samples lavr can output */
delay = avresample_get_delay(s->avr);
......@@ -180,10 +180,19 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
AV_ROUND_UP);
buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
if (!buf_out) {
ret = AVERROR(ENOMEM);
goto fail;
}
ret = avresample_convert(s->avr, (void**)buf_out->extended_data,
buf_out->linesize[0], nb_samples,
(void**)buf->extended_data, buf->linesize[0],
buf->audio->nb_samples);
if (ret < 0) {
avfilter_unref_buffer(buf_out);
goto fail;
}
av_assert0(!avresample_available(s->avr));
......@@ -209,14 +218,18 @@ static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *buf)
s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
ff_filter_samples(outlink, buf_out);
ret = ff_filter_samples(outlink, buf_out);
s->got_output = 1;
}
fail:
avfilter_unref_buffer(buf);
} else {
ff_filter_samples(outlink, buf);
ret = ff_filter_samples(outlink, buf);
s->got_output = 1;
}
return ret;
}
AVFilter avfilter_af_resample = {
......
......@@ -19,7 +19,10 @@
#include "avfilter.h"
#include "internal.h"
static void null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref) { }
/* Audio input callback for the anullsink filter: accept a buffer of samples
 * and do nothing with it.
 * NOTE(review): samplesref is not unreferenced here — presumably the
 * framework's ff_filter_samples()/pad machinery owns the final unref for a
 * sink pad at this point in the API; confirm against libavfilter/audio.c.
 * Returns 0 (success) unconditionally, matching the new int-returning
 * filter_samples contract introduced by this commit. */
static int null_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
return 0;
}
AVFilter avfilter_asink_anullsink = {
.name = "anullsink",
......
......@@ -146,15 +146,15 @@ fail:
return NULL;
}
static void default_filter_samples(AVFilterLink *link,
AVFilterBufferRef *samplesref)
static int default_filter_samples(AVFilterLink *link,
AVFilterBufferRef *samplesref)
{
ff_filter_samples(link->dst->outputs[0], samplesref);
return ff_filter_samples(link->dst->outputs[0], samplesref);
}
void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
{
void (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
int (*filter_samples)(AVFilterLink *, AVFilterBufferRef *);
AVFilterPad *dst = link->dstpad;
AVFilterBufferRef *buf_out;
......@@ -185,6 +185,6 @@ void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref)
} else
buf_out = samplesref;
filter_samples(link, buf_out);
return filter_samples(link, buf_out);
}
......@@ -49,7 +49,10 @@ AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
* @param samplesref a reference to the buffer of audio samples being sent. The
* receiving filter will free this reference when it no longer
* needs it or pass it on to the next filter.
*
* @return >= 0 on success, a negative AVERROR on error. The receiving filter
* is responsible for unreferencing samplesref in case of error.
*/
void ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
int ff_filter_samples(AVFilterLink *link, AVFilterBufferRef *samplesref);
#endif /* AVFILTER_AUDIO_H */
......@@ -288,8 +288,12 @@ struct AVFilterPad {
* and should do its processing.
*
* Input audio pads only.
*
* @return >= 0 on success, a negative AVERROR on error. This function
* must ensure that samplesref is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
void (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
int (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
/**
* Frame poll callback. This returns the number of immediately available
......
......@@ -56,6 +56,12 @@ static void start_frame(AVFilterLink *link, AVFilterBufferRef *buf)
link->cur_buf = NULL;
};
/* Audio input callback for the abuffersink pad, added because the
 * filter_samples callback now must return int: delegate to start_frame()
 * and report success.
 * NOTE(review): start_frame()'s body is not fully visible in this hunk —
 * presumably it stores buf for later retrieval by av_buffersink_read();
 * confirm against the full buffersink source. */
static int filter_samples(AVFilterLink *link, AVFilterBufferRef *buf)
{
start_frame(link, buf);
return 0;
}
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
BufferSinkContext *s = ctx->priv;
......@@ -160,7 +166,7 @@ AVFilter avfilter_asink_abuffer = {
.inputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_samples = start_frame,
.filter_samples = filter_samples,
.min_perms = AV_PERM_READ,
.needs_fifo = 1 },
{ .name = NULL }},
......
......@@ -312,6 +312,7 @@ static int request_frame(AVFilterLink *link)
{
BufferSourceContext *c = link->src->priv;
AVFilterBufferRef *buf;
int ret = 0;
if (!av_fifo_size(c->fifo)) {
if (c->eof)
......@@ -327,7 +328,7 @@ static int request_frame(AVFilterLink *link)
ff_end_frame(link);
break;
case AVMEDIA_TYPE_AUDIO:
ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
ret = ff_filter_samples(link, avfilter_ref_buffer(buf, ~0));
break;
default:
return AVERROR(EINVAL);
......@@ -335,7 +336,7 @@ static int request_frame(AVFilterLink *link)
avfilter_unref_buffer(buf);
return 0;
return ret;
}
static int poll_frame(AVFilterLink *link)
......
......@@ -72,13 +72,25 @@ static av_cold void uninit(AVFilterContext *ctx)
avfilter_unref_buffer(fifo->buf_out);
}
static void add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
FifoContext *fifo = inlink->dst->priv;
fifo->last->next = av_mallocz(sizeof(Buf));
if (!fifo->last->next) {
avfilter_unref_buffer(buf);
return AVERROR(ENOMEM);
}
fifo->last = fifo->last->next;
fifo->last->buf = buf;
return 0;
}
/* Video start_frame adapter for the fifo filter: the shared add_to_queue()
 * helper now returns int (it can fail with AVERROR(ENOMEM)), but the video
 * start_frame callback is still declared void at this point in the API, so
 * the error code is deliberately dropped here.
 * NOTE(review): on add_to_queue() failure the buffer has already been
 * unreferenced inside the helper, so nothing leaks — but the error itself
 * is silently lost on the video path. */
static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
add_to_queue(inlink, buf);
}
static void queue_pop(FifoContext *s)
......@@ -210,15 +222,13 @@ static int return_audio_frame(AVFilterContext *ctx)
buf_out = s->buf_out;
s->buf_out = NULL;
}
ff_filter_samples(link, buf_out);
return 0;
return ff_filter_samples(link, buf_out);
}
static int request_frame(AVFilterLink *outlink)
{
FifoContext *fifo = outlink->src->priv;
int ret;
int ret = 0;
if (!fifo->root.next) {
if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
......@@ -238,7 +248,7 @@ static int request_frame(AVFilterLink *outlink)
if (outlink->request_samples) {
return return_audio_frame(outlink->src);
} else {
ff_filter_samples(outlink, fifo->root.next->buf);
ret = ff_filter_samples(outlink, fifo->root.next->buf);
queue_pop(fifo);
}
break;
......@@ -246,7 +256,7 @@ static int request_frame(AVFilterLink *outlink)
return AVERROR(EINVAL);
}
return 0;
return ret;
}
AVFilter avfilter_vf_fifo = {
......@@ -261,7 +271,7 @@ AVFilter avfilter_vf_fifo = {
.inputs = (AVFilterPad[]) {{ .name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer= ff_null_get_video_buffer,
.start_frame = add_to_queue,
.start_frame = start_frame,
.draw_slice = draw_slice,
.end_frame = end_frame,
.rej_perms = AV_PERM_REUSE2, },
......
......@@ -111,8 +111,12 @@ struct AVFilterPad {
* and should do its processing.
*
* Input audio pads only.
*
* @return >= 0 on success, a negative AVERROR on error. This function
* must ensure that samplesref is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
void (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
int (*filter_samples)(AVFilterLink *link, AVFilterBufferRef *samplesref);
/**
* Frame poll callback. This returns the number of immediately available
......
......@@ -110,15 +110,19 @@ AVFilter avfilter_vf_split = {
.outputs = (AVFilterPad[]) {{ .name = NULL}},
};
static void filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
static int filter_samples(AVFilterLink *inlink, AVFilterBufferRef *samplesref)
{
AVFilterContext *ctx = inlink->dst;
int i;
int i, ret = 0;
for (i = 0; i < ctx->nb_outputs; i++)
ff_filter_samples(inlink->dst->outputs[i],
avfilter_ref_buffer(samplesref, ~AV_PERM_WRITE));
for (i = 0; i < ctx->nb_outputs; i++) {
ret = ff_filter_samples(inlink->dst->outputs[i],
avfilter_ref_buffer(samplesref, ~AV_PERM_WRITE));
if (ret < 0)
break;
}
avfilter_unref_buffer(samplesref);
return ret;
}
AVFilter avfilter_af_asplit = {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment