/*
 * avconv main
 * Copyright (c) 2000-2011 The libav developers.
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include <ctype.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <limits.h>
#include <stdint.h>

#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavresample/avresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"
#include "libavutil/parseutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/fifo.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/avstring.h"
#include "libavutil/libm.h"
#include "libavutil/imgutils.h"
#include "libavutil/time.h"
#include "libavformat/os_support.h"

# include "libavfilter/avfilter.h"
# include "libavfilter/buffersrc.h"
# include "libavfilter/buffersink.h"

#if HAVE_SYS_RESOURCE_H
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#elif HAVE_GETPROCESSTIMES
#include <windows.h>
#endif
#if HAVE_GETPROCESSMEMORYINFO
#include <windows.h>
#include <psapi.h>
#endif

#if HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#if HAVE_PTHREADS
#include <pthread.h>
#endif

#include <time.h>

#include "avconv.h"
#include "cmdutils.h"

#include "libavutil/avassert.h"

const char program_name[] = "avconv";
const int program_birth_year = 2000;

static FILE *vstats_file;

static int nb_frames_drop = 0;



#if HAVE_PTHREADS
/* signal to input threads that they should exit; set by the main thread */
static int transcoding_finished;
#endif

#define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"

InputStream **input_streams = NULL;
int        nb_input_streams = 0;
InputFile   **input_files   = NULL;
int        nb_input_files   = 0;

OutputStream **output_streams = NULL;
int         nb_output_streams = 0;
OutputFile   **output_files   = NULL;
int         nb_output_files   = 0;

FilterGraph **filtergraphs;
int        nb_filtergraphs;

static void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "");
}

static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;

static void
sigterm_handler(int sig)
{
    received_sigterm = sig;
    received_nb_signals++;
    term_exit();
}

static void term_init(void)
{
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
}

static int decode_interrupt_cb(void *ctx)
{
    return received_nb_signals > 1;
}

const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };

static void avconv_cleanup(int ret)
{
    int i, j;

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        AVFormatContext *s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE) && s->pb)
            avio_close(s->pb);
        avformat_free_context(s);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
        while (bsfc) {
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
            bsfc = next;
        }
        ost->bitstream_filters = NULL;
        av_frame_free(&ost->filtered_frame);

        av_parser_close(ost->parser);

        av_freep(&ost->forced_keyframes);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        avcodec_free_context(&ost->enc_ctx);

        av_freep(&output_streams[i]);
    }
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file)
        fclose(vstats_file);
    av_free(vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
               (int) received_sigterm);
        exit (255);
    }
}

void assert_avoptions(AVDictionary *m)
{
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}

static void abort_codec_experimental(AVCodec *c, int encoder)
{
    const char *codec_string = encoder ? "encoder" : "decoder";
    AVCodec *codec;
    av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
            "results.\nAdd '-strict experimental' if you want to use it.\n",
            codec_string, c->name);
    codec = encoder ? avcodec_find_encoder(c->id) : avcodec_find_decoder(c->id);
    if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
        av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
               codec_string, codec->name);
    exit_program(1);
}

/*
 * Update the requested input sample format based on the output sample format.
 * This is currently only used to request float output from decoders which
 * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
 * Ideally this will be removed in the future when decoders do not do format
 * conversion and only output in their native format.
 */
static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
                              AVCodecContext *enc)
{
    /* if sample formats match or a decoder sample format has already been
       requested, just return */
    if (enc->sample_fmt == dec->sample_fmt ||
        dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
        return;

    /* if decoder supports more than one output format */
    if (dec_codec && dec_codec->sample_fmts &&
        dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
        dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
        const enum AVSampleFormat *p;
        int min_dec = INT_MAX, min_inc = INT_MAX;
        enum AVSampleFormat dec_fmt = AV_SAMPLE_FMT_NONE;
        enum AVSampleFormat inc_fmt = AV_SAMPLE_FMT_NONE;

        /* find a matching sample format in the encoder */
        for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
            if (*p == enc->sample_fmt) {
                dec->request_sample_fmt = *p;
                return;
            } else {
                enum AVSampleFormat dfmt = av_get_packed_sample_fmt(*p);
                enum AVSampleFormat efmt = av_get_packed_sample_fmt(enc->sample_fmt);
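                /* rank candidates by the distance between their packed
                 * variants (a planar/packed mismatch adds a small penalty),
                 * keeping the closest format above and below the encoder
                 * format */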
                int fmt_diff = 32 * abs(dfmt - efmt);
                if (av_sample_fmt_is_planar(*p) !=
                    av_sample_fmt_is_planar(enc->sample_fmt))
                    fmt_diff++;
                if (dfmt == efmt) {
                    min_inc = fmt_diff;
                    inc_fmt = *p;
                } else if (dfmt > efmt) {
                    if (fmt_diff < min_inc) {
                        min_inc = fmt_diff;
                        inc_fmt = *p;
                    }
                } else {
                    if (fmt_diff < min_dec) {
                        min_dec = fmt_diff;
                        dec_fmt = *p;
                    }
                }
            }
        }

        /* if none match, provide the one closest in quality */
        dec->request_sample_fmt = min_inc != INT_MAX ? inc_fmt : dec_fmt;
    }
}

static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
{
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    AVCodecContext          *avctx = ost->enc_ctx;
    int ret;

    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames) {
            av_free_packet(pkt);
            return;
        }
        ost->frame_number++;
    }

    while (bsfc) {
        AVPacket new_pkt = *pkt;
        int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
        if (a > 0) {
            av_free_packet(pkt);
            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                           av_buffer_default_free, NULL, 0);
            if (!new_pkt.buf)
                exit_program(1);
        } else if (a < 0) {
            av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
                   bsfc->filter->name, pkt->stream_index,
                   avctx->codec ? avctx->codec->name : "copy");
            print_error("", a);
            if (exit_on_error)
                exit_program(1);
        }
        *pkt = new_pkt;

        bsfc = bsfc->next;
    }

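    /* The muxer requires monotonically increasing DTS (non-decreasing for
     * formats with AVFMT_TS_NONSTRICT); offending packets are warned about
     * and clamped to last_mux_dts + 1 below instead of being dropped. */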
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
        ost->last_mux_dts != AV_NOPTS_VALUE &&
        pkt->dts < ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT)) {
        av_log(NULL, AV_LOG_WARNING, "Non-monotonous DTS in output stream "
               "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
               ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
        if (exit_on_error) {
            av_log(NULL, AV_LOG_FATAL, "aborting.\n");
            exit_program(1);
        }
        av_log(NULL, AV_LOG_WARNING, "changing to %"PRId64". This may result "
               "in incorrect timestamps in the output file.\n",
               ost->last_mux_dts + 1);
        pkt->dts = ost->last_mux_dts + 1;
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts = FFMAX(pkt->pts, pkt->dts);
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;
    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        exit_program(1);
    }
}

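/*
 * Check whether the stream has reached its output file's recording time
 * (the -t option); if so, mark it as finished. Returns 0 when encoding
 * should stop for this stream, 1 otherwise.
 */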
static int check_recording_time(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        ost->finished = 1;
        return 0;
    }
    return 1;
}

static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int got_packet = 0;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

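    /* Without a usable pts (or with audio sync disabled) fall back to the
     * running count in sync_opts, which tracks the next expected pts in
     * samples, i.e. in the audio encoder's time base. */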
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;

    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
        exit_program(1);
    }

    if (got_packet) {
        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts      = av_rescale_q(pkt.pts,      enc->time_base, ost->st->time_base);
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts      = av_rescale_q(pkt.dts,      enc->time_base, ost->st->time_base);
        if (pkt.duration > 0)
            pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);

        write_frame(s, &pkt, ost);
    }
}

static void do_subtitle_out(AVFormatContext *s,
                            OutputStream *ost,
                            InputStream *ist,
                            AVSubtitle *sub,
                            int64_t pts)
{
    static uint8_t *subtitle_out = NULL;
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;

    if (pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
    }

    /* Note: DVB subtitles need one packet to draw them and another
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    for (i = 0; i < nb; i++) {
        ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
        // start_display_time is required to be 0
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time  -= sub->start_display_time;
        sub->start_display_time = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
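            /* start/end_display_time are in milliseconds; the factor of 90
             * converts them to 90 kHz ticks, assuming the usual MPEG-TS
             * style 1/90000 stream time base for DVB subtitles. */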
            if (i == 0)
                pkt.pts += 90 * sub->start_display_time;
            else
                pkt.pts += 90 * sub->end_display_time;
        }
        write_frame(s, &pkt, ost);
    }
}

static void do_video_out(AVFormatContext *s,
                         OutputStream *ost,
                         AVFrame *in_picture,
                         int *frame_size)
{
    int ret, format_video_sync;
    AVPacket pkt;
    AVCodecContext *enc = ost->enc_ctx;

    *frame_size = 0;

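    /* Resolve automatic vsync: passthrough for formats without timestamps,
     * VFR for variable-fps formats, CFR otherwise. In any mode other than
     * passthrough, frames whose pts lags the stream clock are dropped. */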
    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO)
        format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
                            (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;
    if (format_video_sync != VSYNC_PASSTHROUGH &&
        ost->frame_number &&
        in_picture->pts != AV_NOPTS_VALUE &&
        in_picture->pts < ost->sync_opts) {
        nb_frames_drop++;
        av_log(NULL, AV_LOG_WARNING,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, in_picture->pts);
        return;
    }

    if (in_picture->pts == AV_NOPTS_VALUE)
        in_picture->pts = ost->sync_opts;
    ost->sync_opts = in_picture->pts;


    if (!ost->frame_number)
        ost->first_pts = in_picture->pts;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (ost->frame_number >= ost->max_frames)
        return;

    if (s->oformat->flags & AVFMT_RAWPICTURE &&
        enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
        /* raw pictures are written as AVPicture structure to
           avoid any copies. We support temporarily the older
           method. */
        enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
        enc->coded_frame->top_field_first  = in_picture->top_field_first;
        pkt.data   = (uint8_t *)in_picture;
        pkt.size   =  sizeof(AVPicture);
        pkt.pts    = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
        pkt.flags |= AV_PKT_FLAG_KEY;

        write_frame(s, &pkt, ost);
    } else {
        int got_packet;

        if (enc->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME) &&
            ost->top_field_first >= 0)
            in_picture->top_field_first = !!ost->top_field_first;

        in_picture->quality = enc->global_quality;
        if (!enc->me_threshold)
            in_picture->pict_type = 0;
        if (ost->forced_kf_index < ost->forced_kf_count &&
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
            in_picture->pict_type = AV_PICTURE_TYPE_I;
            ost->forced_kf_index++;
        }

        ost->frames_encoded++;

        ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
            exit_program(1);
        }

        if (got_packet) {
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
            if (pkt.dts != AV_NOPTS_VALUE)
                pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);

            write_frame(s, &pkt, ost);
            *frame_size = pkt.size;

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
        }
    }
    ost->sync_opts++;
    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    ost->frame_number++;
}

static double psnr(double d)
{
    return -10.0 * log(d) / log(10.0);
}

static void do_video_stats(OutputStream *ost, int frame_size)
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->frame_number;
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
        if (enc->flags&CODEC_FLAG_PSNR)
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = ost->sync_opts * av_q2d(enc->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01;

        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
    }
}

/*
 * Read one frame of lavfi output for ost and encode it.
 */
static int poll_filter(OutputStream *ost)
{
    OutputFile    *of = output_files[ost->file_index];
    AVFrame *filtered_frame = NULL;
    int frame_size, ret;

    if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
        return AVERROR(ENOMEM);
    }
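    /* Pull one frame from the filtergraph sink. Audio encoders without
     * variable frame size support must receive exactly frame_size samples
     * per call, so request that many from the buffersink. */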
    filtered_frame = ost->filtered_frame;

    if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
        !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
        ret = av_buffersink_get_samples(ost->filter->filter, filtered_frame,
                                         ost->enc_ctx->frame_size);
    else
        ret = av_buffersink_get_frame(ost->filter->filter, filtered_frame);

    if (ret < 0)
        return ret;

    if (filtered_frame->pts != AV_NOPTS_VALUE) {
        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        filtered_frame->pts = av_rescale_q(filtered_frame->pts,
                                           ost->filter->filter->inputs[0]->time_base,
                                           ost->enc_ctx->time_base) -
                              av_rescale_q(start_time,
                                           AV_TIME_BASE_Q,
                                           ost->enc_ctx->time_base);
    }

    switch (ost->filter->filter->inputs[0]->type) {
    case AVMEDIA_TYPE_VIDEO:
        if (!ost->frame_aspect_ratio)
            ost->enc_ctx->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

        do_video_out(of->ctx, ost, filtered_frame, &frame_size);
        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);
        break;
    case AVMEDIA_TYPE_AUDIO:
        do_audio_out(of->ctx, ost, filtered_frame);
        break;
    default:
        // TODO support subtitle filters
        av_assert0(0);
    }

    av_frame_unref(filtered_frame);

    return 0;
}

/*
 * Read as many frames as possible from lavfi and encode them.
 *
 * Always read from the active stream with the lowest timestamp. If no frames
 * are available for it then return EAGAIN and wait for more input. This way we
 * can use lavfi sources that generate an unlimited amount of frames without memory
 * usage exploding.
 */
static int poll_filters(void)
{
    int i, j, ret = 0;

    while (ret >= 0 && !received_sigterm) {
        OutputStream *ost = NULL;
        int64_t min_pts = INT64_MAX;

        /* choose output stream with the lowest timestamp */
        for (i = 0; i < nb_output_streams; i++) {
            int64_t pts = output_streams[i]->sync_opts;

            if (!output_streams[i]->filter || output_streams[i]->finished)
                continue;

            pts = av_rescale_q(pts, output_streams[i]->enc_ctx->time_base,
                               AV_TIME_BASE_Q);
            if (pts < min_pts) {
                min_pts = pts;
                ost = output_streams[i];
            }
        }

        if (!ost)
            break;

        ret = poll_filter(ost);

        if (ret == AVERROR_EOF) {
            OutputFile *of = output_files[ost->file_index];

            ost->finished = 1;

            if (of->shortest) {
                for (j = 0; j < of->ctx->nb_streams; j++)
                    output_streams[of->ost_index + j]->finished = 1;
            }

            ret = 0;
        } else if (ret == AVERROR(EAGAIN))
            return 0;
    }

    return ret;
}

static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            default:                 other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size  += ost->data_size;
    }

    if (data_size && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "\n");
    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->filename);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->filename);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
}

static void print_report(int is_last_report, int64_t timer_start)
{
    char buf[1024];
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate, ti1, pts;
    static int64_t last_time = -1;
    static int qp_histogram[52];

    if (!print_stats && !is_last_report)
        return;

    if (!is_last_report) {
        int64_t cur_time;
        /* display the report every 0.5 seconds */
        cur_time = av_gettime();
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);
    if (total_size < 0) {
        char errbuf[128];
        av_strerror(total_size, errbuf, sizeof(errbuf));
        av_log(NULL, AV_LOG_VERBOSE, "Bitrate not available, "
               "avio_tell() failed: %s\n", errbuf);
        total_size = 0;
    }

    buf[0] = '\0';
    ti1 = 1e10;
    vid = 0;
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy && enc->coded_frame)
            q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float t = (av_gettime() - timer_start) / 1000000.0;

            frame_number = ost->frame_number;
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
                     frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
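            /* with -qphist, collect a histogram of quantizers and print it
             * as one hex digit (log2 of the count) per QP value */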
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
            }
            if (enc->flags&CODEC_FLAG_PSNR) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = enc->coded_frame->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
                }
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
            }
            vid = 1;
        }
        /* compute min output value */
        pts = (double)ost->last_mux_dts * av_q2d(ost->st->time_base);
        if ((pts < ti1) && (pts > 0))
            ti1 = pts;
    }
    if (ti1 < 0.01)
        ti1 = 0.01;

    bitrate = (double)(total_size * 8) / ti1 / 1000.0;

    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
            "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
            (double)total_size / 1024, ti1, bitrate);

    if (nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " drop=%d",
                 nb_frames_drop);

    av_log(NULL, AV_LOG_INFO, "%s    \r", buf);

    fflush(stderr);

    if (is_last_report)
        print_final_stats(total_size);

}

static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        AVFormatContext *os = output_files[ost->file_index]->ctx;
        int stop_encoding = 0;

        if (!ost->encoding_needed)
            continue;

        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;

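        /* Drain the encoder: keep feeding it NULL frames and muxing the
         * returned packets until it signals that no more output is left. */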
        for (;;) {
            int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
            const char *desc;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                encode = avcodec_encode_audio2;
                desc   = "Audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                encode = avcodec_encode_video2;
                desc   = "Video";
                break;
            default:
                stop_encoding = 1;
            }

            if (encode) {
                AVPacket pkt;
                int got_packet;
                av_init_packet(&pkt);
                pkt.data = NULL;
                pkt.size = 0;

                ret = encode(enc, &pkt, NULL, &got_packet);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
                    exit_program(1);
                }
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
                if (!got_packet) {
                    stop_encoding = 1;
                    break;
                }
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
                if (pkt.duration > 0)
                    pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);
                write_frame(os, &pkt, ost);
            }

            if (stop_encoding)
                break;
        }
    }
}

/*
 * Check whether a packet from ist should be written into ost at this time
 */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;

    if (ost->source_index != ist_index)
        return 0;

    if (of->start_time != AV_NOPTS_VALUE && ist->last_dts < of->start_time)
        return 0;

    return 1;
}

static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
    AVPacket opkt;

    av_init_packet(&opkt);

    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    if (of->recording_time != INT64_MAX &&
        ist->last_dts >= of->recording_time + start_time) {
        ost->finished = 1;
        return;
    }

    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE)
            start_time += f->start_time;
        if (ist->last_dts >= f->recording_time + start_time) {
            ost->finished = 1;
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;

    // FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
    if (  ost->enc_ctx->codec_id != AV_CODEC_ID_H264
       && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
       ) {
        if (av_parser_change(ost->parser, ost->st->codec,
                             &opkt.data, &opkt.size,
                             pkt->data, pkt->size,
                             pkt->flags & AV_PKT_FLAG_KEY)) {
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
            if (!opkt.buf)
                exit_program(1);
        }
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }

    write_frame(of->ctx, &opkt, ost);
}

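/*
 * If the decoder did not report a channel layout, fall back to the default
 * layout for the reported channel count and warn about the guess.
 * Returns 0 if no default layout exists, 1 otherwise.
 */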
int guess_input_channel_layout(InputStream *ist)
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        dec->channel_layout = av_get_default_channel_layout(dec->channels);
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}

static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    AVCodecContext *avctx = ist->dec_ctx;
    int i, ret, err = 0, resample_changed;

    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    if (!*got_output || ret < 0) {
        if (!pkt->size) {
            for (i = 0; i < ist->nb_filters; i++)
                av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
        }
        return ret;
    }

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        ist->next_dts = decoded_frame->pts;
    else if (pkt->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = pkt->pts;
    pkt->pts           = AV_NOPTS_VALUE;

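    /* Detect mid-stream changes of sample format, rate, channel count or
     * layout; any change means the filtergraphs fed by this stream have to
     * be reconfigured below. */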
    resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                       ist->resample_channels       != avctx->channels               ||
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
                       ist->resample_sample_rate    != decoded_frame->sample_rate;
    if (resample_changed) {
        char layout1[64], layout2[64];

        if (!guess_input_channel_layout(ist)) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,
                   ist->st->index);
            exit_program(1);
        }
        decoded_frame->channel_layout = avctx->channel_layout;

        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
                                     ist->resample_channel_layout);
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                     decoded_frame->channel_layout);

        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_sample_rate,  av_get_sample_fmt_name(ist->resample_sample_fmt),
               ist->resample_channels, layout1,
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
               avctx->channels, layout2);

        ist->resample_sample_fmt     = decoded_frame->format;
        ist->resample_sample_rate    = decoded_frame->sample_rate;
        ist->resample_channel_layout = decoded_frame->channel_layout;
        ist->resample_channels       = avctx->channels;

        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist) &&
                configure_filtergraph(filtergraphs[i]) < 0) {
                av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                exit_program(1);
            }
    }

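    /* Audio frames are fed to the filtergraph with pts expressed in a
     * 1/sample_rate time base, so rescale from the stream time base here. */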
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_q(decoded_frame->pts,
                                          ist->st->time_base,
                                          (AVRational){1, avctx->sample_rate});
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;

        err = av_buffersrc_add_frame(ist->filters[i]->filter, f);
        if (err < 0)
            break;
    }

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
1256 1257
}

static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    int i, ret = 0, err = 0, resample_changed;

    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    ret = avcodec_decode_video2(ist->dec_ctx,
                                decoded_frame, got_output, pkt);
    if (!*got_output || ret < 0) {
        if (!pkt->size) {
            for (i = 0; i < ist->nb_filters; i++)