summaryrefslogtreecommitdiff
path: root/ffmpeg.c (plain)
blob: 4b4dae47fec918edf80fd6fd472cc27254e51d4d
1/*
2 * Copyright (c) 2000-2003 Fabrice Bellard
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21/**
22 * @file
23 * multimedia converter based on the FFmpeg libraries
24 */
25
26#include "config.h"
27#include <ctype.h>
28#include <string.h>
29#include <math.h>
30#include <stdlib.h>
31#include <errno.h>
32#include <limits.h>
33#include <stdatomic.h>
34#include <stdint.h>
35
36#if HAVE_IO_H
37#include <io.h>
38#endif
39#if HAVE_UNISTD_H
40#include <unistd.h>
41#endif
42
43#include "libavformat/avformat.h"
44#include "libavdevice/avdevice.h"
45#include "libswresample/swresample.h"
46#include "libavutil/opt.h"
47#include "libavutil/channel_layout.h"
48#include "libavutil/parseutils.h"
49#include "libavutil/samplefmt.h"
50#include "libavutil/fifo.h"
51#include "libavutil/hwcontext.h"
52#include "libavutil/internal.h"
53#include "libavutil/intreadwrite.h"
54#include "libavutil/dict.h"
55#include "libavutil/display.h"
56#include "libavutil/mathematics.h"
57#include "libavutil/pixdesc.h"
58#include "libavutil/avstring.h"
59#include "libavutil/libm.h"
60#include "libavutil/imgutils.h"
61#include "libavutil/timestamp.h"
62#include "libavutil/bprint.h"
63#include "libavutil/time.h"
64#include "libavutil/threadmessage.h"
65#include "libavcodec/mathops.h"
66#include "libavformat/os_support.h"
67
68# include "libavfilter/avfilter.h"
69# include "libavfilter/buffersrc.h"
70# include "libavfilter/buffersink.h"
71
72#if HAVE_SYS_RESOURCE_H
73#include <sys/time.h>
74#include <sys/types.h>
75#include <sys/resource.h>
76#elif HAVE_GETPROCESSTIMES
77#include <windows.h>
78#endif
79#if HAVE_GETPROCESSMEMORYINFO
80#include <windows.h>
81#include <psapi.h>
82#endif
83#if HAVE_SETCONSOLECTRLHANDLER
84#include <windows.h>
85#endif
86
87
88#if HAVE_SYS_SELECT_H
89#include <sys/select.h>
90#endif
91
92#if HAVE_TERMIOS_H
93#include <fcntl.h>
94#include <sys/ioctl.h>
95#include <sys/time.h>
96#include <termios.h>
97#elif HAVE_KBHIT
98#include <conio.h>
99#endif
100
101#if HAVE_PTHREADS
102#include <pthread.h>
103#endif
104
105#include <time.h>
106
107#include "ffmpeg.h"
108#include "cmdutils.h"
109
110#include "libavutil/avassert.h"
111
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* File receiving per-frame encoding statistics; opened when -vstats /
 * -vstats_file is used (closed in ffmpeg_cleanup()). */
static FILE *vstats_file;

/* Variable names usable in the -force_key_frames expression.
 * Must stay NULL-terminated. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};

/* Forward declarations for helpers defined later in this file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);
static int ifilter_has_all_input_formats(FilterGraph *fg);
130
131static int run_as_daemon = 0;
132static int nb_frames_dup = 0;
133static unsigned dup_warning = 1000;
134static int nb_frames_drop = 0;
135static int64_t decode_error_stat[2];
136
137static int want_sdp = 1;
138
139static int current_time;
140AVIOContext *progress_avio = NULL;
141
142static uint8_t *subtitle_out;
143
144InputStream **input_streams = NULL;
145int nb_input_streams = 0;
146InputFile **input_files = NULL;
147int nb_input_files = 0;
148
149OutputStream **output_streams = NULL;
150int nb_output_streams = 0;
151OutputFile **output_files = NULL;
152int nb_output_files = 0;
153
154FilterGraph **filtergraphs;
155int nb_filtergraphs;
156
#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;  /* terminal state saved by term_init() */
static int restore_tty;        /* nonzero once oldtty holds a valid saved state */
#endif

#if HAVE_PTHREADS
static void free_input_threads(void);
#endif
167
168/* sub2video hack:
169 Convert subtitles to video with alpha to insert them in filter graphs.
170 This is a temporary solution until libavfilter gets real subtitles support.
171 */
172
173static int sub2video_get_blank_frame(InputStream *ist)
174{
175 int ret;
176 AVFrame *frame = ist->sub2video.frame;
177
178 av_frame_unref(frame);
179 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
180 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
181 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
182 if ((ret = av_frame_get_buffer(frame, 32)) < 0)
183 return ret;
184 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
185 return 0;
186}
187
188static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
189 AVSubtitleRect *r)
190{
191 uint32_t *pal, *dst2;
192 uint8_t *src, *src2;
193 int x, y;
194
195 if (r->type != SUBTITLE_BITMAP) {
196 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
197 return;
198 }
199 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201 r->x, r->y, r->w, r->h, w, h
202 );
203 return;
204 }
205
206 dst += r->y * dst_linesize + r->x * 4;
207 src = r->data[0];
208 pal = (uint32_t *)r->data[1];
209 for (y = 0; y < r->h; y++) {
210 dst2 = (uint32_t *)dst;
211 src2 = src;
212 for (x = 0; x < r->w; x++)
213 *(dst2++) = pal[*(src2++)];
214 dst += dst_linesize;
215 src += r->linesize[0];
216 }
217}
218
219static void sub2video_push_ref(InputStream *ist, int64_t pts)
220{
221 AVFrame *frame = ist->sub2video.frame;
222 int i;
223
224 av_assert1(frame->data[0]);
225 ist->sub2video.last_pts = frame->pts = pts;
226 for (i = 0; i < ist->nb_filters; i++)
227 av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
228 AV_BUFFERSRC_FLAG_KEEP_REF |
229 AV_BUFFERSRC_FLAG_PUSH);
230}
231
232void sub2video_update(InputStream *ist, AVSubtitle *sub)
233{
234 AVFrame *frame = ist->sub2video.frame;
235 int8_t *dst;
236 int dst_linesize;
237 int num_rects, i;
238 int64_t pts, end_pts;
239
240 if (!frame)
241 return;
242 if (sub) {
243 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
244 AV_TIME_BASE_Q, ist->st->time_base);
245 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
246 AV_TIME_BASE_Q, ist->st->time_base);
247 num_rects = sub->num_rects;
248 } else {
249 pts = ist->sub2video.end_pts;
250 end_pts = INT64_MAX;
251 num_rects = 0;
252 }
253 if (sub2video_get_blank_frame(ist) < 0) {
254 av_log(ist->dec_ctx, AV_LOG_ERROR,
255 "Impossible to get a blank canvas.\n");
256 return;
257 }
258 dst = frame->data [0];
259 dst_linesize = frame->linesize[0];
260 for (i = 0; i < num_rects; i++)
261 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
262 sub2video_push_ref(ist, pts);
263 ist->sub2video.end_pts = end_pts;
264}
265
/*
 * Called whenever a packet is read from the file containing ist.  Re-pushes
 * the current sub2video frame for every sub2video stream of the same file
 * whose buffer sources have failed frame requests, so a filter (e.g. overlay)
 * waiting for subtitle input does not stall the graph.
 */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
            continue;  /* not a sub2video stream */
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        /* refresh the canvas if the current subtitle window expired or no
           frame was ever rendered */
        if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
            sub2video_update(ist2, NULL);
        /* only push when at least one downstream filter actually asked for
           a frame and did not get one */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
294
295static void sub2video_flush(InputStream *ist)
296{
297 int i;
298
299 if (ist->sub2video.end_pts < INT64_MAX)
300 sub2video_update(ist, NULL);
301 for (i = 0; i < ist->nb_filters; i++)
302 av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
303}
304
305/* end of sub2video hack */
306
/* Restore the terminal attributes saved by term_init().  Must remain
 * async-signal-safe (no allocation, no locking): it is called from
 * sigterm_handler(). */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
314
/* Normal-path terminal restore.  The empty QUIET log call appears intended to
 * finalize any pending log output before the tty state changes — behavior
 * depends on the installed log callback (NOTE: inferred, not verified here). */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    term_exit_sigsafe();
}
320
/* Signal/termination state.  The volatile ints are written from the signal
 * handler; transcode_init_done is also read from decode_interrupt_cb(), hence
 * atomic. */
static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;
static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
static volatile int ffmpeg_exited = 0;
static int main_return_code = 0;
326
327static void
328sigterm_handler(int sig)
329{
330 received_sigterm = sig;
331 received_nb_signals++;
332 term_exit_sigsafe();
333 if(received_nb_signals > 3) {
334 write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
335 strlen("Received > 3 system signals, hard exiting\n"));
336
337 exit(123);
338 }
339}
340
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the same
 * sigterm_handler() path used for POSIX signals. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
372
/* Install signal handlers and, when stdin interaction is enabled, put the
 * terminal into a raw-ish mode so read_key() can see single key presses. */
void term_init(void)
{
#if HAVE_TERMIOS_H
    if (!run_as_daemon && stdin_interaction) {
        struct termios tty;
        if (tcgetattr (0, &tty) == 0) {
            oldtty = tty;       /* saved state, restored by term_exit_sigsafe() */
            restore_tty = 1;

            /* disable input translation, flow control, echo and canonical
               mode; reads return after one byte with no timeout */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                            |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
406
/* Read a key without blocking.  Returns the character read, or -1 when no
 * input is available (or, on Windows, when the input pipe has been closed).
 * POSIX path polls stdin via select(); Windows path uses PeekNamedPipe for
 * piped stdin and kbhit() for a real console. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    /* zero timeout: select() only polls, never blocks */
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;  /* 0 on EOF, negative on error */
    }
#elif HAVE_KBHIT
#    if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    /* lazily detect whether stdin is a console or a pipe */
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
#    endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
458
459static int decode_interrupt_cb(void *ctx)
460{
461 return received_nb_signals > atomic_load(&transcode_init_done);
462}
463
/* Interrupt callback handed to libavformat contexts; no opaque data needed. */
const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
465
/*
 * Global teardown, run on program exit.  Frees filtergraphs (draining their
 * queued frames/subtitles), output files and streams (draining buffered
 * packets), input files and streams, and the misc global buffers.  `ret` is
 * the exit status being reported; it only affects the final log message.
 */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            /* free any frames still queued for this filter input */
            while (av_fifo_size(fg->inputs[j]->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_free(fg->inputs[j]->frame_queue);
            if (fg->inputs[j]->ist->sub2video.sub_queue) {
                /* likewise for subtitles queued by the sub2video hack */
                while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    av_fifo_generic_read(fg->inputs[j]->ist->sub2video.sub_queue,
                                         &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                av_fifo_free(fg->inputs[j]->ist->sub2video.sub_queue);
            }
            av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]->formats);
            av_freep(&fg->outputs[j]->channel_layouts);
            av_freep(&fg->outputs[j]->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        AVFormatContext *s;
        if (!of)
            continue;
        s = of->ctx;
        /* only formats that opened an AVIOContext need it closed */
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        avformat_free_context(s);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);
        av_freep(&ost->bsf_extradata_updated);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_parser_close(ost->parser);
        avcodec_free_context(&ost->parser_avctx);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        if (ost->muxing_queue) {
            /* unref packets that never reached the muxer */
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
            av_fifo_freep(&ost->muxing_queue);
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_PTHREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;  /* lets the Windows CtrlHandler stop stalling */
}
622
623void remove_avoptions(AVDictionary **a, AVDictionary *b)
624{
625 AVDictionaryEntry *t = NULL;
626
627 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
628 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
629 }
630}
631
632void assert_avoptions(AVDictionary *m)
633{
634 AVDictionaryEntry *t;
635 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
636 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
637 exit_program(1);
638 }
639}
640
/* Terminate after a codec was refused for being experimental; the parameters
 * identify the offending codec/direction but are currently unused here. */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
645
646static void update_benchmark(const char *fmt, ...)
647{
648 if (do_benchmark_all) {
649 int64_t t = getutime();
650 va_list va;
651 char buf[1024];
652
653 if (fmt) {
654 va_start(va, fmt);
655 vsnprintf(buf, sizeof(buf), fmt, va);
656 va_end(va);
657 av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
658 }
659 current_time = t;
660 }
661}
662
663static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
664{
665 int i;
666 for (i = 0; i < nb_output_streams; i++) {
667 OutputStream *ost2 = output_streams[i];
668 ost2->finished |= ost == ost2 ? this_stream : others;
669 }
670}
671
/*
 * Last step before the muxer: enforce the -frames limit, buffer packets while
 * the muxer header is not yet written, sanitize timestamps (DTS <= PTS,
 * monotonically increasing DTS) and hand the packet to
 * av_interleaved_write_frame().  Always consumes/unrefs pkt.
 * `unqueue` is nonzero when the packet is being replayed from
 * ost->muxing_queue, so it must not be counted a second time.
 */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* grow the queue, but never past -max_muxing_queue_size */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Too many packets buffered for output stream %d:%d.\n",
                       ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_ref(&tmp_pkt, pkt);
        if (ret < 0)
            exit_program(1);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        av_packet_unref(pkt);
        return;
    }

    /* these sync modes deliberately discard timestamps */
    if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* pull encoder quality/error stats out of the packet side data */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                              NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        /* for CFR output the duration is fully determined by the frame rate */
        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* DTS must never exceed PTS; replace both by a middle value */
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* median of {pts, dts, last_mux_dts + 1}: sum minus min minus max */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* enforce (strictly, unless TS_NONSTRICT) increasing DTS */
        if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
                av_get_media_type_string(ost->enc_ctx->codec_type),
                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
                pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    }
    av_packet_unref(pkt);
}
806
807static void close_output_stream(OutputStream *ost)
808{
809 OutputFile *of = output_files[ost->file_index];
810
811 ost->finished |= ENCODER_FINISHED;
812 if (of->shortest) {
813 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
814 of->recording_time = FFMIN(of->recording_time, end);
815 }
816}
817
/*
 * Run pkt through the stream's bitstream filter chain (if any) and pass the
 * result(s) to write_packet().  On filter error the packet is reported lost
 * and, with -xerror, the program aborts.
 */
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
{
    int ret = 0;

    /* apply the output bitstream filters, if any */
    if (ost->nb_bitstream_filters) {
        int idx;

        ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
        if (ret < 0)
            goto finish;

        /* idx = position in the chain currently being drained; the loop
           walks down the chain feeding each filter's output to the next,
           and steps back up on EAGAIN */
        idx = 1;
        while (idx) {
            /* get a packet from the previous filter up the chain */
            ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
            if (ret == AVERROR(EAGAIN)) {
                ret = 0;
                idx--;
                continue;
            } else if (ret < 0)
                goto finish;
            /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
             * the api states this shouldn't happen after init(). Propagate it here to the
             * muxer and to the next filters in the chain to workaround this.
             * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
             * par_out->extradata and adapt muxers accordingly to get rid of this. */
            if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
                ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
                if (ret < 0)
                    goto finish;
                ost->bsf_extradata_updated[idx - 1] |= 1;
            }

            /* send it to the next filter down the chain or to the muxer */
            if (idx < ost->nb_bitstream_filters) {
                /* HACK/FIXME! - See above */
                if (!(ost->bsf_extradata_updated[idx] & 2)) {
                    ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
                    if (ret < 0)
                        goto finish;
                    ost->bsf_extradata_updated[idx] |= 2;
                }
                ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
                if (ret < 0)
                    goto finish;
                idx++;
            } else
                write_packet(of, pkt, ost, 0);
        }
    } else
        write_packet(of, pkt, ost, 0);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if(exit_on_error)
            exit_program(1);
    }
}
879
880static int check_recording_time(OutputStream *ost)
881{
882 OutputFile *of = output_files[ost->file_index];
883
884 if (of->recording_time != INT64_MAX &&
885 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
886 AV_TIME_BASE_Q) >= 0) {
887 close_output_stream(ost);
888 return 0;
889 }
890 return 1;
891}
892
893static void do_audio_out(OutputFile *of, OutputStream *ost,
894 AVFrame *frame)
895{
896 AVCodecContext *enc = ost->enc_ctx;
897 AVPacket pkt;
898 int ret;
899
900 av_init_packet(&pkt);
901 pkt.data = NULL;
902 pkt.size = 0;
903
904 if (!check_recording_time(ost))
905 return;
906
907 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
908 frame->pts = ost->sync_opts;
909 ost->sync_opts = frame->pts + frame->nb_samples;
910 ost->samples_encoded += frame->nb_samples;
911 ost->frames_encoded++;
912
913 av_assert0(pkt.size || !pkt.data);
914 update_benchmark(NULL);
915 if (debug_ts) {
916 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
917 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
918 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
919 enc->time_base.num, enc->time_base.den);
920 }
921
922 ret = avcodec_send_frame(enc, frame);
923 if (ret < 0)
924 goto error;
925
926 while (1) {
927 ret = avcodec_receive_packet(enc, &pkt);
928 if (ret == AVERROR(EAGAIN))
929 break;
930 if (ret < 0)
931 goto error;
932
933 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
934
935 av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
936
937 if (debug_ts) {
938 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
939 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
940 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
941 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
942 }
943
944 output_packet(of, &pkt, ost);
945 }
946
947 return;
948error:
949 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
950 exit_program(1);
951}
952
/*
 * Encode one AVSubtitle on ost and mux the resulting packet(s).  DVB
 * subtitles are encoded twice: once to draw the rectangles and once (with
 * num_rects forced to 0) to clear them at end_display_time.
 */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared scratch buffer for encoded subtitles */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* second DVB pass: encode an empty subtitle to clear the screen */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;  /* restore caller's struct */
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost);
    }
}
1035
/**
 * Apply framerate conversion (video sync) to one filtered frame and encode
 * the result, writing every packet the encoder returns to the muxer.
 *
 * Depending on the selected vsync mode, the frame may be dropped
 * (nb_frames == 0) or duplicated (nb_frames > 1) to hit the target
 * timestamps; a copy of the last frame is kept in ost->last_frame for
 * duplication on the next call.
 *
 * @param of           output file the stream belongs to
 * @param ost          video output stream being encoded
 * @param next_picture frame to encode, or NULL to signal end of stream (flush)
 * @param sync_ipts    frame pts expressed in enc->time_base units, kept as a
 *                     double for sub-tick precision (AV_NOPTS_VALUE on flush)
 */
static void do_video_out(OutputFile *of,
                         OutputStream *ost,
                         AVFrame *next_picture,
                         double sync_ipts)
{
    int ret, format_video_sync;
    AVPacket pkt;
    AVCodecContext *enc = ost->enc_ctx;
    AVCodecParameters *mux_par = ost->st->codecpar;
    AVRational frame_rate;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    int frame_size = 0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* Nominal duration of one output frame, in encoder time base units. */
    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* With no filters in between, prefer the input frame's own duration
     * (rescaled from the input stream time base) when it is usable. */
    if (!ost->filters_script &&
        !ost->filters &&
        next_picture &&
        ist &&
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing
        /* estimate how often the last frame should be repeated from the
         * median of the last three nb0_frames values */
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
    } else {
        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
        delta = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
        nb_frames = 1;

        /* resolve VSYNC_AUTO into a concrete sync mode based on the muxer */
        format_video_sync = video_sync_method;
        if (format_video_sync == VSYNC_AUTO) {
            if(!strcmp(of->ctx->oformat->name, "avi")) {
                format_video_sync = VSYNC_VFR;
            } else
                format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
            if (   ist
                && format_video_sync == VSYNC_CFR
                && input_files[ist->file_index]->ctx->nb_streams == 1
                && input_files[ist->file_index]->input_ts_offset == 0) {
                format_video_sync = VSYNC_VSCFR;
            }
            if (format_video_sync == VSYNC_CFR && copy_ts) {
                format_video_sync = VSYNC_VSCFR;
            }
        }
        ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

        /* Negative drift with positive delta: absorb the drift into this
         * frame's duration instead of dropping/duplicating. */
        if (delta0 < 0 &&
            delta > 0 &&
            format_video_sync != VSYNC_PASSTHROUGH &&
            format_video_sync != VSYNC_DROP) {
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
            } else
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
            sync_ipts = ost->sync_opts;
            duration += delta0;
            delta0 = 0;
        }

        switch (format_video_sync) {
        case VSYNC_VSCFR:
            /* skip initial duplication: start output at the first input ts */
            if (ost->frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                delta = duration;
                delta0 = 0;
                ost->sync_opts = lrint(sync_ipts);
            }
            /* fallthrough */
        case VSYNC_CFR:
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
                nb_frames = 0;
            } else if (delta < -1.1)
                nb_frames = 0;
            else if (delta > 1.1) {
                nb_frames = lrintf(delta);
                if (delta0 > 1.1)
                    nb0_frames = lrintf(delta0 - 0.6);
            }
            break;
        case VSYNC_VFR:
            if (delta <= -0.6)
                nb_frames = 0;
            else if (delta > 0.6)
                ost->sync_opts = lrint(sync_ipts);
            break;
        case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = lrint(sync_ipts);
            break;
        default:
            av_assert0(0);
        }
    }

    /* never emit more frames than -frames/max_frames still allows */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* shift the nb0_frames history (used by the flush estimate above) */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        nb_frames_drop++;
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    }
    /* account for duplicated frames and warn when duplication explodes */
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
            nb_frames_drop++;
            return;
        }
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
            dup_warning *= 10;
        }
    }
    ost->last_dropped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

        /* the first nb0_frames iterations re-encode the previous frame */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->sync_opts;

#if 1
        if (!check_recording_time(ost))
#else
        if (ost->frame_number >= ost->max_frames)
#endif
            return;

#if FF_API_LAVF_FMT_RAWPICTURE
        if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
            if (in_picture->interlaced_frame)
                mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            else
                mux_par->field_order = AV_FIELD_PROGRESSIVE;
            pkt.data = (uint8_t *)in_picture;
            pkt.size = sizeof(AVPicture);
            pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
            pkt.flags |= AV_PKT_FLAG_KEY;

            output_packet(of, &pkt, ost);
        } else
#endif
        {
            int forced_keyframe = 0;
            double pts_time;

            if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                in_picture->top_field_first = !!ost->top_field_first;

            /* propagate field order to the muxer parameters */
            if (in_picture->interlaced_frame) {
                if (enc->codec->id == AV_CODEC_ID_MJPEG)
                    mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                mux_par->field_order = AV_FIELD_PROGRESSIVE;

            in_picture->quality = enc->global_quality;
            in_picture->pict_type = 0;

            /* decide whether this frame must become a keyframe: explicit
             * -force_key_frames timestamps, an expression, or "source" */
            pts_time = in_picture->pts != AV_NOPTS_VALUE ?
                in_picture->pts * av_q2d(enc->time_base) : NAN;
            if (ost->forced_kf_index < ost->forced_kf_count &&
                in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                ost->forced_kf_index++;
                forced_keyframe = 1;
            } else if (ost->forced_keyframes_pexpr) {
                double res;
                ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
                res = av_expr_eval(ost->forced_keyframes_pexpr,
                                   ost->forced_keyframes_expr_const_values, NULL);
                ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                        ost->forced_keyframes_expr_const_values[FKF_N],
                        ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                        ost->forced_keyframes_expr_const_values[FKF_T],
                        ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                        res);
                if (res) {
                    forced_keyframe = 1;
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                        ost->forced_keyframes_expr_const_values[FKF_N];
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                        ost->forced_keyframes_expr_const_values[FKF_T];
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
                }

                ost->forced_keyframes_expr_const_values[FKF_N] += 1;
            } else if (   ost->forced_keyframes
                       && !strncmp(ost->forced_keyframes, "source", 6)
                       && in_picture->key_frame==1) {
                forced_keyframe = 1;
            }

            if (forced_keyframe) {
                in_picture->pict_type = AV_PICTURE_TYPE_I;
                av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
            }

            update_benchmark(NULL);
            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                       "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                       av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                       enc->time_base.num, enc->time_base.den);
            }

            ost->frames_encoded++;

            ret = avcodec_send_frame(enc, in_picture);
            if (ret < 0)
                goto error;

            /* drain every packet the encoder has ready for this frame */
            while (1) {
                ret = avcodec_receive_packet(enc, &pkt);
                update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
                if (ret == AVERROR(EAGAIN))
                    break;
                if (ret < 0)
                    goto error;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
                }

                if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                    pkt.pts = ost->sync_opts;

                av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                           "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                           av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
                           av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
                }

                frame_size = pkt.size;
                output_packet(of, &pkt, ost);

                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
            }
        }
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;

        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);
    }

    /* remember this frame so it can be duplicated on the next call */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
    else
        av_frame_free(&ost->last_frame);

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
    exit_program(1);
}
1354
1355static double psnr(double d)
1356{
1357 return -10.0 * log10(d);
1358}
1359
/**
 * Append one line of per-frame encoding statistics for a video stream to the
 * file named by vstats_filename (opened lazily on the first call; the
 * program exits if it cannot be created).
 *
 * @param ost        video output stream that just produced a packet
 * @param frame_size size in bytes of the encoded packet
 */
static void do_video_stats(OutputStream *ost, int frame_size)
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* vstats_version >= 2 additionally records output file/stream index */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else  {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01;

        /* instantaneous bitrate over one frame, and average since start */
        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
           (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1402
1403static int init_output_stream(OutputStream *ost, char *error, int error_len);
1404
1405static void finish_output_stream(OutputStream *ost)
1406{
1407 OutputFile *of = output_files[ost->file_index];
1408 int i;
1409
1410 ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1411
1412 if (of->shortest) {
1413 for (i = 0; i < of->ctx->nb_streams; i++)
1414 output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1415 }
1416}
1417
/**
 * Get and encode new output from any of the filtergraphs, without causing
 * activity.
 *
 * For each initialized output stream, drain every frame currently queued in
 * its buffersink and hand it to do_video_out()/do_audio_out().
 *
 * @param flush if non-zero, signal EOF to video encoders when the sink
 *              reports AVERROR_EOF so buffered frames are flushed
 * @return 0 for success, <0 for severe errors
 */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile    *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* only streams whose filtergraph is fully configured */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        if (!ost->initialized) {
            char error[1024] = "";
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* reusable per-stream frame for sink output */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* NULL frame tells do_video_out to flush the encoder */
                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational filter_tb = av_buffersink_get_time_base(filter);
                AVRational tb = enc->time_base;
                /* widen the time base so float_pts keeps sub-tick precision */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                            float_pts,
                            enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != av_frame_get_channels(filtered_frame)) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1529
/**
 * Print the end-of-run summary: total bytes per media type, muxing overhead
 * percentage, and (at verbose log level) per-stream packet/frame counters
 * for every input and output file.
 *
 * @param total_size total number of bytes written to the output
 */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    /* accumulate per-type output sizes; also note whether every stream was
     * a pure first pass (PASS1 set, PASS2 not) for the warning below */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default:                 other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size  += ost->data_size;
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
            pass1_used = 0;
    }

    /* muxing overhead = container bytes beyond the raw stream payload */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->filename);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->filename);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1646
/**
 * Build and emit the one-line status report (frame count, fps, q, size,
 * time, bitrate, speed) and, when -progress is active, write key=value
 * progress records to progress_avio. Non-final reports are rate limited
 * to one every 500000 us via the static last_time.
 *
 * @param is_last_report non-zero for the final report at end of transcode
 * @param timer_start    wallclock time when transcoding started, in the same
 *                       microsecond scale as cur_time (t divides by 1e6)
 * @param cur_time       current wallclock time
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    char buf[1024];
    AVBPrint buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* throttle intermediate reports to one per 500ms */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    buf[0] = '\0';
    vid = 0;
    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* secondary video streams only report their quantizer */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        /* the first video stream drives frame/fps/qp-hist/PSNR reporting */
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* chroma planes are quarter size */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* split |pts| (in AV_TIME_BASE units) into h:m:s + microseconds */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=N/A time=");
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
    if (pts < 0)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);

    if (bitrate < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);

    if (nb_frames_dup || nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        /* intermediate reports overwrite the line with '\r' */
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);

        fflush(stderr);
    }

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1838
/**
 * Drain all encoders at end of stream: send a NULL frame to each encoding
 * output stream and mux every packet still returned. Streams that never
 * received any data are first given a fake input format (taken from
 * libavformat's codec parameters) so their filtergraph and encoder can be
 * initialized and finished cleanly.
 */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile      *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            char error[1024] = "";

            av_log(NULL, AV_LOG_WARNING,
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0) {
                        AVCodecParameters *par = ifilter->ist->st->codecpar;
                        // We never got any input. Set a fake format, which will
                        // come from libavformat.
                        ifilter->format                 = par->format;
                        ifilter->sample_rate            = par->sample_rate;
                        ifilter->channels               = par->channels;
                        ifilter->channel_layout         = par->channel_layout;
                        ifilter->width                  = par->width;
                        ifilter->height                 = par->height;
                        ifilter->sample_aspect_ratio    = par->sample_aspect_ratio;
                    }
                }

                /* cannot configure while some graph inputs are still unknown */
                if (!ifilter_has_all_input_formats(fg))
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* raw (uncompressed) audio needs no flushing */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
#if FF_API_LAVF_FMT_RAWPICTURE
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;
#endif

        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
            continue;

        /* drain loop: keep receiving packets until the encoder reports EOF */
        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc   = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc   = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            update_benchmark(NULL);

            /* EAGAIN means the encoder wants input: feed it the NULL
             * (flush) frame and try receiving again */
            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost);
            if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                do_video_stats(ost, pkt_size);
            }
        }
    }
}
1967
1968/*
1969 * Check whether a packet from ist should be written into ost at this time
1970 */
1971static int check_output_constraints(InputStream *ist, OutputStream *ost)
1972{
1973 OutputFile *of = output_files[ost->file_index];
1974 int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1975
1976 if (ost->source_index != ist_index)
1977 return 0;
1978
1979 if (ost->finished)
1980 return 0;
1981
1982 if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1983 return 0;
1984
1985 return 1;
1986}
1987
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    /* Remux a packet from ist into ost without re-encoding: rescale its
     * timestamps into the output mux timebase, apply -ss/-t style trimming
     * and pass the payload through the stream parser when required. */
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    /* output -ss offset in AV_TIME_BASE units; 0 when unset */
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    /* the same offset expressed in the output mux timebase */
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPicture pict;
    AVPacket opkt;

    av_init_packet(&opkt);

    /* drop leading non-keyframes unless -copyinkf was given */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    /* before the first output packet: drop packets preceding the requested
     * start point unless -copypriorss was given */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* output -t reached: stop this output stream */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* input -t reached: stop this output stream as well */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    /* rescale pts into the mux timebase, shifted by the output start time */
    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    /* dts: fall back to the stream's current dts estimate when the packet
     * carries none */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    /* audio: derive both timestamps from dts via av_rescale_delta, which
     * tracks sub-tick remainders at sample granularity */
    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->mux_timebase) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    opkt.flags = pkt->flags;
    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codecpar->codec_id != AV_CODEC_ID_H264
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
       ) {
        int ret = av_parser_change(ost->parser, ost->parser_avctx,
                                   &opkt.data, &opkt.size,
                                   pkt->data, pkt->size,
                                   pkt->flags & AV_PKT_FLAG_KEY);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        if (ret) {
            /* parser allocated a new buffer: wrap it so it gets freed */
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
            if (!opkt.buf)
                exit_program(1);
        }
    } else {
        /* no parser change needed: reference the input payload directly */
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }
    av_copy_packet_side_data(&opkt, pkt);

#if FF_API_LAVF_FMT_RAWPICTURE
    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
        ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;
    }
#endif

    output_packet(of, &opkt, ost);
}
2101
2102int guess_input_channel_layout(InputStream *ist)
2103{
2104 AVCodecContext *dec = ist->dec_ctx;
2105
2106 if (!dec->channel_layout) {
2107 char layout_name[256];
2108
2109 if (dec->channels > ist->guess_layout_max)
2110 return 0;
2111 dec->channel_layout = av_get_default_channel_layout(dec->channels);
2112 if (!dec->channel_layout)
2113 return 0;
2114 av_get_channel_layout_string(layout_name, sizeof(layout_name),
2115 dec->channels, dec->channel_layout);
2116 av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2117 "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2118 }
2119 return 1;
2120}
2121
2122static void check_decode_result(InputStream *ist, int *got_output, int ret)
2123{
2124 if (*got_output || ret<0)
2125 decode_error_stat[ret<0] ++;
2126
2127 if (ret < 0 && exit_on_error)
2128 exit_program(1);
2129
2130 if (exit_on_error && *got_output && ist) {
2131 if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2132 av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2133 exit_program(1);
2134 }
2135 }
2136}
2137
2138// Filters can be configured only if the formats of all inputs are known.
2139static int ifilter_has_all_input_formats(FilterGraph *fg)
2140{
2141 int i;
2142 for (i = 0; i < fg->nb_inputs; i++) {
2143 if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144 fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2145 return 0;
2146 }
2147 return 1;
2148}
2149
2150static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2151{
2152 FilterGraph *fg = ifilter->graph;
2153 int need_reinit, ret, i;
2154
2155 /* determine if the parameters for this input changed */
2156 need_reinit = ifilter->format != frame->format;
2157 if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2158 (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2159 need_reinit = 1;
2160
2161 switch (ifilter->ist->st->codecpar->codec_type) {
2162 case AVMEDIA_TYPE_AUDIO:
2163 need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2164 ifilter->channels != frame->channels ||
2165 ifilter->channel_layout != frame->channel_layout;
2166 break;
2167 case AVMEDIA_TYPE_VIDEO:
2168 need_reinit |= ifilter->width != frame->width ||
2169 ifilter->height != frame->height;
2170 break;
2171 }
2172
2173 if (need_reinit) {
2174 ret = ifilter_parameters_from_frame(ifilter, frame);
2175 if (ret < 0)
2176 return ret;
2177 }
2178
2179 /* (re)init the graph if possible, otherwise buffer the frame and return */
2180 if (need_reinit || !fg->graph) {
2181 for (i = 0; i < fg->nb_inputs; i++) {
2182 if (!ifilter_has_all_input_formats(fg)) {
2183 AVFrame *tmp = av_frame_clone(frame);
2184 if (!tmp)
2185 return AVERROR(ENOMEM);
2186 av_frame_unref(frame);
2187
2188 if (!av_fifo_space(ifilter->frame_queue)) {
2189 ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2190 if (ret < 0) {
2191 av_frame_free(&tmp);
2192 return ret;
2193 }
2194 }
2195 av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2196 return 0;
2197 }
2198 }
2199
2200 ret = reap_filters(1);
2201 if (ret < 0 && ret != AVERROR_EOF) {
2202 char errbuf[128];
2203 av_strerror(ret, errbuf, sizeof(errbuf));
2204
2205 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2206 return ret;
2207 }
2208
2209 ret = configure_filtergraph(fg);
2210 if (ret < 0) {
2211 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2212 return ret;
2213 }
2214 }
2215
2216 ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
2217 if (ret < 0) {
2218 av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
2219 return ret;
2220 }
2221
2222 return 0;
2223}
2224
2225static int ifilter_send_eof(InputFilter *ifilter)
2226{
2227 int i, j, ret;
2228
2229 ifilter->eof = 1;
2230
2231 if (ifilter->filter) {
2232 ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
2233 if (ret < 0)
2234 return ret;
2235 } else {
2236 // the filtergraph was never configured
2237 FilterGraph *fg = ifilter->graph;
2238 for (i = 0; i < fg->nb_inputs; i++)
2239 if (!fg->inputs[i]->eof)
2240 break;
2241 if (i == fg->nb_inputs) {
2242 // All the input streams have finished without the filtergraph
2243 // ever being configured.
2244 // Mark the output streams as finished.
2245 for (j = 0; j < fg->nb_outputs; j++)
2246 finish_output_stream(fg->outputs[j]->ost);
2247 }
2248 }
2249
2250 return 0;
2251}
2252
2253// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2254// There is the following difference: if you got a frame, you must call
2255// it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2256// (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2257static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2258{
2259 int ret;
2260
2261 *got_frame = 0;
2262
2263 if (pkt) {
2264 ret = avcodec_send_packet(avctx, pkt);
2265 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2266 // decoded frames with avcodec_receive_frame() until done.
2267 if (ret < 0 && ret != AVERROR_EOF)
2268 return ret;
2269 }
2270
2271 ret = avcodec_receive_frame(avctx, frame);
2272 if (ret < 0 && ret != AVERROR(EAGAIN))
2273 return ret;
2274 if (ret >= 0)
2275 *got_frame = 1;
2276
2277 return 0;
2278}
2279
2280static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2281{
2282 int i, ret;
2283 AVFrame *f;
2284
2285 av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2286 for (i = 0; i < ist->nb_filters; i++) {
2287 if (i < ist->nb_filters - 1) {
2288 f = ist->filter_frame;
2289 ret = av_frame_ref(f, decoded_frame);
2290 if (ret < 0)
2291 break;
2292 } else
2293 f = decoded_frame;
2294 ret = ifilter_send_frame(ist->filters[i], f);
2295 if (ret == AVERROR_EOF)
2296 ret = 0; /* ignore */
2297 if (ret < 0) {
2298 av_log(NULL, AV_LOG_ERROR,
2299 "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2300 break;
2301 }
2302 }
2303 return ret;
2304}
2305
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    /* Decode one audio packet (or pull a buffered frame when pkt is NULL),
     * assign the frame a pts in a 1/sample_rate timebase and feed it to
     * the attached filtergraphs.
     * Returns the decoder error code, or the filter error if that failed;
     * *got_output / *decode_failed report what happened to the caller. */
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    /* lazily allocate the per-stream frames reused across calls */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    /* guard against decoders reporting an invalid sample rate */
    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

#if 1
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
#endif

    /* choose a pts source for the frame: its own pts, the packet pts, or
     * the stream's running dts estimate (each with its own timebase) */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb = AV_TIME_BASE_Q;
    }
    /* rescale to 1/sample_rate, tracking sub-tick remainders across calls */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2369
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
                        int *decode_failed)
{
    /* Decode one video packet (or drain buffered frames when pkt is NULL
     * and eof is set), compute a best-effort timestamp for the frame and
     * feed it to the attached filtergraphs.
     * Returns the decoder error code, or a filter/hwaccel error if those
     * failed after decoding succeeded. */
    AVFrame *decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;
    AVPacket avpkt;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    /* lazily allocate the per-stream frames reused across calls */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        avpkt = *pkt;
        avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* remember the current dts so frames drained at EOF can reuse it */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    update_benchmark(NULL);
    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
        } else
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to ftp://upload.ffmpeg.org/incoming/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codecpar->video_delay);
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    /* debug aid: report when the decoder context disagrees with the frame */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                decoded_frame->width,
                decoded_frame->height,
                decoded_frame->format,
                ist->dec_ctx->width,
                ist->dec_ctx->height,
                ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* download the frame from the hw device when a retrieval hook is set */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);

    /* forced cfr (-r on input): ignore decoder timestamps, count frames */
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    /* drained frames carry no timestamp: reuse the buffered packet dts */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    err = send_frame_to_filters(ist, decoded_frame);

fail:
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2501
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                               int *decode_failed)
{
    /* Decode one subtitle packet, optionally fix the previous subtitle's
     * duration (-fix_sub_duration), hand the result to sub2video when
     * filtering needs it, and re-encode it into every matching subtitle
     * output stream. */
    AVSubtitle subtitle;
    int free_sub = 1;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    check_decode_result(NULL, got_output, ret);

    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        /* a zero-sized packet signals EOF: flush the sub2video state */
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            /* clip the previous subtitle's display time (in ms) to the gap
             * between the two subtitles */
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* delay output by one subtitle: swap in the previous one and emit
         * that instead, keeping the current one for the next call */
        FFSWAP(int, *got_output, ist->prev_sub.got_output);
        FFSWAP(int, ret, ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    /* feed sub2video if it is set up; otherwise queue the subtitle until
     * the filtergraph is configured */
    if (ist->sub2video.frame) {
        sub2video_update(ist, &subtitle);
    } else if (ist->nb_filters) {
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
        if (!ist->sub2video.sub_queue)
            exit_program(1);
        if (!av_fifo_space(ist->sub2video.sub_queue)) {
            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
            if (ret < 0)
                exit_program(1);
        }
        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
        free_sub = 0; /* ownership moved into the queue */
    }

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    /* re-encode into every subtitle output fed by this input stream */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    }

out:
    if (free_sub)
        avsubtitle_free(&subtitle);
    return ret;
}
2578
2579static int send_filter_eof(InputStream *ist)
2580{
2581 int i, ret;
2582 for (i = 0; i < ist->nb_filters; i++) {
2583 ret = ifilter_send_eof(ist->filters[i]);
2584 if (ret < 0)
2585 return ret;
2586 }
2587 return 0;
2588}
2589
/* pkt = NULL means EOF (needed to flush decoder buffers) */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    /* Central per-packet dispatcher: maintains the input stream's pts/dts
     * clock, runs the audio/video/subtitle decoders when decoding is
     * needed, propagates EOF to the filters, and performs streamcopy for
     * outputs that do not re-encode.
     * Returns non-zero while more input is expected, 0 once EOF is reached. */
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket avpkt;
    if (!ist->saw_first_ts) {
        /* initialize the stream clock; start negative to account for
         * decoder delay on streams with B-frames */
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (!pkt) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
    } else {
        avpkt = *pkt;
    }

    /* the packet dts drives the stream clock (in AV_TIME_BASE units) */
    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int duration = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
                                   &decode_failed);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                /* estimate the frame duration to advance next_dts: prefer
                 * the packet duration, fall back to the decoder framerate */
                if (pkt && pkt->duration) {
                    duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                    duration = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration) {
                    ist->next_dts += duration;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output)
                ist->next_pts += duration; //FIXME the duration is not correct in some cases
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            /* subtitles have no draining: decode each packet exactly once */
            if (repeating)
                break;
            ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed) {
        ist->dts = ist->next_dts;
        /* advance next_dts by the packet/frame duration for each media type */
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                             ist->dec_ctx->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    /* forward the packet to all streamcopy outputs fed by this stream */
    for (i = 0; pkt && i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2761
2762static void print_sdp(void)
2763{
2764 char sdp[16384];
2765 int i;
2766 int j;
2767 AVIOContext *sdp_pb;
2768 AVFormatContext **avc;
2769
2770 for (i = 0; i < nb_output_files; i++) {
2771 if (!output_files[i]->header_written)
2772 return;
2773 }
2774
2775 avc = av_malloc_array(nb_output_files, sizeof(*avc));
2776 if (!avc)
2777 exit_program(1);
2778 for (i = 0, j = 0; i < nb_output_files; i++) {
2779 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2780 avc[j] = output_files[i]->ctx;
2781 j++;
2782 }
2783 }
2784
2785 if (!j)
2786 goto fail;
2787
2788 av_sdp_create(avc, j, sdp, sizeof(sdp));
2789
2790 if (!sdp_filename) {
2791 printf("SDP:\n%s\n", sdp);
2792 fflush(stdout);
2793 } else {
2794 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2795 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2796 } else {
2797 avio_printf(sdp_pb, "SDP:\n%s", sdp);
2798 avio_closep(&sdp_pb);
2799 av_freep(&sdp_filename);
2800 }
2801 }
2802
2803fail:
2804 av_freep(&avc);
2805}
2806
2807static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2808{
2809 int i;
2810 for (i = 0; hwaccels[i].name; i++)
2811 if (hwaccels[i].pix_fmt == pix_fmt)
2812 return &hwaccels[i];
2813 return NULL;
2814}
2815
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    /* AVCodecContext.get_format callback: choose the first pixel format
     * the decoder offers that is either a software format or belongs to a
     * usable, successfully initialized hwaccel for this input stream. */
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != -1; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const HWAccel *hwaccel;

        /* first non-hwaccel format: accept it (software decoding) */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        hwaccel = get_hwaccel(*p);
        /* skip hwaccels that are unknown, not the one already active for
         * this stream, or not the one the user explicitly requested */
        if (!hwaccel ||
            (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
            (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
            continue;

        ret = hwaccel->init(s);
        if (ret < 0) {
            /* init failure is fatal only when this hwaccel was explicitly
             * requested; with -hwaccel auto we keep trying other formats */
            if (ist->hwaccel_id == hwaccel->id) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
            continue;
        }

        if (ist->hw_frames_ctx) {
            s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
            if (!s->hw_frames_ctx)
                return AV_PIX_FMT_NONE;
        }

        ist->active_hwaccel_id = hwaccel->id;
        ist->hwaccel_pix_fmt = *p;
        break;
    }

    return *p;
}
2860
2861static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2862{
2863 InputStream *ist = s->opaque;
2864
2865 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2866 return ist->hwaccel_get_buffer(s, frame, flags);
2867
2868 return avcodec_default_get_buffer2(s, frame, flags);
2869}
2870
static int init_input_stream(int ist_index, char *error, int error_len)
{
    /* Open the decoder for input stream ist_index (only when decoding is
     * needed) and reset its timestamp prediction state.
     * On failure a message is written into error/error_len and a negative
     * AVERROR code is returned. */
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        /* install ffmpeg.c's hwaccel-aware callbacks */
        ist->dec_ctx->opaque = ist;
        ist->dec_ctx->get_format = get_format;
        ist->dec_ctx->get_buffer2 = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        /* every decoder option must have been consumed by avcodec_open2 */
        assert_avoptions(ist->decoder_opts);
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
2923
2924static InputStream *get_input_stream(OutputStream *ost)
2925{
2926 if (ost->source_index >= 0)
2927 return input_streams[ost->source_index];
2928 return NULL;
2929}
2930
static int compare_int64(const void *a, const void *b)
{
    /* qsort comparator for int64_t values: three-way compare written
     * without overflow-prone subtraction. */
    int64_t va = *(const int64_t *)a;
    int64_t vb = *(const int64_t *)b;
    return (va > vb) - (va < vb);
}
2935
/* open the muxer when all the streams are initialized */
static int check_init_output_file(OutputFile *of, int file_index)
{
    /* Write the output file header once every stream of the file has been
     * initialized, then flush any packets queued while waiting.
     * Returns 0 (also when still waiting on streams), <0 on error. */
    int ret, i;

    /* bail out quietly while any stream is still uninitialized */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->filename, 1);

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            write_packet(of, &pkt, ost, 1);
        }
    }

    return 0;
}
2982
static int init_output_bsfs(OutputStream *ost)
{
    /* Initialize the chain of bitstream filters attached to ost: thread
     * codec parameters and timebase through the chain, then copy the last
     * filter's output parameters back onto the output stream.
     * Returns 0 on success or a negative AVERROR code. */
    AVBSFContext *ctx;
    int i, ret;

    if (!ost->nb_bitstream_filters)
        return 0;

    for (i = 0; i < ost->nb_bitstream_filters; i++) {
        ctx = ost->bsf_ctx[i];

        /* each filter's input is the previous filter's output (the stream's
         * codec parameters for the first filter in the chain) */
        ret = avcodec_parameters_copy(ctx->par_in,
                                      i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
        if (ret < 0)
            return ret;

        ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;

        ret = av_bsf_init(ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
                   ost->bsf_ctx[i]->filter->name);
            return ret;
        }
    }

    /* the final filter's output becomes the stream's parameters/timebase */
    ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
    ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
    if (ret < 0)
        return ret;

    ost->st->time_base = ctx->time_base_out;

    return 0;
}
3018
/*
 * Set up an output stream that copies packets from its input stream
 * without re-encoding: codec parameters, codec tag, timing, aspect
 * ratio, side data and disposition are transferred from input to
 * output. Returns 0 on success or a negative AVERROR code.
 */
static int init_output_stream_streamcopy(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    InputStream *ist = get_input_stream(ost);
    AVCodecParameters *par_dst = ost->st->codecpar;
    AVCodecParameters *par_src = ost->ref_par;
    AVRational sar;
    int i, ret;
    uint32_t codec_tag = par_dst->codec_tag;

    /* streamcopy requires a real input stream and no filtergraph */
    av_assert0(ist && !ost->filter);

    /* round-trip the input parameters through enc_ctx so that user
     * options in encoder_opts can override individual fields */
    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
    if (ret >= 0)
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error setting up codec context options.\n");
        return ret;
    }
    avcodec_parameters_from_context(par_src, ost->enc_ctx);

    if (!codec_tag) {
        unsigned int codec_tag_tmp;
        /* keep the source tag only when the muxer has no tag table, the
         * source tag maps back to the same codec id in that table, or the
         * muxer has no tag of its own for this codec */
        if (!of->ctx->oformat->codec_tag ||
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }

    ret = avcodec_parameters_copy(par_dst, par_src);
    if (ret < 0)
        return ret;

    par_dst->codec_tag = codec_tag;

    /* fall back to the input framerate when -r was not given */
    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;
    ost->st->avg_frame_rate = ost->frame_rate;

    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
    if (ret < 0)
        return ret;

    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
        ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});

    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

    // copy disposition
    ost->st->disposition = ist->st->disposition;

    if (ist->st->nb_side_data) {
        /* deep-copy all stream-level side data from the input stream */
        ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
                                              sizeof(*ist->st->side_data));
        if (!ost->st->side_data)
            return AVERROR(ENOMEM);

        ost->st->nb_side_data = 0;
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];

            sd_dst->data = av_malloc(sd_src->size);
            if (!sd_dst->data)
                return AVERROR(ENOMEM);
            memcpy(sd_dst->data, sd_src->data, sd_src->size);
            sd_dst->size = sd_src->size;
            sd_dst->type = sd_src->type;
            ost->st->nb_side_data++;
        }
    }

    if (ost->rotate_overridden) {
        /* a user-specified rotation replaces any copied display matrix;
         * the 3x3 int32 matrix is the standard DISPLAYMATRIX payload */
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                              sizeof(int32_t) * 9);
        if (sd)
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
    }

    /* parser may be NULL if no parser exists for this codec id */
    ost->parser = av_parser_init(par_dst->codec_id);
    ost->parser_avctx = avcodec_alloc_context3(NULL);
    if (!ost->parser_avctx)
        return AVERROR(ENOMEM);

    switch (par_dst->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (audio_volume != 256) {
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
            exit_program(1);
        }
        /* clear bogus block_align values that some demuxers produce for
         * MP3 and AC-3 streams */
        if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
            par_dst->block_align= 0;
        if(par_dst->codec_id == AV_CODEC_ID_AC3)
            par_dst->block_align= 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
                av_mul_q(ost->frame_aspect_ratio,
                         (AVRational){ par_dst->height, par_dst->width });
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
        }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par_src->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }

    /* packets are muxed in the input stream's timebase */
    ost->mux_timebase = ist->st->time_base;

    return 0;
}
3140
3141static void set_encoder_id(OutputFile *of, OutputStream *ost)
3142{
3143 AVDictionaryEntry *e;
3144
3145 uint8_t *encoder_string;
3146 int encoder_string_len;
3147 int format_flags = 0;
3148 int codec_flags = 0;
3149
3150 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3151 return;
3152
3153 e = av_dict_get(of->opts, "fflags", NULL, 0);
3154 if (e) {
3155 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3156 if (!o)
3157 return;
3158 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3159 }
3160 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3161 if (e) {
3162 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3163 if (!o)
3164 return;
3165 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3166 }
3167
3168 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3169 encoder_string = av_mallocz(encoder_string_len);
3170 if (!encoder_string)
3171 exit_program(1);
3172
3173 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3174 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3175 else
3176 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3177 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3178 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3179 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3180}
3181
3182static void parse_forced_key_frames(char *kf, OutputStream *ost,
3183 AVCodecContext *avctx)
3184{
3185 char *p;
3186 int n = 1, i, size, index = 0;
3187 int64_t t, *pts;
3188
3189 for (p = kf; *p; p++)
3190 if (*p == ',')
3191 n++;
3192 size = n;
3193 pts = av_malloc_array(size, sizeof(*pts));
3194 if (!pts) {
3195 av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3196 exit_program(1);
3197 }
3198
3199 p = kf;
3200 for (i = 0; i < n; i++) {
3201 char *next = strchr(p, ',');
3202
3203 if (next)
3204 *next++ = 0;
3205
3206 if (!memcmp(p, "chapters", 8)) {
3207
3208 AVFormatContext *avf = output_files[ost->file_index]->ctx;
3209 int j;
3210
3211 if (avf->nb_chapters > INT_MAX - size ||
3212 !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3213 sizeof(*pts)))) {
3214 av_log(NULL, AV_LOG_FATAL,
3215 "Could not allocate forced key frames array.\n");
3216 exit_program(1);
3217 }
3218 t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3219 t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3220
3221 for (j = 0; j < avf->nb_chapters; j++) {
3222 AVChapter *c = avf->chapters[j];
3223 av_assert1(index < size);
3224 pts[index++] = av_rescale_q(c->start, c->time_base,
3225 avctx->time_base) + t;
3226 }
3227
3228 } else {
3229
3230 t = parse_time_or_die("force_key_frames", p, 1);
3231 av_assert1(index < size);
3232 pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3233
3234 }
3235
3236 p = next;
3237 }
3238
3239 av_assert0(index == size);
3240 qsort(pts, size, sizeof(*pts), compare_int64);
3241 ost->forced_kf_count = size;
3242 ost->forced_kf_pts = pts;
3243}
3244
/*
 * Configure the encoder context of an output stream that will be
 * re-encoded: framerate selection, sample/pixel format, dimensions,
 * timebase and forced-keyframe handling, mostly taken from the
 * buffersink feeding the encoder. Returns 0 on success or a negative
 * AVERROR code.
 */
static int init_output_stream_encode(OutputStream *ost)
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* no direct input stream (complex filtergraph output): mark the
         * stream as default if it is the only one of its media type */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                ost->st->disposition = AV_DISPOSITION_DEFAULT;
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* framerate priority: -r option, filtergraph, input framerate,
         * input r_frame_rate, then a 25 fps fallback */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(NULL, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }
//      ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
        if (ost->enc->supported_framerates && !ost->force_fps) {
            /* snap to the closest framerate the encoder supports */
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* audio parameters come from the buffersink feeding the encoder */
        enc_ctx->sample_fmt     = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        enc_ctx->sample_rate    = av_buffersink_get_sample_rate(ost->filter->filter);
        enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
        enc_ctx->channels       = av_buffersink_get_channels(ost->filter->filter);
        enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
        break;
    case AVMEDIA_TYPE_VIDEO:
        /* encoder timebase is the inverse framerate, falling back to the
         * filtergraph's timebase when the framerate is unknown */
        enc_ctx->time_base = av_inv_q(ost->frame_rate);
        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
           && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }
        /* rescale any previously parsed forced keyframe timestamps into
         * the now-final encoder timebase */
        for (j = 0; j < ost->forced_kf_count; j++)
            ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
                                                 AV_TIME_BASE_Q,
                                                 enc_ctx->time_base);

        enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
        if (!strncmp(ost->enc->name, "libx264", 7) &&
            enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
            av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
            av_log(NULL, AV_LOG_WARNING,
                   "No pixel format specified, %s for H.264 encoding chosen.\n"
                   "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                   av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
        if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
            enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
            av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
            av_log(NULL, AV_LOG_WARNING,
                   "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
                   "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                   av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        /* bits_per_raw_sample copied from the decoder is only valid when
         * the frames pass through unscaled and in the same pixel format */
        if (!dec_ctx ||
            enc_ctx->width   != dec_ctx->width  ||
            enc_ctx->height  != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            /* subtitle canvas defaults to the input video dimensions */
            enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    /* mux in the encoder's timebase; may be refined later by the muxer */
    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3406
/*
 * Fully initialize one output stream: set up either the encoder
 * (encoding path) or parameter copying (streamcopy path), apply any
 * user-requested disposition, initialize bitstream filters, and, once
 * all of the file's streams are ready, write the output file header.
 * On encoder-open failure a human-readable message is written into
 * error/error_len. Returns 0 on success or a negative AVERROR code.
 */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec      *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* default audio encoders without their own defaults to 128 kb/s
         * unless the user specified a bitrate */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* hand the filtergraph's hw frames context to the encoder when the
         * filter output is actually in that hw pixel format */
        if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
            ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
            av_buffersink_get_format(ost->filter->filter)) {
            ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* fixed-frame-size audio encoders need the buffersink to emit
         * exactly frame_size samples per frame */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            /* deep-copy the encoder's global side data to the stream */
            ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
                                                  sizeof(*ost->st->side_data));
            if (!ost->st->side_data)
                return AVERROR(ENOMEM);

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                AVPacketSideData *sd_dst = &ost->st->side_data[i];

                sd_dst->data = av_malloc(sd_src->size);
                if (!sd_dst->data)
                    return AVERROR(ENOMEM);
                memcpy(sd_dst->data, sd_src->data, sd_src->size);
                sd_dst->size = sd_src->size;
                sd_dst->type = sd_src->type;
                ost->st->nb_side_data++;
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                if (!dst)
                    return AVERROR(ENOMEM);
                memcpy(dst, sd->data, sd->size);
                /* autorotation is applied by the filtergraph, so neutralize
                 * any copied display matrix */
                if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                    av_display_rotation_set((uint32_t *)dst, 0);
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;

        /*
         * FIXME: will the codec context used by the parser during streamcopy
         * This should go away with the new parser API.
         */
        ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        /* ad-hoc AVOption table so av_opt_eval_flags can parse the
         * disposition string (e.g. "default+forced") */
        static const AVOption opts[] = {
            { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
            { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
            { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
            { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
            { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
            { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
            { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
            { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
            { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
            { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
            { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
            { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
            { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* header can only be written once every stream of the file is set up */
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3589
3590static void report_new_stream(int input_index, AVPacket *pkt)
3591{
3592 InputFile *file = input_files[input_index];
3593 AVStream *st = file->ctx->streams[pkt->stream_index];
3594
3595 if (pkt->stream_index < file->nb_streams_warn)
3596 return;
3597 av_log(file->ctx, AV_LOG_WARNING,
3598 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3599 av_get_media_type_string(st->codecpar->codec_type),
3600 input_index, pkt->stream_index,
3601 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3602 file->nb_streams_warn = pkt->stream_index + 1;
3603}
3604
/*
 * One-time setup before the main transcoding loop: resolve complex
 * filtergraph sources, start framerate emulation clocks, initialize
 * input streams and unfiltered output streams, discard unused programs,
 * write headers for stream-less outputs, and finally dump the stream
 * mapping. Returns 0 on success or a negative AVERROR code.
 */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* for single-input complex filtergraphs, attribute the graph's input
     * stream to the output stream as its nominal source */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* tear down any encoders before bailing out */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            /* keep a program if any of its streams is in use */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            /* display "native" when the codec name matches the generic
             * descriptor name, i.e. the built-in implementation is used */
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    /* ret is non-zero when one of the init steps above jumped here */
    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    atomic_store(&transcode_init_done, 1);

    return 0;
}
3776
3777/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3778static int need_output(void)
3779{
3780 int i;
3781
3782 for (i = 0; i < nb_output_streams; i++) {
3783 OutputStream *ost = output_streams[i];
3784 OutputFile *of = output_files[ost->file_index];
3785 AVFormatContext *os = output_files[ost->file_index]->ctx;
3786
3787 if (ost->finished ||
3788 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3789 continue;
3790 if (ost->frame_number >= ost->max_frames) {
3791 int j;
3792 for (j = 0; j < of->ctx->nb_streams; j++)
3793 close_output_stream(output_streams[of->ost_index + j]);
3794 continue;
3795 }
3796
3797 return 1;
3798 }
3799
3800 return 0;
3801}
3802
3803/**
3804 * Select the output stream to process.
3805 *
3806 * @return selected output stream, or NULL if none available
3807 */
3808static OutputStream *choose_output(void)
3809{
3810 int i;
3811 int64_t opts_min = INT64_MAX;
3812 OutputStream *ost_min = NULL;
3813
3814 for (i = 0; i < nb_output_streams; i++) {
3815 OutputStream *ost = output_streams[i];
3816 int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3817 av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3818 AV_TIME_BASE_Q);
3819 if (ost->st->cur_dts == AV_NOPTS_VALUE)
3820 av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3821
3822 if (!ost->initialized && !ost->inputs_done)
3823 return ost;
3824
3825 if (!ost->finished && opts < opts_min) {
3826 opts_min = opts;
3827 ost_min = ost->unavailable ? NULL : ost;
3828 }
3829 }
3830 return ost_min;
3831}
3832
/* Enable or disable terminal echo on stdin; a no-op on systems without
 * termios, and silently ignored when stdin is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &tty);
#endif
}
3844
/*
 * Poll for and act on interactive keyboard commands (at most once per
 * 100 ms): quit, verbosity, QP histogram, packet dumping, filter
 * commands, debug level and help. Returns AVERROR_EXIT when the user
 * asked to quit or a termination signal was received, 0 otherwise.
 */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key =  read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist     ^= 1;
    if (key == 'h'){
        /* cycle: off -> packet dump -> packet+hex dump -> off */
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        av_log_set_level(AV_LOG_DEBUG);
    }
    if (key == 'c' || key == 'C'){
        /* read one command line from the tty and send ('c') or queue ('C')
         * it to all filtergraphs */
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* negative time: send immediately */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* non-negative time: queue for later execution */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* 'D': cycle to the next debug bit, skipping unsupported modes */
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd': read an explicit debug value from the tty */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        /* apply the new debug level to all decoders and encoders */
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key    function\n"
                        "?      show this help\n"
                        "+      increase verbosity\n"
                        "-      decrease verbosity\n"
                        "c      Send command to first matching filter supporting it\n"
                        "C      Send/Queue command to all matching filters\n"
                        "D      cycle through available debug modes\n"
                        "h      dump packets/hex press to cycle through the 3 states\n"
                        "q      quit\n"
                        "s      Show QP histogram\n"
        );
    }
    return 0;
}
3957
3958#if HAVE_PTHREADS
/* Demuxer thread body: reads packets from one input file and forwards them
 * to the main thread through f->in_thread_queue until EOF or a read error.
 * Started by init_input_threads() and reaped by free_input_threads(). */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    /* Non-blocking sends are used for non-seekable inputs (see
     * init_input_threads()) so a full queue does not stall the reader. */
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* Propagate EOF/error to the consumer side of the queue. */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* Queue full in non-blocking mode: fall back to one blocking send
             * and warn that a larger queue would have avoided the stall. */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            av_log(f->ctx, AV_LOG_WARNING,
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            /* The packet was not consumed by the queue; release it here. */
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
3999
4000static void free_input_threads(void)
4001{
4002 int i;
4003
4004 for (i = 0; i < nb_input_files; i++) {
4005 InputFile *f = input_files[i];
4006 AVPacket pkt;
4007
4008 if (!f || !f->in_thread_queue)
4009 continue;
4010 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4011 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4012 av_packet_unref(&pkt);
4013
4014 pthread_join(f->thread, NULL);
4015 f->joined = 1;
4016 av_thread_message_queue_free(&f->in_thread_queue);
4017 }
4018}
4019
4020static int init_input_threads(void)
4021{
4022 int i, ret;
4023
4024 if (nb_input_files == 1)
4025 return 0;
4026
4027 for (i = 0; i < nb_input_files; i++) {
4028 InputFile *f = input_files[i];
4029
4030 if (f->ctx->pb ? !f->ctx->pb->seekable :
4031 strcmp(f->ctx->iformat->name, "lavfi"))
4032 f->non_blocking = 1;
4033 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4034 f->thread_queue_size, sizeof(AVPacket));
4035 if (ret < 0)
4036 return ret;
4037
4038 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4039 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4040 av_thread_message_queue_free(&f->in_thread_queue);
4041 return AVERROR(ret);
4042 }
4043 }
4044 return 0;
4045}
4046
4047static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4048{
4049 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4050 f->non_blocking ?
4051 AV_THREAD_MESSAGE_NONBLOCK : 0);
4052}
4053#endif
4054
4055static int get_input_packet(InputFile *f, AVPacket *pkt)
4056{
4057 if (f->rate_emu) {
4058 int i;
4059 for (i = 0; i < f->nb_streams; i++) {
4060 InputStream *ist = input_streams[f->ist_index + i];
4061 int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4062 int64_t now = av_gettime_relative() - ist->start;
4063 if (pts > now)
4064 return AVERROR(EAGAIN);
4065 }
4066 }
4067
4068#if HAVE_PTHREADS
4069 if (nb_input_files > 1)
4070 return get_input_packet_mt(f, pkt);
4071#endif
4072 return av_read_frame(f->ctx, pkt);
4073}
4074
4075static int got_eagain(void)
4076{
4077 int i;
4078 for (i = 0; i < nb_output_streams; i++)
4079 if (output_streams[i]->unavailable)
4080 return 1;
4081 return 0;
4082}
4083
4084static void reset_eagain(void)
4085{
4086 int i;
4087 for (i = 0; i < nb_input_files; i++)
4088 input_files[i]->eagain = 0;
4089 for (i = 0; i < nb_output_streams; i++)
4090 output_streams[i]->unavailable = 0;
4091}
4092
4093// set duration to max(tmp, duration) in a proper time base and return duration's time_base
4094static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4095 AVRational time_base)
4096{
4097 int ret;
4098
4099 if (!*duration) {
4100 *duration = tmp;
4101 return tmp_time_base;
4102 }
4103
4104 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4105 if (ret < 0) {
4106 *duration = tmp;
4107 return tmp_time_base;
4108 }
4109
4110 return time_base;
4111}
4112
/* Rewind an input file to its start for -stream_loop: seek, flush all
 * decoders, and extend ifile->duration by the length of the material just
 * played so looped timestamps stay monotonic. Returns the av_seek_frame()
 * result (negative on seek failure). */
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        // flush decoders
        if (ist->decoding_needed) {
            process_input_packet(ist, NULL, 1);
            avcodec_flush_buffers(avctx);
        }

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            /* Audio: last-frame length is nb_samples / sample_rate,
             * rescaled into the stream's time base. */
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else
                continue;
        } else {
            /* Video/other: use one frame interval from the configured or
             * average frame rate; fall back to 1 time-base unit. */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
            } else duration = 1;
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* Negative loop counts mean loop forever; only positive counts tick down. */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4173
4174/*
4175 * Return
4176 * - 0 -- one packet was read and processed
4177 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4178 * this function should be called again
4179 * - AVERROR_EOF -- this function should not be called again
4180 */
4181static int process_input(int file_index)
4182{
4183 InputFile *ifile = input_files[file_index];
4184 AVFormatContext *is;
4185 InputStream *ist;
4186 AVPacket pkt;
4187 int ret, i, j;
4188 int64_t duration;
4189 int64_t pkt_dts;
4190
4191 is = ifile->ctx;
4192 ret = get_input_packet(ifile, &pkt);
4193
4194 if (ret == AVERROR(EAGAIN)) {
4195 ifile->eagain = 1;
4196 return ret;
4197 }
4198 if (ret < 0 && ifile->loop) {
4199 if ((ret = seek_to_start(ifile, is)) < 0)
4200 return ret;
4201 ret = get_input_packet(ifile, &pkt);
4202 if (ret == AVERROR(EAGAIN)) {
4203 ifile->eagain = 1;
4204 return ret;
4205 }
4206 }
4207 if (ret < 0) {
4208 if (ret != AVERROR_EOF) {
4209 print_error(is->filename, ret);
4210 if (exit_on_error)
4211 exit_program(1);
4212 }
4213
4214 for (i = 0; i < ifile->nb_streams; i++) {
4215 ist = input_streams[ifile->ist_index + i];
4216 if (ist->decoding_needed) {
4217 ret = process_input_packet(ist, NULL, 0);
4218 if (ret>0)
4219 return 0;
4220 }
4221
4222 /* mark all outputs that don't go through lavfi as finished */
4223 for (j = 0; j < nb_output_streams; j++) {
4224 OutputStream *ost = output_streams[j];
4225
4226 if (ost->source_index == ifile->ist_index + i &&
4227 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4228 finish_output_stream(ost);
4229 }
4230 }
4231
4232 ifile->eof_reached = 1;
4233 return AVERROR(EAGAIN);
4234 }
4235
4236 reset_eagain();
4237
4238 if (do_pkt_dump) {
4239 av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4240 is->streams[pkt.stream_index]);
4241 }
4242 /* the following test is needed in case new streams appear
4243 dynamically in stream : we ignore them */
4244 if (pkt.stream_index >= ifile->nb_streams) {
4245 report_new_stream(file_index, &pkt);
4246 goto discard_packet;
4247 }
4248
4249 ist = input_streams[ifile->ist_index + pkt.stream_index];
4250
4251 ist->data_size += pkt.size;
4252 ist->nb_packets++;
4253
4254 if (ist->discard)
4255 goto discard_packet;
4256
4257 if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4258 av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4259 exit_program(1);
4260 }
4261
4262 if (debug_ts) {
4263 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4264 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4265 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4266 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4267 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4268 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4269 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4270 av_ts2str(input_files[ist->file_index]->ts_offset),
4271 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4272 }
4273
4274 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4275 int64_t stime, stime2;
4276 // Correcting starttime based on the enabled streams
4277 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4278 // so we instead do it here as part of discontinuity handling
4279 if ( ist->next_dts == AV_NOPTS_VALUE
4280 && ifile->ts_offset == -is->start_time
4281 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4282 int64_t new_start_time = INT64_MAX;
4283 for (i=0; i<is->nb_streams; i++) {
4284 AVStream *st = is->streams[i];
4285 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4286 continue;
4287 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4288 }
4289 if (new_start_time > is->start_time) {
4290 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4291 ifile->ts_offset = -new_start_time;
4292 }
4293 }
4294
4295 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4296 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4297 ist->wrap_correction_done = 1;
4298
4299 if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4300 pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4301 ist->wrap_correction_done = 0;
4302 }
4303 if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4304 pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4305 ist->wrap_correction_done = 0;
4306 }
4307 }
4308
4309 /* add the stream-global side data to the first packet */
4310 if (ist->nb_packets == 1) {
4311 for (i = 0; i < ist->st->nb_side_data; i++) {
4312 AVPacketSideData *src_sd = &ist->st->side_data[i];
4313 uint8_t *dst_data;
4314
4315 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4316 continue;
4317
4318 if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4319 continue;
4320
4321 dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4322 if (!dst_data)
4323 exit_program(1);
4324
4325 memcpy(dst_data, src_sd->data, src_sd->size);
4326 }
4327 }
4328
4329 if (pkt.dts != AV_NOPTS_VALUE)
4330 pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4331 if (pkt.pts != AV_NOPTS_VALUE)
4332 pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4333
4334 if (pkt.pts != AV_NOPTS_VALUE)
4335 pkt.pts *= ist->ts_scale;
4336 if (pkt.dts != AV_NOPTS_VALUE)
4337 pkt.dts *= ist->ts_scale;
4338
4339 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4340 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4341 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4342 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4343 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4344 int64_t delta = pkt_dts - ifile->last_ts;
4345 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4346 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4347 ifile->ts_offset -= delta;
4348 av_log(NULL, AV_LOG_DEBUG,
4349 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4350 delta, ifile->ts_offset);
4351 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4352 if (pkt.pts != AV_NOPTS_VALUE)
4353 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4354 }
4355 }
4356
4357 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4358 if (pkt.pts != AV_NOPTS_VALUE) {
4359 pkt.pts += duration;
4360 ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4361 ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4362 }
4363
4364 if (pkt.dts != AV_NOPTS_VALUE)
4365 pkt.dts += duration;
4366
4367 pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4368 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4369 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4370 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4371 !copy_ts) {
4372 int64_t delta = pkt_dts - ist->next_dts;
4373 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4374 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4375 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4376 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4377 ifile->ts_offset -= delta;
4378 av_log(NULL, AV_LOG_DEBUG,
4379 "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4380 delta, ifile->ts_offset);
4381 pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4382 if (pkt.pts != AV_NOPTS_VALUE)
4383 pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4384 }
4385 } else {
4386 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4387 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4388 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4389 pkt.dts = AV_NOPTS_VALUE;
4390 }
4391 if (pkt.pts != AV_NOPTS_VALUE){
4392 int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4393 delta = pkt_pts - ist->next_dts;
4394 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4395 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4396 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4397 pkt.pts = AV_NOPTS_VALUE;
4398 }
4399 }
4400 }
4401 }
4402
4403 if (pkt.dts != AV_NOPTS_VALUE)
4404 ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4405
4406 if (debug_ts) {
4407 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4408 ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4409 av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4410 av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4411 av_ts2str(input_files[ist->file_index]->ts_offset),
4412 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4413 }
4414
4415 sub2video_heartbeat(ist, pkt.pts);
4416
4417 process_input_packet(ist, &pkt, 0);
4418
4419discard_packet:
4420 av_packet_unref(&pkt);
4421
4422 return 0;
4423}
4424
4425/**
4426 * Perform a step of transcoding for the specified filter graph.
4427 *
4428 * @param[in] graph filter graph to consider
4429 * @param[out] best_ist input stream where a frame would allow to continue
4430 * @return 0 for success, <0 for error
4431 */
4432static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4433{
4434 int i, ret;
4435 int nb_requests, nb_requests_max = 0;
4436 InputFilter *ifilter;
4437 InputStream *ist;
4438
4439 *best_ist = NULL;
4440 ret = avfilter_graph_request_oldest(graph->graph);
4441 if (ret >= 0)
4442 return reap_filters(0);
4443
4444 if (ret == AVERROR_EOF) {
4445 ret = reap_filters(1);
4446 for (i = 0; i < graph->nb_outputs; i++)
4447 close_output_stream(graph->outputs[i]->ost);
4448 return ret;
4449 }
4450 if (ret != AVERROR(EAGAIN))
4451 return ret;
4452
4453 for (i = 0; i < graph->nb_inputs; i++) {
4454 ifilter = graph->inputs[i];
4455 ist = ifilter->ist;
4456 if (input_files[ist->file_index]->eagain ||
4457 input_files[ist->file_index]->eof_reached)
4458 continue;
4459 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4460 if (nb_requests > nb_requests_max) {
4461 nb_requests_max = nb_requests;
4462 *best_ist = ist;
4463 }
4464 }
4465
4466 if (!*best_ist)
4467 for (i = 0; i < graph->nb_outputs; i++)
4468 graph->outputs[i]->ost->unavailable = 1;
4469
4470 return 0;
4471}
4472
4473/**
4474 * Run a single step of transcoding.
4475 *
4476 * @return 0 for success, <0 for error
4477 */
4478static int transcode_step(void)
4479{
4480 OutputStream *ost;
4481 InputStream *ist = NULL;
4482 int ret;
4483
4484 ost = choose_output();
4485 if (!ost) {
4486 if (got_eagain()) {
4487 reset_eagain();
4488 av_usleep(10000);
4489 return 0;
4490 }
4491 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4492 return AVERROR_EOF;
4493 }
4494
4495 if (ost->filter && !ost->filter->graph->graph) {
4496 if (ifilter_has_all_input_formats(ost->filter->graph)) {
4497 ret = configure_filtergraph(ost->filter->graph);
4498 if (ret < 0) {
4499 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4500 return ret;
4501 }
4502 }
4503 }
4504
4505 if (ost->filter && ost->filter->graph->graph) {
4506 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4507 return ret;
4508 if (!ist)
4509 return 0;
4510 } else if (ost->filter) {
4511 int i;
4512 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4513 InputFilter *ifilter = ost->filter->graph->inputs[i];
4514 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4515 ist = ifilter->ist;
4516 break;
4517 }
4518 }
4519 if (!ist) {
4520 ost->inputs_done = 1;
4521 return 0;
4522 }
4523 } else {
4524 av_assert0(ost->source_index >= 0);
4525 ist = input_streams[ost->source_index];
4526 }
4527
4528 ret = process_input(ist->file_index);
4529 if (ret == AVERROR(EAGAIN)) {
4530 if (input_files[ist->file_index]->eagain)
4531 ost->unavailable = 1;
4532 return 0;
4533 }
4534
4535 if (ret < 0)
4536 return ret == AVERROR_EOF ? 0 : ret;
4537
4538 return reap_filters(0);
4539}
4540
4541/*
4542 * The following code is the main loop of the file converter
4543 */
4544static int transcode(void)
4545{
4546 int ret, i;
4547 AVFormatContext *os;
4548 OutputStream *ost;
4549 InputStream *ist;
4550 int64_t timer_start;
4551 int64_t total_packets_written = 0;
4552
4553 ret = transcode_init();
4554 if (ret < 0)
4555 goto fail;
4556
4557 if (stdin_interaction) {
4558 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4559 }
4560
4561 timer_start = av_gettime_relative();
4562
4563#if HAVE_PTHREADS
4564 if ((ret = init_input_threads()) < 0)
4565 goto fail;
4566#endif
4567
4568 while (!received_sigterm) {
4569 int64_t cur_time= av_gettime_relative();
4570
4571 /* if 'q' pressed, exits */
4572 if (stdin_interaction)
4573 if (check_keyboard_interaction(cur_time) < 0)
4574 break;
4575
4576 /* check if there's any stream where output is still needed */
4577 if (!need_output()) {
4578 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4579 break;
4580 }
4581
4582 ret = transcode_step();
4583 if (ret < 0 && ret != AVERROR_EOF) {
4584 char errbuf[128];
4585 av_strerror(ret, errbuf, sizeof(errbuf));
4586
4587 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4588 break;
4589 }
4590
4591 /* dump report by using the output first video and audio streams */
4592 print_report(0, timer_start, cur_time);
4593 }
4594#if HAVE_PTHREADS
4595 free_input_threads();
4596#endif
4597
4598 /* at the end of stream, we must flush the decoder buffers */
4599 for (i = 0; i < nb_input_streams; i++) {
4600 ist = input_streams[i];
4601 if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4602 process_input_packet(ist, NULL, 0);
4603 }
4604 }
4605 flush_encoders();
4606
4607 term_exit();
4608
4609 /* write the trailer if needed and close file */
4610 for (i = 0; i < nb_output_files; i++) {
4611 os = output_files[i]->ctx;
4612 if (!output_files[i]->header_written) {
4613 av_log(NULL, AV_LOG_ERROR,
4614 "Nothing was written into output file %d (%s), because "
4615 "at least one of its streams received no packets.\n",
4616 i, os->filename);
4617 continue;
4618 }
4619 if ((ret = av_write_trailer(os)) < 0) {
4620 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
4621 if (exit_on_error)
4622 exit_program(1);
4623 }
4624 }
4625
4626 /* dump report by using the first video and audio streams */
4627 print_report(1, timer_start, av_gettime_relative());
4628
4629 /* close each encoder */
4630 for (i = 0; i < nb_output_streams; i++) {
4631 ost = output_streams[i];
4632 if (ost->encoding_needed) {
4633 av_freep(&ost->enc_ctx->stats_in);
4634 }
4635 total_packets_written += ost->packets_written;
4636 }
4637
4638 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4639 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4640 exit_program(1);
4641 }
4642
4643 /* close each decoder */
4644 for (i = 0; i < nb_input_streams; i++) {
4645 ist = input_streams[i];
4646 if (ist->decoding_needed) {
4647 avcodec_close(ist->dec_ctx);
4648 if (ist->hwaccel_uninit)
4649 ist->hwaccel_uninit(ist->dec_ctx);
4650 }
4651 }
4652
4653 av_buffer_unref(&hw_device_ctx);
4654
4655 /* finished ! */
4656 ret = 0;
4657
4658 fail:
4659#if HAVE_PTHREADS
4660 free_input_threads();
4661#endif
4662
4663 if (output_streams) {
4664 for (i = 0; i < nb_output_streams; i++) {
4665 ost = output_streams[i];
4666 if (ost) {
4667 if (ost->logfile) {
4668 if (fclose(ost->logfile))
4669 av_log(NULL, AV_LOG_ERROR,
4670 "Error closing logfile, loss of information possible: %s\n",
4671 av_err2str(AVERROR(errno)));
4672 ost->logfile = NULL;
4673 }
4674 av_freep(&ost->forced_kf_pts);
4675 av_freep(&ost->apad);
4676 av_freep(&ost->disposition);
4677 av_dict_free(&ost->encoder_opts);
4678 av_dict_free(&ost->sws_dict);
4679 av_dict_free(&ost->swr_opts);
4680 av_dict_free(&ost->resample_opts);
4681 }
4682 }
4683 }
4684 return ret;
4685}
4686
4687
4688static int64_t getutime(void)
4689{
4690#if HAVE_GETRUSAGE
4691 struct rusage rusage;
4692
4693 getrusage(RUSAGE_SELF, &rusage);
4694 return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4695#elif HAVE_GETPROCESSTIMES
4696 HANDLE proc;
4697 FILETIME c, e, k, u;
4698 proc = GetCurrentProcess();
4699 GetProcessTimes(proc, &c, &e, &k, &u);
4700 return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4701#else
4702 return av_gettime_relative();
4703#endif
4704}
4705
4706static int64_t getmaxrss(void)
4707{
4708#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4709 struct rusage rusage;
4710 getrusage(RUSAGE_SELF, &rusage);
4711 return (int64_t)rusage.ru_maxrss * 1024;
4712#elif HAVE_GETPROCESSMEMORYINFO
4713 HANDLE proc;
4714 PROCESS_MEMORY_COUNTERS memcounters;
4715 proc = GetCurrentProcess();
4716 memcounters.cb = sizeof(memcounters);
4717 GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4718 return memcounters.PeakPagefileUsage;
4719#else
4720 return 0;
4721#endif
4722}
4723
/* No-op log callback installed in daemon mode ("-d") to silence all output. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4727
/* Program entry point: register libraries, parse the command line, run the
 * transcode loop and report benchmark/decoding-error statistics. */
int main(int argc, char **argv)
{
    int i, ret;
    int64_t ti;

    init_dynload();

    register_exit(ffmpeg_cleanup);

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    parse_loglevel(argc, argv, options);

    /* "-d" as the very first argument: run detached with logging disabled. */
    if(argc>1 && !strcmp(argv[1], "-d")){
        run_as_daemon=1;
        av_log_set_callback(log_callback_null);
        argc--;
        argv++;
    }

    /* Register all codecs, devices, filters and (de)muxers before parsing
     * options, which may reference any of them. */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avfilter_register_all();
    av_register_all();
    avformat_network_init();

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

//     if (nb_input_files == 0) {
//         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
//         exit_program(1);
//     }

    /* Only generate an SDP when every output muxer is RTP. */
    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
            want_sdp = 0;
    }

    current_time = ti = getutime();
    if (transcode() < 0)
        exit_program(1);
    ti = getutime() - ti;
    if (do_benchmark) {
        av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    /* Exit 69 when the decode error rate exceeds -max_error_rate. */
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
        exit_program(69);

    /* 255 signals an interrupted run; exit_program() does not return. */
    exit_program(received_nb_signals ? 255 : main_return_code);
    return main_return_code;
}
4801