path: root/libavformat/utils.c (plain)
blob: 265813693ab8dfea3d65f0542658b5075369c97b
1/*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#include "avformat.h"
23#include "avio_internal.h"
24#include "internal.h"
25#include "libavcodec/internal.h"
26#include "libavcodec/raw.h"
27#include "libavcodec/bytestream.h"
28#include "libavutil/opt.h"
29#include "libavutil/dict.h"
30#include "libavutil/internal.h"
31#include "libavutil/pixdesc.h"
32#include "metadata.h"
33#include "id3v2.h"
34#include "libavutil/avassert.h"
35#include "libavutil/avstring.h"
36#include "libavutil/mathematics.h"
37#include "libavutil/parseutils.h"
38#include "libavutil/time.h"
39#include "libavutil/timestamp.h"
40#include "riff.h"
41#include "audiointerleave.h"
42#include "url.h"
43#include <stdarg.h>
44#if CONFIG_NETWORK
45#include "network.h"
46#endif
47
48#undef NDEBUG
49#include <assert.h>
50
51/**
52 * @file
53 * various utility functions for use within FFmpeg
54 */
55
56unsigned avformat_version(void)
57{
58 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
59 return LIBAVFORMAT_VERSION_INT;
60}
61
62const char *avformat_configuration(void)
63{
64 return FFMPEG_CONFIGURATION;
65}
66
67const char *avformat_license(void)
68{
69#define LICENSE_PREFIX "libavformat license: "
70 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
71}
72
73#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))
74
75static int is_relative(int64_t ts) {
76 return ts > (RELATIVE_TS_BASE - (1LL<<48));
77}
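
/*
 * Added commentary (not part of the upstream source): timestamps that are not
 * yet anchored to a known first_dts are stored as offsets from
 * RELATIVE_TS_BASE so they can still be ordered and shifted later.
 * is_relative() detects such placeholder values; update_initial_timestamps()
 * shifts them once the real first_dts is known, and av_read_frame() subtracts
 * RELATIVE_TS_BASE from any that remain before returning a packet.
 */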
78
79/**
80 * Wrap a given timestamp if there is an indication of an overflow
81 *
82 * @param st stream
83 * @param timestamp the time stamp to wrap
84 * @return resulting time stamp
85 */
86static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
87{
88 if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE &&
89 st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
90 if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
91 timestamp < st->pts_wrap_reference)
92 return timestamp + (1ULL<<st->pts_wrap_bits);
93 else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
94 timestamp >= st->pts_wrap_reference)
95 return timestamp - (1ULL<<st->pts_wrap_bits);
96 }
97 return timestamp;
98}
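
/*
 * Worked example (added commentary, values hypothetical): for an MPEG-TS like
 * stream with pts_wrap_bits == 33, pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET
 * and pts_wrap_reference == 8580000000, a timestamp of 4500 (one that has
 * already wrapped past 2^33) satisfies timestamp < pts_wrap_reference and is
 * returned as 4500 + (1ULL << 33) = 8589939092, restoring monotonicity.
 */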
99
100MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
101MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, video_codec)
102MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
103MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
104
105static AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
106{
107 if (st->codec->codec)
108 return st->codec->codec;
109
110 switch(st->codec->codec_type){
111 case AVMEDIA_TYPE_VIDEO:
112 if(s->video_codec) return s->video_codec;
113 break;
114 case AVMEDIA_TYPE_AUDIO:
115 if(s->audio_codec) return s->audio_codec;
116 break;
117 case AVMEDIA_TYPE_SUBTITLE:
118 if(s->subtitle_codec) return s->subtitle_codec;
119 break;
120 }
121
122 return avcodec_find_decoder(codec_id);
123}
124
125int av_format_get_probe_score(const AVFormatContext *s)
126{
127 return s->probe_score;
128}
129
130/* an arbitrarily chosen "sane" max packet size -- 50M */
131#define SANE_CHUNK_SIZE (50000000)
132
133int ffio_limit(AVIOContext *s, int size)
134{
135 if(s->maxsize>=0){
136 int64_t remaining= s->maxsize - avio_tell(s);
137 if(remaining < size){
138 int64_t newsize= avio_size(s);
139 if(!s->maxsize || s->maxsize<newsize)
140 s->maxsize= newsize - !newsize;
141 remaining= s->maxsize - avio_tell(s);
142 remaining= FFMAX(remaining, 0);
143 }
144
145 if(s->maxsize>=0 && remaining+1 < size){
146 av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
147 size= remaining+1;
148 }
149 }
150 return size;
151}
152
153/*
154 * Read the data in sane-sized chunks and append to pkt.
155 * Return the number of bytes read or an error.
156 */
157static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
158{
159 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
160 int orig_size = pkt->size;
161 int ret;
162
163 do {
164 int prev_size = pkt->size;
165 int read_size;
166
167 /*
168 * When the caller requests a lot of data, limit it to the amount left
169 * in the file, or to SANE_CHUNK_SIZE when the file size is not known
170 */
171 read_size = size;
172 if (read_size > SANE_CHUNK_SIZE/10) {
173 read_size = ffio_limit(s, read_size);
174 // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
175 if (s->maxsize < 0)
176 read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
177 }
178
179 ret = av_grow_packet(pkt, read_size);
180 if (ret < 0)
181 break;
182
183 ret = avio_read(s, pkt->data + prev_size, read_size);
184 if (ret != read_size) {
185 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
186 break;
187 }
188
189 size -= read_size;
190 } while (size > 0);
191 if (size > 0)
192 pkt->flags |= AV_PKT_FLAG_CORRUPT;
193
194 pkt->pos = orig_pos;
195 if (!pkt->size)
196 av_free_packet(pkt);
197 return pkt->size > orig_size ? pkt->size - orig_size : ret;
198}
199
200int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
201{
202 av_init_packet(pkt);
203 pkt->data = NULL;
204 pkt->size = 0;
205 pkt->pos = avio_tell(s);
206
207 return append_packet_chunked(s, pkt, size);
208}
209
210int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
211{
212 if (!pkt->size)
213 return av_get_packet(s, pkt, size);
214 return append_packet_chunked(s, pkt, size);
215}
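
/*
 * Usage sketch (added illustration; "pb" and the chunk size are hypothetical,
 * error handling reduced to a minimum):
 *
 *   AVPacket pkt;
 *   int ret = av_get_packet(pb, &pkt, 4096); // bytes read, or negative error
 *   if (ret >= 0) {
 *       // pkt.data/pkt.size hold up to 4096 bytes, pkt.pos the start offset
 *       av_free_packet(&pkt);
 *   }
 */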
216
217
218int av_filename_number_test(const char *filename)
219{
220 char buf[1024];
221 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
222}
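
/*
 * Example (added commentary): av_filename_number_test() succeeds for patterns
 * that av_get_frame_filename() can expand, e.g. "img%03d.jpg" (frame 1 gives
 * "img001.jpg"), and fails for plain names such as "video.mp4" that contain
 * no number pattern.
 */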
223
224AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
225{
226 AVProbeData lpd = *pd;
227 AVInputFormat *fmt1 = NULL, *fmt;
228 int score, nodat = 0, score_max=0;
229 static const uint8_t zerobuffer[AVPROBE_PADDING_SIZE];
230
231 if (!lpd.buf)
232 lpd.buf = zerobuffer;
233
234 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
235 int id3len = ff_id3v2_tag_len(lpd.buf);
236 if (lpd.buf_size > id3len + 16) {
237 lpd.buf += id3len;
238 lpd.buf_size -= id3len;
239 }else
240 nodat = 1;
241 }
242
243 fmt = NULL;
244 while ((fmt1 = av_iformat_next(fmt1))) {
245 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
246 continue;
247 score = 0;
248 if (fmt1->read_probe) {
249 score = fmt1->read_probe(&lpd);
250 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
251 score = FFMAX(score, nodat ? AVPROBE_SCORE_EXTENSION / 2 - 1 : 1);
252 } else if (fmt1->extensions) {
253 if (av_match_ext(lpd.filename, fmt1->extensions)) {
254 score = AVPROBE_SCORE_EXTENSION;
255 }
256 }
257 if (score > score_max) {
258 score_max = score;
259 fmt = fmt1;
260 }else if (score == score_max)
261 fmt = NULL;
262 }
263 if(nodat)
264 score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
265 *score_ret= score_max;
266
267 return fmt;
268}
269
270AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
271{
272 int score_ret;
273 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
274 if(score_ret > *score_max){
275 *score_max= score_ret;
276 return fmt;
277 }else
278 return NULL;
279}
280
281AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
282 int score=0;
283 return av_probe_input_format2(pd, is_opened, &score);
284}
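
/*
 * Usage sketch (added illustration; buffer contents hypothetical): probing a
 * memory buffer directly. The probe buffer should be followed by
 * AVPROBE_PADDING_SIZE zeroed bytes, as format probe functions may read
 * slightly past the payload.
 *
 *   uint8_t buf[2048 + AVPROBE_PADDING_SIZE] = { 0 };
 *   // ... fill the first 2048 bytes with data from the source ...
 *   AVProbeData pd = { "stream.bin", buf, 2048 };
 *   AVInputFormat *fmt = av_probe_input_format(&pd, 1);
 *   // fmt is NULL if no format matched (or on a tie between formats)
 */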
285
286static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
287{
288 static const struct {
289 const char *name; enum AVCodecID id; enum AVMediaType type;
290 } fmt_id_type[] = {
291 { "aac" , AV_CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
292 { "ac3" , AV_CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
293 { "dts" , AV_CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
294 { "eac3" , AV_CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
295 { "h264" , AV_CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
296 { "loas" , AV_CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
297 { "m4v" , AV_CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
298 { "mp3" , AV_CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
299 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
300 { 0 }
301 };
302 int score;
303 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
304
305 if (fmt && st->request_probe <= score) {
306 int i;
307 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
308 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
309 for (i = 0; fmt_id_type[i].name; i++) {
310 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
311 st->codec->codec_id = fmt_id_type[i].id;
312 st->codec->codec_type = fmt_id_type[i].type;
313 break;
314 }
315 }
316 }
317 return score;
318}
319
320/************************************************************/
321/* input media file */
322
323int av_demuxer_open(AVFormatContext *ic){
324 int err;
325
326 if (ic->iformat->read_header) {
327 err = ic->iformat->read_header(ic);
328 if (err < 0)
329 return err;
330 }
331
332 if (ic->pb && !ic->data_offset)
333 ic->data_offset = avio_tell(ic->pb);
334
335 return 0;
336}
337
338
339int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
340 const char *filename, void *logctx,
341 unsigned int offset, unsigned int max_probe_size)
342{
343 AVProbeData pd = { filename ? filename : "", NULL, -offset };
344 unsigned char *buf = NULL;
345 uint8_t *mime_type;
346 int ret = 0, probe_size, buf_offset = 0;
347 int score = 0;
348
349 if (!max_probe_size) {
350 max_probe_size = PROBE_BUF_MAX;
351 } else if (max_probe_size > PROBE_BUF_MAX) {
352 max_probe_size = PROBE_BUF_MAX;
353 } else if (max_probe_size < PROBE_BUF_MIN) {
354 av_log(logctx, AV_LOG_ERROR,
355 "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
356 return AVERROR(EINVAL);
357 }
358
359 if (offset >= max_probe_size) {
360 return AVERROR(EINVAL);
361 }
362
363 if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
364 if (!av_strcasecmp(mime_type, "audio/aacp")) {
365 *fmt = av_find_input_format("aac");
366 }
367 av_freep(&mime_type);
368 }
369
370 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
371 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
372
373 if (probe_size < offset) {
374 continue;
375 }
376 score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;
377
378 /* read probe data */
379 if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
380 return ret;
381 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
382 /* fail if the error was not end of file; otherwise lower the score */
383 if (ret != AVERROR_EOF) {
384 av_free(buf);
385 return ret;
386 }
387 score = 0;
388 ret = 0; /* error was end of file, nothing read */
389 }
390 pd.buf_size = buf_offset += ret;
391 pd.buf = &buf[offset];
392
393 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
394
395 /* guess file format */
396 *fmt = av_probe_input_format2(&pd, 1, &score);
397 if(*fmt){
398 if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration
399 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
400 }else
401 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
402 }
403 }
404
405 if (!*fmt) {
406 av_free(buf);
407 return AVERROR_INVALIDDATA;
408 }
409
410 /* rewind. reuse probe buffer to avoid seeking */
411 ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size);
412
413 return ret < 0 ? ret : score;
414}
415
416int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
417 const char *filename, void *logctx,
418 unsigned int offset, unsigned int max_probe_size)
419{
420 int ret = av_probe_input_buffer2(pb, fmt, filename, logctx, offset, max_probe_size);
421 return ret < 0 ? ret : 0;
422}
423
424
425/* open input file and probe the format if necessary */
426static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
427{
428 int ret;
429 AVProbeData pd = {filename, NULL, 0};
430 int score = AVPROBE_SCORE_RETRY;
431
432 if (s->pb) {
433 s->flags |= AVFMT_FLAG_CUSTOM_IO;
434 if (!s->iformat)
435 return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
436 else if (s->iformat->flags & AVFMT_NOFILE)
437 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
438 "will be ignored with AVFMT_NOFILE format.\n");
439 return 0;
440 }
441
442 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
443 (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
444 return score;
445
446 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
447 &s->interrupt_callback, options)) < 0)
448 return ret;
449 if (s->iformat)
450 return 0;
451 return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
452}
453
454static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
455 AVPacketList **plast_pktl){
456 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
457 if (!pktl)
458 return NULL;
459
460 if (*packet_buffer)
461 (*plast_pktl)->next = pktl;
462 else
463 *packet_buffer = pktl;
464
465 /* add the packet in the buffered packet list */
466 *plast_pktl = pktl;
467 pktl->pkt= *pkt;
468 return &pktl->pkt;
469}
470
471int avformat_queue_attached_pictures(AVFormatContext *s)
472{
473 int i;
474 for (i = 0; i < s->nb_streams; i++)
475 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
476 s->streams[i]->discard < AVDISCARD_ALL) {
477 AVPacket copy = s->streams[i]->attached_pic;
478 copy.buf = av_buffer_ref(copy.buf);
479 if (!copy.buf)
480 return AVERROR(ENOMEM);
481
482 add_to_pktbuf(&s->raw_packet_buffer, &copy, &s->raw_packet_buffer_end);
483 }
484 return 0;
485}
486
487int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
488{
489 AVFormatContext *s = *ps;
490 int ret = 0;
491 AVDictionary *tmp = NULL;
492 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
493
494 if (!s && !(s = avformat_alloc_context()))
495 return AVERROR(ENOMEM);
496 if (!s->av_class){
497 av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
498 return AVERROR(EINVAL);
499 }
500 if (fmt)
501 s->iformat = fmt;
502
503 if (options)
504 av_dict_copy(&tmp, *options, 0);
505
506 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
507 goto fail;
508
509 if ((ret = init_input(s, filename, &tmp)) < 0)
510 goto fail;
511 s->probe_score = ret;
512 avio_skip(s->pb, s->skip_initial_bytes);
513
514 /* check filename in case an image number is expected */
515 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
516 if (!av_filename_number_test(filename)) {
517 ret = AVERROR(EINVAL);
518 goto fail;
519 }
520 }
521
522 s->duration = s->start_time = AV_NOPTS_VALUE;
523 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
524
525 /* allocate private data */
526 if (s->iformat->priv_data_size > 0) {
527 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
528 ret = AVERROR(ENOMEM);
529 goto fail;
530 }
531 if (s->iformat->priv_class) {
532 *(const AVClass**)s->priv_data = s->iformat->priv_class;
533 av_opt_set_defaults(s->priv_data);
534 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
535 goto fail;
536 }
537 }
538
539 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
540 if (s->pb)
541 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
542
543 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
544 if ((ret = s->iformat->read_header(s)) < 0)
545 goto fail;
546
547 if (id3v2_extra_meta) {
548 if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
549 !strcmp(s->iformat->name, "tta")) {
550 if((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
551 goto fail;
552 } else
553 av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
554 }
555 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
556
557 if ((ret = avformat_queue_attached_pictures(s)) < 0)
558 goto fail;
559
560 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
561 s->data_offset = avio_tell(s->pb);
562
563 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
564
565 if (options) {
566 av_dict_free(options);
567 *options = tmp;
568 }
569 *ps = s;
570 return 0;
571
572fail:
573 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
574 av_dict_free(&tmp);
575 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
576 avio_close(s->pb);
577 avformat_free_context(s);
578 *ps = NULL;
579 return ret;
580}
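
/*
 * Typical caller sequence (added illustration; the file name and error
 * handling are placeholders, not part of this file):
 *
 *   AVFormatContext *ic = NULL;
 *   if (avformat_open_input(&ic, "input.mkv", NULL, NULL) < 0)
 *       return -1;                          // ic is freed and NULL on failure
 *   if (avformat_find_stream_info(ic, NULL) < 0) {
 *       avformat_close_input(&ic);
 *       return -1;
 *   }
 *   AVPacket pkt;
 *   while (av_read_frame(ic, &pkt) >= 0) {
 *       // ... use pkt.stream_index, pkt.data, pkt.size ...
 *       av_free_packet(&pkt);
 *   }
 *   avformat_close_input(&ic);
 */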
581
582/*******************************************************/
583
584static void force_codec_ids(AVFormatContext *s, AVStream *st)
585{
586 switch(st->codec->codec_type){
587 case AVMEDIA_TYPE_VIDEO:
588 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
589 break;
590 case AVMEDIA_TYPE_AUDIO:
591 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
592 break;
593 case AVMEDIA_TYPE_SUBTITLE:
594 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
595 break;
596 }
597}
598
599static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
600{
601 if(st->request_probe>0){
602 AVProbeData *pd = &st->probe_data;
603 int end;
604 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
605 --st->probe_packets;
606
607 if (pkt) {
608 uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
609 if(!new_buf) {
610 av_log(s, AV_LOG_WARNING,
611 "Failed to reallocate probe buffer for stream %d\n",
612 st->index);
613 goto no_packet;
614 }
615 pd->buf = new_buf;
616 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
617 pd->buf_size += pkt->size;
618 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
619 } else {
620no_packet:
621 st->probe_packets = 0;
622 if (!pd->buf_size) {
623 av_log(s, AV_LOG_WARNING, "nothing to probe for stream %d\n",
624 st->index);
625 }
626 }
627
628 end= s->raw_packet_buffer_remaining_size <= 0
629 || st->probe_packets<=0;
630
631 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
632 int score= set_codec_from_probe_data(s, st, pd);
633 if( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY)
634 || end){
635 pd->buf_size=0;
636 av_freep(&pd->buf);
637 st->request_probe= -1;
638 if(st->codec->codec_id != AV_CODEC_ID_NONE){
639 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
640 }else
641 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
642 }
643 force_codec_ids(s, st);
644 }
645 }
646 return 0;
647}
648
649int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
650{
651 int ret, i, err;
652 AVStream *st;
653
654 for(;;){
655 AVPacketList *pktl = s->raw_packet_buffer;
656
657 if (pktl) {
658 *pkt = pktl->pkt;
659 st = s->streams[pkt->stream_index];
660 if (s->raw_packet_buffer_remaining_size <= 0) {
661 if ((err = probe_codec(s, st, NULL)) < 0)
662 return err;
663 }
664 if(st->request_probe <= 0){
665 s->raw_packet_buffer = pktl->next;
666 s->raw_packet_buffer_remaining_size += pkt->size;
667 av_free(pktl);
668 return 0;
669 }
670 }
671
672 pkt->data = NULL;
673 pkt->size = 0;
674 av_init_packet(pkt);
675 ret= s->iformat->read_packet(s, pkt);
676 if (ret < 0) {
677 if (!pktl || ret == AVERROR(EAGAIN))
678 return ret;
679 for (i = 0; i < s->nb_streams; i++) {
680 st = s->streams[i];
681 if (st->probe_packets) {
682 if ((err = probe_codec(s, st, NULL)) < 0)
683 return err;
684 }
685 av_assert0(st->request_probe <= 0);
686 }
687 continue;
688 }
689
690 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
691 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
692 av_log(s, AV_LOG_WARNING,
693 "Dropped corrupted packet (stream = %d)\n",
694 pkt->stream_index);
695 av_free_packet(pkt);
696 continue;
697 }
698
699 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
700 av_packet_merge_side_data(pkt);
701
702 if(pkt->stream_index >= (unsigned)s->nb_streams){
703 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
704 continue;
705 }
706
707 st= s->streams[pkt->stream_index];
708 pkt->dts = wrap_timestamp(st, pkt->dts);
709 pkt->pts = wrap_timestamp(st, pkt->pts);
710
711 force_codec_ids(s, st);
712
713 /* TODO: audio: time filter; video: frame reordering (pts != dts) */
714 if (s->use_wallclock_as_timestamps)
715 pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
716
717 if(!pktl && st->request_probe <= 0)
718 return ret;
719
720 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
721 s->raw_packet_buffer_remaining_size -= pkt->size;
722
723 if ((err = probe_codec(s, st, pkt)) < 0)
724 return err;
725 }
726}
727
728#if FF_API_READ_PACKET
729int av_read_packet(AVFormatContext *s, AVPacket *pkt)
730{
731 return ff_read_packet(s, pkt);
732}
733#endif
734
735
736/**********************************************************/
737
738static int determinable_frame_size(AVCodecContext *avctx)
739{
740 if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
741 avctx->codec_id == AV_CODEC_ID_MP1 ||
742 avctx->codec_id == AV_CODEC_ID_MP2 ||
743 avctx->codec_id == AV_CODEC_ID_MP3/* ||
744 avctx->codec_id == AV_CODEC_ID_CELT*/)
745 return 1;
746 return 0;
747}
748
749/**
750 * Get the number of samples of an audio frame. Return -1 on error.
751 */
752int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
753{
754 int frame_size;
755
756 /* give frame_size priority if demuxing */
757 if (!mux && enc->frame_size > 1)
758 return enc->frame_size;
759
760 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
761 return frame_size;
762
763 /* Fall back on using frame_size if muxing. */
764 if (enc->frame_size > 1)
765 return enc->frame_size;
766
767 // For WMA we currently have no other means to calculate the duration, so we
768 // do it here by assuming CBR, which is true for all known cases.
769 if(!mux && enc->bit_rate>0 && size>0 && enc->sample_rate>0 && enc->block_align>1) {
770 if (enc->codec_id == AV_CODEC_ID_WMAV1 || enc->codec_id == AV_CODEC_ID_WMAV2)
771 return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
772 }
773
774 return -1;
775}
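
/*
 * Worked example (added commentary) for the CBR fallback above: a WMAv2
 * packet of size 8192 bytes with sample_rate 44100 and bit_rate 128000 gets
 * an estimated duration of 8192 * 8 * 44100 / 128000 = 22579 samples,
 * i.e. roughly half a second of audio.
 */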
776
777
778/**
779 * Compute the frame duration, in seconds, as the fraction *pnum / *pden. Both are set to 0 if it is not available.
780 */
781void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
782 AVCodecParserContext *pc, AVPacket *pkt)
783{
784 int frame_size;
785
786 *pnum = 0;
787 *pden = 0;
788 switch(st->codec->codec_type) {
789 case AVMEDIA_TYPE_VIDEO:
790 if (st->r_frame_rate.num && !pc) {
791 *pnum = st->r_frame_rate.den;
792 *pden = st->r_frame_rate.num;
793 } else if(st->time_base.num*1000LL > st->time_base.den) {
794 *pnum = st->time_base.num;
795 *pden = st->time_base.den;
796 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
797 *pnum = st->codec->time_base.num;
798 *pden = st->codec->time_base.den;
799 if (pc && pc->repeat_pict) {
800 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
801 *pden /= 1 + pc->repeat_pict;
802 else
803 *pnum *= 1 + pc->repeat_pict;
804 }
805 // If this codec can be either interlaced or progressive then we need a parser to compute the duration of a packet.
806 // Without a parser, leave the duration undefined.
807 if(st->codec->ticks_per_frame>1 && !pc){
808 *pnum = *pden = 0;
809 }
810 }
811 break;
812 case AVMEDIA_TYPE_AUDIO:
813 frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
814 if (frame_size <= 0 || st->codec->sample_rate <= 0)
815 break;
816 *pnum = frame_size;
817 *pden = st->codec->sample_rate;
818 break;
819 default:
820 break;
821 }
822}
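
/*
 * Worked example (added commentary): for a 25 fps video stream without a
 * parser (r_frame_rate = 25/1) this sets *pnum = 1 and *pden = 25.
 * compute_pkt_fields() then rescales that to the stream time base; with a
 * 1/90000 time base the packet duration becomes 1 * 90000 / 25 = 3600 ticks.
 */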
823
824static int is_intra_only(AVCodecContext *enc){
825 const AVCodecDescriptor *desc;
826
827 if(enc->codec_type != AVMEDIA_TYPE_VIDEO)
828 return 1;
829
830 desc = av_codec_get_codec_descriptor(enc);
831 if (!desc) {
832 desc = avcodec_descriptor_get(enc->codec_id);
833 av_codec_set_codec_descriptor(enc, desc);
834 }
835 if (desc)
836 return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
837 return 0;
838}
839
840static int has_decode_delay_been_guessed(AVStream *st)
841{
842 if(st->codec->codec_id != AV_CODEC_ID_H264) return 1;
843 if(!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
844 return 1;
845#if CONFIG_H264_DECODER
846 if(st->codec->has_b_frames &&
847 avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
848 return 1;
849#endif
850 if(st->codec->has_b_frames<3)
851 return st->nb_decoded_frames >= 7;
852 else if(st->codec->has_b_frames<4)
853 return st->nb_decoded_frames >= 18;
854 else
855 return st->nb_decoded_frames >= 20;
856}
857
858static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
859{
860 if (pktl->next)
861 return pktl->next;
862 if (pktl == s->parse_queue_end)
863 return s->packet_buffer;
864 return NULL;
865}
866
867static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index)
868{
869 if (s->correct_ts_overflow && st->pts_wrap_bits < 63 &&
870 st->pts_wrap_reference == AV_NOPTS_VALUE && st->first_dts != AV_NOPTS_VALUE) {
871 int i;
872
873 // the reference timestamp should be 60 s before the first timestamp
874 int64_t pts_wrap_reference = st->first_dts - av_rescale(60, st->time_base.den, st->time_base.num);
875 // if the first timestamp is within both 1/8 of the wrap range and 60 s of the wrap point, subtract rather than add the wrap offset
876 int pts_wrap_behavior = (st->first_dts < (1LL<<st->pts_wrap_bits) - (1LL<<st->pts_wrap_bits-3)) ||
877 (st->first_dts < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
878 AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;
879
880 AVProgram *first_program = av_find_program_from_stream(s, NULL, stream_index);
881
882 if (!first_program) {
883 int default_stream_index = av_find_default_stream_index(s);
884 if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
885 for (i=0; i<s->nb_streams; i++) {
886 s->streams[i]->pts_wrap_reference = pts_wrap_reference;
887 s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
888 }
889 }
890 else {
891 st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
892 st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
893 }
894 }
895 else {
896 AVProgram *program = first_program;
897 while (program) {
898 if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
899 pts_wrap_reference = program->pts_wrap_reference;
900 pts_wrap_behavior = program->pts_wrap_behavior;
901 break;
902 }
903 program = av_find_program_from_stream(s, program, stream_index);
904 }
905
906 // update every program with differing pts_wrap_reference
907 program = first_program;
908 while(program) {
909 if (program->pts_wrap_reference != pts_wrap_reference) {
910 for (i=0; i<program->nb_stream_indexes; i++) {
911 s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
912 s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
913 }
914
915 program->pts_wrap_reference = pts_wrap_reference;
916 program->pts_wrap_behavior = pts_wrap_behavior;
917 }
918 program = av_find_program_from_stream(s, program, stream_index);
919 }
920 }
921 return 1;
922 }
923 return 0;
924}
925
926static void update_initial_timestamps(AVFormatContext *s, int stream_index,
927 int64_t dts, int64_t pts, AVPacket *pkt)
928{
929 AVStream *st= s->streams[stream_index];
930 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
931 int64_t pts_buffer[MAX_REORDER_DELAY+1];
932 int64_t shift;
933 int i, delay;
934
935 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
936 return;
937
938 delay = st->codec->has_b_frames;
939 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
940 st->cur_dts= dts;
941 shift = st->first_dts - RELATIVE_TS_BASE;
942
943 for (i=0; i<MAX_REORDER_DELAY+1; i++)
944 pts_buffer[i] = AV_NOPTS_VALUE;
945
946 if (is_relative(pts))
947 pts += shift;
948
949 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
950 if(pktl->pkt.stream_index != stream_index)
951 continue;
952 if(is_relative(pktl->pkt.pts))
953 pktl->pkt.pts += shift;
954
955 if(is_relative(pktl->pkt.dts))
956 pktl->pkt.dts += shift;
957
958 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
959 st->start_time= pktl->pkt.pts;
960
961 if(pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
962 pts_buffer[0]= pktl->pkt.pts;
963 for(i=0; i<delay && pts_buffer[i] > pts_buffer[i+1]; i++)
964 FFSWAP(int64_t, pts_buffer[i], pts_buffer[i+1]);
965 if(pktl->pkt.dts == AV_NOPTS_VALUE)
966 pktl->pkt.dts= pts_buffer[0];
967 }
968 }
969
970 if (update_wrap_reference(s, st, stream_index) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
971 // correct first time stamps to negative values
972 st->first_dts = wrap_timestamp(st, st->first_dts);
973 st->cur_dts = wrap_timestamp(st, st->cur_dts);
974 pkt->dts = wrap_timestamp(st, pkt->dts);
975 pkt->pts = wrap_timestamp(st, pkt->pts);
976 pts = wrap_timestamp(st, pts);
977 }
978
979 if (st->start_time == AV_NOPTS_VALUE)
980 st->start_time = pts;
981}
982
983static void update_initial_durations(AVFormatContext *s, AVStream *st,
984 int stream_index, int duration)
985{
986 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
987 int64_t cur_dts= RELATIVE_TS_BASE;
988
989 if(st->first_dts != AV_NOPTS_VALUE){
990 cur_dts= st->first_dts;
991 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
992 if(pktl->pkt.stream_index == stream_index){
993 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
994 break;
995 cur_dts -= duration;
996 }
997 }
998 if(pktl && pktl->pkt.dts != st->first_dts) {
999 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %d) in the queue\n",
1000 av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
1001 return;
1002 }
1003 if(!pktl) {
1004 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
1005 return;
1006 }
1007 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
1008 st->first_dts = cur_dts;
1009 }else if(st->cur_dts != RELATIVE_TS_BASE)
1010 return;
1011
1012 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
1013 if(pktl->pkt.stream_index != stream_index)
1014 continue;
1015 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
1016 && !pktl->pkt.duration){
1017 pktl->pkt.dts= cur_dts;
1018 if(!st->codec->has_b_frames)
1019 pktl->pkt.pts= cur_dts;
1020// if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
1021 pktl->pkt.duration = duration;
1022 }else
1023 break;
1024 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
1025 }
1026 if(!pktl)
1027 st->cur_dts= cur_dts;
1028}
1029
1030static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
1031 AVCodecParserContext *pc, AVPacket *pkt)
1032{
1033 int num, den, presentation_delayed, delay, i;
1034 int64_t offset;
1035
1036 if (s->flags & AVFMT_FLAG_NOFILLIN)
1037 return;
1038
1039 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1040 pkt->dts= AV_NOPTS_VALUE;
1041
1042 if (st->codec->codec_id != AV_CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
1043 //FIXME Set low_delay = 0 when has_b_frames = 1
1044 st->codec->has_b_frames = 1;
1045
1046 /* do we have a video B-frame ? */
1047 delay= st->codec->has_b_frames;
1048 presentation_delayed = 0;
1049
1050 /* XXX: need has_b_frame, but cannot get it if the codec is
1051 not initialized */
1052 if (delay &&
1053 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1054 presentation_delayed = 1;
1055
1056 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
1057 st->pts_wrap_bits < 63 &&
1058 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
1059 if(is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > st->cur_dts) {
1060 pkt->dts -= 1LL<<st->pts_wrap_bits;
1061 } else
1062 pkt->pts += 1LL<<st->pts_wrap_bits;
1063 }
1064
1065 // Some MPEG-2 streams in MPEG-PS lack dts (issue171 / input_file.mpg),
1066 // so we take the conservative approach and discard both.
1067 // Note: if this misbehaves for an H.264 file then presentation_delayed is probably not set correctly.
1068 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1069 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1070 if(strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")) // otherwise we discard correct timestamps for vc1-wmapro.ism
1071 pkt->dts= AV_NOPTS_VALUE;
1072 }
1073
1074 if (pkt->duration == 0) {
1075 ff_compute_frame_duration(&num, &den, st, pc, pkt);
1076 if (den && num) {
1077 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1078 }
1079 }
1080 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1081 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1082
1083 /* correct timestamps with byte offset if demuxers only have timestamps
1084 on packet boundaries */
1085 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1086 /* this will estimate bitrate based on this frame's duration and size */
1087 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1088 if(pkt->pts != AV_NOPTS_VALUE)
1089 pkt->pts += offset;
1090 if(pkt->dts != AV_NOPTS_VALUE)
1091 pkt->dts += offset;
1092 }
1093
1094 if (pc && pc->dts_sync_point >= 0) {
1095 // we have synchronization info from the parser
1096 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1097 if (den > 0) {
1098 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1099 if (pkt->dts != AV_NOPTS_VALUE) {
1100 // got DTS from the stream, update reference timestamp
1101 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1102 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1103 // compute DTS based on reference timestamp
1104 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1105 }
1106
1107 if (st->reference_dts != AV_NOPTS_VALUE && pkt->pts == AV_NOPTS_VALUE)
1108 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1109
1110 if (pc->dts_sync_point > 0)
1111 st->reference_dts = pkt->dts; // new reference
1112 }
1113 }
1114
1115 /* This may be redundant, but it should not hurt. */
1116 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1117 presentation_delayed = 1;
1118
1119 av_dlog(NULL, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1120 presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1121 /* interpolate PTS and DTS if they are not present */
1122 //We skip H264 currently because delay and has_b_frames are not reliably set
1123 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
1124 if (presentation_delayed) {
1125 /* DTS = decompression timestamp */
1126 /* PTS = presentation timestamp */
1127 if (pkt->dts == AV_NOPTS_VALUE)
1128 pkt->dts = st->last_IP_pts;
1129 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1130 if (pkt->dts == AV_NOPTS_VALUE)
1131 pkt->dts = st->cur_dts;
1132
1133 /* this is tricky: the dts must be incremented by the duration
1134 of the frame we are displaying, i.e. the last I- or P-frame */
1135 if (st->last_IP_duration == 0)
1136 st->last_IP_duration = pkt->duration;
1137 if(pkt->dts != AV_NOPTS_VALUE)
1138 st->cur_dts = pkt->dts + st->last_IP_duration;
1139 st->last_IP_duration = pkt->duration;
1140 st->last_IP_pts= pkt->pts;
1141 /* cannot compute PTS if not present (we can compute it only
1142 by knowing the future) */
1143 } else if (pkt->pts != AV_NOPTS_VALUE ||
1144 pkt->dts != AV_NOPTS_VALUE ||
1145 pkt->duration ) {
1146 int duration = pkt->duration;
1147
1148 /* presentation is not delayed : PTS and DTS are the same */
1149 if (pkt->pts == AV_NOPTS_VALUE)
1150 pkt->pts = pkt->dts;
1151 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1152 pkt->pts, pkt);
1153 if (pkt->pts == AV_NOPTS_VALUE)
1154 pkt->pts = st->cur_dts;
1155 pkt->dts = pkt->pts;
1156 if (pkt->pts != AV_NOPTS_VALUE)
1157 st->cur_dts = pkt->pts + duration;
1158 }
1159 }
1160
1161 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
1162 st->pts_buffer[0]= pkt->pts;
1163 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1164 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1165 if(pkt->dts == AV_NOPTS_VALUE)
1166 pkt->dts= st->pts_buffer[0];
1167 }
1168 if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
1169 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); // this should happen on the first packet
1170 }
1171 if(pkt->dts > st->cur_dts)
1172 st->cur_dts = pkt->dts;
1173
1174 av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1175 presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1176
1177 /* update flags */
1178 if (is_intra_only(st->codec))
1179 pkt->flags |= AV_PKT_FLAG_KEY;
1180 if (pc)
1181 pkt->convergence_duration = pc->convergence_duration;
1182}
1183
1184static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1185{
1186 while (*pkt_buf) {
1187 AVPacketList *pktl = *pkt_buf;
1188 *pkt_buf = pktl->next;
1189 av_free_packet(&pktl->pkt);
1190 av_freep(&pktl);
1191 }
1192 *pkt_buf_end = NULL;
1193}
1194
1195/**
1196 * Parse a packet, add all split parts to parse_queue
1197 *
1198 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1199 */
1200static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1201{
1202 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1203 AVStream *st = s->streams[stream_index];
1204 uint8_t *data = pkt ? pkt->data : NULL;
1205 int size = pkt ? pkt->size : 0;
1206 int ret = 0, got_output = 0;
1207
1208 if (!pkt) {
1209 av_init_packet(&flush_pkt);
1210 pkt = &flush_pkt;
1211 got_output = 1;
1212 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1213 // preserve 0-size sync packets
1214 compute_pkt_fields(s, st, st->parser, pkt);
1215 }
1216
1217 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1218 int len;
1219
1220 av_init_packet(&out_pkt);
1221 len = av_parser_parse2(st->parser, st->codec,
1222 &out_pkt.data, &out_pkt.size, data, size,
1223 pkt->pts, pkt->dts, pkt->pos);
1224
1225 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1226 pkt->pos = -1;
1227 /* increment read pointer */
1228 data += len;
1229 size -= len;
1230
1231 got_output = !!out_pkt.size;
1232
1233 if (!out_pkt.size)
1234 continue;
1235
1236 if (pkt->side_data) {
1237 out_pkt.side_data = pkt->side_data;
1238 out_pkt.side_data_elems = pkt->side_data_elems;
1239 pkt->side_data = NULL;
1240 pkt->side_data_elems = 0;
1241 }
1242
1243 /* set the duration */
1244 out_pkt.duration = 0;
1245 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1246 if (st->codec->sample_rate > 0) {
1247 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1248 (AVRational){ 1, st->codec->sample_rate },
1249 st->time_base,
1250 AV_ROUND_DOWN);
1251 }
1252 } else if (st->codec->time_base.num != 0 &&
1253 st->codec->time_base.den != 0) {
1254 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1255 st->codec->time_base,
1256 st->time_base,
1257 AV_ROUND_DOWN);
1258 }
1259
1260 out_pkt.stream_index = st->index;
1261 out_pkt.pts = st->parser->pts;
1262 out_pkt.dts = st->parser->dts;
1263 out_pkt.pos = st->parser->pos;
1264
1265 if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1266 out_pkt.pos = st->parser->frame_offset;
1267
1268 if (st->parser->key_frame == 1 ||
1269 (st->parser->key_frame == -1 &&
1270 st->parser->pict_type == AV_PICTURE_TYPE_I))
1271 out_pkt.flags |= AV_PKT_FLAG_KEY;
1272
1273 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1274 out_pkt.flags |= AV_PKT_FLAG_KEY;
1275
1276 compute_pkt_fields(s, st, st->parser, &out_pkt);
1277
1278 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1279 out_pkt.buf = pkt->buf;
1280 pkt->buf = NULL;
1281#if FF_API_DESTRUCT_PACKET
1282FF_DISABLE_DEPRECATION_WARNINGS
1283 out_pkt.destruct = pkt->destruct;
1284 pkt->destruct = NULL;
1285FF_ENABLE_DEPRECATION_WARNINGS
1286#endif
1287 }
1288 if ((ret = av_dup_packet(&out_pkt)) < 0)
1289 goto fail;
1290
1291 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1292 av_free_packet(&out_pkt);
1293 ret = AVERROR(ENOMEM);
1294 goto fail;
1295 }
1296 }
1297
1298
1299 /* end of the stream => close and free the parser */
1300 if (pkt == &flush_pkt) {
1301 av_parser_close(st->parser);
1302 st->parser = NULL;
1303 }
1304
1305fail:
1306 av_free_packet(pkt);
1307 return ret;
1308}
1309
1310static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1311 AVPacketList **pkt_buffer_end,
1312 AVPacket *pkt)
1313{
1314 AVPacketList *pktl;
1315 av_assert0(*pkt_buffer);
1316 pktl = *pkt_buffer;
1317 *pkt = pktl->pkt;
1318 *pkt_buffer = pktl->next;
1319 if (!pktl->next)
1320 *pkt_buffer_end = NULL;
1321 av_freep(&pktl);
1322 return 0;
1323}
1324
1325static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1326{
1327 int ret = 0, i, got_packet = 0;
1328
1329 av_init_packet(pkt);
1330
1331 while (!got_packet && !s->parse_queue) {
1332 AVStream *st;
1333 AVPacket cur_pkt;
1334
1335 /* read next packet */
1336 ret = ff_read_packet(s, &cur_pkt);
1337 if (ret < 0) {
1338 if (ret == AVERROR(EAGAIN))
1339 return ret;
1340 /* flush the parsers */
1341 for(i = 0; i < s->nb_streams; i++) {
1342 st = s->streams[i];
1343 if (st->parser && st->need_parsing)
1344 parse_packet(s, NULL, st->index);
1345 }
1346 /* all remaining packets are now in parse_queue =>
1347 * really terminate parsing */
1348 break;
1349 }
1350 ret = 0;
1351 st = s->streams[cur_pkt.stream_index];
1352
1353 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1354 cur_pkt.dts != AV_NOPTS_VALUE &&
1355 cur_pkt.pts < cur_pkt.dts) {
1356 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1357 cur_pkt.stream_index,
1358 av_ts2str(cur_pkt.pts),
1359 av_ts2str(cur_pkt.dts),
1360 cur_pkt.size);
1361 }
1362 if (s->debug & FF_FDEBUG_TS)
1363 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1364 cur_pkt.stream_index,
1365 av_ts2str(cur_pkt.pts),
1366 av_ts2str(cur_pkt.dts),
1367 cur_pkt.size,
1368 cur_pkt.duration,
1369 cur_pkt.flags);
1370
1371 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1372 st->parser = av_parser_init(st->codec->codec_id);
1373 if (!st->parser) {
1374 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1375 "%s, packets or times may be invalid.\n",
1376 avcodec_get_name(st->codec->codec_id));
1377 /* no parser available: just output the raw packets */
1378 st->need_parsing = AVSTREAM_PARSE_NONE;
1379 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1380 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1381 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1382 st->parser->flags |= PARSER_FLAG_ONCE;
1383 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1384 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1385 }
1386 }
1387
1388 if (!st->need_parsing || !st->parser) {
1389 /* no parsing needed: we just output the packet as is */
1390 *pkt = cur_pkt;
1391 compute_pkt_fields(s, st, NULL, pkt);
1392 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1393 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1394 ff_reduce_index(s, st->index);
1395 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1396 }
1397 got_packet = 1;
1398 } else if (st->discard < AVDISCARD_ALL) {
1399 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1400 return ret;
1401 } else {
1402 /* free packet */
1403 av_free_packet(&cur_pkt);
1404 }
1405 if (pkt->flags & AV_PKT_FLAG_KEY)
1406 st->skip_to_keyframe = 0;
1407 if (st->skip_to_keyframe) {
1408 av_free_packet(&cur_pkt);
1409 if (got_packet) {
1410 *pkt = cur_pkt;
1411 }
1412 got_packet = 0;
1413 }
1414 }
1415
1416 if (!got_packet && s->parse_queue)
1417 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1418
1419 if(s->debug & FF_FDEBUG_TS)
1420 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1421 pkt->stream_index,
1422 av_ts2str(pkt->pts),
1423 av_ts2str(pkt->dts),
1424 pkt->size,
1425 pkt->duration,
1426 pkt->flags);
1427
1428 return ret;
1429}
1430
1431int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1432{
1433 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1434 int eof = 0;
1435 int ret;
1436 AVStream *st;
1437
1438 if (!genpts) {
1439 ret = s->packet_buffer ?
1440 read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt) :
1441 read_frame_internal(s, pkt);
1442 if (ret < 0)
1443 return ret;
1444 goto return_packet;
1445 }
1446
1447 for (;;) {
1448 AVPacketList *pktl = s->packet_buffer;
1449
1450 if (pktl) {
1451 AVPacket *next_pkt = &pktl->pkt;
1452
1453 if (next_pkt->dts != AV_NOPTS_VALUE) {
1454 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1455 // last dts seen for this stream. If any of the packets following
1456 // the current one have no dts, we will set this to AV_NOPTS_VALUE.
1457 int64_t last_dts = next_pkt->dts;
1458 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1459 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1460 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1461 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1462 next_pkt->pts = pktl->pkt.dts;
1463 }
1464 if (last_dts != AV_NOPTS_VALUE) {
1465 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1466 last_dts = pktl->pkt.dts;
1467 }
1468 }
1469 pktl = pktl->next;
1470 }
1471 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1472 // Fix the case where the last reference frame has no pts (for MXF etc.).
1473 // We only do this when:
1474 // 1. we are at eof,
1475 // 2. we are not able to resolve a pts value for the current packet, and
1476 // 3. the packets for this stream at the end of the file had valid dts.
1477 next_pkt->pts = last_dts + next_pkt->duration;
1478 }
1479 pktl = s->packet_buffer;
1480 }
1481
1482 /* read packet from packet buffer, if there is data */
1483 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1484 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1485 ret = read_from_packet_buffer(&s->packet_buffer,
1486 &s->packet_buffer_end, pkt);
1487 goto return_packet;
1488 }
1489 }
1490
1491 ret = read_frame_internal(s, pkt);
1492 if (ret < 0) {
1493 if (pktl && ret != AVERROR(EAGAIN)) {
1494 eof = 1;
1495 continue;
1496 } else
1497 return ret;
1498 }
1499
1500 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1501 &s->packet_buffer_end)) < 0)
1502 return AVERROR(ENOMEM);
1503 }
1504
1505return_packet:
1506
1507 st = s->streams[pkt->stream_index];
1508 if (st->skip_samples) {
1509 uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1510 if (p) {
1511 AV_WL32(p, st->skip_samples);
1512 av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1513 }
1514 st->skip_samples = 0;
1515 }
1516
1517 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1518 ff_reduce_index(s, st->index);
1519 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1520 }
1521
1522 if (is_relative(pkt->dts))
1523 pkt->dts -= RELATIVE_TS_BASE;
1524 if (is_relative(pkt->pts))
1525 pkt->pts -= RELATIVE_TS_BASE;
1526
1527 return ret;
1528}
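
/*
 * Added note: the AVFMT_FLAG_GENPTS path above buffers packets so that a
 * missing pts can be filled in from the dts of later packets of the same
 * stream. A caller that needs every returned packet to carry a pts can
 * request this before reading, for example:
 *
 *   ic->flags |= AVFMT_FLAG_GENPTS;
 *   while (av_read_frame(ic, &pkt) >= 0) {
 *       // pkt.pts is now set whenever it can be derived
 *       av_free_packet(&pkt);
 *   }
 */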
1529
1530/* XXX: empty the packet queue */
1531static void flush_packet_queue(AVFormatContext *s)
1532{
1533 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1534 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1535 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1536
1537 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1538}
1539
1540/*******************************************************/
1541/* seek support */
1542
1543int av_find_default_stream_index(AVFormatContext *s)
1544{
1545 int first_audio_index = -1;
1546 int i;
1547 AVStream *st;
1548
1549 if (s->nb_streams <= 0)
1550 return -1;
1551 for(i = 0; i < s->nb_streams; i++) {
1552 st = s->streams[i];
1553 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1554 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1555 return i;
1556 }
1557 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1558 first_audio_index = i;
1559 }
1560 return first_audio_index >= 0 ? first_audio_index : 0;
1561}
1562
1563/**
1564 * Flush the frame reader.
1565 */
1566void ff_read_frame_flush(AVFormatContext *s)
1567{
1568 AVStream *st;
1569 int i, j;
1570
1571 flush_packet_queue(s);
1572
1573 /* for each stream, reset read state */
1574 for(i = 0; i < s->nb_streams; i++) {
1575 st = s->streams[i];
1576
1577 if (st->parser) {
1578 av_parser_close(st->parser);
1579 st->parser = NULL;
1580 }
1581 st->last_IP_pts = AV_NOPTS_VALUE;
1582 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1583 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1584 st->reference_dts = AV_NOPTS_VALUE;
1585
1586 st->probe_packets = MAX_PROBE_PACKETS;
1587
1588 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1589 st->pts_buffer[j]= AV_NOPTS_VALUE;
1590 }
1591}
1592
1593void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1594{
1595 int i;
1596
1597 for(i = 0; i < s->nb_streams; i++) {
1598 AVStream *st = s->streams[i];
1599
1600 st->cur_dts = av_rescale(timestamp,
1601 st->time_base.den * (int64_t)ref_st->time_base.num,
1602 st->time_base.num * (int64_t)ref_st->time_base.den);
1603 }
1604}
1605
1606void ff_reduce_index(AVFormatContext *s, int stream_index)
1607{
1608 AVStream *st= s->streams[stream_index];
1609 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1610
1611 if((unsigned)st->nb_index_entries >= max_entries){
1612 int i;
1613 for(i=0; 2*i<st->nb_index_entries; i++)
1614 st->index_entries[i]= st->index_entries[2*i];
1615 st->nb_index_entries= i;
1616 }
1617}
1618
1619int ff_add_index_entry(AVIndexEntry **index_entries,
1620 int *nb_index_entries,
1621 unsigned int *index_entries_allocated_size,
1622 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1623{
1624 AVIndexEntry *entries, *ie;
1625 int index;
1626
1627 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1628 return -1;
1629
1630 if(timestamp == AV_NOPTS_VALUE)
1631 return AVERROR(EINVAL);
1632
1633 if (size < 0 || size > 0x3FFFFFFF)
1634 return AVERROR(EINVAL);
1635
1636 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1637 timestamp -= RELATIVE_TS_BASE;
1638
1639 entries = av_fast_realloc(*index_entries,
1640 index_entries_allocated_size,
1641 (*nb_index_entries + 1) *
1642 sizeof(AVIndexEntry));
1643 if(!entries)
1644 return -1;
1645
1646 *index_entries= entries;
1647
1648 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1649
1650 if(index<0){
1651 index= (*nb_index_entries)++;
1652 ie= &entries[index];
1653 av_assert0(index==0 || ie[-1].timestamp < timestamp);
1654 }else{
1655 ie= &entries[index];
1656 if(ie->timestamp != timestamp){
1657 if(ie->timestamp <= timestamp)
1658 return -1;
1659 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1660 (*nb_index_entries)++;
1661 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1662 distance= ie->min_distance;
1663 }
1664
1665 ie->pos = pos;
1666 ie->timestamp = timestamp;
1667 ie->min_distance= distance;
1668 ie->size= size;
1669 ie->flags = flags;
1670
1671 return index;
1672}
1673
1674int av_add_index_entry(AVStream *st,
1675 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1676{
1677 timestamp = wrap_timestamp(st, timestamp);
1678 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1679 &st->index_entries_allocated_size, pos,
1680 timestamp, size, distance, flags);
1681}
1682
1683int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1684 int64_t wanted_timestamp, int flags)
1685{
1686 int a, b, m;
1687 int64_t timestamp;
1688
1689 a = - 1;
1690 b = nb_entries;
1691
1692 //optimize appending index entries at the end
1693 if(b && entries[b-1].timestamp < wanted_timestamp)
1694 a= b-1;
1695
1696 while (b - a > 1) {
1697 m = (a + b) >> 1;
1698 timestamp = entries[m].timestamp;
1699 if(timestamp >= wanted_timestamp)
1700 b = m;
1701 if(timestamp <= wanted_timestamp)
1702 a = m;
1703 }
1704 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1705
1706 if(!(flags & AVSEEK_FLAG_ANY)){
1707 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1708 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1709 }
1710 }
1711
1712 if(m == nb_entries)
1713 return -1;
1714 return m;
1715}
1716
1717int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1718 int flags)
1719{
1720 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1721 wanted_timestamp, flags);
1722}
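
/*
 * Usage sketch (added illustration; "st" and target_ts are hypothetical):
 * finding the index entry of the keyframe at or before a given timestamp
 * (in the stream time base) and using its byte position for a seek.
 *
 *   int idx = av_index_search_timestamp(st, target_ts, AVSEEK_FLAG_BACKWARD);
 *   if (idx >= 0) {
 *       AVIndexEntry *e = &st->index_entries[idx];
 *       // e->pos is the byte offset, e->timestamp the keyframe timestamp
 *   }
 */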
1723
1724static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1725 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1726{
1727 int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1728 if (stream_index >= 0)
1729 ts = wrap_timestamp(s->streams[stream_index], ts);
1730 return ts;
1731}
1732
1733int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1734{
1735 AVInputFormat *avif= s->iformat;
1736 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1737 int64_t ts_min, ts_max, ts;
1738 int index;
1739 int64_t ret;
1740 AVStream *st;
1741
1742 if (stream_index < 0)
1743 return -1;
1744
1745 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1746
1747 ts_max=
1748 ts_min= AV_NOPTS_VALUE;
1749 pos_limit= -1; //gcc falsely says it may be uninitialized
1750
1751 st= s->streams[stream_index];
1752 if(st->index_entries){
1753 AVIndexEntry *e;
1754
1755 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1756 index= FFMAX(index, 0);
1757 e= &st->index_entries[index];
1758
1759 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1760 pos_min= e->pos;
1761 ts_min= e->timestamp;
1762 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1763 pos_min, av_ts2str(ts_min));
1764 }else{
1765 av_assert1(index==0);
1766 }
1767
1768 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1769 av_assert0(index < st->nb_index_entries);
1770 if(index >= 0){
1771 e= &st->index_entries[index];
1772 av_assert1(e->timestamp >= target_ts);
1773 pos_max= e->pos;
1774 ts_max= e->timestamp;
1775 pos_limit= pos_max - e->min_distance;
1776 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1777 pos_max, pos_limit, av_ts2str(ts_max));
1778 }
1779 }
1780
1781 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1782 if(pos<0)
1783 return -1;
1784
1785 /* do the seek */
1786 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1787 return ret;
1788
1789 ff_read_frame_flush(s);
1790 ff_update_cur_dts(s, st, ts);
1791
1792 return 0;
1793}
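
/*
 * Added note: demuxers that provide a read_timestamp() callback typically
 * implement their read_seek() by delegating to this helper, roughly:
 *
 *   static int hypothetical_read_seek(AVFormatContext *s, int stream_index,
 *                                     int64_t target_ts, int flags)
 *   {
 *       return ff_seek_frame_binary(s, stream_index, target_ts, flags);
 *   }
 */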
1794
1795int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
1796 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1797{
1798 int64_t step= 1024;
1799 int64_t limit, ts_max;
1800 int64_t filesize = avio_size(s->pb);
1801 int64_t pos_max = filesize - 1;
1802 do{
1803 limit = pos_max;
1804 pos_max = FFMAX(0, (pos_max) - step);
1805 ts_max = ff_read_timestamp(s, stream_index, &pos_max, limit, read_timestamp);
1806 step += step;
1807 }while(ts_max == AV_NOPTS_VALUE && 2*limit > step);
1808 if (ts_max == AV_NOPTS_VALUE)
1809 return -1;
1810
1811 for(;;){
1812 int64_t tmp_pos = pos_max + 1;
1813 int64_t tmp_ts = ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp);
1814 if(tmp_ts == AV_NOPTS_VALUE)
1815 break;
1816 av_assert0(tmp_pos > pos_max);
1817 ts_max = tmp_ts;
1818 pos_max = tmp_pos;
1819 if(tmp_pos >= filesize)
1820 break;
1821 }
1822
1823 if (ts)
1824 *ts = ts_max;
1825 if (pos)
1826 *pos = pos_max;
1827
1828 return 0;
1829}
1830
1831int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1832 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1833 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1834 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1835{
1836 int64_t pos, ts;
1837 int64_t start_pos;
1838 int no_change;
1839 int ret;
1840
1841 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1842
1843 if(ts_min == AV_NOPTS_VALUE){
1844 pos_min = s->data_offset;
1845 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1846 if (ts_min == AV_NOPTS_VALUE)
1847 return -1;
1848 }
1849
1850 if(ts_min >= target_ts){
1851 *ts_ret= ts_min;
1852 return pos_min;
1853 }
1854
1855 if(ts_max == AV_NOPTS_VALUE){
1856 if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
1857 return ret;
1858 pos_limit= pos_max;
1859 }
1860
1861 if(ts_max <= target_ts){
1862 *ts_ret= ts_max;
1863 return pos_max;
1864 }
1865
1866 if(ts_min > ts_max){
1867 return -1;
1868 }else if(ts_min == ts_max){
1869 pos_limit= pos_min;
1870 }
1871
1872 no_change=0;
1873 while (pos_min < pos_limit) {
1874 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1875 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1876 assert(pos_limit <= pos_max);
1877
1878 if(no_change==0){
1879 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1880 // interpolate position (better than dichotomy)
1881 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1882 + pos_min - approximate_keyframe_distance;
1883 }else if(no_change==1){
1884 // bisection, if interpolation failed to change min or max pos last time
1885 pos = (pos_min + pos_limit)>>1;
1886 }else{
1887 /* linear search if bisection failed, can only happen if there
1888 are very few or no keyframes between min/max */
1889 pos=pos_min;
1890 }
1891 if(pos <= pos_min)
1892 pos= pos_min + 1;
1893 else if(pos > pos_limit)
1894 pos= pos_limit;
1895 start_pos= pos;
1896
1897 ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1
1898 if(pos == pos_max)
1899 no_change++;
1900 else
1901 no_change=0;
1902 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1903 pos_min, pos, pos_max,
1904 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1905 pos_limit, start_pos, no_change);
1906 if(ts == AV_NOPTS_VALUE){
1907 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1908 return -1;
1909 }
1910 assert(ts != AV_NOPTS_VALUE);
1911 if (target_ts <= ts) {
1912 pos_limit = start_pos - 1;
1913 pos_max = pos;
1914 ts_max = ts;
1915 }
1916 if (target_ts >= ts) {
1917 pos_min = pos;
1918 ts_min = ts;
1919 }
1920 }
1921
1922 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1923 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1924#if 0
1925 pos_min = pos;
1926 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1927 pos_min++;
1928 ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1929 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1930 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
1931#endif
1932 *ts_ret= ts;
1933 return pos;
1934}
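
/*
 * Worked example for the interpolation step above (assumed round numbers):
 * with ts_min = 0 at pos_min = 0, ts_max = 100 at pos_max = 1000000 and
 * target_ts = 25, av_rescale(25, 1000000, 100) gives 250000, so the first
 * probe lands near byte 250000 minus the approximate keyframe distance.
 * On streams with a roughly constant bitrate this converges much faster
 * than plain bisection.
 */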
1935
1936static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1937 int64_t pos_min, pos_max;
1938
1939 pos_min = s->data_offset;
1940 pos_max = avio_size(s->pb) - 1;
1941
1942 if (pos < pos_min) pos= pos_min;
1943 else if(pos > pos_max) pos= pos_max;
1944
1945 avio_seek(s->pb, pos, SEEK_SET);
1946
1947 s->io_repositioned = 1;
1948
1949 return 0;
1950}
1951
1952static int seek_frame_generic(AVFormatContext *s,
1953 int stream_index, int64_t timestamp, int flags)
1954{
1955 int index;
1956 int64_t ret;
1957 AVStream *st;
1958 AVIndexEntry *ie;
1959
1960 st = s->streams[stream_index];
1961
1962 index = av_index_search_timestamp(st, timestamp, flags);
1963
1964 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1965 return -1;
1966
1967 if(index < 0 || index==st->nb_index_entries-1){
1968 AVPacket pkt;
1969 int nonkey=0;
1970
1971 if(st->nb_index_entries){
1972 av_assert0(st->index_entries);
1973 ie= &st->index_entries[st->nb_index_entries-1];
1974 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1975 return ret;
1976 ff_update_cur_dts(s, st, ie->timestamp);
1977 }else{
1978 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1979 return ret;
1980 }
1981 for (;;) {
1982 int read_status;
1983 do{
1984 read_status = av_read_frame(s, &pkt);
1985 } while (read_status == AVERROR(EAGAIN));
1986 if (read_status < 0)
1987 break;
1988 av_free_packet(&pkt);
1989 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1990 if(pkt.flags & AV_PKT_FLAG_KEY)
1991 break;
1992 if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){
1993 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1994 break;
1995 }
1996 }
1997 }
1998 index = av_index_search_timestamp(st, timestamp, flags);
1999 }
2000 if (index < 0)
2001 return -1;
2002
2003 ff_read_frame_flush(s);
2004 if (s->iformat->read_seek){
2005 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
2006 return 0;
2007 }
2008 ie = &st->index_entries[index];
2009 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
2010 return ret;
2011 ff_update_cur_dts(s, st, ie->timestamp);
2012
2013 return 0;
2014}
2015
2016static int seek_frame_internal(AVFormatContext *s, int stream_index,
2017 int64_t timestamp, int flags)
2018{
2019 int ret;
2020 AVStream *st;
2021
2022 if (flags & AVSEEK_FLAG_BYTE) {
2023 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
2024 return -1;
2025 ff_read_frame_flush(s);
2026 return seek_frame_byte(s, stream_index, timestamp, flags);
2027 }
2028
2029 if(stream_index < 0){
2030 stream_index= av_find_default_stream_index(s);
2031 if(stream_index < 0)
2032 return -1;
2033
2034 st= s->streams[stream_index];
2035 /* timestamp for default must be expressed in AV_TIME_BASE units */
2036 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
2037 }
2038
2039 /* first, we try the format specific seek */
2040 if (s->iformat->read_seek) {
2041 ff_read_frame_flush(s);
2042 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
2043 } else
2044 ret = -1;
2045 if (ret >= 0) {
2046 return 0;
2047 }
2048
2049 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
2050 ff_read_frame_flush(s);
2051 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
2052 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
2053 ff_read_frame_flush(s);
2054 return seek_frame_generic(s, stream_index, timestamp, flags);
2055 }
2056 else
2057 return -1;
2058}
2059
2060int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2061{
2062 int ret;
2063
2064 if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2065 int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
2066 if ((flags & AVSEEK_FLAG_BACKWARD))
2067 max_ts = timestamp;
2068 else
2069 min_ts = timestamp;
2070 return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2071 flags & ~AVSEEK_FLAG_BACKWARD);
2072 }
2073
2074 ret = seek_frame_internal(s, stream_index, timestamp, flags);
2075
2076 if (ret >= 0)
2077 ret = avformat_queue_attached_pictures(s);
2078
2079 return ret;
2080}
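
/*
 * Illustrative sketch (not part of the build): seeking one stream to a
 * wall-clock position requires converting the time into that stream's
 * time_base first. "ic" and "stream_index" are assumed valid; the 10 second
 * target is just an example.
 *
 *     int64_t target = av_rescale_q(10 * (int64_t)AV_TIME_BASE, AV_TIME_BASE_Q,
 *                                   ic->streams[stream_index]->time_base);
 *     if (av_seek_frame(ic, stream_index, target, AVSEEK_FLAG_BACKWARD) < 0)
 *         av_log(ic, AV_LOG_ERROR, "seek failed\n");
 */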
2081
2082int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
2083{
2084 if (min_ts > ts || max_ts < ts)
2085 return -1;
2086 if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2087 return AVERROR(EINVAL);
2088
2089 if(s->seek2any>0)
2090 flags |= AVSEEK_FLAG_ANY;
2091 flags &= ~AVSEEK_FLAG_BACKWARD;
2092
2093 if (s->iformat->read_seek2) {
2094 int ret;
2095 ff_read_frame_flush(s);
2096
2097 if (stream_index == -1 && s->nb_streams == 1) {
2098 AVRational time_base = s->streams[0]->time_base;
2099 ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
2100 min_ts = av_rescale_rnd(min_ts, time_base.den,
2101 time_base.num * (int64_t)AV_TIME_BASE,
2102 AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
2103 max_ts = av_rescale_rnd(max_ts, time_base.den,
2104 time_base.num * (int64_t)AV_TIME_BASE,
2105 AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
2106 }
2107
2108 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
2109
2110 if (ret >= 0)
2111 ret = avformat_queue_attached_pictures(s);
2112 return ret;
2113 }
2114
2115 if(s->iformat->read_timestamp){
2116 //try to seek via read_timestamp()
2117 }
2118
2119 // Fall back on old API if new is not implemented but old is.
2120 // Note the old API has somewhat different semantics.
2121 if (s->iformat->read_seek || 1) {
2122 int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2123 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2124 if (ret<0 && ts != min_ts && max_ts != ts) {
2125 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2126 if (ret >= 0)
2127 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2128 }
2129 return ret;
2130 }
2131
2132 // try some generic seek like seek_frame_generic() but with new ts semantics
2133 return -1; //unreachable
2134}
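
/*
 * Illustrative sketch (not part of the build): with stream_index = -1 the
 * timestamps are in AV_TIME_BASE units and min_ts/max_ts bound how far the
 * demuxer may deviate from the requested position. Seeking to 60 s while
 * accepting anything from 55 s up to the target could look like this:
 *
 *     int64_t ts     = 60 * (int64_t)AV_TIME_BASE;
 *     int64_t min_ts = 55 * (int64_t)AV_TIME_BASE;
 *     int ret = avformat_seek_file(ic, -1, min_ts, ts, ts, 0);
 */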
2135
2136/*******************************************************/
2137
2138/**
2139 * Return TRUE if the given format context has an accurate duration in any of its streams.
2140 *
2141 * @return TRUE if at least one stream (or the context itself) has an accurate duration.
2142 */
2143static int has_duration(AVFormatContext *ic)
2144{
2145 int i;
2146 AVStream *st;
2147
2148 for(i = 0;i < ic->nb_streams; i++) {
2149 st = ic->streams[i];
2150 if (st->duration != AV_NOPTS_VALUE)
2151 return 1;
2152 }
2153 if (ic->duration != AV_NOPTS_VALUE)
2154 return 1;
2155 return 0;
2156}
2157
2158/**
2159 * Estimate the global stream timings from those of each individual stream.
2160 *
2161 * Also computes the global bitrate if possible.
2162 */
2163static void update_stream_timings(AVFormatContext *ic)
2164{
2165 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2166 int64_t duration, duration1, filesize;
2167 int i;
2168 AVStream *st;
2169 AVProgram *p;
2170
2171 start_time = INT64_MAX;
2172 start_time_text = INT64_MAX;
2173 end_time = INT64_MIN;
2174 duration = INT64_MIN;
2175 for(i = 0;i < ic->nb_streams; i++) {
2176 st = ic->streams[i];
2177 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2178 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2179 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2180 if (start_time1 < start_time_text)
2181 start_time_text = start_time1;
2182 } else
2183 start_time = FFMIN(start_time, start_time1);
2184 end_time1 = AV_NOPTS_VALUE;
2185 if (st->duration != AV_NOPTS_VALUE) {
2186 end_time1 = start_time1
2187 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2188 end_time = FFMAX(end_time, end_time1);
2189 }
2190 for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
2191 if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2192 p->start_time = start_time1;
2193 if(p->end_time < end_time1)
2194 p->end_time = end_time1;
2195 }
2196 }
2197 if (st->duration != AV_NOPTS_VALUE) {
2198 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2199 duration = FFMAX(duration, duration1);
2200 }
2201 }
2202 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2203 start_time = start_time_text;
2204 else if(start_time > start_time_text)
2205 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non-primary stream start time %f\n", start_time_text / (float)AV_TIME_BASE);
2206
2207 if (start_time != INT64_MAX) {
2208 ic->start_time = start_time;
2209 if (end_time != INT64_MIN) {
2210 if (ic->nb_programs) {
2211 for (i=0; i<ic->nb_programs; i++) {
2212 p = ic->programs[i];
2213 if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2214 duration = FFMAX(duration, p->end_time - p->start_time);
2215 }
2216 } else
2217 duration = FFMAX(duration, end_time - start_time);
2218 }
2219 }
2220 if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2221 ic->duration = duration;
2222 }
2223 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2224 /* compute the bitrate */
2225 double bitrate = (double)filesize * 8.0 * AV_TIME_BASE /
2226 (double)ic->duration;
2227 if (bitrate >= 0 && bitrate <= INT_MAX)
2228 ic->bit_rate = bitrate;
2229 }
2230}
2231
2232static void fill_all_stream_timings(AVFormatContext *ic)
2233{
2234 int i;
2235 AVStream *st;
2236
2237 update_stream_timings(ic);
2238 for(i = 0;i < ic->nb_streams; i++) {
2239 st = ic->streams[i];
2240 if (st->start_time == AV_NOPTS_VALUE) {
2241 if(ic->start_time != AV_NOPTS_VALUE)
2242 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2243 if(ic->duration != AV_NOPTS_VALUE)
2244 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
2245 }
2246 }
2247}
2248
2249static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2250{
2251 int64_t filesize, duration;
2252 int i, show_warning = 0;
2253 AVStream *st;
2254
2255 /* if bit_rate is already set, we believe it */
2256 if (ic->bit_rate <= 0) {
2257 int bit_rate = 0;
2258 for(i=0;i<ic->nb_streams;i++) {
2259 st = ic->streams[i];
2260 if (st->codec->bit_rate > 0) {
2261 if (INT_MAX - st->codec->bit_rate < bit_rate) {
2262 bit_rate = 0;
2263 break;
2264 }
2265 bit_rate += st->codec->bit_rate;
2266 }
2267 }
2268 ic->bit_rate = bit_rate;
2269 }
2270
2271 /* if duration is already set, we believe it */
2272 if (ic->duration == AV_NOPTS_VALUE &&
2273 ic->bit_rate != 0) {
2274 filesize = ic->pb ? avio_size(ic->pb) : 0;
2275 if (filesize > 0) {
2276 for(i = 0; i < ic->nb_streams; i++) {
2277 st = ic->streams[i];
2278 if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2279 && st->duration == AV_NOPTS_VALUE) {
2280 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2281 st->duration = duration;
2282 show_warning = 1;
2283 }
2284 }
2285 }
2286 }
2287 if (show_warning)
2288 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2289}
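
/*
 * Worked example for the estimate above (assumed numbers): a 10 MB file
 * (filesize = 10000000) with a total bit_rate of 1000000 bit/s yields
 * 8 * 10000000 / 1000000 = 80 seconds, which av_rescale() then expresses in
 * each stream's own time_base before storing it in st->duration.
 */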
2290
2291#define DURATION_MAX_READ_SIZE 250000LL
2292#define DURATION_MAX_RETRY 4
2293
2294/* only usable for MPEG-PS streams */
2295static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2296{
2297 AVPacket pkt1, *pkt = &pkt1;
2298 AVStream *st;
2299 int read_size, i, ret;
2300 int64_t end_time;
2301 int64_t filesize, offset, duration;
2302 int retry=0;
2303
2304 /* flush packet queue */
2305 flush_packet_queue(ic);
2306
2307 for (i=0; i<ic->nb_streams; i++) {
2308 st = ic->streams[i];
2309 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2310 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2311
2312 if (st->parser) {
2313 av_parser_close(st->parser);
2314 st->parser= NULL;
2315 }
2316 }
2317
2318 /* estimate the end time (duration) */
2319 /* XXX: may need to support wrapping */
2320 filesize = ic->pb ? avio_size(ic->pb) : 0;
2321 end_time = AV_NOPTS_VALUE;
2322 do{
2323 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2324 if (offset < 0)
2325 offset = 0;
2326
2327 avio_seek(ic->pb, offset, SEEK_SET);
2328 read_size = 0;
2329 for(;;) {
2330 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2331 break;
2332
2333 do {
2334 ret = ff_read_packet(ic, pkt);
2335 } while(ret == AVERROR(EAGAIN));
2336 if (ret != 0)
2337 break;
2338 read_size += pkt->size;
2339 st = ic->streams[pkt->stream_index];
2340 if (pkt->pts != AV_NOPTS_VALUE &&
2341 (st->start_time != AV_NOPTS_VALUE ||
2342 st->first_dts != AV_NOPTS_VALUE)) {
2343 duration = end_time = pkt->pts;
2344 if (st->start_time != AV_NOPTS_VALUE)
2345 duration -= st->start_time;
2346 else
2347 duration -= st->first_dts;
2348 if (duration > 0) {
2349 if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<=0 ||
2350 (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2351 st->duration = duration;
2352 st->info->last_duration = duration;
2353 }
2354 }
2355 av_free_packet(pkt);
2356 }
2357 }while( end_time==AV_NOPTS_VALUE
2358 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2359 && ++retry <= DURATION_MAX_RETRY);
2360
2361 fill_all_stream_timings(ic);
2362
2363 avio_seek(ic->pb, old_offset, SEEK_SET);
2364 for (i=0; i<ic->nb_streams; i++) {
2365 st= ic->streams[i];
2366 st->cur_dts= st->first_dts;
2367 st->last_IP_pts = AV_NOPTS_VALUE;
2368 st->reference_dts = AV_NOPTS_VALUE;
2369 }
2370}
2371
2372static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2373{
2374 int64_t file_size;
2375
2376 /* get the file size, if possible */
2377 if (ic->iformat->flags & AVFMT_NOFILE) {
2378 file_size = 0;
2379 } else {
2380 file_size = avio_size(ic->pb);
2381 file_size = FFMAX(0, file_size);
2382 }
2383
2384 if ((!strcmp(ic->iformat->name, "mpeg") ||
2385 !strcmp(ic->iformat->name, "mpegts")) &&
2386 file_size && ic->pb->seekable) {
2387 /* get accurate estimate from the PTSes */
2388 estimate_timings_from_pts(ic, old_offset);
2389 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2390 } else if (has_duration(ic)) {
2391 /* at least one component has timings - we use them for all
2392 the components */
2393 fill_all_stream_timings(ic);
2394 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2395 } else {
2396 /* less precise: use bitrate info */
2397 estimate_timings_from_bit_rate(ic);
2398 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2399 }
2400 update_stream_timings(ic);
2401
2402 {
2403 int i;
2404 AVStream av_unused *st;
2405 for(i = 0;i < ic->nb_streams; i++) {
2406 st = ic->streams[i];
2407 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2408 (double) st->start_time / AV_TIME_BASE,
2409 (double) st->duration / AV_TIME_BASE);
2410 }
2411 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2412 (double) ic->start_time / AV_TIME_BASE,
2413 (double) ic->duration / AV_TIME_BASE,
2414 ic->bit_rate / 1000);
2415 }
2416}
2417
2418static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2419{
2420 AVCodecContext *avctx = st->codec;
2421
2422#define FAIL(errmsg) do { \
2423 if (errmsg_ptr) \
2424 *errmsg_ptr = errmsg; \
2425 return 0; \
2426 } while (0)
2427
2428 switch (avctx->codec_type) {
2429 case AVMEDIA_TYPE_AUDIO:
2430 if (!avctx->frame_size && determinable_frame_size(avctx))
2431 FAIL("unspecified frame size");
2432 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2433 FAIL("unspecified sample format");
2434 if (!avctx->sample_rate)
2435 FAIL("unspecified sample rate");
2436 if (!avctx->channels)
2437 FAIL("unspecified number of channels");
2438 if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2439 FAIL("no decodable DTS frames");
2440 break;
2441 case AVMEDIA_TYPE_VIDEO:
2442 if (!avctx->width)
2443 FAIL("unspecified size");
2444 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2445 FAIL("unspecified pixel format");
2446 if (st->codec->codec_id == AV_CODEC_ID_RV30 || st->codec->codec_id == AV_CODEC_ID_RV40)
2447 if (!st->sample_aspect_ratio.num && !st->codec->sample_aspect_ratio.num && !st->codec_info_nb_frames)
2448 FAIL("no frame in rv30/40 and no sar");
2449 break;
2450 case AVMEDIA_TYPE_SUBTITLE:
2451 if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2452 FAIL("unspecified size");
2453 break;
2454 case AVMEDIA_TYPE_DATA:
2455 if(avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2456 }
2457
2458 if (avctx->codec_id == AV_CODEC_ID_NONE)
2459 FAIL("unknown codec");
2460 return 1;
2461}
2462
2463/* Return 1 if decoded data was returned, 0 if it was not, or a negative error code. */
2464static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, AVDictionary **options)
2465{
2466 const AVCodec *codec;
2467 int got_picture = 1, ret = 0;
2468 AVFrame *frame = avcodec_alloc_frame();
2469 AVSubtitle subtitle;
2470 AVPacket pkt = *avpkt;
2471
2472 if (!frame)
2473 return AVERROR(ENOMEM);
2474
2475 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2476 AVDictionary *thread_opt = NULL;
2477
2478 codec = find_decoder(s, st, st->codec->codec_id);
2479
2480 if (!codec) {
2481 st->info->found_decoder = -1;
2482 ret = -1;
2483 goto fail;
2484 }
2485
2486 /* force thread count to 1 since the h264 decoder will not extract SPS
2487 * and PPS to extradata during multi-threaded decoding */
2488 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2489 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2490 if (!options)
2491 av_dict_free(&thread_opt);
2492 if (ret < 0) {
2493 st->info->found_decoder = -1;
2494 goto fail;
2495 }
2496 st->info->found_decoder = 1;
2497 } else if (!st->info->found_decoder)
2498 st->info->found_decoder = 1;
2499
2500 if (st->info->found_decoder < 0) {
2501 ret = -1;
2502 goto fail;
2503 }
2504
2505 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2506 ret >= 0 &&
2507 (!has_codec_parameters(st, NULL) ||
2508 !has_decode_delay_been_guessed(st) ||
2509 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2510 got_picture = 0;
2511 avcodec_get_frame_defaults(frame);
2512 switch(st->codec->codec_type) {
2513 case AVMEDIA_TYPE_VIDEO:
2514 ret = avcodec_decode_video2(st->codec, frame,
2515 &got_picture, &pkt);
2516 break;
2517 case AVMEDIA_TYPE_AUDIO:
2518 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2519 break;
2520 case AVMEDIA_TYPE_SUBTITLE:
2521 ret = avcodec_decode_subtitle2(st->codec, &subtitle,
2522 &got_picture, &pkt);
2523 ret = pkt.size;
2524 break;
2525 default:
2526 break;
2527 }
2528 if (ret >= 0) {
2529 if (got_picture)
2530 st->nb_decoded_frames++;
2531 pkt.data += ret;
2532 pkt.size -= ret;
2533 ret = got_picture;
2534 }
2535 }
2536
2537 if(!pkt.data && !got_picture)
2538 ret = -1;
2539
2540fail:
2541 avcodec_free_frame(&frame);
2542 return ret;
2543}
2544
2545unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2546{
2547 while (tags->id != AV_CODEC_ID_NONE) {
2548 if (tags->id == id)
2549 return tags->tag;
2550 tags++;
2551 }
2552 return 0;
2553}
2554
2555enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2556{
2557 int i;
2558 for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) {
2559 if(tag == tags[i].tag)
2560 return tags[i].id;
2561 }
2562 for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) {
2563 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2564 return tags[i].id;
2565 }
2566 return AV_CODEC_ID_NONE;
2567}
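
/*
 * Illustrative sketch (not part of the build): RIFF-based demuxers map a
 * stored fourcc to a codec ID through such a tag table, e.g. (assuming the
 * ff_codec_bmp_tags table declared in riff.h):
 *
 *     enum AVCodecID id = ff_codec_get_id(ff_codec_bmp_tags,
 *                                         MKTAG('H', '2', '6', '4'));
 */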
2568
2569enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2570{
2571 if (flt) {
2572 switch (bps) {
2573 case 32: return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2574 case 64: return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2575 default: return AV_CODEC_ID_NONE;
2576 }
2577 } else {
2578 bps += 7;
2579 bps >>= 3;
2580 if (sflags & (1 << (bps - 1))) {
2581 switch (bps) {
2582 case 1: return AV_CODEC_ID_PCM_S8;
2583 case 2: return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2584 case 3: return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2585 case 4: return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2586 default: return AV_CODEC_ID_NONE;
2587 }
2588 } else {
2589 switch (bps) {
2590 case 1: return AV_CODEC_ID_PCM_U8;
2591 case 2: return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2592 case 3: return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2593 case 4: return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2594 default: return AV_CODEC_ID_NONE;
2595 }
2596 }
2597 }
2598}
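
/*
 * Worked example: for 16-bit little-endian signed PCM the sample size is
 * 2 bytes, so bit 1 of sflags marks it as signed and
 * ff_get_pcm_codec_id(16, 0, 0, 0x2) returns AV_CODEC_ID_PCM_S16LE, while
 * ff_get_pcm_codec_id(32, 1, 1, 0) returns AV_CODEC_ID_PCM_F32BE.
 */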
2599
2600unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
2601{
2602 unsigned int tag;
2603 if (!av_codec_get_tag2(tags, id, &tag))
2604 return 0;
2605 return tag;
2606}
2607
2608int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
2609 unsigned int *tag)
2610{
2611 int i;
2612 for(i=0; tags && tags[i]; i++){
2613 const AVCodecTag *codec_tags = tags[i];
2614 while (codec_tags->id != AV_CODEC_ID_NONE) {
2615 if (codec_tags->id == id) {
2616 *tag = codec_tags->tag;
2617 return 1;
2618 }
2619 codec_tags++;
2620 }
2621 }
2622 return 0;
2623}
2624
2625enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2626{
2627 int i;
2628 for(i=0; tags && tags[i]; i++){
2629 enum AVCodecID id= ff_codec_get_id(tags[i], tag);
2630 if(id!=AV_CODEC_ID_NONE) return id;
2631 }
2632 return AV_CODEC_ID_NONE;
2633}
2634
2635static void compute_chapters_end(AVFormatContext *s)
2636{
2637 unsigned int i, j;
2638 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2639
2640 for (i = 0; i < s->nb_chapters; i++)
2641 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2642 AVChapter *ch = s->chapters[i];
2643 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2644 : INT64_MAX;
2645
2646 for (j = 0; j < s->nb_chapters; j++) {
2647 AVChapter *ch1 = s->chapters[j];
2648 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2649 if (j != i && next_start > ch->start && next_start < end)
2650 end = next_start;
2651 }
2652 ch->end = (end == INT64_MAX) ? ch->start : end;
2653 }
2654}
2655
2656static int get_std_framerate(int i){
2657 if(i<60*12) return (i+1)*1001;
2658 else return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12;
2659}
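
/*
 * The value returned above is the frame rate multiplied by 12*1001, e.g.
 * get_std_framerate(299) = 300 * 1001 = 300300, i.e. 300300 / (12 * 1001) =
 * 25 fps, while the entries past i = 60*12 cover the NTSC-style rates
 * 24000/1001, 30000/1001, 60000/1001, ...
 */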
2660
2661/*
2662 * Is the time base unreliable?
2663 * This is a heuristic to balance quick acceptance of the values in
2664 * the headers against some extra checks.
2665 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2666 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2667 * And there are "variable" fps files this needs to detect as well.
2668 */
2669static int tb_unreliable(AVCodecContext *c){
2670 if( c->time_base.den >= 101L*c->time_base.num
2671 || c->time_base.den < 5L*c->time_base.num
2672/* || c->codec_tag == AV_RL32("DIVX")
2673 || c->codec_tag == AV_RL32("XVID")*/
2674 || c->codec_tag == AV_RL32("mp4v")
2675 || c->codec_id == AV_CODEC_ID_MPEG2VIDEO
2676 || c->codec_id == AV_CODEC_ID_H264
2677 )
2678 return 1;
2679 return 0;
2680}
2681
2682#if FF_API_FORMAT_PARAMETERS
2683int av_find_stream_info(AVFormatContext *ic)
2684{
2685 return avformat_find_stream_info(ic, NULL);
2686}
2687#endif
2688
2689int ff_alloc_extradata(AVCodecContext *avctx, int size)
2690{
2691 int ret;
2692
2693 if (size < 0 || size >= INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
2694 avctx->extradata_size = 0;
2695 return AVERROR(EINVAL);
2696 }
2697 avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
2698 if (avctx->extradata) {
2699 memset(avctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2700 avctx->extradata_size = size;
2701 ret = 0;
2702 } else {
2703 avctx->extradata_size = 0;
2704 ret = AVERROR(ENOMEM);
2705 }
2706 return ret;
2707}
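
/*
 * Illustrative sketch (not part of the build): a demuxer that needs to copy
 * "size" bytes of codec configuration from its input would typically pair
 * the allocation with an avio_read():
 *
 *     int ret = ff_alloc_extradata(st->codec, size);
 *     if (ret < 0)
 *         return ret;
 *     avio_read(s->pb, st->codec->extradata, size);
 */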
2708
2709int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2710{
2711 int i, count, ret = 0, j;
2712 int64_t read_size;
2713 AVStream *st;
2714 AVPacket pkt1, *pkt;
2715 int64_t old_offset = avio_tell(ic->pb);
2716 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2717 int flush_codecs = ic->probesize > 0;
2718
2719 if(ic->pb)
2720 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2721
2722 for(i=0;i<ic->nb_streams;i++) {
2723 const AVCodec *codec;
2724 AVDictionary *thread_opt = NULL;
2725 st = ic->streams[i];
2726
2727 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2728 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2729/* if(!st->time_base.num)
2730 st->time_base= */
2731 if(!st->codec->time_base.num)
2732 st->codec->time_base= st->time_base;
2733 }
2734 //only for the split stuff
2735 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2736 st->parser = av_parser_init(st->codec->codec_id);
2737 if(st->parser){
2738 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2739 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2740 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2741 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2742 }
2743 } else if (st->need_parsing) {
2744 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2745 "%s, packets or times may be invalid.\n",
2746 avcodec_get_name(st->codec->codec_id));
2747 }
2748 }
2749 codec = find_decoder(ic, st, st->codec->codec_id);
2750
2751 /* force thread count to 1 since the h264 decoder will not extract SPS
2752 * and PPS to extradata during multi-threaded decoding */
2753 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2754
2755 /* Ensure that subtitle_header is properly set. */
2756 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2757 && codec && !st->codec->codec)
2758 avcodec_open2(st->codec, codec, options ? &options[i]
2759 : &thread_opt);
2760
2761 //try to just open decoders, in case this is enough to get parameters
2762 if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
2763 if (codec && !st->codec->codec)
2764 avcodec_open2(st->codec, codec, options ? &options[i]
2765 : &thread_opt);
2766 }
2767 if (!options)
2768 av_dict_free(&thread_opt);
2769 }
2770
2771 for (i=0; i<ic->nb_streams; i++) {
2772#if FF_API_R_FRAME_RATE
2773 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2774#endif
2775 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2776 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2777 }
2778
2779 count = 0;
2780 read_size = 0;
2781 for(;;) {
2782 if (ff_check_interrupt(&ic->interrupt_callback)){
2783 ret= AVERROR_EXIT;
2784 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2785 break;
2786 }
2787
2788 /* check if one codec still needs to be handled */
2789 for(i=0;i<ic->nb_streams;i++) {
2790 int fps_analyze_framecount = 20;
2791
2792 st = ic->streams[i];
2793 if (!has_codec_parameters(st, NULL))
2794 break;
2795 /* if the timebase is coarse (like the usual millisecond precision
2796 of mkv), we need to analyze more frames to reliably arrive at
2797 the correct fps */
2798 if (av_q2d(st->time_base) > 0.0005)
2799 fps_analyze_framecount *= 2;
2800 if (ic->fps_probe_size >= 0)
2801 fps_analyze_framecount = ic->fps_probe_size;
2802 if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2803 fps_analyze_framecount = 0;
2804 /* variable fps and no guess at the real fps */
2805 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2806 && st->info->duration_count < fps_analyze_framecount
2807 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2808 break;
2809 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2810 break;
2811 if (st->first_dts == AV_NOPTS_VALUE &&
2812 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2813 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2814 break;
2815 }
2816 if (i == ic->nb_streams) {
2817 /* NOTE: if the format has no header, then we need to read
2818 some packets to get most of the streams, so we cannot
2819 stop here */
2820 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2821 /* if we found the info for all the codecs, we can stop */
2822 ret = count;
2823 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2824 flush_codecs = 0;
2825 break;
2826 }
2827 }
2828 /* we did not get all the codec info, but we read too much data */
2829 if (read_size >= ic->probesize) {
2830 ret = count;
2831 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %d bytes reached\n", ic->probesize);
2832 for (i = 0; i < ic->nb_streams; i++)
2833 if (!ic->streams[i]->r_frame_rate.num &&
2834 ic->streams[i]->info->duration_count <= 1 &&
2835 strcmp(ic->iformat->name, "image2"))
2836 av_log(ic, AV_LOG_WARNING,
2837 "Stream #%d: not enough frames to estimate rate; "
2838 "consider increasing probesize\n", i);
2839 break;
2840 }
2841
2842 /* NOTE: a new stream can be added here if the file has no header
2843 (AVFMTCTX_NOHEADER) */
2844 ret = read_frame_internal(ic, &pkt1);
2845 if (ret == AVERROR(EAGAIN))
2846 continue;
2847
2848 if (ret < 0) {
2849 /* EOF or error*/
2850 break;
2851 }
2852
2853 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2854 free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);
2855 {
2856 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2857 &ic->packet_buffer_end);
2858 if (!pkt) {
2859 ret = AVERROR(ENOMEM);
2860 goto find_stream_info_err;
2861 }
2862 if ((ret = av_dup_packet(pkt)) < 0)
2863 goto find_stream_info_err;
2864 }
2865
2866 read_size += pkt->size;
2867
2868 st = ic->streams[pkt->stream_index];
2869 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2870 /* check for non-increasing dts */
2871 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2872 st->info->fps_last_dts >= pkt->dts) {
2873 av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: "
2874 "packet %d with DTS %"PRId64", packet %d with DTS "
2875 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2876 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2877 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2878 }
2879 /* check for a discontinuity in dts - if the difference in dts
2880 * is more than 1000 times the average packet duration in the sequence,
2881 * we treat it as a discontinuity */
2882 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2883 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2884 (pkt->dts - st->info->fps_last_dts) / 1000 >
2885 (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2886 av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: "
2887 "packet %d with DTS %"PRId64", packet %d with DTS "
2888 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2889 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2890 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2891 }
2892
2893 /* update stored dts values */
2894 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2895 st->info->fps_first_dts = pkt->dts;
2896 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2897 }
2898 st->info->fps_last_dts = pkt->dts;
2899 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2900 }
2901 if (st->codec_info_nb_frames>1) {
2902 int64_t t=0;
2903 if (st->time_base.den > 0)
2904 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2905 if (st->avg_frame_rate.num > 0)
2906 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));
2907
2908 if ( t==0
2909 && st->codec_info_nb_frames>30
2910 && st->info->fps_first_dts != AV_NOPTS_VALUE
2911 && st->info->fps_last_dts != AV_NOPTS_VALUE)
2912 t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));
2913
2914 if (t >= ic->max_analyze_duration) {
2915 av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %d reached at %"PRId64" microseconds\n", ic->max_analyze_duration, t);
2916 break;
2917 }
2918 if (pkt->duration) {
2919 st->info->codec_info_duration += pkt->duration;
2920 st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame==2 ? st->parser->repeat_pict + 1 : 2;
2921 }
2922 }
2923#if FF_API_R_FRAME_RATE
2924 {
2925 int64_t last = st->info->last_dts;
2926
2927 if( pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last
2928 && pkt->dts - (uint64_t)last < INT64_MAX){
2929 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2930 int64_t duration= pkt->dts - last;
2931
2932 if (!st->info->duration_error)
2933 st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
2934 if (!st->info->duration_error)
2935 return AVERROR(ENOMEM);
2936
2937// if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2938// av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2939 for (i=0; i<MAX_STD_TIMEBASES; i++) {
2940 int framerate= get_std_framerate(i);
2941 double sdts= dts*framerate/(1001*12);
2942 for(j=0; j<2; j++){
2943 int64_t ticks= llrint(sdts+j*0.5);
2944 double error= sdts - ticks + j*0.5;
2945 st->info->duration_error[j][0][i] += error;
2946 st->info->duration_error[j][1][i] += error*error;
2947 }
2948 }
2949 st->info->duration_count++;
2950 // ignore the first 4 values, they might have some random jitter
2951 if (st->info->duration_count > 3 && is_relative(pkt->dts) == is_relative(last))
2952 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2953 }
2954 if (pkt->dts != AV_NOPTS_VALUE)
2955 st->info->last_dts = pkt->dts;
2956 }
2957#endif
2958 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2959 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2960 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2961 if (ff_alloc_extradata(st->codec, i))
2962 return AVERROR(ENOMEM);
2963 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2964 }
2965 }
2966
2967 /* If we still have no information, we try to open the codec and to
2968 decompress the frame. We try to avoid that in most cases as
2969 it takes longer and uses more memory. For MPEG-4, we need to
2970 decompress for QuickTime.
2971
2972 If CODEC_CAP_CHANNEL_CONF is set, this will force decoding of at
2973 least one frame of codec data; this makes sure the codec initializes
2974 the channel configuration and does not only trust the values from the container.
2975 */
2976 try_decode_frame(ic, st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2977
2978 st->codec_info_nb_frames++;
2979 count++;
2980 }
2981
2982 if (flush_codecs) {
2983 AVPacket empty_pkt = { 0 };
2984 int err = 0;
2985 av_init_packet(&empty_pkt);
2986
2987 for(i=0;i<ic->nb_streams;i++) {
2988
2989 st = ic->streams[i];
2990
2991 /* flush the decoders */
2992 if (st->info->found_decoder == 1) {
2993 do {
2994 err = try_decode_frame(ic, st, &empty_pkt,
2995 (options && i < orig_nb_streams) ?
2996 &options[i] : NULL);
2997 } while (err > 0 && !has_codec_parameters(st, NULL));
2998
2999 if (err < 0) {
3000 av_log(ic, AV_LOG_INFO,
3001 "decoding for stream %d failed\n", st->index);
3002 }
3003 }
3004 }
3005 }
3006
3007 // close codecs which were opened in try_decode_frame()
3008 for(i=0;i<ic->nb_streams;i++) {
3009 st = ic->streams[i];
3010 avcodec_close(st->codec);
3011 }
3012 for(i=0;i<ic->nb_streams;i++) {
3013 st = ic->streams[i];
3014 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
3015 if(st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
3016 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
3017 if (avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
3018 st->codec->codec_tag= tag;
3019 }
3020
3021 /* estimate average framerate if not set by demuxer */
3022 if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) {
3023 int best_fps = 0;
3024 double best_error = 0.01;
3025
3026 if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2||
3027 st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
3028 st->info->codec_info_duration < 0)
3029 continue;
3030 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3031 st->info->codec_info_duration_fields*(int64_t)st->time_base.den,
3032 st->info->codec_info_duration*2*(int64_t)st->time_base.num, 60000);
3033
3034 /* round guessed framerate to a "standard" framerate if it's
3035 * within 1% of the original estimate */
3036 for (j = 1; j < MAX_STD_TIMEBASES; j++) {
3037 AVRational std_fps = { get_std_framerate(j), 12*1001 };
3038 double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);
3039
3040 if (error < best_error) {
3041 best_error = error;
3042 best_fps = std_fps.num;
3043 }
3044 }
3045 if (best_fps) {
3046 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3047 best_fps, 12*1001, INT_MAX);
3048 }
3049 }
3050 // The check for tb_unreliable() is not completely correct, since this is not about handling
3051 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
3052 // ipmovie.c produces.
3053 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
3054 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
3055 if (st->info->duration_count>1 && !st->r_frame_rate.num
3056 && tb_unreliable(st->codec)) {
3057 int num = 0;
3058 double best_error= 0.01;
3059
3060 for (j=0; j<MAX_STD_TIMEBASES; j++) {
3061 int k;
3062
3063 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
3064 continue;
3065 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
3066 continue;
3067 for(k=0; k<2; k++){
3068 int n= st->info->duration_count;
3069 double a= st->info->duration_error[k][0][j] / n;
3070 double error= st->info->duration_error[k][1][j]/n - a*a;
3071
3072 if(error < best_error && best_error> 0.000000001){
3073 best_error= error;
3074 num = get_std_framerate(j);
3075 }
3076 if(error < 0.02)
3077 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
3078 }
3079 }
3080 // Do not increase the frame rate by more than 1% in order to match a standard rate.
3081 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
3082 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
3083 }
3084
3085 if (!st->r_frame_rate.num){
3086 if( st->codec->time_base.den * (int64_t)st->time_base.num
3087 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
3088 st->r_frame_rate.num = st->codec->time_base.den;
3089 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
3090 }else{
3091 st->r_frame_rate.num = st->time_base.den;
3092 st->r_frame_rate.den = st->time_base.num;
3093 }
3094 }
3095 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
3096 if(!st->codec->bits_per_coded_sample)
3097 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
3098 // set stream disposition based on audio service type
3099 switch (st->codec->audio_service_type) {
3100 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
3101 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
3102 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
3103 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
3104 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
3105 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
3106 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
3107 st->disposition = AV_DISPOSITION_COMMENT; break;
3108 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
3109 st->disposition = AV_DISPOSITION_KARAOKE; break;
3110 }
3111 }
3112 }
3113
3114 if(ic->probesize)
3115 estimate_timings(ic, old_offset);
3116
3117 if (ret >= 0 && ic->nb_streams)
3118 ret = -1; /* we could not get all the codec parameters before EOF */
3119 for(i=0;i<ic->nb_streams;i++) {
3120 const char *errmsg;
3121 st = ic->streams[i];
3122 if (!has_codec_parameters(st, &errmsg)) {
3123 char buf[256];
3124 avcodec_string(buf, sizeof(buf), st->codec, 0);
3125 av_log(ic, AV_LOG_WARNING,
3126 "Could not find codec parameters for stream %d (%s): %s\n"
3127 "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3128 i, buf, errmsg);
3129 } else {
3130 ret = 0;
3131 }
3132 }
3133
3134 compute_chapters_end(ic);
3135
3136 find_stream_info_err:
3137 for (i=0; i < ic->nb_streams; i++) {
3138 st = ic->streams[i];
3139 if (ic->streams[i]->codec && ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
3140 ic->streams[i]->codec->thread_count = 0;
3141 if (st->info)
3142 av_freep(&st->info->duration_error);
3143 av_freep(&ic->streams[i]->info);
3144 }
3145 if(ic->pb)
3146 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
3147 return ret;
3148}
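
/*
 * Illustrative sketch (not part of the build): the usual calling sequence
 * pairs this with avformat_open_input(); the input path is hypothetical.
 *
 *     AVFormatContext *ic = NULL;
 *     if (avformat_open_input(&ic, "input.mkv", NULL, NULL) < 0)
 *         return -1;
 *     if (avformat_find_stream_info(ic, NULL) < 0) {
 *         avformat_close_input(&ic);
 *         return -1;
 *     }
 *     // ... read packets with av_read_frame() ...
 *     avformat_close_input(&ic);
 */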
3149
3150AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3151{
3152 int i, j;
3153
3154 for (i = 0; i < ic->nb_programs; i++) {
3155 if (ic->programs[i] == last) {
3156 last = NULL;
3157 } else {
3158 if (!last)
3159 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3160 if (ic->programs[i]->stream_index[j] == s)
3161 return ic->programs[i];
3162 }
3163 }
3164 return NULL;
3165}
3166
3167int av_find_best_stream(AVFormatContext *ic,
3168 enum AVMediaType type,
3169 int wanted_stream_nb,
3170 int related_stream,
3171 AVCodec **decoder_ret,
3172 int flags)
3173{
3174 int i, nb_streams = ic->nb_streams;
3175 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3176 unsigned *program = NULL;
3177 AVCodec *decoder = NULL, *best_decoder = NULL;
3178
3179 if (related_stream >= 0 && wanted_stream_nb < 0) {
3180 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3181 if (p) {
3182 program = p->stream_index;
3183 nb_streams = p->nb_stream_indexes;
3184 }
3185 }
3186 for (i = 0; i < nb_streams; i++) {
3187 int real_stream_index = program ? program[i] : i;
3188 AVStream *st = ic->streams[real_stream_index];
3189 AVCodecContext *avctx = st->codec;
3190 if (avctx->codec_type != type)
3191 continue;
3192 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3193 continue;
3194 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
3195 continue;
3196 if (decoder_ret) {
3197 decoder = find_decoder(ic, st, st->codec->codec_id);
3198 if (!decoder) {
3199 if (ret < 0)
3200 ret = AVERROR_DECODER_NOT_FOUND;
3201 continue;
3202 }
3203 }
3204 count = st->codec_info_nb_frames;
3205 bitrate = avctx->bit_rate;
3206 multiframe = FFMIN(5, count);
3207 if ((best_multiframe > multiframe) ||
3208 (best_multiframe == multiframe && best_bitrate > bitrate) ||
3209 (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3210 continue;
3211 best_count = count;
3212 best_bitrate = bitrate;
3213 best_multiframe = multiframe;
3214 ret = real_stream_index;
3215 best_decoder = decoder;
3216 if (program && i == nb_streams - 1 && ret < 0) {
3217 program = NULL;
3218 nb_streams = ic->nb_streams;
3219 i = 0; /* no related stream found, try again with everything */
3220 }
3221 }
3222 if (decoder_ret)
3223 *decoder_ret = best_decoder;
3224 return ret;
3225}
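
/*
 * Illustrative sketch (not part of the build): selecting the "best" video
 * stream together with a decoder for it, after stream info has been found:
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx >= 0)
 *         avcodec_open2(ic->streams[idx]->codec, dec, NULL);
 */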
3226
3227/*******************************************************/
3228
3229int av_read_play(AVFormatContext *s)
3230{
3231 if (s->iformat->read_play)
3232 return s->iformat->read_play(s);
3233 if (s->pb)
3234 return avio_pause(s->pb, 0);
3235 return AVERROR(ENOSYS);
3236}
3237
3238int av_read_pause(AVFormatContext *s)
3239{
3240 if (s->iformat->read_pause)
3241 return s->iformat->read_pause(s);
3242 if (s->pb)
3243 return avio_pause(s->pb, 1);
3244 return AVERROR(ENOSYS);
3245}
3246
3247void ff_free_stream(AVFormatContext *s, AVStream *st){
3248 av_assert0(s->nb_streams>0);
3249 av_assert0(s->streams[ s->nb_streams-1 ] == st);
3250
3251 if (st->parser) {
3252 av_parser_close(st->parser);
3253 }
3254 if (st->attached_pic.data)
3255 av_free_packet(&st->attached_pic);
3256 av_dict_free(&st->metadata);
3257 av_freep(&st->probe_data.buf);
3258 av_freep(&st->index_entries);
3259 av_freep(&st->codec->extradata);
3260 av_freep(&st->codec->subtitle_header);
3261 av_freep(&st->codec);
3262 av_freep(&st->priv_data);
3263 if (st->info)
3264 av_freep(&st->info->duration_error);
3265 av_freep(&st->info);
3266 av_freep(&s->streams[ --s->nb_streams ]);
3267}
3268
3269void avformat_free_context(AVFormatContext *s)
3270{
3271 int i;
3272
3273 if (!s)
3274 return;
3275
3276 av_opt_free(s);
3277 if (s->iformat && s->iformat->priv_class && s->priv_data)
3278 av_opt_free(s->priv_data);
3279
3280 for(i=s->nb_streams-1; i>=0; i--) {
3281 ff_free_stream(s, s->streams[i]);
3282 }
3283 for(i=s->nb_programs-1; i>=0; i--) {
3284 av_dict_free(&s->programs[i]->metadata);
3285 av_freep(&s->programs[i]->stream_index);
3286 av_freep(&s->programs[i]);
3287 }
3288 av_freep(&s->programs);
3289 av_freep(&s->priv_data);
3290 while(s->nb_chapters--) {
3291 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
3292 av_freep(&s->chapters[s->nb_chapters]);
3293 }
3294 av_freep(&s->chapters);
3295 av_dict_free(&s->metadata);
3296 av_freep(&s->streams);
3297 av_free(s);
3298}
3299
3300#if FF_API_CLOSE_INPUT_FILE
3301void av_close_input_file(AVFormatContext *s)
3302{
3303 avformat_close_input(&s);
3304}
3305#endif
3306
3307void avformat_close_input(AVFormatContext **ps)
3308{
3309 AVFormatContext *s;
3310 AVIOContext *pb;
3311
3312 if (!ps || !*ps)
3313 return;
3314
3315 s = *ps;
3316 pb = s->pb;
3317
3318 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
3319 (s->flags & AVFMT_FLAG_CUSTOM_IO))
3320 pb = NULL;
3321
3322 flush_packet_queue(s);
3323
3324 if (s->iformat) {
3325 if (s->iformat->read_close)
3326 s->iformat->read_close(s);
3327 }
3328
3329 avformat_free_context(s);
3330
3331 *ps = NULL;
3332
3333 avio_close(pb);
3334}
3335
3336#if FF_API_NEW_STREAM
3337AVStream *av_new_stream(AVFormatContext *s, int id)
3338{
3339 AVStream *st = avformat_new_stream(s, NULL);
3340 if (st)
3341 st->id = id;
3342 return st;
3343}
3344#endif
3345
3346AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
3347{
3348 AVStream *st;
3349 int i;
3350 AVStream **streams;
3351
3352 if (s->nb_streams >= INT_MAX/sizeof(*streams))
3353 return NULL;
3354 streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
3355 if (!streams)
3356 return NULL;
3357 s->streams = streams;
3358
3359 st = av_mallocz(sizeof(AVStream));
3360 if (!st)
3361 return NULL;
3362 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3363 av_free(st);
3364 return NULL;
3365 }
3366 st->info->last_dts = AV_NOPTS_VALUE;
3367
3368 st->codec = avcodec_alloc_context3(c);
3369 if (s->iformat) {
3370 /* no default bitrate if decoding */
3371 st->codec->bit_rate = 0;
3372 }
3373 st->index = s->nb_streams;
3374 st->start_time = AV_NOPTS_VALUE;
3375 st->duration = AV_NOPTS_VALUE;
3376 /* We set the current DTS to 0 so that formats without any timestamps
3377 but with durations get some timestamps; formats with some unknown
3378 timestamps have their first few packets buffered and the
3379 timestamps corrected before they are returned to the user. */
3380 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3381 st->first_dts = AV_NOPTS_VALUE;
3382 st->probe_packets = MAX_PROBE_PACKETS;
3383 st->pts_wrap_reference = AV_NOPTS_VALUE;
3384 st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3385
3386 /* default pts setting is MPEG-like */
3387 avpriv_set_pts_info(st, 33, 1, 90000);
3388 st->last_IP_pts = AV_NOPTS_VALUE;
3389 for(i=0; i<MAX_REORDER_DELAY+1; i++)
3390 st->pts_buffer[i]= AV_NOPTS_VALUE;
3391 st->reference_dts = AV_NOPTS_VALUE;
3392
3393 st->sample_aspect_ratio = (AVRational){0,1};
3394
3395#if FF_API_R_FRAME_RATE
3396 st->info->last_dts = AV_NOPTS_VALUE;
3397#endif
3398 st->info->fps_first_dts = AV_NOPTS_VALUE;
3399 st->info->fps_last_dts = AV_NOPTS_VALUE;
3400
3401 s->streams[s->nb_streams++] = st;
3402 return st;
3403}
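
/*
 * Illustrative sketch (not part of the build): a muxing application creates
 * its output streams through this function before writing the header; "oc"
 * and "enc" (an encoder AVCodecContext) are hypothetical.
 *
 *     AVStream *st = avformat_new_stream(oc, enc->codec);
 *     if (!st)
 *         return AVERROR(ENOMEM);
 *     st->codec->codec_id = enc->codec_id;
 *     // ... set time_base and the remaining codec parameters ...
 */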
3404
3405AVProgram *av_new_program(AVFormatContext *ac, int id)
3406{
3407 AVProgram *program=NULL;
3408 int i;
3409
3410 av_dlog(ac, "new_program: id=0x%04x\n", id);
3411
3412 for(i=0; i<ac->nb_programs; i++)
3413 if(ac->programs[i]->id == id)
3414 program = ac->programs[i];
3415
3416 if(!program){
3417 program = av_mallocz(sizeof(AVProgram));
3418 if (!program)
3419 return NULL;
3420 dynarray_add(&ac->programs, &ac->nb_programs, program);
3421 program->discard = AVDISCARD_NONE;
3422 }
3423 program->id = id;
3424 program->pts_wrap_reference = AV_NOPTS_VALUE;
3425 program->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3426
3427 program->start_time =
3428 program->end_time = AV_NOPTS_VALUE;
3429
3430 return program;
3431}
3432
3433AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3434{
3435 AVChapter *chapter = NULL;
3436 int i;
3437
3438 for(i=0; i<s->nb_chapters; i++)
3439 if(s->chapters[i]->id == id)
3440 chapter = s->chapters[i];
3441
3442 if(!chapter){
3443 chapter= av_mallocz(sizeof(AVChapter));
3444 if(!chapter)
3445 return NULL;
3446 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3447 }
3448 av_dict_set(&chapter->metadata, "title", title, 0);
3449 chapter->id = id;
3450 chapter->time_base= time_base;
3451 chapter->start = start;
3452 chapter->end = end;
3453
3454 return chapter;
3455}
3456
3457void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3458{
3459 int i, j;
3460 AVProgram *program=NULL;
3461 void *tmp;
3462
3463 if (idx >= ac->nb_streams) {
3464 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3465 return;
3466 }
3467
3468 for(i=0; i<ac->nb_programs; i++){
3469 if(ac->programs[i]->id != progid)
3470 continue;
3471 program = ac->programs[i];
3472 for(j=0; j<program->nb_stream_indexes; j++)
3473 if(program->stream_index[j] == idx)
3474 return;
3475
3476 tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int));
3477 if(!tmp)
3478 return;
3479 program->stream_index = tmp;
3480 program->stream_index[program->nb_stream_indexes++] = idx;
3481 return;
3482 }
3483}
3484
3485static void print_fps(double d, const char *postfix){
3486 uint64_t v= lrintf(d*100);
3487 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3488 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3489 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3490}
3491
3492static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3493{
3494 if(m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))){
3495 AVDictionaryEntry *tag=NULL;
3496
3497 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3498 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3499 if(strcmp("language", tag->key)){
3500 const char *p = tag->value;
3501 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
3502 while(*p) {
3503 char tmp[256];
3504 size_t len = strcspn(p, "\x8\xa\xb\xc\xd");
3505 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3506 av_log(ctx, AV_LOG_INFO, "%s", tmp);
3507 p += len;
3508 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3509 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3510 if (*p) p++;
3511 }
3512 av_log(ctx, AV_LOG_INFO, "\n");
3513 }
3514 }
3515 }
3516}
3517
3518/* "user interface" functions */
3519static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3520{
3521 char buf[256];
3522 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3523 AVStream *st = ic->streams[i];
3524 int g = av_gcd(st->time_base.num, st->time_base.den);
3525 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3526 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3527 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3528 /* the pid is an important piece of information, so we display it */
3529 /* XXX: add a generic system */
3530 if (flags & AVFMT_SHOW_IDS)
3531 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3532 if (lang)
3533 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3534 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3535 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3536 if (st->sample_aspect_ratio.num && // SAR is set, i.e. not the 0/1 default
3537 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3538 AVRational display_aspect_ratio;
3539 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3540 st->codec->width*st->sample_aspect_ratio.num,
3541 st->codec->height*st->sample_aspect_ratio.den,
3542 1024*1024);
3543 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3544 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3545 display_aspect_ratio.num, display_aspect_ratio.den);
3546 }
3547 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3548 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3549 print_fps(av_q2d(st->avg_frame_rate), "fps");
3550#if FF_API_R_FRAME_RATE
3551 if(st->r_frame_rate.den && st->r_frame_rate.num)
3552 print_fps(av_q2d(st->r_frame_rate), "tbr");
3553#endif
3554 if(st->time_base.den && st->time_base.num)
3555 print_fps(1/av_q2d(st->time_base), "tbn");
3556 if(st->codec->time_base.den && st->codec->time_base.num)
3557 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3558 }
3559 if (st->disposition & AV_DISPOSITION_DEFAULT)
3560 av_log(NULL, AV_LOG_INFO, " (default)");
3561 if (st->disposition & AV_DISPOSITION_DUB)
3562 av_log(NULL, AV_LOG_INFO, " (dub)");
3563 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3564 av_log(NULL, AV_LOG_INFO, " (original)");
3565 if (st->disposition & AV_DISPOSITION_COMMENT)
3566 av_log(NULL, AV_LOG_INFO, " (comment)");
3567 if (st->disposition & AV_DISPOSITION_LYRICS)
3568 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3569 if (st->disposition & AV_DISPOSITION_KARAOKE)
3570 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3571 if (st->disposition & AV_DISPOSITION_FORCED)
3572 av_log(NULL, AV_LOG_INFO, " (forced)");
3573 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3574 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3575 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3576 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3577 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3578 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3579 av_log(NULL, AV_LOG_INFO, "\n");
3580 dump_metadata(NULL, st->metadata, " ");
3581}
3582
3583void av_dump_format(AVFormatContext *ic,
3584 int index,
3585 const char *url,
3586 int is_output)
3587{
3588 int i;
3589 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3590 if (ic->nb_streams && !printed)
3591 return;
3592
3593 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3594 is_output ? "Output" : "Input",
3595 index,
3596 is_output ? ic->oformat->name : ic->iformat->name,
3597 is_output ? "to" : "from", url);
3598 dump_metadata(NULL, ic->metadata, " ");
3599 if (!is_output) {
3600 av_log(NULL, AV_LOG_INFO, " Duration: ");
3601 if (ic->duration != AV_NOPTS_VALUE) {
3602 int hours, mins, secs, us;
3603 int64_t duration = ic->duration + 5000;
3604 secs = duration / AV_TIME_BASE;
3605 us = duration % AV_TIME_BASE;
3606 mins = secs / 60;
3607 secs %= 60;
3608 hours = mins / 60;
3609 mins %= 60;
3610 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3611 (100 * us) / AV_TIME_BASE);
3612 } else {
3613 av_log(NULL, AV_LOG_INFO, "N/A");
3614 }
3615 if (ic->start_time != AV_NOPTS_VALUE) {
3616 int secs, us;
3617 av_log(NULL, AV_LOG_INFO, ", start: ");
3618 secs = ic->start_time / AV_TIME_BASE;
3619 us = abs(ic->start_time % AV_TIME_BASE);
3620 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3621 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3622 }
3623 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3624 if (ic->bit_rate) {
3625 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3626 } else {
3627 av_log(NULL, AV_LOG_INFO, "N/A");
3628 }
3629 av_log(NULL, AV_LOG_INFO, "\n");
3630 }
3631 for (i = 0; i < ic->nb_chapters; i++) {
3632 AVChapter *ch = ic->chapters[i];
3633 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3634 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3635 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3636
3637 dump_metadata(NULL, ch->metadata, " ");
3638 }
3639 if(ic->nb_programs) {
3640 int j, k, total = 0;
3641 for(j=0; j<ic->nb_programs; j++) {
3642 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3643 "name", NULL, 0);
3644 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3645 name ? name->value : "");
3646 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3647 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3648 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3649 printed[ic->programs[j]->stream_index[k]] = 1;
3650 }
3651 total += ic->programs[j]->nb_stream_indexes;
3652 }
3653 if (total < ic->nb_streams)
3654 av_log(NULL, AV_LOG_INFO, " No Program\n");
3655 }
3656 for(i=0;i<ic->nb_streams;i++)
3657 if (!printed[i])
3658 dump_stream_format(ic, i, index, is_output);
3659
3660 av_free(printed);
3661}
3662
3663uint64_t ff_ntp_time(void)
3664{
3665 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3666}
3667
3668int av_get_frame_filename(char *buf, int buf_size,
3669 const char *path, int number)
3670{
3671 const char *p;
3672 char *q, buf1[20], c;
3673 int nd, len, percentd_found;
3674
3675 q = buf;
3676 p = path;
3677 percentd_found = 0;
3678 for(;;) {
3679 c = *p++;
3680 if (c == '\0')
3681 break;
3682 if (c == '%') {
3683 do {
3684 nd = 0;
3685 while (av_isdigit(*p)) {
3686 nd = nd * 10 + *p++ - '0';
3687 }
3688 c = *p++;
3689 } while (av_isdigit(c));
3690
3691 switch(c) {
3692 case '%':
3693 goto addchar;
3694 case 'd':
3695 if (percentd_found)
3696 goto fail;
3697 percentd_found = 1;
3698 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3699 len = strlen(buf1);
3700 if ((q - buf + len) > buf_size - 1)
3701 goto fail;
3702 memcpy(q, buf1, len);
3703 q += len;
3704 break;
3705 default:
3706 goto fail;
3707 }
3708 } else {
3709 addchar:
3710 if ((q - buf) < buf_size - 1)
3711 *q++ = c;
3712 }
3713 }
3714 if (!percentd_found)
3715 goto fail;
3716 *q = '\0';
3717 return 0;
3718 fail:
3719 *q = '\0';
3720 return -1;
3721}
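/*
 * Example for av_get_frame_filename() above (illustrative only): a single
 * "%d" (optionally with a width, e.g. "%03d") is replaced by the frame
 * number, "%%" yields a literal '%', and any other conversion, or a missing
 * "%d", makes the call fail with -1. The buffer and pattern below are
 * assumptions for the example.
 *
 *     char name[1024];
 *     if (av_get_frame_filename(name, sizeof(name), "img-%03d.png", 7) == 0) {
 *         // name now contains "img-007.png"
 *     }
 */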
3722
3723static void hex_dump_internal(void *avcl, FILE *f, int level,
3724 const uint8_t *buf, int size)
3725{
3726 int len, i, j, c;
3727#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3728
3729 for(i=0;i<size;i+=16) {
3730 len = size - i;
3731 if (len > 16)
3732 len = 16;
3733 PRINT("%08x ", i);
3734 for(j=0;j<16;j++) {
3735 if (j < len)
3736 PRINT(" %02x", buf[i+j]);
3737 else
3738 PRINT(" ");
3739 }
3740 PRINT(" ");
3741 for(j=0;j<len;j++) {
3742 c = buf[i+j];
3743 if (c < ' ' || c > '~')
3744 c = '.';
3745 PRINT("%c", c);
3746 }
3747 PRINT("\n");
3748 }
3749#undef PRINT
3750}
3751
3752void av_hex_dump(FILE *f, const uint8_t *buf, int size)
3753{
3754 hex_dump_internal(NULL, f, 0, buf, size);
3755}
3756
3757void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
3758{
3759 hex_dump_internal(avcl, NULL, level, buf, size);
3760}
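/*
 * Illustrative sketch for the two wrappers above: they differ only in where
 * the dump goes. av_hex_dump() writes to a stdio stream while
 * av_hex_dump_log() routes the same 16-bytes-per-line output through
 * av_log() at the given level. pkt and s are assumptions standing for an
 * AVPacket and a logging context.
 *
 *     av_hex_dump(stderr, pkt->data, FFMIN(pkt->size, 64));
 *     av_hex_dump_log(s, AV_LOG_DEBUG, pkt->data, FFMIN(pkt->size, 64));
 */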
3761
3762static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3763{
3764#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3765 PRINT("stream #%d:\n", pkt->stream_index);
3766 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3767 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3768 /* DTS is _always_ valid after av_read_frame() */
3769 PRINT(" dts=");
3770 if (pkt->dts == AV_NOPTS_VALUE)
3771 PRINT("N/A");
3772 else
3773 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3774 /* PTS may not be known if B-frames are present. */
3775 PRINT(" pts=");
3776 if (pkt->pts == AV_NOPTS_VALUE)
3777 PRINT("N/A");
3778 else
3779 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3780 PRINT("\n");
3781 PRINT(" size=%d\n", pkt->size);
3782#undef PRINT
3783 if (dump_payload)
3784 av_hex_dump(f, pkt->data, pkt->size);
3785}
3786
3787void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3788{
3789 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3790}
3791
3792void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3793 AVStream *st)
3794{
3795 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
3796}
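/*
 * Usage sketch for av_pkt_dump_log2() above (illustrative only): dumping a
 * freshly demuxed packet in the time base of its stream. ic and pkt are
 * assumptions for an opened AVFormatContext and the packet returned by
 * av_read_frame(); the last argument selects the stream whose time base is
 * used to convert the timestamps.
 *
 *     av_pkt_dump_log2(ic, AV_LOG_DEBUG, &pkt,
 *                      0, ic->streams[pkt.stream_index]);
 */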
3797
3798void av_url_split(char *proto, int proto_size,
3799 char *authorization, int authorization_size,
3800 char *hostname, int hostname_size,
3801 int *port_ptr,
3802 char *path, int path_size,
3803 const char *url)
3804{
3805 const char *p, *ls, *ls2, *at, *at2, *col, *brk;
3806
3807 if (port_ptr) *port_ptr = -1;
3808 if (proto_size > 0) proto[0] = 0;
3809 if (authorization_size > 0) authorization[0] = 0;
3810 if (hostname_size > 0) hostname[0] = 0;
3811 if (path_size > 0) path[0] = 0;
3812
3813 /* parse protocol */
3814 if ((p = strchr(url, ':'))) {
3815 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3816 p++; /* skip ':' */
3817 if (*p == '/') p++;
3818 if (*p == '/') p++;
3819 } else {
3820 /* no protocol means plain filename */
3821 av_strlcpy(path, url, path_size);
3822 return;
3823 }
3824
3825 /* separate path from hostname */
3826 ls = strchr(p, '/');
3827 ls2 = strchr(p, '?');
3828 if(!ls)
3829 ls = ls2;
3830 else if (ls2)
3831 ls = FFMIN(ls, ls2);
3832 if(ls)
3833 av_strlcpy(path, ls, path_size);
3834 else
3835 ls = &p[strlen(p)]; // XXX
3836
3837 /* the rest is hostname, use that to parse auth/port */
3838 if (ls != p) {
3839 /* authorization (user[:pass]@hostname) */
3840 at2 = p;
3841 while ((at = strchr(p, '@')) && at < ls) {
3842 av_strlcpy(authorization, at2,
3843 FFMIN(authorization_size, at + 1 - at2));
3844 p = at + 1; /* skip '@' */
3845 }
3846
3847 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3848 /* [host]:port */
3849 av_strlcpy(hostname, p + 1,
3850 FFMIN(hostname_size, brk - p));
3851 if (brk[1] == ':' && port_ptr)
3852 *port_ptr = atoi(brk + 2);
3853 } else if ((col = strchr(p, ':')) && col < ls) {
3854 av_strlcpy(hostname, p,
3855 FFMIN(col + 1 - p, hostname_size));
3856 if (port_ptr) *port_ptr = atoi(col + 1);
3857 } else
3858 av_strlcpy(hostname, p,
3859 FFMIN(ls + 1 - p, hostname_size));
3860 }
3861}
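/*
 * Example of how av_url_split() takes a URL apart (illustrative only; the
 * URL is an assumption, not one used elsewhere in FFmpeg):
 *
 *     char proto[8], auth[64], host[128], path[256];
 *     int port;
 *     av_url_split(proto, sizeof(proto), auth, sizeof(auth),
 *                  host, sizeof(host), &port, path, sizeof(path),
 *                  "rtsp://user:pass@example.com:8554/stream?foo=1");
 *     // proto = "rtsp", auth = "user:pass", host = "example.com",
 *     // port = 8554, path = "/stream?foo=1"
 *     // With no port in the URL, port stays -1; with no protocol prefix,
 *     // everything lands in path.
 */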
3862
3863char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3864{
3865 int i;
3866 static const char hex_table_uc[16] = { '0', '1', '2', '3',
3867 '4', '5', '6', '7',
3868 '8', '9', 'A', 'B',
3869 'C', 'D', 'E', 'F' };
3870 static const char hex_table_lc[16] = { '0', '1', '2', '3',
3871 '4', '5', '6', '7',
3872 '8', '9', 'a', 'b',
3873 'c', 'd', 'e', 'f' };
3874 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
3875
3876 for(i = 0; i < s; i++) {
3877 buff[i * 2] = hex_table[src[i] >> 4];
3878 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3879 }
3880
3881 return buff;
3882}
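/*
 * Note on ff_data_to_hex() above (illustrative sketch): the output is not
 * NUL-terminated here, so callers that want a C string must reserve
 * 2 * s + 1 bytes and terminate it themselves. digest is an assumption
 * standing for any 16-byte buffer, e.g. an MD5 digest.
 *
 *     char hex[2 * 16 + 1];
 *     ff_data_to_hex(hex, digest, 16, 1);
 *     hex[32] = '\0';
 */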
3883
3884int ff_hex_to_data(uint8_t *data, const char *p)
3885{
3886 int c, len, v;
3887
3888 len = 0;
3889 v = 1;
3890 for (;;) {
3891 p += strspn(p, SPACE_CHARS);
3892 if (*p == '\0')
3893 break;
3894 c = av_toupper((unsigned char) *p++);
3895 if (c >= '0' && c <= '9')
3896 c = c - '0';
3897 else if (c >= 'A' && c <= 'F')
3898 c = c - 'A' + 10;
3899 else
3900 break;
3901 v = (v << 4) | c;
3902 if (v & 0x100) {
3903 if (data)
3904 data[len] = v;
3905 len++;
3906 v = 1;
3907 }
3908 }
3909 return len;
3910}
3911
3912#if FF_API_SET_PTS_INFO
3913void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3914 unsigned int pts_num, unsigned int pts_den)
3915{
3916 avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
3917}
3918#endif
3919
3920void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3921 unsigned int pts_num, unsigned int pts_den)
3922{
3923 AVRational new_tb;
3924 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3925 if(new_tb.num != pts_num)
3926 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3927 }else
3928 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3929
3930 if(new_tb.num <= 0 || new_tb.den <= 0) {
3931 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
3932 return;
3933 }
3934 s->time_base = new_tb;
3935 av_codec_set_pkt_timebase(s->codec, new_tb);
3936 s->pts_wrap_bits = pts_wrap_bits;
3937}
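/*
 * Usage sketch for avpriv_set_pts_info() above (illustrative only): demuxers
 * call this once per stream, right after creating it. The values below are
 * what an MPEG-TS style demuxer uses: 33-bit PTS/DTS that wrap, expressed in
 * a 1/90000 time base.
 *
 *     avpriv_set_pts_info(st, 33, 1, 90000);
 */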
3938
3939void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3940 void *context)
3941{
3942 const char *ptr = str;
3943
3944 /* Parse key=value pairs. */
3945 for (;;) {
3946 const char *key;
3947 char *dest = NULL, *dest_end;
3948 int key_len, dest_len = 0;
3949
3950 /* Skip whitespace and potential commas. */
3951 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3952 ptr++;
3953 if (!*ptr)
3954 break;
3955
3956 key = ptr;
3957
3958 if (!(ptr = strchr(key, '=')))
3959 break;
3960 ptr++;
3961 key_len = ptr - key;
3962
3963 callback_get_buf(context, key, key_len, &dest, &dest_len);
3964 dest_end = dest + dest_len - 1;
3965
3966 if (*ptr == '\"') {
3967 ptr++;
3968 while (*ptr && *ptr != '\"') {
3969 if (*ptr == '\\') {
3970 if (!ptr[1])
3971 break;
3972 if (dest && dest < dest_end)
3973 *dest++ = ptr[1];
3974 ptr += 2;
3975 } else {
3976 if (dest && dest < dest_end)
3977 *dest++ = *ptr;
3978 ptr++;
3979 }
3980 }
3981 if (*ptr == '\"')
3982 ptr++;
3983 } else {
3984 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3985 if (dest && dest < dest_end)
3986 *dest++ = *ptr;
3987 }
3988 if (dest)
3989 *dest = 0;
3990 }
3991}
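/*
 * Callback sketch for ff_parse_key_value() above (illustrative only;
 * my_get_buf, struct my_ctx and its fields are assumptions). The callback
 * picks a destination buffer per key; note that key_len includes the
 * trailing '=', which is why the strncmp() patterns below end in '='.
 *
 *     static void my_get_buf(void *ctx, const char *key, int key_len,
 *                            char **dest, int *dest_len)
 *     {
 *         struct my_ctx *c = ctx;
 *         if (!strncmp(key, "realm=", key_len)) {
 *             *dest     = c->realm;
 *             *dest_len = sizeof(c->realm);
 *         } else if (!strncmp(key, "nonce=", key_len)) {
 *             *dest     = c->nonce;
 *             *dest_len = sizeof(c->nonce);
 *         }
 *     }
 *
 *     ff_parse_key_value("realm=\"test\", nonce=abc", my_get_buf, &c);
 */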
3992
3993int ff_find_stream_index(AVFormatContext *s, int id)
3994{
3995 int i;
3996 for (i = 0; i < s->nb_streams; i++) {
3997 if (s->streams[i]->id == id)
3998 return i;
3999 }
4000 return -1;
4001}
4002
4003int64_t ff_iso8601_to_unix_time(const char *datestr)
4004{
4005 struct tm time1 = {0}, time2 = {0};
4006 char *ret1, *ret2;
4007 ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
4008 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
4009 if (ret2 && !ret1)
4010 return av_timegm(&time2);
4011 else
4012 return av_timegm(&time1);
4013}
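/*
 * Illustrative note on ff_iso8601_to_unix_time() above: both the
 * space-separated and the 'T'-separated ISO 8601 forms are accepted and
 * interpreted as UTC via av_timegm(), e.g.
 *
 *     ff_iso8601_to_unix_time("2013-01-01T00:00:00");  // 1356998400
 *     ff_iso8601_to_unix_time("2013-01-01 00:00:00");  // same value
 */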
4014
4015int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
4016{
4017 if (ofmt) {
4018 if (ofmt->query_codec)
4019 return ofmt->query_codec(codec_id, std_compliance);
4020 else if (ofmt->codec_tag)
4021 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4022 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4023 codec_id == ofmt->subtitle_codec)
4024 return 1;
4025 }
4026 return AVERROR_PATCHWELCOME;
4027}
4028
4029int avformat_network_init(void)
4030{
4031#if CONFIG_NETWORK
4032 int ret;
4033 ff_network_inited_globally = 1;
4034 if ((ret = ff_network_init()) < 0)
4035 return ret;
4036 ff_tls_init();
4037#endif
4038 return 0;
4039}
4040
4041int avformat_network_deinit(void)
4042{
4043#if CONFIG_NETWORK
4044 ff_network_close();
4045 ff_tls_deinit();
4046#endif
4047 return 0;
4048}
4049
4050int ff_add_param_change(AVPacket *pkt, int32_t channels,
4051 uint64_t channel_layout, int32_t sample_rate,
4052 int32_t width, int32_t height)
4053{
4054 uint32_t flags = 0;
4055 int size = 4;
4056 uint8_t *data;
4057 if (!pkt)
4058 return AVERROR(EINVAL);
4059 if (channels) {
4060 size += 4;
4061 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4062 }
4063 if (channel_layout) {
4064 size += 8;
4065 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4066 }
4067 if (sample_rate) {
4068 size += 4;
4069 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4070 }
4071 if (width || height) {
4072 size += 8;
4073 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4074 }
4075 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4076 if (!data)
4077 return AVERROR(ENOMEM);
4078 bytestream_put_le32(&data, flags);
4079 if (channels)
4080 bytestream_put_le32(&data, channels);
4081 if (channel_layout)
4082 bytestream_put_le64(&data, channel_layout);
4083 if (sample_rate)
4084 bytestream_put_le32(&data, sample_rate);
4085 if (width || height) {
4086 bytestream_put_le32(&data, width);
4087 bytestream_put_le32(&data, height);
4088 }
4089 return 0;
4090}
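/*
 * Layout sketch of the AV_PKT_DATA_PARAM_CHANGE side data written by
 * ff_add_param_change() above (all fields little-endian; each optional field
 * is present only when its flag bit is set):
 *
 *     u32  flags
 *     u32  channel count      (AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
 *     u64  channel layout     (AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
 *     u32  sample rate        (AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
 *     u32  width, u32 height  (AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
 *
 * A demuxer signalling only a sample rate change would call, e.g. (pkt being
 * an assumption for the packet carrying the change):
 *
 *     ff_add_param_change(pkt, 0, 0, 48000, 0, 0);
 */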
4091
4092AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4093{
4094 AVRational undef = {0, 1};
4095 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4096 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4097 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4098
4099 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4100 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4101 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4102 stream_sample_aspect_ratio = undef;
4103
4104 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4105 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4106 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4107 frame_sample_aspect_ratio = undef;
4108
4109 if (stream_sample_aspect_ratio.num)
4110 return stream_sample_aspect_ratio;
4111 else
4112 return frame_sample_aspect_ratio;
4113}
4114
4115AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4116{
4117 AVRational fr = st->r_frame_rate;
4118
4119 if (st->codec->ticks_per_frame > 1) {
4120 AVRational codec_fr = av_inv_q(st->codec->time_base);
4121 AVRational avg_fr = st->avg_frame_rate;
4122 codec_fr.den *= st->codec->ticks_per_frame;
4123 if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
4124 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
4125 fr = codec_fr;
4126 }
4127
4128 return fr;
4129}
4130
4131int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4132 const char *spec)
4133{
4134 if (*spec <= '9' && *spec >= '0') /* opt:index */
4135 return strtol(spec, NULL, 0) == st->index;
4136 else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4137 *spec == 't') { /* opt:[vasdt] */
4138 enum AVMediaType type;
4139
4140 switch (*spec++) {
4141 case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4142 case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4143 case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4144 case 'd': type = AVMEDIA_TYPE_DATA; break;
4145 case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4146 default: av_assert0(0);
4147 }
4148 if (type != st->codec->codec_type)
4149 return 0;
4150 if (*spec++ == ':') { /* possibly followed by :index */
4151 int i, index = strtol(spec, NULL, 0);
4152 for (i = 0; i < s->nb_streams; i++)
4153 if (s->streams[i]->codec->codec_type == type && index-- == 0)
4154 return i == st->index;
4155 return 0;
4156 }
4157 return 1;
4158 } else if (*spec == 'p' && *(spec + 1) == ':') {
4159 int prog_id, i, j;
4160 char *endptr;
4161 spec += 2;
4162 prog_id = strtol(spec, &endptr, 0);
4163 for (i = 0; i < s->nb_programs; i++) {
4164 if (s->programs[i]->id != prog_id)
4165 continue;
4166
4167 if (*endptr++ == ':') {
4168 int stream_idx = strtol(endptr, NULL, 0);
4169 return stream_idx >= 0 &&
4170 stream_idx < s->programs[i]->nb_stream_indexes &&
4171 st->index == s->programs[i]->stream_index[stream_idx];
4172 }
4173
4174 for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4175 if (st->index == s->programs[i]->stream_index[j])
4176 return 1;
4177 }
4178 return 0;
4179 } else if (*spec == '#') {
4180 int sid;
4181 char *endptr;
4182 sid = strtol(spec + 1, &endptr, 0);
4183 if (!*endptr)
4184 return st->id == sid;
4185 } else if (!*spec) /* empty specifier, matches everything */
4186 return 1;
4187
4188 av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4189 return AVERROR(EINVAL);
4190}
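/*
 * Stream specifier examples for avformat_match_stream_specifier() above
 * (illustrative, matching the parsing in that function):
 *
 *     ""         any stream
 *     "2"        the stream with index 2
 *     "v"        any video stream
 *     "a:1"      the second audio stream
 *     "p:103"    any stream belonging to program 103
 *     "p:103:0"  the first stream of program 103
 *     "#0x1011"  the stream whose id (e.g. the MPEG-TS PID) is 0x1011
 */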
4191
4192void ff_generate_avci_extradata(AVStream *st)
4193{
4194 static const uint8_t avci100_1080p_extradata[] = {
4195 // SPS
4196 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4197 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4198 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4199 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
4200 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
4201 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
4202 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
4203 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
4204 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4205 // PPS
4206 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4207 0xd0
4208 };
4209 static const uint8_t avci100_1080i_extradata[] = {
4210 // SPS
4211 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4212 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4213 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4214 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
4215 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
4216 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
4217 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
4218 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
4219 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
4220 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
4221 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
4222 // PPS
4223 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4224 0xd0
4225 };
4226 static const uint8_t avci50_1080i_extradata[] = {
4227 // SPS
4228 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4229 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4230 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4231 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
4232 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
4233 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
4234 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
4235 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
4236 0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
4237 0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
4238 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
4239 // PPS
4240 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4241 0x11
4242 };
4243 static const uint8_t avci100_720p_extradata[] = {
4244 // SPS
4245 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4246 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
4247 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
4248 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
4249 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
4250 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
4251 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
4252 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
4253 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
4254 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
4255 // PPS
4256 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
4257 0x11
4258 };
4259 int size = 0;
4260 const uint8_t *data = NULL;
4261 if (st->codec->width == 1920) {
4262 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4263 data = avci100_1080p_extradata;
4264 size = sizeof(avci100_1080p_extradata);
4265 } else {
4266 data = avci100_1080i_extradata;
4267 size = sizeof(avci100_1080i_extradata);
4268 }
4269 } else if (st->codec->width == 1440) {
4270 data = avci50_1080i_extradata;
4271 size = sizeof(avci50_1080i_extradata);
4272 } else if (st->codec->width == 1280) {
4273 data = avci100_720p_extradata;
4274 size = sizeof(avci100_720p_extradata);
4275 }
4276 if (!size)
4277 return;
4278 av_freep(&st->codec->extradata);
4279 if (ff_alloc_extradata(st->codec, size))
4280 return;
4281 memcpy(st->codec->extradata, data, size);
4282}
4283