path: root/libavformat/utils.c (plain)
blob: 0c732c5c8eff1332180652390bcc3304f2ec86d5
1/*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#include "avformat.h"
23#include "avio_internal.h"
24#include "internal.h"
25#include "libavcodec/internal.h"
26#include "libavcodec/raw.h"
27#include "libavcodec/bytestream.h"
28#include "libavutil/opt.h"
29#include "libavutil/dict.h"
30#include "libavutil/internal.h"
31#include "libavutil/pixdesc.h"
32#include "metadata.h"
33#include "id3v2.h"
34#include "libavutil/avassert.h"
35#include "libavutil/avstring.h"
36#include "libavutil/mathematics.h"
37#include "libavutil/parseutils.h"
38#include "libavutil/time.h"
39#include "libavutil/timestamp.h"
40#include "riff.h"
41#include "audiointerleave.h"
42#include "url.h"
43#include <stdarg.h>
#include <time.h> /* clock_gettime(), used by avformat_getcurtime_us() below */
44#if CONFIG_NETWORK
45#include "network.h"
46#endif
47
48#undef NDEBUG
49#include <assert.h>
50
51/**
52 * @file
53 * various utility functions for use within FFmpeg
54 */
55
56unsigned avformat_version(void)
57{
58 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
59 return LIBAVFORMAT_VERSION_INT;
60}
61
62const char *avformat_configuration(void)
63{
64 return FFMPEG_CONFIGURATION;
65}
66
67const char *avformat_license(void)
68{
69#define LICENSE_PREFIX "libavformat license: "
70 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
71}
72
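/**
 * Return the current CLOCK_MONOTONIC time in microseconds.
 */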
73int64_t avformat_getcurtime_us(void)
74{
75 struct timespec timeval;
76 clock_gettime(CLOCK_MONOTONIC, &timeval);
77 return ((int64_t)timeval.tv_nsec / 1000 + (int64_t)timeval.tv_sec * 1000000);
78}
79
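/**
 * Timestamps of packets buffered before the real start time is known are
 * stored relative to RELATIVE_TS_BASE; is_relative() detects such values so
 * they can be shifted back once the first dts has been determined.
 */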
80#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))
81
82static int is_relative(int64_t ts) {
83 return ts > (RELATIVE_TS_BASE - (1LL<<48));
84}
85
86/**
 87 * Wrap a given timestamp if there is an indication of timestamp overflow.
88 *
89 * @param st stream
90 * @param timestamp the time stamp to wrap
91 * @return resulting time stamp
92 */
93static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
94{
95 if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE &&
96 st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
97 if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
98 timestamp < st->pts_wrap_reference)
99 return timestamp + (1ULL<<st->pts_wrap_bits);
100 else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
101 timestamp >= st->pts_wrap_reference)
102 return timestamp - (1ULL<<st->pts_wrap_bits);
103 }
104 return timestamp;
105}
106
107MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
108MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, video_codec)
109MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
110MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
111
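/**
 * Return the decoder to use for a stream: an already attached codec, a codec
 * forced on the format context for this media type, or the default decoder
 * for the given codec id.
 */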
112static AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
113{
114 if (st->codec->codec)
115 return st->codec->codec;
116
117 switch(st->codec->codec_type){
118 case AVMEDIA_TYPE_VIDEO:
119 if(s->video_codec) return s->video_codec;
120 break;
121 case AVMEDIA_TYPE_AUDIO:
122 if(s->audio_codec) return s->audio_codec;
123 break;
124 case AVMEDIA_TYPE_SUBTITLE:
125 if(s->subtitle_codec) return s->subtitle_codec;
126 break;
127 }
128
129 return avcodec_find_decoder(codec_id);
130}
131
132int av_format_get_probe_score(const AVFormatContext *s)
133{
134 return s->probe_score;
135}
136
137/* an arbitrarily chosen "sane" max packet size -- 50M */
138#define SANE_CHUNK_SIZE (50000000)
139
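/**
 * Limit a read of the given size so it does not exceed the remaining space
 * up to s->maxsize, refreshing maxsize from avio_size() when needed.
 */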
140int ffio_limit(AVIOContext *s, int size)
141{
142 if(s->maxsize>=0){
143 int64_t remaining= s->maxsize - avio_tell(s);
144 if(remaining < size){
145 int64_t newsize= avio_size(s);
146 if(!s->maxsize || s->maxsize<newsize)
147 s->maxsize= newsize - !newsize;
148 remaining= s->maxsize - avio_tell(s);
149 remaining= FFMAX(remaining, 0);
150 }
151
152 if(s->maxsize>=0 && remaining+1 < size){
153 av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
154 size= remaining+1;
155 }
156 }
157 return size;
158}
159
160/*
161 * Read the data in sane-sized chunks and append to pkt.
162 * Return the number of bytes read or an error.
163 */
164static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
165{
166 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
167 int orig_size = pkt->size;
168 int ret;
169
170 do {
171 int prev_size = pkt->size;
172 int read_size;
173
174 /*
175 * When the caller requests a lot of data, limit it to the amount left
176 * in file or SANE_CHUNK_SIZE when it is not known
177 */
178 read_size = size;
179 if (read_size > SANE_CHUNK_SIZE/10) {
180 read_size = ffio_limit(s, read_size);
181 // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
182 if (s->maxsize < 0)
183 read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
184 }
185
186 ret = av_grow_packet(pkt, read_size);
187 if (ret < 0)
188 break;
189
190 ret = avio_read(s, pkt->data + prev_size, read_size);
191 if (ret != read_size) {
192 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
193 break;
194 }
195
196 size -= read_size;
197 } while (size > 0);
198 if (size > 0)
199 pkt->flags |= AV_PKT_FLAG_CORRUPT;
200
201 pkt->pos = orig_pos;
202 if (!pkt->size)
203 av_free_packet(pkt);
204 return pkt->size > orig_size ? pkt->size - orig_size : ret;
205}
206
207int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
208{
209 av_init_packet(pkt);
210 pkt->data = NULL;
211 pkt->size = 0;
212 pkt->pos = avio_tell(s);
213
214 return append_packet_chunked(s, pkt, size);
215}
216
217int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
218{
219 if (!pkt->size)
220 return av_get_packet(s, pkt, size);
221 return append_packet_chunked(s, pkt, size);
222}
223
224
225int av_filename_number_test(const char *filename)
226{
227 char buf[1024];
228 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
229}
230
231AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
232{
233 AVProbeData lpd = *pd;
234 AVInputFormat *fmt1 = NULL, *fmt;
235 int score, nodat = 0, score_max=0;
236    static const uint8_t zerobuffer[AVPROBE_PADDING_SIZE];
237
238 if (!lpd.buf)
239 lpd.buf = zerobuffer;
240
241 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
242 int id3len = ff_id3v2_tag_len(lpd.buf);
243 if (lpd.buf_size > id3len + 16) {
244 lpd.buf += id3len;
245 lpd.buf_size -= id3len;
246 }else
247 nodat = 1;
248 }
249
250 fmt = NULL;
251 while ((fmt1 = av_iformat_next(fmt1))) {
252 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
253 continue;
254 score = 0;
255 if (fmt1->read_probe) {
256 score = fmt1->read_probe(&lpd);
257 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
258 score = FFMAX(score, nodat ? AVPROBE_SCORE_EXTENSION / 2 - 1 : 1);
259 } else if (fmt1->extensions) {
260 if (av_match_ext(lpd.filename, fmt1->extensions)) {
261 score = AVPROBE_SCORE_EXTENSION;
262 }
263 }
264 if (score > score_max) {
265 score_max = score;
266 fmt = fmt1;
267 }else if (score == score_max)
268 fmt = NULL;
269 }
270 if(nodat)
271 score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
272 *score_ret= score_max;
273
274 return fmt;
275}
276
277AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
278{
279 int score_ret;
280 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
281 if(score_ret > *score_max){
282 *score_max= score_ret;
283 return fmt;
284 }else
285 return NULL;
286}
287
288AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
289 int score=0;
290 return av_probe_input_format2(pd, is_opened, &score);
291}
292
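/**
 * Probe the buffered data of a stream and, if one of the raw formats from the
 * table below is detected with sufficient score, set the stream's codec id
 * and type accordingly. Return the probe score.
 */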
293static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
294{
295 static const struct {
296 const char *name; enum AVCodecID id; enum AVMediaType type;
297 } fmt_id_type[] = {
298 { "aac" , AV_CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
299 { "ac3" , AV_CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
300 { "dts" , AV_CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
301 { "eac3" , AV_CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
302 { "h264" , AV_CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
303 { "loas" , AV_CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
304 { "m4v" , AV_CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
305 { "mp3" , AV_CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
306 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
307 { 0 }
308 };
309 int score;
310 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
311
312 if (fmt && st->request_probe <= score) {
313 int i;
314 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
315 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
316 for (i = 0; fmt_id_type[i].name; i++) {
317 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
318 st->codec->codec_id = fmt_id_type[i].id;
319 st->codec->codec_type = fmt_id_type[i].type;
320 break;
321 }
322 }
323 }
324 return score;
325}
326
327/************************************************************/
328/* input media file */
329
330int av_demuxer_open(AVFormatContext *ic){
331 int err;
332
333 if (ic->iformat->read_header) {
334 err = ic->iformat->read_header(ic);
335 if (err < 0)
336 return err;
337 }
338
339 if (ic->pb && !ic->data_offset)
340 ic->data_offset = avio_tell(ic->pb);
341
342 return 0;
343}
344
345
346int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
347 const char *filename, void *logctx,
348 unsigned int offset, unsigned int max_probe_size)
349{
350 AVProbeData pd = { filename ? filename : "", NULL, -offset };
351 unsigned char *buf = NULL;
352 uint8_t *mime_type;
353 int ret = 0, probe_size, buf_offset = 0;
354 int score = 0;
355
356 if (!max_probe_size) {
357 max_probe_size = PROBE_BUF_MAX;
358 } else if (max_probe_size > PROBE_BUF_MAX) {
359 max_probe_size = PROBE_BUF_MAX;
360 } else if (max_probe_size < PROBE_BUF_MIN) {
361 av_log(logctx, AV_LOG_ERROR,
362 "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
363 return AVERROR(EINVAL);
364 }
365
366 if (offset >= max_probe_size) {
367 return AVERROR(EINVAL);
368 }
369
370 if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
371 if (!av_strcasecmp(mime_type, "audio/aacp")) {
372 *fmt = av_find_input_format("aac");
373 }
374 av_freep(&mime_type);
375 }
376
377 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
378 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
379
380 if (probe_size < offset) {
381 continue;
382 }
383 score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;
384
385 /* read probe data */
386 if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
387 return ret;
388 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
389 /* fail if error was not end of file, otherwise, lower score */
390 if (ret != AVERROR_EOF) {
391 av_free(buf);
392 return ret;
393 }
394 score = 0;
395 ret = 0; /* error was end of file, nothing read */
396 }
397 pd.buf_size = buf_offset += ret;
398 pd.buf = &buf[offset];
399
400 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
401
402 /* guess file format */
403 *fmt = av_probe_input_format2(&pd, 1, &score);
404 if(*fmt){
405 if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration
406 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
407 }else
408 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
409 }
410 }
411
412 if (!*fmt) {
413 av_free(buf);
414 return AVERROR_INVALIDDATA;
415 }
416
417 /* rewind. reuse probe buffer to avoid seeking */
418 ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size);
419
420 return ret < 0 ? ret : score;
421}
422
423int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
424 const char *filename, void *logctx,
425 unsigned int offset, unsigned int max_probe_size)
426{
427 int ret = av_probe_input_buffer2(pb, fmt, filename, logctx, offset, max_probe_size);
428 return ret < 0 ? ret : 0;
429}
430
431
432/* open input file and probe the format if necessary */
433static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
434{
435 int ret;
436 AVProbeData pd = {filename, NULL, 0};
437 int score = AVPROBE_SCORE_RETRY;
438
439 if (s->pb) {
440 s->flags |= AVFMT_FLAG_CUSTOM_IO;
441 if (!s->iformat)
442 return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
443 else if (s->iformat->flags & AVFMT_NOFILE)
444 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
445 "will be ignored with AVFMT_NOFILE format.\n");
446 return 0;
447 }
448
449 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
450 (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
451 return score;
452
453 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
454 &s->interrupt_callback, options)) < 0)
455 return ret;
456 if (s->iformat)
457 return 0;
458 return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
459}
460
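/**
 * Append a packet to the given packet list and return a pointer to the
 * stored copy, or NULL on allocation failure.
 */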
461static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
462 AVPacketList **plast_pktl){
463 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
464 if (!pktl)
465 return NULL;
466
467 if (*packet_buffer)
468 (*plast_pktl)->next = pktl;
469 else
470 *packet_buffer = pktl;
471
472 /* add the packet in the buffered packet list */
473 *plast_pktl = pktl;
474 pktl->pkt= *pkt;
475 return &pktl->pkt;
476}
477
478int avformat_queue_attached_pictures(AVFormatContext *s)
479{
480 int i;
481 for (i = 0; i < s->nb_streams; i++)
482 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
483 s->streams[i]->discard < AVDISCARD_ALL) {
484 AVPacket copy = s->streams[i]->attached_pic;
485 copy.buf = av_buffer_ref(copy.buf);
486 if (!copy.buf)
487 return AVERROR(ENOMEM);
488
489 add_to_pktbuf(&s->raw_packet_buffer, &copy, &s->raw_packet_buffer_end);
490 }
491 return 0;
492}
493
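/*
 * Typical caller-side use of the demuxing API implemented below
 * (illustrative sketch only; error handling abbreviated, "input.mkv" is a
 * placeholder filename):
 *
 *     AVFormatContext *ic = NULL;
 *     AVPacket pkt;
 *
 *     av_register_all();
 *     if (avformat_open_input(&ic, "input.mkv", NULL, NULL) < 0)
 *         return -1;                      // could not open or probe the input
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         // pkt.stream_index selects the stream; pkt.pts/pkt.dts are in
 *         // ic->streams[pkt.stream_index]->time_base units
 *         av_free_packet(&pkt);
 *     }
 *     avformat_close_input(&ic);
 */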
494int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
495{
496 AVFormatContext *s = *ps;
497 int ret = 0;
498 AVDictionary *tmp = NULL;
499 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
500
501 if (!s && !(s = avformat_alloc_context()))
502 return AVERROR(ENOMEM);
503 if (!s->av_class){
504 av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
505 return AVERROR(EINVAL);
506 }
507 if (fmt)
508 s->iformat = fmt;
509
510 if (options)
511 av_dict_copy(&tmp, *options, 0);
512
513 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
514 goto fail;
515
516 if ((ret = init_input(s, filename, &tmp)) < 0)
517 goto fail;
518 s->probe_score = ret;
519 avio_skip(s->pb, s->skip_initial_bytes);
520
521 /* check filename in case an image number is expected */
522 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
523 if (!av_filename_number_test(filename)) {
524 ret = AVERROR(EINVAL);
525 goto fail;
526 }
527 }
528
529 s->duration = s->start_time = AV_NOPTS_VALUE;
530 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
531
532 /* allocate private data */
533 if (s->iformat->priv_data_size > 0) {
534 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
535 ret = AVERROR(ENOMEM);
536 goto fail;
537 }
538 if (s->iformat->priv_class) {
539 *(const AVClass**)s->priv_data = s->iformat->priv_class;
540 av_opt_set_defaults(s->priv_data);
541 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
542 goto fail;
543 }
544 }
545
546 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
547 if (s->pb)
548 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
549
550 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
551 if ((ret = s->iformat->read_header(s)) < 0)
552 goto fail;
553
554 if (id3v2_extra_meta) {
555 if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
556 !strcmp(s->iformat->name, "tta")) {
557 if((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
558 goto fail;
559 } else
560 av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
561 }
562 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
563
564 if ((ret = avformat_queue_attached_pictures(s)) < 0)
565 goto fail;
566
567 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
568 s->data_offset = avio_tell(s->pb);
569
570 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
571
572 if (options) {
573 av_dict_free(options);
574 *options = tmp;
575 }
576 *ps = s;
577 return 0;
578
579fail:
580 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
581 av_dict_free(&tmp);
582 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
583 avio_close(s->pb);
584 avformat_free_context(s);
585 *ps = NULL;
586 return ret;
587}
588
589/*******************************************************/
590
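/**
 * Override the stream's codec id with the video/audio/subtitle codec id
 * forced on the format context, if any.
 */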
591static void force_codec_ids(AVFormatContext *s, AVStream *st)
592{
593 switch(st->codec->codec_type){
594 case AVMEDIA_TYPE_VIDEO:
595 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
596 break;
597 case AVMEDIA_TYPE_AUDIO:
598 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
599 break;
600 case AVMEDIA_TYPE_SUBTITLE:
601 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
602 break;
603 }
604}
605
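/**
 * Accumulate packet data of a stream with request_probe set into its probe
 * buffer and try to detect the codec from it; pkt may be NULL to signal that
 * no more data will arrive for this stream.
 */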
606static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
607{
608 if(st->request_probe>0){
609 AVProbeData *pd = &st->probe_data;
610 int end;
611 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
612 --st->probe_packets;
613
614 if (pkt) {
615 uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
616 if(!new_buf) {
617 av_log(s, AV_LOG_WARNING,
618 "Failed to reallocate probe buffer for stream %d\n",
619 st->index);
620 goto no_packet;
621 }
622 pd->buf = new_buf;
623 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
624 pd->buf_size += pkt->size;
625 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
626 } else {
627no_packet:
628 st->probe_packets = 0;
629 if (!pd->buf_size) {
630 av_log(s, AV_LOG_WARNING, "nothing to probe for stream %d\n",
631 st->index);
632 }
633 }
634
635 end= s->raw_packet_buffer_remaining_size <= 0
636 || st->probe_packets<=0;
637
638 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
639 int score= set_codec_from_probe_data(s, st, pd);
640 if( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY)
641 || end){
642 pd->buf_size=0;
643 av_freep(&pd->buf);
644 st->request_probe= -1;
645 if(st->codec->codec_id != AV_CODEC_ID_NONE){
646 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
647 }else
648 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
649 }
650 force_codec_ids(s, st);
651 }
652 }
653 return 0;
654}
655
656int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
657{
658 int ret, i, err;
659 AVStream *st;
660
661 for(;;){
662 AVPacketList *pktl = s->raw_packet_buffer;
663
664 if (pktl) {
665 *pkt = pktl->pkt;
666 st = s->streams[pkt->stream_index];
667 if (s->raw_packet_buffer_remaining_size <= 0) {
668 if ((err = probe_codec(s, st, NULL)) < 0)
669 return err;
670 }
671 if(st->request_probe <= 0){
672 s->raw_packet_buffer = pktl->next;
673 s->raw_packet_buffer_remaining_size += pkt->size;
674 av_free(pktl);
675 return 0;
676 }
677 }
678
679 pkt->data = NULL;
680 pkt->size = 0;
681 av_init_packet(pkt);
682 ret= s->iformat->read_packet(s, pkt);
683 if (ret < 0) {
684 if (!pktl || ret == AVERROR(EAGAIN))
685 return ret;
686 for (i = 0; i < s->nb_streams; i++) {
687 st = s->streams[i];
688 if (st->probe_packets) {
689 if ((err = probe_codec(s, st, NULL)) < 0)
690 return err;
691 }
692 av_assert0(st->request_probe <= 0);
693 }
694 continue;
695 }
696
697 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
698 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
699 av_log(s, AV_LOG_WARNING,
700 "Dropped corrupted packet (stream = %d)\n",
701 pkt->stream_index);
702 av_free_packet(pkt);
703 continue;
704 }
705
706 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
707 av_packet_merge_side_data(pkt);
708
709 if(pkt->stream_index >= (unsigned)s->nb_streams){
710 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
711 continue;
712 }
713
714 st= s->streams[pkt->stream_index];
715 pkt->dts = wrap_timestamp(st, pkt->dts);
716 pkt->pts = wrap_timestamp(st, pkt->pts);
717
718 force_codec_ids(s, st);
719
720 /* TODO: audio: time filter; video: frame reordering (pts != dts) */
721 if (s->use_wallclock_as_timestamps)
722 pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
723
724 if(!pktl && st->request_probe <= 0)
725 return ret;
726
727 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
728 s->raw_packet_buffer_remaining_size -= pkt->size;
729
730 if ((err = probe_codec(s, st, pkt)) < 0)
731 return err;
732 }
733}
734
735#if FF_API_READ_PACKET
736int av_read_packet(AVFormatContext *s, AVPacket *pkt)
737{
738 return ff_read_packet(s, pkt);
739}
740#endif
741
742
743/**********************************************************/
744
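/**
 * Return 1 if the frame size of this audio codec can be determined from the
 * bitstream alone, 0 otherwise.
 */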
745static int determinable_frame_size(AVCodecContext *avctx)
746{
747 if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
748 avctx->codec_id == AV_CODEC_ID_MP1 ||
749 avctx->codec_id == AV_CODEC_ID_MP2 ||
750 avctx->codec_id == AV_CODEC_ID_MP3/* ||
751 avctx->codec_id == AV_CODEC_ID_CELT*/)
752 return 1;
753 return 0;
754}
755
756/**
757 * Get the number of samples of an audio frame. Return -1 on error.
758 */
759int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
760{
761 int frame_size;
762
763 /* give frame_size priority if demuxing */
764 if (!mux && enc->frame_size > 1)
765 return enc->frame_size;
766
767 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
768 return frame_size;
769
770 /* Fall back on using frame_size if muxing. */
771 if (enc->frame_size > 1)
772 return enc->frame_size;
773
774 //For WMA we currently have no other means to calculate duration thus we
775 //do it here by assuming CBR, which is true for all known cases.
776 if(!mux && enc->bit_rate>0 && size>0 && enc->sample_rate>0 && enc->block_align>1) {
777 if (enc->codec_id == AV_CODEC_ID_WMAV1 || enc->codec_id == AV_CODEC_ID_WMAV2)
778 return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
779 }
780
781 return -1;
782}
783
784
785/**
786 * Compute the frame duration as a fraction: *pnum / *pden gives the duration in seconds, or 0/0 if not available.
787 */
788void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
789 AVCodecParserContext *pc, AVPacket *pkt)
790{
791 int frame_size;
792
793 *pnum = 0;
794 *pden = 0;
795 switch(st->codec->codec_type) {
796 case AVMEDIA_TYPE_VIDEO:
797 if (st->r_frame_rate.num && !pc) {
798 *pnum = st->r_frame_rate.den;
799 *pden = st->r_frame_rate.num;
800 } else if(st->time_base.num*1000LL > st->time_base.den) {
801 *pnum = st->time_base.num;
802 *pden = st->time_base.den;
803 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
804 *pnum = st->codec->time_base.num;
805 *pden = st->codec->time_base.den;
806 if (pc && pc->repeat_pict) {
807 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
808 *pden /= 1 + pc->repeat_pict;
809 else
810 *pnum *= 1 + pc->repeat_pict;
811 }
812 // If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
813 // Thus, if we have no parser in that case, leave the duration undefined.
814 if(st->codec->ticks_per_frame>1 && !pc){
815 *pnum = *pden = 0;
816 }
817 }
818 break;
819 case AVMEDIA_TYPE_AUDIO:
820 frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
821 if (frame_size <= 0 || st->codec->sample_rate <= 0)
822 break;
823 *pnum = frame_size;
824 *pden = st->codec->sample_rate;
825 break;
826 default:
827 break;
828 }
829}
830
831static int is_intra_only(AVCodecContext *enc){
832 const AVCodecDescriptor *desc;
833
834 if(enc->codec_type != AVMEDIA_TYPE_VIDEO)
835 return 1;
836
837 desc = av_codec_get_codec_descriptor(enc);
838 if (!desc) {
839 desc = avcodec_descriptor_get(enc->codec_id);
840 av_codec_set_codec_descriptor(enc, desc);
841 }
842 if (desc)
843 return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
844 return 0;
845}
846
847static int has_decode_delay_been_guessed(AVStream *st)
848{
849 if(st->codec->codec_id != AV_CODEC_ID_H264) return 1;
850 if(!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
851 return 1;
852#if CONFIG_H264_DECODER
853 if(st->codec->has_b_frames &&
854 avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
855 return 1;
856#endif
857 if(st->codec->has_b_frames<3)
858 return st->nb_decoded_frames >= 7;
859 else if(st->codec->has_b_frames<4)
860 return st->nb_decoded_frames >= 18;
861 else
862 return st->nb_decoded_frames >= 20;
863}
864
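/**
 * Return the packet list entry following pktl, continuing from the parse
 * queue into the packet buffer; return NULL at the end of both lists.
 */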
865static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
866{
867 if (pktl->next)
868 return pktl->next;
869 if (pktl == s->parse_queue_end)
870 return s->packet_buffer;
871 return NULL;
872}
873
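/**
 * Once the first dts of a stream is known, pick a pts wrap reference point
 * and wrap behavior and propagate them to the stream's program (or, when
 * there is no program, to all streams). Return 1 if references were updated.
 */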
874static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index)
875{
876 if (s->correct_ts_overflow && st->pts_wrap_bits < 63 &&
877 st->pts_wrap_reference == AV_NOPTS_VALUE && st->first_dts != AV_NOPTS_VALUE) {
878 int i;
879
880 // reference time stamp should be 60 s before first time stamp
881 int64_t pts_wrap_reference = st->first_dts - av_rescale(60, st->time_base.den, st->time_base.num);
882 // if the first timestamp is within 1/8 of the wrap range and within 60 s of the wrap point, subtract rather than add the wrap offset
883 int pts_wrap_behavior = (st->first_dts < (1LL<<st->pts_wrap_bits) - (1LL<<(st->pts_wrap_bits-3))) ||
884 (st->first_dts < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
885 AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;
886
887 AVProgram *first_program = av_find_program_from_stream(s, NULL, stream_index);
888
889 if (!first_program) {
890 int default_stream_index = av_find_default_stream_index(s);
891 if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
892 for (i=0; i<s->nb_streams; i++) {
893 s->streams[i]->pts_wrap_reference = pts_wrap_reference;
894 s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
895 }
896 }
897 else {
898 st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
899 st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
900 }
901 }
902 else {
903 AVProgram *program = first_program;
904 while (program) {
905 if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
906 pts_wrap_reference = program->pts_wrap_reference;
907 pts_wrap_behavior = program->pts_wrap_behavior;
908 break;
909 }
910 program = av_find_program_from_stream(s, program, stream_index);
911 }
912
913 // update every program with differing pts_wrap_reference
914 program = first_program;
915 while(program) {
916 if (program->pts_wrap_reference != pts_wrap_reference) {
917 for (i=0; i<program->nb_stream_indexes; i++) {
918 s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
919 s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
920 }
921
922 program->pts_wrap_reference = pts_wrap_reference;
923 program->pts_wrap_behavior = pts_wrap_behavior;
924 }
925 program = av_find_program_from_stream(s, program, stream_index);
926 }
927 }
928 return 1;
929 }
930 return 0;
931}
932
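/**
 * When the first dts of a stream becomes known, shift the timestamps of its
 * already buffered packets from the RELATIVE_TS_BASE domain to real values
 * and update first_dts/cur_dts/start_time accordingly.
 */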
933static void update_initial_timestamps(AVFormatContext *s, int stream_index,
934 int64_t dts, int64_t pts, AVPacket *pkt)
935{
936 AVStream *st= s->streams[stream_index];
937 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
938 int64_t pts_buffer[MAX_REORDER_DELAY+1];
939 int64_t shift;
940 int i, delay;
941
942 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
943 return;
944
945 delay = st->codec->has_b_frames;
946 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
947 st->cur_dts= dts;
948 shift = st->first_dts - RELATIVE_TS_BASE;
949
950 for (i=0; i<MAX_REORDER_DELAY+1; i++)
951 pts_buffer[i] = AV_NOPTS_VALUE;
952
953 if (is_relative(pts))
954 pts += shift;
955
956 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
957 if(pktl->pkt.stream_index != stream_index)
958 continue;
959 if(is_relative(pktl->pkt.pts))
960 pktl->pkt.pts += shift;
961
962 if(is_relative(pktl->pkt.dts))
963 pktl->pkt.dts += shift;
964
965 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
966 st->start_time= pktl->pkt.pts;
967
968 if(pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
969 pts_buffer[0]= pktl->pkt.pts;
970 for(i=0; i<delay && pts_buffer[i] > pts_buffer[i+1]; i++)
971 FFSWAP(int64_t, pts_buffer[i], pts_buffer[i+1]);
972 if(pktl->pkt.dts == AV_NOPTS_VALUE)
973 pktl->pkt.dts= pts_buffer[0];
974 }
975 }
976
977 if (update_wrap_reference(s, st, stream_index) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
978 // correct first time stamps to negative values
979 st->first_dts = wrap_timestamp(st, st->first_dts);
980 st->cur_dts = wrap_timestamp(st, st->cur_dts);
981 pkt->dts = wrap_timestamp(st, pkt->dts);
982 pkt->pts = wrap_timestamp(st, pkt->pts);
983 pts = wrap_timestamp(st, pts);
984 }
985
986 if (st->start_time == AV_NOPTS_VALUE)
987 st->start_time = pts;
988}
989
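/**
 * Once a per-packet duration becomes known, assign dts/pts/duration to the
 * leading buffered packets of the stream that have none, working from
 * first_dts onwards.
 */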
990static void update_initial_durations(AVFormatContext *s, AVStream *st,
991 int stream_index, int duration)
992{
993 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
994 int64_t cur_dts= RELATIVE_TS_BASE;
995
996 if(st->first_dts != AV_NOPTS_VALUE){
997 cur_dts= st->first_dts;
998 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
999 if(pktl->pkt.stream_index == stream_index){
1000 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
1001 break;
1002 cur_dts -= duration;
1003 }
1004 }
1005 if(pktl && pktl->pkt.dts != st->first_dts) {
1006 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %d) in the queue\n",
1007 av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
1008 return;
1009 }
1010 if(!pktl) {
1011 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
1012 return;
1013 }
1014 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
1015 st->first_dts = cur_dts;
1016 }else if(st->cur_dts != RELATIVE_TS_BASE)
1017 return;
1018
1019 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
1020 if(pktl->pkt.stream_index != stream_index)
1021 continue;
1022 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
1023 && !pktl->pkt.duration){
1024 pktl->pkt.dts= cur_dts;
1025 if(!st->codec->has_b_frames)
1026 pktl->pkt.pts= cur_dts;
1027// if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
1028 pktl->pkt.duration = duration;
1029 }else
1030 break;
1031 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
1032 }
1033 if(!pktl)
1034 st->cur_dts= cur_dts;
1035}
1036
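/**
 * Fill in missing pts/dts/duration fields of a packet, using the parser
 * state, the stream's frame rate/time base and previously seen timestamps.
 */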
1037static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
1038 AVCodecParserContext *pc, AVPacket *pkt)
1039{
1040 int num, den, presentation_delayed, delay, i;
1041 int64_t offset;
1042
1043 if (s->flags & AVFMT_FLAG_NOFILLIN)
1044 return;
1045
1046 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1047 pkt->dts= AV_NOPTS_VALUE;
1048
1049 if (st->codec->codec_id != AV_CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
1050 //FIXME Set low_delay = 0 when has_b_frames = 1
1051 st->codec->has_b_frames = 1;
1052
1053 /* do we have a video B-frame ? */
1054 delay= st->codec->has_b_frames;
1055 presentation_delayed = 0;
1056
1057 /* XXX: need has_b_frame, but cannot get it if the codec is
1058 not initialized */
1059 if (delay &&
1060 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1061 presentation_delayed = 1;
1062
1063 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
1064 st->pts_wrap_bits < 63 &&
1065 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
1066 if(is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > st->cur_dts) {
1067 pkt->dts -= 1LL<<st->pts_wrap_bits;
1068 } else
1069 pkt->pts += 1LL<<st->pts_wrap_bits;
1070 }
1071
1072 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1073 // we take the conservative approach and discard both
1074 // Note: if this is misbehaving for an H.264 file then possibly presentation_delayed is not set correctly.
1075 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1076 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1077 if(strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")) // otherwise we discard correct timestamps for vc1-wmapro.ism
1078 pkt->dts= AV_NOPTS_VALUE;
1079 }
1080
1081 if (pkt->duration == 0) {
1082 ff_compute_frame_duration(&num, &den, st, pc, pkt);
1083 if (den && num) {
1084 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1085 }
1086 }
1087 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1088 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1089
1090 /* correct timestamps with byte offset if demuxers only have timestamps
1091 on packet boundaries */
1092 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1093 /* this will estimate bitrate based on this frame's duration and size */
1094 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1095 if(pkt->pts != AV_NOPTS_VALUE)
1096 pkt->pts += offset;
1097 if(pkt->dts != AV_NOPTS_VALUE)
1098 pkt->dts += offset;
1099 }
1100
1101 /* This may be redundant, but it should not hurt. */
1102 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1103 presentation_delayed = 1;
1104
1105 av_dlog(NULL, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1106 presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1107 /* interpolate PTS and DTS if they are not present */
1108 //We skip H264 currently because delay and has_b_frames are not reliably set
1109 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
1110 if (presentation_delayed) {
1111 /* DTS = decompression timestamp */
1112 /* PTS = presentation timestamp */
1113 if (pkt->dts == AV_NOPTS_VALUE)
1114 pkt->dts = st->last_IP_pts;
1115 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1116 if (pkt->dts == AV_NOPTS_VALUE)
1117 pkt->dts = st->cur_dts;
1118
1119 /* this is tricky: the dts must be incremented by the duration
1120 of the frame we are displaying, i.e. the last I- or P-frame */
1121 if (st->last_IP_duration == 0)
1122 st->last_IP_duration = pkt->duration;
1123 if(pkt->dts != AV_NOPTS_VALUE)
1124 st->cur_dts = pkt->dts + st->last_IP_duration;
1125 st->last_IP_duration = pkt->duration;
1126 st->last_IP_pts= pkt->pts;
1127 /* cannot compute PTS if not present (we can compute it only
1128 by knowing the future). */
1129 } else if (pkt->pts != AV_NOPTS_VALUE ||
1130 pkt->dts != AV_NOPTS_VALUE ||
1131 pkt->duration ) {
1132 int duration = pkt->duration;
1133
1134 /* presentation is not delayed : PTS and DTS are the same */
1135 if (pkt->pts == AV_NOPTS_VALUE)
1136 pkt->pts = pkt->dts;
1137 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1138 pkt->pts, pkt);
1139 if (pkt->pts == AV_NOPTS_VALUE)
1140 pkt->pts = st->cur_dts;
1141 pkt->dts = pkt->pts;
1142 if (pkt->pts != AV_NOPTS_VALUE)
1143 st->cur_dts = pkt->pts + duration;
1144 }
1145 }
1146
1147 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
1148 st->pts_buffer[0]= pkt->pts;
1149 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1150 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1151 if(pkt->dts == AV_NOPTS_VALUE)
1152 pkt->dts= st->pts_buffer[0];
1153 }
1154 if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
1155 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); // this should happen on the first packet
1156 }
1157 if(pkt->dts > st->cur_dts)
1158 st->cur_dts = pkt->dts;
1159
1160 av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1161 presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1162
1163 /* update flags */
1164 if (is_intra_only(st->codec))
1165 pkt->flags |= AV_PKT_FLAG_KEY;
1166 if (pc)
1167 pkt->convergence_duration = pc->convergence_duration;
1168}
1169
1170static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1171{
1172 while (*pkt_buf) {
1173 AVPacketList *pktl = *pkt_buf;
1174 *pkt_buf = pktl->next;
1175 av_free_packet(&pktl->pkt);
1176 av_freep(&pktl);
1177 }
1178 *pkt_buf_end = NULL;
1179}
1180
1181/**
1182 * Parse a packet, add all split parts to parse_queue
1183 *
1184 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1185 */
1186static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1187{
1188 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1189 AVStream *st = s->streams[stream_index];
1190 uint8_t *data = pkt ? pkt->data : NULL;
1191 int size = pkt ? pkt->size : 0;
1192 int ret = 0, got_output = 0;
1193
1194 if (!pkt) {
1195 av_init_packet(&flush_pkt);
1196 pkt = &flush_pkt;
1197 got_output = 1;
1198 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1199 // preserve 0-size sync packets
1200 compute_pkt_fields(s, st, st->parser, pkt);
1201 }
1202
1203 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1204 int len;
1205
1206 av_init_packet(&out_pkt);
1207 len = av_parser_parse2(st->parser, st->codec,
1208 &out_pkt.data, &out_pkt.size, data, size,
1209 pkt->pts, pkt->dts, pkt->pos);
1210
1211 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1212 pkt->pos = -1;
1213 /* increment read pointer */
1214 data += len;
1215 size -= len;
1216
1217 got_output = !!out_pkt.size;
1218
1219 if (!out_pkt.size)
1220 continue;
1221
1222 if (pkt->side_data) {
1223 out_pkt.side_data = pkt->side_data;
1224 out_pkt.side_data_elems = pkt->side_data_elems;
1225 pkt->side_data = NULL;
1226 pkt->side_data_elems = 0;
1227 }
1228
1229 /* set the duration */
1230 out_pkt.duration = 0;
1231 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1232 if (st->codec->sample_rate > 0) {
1233 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1234 (AVRational){ 1, st->codec->sample_rate },
1235 st->time_base,
1236 AV_ROUND_DOWN);
1237 }
1238 } else if (st->codec->time_base.num != 0 &&
1239 st->codec->time_base.den != 0) {
1240 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1241 st->codec->time_base,
1242 st->time_base,
1243 AV_ROUND_DOWN);
1244 }
1245
1246 out_pkt.stream_index = st->index;
1247 out_pkt.pts = st->parser->pts;
1248 out_pkt.dts = st->parser->dts;
1249 out_pkt.pos = st->parser->pos;
1250
1251 if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1252 out_pkt.pos = st->parser->frame_offset;
1253
1254 if (st->parser->key_frame == 1 ||
1255 (st->parser->key_frame == -1 &&
1256 st->parser->pict_type == AV_PICTURE_TYPE_I))
1257 out_pkt.flags |= AV_PKT_FLAG_KEY;
1258
1259 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1260 out_pkt.flags |= AV_PKT_FLAG_KEY;
1261
1262 compute_pkt_fields(s, st, st->parser, &out_pkt);
1263
1264 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1265 out_pkt.buf = pkt->buf;
1266 pkt->buf = NULL;
1267#if FF_API_DESTRUCT_PACKET
1268FF_DISABLE_DEPRECATION_WARNINGS
1269 out_pkt.destruct = pkt->destruct;
1270 pkt->destruct = NULL;
1271FF_ENABLE_DEPRECATION_WARNINGS
1272#endif
1273 }
1274 if ((ret = av_dup_packet(&out_pkt)) < 0)
1275 goto fail;
1276
1277 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1278 av_free_packet(&out_pkt);
1279 ret = AVERROR(ENOMEM);
1280 goto fail;
1281 }
1282 }
1283
1284
1285 /* end of the stream => close and free the parser */
1286 if (pkt == &flush_pkt) {
1287 av_parser_close(st->parser);
1288 st->parser = NULL;
1289 }
1290
1291fail:
1292 av_free_packet(pkt);
1293 return ret;
1294}
1295
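/**
 * Pop the first packet from the given packet list into *pkt.
 */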
1296static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1297 AVPacketList **pkt_buffer_end,
1298 AVPacket *pkt)
1299{
1300 AVPacketList *pktl;
1301 av_assert0(*pkt_buffer);
1302 pktl = *pkt_buffer;
1303 *pkt = pktl->pkt;
1304 *pkt_buffer = pktl->next;
1305 if (!pktl->next)
1306 *pkt_buffer_end = NULL;
1307 av_freep(&pktl);
1308 return 0;
1309}
1310
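/**
 * Read the next packet from the demuxer, running it through the parser when
 * needed, and fill in missing timestamp fields before returning it.
 */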
1311static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1312{
1313 int ret = 0, i, got_packet = 0;
1314 int64_t first_timeval = avformat_getcurtime_us();
1315 av_init_packet(pkt);
1316
1317 while (!got_packet && !s->parse_queue) {
1318 AVStream *st;
1319 AVPacket cur_pkt;
1320        if (s->pb && s->pb->mediascan_flag) { /* s->pb may be NULL for AVFMT_NOFILE demuxers */
1321 if (avformat_getcurtime_us() > (first_timeval + s->max_analyze_duration)) {
1322 return -1;
1323 }
1324 }
1325
1326 /* read next packet */
1327 ret = ff_read_packet(s, &cur_pkt);
1328 if (ret < 0) {
1329 if (ret == AVERROR(EAGAIN))
1330 return ret;
1331 /* flush the parsers */
1332 for(i = 0; i < s->nb_streams; i++) {
1333 st = s->streams[i];
1334 if (st->parser && st->need_parsing)
1335 parse_packet(s, NULL, st->index);
1336 }
1337 /* all remaining packets are now in parse_queue =>
1338 * really terminate parsing */
1339 break;
1340 }
1341 ret = 0;
1342 st = s->streams[cur_pkt.stream_index];
1343
1344 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1345 cur_pkt.dts != AV_NOPTS_VALUE &&
1346 cur_pkt.pts < cur_pkt.dts) {
1347 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1348 cur_pkt.stream_index,
1349 av_ts2str(cur_pkt.pts),
1350 av_ts2str(cur_pkt.dts),
1351 cur_pkt.size);
1352 }
1353 if (s->debug & FF_FDEBUG_TS)
1354 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1355 cur_pkt.stream_index,
1356 av_ts2str(cur_pkt.pts),
1357 av_ts2str(cur_pkt.dts),
1358 cur_pkt.size,
1359 cur_pkt.duration,
1360 cur_pkt.flags);
1361
1362 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1363 st->parser = av_parser_init(st->codec->codec_id);
1364 if (!st->parser) {
1365 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1366 "%s, packets or times may be invalid.\n",
1367 avcodec_get_name(st->codec->codec_id));
1368 /* no parser available: just output the raw packets */
1369 st->need_parsing = AVSTREAM_PARSE_NONE;
1370 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1371 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1372 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1373 st->parser->flags |= PARSER_FLAG_ONCE;
1374 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1375 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1376 }
1377 }
1378
1379 if (!st->need_parsing || !st->parser) {
1380 /* no parsing needed: we just output the packet as is */
1381 *pkt = cur_pkt;
1382 compute_pkt_fields(s, st, NULL, pkt);
1383 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1384 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1385 ff_reduce_index(s, st->index);
1386 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1387 }
1388 got_packet = 1;
1389 } else if (st->discard < AVDISCARD_ALL) {
1390 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1391 return ret;
1392 } else {
1393 /* free packet */
1394 av_free_packet(&cur_pkt);
1395 }
1396 if (pkt->flags & AV_PKT_FLAG_KEY)
1397 st->skip_to_keyframe = 0;
1398 if (st->skip_to_keyframe) {
1399 av_free_packet(&cur_pkt);
1400 if (got_packet) {
1401 *pkt = cur_pkt;
1402 }
1403 got_packet = 0;
1404 }
1405 }
1406
1407 if (!got_packet && s->parse_queue)
1408 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1409
1410 if(s->debug & FF_FDEBUG_TS)
1411 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1412 pkt->stream_index,
1413 av_ts2str(pkt->pts),
1414 av_ts2str(pkt->dts),
1415 pkt->size,
1416 pkt->duration,
1417 pkt->flags);
1418
1419 return ret;
1420}
1421
1422int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1423{
1424 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1425 int eof = 0;
1426 int ret;
1427 AVStream *st;
1428
1429 if (!genpts) {
1430 ret = s->packet_buffer ?
1431 read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt) :
1432 read_frame_internal(s, pkt);
1433 if (ret < 0)
1434 return ret;
1435 goto return_packet;
1436 }
1437
1438 for (;;) {
1439 AVPacketList *pktl = s->packet_buffer;
1440
1441 if (pktl) {
1442 AVPacket *next_pkt = &pktl->pkt;
1443
1444 if (next_pkt->dts != AV_NOPTS_VALUE) {
1445 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1446 // last dts seen for this stream. If any of the packets following
1447 // the current one has no dts, we will set this to AV_NOPTS_VALUE.
1448 int64_t last_dts = next_pkt->dts;
1449 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1450 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1451 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1452 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1453 next_pkt->pts = pktl->pkt.dts;
1454 }
1455 if (last_dts != AV_NOPTS_VALUE) {
1456 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1457 last_dts = pktl->pkt.dts;
1458 }
1459 }
1460 pktl = pktl->next;
1461 }
1462 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1463 // Fix the 'last reference frame has no pts' issue (for MXF etc.).
1464 // We only do this when
1465 // 1. eof.
1466 // 2. we are not able to resolve a pts value for current packet.
1467 // 3. the packets for this stream at the end of the files had valid dts.
1468 next_pkt->pts = last_dts + next_pkt->duration;
1469 }
1470 pktl = s->packet_buffer;
1471 }
1472
1473 /* read packet from packet buffer, if there is data */
1474 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1475 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1476 ret = read_from_packet_buffer(&s->packet_buffer,
1477 &s->packet_buffer_end, pkt);
1478 goto return_packet;
1479 }
1480 }
1481
1482 ret = read_frame_internal(s, pkt);
1483 if (ret < 0) {
1484 if (pktl && ret != AVERROR(EAGAIN)) {
1485 eof = 1;
1486 continue;
1487 } else
1488 return ret;
1489 }
1490
1491 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1492 &s->packet_buffer_end)) < 0)
1493 return AVERROR(ENOMEM);
1494 }
1495
1496return_packet:
1497
1498 st = s->streams[pkt->stream_index];
1499 if (st->skip_samples) {
1500 uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1501 if (p) {
1502 AV_WL32(p, st->skip_samples);
1503 av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1504 }
1505 st->skip_samples = 0;
1506 }
1507
1508 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1509 ff_reduce_index(s, st->index);
1510 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1511 }
1512
1513 if (is_relative(pkt->dts))
1514 pkt->dts -= RELATIVE_TS_BASE;
1515 if (is_relative(pkt->pts))
1516 pkt->pts -= RELATIVE_TS_BASE;
1517
1518 return ret;
1519}
1520
1521/* XXX: suppress the packet queue */
1522static void flush_packet_queue(AVFormatContext *s)
1523{
1524 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1525 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1526 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1527
1528 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1529}
1530
1531/*******************************************************/
1532/* seek support */
1533
1534int av_find_default_stream_index(AVFormatContext *s)
1535{
1536 int first_audio_index = -1;
1537 int i;
1538 AVStream *st;
1539
1540 if (s->nb_streams <= 0)
1541 return -1;
1542 for(i = 0; i < s->nb_streams; i++) {
1543 st = s->streams[i];
1544 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1545 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1546 return i;
1547 }
1548 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1549 first_audio_index = i;
1550 }
1551 return first_audio_index >= 0 ? first_audio_index : 0;
1552}
1553
1554/**
1555 * Flush the frame reader.
1556 */
1557void ff_read_frame_flush(AVFormatContext *s)
1558{
1559 AVStream *st;
1560 int i, j;
1561
1562 flush_packet_queue(s);
1563
1564 /* for each stream, reset read state */
1565 for(i = 0; i < s->nb_streams; i++) {
1566 st = s->streams[i];
1567
1568 if (st->parser) {
1569 av_parser_close(st->parser);
1570 st->parser = NULL;
1571 }
1572 st->last_IP_pts = AV_NOPTS_VALUE;
1573 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1574 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1575
1576 st->probe_packets = MAX_PROBE_PACKETS;
1577
1578 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1579 st->pts_buffer[j]= AV_NOPTS_VALUE;
1580 }
1581}
1582
1583void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1584{
1585 int i;
1586
1587 for(i = 0; i < s->nb_streams; i++) {
1588 AVStream *st = s->streams[i];
1589
1590 st->cur_dts = av_rescale(timestamp,
1591 st->time_base.den * (int64_t)ref_st->time_base.num,
1592 st->time_base.num * (int64_t)ref_st->time_base.den);
1593 }
1594}
1595
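/**
 * If the index has grown beyond max_index_size, drop every second entry to
 * halve its memory use.
 */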
1596void ff_reduce_index(AVFormatContext *s, int stream_index)
1597{
1598 AVStream *st= s->streams[stream_index];
1599 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1600
1601 if((unsigned)st->nb_index_entries >= max_entries){
1602 int i;
1603 for(i=0; 2*i<st->nb_index_entries; i++)
1604 st->index_entries[i]= st->index_entries[2*i];
1605 st->nb_index_entries= i;
1606 }
1607}
1608
1609int ff_add_index_entry(AVIndexEntry **index_entries,
1610 int *nb_index_entries,
1611 unsigned int *index_entries_allocated_size,
1612 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1613{
1614 AVIndexEntry *entries, *ie;
1615 int index;
1616
1617 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1618 return -1;
1619
1620 if(timestamp == AV_NOPTS_VALUE)
1621 return AVERROR(EINVAL);
1622
1623 if (size < 0 || size > 0x3FFFFFFF)
1624 return AVERROR(EINVAL);
1625
1626 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1627 timestamp -= RELATIVE_TS_BASE;
1628
1629 entries = av_fast_realloc(*index_entries,
1630 index_entries_allocated_size,
1631 (*nb_index_entries + 1) *
1632 sizeof(AVIndexEntry));
1633 if(!entries)
1634 return -1;
1635
1636 *index_entries= entries;
1637
1638 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1639
1640 if(index<0){
1641 index= (*nb_index_entries)++;
1642 ie= &entries[index];
1643 av_assert0(index==0 || ie[-1].timestamp < timestamp);
1644 }else{
1645 ie= &entries[index];
1646 if(ie->timestamp != timestamp){
1647 if(ie->timestamp <= timestamp)
1648 return -1;
1649 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1650 (*nb_index_entries)++;
1651 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1652 distance= ie->min_distance;
1653 }
1654
1655 ie->pos = pos;
1656 ie->timestamp = timestamp;
1657 ie->min_distance= distance;
1658 ie->size= size;
1659 ie->flags = flags;
1660
1661 return index;
1662}
1663
1664int av_add_index_entry(AVStream *st,
1665 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1666{
1667 timestamp = wrap_timestamp(st, timestamp);
1668 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1669 &st->index_entries_allocated_size, pos,
1670 timestamp, size, distance, flags);
1671}
1672
1673int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1674 int64_t wanted_timestamp, int flags)
1675{
1676 int a, b, m;
1677 int64_t timestamp;
1678
1679 a = - 1;
1680 b = nb_entries;
1681
1682 //optimize appending index entries at the end
1683 if(b && entries[b-1].timestamp < wanted_timestamp)
1684 a= b-1;
1685
1686 while (b - a > 1) {
1687 m = (a + b) >> 1;
1688 timestamp = entries[m].timestamp;
1689 if(timestamp >= wanted_timestamp)
1690 b = m;
1691 if(timestamp <= wanted_timestamp)
1692 a = m;
1693 }
1694 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1695
1696 if(!(flags & AVSEEK_FLAG_ANY)){
1697 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1698 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1699 }
1700 }
1701
1702 if(m == nb_entries)
1703 return -1;
1704 return m;
1705}
1706
1707int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1708 int flags)
1709{
1710 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1711 wanted_timestamp, flags);
1712}
1713
1714static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1715 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1716{
1717 int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1718 if (stream_index >= 0)
1719 ts = wrap_timestamp(s->streams[stream_index], ts);
1720 return ts;
1721}
1722
1723int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1724{
1725 AVInputFormat *avif= s->iformat;
1726 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1727 int64_t ts_min, ts_max, ts;
1728 int index;
1729 int64_t ret;
1730 AVStream *st;
1731
1732 if (stream_index < 0)
1733 return -1;
1734
1735 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1736
1737 ts_max=
1738 ts_min= AV_NOPTS_VALUE;
1739 pos_limit= -1; //gcc falsely says it may be uninitialized
1740
1741 st= s->streams[stream_index];
1742 if(st->index_entries){
1743 AVIndexEntry *e;
1744
1745 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1746 index= FFMAX(index, 0);
1747 e= &st->index_entries[index];
1748
1749 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1750 pos_min= e->pos;
1751 ts_min= e->timestamp;
1752 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1753 pos_min, av_ts2str(ts_min));
1754 }else{
1755 av_assert1(index==0);
1756 }
1757
1758 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1759 av_assert0(index < st->nb_index_entries);
1760 if(index >= 0){
1761 e= &st->index_entries[index];
1762 av_assert1(e->timestamp >= target_ts);
1763 pos_max= e->pos;
1764 ts_max= e->timestamp;
1765 pos_limit= pos_max - e->min_distance;
1766 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1767 pos_max, pos_limit, av_ts2str(ts_max));
1768 }
1769 }
1770
1771 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1772 if(pos<0)
1773 return -1;
1774
1775 /* do the seek */
1776 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1777 return ret;
1778
1779 ff_read_frame_flush(s);
1780 ff_update_cur_dts(s, st, ts);
1781
1782 return 0;
1783}
1784
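/**
 * Find the last timestamp in the file for the given stream by reading
 * timestamps backwards from the end with exponentially growing steps, then
 * scanning forward to the final one. Store it in *ts and its position in *pos.
 */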
1785int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
1786 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1787{
1788 int64_t step= 1024;
1789 int64_t limit, ts_max;
1790 int64_t filesize = avio_size(s->pb);
1791 int64_t pos_max = filesize - 1;
1792 do{
1793 limit = pos_max;
1794 pos_max = FFMAX(0, (pos_max) - step);
1795 ts_max = ff_read_timestamp(s, stream_index, &pos_max, limit, read_timestamp);
1796 step += step;
1797 }while(ts_max == AV_NOPTS_VALUE && 2*limit > step);
1798 if (ts_max == AV_NOPTS_VALUE)
1799 return -1;
1800
1801 for(;;){
1802 int64_t tmp_pos = pos_max + 1;
1803 int64_t tmp_ts = ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp);
1804 if(tmp_ts == AV_NOPTS_VALUE)
1805 break;
1806 av_assert0(tmp_pos > pos_max);
1807 ts_max = tmp_ts;
1808 pos_max = tmp_pos;
1809 if(tmp_pos >= filesize)
1810 break;
1811 }
1812
1813 if (ts)
1814 *ts = ts_max;
1815 if (pos)
1816 *pos = pos_max;
1817
1818 return 0;
1819}
1820
1821int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1822 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1823 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1824 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1825{
1826 int64_t pos, ts;
1827 int64_t start_pos;
1828 int no_change;
1829 int ret;
1830
1831 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1832
1833 if(ts_min == AV_NOPTS_VALUE){
1834 pos_min = s->data_offset;
1835 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1836 if (ts_min == AV_NOPTS_VALUE)
1837 return -1;
1838 }
1839
1840 if(ts_min >= target_ts){
1841 *ts_ret= ts_min;
1842 return pos_min;
1843 }
1844
1845 if(ts_max == AV_NOPTS_VALUE){
1846 if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
1847 return ret;
1848 pos_limit= pos_max;
1849 }
1850
1851 if(ts_max <= target_ts){
1852 *ts_ret= ts_max;
1853 return pos_max;
1854 }
1855
1856 if(ts_min > ts_max){
1857 return -1;
1858 }else if(ts_min == ts_max){
1859 pos_limit= pos_min;
1860 }
1861
1862 no_change=0;
1863 while (pos_min < pos_limit) {
1864 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1865 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1866 assert(pos_limit <= pos_max);
1867
1868 if(no_change==0){
1869 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1870 // interpolate position (better than dichotomy)
1871 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1872 + pos_min - approximate_keyframe_distance;
1873 }else if(no_change==1){
1874 // bisection, if interpolation failed to change min or max pos last time
1875 pos = (pos_min + pos_limit)>>1;
1876 }else{
1877 /* linear search if bisection failed, can only happen if there
1878 are very few or no keyframes between min/max */
1879 pos=pos_min;
1880 }
1881 if(pos <= pos_min)
1882 pos= pos_min + 1;
1883 else if(pos > pos_limit)
1884 pos= pos_limit;
1885 start_pos= pos;
1886
1887 ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1
1888 if(pos == pos_max)
1889 no_change++;
1890 else
1891 no_change=0;
1892 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1893 pos_min, pos, pos_max,
1894 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1895 pos_limit, start_pos, no_change);
1896 if(ts == AV_NOPTS_VALUE){
1897 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1898 return -1;
1899 }
1900 assert(ts != AV_NOPTS_VALUE);
1901 if (target_ts <= ts) {
1902 pos_limit = start_pos - 1;
1903 pos_max = pos;
1904 ts_max = ts;
1905 }
1906 if (target_ts >= ts) {
1907 pos_min = pos;
1908 ts_min = ts;
1909 }
1910 }
1911
1912 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1913 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1914#if 0
1915 pos_min = pos;
1916 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1917 pos_min++;
1918 ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1919 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1920 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
1921#endif
1922 *ts_ret= ts;
1923 return pos;
1924}
1925
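/*
 * Sketch of the interpolation step used above (illustration only, standalone
 * helper that is not part of this file): given two known (position, timestamp)
 * points, the next probe position is obtained by linear interpolation with
 * av_rescale(), which computes a*b/c without intermediate overflow.
 *
 *     static int64_t interpolate_pos(int64_t target_ts,
 *                                    int64_t ts_min,  int64_t ts_max,
 *                                    int64_t pos_min, int64_t pos_max)
 *     {
 *         return pos_min + av_rescale(target_ts - ts_min,
 *                                     pos_max   - pos_min,
 *                                     ts_max    - ts_min);
 *     }
 */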
1926static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1927 int64_t pos_min, pos_max;
1928
1929 pos_min = s->data_offset;
1930 pos_max = avio_size(s->pb) - 1;
1931
1932 if (pos < pos_min) pos= pos_min;
1933 else if(pos > pos_max) pos= pos_max;
1934
1935 avio_seek(s->pb, pos, SEEK_SET);
1936
1937 s->io_repositioned = 1;
1938
1939 return 0;
1940}
1941
1942static int seek_frame_generic(AVFormatContext *s,
1943 int stream_index, int64_t timestamp, int flags)
1944{
1945 int index;
1946 int64_t ret;
1947 AVStream *st;
1948 AVIndexEntry *ie;
1949
1950 st = s->streams[stream_index];
1951
1952 index = av_index_search_timestamp(st, timestamp, flags);
1953
1954 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1955 return -1;
1956
1957 if(index < 0 || index==st->nb_index_entries-1){
1958 AVPacket pkt;
1959 int nonkey=0;
1960
1961 if(st->nb_index_entries){
1962 av_assert0(st->index_entries);
1963 ie= &st->index_entries[st->nb_index_entries-1];
1964 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1965 return ret;
1966 ff_update_cur_dts(s, st, ie->timestamp);
1967 }else{
1968 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1969 return ret;
1970 }
1971 for (;;) {
1972 int read_status;
1973 do{
1974 read_status = av_read_frame(s, &pkt);
1975 } while (read_status == AVERROR(EAGAIN));
1976 if (read_status < 0)
1977 break;
1978 av_free_packet(&pkt);
1979 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1980 if(pkt.flags & AV_PKT_FLAG_KEY)
1981 break;
1982 if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){
1983 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1984 break;
1985 }
1986 }
1987 }
1988 index = av_index_search_timestamp(st, timestamp, flags);
1989 }
1990 if (index < 0)
1991 return -1;
1992
1993 ff_read_frame_flush(s);
1994 if (s->iformat->read_seek){
1995 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1996 return 0;
1997 }
1998 ie = &st->index_entries[index];
1999 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
2000 return ret;
2001 ff_update_cur_dts(s, st, ie->timestamp);
2002
2003 return 0;
2004}
2005
2006static int seek_frame_internal(AVFormatContext *s, int stream_index,
2007 int64_t timestamp, int flags)
2008{
2009 int ret;
2010 AVStream *st;
2011
2012 if (flags & AVSEEK_FLAG_BYTE) {
2013 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
2014 return -1;
2015 ff_read_frame_flush(s);
2016 return seek_frame_byte(s, stream_index, timestamp, flags);
2017 }
2018
2019 if(stream_index < 0){
2020 stream_index= av_find_default_stream_index(s);
2021 if(stream_index < 0)
2022 return -1;
2023
2024 st= s->streams[stream_index];
2025 /* the timestamp for the default stream must be expressed in AV_TIME_BASE units */
2026 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
2027 }
2028
2029 /* first, we try the format specific seek */
2030 if (s->iformat->read_seek) {
2031 ff_read_frame_flush(s);
2032 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
2033 } else
2034 ret = -1;
2035 if (ret >= 0) {
2036 return 0;
2037 }
2038
2039 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
2040 ff_read_frame_flush(s);
2041 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
2042 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
2043 ff_read_frame_flush(s);
2044 return seek_frame_generic(s, stream_index, timestamp, flags);
2045 }
2046 else
2047 return -1;
2048}
2049
2050int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2051{
2052 int ret;
2053
2054 if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2055 int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
2056 if ((flags & AVSEEK_FLAG_BACKWARD))
2057 max_ts = timestamp;
2058 else
2059 min_ts = timestamp;
2060 return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2061 flags & ~AVSEEK_FLAG_BACKWARD);
2062 }
2063
2064 ret = seek_frame_internal(s, stream_index, timestamp, flags);
2065
2066 if (ret >= 0)
2067 ret = avformat_queue_attached_pictures(s);
2068
2069 return ret;
2070}
2071
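/*
 * Illustrative usage sketch (not part of the original source): seek stream 0
 * to a target timestamp given in that stream's time_base, landing on a
 * keyframe at or before it. 'fmt' and 'target' are hypothetical names.
 *
 *     if (av_seek_frame(fmt, 0, target, AVSEEK_FLAG_BACKWARD) < 0)
 *         av_log(fmt, AV_LOG_ERROR, "seek failed\n");
 */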
2072int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
2073{
2074 if(min_ts > ts || max_ts < ts)
2075 return -1;
2076 if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2077 return AVERROR(EINVAL);
2078
2079 if(s->seek2any>0)
2080 flags |= AVSEEK_FLAG_ANY;
2081 flags &= ~AVSEEK_FLAG_BACKWARD;
2082
2083 if (s->iformat->read_seek2) {
2084 int ret;
2085 ff_read_frame_flush(s);
2086
2087 if (stream_index == -1 && s->nb_streams == 1) {
2088 AVRational time_base = s->streams[0]->time_base;
2089 ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
2090 min_ts = av_rescale_rnd(min_ts, time_base.den,
2091 time_base.num * (int64_t)AV_TIME_BASE,
2092 AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
2093 max_ts = av_rescale_rnd(max_ts, time_base.den,
2094 time_base.num * (int64_t)AV_TIME_BASE,
2095 AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
2096 }
2097
2098 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
2099
2100 if (ret >= 0)
2101 ret = avformat_queue_attached_pictures(s);
2102 return ret;
2103 }
2104
2105 if(s->iformat->read_timestamp){
2106 //try to seek via read_timestamp()
2107 }
2108
2109 // Fall back on the old API if the new one is not implemented but the old one is.
2110 // Note that the old API has somewhat different semantics.
2111 if (s->iformat->read_seek || 1) {
2112 int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2113 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2114 if (ret<0 && ts != min_ts && max_ts != ts) {
2115 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2116 if (ret >= 0)
2117 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2118 }
2119 return ret;
2120 }
2121
2122 // try some generic seek like seek_frame_generic() but with new ts semantics
2123 return -1; //unreachable
2124}
2125
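/*
 * Illustrative usage sketch (not part of the original source): seek to an
 * absolute time in seconds with the new API. With stream_index -1 the
 * timestamps are in AV_TIME_BASE units; the unbounded min/max let the
 * demuxer pick the closest seekable point. Names are hypothetical.
 *
 *     static int hypo_seek_seconds(AVFormatContext *fmt, double seconds)
 *     {
 *         int64_t ts = (int64_t)(seconds * AV_TIME_BASE);
 *         return avformat_seek_file(fmt, -1, INT64_MIN, ts, INT64_MAX, 0);
 *     }
 */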
2126/*******************************************************/
2127
2128/**
2129 * Return TRUE if the context has an accurate duration in at least one stream.
2130 *
2131 * @return TRUE if at least one stream, or the context itself, has an accurate duration.
2132 */
2133static int has_duration(AVFormatContext *ic)
2134{
2135 int i;
2136 AVStream *st;
2137
2138 for(i = 0;i < ic->nb_streams; i++) {
2139 st = ic->streams[i];
2140 if (st->duration != AV_NOPTS_VALUE)
2141 return 1;
2142 }
2143 if (ic->duration != AV_NOPTS_VALUE)
2144 return 1;
2145 return 0;
2146}
2147
2148/**
2149 * Estimate the overall stream timings from those of the individual streams.
2150 *
2151 * Also computes the global bitrate if possible.
2152 */
2153static void update_stream_timings(AVFormatContext *ic)
2154{
2155 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2156 int64_t duration, duration1, filesize;
2157 int i;
2158 AVStream *st;
2159 AVProgram *p;
2160
2161 start_time = INT64_MAX;
2162 start_time_text = INT64_MAX;
2163 end_time = INT64_MIN;
2164 duration = INT64_MIN;
2165 for(i = 0;i < ic->nb_streams; i++) {
2166 st = ic->streams[i];
2167 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2168 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2169 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2170 if (start_time1 < start_time_text)
2171 start_time_text = start_time1;
2172 } else
2173 start_time = FFMIN(start_time, start_time1);
2174 end_time1 = AV_NOPTS_VALUE;
2175 if (st->duration != AV_NOPTS_VALUE) {
2176 end_time1 = start_time1
2177 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2178 end_time = FFMAX(end_time, end_time1);
2179 }
2180 for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
2181 if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2182 p->start_time = start_time1;
2183 if(p->end_time < end_time1)
2184 p->end_time = end_time1;
2185 }
2186 }
2187 if (st->duration != AV_NOPTS_VALUE) {
2188 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2189 duration = FFMAX(duration, duration1);
2190 }
2191 }
2192 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2193 start_time = start_time_text;
2194 else if(start_time > start_time_text)
2195 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2196
2197 if (start_time != INT64_MAX) {
2198 ic->start_time = start_time;
2199 if (end_time != INT64_MIN) {
2200 if (ic->nb_programs) {
2201 for (i=0; i<ic->nb_programs; i++) {
2202 p = ic->programs[i];
2203 if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2204 duration = FFMAX(duration, p->end_time - p->start_time);
2205 }
2206 } else
2207 duration = FFMAX(duration, end_time - start_time);
2208 }
2209 }
2210 if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2211 ic->duration = duration;
2212 }
2213 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2214 /* compute the bitrate */
2215 double bitrate = (double)filesize * 8.0 * AV_TIME_BASE /
2216 (double)ic->duration;
2217 if (bitrate >= 0 && bitrate <= INT_MAX)
2218 ic->bit_rate = bitrate;
2219 }
2220}
2221
2222static void fill_all_stream_timings(AVFormatContext *ic)
2223{
2224 int i;
2225 AVStream *st;
2226
2227 update_stream_timings(ic);
2228 for(i = 0;i < ic->nb_streams; i++) {
2229 st = ic->streams[i];
2230 if (st->start_time == AV_NOPTS_VALUE) {
2231 if(ic->start_time != AV_NOPTS_VALUE)
2232 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2233 if(ic->duration != AV_NOPTS_VALUE)
2234 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
2235 }
2236 }
2237}
2238
2239static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2240{
2241 int64_t filesize, duration;
2242 int i, show_warning = 0;
2243 AVStream *st;
2244
2245 /* if bit_rate is already set, we believe it */
2246 if (ic->bit_rate <= 0) {
2247 int bit_rate = 0;
2248 for(i=0;i<ic->nb_streams;i++) {
2249 st = ic->streams[i];
2250 if (st->codec->bit_rate > 0) {
2251 if (INT_MAX - st->codec->bit_rate < bit_rate) {
2252 bit_rate = 0;
2253 break;
2254 }
2255 bit_rate += st->codec->bit_rate;
2256 }
2257 }
2258 ic->bit_rate = bit_rate;
2259 }
2260
2261 /* if duration is already set, we believe it */
2262 if (ic->duration == AV_NOPTS_VALUE &&
2263 ic->bit_rate != 0) {
2264 filesize = ic->pb ? avio_size(ic->pb) : 0;
2265 if (filesize > 0) {
2266 for(i = 0; i < ic->nb_streams; i++) {
2267 st = ic->streams[i];
2268 if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2269 && st->duration == AV_NOPTS_VALUE) {
2270 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2271 st->duration = duration;
2272 show_warning = 1;
2273 }
2274 }
2275 }
2276 }
2277 if (show_warning)
2278 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2279}
2280
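/*
 * Worked example for the duration estimate above (illustration only):
 * st->duration = av_rescale(8*filesize, time_base.den, bit_rate * time_base.num),
 * so a 1000000 byte file at a total bit_rate of 128000 bit/s gives
 * 8*1000000/128000 = 62.5 seconds, i.e. st->duration = 62500 for a stream
 * time_base of 1/1000.
 */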
2281#define DURATION_MAX_READ_SIZE 250000LL
2282#define DURATION_MAX_RETRY 6
2283
2284/* only usable for MPEG-PS streams */
2285static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2286{
2287 AVPacket pkt1, *pkt = &pkt1;
2288 AVStream *st;
2289 int read_size, i, ret;
2290 int64_t end_time;
2291 int64_t filesize, offset, duration;
2292 int retry=0;
2293
2294 /* flush packet queue */
2295 flush_packet_queue(ic);
2296
2297 for (i=0; i<ic->nb_streams; i++) {
2298 st = ic->streams[i];
2299 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2300 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2301
2302 if (st->parser) {
2303 av_parser_close(st->parser);
2304 st->parser= NULL;
2305 }
2306 }
2307
2308 /* estimate the end time (duration) */
2309 /* XXX: may need to support wrapping */
2310 filesize = ic->pb ? avio_size(ic->pb) : 0;
2311 end_time = AV_NOPTS_VALUE;
2312 do{
2313 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2314 if (offset < 0)
2315 offset = 0;
2316
2317 avio_seek(ic->pb, offset, SEEK_SET);
2318 read_size = 0;
2319 for(;;) {
2320 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2321 break;
2322
2323 do {
2324 ret = ff_read_packet(ic, pkt);
2325 } while(ret == AVERROR(EAGAIN));
2326 if (ret != 0)
2327 break;
2328 read_size += pkt->size;
2329 st = ic->streams[pkt->stream_index];
2330 if (pkt->pts != AV_NOPTS_VALUE &&
2331 (st->start_time != AV_NOPTS_VALUE ||
2332 st->first_dts != AV_NOPTS_VALUE)) {
2333 duration = end_time = pkt->pts;
2334 if (st->start_time != AV_NOPTS_VALUE)
2335 duration -= st->start_time;
2336 else
2337 duration -= st->first_dts;
2338 if (duration > 0) {
2339 if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<=0 ||
2340 (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2341 st->duration = duration;
2342 st->info->last_duration = duration;
2343 }
2344 }
2345 av_free_packet(pkt);
2346 }
2347 }while( end_time==AV_NOPTS_VALUE
2348 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2349 && ++retry <= DURATION_MAX_RETRY);
2350
2351 fill_all_stream_timings(ic);
2352
2353 avio_seek(ic->pb, old_offset, SEEK_SET);
2354 for (i=0; i<ic->nb_streams; i++) {
2355 st= ic->streams[i];
2356 st->cur_dts= st->first_dts;
2357 st->last_IP_pts = AV_NOPTS_VALUE;
2358 }
2359}
2360
2361static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2362{
2363 int64_t file_size;
2364
2365 /* get the file size, if possible */
2366 if (ic->iformat->flags & AVFMT_NOFILE) {
2367 file_size = 0;
2368 } else {
2369 file_size = avio_size(ic->pb);
2370 file_size = FFMAX(0, file_size);
2371 }
2372
2373 if ((!strcmp(ic->iformat->name, "mpeg") ||
2374 !strcmp(ic->iformat->name, "mpegts")) &&
2375 file_size && ic->pb->seekable) {
2376 /* get accurate estimate from the PTSes */
2377 estimate_timings_from_pts(ic, old_offset);
2378 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2379 } else if (has_duration(ic)) {
2380 /* at least one component has timings - we use them for all
2381 the components */
2382 fill_all_stream_timings(ic);
2383 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2384 } else {
2385 /* less precise: use bitrate info */
2386 estimate_timings_from_bit_rate(ic);
2387 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2388 }
2389 update_stream_timings(ic);
2390
2391 {
2392 int i;
2393 AVStream av_unused *st;
2394 for(i = 0;i < ic->nb_streams; i++) {
2395 st = ic->streams[i];
2396 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2397 (double) st->start_time / AV_TIME_BASE,
2398 (double) st->duration / AV_TIME_BASE);
2399 }
2400 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2401 (double) ic->start_time / AV_TIME_BASE,
2402 (double) ic->duration / AV_TIME_BASE,
2403 ic->bit_rate / 1000);
2404 }
2405}
2406
2407static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2408{
2409 AVCodecContext *avctx = st->codec;
2410
2411#define FAIL(errmsg) do { \
2412 if (errmsg_ptr) \
2413 *errmsg_ptr = errmsg; \
2414 return 0; \
2415 } while (0)
2416
2417 switch (avctx->codec_type) {
2418 case AVMEDIA_TYPE_AUDIO:
2419 if (!avctx->frame_size && determinable_frame_size(avctx))
2420 FAIL("unspecified frame size");
2421 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2422 FAIL("unspecified sample format");
2423 if (!avctx->sample_rate)
2424 FAIL("unspecified sample rate");
2425 if (!avctx->channels)
2426 FAIL("unspecified number of channels");
2427 if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2428 FAIL("no decodable DTS frames");
2429 break;
2430 case AVMEDIA_TYPE_VIDEO:
2431 if (!avctx->width)
2432 FAIL("unspecified size");
2433 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2434 FAIL("unspecified pixel format");
2435 if (st->codec->codec_id == AV_CODEC_ID_RV30 || st->codec->codec_id == AV_CODEC_ID_RV40)
2436 if (!st->sample_aspect_ratio.num && !st->codec->sample_aspect_ratio.num && !st->codec_info_nb_frames)
2437 FAIL("no frame in rv30/40 and no sar");
2438 break;
2439 case AVMEDIA_TYPE_SUBTITLE:
2440 if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2441 FAIL("unspecified size");
2442 break;
2443 case AVMEDIA_TYPE_DATA:
2444 if(avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2445 }
2446
2447 if (avctx->codec_id == AV_CODEC_ID_NONE)
2448 FAIL("unknown codec");
2449 return 1;
2450}
2451
2452/* Returns 1 if decoded data was returned, 0 if not, or a negative error code. */
2453static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, AVDictionary **options)
2454{
2455 const AVCodec *codec;
2456 int got_picture = 1, ret = 0;
2457 AVFrame *frame = avcodec_alloc_frame();
2458 AVSubtitle subtitle;
2459 AVPacket pkt = *avpkt;
2460
2461 if (!frame)
2462 return AVERROR(ENOMEM);
2463
2464 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2465 AVDictionary *thread_opt = NULL;
2466
2467 codec = find_decoder(s, st, st->codec->codec_id);
2468
2469 if (!codec) {
2470 st->info->found_decoder = -1;
2471 ret = -1;
2472 goto fail;
2473 }
2474
2475 /* force thread count to 1 since the h264 decoder will not extract SPS
2476 * and PPS to extradata during multi-threaded decoding */
2477 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2478 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2479 if (!options)
2480 av_dict_free(&thread_opt);
2481 if (ret < 0) {
2482 st->info->found_decoder = -1;
2483 goto fail;
2484 }
2485 st->info->found_decoder = 1;
2486 } else if (!st->info->found_decoder)
2487 st->info->found_decoder = 1;
2488
2489 if (st->info->found_decoder < 0) {
2490 ret = -1;
2491 goto fail;
2492 }
2493
2494 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2495 ret >= 0 &&
2496 (!has_codec_parameters(st, NULL) ||
2497 !has_decode_delay_been_guessed(st) ||
2498 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2499 got_picture = 0;
2500 avcodec_get_frame_defaults(frame);
2501 switch(st->codec->codec_type) {
2502 case AVMEDIA_TYPE_VIDEO:
2503 ret = avcodec_decode_video2(st->codec, frame,
2504 &got_picture, &pkt);
2505 break;
2506 case AVMEDIA_TYPE_AUDIO:
2507 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2508 break;
2509 case AVMEDIA_TYPE_SUBTITLE:
2510 ret = avcodec_decode_subtitle2(st->codec, &subtitle,
2511 &got_picture, &pkt);
2512 ret = pkt.size;
2513 break;
2514 default:
2515 break;
2516 }
2517 if (ret >= 0) {
2518 if (got_picture)
2519 st->nb_decoded_frames++;
2520 pkt.data += ret;
2521 pkt.size -= ret;
2522 ret = got_picture;
2523 }
2524 }
2525
2526 if(!pkt.data && !got_picture)
2527 ret = -1;
2528
2529fail:
2530 avcodec_free_frame(&frame);
2531 return ret;
2532}
2533
2534unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2535{
2536 while (tags->id != AV_CODEC_ID_NONE) {
2537 if (tags->id == id)
2538 return tags->tag;
2539 tags++;
2540 }
2541 return 0;
2542}
2543
2544enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2545{
2546 int i;
2547 for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) {
2548 if(tag == tags[i].tag)
2549 return tags[i].id;
2550 }
2551 for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) {
2552 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2553 return tags[i].id;
2554 }
2555 return AV_CODEC_ID_NONE;
2556}
2557
2558enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2559{
2560 if (flt) {
2561 switch (bps) {
2562 case 32: return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2563 case 64: return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2564 default: return AV_CODEC_ID_NONE;
2565 }
2566 } else {
2567 bps += 7;
2568 bps >>= 3;
2569 if (sflags & (1 << (bps - 1))) {
2570 switch (bps) {
2571 case 1: return AV_CODEC_ID_PCM_S8;
2572 case 2: return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2573 case 3: return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2574 case 4: return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2575 default: return AV_CODEC_ID_NONE;
2576 }
2577 } else {
2578 switch (bps) {
2579 case 1: return AV_CODEC_ID_PCM_U8;
2580 case 2: return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2581 case 3: return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2582 case 4: return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2583 default: return AV_CODEC_ID_NONE;
2584 }
2585 }
2586 }
2587}
2588
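/*
 * Illustrative examples for the mapping above (not part of the original
 * source). 'sflags' is a bitmask in which bit (bytes_per_sample - 1) selects
 * a signed format:
 *
 *     ff_get_pcm_codec_id(16, 0, 0, ~0)  -> AV_CODEC_ID_PCM_S16LE
 *     ff_get_pcm_codec_id(16, 0, 1, ~0)  -> AV_CODEC_ID_PCM_S16BE
 *     ff_get_pcm_codec_id( 8, 0, 0,  0)  -> AV_CODEC_ID_PCM_U8
 *     ff_get_pcm_codec_id(32, 1, 0,  0)  -> AV_CODEC_ID_PCM_F32LE
 */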
2589unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
2590{
2591 unsigned int tag;
2592 if (!av_codec_get_tag2(tags, id, &tag))
2593 return 0;
2594 return tag;
2595}
2596
2597int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
2598 unsigned int *tag)
2599{
2600 int i;
2601 for(i=0; tags && tags[i]; i++){
2602 const AVCodecTag *codec_tags = tags[i];
2603 while (codec_tags->id != AV_CODEC_ID_NONE) {
2604 if (codec_tags->id == id) {
2605 *tag = codec_tags->tag;
2606 return 1;
2607 }
2608 codec_tags++;
2609 }
2610 }
2611 return 0;
2612}
2613
2614enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2615{
2616 int i;
2617 for(i=0; tags && tags[i]; i++){
2618 enum AVCodecID id= ff_codec_get_id(tags[i], tag);
2619 if(id!=AV_CODEC_ID_NONE) return id;
2620 }
2621 return AV_CODEC_ID_NONE;
2622}
2623
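/*
 * Illustrative sketch (assumption, not taken from this file): a demuxer
 * typically resolves a stored fourcc/tag through the tag tables attached to
 * its (de)muxer definition, e.g.
 *
 *     st->codec->codec_id = av_codec_get_id(s->iformat->codec_tag, tag);
 *
 * and a muxer goes the other way with av_codec_get_tag()/av_codec_get_tag2().
 */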
2624static void compute_chapters_end(AVFormatContext *s)
2625{
2626 unsigned int i, j;
2627 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2628
2629 for (i = 0; i < s->nb_chapters; i++)
2630 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2631 AVChapter *ch = s->chapters[i];
2632 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2633 : INT64_MAX;
2634
2635 for (j = 0; j < s->nb_chapters; j++) {
2636 AVChapter *ch1 = s->chapters[j];
2637 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2638 if (j != i && next_start > ch->start && next_start < end)
2639 end = next_start;
2640 }
2641 ch->end = (end == INT64_MAX) ? ch->start : end;
2642 }
2643}
2644
2645static int get_std_framerate(int i){
2646 if(i<60*12) return (i+1)*1001;
2647 else return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12;
2648}
2649
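/*
 * Note on the table above (illustration, not part of the original source):
 * get_std_framerate(i) is meant to be divided by 12*1001, so indices below
 * 60*12 cover every multiple of 1/12 fps up to 60 fps, e.g.
 * get_std_framerate(24*12-1) / (12*1001) = 288*1001 / (12*1001) = 24 fps,
 * while the trailing entries add the NTSC-style rates, e.g.
 * get_std_framerate(60*12) / (12*1001) = 24000/1001 ~= 23.976 fps.
 */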
2650/*
2651 * Check whether the time base is unreliable.
2652 * This is a heuristic to balance between quick acceptance of the values in
2653 * the headers vs. some extra checks.
2654 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2655 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2656 * And there are "variable" fps files this needs to detect as well.
2657 */
2658static int tb_unreliable(AVCodecContext *c){
2659 if( c->time_base.den >= 101L*c->time_base.num
2660 || c->time_base.den < 5L*c->time_base.num
2661/* || c->codec_tag == AV_RL32("DIVX")
2662 || c->codec_tag == AV_RL32("XVID")*/
2663 || c->codec_tag == AV_RL32("mp4v")
2664 || c->codec_id == AV_CODEC_ID_MPEG2VIDEO
2665 || c->codec_id == AV_CODEC_ID_H264
2666 )
2667 return 1;
2668 return 0;
2669}
2670
2671#if FF_API_FORMAT_PARAMETERS
2672int av_find_stream_info(AVFormatContext *ic)
2673{
2674 return avformat_find_stream_info(ic, NULL);
2675}
2676#endif
2677
2678int ff_alloc_extradata(AVCodecContext *avctx, int size)
2679{
2680 int ret;
2681
2682 if (size < 0 || size >= INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
2683 avctx->extradata_size = 0;
2684 return AVERROR(EINVAL);
2685 }
2686 avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
2687 if (avctx->extradata) {
2688 memset(avctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2689 avctx->extradata_size = size;
2690 ret = 0;
2691 } else {
2692 avctx->extradata_size = 0;
2693 ret = AVERROR(ENOMEM);
2694 }
2695 return ret;
2696}
2697
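/*
 * Illustrative usage sketch (not part of the original source): the typical
 * demuxer pattern for filling extradata from a buffer of 'size' bytes is
 *
 *     if ((ret = ff_alloc_extradata(st->codec, size)) < 0)
 *         return ret;
 *     memcpy(st->codec->extradata, buf, size);
 *
 * The helper zeroes the FF_INPUT_BUFFER_PADDING_SIZE bytes it allocates past
 * 'size', so the copy does not need to touch the padding.
 */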
2698int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2699{
2700 int i, count, ret = 0, j;
2701 int64_t read_size;
2702 AVStream *st;
2703 AVPacket pkt1, *pkt;
2704 int64_t old_offset = avio_tell(ic->pb);
2705 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2706 int flush_codecs = ic->probesize > 0;
2707
2708 if(ic->pb)
2709 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2710
2711 for(i=0;i<ic->nb_streams;i++) {
2712 const AVCodec *codec;
2713 AVDictionary *thread_opt = NULL;
2714 st = ic->streams[i];
2715
2716 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2717 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2718/* if(!st->time_base.num)
2719 st->time_base= */
2720 if(!st->codec->time_base.num)
2721 st->codec->time_base= st->time_base;
2722 }
2723 //only for the split stuff
2724 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2725 st->parser = av_parser_init(st->codec->codec_id);
2726 if(st->parser){
2727 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2728 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2729 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2730 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2731 }
2732 } else if (st->need_parsing) {
2733 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2734 "%s, packets or times may be invalid.\n",
2735 avcodec_get_name(st->codec->codec_id));
2736 }
2737 }
2738 codec = find_decoder(ic, st, st->codec->codec_id);
2739
2740 /* force thread count to 1 since the h264 decoder will not extract SPS
2741 * and PPS to extradata during multi-threaded decoding */
2742 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2743
2744 /* Ensure that subtitle_header is properly set. */
2745 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2746 && codec && !st->codec->codec)
2747 avcodec_open2(st->codec, codec, options ? &options[i]
2748 : &thread_opt);
2749
2750 //try to just open decoders, in case this is enough to get parameters
2751 if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
2752 if (codec && !st->codec->codec)
2753 avcodec_open2(st->codec, codec, options ? &options[i]
2754 : &thread_opt);
2755 }
2756 if (!options)
2757 av_dict_free(&thread_opt);
2758 }
2759
2760 for (i=0; i<ic->nb_streams; i++) {
2761#if FF_API_R_FRAME_RATE
2762 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2763#endif
2764 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2765 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2766 }
2767
2768 count = 0;
2769 read_size = 0;
2770 for(;;) {
2771 if (ff_check_interrupt(&ic->interrupt_callback)){
2772 ret= AVERROR_EXIT;
2773 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2774 break;
2775 }
2776
2777 /* check if one codec still needs to be handled */
2778 for(i=0;i<ic->nb_streams;i++) {
2779 int fps_analyze_framecount = 20;
2780
2781 st = ic->streams[i];
2782 if (!has_codec_parameters(st, NULL))
2783 break;
2784 /* if the timebase is coarse (like the usual millisecond precision
2785 of mkv), we need to analyze more frames to reliably arrive at
2786 the correct fps */
2787 if (av_q2d(st->time_base) > 0.0005)
2788 fps_analyze_framecount *= 2;
2789 if (ic->fps_probe_size >= 0)
2790 fps_analyze_framecount = ic->fps_probe_size;
2791 if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2792 fps_analyze_framecount = 0;
2793 /* variable fps and no guess at the real fps */
2794 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2795 && st->info->duration_count < fps_analyze_framecount
2796 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2797 break;
2798 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2799 break;
2800 if (st->first_dts == AV_NOPTS_VALUE &&
2801 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2802 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2803 break;
2804 }
2805 if (i == ic->nb_streams) {
2806 /* NOTE: if the format has no header, then we need to read
2807 some packets to get most of the streams, so we cannot
2808 stop here */
2809 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2810 /* if we found the info for all the codecs, we can stop */
2811 ret = count;
2812 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2813 flush_codecs = 0;
2814 break;
2815 }
2816 }
2817 /* we did not get all the codec info, but we read too much data */
2818 if (read_size >= ic->probesize) {
2819 ret = count;
2820 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %d bytes reached\n", ic->probesize);
2821 for (i = 0; i < ic->nb_streams; i++)
2822 if (!ic->streams[i]->r_frame_rate.num &&
2823 ic->streams[i]->info->duration_count <= 1 &&
2824 strcmp(ic->iformat->name, "image2"))
2825 av_log(ic, AV_LOG_WARNING,
2826 "Stream #%d: not enough frames to estimate rate; "
2827 "consider increasing probesize\n", i);
2828 break;
2829 }
2830
2831 /* NOTE: a new stream can be added here if the file has no header
2832 (AVFMTCTX_NOHEADER) */
2833 ret = read_frame_internal(ic, &pkt1);
2834 if (ret == AVERROR(EAGAIN))
2835 continue;
2836
2837 if (ret < 0) {
2838 /* EOF or error*/
2839 break;
2840 }
2841
2842 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2843 free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);
2844 {
2845 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2846 &ic->packet_buffer_end);
2847 if (!pkt) {
2848 ret = AVERROR(ENOMEM);
2849 goto find_stream_info_err;
2850 }
2851 if ((ret = av_dup_packet(pkt)) < 0)
2852 goto find_stream_info_err;
2853 }
2854
2855 read_size += pkt->size;
2856
2857 st = ic->streams[pkt->stream_index];
2858 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2859 /* check for non-increasing dts */
2860 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2861 st->info->fps_last_dts >= pkt->dts) {
2862 av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: "
2863 "packet %d with DTS %"PRId64", packet %d with DTS "
2864 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2865 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2866 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2867 }
2868 /* check for a discontinuity in dts - if the difference in dts
2869 * is more than 1000 times the average packet duration in the sequence,
2870 * we treat it as a discontinuity */
2871 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2872 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2873 (pkt->dts - st->info->fps_last_dts) / 1000 >
2874 (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2875 av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: "
2876 "packet %d with DTS %"PRId64", packet %d with DTS "
2877 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2878 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2879 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2880 }
2881
2882 /* update stored dts values */
2883 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2884 st->info->fps_first_dts = pkt->dts;
2885 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2886 }
2887 st->info->fps_last_dts = pkt->dts;
2888 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2889 }
2890 if (st->codec_info_nb_frames>1) {
2891 int64_t t=0;
2892 if (st->time_base.den > 0)
2893 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2894 if (st->avg_frame_rate.num > 0)
2895 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));
2896
2897 if ( t==0
2898 && st->codec_info_nb_frames>30
2899 && st->info->fps_first_dts != AV_NOPTS_VALUE
2900 && st->info->fps_last_dts != AV_NOPTS_VALUE)
2901 t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));
2902
2903 if (t >= ic->max_analyze_duration) {
2904 av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %d reached at %"PRId64" microseconds\n", ic->max_analyze_duration, t);
2905 break;
2906 }
2907 if (pkt->duration) {
2908 st->info->codec_info_duration += pkt->duration;
2909 st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame==2 ? st->parser->repeat_pict + 1 : 2;
2910 }
2911 }
2912#if FF_API_R_FRAME_RATE
2913 {
2914 int64_t last = st->info->last_dts;
2915
2916 if( pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last
2917 && pkt->dts - (uint64_t)last < INT64_MAX){
2918 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2919 int64_t duration= pkt->dts - last;
2920
2921 if (!st->info->duration_error)
2922 st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
2923 if (!st->info->duration_error)
2924 return AVERROR(ENOMEM);
2925
2926// if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2927// av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2928 for (i=0; i<MAX_STD_TIMEBASES; i++) {
2929 int framerate= get_std_framerate(i);
2930 double sdts= dts*framerate/(1001*12);
2931 for(j=0; j<2; j++){
2932 int64_t ticks= llrint(sdts+j*0.5);
2933 double error= sdts - ticks + j*0.5;
2934 st->info->duration_error[j][0][i] += error;
2935 st->info->duration_error[j][1][i] += error*error;
2936 }
2937 }
2938 st->info->duration_count++;
2939 // ignore the first 4 values, they might have some random jitter
2940 if (st->info->duration_count > 3 && is_relative(pkt->dts) == is_relative(last))
2941 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2942 }
2943 if (pkt->dts != AV_NOPTS_VALUE)
2944 st->info->last_dts = pkt->dts;
2945 }
2946#endif
2947 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2948 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2949 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2950 if (ff_alloc_extradata(st->codec, i))
2951 return AVERROR(ENOMEM);
2952 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2953 }
2954 }
2955
2956 /* if still no information, we try to open the codec and to
2957 decompress the frame. We try to avoid that in most cases as
2958 it takes longer and uses more memory. For MPEG-4, we need to
2959 decompress for QuickTime.
2960
2961 If CODEC_CAP_CHANNEL_CONF is set, this will force decoding of at
2962 least one frame of codec data; this makes sure the codec initializes
2963 the channel configuration and does not only trust the values from the container.
2964 */
2965 try_decode_frame(ic, st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2966
2967 st->codec_info_nb_frames++;
2968 count++;
2969 }
2970
2971 if (flush_codecs) {
2972 AVPacket empty_pkt = { 0 };
2973 int err = 0;
2974 av_init_packet(&empty_pkt);
2975
2976 for(i=0;i<ic->nb_streams;i++) {
2977
2978 st = ic->streams[i];
2979
2980 /* flush the decoders */
2981 if (st->info->found_decoder == 1) {
2982 do {
2983 err = try_decode_frame(ic, st, &empty_pkt,
2984 (options && i < orig_nb_streams) ?
2985 &options[i] : NULL);
2986 } while (err > 0 && !has_codec_parameters(st, NULL));
2987
2988 if (err < 0) {
2989 av_log(ic, AV_LOG_INFO,
2990 "decoding for stream %d failed\n", st->index);
2991 }
2992 }
2993 }
2994 }
2995
2996 // close codecs which were opened in try_decode_frame()
2997 for(i=0;i<ic->nb_streams;i++) {
2998 st = ic->streams[i];
2999 avcodec_close(st->codec);
3000 }
3001 for(i=0;i<ic->nb_streams;i++) {
3002 st = ic->streams[i];
3003 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
3004 if(st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
3005 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
3006 if (avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
3007 st->codec->codec_tag= tag;
3008 }
3009
3010 /* estimate average framerate if not set by demuxer */
3011 if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) {
3012 int best_fps = 0;
3013 double best_error = 0.01;
3014
3015 if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2||
3016 st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
3017 st->info->codec_info_duration < 0)
3018 continue;
3019 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3020 st->info->codec_info_duration_fields*(int64_t)st->time_base.den,
3021 st->info->codec_info_duration*2*(int64_t)st->time_base.num, 60000);
3022
3023 /* round guessed framerate to a "standard" framerate if it's
3024 * within 1% of the original estimate */
3025 for (j = 1; j < MAX_STD_TIMEBASES; j++) {
3026 AVRational std_fps = { get_std_framerate(j), 12*1001 };
3027 double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);
3028
3029 if (error < best_error) {
3030 best_error = error;
3031 best_fps = std_fps.num;
3032 }
3033 }
3034 if (best_fps) {
3035 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3036 best_fps, 12*1001, INT_MAX);
3037 }
3038 }
3039 // the check for tb_unreliable() is not completely correct, since this is not about handling
3040 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
3041 // ipmovie.c produces.
3042 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
3043 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
3044 if (st->info->duration_count>1 && !st->r_frame_rate.num
3045 && tb_unreliable(st->codec)) {
3046 int num = 0;
3047 double best_error= 0.01;
3048
3049 for (j=0; j<MAX_STD_TIMEBASES; j++) {
3050 int k;
3051
3052 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
3053 continue;
3054 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
3055 continue;
3056 for(k=0; k<2; k++){
3057 int n= st->info->duration_count;
3058 double a= st->info->duration_error[k][0][j] / n;
3059 double error= st->info->duration_error[k][1][j]/n - a*a;
3060
3061 if(error < best_error && best_error> 0.000000001){
3062 best_error= error;
3063 num = get_std_framerate(j);
3064 }
3065 if(error < 0.02)
3066 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
3067 }
3068 }
3069 // do not increase frame rate by more than 1 % in order to match a standard rate.
3070 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
3071 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
3072 }
3073
3074 if (!st->r_frame_rate.num){
3075 if( st->codec->time_base.den * (int64_t)st->time_base.num
3076 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
3077 st->r_frame_rate.num = st->codec->time_base.den;
3078 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
3079 }else{
3080 st->r_frame_rate.num = st->time_base.den;
3081 st->r_frame_rate.den = st->time_base.num;
3082 }
3083 }
3084 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
3085 if(!st->codec->bits_per_coded_sample)
3086 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
3087 // set stream disposition based on audio service type
3088 switch (st->codec->audio_service_type) {
3089 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
3090 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
3091 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
3092 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
3093 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
3094 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
3095 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
3096 st->disposition = AV_DISPOSITION_COMMENT; break;
3097 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
3098 st->disposition = AV_DISPOSITION_KARAOKE; break;
3099 }
3100 }
3101 }
3102
3103 if(ic->probesize)
3104 estimate_timings(ic, old_offset);
3105
3106 if (ret >= 0 && ic->nb_streams)
3107 ret = -1; /* we could not have all the codec parameters before EOF */
3108 for(i=0;i<ic->nb_streams;i++) {
3109 const char *errmsg;
3110 st = ic->streams[i];
3111 if (!has_codec_parameters(st, &errmsg)) {
3112 char buf[256];
3113 avcodec_string(buf, sizeof(buf), st->codec, 0);
3114 av_log(ic, AV_LOG_WARNING,
3115 "Could not find codec parameters for stream %d (%s): %s\n"
3116 "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3117 i, buf, errmsg);
3118 } else {
3119 ret = 0;
3120 }
3121 }
3122
3123 compute_chapters_end(ic);
3124
3125 find_stream_info_err:
3126 for (i=0; i < ic->nb_streams; i++) {
3127 st = ic->streams[i];
3128 if (ic->streams[i]->codec && ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
3129 ic->streams[i]->codec->thread_count = 0;
3130 if (st->info)
3131 av_freep(&st->info->duration_error);
3132 av_freep(&ic->streams[i]->info);
3133 }
3134 if(ic->pb)
3135 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
3136 return ret;
3137}
3138
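/*
 * Illustrative caller-side sketch (not part of the original source): the
 * usual open/probe/dump sequence around avformat_find_stream_info().
 * 'filename' is a hypothetical input path.
 *
 *     AVFormatContext *fmt = NULL;
 *     if (avformat_open_input(&fmt, filename, NULL, NULL) < 0)
 *         return -1;
 *     if (avformat_find_stream_info(fmt, NULL) < 0) {
 *         avformat_close_input(&fmt);
 *         return -1;
 *     }
 *     av_dump_format(fmt, 0, filename, 0);
 *     avformat_close_input(&fmt);
 */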
3139AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3140{
3141 int i, j;
3142
3143 for (i = 0; i < ic->nb_programs; i++) {
3144 if (ic->programs[i] == last) {
3145 last = NULL;
3146 } else {
3147 if (!last)
3148 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3149 if (ic->programs[i]->stream_index[j] == s)
3150 return ic->programs[i];
3151 }
3152 }
3153 return NULL;
3154}
3155
3156int av_find_best_stream(AVFormatContext *ic,
3157 enum AVMediaType type,
3158 int wanted_stream_nb,
3159 int related_stream,
3160 AVCodec **decoder_ret,
3161 int flags)
3162{
3163 int i, nb_streams = ic->nb_streams;
3164 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3165 unsigned *program = NULL;
3166 AVCodec *decoder = NULL, *best_decoder = NULL;
3167
3168 if (related_stream >= 0 && wanted_stream_nb < 0) {
3169 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3170 if (p) {
3171 program = p->stream_index;
3172 nb_streams = p->nb_stream_indexes;
3173 }
3174 }
3175 for (i = 0; i < nb_streams; i++) {
3176 int real_stream_index = program ? program[i] : i;
3177 AVStream *st = ic->streams[real_stream_index];
3178 AVCodecContext *avctx = st->codec;
3179 if (avctx->codec_type != type)
3180 continue;
3181 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3182 continue;
3183 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
3184 continue;
3185 if (decoder_ret) {
3186 decoder = find_decoder(ic, st, st->codec->codec_id);
3187 if (!decoder) {
3188 if (ret < 0)
3189 ret = AVERROR_DECODER_NOT_FOUND;
3190 continue;
3191 }
3192 }
3193 count = st->codec_info_nb_frames;
3194 bitrate = avctx->bit_rate;
3195 multiframe = FFMIN(5, count);
3196 if ((best_multiframe > multiframe) ||
3197 (best_multiframe == multiframe && best_bitrate > bitrate) ||
3198 (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3199 continue;
3200 best_count = count;
3201 best_bitrate = bitrate;
3202 best_multiframe = multiframe;
3203 ret = real_stream_index;
3204 best_decoder = decoder;
3205 if (program && i == nb_streams - 1 && ret < 0) {
3206 program = NULL;
3207 nb_streams = ic->nb_streams;
3208 i = 0; /* no related stream found, try again with everything */
3209 }
3210 }
3211 if (decoder_ret)
3212 *decoder_ret = best_decoder;
3213 return ret;
3214}
3215
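/*
 * Illustrative usage sketch (not part of the original source): pick the
 * "best" video stream and open a decoder for it. 'fmt' is a hypothetical,
 * already-probed context.
 *
 *     AVCodec *dec = NULL;
 *     int idx = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
 *     if (idx >= 0 && avcodec_open2(fmt->streams[idx]->codec, dec, NULL) >= 0) {
 *         // fmt->streams[idx] is ready for decoding
 *     }
 */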
3216/*******************************************************/
3217
3218int av_read_play(AVFormatContext *s)
3219{
3220 if (s->iformat->read_play)
3221 return s->iformat->read_play(s);
3222 if (s->pb)
3223 return avio_pause(s->pb, 0);
3224 return AVERROR(ENOSYS);
3225}
3226
3227int av_read_pause(AVFormatContext *s)
3228{
3229 if (s->iformat->read_pause)
3230 return s->iformat->read_pause(s);
3231 if (s->pb)
3232 return avio_pause(s->pb, 1);
3233 return AVERROR(ENOSYS);
3234}
3235
3236void ff_free_stream(AVFormatContext *s, AVStream *st){
3237 av_assert0(s->nb_streams>0);
3238 av_assert0(s->streams[ s->nb_streams-1 ] == st);
3239
3240 if (st->parser) {
3241 av_parser_close(st->parser);
3242 }
3243 if (st->attached_pic.data)
3244 av_free_packet(&st->attached_pic);
3245 av_dict_free(&st->metadata);
3246 av_freep(&st->probe_data.buf);
3247 av_freep(&st->index_entries);
3248 av_freep(&st->codec->extradata);
3249 av_freep(&st->codec->subtitle_header);
3250 av_freep(&st->codec);
3251 av_freep(&st->priv_data);
3252 if (st->info)
3253 av_freep(&st->info->duration_error);
3254 av_freep(&st->info);
3255 av_freep(&s->streams[ --s->nb_streams ]);
3256}
3257
3258void avformat_free_context(AVFormatContext *s)
3259{
3260 int i;
3261
3262 if (!s)
3263 return;
3264
3265 if(s->cover_data)
3266 av_free(s->cover_data);
3267
3268 av_opt_free(s);
3269 if (s->iformat && s->iformat->priv_class && s->priv_data)
3270 av_opt_free(s->priv_data);
3271
3272 for(i=s->nb_streams-1; i>=0; i--) {
3273 ff_free_stream(s, s->streams[i]);
3274 }
3275 for(i=s->nb_programs-1; i>=0; i--) {
3276 av_dict_free(&s->programs[i]->metadata);
3277 av_freep(&s->programs[i]->stream_index);
3278 av_freep(&s->programs[i]);
3279 }
3280 av_freep(&s->programs);
3281 av_freep(&s->priv_data);
3282 while(s->nb_chapters--) {
3283 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
3284 av_freep(&s->chapters[s->nb_chapters]);
3285 }
3286 av_freep(&s->chapters);
3287 av_dict_free(&s->metadata);
3288 av_freep(&s->streams);
3289 av_free(s);
3290}
3291
3292#if FF_API_CLOSE_INPUT_FILE
3293void av_close_input_file(AVFormatContext *s)
3294{
3295 avformat_close_input(&s);
3296}
3297#endif
3298
3299void avformat_close_input(AVFormatContext **ps)
3300{
3301 AVFormatContext *s;
3302 AVIOContext *pb;
3303
3304 if (!ps || !*ps)
3305 return;
3306
3307 s = *ps;
3308 pb = s->pb;
3309
3310 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
3311 (s->flags & AVFMT_FLAG_CUSTOM_IO))
3312 pb = NULL;
3313
3314 flush_packet_queue(s);
3315
3316 if (s->iformat) {
3317 if (s->iformat->read_close)
3318 s->iformat->read_close(s);
3319 }
3320
3321 avformat_free_context(s);
3322
3323 *ps = NULL;
3324
3325 avio_close(pb);
3326}
3327
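/*
 * Note with sketch (assumption, not taken from this file): when the caller
 * supplied its own AVIOContext and set AVFMT_FLAG_CUSTOM_IO, the code above
 * leaves that context untouched, so the caller has to release it itself.
 * 'fmt' and 'my_avio' are hypothetical names.
 *
 *     avformat_close_input(&fmt);   // frees fmt, but not the custom pb
 *     av_freep(&my_avio->buffer);
 *     av_freep(&my_avio);
 */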
3328#if FF_API_NEW_STREAM
3329AVStream *av_new_stream(AVFormatContext *s, int id)
3330{
3331 AVStream *st = avformat_new_stream(s, NULL);
3332 if (st)
3333 st->id = id;
3334 return st;
3335}
3336#endif
3337
3338AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
3339{
3340 AVStream *st;
3341 int i;
3342 AVStream **streams;
3343
3344 if (s->nb_streams >= INT_MAX/sizeof(*streams))
3345 return NULL;
3346 streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
3347 if (!streams)
3348 return NULL;
3349 s->streams = streams;
3350
3351 st = av_mallocz(sizeof(AVStream));
3352 if (!st)
3353 return NULL;
3354 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3355 av_free(st);
3356 return NULL;
3357 }
3358 st->info->last_dts = AV_NOPTS_VALUE;
3359
3360 st->codec = avcodec_alloc_context3(c);
3361 if (s->iformat) {
3362 /* no default bitrate if decoding */
3363 st->codec->bit_rate = 0;
3364 }
3365 st->index = s->nb_streams;
3366 st->start_time = AV_NOPTS_VALUE;
3367 st->duration = AV_NOPTS_VALUE;
3368 /* we set the current DTS to 0 so that formats without any timestamps
3369 but with durations get some timestamps; formats with some unknown
3370 timestamps have their first few packets buffered and their
3371 timestamps corrected before they are returned to the user */
3372 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3373 st->first_dts = AV_NOPTS_VALUE;
3374 st->probe_packets = MAX_PROBE_PACKETS;
3375 st->pts_wrap_reference = AV_NOPTS_VALUE;
3376 st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3377
3378 /* default pts setting is MPEG-like */
3379 avpriv_set_pts_info(st, 33, 1, 90000);
3380 st->last_IP_pts = AV_NOPTS_VALUE;
3381 for(i=0; i<MAX_REORDER_DELAY+1; i++)
3382 st->pts_buffer[i]= AV_NOPTS_VALUE;
3383
3384 st->sample_aspect_ratio = (AVRational){0,1};
3385
3386#if FF_API_R_FRAME_RATE
3387 st->info->last_dts = AV_NOPTS_VALUE;
3388#endif
3389 st->info->fps_first_dts = AV_NOPTS_VALUE;
3390 st->info->fps_last_dts = AV_NOPTS_VALUE;
3391
3392 s->streams[s->nb_streams++] = st;
3393 return st;
3394}
3395
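/*
 * Illustrative sketch (assumption, not taken from this file): a minimal
 * demuxer read_header() creating one raw PCM audio stream with
 * avformat_new_stream(). The function name and parameter values are
 * hypothetical.
 *
 *     static int hypo_read_header(AVFormatContext *s)
 *     {
 *         AVStream *st = avformat_new_stream(s, NULL);
 *         if (!st)
 *             return AVERROR(ENOMEM);
 *         st->codec->codec_type  = AVMEDIA_TYPE_AUDIO;
 *         st->codec->codec_id    = AV_CODEC_ID_PCM_S16LE;
 *         st->codec->channels    = 2;
 *         st->codec->sample_rate = 44100;
 *         avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
 *         return 0;
 *     }
 */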
3396AVProgram *av_new_program(AVFormatContext *ac, int id)
3397{
3398 AVProgram *program=NULL;
3399 int i;
3400
3401 av_dlog(ac, "new_program: id=0x%04x\n", id);
3402
3403 for(i=0; i<ac->nb_programs; i++)
3404 if(ac->programs[i]->id == id)
3405 program = ac->programs[i];
3406
3407 if(!program){
3408 program = av_mallocz(sizeof(AVProgram));
3409 if (!program)
3410 return NULL;
3411 dynarray_add(&ac->programs, &ac->nb_programs, program);
3412 program->discard = AVDISCARD_NONE;
3413 }
3414 program->id = id;
3415 program->pts_wrap_reference = AV_NOPTS_VALUE;
3416 program->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3417
3418 program->start_time =
3419 program->end_time = AV_NOPTS_VALUE;
3420
3421 return program;
3422}
3423
3424AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3425{
3426 AVChapter *chapter = NULL;
3427 int i;
3428
3429 for(i=0; i<s->nb_chapters; i++)
3430 if(s->chapters[i]->id == id)
3431 chapter = s->chapters[i];
3432
3433 if(!chapter){
3434 chapter= av_mallocz(sizeof(AVChapter));
3435 if(!chapter)
3436 return NULL;
3437 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3438 }
3439 av_dict_set(&chapter->metadata, "title", title, 0);
3440 chapter->id = id;
3441 chapter->time_base= time_base;
3442 chapter->start = start;
3443 chapter->end = end;
3444
3445 return chapter;
3446}
3447
3448void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3449{
3450 int i, j;
3451 AVProgram *program=NULL;
3452 void *tmp;
3453
3454 if (idx >= ac->nb_streams) {
3455 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3456 return;
3457 }
3458
3459 for(i=0; i<ac->nb_programs; i++){
3460 if(ac->programs[i]->id != progid)
3461 continue;
3462 program = ac->programs[i];
3463 for(j=0; j<program->nb_stream_indexes; j++)
3464 if(program->stream_index[j] == idx)
3465 return;
3466
3467 tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int));
3468 if(!tmp)
3469 return;
3470 program->stream_index = tmp;
3471 program->stream_index[program->nb_stream_indexes++] = idx;
3472 return;
3473 }
3474}
3475
3476static void print_fps(double d, const char *postfix){
3477 uint64_t v= lrintf(d*100);
3478 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3479 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3480 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3481}
3482
3483static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3484{
3485 if(m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))){
3486 AVDictionaryEntry *tag=NULL;
3487
3488 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3489 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3490 if(strcmp("language", tag->key)){
3491 const char *p = tag->value;
3492 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
3493 while(*p) {
3494 char tmp[256];
3495 size_t len = strcspn(p, "\x8\xa\xb\xc\xd");
3496 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3497 av_log(ctx, AV_LOG_INFO, "%s", tmp);
3498 p += len;
3499 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3500 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3501 if (*p) p++;
3502 }
3503 av_log(ctx, AV_LOG_INFO, "\n");
3504 }
3505 }
3506 }
3507}
3508
3509/* "user interface" functions */
3510static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3511{
3512 char buf[256];
3513 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3514 AVStream *st = ic->streams[i];
3515 int g = av_gcd(st->time_base.num, st->time_base.den);
3516 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3517 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3518 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3519 /* the pid is important information, so we display it */
3520 /* XXX: add a generic system */
3521 if (flags & AVFMT_SHOW_IDS)
3522 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3523 if (lang)
3524 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3525 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3526 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3527 if (st->sample_aspect_ratio.num && // default
3528 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3529 AVRational display_aspect_ratio;
3530 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3531 st->codec->width*st->sample_aspect_ratio.num,
3532 st->codec->height*st->sample_aspect_ratio.den,
3533 1024*1024);
3534 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3535 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3536 display_aspect_ratio.num, display_aspect_ratio.den);
3537 }
3538 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3539 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3540 print_fps(av_q2d(st->avg_frame_rate), "fps");
3541#if FF_API_R_FRAME_RATE
3542 if(st->r_frame_rate.den && st->r_frame_rate.num)
3543 print_fps(av_q2d(st->r_frame_rate), "tbr");
3544#endif
3545 if(st->time_base.den && st->time_base.num)
3546 print_fps(1/av_q2d(st->time_base), "tbn");
3547 if(st->codec->time_base.den && st->codec->time_base.num)
3548 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3549 }
3550 if (st->disposition & AV_DISPOSITION_DEFAULT)
3551 av_log(NULL, AV_LOG_INFO, " (default)");
3552 if (st->disposition & AV_DISPOSITION_DUB)
3553 av_log(NULL, AV_LOG_INFO, " (dub)");
3554 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3555 av_log(NULL, AV_LOG_INFO, " (original)");
3556 if (st->disposition & AV_DISPOSITION_COMMENT)
3557 av_log(NULL, AV_LOG_INFO, " (comment)");
3558 if (st->disposition & AV_DISPOSITION_LYRICS)
3559 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3560 if (st->disposition & AV_DISPOSITION_KARAOKE)
3561 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3562 if (st->disposition & AV_DISPOSITION_FORCED)
3563 av_log(NULL, AV_LOG_INFO, " (forced)");
3564 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3565 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3566 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3567 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3568 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3569 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3570 av_log(NULL, AV_LOG_INFO, "\n");
3571 dump_metadata(NULL, st->metadata, " ");
3572}
3573
3574void av_dump_format(AVFormatContext *ic,
3575 int index,
3576 const char *url,
3577 int is_output)
3578{
3579 int i;
3580 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3581 if (ic->nb_streams && !printed)
3582 return;
3583
3584 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3585 is_output ? "Output" : "Input",
3586 index,
3587 is_output ? ic->oformat->name : ic->iformat->name,
3588 is_output ? "to" : "from", url);
3589 dump_metadata(NULL, ic->metadata, " ");
3590 if (!is_output) {
3591 av_log(NULL, AV_LOG_INFO, " Duration: ");
3592 if (ic->duration != AV_NOPTS_VALUE) {
3593 int hours, mins, secs, us;
3594 int64_t duration = ic->duration + 5000; /* round to nearest 1/100 s for display */
3595 secs = duration / AV_TIME_BASE;
3596 us = duration % AV_TIME_BASE;
3597 mins = secs / 60;
3598 secs %= 60;
3599 hours = mins / 60;
3600 mins %= 60;
3601 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3602 (100 * us) / AV_TIME_BASE);
3603 } else {
3604 av_log(NULL, AV_LOG_INFO, "N/A");
3605 }
3606 if (ic->start_time != AV_NOPTS_VALUE) {
3607 int secs, us;
3608 av_log(NULL, AV_LOG_INFO, ", start: ");
3609 secs = ic->start_time / AV_TIME_BASE;
3610 us = abs(ic->start_time % AV_TIME_BASE);
3611 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3612 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3613 }
3614 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3615 if (ic->bit_rate) {
3616 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3617 } else {
3618 av_log(NULL, AV_LOG_INFO, "N/A");
3619 }
3620 av_log(NULL, AV_LOG_INFO, "\n");
3621 }
3622 for (i = 0; i < ic->nb_chapters; i++) {
3623 AVChapter *ch = ic->chapters[i];
3624 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3625 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3626 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3627
3628 dump_metadata(NULL, ch->metadata, " ");
3629 }
3630 if(ic->nb_programs) {
3631 int j, k, total = 0;
3632 for(j=0; j<ic->nb_programs; j++) {
3633 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3634 "name", NULL, 0);
3635 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3636 name ? name->value : "");
3637 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3638 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3639 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3640 printed[ic->programs[j]->stream_index[k]] = 1;
3641 }
3642 total += ic->programs[j]->nb_stream_indexes;
3643 }
3644 if (total < ic->nb_streams)
3645 av_log(NULL, AV_LOG_INFO, " No Program\n");
3646 }
3647 for(i=0;i<ic->nb_streams;i++)
3648 if (!printed[i])
3649 dump_stream_format(ic, i, index, is_output);
3650
3651 av_free(printed);
3652}
3653
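/*
 * Usage sketch (illustrative only, kept under #if 0): the usual probe-then-dump
 * sequence a caller runs before reading packets.  The file name is a
 * placeholder and error handling is reduced to the minimum.
 */
#if 0
static int example_dump_input(const char *filename)
{
    AVFormatContext *ic = NULL;
    int ret;

    if ((ret = avformat_open_input(&ic, filename, NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(ic, NULL)) < 0) {
        avformat_close_input(&ic);
        return ret;
    }
    av_dump_format(ic, 0, filename, 0);   /* 0 = dump as input */
    avformat_close_input(&ic);
    return 0;
}
#endif
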
3654uint64_t ff_ntp_time(void)
3655{
3656 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3657}
3658
3659int av_get_frame_filename(char *buf, int buf_size,
3660 const char *path, int number)
3661{
3662 const char *p;
3663 char *q, buf1[20], c;
3664 int nd, len, percentd_found;
3665
3666 q = buf;
3667 p = path;
3668 percentd_found = 0;
3669 for(;;) {
3670 c = *p++;
3671 if (c == '\0')
3672 break;
3673 if (c == '%') {
3674 do {
3675 nd = 0;
3676 while (av_isdigit(*p)) {
3677 nd = nd * 10 + *p++ - '0';
3678 }
3679 c = *p++;
3680 } while (av_isdigit(c));
3681
3682 switch(c) {
3683 case '%':
3684 goto addchar;
3685 case 'd':
3686 if (percentd_found)
3687 goto fail;
3688 percentd_found = 1;
3689 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3690 len = strlen(buf1);
3691 if ((q - buf + len) > buf_size - 1)
3692 goto fail;
3693 memcpy(q, buf1, len);
3694 q += len;
3695 break;
3696 default:
3697 goto fail;
3698 }
3699 } else {
3700 addchar:
3701 if ((q - buf) < buf_size - 1)
3702 *q++ = c;
3703 }
3704 }
3705 if (!percentd_found)
3706 goto fail;
3707 *q = '\0';
3708 return 0;
3709 fail:
3710 *q = '\0';
3711 return -1;
3712}
3713
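/*
 * Usage sketch (illustrative only, kept under #if 0): expanding the single %d
 * (with an optional zero-padded width) into a numbered file name, as the
 * image2 family of (de)muxers does.  The pattern string is an arbitrary
 * example.
 */
#if 0
static int example_frame_filename(void)
{
    char name[1024];

    /* yields "frame_0042.png"; returns <0 if the pattern has no %d, has more
     * than one %d, or the buffer is too small */
    if (av_get_frame_filename(name, sizeof(name), "frame_%04d.png", 42) < 0)
        return AVERROR(EINVAL);
    av_log(NULL, AV_LOG_INFO, "writing %s\n", name);
    return 0;
}
#endif
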
3714static void hex_dump_internal(void *avcl, FILE *f, int level,
3715 const uint8_t *buf, int size)
3716{
3717 int len, i, j, c;
3718#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3719
3720 for(i=0;i<size;i+=16) {
3721 len = size - i;
3722 if (len > 16)
3723 len = 16;
3724 PRINT("%08x ", i);
3725 for(j=0;j<16;j++) {
3726 if (j < len)
3727 PRINT(" %02x", buf[i+j]);
3728 else
3729 PRINT(" ");
3730 }
3731 PRINT(" ");
3732 for(j=0;j<len;j++) {
3733 c = buf[i+j];
3734 if (c < ' ' || c > '~')
3735 c = '.';
3736 PRINT("%c", c);
3737 }
3738 PRINT("\n");
3739 }
3740#undef PRINT
3741}
3742
3743void av_hex_dump(FILE *f, const uint8_t *buf, int size)
3744{
3745 hex_dump_internal(NULL, f, 0, buf, size);
3746}
3747
3748void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
3749{
3750 hex_dump_internal(avcl, NULL, level, buf, size);
3751}
3752
3753static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3754{
3755#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3756 PRINT("stream #%d:\n", pkt->stream_index);
3757 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3758 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3759 /* DTS is _always_ valid after av_read_frame() */
3760 PRINT(" dts=");
3761 if (pkt->dts == AV_NOPTS_VALUE)
3762 PRINT("N/A");
3763 else
3764 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3765 /* PTS may not be known if B-frames are present. */
3766 PRINT(" pts=");
3767 if (pkt->pts == AV_NOPTS_VALUE)
3768 PRINT("N/A");
3769 else
3770 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3771 PRINT("\n");
3772 PRINT(" size=%d\n", pkt->size);
3773#undef PRINT
3774 if (dump_payload)
3775 av_hex_dump(f, pkt->data, pkt->size);
3776}
3777
3778void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3779{
3780 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3781}
3782
3783void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3784 AVStream *st)
3785{
3786 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
3787}
3788
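/*
 * Usage sketch (illustrative only, kept under #if 0): logging every packet of
 * an already opened input while debugging a demuxer.  Pass a non-zero
 * dump_payload to also get the av_hex_dump() output of each payload.
 */
#if 0
static void example_log_packets(AVFormatContext *ic)
{
    AVPacket pkt;

    while (av_read_frame(ic, &pkt) >= 0) {
        av_pkt_dump_log2(ic, AV_LOG_DEBUG, &pkt, 0,
                         ic->streams[pkt.stream_index]);
        av_free_packet(&pkt);
    }
}
#endif
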
3789void av_url_split(char *proto, int proto_size,
3790 char *authorization, int authorization_size,
3791 char *hostname, int hostname_size,
3792 int *port_ptr,
3793 char *path, int path_size,
3794 const char *url)
3795{
3796 const char *p, *ls, *ls2, *at, *at2, *col, *brk;
3797
3798 if (port_ptr) *port_ptr = -1;
3799 if (proto_size > 0) proto[0] = 0;
3800 if (authorization_size > 0) authorization[0] = 0;
3801 if (hostname_size > 0) hostname[0] = 0;
3802 if (path_size > 0) path[0] = 0;
3803
3804 /* parse protocol */
3805 if ((p = strchr(url, ':'))) {
3806 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3807 p++; /* skip ':' */
3808 if (*p == '/') p++;
3809 if (*p == '/') p++;
3810 } else {
3811 /* no protocol means plain filename */
3812 av_strlcpy(path, url, path_size);
3813 return;
3814 }
3815
3816 /* separate path from hostname */
3817 ls = strchr(p, '/');
3818 ls2 = strchr(p, '?');
3819 if(!ls)
3820 ls = ls2;
3821 else if (ls && ls2)
3822 ls = FFMIN(ls, ls2);
3823 if(ls)
3824 av_strlcpy(path, ls, path_size);
3825 else
3826 ls = &p[strlen(p)]; // XXX
3827
3828 /* the rest is hostname, use that to parse auth/port */
3829 if (ls != p) {
3830 /* authorization (user[:pass]@hostname) */
3831 at2 = p;
3832 while ((at = strchr(p, '@')) && at < ls) {
3833 av_strlcpy(authorization, at2,
3834 FFMIN(authorization_size, at + 1 - at2));
3835 p = at + 1; /* skip '@' */
3836 }
3837
3838 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3839 /* [host]:port */
3840 av_strlcpy(hostname, p + 1,
3841 FFMIN(hostname_size, brk - p));
3842 if (brk[1] == ':' && port_ptr)
3843 *port_ptr = atoi(brk + 2);
3844 } else if ((col = strchr(p, ':')) && col < ls) {
3845 av_strlcpy(hostname, p,
3846 FFMIN(col + 1 - p, hostname_size));
3847 if (port_ptr) *port_ptr = atoi(col + 1);
3848 } else
3849 av_strlcpy(hostname, p,
3850 FFMIN(ls + 1 - p, hostname_size));
3851 }
3852}
3853
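/*
 * Usage sketch (illustrative only, kept under #if 0): splitting an RTSP-style
 * URL into its components.  The URL and the buffer sizes are arbitrary example
 * values; a component that is absent from the URL comes back as an empty
 * string (and the port as -1).
 */
#if 0
static void example_split_url(void)
{
    char proto[16], auth[64], host[128], path[256];
    int port;

    av_url_split(proto, sizeof(proto), auth, sizeof(auth),
                 host, sizeof(host), &port, path, sizeof(path),
                 "rtsp://user:pass@example.com:8554/live/stream?x=1");
    /* proto="rtsp", auth="user:pass", host="example.com",
     * port=8554, path="/live/stream?x=1" */
}
#endif
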
3854char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3855{
3856 int i;
3857 static const char hex_table_uc[16] = { '0', '1', '2', '3',
3858 '4', '5', '6', '7',
3859 '8', '9', 'A', 'B',
3860 'C', 'D', 'E', 'F' };
3861 static const char hex_table_lc[16] = { '0', '1', '2', '3',
3862 '4', '5', '6', '7',
3863 '8', '9', 'a', 'b',
3864 'c', 'd', 'e', 'f' };
3865 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
3866
3867 for(i = 0; i < s; i++) {
3868 buff[i * 2] = hex_table[src[i] >> 4];
3869 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3870 }
3871
3872 return buff;
3873}
3874
3875int ff_hex_to_data(uint8_t *data, const char *p)
3876{
3877 int c, len, v;
3878
3879 len = 0;
3880 v = 1; /* sentinel: after two hex digits are shifted in, bit 8 becomes set */
3881 for (;;) {
3882 p += strspn(p, SPACE_CHARS);
3883 if (*p == '\0')
3884 break;
3885 c = av_toupper((unsigned char) *p++);
3886 if (c >= '0' && c <= '9')
3887 c = c - '0';
3888 else if (c >= 'A' && c <= 'F')
3889 c = c - 'A' + 10;
3890 else
3891 break;
3892 v = (v << 4) | c;
3893 if (v & 0x100) { /* a full byte has been accumulated */
3894 if (data)
3895 data[len] = v;
3896 len++;
3897 v = 1;
3898 }
3899 }
3900 return len;
3901}
3902
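/*
 * Usage sketch (illustrative only, kept under #if 0): round-tripping a few
 * bytes through the two helpers above.  Note that ff_data_to_hex() does not
 * write a terminating NUL, and that ff_hex_to_data() may be called with
 * data == NULL first to learn how many bytes the string decodes to.
 */
#if 0
static int example_hex_roundtrip(void)
{
    static const uint8_t bin[4] = { 0xde, 0xad, 0xbe, 0xef };
    char hex[2 * sizeof(bin) + 1];
    uint8_t *out;
    int len;

    ff_data_to_hex(hex, bin, sizeof(bin), 1);   /* 1 = lowercase digits */
    hex[2 * sizeof(bin)] = '\0';                /* caller terminates the string */

    len = ff_hex_to_data(NULL, hex);            /* len == 4 */
    out = av_malloc(len);
    if (!out)
        return AVERROR(ENOMEM);
    ff_hex_to_data(out, hex);                   /* out now equals bin */
    av_free(out);
    return 0;
}
#endif
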
3903#if FF_API_SET_PTS_INFO
3904void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3905 unsigned int pts_num, unsigned int pts_den)
3906{
3907 avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
3908}
3909#endif
3910
3911void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3912 unsigned int pts_num, unsigned int pts_den)
3913{
3914 AVRational new_tb;
3915 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3916 if(new_tb.num != pts_num)
3917 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3918 }else
3919 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3920
3921 if(new_tb.num <= 0 || new_tb.den <= 0) {
3922 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
3923 return;
3924 }
3925 s->time_base = new_tb;
3926 av_codec_set_pkt_timebase(s->codec, new_tb);
3927 s->pts_wrap_bits = pts_wrap_bits;
3928}
3929
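/*
 * Usage sketch (illustrative only, kept under #if 0): a demuxer's read_header()
 * declaring that its timestamps are 33-bit values in a 90 kHz timebase, the way
 * MPEG-TS style demuxers do.  The constants are choices made for that
 * particular container, not defaults of this function.
 */
#if 0
static int example_init_stream_timebase(AVFormatContext *s)
{
    AVStream *st = avformat_new_stream(s, NULL);

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 33, 1, 90000);   /* st->time_base becomes 1/90000 */
    return 0;
}
#endif
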
3930void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3931 void *context)
3932{
3933 const char *ptr = str;
3934
3935 /* Parse key=value pairs. */
3936 for (;;) {
3937 const char *key;
3938 char *dest = NULL, *dest_end;
3939 int key_len, dest_len = 0;
3940
3941 /* Skip whitespace and potential commas. */
3942 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3943 ptr++;
3944 if (!*ptr)
3945 break;
3946
3947 key = ptr;
3948
3949 if (!(ptr = strchr(key, '=')))
3950 break;
3951 ptr++;
3952 key_len = ptr - key; /* includes the trailing '=' */
3953
3954 callback_get_buf(context, key, key_len, &dest, &dest_len);
3955 dest_end = dest + dest_len - 1;
3956
3957 if (*ptr == '\"') {
3958 ptr++;
3959 while (*ptr && *ptr != '\"') {
3960 if (*ptr == '\\') {
3961 if (!ptr[1])
3962 break;
3963 if (dest && dest < dest_end)
3964 *dest++ = ptr[1];
3965 ptr += 2;
3966 } else {
3967 if (dest && dest < dest_end)
3968 *dest++ = *ptr;
3969 ptr++;
3970 }
3971 }
3972 if (*ptr == '\"')
3973 ptr++;
3974 } else {
3975 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3976 if (dest && dest < dest_end)
3977 *dest++ = *ptr;
3978 }
3979 if (dest)
3980 *dest = 0;
3981 }
3982}
3983
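/*
 * Usage sketch (illustrative only, kept under #if 0): a callback in the style
 * of the RTSP/HTTP auth parsers.  The key handed to the callback still ends
 * with the '=' sign (see key_len above), so it is compared against "realm="
 * and "nonce=" including that character.  The context struct is a made-up
 * example.
 */
#if 0
struct example_auth {
    char realm[128];
    char nonce[128];
};

static void example_key_value_cb(void *context, const char *key, int key_len,
                                 char **dest, int *dest_len)
{
    struct example_auth *auth = context;

    if (!strncmp(key, "realm=", key_len)) {
        *dest     = auth->realm;
        *dest_len = sizeof(auth->realm);
    } else if (!strncmp(key, "nonce=", key_len)) {
        *dest     = auth->nonce;
        *dest_len = sizeof(auth->nonce);
    }
}

/* called as:
 *     struct example_auth auth = { { 0 } };
 *     ff_parse_key_value("realm=\"ffmpeg\", nonce=\"abc\"",
 *                        example_key_value_cb, &auth);
 */
#endif
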
3984int ff_find_stream_index(AVFormatContext *s, int id)
3985{
3986 int i;
3987 for (i = 0; i < s->nb_streams; i++) {
3988 if (s->streams[i]->id == id)
3989 return i;
3990 }
3991 return -1;
3992}
3993
3994int64_t ff_iso8601_to_unix_time(const char *datestr)
3995{
3996 struct tm time1 = {0}, time2 = {0};
3997 char *ret1, *ret2;
3998 ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
3999 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
4000 if (ret2 && !ret1)
4001 return av_timegm(&time2);
4002 else
4003 return av_timegm(&time1);
4004}
4005
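/*
 * Usage sketch (illustrative only, kept under #if 0): both the space-separated
 * and the 'T'-separated ISO 8601 forms are accepted; the result is seconds
 * since the Unix epoch, interpreted as UTC.
 */
#if 0
static void example_parse_date(void)
{
    int64_t t = ff_iso8601_to_unix_time("2013-01-01T12:00:00");
    av_log(NULL, AV_LOG_INFO, "%"PRId64" seconds since the epoch\n", t);
}
#endif
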
4006int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
4007{
4008 if (ofmt) {
4009 if (ofmt->query_codec)
4010 return ofmt->query_codec(codec_id, std_compliance);
4011 else if (ofmt->codec_tag)
4012 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4013 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4014 codec_id == ofmt->subtitle_codec)
4015 return 1;
4016 }
4017 return AVERROR_PATCHWELCOME;
4018}
4019
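/*
 * Usage sketch (illustrative only, kept under #if 0): checking whether a muxer
 * can store a given codec before creating the stream.  A return value of 1
 * means supported, 0 means not supported, AVERROR_PATCHWELCOME means the
 * format cannot tell.  The "mp4"/H.264 pairing is just an example.
 */
#if 0
static int example_can_mux_h264_in_mp4(void)
{
    AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);

    if (!ofmt)
        return 0;
    return avformat_query_codec(ofmt, AV_CODEC_ID_H264,
                                FF_COMPLIANCE_NORMAL) == 1;
}
#endif
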
4020int avformat_network_init(void)
4021{
4022#if CONFIG_NETWORK
4023 int ret;
4024 ff_network_inited_globally = 1;
4025 if ((ret = ff_network_init()) < 0)
4026 return ret;
4027 ff_tls_init();
4028#endif
4029 return 0;
4030}
4031
4032int avformat_network_deinit(void)
4033{
4034#if CONFIG_NETWORK
4035 ff_network_close();
4036 ff_tls_deinit();
4037#endif
4038 return 0;
4039}
4040
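/*
 * Usage sketch (illustrative only, kept under #if 0): the recommended bracket
 * around any use of network protocols.  Both calls degrade to no-ops when the
 * library was built without CONFIG_NETWORK.
 */
#if 0
static int example_network_session(void)
{
    AVFormatContext *ic = NULL;
    int ret;

    avformat_network_init();
    ret = avformat_open_input(&ic, "http://example.com/stream.ts", NULL, NULL);
    if (ret >= 0)
        avformat_close_input(&ic);
    avformat_network_deinit();
    return ret;
}
#endif
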
4041int ff_add_param_change(AVPacket *pkt, int32_t channels,
4042 uint64_t channel_layout, int32_t sample_rate,
4043 int32_t width, int32_t height)
4044{
4045 uint32_t flags = 0;
4046 int size = 4;
4047 uint8_t *data;
4048 if (!pkt)
4049 return AVERROR(EINVAL);
4050 if (channels) {
4051 size += 4;
4052 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4053 }
4054 if (channel_layout) {
4055 size += 8;
4056 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4057 }
4058 if (sample_rate) {
4059 size += 4;
4060 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4061 }
4062 if (width || height) {
4063 size += 8;
4064 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4065 }
4066 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4067 if (!data)
4068 return AVERROR(ENOMEM);
4069 bytestream_put_le32(&data, flags);
4070 if (channels)
4071 bytestream_put_le32(&data, channels);
4072 if (channel_layout)
4073 bytestream_put_le64(&data, channel_layout);
4074 if (sample_rate)
4075 bytestream_put_le32(&data, sample_rate);
4076 if (width || height) {
4077 bytestream_put_le32(&data, width);
4078 bytestream_put_le32(&data, height);
4079 }
4080 return 0;
4081}
4082
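/*
 * Usage sketch (illustrative only, kept under #if 0): a demuxer announcing a
 * mid-stream sample rate change by attaching AV_PKT_DATA_PARAM_CHANGE side
 * data to the packet it is about to return.  Passing 0 for a field leaves
 * that parameter unannounced.
 */
#if 0
static int example_signal_new_sample_rate(AVPacket *pkt, int new_rate)
{
    /* channels, channel_layout, width and height stay 0 (unchanged) */
    return ff_add_param_change(pkt, 0, 0, new_rate, 0, 0);
}
#endif
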
4083AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4084{
4085 AVRational undef = {0, 1};
4086 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4087 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4088 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4089
4090 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4091 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4092 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4093 stream_sample_aspect_ratio = undef;
4094
4095 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4096 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4097 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4098 frame_sample_aspect_ratio = undef;
4099
4100 if (stream_sample_aspect_ratio.num)
4101 return stream_sample_aspect_ratio;
4102 else
4103 return frame_sample_aspect_ratio;
4104}
4105
4106AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4107{
4108 AVRational fr = st->r_frame_rate;
4109
4110 if (st->codec->ticks_per_frame > 1) {
4111 AVRational codec_fr = av_inv_q(st->codec->time_base);
4112 AVRational avg_fr = st->avg_frame_rate;
4113 codec_fr.den *= st->codec->ticks_per_frame;
4114 if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
4115 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
4116 fr = codec_fr;
4117 }
4118
4119 return fr;
4120}
4121
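/*
 * Usage sketch (illustrative only, kept under #if 0): picking the aspect ratio
 * and frame rate with which to configure a filter graph or encoder, preferring
 * container-level values when they look sane.  The decoded frame may be NULL,
 * in which case only stream/codec information is considered.
 */
#if 0
static void example_pick_video_parameters(AVFormatContext *ic, AVStream *st,
                                          AVFrame *frame)
{
    AVRational sar = av_guess_sample_aspect_ratio(ic, st, frame);
    AVRational fr  = av_guess_frame_rate(ic, st, frame);

    av_log(ic, AV_LOG_INFO, "SAR %d:%d, %d/%d fps\n",
           sar.num, sar.den, fr.num, fr.den);
}
#endif
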
4122int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4123 const char *spec)
4124{
4125 if (*spec <= '9' && *spec >= '0') /* opt:index */
4126 return strtol(spec, NULL, 0) == st->index;
4127 else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4128 *spec == 't') { /* opt:[vasdt] */
4129 enum AVMediaType type;
4130
4131 switch (*spec++) {
4132 case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4133 case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4134 case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4135 case 'd': type = AVMEDIA_TYPE_DATA; break;
4136 case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4137 default: av_assert0(0);
4138 }
4139 if (type != st->codec->codec_type)
4140 return 0;
4141 if (*spec++ == ':') { /* possibly followed by :index */
4142 int i, index = strtol(spec, NULL, 0);
4143 for (i = 0; i < s->nb_streams; i++)
4144 if (s->streams[i]->codec->codec_type == type && index-- == 0)
4145 return i == st->index;
4146 return 0;
4147 }
4148 return 1;
4149 } else if (*spec == 'p' && *(spec + 1) == ':') {
4150 int prog_id, i, j;
4151 char *endptr;
4152 spec += 2;
4153 prog_id = strtol(spec, &endptr, 0);
4154 for (i = 0; i < s->nb_programs; i++) {
4155 if (s->programs[i]->id != prog_id)
4156 continue;
4157
4158 if (*endptr++ == ':') {
4159 int stream_idx = strtol(endptr, NULL, 0);
4160 return stream_idx >= 0 &&
4161 stream_idx < s->programs[i]->nb_stream_indexes &&
4162 st->index == s->programs[i]->stream_index[stream_idx];
4163 }
4164
4165 for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4166 if (st->index == s->programs[i]->stream_index[j])
4167 return 1;
4168 }
4169 return 0;
4170 } else if (*spec == '#') {
4171 int sid;
4172 char *endptr;
4173 sid = strtol(spec + 1, &endptr, 0);
4174 if (!*endptr)
4175 return st->id == sid;
4176 } else if (!*spec) /* empty specifier, matches everything */
4177 return 1;
4178
4179 av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4180 return AVERROR(EINVAL);
4181}
4182
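/*
 * Usage sketch (illustrative only, kept under #if 0): selecting streams with
 * the same specifier syntax the ffmpeg command-line tool uses ("v" = any video
 * stream, "a:0" = first audio stream, "p:44:1" = second stream of program 44,
 * "#0x101" = the stream whose id is 0x101).  The specifier below is an example
 * value.
 */
#if 0
static int example_find_first_audio(AVFormatContext *s)
{
    int i, ret;

    for (i = 0; i < s->nb_streams; i++) {
        ret = avformat_match_stream_specifier(s, s->streams[i], "a:0");
        if (ret < 0)
            return ret;      /* malformed specifier */
        if (ret > 0)
            return i;        /* matched */
    }
    return AVERROR_STREAM_NOT_FOUND;
}
#endif
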
4183void ff_generate_avci_extradata(AVStream *st)
4184{
4185 static const uint8_t avci100_1080p_extradata[] = {
4186 // SPS
4187 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4188 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4189 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4190 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
4191 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
4192 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
4193 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
4194 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
4195 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4196 // PPS
4197 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4198 0xd0
4199 };
4200 static const uint8_t avci100_1080i_extradata[] = {
4201 // SPS
4202 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4203 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4204 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4205 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
4206 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
4207 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
4208 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
4209 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
4210 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
4211 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
4212 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
4213 // PPS
4214 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4215 0xd0
4216 };
4217 static const uint8_t avci50_1080i_extradata[] = {
4218 // SPS
4219 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4220 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4221 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4222 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
4223 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
4224 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
4225 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
4226 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
4227 0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
4228 0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
4229 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
4230 // PPS
4231 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4232 0x11
4233 };
4234 static const uint8_t avci100_720p_extradata[] = {
4235 // SPS
4236 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4237 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
4238 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
4239 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
4240 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
4241 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
4242 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
4243 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
4244 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
4245 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
4246 // PPS
4247 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
4248 0x11
4249 };
4250 int size = 0;
4251 const uint8_t *data = 0;
4252 if (st->codec->width == 1920) {
4253 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4254 data = avci100_1080p_extradata;
4255 size = sizeof(avci100_1080p_extradata);
4256 } else {
4257 data = avci100_1080i_extradata;
4258 size = sizeof(avci100_1080i_extradata);
4259 }
4260 } else if (st->codec->width == 1440) {
4261 data = avci50_1080i_extradata;
4262 size = sizeof(avci50_1080i_extradata);
4263 } else if (st->codec->width == 1280) {
4264 data = avci100_720p_extradata;
4265 size = sizeof(avci100_720p_extradata);
4266 }
4267 if (!size)
4268 return;
4269 av_freep(&st->codec->extradata);
4270 if (ff_alloc_extradata(st->codec, size))
4271 return;
4272 memcpy(st->codec->extradata, data, size);
4273}
4274