path: root/libavformat/utils.c
blob: 5211ffd3ebdfbc6faa54fee5c0433ebb3982535f
1/*
2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#include "avformat.h"
23#include "avio_internal.h"
24#include "internal.h"
25#include "libavcodec/internal.h"
26#include "libavcodec/raw.h"
27#include "libavcodec/bytestream.h"
28#include "libavutil/opt.h"
29#include "libavutil/dict.h"
30#include "libavutil/internal.h"
31#include "libavutil/pixdesc.h"
32#include "metadata.h"
33#include "id3v2.h"
34#include "libavutil/avassert.h"
35#include "libavutil/avstring.h"
36#include "libavutil/mathematics.h"
37#include "libavutil/parseutils.h"
38#include "libavutil/time.h"
39#include "libavutil/timestamp.h"
40#include "riff.h"
41#include "audiointerleave.h"
42#include "url.h"
43#include <stdarg.h>
44#if CONFIG_NETWORK
45#include "network.h"
46#endif
47
48#undef NDEBUG
49#include <assert.h>
50
51/**
52 * @file
53 * various utility functions for use within FFmpeg
54 */
55
56unsigned avformat_version(void)
57{
58 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
59 return LIBAVFORMAT_VERSION_INT;
60}
61
62const char *avformat_configuration(void)
63{
64 return FFMPEG_CONFIGURATION;
65}
66
67const char *avformat_license(void)
68{
69#define LICENSE_PREFIX "libavformat license: "
70 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
71}
72
73#define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))
74
75static int is_relative(int64_t ts) {
76 return ts > (RELATIVE_TS_BASE - (1LL<<48));
77}
78
79/**
80 * Wrap a given timestamp if there is an indication of timestamp overflow.
81 *
82 * @param st stream
83 * @param timestamp the time stamp to wrap
84 * @return resulting time stamp
85 */
86static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
87{
88 if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE &&
89 st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
90 if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
91 timestamp < st->pts_wrap_reference)
92 return timestamp + (1ULL<<st->pts_wrap_bits);
93 else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
94 timestamp >= st->pts_wrap_reference)
95 return timestamp - (1ULL<<st->pts_wrap_bits);
96 }
97 return timestamp;
98}
99
100MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
101MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, video_codec)
102MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
103MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
104
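/**
 * Return the decoder to use for a stream: the decoder already attached to
 * the stream, a decoder forced on the AVFormatContext for the stream's
 * media type (see the accessors above), or the default decoder for the
 * given codec id.
 */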
105static AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
106{
107 if (st->codec->codec)
108 return st->codec->codec;
109
110 switch(st->codec->codec_type){
111 case AVMEDIA_TYPE_VIDEO:
112 if(s->video_codec) return s->video_codec;
113 break;
114 case AVMEDIA_TYPE_AUDIO:
115 if(s->audio_codec) return s->audio_codec;
116 break;
117 case AVMEDIA_TYPE_SUBTITLE:
118 if(s->subtitle_codec) return s->subtitle_codec;
119 break;
120 }
121
122 return avcodec_find_decoder(codec_id);
123}
124
125int av_format_get_probe_score(const AVFormatContext *s)
126{
127 return s->probe_score;
128}
129
130/* an arbitrarily chosen "sane" max packet size -- 50M */
131#define SANE_CHUNK_SIZE (50000000)
132
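/**
 * Clamp a requested read size against the remaining bytes of the stream.
 *
 * If s->maxsize is known (>= 0), the requested size is reduced so that the
 * read does not run past the end of the resource, and a truncation message
 * is logged when that happens.
 *
 * @return the possibly reduced read size
 */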
133int ffio_limit(AVIOContext *s, int size)
134{
135 if(s->maxsize>=0){
136 int64_t remaining= s->maxsize - avio_tell(s);
137 if(remaining < size){
138 int64_t newsize= avio_size(s);
139 if(!s->maxsize || s->maxsize<newsize)
140 s->maxsize= newsize - !newsize;
141 remaining= s->maxsize - avio_tell(s);
142 remaining= FFMAX(remaining, 0);
143 }
144
145 if(s->maxsize>=0 && remaining+1 < size){
146 av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
147 size= remaining+1;
148 }
149 }
150 return size;
151}
152
153/*
154 * Read the data in sane-sized chunks and append to pkt.
155 * Return the number of bytes read or an error.
156 */
157static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
158{
159 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
160 int orig_size = pkt->size;
161 int ret;
162
163 do {
164 int prev_size = pkt->size;
165 int read_size;
166
167 /*
168 * When the caller requests a lot of data, limit it to the amount left
169 * in file or SANE_CHUNK_SIZE when it is not known
170 */
171 read_size = size;
172 if (read_size > SANE_CHUNK_SIZE/10) {
173 read_size = ffio_limit(s, read_size);
174 // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
175 if (s->maxsize < 0)
176 read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
177 }
178
179 ret = av_grow_packet(pkt, read_size);
180 if (ret < 0)
181 break;
182
183 ret = avio_read(s, pkt->data + prev_size, read_size);
184 if (ret != read_size) {
185 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
186 break;
187 }
188
189 size -= read_size;
190 } while (size > 0);
191 if (size > 0)
192 pkt->flags |= AV_PKT_FLAG_CORRUPT;
193
194 pkt->pos = orig_pos;
195 if (!pkt->size)
196 av_free_packet(pkt);
197 return pkt->size > orig_size ? pkt->size - orig_size : ret;
198}
199
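/*
 * Public entry point: allocate and fill a fresh packet with up to 'size'
 * bytes read from s. The data is read via append_packet_chunked() so that a
 * corrupted size field cannot trigger one huge allocation.
 */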
200int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
201{
202 av_init_packet(pkt);
203 pkt->data = NULL;
204 pkt->size = 0;
205 pkt->pos = avio_tell(s);
206
207 return append_packet_chunked(s, pkt, size);
208}
209
210int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
211{
212 if (!pkt->size)
213 return av_get_packet(s, pkt, size);
214 return append_packet_chunked(s, pkt, size);
215}
216
217
218int av_filename_number_test(const char *filename)
219{
220 char buf[1024];
221 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
222}
223
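/**
 * Probe every registered input format against pd and return the one with
 * the highest score, or NULL if no format matches or the best score is
 * shared by several formats. A leading ID3v2 tag is skipped (or, if it
 * covers the whole buffer, the achievable score is capped). The best score
 * is stored in *score_ret.
 */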
224AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
225{
226 AVProbeData lpd = *pd;
227 AVInputFormat *fmt1 = NULL, *fmt;
228 int score, nodat = 0, score_max=0;
229 const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE];
230
231 if (!lpd.buf)
232 lpd.buf = zerobuffer;
233
234 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
235 int id3len = ff_id3v2_tag_len(lpd.buf);
236 if (lpd.buf_size > id3len + 16) {
237 lpd.buf += id3len;
238 lpd.buf_size -= id3len;
239 }else
240 nodat = 1;
241 }
242
243 fmt = NULL;
244 while ((fmt1 = av_iformat_next(fmt1))) {
245 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
246 continue;
247 score = 0;
248 if (fmt1->read_probe) {
249 score = fmt1->read_probe(&lpd);
250 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
251 score = FFMAX(score, nodat ? AVPROBE_SCORE_EXTENSION / 2 - 1 : 1);
252 } else if (fmt1->extensions) {
253 if (av_match_ext(lpd.filename, fmt1->extensions)) {
254 score = AVPROBE_SCORE_EXTENSION;
255 }
256 }
257 if (score > score_max) {
258 score_max = score;
259 fmt = fmt1;
260 }else if (score == score_max)
261 fmt = NULL;
262 }
263 if(nodat)
264 score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
265 *score_ret= score_max;
266
267 return fmt;
268}
269
270AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
271{
272 int score_ret;
273 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
274 if(score_ret > *score_max){
275 *score_max= score_ret;
276 return fmt;
277 }else
278 return NULL;
279}
280
281AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
282 int score=0;
283 return av_probe_input_format2(pd, is_opened, &score);
284}
285
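/**
 * Run the format probers over the data collected for a single stream and,
 * for the raw formats listed below, map the detected format name to a codec
 * id and media type on the stream.
 *
 * @return the probe score
 */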
286static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
287{
288 static const struct {
289 const char *name; enum AVCodecID id; enum AVMediaType type;
290 } fmt_id_type[] = {
291 { "aac" , AV_CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
292 { "ac3" , AV_CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
293 { "dts" , AV_CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
294 { "eac3" , AV_CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
295 { "h264" , AV_CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
296 { "loas" , AV_CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
297 { "m4v" , AV_CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
298 { "mp3" , AV_CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
299 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
300 { 0 }
301 };
302 int score;
303 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
304
305 if (fmt && st->request_probe <= score) {
306 int i;
307 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
308 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
309 for (i = 0; fmt_id_type[i].name; i++) {
310 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
311 st->codec->codec_id = fmt_id_type[i].id;
312 st->codec->codec_type = fmt_id_type[i].type;
313 break;
314 }
315 }
316 }
317 return score;
318}
319
320/************************************************************/
321/* input media file */
322
323int av_demuxer_open(AVFormatContext *ic){
324 int err;
325
326 if (ic->iformat->read_header) {
327 err = ic->iformat->read_header(ic);
328 if (err < 0)
329 return err;
330 }
331
332 if (ic->pb && !ic->data_offset)
333 ic->data_offset = avio_tell(ic->pb);
334
335 return 0;
336}
337
338
339int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
340 const char *filename, void *logctx,
341 unsigned int offset, unsigned int max_probe_size)
342{
343 AVProbeData pd = { filename ? filename : "", NULL, -offset };
344 unsigned char *buf = NULL;
345 uint8_t *mime_type;
346 int ret = 0, probe_size, buf_offset = 0;
347 int score = 0;
348
349 if (!max_probe_size) {
350 max_probe_size = PROBE_BUF_MAX;
351 } else if (max_probe_size > PROBE_BUF_MAX) {
352 max_probe_size = PROBE_BUF_MAX;
353 } else if (max_probe_size < PROBE_BUF_MIN) {
354 av_log(logctx, AV_LOG_ERROR,
355 "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
356 return AVERROR(EINVAL);
357 }
358
359 if (offset >= max_probe_size) {
360 return AVERROR(EINVAL);
361 }
362
363 if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
364 if (!av_strcasecmp(mime_type, "audio/aacp")) {
365 *fmt = av_find_input_format("aac");
366 }
367 av_freep(&mime_type);
368 }
369
370 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
371 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
372
373 if (probe_size < offset) {
374 continue;
375 }
376 score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;
377
378 /* read probe data */
379 if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
380 return ret;
381 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
382 /* fail if the error was not end of file; otherwise just lower the score */
383 if (ret != AVERROR_EOF) {
384 av_free(buf);
385 return ret;
386 }
387 score = 0;
388 ret = 0; /* error was end of file, nothing read */
389 }
390 pd.buf_size = buf_offset += ret;
391 pd.buf = &buf[offset];
392
393 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
394
395 /* guess file format */
396 *fmt = av_probe_input_format2(&pd, 1, &score);
397 if(*fmt){
398 if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration
399 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
400 }else
401 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
402 }
403 }
404
405 if (!*fmt) {
406 av_free(buf);
407 return AVERROR_INVALIDDATA;
408 }
409
410 /* rewind. reuse probe buffer to avoid seeking */
411 ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size);
412
413 return ret < 0 ? ret : score;
414}
415
416int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
417 const char *filename, void *logctx,
418 unsigned int offset, unsigned int max_probe_size)
419{
420 int ret = av_probe_input_buffer2(pb, fmt, filename, logctx, offset, max_probe_size);
421 return ret < 0 ? ret : 0;
422}
423
424
425/* open input file and probe the format if necessary */
426static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
427{
428 int ret;
429 AVProbeData pd = {filename, NULL, 0};
430 int score = AVPROBE_SCORE_RETRY;
431
432 if (s->pb) {
433 s->flags |= AVFMT_FLAG_CUSTOM_IO;
434 if (!s->iformat)
435 return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
436 else if (s->iformat->flags & AVFMT_NOFILE)
437 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
438 "will be ignored with AVFMT_NOFILE format.\n");
439 return 0;
440 }
441
442 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
443 (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
444 return score;
445
446 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
447 &s->interrupt_callback, options)) < 0)
448 return ret;
449 if (s->iformat)
450 return 0;
451 return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
452}
453
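/**
 * Append *pkt (copied by value) to a packet list.
 *
 * @return a pointer to the stored copy, or NULL if the list node could not
 *         be allocated
 */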
454static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
455 AVPacketList **plast_pktl){
456 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
457 if (!pktl)
458 return NULL;
459
460 if (*packet_buffer)
461 (*plast_pktl)->next = pktl;
462 else
463 *packet_buffer = pktl;
464
465 /* add the packet to the buffered packet list */
466 *plast_pktl = pktl;
467 pktl->pkt= *pkt;
468 return &pktl->pkt;
469}
470
471int avformat_queue_attached_pictures(AVFormatContext *s)
472{
473 int i;
474 for (i = 0; i < s->nb_streams; i++)
475 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
476 s->streams[i]->discard < AVDISCARD_ALL) {
477 AVPacket copy = s->streams[i]->attached_pic;
478 copy.buf = av_buffer_ref(copy.buf);
479 if (!copy.buf)
480 return AVERROR(ENOMEM);
481
482 add_to_pktbuf(&s->raw_packet_buffer, &copy, &s->raw_packet_buffer_end);
483 }
484 return 0;
485}
486
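/*
 * Typical call sequence from user code (illustrative sketch only; options
 * and error handling omitted, the file name is just an example):
 *
 *     AVFormatContext *ctx = NULL;
 *     if (avformat_open_input(&ctx, "input.mkv", NULL, NULL) < 0)
 *         return -1;
 *     if (avformat_find_stream_info(ctx, NULL) < 0)
 *         return -1;
 *     // ... av_read_frame() loop ...
 *     avformat_close_input(&ctx);
 */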
487int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
488{
489 AVFormatContext *s = *ps;
490 int ret = 0;
491 AVDictionary *tmp = NULL;
492 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
493
494 if (!s && !(s = avformat_alloc_context()))
495 return AVERROR(ENOMEM);
496 if (!s->av_class){
497 av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
498 return AVERROR(EINVAL);
499 }
500 if (fmt)
501 s->iformat = fmt;
502
503 if (options)
504 av_dict_copy(&tmp, *options, 0);
505
506 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
507 goto fail;
508
509 if ((ret = init_input(s, filename, &tmp)) < 0)
510 goto fail;
511 s->probe_score = ret;
512 avio_skip(s->pb, s->skip_initial_bytes);
513
514 /* check filename in case an image number is expected */
515 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
516 if (!av_filename_number_test(filename)) {
517 ret = AVERROR(EINVAL);
518 goto fail;
519 }
520 }
521
522 s->duration = s->start_time = AV_NOPTS_VALUE;
523 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
524
525 /* allocate private data */
526 if (s->iformat->priv_data_size > 0) {
527 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
528 ret = AVERROR(ENOMEM);
529 goto fail;
530 }
531 if (s->iformat->priv_class) {
532 *(const AVClass**)s->priv_data = s->iformat->priv_class;
533 av_opt_set_defaults(s->priv_data);
534 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
535 goto fail;
536 }
537 }
538
539 /* e.g. AVFMT_NOFILE formats will not have an AVIOContext */
540 if (s->pb)
541 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
542
543 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
544 if ((ret = s->iformat->read_header(s)) < 0)
545 goto fail;
546
547 if (id3v2_extra_meta) {
548 if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
549 !strcmp(s->iformat->name, "tta")) {
550 if((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
551 goto fail;
552 } else
553 av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
554 }
555 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
556
557 if ((ret = avformat_queue_attached_pictures(s)) < 0)
558 goto fail;
559
560 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
561 s->data_offset = avio_tell(s->pb);
562
563 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
564
565 if (options) {
566 av_dict_free(options);
567 *options = tmp;
568 }
569 *ps = s;
570 return 0;
571
572fail:
573 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
574 av_dict_free(&tmp);
575 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
576 avio_close(s->pb);
577 avformat_free_context(s);
578 *ps = NULL;
579 return ret;
580}
581
582/*******************************************************/
583
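/**
 * Overwrite the stream's codec_id with the id forced through
 * AVFormatContext.video/audio/subtitle_codec_id, if one was set for the
 * stream's media type.
 */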
584static void force_codec_ids(AVFormatContext *s, AVStream *st)
585{
586 switch(st->codec->codec_type){
587 case AVMEDIA_TYPE_VIDEO:
588 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
589 break;
590 case AVMEDIA_TYPE_AUDIO:
591 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
592 break;
593 case AVMEDIA_TYPE_SUBTITLE:
594 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
595 break;
596 }
597}
598
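/**
 * Accumulate the payload of pkt (or finalize probing when pkt is NULL) in
 * the per-stream probe buffer and run set_codec_from_probe_data() on it once
 * enough data has been collected; on success the probe buffer is freed and
 * st->request_probe is set to -1 so the stream is no longer probed.
 */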
599static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
600{
601 if(st->request_probe>0){
602 AVProbeData *pd = &st->probe_data;
603 int end;
604 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
605 --st->probe_packets;
606
607 if (pkt) {
608 uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
609 if(!new_buf) {
610 av_log(s, AV_LOG_WARNING,
611 "Failed to reallocate probe buffer for stream %d\n",
612 st->index);
613 goto no_packet;
614 }
615 pd->buf = new_buf;
616 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
617 pd->buf_size += pkt->size;
618 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
619 } else {
620no_packet:
621 st->probe_packets = 0;
622 if (!pd->buf_size) {
623 av_log(s, AV_LOG_WARNING, "nothing to probe for stream %d\n",
624 st->index);
625 }
626 }
627
628 end= s->raw_packet_buffer_remaining_size <= 0
629 || st->probe_packets<=0;
630
631 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
632 int score= set_codec_from_probe_data(s, st, pd);
633 if( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY)
634 || end){
635 pd->buf_size=0;
636 av_freep(&pd->buf);
637 st->request_probe= -1;
638 if(st->codec->codec_id != AV_CODEC_ID_NONE){
639 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
640 }else
641 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
642 }
643 force_codec_ids(s, st);
644 }
645 }
646 return 0;
647}
648
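/**
 * Read the next raw, unparsed packet from the demuxer.
 *
 * Packets belonging to streams that still need codec probing are queued in
 * raw_packet_buffer and fed to probe_codec(); queued packets are returned as
 * soon as their stream no longer requests probing. Timestamp wrapping and
 * forced codec ids are applied to every packet on the way.
 */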
649int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
650{
651 int ret, i, err;
652 AVStream *st;
653
654 for(;;){
655 AVPacketList *pktl = s->raw_packet_buffer;
656
657 if (pktl) {
658 *pkt = pktl->pkt;
659 st = s->streams[pkt->stream_index];
660 if (s->raw_packet_buffer_remaining_size <= 0) {
661 if ((err = probe_codec(s, st, NULL)) < 0)
662 return err;
663 }
664 if(st->request_probe <= 0){
665 s->raw_packet_buffer = pktl->next;
666 s->raw_packet_buffer_remaining_size += pkt->size;
667 av_free(pktl);
668 return 0;
669 }
670 }
671
672 pkt->data = NULL;
673 pkt->size = 0;
674 av_init_packet(pkt);
675 ret= s->iformat->read_packet(s, pkt);
676 if (ret < 0) {
677 if (!pktl || ret == AVERROR(EAGAIN))
678 return ret;
679 for (i = 0; i < s->nb_streams; i++) {
680 st = s->streams[i];
681 if (st->probe_packets) {
682 if ((err = probe_codec(s, st, NULL)) < 0)
683 return err;
684 }
685 av_assert0(st->request_probe <= 0);
686 }
687 continue;
688 }
689
690 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
691 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
692 av_log(s, AV_LOG_WARNING,
693 "Dropped corrupted packet (stream = %d)\n",
694 pkt->stream_index);
695 av_free_packet(pkt);
696 continue;
697 }
698
699 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
700 av_packet_merge_side_data(pkt);
701
702 if(pkt->stream_index >= (unsigned)s->nb_streams){
703 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
704 continue;
705 }
706
707 st= s->streams[pkt->stream_index];
708 pkt->dts = wrap_timestamp(st, pkt->dts);
709 pkt->pts = wrap_timestamp(st, pkt->pts);
710
711 force_codec_ids(s, st);
712
713 /* TODO: audio: time filter; video: frame reordering (pts != dts) */
714 if (s->use_wallclock_as_timestamps)
715 pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
716
717 if(!pktl && st->request_probe <= 0)
718 return ret;
719
720 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
721 s->raw_packet_buffer_remaining_size -= pkt->size;
722
723 if ((err = probe_codec(s, st, pkt)) < 0)
724 return err;
725 }
726}
727
728#if FF_API_READ_PACKET
729int av_read_packet(AVFormatContext *s, AVPacket *pkt)
730{
731 return ff_read_packet(s, pkt);
732}
733#endif
734
735
736/**********************************************************/
737
738static int determinable_frame_size(AVCodecContext *avctx)
739{
740 if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
741 avctx->codec_id == AV_CODEC_ID_MP1 ||
742 avctx->codec_id == AV_CODEC_ID_MP2 ||
743 avctx->codec_id == AV_CODEC_ID_MP3/* ||
744 avctx->codec_id == AV_CODEC_ID_CELT*/)
745 return 1;
746 return 0;
747}
748
749/**
750 * Get the number of samples of an audio frame. Return -1 on error.
751 */
752int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
753{
754 int frame_size;
755
756 /* give frame_size priority if demuxing */
757 if (!mux && enc->frame_size > 1)
758 return enc->frame_size;
759
760 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
761 return frame_size;
762
763 /* Fall back on using frame_size if muxing. */
764 if (enc->frame_size > 1)
765 return enc->frame_size;
766
767 //For WMA we currently have no other means to calculate the duration, thus we
768 //do it here by assuming CBR, which is true for all known cases.
769 if(!mux && enc->bit_rate>0 && size>0 && enc->sample_rate>0 && enc->block_align>1) {
770 if (enc->codec_id == AV_CODEC_ID_WMAV1 || enc->codec_id == AV_CODEC_ID_WMAV2)
771 return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
772 }
773
774 return -1;
775}
776
777
778/**
779 * Compute the frame duration as a fraction: *pnum / *pden gives the duration in seconds. Both are set to 0 if it is not available.
780 */
781void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
782 AVCodecParserContext *pc, AVPacket *pkt)
783{
784 int frame_size;
785
786 *pnum = 0;
787 *pden = 0;
788 switch(st->codec->codec_type) {
789 case AVMEDIA_TYPE_VIDEO:
790 if (st->r_frame_rate.num && !pc) {
791 *pnum = st->r_frame_rate.den;
792 *pden = st->r_frame_rate.num;
793 } else if(st->time_base.num*1000LL > st->time_base.den) {
794 *pnum = st->time_base.num;
795 *pden = st->time_base.den;
796 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
797 *pnum = st->codec->time_base.num;
798 *pden = st->codec->time_base.den;
799 if (pc && pc->repeat_pict) {
800 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
801 *pden /= 1 + pc->repeat_pict;
802 else
803 *pnum *= 1 + pc->repeat_pict;
804 }
805 //If this codec can be interlaced or progressive then we need a parser to compute the duration of a packet.
806 //Thus, if we have no parser in that case, leave the duration undefined.
807 if(st->codec->ticks_per_frame>1 && !pc){
808 *pnum = *pden = 0;
809 }
810 }
811 break;
812 case AVMEDIA_TYPE_AUDIO:
813 frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
814 if (frame_size <= 0 || st->codec->sample_rate <= 0)
815 break;
816 *pnum = frame_size;
817 *pden = st->codec->sample_rate;
818 break;
819 default:
820 break;
821 }
822}
823
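/**
 * Return 1 if every frame of the codec is a keyframe; non-video streams are
 * treated as intra-only.
 */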
824static int is_intra_only(AVCodecContext *enc){
825 const AVCodecDescriptor *desc;
826
827 if(enc->codec_type != AVMEDIA_TYPE_VIDEO)
828 return 1;
829
830 desc = av_codec_get_codec_descriptor(enc);
831 if (!desc) {
832 desc = avcodec_descriptor_get(enc->codec_id);
833 av_codec_set_codec_descriptor(enc, desc);
834 }
835 if (desc)
836 return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
837 return 0;
838}
839
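/**
 * Heuristic: return 1 once the H.264 reorder delay stored in has_b_frames
 * can be trusted, i.e. after enough frames have been decoded or when the
 * decoder reports the same number of reorder frames. Non-H.264 streams
 * always return 1.
 */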
840static int has_decode_delay_been_guessed(AVStream *st)
841{
842 if(st->codec->codec_id != AV_CODEC_ID_H264) return 1;
843 if(!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
844 return 1;
845#if CONFIG_H264_DECODER
846 if(st->codec->has_b_frames &&
847 avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
848 return 1;
849#endif
850 if(st->codec->has_b_frames<3)
851 return st->nb_decoded_frames >= 7;
852 else if(st->codec->has_b_frames<4)
853 return st->nb_decoded_frames >= 18;
854 else
855 return st->nb_decoded_frames >= 20;
856}
857
858static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
859{
860 if (pktl->next)
861 return pktl->next;
862 if (pktl == s->parse_queue_end)
863 return s->packet_buffer;
864 return NULL;
865}
866
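/**
 * Initialize pts_wrap_reference and pts_wrap_behavior for the stream (and
 * for the other streams of its program, or for all streams if it belongs to
 * none) once its first dts is known, so that later timestamps can be
 * unwrapped consistently.
 *
 * @return 1 if the wrap reference was updated, 0 otherwise
 */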
867static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index)
868{
869 if (s->correct_ts_overflow && st->pts_wrap_bits < 63 &&
870 st->pts_wrap_reference == AV_NOPTS_VALUE && st->first_dts != AV_NOPTS_VALUE) {
871 int i;
872
873 // reference time stamp should be 60 s before first time stamp
874 int64_t pts_wrap_reference = st->first_dts - av_rescale(60, st->time_base.den, st->time_base.num);
875 // if the first timestamp is no more than 1/8 of the wrap period and no more than 60 s before the wrap point, subtract rather than add the wrap offset
876 int pts_wrap_behavior = (st->first_dts < (1LL<<st->pts_wrap_bits) - (1LL<<st->pts_wrap_bits-3)) ||
877 (st->first_dts < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
878 AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;
879
880 AVProgram *first_program = av_find_program_from_stream(s, NULL, stream_index);
881
882 if (!first_program) {
883 int default_stream_index = av_find_default_stream_index(s);
884 if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
885 for (i=0; i<s->nb_streams; i++) {
886 s->streams[i]->pts_wrap_reference = pts_wrap_reference;
887 s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
888 }
889 }
890 else {
891 st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
892 st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
893 }
894 }
895 else {
896 AVProgram *program = first_program;
897 while (program) {
898 if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
899 pts_wrap_reference = program->pts_wrap_reference;
900 pts_wrap_behavior = program->pts_wrap_behavior;
901 break;
902 }
903 program = av_find_program_from_stream(s, program, stream_index);
904 }
905
906 // update every program with differing pts_wrap_reference
907 program = first_program;
908 while(program) {
909 if (program->pts_wrap_reference != pts_wrap_reference) {
910 for (i=0; i<program->nb_stream_indexes; i++) {
911 s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
912 s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
913 }
914
915 program->pts_wrap_reference = pts_wrap_reference;
916 program->pts_wrap_behavior = pts_wrap_behavior;
917 }
918 program = av_find_program_from_stream(s, program, stream_index);
919 }
920 }
921 return 1;
922 }
923 return 0;
924}
925
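/**
 * Called when the first dts of a stream becomes known: compute the shift
 * from the RELATIVE_TS_BASE domain to real timestamps, apply it to the
 * packets of this stream still sitting in the parse/packet queues, derive
 * missing dts values from a small pts reorder buffer, and set first_dts,
 * cur_dts and start_time (with wrap correction where needed).
 */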
926static void update_initial_timestamps(AVFormatContext *s, int stream_index,
927 int64_t dts, int64_t pts, AVPacket *pkt)
928{
929 AVStream *st= s->streams[stream_index];
930 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
931 int64_t pts_buffer[MAX_REORDER_DELAY+1];
932 int64_t shift;
933 int i, delay;
934
935 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
936 return;
937
938 delay = st->codec->has_b_frames;
939 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
940 st->cur_dts= dts;
941 shift = st->first_dts - RELATIVE_TS_BASE;
942
943 for (i=0; i<MAX_REORDER_DELAY+1; i++)
944 pts_buffer[i] = AV_NOPTS_VALUE;
945
946 if (is_relative(pts))
947 pts += shift;
948
949 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
950 if(pktl->pkt.stream_index != stream_index)
951 continue;
952 if(is_relative(pktl->pkt.pts))
953 pktl->pkt.pts += shift;
954
955 if(is_relative(pktl->pkt.dts))
956 pktl->pkt.dts += shift;
957
958 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
959 st->start_time= pktl->pkt.pts;
960
961 if(pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
962 pts_buffer[0]= pktl->pkt.pts;
963 for(i=0; i<delay && pts_buffer[i] > pts_buffer[i+1]; i++)
964 FFSWAP(int64_t, pts_buffer[i], pts_buffer[i+1]);
965 if(pktl->pkt.dts == AV_NOPTS_VALUE)
966 pktl->pkt.dts= pts_buffer[0];
967 }
968 }
969
970 if (update_wrap_reference(s, st, stream_index) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
971 // correct first time stamps to negative values
972 st->first_dts = wrap_timestamp(st, st->first_dts);
973 st->cur_dts = wrap_timestamp(st, st->cur_dts);
974 pkt->dts = wrap_timestamp(st, pkt->dts);
975 pkt->pts = wrap_timestamp(st, pkt->pts);
976 pts = wrap_timestamp(st, pts);
977 }
978
979 if (st->start_time == AV_NOPTS_VALUE)
980 st->start_time = pts;
981}
982
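/**
 * Once a packet duration becomes known, back-fill dts (and pts for streams
 * without B-frames) and duration of the previously queued packets of this
 * stream that carry no timing information yet.
 */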
983static void update_initial_durations(AVFormatContext *s, AVStream *st,
984 int stream_index, int duration)
985{
986 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
987 int64_t cur_dts= RELATIVE_TS_BASE;
988
989 if(st->first_dts != AV_NOPTS_VALUE){
990 cur_dts= st->first_dts;
991 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
992 if(pktl->pkt.stream_index == stream_index){
993 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
994 break;
995 cur_dts -= duration;
996 }
997 }
998 if(pktl && pktl->pkt.dts != st->first_dts) {
999 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %d) in the queue\n",
1000 av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
1001 return;
1002 }
1003 if(!pktl) {
1004 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
1005 return;
1006 }
1007 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
1008 st->first_dts = cur_dts;
1009 }else if(st->cur_dts != RELATIVE_TS_BASE)
1010 return;
1011
1012 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
1013 if(pktl->pkt.stream_index != stream_index)
1014 continue;
1015 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
1016 && !pktl->pkt.duration){
1017 pktl->pkt.dts= cur_dts;
1018 if(!st->codec->has_b_frames)
1019 pktl->pkt.pts= cur_dts;
1020// if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
1021 pktl->pkt.duration = duration;
1022 }else
1023 break;
1024 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
1025 }
1026 if(!pktl)
1027 st->cur_dts= cur_dts;
1028}
1029
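/**
 * Fill in the missing timestamp fields of a demuxed or parsed packet:
 * estimate the duration, detect and undo timestamp wrap-around, and
 * interpolate missing pts/dts from the stream's cur_dts and pts reorder
 * buffer. Also sets the keyframe flag for intra-only codecs.
 */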
1030static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
1031 AVCodecParserContext *pc, AVPacket *pkt)
1032{
1033 int num, den, presentation_delayed, delay, i;
1034 int64_t offset;
1035
1036 if (s->flags & AVFMT_FLAG_NOFILLIN)
1037 return;
1038
1039 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1040 pkt->dts= AV_NOPTS_VALUE;
1041
1042 if (st->codec->codec_id != AV_CODEC_ID_H264 && pc && pc->pict_type == AV_PICTURE_TYPE_B)
1043 //FIXME Set low_delay = 0 when has_b_frames = 1
1044 st->codec->has_b_frames = 1;
1045
1046 /* do we have a video B-frame ? */
1047 delay= st->codec->has_b_frames;
1048 presentation_delayed = 0;
1049
1050 /* XXX: need has_b_frames, but cannot get it if the codec is
1051 not initialized */
1052 if (delay &&
1053 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1054 presentation_delayed = 1;
1055
1056 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
1057 st->pts_wrap_bits < 63 &&
1058 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
1059 if(is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > st->cur_dts) {
1060 pkt->dts -= 1LL<<st->pts_wrap_bits;
1061 } else
1062 pkt->pts += 1LL<<st->pts_wrap_bits;
1063 }
1064
1065 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1066 // we take the conservative approach and discard both
1067 // Note: if this is misbehaving for an H.264 file then presentation_delayed is possibly not set correctly.
1068 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1069 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1070 if(strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")) // otherwise we discard correct timestamps for vc1-wmapro.ism
1071 pkt->dts= AV_NOPTS_VALUE;
1072 }
1073
1074 if (pkt->duration == 0) {
1075 ff_compute_frame_duration(&num, &den, st, pc, pkt);
1076 if (den && num) {
1077 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1078 }
1079 }
1080 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1081 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1082
1083 /* correct timestamps with byte offset if demuxers only have timestamps
1084 on packet boundaries */
1085 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1086 /* this will estimate bitrate based on this frame's duration and size */
1087 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1088 if(pkt->pts != AV_NOPTS_VALUE)
1089 pkt->pts += offset;
1090 if(pkt->dts != AV_NOPTS_VALUE)
1091 pkt->dts += offset;
1092 }
1093
1094 /* This may be redundant, but it should not hurt. */
1095 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1096 presentation_delayed = 1;
1097
1098 av_dlog(NULL, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1099 presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1100 /* interpolate PTS and DTS if they are not present */
1101 //We skip H264 currently because delay and has_b_frames are not reliably set
1102 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
1103 if (presentation_delayed) {
1104 /* DTS = decompression timestamp */
1105 /* PTS = presentation timestamp */
1106 if (pkt->dts == AV_NOPTS_VALUE)
1107 pkt->dts = st->last_IP_pts;
1108 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1109 if (pkt->dts == AV_NOPTS_VALUE)
1110 pkt->dts = st->cur_dts;
1111
1112 /* this is tricky: the dts must be incremented by the duration
1113 of the frame we are displaying, i.e. the last I- or P-frame */
1114 if (st->last_IP_duration == 0)
1115 st->last_IP_duration = pkt->duration;
1116 if(pkt->dts != AV_NOPTS_VALUE)
1117 st->cur_dts = pkt->dts + st->last_IP_duration;
1118 st->last_IP_duration = pkt->duration;
1119 st->last_IP_pts= pkt->pts;
1120 /* cannot compute PTS if not present (we can compute it only
1121 by knowing the future) */
1122 } else if (pkt->pts != AV_NOPTS_VALUE ||
1123 pkt->dts != AV_NOPTS_VALUE ||
1124 pkt->duration ) {
1125 int duration = pkt->duration;
1126
1127 /* presentation is not delayed: PTS and DTS are the same */
1128 if (pkt->pts == AV_NOPTS_VALUE)
1129 pkt->pts = pkt->dts;
1130 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1131 pkt->pts, pkt);
1132 if (pkt->pts == AV_NOPTS_VALUE)
1133 pkt->pts = st->cur_dts;
1134 pkt->dts = pkt->pts;
1135 if (pkt->pts != AV_NOPTS_VALUE)
1136 st->cur_dts = pkt->pts + duration;
1137 }
1138 }
1139
1140 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
1141 st->pts_buffer[0]= pkt->pts;
1142 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1143 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1144 if(pkt->dts == AV_NOPTS_VALUE)
1145 pkt->dts= st->pts_buffer[0];
1146 }
1147 if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
1148 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); // this should happen on the first packet
1149 }
1150 if(pkt->dts > st->cur_dts)
1151 st->cur_dts = pkt->dts;
1152
1153 av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1154 presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1155
1156 /* update flags */
1157 if (is_intra_only(st->codec))
1158 pkt->flags |= AV_PKT_FLAG_KEY;
1159 if (pc)
1160 pkt->convergence_duration = pc->convergence_duration;
1161}
1162
1163static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1164{
1165 while (*pkt_buf) {
1166 AVPacketList *pktl = *pkt_buf;
1167 *pkt_buf = pktl->next;
1168 av_free_packet(&pktl->pkt);
1169 av_freep(&pktl);
1170 }
1171 *pkt_buf_end = NULL;
1172}
1173
1174/**
1175 * Parse a packet, add all split parts to parse_queue
1176 *
1177 * @param pkt packet to parse, NULL when flushing the parser at end of stream
1178 */
1179static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1180{
1181 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1182 AVStream *st = s->streams[stream_index];
1183 uint8_t *data = pkt ? pkt->data : NULL;
1184 int size = pkt ? pkt->size : 0;
1185 int ret = 0, got_output = 0;
1186
1187 if (!pkt) {
1188 av_init_packet(&flush_pkt);
1189 pkt = &flush_pkt;
1190 got_output = 1;
1191 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1192 // preserve 0-size sync packets
1193 compute_pkt_fields(s, st, st->parser, pkt);
1194 }
1195
1196 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1197 int len;
1198
1199 av_init_packet(&out_pkt);
1200 len = av_parser_parse2(st->parser, st->codec,
1201 &out_pkt.data, &out_pkt.size, data, size,
1202 pkt->pts, pkt->dts, pkt->pos);
1203
1204 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1205 pkt->pos = -1;
1206 /* increment read pointer */
1207 data += len;
1208 size -= len;
1209
1210 got_output = !!out_pkt.size;
1211
1212 if (!out_pkt.size)
1213 continue;
1214
1215 if (pkt->side_data) {
1216 out_pkt.side_data = pkt->side_data;
1217 out_pkt.side_data_elems = pkt->side_data_elems;
1218 pkt->side_data = NULL;
1219 pkt->side_data_elems = 0;
1220 }
1221
1222 /* set the duration */
1223 out_pkt.duration = 0;
1224 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1225 if (st->codec->sample_rate > 0) {
1226 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1227 (AVRational){ 1, st->codec->sample_rate },
1228 st->time_base,
1229 AV_ROUND_DOWN);
1230 }
1231 } else if (st->codec->time_base.num != 0 &&
1232 st->codec->time_base.den != 0) {
1233 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1234 st->codec->time_base,
1235 st->time_base,
1236 AV_ROUND_DOWN);
1237 }
1238
1239 out_pkt.stream_index = st->index;
1240 out_pkt.pts = st->parser->pts;
1241 out_pkt.dts = st->parser->dts;
1242 out_pkt.pos = st->parser->pos;
1243
1244 if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1245 out_pkt.pos = st->parser->frame_offset;
1246
1247 if (st->parser->key_frame == 1 ||
1248 (st->parser->key_frame == -1 &&
1249 st->parser->pict_type == AV_PICTURE_TYPE_I))
1250 out_pkt.flags |= AV_PKT_FLAG_KEY;
1251
1252 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1253 out_pkt.flags |= AV_PKT_FLAG_KEY;
1254
1255 compute_pkt_fields(s, st, st->parser, &out_pkt);
1256
1257 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1258 out_pkt.buf = pkt->buf;
1259 pkt->buf = NULL;
1260#if FF_API_DESTRUCT_PACKET
1261FF_DISABLE_DEPRECATION_WARNINGS
1262 out_pkt.destruct = pkt->destruct;
1263 pkt->destruct = NULL;
1264FF_ENABLE_DEPRECATION_WARNINGS
1265#endif
1266 }
1267 if ((ret = av_dup_packet(&out_pkt)) < 0)
1268 goto fail;
1269
1270 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1271 av_free_packet(&out_pkt);
1272 ret = AVERROR(ENOMEM);
1273 goto fail;
1274 }
1275 }
1276
1277
1278 /* end of the stream => close and free the parser */
1279 if (pkt == &flush_pkt) {
1280 av_parser_close(st->parser);
1281 st->parser = NULL;
1282 }
1283
1284fail:
1285 av_free_packet(pkt);
1286 return ret;
1287}
1288
1289static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1290 AVPacketList **pkt_buffer_end,
1291 AVPacket *pkt)
1292{
1293 AVPacketList *pktl;
1294 av_assert0(*pkt_buffer);
1295 pktl = *pkt_buffer;
1296 *pkt = pktl->pkt;
1297 *pkt_buffer = pktl->next;
1298 if (!pktl->next)
1299 *pkt_buffer_end = NULL;
1300 av_freep(&pktl);
1301 return 0;
1302}
1303
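/**
 * Return the next frame-worth of data as a packet: read raw packets with
 * ff_read_packet(), run them through the stream's parser when need_parsing
 * is set (the resulting pieces are collected in parse_queue), fill in
 * timestamps with compute_pkt_fields() and maintain the generic index.
 */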
1304static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1305{
1306 int ret = 0, i, got_packet = 0;
1307
1308 av_init_packet(pkt);
1309
1310 while (!got_packet && !s->parse_queue) {
1311 AVStream *st;
1312 AVPacket cur_pkt;
1313
1314 /* read next packet */
1315 ret = ff_read_packet(s, &cur_pkt);
1316 if (ret < 0) {
1317 if (ret == AVERROR(EAGAIN))
1318 return ret;
1319 /* flush the parsers */
1320 for(i = 0; i < s->nb_streams; i++) {
1321 st = s->streams[i];
1322 if (st->parser && st->need_parsing)
1323 parse_packet(s, NULL, st->index);
1324 }
1325 /* all remaining packets are now in parse_queue =>
1326 * really terminate parsing */
1327 break;
1328 }
1329 ret = 0;
1330 st = s->streams[cur_pkt.stream_index];
1331
1332 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1333 cur_pkt.dts != AV_NOPTS_VALUE &&
1334 cur_pkt.pts < cur_pkt.dts) {
1335 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1336 cur_pkt.stream_index,
1337 av_ts2str(cur_pkt.pts),
1338 av_ts2str(cur_pkt.dts),
1339 cur_pkt.size);
1340 }
1341 if (s->debug & FF_FDEBUG_TS)
1342 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1343 cur_pkt.stream_index,
1344 av_ts2str(cur_pkt.pts),
1345 av_ts2str(cur_pkt.dts),
1346 cur_pkt.size,
1347 cur_pkt.duration,
1348 cur_pkt.flags);
1349
1350 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1351 st->parser = av_parser_init(st->codec->codec_id);
1352 if (!st->parser) {
1353 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1354 "%s, packets or times may be invalid.\n",
1355 avcodec_get_name(st->codec->codec_id));
1356 /* no parser available: just output the raw packets */
1357 st->need_parsing = AVSTREAM_PARSE_NONE;
1358 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1359 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1360 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1361 st->parser->flags |= PARSER_FLAG_ONCE;
1362 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1363 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1364 }
1365 }
1366
1367 if (!st->need_parsing || !st->parser) {
1368 /* no parsing needed: we just output the packet as is */
1369 *pkt = cur_pkt;
1370 compute_pkt_fields(s, st, NULL, pkt);
1371 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1372 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1373 ff_reduce_index(s, st->index);
1374 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1375 }
1376 got_packet = 1;
1377 } else if (st->discard < AVDISCARD_ALL) {
1378 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1379 return ret;
1380 } else {
1381 /* free packet */
1382 av_free_packet(&cur_pkt);
1383 }
1384 if (pkt->flags & AV_PKT_FLAG_KEY)
1385 st->skip_to_keyframe = 0;
1386 if (st->skip_to_keyframe) {
1387 av_free_packet(&cur_pkt);
1388 if (got_packet) {
1389 *pkt = cur_pkt;
1390 }
1391 got_packet = 0;
1392 }
1393 }
1394
1395 if (!got_packet && s->parse_queue)
1396 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1397
1398 if(s->debug & FF_FDEBUG_TS)
1399 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1400 pkt->stream_index,
1401 av_ts2str(pkt->pts),
1402 av_ts2str(pkt->dts),
1403 pkt->size,
1404 pkt->duration,
1405 pkt->flags);
1406
1407 return ret;
1408}
1409
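/*
 * Illustrative demuxing loop from user code (sketch only; assumes 's' was
 * opened with avformat_open_input()):
 *
 *     AVPacket pkt;
 *     while (av_read_frame(s, &pkt) >= 0) {
 *         // ... consume pkt.data / pkt.size ...
 *         av_free_packet(&pkt);
 *     }
 */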
1410int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1411{
1412 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1413 int eof = 0;
1414 int ret;
1415 AVStream *st;
1416
1417 if (!genpts) {
1418 ret = s->packet_buffer ?
1419 read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt) :
1420 read_frame_internal(s, pkt);
1421 if (ret < 0)
1422 return ret;
1423 goto return_packet;
1424 }
1425
1426 for (;;) {
1427 AVPacketList *pktl = s->packet_buffer;
1428
1429 if (pktl) {
1430 AVPacket *next_pkt = &pktl->pkt;
1431
1432 if (next_pkt->dts != AV_NOPTS_VALUE) {
1433 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1434 // last dts seen for this stream. If any of the packets following
1435 // the current one have no dts, we will set this to AV_NOPTS_VALUE.
1436 int64_t last_dts = next_pkt->dts;
1437 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1438 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1439 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1440 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1441 next_pkt->pts = pktl->pkt.dts;
1442 }
1443 if (last_dts != AV_NOPTS_VALUE) {
1444 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1445 last_dts = pktl->pkt.dts;
1446 }
1447 }
1448 pktl = pktl->next;
1449 }
1450 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1451 // Fix the case where the last reference frame has no pts (e.g. MXF).
1452 // We only do this when:
1453 // 1. we are at eof,
1454 // 2. we are not able to resolve a pts value for the current packet, and
1455 // 3. the packets for this stream at the end of the file had a valid dts.
1456 next_pkt->pts = last_dts + next_pkt->duration;
1457 }
1458 pktl = s->packet_buffer;
1459 }
1460
1461 /* read packet from packet buffer, if there is data */
1462 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1463 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1464 ret = read_from_packet_buffer(&s->packet_buffer,
1465 &s->packet_buffer_end, pkt);
1466 goto return_packet;
1467 }
1468 }
1469
1470 ret = read_frame_internal(s, pkt);
1471 if (ret < 0) {
1472 if (pktl && ret != AVERROR(EAGAIN)) {
1473 eof = 1;
1474 continue;
1475 } else
1476 return ret;
1477 }
1478
1479 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1480 &s->packet_buffer_end)) < 0)
1481 return AVERROR(ENOMEM);
1482 }
1483
1484return_packet:
1485
1486 st = s->streams[pkt->stream_index];
1487 if (st->skip_samples) {
1488 uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1489 if (p) {
1490 AV_WL32(p, st->skip_samples);
1491 av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1492 }
1493 st->skip_samples = 0;
1494 }
1495
1496 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1497 ff_reduce_index(s, st->index);
1498 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1499 }
1500
1501 if (is_relative(pkt->dts))
1502 pkt->dts -= RELATIVE_TS_BASE;
1503 if (is_relative(pkt->pts))
1504 pkt->pts -= RELATIVE_TS_BASE;
1505
1506 return ret;
1507}
1508
1509/* XXX: remove the packet queue entirely */
1510static void flush_packet_queue(AVFormatContext *s)
1511{
1512 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1513 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1514 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1515
1516 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1517}
1518
1519/*******************************************************/
1520/* seek support */
1521
1522int av_find_default_stream_index(AVFormatContext *s)
1523{
1524 int first_audio_index = -1;
1525 int i;
1526 AVStream *st;
1527
1528 if (s->nb_streams <= 0)
1529 return -1;
1530 for(i = 0; i < s->nb_streams; i++) {
1531 st = s->streams[i];
1532 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1533 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1534 return i;
1535 }
1536 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1537 first_audio_index = i;
1538 }
1539 return first_audio_index >= 0 ? first_audio_index : 0;
1540}
1541
1542/**
1543 * Flush the frame reader.
1544 */
1545void ff_read_frame_flush(AVFormatContext *s)
1546{
1547 AVStream *st;
1548 int i, j;
1549
1550 flush_packet_queue(s);
1551
1552 /* for each stream, reset read state */
1553 for(i = 0; i < s->nb_streams; i++) {
1554 st = s->streams[i];
1555
1556 if (st->parser) {
1557 av_parser_close(st->parser);
1558 st->parser = NULL;
1559 }
1560 st->last_IP_pts = AV_NOPTS_VALUE;
1561 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1562 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1563
1564 st->probe_packets = MAX_PROBE_PACKETS;
1565
1566 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1567 st->pts_buffer[j]= AV_NOPTS_VALUE;
1568 }
1569}
1570
1571void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1572{
1573 int i;
1574
1575 for(i = 0; i < s->nb_streams; i++) {
1576 AVStream *st = s->streams[i];
1577
1578 st->cur_dts = av_rescale(timestamp,
1579 st->time_base.den * (int64_t)ref_st->time_base.num,
1580 st->time_base.num * (int64_t)ref_st->time_base.den);
1581 }
1582}
1583
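/**
 * Keep the index of the given stream below s->max_index_size bytes by
 * dropping every second entry once the limit is reached.
 */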
1584void ff_reduce_index(AVFormatContext *s, int stream_index)
1585{
1586 AVStream *st= s->streams[stream_index];
1587 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1588
1589 if((unsigned)st->nb_index_entries >= max_entries){
1590 int i;
1591 for(i=0; 2*i<st->nb_index_entries; i++)
1592 st->index_entries[i]= st->index_entries[2*i];
1593 st->nb_index_entries= i;
1594 }
1595}
1596
1597int ff_add_index_entry(AVIndexEntry **index_entries,
1598 int *nb_index_entries,
1599 unsigned int *index_entries_allocated_size,
1600 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1601{
1602 AVIndexEntry *entries, *ie;
1603 int index;
1604
1605 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1606 return -1;
1607
1608 if(timestamp == AV_NOPTS_VALUE)
1609 return AVERROR(EINVAL);
1610
1611 if (size < 0 || size > 0x3FFFFFFF)
1612 return AVERROR(EINVAL);
1613
1614 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1615 timestamp -= RELATIVE_TS_BASE;
1616
1617 entries = av_fast_realloc(*index_entries,
1618 index_entries_allocated_size,
1619 (*nb_index_entries + 1) *
1620 sizeof(AVIndexEntry));
1621 if(!entries)
1622 return -1;
1623
1624 *index_entries= entries;
1625
1626 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
1627
1628 if(index<0){
1629 index= (*nb_index_entries)++;
1630 ie= &entries[index];
1631 av_assert0(index==0 || ie[-1].timestamp < timestamp);
1632 }else{
1633 ie= &entries[index];
1634 if(ie->timestamp != timestamp){
1635 if(ie->timestamp <= timestamp)
1636 return -1;
1637 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1638 (*nb_index_entries)++;
1639 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1640 distance= ie->min_distance;
1641 }
1642
1643 ie->pos = pos;
1644 ie->timestamp = timestamp;
1645 ie->min_distance= distance;
1646 ie->size= size;
1647 ie->flags = flags;
1648
1649 return index;
1650}
1651
1652int av_add_index_entry(AVStream *st,
1653 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1654{
1655 timestamp = wrap_timestamp(st, timestamp);
1656 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1657 &st->index_entries_allocated_size, pos,
1658 timestamp, size, distance, flags);
1659}
1660
1661int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1662 int64_t wanted_timestamp, int flags)
1663{
1664 int a, b, m;
1665 int64_t timestamp;
1666
1667 a = - 1;
1668 b = nb_entries;
1669
1670 //optimize appending index entries at the end
1671 if(b && entries[b-1].timestamp < wanted_timestamp)
1672 a= b-1;
1673
1674 while (b - a > 1) {
1675 m = (a + b) >> 1;
1676 timestamp = entries[m].timestamp;
1677 if(timestamp >= wanted_timestamp)
1678 b = m;
1679 if(timestamp <= wanted_timestamp)
1680 a = m;
1681 }
1682 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1683
1684 if(!(flags & AVSEEK_FLAG_ANY)){
1685 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1686 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1687 }
1688 }
1689
1690 if(m == nb_entries)
1691 return -1;
1692 return m;
1693}
1694
1695int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1696 int flags)
1697{
1698 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1699 wanted_timestamp, flags);
1700}
1701
1702static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1703 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1704{
1705 int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1706 if (stream_index >= 0)
1707 ts = wrap_timestamp(s->streams[stream_index], ts);
1708 return ts;
1709}
1710
1711int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1712{
1713 AVInputFormat *avif= s->iformat;
1714 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1715 int64_t ts_min, ts_max, ts;
1716 int index;
1717 int64_t ret;
1718 AVStream *st;
1719
1720 if (stream_index < 0)
1721 return -1;
1722
1723 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1724
1725 ts_max=
1726 ts_min= AV_NOPTS_VALUE;
1727 pos_limit= -1; //gcc falsely says it may be uninitialized
1728
1729 st= s->streams[stream_index];
1730 if(st->index_entries){
1731 AVIndexEntry *e;
1732
1733 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1734 index= FFMAX(index, 0);
1735 e= &st->index_entries[index];
1736
1737 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1738 pos_min= e->pos;
1739 ts_min= e->timestamp;
1740 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1741 pos_min, av_ts2str(ts_min));
1742 }else{
1743 av_assert1(index==0);
1744 }
1745
1746 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1747 av_assert0(index < st->nb_index_entries);
1748 if(index >= 0){
1749 e= &st->index_entries[index];
1750 av_assert1(e->timestamp >= target_ts);
1751 pos_max= e->pos;
1752 ts_max= e->timestamp;
1753 pos_limit= pos_max - e->min_distance;
1754 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1755 pos_max, pos_limit, av_ts2str(ts_max));
1756 }
1757 }
1758
1759 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1760 if(pos<0)
1761 return -1;
1762
1763 /* do the seek */
1764 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1765 return ret;
1766
1767 ff_read_frame_flush(s);
1768 ff_update_cur_dts(s, st, ts);
1769
1770 return 0;
1771}
1772
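/**
 * Find the last timestamp in the file by probing backwards from the end with
 * an exponentially growing step, then stepping forward to the last readable
 * timestamp. The timestamp is stored in *ts and its position in *pos.
 *
 * @return 0 on success, -1 if no timestamp could be read
 */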
1773int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
1774 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1775{
1776 int64_t step= 1024;
1777 int64_t limit, ts_max;
1778 int64_t filesize = avio_size(s->pb);
1779 int64_t pos_max = filesize - 1;
1780 do{
1781 limit = pos_max;
1782 pos_max = FFMAX(0, (pos_max) - step);
1783 ts_max = ff_read_timestamp(s, stream_index, &pos_max, limit, read_timestamp);
1784 step += step;
1785 }while(ts_max == AV_NOPTS_VALUE && 2*limit > step);
1786 if (ts_max == AV_NOPTS_VALUE)
1787 return -1;
1788
1789 for(;;){
1790 int64_t tmp_pos = pos_max + 1;
1791 int64_t tmp_ts = ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp);
1792 if(tmp_ts == AV_NOPTS_VALUE)
1793 break;
1794 av_assert0(tmp_pos > pos_max);
1795 ts_max = tmp_ts;
1796 pos_max = tmp_pos;
1797 if(tmp_pos >= filesize)
1798 break;
1799 }
1800
1801 if (ts)
1802 *ts = ts_max;
1803 if (pos)
1804 *pos = pos_max;
1805
1806 return 0;
1807}
1808
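/**
 * Search for the file position whose timestamp best matches target_ts,
 * interpolating between the known bounds and falling back to bisection and
 * finally linear search when the interval stops shrinking.
 *
 * @param ts_ret set to the timestamp found at the returned position
 * @return the file position on success, -1 on failure
 */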
1809int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1810 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1811 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1812 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1813{
1814 int64_t pos, ts;
1815 int64_t start_pos;
1816 int no_change;
1817 int ret;
1818
1819 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1820
1821 if(ts_min == AV_NOPTS_VALUE){
1822 pos_min = s->data_offset;
1823 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1824 if (ts_min == AV_NOPTS_VALUE)
1825 return -1;
1826 }
1827
1828 if(ts_min >= target_ts){
1829 *ts_ret= ts_min;
1830 return pos_min;
1831 }
1832
1833 if(ts_max == AV_NOPTS_VALUE){
1834 if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
1835 return ret;
1836 pos_limit= pos_max;
1837 }
1838
1839 if(ts_max <= target_ts){
1840 *ts_ret= ts_max;
1841 return pos_max;
1842 }
1843
1844 if(ts_min > ts_max){
1845 return -1;
1846 }else if(ts_min == ts_max){
1847 pos_limit= pos_min;
1848 }
1849
1850 no_change=0;
1851 while (pos_min < pos_limit) {
1852 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1853 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1854 assert(pos_limit <= pos_max);
1855
1856 if(no_change==0){
1857 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1858 // interpolate position (better than dichotomy)
1859 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1860 + pos_min - approximate_keyframe_distance;
1861 }else if(no_change==1){
1862 // bisection, if interpolation failed to change min or max pos last time
1863 pos = (pos_min + pos_limit)>>1;
1864 }else{
1865 /* linear search if bisection failed, can only happen if there
1866 are very few or no keyframes between min/max */
1867 pos=pos_min;
1868 }
1869 if(pos <= pos_min)
1870 pos= pos_min + 1;
1871 else if(pos > pos_limit)
1872 pos= pos_limit;
1873 start_pos= pos;
1874
1875 ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1
1876 if(pos == pos_max)
1877 no_change++;
1878 else
1879 no_change=0;
1880 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1881 pos_min, pos, pos_max,
1882 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1883 pos_limit, start_pos, no_change);
1884 if(ts == AV_NOPTS_VALUE){
1885 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1886 return -1;
1887 }
1888 assert(ts != AV_NOPTS_VALUE);
1889 if (target_ts <= ts) {
1890 pos_limit = start_pos - 1;
1891 pos_max = pos;
1892 ts_max = ts;
1893 }
1894 if (target_ts >= ts) {
1895 pos_min = pos;
1896 ts_min = ts;
1897 }
1898 }
1899
1900 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1901 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1902#if 0
1903 pos_min = pos;
1904 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1905 pos_min++;
1906 ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1907 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1908 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
1909#endif
1910 *ts_ret= ts;
1911 return pos;
1912}
1913
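/**
 * Seek to the given byte position, clipped to the range between the start of
 * the data and the end of the file.
 */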
1914static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1915 int64_t pos_min, pos_max;
1916
1917 pos_min = s->data_offset;
1918 pos_max = avio_size(s->pb) - 1;
1919
1920 if (pos < pos_min) pos= pos_min;
1921 else if(pos > pos_max) pos= pos_max;
1922
1923 avio_seek(s->pb, pos, SEEK_SET);
1924
1925 s->io_repositioned = 1;
1926
1927 return 0;
1928}
1929
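/**
 * Seek using the stream's index entries; if no usable entry exists or the
 * target lies beyond the last indexed entry, packets are read (and thereby
 * indexed) until a keyframe past the target timestamp is found.
 */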
1930static int seek_frame_generic(AVFormatContext *s,
1931 int stream_index, int64_t timestamp, int flags)
1932{
1933 int index;
1934 int64_t ret;
1935 AVStream *st;
1936 AVIndexEntry *ie;
1937
1938 st = s->streams[stream_index];
1939
1940 index = av_index_search_timestamp(st, timestamp, flags);
1941
1942 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1943 return -1;
1944
1945 if(index < 0 || index==st->nb_index_entries-1){
1946 AVPacket pkt;
1947 int nonkey=0;
1948
1949 if(st->nb_index_entries){
1950 av_assert0(st->index_entries);
1951 ie= &st->index_entries[st->nb_index_entries-1];
1952 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1953 return ret;
1954 ff_update_cur_dts(s, st, ie->timestamp);
1955 }else{
1956 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1957 return ret;
1958 }
1959 for (;;) {
1960 int read_status;
1961 do{
1962 read_status = av_read_frame(s, &pkt);
1963 } while (read_status == AVERROR(EAGAIN));
1964 if (read_status < 0)
1965 break;
1966 av_free_packet(&pkt);
1967 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1968 if(pkt.flags & AV_PKT_FLAG_KEY)
1969 break;
1970 if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){
1971 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1972 break;
1973 }
1974 }
1975 }
1976 index = av_index_search_timestamp(st, timestamp, flags);
1977 }
1978 if (index < 0)
1979 return -1;
1980
1981 ff_read_frame_flush(s);
1982 if (s->iformat->read_seek){
1983 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1984 return 0;
1985 }
1986 ie = &st->index_entries[index];
1987 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1988 return ret;
1989 ff_update_cur_dts(s, st, ie->timestamp);
1990
1991 return 0;
1992}
1993
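/**
 * Dispatch a seek request: byte seeking, the demuxer's read_seek(), binary
 * search via read_timestamp(), or the generic index-based seek, in that order
 * of preference.
 */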
1994static int seek_frame_internal(AVFormatContext *s, int stream_index,
1995 int64_t timestamp, int flags)
1996{
1997 int ret;
1998 AVStream *st;
1999
2000 if (flags & AVSEEK_FLAG_BYTE) {
2001 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
2002 return -1;
2003 ff_read_frame_flush(s);
2004 return seek_frame_byte(s, stream_index, timestamp, flags);
2005 }
2006
2007 if(stream_index < 0){
2008 stream_index= av_find_default_stream_index(s);
2009 if(stream_index < 0)
2010 return -1;
2011
2012 st= s->streams[stream_index];
2013 /* timestamp for default must be expressed in AV_TIME_BASE units */
2014 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
2015 }
2016
2017 /* first, we try the format specific seek */
2018 if (s->iformat->read_seek) {
2019 ff_read_frame_flush(s);
2020 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
2021 } else
2022 ret = -1;
2023 if (ret >= 0) {
2024 return 0;
2025 }
2026
2027 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
2028 ff_read_frame_flush(s);
2029 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
2030 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
2031 ff_read_frame_flush(s);
2032 return seek_frame_generic(s, stream_index, timestamp, flags);
2033 }
2034 else
2035 return -1;
2036}
2037
2038int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2039{
2040 int ret;
2041
2042 if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2043 int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
2044 if ((flags & AVSEEK_FLAG_BACKWARD))
2045 max_ts = timestamp;
2046 else
2047 min_ts = timestamp;
2048 return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2049 flags & ~AVSEEK_FLAG_BACKWARD);
2050 }
2051
2052 ret = seek_frame_internal(s, stream_index, timestamp, flags);
2053
2054 if (ret >= 0)
2055 ret = avformat_queue_attached_pictures(s);
2056
2057 return ret;
2058}
2059
2060int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
2061{
2062 if(min_ts > ts || max_ts < ts)
2063 return -1;
2064 if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2065 return AVERROR(EINVAL);
2066
2067 if(s->seek2any>0)
2068 flags |= AVSEEK_FLAG_ANY;
2069 flags &= ~AVSEEK_FLAG_BACKWARD;
2070
2071 if (s->iformat->read_seek2) {
2072 int ret;
2073 ff_read_frame_flush(s);
2074
2075 if (stream_index == -1 && s->nb_streams == 1) {
2076 AVRational time_base = s->streams[0]->time_base;
2077 ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
2078 min_ts = av_rescale_rnd(min_ts, time_base.den,
2079 time_base.num * (int64_t)AV_TIME_BASE,
2080 AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
2081 max_ts = av_rescale_rnd(max_ts, time_base.den,
2082 time_base.num * (int64_t)AV_TIME_BASE,
2083 AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
2084 }
2085
2086 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
2087
2088 if (ret >= 0)
2089 ret = avformat_queue_attached_pictures(s);
2090 return ret;
2091 }
2092
2093 if(s->iformat->read_timestamp){
2094 //try to seek via read_timestamp()
2095 }
2096
2097 // Fall back on old API if new is not implemented but old is.
2098 // Note the old API has somewhat different semantics.
2099 if (s->iformat->read_seek || 1) {
2100 int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2101 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2102 if (ret<0 && ts != min_ts && max_ts != ts) {
2103 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2104 if (ret >= 0)
2105 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2106 }
2107 return ret;
2108 }
2109
2110 // try some generic seek like seek_frame_generic() but with new ts semantics
2111 return -1; //unreachable
2112}
2113
2114/*******************************************************/
2115
2116/**
2117 * Return TRUE if the format context has an accurate duration for at least one component.
2118 *
2119 * @return TRUE if at least one stream, or the context itself, has a known duration.
2120 */
2121static int has_duration(AVFormatContext *ic)
2122{
2123 int i;
2124 AVStream *st;
2125
2126 for(i = 0;i < ic->nb_streams; i++) {
2127 st = ic->streams[i];
2128 if (st->duration != AV_NOPTS_VALUE)
2129 return 1;
2130 }
2131 if (ic->duration != AV_NOPTS_VALUE)
2132 return 1;
2133 return 0;
2134}
2135
2136/**
2137 * Estimate the global stream timings from those of the individual streams.
2138 *
2139 * Also computes the global bitrate if possible.
2140 */
2141static void update_stream_timings(AVFormatContext *ic)
2142{
2143 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2144 int64_t duration, duration1, filesize;
2145 int i;
2146 AVStream *st;
2147 AVProgram *p;
2148
2149 start_time = INT64_MAX;
2150 start_time_text = INT64_MAX;
2151 end_time = INT64_MIN;
2152 duration = INT64_MIN;
2153 for(i = 0;i < ic->nb_streams; i++) {
2154 st = ic->streams[i];
2155 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2156 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2157 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2158 if (start_time1 < start_time_text)
2159 start_time_text = start_time1;
2160 } else
2161 start_time = FFMIN(start_time, start_time1);
2162 end_time1 = AV_NOPTS_VALUE;
2163 if (st->duration != AV_NOPTS_VALUE) {
2164 end_time1 = start_time1
2165 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2166 end_time = FFMAX(end_time, end_time1);
2167 }
2168 for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
2169 if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2170 p->start_time = start_time1;
2171 if(p->end_time < end_time1)
2172 p->end_time = end_time1;
2173 }
2174 }
2175 if (st->duration != AV_NOPTS_VALUE) {
2176 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2177 duration = FFMAX(duration, duration1);
2178 }
2179 }
2180 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2181 start_time = start_time_text;
2182 else if(start_time > start_time_text)
2183 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2184
2185 if (start_time != INT64_MAX) {
2186 ic->start_time = start_time;
2187 if (end_time != INT64_MIN) {
2188 if (ic->nb_programs) {
2189 for (i=0; i<ic->nb_programs; i++) {
2190 p = ic->programs[i];
2191 if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2192 duration = FFMAX(duration, p->end_time - p->start_time);
2193 }
2194 } else
2195 duration = FFMAX(duration, end_time - start_time);
2196 }
2197 }
2198 if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2199 ic->duration = duration;
2200 }
2201 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2202 /* compute the bitrate */
2203 double bitrate = (double)filesize * 8.0 * AV_TIME_BASE /
2204 (double)ic->duration;
2205 if (bitrate >= 0 && bitrate <= INT_MAX)
2206 ic->bit_rate = bitrate;
2207 }
2208}
2209
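/**
 * Update the container timings and propagate them to every stream that does
 * not have a start time of its own.
 */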
2210static void fill_all_stream_timings(AVFormatContext *ic)
2211{
2212 int i;
2213 AVStream *st;
2214
2215 update_stream_timings(ic);
2216 for(i = 0;i < ic->nb_streams; i++) {
2217 st = ic->streams[i];
2218 if (st->start_time == AV_NOPTS_VALUE) {
2219 if(ic->start_time != AV_NOPTS_VALUE)
2220 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2221 if(ic->duration != AV_NOPTS_VALUE)
2222 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
2223 }
2224 }
2225}
2226
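/**
 * Estimate the total bit rate from the per-stream bit rates and, when the
 * duration is still unknown, derive per-stream durations from the file size
 * and that bit rate.
 */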
2227static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2228{
2229 int64_t filesize, duration;
2230 int i, show_warning = 0;
2231 AVStream *st;
2232
2233 /* if bit_rate is already set, we believe it */
2234 if (ic->bit_rate <= 0) {
2235 int bit_rate = 0;
2236 for(i=0;i<ic->nb_streams;i++) {
2237 st = ic->streams[i];
2238 if (st->codec->bit_rate > 0) {
2239 if (INT_MAX - st->codec->bit_rate < bit_rate) {
2240 bit_rate = 0;
2241 break;
2242 }
2243 bit_rate += st->codec->bit_rate;
2244 }
2245 }
2246 ic->bit_rate = bit_rate;
2247 }
2248
2249 /* if duration is already set, we believe it */
2250 if (ic->duration == AV_NOPTS_VALUE &&
2251 ic->bit_rate != 0) {
2252 filesize = ic->pb ? avio_size(ic->pb) : 0;
2253 if (filesize > 0) {
2254 for(i = 0; i < ic->nb_streams; i++) {
2255 st = ic->streams[i];
2256 if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2257 && st->duration == AV_NOPTS_VALUE) {
2258 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2259 st->duration = duration;
2260 show_warning = 1;
2261 }
2262 }
2263 }
2264 }
2265 if (show_warning)
2266 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
2267}
2268
2269#define DURATION_MAX_READ_SIZE 250000LL
2270#define DURATION_MAX_RETRY 6
2271
2272/* only usable for MPEG-PS/TS style streams; called from estimate_timings() for the "mpeg" and "mpegts" demuxers */
2273static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2274{
2275 AVPacket pkt1, *pkt = &pkt1;
2276 AVStream *st;
2277 int read_size, i, ret;
2278 int64_t end_time;
2279 int64_t filesize, offset, duration;
2280 int retry=0;
2281
2282 /* flush packet queue */
2283 flush_packet_queue(ic);
2284
2285 for (i=0; i<ic->nb_streams; i++) {
2286 st = ic->streams[i];
2287 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2288 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2289
2290 if (st->parser) {
2291 av_parser_close(st->parser);
2292 st->parser= NULL;
2293 }
2294 }
2295
2296 /* estimate the end time (duration) */
2297 /* XXX: may need to support wrapping */
2298 filesize = ic->pb ? avio_size(ic->pb) : 0;
2299 end_time = AV_NOPTS_VALUE;
2300 do{
2301 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2302 if (offset < 0)
2303 offset = 0;
2304
2305 avio_seek(ic->pb, offset, SEEK_SET);
2306 read_size = 0;
2307 for(;;) {
2308 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2309 break;
2310
2311 do {
2312 ret = ff_read_packet(ic, pkt);
2313 } while(ret == AVERROR(EAGAIN));
2314 if (ret != 0)
2315 break;
2316 read_size += pkt->size;
2317 st = ic->streams[pkt->stream_index];
2318 if (pkt->pts != AV_NOPTS_VALUE &&
2319 (st->start_time != AV_NOPTS_VALUE ||
2320 st->first_dts != AV_NOPTS_VALUE)) {
2321 duration = end_time = pkt->pts;
2322 if (st->start_time != AV_NOPTS_VALUE)
2323 duration -= st->start_time;
2324 else
2325 duration -= st->first_dts;
2326 if (duration > 0) {
2327 if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<=0 ||
2328 (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2329 st->duration = duration;
2330 st->info->last_duration = duration;
2331 }
2332 }
2333 av_free_packet(pkt);
2334 }
2335 }while( end_time==AV_NOPTS_VALUE
2336 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2337 && ++retry <= DURATION_MAX_RETRY);
2338
2339 fill_all_stream_timings(ic);
2340
2341 avio_seek(ic->pb, old_offset, SEEK_SET);
2342 for (i=0; i<ic->nb_streams; i++) {
2343 st= ic->streams[i];
2344 st->cur_dts= st->first_dts;
2345 st->last_IP_pts = AV_NOPTS_VALUE;
2346 }
2347}
2348
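/**
 * Estimate start time, duration and bit rate, choosing between a PTS scan
 * (MPEG-PS/TS), the per-stream durations, or a bit rate based guess.
 */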
2349static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2350{
2351 int64_t file_size;
2352
2353 /* get the file size, if possible */
2354 if (ic->iformat->flags & AVFMT_NOFILE) {
2355 file_size = 0;
2356 } else {
2357 file_size = avio_size(ic->pb);
2358 file_size = FFMAX(0, file_size);
2359 }
2360
2361 if ((!strcmp(ic->iformat->name, "mpeg") ||
2362 !strcmp(ic->iformat->name, "mpegts")) &&
2363 file_size && ic->pb->seekable) {
2364 /* get accurate estimate from the PTSes */
2365 estimate_timings_from_pts(ic, old_offset);
2366 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2367 } else if (has_duration(ic)) {
2368 /* at least one component has timings - we use them for all
2369 the components */
2370 fill_all_stream_timings(ic);
2371 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2372 } else {
2373 /* less precise: use bitrate info */
2374 estimate_timings_from_bit_rate(ic);
2375 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2376 }
2377 update_stream_timings(ic);
2378
2379 {
2380 int i;
2381 AVStream av_unused *st;
2382 for(i = 0;i < ic->nb_streams; i++) {
2383 st = ic->streams[i];
2384 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2385 (double) st->start_time / AV_TIME_BASE,
2386 (double) st->duration / AV_TIME_BASE);
2387 }
2388 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2389 (double) ic->start_time / AV_TIME_BASE,
2390 (double) ic->duration / AV_TIME_BASE,
2391 ic->bit_rate / 1000);
2392 }
2393}
2394
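/**
 * Check whether enough codec parameters are known for the stream to be usable.
 *
 * @param errmsg_ptr if not NULL, set to a description of the missing parameter
 * @return 1 if the parameters are sufficient, 0 otherwise
 */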
2395static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2396{
2397 AVCodecContext *avctx = st->codec;
2398
2399#define FAIL(errmsg) do { \
2400 if (errmsg_ptr) \
2401 *errmsg_ptr = errmsg; \
2402 return 0; \
2403 } while (0)
2404
2405 switch (avctx->codec_type) {
2406 case AVMEDIA_TYPE_AUDIO:
2407 if (!avctx->frame_size && determinable_frame_size(avctx))
2408 FAIL("unspecified frame size");
2409 if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2410 FAIL("unspecified sample format");
2411 if (!avctx->sample_rate)
2412 FAIL("unspecified sample rate");
2413 if (!avctx->channels)
2414 FAIL("unspecified number of channels");
2415 if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2416 FAIL("no decodable DTS frames");
2417 break;
2418 case AVMEDIA_TYPE_VIDEO:
2419 if (!avctx->width)
2420 FAIL("unspecified size");
2421 if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2422 FAIL("unspecified pixel format");
2423 if (st->codec->codec_id == AV_CODEC_ID_RV30 || st->codec->codec_id == AV_CODEC_ID_RV40)
2424 if (!st->sample_aspect_ratio.num && !st->codec->sample_aspect_ratio.num && !st->codec_info_nb_frames)
2425 FAIL("no frame in rv30/40 and no sar");
2426 break;
2427 case AVMEDIA_TYPE_SUBTITLE:
2428 if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2429 FAIL("unspecified size");
2430 break;
2431 case AVMEDIA_TYPE_DATA:
2432 if(avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2433 }
2434
2435 if (avctx->codec_id == AV_CODEC_ID_NONE)
2436 FAIL("unknown codec");
2437 return 1;
2438}
2439
2440/* Return 1 if decoded data was returned, 0 if it was not, or a negative error code on failure. */
2441static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, AVDictionary **options)
2442{
2443 const AVCodec *codec;
2444 int got_picture = 1, ret = 0;
2445 AVFrame *frame = avcodec_alloc_frame();
2446 AVSubtitle subtitle;
2447 AVPacket pkt = *avpkt;
2448
2449 if (!frame)
2450 return AVERROR(ENOMEM);
2451
2452 if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2453 AVDictionary *thread_opt = NULL;
2454
2455 codec = find_decoder(s, st, st->codec->codec_id);
2456
2457 if (!codec) {
2458 st->info->found_decoder = -1;
2459 ret = -1;
2460 goto fail;
2461 }
2462
2463 /* force thread count to 1 since the h264 decoder will not extract SPS
2464 * and PPS to extradata during multi-threaded decoding */
2465 av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2466 ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2467 if (!options)
2468 av_dict_free(&thread_opt);
2469 if (ret < 0) {
2470 st->info->found_decoder = -1;
2471 goto fail;
2472 }
2473 st->info->found_decoder = 1;
2474 } else if (!st->info->found_decoder)
2475 st->info->found_decoder = 1;
2476
2477 if (st->info->found_decoder < 0) {
2478 ret = -1;
2479 goto fail;
2480 }
2481
2482 while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2483 ret >= 0 &&
2484 (!has_codec_parameters(st, NULL) ||
2485 !has_decode_delay_been_guessed(st) ||
2486 (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2487 got_picture = 0;
2488 avcodec_get_frame_defaults(frame);
2489 switch(st->codec->codec_type) {
2490 case AVMEDIA_TYPE_VIDEO:
2491 ret = avcodec_decode_video2(st->codec, frame,
2492 &got_picture, &pkt);
2493 break;
2494 case AVMEDIA_TYPE_AUDIO:
2495 ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2496 break;
2497 case AVMEDIA_TYPE_SUBTITLE:
2498 ret = avcodec_decode_subtitle2(st->codec, &subtitle,
2499 &got_picture, &pkt);
2500 ret = pkt.size;
2501 break;
2502 default:
2503 break;
2504 }
2505 if (ret >= 0) {
2506 if (got_picture)
2507 st->nb_decoded_frames++;
2508 pkt.data += ret;
2509 pkt.size -= ret;
2510 ret = got_picture;
2511 }
2512 }
2513
2514 if(!pkt.data && !got_picture)
2515 ret = -1;
2516
2517fail:
2518 avcodec_free_frame(&frame);
2519 return ret;
2520}
2521
2522unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2523{
2524 while (tags->id != AV_CODEC_ID_NONE) {
2525 if (tags->id == id)
2526 return tags->tag;
2527 tags++;
2528 }
2529 return 0;
2530}
2531
2532enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2533{
2534 int i;
2535 for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) {
2536 if(tag == tags[i].tag)
2537 return tags[i].id;
2538 }
2539 for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) {
2540 if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2541 return tags[i].id;
2542 }
2543 return AV_CODEC_ID_NONE;
2544}
2545
2546enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
2547{
2548 if (flt) {
2549 switch (bps) {
2550 case 32: return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2551 case 64: return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2552 default: return AV_CODEC_ID_NONE;
2553 }
2554 } else {
2555 bps += 7;
2556 bps >>= 3;
2557 if (sflags & (1 << (bps - 1))) {
2558 switch (bps) {
2559 case 1: return AV_CODEC_ID_PCM_S8;
2560 case 2: return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2561 case 3: return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2562 case 4: return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2563 default: return AV_CODEC_ID_NONE;
2564 }
2565 } else {
2566 switch (bps) {
2567 case 1: return AV_CODEC_ID_PCM_U8;
2568 case 2: return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2569 case 3: return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2570 case 4: return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2571 default: return AV_CODEC_ID_NONE;
2572 }
2573 }
2574 }
2575}
2576
2577unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
2578{
2579 unsigned int tag;
2580 if (!av_codec_get_tag2(tags, id, &tag))
2581 return 0;
2582 return tag;
2583}
2584
2585int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
2586 unsigned int *tag)
2587{
2588 int i;
2589 for(i=0; tags && tags[i]; i++){
2590 const AVCodecTag *codec_tags = tags[i];
2591 while (codec_tags->id != AV_CODEC_ID_NONE) {
2592 if (codec_tags->id == id) {
2593 *tag = codec_tags->tag;
2594 return 1;
2595 }
2596 codec_tags++;
2597 }
2598 }
2599 return 0;
2600}
2601
2602enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2603{
2604 int i;
2605 for(i=0; tags && tags[i]; i++){
2606 enum AVCodecID id= ff_codec_get_id(tags[i], tag);
2607 if(id!=AV_CODEC_ID_NONE) return id;
2608 }
2609 return AV_CODEC_ID_NONE;
2610}
2611
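/**
 * For every chapter without an end time, set the end to the start of the
 * following chapter, or to the end of the file (falling back to the chapter's
 * own start when even that is unknown).
 */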
2612static void compute_chapters_end(AVFormatContext *s)
2613{
2614 unsigned int i, j;
2615 int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2616
2617 for (i = 0; i < s->nb_chapters; i++)
2618 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2619 AVChapter *ch = s->chapters[i];
2620 int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
2621 : INT64_MAX;
2622
2623 for (j = 0; j < s->nb_chapters; j++) {
2624 AVChapter *ch1 = s->chapters[j];
2625 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2626 if (j != i && next_start > ch->start && next_start < end)
2627 end = next_start;
2628 }
2629 ch->end = (end == INT64_MAX) ? ch->start : end;
2630 }
2631}
2632
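/**
 * Return the i-th "standard" frame rate, scaled by 12*1001: multiples of
 * 1/12 fps up to 60 fps, followed by the common NTSC rates (24000/1001, ...).
 */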
2633static int get_std_framerate(int i){
2634 if(i<60*12) return (i+1)*1001;
2635 else return ((const int[]){24,30,60,12,15,48})[i-60*12]*1000*12;
2636}
2637
2638/*
2639 * Check whether the time base is unreliable.
2640 * This is a heuristic to balance between quick acceptance of the values in
2641 * the headers vs. some extra checks.
2642 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2643 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2644 * And there are "variable" fps files this needs to detect as well.
2645 */
2646static int tb_unreliable(AVCodecContext *c){
2647 if( c->time_base.den >= 101L*c->time_base.num
2648 || c->time_base.den < 5L*c->time_base.num
2649/* || c->codec_tag == AV_RL32("DIVX")
2650 || c->codec_tag == AV_RL32("XVID")*/
2651 || c->codec_tag == AV_RL32("mp4v")
2652 || c->codec_id == AV_CODEC_ID_MPEG2VIDEO
2653 || c->codec_id == AV_CODEC_ID_H264
2654 )
2655 return 1;
2656 return 0;
2657}
2658
2659#if FF_API_FORMAT_PARAMETERS
2660int av_find_stream_info(AVFormatContext *ic)
2661{
2662 return avformat_find_stream_info(ic, NULL);
2663}
2664#endif
2665
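/**
 * Allocate extradata of the given size plus zeroed input padding and set
 * extradata_size accordingly.
 *
 * @return 0 on success, AVERROR(EINVAL) or AVERROR(ENOMEM) on failure
 */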
2666int ff_alloc_extradata(AVCodecContext *avctx, int size)
2667{
2668 int ret;
2669
2670 if (size < 0 || size >= INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
2671 avctx->extradata_size = 0;
2672 return AVERROR(EINVAL);
2673 }
2674 avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
2675 if (avctx->extradata) {
2676 memset(avctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2677 avctx->extradata_size = size;
2678 ret = 0;
2679 } else {
2680 avctx->extradata_size = 0;
2681 ret = AVERROR(ENOMEM);
2682 }
2683 return ret;
2684}
2685
2686int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2687{
2688 int i, count, ret = 0, j;
2689 int64_t read_size;
2690 AVStream *st;
2691 AVPacket pkt1, *pkt;
2692 int64_t old_offset = avio_tell(ic->pb);
2693 int orig_nb_streams = ic->nb_streams; // new streams might appear, no options for those
2694 int flush_codecs = ic->probesize > 0;
2695
2696 if(ic->pb)
2697 av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
2698
2699 for(i=0;i<ic->nb_streams;i++) {
2700 const AVCodec *codec;
2701 AVDictionary *thread_opt = NULL;
2702 st = ic->streams[i];
2703
2704 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2705 st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2706/* if(!st->time_base.num)
2707 st->time_base= */
2708 if(!st->codec->time_base.num)
2709 st->codec->time_base= st->time_base;
2710 }
2711 // the parser is needed here only for the split() extradata handling
2712 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2713 st->parser = av_parser_init(st->codec->codec_id);
2714 if(st->parser){
2715 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2716 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2717 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2718 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2719 }
2720 } else if (st->need_parsing) {
2721 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2722 "%s, packets or times may be invalid.\n",
2723 avcodec_get_name(st->codec->codec_id));
2724 }
2725 }
2726 codec = find_decoder(ic, st, st->codec->codec_id);
2727
2728 /* force thread count to 1 since the h264 decoder will not extract SPS
2729 * and PPS to extradata during multi-threaded decoding */
2730 av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2731
2732 /* Ensure that subtitle_header is properly set. */
2733 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2734 && codec && !st->codec->codec)
2735 avcodec_open2(st->codec, codec, options ? &options[i]
2736 : &thread_opt);
2737
2738 //try to just open decoders, in case this is enough to get parameters
2739 if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
2740 if (codec && !st->codec->codec)
2741 avcodec_open2(st->codec, codec, options ? &options[i]
2742 : &thread_opt);
2743 }
2744 if (!options)
2745 av_dict_free(&thread_opt);
2746 }
2747
2748 for (i=0; i<ic->nb_streams; i++) {
2749#if FF_API_R_FRAME_RATE
2750 ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2751#endif
2752 ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2753 ic->streams[i]->info->fps_last_dts = AV_NOPTS_VALUE;
2754 }
2755
2756 count = 0;
2757 read_size = 0;
2758 for(;;) {
2759 if (ff_check_interrupt(&ic->interrupt_callback)){
2760 ret= AVERROR_EXIT;
2761 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2762 break;
2763 }
2764
2765 /* check if one codec still needs to be handled */
2766 for(i=0;i<ic->nb_streams;i++) {
2767 int fps_analyze_framecount = 20;
2768
2769 st = ic->streams[i];
2770 if (!has_codec_parameters(st, NULL))
2771 break;
2772 /* if the timebase is coarse (like the usual millisecond precision
2773 of mkv), we need to analyze more frames to reliably arrive at
2774 the correct fps */
2775 if (av_q2d(st->time_base) > 0.0005)
2776 fps_analyze_framecount *= 2;
2777 if (ic->fps_probe_size >= 0)
2778 fps_analyze_framecount = ic->fps_probe_size;
2779 if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2780 fps_analyze_framecount = 0;
2781 /* variable fps and no guess at the real fps */
2782 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2783 && st->info->duration_count < fps_analyze_framecount
2784 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2785 break;
2786 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2787 break;
2788 if (st->first_dts == AV_NOPTS_VALUE &&
2789 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2790 st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2791 break;
2792 }
2793 if (i == ic->nb_streams) {
2794 /* NOTE: if the format has no header, then we need to read
2795 some packets to get most of the streams, so we cannot
2796 stop here */
2797 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2798 /* if we found the info for all the codecs, we can stop */
2799 ret = count;
2800 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2801 flush_codecs = 0;
2802 break;
2803 }
2804 }
2805 /* we did not get all the codec info, but we read too much data */
2806 if (read_size >= ic->probesize) {
2807 ret = count;
2808 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %d bytes reached\n", ic->probesize);
2809 for (i = 0; i < ic->nb_streams; i++)
2810 if (!ic->streams[i]->r_frame_rate.num &&
2811 ic->streams[i]->info->duration_count <= 1 &&
2812 strcmp(ic->iformat->name, "image2"))
2813 av_log(ic, AV_LOG_WARNING,
2814 "Stream #%d: not enough frames to estimate rate; "
2815 "consider increasing probesize\n", i);
2816 break;
2817 }
2818
2819 /* NOTE: a new stream can be added here if the file has no header
2820 (AVFMTCTX_NOHEADER) */
2821 ret = read_frame_internal(ic, &pkt1);
2822 if (ret == AVERROR(EAGAIN))
2823 continue;
2824
2825 if (ret < 0) {
2826 /* EOF or error */
2827 break;
2828 }
2829
2830 if (ic->flags & AVFMT_FLAG_NOBUFFER)
2831 free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);
2832 {
2833 pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2834 &ic->packet_buffer_end);
2835 if (!pkt) {
2836 ret = AVERROR(ENOMEM);
2837 goto find_stream_info_err;
2838 }
2839 if ((ret = av_dup_packet(pkt)) < 0)
2840 goto find_stream_info_err;
2841 }
2842
2843 read_size += pkt->size;
2844
2845 st = ic->streams[pkt->stream_index];
2846 if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2847 /* check for non-increasing dts */
2848 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2849 st->info->fps_last_dts >= pkt->dts) {
2850 av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: "
2851 "packet %d with DTS %"PRId64", packet %d with DTS "
2852 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2853 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2854 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2855 }
2856 /* check for a discontinuity in dts - if the difference in dts
2857 * is more than 1000 times the average packet duration in the sequence,
2858 * we treat it as a discontinuity */
2859 if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2860 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2861 (pkt->dts - st->info->fps_last_dts) / 1000 >
2862 (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2863 av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: "
2864 "packet %d with DTS %"PRId64", packet %d with DTS "
2865 "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2866 st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2867 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2868 }
2869
2870 /* update stored dts values */
2871 if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2872 st->info->fps_first_dts = pkt->dts;
2873 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2874 }
2875 st->info->fps_last_dts = pkt->dts;
2876 st->info->fps_last_dts_idx = st->codec_info_nb_frames;
2877 }
2878 if (st->codec_info_nb_frames>1) {
2879 int64_t t=0;
2880 if (st->time_base.den > 0)
2881 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2882 if (st->avg_frame_rate.num > 0)
2883 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));
2884
2885 if ( t==0
2886 && st->codec_info_nb_frames>30
2887 && st->info->fps_first_dts != AV_NOPTS_VALUE
2888 && st->info->fps_last_dts != AV_NOPTS_VALUE)
2889 t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));
2890
2891 if (t >= ic->max_analyze_duration) {
2892 av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %d reached at %"PRId64" microseconds\n", ic->max_analyze_duration, t);
2893 break;
2894 }
2895 if (pkt->duration) {
2896 st->info->codec_info_duration += pkt->duration;
2897 st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame==2 ? st->parser->repeat_pict + 1 : 2;
2898 }
2899 }
2900#if FF_API_R_FRAME_RATE
2901 {
2902 int64_t last = st->info->last_dts;
2903
2904 if( pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last
2905 && pkt->dts - (uint64_t)last < INT64_MAX){
2906 double dts= (is_relative(pkt->dts) ? pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2907 int64_t duration= pkt->dts - last;
2908
2909 if (!st->info->duration_error)
2910 st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
2911 if (!st->info->duration_error)
2912 return AVERROR(ENOMEM);
2913
2914// if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2915// av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2916 for (i=0; i<MAX_STD_TIMEBASES; i++) {
2917 int framerate= get_std_framerate(i);
2918 double sdts= dts*framerate/(1001*12);
2919 for(j=0; j<2; j++){
2920 int64_t ticks= llrint(sdts+j*0.5);
2921 double error= sdts - ticks + j*0.5;
2922 st->info->duration_error[j][0][i] += error;
2923 st->info->duration_error[j][1][i] += error*error;
2924 }
2925 }
2926 st->info->duration_count++;
2927 // ignore the first 4 values, they might have some random jitter
2928 if (st->info->duration_count > 3 && is_relative(pkt->dts) == is_relative(last))
2929 st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2930 }
2931 if (pkt->dts != AV_NOPTS_VALUE)
2932 st->info->last_dts = pkt->dts;
2933 }
2934#endif
2935 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2936 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2937 if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2938 if (ff_alloc_extradata(st->codec, i))
2939 return AVERROR(ENOMEM);
2940 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2941 }
2942 }
2943
2944 /* if still no information, we try to open the codec and to
2945 decompress the frame. We try to avoid that in most cases as
2946 it takes longer and uses more memory. For MPEG-4, we need to
2947 decompress for QuickTime.
2948
2949 If CODEC_CAP_CHANNEL_CONF is set, this will force decoding of at
2950 least one frame of codec data; this makes sure the codec initializes
2951 the channel configuration and does not rely only on the values from the container.
2952 */
2953 try_decode_frame(ic, st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2954
2955 st->codec_info_nb_frames++;
2956 count++;
2957 }
2958
2959 if (flush_codecs) {
2960 AVPacket empty_pkt = { 0 };
2961 int err = 0;
2962 av_init_packet(&empty_pkt);
2963
2964 for(i=0;i<ic->nb_streams;i++) {
2965
2966 st = ic->streams[i];
2967
2968 /* flush the decoders */
2969 if (st->info->found_decoder == 1) {
2970 do {
2971 err = try_decode_frame(ic, st, &empty_pkt,
2972 (options && i < orig_nb_streams) ?
2973 &options[i] : NULL);
2974 } while (err > 0 && !has_codec_parameters(st, NULL));
2975
2976 if (err < 0) {
2977 av_log(ic, AV_LOG_INFO,
2978 "decoding for stream %d failed\n", st->index);
2979 }
2980 }
2981 }
2982 }
2983
2984 // close codecs which were opened in try_decode_frame()
2985 for(i=0;i<ic->nb_streams;i++) {
2986 st = ic->streams[i];
2987 avcodec_close(st->codec);
2988 }
2989 for(i=0;i<ic->nb_streams;i++) {
2990 st = ic->streams[i];
2991 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2992 if(st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
2993 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2994 if (avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
2995 st->codec->codec_tag= tag;
2996 }
2997
2998 /* estimate average framerate if not set by demuxer */
2999 if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) {
3000 int best_fps = 0;
3001 double best_error = 0.01;
3002
3003 if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2||
3004 st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
3005 st->info->codec_info_duration < 0)
3006 continue;
3007 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3008 st->info->codec_info_duration_fields*(int64_t)st->time_base.den,
3009 st->info->codec_info_duration*2*(int64_t)st->time_base.num, 60000);
3010
3011 /* round guessed framerate to a "standard" framerate if it's
3012 * within 1% of the original estimate */
3013 for (j = 1; j < MAX_STD_TIMEBASES; j++) {
3014 AVRational std_fps = { get_std_framerate(j), 12*1001 };
3015 double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);
3016
3017 if (error < best_error) {
3018 best_error = error;
3019 best_fps = std_fps.num;
3020 }
3021 }
3022 if (best_fps) {
3023 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3024 best_fps, 12*1001, INT_MAX);
3025 }
3026 }
3027 // the check for tb_unreliable() is not completely correct, since this is not about handling
3028 // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
3029 // ipmovie.c produces.
3030 if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
3031 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
3032 if (st->info->duration_count>1 && !st->r_frame_rate.num
3033 && tb_unreliable(st->codec)) {
3034 int num = 0;
3035 double best_error= 0.01;
3036
3037 for (j=0; j<MAX_STD_TIMEBASES; j++) {
3038 int k;
3039
3040 if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
3041 continue;
3042 if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
3043 continue;
3044 for(k=0; k<2; k++){
3045 int n= st->info->duration_count;
3046 double a= st->info->duration_error[k][0][j] / n;
3047 double error= st->info->duration_error[k][1][j]/n - a*a;
3048
3049 if(error < best_error && best_error> 0.000000001){
3050 best_error= error;
3051 num = get_std_framerate(j);
3052 }
3053 if(error < 0.02)
3054 av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
3055 }
3056 }
3057 // do not increase frame rate by more than 1 % in order to match a standard rate.
3058 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
3059 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
3060 }
3061
3062 if (!st->r_frame_rate.num){
3063 if( st->codec->time_base.den * (int64_t)st->time_base.num
3064 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
3065 st->r_frame_rate.num = st->codec->time_base.den;
3066 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
3067 }else{
3068 st->r_frame_rate.num = st->time_base.den;
3069 st->r_frame_rate.den = st->time_base.num;
3070 }
3071 }
3072 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
3073 if(!st->codec->bits_per_coded_sample)
3074 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
3075 // set stream disposition based on audio service type
3076 switch (st->codec->audio_service_type) {
3077 case AV_AUDIO_SERVICE_TYPE_EFFECTS:
3078 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
3079 case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
3080 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
3081 case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
3082 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
3083 case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
3084 st->disposition = AV_DISPOSITION_COMMENT; break;
3085 case AV_AUDIO_SERVICE_TYPE_KARAOKE:
3086 st->disposition = AV_DISPOSITION_KARAOKE; break;
3087 }
3088 }
3089 }
3090
3091 if(ic->probesize)
3092 estimate_timings(ic, old_offset);
3093
3094 if (ret >= 0 && ic->nb_streams)
3095 ret = -1; /* assume we could not get all the codec parameters before EOF; cleared below if at least one stream has them */
3096 for(i=0;i<ic->nb_streams;i++) {
3097 const char *errmsg;
3098 st = ic->streams[i];
3099 if (!has_codec_parameters(st, &errmsg)) {
3100 char buf[256];
3101 avcodec_string(buf, sizeof(buf), st->codec, 0);
3102 av_log(ic, AV_LOG_WARNING,
3103 "Could not find codec parameters for stream %d (%s): %s\n"
3104 "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3105 i, buf, errmsg);
3106 } else {
3107 ret = 0;
3108 }
3109 }
3110
3111 compute_chapters_end(ic);
3112
3113 find_stream_info_err:
3114 for (i=0; i < ic->nb_streams; i++) {
3115 st = ic->streams[i];
3116 if (ic->streams[i]->codec && ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
3117 ic->streams[i]->codec->thread_count = 0;
3118 if (st->info)
3119 av_freep(&st->info->duration_error);
3120 av_freep(&ic->streams[i]->info);
3121 }
3122 if(ic->pb)
3123 av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
3124 return ret;
3125}
3126
3127AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3128{
3129 int i, j;
3130
3131 for (i = 0; i < ic->nb_programs; i++) {
3132 if (ic->programs[i] == last) {
3133 last = NULL;
3134 } else {
3135 if (!last)
3136 for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3137 if (ic->programs[i]->stream_index[j] == s)
3138 return ic->programs[i];
3139 }
3140 }
3141 return NULL;
3142}
3143
3144int av_find_best_stream(AVFormatContext *ic,
3145 enum AVMediaType type,
3146 int wanted_stream_nb,
3147 int related_stream,
3148 AVCodec **decoder_ret,
3149 int flags)
3150{
3151 int i, nb_streams = ic->nb_streams;
3152 int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3153 unsigned *program = NULL;
3154 AVCodec *decoder = NULL, *best_decoder = NULL;
3155
3156 if (related_stream >= 0 && wanted_stream_nb < 0) {
3157 AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3158 if (p) {
3159 program = p->stream_index;
3160 nb_streams = p->nb_stream_indexes;
3161 }
3162 }
3163 for (i = 0; i < nb_streams; i++) {
3164 int real_stream_index = program ? program[i] : i;
3165 AVStream *st = ic->streams[real_stream_index];
3166 AVCodecContext *avctx = st->codec;
3167 if (avctx->codec_type != type)
3168 continue;
3169 if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3170 continue;
3171 if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
3172 continue;
3173 if (decoder_ret) {
3174 decoder = find_decoder(ic, st, st->codec->codec_id);
3175 if (!decoder) {
3176 if (ret < 0)
3177 ret = AVERROR_DECODER_NOT_FOUND;
3178 continue;
3179 }
3180 }
3181 count = st->codec_info_nb_frames;
3182 bitrate = avctx->bit_rate;
3183 multiframe = FFMIN(5, count);
3184 if ((best_multiframe > multiframe) ||
3185 (best_multiframe == multiframe && best_bitrate > bitrate) ||
3186 (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3187 continue;
3188 best_count = count;
3189 best_bitrate = bitrate;
3190 best_multiframe = multiframe;
3191 ret = real_stream_index;
3192 best_decoder = decoder;
3193 if (program && i == nb_streams - 1 && ret < 0) {
3194 program = NULL;
3195 nb_streams = ic->nb_streams;
3196 i = 0; /* no related stream found, try again with everything */
3197 }
3198 }
3199 if (decoder_ret)
3200 *decoder_ret = best_decoder;
3201 return ret;
3202}
3203
3204/*******************************************************/
3205
3206int av_read_play(AVFormatContext *s)
3207{
3208 if (s->iformat->read_play)
3209 return s->iformat->read_play(s);
3210 if (s->pb)
3211 return avio_pause(s->pb, 0);
3212 return AVERROR(ENOSYS);
3213}
3214
3215int av_read_pause(AVFormatContext *s)
3216{
3217 if (s->iformat->read_pause)
3218 return s->iformat->read_pause(s);
3219 if (s->pb)
3220 return avio_pause(s->pb, 1);
3221 return AVERROR(ENOSYS);
3222}
3223
3224void ff_free_stream(AVFormatContext *s, AVStream *st){
3225 av_assert0(s->nb_streams>0);
3226 av_assert0(s->streams[ s->nb_streams-1 ] == st);
3227
3228 if (st->parser) {
3229 av_parser_close(st->parser);
3230 }
3231 if (st->attached_pic.data)
3232 av_free_packet(&st->attached_pic);
3233 av_dict_free(&st->metadata);
3234 av_freep(&st->probe_data.buf);
3235 av_freep(&st->index_entries);
3236 av_freep(&st->codec->extradata);
3237 av_freep(&st->codec->subtitle_header);
3238 av_freep(&st->codec);
3239 av_freep(&st->priv_data);
3240 if (st->info)
3241 av_freep(&st->info->duration_error);
3242 av_freep(&st->info);
3243 av_freep(&s->streams[ --s->nb_streams ]);
3244}
3245
3246void avformat_free_context(AVFormatContext *s)
3247{
3248 int i;
3249
3250 if (!s)
3251 return;
3252
3253 if(s->cover_data)
3254 av_free(s->cover_data);
3255
3256 av_opt_free(s);
3257 if (s->iformat && s->iformat->priv_class && s->priv_data)
3258 av_opt_free(s->priv_data);
3259
3260 for(i=s->nb_streams-1; i>=0; i--) {
3261 ff_free_stream(s, s->streams[i]);
3262 }
3263 for(i=s->nb_programs-1; i>=0; i--) {
3264 av_dict_free(&s->programs[i]->metadata);
3265 av_freep(&s->programs[i]->stream_index);
3266 av_freep(&s->programs[i]);
3267 }
3268 av_freep(&s->programs);
3269 av_freep(&s->priv_data);
3270 while(s->nb_chapters--) {
3271 av_dict_free(&s->chapters[s->nb_chapters]->metadata);
3272 av_freep(&s->chapters[s->nb_chapters]);
3273 }
3274 av_freep(&s->chapters);
3275 av_dict_free(&s->metadata);
3276 av_freep(&s->streams);
3277 av_free(s);
3278}
3279
3280#if FF_API_CLOSE_INPUT_FILE
3281void av_close_input_file(AVFormatContext *s)
3282{
3283 avformat_close_input(&s);
3284}
3285#endif
3286
3287void avformat_close_input(AVFormatContext **ps)
3288{
3289 AVFormatContext *s;
3290 AVIOContext *pb;
3291
3292 if (!ps || !*ps)
3293 return;
3294
3295 s = *ps;
3296 pb = s->pb;
3297
3298 if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
3299 (s->flags & AVFMT_FLAG_CUSTOM_IO))
3300 pb = NULL;
3301
3302 flush_packet_queue(s);
3303
3304 if (s->iformat) {
3305 if (s->iformat->read_close)
3306 s->iformat->read_close(s);
3307 }
3308
3309 avformat_free_context(s);
3310
3311 *ps = NULL;
3312
3313 avio_close(pb);
3314}
3315
3316#if FF_API_NEW_STREAM
3317AVStream *av_new_stream(AVFormatContext *s, int id)
3318{
3319 AVStream *st = avformat_new_stream(s, NULL);
3320 if (st)
3321 st->id = id;
3322 return st;
3323}
3324#endif
3325
3326AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
3327{
3328 AVStream *st;
3329 int i;
3330 AVStream **streams;
3331
3332 if (s->nb_streams >= INT_MAX/sizeof(*streams))
3333 return NULL;
3334 streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
3335 if (!streams)
3336 return NULL;
3337 s->streams = streams;
3338
3339 st = av_mallocz(sizeof(AVStream));
3340 if (!st)
3341 return NULL;
3342 if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3343 av_free(st);
3344 return NULL;
3345 }
3346 st->info->last_dts = AV_NOPTS_VALUE;
3347
3348 st->codec = avcodec_alloc_context3(c);
3349 if (s->iformat) {
3350 /* no default bitrate if decoding */
3351 st->codec->bit_rate = 0;
3352 }
3353 st->index = s->nb_streams;
3354 st->start_time = AV_NOPTS_VALUE;
3355 st->duration = AV_NOPTS_VALUE;
3356 /* we set the current DTS to 0 (RELATIVE_TS_BASE when demuxing) so that
3357 formats without any timestamps but with durations get some timestamps;
3358 formats with some unknown timestamps have their first few packets
3359 buffered and the timestamps corrected before they are returned to the user */
3360 st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3361 st->first_dts = AV_NOPTS_VALUE;
3362 st->probe_packets = MAX_PROBE_PACKETS;
3363 st->pts_wrap_reference = AV_NOPTS_VALUE;
3364 st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3365
3366 /* default pts setting is MPEG-like */
3367 avpriv_set_pts_info(st, 33, 1, 90000);
3368 st->last_IP_pts = AV_NOPTS_VALUE;
3369 for(i=0; i<MAX_REORDER_DELAY+1; i++)
3370 st->pts_buffer[i]= AV_NOPTS_VALUE;
3371
3372 st->sample_aspect_ratio = (AVRational){0,1};
3373
3374#if FF_API_R_FRAME_RATE
3375 st->info->last_dts = AV_NOPTS_VALUE;
3376#endif
3377 st->info->fps_first_dts = AV_NOPTS_VALUE;
3378 st->info->fps_last_dts = AV_NOPTS_VALUE;
3379
3380 s->streams[s->nb_streams++] = st;
3381 return st;
3382}
3383
3384AVProgram *av_new_program(AVFormatContext *ac, int id)
3385{
3386 AVProgram *program=NULL;
3387 int i;
3388
3389 av_dlog(ac, "new_program: id=0x%04x\n", id);
3390
3391 for(i=0; i<ac->nb_programs; i++)
3392 if(ac->programs[i]->id == id)
3393 program = ac->programs[i];
3394
3395 if(!program){
3396 program = av_mallocz(sizeof(AVProgram));
3397 if (!program)
3398 return NULL;
3399 dynarray_add(&ac->programs, &ac->nb_programs, program);
3400 program->discard = AVDISCARD_NONE;
3401 }
3402 program->id = id;
3403 program->pts_wrap_reference = AV_NOPTS_VALUE;
3404 program->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3405
3406 program->start_time =
3407 program->end_time = AV_NOPTS_VALUE;
3408
3409 return program;
3410}
3411
3412AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3413{
3414 AVChapter *chapter = NULL;
3415 int i;
3416
3417 for(i=0; i<s->nb_chapters; i++)
3418 if(s->chapters[i]->id == id)
3419 chapter = s->chapters[i];
3420
3421 if(!chapter){
3422 chapter= av_mallocz(sizeof(AVChapter));
3423 if(!chapter)
3424 return NULL;
3425 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3426 }
3427 av_dict_set(&chapter->metadata, "title", title, 0);
3428 chapter->id = id;
3429 chapter->time_base= time_base;
3430 chapter->start = start;
3431 chapter->end = end;
3432
3433 return chapter;
3434}
3435
3436void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3437{
3438 int i, j;
3439 AVProgram *program=NULL;
3440 void *tmp;
3441
3442 if (idx >= ac->nb_streams) {
3443 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3444 return;
3445 }
3446
3447 for(i=0; i<ac->nb_programs; i++){
3448 if(ac->programs[i]->id != progid)
3449 continue;
3450 program = ac->programs[i];
3451 for(j=0; j<program->nb_stream_indexes; j++)
3452 if(program->stream_index[j] == idx)
3453 return;
3454
3455 tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int));
3456 if(!tmp)
3457 return;
3458 program->stream_index = tmp;
3459 program->stream_index[program->nb_stream_indexes++] = idx;
3460 return;
3461 }
3462}
3463
3464static void print_fps(double d, const char *postfix){
3465 uint64_t v= lrintf(d*100);
3466 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3467 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3468 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
3469}
3470
3471static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3472{
3473 if(m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))){
3474 AVDictionaryEntry *tag=NULL;
3475
3476 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3477 while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3478 if(strcmp("language", tag->key)){
3479 const char *p = tag->value;
3480 av_log(ctx, AV_LOG_INFO, "%s %-16s: ", indent, tag->key);
3481 while(*p) {
3482 char tmp[256];
3483 size_t len = strcspn(p, "\x8\xa\xb\xc\xd");
3484 av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3485 av_log(ctx, AV_LOG_INFO, "%s", tmp);
3486 p += len;
3487 if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3488 if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s %-16s: ", indent, "");
3489 if (*p) p++;
3490 }
3491 av_log(ctx, AV_LOG_INFO, "\n");
3492 }
3493 }
3494 }
3495}
3496
3497/* "user interface" functions */
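/**
 * Log a one-line description of a single stream (codec, language, aspect
 * ratio, frame rates and dispositions) as part of av_dump_format().
 */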
3498static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3499{
3500 char buf[256];
3501 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3502 AVStream *st = ic->streams[i];
3503 int g = av_gcd(st->time_base.num, st->time_base.den);
3504 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3505 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3506 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3507 /* the pid is important information, so we display it */
3508 /* XXX: add a generic system */
3509 if (flags & AVFMT_SHOW_IDS)
3510 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3511 if (lang)
3512 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3513 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3514 av_log(NULL, AV_LOG_INFO, ": %s", buf);
3515 if (st->sample_aspect_ratio.num && // default
3516 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3517 AVRational display_aspect_ratio;
3518 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3519 st->codec->width*st->sample_aspect_ratio.num,
3520 st->codec->height*st->sample_aspect_ratio.den,
3521 1024*1024);
3522 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3523 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3524 display_aspect_ratio.num, display_aspect_ratio.den);
3525 }
3526 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3527 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3528 print_fps(av_q2d(st->avg_frame_rate), "fps");
3529#if FF_API_R_FRAME_RATE
3530 if(st->r_frame_rate.den && st->r_frame_rate.num)
3531 print_fps(av_q2d(st->r_frame_rate), "tbr");
3532#endif
3533 if(st->time_base.den && st->time_base.num)
3534 print_fps(1/av_q2d(st->time_base), "tbn");
3535 if(st->codec->time_base.den && st->codec->time_base.num)
3536 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3537 }
3538 if (st->disposition & AV_DISPOSITION_DEFAULT)
3539 av_log(NULL, AV_LOG_INFO, " (default)");
3540 if (st->disposition & AV_DISPOSITION_DUB)
3541 av_log(NULL, AV_LOG_INFO, " (dub)");
3542 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3543 av_log(NULL, AV_LOG_INFO, " (original)");
3544 if (st->disposition & AV_DISPOSITION_COMMENT)
3545 av_log(NULL, AV_LOG_INFO, " (comment)");
3546 if (st->disposition & AV_DISPOSITION_LYRICS)
3547 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3548 if (st->disposition & AV_DISPOSITION_KARAOKE)
3549 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3550 if (st->disposition & AV_DISPOSITION_FORCED)
3551 av_log(NULL, AV_LOG_INFO, " (forced)");
3552 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3553 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3554 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3555 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3556 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3557 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3558 av_log(NULL, AV_LOG_INFO, "\n");
3559 dump_metadata(NULL, st->metadata, " ");
3560}
3561
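/*
 * Typical use from application code (illustrative only): after opening an
 * input and probing its streams, e.g.
 *
 *     avformat_open_input(&ic, filename, NULL, NULL);
 *     avformat_find_stream_info(ic, NULL);
 *     av_dump_format(ic, 0, filename, 0);
 *
 * prints the container, chapter, program and per-stream information to the
 * log. For muxing, pass is_output = 1 and an output AVFormatContext.
 */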
3562void av_dump_format(AVFormatContext *ic,
3563 int index,
3564 const char *url,
3565 int is_output)
3566{
3567 int i;
3568 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3569 if (ic->nb_streams && !printed)
3570 return;
3571
3572 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3573 is_output ? "Output" : "Input",
3574 index,
3575 is_output ? ic->oformat->name : ic->iformat->name,
3576 is_output ? "to" : "from", url);
3577 dump_metadata(NULL, ic->metadata, " ");
3578 if (!is_output) {
3579 av_log(NULL, AV_LOG_INFO, " Duration: ");
3580 if (ic->duration != AV_NOPTS_VALUE) {
3581 int hours, mins, secs, us;
3582 int64_t duration = ic->duration + 5000;
3583 secs = duration / AV_TIME_BASE;
3584 us = duration % AV_TIME_BASE;
3585 mins = secs / 60;
3586 secs %= 60;
3587 hours = mins / 60;
3588 mins %= 60;
3589 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3590 (100 * us) / AV_TIME_BASE);
3591 } else {
3592 av_log(NULL, AV_LOG_INFO, "N/A");
3593 }
3594 if (ic->start_time != AV_NOPTS_VALUE) {
3595 int secs, us;
3596 av_log(NULL, AV_LOG_INFO, ", start: ");
3597 secs = ic->start_time / AV_TIME_BASE;
3598 us = abs(ic->start_time % AV_TIME_BASE);
3599 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3600 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3601 }
3602 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3603 if (ic->bit_rate) {
3604            av_log(NULL, AV_LOG_INFO, "%d kb/s", ic->bit_rate / 1000);
3605 } else {
3606 av_log(NULL, AV_LOG_INFO, "N/A");
3607 }
3608 av_log(NULL, AV_LOG_INFO, "\n");
3609 }
3610 for (i = 0; i < ic->nb_chapters; i++) {
3611 AVChapter *ch = ic->chapters[i];
3612 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3613 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3614 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3615
3616 dump_metadata(NULL, ch->metadata, " ");
3617 }
3618 if(ic->nb_programs) {
3619 int j, k, total = 0;
3620 for(j=0; j<ic->nb_programs; j++) {
3621 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3622 "name", NULL, 0);
3623 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3624 name ? name->value : "");
3625 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3626 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3627 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3628 printed[ic->programs[j]->stream_index[k]] = 1;
3629 }
3630 total += ic->programs[j]->nb_stream_indexes;
3631 }
3632 if (total < ic->nb_streams)
3633 av_log(NULL, AV_LOG_INFO, " No Program\n");
3634 }
3635 for(i=0;i<ic->nb_streams;i++)
3636 if (!printed[i])
3637 dump_stream_format(ic, i, index, is_output);
3638
3639 av_free(printed);
3640}
3641
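/*
 * Current time as an NTP timestamp in microseconds: av_gettime() returns the
 * wall-clock time since the Unix epoch and NTP_OFFSET_US (see internal.h)
 * shifts it to the NTP epoch of 1900-01-01. The division and multiplication
 * by 1000 truncate the value to millisecond precision.
 */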
3642uint64_t ff_ntp_time(void)
3643{
3644 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
3645}
3646
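/*
 * Build a frame file name from a printf-like pattern. Exactly one "%d"
 * (optionally with a width, e.g. "%05d") is replaced by 'number'; "%%"
 * yields a literal '%'. For example, path "img-%03d.png" with number 7
 * gives "img-007.png". Returns 0 on success and -1 if the pattern contains
 * no "%d", contains an unsupported conversion, or the result does not fit
 * into buf.
 */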
3647int av_get_frame_filename(char *buf, int buf_size,
3648 const char *path, int number)
3649{
3650 const char *p;
3651 char *q, buf1[20], c;
3652 int nd, len, percentd_found;
3653
3654 q = buf;
3655 p = path;
3656 percentd_found = 0;
3657 for(;;) {
3658 c = *p++;
3659 if (c == '\0')
3660 break;
3661 if (c == '%') {
3662 do {
3663 nd = 0;
3664 while (av_isdigit(*p)) {
3665 nd = nd * 10 + *p++ - '0';
3666 }
3667 c = *p++;
3668 } while (av_isdigit(c));
3669
3670 switch(c) {
3671 case '%':
3672 goto addchar;
3673 case 'd':
3674 if (percentd_found)
3675 goto fail;
3676 percentd_found = 1;
3677 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3678 len = strlen(buf1);
3679 if ((q - buf + len) > buf_size - 1)
3680 goto fail;
3681 memcpy(q, buf1, len);
3682 q += len;
3683 break;
3684 default:
3685 goto fail;
3686 }
3687 } else {
3688 addchar:
3689 if ((q - buf) < buf_size - 1)
3690 *q++ = c;
3691 }
3692 }
3693 if (!percentd_found)
3694 goto fail;
3695 *q = '\0';
3696 return 0;
3697 fail:
3698 *q = '\0';
3699 return -1;
3700}
3701
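/*
 * Shared helper for av_hex_dump() and av_hex_dump_log(): prints 16 bytes per
 * line as "<offset>  <hex bytes>  <ASCII>", with non-printable bytes shown
 * as '.'. Output goes to the FILE* if one is given, otherwise to av_log()
 * with the supplied context and level.
 */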
3702static void hex_dump_internal(void *avcl, FILE *f, int level,
3703 const uint8_t *buf, int size)
3704{
3705 int len, i, j, c;
3706#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3707
3708 for(i=0;i<size;i+=16) {
3709 len = size - i;
3710 if (len > 16)
3711 len = 16;
3712 PRINT("%08x ", i);
3713 for(j=0;j<16;j++) {
3714 if (j < len)
3715 PRINT(" %02x", buf[i+j]);
3716 else
3717 PRINT(" ");
3718 }
3719 PRINT(" ");
3720 for(j=0;j<len;j++) {
3721 c = buf[i+j];
3722 if (c < ' ' || c > '~')
3723 c = '.';
3724 PRINT("%c", c);
3725 }
3726 PRINT("\n");
3727 }
3728#undef PRINT
3729}
3730
3731void av_hex_dump(FILE *f, const uint8_t *buf, int size)
3732{
3733 hex_dump_internal(NULL, f, 0, buf, size);
3734}
3735
3736void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
3737{
3738 hex_dump_internal(avcl, NULL, level, buf, size);
3739}
3740
3741static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3742{
3743#define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3744 PRINT("stream #%d:\n", pkt->stream_index);
3745 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3746 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3747 /* DTS is _always_ valid after av_read_frame() */
3748 PRINT(" dts=");
3749 if (pkt->dts == AV_NOPTS_VALUE)
3750 PRINT("N/A");
3751 else
3752 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3753 /* PTS may not be known if B-frames are present. */
3754 PRINT(" pts=");
3755 if (pkt->pts == AV_NOPTS_VALUE)
3756 PRINT("N/A");
3757 else
3758 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3759 PRINT("\n");
3760 PRINT(" size=%d\n", pkt->size);
3761#undef PRINT
3762 if (dump_payload)
3763 av_hex_dump(f, pkt->data, pkt->size);
3764}
3765
3766void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3767{
3768 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3769}
3770
3771void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3772 AVStream *st)
3773{
3774 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
3775}
3776
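/*
 * Split a URL into its components. For example (illustrative values),
 * "http://user:pass@example.com:8080/index.html?arg=1" is split into
 * proto "http", authorization "user:pass", hostname "example.com",
 * port 8080 and path "/index.html?arg=1". A string without a ':' is
 * treated as a plain file name and copied into path, and the port is left
 * at -1 when none is present. Components the caller does not need can be
 * requested with a zero-sized buffer or a NULL port pointer.
 */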
3777void av_url_split(char *proto, int proto_size,
3778 char *authorization, int authorization_size,
3779 char *hostname, int hostname_size,
3780 int *port_ptr,
3781 char *path, int path_size,
3782 const char *url)
3783{
3784 const char *p, *ls, *ls2, *at, *at2, *col, *brk;
3785
3786 if (port_ptr) *port_ptr = -1;
3787 if (proto_size > 0) proto[0] = 0;
3788 if (authorization_size > 0) authorization[0] = 0;
3789 if (hostname_size > 0) hostname[0] = 0;
3790 if (path_size > 0) path[0] = 0;
3791
3792 /* parse protocol */
3793 if ((p = strchr(url, ':'))) {
3794 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3795 p++; /* skip ':' */
3796 if (*p == '/') p++;
3797 if (*p == '/') p++;
3798 } else {
3799 /* no protocol means plain filename */
3800 av_strlcpy(path, url, path_size);
3801 return;
3802 }
3803
3804 /* separate path from hostname */
3805 ls = strchr(p, '/');
3806 ls2 = strchr(p, '?');
3807 if(!ls)
3808 ls = ls2;
3809 else if (ls && ls2)
3810 ls = FFMIN(ls, ls2);
3811 if(ls)
3812 av_strlcpy(path, ls, path_size);
3813 else
3814 ls = &p[strlen(p)]; // XXX
3815
3816 /* the rest is hostname, use that to parse auth/port */
3817 if (ls != p) {
3818 /* authorization (user[:pass]@hostname) */
3819 at2 = p;
3820 while ((at = strchr(p, '@')) && at < ls) {
3821 av_strlcpy(authorization, at2,
3822 FFMIN(authorization_size, at + 1 - at2));
3823 p = at + 1; /* skip '@' */
3824 }
3825
3826 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3827 /* [host]:port */
3828 av_strlcpy(hostname, p + 1,
3829 FFMIN(hostname_size, brk - p));
3830 if (brk[1] == ':' && port_ptr)
3831 *port_ptr = atoi(brk + 2);
3832 } else if ((col = strchr(p, ':')) && col < ls) {
3833 av_strlcpy(hostname, p,
3834 FFMIN(col + 1 - p, hostname_size));
3835 if (port_ptr) *port_ptr = atoi(col + 1);
3836 } else
3837 av_strlcpy(hostname, p,
3838 FFMIN(ls + 1 - p, hostname_size));
3839 }
3840}
3841
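/*
 * Write the 's' bytes at 'src' as 2*s hexadecimal characters (upper or
 * lower case) into 'buff'. Note that no trailing '\0' is written; the
 * caller has to terminate or size the buffer accordingly.
 */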
3842char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
3843{
3844 int i;
3845 static const char hex_table_uc[16] = { '0', '1', '2', '3',
3846 '4', '5', '6', '7',
3847 '8', '9', 'A', 'B',
3848 'C', 'D', 'E', 'F' };
3849 static const char hex_table_lc[16] = { '0', '1', '2', '3',
3850 '4', '5', '6', '7',
3851 '8', '9', 'a', 'b',
3852 'c', 'd', 'e', 'f' };
3853 const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
3854
3855 for(i = 0; i < s; i++) {
3856 buff[i * 2] = hex_table[src[i] >> 4];
3857 buff[i * 2 + 1] = hex_table[src[i] & 0xF];
3858 }
3859
3860 return buff;
3861}
3862
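/*
 * Parse a string of hexadecimal digits into binary data; whitespace is
 * skipped and parsing stops at the first character that is neither a hex
 * digit nor whitespace. 'data' may be NULL to only compute the decoded
 * length, which is returned in bytes.
 */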
3863int ff_hex_to_data(uint8_t *data, const char *p)
3864{
3865 int c, len, v;
3866
3867 len = 0;
3868 v = 1;
3869 for (;;) {
3870 p += strspn(p, SPACE_CHARS);
3871 if (*p == '\0')
3872 break;
3873 c = av_toupper((unsigned char) *p++);
3874 if (c >= '0' && c <= '9')
3875 c = c - '0';
3876 else if (c >= 'A' && c <= 'F')
3877 c = c - 'A' + 10;
3878 else
3879 break;
3880 v = (v << 4) | c;
3881 if (v & 0x100) {
3882 if (data)
3883 data[len] = v;
3884 len++;
3885 v = 1;
3886 }
3887 }
3888 return len;
3889}
3890
3891#if FF_API_SET_PTS_INFO
3892void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3893 unsigned int pts_num, unsigned int pts_den)
3894{
3895 avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
3896}
3897#endif
3898
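/*
 * Set the stream time base to pts_num/pts_den, reduced to lowest terms
 * (e.g. 1001/60000 stays as is, while 2/50 becomes 1/25). Non-positive or
 * unrepresentable time bases are rejected with an error message, and the
 * packet time base of the stream's codec context is kept in sync.
 */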
3899void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3900 unsigned int pts_num, unsigned int pts_den)
3901{
3902 AVRational new_tb;
3903 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3904 if(new_tb.num != pts_num)
3905 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3906 }else
3907 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3908
3909 if(new_tb.num <= 0 || new_tb.den <= 0) {
3910 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
3911 return;
3912 }
3913 s->time_base = new_tb;
3914 av_codec_set_pkt_timebase(s->codec, new_tb);
3915 s->pts_wrap_bits = pts_wrap_bits;
3916}
3917
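/*
 * Generic parser for strings of the form
 *     key1=value1,key2="a \"quoted\" value", key3=value3
 * as used e.g. by HTTP authentication headers. For every key the callback
 * is asked for a destination buffer; the unquoted, unescaped value is then
 * copied there and zero-terminated.
 */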
3918void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3919 void *context)
3920{
3921 const char *ptr = str;
3922
3923 /* Parse key=value pairs. */
3924 for (;;) {
3925 const char *key;
3926 char *dest = NULL, *dest_end;
3927 int key_len, dest_len = 0;
3928
3929 /* Skip whitespace and potential commas. */
3930 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3931 ptr++;
3932 if (!*ptr)
3933 break;
3934
3935 key = ptr;
3936
3937 if (!(ptr = strchr(key, '=')))
3938 break;
3939 ptr++;
3940 key_len = ptr - key;
3941
3942 callback_get_buf(context, key, key_len, &dest, &dest_len);
3943 dest_end = dest + dest_len - 1;
3944
3945 if (*ptr == '\"') {
3946 ptr++;
3947 while (*ptr && *ptr != '\"') {
3948 if (*ptr == '\\') {
3949 if (!ptr[1])
3950 break;
3951 if (dest && dest < dest_end)
3952 *dest++ = ptr[1];
3953 ptr += 2;
3954 } else {
3955 if (dest && dest < dest_end)
3956 *dest++ = *ptr;
3957 ptr++;
3958 }
3959 }
3960 if (*ptr == '\"')
3961 ptr++;
3962 } else {
3963 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3964 if (dest && dest < dest_end)
3965 *dest++ = *ptr;
3966 }
3967 if (dest)
3968 *dest = 0;
3969 }
3970}
3971
3972int ff_find_stream_index(AVFormatContext *s, int id)
3973{
3974 int i;
3975 for (i = 0; i < s->nb_streams; i++) {
3976 if (s->streams[i]->id == id)
3977 return i;
3978 }
3979 return -1;
3980}
3981
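/*
 * Parse a date/time string of the form "YYYY-MM-DD hh:mm:ss" or
 * "YYYY-MM-DDThh:mm:ss" and return it as seconds since the Unix epoch,
 * interpreting the time as UTC.
 */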
3982int64_t ff_iso8601_to_unix_time(const char *datestr)
3983{
3984 struct tm time1 = {0}, time2 = {0};
3985 char *ret1, *ret2;
3986 ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
3987 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
3988 if (ret2 && !ret1)
3989 return av_timegm(&time2);
3990 else
3991 return av_timegm(&time1);
3992}
3993
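/*
 * Ask a muxer whether it can store the given codec at the given level of
 * standards compliance: a format-specific query_codec() callback is
 * preferred, then the codec tag table, then the format's default codecs.
 * Returns 1 if the codec can be stored, 0 if it cannot, and
 * AVERROR_PATCHWELCOME when the answer is unknown.
 */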
3994int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
3995{
3996 if (ofmt) {
3997 if (ofmt->query_codec)
3998 return ofmt->query_codec(codec_id, std_compliance);
3999 else if (ofmt->codec_tag)
4000 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4001 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4002 codec_id == ofmt->subtitle_codec)
4003 return 1;
4004 }
4005 return AVERROR_PATCHWELCOME;
4006}
4007
4008int avformat_network_init(void)
4009{
4010#if CONFIG_NETWORK
4011 int ret;
4012 ff_network_inited_globally = 1;
4013 if ((ret = ff_network_init()) < 0)
4014 return ret;
4015 ff_tls_init();
4016#endif
4017 return 0;
4018}
4019
4020int avformat_network_deinit(void)
4021{
4022#if CONFIG_NETWORK
4023 ff_network_close();
4024 ff_tls_deinit();
4025#endif
4026 return 0;
4027}
4028
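/*
 * Append an AV_PKT_DATA_PARAM_CHANGE side data block to the packet. The
 * layout produced here is a 32-bit little-endian flags field followed, in
 * this order and only when the corresponding flag is set, by the channel
 * count (32 bit), channel layout (64 bit), sample rate (32 bit) and
 * width/height (2 x 32 bit).
 */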
4029int ff_add_param_change(AVPacket *pkt, int32_t channels,
4030 uint64_t channel_layout, int32_t sample_rate,
4031 int32_t width, int32_t height)
4032{
4033 uint32_t flags = 0;
4034 int size = 4;
4035 uint8_t *data;
4036 if (!pkt)
4037 return AVERROR(EINVAL);
4038 if (channels) {
4039 size += 4;
4040 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4041 }
4042 if (channel_layout) {
4043 size += 8;
4044 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4045 }
4046 if (sample_rate) {
4047 size += 4;
4048 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4049 }
4050 if (width || height) {
4051 size += 8;
4052 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4053 }
4054 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4055 if (!data)
4056 return AVERROR(ENOMEM);
4057 bytestream_put_le32(&data, flags);
4058 if (channels)
4059 bytestream_put_le32(&data, channels);
4060 if (channel_layout)
4061 bytestream_put_le64(&data, channel_layout);
4062 if (sample_rate)
4063 bytestream_put_le32(&data, sample_rate);
4064 if (width || height) {
4065 bytestream_put_le32(&data, width);
4066 bytestream_put_le32(&data, height);
4067 }
4068 return 0;
4069}
4070
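/*
 * Choose between the container- and frame-level sample aspect ratios: the
 * stream value wins when it reduces to something valid, otherwise the frame
 * value (or the codec value when no frame is supplied) is used; {0, 1} is
 * returned when nothing valid is set.
 */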
4071AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4072{
4073 AVRational undef = {0, 1};
4074 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4075 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4076 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4077
4078 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4079 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4080 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4081 stream_sample_aspect_ratio = undef;
4082
4083 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4084 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4085 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4086 frame_sample_aspect_ratio = undef;
4087
4088 if (stream_sample_aspect_ratio.num)
4089 return stream_sample_aspect_ratio;
4090 else
4091 return frame_sample_aspect_ratio;
4092}
4093
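/*
 * Guess the frame rate of a stream: start from r_frame_rate and, for codecs
 * with ticks_per_frame > 1 (e.g. field-based timestamps), fall back to the
 * codec-derived frame rate when r_frame_rate is clearly higher and also
 * disagrees with the measured average frame rate.
 */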
4094AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4095{
4096 AVRational fr = st->r_frame_rate;
4097
4098 if (st->codec->ticks_per_frame > 1) {
4099 AVRational codec_fr = av_inv_q(st->codec->time_base);
4100 AVRational avg_fr = st->avg_frame_rate;
4101 codec_fr.den *= st->codec->ticks_per_frame;
4102 if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
4103 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
4104 fr = codec_fr;
4105 }
4106
4107 return fr;
4108}
4109
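/*
 * Check whether a stream matches a stream specifier. Recognized forms
 * include a plain stream index ("2"), a media type optionally followed by
 * an index within that type ("v", "a:1", "s", "d", "t"), a program id with
 * an optional stream index inside the program ("p:103", "p:103:0"), a
 * stream id ("#257") and the empty string, which matches every stream.
 * Returns 1 on a match, 0 on no match and AVERROR(EINVAL) on an invalid
 * specifier.
 */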
4110int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4111 const char *spec)
4112{
4113 if (*spec <= '9' && *spec >= '0') /* opt:index */
4114 return strtol(spec, NULL, 0) == st->index;
4115 else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4116 *spec == 't') { /* opt:[vasdt] */
4117 enum AVMediaType type;
4118
4119 switch (*spec++) {
4120 case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4121 case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4122 case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4123 case 'd': type = AVMEDIA_TYPE_DATA; break;
4124 case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4125 default: av_assert0(0);
4126 }
4127 if (type != st->codec->codec_type)
4128 return 0;
4129 if (*spec++ == ':') { /* possibly followed by :index */
4130 int i, index = strtol(spec, NULL, 0);
4131 for (i = 0; i < s->nb_streams; i++)
4132 if (s->streams[i]->codec->codec_type == type && index-- == 0)
4133 return i == st->index;
4134 return 0;
4135 }
4136 return 1;
4137 } else if (*spec == 'p' && *(spec + 1) == ':') {
4138 int prog_id, i, j;
4139 char *endptr;
4140 spec += 2;
4141 prog_id = strtol(spec, &endptr, 0);
4142 for (i = 0; i < s->nb_programs; i++) {
4143 if (s->programs[i]->id != prog_id)
4144 continue;
4145
4146 if (*endptr++ == ':') {
4147 int stream_idx = strtol(endptr, NULL, 0);
4148 return stream_idx >= 0 &&
4149 stream_idx < s->programs[i]->nb_stream_indexes &&
4150 st->index == s->programs[i]->stream_index[stream_idx];
4151 }
4152
4153 for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4154 if (st->index == s->programs[i]->stream_index[j])
4155 return 1;
4156 }
4157 return 0;
4158 } else if (*spec == '#') {
4159 int sid;
4160 char *endptr;
4161 sid = strtol(spec + 1, &endptr, 0);
4162 if (!*endptr)
4163 return st->id == sid;
4164 } else if (!*spec) /* empty specifier, matches everything */
4165 return 1;
4166
4167 av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4168 return AVERROR(EINVAL);
4169}
4170
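/*
 * Install hard-coded H.264 SPS/PPS extradata for AVC-Intra streams, which
 * carry no parameter sets in the bitstream. The table to use is picked from
 * the coded width (1920, 1440 or 1280) and, for 1920, from whether the
 * stream is progressive or interlaced; for other sizes the stream is left
 * untouched.
 */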
4171void ff_generate_avci_extradata(AVStream *st)
4172{
4173 static const uint8_t avci100_1080p_extradata[] = {
4174 // SPS
4175 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4176 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4177 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4178 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
4179 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
4180 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
4181 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
4182 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
4183 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4184 // PPS
4185 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4186 0xd0
4187 };
4188 static const uint8_t avci100_1080i_extradata[] = {
4189 // SPS
4190 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4191 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4192 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4193 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
4194 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
4195 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
4196 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
4197 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
4198 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
4199 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
4200 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
4201 // PPS
4202 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4203 0xd0
4204 };
4205 static const uint8_t avci50_1080i_extradata[] = {
4206 // SPS
4207 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4208 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4209 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4210 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
4211 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
4212 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
4213 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
4214 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
4215 0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
4216 0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
4217 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
4218 // PPS
4219 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4220 0x11
4221 };
4222 static const uint8_t avci100_720p_extradata[] = {
4223 // SPS
4224 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4225 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
4226 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
4227 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
4228 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
4229 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
4230 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
4231 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
4232 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
4233 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
4234 // PPS
4235 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
4236 0x11
4237 };
4238 int size = 0;
4239 const uint8_t *data = 0;
4240 if (st->codec->width == 1920) {
4241 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4242 data = avci100_1080p_extradata;
4243 size = sizeof(avci100_1080p_extradata);
4244 } else {
4245 data = avci100_1080i_extradata;
4246 size = sizeof(avci100_1080i_extradata);
4247 }
4248 } else if (st->codec->width == 1440) {
4249 data = avci50_1080i_extradata;
4250 size = sizeof(avci50_1080i_extradata);
4251 } else if (st->codec->width == 1280) {
4252 data = avci100_720p_extradata;
4253 size = sizeof(avci100_720p_extradata);
4254 }
4255 if (!size)
4256 return;
4257 av_freep(&st->codec->extradata);
4258 if (ff_alloc_extradata(st->codec, size))
4259 return;
4260 memcpy(st->codec->extradata, data, size);
4261}
4262