1/*
2 * muxing functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#include "avformat.h"
23#include "avio_internal.h"
24#include "internal.h"
25#include "libavcodec/internal.h"
26#include "libavcodec/bytestream.h"
27#include "libavutil/opt.h"
28#include "libavutil/dict.h"
29#include "libavutil/pixdesc.h"
30#include "libavutil/timestamp.h"
31#include "metadata.h"
32#include "id3v2.h"
33#include "libavutil/avassert.h"
34#include "libavutil/avstring.h"
35#include "libavutil/internal.h"
36#include "libavutil/mathematics.h"
37#include "libavutil/parseutils.h"
38#include "libavutil/time.h"
39#include "riff.h"
40#include "audiointerleave.h"
41#include "url.h"
42#include <stdarg.h>
43#if CONFIG_NETWORK
44#include "network.h"
45#endif
46
47/**
48 * @file
49 * muxing functions for use within libavformat
50 */
51
52/* fraction handling */
53
54/**
55 * f = val + (num / den) + 0.5.
56 *
 57 * 'num' is normalized so that 0 <= num < den.
58 *
59 * @param f fractional number
60 * @param val integer value
61 * @param num must be >= 0
62 * @param den must be >= 1
63 */
64static void frac_init(FFFrac *f, int64_t val, int64_t num, int64_t den)
65{
66 num += (den >> 1);
67 if (num >= den) {
68 val += num / den;
69 num = num % den;
70 }
71 f->val = val;
72 f->num = num;
73 f->den = den;
74}
75
76/**
77 * Fractional addition to f: f = f + (incr / f->den).
78 *
79 * @param f fractional number
80 * @param incr increment, can be positive or negative
81 */
82static void frac_add(FFFrac *f, int64_t incr)
83{
84 int64_t num, den;
85
86 num = f->num + incr;
87 den = f->den;
88 if (num < 0) {
89 f->val += num / den;
90 num = num % den;
91 if (num < 0) {
92 num += den;
93 f->val--;
94 }
95 } else if (num >= den) {
96 f->val += num / den;
97 num = num % den;
98 }
99 f->num = num;
100}
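/* Worked example: frac_init(f, 3, 7, 4) first adds den/2 for rounding (7 + 2 = 9),
 * then normalizes: f->val = 3 + 9/4 = 5, f->num = 9 % 4 = 1, f->den = 4, so f
 * represents 3 + 7/4 + 0.5 = 5.25. A subsequent frac_add(f, 6) gives num = 7 >= den,
 * hence f->val = 6 and f->num = 3. This is how priv_pts accumulates fractional
 * timestamp increments further below without rounding drift. */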
101
102AVRational ff_choose_timebase(AVFormatContext *s, AVStream *st, int min_precision)
103{
104 AVRational q;
105 int j;
106
107 q = st->time_base;
108
109 for (j=2; j<14; j+= 1+(j>2))
110 while (q.den / q.num < min_precision && q.num % j == 0)
111 q.num /= j;
112 while (q.den / q.num < min_precision && q.den < (1<<24))
113 q.den <<= 1;
114
115 return q;
116}
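/* Worked example: with st->time_base = 1001/30000 and min_precision = 100,
 * 30000/1001 is below the requested precision, so the factor loop divides num
 * by 7 (1001 = 7*11*13), yielding 143/30000, for which 30000/143 >= 100; the
 * power-of-two doubling of den is then not needed and 143/30000 is returned. */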
117
118enum AVChromaLocation ff_choose_chroma_location(AVFormatContext *s, AVStream *st)
119{
120 AVCodecParameters *par = st->codecpar;
121 const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(par->format);
122
123 if (par->chroma_location != AVCHROMA_LOC_UNSPECIFIED)
124 return par->chroma_location;
125
126 if (pix_desc) {
127 if (pix_desc->log2_chroma_h == 0) {
128 return AVCHROMA_LOC_TOPLEFT;
129 } else if (pix_desc->log2_chroma_w == 1 && pix_desc->log2_chroma_h == 1) {
130 if (par->field_order == AV_FIELD_UNKNOWN || par->field_order == AV_FIELD_PROGRESSIVE) {
131 switch (par->codec_id) {
132 case AV_CODEC_ID_MJPEG:
133 case AV_CODEC_ID_MPEG1VIDEO: return AVCHROMA_LOC_CENTER;
134 }
135 }
136 if (par->field_order == AV_FIELD_UNKNOWN || par->field_order != AV_FIELD_PROGRESSIVE) {
137 switch (par->codec_id) {
138 case AV_CODEC_ID_MPEG2VIDEO: return AVCHROMA_LOC_LEFT;
139 }
140 }
141 }
142 }
143
144 return AVCHROMA_LOC_UNSPECIFIED;
145
146}
147
148int avformat_alloc_output_context2(AVFormatContext **avctx, AVOutputFormat *oformat,
149 const char *format, const char *filename)
150{
151 AVFormatContext *s = avformat_alloc_context();
152 int ret = 0;
153
154 *avctx = NULL;
155 if (!s)
156 goto nomem;
157
158 if (!oformat) {
159 if (format) {
160 oformat = av_guess_format(format, NULL, NULL);
161 if (!oformat) {
162 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
163 ret = AVERROR(EINVAL);
164 goto error;
165 }
166 } else {
167 oformat = av_guess_format(NULL, filename, NULL);
168 if (!oformat) {
169 ret = AVERROR(EINVAL);
170 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
171 filename);
172 goto error;
173 }
174 }
175 }
176
177 s->oformat = oformat;
178 if (s->oformat->priv_data_size > 0) {
179 s->priv_data = av_mallocz(s->oformat->priv_data_size);
180 if (!s->priv_data)
181 goto nomem;
182 if (s->oformat->priv_class) {
183 *(const AVClass**)s->priv_data= s->oformat->priv_class;
184 av_opt_set_defaults(s->priv_data);
185 }
186 } else
187 s->priv_data = NULL;
188
189 if (filename)
190 av_strlcpy(s->filename, filename, sizeof(s->filename));
191 *avctx = s;
192 return 0;
193nomem:
194 av_log(s, AV_LOG_ERROR, "Out of memory\n");
195 ret = AVERROR(ENOMEM);
196error:
197 avformat_free_context(s);
198 return ret;
199}
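/* Typical use (sketch, error handling abbreviated): guess the muxer from the
 * file name, falling back to an explicit format name:
 *
 *     AVFormatContext *oc = NULL;
 *     if (avformat_alloc_output_context2(&oc, NULL, NULL, "out.mkv") < 0)
 *         avformat_alloc_output_context2(&oc, NULL, "matroska", NULL);
 *
 * On success *avctx owns the new context, to be released with
 * avformat_free_context(); on failure *avctx is set to NULL. */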
200
201static int validate_codec_tag(AVFormatContext *s, AVStream *st)
202{
203 const AVCodecTag *avctag;
204 int n;
205 enum AVCodecID id = AV_CODEC_ID_NONE;
206 int64_t tag = -1;
207
208 /**
209 * Check that tag + id is in the table
210 * If neither is in the table -> OK
211 * If tag is in the table with another id -> FAIL
212 * If id is in the table with another tag -> FAIL unless strict < normal
213 */
214 for (n = 0; s->oformat->codec_tag[n]; n++) {
215 avctag = s->oformat->codec_tag[n];
216 while (avctag->id != AV_CODEC_ID_NONE) {
217 if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codecpar->codec_tag)) {
218 id = avctag->id;
219 if (id == st->codecpar->codec_id)
220 return 1;
221 }
222 if (avctag->id == st->codecpar->codec_id)
223 tag = avctag->tag;
224 avctag++;
225 }
226 }
227 if (id != AV_CODEC_ID_NONE)
228 return 0;
229 if (tag >= 0 && (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
230 return 0;
231 return 1;
232}
233
234
235static int init_muxer(AVFormatContext *s, AVDictionary **options)
236{
237 int ret = 0, i;
238 AVStream *st;
239 AVDictionary *tmp = NULL;
240 AVCodecParameters *par = NULL;
241 AVOutputFormat *of = s->oformat;
242 const AVCodecDescriptor *desc;
243 AVDictionaryEntry *e;
244
245 if (options)
246 av_dict_copy(&tmp, *options, 0);
247
248 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
249 goto fail;
250 if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
251 (ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
252 goto fail;
253
254#if FF_API_LAVF_AVCTX
255FF_DISABLE_DEPRECATION_WARNINGS
256 if (s->nb_streams && s->streams[0]->codec->flags & AV_CODEC_FLAG_BITEXACT) {
257 if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
258#if FF_API_LAVF_BITEXACT
259 av_log(s, AV_LOG_WARNING,
260 "Setting the AVFormatContext to bitexact mode, because "
261 "the AVCodecContext is in that mode. This behavior will "
262 "change in the future. To keep the current behavior, set "
263 "AVFormatContext.flags |= AVFMT_FLAG_BITEXACT.\n");
264 s->flags |= AVFMT_FLAG_BITEXACT;
265#else
266 av_log(s, AV_LOG_WARNING,
267                   "The AVFormatContext is not set to bitexact mode, only "
268 "the AVCodecContext. If this is not intended, set "
269 "AVFormatContext.flags |= AVFMT_FLAG_BITEXACT.\n");
270#endif
271 }
272 }
273FF_ENABLE_DEPRECATION_WARNINGS
274#endif
275
276 // some sanity checks
277 if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
278 av_log(s, AV_LOG_ERROR, "No streams to mux were specified\n");
279 ret = AVERROR(EINVAL);
280 goto fail;
281 }
282
283 for (i = 0; i < s->nb_streams; i++) {
284 st = s->streams[i];
285 par = st->codecpar;
286
287#if FF_API_LAVF_CODEC_TB && FF_API_LAVF_AVCTX
288FF_DISABLE_DEPRECATION_WARNINGS
289 if (!st->time_base.num && st->codec->time_base.num) {
290 av_log(s, AV_LOG_WARNING, "Using AVStream.codec.time_base as a "
291 "timebase hint to the muxer is deprecated. Set "
292 "AVStream.time_base instead.\n");
293 avpriv_set_pts_info(st, 64, st->codec->time_base.num, st->codec->time_base.den);
294 }
295FF_ENABLE_DEPRECATION_WARNINGS
296#endif
297
298#if FF_API_LAVF_AVCTX
299FF_DISABLE_DEPRECATION_WARNINGS
300 if (st->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN &&
301 st->codec->codec_type != AVMEDIA_TYPE_UNKNOWN) {
302 av_log(s, AV_LOG_WARNING, "Using AVStream.codec to pass codec "
303 "parameters to muxers is deprecated, use AVStream.codecpar "
304 "instead.\n");
305 ret = avcodec_parameters_from_context(st->codecpar, st->codec);
306 if (ret < 0)
307 goto fail;
308 }
309FF_ENABLE_DEPRECATION_WARNINGS
310#endif
311
312 if (!st->time_base.num) {
313 /* fall back on the default timebase values */
314 if (par->codec_type == AVMEDIA_TYPE_AUDIO && par->sample_rate)
315 avpriv_set_pts_info(st, 64, 1, par->sample_rate);
316 else
317 avpriv_set_pts_info(st, 33, 1, 90000);
318 }
319
320 switch (par->codec_type) {
321 case AVMEDIA_TYPE_AUDIO:
322 if (par->sample_rate <= 0) {
323 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
324 ret = AVERROR(EINVAL);
325 goto fail;
326 }
327 if (!par->block_align)
328 par->block_align = par->channels *
329 av_get_bits_per_sample(par->codec_id) >> 3;
330 break;
331 case AVMEDIA_TYPE_VIDEO:
332 if ((par->width <= 0 || par->height <= 0) &&
333 !(of->flags & AVFMT_NODIMENSIONS)) {
334 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
335 ret = AVERROR(EINVAL);
336 goto fail;
337 }
338 if (av_cmp_q(st->sample_aspect_ratio, par->sample_aspect_ratio)
339 && fabs(av_q2d(st->sample_aspect_ratio) - av_q2d(par->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
340 ) {
341 if (st->sample_aspect_ratio.num != 0 &&
342 st->sample_aspect_ratio.den != 0 &&
343 par->sample_aspect_ratio.num != 0 &&
344 par->sample_aspect_ratio.den != 0) {
345 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
346 "(%d/%d) and encoder layer (%d/%d)\n",
347 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
348 par->sample_aspect_ratio.num,
349 par->sample_aspect_ratio.den);
350 ret = AVERROR(EINVAL);
351 goto fail;
352 }
353 }
354 break;
355 }
356
357 desc = avcodec_descriptor_get(par->codec_id);
358 if (desc && desc->props & AV_CODEC_PROP_REORDER)
359 st->internal->reorder = 1;
360
361 if (of->codec_tag) {
362 if ( par->codec_tag
363 && par->codec_id == AV_CODEC_ID_RAWVIDEO
364 && ( av_codec_get_tag(of->codec_tag, par->codec_id) == 0
365 || av_codec_get_tag(of->codec_tag, par->codec_id) == MKTAG('r', 'a', 'w', ' '))
366 && !validate_codec_tag(s, st)) {
367 // the current rawvideo encoding system ends up setting
368 // the wrong codec_tag for avi/mov, we override it here
369 par->codec_tag = 0;
370 }
371 if (par->codec_tag) {
372 if (!validate_codec_tag(s, st)) {
373 const uint32_t otag = av_codec_get_tag(s->oformat->codec_tag, par->codec_id);
374 av_log(s, AV_LOG_ERROR,
375 "Tag %s incompatible with output codec id '%d' (%s)\n",
376 av_fourcc2str(par->codec_tag), par->codec_id, av_fourcc2str(otag));
377 ret = AVERROR_INVALIDDATA;
378 goto fail;
379 }
380 } else
381 par->codec_tag = av_codec_get_tag(of->codec_tag, par->codec_id);
382 }
383
384 if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT)
385 s->internal->nb_interleaved_streams++;
386 }
387
388 if (!s->priv_data && of->priv_data_size > 0) {
389 s->priv_data = av_mallocz(of->priv_data_size);
390 if (!s->priv_data) {
391 ret = AVERROR(ENOMEM);
392 goto fail;
393 }
394 if (of->priv_class) {
395 *(const AVClass **)s->priv_data = of->priv_class;
396 av_opt_set_defaults(s->priv_data);
397 if ((ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
398 goto fail;
399 }
400 }
401
402 /* set muxer identification string */
403 if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
404 av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
405 } else {
406 av_dict_set(&s->metadata, "encoder", NULL, 0);
407 }
408
409 for (e = NULL; e = av_dict_get(s->metadata, "encoder-", e, AV_DICT_IGNORE_SUFFIX); ) {
410 av_dict_set(&s->metadata, e->key, NULL, 0);
411 }
412
413 if (options) {
414 av_dict_free(options);
415 *options = tmp;
416 }
417
418 if (s->oformat->init) {
419 if ((ret = s->oformat->init(s)) < 0) {
420 if (s->oformat->deinit)
421 s->oformat->deinit(s);
422 return ret;
423 }
424 return ret == 0;
425 }
426
427 return 0;
428
429fail:
430 av_dict_free(&tmp);
431 return ret;
432}
433
434static int init_pts(AVFormatContext *s)
435{
436 int i;
437 AVStream *st;
438
439 /* init PTS generation */
440 for (i = 0; i < s->nb_streams; i++) {
441 int64_t den = AV_NOPTS_VALUE;
442 st = s->streams[i];
443
444 switch (st->codecpar->codec_type) {
445 case AVMEDIA_TYPE_AUDIO:
446 den = (int64_t)st->time_base.num * st->codecpar->sample_rate;
447 break;
448 case AVMEDIA_TYPE_VIDEO:
449 den = (int64_t)st->time_base.num * st->time_base.den;
450 break;
451 default:
452 break;
453 }
454
455 if (!st->priv_pts)
456 st->priv_pts = av_mallocz(sizeof(*st->priv_pts));
457 if (!st->priv_pts)
458 return AVERROR(ENOMEM);
459
460 if (den != AV_NOPTS_VALUE) {
461 if (den <= 0)
462 return AVERROR_INVALIDDATA;
463
464 frac_init(st->priv_pts, 0, 0, den);
465 }
466 }
467
468 return 0;
469}
470
471static int write_header_internal(AVFormatContext *s)
472{
473 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
474 avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_HEADER);
475 if (s->oformat->write_header) {
476 int ret = s->oformat->write_header(s);
477 if (ret >= 0 && s->pb && s->pb->error < 0)
478 ret = s->pb->error;
479 s->internal->write_header_ret = ret;
480 if (ret < 0)
481 return ret;
482 if (s->flush_packets && s->pb && s->pb->error >= 0 && s->flags & AVFMT_FLAG_FLUSH_PACKETS)
483 avio_flush(s->pb);
484 }
485 s->internal->header_written = 1;
486 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
487 avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_UNKNOWN);
488 return 0;
489}
490
491int avformat_init_output(AVFormatContext *s, AVDictionary **options)
492{
493 int ret = 0;
494
495 if ((ret = init_muxer(s, options)) < 0)
496 return ret;
497
498 s->internal->initialized = 1;
499 s->internal->streams_initialized = ret;
500
501 if (s->oformat->init && ret) {
502 if ((ret = init_pts(s)) < 0)
503 return ret;
504
505 if (s->avoid_negative_ts < 0) {
506 av_assert2(s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO);
507 if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
508 s->avoid_negative_ts = 0;
509 } else
510 s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE;
511 }
512
513 return AVSTREAM_INIT_IN_INIT_OUTPUT;
514 }
515
516 return AVSTREAM_INIT_IN_WRITE_HEADER;
517}
518
519int avformat_write_header(AVFormatContext *s, AVDictionary **options)
520{
521 int ret = 0;
522 int already_initialized = s->internal->initialized;
523 int streams_already_initialized = s->internal->streams_initialized;
524
525 if (!already_initialized)
526 if ((ret = avformat_init_output(s, options)) < 0)
527 return ret;
528
529 if (!(s->oformat->check_bitstream && s->flags & AVFMT_FLAG_AUTO_BSF)) {
530 ret = write_header_internal(s);
531 if (ret < 0)
532 goto fail;
533 }
534
535 if (!s->internal->streams_initialized) {
536 if ((ret = init_pts(s)) < 0)
537 goto fail;
538
539 if (s->avoid_negative_ts < 0) {
540 av_assert2(s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO);
541 if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
542 s->avoid_negative_ts = 0;
543 } else
544 s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE;
545 }
546 }
547
548 return streams_already_initialized;
549
550fail:
551 if (s->oformat->deinit)
552 s->oformat->deinit(s);
553 return ret;
554}
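/* Sketch of the intended call order (assuming streams are already set up and,
 * for non-AVFMT_NOFILE muxers, s->pb has been opened):
 *
 *     ret = avformat_write_header(oc, &opts);  // header may be deferred, see above
 *     while (more_packets)
 *         av_interleaved_write_frame(oc, pkt); // or av_write_frame()
 *     av_write_trailer(oc);                    // also writes a still-pending header
 *
 * The positive return values AVSTREAM_INIT_IN_INIT_OUTPUT and
 * AVSTREAM_INIT_IN_WRITE_HEADER tell the caller where the streams were (or will
 * be) fully initialized. */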
555
556#define AV_PKT_FLAG_UNCODED_FRAME 0x2000
557
558/* Note: using sizeof(AVFrame) from outside lavu is unsafe in general, but
559 it is only being used internally to this file as a consistency check.
560 The value is chosen to be very unlikely to appear on its own and to cause
561 immediate failure if used anywhere as a real size. */
562#define UNCODED_FRAME_PACKET_SIZE (INT_MIN / 3 * 2 + (int)sizeof(AVFrame))
563
564
565#if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
566FF_DISABLE_DEPRECATION_WARNINGS
567//FIXME merge with compute_pkt_fields
568static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *pkt)
569{
570 int delay = FFMAX(st->codecpar->video_delay, st->internal->avctx->max_b_frames > 0);
571 int num, den, i;
572 int frame_size;
573
574 if (!s->internal->missing_ts_warning &&
575 !(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
576 (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC) || (st->disposition & AV_DISPOSITION_TIMED_THUMBNAILS)) &&
577 (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE)) {
578 av_log(s, AV_LOG_WARNING,
579 "Timestamps are unset in a packet for stream %d. "
580 "This is deprecated and will stop working in the future. "
581 "Fix your code to set the timestamps properly\n", st->index);
582 s->internal->missing_ts_warning = 1;
583 }
584
585 if (s->debug & FF_FDEBUG_TS)
586 av_log(s, AV_LOG_TRACE, "compute_muxer_pkt_fields: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
587 av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
588
589 if (pkt->duration < 0 && st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
590 av_log(s, AV_LOG_WARNING, "Packet with invalid duration %"PRId64" in stream %d\n",
591 pkt->duration, pkt->stream_index);
592 pkt->duration = 0;
593 }
594
595 /* duration field */
596 if (pkt->duration == 0) {
597 ff_compute_frame_duration(s, &num, &den, st, NULL, pkt);
598 if (den && num) {
599 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
600 }
601 }
602
603 if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0)
604 pkt->pts = pkt->dts;
605
606 //XXX/FIXME this is a temporary hack until all encoders output pts
607 if ((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay) {
608 static int warned;
609 if (!warned) {
610 av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
611 warned = 1;
612 }
613 pkt->dts =
614// pkt->pts= st->cur_dts;
615 pkt->pts = st->priv_pts->val;
616 }
617
618 //calculate dts from pts
619 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
620 st->pts_buffer[0] = pkt->pts;
621 for (i = 1; i < delay + 1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
622 st->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration;
623 for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
624 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
625
626 pkt->dts = st->pts_buffer[0];
627 }
628
629 if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
630 ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
631 st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE &&
632 st->codecpar->codec_type != AVMEDIA_TYPE_DATA &&
633 st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
634 av_log(s, AV_LOG_ERROR,
635 "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
636 st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
637 return AVERROR(EINVAL);
638 }
639 if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
640 av_log(s, AV_LOG_ERROR,
641 "pts (%s) < dts (%s) in stream %d\n",
642 av_ts2str(pkt->pts), av_ts2str(pkt->dts),
643 st->index);
644 return AVERROR(EINVAL);
645 }
646
647 if (s->debug & FF_FDEBUG_TS)
648 av_log(s, AV_LOG_TRACE, "av_write_frame: pts2:%s dts2:%s\n",
649 av_ts2str(pkt->pts), av_ts2str(pkt->dts));
650
651 st->cur_dts = pkt->dts;
652 st->priv_pts->val = pkt->dts;
653
654 /* update pts */
655 switch (st->codecpar->codec_type) {
656 case AVMEDIA_TYPE_AUDIO:
657 frame_size = (pkt->flags & AV_PKT_FLAG_UNCODED_FRAME) ?
658 ((AVFrame *)pkt->data)->nb_samples :
659 av_get_audio_frame_duration(st->codec, pkt->size);
660
661 /* HACK/FIXME, we skip the initial 0 size packets as they are most
662 * likely equal to the encoder delay, but it would be better if we
663 * had the real timestamps from the encoder */
664 if (frame_size >= 0 && (pkt->size || st->priv_pts->num != st->priv_pts->den >> 1 || st->priv_pts->val)) {
665 frac_add(st->priv_pts, (int64_t)st->time_base.den * frame_size);
666 }
667 break;
668 case AVMEDIA_TYPE_VIDEO:
669 frac_add(st->priv_pts, (int64_t)st->time_base.den * st->time_base.num);
670 break;
671 }
672 return 0;
673}
674FF_ENABLE_DEPRECATION_WARNINGS
675#endif
676
677/**
678 * Make timestamps non-negative, move side data from the payload to the internal
679 * struct, call the muxer, and restore the side data.
680 *
681 * FIXME: this function should NEVER get undefined pts/dts except when
682 * AVFMT_NOTIMESTAMPS is set.
683 * Those additional safety checks should be dropped once the correct checks
684 * are set in the callers.
685 */
686static int write_packet(AVFormatContext *s, AVPacket *pkt)
687{
688 int ret, did_split;
689 int64_t pts_backup, dts_backup;
690
691 pts_backup = pkt->pts;
692 dts_backup = pkt->dts;
693
694 // If the timestamp offsetting below is adjusted, adjust
695 // ff_interleaved_peek similarly.
696 if (s->output_ts_offset) {
697 AVStream *st = s->streams[pkt->stream_index];
698 int64_t offset = av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);
699
700 if (pkt->dts != AV_NOPTS_VALUE)
701 pkt->dts += offset;
702 if (pkt->pts != AV_NOPTS_VALUE)
703 pkt->pts += offset;
704 }
705
706 if (s->avoid_negative_ts > 0) {
707 AVStream *st = s->streams[pkt->stream_index];
708 int64_t offset = st->mux_ts_offset;
709 int64_t ts = s->internal->avoid_negative_ts_use_pts ? pkt->pts : pkt->dts;
710
711 if (s->internal->offset == AV_NOPTS_VALUE && ts != AV_NOPTS_VALUE &&
712 (ts < 0 || s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO)) {
713 s->internal->offset = -ts;
714 s->internal->offset_timebase = st->time_base;
715 }
716
717 if (s->internal->offset != AV_NOPTS_VALUE && !offset) {
718 offset = st->mux_ts_offset =
719 av_rescale_q_rnd(s->internal->offset,
720 s->internal->offset_timebase,
721 st->time_base,
722 AV_ROUND_UP);
723 }
724
725 if (pkt->dts != AV_NOPTS_VALUE)
726 pkt->dts += offset;
727 if (pkt->pts != AV_NOPTS_VALUE)
728 pkt->pts += offset;
729
730 if (s->internal->avoid_negative_ts_use_pts) {
731 if (pkt->pts != AV_NOPTS_VALUE && pkt->pts < 0) {
732 av_log(s, AV_LOG_WARNING, "failed to avoid negative "
733 "pts %s in stream %d.\n"
734 "Try -avoid_negative_ts 1 as a possible workaround.\n",
735                    av_ts2str(pkt->pts),
736 pkt->stream_index
737 );
738 }
739 } else {
740 av_assert2(pkt->dts == AV_NOPTS_VALUE || pkt->dts >= 0 || s->max_interleave_delta > 0);
741 if (pkt->dts != AV_NOPTS_VALUE && pkt->dts < 0) {
742 av_log(s, AV_LOG_WARNING,
743 "Packets poorly interleaved, failed to avoid negative "
744 "timestamp %s in stream %d.\n"
745 "Try -max_interleave_delta 0 as a possible workaround.\n",
746 av_ts2str(pkt->dts),
747 pkt->stream_index
748 );
749 }
750 }
751 }
752
753#if FF_API_LAVF_MERGE_SD
754FF_DISABLE_DEPRECATION_WARNINGS
755 did_split = av_packet_split_side_data(pkt);
756FF_ENABLE_DEPRECATION_WARNINGS
757#endif
758
759 if (!s->internal->header_written) {
760 ret = s->internal->write_header_ret ? s->internal->write_header_ret : write_header_internal(s);
761 if (ret < 0)
762 goto fail;
763 }
764
765 if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
766 AVFrame *frame = (AVFrame *)pkt->data;
767 av_assert0(pkt->size == UNCODED_FRAME_PACKET_SIZE);
768 ret = s->oformat->write_uncoded_frame(s, pkt->stream_index, &frame, 0);
769 av_frame_free(&frame);
770 } else {
771 ret = s->oformat->write_packet(s, pkt);
772 }
773
774 if (s->pb && ret >= 0) {
775 if (s->flush_packets && s->flags & AVFMT_FLAG_FLUSH_PACKETS)
776 avio_flush(s->pb);
777 if (s->pb->error < 0)
778 ret = s->pb->error;
779 }
780
781fail:
782#if FF_API_LAVF_MERGE_SD
783FF_DISABLE_DEPRECATION_WARNINGS
784 if (did_split)
785 av_packet_merge_side_data(pkt);
786FF_ENABLE_DEPRECATION_WARNINGS
787#endif
788
789 if (ret < 0) {
790 pkt->pts = pts_backup;
791 pkt->dts = dts_backup;
792 }
793
794 return ret;
795}
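/* Example of the shifting above, with made-up numbers: under
 * AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE, a first packet with dts = -200 in a
 * 1/1000 time base sets s->internal->offset to 200 in that time base; each
 * stream then derives mux_ts_offset from it (rescaled, rounded up), so every
 * timestamp subsequently handed to the muxer comes out >= 0. */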
796
797static int check_packet(AVFormatContext *s, AVPacket *pkt)
798{
799 if (!pkt)
800 return 0;
801
802 if (pkt->stream_index < 0 || pkt->stream_index >= s->nb_streams) {
803 av_log(s, AV_LOG_ERROR, "Invalid packet stream index: %d\n",
804 pkt->stream_index);
805 return AVERROR(EINVAL);
806 }
807
808 if (s->streams[pkt->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_ATTACHMENT) {
809 av_log(s, AV_LOG_ERROR, "Received a packet for an attachment stream.\n");
810 return AVERROR(EINVAL);
811 }
812
813 return 0;
814}
815
816static int prepare_input_packet(AVFormatContext *s, AVPacket *pkt)
817{
818 int ret;
819
820 ret = check_packet(s, pkt);
821 if (ret < 0)
822 return ret;
823
824#if !FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
825 /* sanitize the timestamps */
826 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
827 AVStream *st = s->streams[pkt->stream_index];
828
829 /* when there is no reordering (so dts is equal to pts), but
830 * only one of them is set, set the other as well */
831 if (!st->internal->reorder) {
832 if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE)
833 pkt->pts = pkt->dts;
834 if (pkt->dts == AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE)
835 pkt->dts = pkt->pts;
836 }
837
838 /* check that the timestamps are set */
839 if (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE) {
840 av_log(s, AV_LOG_ERROR,
841 "Timestamps are unset in a packet for stream %d\n", st->index);
842 return AVERROR(EINVAL);
843 }
844
845 /* check that the dts are increasing (or at least non-decreasing,
846         * if the format allows it) */
847 if (st->cur_dts != AV_NOPTS_VALUE &&
848 ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) ||
849 st->cur_dts > pkt->dts)) {
850 av_log(s, AV_LOG_ERROR,
851 "Application provided invalid, non monotonically increasing "
852 "dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
853 st->index, st->cur_dts, pkt->dts);
854 return AVERROR(EINVAL);
855 }
856
857 if (pkt->pts < pkt->dts) {
858 av_log(s, AV_LOG_ERROR, "pts %" PRId64 " < dts %" PRId64 " in stream %d\n",
859 pkt->pts, pkt->dts, st->index);
860 return AVERROR(EINVAL);
861 }
862 }
863#endif
864
865 return 0;
866}
867
868static int do_packet_auto_bsf(AVFormatContext *s, AVPacket *pkt) {
869 AVStream *st = s->streams[pkt->stream_index];
870 int i, ret;
871
872 if (!(s->flags & AVFMT_FLAG_AUTO_BSF))
873 return 1;
874
875 if (s->oformat->check_bitstream) {
876 if (!st->internal->bitstream_checked) {
877 if ((ret = s->oformat->check_bitstream(s, pkt)) < 0)
878 return ret;
879 else if (ret == 1)
880 st->internal->bitstream_checked = 1;
881 }
882 }
883
884#if FF_API_LAVF_MERGE_SD
885FF_DISABLE_DEPRECATION_WARNINGS
886 if (st->internal->nb_bsfcs) {
887 ret = av_packet_split_side_data(pkt);
888 if (ret < 0)
889 av_log(s, AV_LOG_WARNING, "Failed to split side data before bitstream filter\n");
890 }
891FF_ENABLE_DEPRECATION_WARNINGS
892#endif
893
894 for (i = 0; i < st->internal->nb_bsfcs; i++) {
895 AVBSFContext *ctx = st->internal->bsfcs[i];
896 if (i > 0) {
897 AVBSFContext* prev_ctx = st->internal->bsfcs[i - 1];
898 if (prev_ctx->par_out->extradata_size != ctx->par_in->extradata_size) {
899 if ((ret = avcodec_parameters_copy(ctx->par_in, prev_ctx->par_out)) < 0)
900 return ret;
901 }
902 }
903 // TODO: when any bitstream filter requires flushing at EOF, we'll need to
904 // flush each stream's BSF chain on write_trailer.
905 if ((ret = av_bsf_send_packet(ctx, pkt)) < 0) {
906 av_log(ctx, AV_LOG_ERROR,
907 "Failed to send packet to filter %s for stream %d\n",
908 ctx->filter->name, pkt->stream_index);
909 return ret;
910 }
911 // TODO: when any automatically-added bitstream filter is generating multiple
912 // output packets for a single input one, we'll need to call this in a loop
913 // and write each output packet.
914 if ((ret = av_bsf_receive_packet(ctx, pkt)) < 0) {
915 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
916 return 0;
917 av_log(ctx, AV_LOG_ERROR,
918                   "Failed to receive packet from filter %s for stream %d\n",
919 ctx->filter->name, pkt->stream_index);
920 return ret;
921 }
922 if (i == st->internal->nb_bsfcs - 1) {
923 if (ctx->par_out->extradata_size != st->codecpar->extradata_size) {
924 if ((ret = avcodec_parameters_copy(st->codecpar, ctx->par_out)) < 0)
925 return ret;
926 }
927 }
928 }
929 return 1;
930}
931
932int av_write_frame(AVFormatContext *s, AVPacket *pkt)
933{
934 int ret;
935
936 ret = prepare_input_packet(s, pkt);
937 if (ret < 0)
938 return ret;
939
940 if (!pkt) {
941 if (s->oformat->flags & AVFMT_ALLOW_FLUSH) {
942 if (!s->internal->header_written) {
943 ret = s->internal->write_header_ret ? s->internal->write_header_ret : write_header_internal(s);
944 if (ret < 0)
945 return ret;
946 }
947 ret = s->oformat->write_packet(s, NULL);
948 if (s->flush_packets && s->pb && s->pb->error >= 0 && s->flags & AVFMT_FLAG_FLUSH_PACKETS)
949 avio_flush(s->pb);
950 if (ret >= 0 && s->pb && s->pb->error < 0)
951 ret = s->pb->error;
952 return ret;
953 }
954 return 1;
955 }
956
957 ret = do_packet_auto_bsf(s, pkt);
958 if (ret <= 0)
959 return ret;
960
961#if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
962 ret = compute_muxer_pkt_fields(s, s->streams[pkt->stream_index], pkt);
963
964 if (ret < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
965 return ret;
966#endif
967
968 ret = write_packet(s, pkt);
969 if (ret >= 0 && s->pb && s->pb->error < 0)
970 ret = s->pb->error;
971
972 if (ret >= 0)
973 s->streams[pkt->stream_index]->nb_frames++;
974 return ret;
975}
976
977#define CHUNK_START 0x1000
978
979int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
980 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
981{
982 int ret;
983 AVPacketList **next_point, *this_pktl;
984 AVStream *st = s->streams[pkt->stream_index];
985 int chunked = s->max_chunk_size || s->max_chunk_duration;
986
987 this_pktl = av_mallocz(sizeof(AVPacketList));
988 if (!this_pktl)
989 return AVERROR(ENOMEM);
990 if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
991 av_assert0(pkt->size == UNCODED_FRAME_PACKET_SIZE);
992 av_assert0(((AVFrame *)pkt->data)->buf);
993 this_pktl->pkt = *pkt;
994 pkt->buf = NULL;
995 pkt->side_data = NULL;
996 pkt->side_data_elems = 0;
997 } else {
998 if ((ret = av_packet_ref(&this_pktl->pkt, pkt)) < 0) {
999 av_free(this_pktl);
1000 return ret;
1001 }
1002 }
1003
1004 if (s->streams[pkt->stream_index]->last_in_packet_buffer) {
1005 next_point = &(st->last_in_packet_buffer->next);
1006 } else {
1007 next_point = &s->internal->packet_buffer;
1008 }
1009
1010 if (chunked) {
1011 uint64_t max= av_rescale_q_rnd(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base, AV_ROUND_UP);
1012 st->interleaver_chunk_size += pkt->size;
1013 st->interleaver_chunk_duration += pkt->duration;
1014 if ( (s->max_chunk_size && st->interleaver_chunk_size > s->max_chunk_size)
1015 || (max && st->interleaver_chunk_duration > max)) {
1016 st->interleaver_chunk_size = 0;
1017 this_pktl->pkt.flags |= CHUNK_START;
1018 if (max && st->interleaver_chunk_duration > max) {
1019 int64_t syncoffset = (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)*max/2;
1020 int64_t syncto = av_rescale(pkt->dts + syncoffset, 1, max)*max - syncoffset;
1021
1022 st->interleaver_chunk_duration += (pkt->dts - syncto)/8 - max;
1023 } else
1024 st->interleaver_chunk_duration = 0;
1025 }
1026 }
1027 if (*next_point) {
1028 if (chunked && !(this_pktl->pkt.flags & CHUNK_START))
1029 goto next_non_null;
1030
1031 if (compare(s, &s->internal->packet_buffer_end->pkt, pkt)) {
1032 while ( *next_point
1033 && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
1034 || !compare(s, &(*next_point)->pkt, pkt)))
1035 next_point = &(*next_point)->next;
1036 if (*next_point)
1037 goto next_non_null;
1038 } else {
1039 next_point = &(s->internal->packet_buffer_end->next);
1040 }
1041 }
1042 av_assert1(!*next_point);
1043
1044 s->internal->packet_buffer_end = this_pktl;
1045next_non_null:
1046
1047 this_pktl->next = *next_point;
1048
1049 s->streams[pkt->stream_index]->last_in_packet_buffer =
1050 *next_point = this_pktl;
1051
1052 av_packet_unref(pkt);
1053
1054 return 0;
1055}
1056
1057static int interleave_compare_dts(AVFormatContext *s, AVPacket *next,
1058 AVPacket *pkt)
1059{
1060 AVStream *st = s->streams[pkt->stream_index];
1061 AVStream *st2 = s->streams[next->stream_index];
1062 int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
1063 st->time_base);
1064 if (s->audio_preload && ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) != (st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO))) {
1065 int64_t ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st ->codecpar->codec_type == AVMEDIA_TYPE_AUDIO);
1066 int64_t ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - s->audio_preload*(st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO);
1067 if (ts == ts2) {
1068 ts= ( pkt ->dts* st->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st ->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)* st->time_base.den)*st2->time_base.den
1069 -( next->dts*st2->time_base.num*AV_TIME_BASE - s->audio_preload*(int64_t)(st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)*st2->time_base.den)* st->time_base.den;
1070 ts2=0;
1071 }
1072 comp= (ts>ts2) - (ts<ts2);
1073 }
1074
1075 if (comp == 0)
1076 return pkt->stream_index < next->stream_index;
1077 return comp > 0;
1078}
1079
1080int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
1081 AVPacket *pkt, int flush)
1082{
1083 AVPacketList *pktl;
1084 int stream_count = 0;
1085 int noninterleaved_count = 0;
1086 int i, ret;
1087 int eof = flush;
1088
1089 if (pkt) {
1090 if ((ret = ff_interleave_add_packet(s, pkt, interleave_compare_dts)) < 0)
1091 return ret;
1092 }
1093
1094 for (i = 0; i < s->nb_streams; i++) {
1095 if (s->streams[i]->last_in_packet_buffer) {
1096 ++stream_count;
1097 } else if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_ATTACHMENT &&
1098 s->streams[i]->codecpar->codec_id != AV_CODEC_ID_VP8 &&
1099 s->streams[i]->codecpar->codec_id != AV_CODEC_ID_VP9) {
1100 ++noninterleaved_count;
1101 }
1102 }
1103
1104 if (s->internal->nb_interleaved_streams == stream_count)
1105 flush = 1;
1106
1107 if (s->max_interleave_delta > 0 &&
1108 s->internal->packet_buffer &&
1109 !flush &&
1110 s->internal->nb_interleaved_streams == stream_count+noninterleaved_count
1111 ) {
1112 AVPacket *top_pkt = &s->internal->packet_buffer->pkt;
1113 int64_t delta_dts = INT64_MIN;
1114 int64_t top_dts = av_rescale_q(top_pkt->dts,
1115 s->streams[top_pkt->stream_index]->time_base,
1116 AV_TIME_BASE_Q);
1117
1118 for (i = 0; i < s->nb_streams; i++) {
1119 int64_t last_dts;
1120 const AVPacketList *last = s->streams[i]->last_in_packet_buffer;
1121
1122 if (!last)
1123 continue;
1124
1125 last_dts = av_rescale_q(last->pkt.dts,
1126 s->streams[i]->time_base,
1127 AV_TIME_BASE_Q);
1128 delta_dts = FFMAX(delta_dts, last_dts - top_dts);
1129 }
1130
1131 if (delta_dts > s->max_interleave_delta) {
1132 av_log(s, AV_LOG_DEBUG,
1133 "Delay between the first packet and last packet in the "
1134 "muxing queue is %"PRId64" > %"PRId64": forcing output\n",
1135 delta_dts, s->max_interleave_delta);
1136 flush = 1;
1137 }
1138 }
1139
1140 if (s->internal->packet_buffer &&
1141 eof &&
1142 (s->flags & AVFMT_FLAG_SHORTEST) &&
1143 s->internal->shortest_end == AV_NOPTS_VALUE) {
1144 AVPacket *top_pkt = &s->internal->packet_buffer->pkt;
1145
1146 s->internal->shortest_end = av_rescale_q(top_pkt->dts,
1147 s->streams[top_pkt->stream_index]->time_base,
1148 AV_TIME_BASE_Q);
1149 }
1150
1151 if (s->internal->shortest_end != AV_NOPTS_VALUE) {
1152 while (s->internal->packet_buffer) {
1153 AVPacket *top_pkt = &s->internal->packet_buffer->pkt;
1154 AVStream *st;
1155 int64_t top_dts = av_rescale_q(top_pkt->dts,
1156 s->streams[top_pkt->stream_index]->time_base,
1157 AV_TIME_BASE_Q);
1158
1159 if (s->internal->shortest_end + 1 >= top_dts)
1160 break;
1161
1162 pktl = s->internal->packet_buffer;
1163 st = s->streams[pktl->pkt.stream_index];
1164
1165 s->internal->packet_buffer = pktl->next;
1166 if (!s->internal->packet_buffer)
1167 s->internal->packet_buffer_end = NULL;
1168
1169 if (st->last_in_packet_buffer == pktl)
1170 st->last_in_packet_buffer = NULL;
1171
1172 av_packet_unref(&pktl->pkt);
1173 av_freep(&pktl);
1174 flush = 0;
1175 }
1176 }
1177
1178 if (stream_count && flush) {
1179 AVStream *st;
1180 pktl = s->internal->packet_buffer;
1181 *out = pktl->pkt;
1182 st = s->streams[out->stream_index];
1183
1184 s->internal->packet_buffer = pktl->next;
1185 if (!s->internal->packet_buffer)
1186 s->internal->packet_buffer_end = NULL;
1187
1188 if (st->last_in_packet_buffer == pktl)
1189 st->last_in_packet_buffer = NULL;
1190 av_freep(&pktl);
1191
1192 return 1;
1193 } else {
1194 av_init_packet(out);
1195 return 0;
1196 }
1197}
1198
1199int ff_interleaved_peek(AVFormatContext *s, int stream,
1200 AVPacket *pkt, int add_offset)
1201{
1202 AVPacketList *pktl = s->internal->packet_buffer;
1203 while (pktl) {
1204 if (pktl->pkt.stream_index == stream) {
1205 *pkt = pktl->pkt;
1206 if (add_offset) {
1207 AVStream *st = s->streams[pkt->stream_index];
1208 int64_t offset = st->mux_ts_offset;
1209
1210 if (s->output_ts_offset)
1211 offset += av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);
1212
1213 if (pkt->dts != AV_NOPTS_VALUE)
1214 pkt->dts += offset;
1215 if (pkt->pts != AV_NOPTS_VALUE)
1216 pkt->pts += offset;
1217 }
1218 return 0;
1219 }
1220 pktl = pktl->next;
1221 }
1222 return AVERROR(ENOENT);
1223}
1224
1225/**
1226 * Interleave an AVPacket correctly so it can be muxed.
1227 * @param out the interleaved packet will be output here
1228 * @param in the input packet
1229 * @param flush 1 if no further packets are available as input and all
1230 * remaining packets should be output
1231 * @return 1 if a packet was output, 0 if no packet could be output,
1232 * < 0 if an error occurred
1233 */
1234static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush)
1235{
1236 if (s->oformat->interleave_packet) {
1237 int ret = s->oformat->interleave_packet(s, out, in, flush);
1238 if (in)
1239 av_packet_unref(in);
1240 return ret;
1241 } else
1242 return ff_interleave_packet_per_dts(s, out, in, flush);
1243}
1244
1245int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
1246{
1247 int ret, flush = 0;
1248
1249 ret = prepare_input_packet(s, pkt);
1250 if (ret < 0)
1251 goto fail;
1252
1253 if (pkt) {
1254 AVStream *st = s->streams[pkt->stream_index];
1255
1256 ret = do_packet_auto_bsf(s, pkt);
1257 if (ret == 0)
1258 return 0;
1259 else if (ret < 0)
1260 goto fail;
1261
1262 if (s->debug & FF_FDEBUG_TS)
1263 av_log(s, AV_LOG_TRACE, "av_interleaved_write_frame size:%d dts:%s pts:%s\n",
1264 pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
1265
1266#if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
1267 if ((ret = compute_muxer_pkt_fields(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
1268 goto fail;
1269#endif
1270
1271 if (pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
1272 ret = AVERROR(EINVAL);
1273 goto fail;
1274 }
1275 } else {
1276 av_log(s, AV_LOG_TRACE, "av_interleaved_write_frame FLUSH\n");
1277 flush = 1;
1278 }
1279
1280 for (;; ) {
1281 AVPacket opkt;
1282 int ret = interleave_packet(s, &opkt, pkt, flush);
1283 if (pkt) {
1284 memset(pkt, 0, sizeof(*pkt));
1285 av_init_packet(pkt);
1286 pkt = NULL;
1287 }
1288 if (ret <= 0) //FIXME cleanup needed for ret<0 ?
1289 return ret;
1290
1291 ret = write_packet(s, &opkt);
1292 if (ret >= 0)
1293 s->streams[opkt.stream_index]->nb_frames++;
1294
1295 av_packet_unref(&opkt);
1296
1297 if (ret < 0)
1298 return ret;
1299        if (s->pb && s->pb->error)
1300 return s->pb->error;
1301 }
1302fail:
1303 av_packet_unref(pkt);
1304 return ret;
1305}
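/* Usage note: unlike av_write_frame(), this function takes ownership of the
 * packet reference; on return pkt is blank (as after av_init_packet()).
 * A typical encode-and-mux loop might look like (sketch):
 *
 *     while (avcodec_receive_packet(enc, pkt) == 0) {
 *         av_packet_rescale_ts(pkt, enc->time_base, st->time_base);
 *         pkt->stream_index = st->index;
 *         av_interleaved_write_frame(oc, pkt);
 *     }
 *
 * Passing pkt == NULL flushes the interleaving queues. */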
1306
1307int av_write_trailer(AVFormatContext *s)
1308{
1309 int ret, i;
1310
1311 for (;; ) {
1312 AVPacket pkt;
1313 ret = interleave_packet(s, &pkt, NULL, 1);
1314 if (ret < 0)
1315 goto fail;
1316 if (!ret)
1317 break;
1318
1319 ret = write_packet(s, &pkt);
1320 if (ret >= 0)
1321 s->streams[pkt.stream_index]->nb_frames++;
1322
1323 av_packet_unref(&pkt);
1324
1325 if (ret < 0)
1326 goto fail;
1327        if (s->pb && s->pb->error)
1328 goto fail;
1329 }
1330
1331 if (!s->internal->header_written) {
1332 ret = s->internal->write_header_ret ? s->internal->write_header_ret : write_header_internal(s);
1333 if (ret < 0)
1334 goto fail;
1335 }
1336
1337fail:
1338 if (s->internal->header_written && s->oformat->write_trailer) {
1339 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
1340 avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_TRAILER);
1341 if (ret >= 0) {
1342 ret = s->oformat->write_trailer(s);
1343 } else {
1344 s->oformat->write_trailer(s);
1345 }
1346 }
1347
1348 if (s->oformat->deinit)
1349 s->oformat->deinit(s);
1350
1351 s->internal->header_written =
1352 s->internal->initialized =
1353 s->internal->streams_initialized = 0;
1354
1355 if (s->pb)
1356 avio_flush(s->pb);
1357 if (ret == 0)
1358 ret = s->pb ? s->pb->error : 0;
1359 for (i = 0; i < s->nb_streams; i++) {
1360 av_freep(&s->streams[i]->priv_data);
1361 av_freep(&s->streams[i]->index_entries);
1362 }
1363 if (s->oformat->priv_class)
1364 av_opt_free(s->priv_data);
1365 av_freep(&s->priv_data);
1366 return ret;
1367}
1368
1369int av_get_output_timestamp(struct AVFormatContext *s, int stream,
1370 int64_t *dts, int64_t *wall)
1371{
1372 if (!s->oformat || !s->oformat->get_output_timestamp)
1373 return AVERROR(ENOSYS);
1374 s->oformat->get_output_timestamp(s, stream, dts, wall);
1375 return 0;
1376}
1377
1378int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
1379 AVFormatContext *src, int interleave)
1380{
1381 AVPacket local_pkt;
1382 int ret;
1383
1384 local_pkt = *pkt;
1385 local_pkt.stream_index = dst_stream;
1386 if (pkt->pts != AV_NOPTS_VALUE)
1387 local_pkt.pts = av_rescale_q(pkt->pts,
1388 src->streams[pkt->stream_index]->time_base,
1389 dst->streams[dst_stream]->time_base);
1390 if (pkt->dts != AV_NOPTS_VALUE)
1391 local_pkt.dts = av_rescale_q(pkt->dts,
1392 src->streams[pkt->stream_index]->time_base,
1393 dst->streams[dst_stream]->time_base);
1394 if (pkt->duration)
1395 local_pkt.duration = av_rescale_q(pkt->duration,
1396 src->streams[pkt->stream_index]->time_base,
1397 dst->streams[dst_stream]->time_base);
1398
1399 if (interleave) ret = av_interleaved_write_frame(dst, &local_pkt);
1400 else ret = av_write_frame(dst, &local_pkt);
1401 pkt->buf = local_pkt.buf;
1402 pkt->side_data = local_pkt.side_data;
1403 pkt->side_data_elems = local_pkt.side_data_elems;
1404 return ret;
1405}
1406
1407static int av_write_uncoded_frame_internal(AVFormatContext *s, int stream_index,
1408 AVFrame *frame, int interleaved)
1409{
1410 AVPacket pkt, *pktp;
1411
1412 av_assert0(s->oformat);
1413 if (!s->oformat->write_uncoded_frame)
1414 return AVERROR(ENOSYS);
1415
1416 if (!frame) {
1417 pktp = NULL;
1418 } else {
1419 pktp = &pkt;
1420 av_init_packet(&pkt);
1421 pkt.data = (void *)frame;
1422 pkt.size = UNCODED_FRAME_PACKET_SIZE;
1423 pkt.pts =
1424 pkt.dts = frame->pts;
1425 pkt.duration = av_frame_get_pkt_duration(frame);
1426 pkt.stream_index = stream_index;
1427 pkt.flags |= AV_PKT_FLAG_UNCODED_FRAME;
1428 }
1429
1430 return interleaved ? av_interleaved_write_frame(s, pktp) :
1431 av_write_frame(s, pktp);
1432}
1433
1434int av_write_uncoded_frame(AVFormatContext *s, int stream_index,
1435 AVFrame *frame)
1436{
1437 return av_write_uncoded_frame_internal(s, stream_index, frame, 0);
1438}
1439
1440int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,
1441 AVFrame *frame)
1442{
1443 return av_write_uncoded_frame_internal(s, stream_index, frame, 1);
1444}
1445
1446int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index)
1447{
1448 av_assert0(s->oformat);
1449 if (!s->oformat->write_uncoded_frame)
1450 return AVERROR(ENOSYS);
1451 return s->oformat->write_uncoded_frame(s, stream_index, NULL,
1452 AV_WRITE_UNCODED_FRAME_QUERY);
1453}
1454