path: root/libavcodec/utils.c (plain)
blob: 0c6883686e71182bd86b1b978d5ed7cbc65c8ff8
1/*
2 * utils for libavcodec
3 * Copyright (c) 2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23/**
24 * @file
25 * utility functions for libavcodec.
26 */
27
28#include "config.h"
29#include "libavutil/atomic.h"
30#include "libavutil/attributes.h"
31#include "libavutil/avassert.h"
32#include "libavutil/avstring.h"
33#include "libavutil/bprint.h"
34#include "libavutil/channel_layout.h"
35#include "libavutil/crc.h"
36#include "libavutil/frame.h"
37#include "libavutil/hwcontext.h"
38#include "libavutil/internal.h"
39#include "libavutil/mathematics.h"
40#include "libavutil/mem_internal.h"
41#include "libavutil/pixdesc.h"
42#include "libavutil/imgutils.h"
43#include "libavutil/samplefmt.h"
44#include "libavutil/dict.h"
45#include "libavutil/thread.h"
46#include "avcodec.h"
47#include "libavutil/opt.h"
48#include "me_cmp.h"
49#include "mpegvideo.h"
50#include "thread.h"
51#include "frame_thread_encoder.h"
52#include "internal.h"
53#include "raw.h"
54#include "bytestream.h"
55#include "version.h"
56#include <stdlib.h>
57#include <stdarg.h>
58#include <limits.h>
59#include <float.h>
60#if CONFIG_ICONV
61# include <iconv.h>
62#endif
63
64#include "libavutil/ffversion.h"
65const char av_codec_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
66
67#if HAVE_PTHREADS || HAVE_W32THREADS || HAVE_OS2THREADS
68static int default_lockmgr_cb(void **arg, enum AVLockOp op)
69{
70 void * volatile * mutex = arg;
71 int err;
72
73 switch (op) {
74 case AV_LOCK_CREATE:
75 return 0;
76 case AV_LOCK_OBTAIN:
77 if (!*mutex) {
78 pthread_mutex_t *tmp = av_malloc(sizeof(pthread_mutex_t));
79 if (!tmp)
80 return AVERROR(ENOMEM);
81 if ((err = pthread_mutex_init(tmp, NULL))) {
82 av_free(tmp);
83 return AVERROR(err);
84 }
85 if (avpriv_atomic_ptr_cas(mutex, NULL, tmp)) {
86 pthread_mutex_destroy(tmp);
87 av_free(tmp);
88 }
89 }
90
91 if ((err = pthread_mutex_lock(*mutex)))
92 return AVERROR(err);
93
94 return 0;
95 case AV_LOCK_RELEASE:
96 if ((err = pthread_mutex_unlock(*mutex)))
97 return AVERROR(err);
98
99 return 0;
100 case AV_LOCK_DESTROY:
101 if (*mutex)
102 pthread_mutex_destroy(*mutex);
103 av_free(*mutex);
104 avpriv_atomic_ptr_cas(mutex, *mutex, NULL);
105 return 0;
106 }
107 return 1;
108}
109static int (*lockmgr_cb)(void **mutex, enum AVLockOp op) = default_lockmgr_cb;
110#else
111static int (*lockmgr_cb)(void **mutex, enum AVLockOp op) = NULL;
112#endif
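
/*
 * A minimal sketch of a custom lock manager that an application could install
 * with av_lockmgr_register() instead of relying on default_lockmgr_cb() above.
 * The callback name `app_lockmgr` and the pthread backing are assumptions of
 * this sketch, not requirements of the API.
 *
 *     static int app_lockmgr(void **mtx, enum AVLockOp op)
 *     {
 *         switch (op) {
 *         case AV_LOCK_CREATE:
 *             *mtx = av_malloc(sizeof(pthread_mutex_t));
 *             return *mtx ? pthread_mutex_init(*mtx, NULL) != 0 : 1;
 *         case AV_LOCK_OBTAIN:
 *             return pthread_mutex_lock(*mtx) != 0;
 *         case AV_LOCK_RELEASE:
 *             return pthread_mutex_unlock(*mtx) != 0;
 *         case AV_LOCK_DESTROY:
 *             pthread_mutex_destroy(*mtx);
 *             av_freep(mtx);
 *             return 0;
 *         }
 *         return 1;
 *     }
 *
 *     // called once during application start-up:
 *     av_lockmgr_register(app_lockmgr);
 */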
113
114
115volatile int ff_avcodec_locked;
116static int volatile entangled_thread_counter = 0;
117static void *codec_mutex;
118static void *avformat_mutex;
119
120void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
121{
122 uint8_t **p = ptr;
123 if (min_size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
124 av_freep(p);
125 *size = 0;
126 return;
127 }
128 if (!ff_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE, 1))
129 memset(*p + min_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
130}
131
132void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
133{
134 uint8_t **p = ptr;
135 if (min_size > SIZE_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
136 av_freep(p);
137 *size = 0;
138 return;
139 }
140 if (!ff_fast_malloc(p, size, min_size + AV_INPUT_BUFFER_PADDING_SIZE, 1))
141 memset(*p, 0, min_size + AV_INPUT_BUFFER_PADDING_SIZE);
142}
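
/*
 * A minimal usage sketch for av_fast_padded_malloc(): decoders keep a growing
 * scratch buffer whose tail carries AV_INPUT_BUFFER_PADDING_SIZE extra zeroed
 * bytes so that overreading bitstream readers stay inside allocated memory.
 * The names `scratch`/`scratch_size` and the surrounding `pkt` are assumptions
 * of this sketch.
 *
 *     uint8_t *scratch = NULL;
 *     unsigned int scratch_size = 0;
 *
 *     av_fast_padded_malloc(&scratch, &scratch_size, pkt->size);
 *     if (!scratch)
 *         return AVERROR(ENOMEM);
 *     memcpy(scratch, pkt->data, pkt->size);
 */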
143
144/* encoder management */
145static AVCodec *first_avcodec = NULL;
146static AVCodec **last_avcodec = &first_avcodec;
147
148AVCodec *av_codec_next(const AVCodec *c)
149{
150 if (c)
151 return c->next;
152 else
153 return first_avcodec;
154}
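
/*
 * A short sketch of iterating the registered codec list with av_codec_next(),
 * e.g. to print the names of all available encoders.
 *
 *     const AVCodec *c = NULL;
 *     while ((c = av_codec_next(c)))
 *         if (av_codec_is_encoder(c))
 *             av_log(NULL, AV_LOG_INFO, "%s\n", c->name);
 */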
155
156static av_cold void avcodec_init(void)
157{
158 static int initialized = 0;
159
160 if (initialized != 0)
161 return;
162 initialized = 1;
163
164 if (CONFIG_ME_CMP)
165 ff_me_cmp_init_static();
166}
167
168int av_codec_is_encoder(const AVCodec *codec)
169{
170 return codec && (codec->encode_sub || codec->encode2 || codec->send_frame);
171}
172
173int av_codec_is_decoder(const AVCodec *codec)
174{
175 return codec && (codec->decode || codec->send_packet);
176}
177
178av_cold void avcodec_register(AVCodec *codec)
179{
180 AVCodec **p;
181 avcodec_init();
182 p = last_avcodec;
183 codec->next = NULL;
184
185 while (*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, codec))
186 p = &(*p)->next;
187 last_avcodec = &codec->next;
188
189 if (codec->init_static_data)
190 codec->init_static_data(codec);
191}
192
193#if FF_API_EMU_EDGE
194unsigned avcodec_get_edge_width(void)
195{
196 return EDGE_WIDTH;
197}
198#endif
199
200#if FF_API_SET_DIMENSIONS
201void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
202{
203 int ret = ff_set_dimensions(s, width, height);
204 if (ret < 0) {
205 av_log(s, AV_LOG_WARNING, "Failed to set dimensions %d %d\n", width, height);
206 }
207}
208#endif
209
210int ff_set_dimensions(AVCodecContext *s, int width, int height)
211{
212 int ret = av_image_check_size2(width, height, s->max_pixels, AV_PIX_FMT_NONE, 0, s);
213
214 if (ret < 0)
215 width = height = 0;
216
217 s->coded_width = width;
218 s->coded_height = height;
219 s->width = AV_CEIL_RSHIFT(width, s->lowres);
220 s->height = AV_CEIL_RSHIFT(height, s->lowres);
221
222 return ret;
223}
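
/*
 * Worked example: with lowres == 1 and a coded size of 1920x1080,
 * AV_CEIL_RSHIFT yields s->width == 960 and s->height == 540, while
 * coded_width/coded_height keep the full 1920x1080.
 */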
224
225int ff_set_sar(AVCodecContext *avctx, AVRational sar)
226{
227 int ret = av_image_check_sar(avctx->width, avctx->height, sar);
228
229 if (ret < 0) {
230 av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %d/%d\n",
231 sar.num, sar.den);
232 avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
233 return ret;
234 } else {
235 avctx->sample_aspect_ratio = sar;
236 }
237 return 0;
238}
239
240int ff_side_data_update_matrix_encoding(AVFrame *frame,
241 enum AVMatrixEncoding matrix_encoding)
242{
243 AVFrameSideData *side_data;
244 enum AVMatrixEncoding *data;
245
246 side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_MATRIXENCODING);
247 if (!side_data)
248 side_data = av_frame_new_side_data(frame, AV_FRAME_DATA_MATRIXENCODING,
249 sizeof(enum AVMatrixEncoding));
250
251 if (!side_data)
252 return AVERROR(ENOMEM);
253
254 data = (enum AVMatrixEncoding*)side_data->data;
255 *data = matrix_encoding;
256
257 return 0;
258}
259
260void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
261 int linesize_align[AV_NUM_DATA_POINTERS])
262{
263 int i;
264 int w_align = 1;
265 int h_align = 1;
266 AVPixFmtDescriptor const *desc = av_pix_fmt_desc_get(s->pix_fmt);
267
268 if (desc) {
269 w_align = 1 << desc->log2_chroma_w;
270 h_align = 1 << desc->log2_chroma_h;
271 }
272
273 switch (s->pix_fmt) {
274 case AV_PIX_FMT_YUV420P:
275 case AV_PIX_FMT_YUYV422:
276 case AV_PIX_FMT_YVYU422:
277 case AV_PIX_FMT_UYVY422:
278 case AV_PIX_FMT_YUV422P:
279 case AV_PIX_FMT_YUV440P:
280 case AV_PIX_FMT_YUV444P:
281 case AV_PIX_FMT_GBRP:
282 case AV_PIX_FMT_GBRAP:
283 case AV_PIX_FMT_GRAY8:
284 case AV_PIX_FMT_GRAY16BE:
285 case AV_PIX_FMT_GRAY16LE:
286 case AV_PIX_FMT_YUVJ420P:
287 case AV_PIX_FMT_YUVJ422P:
288 case AV_PIX_FMT_YUVJ440P:
289 case AV_PIX_FMT_YUVJ444P:
290 case AV_PIX_FMT_YUVA420P:
291 case AV_PIX_FMT_YUVA422P:
292 case AV_PIX_FMT_YUVA444P:
293 case AV_PIX_FMT_YUV420P9LE:
294 case AV_PIX_FMT_YUV420P9BE:
295 case AV_PIX_FMT_YUV420P10LE:
296 case AV_PIX_FMT_YUV420P10BE:
297 case AV_PIX_FMT_YUV420P12LE:
298 case AV_PIX_FMT_YUV420P12BE:
299 case AV_PIX_FMT_YUV420P14LE:
300 case AV_PIX_FMT_YUV420P14BE:
301 case AV_PIX_FMT_YUV420P16LE:
302 case AV_PIX_FMT_YUV420P16BE:
303 case AV_PIX_FMT_YUVA420P9LE:
304 case AV_PIX_FMT_YUVA420P9BE:
305 case AV_PIX_FMT_YUVA420P10LE:
306 case AV_PIX_FMT_YUVA420P10BE:
307 case AV_PIX_FMT_YUVA420P16LE:
308 case AV_PIX_FMT_YUVA420P16BE:
309 case AV_PIX_FMT_YUV422P9LE:
310 case AV_PIX_FMT_YUV422P9BE:
311 case AV_PIX_FMT_YUV422P10LE:
312 case AV_PIX_FMT_YUV422P10BE:
313 case AV_PIX_FMT_YUV422P12LE:
314 case AV_PIX_FMT_YUV422P12BE:
315 case AV_PIX_FMT_YUV422P14LE:
316 case AV_PIX_FMT_YUV422P14BE:
317 case AV_PIX_FMT_YUV422P16LE:
318 case AV_PIX_FMT_YUV422P16BE:
319 case AV_PIX_FMT_YUVA422P9LE:
320 case AV_PIX_FMT_YUVA422P9BE:
321 case AV_PIX_FMT_YUVA422P10LE:
322 case AV_PIX_FMT_YUVA422P10BE:
323 case AV_PIX_FMT_YUVA422P16LE:
324 case AV_PIX_FMT_YUVA422P16BE:
325 case AV_PIX_FMT_YUV440P10LE:
326 case AV_PIX_FMT_YUV440P10BE:
327 case AV_PIX_FMT_YUV440P12LE:
328 case AV_PIX_FMT_YUV440P12BE:
329 case AV_PIX_FMT_YUV444P9LE:
330 case AV_PIX_FMT_YUV444P9BE:
331 case AV_PIX_FMT_YUV444P10LE:
332 case AV_PIX_FMT_YUV444P10BE:
333 case AV_PIX_FMT_YUV444P12LE:
334 case AV_PIX_FMT_YUV444P12BE:
335 case AV_PIX_FMT_YUV444P14LE:
336 case AV_PIX_FMT_YUV444P14BE:
337 case AV_PIX_FMT_YUV444P16LE:
338 case AV_PIX_FMT_YUV444P16BE:
339 case AV_PIX_FMT_YUVA444P9LE:
340 case AV_PIX_FMT_YUVA444P9BE:
341 case AV_PIX_FMT_YUVA444P10LE:
342 case AV_PIX_FMT_YUVA444P10BE:
343 case AV_PIX_FMT_YUVA444P16LE:
344 case AV_PIX_FMT_YUVA444P16BE:
345 case AV_PIX_FMT_GBRP9LE:
346 case AV_PIX_FMT_GBRP9BE:
347 case AV_PIX_FMT_GBRP10LE:
348 case AV_PIX_FMT_GBRP10BE:
349 case AV_PIX_FMT_GBRP12LE:
350 case AV_PIX_FMT_GBRP12BE:
351 case AV_PIX_FMT_GBRP14LE:
352 case AV_PIX_FMT_GBRP14BE:
353 case AV_PIX_FMT_GBRP16LE:
354 case AV_PIX_FMT_GBRP16BE:
355 case AV_PIX_FMT_GBRAP12LE:
356 case AV_PIX_FMT_GBRAP12BE:
357 case AV_PIX_FMT_GBRAP16LE:
358 case AV_PIX_FMT_GBRAP16BE:
359 w_align = 16; //FIXME assume 16 pixels per macroblock
360 h_align = 16 * 2; // interlaced content needs a height of 2 macroblocks
361 break;
362 case AV_PIX_FMT_YUV411P:
363 case AV_PIX_FMT_YUVJ411P:
364 case AV_PIX_FMT_UYYVYY411:
365 w_align = 32;
366 h_align = 16 * 2;
367 break;
368 case AV_PIX_FMT_YUV410P:
369 if (s->codec_id == AV_CODEC_ID_SVQ1) {
370 w_align = 64;
371 h_align = 64;
372 }
373 break;
374 case AV_PIX_FMT_RGB555:
375 if (s->codec_id == AV_CODEC_ID_RPZA) {
376 w_align = 4;
377 h_align = 4;
378 }
379 if (s->codec_id == AV_CODEC_ID_INTERPLAY_VIDEO) {
380 w_align = 8;
381 h_align = 8;
382 }
383 break;
384 case AV_PIX_FMT_PAL8:
385 case AV_PIX_FMT_BGR8:
386 case AV_PIX_FMT_RGB8:
387 if (s->codec_id == AV_CODEC_ID_SMC ||
388 s->codec_id == AV_CODEC_ID_CINEPAK) {
389 w_align = 4;
390 h_align = 4;
391 }
392 if (s->codec_id == AV_CODEC_ID_JV ||
393 s->codec_id == AV_CODEC_ID_INTERPLAY_VIDEO) {
394 w_align = 8;
395 h_align = 8;
396 }
397 break;
398 case AV_PIX_FMT_BGR24:
399 if ((s->codec_id == AV_CODEC_ID_MSZH) ||
400 (s->codec_id == AV_CODEC_ID_ZLIB)) {
401 w_align = 4;
402 h_align = 4;
403 }
404 break;
405 case AV_PIX_FMT_RGB24:
406 if (s->codec_id == AV_CODEC_ID_CINEPAK) {
407 w_align = 4;
408 h_align = 4;
409 }
410 break;
411 default:
412 break;
413 }
414
415 if (s->codec_id == AV_CODEC_ID_IFF_ILBM) {
416 w_align = FFMAX(w_align, 8);
417 }
418
419 *width = FFALIGN(*width, w_align);
420 *height = FFALIGN(*height, h_align);
421 if (s->codec_id == AV_CODEC_ID_H264 || s->lowres) {
422 // some of the optimized chroma MC reads one line too much
423 // which is also done in mpeg decoders with lowres > 0
424 *height += 2;
425
426 // H.264 uses edge emulation for out-of-frame motion vectors; for this
427 // it requires a temporary area large enough to hold a 21x21 block.
428 // Increasing the width ensures that the temporary area is large enough;
429 // the next rounded-up width is 32.
430 *width = FFMAX(*width, 32);
431 }
432
433 for (i = 0; i < 4; i++)
434 linesize_align[i] = STRIDE_ALIGN;
435}
436
437void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height)
438{
439 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt);
440 int chroma_shift = desc->log2_chroma_w;
441 int linesize_align[AV_NUM_DATA_POINTERS];
442 int align;
443
444 avcodec_align_dimensions2(s, width, height, linesize_align);
445 align = FFMAX(linesize_align[0], linesize_align[3]);
446 linesize_align[1] <<= chroma_shift;
447 linesize_align[2] <<= chroma_shift;
448 align = FFMAX3(align, linesize_align[1], linesize_align[2]);
449 *width = FFALIGN(*width, align);
450}
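
/*
 * A minimal usage sketch: callers that allocate their own picture buffers can
 * pad the dimensions first so every plane line meets the codec's alignment
 * requirements. The locals `w`, `h`, `data` and `linesize` are assumptions of
 * this sketch.
 *
 *     int w = avctx->width, h = avctx->height;
 *     uint8_t *data[4];
 *     int linesize[4];
 *
 *     avcodec_align_dimensions(avctx, &w, &h);
 *     if (av_image_alloc(data, linesize, w, h, avctx->pix_fmt, 32) < 0)
 *         return AVERROR(ENOMEM);
 */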
451
452int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos)
453{
454 if (pos <= AVCHROMA_LOC_UNSPECIFIED || pos >= AVCHROMA_LOC_NB)
455 return AVERROR(EINVAL);
456 pos--;
457
458 *xpos = (pos&1) * 128;
459 *ypos = ((pos>>1)^(pos<4)) * 128;
460
461 return 0;
462}
463
464enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos)
465{
466 int pos, xout, yout;
467
468 for (pos = AVCHROMA_LOC_UNSPECIFIED + 1; pos < AVCHROMA_LOC_NB; pos++) {
469 if (avcodec_enum_to_chroma_pos(&xout, &yout, pos) == 0 && xout == xpos && yout == ypos)
470 return pos;
471 }
472 return AVCHROMA_LOC_UNSPECIFIED;
473}
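
/*
 * Worked example: chroma positions are expressed in 1/256ths of the luma
 * grid, so AVCHROMA_LOC_LEFT maps to (xpos, ypos) == (0, 128),
 * AVCHROMA_LOC_CENTER to (128, 128) and AVCHROMA_LOC_TOPLEFT to (0, 0);
 * avcodec_chroma_pos_to_enum() performs the inverse lookup.
 *
 *     int x, y;
 *     avcodec_enum_to_chroma_pos(&x, &y, AVCHROMA_LOC_LEFT); // x == 0, y == 128
 *     avcodec_chroma_pos_to_enum(0, 128);                    // AVCHROMA_LOC_LEFT
 */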
474
475int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
476 enum AVSampleFormat sample_fmt, const uint8_t *buf,
477 int buf_size, int align)
478{
479 int ch, planar, needed_size, ret = 0;
480
481 needed_size = av_samples_get_buffer_size(NULL, nb_channels,
482 frame->nb_samples, sample_fmt,
483 align);
484 if (buf_size < needed_size)
485 return AVERROR(EINVAL);
486
487 planar = av_sample_fmt_is_planar(sample_fmt);
488 if (planar && nb_channels > AV_NUM_DATA_POINTERS) {
489 if (!(frame->extended_data = av_mallocz_array(nb_channels,
490 sizeof(*frame->extended_data))))
491 return AVERROR(ENOMEM);
492 } else {
493 frame->extended_data = frame->data;
494 }
495
496 if ((ret = av_samples_fill_arrays(frame->extended_data, &frame->linesize[0],
497 (uint8_t *)(intptr_t)buf, nb_channels, frame->nb_samples,
498 sample_fmt, align)) < 0) {
499 if (frame->extended_data != frame->data)
500 av_freep(&frame->extended_data);
501 return ret;
502 }
503 if (frame->extended_data != frame->data) {
504 for (ch = 0; ch < AV_NUM_DATA_POINTERS; ch++)
505 frame->data[ch] = frame->extended_data[ch];
506 }
507
508 return ret;
509}
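
/*
 * A minimal usage sketch: wrapping an existing interleaved stereo S16 buffer
 * in an AVFrame without copying. The buffer `buf` of `buf_size` bytes is an
 * assumption of this sketch; nb_samples must be set before the call.
 *
 *     AVFrame *frame = av_frame_alloc();
 *     if (!frame)
 *         return AVERROR(ENOMEM);
 *     frame->nb_samples = buf_size / (2 * sizeof(int16_t));
 *     if (avcodec_fill_audio_frame(frame, 2, AV_SAMPLE_FMT_S16,
 *                                  buf, buf_size, 0) < 0)
 *         av_frame_free(&frame);
 */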
510
511static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
512{
513 FramePool *pool = avctx->internal->pool;
514 int i, ret;
515
516 switch (avctx->codec_type) {
517 case AVMEDIA_TYPE_VIDEO: {
518 uint8_t *data[4];
519 int linesize[4];
520 int size[4] = { 0 };
521 int w = frame->width;
522 int h = frame->height;
523 int tmpsize, unaligned;
524
525 if (pool->format == frame->format &&
526 pool->width == frame->width && pool->height == frame->height)
527 return 0;
528
529 avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
530
531 do {
532 // NOTE: do not align the linesizes individually; this breaks, e.g., the
533 // assumption that linesize[0] == 2*linesize[1] in the MPEG encoder for 4:2:2
534 ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
535 if (ret < 0)
536 return ret;
537 // increase alignment of w for next try (rhs gives the lowest bit set in w)
538 w += w & ~(w - 1);
539
540 unaligned = 0;
541 for (i = 0; i < 4; i++)
542 unaligned |= linesize[i] % pool->stride_align[i];
543 } while (unaligned);
544
545 tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
546 NULL, linesize);
547 if (tmpsize < 0)
548 return -1;
549
550 for (i = 0; i < 3 && data[i + 1]; i++)
551 size[i] = data[i + 1] - data[i];
552 size[i] = tmpsize - (data[i] - data[0]);
553
554 for (i = 0; i < 4; i++) {
555 av_buffer_pool_uninit(&pool->pools[i]);
556 pool->linesize[i] = linesize[i];
557 if (size[i]) {
558 pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
559 CONFIG_MEMORY_POISONING ?
560 NULL :
561 av_buffer_allocz);
562 if (!pool->pools[i]) {
563 ret = AVERROR(ENOMEM);
564 goto fail;
565 }
566 }
567 }
568 pool->format = frame->format;
569 pool->width = frame->width;
570 pool->height = frame->height;
571
572 break;
573 }
574 case AVMEDIA_TYPE_AUDIO: {
575 int ch = av_frame_get_channels(frame); //av_get_channel_layout_nb_channels(frame->channel_layout);
576 int planar = av_sample_fmt_is_planar(frame->format);
577 int planes = planar ? ch : 1;
578
579 if (pool->format == frame->format && pool->planes == planes &&
580 pool->channels == ch && frame->nb_samples == pool->samples)
581 return 0;
582
583 av_buffer_pool_uninit(&pool->pools[0]);
584 ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
585 frame->nb_samples, frame->format, 0);
586 if (ret < 0)
587 goto fail;
588
589 pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
590 if (!pool->pools[0]) {
591 ret = AVERROR(ENOMEM);
592 goto fail;
593 }
594
595 pool->format = frame->format;
596 pool->planes = planes;
597 pool->channels = ch;
598 pool->samples = frame->nb_samples;
599 break;
600 }
601 default: av_assert0(0);
602 }
603 return 0;
604fail:
605 for (i = 0; i < 4; i++)
606 av_buffer_pool_uninit(&pool->pools[i]);
607 pool->format = -1;
608 pool->planes = pool->channels = pool->samples = 0;
609 pool->width = pool->height = 0;
610 return ret;
611}
612
613static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
614{
615 FramePool *pool = avctx->internal->pool;
616 int planes = pool->planes;
617 int i;
618
619 frame->linesize[0] = pool->linesize[0];
620
621 if (planes > AV_NUM_DATA_POINTERS) {
622 frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
623 frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
624 frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
625 sizeof(*frame->extended_buf));
626 if (!frame->extended_data || !frame->extended_buf) {
627 av_freep(&frame->extended_data);
628 av_freep(&frame->extended_buf);
629 return AVERROR(ENOMEM);
630 }
631 } else {
632 frame->extended_data = frame->data;
633 av_assert0(frame->nb_extended_buf == 0);
634 }
635
636 for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
637 frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
638 if (!frame->buf[i])
639 goto fail;
640 frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
641 }
642 for (i = 0; i < frame->nb_extended_buf; i++) {
643 frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
644 if (!frame->extended_buf[i])
645 goto fail;
646 frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
647 }
648
649 if (avctx->debug & FF_DEBUG_BUFFERS)
650 av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p\n", frame);
651
652 return 0;
653fail:
654 av_frame_unref(frame);
655 return AVERROR(ENOMEM);
656}
657
658static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
659{
660 FramePool *pool = s->internal->pool;
661 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
662 int i;
663
664 if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
665 av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
666 return -1;
667 }
668
669 if (!desc) {
670 av_log(s, AV_LOG_ERROR,
671 "Unable to get pixel format descriptor for format %s\n",
672 av_get_pix_fmt_name(pic->format));
673 return AVERROR(EINVAL);
674 }
675
676 memset(pic->data, 0, sizeof(pic->data));
677 pic->extended_data = pic->data;
678
679 for (i = 0; i < 4 && pool->pools[i]; i++) {
680 pic->linesize[i] = pool->linesize[i];
681
682 pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
683 if (!pic->buf[i])
684 goto fail;
685
686 pic->data[i] = pic->buf[i]->data;
687 }
688 for (; i < AV_NUM_DATA_POINTERS; i++) {
689 pic->data[i] = NULL;
690 pic->linesize[i] = 0;
691 }
692 if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
693 desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
694 avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
695
696 if (s->debug & FF_DEBUG_BUFFERS)
697 av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
698
699 return 0;
700fail:
701 av_frame_unref(pic);
702 return AVERROR(ENOMEM);
703}
704
705void ff_color_frame(AVFrame *frame, const int c[4])
706{
707 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
708 int p, y, x;
709
710 av_assert0(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
711
712 for (p = 0; p<desc->nb_components; p++) {
713 uint8_t *dst = frame->data[p];
714 int is_chroma = p == 1 || p == 2;
715 int bytes = is_chroma ? AV_CEIL_RSHIFT(frame->width, desc->log2_chroma_w) : frame->width;
716 int height = is_chroma ? AV_CEIL_RSHIFT(frame->height, desc->log2_chroma_h) : frame->height;
717 for (y = 0; y < height; y++) {
718 if (desc->comp[0].depth >= 9) {
719 for (x = 0; x<bytes; x++)
720 ((uint16_t*)dst)[x] = c[p];
721 }else
722 memset(dst, c[p], bytes);
723 dst += frame->linesize[p];
724 }
725 }
726}
727
728int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
729{
730 int ret;
731
732 if (avctx->hw_frames_ctx)
733 return av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
734
735 if ((ret = update_frame_pool(avctx, frame)) < 0)
736 return ret;
737
738 switch (avctx->codec_type) {
739 case AVMEDIA_TYPE_VIDEO:
740 return video_get_buffer(avctx, frame);
741 case AVMEDIA_TYPE_AUDIO:
742 return audio_get_buffer(avctx, frame);
743 default:
744 return -1;
745 }
746}
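
/*
 * A minimal sketch of a custom get_buffer2 callback: handle the cases the
 * application cares about and fall back to the default implementation above.
 * The name `app_get_buffer2` is an assumption of this sketch.
 *
 *     static int app_get_buffer2(AVCodecContext *ctx, AVFrame *frame, int flags)
 *     {
 *         // frame->width, frame->height and frame->format are already set
 *         // here, so an application-owned pool could be consulted first ...
 *         return avcodec_default_get_buffer2(ctx, frame, flags);
 *     }
 *
 *     // installed before avcodec_open2():
 *     avctx->get_buffer2 = app_get_buffer2;
 */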
747
748static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
749{
750 int size;
751 const uint8_t *side_metadata;
752
753 AVDictionary **frame_md = avpriv_frame_get_metadatap(frame);
754
755 side_metadata = av_packet_get_side_data(avpkt,
756 AV_PKT_DATA_STRINGS_METADATA, &size);
757 return av_packet_unpack_dictionary(side_metadata, size, frame_md);
758}
759
760int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame)
761{
762 const AVPacket *pkt = avctx->internal->pkt;
763 int i;
764 static const struct {
765 enum AVPacketSideDataType packet;
766 enum AVFrameSideDataType frame;
767 } sd[] = {
768 { AV_PKT_DATA_REPLAYGAIN , AV_FRAME_DATA_REPLAYGAIN },
769 { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX },
770 { AV_PKT_DATA_SPHERICAL, AV_FRAME_DATA_SPHERICAL },
771 { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D },
772 { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
773 { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
774 };
775
776 if (pkt) {
777 frame->pts = pkt->pts;
778#if FF_API_PKT_PTS
779FF_DISABLE_DEPRECATION_WARNINGS
780 frame->pkt_pts = pkt->pts;
781FF_ENABLE_DEPRECATION_WARNINGS
782#endif
783 av_frame_set_pkt_pos (frame, pkt->pos);
784 av_frame_set_pkt_duration(frame, pkt->duration);
785 av_frame_set_pkt_size (frame, pkt->size);
786
787 for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
788 int size;
789 uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
790 if (packet_sd) {
791 AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
792 sd[i].frame,
793 size);
794 if (!frame_sd)
795 return AVERROR(ENOMEM);
796
797 memcpy(frame_sd->data, packet_sd, size);
798 }
799 }
800 add_metadata_from_side_data(pkt, frame);
801
802 if (pkt->flags & AV_PKT_FLAG_DISCARD) {
803 frame->flags |= AV_FRAME_FLAG_DISCARD;
804 } else {
805 frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
806 }
807 } else {
808 frame->pts = AV_NOPTS_VALUE;
809#if FF_API_PKT_PTS
810FF_DISABLE_DEPRECATION_WARNINGS
811 frame->pkt_pts = AV_NOPTS_VALUE;
812FF_ENABLE_DEPRECATION_WARNINGS
813#endif
814 av_frame_set_pkt_pos (frame, -1);
815 av_frame_set_pkt_duration(frame, 0);
816 av_frame_set_pkt_size (frame, -1);
817 }
818 frame->reordered_opaque = avctx->reordered_opaque;
819
820 if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
821 frame->color_primaries = avctx->color_primaries;
822 if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
823 frame->color_trc = avctx->color_trc;
824 if (av_frame_get_colorspace(frame) == AVCOL_SPC_UNSPECIFIED)
825 av_frame_set_colorspace(frame, avctx->colorspace);
826 if (av_frame_get_color_range(frame) == AVCOL_RANGE_UNSPECIFIED)
827 av_frame_set_color_range(frame, avctx->color_range);
828 if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
829 frame->chroma_location = avctx->chroma_sample_location;
830
831 switch (avctx->codec->type) {
832 case AVMEDIA_TYPE_VIDEO:
833 frame->format = avctx->pix_fmt;
834 if (!frame->sample_aspect_ratio.num)
835 frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
836
837 if (frame->width && frame->height &&
838 av_image_check_sar(frame->width, frame->height,
839 frame->sample_aspect_ratio) < 0) {
840 av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
841 frame->sample_aspect_ratio.num,
842 frame->sample_aspect_ratio.den);
843 frame->sample_aspect_ratio = (AVRational){ 0, 1 };
844 }
845
846 break;
847 case AVMEDIA_TYPE_AUDIO:
848 if (!frame->sample_rate)
849 frame->sample_rate = avctx->sample_rate;
850 if (frame->format < 0)
851 frame->format = avctx->sample_fmt;
852 if (!frame->channel_layout) {
853 if (avctx->channel_layout) {
854 if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
855 avctx->channels) {
856 av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
857 "configuration.\n");
858 return AVERROR(EINVAL);
859 }
860
861 frame->channel_layout = avctx->channel_layout;
862 } else {
863 if (avctx->channels > FF_SANE_NB_CHANNELS) {
864 av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
865 avctx->channels);
866 return AVERROR(ENOSYS);
867 }
868 }
869 }
870 av_frame_set_channels(frame, avctx->channels);
871 break;
872 }
873 return 0;
874}
875
876int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
877{
878 return ff_init_buffer_info(avctx, frame);
879}
880
881static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
882{
883 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
884 int i;
885 int num_planes = av_pix_fmt_count_planes(frame->format);
886 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
887 int flags = desc ? desc->flags : 0;
888 if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
889 num_planes = 2;
890 for (i = 0; i < num_planes; i++) {
891 av_assert0(frame->data[i]);
892 }
893 // For now do not enforce anything for palette of pseudopal formats
894 if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PSEUDOPAL))
895 num_planes = 2;
896 // For formats without data like hwaccel allow unused pointers to be non-NULL.
897 for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
898 if (frame->data[i])
899 av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
900 frame->data[i] = NULL;
901 }
902 }
903}
904
905static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
906{
907 const AVHWAccel *hwaccel = avctx->hwaccel;
908 int override_dimensions = 1;
909 int ret;
910
911 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
912 if ((ret = av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
913 av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
914 return AVERROR(EINVAL);
915 }
916
917 if (frame->width <= 0 || frame->height <= 0) {
918 frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
919 frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
920 override_dimensions = 0;
921 }
922
923 if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
924 av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
925 return AVERROR(EINVAL);
926 }
927 }
928 ret = ff_decode_frame_props(avctx, frame);
929 if (ret < 0)
930 return ret;
931
932 if (hwaccel) {
933 if (hwaccel->alloc_frame) {
934 ret = hwaccel->alloc_frame(avctx, frame);
935 goto end;
936 }
937 } else
938 avctx->sw_pix_fmt = avctx->pix_fmt;
939
940 ret = avctx->get_buffer2(avctx, frame, flags);
941 if (ret >= 0)
942 validate_avframe_allocation(avctx, frame);
943
944end:
945 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions) {
946 frame->width = avctx->width;
947 frame->height = avctx->height;
948 }
949
950 return ret;
951}
952
953int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
954{
955 int ret = get_buffer_internal(avctx, frame, flags);
956 if (ret < 0) {
957 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
958 frame->width = frame->height = 0;
959 }
960 return ret;
961}
962
963static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
964{
965 AVFrame *tmp;
966 int ret;
967
968 av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
969
970 if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
971 av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
972 frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
973 av_frame_unref(frame);
974 }
975
976 ff_init_buffer_info(avctx, frame);
977
978 if (!frame->data[0])
979 return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
980
981 if (av_frame_is_writable(frame))
982 return ff_decode_frame_props(avctx, frame);
983
984 tmp = av_frame_alloc();
985 if (!tmp)
986 return AVERROR(ENOMEM);
987
988 av_frame_move_ref(tmp, frame);
989
990 ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
991 if (ret < 0) {
992 av_frame_free(&tmp);
993 return ret;
994 }
995
996 av_frame_copy(frame, tmp);
997 av_frame_free(&tmp);
998
999 return 0;
1000}
1001
1002int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
1003{
1004 int ret = reget_buffer_internal(avctx, frame);
1005 if (ret < 0)
1006 av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1007 return ret;
1008}
1009
1010int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
1011{
1012 int i;
1013
1014 for (i = 0; i < count; i++) {
1015 int r = func(c, (char *)arg + i * size);
1016 if (ret)
1017 ret[i] = r;
1018 }
1019 emms_c();
1020 return 0;
1021}
1022
1023int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count)
1024{
1025 int i;
1026
1027 for (i = 0; i < count; i++) {
1028 int r = func(c, arg, i, 0);
1029 if (ret)
1030 ret[i] = r;
1031 }
1032 emms_c();
1033 return 0;
1034}
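
/*
 * A minimal sketch of how codecs use avctx->execute2(), which falls back to
 * the serial loop above when slice threading is inactive. The worker name
 * `decode_slice` and the `slices`/`nb_slices` arguments are assumptions of
 * this sketch.
 *
 *     static int decode_slice(AVCodecContext *c, void *arg, int jobnr, int threadnr)
 *     {
 *         // decode slice number jobnr, using per-thread scratch space threadnr
 *         return 0;
 *     }
 *
 *     // run nb_slices jobs, ignoring the per-job return values:
 *     avctx->execute2(avctx, decode_slice, slices, NULL, nb_slices);
 */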
1035
1036enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags,
1037 unsigned int fourcc)
1038{
1039 while (tags->pix_fmt >= 0) {
1040 if (tags->fourcc == fourcc)
1041 return tags->pix_fmt;
1042 tags++;
1043 }
1044 return AV_PIX_FMT_NONE;
1045}
1046
1047static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt)
1048{
1049 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
1050 return desc->flags & AV_PIX_FMT_FLAG_HWACCEL;
1051}
1052
1053enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
1054{
1055 while (*fmt != AV_PIX_FMT_NONE && is_hwaccel_pix_fmt(*fmt))
1056 ++fmt;
1057 return fmt[0];
1058}
1059
1060static AVHWAccel *find_hwaccel(enum AVCodecID codec_id,
1061 enum AVPixelFormat pix_fmt)
1062{
1063 AVHWAccel *hwaccel = NULL;
1064
1065 while ((hwaccel = av_hwaccel_next(hwaccel)))
1066 if (hwaccel->id == codec_id
1067 && hwaccel->pix_fmt == pix_fmt)
1068 return hwaccel;
1069 return NULL;
1070}
1071
1072static int setup_hwaccel(AVCodecContext *avctx,
1073 const enum AVPixelFormat fmt,
1074 const char *name)
1075{
1076 AVHWAccel *hwa = find_hwaccel(avctx->codec_id, fmt);
1077 int ret = 0;
1078
1079 if (!hwa) {
1080 av_log(avctx, AV_LOG_ERROR,
1081 "Could not find an AVHWAccel for the pixel format: %s",
1082 name);
1083 return AVERROR(ENOENT);
1084 }
1085
1086 if (hwa->capabilities & HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1087 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1088 av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1089 hwa->name);
1090 return AVERROR_PATCHWELCOME;
1091 }
1092
1093 if (hwa->priv_data_size) {
1094 avctx->internal->hwaccel_priv_data = av_mallocz(hwa->priv_data_size);
1095 if (!avctx->internal->hwaccel_priv_data)
1096 return AVERROR(ENOMEM);
1097 }
1098
1099 if (hwa->init) {
1100 ret = hwa->init(avctx);
1101 if (ret < 0) {
1102 av_freep(&avctx->internal->hwaccel_priv_data);
1103 return ret;
1104 }
1105 }
1106
1107 avctx->hwaccel = hwa;
1108
1109 return 0;
1110}
1111
1112int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1113{
1114 const AVPixFmtDescriptor *desc;
1115 enum AVPixelFormat *choices;
1116 enum AVPixelFormat ret;
1117 unsigned n = 0;
1118
1119 while (fmt[n] != AV_PIX_FMT_NONE)
1120 ++n;
1121
1122 av_assert0(n >= 1);
1123 avctx->sw_pix_fmt = fmt[n - 1];
1124 av_assert2(!is_hwaccel_pix_fmt(avctx->sw_pix_fmt));
1125
1126 choices = av_malloc_array(n + 1, sizeof(*choices));
1127 if (!choices)
1128 return AV_PIX_FMT_NONE;
1129
1130 memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1131
1132 for (;;) {
1133 if (avctx->hwaccel && avctx->hwaccel->uninit)
1134 avctx->hwaccel->uninit(avctx);
1135 av_freep(&avctx->internal->hwaccel_priv_data);
1136 avctx->hwaccel = NULL;
1137
1138 av_buffer_unref(&avctx->hw_frames_ctx);
1139
1140 ret = avctx->get_format(avctx, choices);
1141
1142 desc = av_pix_fmt_desc_get(ret);
1143 if (!desc) {
1144 ret = AV_PIX_FMT_NONE;
1145 break;
1146 }
1147
1148 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1149 break;
1150#if FF_API_CAP_VDPAU
1151 if (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
1152 break;
1153#endif
1154
1155 if (avctx->hw_frames_ctx) {
1156 AVHWFramesContext *hw_frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1157 if (hw_frames_ctx->format != ret) {
1158 av_log(avctx, AV_LOG_ERROR, "Format returned from get_buffer() "
1159 "does not match the format of provided AVHWFramesContext\n");
1160 ret = AV_PIX_FMT_NONE;
1161 break;
1162 }
1163 }
1164
1165 if (!setup_hwaccel(avctx, ret, desc->name))
1166 break;
1167
1168 /* Remove failed hwaccel from choices */
1169 for (n = 0; choices[n] != ret; n++)
1170 av_assert0(choices[n] != AV_PIX_FMT_NONE);
1171
1172 do
1173 choices[n] = choices[n + 1];
1174 while (choices[n++] != AV_PIX_FMT_NONE);
1175 }
1176
1177 av_freep(&choices);
1178 return ret;
1179}
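
/*
 * A minimal sketch of a user-side get_format callback as driven by
 * ff_get_format(): prefer a hardware pixel format when the application can
 * handle it, otherwise fall back to the default selection. AV_PIX_FMT_VAAPI is
 * only an example choice and `app_get_format` is an assumed name.
 *
 *     static enum AVPixelFormat app_get_format(AVCodecContext *ctx,
 *                                              const enum AVPixelFormat *fmts)
 *     {
 *         const enum AVPixelFormat *p;
 *         for (p = fmts; *p != AV_PIX_FMT_NONE; p++)
 *             if (*p == AV_PIX_FMT_VAAPI)
 *                 return *p;
 *         return avcodec_default_get_format(ctx, fmts);
 *     }
 */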
1180
1181MAKE_ACCESSORS(AVCodecContext, codec, AVRational, pkt_timebase)
1182MAKE_ACCESSORS(AVCodecContext, codec, const AVCodecDescriptor *, codec_descriptor)
1183MAKE_ACCESSORS(AVCodecContext, codec, int, lowres)
1184MAKE_ACCESSORS(AVCodecContext, codec, int, seek_preroll)
1185MAKE_ACCESSORS(AVCodecContext, codec, uint16_t*, chroma_intra_matrix)
1186
1187unsigned av_codec_get_codec_properties(const AVCodecContext *codec)
1188{
1189 return codec->properties;
1190}
1191
1192int av_codec_get_max_lowres(const AVCodec *codec)
1193{
1194 return codec->max_lowres;
1195}
1196
1197int avpriv_codec_get_cap_skip_frame_fill_param(const AVCodec *codec){
1198 return !!(codec->caps_internal & FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM);
1199}
1200
1201static void get_subtitle_defaults(AVSubtitle *sub)
1202{
1203 memset(sub, 0, sizeof(*sub));
1204 sub->pts = AV_NOPTS_VALUE;
1205}
1206
1207static int64_t get_bit_rate(AVCodecContext *ctx)
1208{
1209 int64_t bit_rate;
1210 int bits_per_sample;
1211
1212 switch (ctx->codec_type) {
1213 case AVMEDIA_TYPE_VIDEO:
1214 case AVMEDIA_TYPE_DATA:
1215 case AVMEDIA_TYPE_SUBTITLE:
1216 case AVMEDIA_TYPE_ATTACHMENT:
1217 bit_rate = ctx->bit_rate;
1218 break;
1219 case AVMEDIA_TYPE_AUDIO:
1220 bits_per_sample = av_get_bits_per_sample(ctx->codec_id);
1221 bit_rate = bits_per_sample ? ctx->sample_rate * (int64_t)ctx->channels * bits_per_sample : ctx->bit_rate;
1222 break;
1223 default:
1224 bit_rate = 0;
1225 break;
1226 }
1227 return bit_rate;
1228}
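
/*
 * Worked example: for a fixed bits-per-sample codec such as 16-bit PCM at
 * 48 kHz stereo, the audio branch above yields
 * 48000 * 2 * 16 = 1536000 bits per second; codecs without a fixed
 * bits-per-sample value fall back to ctx->bit_rate.
 */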
1229
1230int attribute_align_arg ff_codec_open2_recursive(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
1231{
1232 int ret = 0;
1233
1234 ff_unlock_avcodec(codec);
1235
1236 ret = avcodec_open2(avctx, codec, options);
1237
1238 ff_lock_avcodec(avctx, codec);
1239 return ret;
1240}
1241
1242int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
1243{
1244 int ret = 0;
1245 AVDictionary *tmp = NULL;
1246 const AVPixFmtDescriptor *pixdesc;
1247
1248 if (avcodec_is_open(avctx))
1249 return 0;
1250
1251 if ((!codec && !avctx->codec)) {
1252 av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n");
1253 return AVERROR(EINVAL);
1254 }
1255 if ((codec && avctx->codec && codec != avctx->codec)) {
1256 av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, "
1257 "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name);
1258 return AVERROR(EINVAL);
1259 }
1260 if (!codec)
1261 codec = avctx->codec;
1262
1263 if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
1264 return AVERROR(EINVAL);
1265
1266 if (options)
1267 av_dict_copy(&tmp, *options, 0);
1268
1269 ret = ff_lock_avcodec(avctx, codec);
1270 if (ret < 0)
1271 return ret;
1272
1273 avctx->internal = av_mallocz(sizeof(AVCodecInternal));
1274 if (!avctx->internal) {
1275 ret = AVERROR(ENOMEM);
1276 goto end;
1277 }
1278
1279 avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool));
1280 if (!avctx->internal->pool) {
1281 ret = AVERROR(ENOMEM);
1282 goto free_and_end;
1283 }
1284
1285 avctx->internal->to_free = av_frame_alloc();
1286 if (!avctx->internal->to_free) {
1287 ret = AVERROR(ENOMEM);
1288 goto free_and_end;
1289 }
1290
1291 avctx->internal->buffer_frame = av_frame_alloc();
1292 if (!avctx->internal->buffer_frame) {
1293 ret = AVERROR(ENOMEM);
1294 goto free_and_end;
1295 }
1296
1297 avctx->internal->buffer_pkt = av_packet_alloc();
1298 if (!avctx->internal->buffer_pkt) {
1299 ret = AVERROR(ENOMEM);
1300 goto free_and_end;
1301 }
1302
1303 avctx->internal->skip_samples_multiplier = 1;
1304
1305 if (codec->priv_data_size > 0) {
1306 if (!avctx->priv_data) {
1307 avctx->priv_data = av_mallocz(codec->priv_data_size);
1308 if (!avctx->priv_data) {
1309 ret = AVERROR(ENOMEM);
1310 goto end;
1311 }
1312 if (codec->priv_class) {
1313 *(const AVClass **)avctx->priv_data = codec->priv_class;
1314 av_opt_set_defaults(avctx->priv_data);
1315 }
1316 }
1317 if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0)
1318 goto free_and_end;
1319 } else {
1320 avctx->priv_data = NULL;
1321 }
1322 if ((ret = av_opt_set_dict(avctx, &tmp)) < 0)
1323 goto free_and_end;
1324
1325 if (avctx->codec_whitelist && av_match_list(codec->name, avctx->codec_whitelist, ',') <= 0) {
1326 av_log(avctx, AV_LOG_ERROR, "Codec (%s) not on whitelist \'%s\'\n", codec->name, avctx->codec_whitelist);
1327 ret = AVERROR(EINVAL);
1328 goto free_and_end;
1329 }
1330
1331 // only call ff_set_dimensions() for non-H.264/VP6F/DXV codecs so as not to overwrite previously set dimensions
1332 if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height &&
1333 (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F || avctx->codec_id == AV_CODEC_ID_DXV))) {
1334 if (avctx->coded_width && avctx->coded_height)
1335 ret = ff_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
1336 else if (avctx->width && avctx->height)
1337 ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
1338 if (ret < 0)
1339 goto free_and_end;
1340 }
1341
1342 if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
1343 && ( av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0
1344 || av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0)) {
1345 av_log(avctx, AV_LOG_WARNING, "Ignoring invalid width/height values\n");
1346 ff_set_dimensions(avctx, 0, 0);
1347 }
1348
1349 if (avctx->width > 0 && avctx->height > 0) {
1350 if (av_image_check_sar(avctx->width, avctx->height,
1351 avctx->sample_aspect_ratio) < 0) {
1352 av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1353 avctx->sample_aspect_ratio.num,
1354 avctx->sample_aspect_ratio.den);
1355 avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
1356 }
1357 }
1358
1359 /* if the decoder init function was already called previously,
1360 * free the already allocated subtitle_header before overwriting it */
1361 if (av_codec_is_decoder(codec))
1362 av_freep(&avctx->subtitle_header);
1363
1364 if (avctx->channels > FF_SANE_NB_CHANNELS) {
1365 ret = AVERROR(EINVAL);
1366 goto free_and_end;
1367 }
1368
1369 avctx->codec = codec;
1370 if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) &&
1371 avctx->codec_id == AV_CODEC_ID_NONE) {
1372 avctx->codec_type = codec->type;
1373 avctx->codec_id = codec->id;
1374 }
1375 if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
1376 && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
1377 av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n");
1378 ret = AVERROR(EINVAL);
1379 goto free_and_end;
1380 }
1381 avctx->frame_number = 0;
1382 avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id);
1383
1384 if ((avctx->codec->capabilities & AV_CODEC_CAP_EXPERIMENTAL) &&
1385 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1386 const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder";
1387 AVCodec *codec2;
1388 av_log(avctx, AV_LOG_ERROR,
1389 "The %s '%s' is experimental but experimental codecs are not enabled, "
1390 "add '-strict %d' if you want to use it.\n",
1391 codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL);
1392 codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id);
1393 if (!(codec2->capabilities & AV_CODEC_CAP_EXPERIMENTAL))
1394 av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n",
1395 codec_string, codec2->name);
1396 ret = AVERROR_EXPERIMENTAL;
1397 goto free_and_end;
1398 }
1399
1400 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO &&
1401 (!avctx->time_base.num || !avctx->time_base.den)) {
1402 avctx->time_base.num = 1;
1403 avctx->time_base.den = avctx->sample_rate;
1404 }
1405
1406 if (!HAVE_THREADS)
1407 av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n");
1408
1409 if (CONFIG_FRAME_THREAD_ENCODER && av_codec_is_encoder(avctx->codec)) {
1410 ff_unlock_avcodec(codec); // we will instantiate a few encoders, so bump the counter to prevent false detection of a problem
1411 ret = ff_frame_thread_encoder_init(avctx, options ? *options : NULL);
1412 ff_lock_avcodec(avctx, codec);
1413 if (ret < 0)
1414 goto free_and_end;
1415 }
1416
1417 if (HAVE_THREADS
1418 && !(avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) {
1419 ret = ff_thread_init(avctx);
1420 if (ret < 0) {
1421 goto free_and_end;
1422 }
1423 }
1424 if (!HAVE_THREADS && !(codec->capabilities & AV_CODEC_CAP_AUTO_THREADS))
1425 avctx->thread_count = 1;
1426
1427 if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) {
1428 av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
1429 avctx->codec->max_lowres);
1430 avctx->lowres = avctx->codec->max_lowres;
1431 }
1432
1433#if FF_API_VISMV
1434 if (avctx->debug_mv)
1435 av_log(avctx, AV_LOG_WARNING, "The 'vismv' option is deprecated, "
1436 "see the codecview filter instead.\n");
1437#endif
1438
1439 if (av_codec_is_encoder(avctx->codec)) {
1440 int i;
1441#if FF_API_CODED_FRAME
1442FF_DISABLE_DEPRECATION_WARNINGS
1443 avctx->coded_frame = av_frame_alloc();
1444 if (!avctx->coded_frame) {
1445 ret = AVERROR(ENOMEM);
1446 goto free_and_end;
1447 }
1448FF_ENABLE_DEPRECATION_WARNINGS
1449#endif
1450
1451 if (avctx->time_base.num <= 0 || avctx->time_base.den <= 0) {
1452 av_log(avctx, AV_LOG_ERROR, "The encoder timebase is not set.\n");
1453 ret = AVERROR(EINVAL);
1454 goto free_and_end;
1455 }
1456
1457 if (avctx->codec->sample_fmts) {
1458 for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
1459 if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
1460 break;
1461 if (avctx->channels == 1 &&
1462 av_get_planar_sample_fmt(avctx->sample_fmt) ==
1463 av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
1464 avctx->sample_fmt = avctx->codec->sample_fmts[i];
1465 break;
1466 }
1467 }
1468 if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
1469 char buf[128];
1470 snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
1471 av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
1472 (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
1473 ret = AVERROR(EINVAL);
1474 goto free_and_end;
1475 }
1476 }
1477 if (avctx->codec->pix_fmts) {
1478 for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
1479 if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
1480 break;
1481 if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE
1482 && !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG)
1483 && avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) {
1484 char buf[128];
1485 snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
1486 av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
1487 (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
1488 ret = AVERROR(EINVAL);
1489 goto free_and_end;
1490 }
1491 if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ420P ||
1492 avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ411P ||
1493 avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ422P ||
1494 avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ440P ||
1495 avctx->codec->pix_fmts[i] == AV_PIX_FMT_YUVJ444P)
1496 avctx->color_range = AVCOL_RANGE_JPEG;
1497 }
1498 if (avctx->codec->supported_samplerates) {
1499 for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
1500 if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
1501 break;
1502 if (avctx->codec->supported_samplerates[i] == 0) {
1503 av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
1504 avctx->sample_rate);
1505 ret = AVERROR(EINVAL);
1506 goto free_and_end;
1507 }
1508 }
1509 if (avctx->sample_rate < 0) {
1510 av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
1511 avctx->sample_rate);
1512 ret = AVERROR(EINVAL);
1513 goto free_and_end;
1514 }
1515 if (avctx->codec->channel_layouts) {
1516 if (!avctx->channel_layout) {
1517 av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n");
1518 } else {
1519 for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
1520 if (avctx->channel_layout == avctx->codec->channel_layouts[i])
1521 break;
1522 if (avctx->codec->channel_layouts[i] == 0) {
1523 char buf[512];
1524 av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
1525 av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
1526 ret = AVERROR(EINVAL);
1527 goto free_and_end;
1528 }
1529 }
1530 }
1531 if (avctx->channel_layout && avctx->channels) {
1532 int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
1533 if (channels != avctx->channels) {
1534 char buf[512];
1535 av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
1536 av_log(avctx, AV_LOG_ERROR,
1537 "Channel layout '%s' with %d channels does not match number of specified channels %d\n",
1538 buf, channels, avctx->channels);
1539 ret = AVERROR(EINVAL);
1540 goto free_and_end;
1541 }
1542 } else if (avctx->channel_layout) {
1543 avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
1544 }
1545 if (avctx->channels < 0) {
1546 av_log(avctx, AV_LOG_ERROR, "Specified number of channels %d is not supported\n",
1547 avctx->channels);
1548 ret = AVERROR(EINVAL);
1549 goto free_and_end;
1550 }
1551 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1552 pixdesc = av_pix_fmt_desc_get(avctx->pix_fmt);
1553 if ( avctx->bits_per_raw_sample < 0
1554 || (avctx->bits_per_raw_sample > 8 && pixdesc->comp[0].depth <= 8)) {
1555 av_log(avctx, AV_LOG_WARNING, "Specified bit depth %d not possible with the specified pixel format's depth %d\n",
1556 avctx->bits_per_raw_sample, pixdesc->comp[0].depth);
1557 avctx->bits_per_raw_sample = pixdesc->comp[0].depth;
1558 }
1559 if (avctx->width <= 0 || avctx->height <= 0) {
1560 av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
1561 ret = AVERROR(EINVAL);
1562 goto free_and_end;
1563 }
1564 }
1565 if ( (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
1566 && avctx->bit_rate>0 && avctx->bit_rate<1000) {
1567 av_log(avctx, AV_LOG_WARNING, "Bitrate %"PRId64" is extremely low, maybe you mean %"PRId64"k\n", (int64_t)avctx->bit_rate, (int64_t)avctx->bit_rate);
1568 }
1569
1570 if (!avctx->rc_initial_buffer_occupancy)
1571 avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3 / 4;
1572
1573 if (avctx->ticks_per_frame && avctx->time_base.num &&
1574 avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
1575 av_log(avctx, AV_LOG_ERROR,
1576 "ticks_per_frame %d too large for the timebase %d/%d.",
1577 avctx->ticks_per_frame,
1578 avctx->time_base.num,
1579 avctx->time_base.den);
1580 goto free_and_end;
1581 }
1582
1583 if (avctx->hw_frames_ctx) {
1584 AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1585 if (frames_ctx->format != avctx->pix_fmt) {
1586 av_log(avctx, AV_LOG_ERROR,
1587 "Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format\n");
1588 ret = AVERROR(EINVAL);
1589 goto free_and_end;
1590 }
1591 if (avctx->sw_pix_fmt != AV_PIX_FMT_NONE &&
1592 avctx->sw_pix_fmt != frames_ctx->sw_format) {
1593 av_log(avctx, AV_LOG_ERROR,
1594 "Mismatching AVCodecContext.sw_pix_fmt (%s) "
1595 "and AVHWFramesContext.sw_format (%s)\n",
1596 av_get_pix_fmt_name(avctx->sw_pix_fmt),
1597 av_get_pix_fmt_name(frames_ctx->sw_format));
1598 ret = AVERROR(EINVAL);
1599 goto free_and_end;
1600 }
1601 avctx->sw_pix_fmt = frames_ctx->sw_format;
1602 }
1603 }
1604
1605 avctx->pts_correction_num_faulty_pts =
1606 avctx->pts_correction_num_faulty_dts = 0;
1607 avctx->pts_correction_last_pts =
1608 avctx->pts_correction_last_dts = INT64_MIN;
1609
1610 if ( !CONFIG_GRAY && avctx->flags & AV_CODEC_FLAG_GRAY
1611 && avctx->codec_descriptor->type == AVMEDIA_TYPE_VIDEO)
1612 av_log(avctx, AV_LOG_WARNING,
1613 "gray decoding requested but not enabled at configuration time\n");
1614
1615 if ( avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME)
1616 || avctx->internal->frame_thread_encoder)) {
1617 ret = avctx->codec->init(avctx);
1618 if (ret < 0) {
1619 goto free_and_end;
1620 }
1621 }
1622
1623 ret=0;
1624
1625#if FF_API_AUDIOENC_DELAY
1626 if (av_codec_is_encoder(avctx->codec))
1627 avctx->delay = avctx->initial_padding;
1628#endif
1629
1630 if (av_codec_is_decoder(avctx->codec)) {
1631 if (!avctx->bit_rate)
1632 avctx->bit_rate = get_bit_rate(avctx);
1633 /* validate channel layout from the decoder */
1634 if (avctx->channel_layout) {
1635 int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
1636 if (!avctx->channels)
1637 avctx->channels = channels;
1638 else if (channels != avctx->channels) {
1639 char buf[512];
1640 av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
1641 av_log(avctx, AV_LOG_WARNING,
1642 "Channel layout '%s' with %d channels does not match specified number of channels %d: "
1643 "ignoring specified channel layout\n",
1644 buf, channels, avctx->channels);
1645 avctx->channel_layout = 0;
1646 }
1647 }
1648 if (avctx->channels && avctx->channels < 0 ||
1649 avctx->channels > FF_SANE_NB_CHANNELS) {
1650 ret = AVERROR(EINVAL);
1651 goto free_and_end;
1652 }
1653 if (avctx->sub_charenc) {
1654 if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) {
1655 av_log(avctx, AV_LOG_ERROR, "Character encoding is only "
1656 "supported with subtitles codecs\n");
1657 ret = AVERROR(EINVAL);
1658 goto free_and_end;
1659 } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) {
1660 av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, "
1661 "subtitles character encoding will be ignored\n",
1662 avctx->codec_descriptor->name);
1663 avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING;
1664 } else {
1665 /* input character encoding is set for a text based subtitle
1666 * codec at this point */
1667 if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC)
1668 avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER;
1669
1670 if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) {
1671#if CONFIG_ICONV
1672 iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc);
1673 if (cd == (iconv_t)-1) {
1674 ret = AVERROR(errno);
1675 av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context "
1676 "with input character encoding \"%s\"\n", avctx->sub_charenc);
1677 goto free_and_end;
1678 }
1679 iconv_close(cd);
1680#else
1681 av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles "
1682 "conversion needs a libavcodec built with iconv support "
1683 "for this codec\n");
1684 ret = AVERROR(ENOSYS);
1685 goto free_and_end;
1686#endif
1687 }
1688 }
1689 }
1690
1691#if FF_API_AVCTX_TIMEBASE
1692 if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
1693 avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
1694#endif
1695 }
1696 if (codec->priv_data_size > 0 && avctx->priv_data && codec->priv_class) {
1697 av_assert0(*(const AVClass **)avctx->priv_data == codec->priv_class);
1698 }
1699
1700end:
1701 ff_unlock_avcodec(codec);
1702 if (options) {
1703 av_dict_free(options);
1704 *options = tmp;
1705 }
1706
1707 return ret;
1708free_and_end:
1709 if (avctx->codec &&
1710 (avctx->codec->caps_internal & FF_CODEC_CAP_INIT_CLEANUP))
1711 avctx->codec->close(avctx);
1712
1713 if (codec->priv_class && codec->priv_data_size)
1714 av_opt_free(avctx->priv_data);
1715 av_opt_free(avctx);
1716
1717#if FF_API_CODED_FRAME
1718FF_DISABLE_DEPRECATION_WARNINGS
1719 av_frame_free(&avctx->coded_frame);
1720FF_ENABLE_DEPRECATION_WARNINGS
1721#endif
1722
1723 av_dict_free(&tmp);
1724 av_freep(&avctx->priv_data);
1725 if (avctx->internal) {
1726 av_frame_free(&avctx->internal->to_free);
1727 av_frame_free(&avctx->internal->buffer_frame);
1728 av_packet_free(&avctx->internal->buffer_pkt);
1729 av_freep(&avctx->internal->pool);
1730 }
1731 av_freep(&avctx->internal);
1732 avctx->codec = NULL;
1733 goto end;
1734}
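
/*
 * A minimal usage sketch of the decoder-side open sequence around
 * avcodec_open2(); error handling is trimmed and AV_CODEC_ID_H264 is only an
 * example codec id.
 *
 *     const AVCodec *dec = avcodec_find_decoder(AV_CODEC_ID_H264);
 *     AVCodecContext *ctx = avcodec_alloc_context3(dec);
 *     AVDictionary *opts = NULL;
 *
 *     av_dict_set(&opts, "threads", "auto", 0);
 *     if (!ctx || avcodec_open2(ctx, dec, &opts) < 0)
 *         goto fail;                 // hypothetical error label
 *     av_dict_free(&opts);           // entries left over were not consumed
 */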
1735
1736int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
1737{
1738 if (avpkt->size < 0) {
1739 av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size);
1740 return AVERROR(EINVAL);
1741 }
1742 if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
1743 av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
1744 size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
1745 return AVERROR(EINVAL);
1746 }
1747
1748 if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
1749 av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
1750 if (!avpkt->data || avpkt->size < size) {
1751 av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
1752 avpkt->data = avctx->internal->byte_buffer;
1753 avpkt->size = avctx->internal->byte_buffer_size;
1754 }
1755 }
1756
1757 if (avpkt->data) {
1758 AVBufferRef *buf = avpkt->buf;
1759
1760 if (avpkt->size < size) {
1761 av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
1762 return AVERROR(EINVAL);
1763 }
1764
1765 av_init_packet(avpkt);
1766 avpkt->buf = buf;
1767 avpkt->size = size;
1768 return 0;
1769 } else {
1770 int ret = av_new_packet(avpkt, size);
1771 if (ret < 0)
1772 av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
1773 return ret;
1774 }
1775}
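
/*
 * A minimal sketch of the typical call site inside an encoder's encode2()
 * callback: request an output packet sized for the worst case before writing
 * the bitstream. `max_size` is an assumption of this sketch.
 *
 *     if ((ret = ff_alloc_packet2(avctx, avpkt, max_size, 0)) < 0)
 *         return ret;
 */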
1776
1777int ff_alloc_packet(AVPacket *avpkt, int size)
1778{
1779 return ff_alloc_packet2(NULL, avpkt, size, 0);
1780}
1781
1782/**
1783 * Pad last frame with silence.
1784 */
1785static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
1786{
1787 AVFrame *frame = NULL;
1788 int ret;
1789
1790 if (!(frame = av_frame_alloc()))
1791 return AVERROR(ENOMEM);
1792
1793 frame->format = src->format;
1794 frame->channel_layout = src->channel_layout;
1795 av_frame_set_channels(frame, av_frame_get_channels(src));
1796 frame->nb_samples = s->frame_size;
1797 ret = av_frame_get_buffer(frame, 32);
1798 if (ret < 0)
1799 goto fail;
1800
1801 ret = av_frame_copy_props(frame, src);
1802 if (ret < 0)
1803 goto fail;
1804
1805 if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
1806 src->nb_samples, s->channels, s->sample_fmt)) < 0)
1807 goto fail;
1808 if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
1809 frame->nb_samples - src->nb_samples,
1810 s->channels, s->sample_fmt)) < 0)
1811 goto fail;
1812
1813 *dst = frame;
1814
1815 return 0;
1816
1817fail:
1818 av_frame_free(&frame);
1819 return ret;
1820}
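/*
 * Worked example (illustrative): with s->frame_size == 1152 and a final input
 * frame of src->nb_samples == 800, pad_last_frame() allocates a 1152-sample
 * frame, copies the 800 real samples and silences the remaining
 * 1152 - 800 = 352 samples, so fixed-frame-size encoders always see a full
 * frame.
 */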
1821
1822int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
1823 AVPacket *avpkt,
1824 const AVFrame *frame,
1825 int *got_packet_ptr)
1826{
1827 AVFrame *extended_frame = NULL;
1828 AVFrame *padded_frame = NULL;
1829 int ret;
1830 AVPacket user_pkt = *avpkt;
1831 int needs_realloc = !user_pkt.data;
1832
1833 *got_packet_ptr = 0;
1834
1835 if (!avctx->codec->encode2) {
1836 av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
1837 return AVERROR(ENOSYS);
1838 }
1839
1840 if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
1841 av_packet_unref(avpkt);
1842 av_init_packet(avpkt);
1843 return 0;
1844 }
1845
1846 /* ensure that extended_data is properly set */
1847 if (frame && !frame->extended_data) {
1848 if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
1849 avctx->channels > AV_NUM_DATA_POINTERS) {
1850 av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
1851 "with more than %d channels, but extended_data is not set.\n",
1852 AV_NUM_DATA_POINTERS);
1853 return AVERROR(EINVAL);
1854 }
1855 av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
1856
1857 extended_frame = av_frame_alloc();
1858 if (!extended_frame)
1859 return AVERROR(ENOMEM);
1860
1861 memcpy(extended_frame, frame, sizeof(AVFrame));
1862 extended_frame->extended_data = extended_frame->data;
1863 frame = extended_frame;
1864 }
1865
1866 /* extract audio service type metadata */
1867 if (frame) {
1868 AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
1869 if (sd && sd->size >= sizeof(enum AVAudioServiceType))
1870 avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
1871 }
1872
1873 /* check for valid frame size */
1874 if (frame) {
1875 if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
1876 if (frame->nb_samples > avctx->frame_size) {
1877 av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
1878 ret = AVERROR(EINVAL);
1879 goto end;
1880 }
1881 } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
1882 if (frame->nb_samples < avctx->frame_size &&
1883 !avctx->internal->last_audio_frame) {
1884 ret = pad_last_frame(avctx, &padded_frame, frame);
1885 if (ret < 0)
1886 goto end;
1887
1888 frame = padded_frame;
1889 avctx->internal->last_audio_frame = 1;
1890 }
1891
1892 if (frame->nb_samples != avctx->frame_size) {
1893 av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
1894 ret = AVERROR(EINVAL);
1895 goto end;
1896 }
1897 }
1898 }
1899
1900 av_assert0(avctx->codec->encode2);
1901
1902 ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
1903 if (!ret) {
1904 if (*got_packet_ptr) {
1905 if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
1906 if (avpkt->pts == AV_NOPTS_VALUE)
1907 avpkt->pts = frame->pts;
1908 if (!avpkt->duration)
1909 avpkt->duration = ff_samples_to_time_base(avctx,
1910 frame->nb_samples);
1911 }
1912 avpkt->dts = avpkt->pts;
1913 } else {
1914 avpkt->size = 0;
1915 }
1916 }
1917 if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
1918 needs_realloc = 0;
1919 if (user_pkt.data) {
1920 if (user_pkt.size >= avpkt->size) {
1921 memcpy(user_pkt.data, avpkt->data, avpkt->size);
1922 } else {
1923 av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
1924 avpkt->size = user_pkt.size;
1925 ret = -1;
1926 }
1927 avpkt->buf = user_pkt.buf;
1928 avpkt->data = user_pkt.data;
1929 } else {
1930 if (av_dup_packet(avpkt) < 0) {
1931 ret = AVERROR(ENOMEM);
1932 }
1933 }
1934 }
1935
1936 if (!ret) {
1937 if (needs_realloc && avpkt->data) {
1938 ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
1939 if (ret >= 0)
1940 avpkt->data = avpkt->buf->data;
1941 }
1942
1943 avctx->frame_number++;
1944 }
1945
1946 if (ret < 0 || !*got_packet_ptr) {
1947 av_packet_unref(avpkt);
1948 av_init_packet(avpkt);
1949 goto end;
1950 }
1951
1952 /* NOTE: if we add any audio encoders which output non-keyframe packets,
1953 * this needs to be moved to the encoders, but for now we can do it
1954 * here to simplify things */
1955 avpkt->flags |= AV_PKT_FLAG_KEY;
1956
1957end:
1958 av_frame_free(&padded_frame);
1959 av_free(extended_frame);
1960
1961#if FF_API_AUDIOENC_DELAY
1962 avctx->delay = avctx->initial_padding;
1963#endif
1964
1965 return ret;
1966}
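/*
 * Usage sketch (illustrative, not a complete program): driving the legacy
 * avcodec_encode_audio2() API implemented above. "enc_ctx" and "in_frame" are
 * assumed to be set up and filled by the caller; a NULL frame at the end
 * drains encoders with AV_CODEC_CAP_DELAY.
 *
 *     AVPacket pkt;
 *     int got_packet = 0;
 *     av_init_packet(&pkt);
 *     pkt.data = NULL;   // let the encoder allocate the packet
 *     pkt.size = 0;
 *     ret = avcodec_encode_audio2(enc_ctx, &pkt, in_frame, &got_packet);
 *     if (ret < 0)
 *         return ret;
 *     if (got_packet) {
 *         // ... write pkt to the output, then release it ...
 *         av_packet_unref(&pkt);
 *     }
 */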
1967
1968int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
1969 AVPacket *avpkt,
1970 const AVFrame *frame,
1971 int *got_packet_ptr)
1972{
1973 int ret;
1974 AVPacket user_pkt = *avpkt;
1975 int needs_realloc = !user_pkt.data;
1976
1977 *got_packet_ptr = 0;
1978
1979 if (!avctx->codec->encode2) {
1980 av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
1981 return AVERROR(ENOSYS);
1982 }
1983
1984 if(CONFIG_FRAME_THREAD_ENCODER &&
1985 avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))
1986 return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
1987
1988 if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
1989 avctx->stats_out[0] = '\0';
1990
1991 if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
1992 av_packet_unref(avpkt);
1993 av_init_packet(avpkt);
1994 avpkt->size = 0;
1995 return 0;
1996 }
1997
1998 if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
1999 return AVERROR(EINVAL);
2000
2001 if (frame && frame->format == AV_PIX_FMT_NONE)
2002 av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
2003 if (frame && (frame->width == 0 || frame->height == 0))
2004 av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
2005
2006 av_assert0(avctx->codec->encode2);
2007
2008 ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
2009 av_assert0(ret <= 0);
2010
2011 emms_c();
2012
2013 if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
2014 needs_realloc = 0;
2015 if (user_pkt.data) {
2016 if (user_pkt.size >= avpkt->size) {
2017 memcpy(user_pkt.data, avpkt->data, avpkt->size);
2018 } else {
2019 av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
2020 avpkt->size = user_pkt.size;
2021 ret = -1;
2022 }
2023 avpkt->buf = user_pkt.buf;
2024 avpkt->data = user_pkt.data;
2025 } else {
2026 if (av_dup_packet(avpkt) < 0) {
2027 ret = AVERROR(ENOMEM);
2028 }
2029 }
2030 }
2031
2032 if (!ret) {
2033 if (!*got_packet_ptr)
2034 avpkt->size = 0;
2035 else if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
2036 avpkt->pts = avpkt->dts = frame->pts;
2037
2038 if (needs_realloc && avpkt->data) {
2039 ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
2040 if (ret >= 0)
2041 avpkt->data = avpkt->buf->data;
2042 }
2043
2044 avctx->frame_number++;
2045 }
2046
2047 if (ret < 0 || !*got_packet_ptr)
2048 av_packet_unref(avpkt);
2049
2050 return ret;
2051}
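/*
 * Usage sketch (illustrative): draining a delayed video encoder through the
 * legacy avcodec_encode_video2() API above by passing a NULL frame until no
 * more packets are produced. "enc_ctx" is an assumed, already-opened encoder
 * context.
 *
 *     AVPacket pkt;
 *     int got_packet;
 *     do {
 *         av_init_packet(&pkt);
 *         pkt.data = NULL;
 *         pkt.size = 0;
 *         ret = avcodec_encode_video2(enc_ctx, &pkt, NULL, &got_packet);
 *         if (ret < 0)
 *             break;
 *         if (got_packet) {
 *             // ... write pkt ...
 *             av_packet_unref(&pkt);
 *         }
 *     } while (got_packet);
 */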
2052
2053int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
2054 const AVSubtitle *sub)
2055{
2056 int ret;
2057 if (sub->start_display_time) {
2058 av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
2059 return -1;
2060 }
2061
2062 ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
2063 avctx->frame_number++;
2064 return ret;
2065}
2066
2067/**
2068 * Attempt to guess proper monotonic timestamps for decoded video frames
2069 * which might have incorrect times. Input timestamps may wrap around, in
2070 * which case the output will as well.
2071 *
2072 * @param reordered_pts the pts field of the decoded AVPacket, as passed
2073 * through AVFrame.pts
2074 * @param dts the dts field of the decoded AVPacket
2075 * @return one of the input values, may be AV_NOPTS_VALUE
2076 */
2077static int64_t guess_correct_pts(AVCodecContext *ctx,
2078 int64_t reordered_pts, int64_t dts)
2079{
2080 int64_t pts = AV_NOPTS_VALUE;
2081
2082 if (dts != AV_NOPTS_VALUE) {
2083 ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
2084 ctx->pts_correction_last_dts = dts;
2085 } else if (reordered_pts != AV_NOPTS_VALUE)
2086 ctx->pts_correction_last_dts = reordered_pts;
2087
2088 if (reordered_pts != AV_NOPTS_VALUE) {
2089 ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
2090 ctx->pts_correction_last_pts = reordered_pts;
2091 } else if(dts != AV_NOPTS_VALUE)
2092 ctx->pts_correction_last_pts = dts;
2093
2094 if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
2095 && reordered_pts != AV_NOPTS_VALUE)
2096 pts = reordered_pts;
2097 else
2098 pts = dts;
2099
2100 return pts;
2101}
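/*
 * Worked example (illustrative): for a stream where B-frame reordering was
 * lost and the packet pts is occasionally non-monotonic, each backwards step
 * increments pts_correction_num_faulty_pts; once that exceeds the dts
 * counter, guess_correct_pts() starts returning the (monotonic) dts instead.
 * For a well-formed stream both counters stay at 0 and the reordered pts is
 * passed through unchanged.
 */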
2102
2103static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
2104{
2105 int size = 0, ret;
2106 const uint8_t *data;
2107 uint32_t flags;
2108 int64_t val;
2109
2110 data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
2111 if (!data)
2112 return 0;
2113
2114 if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
2115 av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
2116 "changes, but PARAM_CHANGE side data was sent to it.\n");
2117 ret = AVERROR(EINVAL);
2118 goto fail2;
2119 }
2120
2121 if (size < 4)
2122 goto fail;
2123
2124 flags = bytestream_get_le32(&data);
2125 size -= 4;
2126
2127 if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
2128 if (size < 4)
2129 goto fail;
2130 val = bytestream_get_le32(&data);
2131 if (val <= 0 || val > INT_MAX) {
2132 av_log(avctx, AV_LOG_ERROR, "Invalid channel count\n");
2133 ret = AVERROR_INVALIDDATA;
2134 goto fail2;
2135 }
2136 avctx->channels = val;
2137 size -= 4;
2138 }
2139 if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
2140 if (size < 8)
2141 goto fail;
2142 avctx->channel_layout = bytestream_get_le64(&data);
2143 size -= 8;
2144 }
2145 if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
2146 if (size < 4)
2147 goto fail;
2148 val = bytestream_get_le32(&data);
2149 if (val <= 0 || val > INT_MAX) {
2150 av_log(avctx, AV_LOG_ERROR, "Invalid sample rate\n");
2151 ret = AVERROR_INVALIDDATA;
2152 goto fail2;
2153 }
2154 avctx->sample_rate = val;
2155 size -= 4;
2156 }
2157 if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
2158 if (size < 8)
2159 goto fail;
2160 avctx->width = bytestream_get_le32(&data);
2161 avctx->height = bytestream_get_le32(&data);
2162 size -= 8;
2163 ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
2164 if (ret < 0)
2165 goto fail2;
2166 }
2167
2168 return 0;
2169fail:
2170 av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
2171 ret = AVERROR_INVALIDDATA;
2172fail2:
2173 if (ret < 0) {
2174 av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
2175 if (avctx->err_recognition & AV_EF_EXPLODE)
2176 return ret;
2177 }
2178 return 0;
2179}
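/*
 * Illustrative sketch (not part of the library): the byte layout consumed by
 * apply_param_change() above, as a demuxer might emit it. Only the sample
 * rate field is written in this sketch; all multi-byte values are
 * little-endian.
 *
 *     uint8_t *sd = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, 8);
 *     if (!sd)
 *         return AVERROR(ENOMEM);
 *     AV_WL32(sd,     AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE);  // flags
 *     AV_WL32(sd + 4, 48000);                                  // new rate
 */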
2180
2181static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
2182{
2183 int ret;
2184
2185 /* move the original frame to our backup */
2186 av_frame_unref(avci->to_free);
2187 av_frame_move_ref(avci->to_free, frame);
2188
2189 /* now copy everything except the AVBufferRefs back
2190 * note that we make a COPY of the side data, so calling av_frame_free() on
2191 * the caller's frame will work properly */
2192 ret = av_frame_copy_props(frame, avci->to_free);
2193 if (ret < 0)
2194 return ret;
2195
2196 memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
2197 memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
2198 if (avci->to_free->extended_data != avci->to_free->data) {
2199 int planes = av_frame_get_channels(avci->to_free);
2200 int size = planes * sizeof(*frame->extended_data);
2201
2202 if (!size) {
2203 av_frame_unref(frame);
2204 return AVERROR_BUG;
2205 }
2206
2207 frame->extended_data = av_malloc(size);
2208 if (!frame->extended_data) {
2209 av_frame_unref(frame);
2210 return AVERROR(ENOMEM);
2211 }
2212 memcpy(frame->extended_data, avci->to_free->extended_data,
2213 size);
2214 } else
2215 frame->extended_data = frame->data;
2216
2217 frame->format = avci->to_free->format;
2218 frame->width = avci->to_free->width;
2219 frame->height = avci->to_free->height;
2220 frame->channel_layout = avci->to_free->channel_layout;
2221 frame->nb_samples = avci->to_free->nb_samples;
2222 av_frame_set_channels(frame, av_frame_get_channels(avci->to_free));
2223
2224 return 0;
2225}
2226
2227int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
2228 int *got_picture_ptr,
2229 const AVPacket *avpkt)
2230{
2231 AVCodecInternal *avci = avctx->internal;
2232 int ret;
2233 // copy to ensure we do not change avpkt
2234 AVPacket tmp = *avpkt;
2235
2236 if (!avctx->codec)
2237 return AVERROR(EINVAL);
2238 if (avctx->codec->type != AVMEDIA_TYPE_VIDEO) {
2239 av_log(avctx, AV_LOG_ERROR, "Invalid media type for video\n");
2240 return AVERROR(EINVAL);
2241 }
2242
2243 if (!avctx->codec->decode) {
2244 av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n");
2245 return AVERROR(ENOSYS);
2246 }
2247
2248 *got_picture_ptr = 0;
2249 if ((avctx->coded_width || avctx->coded_height) && av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
2250 return AVERROR(EINVAL);
2251
2252 avctx->internal->pkt = avpkt;
2253 ret = apply_param_change(avctx, avpkt);
2254 if (ret < 0)
2255 return ret;
2256
2257 av_frame_unref(picture);
2258
2259 if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size ||
2260 (avctx->active_thread_type & FF_THREAD_FRAME)) {
2261#if FF_API_MERGE_SD
2262FF_DISABLE_DEPRECATION_WARNINGS
2263 int did_split = av_packet_split_side_data(&tmp);
2264FF_ENABLE_DEPRECATION_WARNINGS
2265#endif
2266 ret = apply_param_change(avctx, &tmp);
2267 if (ret < 0)
2268 goto fail;
2269
2270 avctx->internal->pkt = &tmp;
2271 if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
2272 ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
2273 &tmp);
2274 else {
2275 ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
2276 &tmp);
2277 if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
2278 picture->pkt_dts = avpkt->dts;
2279
2280 if(!avctx->has_b_frames){
2281 av_frame_set_pkt_pos(picture, avpkt->pos);
2282 }
2283 //FIXME these should be under if(!avctx->has_b_frames)
2284 /* get_buffer is supposed to set frame parameters */
2285 if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
2286 if (!picture->sample_aspect_ratio.num) picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
2287 if (!picture->width) picture->width = avctx->width;
2288 if (!picture->height) picture->height = avctx->height;
2289 if (picture->format == AV_PIX_FMT_NONE) picture->format = avctx->pix_fmt;
2290 }
2291 }
2292
2293fail:
2294 emms_c(); //needed to avoid an emms_c() call before every return;
2295
2296 avctx->internal->pkt = NULL;
2297#if FF_API_MERGE_SD
2298 if (did_split) {
2299 av_packet_free_side_data(&tmp);
2300 if(ret == tmp.size)
2301 ret = avpkt->size;
2302 }
2303#endif
2304 if (picture->flags & AV_FRAME_FLAG_DISCARD) {
2305 *got_picture_ptr = 0;
2306 }
2307 if (*got_picture_ptr) {
2308 if (!avctx->refcounted_frames) {
2309 int err = unrefcount_frame(avci, picture);
2310 if (err < 0)
2311 return err;
2312 }
2313
2314 avctx->frame_number++;
2315 av_frame_set_best_effort_timestamp(picture,
2316 guess_correct_pts(avctx,
2317 picture->pts,
2318 picture->pkt_dts));
2319 } else
2320 av_frame_unref(picture);
2321 } else
2322 ret = 0;
2323
2324 /* many decoders assign whole AVFrames, thus overwriting extended_data;
2325 * make sure it's set correctly */
2326 av_assert0(!picture->extended_data || picture->extended_data == picture->data);
2327
2328#if FF_API_AVCTX_TIMEBASE
2329 if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
2330 avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
2331#endif
2332
2333 return ret;
2334}
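/*
 * Usage sketch (illustrative): decoding with the legacy
 * avcodec_decode_video2() API above. "dec_ctx" and "pkt" are assumed to come
 * from the caller's demuxing loop; with refcounted_frames == 0 the returned
 * frame is only valid until the next decode call.
 *
 *     AVFrame *frame = av_frame_alloc();
 *     int got_picture = 0;
 *     ret = avcodec_decode_video2(dec_ctx, frame, &got_picture, &pkt);
 *     if (ret >= 0 && got_picture) {
 *         // ... consume frame (display, encode, ...) ...
 *     }
 *     av_frame_free(&frame);
 */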
2335
2336int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
2337 AVFrame *frame,
2338 int *got_frame_ptr,
2339 const AVPacket *avpkt)
2340{
2341 AVCodecInternal *avci = avctx->internal;
2342 int ret = 0;
2343
2344 *got_frame_ptr = 0;
2345
2346 if (!avctx->codec)
2347 return AVERROR(EINVAL);
2348
2349 if (!avctx->codec->decode) {
2350 av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n");
2351 return AVERROR(ENOSYS);
2352 }
2353
2354 if (!avpkt->data && avpkt->size) {
2355 av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
2356 return AVERROR(EINVAL);
2357 }
2358 if (avctx->codec->type != AVMEDIA_TYPE_AUDIO) {
2359 av_log(avctx, AV_LOG_ERROR, "Invalid media type for audio\n");
2360 return AVERROR(EINVAL);
2361 }
2362
2363 av_frame_unref(frame);
2364
2365 if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {
2366 uint8_t *side;
2367 int side_size;
2368 uint32_t discard_padding = 0;
2369 uint8_t skip_reason = 0;
2370 uint8_t discard_reason = 0;
2371 // copy to ensure we do not change avpkt
2372 AVPacket tmp = *avpkt;
2373#if FF_API_MERGE_SD
2374FF_DISABLE_DEPRECATION_WARNINGS
2375 int did_split = av_packet_split_side_data(&tmp);
2376FF_ENABLE_DEPRECATION_WARNINGS
2377#endif
2378 ret = apply_param_change(avctx, &tmp);
2379 if (ret < 0)
2380 goto fail;
2381
2382 avctx->internal->pkt = &tmp;
2383 if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
2384 ret = ff_thread_decode_frame(avctx, frame, got_frame_ptr, &tmp);
2385 else {
2386 ret = avctx->codec->decode(avctx, frame, got_frame_ptr, &tmp);
2387 av_assert0(ret <= tmp.size);
2388 frame->pkt_dts = avpkt->dts;
2389 }
2390 if (ret >= 0 && *got_frame_ptr) {
2391 avctx->frame_number++;
2392 av_frame_set_best_effort_timestamp(frame,
2393 guess_correct_pts(avctx,
2394 frame->pts,
2395 frame->pkt_dts));
2396 if (frame->format == AV_SAMPLE_FMT_NONE)
2397 frame->format = avctx->sample_fmt;
2398 if (!frame->channel_layout)
2399 frame->channel_layout = avctx->channel_layout;
2400 if (!av_frame_get_channels(frame))
2401 av_frame_set_channels(frame, avctx->channels);
2402 if (!frame->sample_rate)
2403 frame->sample_rate = avctx->sample_rate;
2404 }
2405
2406 side= av_packet_get_side_data(avctx->internal->pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
2407 if(side && side_size>=10) {
2408 avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
2409 discard_padding = AV_RL32(side + 4);
2410 av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
2411 avctx->internal->skip_samples, (int)discard_padding);
2412 skip_reason = AV_RL8(side + 8);
2413 discard_reason = AV_RL8(side + 9);
2414 }
2415
2416 if ((frame->flags & AV_FRAME_FLAG_DISCARD) && *got_frame_ptr &&
2417 !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
2418 avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
2419 *got_frame_ptr = 0;
2420 }
2421
2422 if (avctx->internal->skip_samples > 0 && *got_frame_ptr &&
2423 !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
2424 if(frame->nb_samples <= avctx->internal->skip_samples){
2425 *got_frame_ptr = 0;
2426 avctx->internal->skip_samples -= frame->nb_samples;
2427 av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
2428 avctx->internal->skip_samples);
2429 } else {
2430 av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
2431 frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
2432 if(avctx->pkt_timebase.num && avctx->sample_rate) {
2433 int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
2434 (AVRational){1, avctx->sample_rate},
2435 avctx->pkt_timebase);
2436 if(frame->pts!=AV_NOPTS_VALUE)
2437 frame->pts += diff_ts;
2438#if FF_API_PKT_PTS
2439FF_DISABLE_DEPRECATION_WARNINGS
2440 if(frame->pkt_pts!=AV_NOPTS_VALUE)
2441 frame->pkt_pts += diff_ts;
2442FF_ENABLE_DEPRECATION_WARNINGS
2443#endif
2444 if(frame->pkt_dts!=AV_NOPTS_VALUE)
2445 frame->pkt_dts += diff_ts;
2446 if (av_frame_get_pkt_duration(frame) >= diff_ts)
2447 av_frame_set_pkt_duration(frame, av_frame_get_pkt_duration(frame) - diff_ts);
2448 } else {
2449 av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
2450 }
2451 av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
2452 avctx->internal->skip_samples, frame->nb_samples);
2453 frame->nb_samples -= avctx->internal->skip_samples;
2454 avctx->internal->skip_samples = 0;
2455 }
2456 }
2457
2458 if (discard_padding > 0 && discard_padding <= frame->nb_samples && *got_frame_ptr &&
2459 !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
2460 if (discard_padding == frame->nb_samples) {
2461 *got_frame_ptr = 0;
2462 } else {
2463 if(avctx->pkt_timebase.num && avctx->sample_rate) {
2464 int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
2465 (AVRational){1, avctx->sample_rate},
2466 avctx->pkt_timebase);
2467 av_frame_set_pkt_duration(frame, diff_ts);
2468 } else {
2469 av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
2470 }
2471 av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
2472 (int)discard_padding, frame->nb_samples);
2473 frame->nb_samples -= discard_padding;
2474 }
2475 }
2476
2477 if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && *got_frame_ptr) {
2478 AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
2479 if (fside) {
2480 AV_WL32(fside->data, avctx->internal->skip_samples);
2481 AV_WL32(fside->data + 4, discard_padding);
2482 AV_WL8(fside->data + 8, skip_reason);
2483 AV_WL8(fside->data + 9, discard_reason);
2484 avctx->internal->skip_samples = 0;
2485 }
2486 }
2487fail:
2488 avctx->internal->pkt = NULL;
2489#if FF_API_MERGE_SD
2490 if (did_split) {
2491 av_packet_free_side_data(&tmp);
2492 if(ret == tmp.size)
2493 ret = avpkt->size;
2494 }
2495#endif
2496
2497 if (ret >= 0 && *got_frame_ptr) {
2498 if (!avctx->refcounted_frames) {
2499 int err = unrefcount_frame(avci, frame);
2500 if (err < 0)
2501 return err;
2502 }
2503 } else
2504 av_frame_unref(frame);
2505 }
2506
2507 av_assert0(ret <= avpkt->size);
2508
2509 if (!avci->showed_multi_packet_warning &&
2510 ret >= 0 && ret != avpkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2511 av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
2512 avci->showed_multi_packet_warning = 1;
2513 }
2514
2515 return ret;
2516}
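/*
 * Usage sketch (illustrative): the return value of avcodec_decode_audio4()
 * above is the number of bytes consumed, so packets that contain several
 * frames (see the "Multiple frames in a packet" warning) are drained in a
 * loop. "dec_ctx" is assumed caller-provided and "pkt" is a local, mutable
 * copy of the demuxed packet.
 *
 *     AVFrame *frame = av_frame_alloc();
 *     while (pkt.size > 0) {
 *         int got_frame = 0;
 *         int used = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &pkt);
 *         if (used < 0)
 *             break;
 *         if (got_frame) {
 *             // ... consume frame->nb_samples samples ...
 *         }
 *         pkt.data += used;
 *         pkt.size -= used;
 *     }
 *     av_frame_free(&frame);
 */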
2517
2518#define UTF8_MAX_BYTES 4 /* 5- and 6-byte sequences should not be used */
2519static int recode_subtitle(AVCodecContext *avctx,
2520 AVPacket *outpkt, const AVPacket *inpkt)
2521{
2522#if CONFIG_ICONV
2523 iconv_t cd = (iconv_t)-1;
2524 int ret = 0;
2525 char *inb, *outb;
2526 size_t inl, outl;
2527 AVPacket tmp;
2528#endif
2529
2530 if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
2531 return 0;
2532
2533#if CONFIG_ICONV
2534 cd = iconv_open("UTF-8", avctx->sub_charenc);
2535 av_assert0(cd != (iconv_t)-1);
2536
2537 inb = inpkt->data;
2538 inl = inpkt->size;
2539
2540 if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
2541 av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
2542 ret = AVERROR(ENOMEM);
2543 goto end;
2544 }
2545
2546 ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
2547 if (ret < 0)
2548 goto end;
2549 outpkt->buf = tmp.buf;
2550 outpkt->data = tmp.data;
2551 outpkt->size = tmp.size;
2552 outb = outpkt->data;
2553 outl = outpkt->size;
2554
2555 if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
2556 iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
2557 outl >= outpkt->size || inl != 0) {
2558 ret = FFMIN(AVERROR(errno), -1);
2559 av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
2560 "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
2561 av_packet_unref(&tmp);
2562 goto end;
2563 }
2564 outpkt->size -= outl;
2565 memset(outpkt->data + outpkt->size, 0, outl);
2566
2567end:
2568 if (cd != (iconv_t)-1)
2569 iconv_close(cd);
2570 return ret;
2571#else
2572 av_log(avctx, AV_LOG_ERROR, "requesting subtitle recoding without iconv support\n");
2573 return AVERROR(EINVAL);
2574#endif
2575}
2576
2577static int utf8_check(const uint8_t *str)
2578{
2579 const uint8_t *byte;
2580 uint32_t codepoint, min;
2581
2582 while (*str) {
2583 byte = str;
2584 GET_UTF8(codepoint, *(byte++), return 0;);
2585 min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
2586 1 << (5 * (byte - str) - 4);
2587 if (codepoint < min || codepoint >= 0x110000 ||
2588 codepoint == 0xFFFE /* noncharacter (byte-swapped BOM) */ ||
2589 codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
2590 return 0;
2591 str = byte;
2592 }
2593 return 1;
2594}
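/*
 * Worked example (illustrative): for a 3-byte UTF-8 sequence, byte - str == 3
 * and min becomes 1 << (5 * 3 - 4) = 1 << 11 = 0x800, i.e. the smallest code
 * point that genuinely needs three bytes; anything below that is an overlong
 * encoding and utf8_check() rejects it.
 */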
2595
2596#if FF_API_ASS_TIMING
2597static void insert_ts(AVBPrint *buf, int ts)
2598{
2599 if (ts == -1) {
2600 av_bprintf(buf, "9:59:59.99,");
2601 } else {
2602 int h, m, s;
2603
2604 h = ts/360000; ts -= 360000*h;
2605 m = ts/ 6000; ts -= 6000*m;
2606 s = ts/ 100; ts -= 100*s;
2607 av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
2608 }
2609}
2610
2611static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
2612{
2613 int i;
2614 AVBPrint buf;
2615
2616 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
2617
2618 for (i = 0; i < sub->num_rects; i++) {
2619 char *final_dialog;
2620 const char *dialog;
2621 AVSubtitleRect *rect = sub->rects[i];
2622 int ts_start, ts_duration = -1;
2623 long int layer;
2624
2625 if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
2626 continue;
2627
2628 av_bprint_clear(&buf);
2629
2630 /* skip ReadOrder */
2631 dialog = strchr(rect->ass, ',');
2632 if (!dialog)
2633 continue;
2634 dialog++;
2635
2636 /* extract Layer or Marked */
2637 layer = strtol(dialog, (char**)&dialog, 10);
2638 if (*dialog != ',')
2639 continue;
2640 dialog++;
2641
2642 /* rescale timing to the ASS time base (centiseconds) */
2643 ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
2644 if (pkt->duration != -1)
2645 ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
2646 sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
2647
2648 /* construct ASS (standalone file form with timestamps) string */
2649 av_bprintf(&buf, "Dialogue: %ld,", layer);
2650 insert_ts(&buf, ts_start);
2651 insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
2652 av_bprintf(&buf, "%s\r\n", dialog);
2653
2654 final_dialog = av_strdup(buf.str);
2655 if (!av_bprint_is_complete(&buf) || !final_dialog) {
2656 av_freep(&final_dialog);
2657 av_bprint_finalize(&buf, NULL);
2658 return AVERROR(ENOMEM);
2659 }
2660 av_freep(&rect->ass);
2661 rect->ass = final_dialog;
2662 }
2663
2664 av_bprint_finalize(&buf, NULL);
2665 return 0;
2666}
2667#endif
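/*
 * Worked example (illustrative): for a packet with pts == 150 and
 * duration == 300 in a 1/100 time base, convert_sub_to_old_ass_form() above
 * rewrites an event such as "0,0,Default,,0,0,0,,Hello" into
 * "Dialogue: 0,0:00:01.50,0:00:04.50,Default,,0,0,0,,Hello\r\n", i.e. the old
 * standalone-file form with explicit start and end timestamps.
 */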
2668
2669int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
2670 int *got_sub_ptr,
2671 AVPacket *avpkt)
2672{
2673 int i, ret = 0;
2674
2675 if (!avpkt->data && avpkt->size) {
2676 av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
2677 return AVERROR(EINVAL);
2678 }
2679 if (!avctx->codec)
2680 return AVERROR(EINVAL);
2681 if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
2682 av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
2683 return AVERROR(EINVAL);
2684 }
2685
2686 *got_sub_ptr = 0;
2687 get_subtitle_defaults(sub);
2688
2689 if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
2690 AVPacket pkt_recoded;
2691 AVPacket tmp = *avpkt;
2692#if FF_API_MERGE_SD
2693FF_DISABLE_DEPRECATION_WARNINGS
2694 int did_split = av_packet_split_side_data(&tmp);
2695 //apply_param_change(avctx, &tmp);
2696
2697 if (did_split) {
2698 /* FFMIN() prevents overflow in case the packet wasn't allocated with
2699 * proper padding.
2700 * If the side data is smaller than the buffer padding size, the
2701 * remaining bytes should have already been filled with zeros by the
2702 * original packet allocation anyway. */
2703 memset(tmp.data + tmp.size, 0,
2704 FFMIN(avpkt->size - tmp.size, AV_INPUT_BUFFER_PADDING_SIZE));
2705 }
2706FF_ENABLE_DEPRECATION_WARNINGS
2707#endif
2708
2709 pkt_recoded = tmp;
2710 ret = recode_subtitle(avctx, &pkt_recoded, &tmp);
2711 if (ret < 0) {
2712 *got_sub_ptr = 0;
2713 } else {
2714 avctx->internal->pkt = &pkt_recoded;
2715
2716 if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
2717 sub->pts = av_rescale_q(avpkt->pts,
2718 avctx->pkt_timebase, AV_TIME_BASE_Q);
2719 ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
2720 av_assert1((ret >= 0) >= !!*got_sub_ptr &&
2721 !!*got_sub_ptr >= !!sub->num_rects);
2722
2723#if FF_API_ASS_TIMING
2724 if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
2725 && *got_sub_ptr && sub->num_rects) {
2726 const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
2727 : avctx->time_base;
2728 int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
2729 if (err < 0)
2730 ret = err;
2731 }
2732#endif
2733
2734 if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
2735 avctx->pkt_timebase.num) {
2736 AVRational ms = { 1, 1000 };
2737 sub->end_display_time = av_rescale_q(avpkt->duration,
2738 avctx->pkt_timebase, ms);
2739 }
2740
2741 if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
2742 sub->format = 0;
2743 else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
2744 sub->format = 1;
2745
2746 for (i = 0; i < sub->num_rects; i++) {
2747 if (sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
2748 av_log(avctx, AV_LOG_ERROR,
2749 "Invalid UTF-8 in decoded subtitles text; "
2750 "maybe missing -sub_charenc option\n");
2751 avsubtitle_free(sub);
2752 ret = AVERROR_INVALIDDATA;
2753 break;
2754 }
2755 }
2756
2757 if (tmp.data != pkt_recoded.data) { // did we recode?
2758 /* prevent from destroying side data from original packet */
2759 pkt_recoded.side_data = NULL;
2760 pkt_recoded.side_data_elems = 0;
2761
2762 av_packet_unref(&pkt_recoded);
2763 }
2764 avctx->internal->pkt = NULL;
2765 }
2766
2767#if FF_API_MERGE_SD
2768 if (did_split) {
2769 av_packet_free_side_data(&tmp);
2770 if(ret == tmp.size)
2771 ret = avpkt->size;
2772 }
2773#endif
2774
2775 if (*got_sub_ptr)
2776 avctx->frame_number++;
2777 }
2778
2779 return ret;
2780}
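/*
 * Usage sketch (illustrative): decoding a subtitle packet with
 * avcodec_decode_subtitle2() above. "dec_ctx" and "pkt" are assumed
 * caller-provided; the AVSubtitle must be freed with avsubtitle_free() once
 * it has been consumed.
 *
 *     AVSubtitle sub;
 *     int got_sub = 0;
 *     ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_sub, &pkt);
 *     if (ret >= 0 && got_sub) {
 *         // ... render or serialize sub.rects[0 .. sub.num_rects - 1] ...
 *         avsubtitle_free(&sub);
 *     }
 */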
2781
2782void avsubtitle_free(AVSubtitle *sub)
2783{
2784 int i;
2785
2786 for (i = 0; i < sub->num_rects; i++) {
2787 av_freep(&sub->rects[i]->data[0]);
2788 av_freep(&sub->rects[i]->data[1]);
2789 av_freep(&sub->rects[i]->data[2]);
2790 av_freep(&sub->rects[i]->data[3]);
2791 av_freep(&sub->rects[i]->text);
2792 av_freep(&sub->rects[i]->ass);
2793 av_freep(&sub->rects[i]);
2794 }
2795
2796 av_freep(&sub->rects);
2797
2798 memset(sub, 0, sizeof(AVSubtitle));
2799}
2800
2801static int do_decode(AVCodecContext *avctx, AVPacket *pkt)
2802{
2803 int got_frame = 0;
2804 int ret;
2805
2806 av_assert0(!avctx->internal->buffer_frame->buf[0]);
2807
2808 if (!pkt)
2809 pkt = avctx->internal->buffer_pkt;
2810
2811 // This is the lesser evil. The field exists for compatibility with users
2812 // of the legacy API, and users of the new API should not be forced to
2813 // even know about this field.
2814 avctx->refcounted_frames = 1;
2815
2816 // Some codecs (at least WMA lossless) will crash when fed drain packets
2817 // after EOF has been signaled.
2818 if (avctx->internal->draining_done)
2819 return AVERROR_EOF;
2820
2821 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2822 ret = avcodec_decode_video2(avctx, avctx->internal->buffer_frame,
2823 &got_frame, pkt);
2824 if (ret >= 0 && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
2825 ret = pkt->size;
2826 } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2827 ret = avcodec_decode_audio4(avctx, avctx->internal->buffer_frame,
2828 &got_frame, pkt);
2829 } else {
2830 ret = AVERROR(EINVAL);
2831 }
2832
2833 if (ret == AVERROR(EAGAIN))
2834 ret = pkt->size;
2835
2836 if (avctx->internal->draining && !got_frame)
2837 avctx->internal->draining_done = 1;
2838
2839 if (ret < 0)
2840 return ret;
2841
2842 if (ret >= pkt->size) {
2843 av_packet_unref(avctx->internal->buffer_pkt);
2844 } else {
2845 int consumed = ret;
2846
2847 if (pkt != avctx->internal->buffer_pkt) {
2848 av_packet_unref(avctx->internal->buffer_pkt);
2849 if ((ret = av_packet_ref(avctx->internal->buffer_pkt, pkt)) < 0)
2850 return ret;
2851 }
2852
2853 avctx->internal->buffer_pkt->data += consumed;
2854 avctx->internal->buffer_pkt->size -= consumed;
2855 avctx->internal->buffer_pkt->pts = AV_NOPTS_VALUE;
2856 avctx->internal->buffer_pkt->dts = AV_NOPTS_VALUE;
2857 }
2858
2859 if (got_frame)
2860 av_assert0(avctx->internal->buffer_frame->buf[0]);
2861
2862 return 0;
2863}
2864
2865int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
2866{
2867 int ret;
2868
2869 if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
2870 return AVERROR(EINVAL);
2871
2872 if (avctx->internal->draining)
2873 return AVERROR_EOF;
2874
2875 if (avpkt && !avpkt->size && avpkt->data)
2876 return AVERROR(EINVAL);
2877
2878 if (!avpkt || !avpkt->size) {
2879 avctx->internal->draining = 1;
2880 avpkt = NULL;
2881
2882 if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
2883 return 0;
2884 }
2885
2886 if (avctx->codec->send_packet) {
2887 if (avpkt) {
2888 AVPacket tmp = *avpkt;
2889#if FF_API_MERGE_SD
2890FF_DISABLE_DEPRECATION_WARNINGS
2891 int did_split = av_packet_split_side_data(&tmp);
2892FF_ENABLE_DEPRECATION_WARNINGS
2893#endif
2894 ret = apply_param_change(avctx, &tmp);
2895 if (ret >= 0)
2896 ret = avctx->codec->send_packet(avctx, &tmp);
2897#if FF_API_MERGE_SD
2898 if (did_split)
2899 av_packet_free_side_data(&tmp);
2900#endif
2901 return ret;
2902 } else {
2903 return avctx->codec->send_packet(avctx, NULL);
2904 }
2905 }
2906
2907 // Emulation via old API. Assume avpkt is likely not refcounted, while
2908 // decoder output is always refcounted, and avoid copying.
2909
2910 if (avctx->internal->buffer_pkt->size || avctx->internal->buffer_frame->buf[0])
2911 return AVERROR(EAGAIN);
2912
2913 // The goal is decoding the first frame of the packet without using memcpy,
2914 // because the common case is having only 1 frame per packet (especially
2915 // with video, but audio too). In other cases, it can't be avoided, unless
2916 // the user is feeding refcounted packets.
2917 return do_decode(avctx, (AVPacket *)avpkt);
2918}
2919
2920int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
2921{
2922 int ret;
2923
2924 av_frame_unref(frame);
2925
2926 if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
2927 return AVERROR(EINVAL);
2928
2929 if (avctx->codec->receive_frame) {
2930 if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
2931 return AVERROR_EOF;
2932 ret = avctx->codec->receive_frame(avctx, frame);
2933 if (ret >= 0) {
2934 if (av_frame_get_best_effort_timestamp(frame) == AV_NOPTS_VALUE) {
2935 av_frame_set_best_effort_timestamp(frame,
2936 guess_correct_pts(avctx, frame->pts, frame->pkt_dts));
2937 }
2938 }
2939 return ret;
2940 }
2941
2942 // Emulation via old API.
2943
2944 if (!avctx->internal->buffer_frame->buf[0]) {
2945 if (!avctx->internal->buffer_pkt->size && !avctx->internal->draining)
2946 return AVERROR(EAGAIN);
2947
2948 while (1) {
2949 if ((ret = do_decode(avctx, avctx->internal->buffer_pkt)) < 0) {
2950 av_packet_unref(avctx->internal->buffer_pkt);
2951 return ret;
2952 }
2953 // Some audio decoders may consume partial data without returning
2954 // a frame (fate-wmapro-2ch). There is no way to make the caller
2955 // call avcodec_receive_frame() again without returning a frame,
2956 // so try to decode more in these cases.
2957 if (avctx->internal->buffer_frame->buf[0] ||
2958 !avctx->internal->buffer_pkt->size)
2959 break;
2960 }
2961 }
2962
2963 if (!avctx->internal->buffer_frame->buf[0])
2964 return avctx->internal->draining ? AVERROR_EOF : AVERROR(EAGAIN);
2965
2966 av_frame_move_ref(frame, avctx->internal->buffer_frame);
2967 return 0;
2968}
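/*
 * Usage sketch (illustrative): the send/receive decoding loop these two
 * functions implement. "dec_ctx", "pkt" (NULL to start draining) and an
 * allocated "frame" are assumed caller-provided.
 *
 *     ret = avcodec_send_packet(dec_ctx, pkt);
 *     if (ret < 0)
 *         return ret;
 *     for (;;) {
 *         ret = avcodec_receive_frame(dec_ctx, frame);
 *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 *             break;              // need more input / fully drained
 *         if (ret < 0)
 *             return ret;         // a real decoding error
 *         // ... consume frame; it is unreferenced by the next receive call ...
 *     }
 */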
2969
2970static int do_encode(AVCodecContext *avctx, const AVFrame *frame, int *got_packet)
2971{
2972 int ret;
2973 *got_packet = 0;
2974
2975 av_packet_unref(avctx->internal->buffer_pkt);
2976 avctx->internal->buffer_pkt_valid = 0;
2977
2978 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2979 ret = avcodec_encode_video2(avctx, avctx->internal->buffer_pkt,
2980 frame, got_packet);
2981 } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2982 ret = avcodec_encode_audio2(avctx, avctx->internal->buffer_pkt,
2983 frame, got_packet);
2984 } else {
2985 ret = AVERROR(EINVAL);
2986 }
2987
2988 if (ret >= 0 && *got_packet) {
2989 // Encoders must always return ref-counted buffers.
2990 // Side-data-only packets have no data and need not be ref-counted.
2991 av_assert0(!avctx->internal->buffer_pkt->data || avctx->internal->buffer_pkt->buf);
2992 avctx->internal->buffer_pkt_valid = 1;
2993 ret = 0;
2994 } else {
2995 av_packet_unref(avctx->internal->buffer_pkt);
2996 }
2997
2998 return ret;
2999}
3000
3001int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
3002{
3003 if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
3004 return AVERROR(EINVAL);
3005
3006 if (avctx->internal->draining)
3007 return AVERROR_EOF;
3008
3009 if (!frame) {
3010 avctx->internal->draining = 1;
3011
3012 if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
3013 return 0;
3014 }
3015
3016 if (avctx->codec->send_frame)
3017 return avctx->codec->send_frame(avctx, frame);
3018
3019 // Emulation via old API. Do it here instead of avcodec_receive_packet, because:
3020 // 1. if the AVFrame is not refcounted, the copying will be much more
3021 // expensive than copying the packet data
3022 // 2. assume few users use non-refcounted AVPackets, so usually no copy is
3023 // needed
3024
3025 if (avctx->internal->buffer_pkt_valid)
3026 return AVERROR(EAGAIN);
3027
3028 return do_encode(avctx, frame, &(int){0});
3029}
3030
3031int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
3032{
3033 av_packet_unref(avpkt);
3034
3035 if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
3036 return AVERROR(EINVAL);
3037
3038 if (avctx->codec->receive_packet) {
3039 if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
3040 return AVERROR_EOF;
3041 return avctx->codec->receive_packet(avctx, avpkt);
3042 }
3043
3044 // Emulation via old API.
3045
3046 if (!avctx->internal->buffer_pkt_valid) {
3047 int got_packet;
3048 int ret;
3049 if (!avctx->internal->draining)
3050 return AVERROR(EAGAIN);
3051 ret = do_encode(avctx, NULL, &got_packet);
3052 if (ret < 0)
3053 return ret;
3054 if (ret >= 0 && !got_packet)
3055 return AVERROR_EOF;
3056 }
3057
3058 av_packet_move_ref(avpkt, avctx->internal->buffer_pkt);
3059 avctx->internal->buffer_pkt_valid = 0;
3060 return 0;
3061}
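/*
 * Usage sketch (illustrative): the matching send/receive encoding loop.
 * "enc_ctx", "frame" (NULL to flush) and an allocated "pkt" are assumed
 * caller-provided.
 *
 *     ret = avcodec_send_frame(enc_ctx, frame);
 *     if (ret < 0)
 *         return ret;
 *     for (;;) {
 *         ret = avcodec_receive_packet(enc_ctx, pkt);
 *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 *             break;              // need more frames / fully flushed
 *         if (ret < 0)
 *             return ret;
 *         // ... write pkt to the output ...
 *         av_packet_unref(pkt);
 *     }
 */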
3062
3063av_cold int avcodec_close(AVCodecContext *avctx)
3064{
3065 int i;
3066
3067 if (!avctx)
3068 return 0;
3069
3070 if (avcodec_is_open(avctx)) {
3071 FramePool *pool = avctx->internal->pool;
3072 if (CONFIG_FRAME_THREAD_ENCODER &&
3073 avctx->internal->frame_thread_encoder && avctx->thread_count > 1) {
3074 ff_frame_thread_encoder_free(avctx);
3075 }
3076 if (HAVE_THREADS && avctx->internal->thread_ctx)
3077 ff_thread_free(avctx);
3078 if (avctx->codec && avctx->codec->close)
3079 avctx->codec->close(avctx);
3080 avctx->internal->byte_buffer_size = 0;
3081 av_freep(&avctx->internal->byte_buffer);
3082 av_frame_free(&avctx->internal->to_free);
3083 av_frame_free(&avctx->internal->buffer_frame);
3084 av_packet_free(&avctx->internal->buffer_pkt);
3085 for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
3086 av_buffer_pool_uninit(&pool->pools[i]);
3087 av_freep(&avctx->internal->pool);
3088
3089 if (avctx->hwaccel && avctx->hwaccel->uninit)
3090 avctx->hwaccel->uninit(avctx);
3091 av_freep(&avctx->internal->hwaccel_priv_data);
3092
3093 av_freep(&avctx->internal);
3094 }
3095
3096 for (i = 0; i < avctx->nb_coded_side_data; i++)
3097 av_freep(&avctx->coded_side_data[i].data);
3098 av_freep(&avctx->coded_side_data);
3099 avctx->nb_coded_side_data = 0;
3100
3101 av_buffer_unref(&avctx->hw_frames_ctx);
3102 av_buffer_unref(&avctx->hw_device_ctx);
3103
3104 if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
3105 av_opt_free(avctx->priv_data);
3106 av_opt_free(avctx);
3107 av_freep(&avctx->priv_data);
3108 if (av_codec_is_encoder(avctx->codec)) {
3109 av_freep(&avctx->extradata);
3110#if FF_API_CODED_FRAME
3111FF_DISABLE_DEPRECATION_WARNINGS
3112 av_frame_free(&avctx->coded_frame);
3113FF_ENABLE_DEPRECATION_WARNINGS
3114#endif
3115 }
3116 avctx->codec = NULL;
3117 avctx->active_thread_type = 0;
3118
3119 return 0;
3120}
3121
3122static enum AVCodecID remap_deprecated_codec_id(enum AVCodecID id)
3123{
3124 switch(id){
3125 //This is for future deprecated codec IDs; it's empty since the
3126 //last major bump but will fill up again over time, please don't remove it
3127 default : return id;
3128 }
3129}
3130
3131static AVCodec *find_encdec(enum AVCodecID id, int encoder)
3132{
3133 AVCodec *p, *experimental = NULL;
3134 p = first_avcodec;
3135 id= remap_deprecated_codec_id(id);
3136 while (p) {
3137 if ((encoder ? av_codec_is_encoder(p) : av_codec_is_decoder(p)) &&
3138 p->id == id) {
3139 if (p->capabilities & AV_CODEC_CAP_EXPERIMENTAL && !experimental) {
3140 experimental = p;
3141 } else
3142 return p;
3143 }
3144 p = p->next;
3145 }
3146 return experimental;
3147}
3148
3149AVCodec *avcodec_find_encoder(enum AVCodecID id)
3150{
3151 return find_encdec(id, 1);
3152}
3153
3154AVCodec *avcodec_find_encoder_by_name(const char *name)
3155{
3156 AVCodec *p;
3157 if (!name)
3158 return NULL;
3159 p = first_avcodec;
3160 while (p) {
3161 if (av_codec_is_encoder(p) && strcmp(name, p->name) == 0)
3162 return p;
3163 p = p->next;
3164 }
3165 return NULL;
3166}
3167
3168AVCodec *avcodec_find_decoder(enum AVCodecID id)
3169{
3170 return find_encdec(id, 0);
3171}
3172
3173AVCodec *avcodec_find_decoder_by_name(const char *name)
3174{
3175 AVCodec *p;
3176 if (!name)
3177 return NULL;
3178 p = first_avcodec;
3179 while (p) {
3180 if (av_codec_is_decoder(p) && strcmp(name, p->name) == 0)
3181 return p;
3182 p = p->next;
3183 }
3184 return NULL;
3185}
3186
3187const char *avcodec_get_name(enum AVCodecID id)
3188{
3189 const AVCodecDescriptor *cd;
3190 AVCodec *codec;
3191
3192 if (id == AV_CODEC_ID_NONE)
3193 return "none";
3194 cd = avcodec_descriptor_get(id);
3195 if (cd)
3196 return cd->name;
3197 av_log(NULL, AV_LOG_WARNING, "Codec 0x%x is not in the full list.\n", id);
3198 codec = avcodec_find_decoder(id);
3199 if (codec)
3200 return codec->name;
3201 codec = avcodec_find_encoder(id);
3202 if (codec)
3203 return codec->name;
3204 return "unknown_codec";
3205}
3206
3207size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag)
3208{
3209 int i, len, ret = 0;
3210
3211#define TAG_PRINT(x) \
3212 (((x) >= '0' && (x) <= '9') || \
3213 ((x) >= 'a' && (x) <= 'z') || ((x) >= 'A' && (x) <= 'Z') || \
3214 ((x) == '.' || (x) == ' ' || (x) == '-' || (x) == '_'))
3215
3216 for (i = 0; i < 4; i++) {
3217 len = snprintf(buf, buf_size,
3218 TAG_PRINT(codec_tag & 0xFF) ? "%c" : "[%d]", codec_tag & 0xFF);
3219 buf += len;
3220 buf_size = buf_size > len ? buf_size - len : 0;
3221 ret += len;
3222 codec_tag >>= 8;
3223 }
3224 return ret;
3225}
3226
3227void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
3228{
3229 const char *codec_type;
3230 const char *codec_name;
3231 const char *profile = NULL;
3232 int64_t bitrate;
3233 int new_line = 0;
3234 AVRational display_aspect_ratio;
3235 const char *separator = enc->dump_separator ? (const char *)enc->dump_separator : ", ";
3236
3237 if (!buf || buf_size <= 0)
3238 return;
3239 codec_type = av_get_media_type_string(enc->codec_type);
3240 codec_name = avcodec_get_name(enc->codec_id);
3241 profile = avcodec_profile_name(enc->codec_id, enc->profile);
3242
3243 snprintf(buf, buf_size, "%s: %s", codec_type ? codec_type : "unknown",
3244 codec_name);
3245 buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */
3246
3247 if (enc->codec && strcmp(enc->codec->name, codec_name))
3248 snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", enc->codec->name);
3249
3250 if (profile)
3251 snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", profile);
3252 if ( enc->codec_type == AVMEDIA_TYPE_VIDEO
3253 && av_log_get_level() >= AV_LOG_VERBOSE
3254 && enc->refs)
3255 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3256 ", %d reference frame%s",
3257 enc->refs, enc->refs > 1 ? "s" : "");
3258
3259 if (enc->codec_tag)
3260 snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s / 0x%04X)",
3261 av_fourcc2str(enc->codec_tag), enc->codec_tag);
3262
3263 switch (enc->codec_type) {
3264 case AVMEDIA_TYPE_VIDEO:
3265 {
3266 char detail[256] = "(";
3267
3268 av_strlcat(buf, separator, buf_size);
3269
3270 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3271 "%s", enc->pix_fmt == AV_PIX_FMT_NONE ? "none" :
3272 av_get_pix_fmt_name(enc->pix_fmt));
3273 if (enc->bits_per_raw_sample && enc->pix_fmt != AV_PIX_FMT_NONE &&
3274 enc->bits_per_raw_sample < av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth)
3275 av_strlcatf(detail, sizeof(detail), "%d bpc, ", enc->bits_per_raw_sample);
3276 if (enc->color_range != AVCOL_RANGE_UNSPECIFIED)
3277 av_strlcatf(detail, sizeof(detail), "%s, ",
3278 av_color_range_name(enc->color_range));
3279
3280 if (enc->colorspace != AVCOL_SPC_UNSPECIFIED ||
3281 enc->color_primaries != AVCOL_PRI_UNSPECIFIED ||
3282 enc->color_trc != AVCOL_TRC_UNSPECIFIED) {
3283 if (enc->colorspace != (int)enc->color_primaries ||
3284 enc->colorspace != (int)enc->color_trc) {
3285 new_line = 1;
3286 av_strlcatf(detail, sizeof(detail), "%s/%s/%s, ",
3287 av_color_space_name(enc->colorspace),
3288 av_color_primaries_name(enc->color_primaries),
3289 av_color_transfer_name(enc->color_trc));
3290 } else
3291 av_strlcatf(detail, sizeof(detail), "%s, ",
3292 av_get_colorspace_name(enc->colorspace));
3293 }
3294
3295 if (enc->field_order != AV_FIELD_UNKNOWN) {
3296 const char *field_order = "progressive";
3297 if (enc->field_order == AV_FIELD_TT)
3298 field_order = "top first";
3299 else if (enc->field_order == AV_FIELD_BB)
3300 field_order = "bottom first";
3301 else if (enc->field_order == AV_FIELD_TB)
3302 field_order = "top coded first (swapped)";
3303 else if (enc->field_order == AV_FIELD_BT)
3304 field_order = "bottom coded first (swapped)";
3305
3306 av_strlcatf(detail, sizeof(detail), "%s, ", field_order);
3307 }
3308
3309 if (av_log_get_level() >= AV_LOG_VERBOSE &&
3310 enc->chroma_sample_location != AVCHROMA_LOC_UNSPECIFIED)
3311 av_strlcatf(detail, sizeof(detail), "%s, ",
3312 av_chroma_location_name(enc->chroma_sample_location));
3313
3314 if (strlen(detail) > 1) {
3315 detail[strlen(detail) - 2] = 0;
3316 av_strlcatf(buf, buf_size, "%s)", detail);
3317 }
3318 }
3319
3320 if (enc->width) {
3321 av_strlcat(buf, new_line ? separator : ", ", buf_size);
3322
3323 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3324 "%dx%d",
3325 enc->width, enc->height);
3326
3327 if (av_log_get_level() >= AV_LOG_VERBOSE &&
3328 (enc->width != enc->coded_width ||
3329 enc->height != enc->coded_height))
3330 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3331 " (%dx%d)", enc->coded_width, enc->coded_height);
3332
3333 if (enc->sample_aspect_ratio.num) {
3334 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3335 enc->width * (int64_t)enc->sample_aspect_ratio.num,
3336 enc->height * (int64_t)enc->sample_aspect_ratio.den,
3337 1024 * 1024);
3338 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3339 " [SAR %d:%d DAR %d:%d]",
3340 enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den,
3341 display_aspect_ratio.num, display_aspect_ratio.den);
3342 }
3343 if (av_log_get_level() >= AV_LOG_DEBUG) {
3344 int g = av_gcd(enc->time_base.num, enc->time_base.den);
3345 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3346 ", %d/%d",
3347 enc->time_base.num / g, enc->time_base.den / g);
3348 }
3349 }
3350 if (encode) {
3351 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3352 ", q=%d-%d", enc->qmin, enc->qmax);
3353 } else {
3354 if (enc->properties & FF_CODEC_PROPERTY_CLOSED_CAPTIONS)
3355 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3356 ", Closed Captions");
3357 if (enc->properties & FF_CODEC_PROPERTY_LOSSLESS)
3358 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3359 ", lossless");
3360 }
3361 break;
3362 case AVMEDIA_TYPE_AUDIO:
3363 av_strlcat(buf, separator, buf_size);
3364
3365 if (enc->sample_rate) {
3366 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3367 "%d Hz, ", enc->sample_rate);
3368 }
3369 av_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout);
3370 if (enc->sample_fmt != AV_SAMPLE_FMT_NONE) {
3371 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3372 ", %s", av_get_sample_fmt_name(enc->sample_fmt));
3373 }
3374 if ( enc->bits_per_raw_sample > 0
3375 && enc->bits_per_raw_sample != av_get_bytes_per_sample(enc->sample_fmt) * 8)
3376 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3377 " (%d bit)", enc->bits_per_raw_sample);
3378 if (av_log_get_level() >= AV_LOG_VERBOSE) {
3379 if (enc->initial_padding)
3380 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3381 ", delay %d", enc->initial_padding);
3382 if (enc->trailing_padding)
3383 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3384 ", padding %d", enc->trailing_padding);
3385 }
3386 break;
3387 case AVMEDIA_TYPE_DATA:
3388 if (av_log_get_level() >= AV_LOG_DEBUG) {
3389 int g = av_gcd(enc->time_base.num, enc->time_base.den);
3390 if (g)
3391 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3392 ", %d/%d",
3393 enc->time_base.num / g, enc->time_base.den / g);
3394 }
3395 break;
3396 case AVMEDIA_TYPE_SUBTITLE:
3397 if (enc->width)
3398 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3399 ", %dx%d", enc->width, enc->height);
3400 break;
3401 default:
3402 return;
3403 }
3404 if (encode) {
3405 if (enc->flags & AV_CODEC_FLAG_PASS1)
3406 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3407 ", pass 1");
3408 if (enc->flags & AV_CODEC_FLAG_PASS2)
3409 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3410 ", pass 2");
3411 }
3412 bitrate = get_bit_rate(enc);
3413 if (bitrate != 0) {
3414 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3415 ", %"PRId64" kb/s", bitrate / 1000);
3416 } else if (enc->rc_max_rate > 0) {
3417 snprintf(buf + strlen(buf), buf_size - strlen(buf),
3418 ", max. %"PRId64" kb/s", (int64_t)enc->rc_max_rate / 1000);
3419 }
3420}
3421
3422const char *av_get_profile_name(const AVCodec *codec, int profile)
3423{
3424 const AVProfile *p;
3425 if (profile == FF_PROFILE_UNKNOWN || !codec->profiles)
3426 return NULL;
3427
3428 for (p = codec->profiles; p->profile != FF_PROFILE_UNKNOWN; p++)
3429 if (p->profile == profile)
3430 return p->name;
3431
3432 return NULL;
3433}
3434
3435const char *avcodec_profile_name(enum AVCodecID codec_id, int profile)
3436{
3437 const AVCodecDescriptor *desc = avcodec_descriptor_get(codec_id);
3438 const AVProfile *p;
3439
3440 if (profile == FF_PROFILE_UNKNOWN || !desc || !desc->profiles)
3441 return NULL;
3442
3443 for (p = desc->profiles; p->profile != FF_PROFILE_UNKNOWN; p++)
3444 if (p->profile == profile)
3445 return p->name;
3446
3447 return NULL;
3448}
3449
3450unsigned avcodec_version(void)
3451{
3452// av_assert0(AV_CODEC_ID_V410==164);
3453 av_assert0(AV_CODEC_ID_PCM_S8_PLANAR==65563);
3454 av_assert0(AV_CODEC_ID_ADPCM_G722==69660);
3455// av_assert0(AV_CODEC_ID_BMV_AUDIO==86071);
3456 av_assert0(AV_CODEC_ID_SRT==94216);
3457 av_assert0(LIBAVCODEC_VERSION_MICRO >= 100);
3458
3459 return LIBAVCODEC_VERSION_INT;
3460}
3461
3462const char *avcodec_configuration(void)
3463{
3464 return FFMPEG_CONFIGURATION;
3465}
3466
3467const char *avcodec_license(void)
3468{
3469#define LICENSE_PREFIX "libavcodec license: "
3470 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
3471}
3472
3473void avcodec_flush_buffers(AVCodecContext *avctx)
3474{
3475 avctx->internal->draining = 0;
3476 avctx->internal->draining_done = 0;
3477 av_frame_unref(avctx->internal->buffer_frame);
3478 av_packet_unref(avctx->internal->buffer_pkt);
3479 avctx->internal->buffer_pkt_valid = 0;
3480
3481 if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
3482 ff_thread_flush(avctx);
3483 else if (avctx->codec->flush)
3484 avctx->codec->flush(avctx);
3485
3486 avctx->pts_correction_last_pts =
3487 avctx->pts_correction_last_dts = INT64_MIN;
3488
3489 if (!avctx->refcounted_frames)
3490 av_frame_unref(avctx->internal->to_free);
3491}
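/*
 * Usage sketch (illustrative): avcodec_flush_buffers() is what a player would
 * call when seeking, so that no stale frames or draining state survive the
 * jump. "fmt_ctx", "dec_ctx" and the libavformat seek call belong to the
 * caller's demuxer handling and are assumptions of this sketch.
 *
 *     ret = av_seek_frame(fmt_ctx, stream_index, seek_ts, AVSEEK_FLAG_BACKWARD);
 *     if (ret >= 0)
 *         avcodec_flush_buffers(dec_ctx);   // drop buffered frames/packets
 */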
3492
3493int av_get_exact_bits_per_sample(enum AVCodecID codec_id)
3494{
3495 switch (codec_id) {
3496 case AV_CODEC_ID_8SVX_EXP:
3497 case AV_CODEC_ID_8SVX_FIB:
3498 case AV_CODEC_ID_ADPCM_CT:
3499 case AV_CODEC_ID_ADPCM_IMA_APC:
3500 case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
3501 case AV_CODEC_ID_ADPCM_IMA_OKI:
3502 case AV_CODEC_ID_ADPCM_IMA_WS:
3503 case AV_CODEC_ID_ADPCM_G722:
3504 case AV_CODEC_ID_ADPCM_YAMAHA:
3505 case AV_CODEC_ID_ADPCM_AICA:
3506 return 4;
3507 case AV_CODEC_ID_DSD_LSBF:
3508 case AV_CODEC_ID_DSD_MSBF:
3509 case AV_CODEC_ID_DSD_LSBF_PLANAR:
3510 case AV_CODEC_ID_DSD_MSBF_PLANAR:
3511 case AV_CODEC_ID_PCM_ALAW:
3512 case AV_CODEC_ID_PCM_MULAW:
3513 case AV_CODEC_ID_PCM_S8:
3514 case AV_CODEC_ID_PCM_S8_PLANAR:
3515 case AV_CODEC_ID_PCM_U8:
3516 case AV_CODEC_ID_PCM_ZORK:
3517 case AV_CODEC_ID_SDX2_DPCM:
3518 return 8;
3519 case AV_CODEC_ID_PCM_S16BE:
3520 case AV_CODEC_ID_PCM_S16BE_PLANAR:
3521 case AV_CODEC_ID_PCM_S16LE:
3522 case AV_CODEC_ID_PCM_S16LE_PLANAR:
3523 case AV_CODEC_ID_PCM_U16BE:
3524 case AV_CODEC_ID_PCM_U16LE:
3525 return 16;
3526 case AV_CODEC_ID_PCM_S24DAUD:
3527 case AV_CODEC_ID_PCM_S24BE:
3528 case AV_CODEC_ID_PCM_S24LE:
3529 case AV_CODEC_ID_PCM_S24LE_PLANAR:
3530 case AV_CODEC_ID_PCM_U24BE:
3531 case AV_CODEC_ID_PCM_U24LE:
3532 return 24;
3533 case AV_CODEC_ID_PCM_S32BE:
3534 case AV_CODEC_ID_PCM_S32LE:
3535 case AV_CODEC_ID_PCM_S32LE_PLANAR:
3536 case AV_CODEC_ID_PCM_U32BE:
3537 case AV_CODEC_ID_PCM_U32LE:
3538 case AV_CODEC_ID_PCM_F32BE:
3539 case AV_CODEC_ID_PCM_F32LE:
3540 case AV_CODEC_ID_PCM_F24LE:
3541 case AV_CODEC_ID_PCM_F16LE:
3542 return 32;
3543 case AV_CODEC_ID_PCM_F64BE:
3544 case AV_CODEC_ID_PCM_F64LE:
3545 case AV_CODEC_ID_PCM_S64BE:
3546 case AV_CODEC_ID_PCM_S64LE:
3547 return 64;
3548 default:
3549 return 0;
3550 }
3551}
3552
3553enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be)
3554{
3555 static const enum AVCodecID map[AV_SAMPLE_FMT_NB][2] = {
3556 [AV_SAMPLE_FMT_U8 ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 },
3557 [AV_SAMPLE_FMT_S16 ] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE },
3558 [AV_SAMPLE_FMT_S32 ] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE },
3559 [AV_SAMPLE_FMT_FLT ] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE },
3560 [AV_SAMPLE_FMT_DBL ] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE },
3561 [AV_SAMPLE_FMT_U8P ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 },
3562 [AV_SAMPLE_FMT_S16P] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE },
3563 [AV_SAMPLE_FMT_S32P] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE },
3564 [AV_SAMPLE_FMT_S64P] = { AV_CODEC_ID_PCM_S64LE, AV_CODEC_ID_PCM_S64BE },
3565 [AV_SAMPLE_FMT_FLTP] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE },
3566 [AV_SAMPLE_FMT_DBLP] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE },
3567 };
3568 if (fmt < 0 || fmt >= AV_SAMPLE_FMT_NB)
3569 return AV_CODEC_ID_NONE;
3570 if (be < 0 || be > 1)
3571 be = AV_NE(1, 0);
3572 return map[fmt][be];
3573}
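/*
 * Example (illustrative): av_get_pcm_codec(AV_SAMPLE_FMT_S16, 0) returns
 * AV_CODEC_ID_PCM_S16LE, with be == 1 it returns AV_CODEC_ID_PCM_S16BE, and a
 * negative be picks the native endianness via AV_NE(). Planar sample formats
 * map to the same interleaved PCM codec IDs.
 */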
3574
3575int av_get_bits_per_sample(enum AVCodecID codec_id)
3576{
3577 switch (codec_id) {
3578 case AV_CODEC_ID_ADPCM_SBPRO_2:
3579 return 2;
3580 case AV_CODEC_ID_ADPCM_SBPRO_3:
3581 return 3;
3582 case AV_CODEC_ID_ADPCM_SBPRO_4:
3583 case AV_CODEC_ID_ADPCM_IMA_WAV:
3584 case AV_CODEC_ID_ADPCM_IMA_QT:
3585 case AV_CODEC_ID_ADPCM_SWF:
3586 case AV_CODEC_ID_ADPCM_MS:
3587 return 4;
3588 default:
3589 return av_get_exact_bits_per_sample(codec_id);
3590 }
3591}
3592
3593static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba,
3594 uint32_t tag, int bits_per_coded_sample, int64_t bitrate,
3595 uint8_t * extradata, int frame_size, int frame_bytes)
3596{
3597 int bps = av_get_exact_bits_per_sample(id);
3598 int framecount = (ba > 0 && frame_bytes / ba > 0) ? frame_bytes / ba : 1;
3599
3600 /* codecs with an exact constant bits per sample */
3601 if (bps > 0 && ch > 0 && frame_bytes > 0 && ch < 32768 && bps < 32768)
3602 return (frame_bytes * 8LL) / (bps * ch);
3603 bps = bits_per_coded_sample;
3604
3605 /* codecs with a fixed packet duration */
3606 switch (id) {
3607 case AV_CODEC_ID_ADPCM_ADX: return 32;
3608 case AV_CODEC_ID_ADPCM_IMA_QT: return 64;
3609 case AV_CODEC_ID_ADPCM_EA_XAS: return 128;
3610 case AV_CODEC_ID_AMR_NB:
3611 case AV_CODEC_ID_EVRC:
3612 case AV_CODEC_ID_GSM:
3613 case AV_CODEC_ID_QCELP:
3614 case AV_CODEC_ID_RA_288: return 160;
3615 case AV_CODEC_ID_AMR_WB:
3616 case AV_CODEC_ID_GSM_MS: return 320;
3617 case AV_CODEC_ID_MP1: return 384;
3618 case AV_CODEC_ID_ATRAC1: return 512;
3619 case AV_CODEC_ID_ATRAC3: return 1024 * framecount;
3620 case AV_CODEC_ID_ATRAC3P: return 2048;
3621 case AV_CODEC_ID_MP2:
3622 case AV_CODEC_ID_MUSEPACK7: return 1152;
3623 case AV_CODEC_ID_AC3: return 1536;
3624 }
3625
3626 if (sr > 0) {
3627 /* calc from sample rate */
3628 if (id == AV_CODEC_ID_TTA)
3629 return 256 * sr / 245;
3630 else if (id == AV_CODEC_ID_DST)
3631 return 588 * sr / 44100;
3632
3633 if (ch > 0) {
3634 /* calc from sample rate and channels */
3635 if (id == AV_CODEC_ID_BINKAUDIO_DCT)
3636 return (480 << (sr / 22050)) / ch;
3637 }
3638 }
3639
3640 if (ba > 0) {
3641 /* calc from block_align */
3642 if (id == AV_CODEC_ID_SIPR) {
3643 switch (ba) {
3644 case 20: return 160;
3645 case 19: return 144;
3646 case 29: return 288;
3647 case 37: return 480;
3648 }
3649 } else if (id == AV_CODEC_ID_ILBC) {
3650 switch (ba) {
3651 case 38: return 160;
3652 case 50: return 240;
3653 }
3654 }
3655 }
3656
3657 if (frame_bytes > 0) {
3658 /* calc from frame_bytes only */
3659 if (id == AV_CODEC_ID_TRUESPEECH)
3660 return 240 * (frame_bytes / 32);
3661 if (id == AV_CODEC_ID_NELLYMOSER)
3662 return 256 * (frame_bytes / 64);
3663 if (id == AV_CODEC_ID_RA_144)
3664 return 160 * (frame_bytes / 20);
3665 if (id == AV_CODEC_ID_G723_1)
3666 return 240 * (frame_bytes / 24);
3667
3668 if (bps > 0) {
3669 /* calc from frame_bytes and bits_per_coded_sample */
3670 if (id == AV_CODEC_ID_ADPCM_G726)
3671 return frame_bytes * 8 / bps;
3672 }
3673
3674 if (ch > 0 && ch < INT_MAX/16) {
3675 /* calc from frame_bytes and channels */
3676 switch (id) {
3677 case AV_CODEC_ID_ADPCM_AFC:
3678 return frame_bytes / (9 * ch) * 16;
3679 case AV_CODEC_ID_ADPCM_PSX:
3680 case AV_CODEC_ID_ADPCM_DTK:
3681 return frame_bytes / (16 * ch) * 28;
3682 case AV_CODEC_ID_ADPCM_4XM:
3683 case AV_CODEC_ID_ADPCM_IMA_DAT4:
3684 case AV_CODEC_ID_ADPCM_IMA_ISS:
3685 return (frame_bytes - 4 * ch) * 2 / ch;
3686 case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
3687 return (frame_bytes - 4) * 2 / ch;
3688 case AV_CODEC_ID_ADPCM_IMA_AMV:
3689 return (frame_bytes - 8) * 2 / ch;
3690 case AV_CODEC_ID_ADPCM_THP:
3691 case AV_CODEC_ID_ADPCM_THP_LE:
3692 if (extradata)
3693 return frame_bytes * 14 / (8 * ch);
3694 break;
3695 case AV_CODEC_ID_ADPCM_XA:
3696 return (frame_bytes / 128) * 224 / ch;
3697 case AV_CODEC_ID_INTERPLAY_DPCM:
3698 return (frame_bytes - 6 - ch) / ch;
3699 case AV_CODEC_ID_ROQ_DPCM:
3700 return (frame_bytes - 8) / ch;
3701 case AV_CODEC_ID_XAN_DPCM:
3702 return (frame_bytes - 2 * ch) / ch;
3703 case AV_CODEC_ID_MACE3:
3704 return 3 * frame_bytes / ch;
3705 case AV_CODEC_ID_MACE6:
3706 return 6 * frame_bytes / ch;
3707 case AV_CODEC_ID_PCM_LXF:
3708 return 2 * (frame_bytes / (5 * ch));
3709 case AV_CODEC_ID_IAC:
3710 case AV_CODEC_ID_IMC:
3711 return 4 * frame_bytes / ch;
3712 }
3713
3714 if (tag) {
3715 /* calc from frame_bytes, channels, and codec_tag */
3716 if (id == AV_CODEC_ID_SOL_DPCM) {
3717 if (tag == 3)
3718 return frame_bytes / ch;
3719 else
3720 return frame_bytes * 2 / ch;
3721 }
3722 }
3723
3724 if (ba > 0) {
3725 /* calc from frame_bytes, channels, and block_align */
3726 int blocks = frame_bytes / ba;
3727 switch (id) {
3728 case AV_CODEC_ID_ADPCM_IMA_WAV:
3729 if (bps < 2 || bps > 5)
3730 return 0;
3731 return blocks * (1 + (ba - 4 * ch) / (bps * ch) * 8);
3732 case AV_CODEC_ID_ADPCM_IMA_DK3:
3733 return blocks * (((ba - 16) * 2 / 3 * 4) / ch);
3734 case AV_CODEC_ID_ADPCM_IMA_DK4:
3735 return blocks * (1 + (ba - 4 * ch) * 2 / ch);
3736 case AV_CODEC_ID_ADPCM_IMA_RAD:
3737 return blocks * ((ba - 4 * ch) * 2 / ch);
3738 case AV_CODEC_ID_ADPCM_MS:
3739 return blocks * (2 + (ba - 7 * ch) * 2 / ch);
3740 case AV_CODEC_ID_ADPCM_MTAF:
3741 return blocks * (ba - 16) * 2 / ch;
3742 }
3743 }
3744
3745 if (bps > 0) {
3746 /* calc from frame_bytes, channels, and bits_per_coded_sample */
3747 switch (id) {
3748 case AV_CODEC_ID_PCM_DVD:
3749 if (bps < 4)
3750 return 0;
3751 return 2 * (frame_bytes / ((bps * 2 / 8) * ch));
3752 case AV_CODEC_ID_PCM_BLURAY:
3753 if (bps < 4)
3754 return 0;
3755 return frame_bytes / ((FFALIGN(ch, 2) * bps) / 8);
3756 case AV_CODEC_ID_S302M:
3757 return 2 * (frame_bytes / ((bps + 4) / 4)) / ch;
3758 }
3759 }
3760 }
3761 }
3762
3763 /* Fall back on using frame_size */
3764 if (frame_size > 1 && frame_bytes)
3765 return frame_size;
3766
3767 // For WMA we currently have no other means to calculate the duration, so we
3768 // do it here by assuming CBR, which is true for all known cases.
3769 if (bitrate > 0 && frame_bytes > 0 && sr > 0 && ba > 1) {
3770 if (id == AV_CODEC_ID_WMAV1 || id == AV_CODEC_ID_WMAV2)
3771 return (frame_bytes * 8LL * sr) / bitrate;
3772 }
3773
3774 return 0;
3775}
3776
3777int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
3778{
3779 return get_audio_frame_duration(avctx->codec_id, avctx->sample_rate,
3780 avctx->channels, avctx->block_align,
3781 avctx->codec_tag, avctx->bits_per_coded_sample,
3782 avctx->bit_rate, avctx->extradata, avctx->frame_size,
3783 frame_bytes);
3784}
3785
3786int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes)
3787{
3788 return get_audio_frame_duration(par->codec_id, par->sample_rate,
3789 par->channels, par->block_align,
3790 par->codec_tag, par->bits_per_coded_sample,
3791 par->bit_rate, par->extradata, par->frame_size,
3792 frame_bytes);
3793}
3794
3795#if !HAVE_THREADS
3796int ff_thread_init(AVCodecContext *s)
3797{
3798 return -1;
3799}
3800
3801#endif
3802
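/* Store v using Xiph-style lacing: a run of 0xff bytes followed by one final
 * byte holding the remainder. Returns the number of bytes written; e.g.
 * v = 600 is written as 0xff 0xff 0x5a (255 + 255 + 90) and returns 3. */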
3803unsigned int av_xiphlacing(unsigned char *s, unsigned int v)
3804{
3805 unsigned int n = 0;
3806
3807 while (v >= 0xff) {
3808 *s++ = 0xff;
3809 v -= 0xff;
3810 n++;
3811 }
3812 *s = v;
3813 n++;
3814 return n;
3815}
3816
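/* Return the index of the first entry of tab equal to the pair (a, b),
 * or size if there is no such entry. */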
3817int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
3818{
3819 int i;
3820 for (i = 0; i < size && !(tab[i][0] == a && tab[i][1] == b); i++) ;
3821 return i;
3822}
3823
3824#if FF_API_MISSING_SAMPLE
3825FF_DISABLE_DEPRECATION_WARNINGS
3826void av_log_missing_feature(void *avc, const char *feature, int want_sample)
3827{
3828 av_log(avc, AV_LOG_WARNING, "%s is not implemented. Update your FFmpeg "
3829 "version to the newest one from Git. If the problem still "
3830 "occurs, it means that your file has a feature which has not "
3831 "been implemented.\n", feature);
3832 if (want_sample)
3833 av_log_ask_for_sample(avc, NULL);
3834}
3835
3836void av_log_ask_for_sample(void *avc, const char *msg, ...)
3837{
3838 va_list argument_list;
3839
3840 va_start(argument_list, msg);
3841
3842 if (msg)
3843 av_vlog(avc, AV_LOG_WARNING, msg, argument_list);
3844 av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample "
3845 "of this file to ftp://upload.ffmpeg.org/incoming/ "
3846 "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n");
3847
3848 va_end(argument_list);
3849}
3850FF_ENABLE_DEPRECATION_WARNINGS
3851#endif /* FF_API_MISSING_SAMPLE */
3852
3853static AVHWAccel *first_hwaccel = NULL;
3854static AVHWAccel **last_hwaccel = &first_hwaccel;
3855
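/* Append the hwaccel to the global list. The compare-and-swap makes the tail
 * append safe against concurrent registration; last_hwaccel only serves as a
 * starting point for the scan and may lag behind. */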
3856void av_register_hwaccel(AVHWAccel *hwaccel)
3857{
3858 AVHWAccel **p = last_hwaccel;
3859 hwaccel->next = NULL;
3860 while (*p || avpriv_atomic_ptr_cas((void * volatile *)p, NULL, hwaccel))
3861 p = &(*p)->next;
3862 last_hwaccel = &hwaccel->next;
3863}
3864
3865AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel)
3866{
3867 return hwaccel ? hwaccel->next : first_hwaccel;
3868}
3869
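/* Install a user-supplied lock manager, or remove the current one when cb is
 * NULL. The previously registered callback (if any) is asked to destroy its
 * mutexes first, and the new callback must successfully create both the codec
 * and the avformat mutex before it is put into use. */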
3870int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op))
3871{
3872 if (lockmgr_cb) {
3873 // There is no good way to roll back a failed attempt to destroy the
3874 // mutex, so we ignore such failures.
3875 lockmgr_cb(&codec_mutex, AV_LOCK_DESTROY);
3876 lockmgr_cb(&avformat_mutex, AV_LOCK_DESTROY);
3877 lockmgr_cb = NULL;
3878 codec_mutex = NULL;
3879 avformat_mutex = NULL;
3880 }
3881
3882 if (cb) {
3883 void *new_codec_mutex = NULL;
3884 void *new_avformat_mutex = NULL;
3885 int err;
3886 if ((err = cb(&new_codec_mutex, AV_LOCK_CREATE))) {
3887 return err > 0 ? AVERROR_UNKNOWN : err;
3888 }
3889 if ((err = cb(&new_avformat_mutex, AV_LOCK_CREATE))) {
3890 // Ignore failures to destroy the newly created mutex.
3891 cb(&new_codec_mutex, AV_LOCK_DESTROY);
3892 return err > 0 ? AVERROR_UNKNOWN : err;
3893 }
3894 lockmgr_cb = cb;
3895 codec_mutex = new_codec_mutex;
3896 avformat_mutex = new_avformat_mutex;
3897 }
3898
3899 return 0;
3900}
3901
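/* Serialize codec initialization that is not marked thread-safe. Codecs with
 * FF_CODEC_CAP_INIT_THREADSAFE or without an init function skip the lock;
 * entangled_thread_counter is used to detect several threads entering
 * avcodec_open2() at the same time. */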
3902int ff_lock_avcodec(AVCodecContext *log_ctx, const AVCodec *codec)
3903{
3904 if (codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE || !codec->init)
3905 return 0;
3906
3907 if (lockmgr_cb) {
3908 if ((*lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
3909 return -1;
3910 }
3911
3912 if (avpriv_atomic_int_add_and_fetch(&entangled_thread_counter, 1) != 1) {
3913 av_log(log_ctx, AV_LOG_ERROR,
3914 "Insufficient thread locking. At least %d threads are "
3915 "calling avcodec_open2() at the same time right now.\n",
3916 entangled_thread_counter);
3917 if (!lockmgr_cb)
3918 av_log(log_ctx, AV_LOG_ERROR, "No lock manager is set, please see av_lockmgr_register()\n");
3919 ff_avcodec_locked = 1;
3920 ff_unlock_avcodec(codec);
3921 return AVERROR(EINVAL);
3922 }
3923 av_assert0(!ff_avcodec_locked);
3924 ff_avcodec_locked = 1;
3925 return 0;
3926}
3927
3928int ff_unlock_avcodec(const AVCodec *codec)
3929{
3930 if (codec->caps_internal & FF_CODEC_CAP_INIT_THREADSAFE || !codec->init)
3931 return 0;
3932
3933 av_assert0(ff_avcodec_locked);
3934 ff_avcodec_locked = 0;
3935 avpriv_atomic_int_add_and_fetch(&entangled_thread_counter, -1);
3936 if (lockmgr_cb) {
3937 if ((*lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE))
3938 return -1;
3939 }
3940
3941 return 0;
3942}
3943
3944int avpriv_lock_avformat(void)
3945{
3946 if (lockmgr_cb) {
3947 if ((*lockmgr_cb)(&avformat_mutex, AV_LOCK_OBTAIN))
3948 return -1;
3949 }
3950 return 0;
3951}
3952
3953int avpriv_unlock_avformat(void)
3954{
3955 if (lockmgr_cb) {
3956 if ((*lockmgr_cb)(&avformat_mutex, AV_LOCK_RELEASE))
3957 return -1;
3958 }
3959 return 0;
3960}
3961
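/* Uppercase each of the four bytes of a 32-bit (fourcc-style) tag. */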
3962unsigned int avpriv_toupper4(unsigned int x)
3963{
3964 return av_toupper(x & 0xFF) +
3965 (av_toupper((x >> 8) & 0xFF) << 8) +
3966 (av_toupper((x >> 16) & 0xFF) << 16) +
3967 ((unsigned)av_toupper((x >> 24) & 0xFF) << 24);
3968}
3969
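/* Make dst a new reference to the frame and the progress buffer held by src,
 * copying the owner contexts of both fields. */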
3970int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src)
3971{
3972 int ret;
3973
3974 dst->owner[0] = src->owner[0];
3975 dst->owner[1] = src->owner[1];
3976
3977 ret = av_frame_ref(dst->f, src->f);
3978 if (ret < 0)
3979 return ret;
3980
3981 av_assert0(!dst->progress);
3982
3983 if (src->progress &&
3984 !(dst->progress = av_buffer_ref(src->progress))) {
3985 ff_thread_release_buffer(dst->owner[0], dst);
3986 return AVERROR(ENOMEM);
3987 }
3988
3989 return 0;
3990}
3991
3992#if !HAVE_THREADS
3993
3994enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
3995{
3996 return ff_get_format(avctx, fmt);
3997}
3998
3999int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
4000{
4001 f->owner[0] = f->owner[1] = avctx;
4002 return ff_get_buffer(avctx, f->f, flags);
4003}
4004
4005void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
4006{
4007 if (f->f)
4008 av_frame_unref(f->f);
4009}
4010
4011void ff_thread_finish_setup(AVCodecContext *avctx)
4012{
4013}
4014
4015void ff_thread_report_progress(ThreadFrame *f, int progress, int field)
4016{
4017}
4018
4019void ff_thread_await_progress(ThreadFrame *f, int progress, int field)
4020{
4021}
4022
4023int ff_thread_can_start_frame(AVCodecContext *avctx)
4024{
4025 return 1;
4026}
4027
4028int ff_alloc_entries(AVCodecContext *avctx, int count)
4029{
4030 return 0;
4031}
4032
4033void ff_reset_entries(AVCodecContext *avctx)
4034{
4035}
4036
4037void ff_thread_await_progress2(AVCodecContext *avctx, int field, int thread, int shift)
4038{
4039}
4040
4041void ff_thread_report_progress2(AVCodecContext *avctx, int field, int thread, int n)
4042{
4043}
4044
4045#endif
4046
4047int avcodec_is_open(AVCodecContext *s)
4048{
4049 return !!s->internal;
4050}
4051
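/* Finalize the AVBPrint buffer and hand the resulting string over to
 * avctx->extradata, failing with ENOMEM if the buffer was truncated. */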
4052int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf)
4053{
4054 int ret;
4055 char *str;
4056
4057 ret = av_bprint_finalize(buf, &str);
4058 if (ret < 0)
4059 return ret;
4060 if (!av_bprint_is_complete(buf)) {
4061 av_free(str);
4062 return AVERROR(ENOMEM);
4063 }
4064
4065 avctx->extradata = str;
4066 /* Note: the string is NUL terminated (so extradata can be read as a
4067 * string), but the terminating character is not accounted for in the size
4068 * (in binary formats you are likely not supposed to mux that character).
4069 * When extradata is copied, it is also padded with
4070 * AV_INPUT_BUFFER_PADDING_SIZE zeros. */
4071 avctx->extradata_size = buf->len;
4072 return 0;
4073}
4074
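/* Scan [p, end) for an MPEG-style 00 00 01 xx start code. *state carries the
 * last bytes seen, so a start code split across calls is still detected; on
 * success it holds the full 32-bit start code and the return value points
 * just past it, otherwise end is returned. The main loop advances one to
 * three bytes at a time depending on how many of the trailing bytes could
 * still be part of a 00 00 01 prefix. */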
4075const uint8_t *avpriv_find_start_code(const uint8_t *av_restrict p,
4076 const uint8_t *end,
4077 uint32_t *av_restrict state)
4078{
4079 int i;
4080
4081 av_assert0(p <= end);
4082 if (p >= end)
4083 return end;
4084
4085 for (i = 0; i < 3; i++) {
4086 uint32_t tmp = *state << 8;
4087 *state = tmp + *(p++);
4088 if (tmp == 0x100 || p == end)
4089 return p;
4090 }
4091
4092 while (p < end) {
4093 if (p[-1] > 1) p += 3;
4094 else if (p[-2]) p += 2;
4095 else if (p[-3] | (p[-1] - 1)) p++;
4096 else {
4097 p++;
4098 break;
4099 }
4100 }
4101
4102 p = FFMIN(p, end) - 4;
4103 *state = AV_RB32(p);
4104
4105 return p + 4;
4106}
4107
4108AVCPBProperties *av_cpb_properties_alloc(size_t *size)
4109{
4110 AVCPBProperties *props = av_mallocz(sizeof(AVCPBProperties));
4111 if (!props)
4112 return NULL;
4113
4114 if (size)
4115 *size = sizeof(*props);
4116
4117 props->vbv_delay = UINT64_MAX;
4118
4119 return props;
4120}
4121
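/* Allocate an AVCPBProperties structure and attach it to the codec context as
 * AV_PKT_DATA_CPB_PROPERTIES coded side data. */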
4122AVCPBProperties *ff_add_cpb_side_data(AVCodecContext *avctx)
4123{
4124 AVPacketSideData *tmp;
4125 AVCPBProperties *props;
4126 size_t size;
4127
4128 props = av_cpb_properties_alloc(&size);
4129 if (!props)
4130 return NULL;
4131
4132 tmp = av_realloc_array(avctx->coded_side_data, avctx->nb_coded_side_data + 1, sizeof(*tmp));
4133 if (!tmp) {
4134 av_freep(&props);
4135 return NULL;
4136 }
4137
4138 avctx->coded_side_data = tmp;
4139 avctx->nb_coded_side_data++;
4140
4141 avctx->coded_side_data[avctx->nb_coded_side_data - 1].type = AV_PKT_DATA_CPB_PROPERTIES;
4142 avctx->coded_side_data[avctx->nb_coded_side_data - 1].data = (uint8_t*)props;
4143 avctx->coded_side_data[avctx->nb_coded_side_data - 1].size = size;
4144
4145 return props;
4146}
4147
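/* Free the extradata and return all fields to their "unset" defaults. */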
4148static void codec_parameters_reset(AVCodecParameters *par)
4149{
4150 av_freep(&par->extradata);
4151
4152 memset(par, 0, sizeof(*par));
4153
4154 par->codec_type = AVMEDIA_TYPE_UNKNOWN;
4155 par->codec_id = AV_CODEC_ID_NONE;
4156 par->format = -1;
4157 par->field_order = AV_FIELD_UNKNOWN;
4158 par->color_range = AVCOL_RANGE_UNSPECIFIED;
4159 par->color_primaries = AVCOL_PRI_UNSPECIFIED;
4160 par->color_trc = AVCOL_TRC_UNSPECIFIED;
4161 par->color_space = AVCOL_SPC_UNSPECIFIED;
4162 par->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
4163 par->sample_aspect_ratio = (AVRational){ 0, 1 };
4164 par->profile = FF_PROFILE_UNKNOWN;
4165 par->level = FF_LEVEL_UNKNOWN;
4166}
4167
4168AVCodecParameters *avcodec_parameters_alloc(void)
4169{
4170 AVCodecParameters *par = av_mallocz(sizeof(*par));
4171
4172 if (!par)
4173 return NULL;
4174 codec_parameters_reset(par);
4175 return par;
4176}
4177
4178void avcodec_parameters_free(AVCodecParameters **ppar)
4179{
4180 AVCodecParameters *par = *ppar;
4181
4182 if (!par)
4183 return;
4184 codec_parameters_reset(par);
4185
4186 av_freep(ppar);
4187}
4188
4189int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
4190{
4191 codec_parameters_reset(dst);
4192 memcpy(dst, src, sizeof(*dst));
4193
4194 dst->extradata = NULL;
4195 dst->extradata_size = 0;
4196 if (src->extradata) {
4197 dst->extradata = av_mallocz(src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
4198 if (!dst->extradata)
4199 return AVERROR(ENOMEM);
4200 memcpy(dst->extradata, src->extradata, src->extradata_size);
4201 dst->extradata_size = src->extradata_size;
4202 }
4203
4204 return 0;
4205}
4206
4207int avcodec_parameters_from_context(AVCodecParameters *par,
4208 const AVCodecContext *codec)
4209{
4210 codec_parameters_reset(par);
4211
4212 par->codec_type = codec->codec_type;
4213 par->codec_id = codec->codec_id;
4214 par->codec_tag = codec->codec_tag;
4215
4216 par->bit_rate = codec->bit_rate;
4217 par->bits_per_coded_sample = codec->bits_per_coded_sample;
4218 par->bits_per_raw_sample = codec->bits_per_raw_sample;
4219 par->profile = codec->profile;
4220 par->level = codec->level;
4221
4222 switch (par->codec_type) {
4223 case AVMEDIA_TYPE_VIDEO:
4224 par->format = codec->pix_fmt;
4225 par->width = codec->width;
4226 par->height = codec->height;
4227 par->field_order = codec->field_order;
4228 par->color_range = codec->color_range;
4229 par->color_primaries = codec->color_primaries;
4230 par->color_trc = codec->color_trc;
4231 par->color_space = codec->colorspace;
4232 par->chroma_location = codec->chroma_sample_location;
4233 par->sample_aspect_ratio = codec->sample_aspect_ratio;
4234 par->video_delay = codec->has_b_frames;
4235 break;
4236 case AVMEDIA_TYPE_AUDIO:
4237 par->format = codec->sample_fmt;
4238 par->channel_layout = codec->channel_layout;
4239 par->channels = codec->channels;
4240 par->sample_rate = codec->sample_rate;
4241 par->block_align = codec->block_align;
4242 par->frame_size = codec->frame_size;
4243 par->initial_padding = codec->initial_padding;
4244 par->trailing_padding = codec->trailing_padding;
4245 par->seek_preroll = codec->seek_preroll;
4246 break;
4247 case AVMEDIA_TYPE_SUBTITLE:
4248 par->width = codec->width;
4249 par->height = codec->height;
4250 break;
4251 }
4252
4253 if (codec->extradata) {
4254 par->extradata = av_mallocz(codec->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
4255 if (!par->extradata)
4256 return AVERROR(ENOMEM);
4257 memcpy(par->extradata, codec->extradata, codec->extradata_size);
4258 par->extradata_size = codec->extradata_size;
4259 }
4260
4261 return 0;
4262}
4263
4264int avcodec_parameters_to_context(AVCodecContext *codec,
4265 const AVCodecParameters *par)
4266{
4267 codec->codec_type = par->codec_type;
4268 codec->codec_id = par->codec_id;
4269 codec->codec_tag = par->codec_tag;
4270
4271 codec->bit_rate = par->bit_rate;
4272 codec->bits_per_coded_sample = par->bits_per_coded_sample;
4273 codec->bits_per_raw_sample = par->bits_per_raw_sample;
4274 codec->profile = par->profile;
4275 codec->level = par->level;
4276
4277 switch (par->codec_type) {
4278 case AVMEDIA_TYPE_VIDEO:
4279 codec->pix_fmt = par->format;
4280 codec->width = par->width;
4281 codec->height = par->height;
4282 codec->field_order = par->field_order;
4283 codec->color_range = par->color_range;
4284 codec->color_primaries = par->color_primaries;
4285 codec->color_trc = par->color_trc;
4286 codec->colorspace = par->color_space;
4287 codec->chroma_sample_location = par->chroma_location;
4288 codec->sample_aspect_ratio = par->sample_aspect_ratio;
4289 codec->has_b_frames = par->video_delay;
4290 break;
4291 case AVMEDIA_TYPE_AUDIO:
4292 codec->sample_fmt = par->format;
4293 codec->channel_layout = par->channel_layout;
4294 codec->channels = par->channels;
4295 codec->sample_rate = par->sample_rate;
4296 codec->block_align = par->block_align;
4297 codec->frame_size = par->frame_size;
4298 codec->delay =
4299 codec->initial_padding = par->initial_padding;
4300 codec->trailing_padding = par->trailing_padding;
4301 codec->seek_preroll = par->seek_preroll;
4302 break;
4303 case AVMEDIA_TYPE_SUBTITLE:
4304 codec->width = par->width;
4305 codec->height = par->height;
4306 break;
4307 }
4308
4309 if (par->extradata) {
4310 av_freep(&codec->extradata);
4311 codec->extradata = av_mallocz(par->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
4312 if (!codec->extradata)
4313 return AVERROR(ENOMEM);
4314 memcpy(codec->extradata, par->extradata, par->extradata_size);
4315 codec->extradata_size = par->extradata_size;
4316 }
4317
4318 return 0;
4319}
4320
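/* Build an A/53 closed-caption user data payload (country code, 'GA94'
 * provider code, caption data) from the frame's A53_CC side data, reserving
 * prefix_len bytes in front for a caller-supplied header. *data is set to
 * NULL when the frame carries no caption side data. */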
4321int ff_alloc_a53_sei(const AVFrame *frame, size_t prefix_len,
4322 void **data, size_t *sei_size)
4323{
4324 AVFrameSideData *side_data = NULL;
4325 uint8_t *sei_data;
4326
4327 if (frame)
4328 side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
4329
4330 if (!side_data) {
4331 *data = NULL;
4332 return 0;
4333 }
4334
4335 *sei_size = side_data->size + 11;
4336 *data = av_mallocz(*sei_size + prefix_len);
4337 if (!*data)
4338 return AVERROR(ENOMEM);
4339 sei_data = (uint8_t*)*data + prefix_len;
4340
4341 // country code
4342 sei_data[0] = 181;
4343 sei_data[1] = 0;
4344 sei_data[2] = 49;
4345
4346 /**
4347 * 'GA94' is the standard provider code for ATSC captions in North
4348 * America, but hardcoding it may not always be right; other formats
4349 * exist. Since this information is not carried in the side data,
4350 * we use 'GA94' for now.
4351 */
4352 AV_WL32(sei_data + 3, MKTAG('G', 'A', '9', '4'));
4353 sei_data[7] = 3;
4354 sei_data[8] = ((side_data->size/3) & 0x1f) | 0x40;
4355 sei_data[9] = 0;
4356
4357 memcpy(sei_data + 10, side_data->data, side_data->size);
4358
4359 sei_data[side_data->size+10] = 255;
4360
4361 return 0;
4362}
4363
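/* Guess the coded bitrate from the frame rate, frame size and bits per coded
 * sample (falling back to the pixel format's bits per pixel). Returns 0 when
 * no frame rate can be determined. */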
4364int64_t ff_guess_coded_bitrate(AVCodecContext *avctx)
4365{
4366 AVRational framerate = avctx->framerate;
4367 int bits_per_coded_sample = avctx->bits_per_coded_sample;
4368 int64_t bitrate;
4369
4370 if (!(framerate.num && framerate.den))
4371 framerate = av_inv_q(avctx->time_base);
4372 if (!(framerate.num && framerate.den))
4373 return 0;
4374
4375 if (!bits_per_coded_sample) {
4376 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
4377 bits_per_coded_sample = av_get_bits_per_pixel(desc);
4378 }
4379 bitrate = (int64_t)bits_per_coded_sample * avctx->width * avctx->height *
4380 framerate.num / framerate.den;
4381
4382 return bitrate;
4383}
4384