blob: 67adad53ed8bb93d1cda3e7289aca98a8adfd1c6
1 | /* |
2 | * Videotoolbox hardware acceleration |
3 | * |
4 | * copyright (c) 2012 Sebastien Zwickert |
5 | * |
6 | * This file is part of FFmpeg. |
7 | * |
8 | * FFmpeg is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU Lesser General Public |
10 | * License as published by the Free Software Foundation; either |
11 | * version 2.1 of the License, or (at your option) any later version. |
12 | * |
13 | * FFmpeg is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | * Lesser General Public License for more details. |
17 | * |
18 | * You should have received a copy of the GNU Lesser General Public |
19 | * License along with FFmpeg; if not, write to the Free Software |
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
21 | */ |
22 | |
23 | #include "config.h" |
24 | #if CONFIG_VIDEOTOOLBOX |
25 | # include "videotoolbox.h" |
26 | #else |
27 | # include "vda.h" |
28 | #endif |
29 | #include "vda_vt_internal.h" |
30 | #include "libavutil/avutil.h" |
31 | #include "bytestream.h" |
32 | #include "h264dec.h" |
33 | #include "mpegvideo.h" |
34 | #include <TargetConditionals.h> |
35 | |
36 | #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder |
37 | # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder") |
38 | #endif |
39 | |
40 | #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12 |
41 | |
42 | static void videotoolbox_buffer_release(void *opaque, uint8_t *data) |
43 | { |
44 | CVPixelBufferRef cv_buffer = (CVImageBufferRef)data; |
45 | CVPixelBufferRelease(cv_buffer); |
46 | } |
47 | |
48 | static int videotoolbox_buffer_copy(VTContext *vtctx, |
49 | const uint8_t *buffer, |
50 | uint32_t size) |
51 | { |
52 | void *tmp; |
53 | |
54 | tmp = av_fast_realloc(vtctx->bitstream, |
55 | &vtctx->allocated_size, |
56 | size); |
57 | |
58 | if (!tmp) |
59 | return AVERROR(ENOMEM); |
60 | |
61 | vtctx->bitstream = tmp; |
62 | memcpy(vtctx->bitstream, buffer, size); |
63 | vtctx->bitstream_size = size; |
64 | |
65 | return 0; |
66 | } |
67 | |
68 | int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame) |
69 | { |
70 | frame->width = avctx->width; |
71 | frame->height = avctx->height; |
72 | frame->format = avctx->pix_fmt; |
73 | frame->buf[0] = av_buffer_alloc(1); |
74 | |
75 | if (!frame->buf[0]) |
76 | return AVERROR(ENOMEM); |
77 | |
78 | return 0; |
79 | } |
80 | |
/* Write a single byte at p. */
#define AV_W8(p, v) *(p) = (v)

/* Serialize the active SPS/PPS into an ISO/IEC 14496-15 avcC record so
 * VideoToolbox can be handed the H.264 stream parameters as extradata.
 * Returns a CFData the caller must release, or NULL on allocation failure. */
CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    CFDataRef data = NULL;
    uint8_t *p;
    /* 6 fixed header bytes + 2-byte SPS length + SPS payload
     * + 1-byte PPS count + 2-byte PPS length + PPS payload */
    int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
    uint8_t *vt_extradata = av_malloc(vt_extradata_size);
    if (!vt_extradata)
        return NULL;

    p = vt_extradata;

    AV_W8(p + 0, 1); /* version */
    AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
    AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
    AV_W8(p + 3, h->ps.sps->data[3]); /* level */
    AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
    AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
    AV_WB16(p + 6, h->ps.sps->data_size);
    memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
    p += 8 + h->ps.sps->data_size;
    AV_W8(p + 0, 1); /* number of pps */
    AV_WB16(p + 1, h->ps.pps->data_size);
    memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);

    p += 3 + h->ps.pps->data_size;
    /* Sanity-check that exactly the computed size was written. */
    av_assert0(p - vt_extradata == vt_extradata_size);

    /* CFDataCreate copies the bytes, so the scratch buffer can be freed. */
    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
    av_free(vt_extradata);
    return data;
}
115 | |
/* Wrap the decoded CVPixelBuffer pending in vtctx->frame into frame->buf[0]
 * and expose it via frame->data[3], the convention for
 * AV_PIX_FMT_VIDEOTOOLBOX frames. Ownership of the pixel buffer reference
 * transfers to the AVBufferRef, whose free callback
 * (videotoolbox_buffer_release) drops it.
 * Returns 0 on success, AVERROR(ENOMEM) on failure. */
int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame)
{
    /* Drop the 1-byte placeholder installed by ff_videotoolbox_alloc_frame(). */
    av_buffer_unref(&frame->buf[0]);

    /* The "data" of this buffer is the CVPixelBufferRef itself, not pixel
     * memory; the release callback casts it back before releasing. */
    frame->buf[0] = av_buffer_create((uint8_t*)vtctx->frame,
                                     sizeof(vtctx->frame),
                                     videotoolbox_buffer_release,
                                     NULL,
                                     AV_BUFFER_FLAG_READONLY);
    if (!frame->buf[0]) {
        return AVERROR(ENOMEM);
    }

    frame->data[3] = (uint8_t*)vtctx->frame;
    vtctx->frame = NULL; /* reference is now owned by frame->buf[0] */

    return 0;
}
134 | |
135 | int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx, |
136 | const uint8_t *buffer, |
137 | uint32_t size) |
138 | { |
139 | VTContext *vtctx = avctx->internal->hwaccel_priv_data; |
140 | H264Context *h = avctx->priv_data; |
141 | |
142 | vtctx->bitstream_size = 0; |
143 | |
144 | if (h->is_avc == 1) { |
145 | return videotoolbox_buffer_copy(vtctx, buffer, size); |
146 | } |
147 | |
148 | return 0; |
149 | } |
150 | |
151 | int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx, |
152 | const uint8_t *buffer, |
153 | uint32_t size) |
154 | { |
155 | VTContext *vtctx = avctx->internal->hwaccel_priv_data; |
156 | H264Context *h = avctx->priv_data; |
157 | void *tmp; |
158 | |
159 | if (h->is_avc == 1) |
160 | return 0; |
161 | |
162 | tmp = av_fast_realloc(vtctx->bitstream, |
163 | &vtctx->allocated_size, |
164 | vtctx->bitstream_size+size+4); |
165 | if (!tmp) |
166 | return AVERROR(ENOMEM); |
167 | |
168 | vtctx->bitstream = tmp; |
169 | |
170 | AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size); |
171 | memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size); |
172 | |
173 | vtctx->bitstream_size += size + 4; |
174 | |
175 | return 0; |
176 | } |
177 | |
178 | int ff_videotoolbox_uninit(AVCodecContext *avctx) |
179 | { |
180 | VTContext *vtctx = avctx->internal->hwaccel_priv_data; |
181 | if (vtctx) { |
182 | av_freep(&vtctx->bitstream); |
183 | if (vtctx->frame) |
184 | CVPixelBufferRelease(vtctx->frame); |
185 | } |
186 | |
187 | return 0; |
188 | } |
189 | |
190 | #if CONFIG_VIDEOTOOLBOX |
191 | static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length) |
192 | { |
193 | int i; |
194 | uint8_t b; |
195 | |
196 | for (i = 3; i >= 0; i--) { |
197 | b = (length >> (i * 7)) & 0x7F; |
198 | if (i != 0) |
199 | b |= 0x80; |
200 | |
201 | bytestream2_put_byteu(pb, b); |
202 | } |
203 | } |
204 | |
/* Serialize avctx->extradata into an MP4 "esds" atom payload — an
 * ES_Descriptor containing a DecoderConfigDescriptor, the decoder specific
 * info, and an SLConfigDescriptor — for handing MPEG-4 configuration to
 * VideoToolbox.
 * Returns a CFData the caller must release, or NULL on allocation failure. */
static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
{
    CFDataRef data;
    uint8_t *rw_extradata;
    PutByteContext pb;
    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
    int config_size = 13 + 5 + avctx->extradata_size;
    int s;

    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
        return NULL;

    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
    bytestream2_put_byteu(&pb, 0); // version
    bytestream2_put_ne24(&pb, 0); // flags

    // elementary stream descriptor
    bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
    videotoolbox_write_mp4_descr_length(&pb, full_size);
    bytestream2_put_ne16(&pb, 0); // esid
    bytestream2_put_byteu(&pb, 0); // stream priority (0-32)

    // decoder configuration descriptor
    bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
    videotoolbox_write_mp4_descr_length(&pb, config_size);
    bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
    bytestream2_put_byteu(&pb, 0x11); // stream type
    bytestream2_put_ne24(&pb, 0); // buffer size
    bytestream2_put_ne32(&pb, 0); // max bitrate
    bytestream2_put_ne32(&pb, 0); // avg bitrate

    // decoder specific descriptor
    bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);

    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);

    // SLConfigDescriptor
    bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
    bytestream2_put_byteu(&pb, 0x01); // length
    bytestream2_put_byteu(&pb, 0x02); //

    s = bytestream2_size_p(&pb);

    /* CFDataCreate copies the bytes; the scratch buffer is freed below. */
    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);

    av_freep(&rw_extradata);
    return data;
}
255 | |
/* Wrap (buffer, size) into a CMSampleBuffer without copying: the block buffer
 * merely references the caller's memory (blockAllocator is kCFAllocatorNull),
 * so the buffer must remain valid while the sample buffer is in use.
 * Returns a sample buffer the caller must release, or NULL on failure. */
static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
                                                           void *buffer,
                                                           int size)
{
    OSStatus status;
    CMBlockBufferRef block_buf;
    CMSampleBufferRef sample_buf;

    block_buf  = NULL;
    sample_buf = NULL;

    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
                                                buffer,             // memoryBlock
                                                size,               // blockLength
                                                kCFAllocatorNull,   // blockAllocator
                                                NULL,               // customBlockSource
                                                0,                  // offsetToData
                                                size,               // dataLength
                                                0,                  // flags
                                                &block_buf);

    if (!status) {
        /* No timing/size arrays: a single sample covering the whole block. */
        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
                                      block_buf,            // dataBuffer
                                      TRUE,                 // dataReady
                                      0,                    // makeDataReadyCallback
                                      0,                    // makeDataReadyRefcon
                                      fmt_desc,             // formatDescription
                                      1,                    // numSamples
                                      0,                    // numSampleTimingEntries
                                      NULL,                 // sampleTimingArray
                                      0,                    // numSampleSizeEntries
                                      NULL,                 // sampleSizeArray
                                      &sample_buf);
    }

    /* The sample buffer retains the block buffer; drop the local reference. */
    if (block_buf)
        CFRelease(block_buf);

    return sample_buf;
}
297 | |
298 | static void videotoolbox_decoder_callback(void *opaque, |
299 | void *sourceFrameRefCon, |
300 | OSStatus status, |
301 | VTDecodeInfoFlags flags, |
302 | CVImageBufferRef image_buffer, |
303 | CMTime pts, |
304 | CMTime duration) |
305 | { |
306 | AVCodecContext *avctx = opaque; |
307 | VTContext *vtctx = avctx->internal->hwaccel_priv_data; |
308 | |
309 | if (vtctx->frame) { |
310 | CVPixelBufferRelease(vtctx->frame); |
311 | vtctx->frame = NULL; |
312 | } |
313 | |
314 | if (!image_buffer) { |
315 | av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n"); |
316 | return; |
317 | } |
318 | |
319 | vtctx->frame = CVPixelBufferRetain(image_buffer); |
320 | } |
321 | |
/* Feed the accumulated bitstream to the decompression session and block until
 * the asynchronous output callback has run for it.
 * Returns noErr (0) on success, -1 if the sample buffer could not be built,
 * or the VideoToolbox OSStatus error otherwise. */
static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
{
    OSStatus status;
    CMSampleBufferRef sample_buf;
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    VTContext *vtctx = avctx->internal->hwaccel_priv_data;

    /* The sample buffer references vtctx->bitstream without copying, so the
     * bitstream must stay untouched until decoding below completes. */
    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
                                                   vtctx->bitstream,
                                                   vtctx->bitstream_size);

    if (!sample_buf)
        return -1;

    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
                                               sample_buf,
                                               0,       // decodeFlags
                                               NULL,    // sourceFrameRefCon
                                               0);      // infoFlagsOut
    if (status == noErr)
        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);

    CFRelease(sample_buf);

    return status;
}
348 | |
349 | static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame) |
350 | { |
351 | int status; |
352 | AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context; |
353 | VTContext *vtctx = avctx->internal->hwaccel_priv_data; |
354 | |
355 | if (!videotoolbox->session || !vtctx->bitstream) |
356 | return AVERROR_INVALIDDATA; |
357 | |
358 | status = videotoolbox_session_decode_frame(avctx); |
359 | |
360 | if (status) { |
361 | av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status); |
362 | return AVERROR_UNKNOWN; |
363 | } |
364 | |
365 | if (!vtctx->frame) |
366 | return AVERROR_UNKNOWN; |
367 | |
368 | return ff_videotoolbox_buffer_create(vtctx, frame); |
369 | } |
370 | |
371 | static int videotoolbox_h264_end_frame(AVCodecContext *avctx) |
372 | { |
373 | H264Context *h = avctx->priv_data; |
374 | AVFrame *frame = h->cur_pic_ptr->f; |
375 | |
376 | return videotoolbox_common_end_frame(avctx, frame); |
377 | } |
378 | |
379 | static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx, |
380 | const uint8_t *buffer, |
381 | uint32_t size) |
382 | { |
383 | VTContext *vtctx = avctx->internal->hwaccel_priv_data; |
384 | |
385 | return videotoolbox_buffer_copy(vtctx, buffer, size); |
386 | } |
387 | |
/* MPEG-family decode_slice: nothing to do — the whole frame was already
 * copied in videotoolbox_mpeg_start_frame(). */
static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
                                          const uint8_t *buffer,
                                          uint32_t size)
{
    return 0;
}
394 | |
395 | static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx) |
396 | { |
397 | MpegEncContext *s = avctx->priv_data; |
398 | AVFrame *frame = s->current_picture_ptr->f; |
399 | |
400 | return videotoolbox_common_end_frame(avctx, frame); |
401 | } |
402 | |
/* Build the decoder specification dictionary for VTDecompressionSessionCreate:
 * it requires a hardware-accelerated decoder and, when extradata is present,
 * attaches it as the matching sample description extension atom
 * ("esds" for MPEG-4, "avcC" for H.264).
 * Follows the CF Create Rule: the caller must release the returned dictionary. */
static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
                                                          AVCodecContext *avctx)
{
    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                                   0,
                                                                   &kCFTypeDictionaryKeyCallBacks,
                                                                   &kCFTypeDictionaryValueCallBacks);

    CFDictionarySetValue(config_info,
                         kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
                         kCFBooleanTrue);

    if (avctx->extradata_size) {
        CFMutableDictionaryRef avc_info;
        CFDataRef data = NULL;

        avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                             1,
                                             &kCFTypeDictionaryKeyCallBacks,
                                             &kCFTypeDictionaryValueCallBacks);

        switch (codec_type) {
        case kCMVideoCodecType_MPEG4Video :
            data = videotoolbox_esds_extradata_create(avctx);
            if (data)
                CFDictionarySetValue(avc_info, CFSTR("esds"), data);
            break;
        case kCMVideoCodecType_H264 :
            data = ff_videotoolbox_avcc_extradata_create(avctx);
            if (data)
                CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
            break;
        default:
            /* Other codec types carry no extension atom. */
            break;
        }

        CFDictionarySetValue(config_info,
                             kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
                             avc_info);

        /* The dictionaries retain their values, so the locals can be dropped. */
        if (data)
            CFRelease(data);

        CFRelease(avc_info);
    }
    return config_info;
}
450 | |
/* Build the destination image buffer attributes for the decompression
 * session: pixel format (only when pix_fmt is non-zero), width, height,
 * IOSurface backing and OpenGL(ES) texture compatibility.
 * Follows the CF Create Rule: the caller must release the returned dictionary. */
static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
                                                             int height,
                                                             OSType pix_fmt)
{
    CFMutableDictionaryRef buffer_attributes;
    CFMutableDictionaryRef io_surface_properties;
    CFNumberRef cv_pix_fmt;
    CFNumberRef w;
    CFNumberRef h;

    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);

    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                  4,
                                                  &kCFTypeDictionaryKeyCallBacks,
                                                  &kCFTypeDictionaryValueCallBacks);
    /* Empty dictionary: requests default IOSurface-backed buffers. */
    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                                      0,
                                                      &kCFTypeDictionaryKeyCallBacks,
                                                      &kCFTypeDictionaryValueCallBacks);

    /* pix_fmt == 0 means "let VideoToolbox pick the pixel format". */
    if (pix_fmt)
        CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
#if TARGET_OS_IPHONE
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
#else
    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
#endif

    /* The dictionary retains its values; drop the local references. */
    CFRelease(io_surface_properties);
    CFRelease(cv_pix_fmt);
    CFRelease(w);
    CFRelease(h);

    return buffer_attributes;
}
492 | |
493 | static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type, |
494 | CFDictionaryRef decoder_spec, |
495 | int width, |
496 | int height) |
497 | { |
498 | CMFormatDescriptionRef cm_fmt_desc; |
499 | OSStatus status; |
500 | |
501 | status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault, |
502 | codec_type, |
503 | width, |
504 | height, |
505 | decoder_spec, // Dictionary of extension |
506 | &cm_fmt_desc); |
507 | |
508 | if (status) |
509 | return NULL; |
510 | |
511 | return cm_fmt_desc; |
512 | } |
513 | |
/* Create the VTDecompressionSession described by avctx and the user-supplied
 * AVVideotoolboxContext (avctx->hwaccel_context): map the codec ID to a
 * CMVideoCodecType, build the format description, destination buffer
 * attributes and decoder specification, then open the session.
 * Returns 0 on success or a negative AVERROR code. */
static int videotoolbox_default_init(AVCodecContext *avctx)
{
    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
    OSStatus status;
    VTDecompressionOutputCallbackRecord decoder_cb;
    CFDictionaryRef decoder_spec;
    CFDictionaryRef buf_attr;

    if (!videotoolbox) {
        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
        return -1;
    }

    /* Map the FFmpeg codec ID to the CoreMedia codec type. Unlisted IDs
     * leave cm_codec_type as already set in the context. */
    switch( avctx->codec_id ) {
    case AV_CODEC_ID_H263 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
        break;
    case AV_CODEC_ID_H264 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
        break;
    case AV_CODEC_ID_MPEG1VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
        break;
    case AV_CODEC_ID_MPEG2VIDEO :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
        break;
    case AV_CODEC_ID_MPEG4 :
        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
        break;
    default :
        break;
    }

    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);

    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
                                                                decoder_spec,
                                                                avctx->width,
                                                                avctx->height);
    if (!videotoolbox->cm_fmt_desc) {
        if (decoder_spec)
            CFRelease(decoder_spec);

        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
        return -1;
    }

    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
                                                     avctx->height,
                                                     videotoolbox->cv_pix_fmt_type);

    /* The callback receives avctx back as its opaque pointer. */
    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
    decoder_cb.decompressionOutputRefCon   = avctx;

    status = VTDecompressionSessionCreate(NULL,                      // allocator
                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
                                          decoder_spec,              // videoDecoderSpecification
                                          buf_attr,                  // destinationImageBufferAttributes
                                          &decoder_cb,               // outputCallback
                                          &videotoolbox->session);   // decompressionSessionOut

    /* The session retains what it needs; drop the local references. */
    if (decoder_spec)
        CFRelease(decoder_spec);
    if (buf_attr)
        CFRelease(buf_attr);

    /* Translate VideoToolbox status codes into AVERROR values. */
    switch (status) {
    case kVTVideoDecoderNotAvailableNowErr:
    case kVTVideoDecoderUnsupportedDataFormatErr:
        return AVERROR(ENOSYS);
    case kVTVideoDecoderMalfunctionErr:
        return AVERROR(EINVAL);
    case kVTVideoDecoderBadDataErr :
        return AVERROR_INVALIDDATA;
    case 0:
        return 0;
    default:
        return AVERROR_UNKNOWN;
    }
}
594 | |
595 | static void videotoolbox_default_free(AVCodecContext *avctx) |
596 | { |
597 | AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context; |
598 | |
599 | if (videotoolbox) { |
600 | if (videotoolbox->cm_fmt_desc) |
601 | CFRelease(videotoolbox->cm_fmt_desc); |
602 | |
603 | if (videotoolbox->session) { |
604 | VTDecompressionSessionInvalidate(videotoolbox->session); |
605 | CFRelease(videotoolbox->session); |
606 | } |
607 | } |
608 | } |
609 | |
/* H.263 hardware decoding through VideoToolbox. */
AVHWAccel ff_h263_videotoolbox_hwaccel = {
    .name           = "h263_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H263,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
622 | |
/* H.264 hardware decoding through VideoToolbox. */
AVHWAccel ff_h264_videotoolbox_hwaccel = {
    .name           = "h264_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = ff_videotoolbox_h264_start_frame,
    .decode_slice   = ff_videotoolbox_h264_decode_slice,
    .end_frame      = videotoolbox_h264_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
635 | |
/* MPEG-1 hardware decoding through VideoToolbox. */
AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
    .name           = "mpeg1_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG1VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
648 | |
/* MPEG-2 hardware decoding through VideoToolbox. */
AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
    .name           = "mpeg2_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG2VIDEO,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
661 | |
/* MPEG-4 Part 2 hardware decoding through VideoToolbox. */
AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
    .name           = "mpeg4_videotoolbox",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MPEG4,
    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
    .alloc_frame    = ff_videotoolbox_alloc_frame,
    .start_frame    = videotoolbox_mpeg_start_frame,
    .decode_slice   = videotoolbox_mpeg_decode_slice,
    .end_frame      = videotoolbox_mpeg_end_frame,
    .uninit         = ff_videotoolbox_uninit,
    .priv_data_size = sizeof(VTContext),
};
674 | |
675 | AVVideotoolboxContext *av_videotoolbox_alloc_context(void) |
676 | { |
677 | AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret)); |
678 | |
679 | if (ret) { |
680 | ret->output_callback = videotoolbox_decoder_callback; |
681 | ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange; |
682 | } |
683 | |
684 | return ret; |
685 | } |
686 | |
/* Install a freshly allocated default hwaccel context on avctx and create
 * the decompression session. Returns 0 or a negative AVERROR code. */
int av_videotoolbox_default_init(AVCodecContext *avctx)
{
    return av_videotoolbox_default_init2(avctx, NULL);
}
691 | |
692 | int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx) |
693 | { |
694 | avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context(); |
695 | if (!avctx->hwaccel_context) |
696 | return AVERROR(ENOMEM); |
697 | return videotoolbox_default_init(avctx); |
698 | } |
699 | |
/* Public counterpart of av_videotoolbox_default_init(): tear down the session
 * and free the hwaccel context installed on avctx. */
void av_videotoolbox_default_free(AVCodecContext *avctx)
{

    videotoolbox_default_free(avctx);
    av_freep(&avctx->hwaccel_context);
}
706 | #endif /* CONFIG_VIDEOTOOLBOX */ |
707 |