blob: b63fb94fc110a1c54211b332e7190cd9274dad54
1 | /* |
2 | * This file is part of FFmpeg. |
3 | * |
4 | * FFmpeg is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU Lesser General Public |
6 | * License as published by the Free Software Foundation; either |
7 | * version 2.1 of the License, or (at your option) any later version. |
8 | * |
9 | * FFmpeg is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | * Lesser General Public License for more details. |
13 | * |
14 | * You should have received a copy of the GNU Lesser General Public |
15 | * License along with FFmpeg; if not, write to the Free Software |
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ |
18 | |
19 | #include "libavutil/avassert.h" |
20 | #include "libavutil/common.h" |
21 | #include "libavutil/pixdesc.h" |
22 | |
23 | #include "avcodec.h" |
24 | #include "internal.h" |
25 | #include "vaapi_decode.h" |
26 | |
27 | |
28 | int ff_vaapi_decode_make_param_buffer(AVCodecContext *avctx, |
29 | VAAPIDecodePicture *pic, |
30 | int type, |
31 | const void *data, |
32 | size_t size) |
33 | { |
34 | VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data; |
35 | VAStatus vas; |
36 | VABufferID buffer; |
37 | |
38 | av_assert0(pic->nb_param_buffers + 1 <= MAX_PARAM_BUFFERS); |
39 | |
40 | vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context, |
41 | type, size, 1, (void*)data, &buffer); |
42 | if (vas != VA_STATUS_SUCCESS) { |
43 | av_log(avctx, AV_LOG_ERROR, "Failed to create parameter " |
44 | "buffer (type %d): %d (%s).\n", |
45 | type, vas, vaErrorStr(vas)); |
46 | return AVERROR(EIO); |
47 | } |
48 | |
49 | pic->param_buffers[pic->nb_param_buffers++] = buffer; |
50 | |
51 | av_log(avctx, AV_LOG_DEBUG, "Param buffer (type %d, %zu bytes) " |
52 | "is %#x.\n", type, size, buffer); |
53 | return 0; |
54 | } |
55 | |
56 | |
57 | int ff_vaapi_decode_make_slice_buffer(AVCodecContext *avctx, |
58 | VAAPIDecodePicture *pic, |
59 | const void *params_data, |
60 | size_t params_size, |
61 | const void *slice_data, |
62 | size_t slice_size) |
63 | { |
64 | VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data; |
65 | VAStatus vas; |
66 | int index; |
67 | |
68 | av_assert0(pic->nb_slices <= pic->slices_allocated); |
69 | if (pic->nb_slices == pic->slices_allocated) { |
70 | if (pic->slices_allocated > 0) |
71 | pic->slices_allocated *= 2; |
72 | else |
73 | pic->slices_allocated = 64; |
74 | |
75 | pic->slice_buffers = |
76 | av_realloc_array(pic->slice_buffers, |
77 | pic->slices_allocated, |
78 | 2 * sizeof(*pic->slice_buffers)); |
79 | if (!pic->slice_buffers) |
80 | return AVERROR(ENOMEM); |
81 | } |
82 | av_assert0(pic->nb_slices + 1 <= pic->slices_allocated); |
83 | |
84 | index = 2 * pic->nb_slices; |
85 | |
86 | vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context, |
87 | VASliceParameterBufferType, |
88 | params_size, 1, (void*)params_data, |
89 | &pic->slice_buffers[index]); |
90 | if (vas != VA_STATUS_SUCCESS) { |
91 | av_log(avctx, AV_LOG_ERROR, "Failed to create slice " |
92 | "parameter buffer: %d (%s).\n", vas, vaErrorStr(vas)); |
93 | return AVERROR(EIO); |
94 | } |
95 | |
96 | av_log(avctx, AV_LOG_DEBUG, "Slice %d param buffer (%zu bytes) " |
97 | "is %#x.\n", pic->nb_slices, params_size, |
98 | pic->slice_buffers[index]); |
99 | |
100 | vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context, |
101 | VASliceDataBufferType, |
102 | slice_size, 1, (void*)slice_data, |
103 | &pic->slice_buffers[index + 1]); |
104 | if (vas != VA_STATUS_SUCCESS) { |
105 | av_log(avctx, AV_LOG_ERROR, "Failed to create slice " |
106 | "data buffer (size %zu): %d (%s).\n", |
107 | slice_size, vas, vaErrorStr(vas)); |
108 | vaDestroyBuffer(ctx->hwctx->display, |
109 | pic->slice_buffers[index]); |
110 | return AVERROR(EIO); |
111 | } |
112 | |
113 | av_log(avctx, AV_LOG_DEBUG, "Slice %d data buffer (%zu bytes) " |
114 | "is %#x.\n", pic->nb_slices, slice_size, |
115 | pic->slice_buffers[index + 1]); |
116 | |
117 | ++pic->nb_slices; |
118 | return 0; |
119 | } |
120 | |
121 | static void ff_vaapi_decode_destroy_buffers(AVCodecContext *avctx, |
122 | VAAPIDecodePicture *pic) |
123 | { |
124 | VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data; |
125 | VAStatus vas; |
126 | int i; |
127 | |
128 | for (i = 0; i < pic->nb_param_buffers; i++) { |
129 | vas = vaDestroyBuffer(ctx->hwctx->display, |
130 | pic->param_buffers[i]); |
131 | if (vas != VA_STATUS_SUCCESS) { |
132 | av_log(avctx, AV_LOG_ERROR, "Failed to destroy " |
133 | "parameter buffer %#x: %d (%s).\n", |
134 | pic->param_buffers[i], vas, vaErrorStr(vas)); |
135 | } |
136 | } |
137 | |
138 | for (i = 0; i < 2 * pic->nb_slices; i++) { |
139 | vas = vaDestroyBuffer(ctx->hwctx->display, |
140 | pic->slice_buffers[i]); |
141 | if (vas != VA_STATUS_SUCCESS) { |
142 | av_log(avctx, AV_LOG_ERROR, "Failed to destroy slice " |
143 | "slice buffer %#x: %d (%s).\n", |
144 | pic->slice_buffers[i], vas, vaErrorStr(vas)); |
145 | } |
146 | } |
147 | } |
148 | |
/**
 * Submit all accumulated parameter and slice buffers to the hardware
 * and start decoding into the picture's output surface.
 *
 * On success the picture's buffer lists are reset and the slice array
 * is freed.  Returns 0 on success or a negative AVERROR code on
 * failure, cleaning up the VA buffers as appropriate for the driver.
 */
int ff_vaapi_decode_issue(AVCodecContext *avctx,
                          VAAPIDecodePicture *pic)
{
    VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
    VAStatus vas;
    int err;

    av_log(avctx, AV_LOG_DEBUG, "Decode to surface %#x.\n",
           pic->output_surface);

    vas = vaBeginPicture(ctx->hwctx->display, ctx->va_context,
                         pic->output_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to begin picture decode "
               "issue: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
                          pic->param_buffers, pic->nb_param_buffers);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to upload decode "
               "parameters: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    // Two buffer IDs per slice (parameter + data), hence 2 * nb_slices.
    vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
                          pic->slice_buffers, 2 * pic->nb_slices);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to upload slices: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to end picture decode "
               "issue: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        // With the quirk the driver does not take ownership of the
        // buffers at vaEndPicture(), so we must destroy them ourselves;
        // without it the driver has consumed them and destroying them
        // again here would be wrong.
        if (ctx->hwctx->driver_quirks &
            AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS)
            goto fail;
        else
            goto fail_at_end;
    }

    // Quirky drivers need the buffers destroyed explicitly even after a
    // successful decode; conformant ones destroy them in vaEndPicture().
    if (ctx->hwctx->driver_quirks &
        AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS)
        ff_vaapi_decode_destroy_buffers(avctx, pic);

    pic->nb_param_buffers = 0;
    pic->nb_slices = 0;
    pic->slices_allocated = 0;
    av_freep(&pic->slice_buffers);

    return 0;

fail_with_picture:
    // vaBeginPicture() succeeded, so the picture must be ended before
    // the context can be reused.
    vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to end picture decode "
               "after error: %d (%s).\n", vas, vaErrorStr(vas));
    }
fail:
    ff_vaapi_decode_destroy_buffers(avctx, pic);
fail_at_end:
    return err;
}
220 | |
/**
 * Abandon a picture without issuing it: destroy any buffers already
 * created for it and reset its state so it can be reused.
 *
 * Always returns 0.
 */
int ff_vaapi_decode_cancel(AVCodecContext *avctx,
                           VAAPIDecodePicture *pic)
{
    // Destroy first: ff_vaapi_decode_destroy_buffers() reads the
    // nb_param_buffers/nb_slices counters, so they must still be
    // valid at this point.
    ff_vaapi_decode_destroy_buffers(avctx, pic);

    pic->nb_param_buffers = 0;
    pic->nb_slices = 0;
    pic->slices_allocated = 0;
    av_freep(&pic->slice_buffers);

    return 0;
}
233 | |
// Table mapping (FFmpeg codec, FFmpeg profile) pairs to the equivalent
// VAAPI profile.  Scanned linearly by vaapi_decode_make_config();
// entries guarded by VA_CHECK_VERSION only exist when the installed
// libva headers are new enough to declare the profile.
static const struct {
    enum AVCodecID codec_id;    // codec this entry applies to
    int codec_profile;          // FF_PROFILE_* value for the stream
    VAProfile va_profile;       // corresponding VAAPI profile
} vaapi_profile_map[] = {
#define MAP(c, p, v) { AV_CODEC_ID_ ## c, FF_PROFILE_ ## p, VAProfile ## v }
    MAP(MPEG2VIDEO,  MPEG2_SIMPLE,    MPEG2Simple ),
    MAP(MPEG2VIDEO,  MPEG2_MAIN,      MPEG2Main   ),
    MAP(H263,        UNKNOWN,         H263Baseline),
    MAP(MPEG4,       MPEG4_SIMPLE,    MPEG4Simple ),
    MAP(MPEG4,       MPEG4_ADVANCED_SIMPLE,
                               MPEG4AdvancedSimple),
    MAP(MPEG4,       MPEG4_MAIN,      MPEG4Main   ),
    MAP(H264,        H264_CONSTRAINED_BASELINE,
                           H264ConstrainedBaseline),
    MAP(H264,        H264_BASELINE,   H264Baseline),
    MAP(H264,        H264_MAIN,       H264Main    ),
    MAP(H264,        H264_HIGH,       H264High    ),
#if VA_CHECK_VERSION(0, 37, 0)
    MAP(HEVC,        HEVC_MAIN,       HEVCMain    ),
    MAP(HEVC,        HEVC_MAIN_10,    HEVCMain10  ),
#endif
    // VC1_COMPLEX maps to Advanced as well: no dedicated VA profile.
    MAP(WMV3,        VC1_SIMPLE,      VC1Simple   ),
    MAP(WMV3,        VC1_MAIN,        VC1Main     ),
    MAP(WMV3,        VC1_COMPLEX,     VC1Advanced ),
    MAP(WMV3,        VC1_ADVANCED,    VC1Advanced ),
    MAP(VC1,         VC1_SIMPLE,      VC1Simple   ),
    MAP(VC1,         VC1_MAIN,        VC1Main     ),
    MAP(VC1,         VC1_COMPLEX,     VC1Advanced ),
    MAP(VC1,         VC1_ADVANCED,    VC1Advanced ),
#if VA_CHECK_VERSION(0, 35, 0)
    MAP(VP8,         UNKNOWN,       VP8Version0_3 ),
#endif
#if VA_CHECK_VERSION(0, 38, 0)
    MAP(VP9,         VP9_0,           VP9Profile0 ),
#endif
#if VA_CHECK_VERSION(0, 39, 0)
    MAP(VP9,         VP9_2,           VP9Profile2 ),
#endif
#undef MAP
};
275 | |
276 | static int vaapi_decode_make_config(AVCodecContext *avctx) |
277 | { |
278 | VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data; |
279 | |
280 | AVVAAPIHWConfig *hwconfig = NULL; |
281 | AVHWFramesConstraints *constraints = NULL; |
282 | VAStatus vas; |
283 | int err, i, j; |
284 | const AVCodecDescriptor *codec_desc; |
285 | VAProfile profile, *profile_list = NULL; |
286 | int profile_count, exact_match, alt_profile; |
287 | const AVPixFmtDescriptor *sw_desc, *desc; |
288 | |
289 | // Allowing a profile mismatch can be useful because streams may |
290 | // over-declare their required capabilities - in particular, many |
291 | // H.264 baseline profile streams (notably some of those in FATE) |
292 | // only use the feature set of constrained baseline. This flag |
293 | // would have to be be set by some external means in order to |
294 | // actually be useful. (AV_HWACCEL_FLAG_IGNORE_PROFILE?) |
295 | int allow_profile_mismatch = 0; |
296 | |
297 | codec_desc = avcodec_descriptor_get(avctx->codec_id); |
298 | if (!codec_desc) { |
299 | err = AVERROR(EINVAL); |
300 | goto fail; |
301 | } |
302 | |
303 | profile_count = vaMaxNumProfiles(ctx->hwctx->display); |
304 | profile_list = av_malloc_array(profile_count, |
305 | sizeof(VAProfile)); |
306 | if (!profile_list) { |
307 | err = AVERROR(ENOMEM); |
308 | goto fail; |
309 | } |
310 | |
311 | vas = vaQueryConfigProfiles(ctx->hwctx->display, |
312 | profile_list, &profile_count); |
313 | if (vas != VA_STATUS_SUCCESS) { |
314 | av_log(avctx, AV_LOG_ERROR, "Failed to query profiles: " |
315 | "%d (%s).\n", vas, vaErrorStr(vas)); |
316 | err = AVERROR(ENOSYS); |
317 | goto fail; |
318 | } |
319 | |
320 | profile = VAProfileNone; |
321 | exact_match = 0; |
322 | |
323 | for (i = 0; i < FF_ARRAY_ELEMS(vaapi_profile_map); i++) { |
324 | int profile_match = 0; |
325 | if (avctx->codec_id != vaapi_profile_map[i].codec_id) |
326 | continue; |
327 | if (avctx->profile == vaapi_profile_map[i].codec_profile) |
328 | profile_match = 1; |
329 | profile = vaapi_profile_map[i].va_profile; |
330 | for (j = 0; j < profile_count; j++) { |
331 | if (profile == profile_list[j]) { |
332 | exact_match = profile_match; |
333 | break; |
334 | } |
335 | } |
336 | if (j < profile_count) { |
337 | if (exact_match) |
338 | break; |
339 | alt_profile = vaapi_profile_map[i].codec_profile; |
340 | } |
341 | } |
342 | av_freep(&profile_list); |
343 | |
344 | if (profile == VAProfileNone) { |
345 | av_log(avctx, AV_LOG_ERROR, "No support for codec %s " |
346 | "profile %d.\n", codec_desc->name, avctx->profile); |
347 | err = AVERROR(ENOSYS); |
348 | goto fail; |
349 | } |
350 | if (!exact_match) { |
351 | if (allow_profile_mismatch) { |
352 | av_log(avctx, AV_LOG_VERBOSE, "Codec %s profile %d not " |
353 | "supported for hardware decode.\n", |
354 | codec_desc->name, avctx->profile); |
355 | av_log(avctx, AV_LOG_WARNING, "Using possibly-" |
356 | "incompatible profile %d instead.\n", |
357 | alt_profile); |
358 | } else { |
359 | av_log(avctx, AV_LOG_VERBOSE, "Codec %s profile %d not " |
360 | "supported for hardware decode.\n", |
361 | codec_desc->name, avctx->profile); |
362 | err = AVERROR(EINVAL); |
363 | goto fail; |
364 | } |
365 | } |
366 | |
367 | ctx->va_profile = profile; |
368 | ctx->va_entrypoint = VAEntrypointVLD; |
369 | |
370 | vas = vaCreateConfig(ctx->hwctx->display, ctx->va_profile, |
371 | ctx->va_entrypoint, NULL, 0, |
372 | &ctx->va_config); |
373 | if (vas != VA_STATUS_SUCCESS) { |
374 | av_log(avctx, AV_LOG_ERROR, "Failed to create decode " |
375 | "configuration: %d (%s).\n", vas, vaErrorStr(vas)); |
376 | err = AVERROR(EIO); |
377 | goto fail; |
378 | } |
379 | |
380 | hwconfig = av_hwdevice_hwconfig_alloc(avctx->hw_device_ctx ? |
381 | avctx->hw_device_ctx : |
382 | ctx->frames->device_ref); |
383 | if (!hwconfig) { |
384 | err = AVERROR(ENOMEM); |
385 | goto fail; |
386 | } |
387 | hwconfig->config_id = ctx->va_config; |
388 | |
389 | constraints = |
390 | av_hwdevice_get_hwframe_constraints(avctx->hw_device_ctx ? |
391 | avctx->hw_device_ctx : |
392 | ctx->frames->device_ref, |
393 | hwconfig); |
394 | if (!constraints) { |
395 | err = AVERROR(ENOMEM); |
396 | goto fail; |
397 | } |
398 | |
399 | if (avctx->coded_width < constraints->min_width || |
400 | avctx->coded_height < constraints->min_height || |
401 | avctx->coded_width > constraints->max_width || |
402 | avctx->coded_height > constraints->max_height) { |
403 | av_log(avctx, AV_LOG_ERROR, "Hardware does not support image " |
404 | "size %dx%d (constraints: width %d-%d height %d-%d).\n", |
405 | avctx->coded_width, avctx->coded_height, |
406 | constraints->min_width, constraints->max_width, |
407 | constraints->min_height, constraints->max_height); |
408 | err = AVERROR(EINVAL); |
409 | goto fail; |
410 | } |
411 | if (!constraints->valid_sw_formats || |
412 | constraints->valid_sw_formats[0] == AV_PIX_FMT_NONE) { |
413 | av_log(avctx, AV_LOG_ERROR, "Hardware does not offer any " |
414 | "usable surface formats.\n"); |
415 | err = AVERROR(EINVAL); |
416 | goto fail; |
417 | } |
418 | |
419 | // Find the first format in the list which matches the expected |
420 | // bit depth and subsampling. If none are found (this can happen |
421 | // when 10-bit streams are decoded to 8-bit surfaces, for example) |
422 | // then just take the first format on the list. |
423 | ctx->surface_format = constraints->valid_sw_formats[0]; |
424 | sw_desc = av_pix_fmt_desc_get(avctx->sw_pix_fmt); |
425 | for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) { |
426 | desc = av_pix_fmt_desc_get(constraints->valid_sw_formats[i]); |
427 | if (desc->nb_components != sw_desc->nb_components || |
428 | desc->log2_chroma_w != sw_desc->log2_chroma_w || |
429 | desc->log2_chroma_h != sw_desc->log2_chroma_h) |
430 | continue; |
431 | for (j = 0; j < desc->nb_components; j++) { |
432 | if (desc->comp[j].depth != sw_desc->comp[j].depth) |
433 | break; |
434 | } |
435 | if (j < desc->nb_components) |
436 | continue; |
437 | ctx->surface_format = constraints->valid_sw_formats[i]; |
438 | break; |
439 | } |
440 | |
441 | // Start with at least four surfaces. |
442 | ctx->surface_count = 4; |
443 | // Add per-codec number of surfaces used for storing reference frames. |
444 | switch (avctx->codec_id) { |
445 | case AV_CODEC_ID_H264: |
446 | case AV_CODEC_ID_HEVC: |
447 | ctx->surface_count += 16; |
448 | break; |
449 | case AV_CODEC_ID_VP9: |
450 | ctx->surface_count += 8; |
451 | break; |
452 | case AV_CODEC_ID_VP8: |
453 | ctx->surface_count += 3; |
454 | break; |
455 | default: |
456 | ctx->surface_count += 2; |
457 | } |
458 | // Add an additional surface per thread is frame threading is enabled. |
459 | if (avctx->active_thread_type & FF_THREAD_FRAME) |
460 | ctx->surface_count += avctx->thread_count; |
461 | |
462 | av_hwframe_constraints_free(&constraints); |
463 | av_freep(&hwconfig); |
464 | |
465 | return 0; |
466 | |
467 | fail: |
468 | av_hwframe_constraints_free(&constraints); |
469 | av_freep(&hwconfig); |
470 | if (ctx->va_config != VA_INVALID_ID) { |
471 | vaDestroyConfig(ctx->hwctx->display, ctx->va_config); |
472 | ctx->va_config = VA_INVALID_ID; |
473 | } |
474 | av_freep(&profile_list); |
475 | return err; |
476 | } |
477 | |
/**
 * Initialise VAAPI decoding for the codec context: locate the hardware
 * device/frames context (or the deprecated struct vaapi_context), pick
 * a VA configuration, create a frames context if the caller did not
 * supply one, and create the VA decode context.
 *
 * Returns 0 on success or a negative AVERROR code; on failure all
 * partially-created state is released via ff_vaapi_decode_uninit().
 */
int ff_vaapi_decode_init(AVCodecContext *avctx)
{
    VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
    VAStatus vas;
    int err;

    // Mark as invalid so ff_vaapi_decode_uninit() knows what was
    // actually created if we fail partway through.
    ctx->va_config = VA_INVALID_ID;
    ctx->va_context = VA_INVALID_ID;

#if FF_API_STRUCT_VAAPI_CONTEXT
    if (avctx->hwaccel_context) {
        av_log(avctx, AV_LOG_WARNING, "Using deprecated struct "
               "vaapi_context in decode.\n");

        ctx->have_old_context = 1;
        ctx->old_context = avctx->hwaccel_context;

        // Really we only want the VAAPI device context, but this
        // allocates a whole generic device context because we don't
        // have any other way to determine how big it should be.
        ctx->device_ref =
            av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VAAPI);
        if (!ctx->device_ref) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;
        ctx->hwctx = ctx->device->hwctx;

        ctx->hwctx->display = ctx->old_context->display;

        // The old VAAPI decode setup assumed this quirk was always
        // present, so set it here to avoid the behaviour changing.
        ctx->hwctx->driver_quirks =
            AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS;

    } else
#endif
    if (avctx->hw_frames_ctx) {
        // This structure has a shorter lifetime than the enclosing
        // AVCodecContext, so we inherit the references from there
        // and do not need to make separate ones.

        ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        ctx->hwfc = ctx->frames->hwctx;
        ctx->device = ctx->frames->device_ctx;
        ctx->hwctx = ctx->device->hwctx;

    } else if (avctx->hw_device_ctx) {
        ctx->device = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
        ctx->hwctx = ctx->device->hwctx;

        if (ctx->device->type != AV_HWDEVICE_TYPE_VAAPI) {
            av_log(avctx, AV_LOG_ERROR, "Device supplied for VAAPI "
                   "decoding must be a VAAPI device (not %d).\n",
                   ctx->device->type);
            err = AVERROR(EINVAL);
            goto fail;
        }

    } else {
        av_log(avctx, AV_LOG_ERROR, "A hardware device or frames context "
               "is required for VAAPI decoding.\n");
        err = AVERROR(EINVAL);
        goto fail;
    }

#if FF_API_STRUCT_VAAPI_CONTEXT
    if (ctx->have_old_context) {
        // The user supplied a fully set-up config and context; just
        // adopt them instead of creating our own.
        ctx->va_config = ctx->old_context->config_id;
        ctx->va_context = ctx->old_context->context_id;

        av_log(avctx, AV_LOG_DEBUG, "Using user-supplied decoder "
               "context: %#x/%#x.\n", ctx->va_config, ctx->va_context);
    } else {
#endif

    err = vaapi_decode_make_config(avctx);
    if (err)
        goto fail;

    if (!avctx->hw_frames_ctx) {
        // No frames context supplied by the caller: create one sized
        // using the format and surface count chosen above.
        avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
        if (!avctx->hw_frames_ctx) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;

        ctx->frames->format = AV_PIX_FMT_VAAPI;
        ctx->frames->width = avctx->coded_width;
        ctx->frames->height = avctx->coded_height;

        ctx->frames->sw_format = ctx->surface_format;
        ctx->frames->initial_pool_size = ctx->surface_count;

        err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to initialise internal "
                   "frames context: %d.\n", err);
            goto fail;
        }

        ctx->hwfc = ctx->frames->hwctx;
    }

    vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
                          avctx->coded_width, avctx->coded_height,
                          VA_PROGRESSIVE,
                          ctx->hwfc->surface_ids,
                          ctx->hwfc->nb_surfaces,
                          &ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create decode "
               "context: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    av_log(avctx, AV_LOG_DEBUG, "Decode context initialised: "
           "%#x/%#x.\n", ctx->va_config, ctx->va_context);
#if FF_API_STRUCT_VAAPI_CONTEXT
    }
#endif

    return 0;

fail:
    ff_vaapi_decode_uninit(avctx);
    return err;
}
609 | |
/**
 * Free all VAAPI decoding state.  Safe to call after a partial
 * ff_vaapi_decode_init(): only objects that were actually created
 * (i.e. not VA_INVALID_ID) are destroyed.
 *
 * Always returns 0.
 */
int ff_vaapi_decode_uninit(AVCodecContext *avctx)
{
    VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
    VAStatus vas;

#if FF_API_STRUCT_VAAPI_CONTEXT
    if (ctx->have_old_context) {
        // Config and context belong to the user-supplied struct
        // vaapi_context; we only drop our own device reference.
        av_buffer_unref(&ctx->device_ref);
    } else {
#endif

    if (ctx->va_context != VA_INVALID_ID) {
        vas = vaDestroyContext(ctx->hwctx->display, ctx->va_context);
        if (vas != VA_STATUS_SUCCESS) {
            av_log(avctx, AV_LOG_ERROR, "Failed to destroy decode "
                   "context %#x: %d (%s).\n",
                   ctx->va_context, vas, vaErrorStr(vas));
        }
    }
    if (ctx->va_config != VA_INVALID_ID) {
        vas = vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
        if (vas != VA_STATUS_SUCCESS) {
            av_log(avctx, AV_LOG_ERROR, "Failed to destroy decode "
                   "configuration %#x: %d (%s).\n",
                   ctx->va_config, vas, vaErrorStr(vas));
        }
    }

#if FF_API_STRUCT_VAAPI_CONTEXT
    }
#endif

    return 0;
}
644 |