blob: 91652d852b1c70fc8440e96e992501cfec1f98b0
1 | /* |
2 | * This file is part of FFmpeg. |
3 | * |
4 | * FFmpeg is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU Lesser General Public |
6 | * License as published by the Free Software Foundation; either |
7 | * version 2.1 of the License, or (at your option) any later version. |
8 | * |
9 | * FFmpeg is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | * Lesser General Public License for more details. |
13 | * |
14 | * You should have received a copy of the GNU Lesser General Public |
15 | * License along with FFmpeg; if not, write to the Free Software |
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ |
18 | |
19 | #include <string.h> |
20 | |
21 | #include <va/va.h> |
22 | #include <va/va_vpp.h> |
23 | |
24 | #include "libavutil/avassert.h" |
25 | #include "libavutil/hwcontext.h" |
26 | #include "libavutil/hwcontext_vaapi.h" |
27 | #include "libavutil/mem.h" |
28 | #include "libavutil/opt.h" |
29 | #include "libavutil/pixdesc.h" |
30 | |
31 | #include "avfilter.h" |
32 | #include "formats.h" |
33 | #include "internal.h" |
34 | #include "video.h" |
35 | |
36 | #define MAX_REFERENCES 8 |
37 | |
typedef struct DeintVAAPIContext {
    const AVClass *class;

    AVVAAPIDeviceContext *hwctx;      // VAAPI device context (borrowed pointer into device_ref)
    AVBufferRef *device_ref;          // reference keeping the VAAPI device alive

    int mode;                         // VAProcDeinterlacing* algorithm; None means "pick best"

    int valid_ids;                    // set once init() has made the VA IDs below safe to test
    VAConfigID va_config;             // VA pipeline config, VA_INVALID_ID when not created
    VAContextID va_context;           // VA processing context, VA_INVALID_ID when not created

    AVBufferRef *input_frames_ref;    // reference to the input hw frames context
    AVHWFramesContext *input_frames;  // borrowed pointer into input_frames_ref->data

    AVBufferRef *output_frames_ref;   // reference to the output hw frames context we allocate
    AVHWFramesContext *output_frames; // borrowed pointer into output_frames_ref->data
    int output_height;                // output size; copied from the input frames context
    int output_width;

    VAProcFilterCapDeinterlacing
                deint_caps[VAProcDeinterlacingCount];
    int nb_deint_caps;                // number of valid entries in deint_caps
    VAProcPipelineCaps pipeline_caps; // includes forward/backward reference counts

    int queue_depth;                  // frames needed in flight: back refs + fwd refs + 1
    int queue_count;                  // frames currently held in frame_queue
    AVFrame *frame_queue[MAX_REFERENCES]; // sliding window of owned input frames

    VABufferID filter_buffer;         // deinterlace filter parameter buffer, VA_INVALID_ID if unset
} DeintVAAPIContext;
69 | |
70 | static const char *deint_vaapi_mode_name(int mode) |
71 | { |
72 | switch (mode) { |
73 | #define D(name) case VAProcDeinterlacing ## name: return #name |
74 | D(Bob); |
75 | D(Weave); |
76 | D(MotionAdaptive); |
77 | D(MotionCompensated); |
78 | #undef D |
79 | default: |
80 | return "Invalid"; |
81 | } |
82 | } |
83 | |
84 | static int deint_vaapi_query_formats(AVFilterContext *avctx) |
85 | { |
86 | enum AVPixelFormat pix_fmts[] = { |
87 | AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE, |
88 | }; |
89 | int err; |
90 | |
91 | if ((err = ff_formats_ref(ff_make_format_list(pix_fmts), |
92 | &avctx->inputs[0]->out_formats)) < 0) |
93 | return err; |
94 | if ((err = ff_formats_ref(ff_make_format_list(pix_fmts), |
95 | &avctx->outputs[0]->in_formats)) < 0) |
96 | return err; |
97 | |
98 | return 0; |
99 | } |
100 | |
101 | static int deint_vaapi_pipeline_uninit(AVFilterContext *avctx) |
102 | { |
103 | DeintVAAPIContext *ctx = avctx->priv; |
104 | int i; |
105 | |
106 | for (i = 0; i < ctx->queue_count; i++) |
107 | av_frame_free(&ctx->frame_queue[i]); |
108 | ctx->queue_count = 0; |
109 | |
110 | if (ctx->filter_buffer != VA_INVALID_ID) { |
111 | vaDestroyBuffer(ctx->hwctx->display, ctx->filter_buffer); |
112 | ctx->filter_buffer = VA_INVALID_ID; |
113 | } |
114 | |
115 | if (ctx->va_context != VA_INVALID_ID) { |
116 | vaDestroyContext(ctx->hwctx->display, ctx->va_context); |
117 | ctx->va_context = VA_INVALID_ID; |
118 | } |
119 | |
120 | if (ctx->va_config != VA_INVALID_ID) { |
121 | vaDestroyConfig(ctx->hwctx->display, ctx->va_config); |
122 | ctx->va_config = VA_INVALID_ID; |
123 | } |
124 | |
125 | av_buffer_unref(&ctx->device_ref); |
126 | ctx->hwctx = NULL; |
127 | |
128 | return 0; |
129 | } |
130 | |
131 | static int deint_vaapi_config_input(AVFilterLink *inlink) |
132 | { |
133 | AVFilterContext *avctx = inlink->dst; |
134 | DeintVAAPIContext *ctx = avctx->priv; |
135 | |
136 | deint_vaapi_pipeline_uninit(avctx); |
137 | |
138 | if (!inlink->hw_frames_ctx) { |
139 | av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is " |
140 | "required to associate the processing device.\n"); |
141 | return AVERROR(EINVAL); |
142 | } |
143 | |
144 | ctx->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx); |
145 | ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data; |
146 | |
147 | return 0; |
148 | } |
149 | |
150 | static int deint_vaapi_build_filter_params(AVFilterContext *avctx) |
151 | { |
152 | DeintVAAPIContext *ctx = avctx->priv; |
153 | VAStatus vas; |
154 | VAProcFilterParameterBufferDeinterlacing params; |
155 | int i; |
156 | |
157 | ctx->nb_deint_caps = VAProcDeinterlacingCount; |
158 | vas = vaQueryVideoProcFilterCaps(ctx->hwctx->display, |
159 | ctx->va_context, |
160 | VAProcFilterDeinterlacing, |
161 | &ctx->deint_caps, |
162 | &ctx->nb_deint_caps); |
163 | if (vas != VA_STATUS_SUCCESS) { |
164 | av_log(avctx, AV_LOG_ERROR, "Failed to query deinterlacing " |
165 | "caps: %d (%s).\n", vas, vaErrorStr(vas)); |
166 | return AVERROR(EIO); |
167 | } |
168 | |
169 | if (ctx->mode == VAProcDeinterlacingNone) { |
170 | for (i = 0; i < ctx->nb_deint_caps; i++) { |
171 | if (ctx->deint_caps[i].type > ctx->mode) |
172 | ctx->mode = ctx->deint_caps[i].type; |
173 | } |
174 | av_log(avctx, AV_LOG_VERBOSE, "Picking %d (%s) as default " |
175 | "deinterlacing mode.\n", ctx->mode, |
176 | deint_vaapi_mode_name(ctx->mode)); |
177 | } else { |
178 | for (i = 0; i < ctx->nb_deint_caps; i++) { |
179 | if (ctx->deint_caps[i].type == ctx->mode) |
180 | break; |
181 | } |
182 | if (i >= ctx->nb_deint_caps) { |
183 | av_log(avctx, AV_LOG_ERROR, "Deinterlacing mode %d (%s) is " |
184 | "not supported.\n", ctx->mode, |
185 | deint_vaapi_mode_name(ctx->mode)); |
186 | } |
187 | } |
188 | |
189 | params.type = VAProcFilterDeinterlacing; |
190 | params.algorithm = ctx->mode; |
191 | params.flags = 0; |
192 | |
193 | av_assert0(ctx->filter_buffer == VA_INVALID_ID); |
194 | vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context, |
195 | VAProcFilterParameterBufferType, |
196 | sizeof(params), 1, ¶ms, |
197 | &ctx->filter_buffer); |
198 | if (vas != VA_STATUS_SUCCESS) { |
199 | av_log(avctx, AV_LOG_ERROR, "Failed to create deinterlace " |
200 | "parameter buffer: %d (%s).\n", vas, vaErrorStr(vas)); |
201 | return AVERROR(EIO); |
202 | } |
203 | |
204 | vas = vaQueryVideoProcPipelineCaps(ctx->hwctx->display, |
205 | ctx->va_context, |
206 | &ctx->filter_buffer, 1, |
207 | &ctx->pipeline_caps); |
208 | if (vas != VA_STATUS_SUCCESS) { |
209 | av_log(avctx, AV_LOG_ERROR, "Failed to query pipeline " |
210 | "caps: %d (%s).\n", vas, vaErrorStr(vas)); |
211 | return AVERROR(EIO); |
212 | } |
213 | |
214 | ctx->queue_depth = ctx->pipeline_caps.num_backward_references + |
215 | ctx->pipeline_caps.num_forward_references + 1; |
216 | if (ctx->queue_depth > MAX_REFERENCES) { |
217 | av_log(avctx, AV_LOG_ERROR, "Pipeline requires too many " |
218 | "references (%u forward, %u back).\n", |
219 | ctx->pipeline_caps.num_forward_references, |
220 | ctx->pipeline_caps.num_backward_references); |
221 | return AVERROR(ENOSYS); |
222 | } |
223 | |
224 | return 0; |
225 | } |
226 | |
227 | static int deint_vaapi_config_output(AVFilterLink *outlink) |
228 | { |
229 | AVFilterContext *avctx = outlink->src; |
230 | DeintVAAPIContext *ctx = avctx->priv; |
231 | AVVAAPIHWConfig *hwconfig = NULL; |
232 | AVHWFramesConstraints *constraints = NULL; |
233 | AVVAAPIFramesContext *va_frames; |
234 | VAStatus vas; |
235 | int err; |
236 | |
237 | deint_vaapi_pipeline_uninit(avctx); |
238 | |
239 | av_assert0(ctx->input_frames); |
240 | ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref); |
241 | ctx->hwctx = ((AVHWDeviceContext*)ctx->device_ref->data)->hwctx; |
242 | |
243 | ctx->output_width = ctx->input_frames->width; |
244 | ctx->output_height = ctx->input_frames->height; |
245 | |
246 | av_assert0(ctx->va_config == VA_INVALID_ID); |
247 | vas = vaCreateConfig(ctx->hwctx->display, VAProfileNone, |
248 | VAEntrypointVideoProc, 0, 0, &ctx->va_config); |
249 | if (vas != VA_STATUS_SUCCESS) { |
250 | av_log(avctx, AV_LOG_ERROR, "Failed to create processing pipeline " |
251 | "config: %d (%s).\n", vas, vaErrorStr(vas)); |
252 | err = AVERROR(EIO); |
253 | goto fail; |
254 | } |
255 | |
256 | hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref); |
257 | if (!hwconfig) { |
258 | err = AVERROR(ENOMEM); |
259 | goto fail; |
260 | } |
261 | hwconfig->config_id = ctx->va_config; |
262 | |
263 | constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref, |
264 | hwconfig); |
265 | if (!constraints) { |
266 | err = AVERROR(ENOMEM); |
267 | goto fail; |
268 | } |
269 | |
270 | if (ctx->output_width < constraints->min_width || |
271 | ctx->output_height < constraints->min_height || |
272 | ctx->output_width > constraints->max_width || |
273 | ctx->output_height > constraints->max_height) { |
274 | av_log(avctx, AV_LOG_ERROR, "Hardware does not support " |
275 | "deinterlacing to size %dx%d " |
276 | "(constraints: width %d-%d height %d-%d).\n", |
277 | ctx->output_width, ctx->output_height, |
278 | constraints->min_width, constraints->max_width, |
279 | constraints->min_height, constraints->max_height); |
280 | err = AVERROR(EINVAL); |
281 | goto fail; |
282 | } |
283 | |
284 | ctx->output_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref); |
285 | if (!ctx->output_frames_ref) { |
286 | av_log(avctx, AV_LOG_ERROR, "Failed to create HW frame context " |
287 | "for output.\n"); |
288 | err = AVERROR(ENOMEM); |
289 | goto fail; |
290 | } |
291 | |
292 | ctx->output_frames = (AVHWFramesContext*)ctx->output_frames_ref->data; |
293 | |
294 | ctx->output_frames->format = AV_PIX_FMT_VAAPI; |
295 | ctx->output_frames->sw_format = ctx->input_frames->sw_format; |
296 | ctx->output_frames->width = ctx->output_width; |
297 | ctx->output_frames->height = ctx->output_height; |
298 | |
299 | // The number of output frames we need is determined by what follows |
300 | // the filter. If it's an encoder with complex frame reference |
301 | // structures then this could be very high. |
302 | ctx->output_frames->initial_pool_size = 10; |
303 | |
304 | err = av_hwframe_ctx_init(ctx->output_frames_ref); |
305 | if (err < 0) { |
306 | av_log(avctx, AV_LOG_ERROR, "Failed to initialise VAAPI frame " |
307 | "context for output: %d\n", err); |
308 | goto fail; |
309 | } |
310 | |
311 | va_frames = ctx->output_frames->hwctx; |
312 | |
313 | av_assert0(ctx->va_context == VA_INVALID_ID); |
314 | vas = vaCreateContext(ctx->hwctx->display, ctx->va_config, |
315 | ctx->output_width, ctx->output_height, 0, |
316 | va_frames->surface_ids, va_frames->nb_surfaces, |
317 | &ctx->va_context); |
318 | if (vas != VA_STATUS_SUCCESS) { |
319 | av_log(avctx, AV_LOG_ERROR, "Failed to create processing pipeline " |
320 | "context: %d (%s).\n", vas, vaErrorStr(vas)); |
321 | err = AVERROR(EIO); |
322 | goto fail; |
323 | } |
324 | |
325 | err = deint_vaapi_build_filter_params(avctx); |
326 | if (err < 0) |
327 | goto fail; |
328 | |
329 | outlink->w = ctx->output_width; |
330 | outlink->h = ctx->output_height; |
331 | |
332 | outlink->hw_frames_ctx = av_buffer_ref(ctx->output_frames_ref); |
333 | if (!outlink->hw_frames_ctx) { |
334 | err = AVERROR(ENOMEM); |
335 | goto fail; |
336 | } |
337 | |
338 | av_freep(&hwconfig); |
339 | av_hwframe_constraints_free(&constraints); |
340 | return 0; |
341 | |
342 | fail: |
343 | av_buffer_unref(&ctx->output_frames_ref); |
344 | av_freep(&hwconfig); |
345 | av_hwframe_constraints_free(&constraints); |
346 | return err; |
347 | } |
348 | |
349 | static int vaapi_proc_colour_standard(enum AVColorSpace av_cs) |
350 | { |
351 | switch(av_cs) { |
352 | #define CS(av, va) case AVCOL_SPC_ ## av: return VAProcColorStandard ## va; |
353 | CS(BT709, BT709); |
354 | CS(BT470BG, BT470BG); |
355 | CS(SMPTE170M, SMPTE170M); |
356 | CS(SMPTE240M, SMPTE240M); |
357 | #undef CS |
358 | default: |
359 | return VAProcColorStandardNone; |
360 | } |
361 | } |
362 | |
363 | static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame) |
364 | { |
365 | AVFilterContext *avctx = inlink->dst; |
366 | AVFilterLink *outlink = avctx->outputs[0]; |
367 | DeintVAAPIContext *ctx = avctx->priv; |
368 | AVFrame *output_frame = NULL; |
369 | VASurfaceID input_surface, output_surface; |
370 | VASurfaceID backward_references[MAX_REFERENCES]; |
371 | VASurfaceID forward_references[MAX_REFERENCES]; |
372 | VAProcPipelineParameterBuffer params; |
373 | VAProcFilterParameterBufferDeinterlacing *filter_params; |
374 | VARectangle input_region; |
375 | VABufferID params_id; |
376 | VAStatus vas; |
377 | void *filter_params_addr = NULL; |
378 | int err, i; |
379 | |
380 | av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n", |
381 | av_get_pix_fmt_name(input_frame->format), |
382 | input_frame->width, input_frame->height, input_frame->pts); |
383 | |
384 | if (ctx->queue_count < ctx->queue_depth) { |
385 | ctx->frame_queue[ctx->queue_count++] = input_frame; |
386 | if (ctx->queue_count < ctx->queue_depth) { |
387 | // Need more reference surfaces before we can continue. |
388 | return 0; |
389 | } |
390 | } else { |
391 | av_frame_free(&ctx->frame_queue[0]); |
392 | for (i = 0; i + 1 < ctx->queue_count; i++) |
393 | ctx->frame_queue[i] = ctx->frame_queue[i + 1]; |
394 | ctx->frame_queue[i] = input_frame; |
395 | } |
396 | |
397 | input_frame = |
398 | ctx->frame_queue[ctx->pipeline_caps.num_backward_references]; |
399 | input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3]; |
400 | for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++) |
401 | backward_references[i] = (VASurfaceID)(uintptr_t) |
402 | ctx->frame_queue[ctx->pipeline_caps.num_backward_references - |
403 | i - 1]->data[3]; |
404 | for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++) |
405 | forward_references[i] = (VASurfaceID)(uintptr_t) |
406 | ctx->frame_queue[ctx->pipeline_caps.num_backward_references + |
407 | i + 1]->data[3]; |
408 | |
409 | av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for " |
410 | "deinterlace input.\n", input_surface); |
411 | av_log(avctx, AV_LOG_DEBUG, "Backward references:"); |
412 | for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++) |
413 | av_log(avctx, AV_LOG_DEBUG, " %#x", backward_references[i]); |
414 | av_log(avctx, AV_LOG_DEBUG, "\n"); |
415 | av_log(avctx, AV_LOG_DEBUG, "Forward references:"); |
416 | for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++) |
417 | av_log(avctx, AV_LOG_DEBUG, " %#x", forward_references[i]); |
418 | av_log(avctx, AV_LOG_DEBUG, "\n"); |
419 | |
420 | output_frame = av_frame_alloc(); |
421 | if (!output_frame) { |
422 | err = AVERROR(ENOMEM); |
423 | goto fail; |
424 | } |
425 | |
426 | err = av_hwframe_get_buffer(ctx->output_frames_ref, |
427 | output_frame, 0); |
428 | if (err < 0) { |
429 | err = AVERROR(ENOMEM); |
430 | goto fail; |
431 | } |
432 | |
433 | output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3]; |
434 | av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for " |
435 | "deinterlace output.\n", output_surface); |
436 | |
437 | memset(¶ms, 0, sizeof(params)); |
438 | |
439 | input_region = (VARectangle) { |
440 | .x = 0, |
441 | .y = 0, |
442 | .width = input_frame->width, |
443 | .height = input_frame->height, |
444 | }; |
445 | |
446 | params.surface = input_surface; |
447 | params.surface_region = &input_region; |
448 | params.surface_color_standard = vaapi_proc_colour_standard( |
449 | av_frame_get_colorspace(input_frame)); |
450 | |
451 | params.output_region = NULL; |
452 | params.output_background_color = 0xff000000; |
453 | params.output_color_standard = params.surface_color_standard; |
454 | |
455 | params.pipeline_flags = 0; |
456 | params.filter_flags = VA_FRAME_PICTURE; |
457 | |
458 | vas = vaMapBuffer(ctx->hwctx->display, ctx->filter_buffer, |
459 | &filter_params_addr); |
460 | if (vas != VA_STATUS_SUCCESS) { |
461 | av_log(avctx, AV_LOG_ERROR, "Failed to map filter parameter " |
462 | "buffer: %d (%s).\n", vas, vaErrorStr(vas)); |
463 | err = AVERROR(EIO); |
464 | goto fail; |
465 | } |
466 | filter_params = filter_params_addr; |
467 | filter_params->flags = 0; |
468 | if (input_frame->interlaced_frame && !input_frame->top_field_first) |
469 | filter_params->flags |= VA_DEINTERLACING_BOTTOM_FIELD_FIRST; |
470 | filter_params_addr = NULL; |
471 | vas = vaUnmapBuffer(ctx->hwctx->display, ctx->filter_buffer); |
472 | if (vas != VA_STATUS_SUCCESS) |
473 | av_log(avctx, AV_LOG_ERROR, "Failed to unmap filter parameter " |
474 | "buffer: %d (%s).\n", vas, vaErrorStr(vas)); |
475 | |
476 | params.filters = &ctx->filter_buffer; |
477 | params.num_filters = 1; |
478 | |
479 | params.forward_references = forward_references; |
480 | params.num_forward_references = |
481 | ctx->pipeline_caps.num_forward_references; |
482 | params.backward_references = backward_references; |
483 | params.num_backward_references = |
484 | ctx->pipeline_caps.num_backward_references; |
485 | |
486 | vas = vaBeginPicture(ctx->hwctx->display, |
487 | ctx->va_context, output_surface); |
488 | if (vas != VA_STATUS_SUCCESS) { |
489 | av_log(avctx, AV_LOG_ERROR, "Failed to attach new picture: " |
490 | "%d (%s).\n", vas, vaErrorStr(vas)); |
491 | err = AVERROR(EIO); |
492 | goto fail; |
493 | } |
494 | |
495 | vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context, |
496 | VAProcPipelineParameterBufferType, |
497 | sizeof(params), 1, ¶ms, ¶ms_id); |
498 | if (vas != VA_STATUS_SUCCESS) { |
499 | av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer: " |
500 | "%d (%s).\n", vas, vaErrorStr(vas)); |
501 | err = AVERROR(EIO); |
502 | goto fail_after_begin; |
503 | } |
504 | av_log(avctx, AV_LOG_DEBUG, "Pipeline parameter buffer is %#x.\n", |
505 | params_id); |
506 | |
507 | vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context, |
508 | ¶ms_id, 1); |
509 | if (vas != VA_STATUS_SUCCESS) { |
510 | av_log(avctx, AV_LOG_ERROR, "Failed to render parameter buffer: " |
511 | "%d (%s).\n", vas, vaErrorStr(vas)); |
512 | err = AVERROR(EIO); |
513 | goto fail_after_begin; |
514 | } |
515 | |
516 | vas = vaEndPicture(ctx->hwctx->display, ctx->va_context); |
517 | if (vas != VA_STATUS_SUCCESS) { |
518 | av_log(avctx, AV_LOG_ERROR, "Failed to start picture processing: " |
519 | "%d (%s).\n", vas, vaErrorStr(vas)); |
520 | err = AVERROR(EIO); |
521 | goto fail_after_render; |
522 | } |
523 | |
524 | if (ctx->hwctx->driver_quirks & |
525 | AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) { |
526 | vas = vaDestroyBuffer(ctx->hwctx->display, params_id); |
527 | if (vas != VA_STATUS_SUCCESS) { |
528 | av_log(avctx, AV_LOG_ERROR, "Failed to free parameter buffer: " |
529 | "%d (%s).\n", vas, vaErrorStr(vas)); |
530 | // And ignore. |
531 | } |
532 | } |
533 | |
534 | err = av_frame_copy_props(output_frame, input_frame); |
535 | if (err < 0) |
536 | goto fail; |
537 | |
538 | av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n", |
539 | av_get_pix_fmt_name(output_frame->format), |
540 | output_frame->width, output_frame->height, output_frame->pts); |
541 | |
542 | return ff_filter_frame(outlink, output_frame); |
543 | |
544 | fail_after_begin: |
545 | vaRenderPicture(ctx->hwctx->display, ctx->va_context, ¶ms_id, 1); |
546 | fail_after_render: |
547 | vaEndPicture(ctx->hwctx->display, ctx->va_context); |
548 | fail: |
549 | if (filter_params_addr) |
550 | vaUnmapBuffer(ctx->hwctx->display, ctx->filter_buffer); |
551 | av_frame_free(&output_frame); |
552 | return err; |
553 | } |
554 | |
555 | static av_cold int deint_vaapi_init(AVFilterContext *avctx) |
556 | { |
557 | DeintVAAPIContext *ctx = avctx->priv; |
558 | |
559 | ctx->va_config = VA_INVALID_ID; |
560 | ctx->va_context = VA_INVALID_ID; |
561 | ctx->filter_buffer = VA_INVALID_ID; |
562 | ctx->valid_ids = 1; |
563 | |
564 | return 0; |
565 | } |
566 | |
567 | static av_cold void deint_vaapi_uninit(AVFilterContext *avctx) |
568 | { |
569 | DeintVAAPIContext *ctx = avctx->priv; |
570 | |
571 | if (ctx->valid_ids) |
572 | deint_vaapi_pipeline_uninit(avctx); |
573 | |
574 | av_buffer_unref(&ctx->input_frames_ref); |
575 | av_buffer_unref(&ctx->output_frames_ref); |
576 | av_buffer_unref(&ctx->device_ref); |
577 | } |
578 | |
579 | #define OFFSET(x) offsetof(DeintVAAPIContext, x) |
580 | #define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM) |
581 | static const AVOption deint_vaapi_options[] = { |
582 | { "mode", "Deinterlacing mode", |
583 | OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = VAProcDeinterlacingNone }, |
584 | VAProcDeinterlacingNone, VAProcDeinterlacingCount - 1, FLAGS, "mode" }, |
585 | { "default", "Use the highest-numbered (and therefore possibly most advanced) deinterlacing algorithm", |
586 | 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingNone }, .unit = "mode" }, |
587 | { "bob", "Use the bob deinterlacing algorithm", |
588 | 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingBob }, .unit = "mode" }, |
589 | { "weave", "Use the weave deinterlacing algorithm", |
590 | 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingWeave }, .unit = "mode" }, |
591 | { "motion_adaptive", "Use the motion adaptive deinterlacing algorithm", |
592 | 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionAdaptive }, .unit = "mode" }, |
593 | { "motion_compensated", "Use the motion compensated deinterlacing algorithm", |
594 | 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionCompensated }, .unit = "mode" }, |
595 | { NULL }, |
596 | }; |
597 | |
/* AVClass describing the filter for option handling and logging. */
static const AVClass deint_vaapi_class = {
    .class_name = "deinterlace_vaapi",
    .item_name  = av_default_item_name,
    .option     = deint_vaapi_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
604 | |
/* Single video input pad: frames are queued/processed in filter_frame and
 * the hw frames context is captured in config_input. */
static const AVFilterPad deint_vaapi_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = &deint_vaapi_filter_frame,
        .config_props = &deint_vaapi_config_input,
    },
    { NULL }
};
614 | |
/* Single video output pad: the VA pipeline and output frames context are
 * created in config_output. */
static const AVFilterPad deint_vaapi_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = &deint_vaapi_config_output,
    },
    { NULL }
};
623 | |
/* Filter definition: deinterlacing of VAAPI hardware surfaces using the
 * VA video processing API. */
AVFilter ff_vf_deinterlace_vaapi = {
    .name           = "deinterlace_vaapi",
    .description    = NULL_IF_CONFIG_SMALL("Deinterlacing of VAAPI surfaces"),
    .priv_size      = sizeof(DeintVAAPIContext),
    .init           = &deint_vaapi_init,
    .uninit         = &deint_vaapi_uninit,
    .query_formats  = &deint_vaapi_query_formats,
    .inputs         = deint_vaapi_inputs,
    .outputs        = deint_vaapi_outputs,
    .priv_class     = &deint_vaapi_class,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
636 |