blob: 5f049a5d7bb9622d6e60463029787f58ffd213cd
1 | /* |
2 | * This file is part of FFmpeg. |
3 | * |
4 | * FFmpeg is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU Lesser General Public |
6 | * License as published by the Free Software Foundation; either |
7 | * version 2.1 of the License, or (at your option) any later version. |
8 | * |
9 | * FFmpeg is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | * Lesser General Public License for more details. |
13 | * |
14 | * You should have received a copy of the GNU Lesser General Public |
15 | * License along with FFmpeg; if not, write to the Free Software |
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ |
18 | |
19 | #include <string.h> |
20 | |
21 | #include <va/va.h> |
22 | #include <va/va_vpp.h> |
23 | |
24 | #include "libavutil/avassert.h" |
25 | #include "libavutil/hwcontext.h" |
26 | #include "libavutil/hwcontext_vaapi.h" |
27 | #include "libavutil/mem.h" |
28 | #include "libavutil/opt.h" |
29 | #include "libavutil/pixdesc.h" |
30 | |
31 | #include "avfilter.h" |
32 | #include "formats.h" |
33 | #include "internal.h" |
34 | #include "scale.h" |
35 | #include "video.h" |
36 | |
typedef struct ScaleVAAPIContext {
    const AVClass *class;

    // Borrowed from device_ref->data; not separately owned, cleared by
    // scale_vaapi_pipeline_uninit() together with device_ref.
    AVVAAPIDeviceContext *hwctx;
    AVBufferRef *device_ref;

    // Nonzero once init() has set va_config/va_context to valid sentinels,
    // so uninit() knows the fields below are safe to inspect.
    int valid_ids;
    VAConfigID va_config;
    VAContextID va_context;

    // Reference to (and convenience pointer into) the incoming link's
    // hardware frames context.
    AVBufferRef *input_frames_ref;
    AVHWFramesContext *input_frames;

    // Frames context created for the scaled output surfaces.
    AVBufferRef *output_frames_ref;
    AVHWFramesContext *output_frames;

    // "format" option as given by the user; parsed into output_format at
    // init time (AV_PIX_FMT_NONE means "match the input sw format").
    char *output_format_string;
    enum AVPixelFormat output_format;

    char *w_expr;      // width expression string
    char *h_expr;      // height expression string

    int output_width;  // computed width
    int output_height; // computed height
} ScaleVAAPIContext;
62 | |
63 | |
64 | static int scale_vaapi_query_formats(AVFilterContext *avctx) |
65 | { |
66 | enum AVPixelFormat pix_fmts[] = { |
67 | AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE, |
68 | }; |
69 | int err; |
70 | |
71 | if ((err = ff_formats_ref(ff_make_format_list(pix_fmts), |
72 | &avctx->inputs[0]->out_formats)) < 0) |
73 | return err; |
74 | if ((err = ff_formats_ref(ff_make_format_list(pix_fmts), |
75 | &avctx->outputs[0]->in_formats)) < 0) |
76 | return err; |
77 | |
78 | return 0; |
79 | } |
80 | |
81 | static int scale_vaapi_pipeline_uninit(ScaleVAAPIContext *ctx) |
82 | { |
83 | if (ctx->va_context != VA_INVALID_ID) { |
84 | vaDestroyContext(ctx->hwctx->display, ctx->va_context); |
85 | ctx->va_context = VA_INVALID_ID; |
86 | } |
87 | |
88 | if (ctx->va_config != VA_INVALID_ID) { |
89 | vaDestroyConfig(ctx->hwctx->display, ctx->va_config); |
90 | ctx->va_config = VA_INVALID_ID; |
91 | } |
92 | |
93 | av_buffer_unref(&ctx->output_frames_ref); |
94 | av_buffer_unref(&ctx->device_ref); |
95 | ctx->hwctx = 0; |
96 | |
97 | return 0; |
98 | } |
99 | |
100 | static int scale_vaapi_config_input(AVFilterLink *inlink) |
101 | { |
102 | AVFilterContext *avctx = inlink->dst; |
103 | ScaleVAAPIContext *ctx = avctx->priv; |
104 | |
105 | scale_vaapi_pipeline_uninit(ctx); |
106 | |
107 | if (!inlink->hw_frames_ctx) { |
108 | av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is " |
109 | "required to associate the processing device.\n"); |
110 | return AVERROR(EINVAL); |
111 | } |
112 | |
113 | ctx->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx); |
114 | ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data; |
115 | |
116 | return 0; |
117 | } |
118 | |
119 | static int scale_vaapi_config_output(AVFilterLink *outlink) |
120 | { |
121 | AVFilterLink *inlink = outlink->src->inputs[0]; |
122 | AVFilterContext *avctx = outlink->src; |
123 | ScaleVAAPIContext *ctx = avctx->priv; |
124 | AVVAAPIHWConfig *hwconfig = NULL; |
125 | AVHWFramesConstraints *constraints = NULL; |
126 | AVVAAPIFramesContext *va_frames; |
127 | VAStatus vas; |
128 | int err, i; |
129 | |
130 | scale_vaapi_pipeline_uninit(ctx); |
131 | |
132 | ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref); |
133 | ctx->hwctx = ((AVHWDeviceContext*)ctx->device_ref->data)->hwctx; |
134 | |
135 | av_assert0(ctx->va_config == VA_INVALID_ID); |
136 | vas = vaCreateConfig(ctx->hwctx->display, VAProfileNone, |
137 | VAEntrypointVideoProc, 0, 0, &ctx->va_config); |
138 | if (vas != VA_STATUS_SUCCESS) { |
139 | av_log(ctx, AV_LOG_ERROR, "Failed to create processing pipeline " |
140 | "config: %d (%s).\n", vas, vaErrorStr(vas)); |
141 | err = AVERROR(EIO); |
142 | goto fail; |
143 | } |
144 | |
145 | hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref); |
146 | if (!hwconfig) { |
147 | err = AVERROR(ENOMEM); |
148 | goto fail; |
149 | } |
150 | hwconfig->config_id = ctx->va_config; |
151 | |
152 | constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref, |
153 | hwconfig); |
154 | if (!constraints) { |
155 | err = AVERROR(ENOMEM); |
156 | goto fail; |
157 | } |
158 | |
159 | if (ctx->output_format == AV_PIX_FMT_NONE) |
160 | ctx->output_format = ctx->input_frames->sw_format; |
161 | if (constraints->valid_sw_formats) { |
162 | for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) { |
163 | if (ctx->output_format == constraints->valid_sw_formats[i]) |
164 | break; |
165 | } |
166 | if (constraints->valid_sw_formats[i] == AV_PIX_FMT_NONE) { |
167 | av_log(ctx, AV_LOG_ERROR, "Hardware does not support output " |
168 | "format %s.\n", av_get_pix_fmt_name(ctx->output_format)); |
169 | err = AVERROR(EINVAL); |
170 | goto fail; |
171 | } |
172 | } |
173 | |
174 | if ((err = ff_scale_eval_dimensions(ctx, |
175 | ctx->w_expr, ctx->h_expr, |
176 | inlink, outlink, |
177 | &ctx->output_width, &ctx->output_height)) < 0) |
178 | goto fail; |
179 | |
180 | if (ctx->output_width < constraints->min_width || |
181 | ctx->output_height < constraints->min_height || |
182 | ctx->output_width > constraints->max_width || |
183 | ctx->output_height > constraints->max_height) { |
184 | av_log(ctx, AV_LOG_ERROR, "Hardware does not support scaling to " |
185 | "size %dx%d (constraints: width %d-%d height %d-%d).\n", |
186 | ctx->output_width, ctx->output_height, |
187 | constraints->min_width, constraints->max_width, |
188 | constraints->min_height, constraints->max_height); |
189 | err = AVERROR(EINVAL); |
190 | goto fail; |
191 | } |
192 | |
193 | ctx->output_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref); |
194 | if (!ctx->output_frames_ref) { |
195 | av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context " |
196 | "for output.\n"); |
197 | err = AVERROR(ENOMEM); |
198 | goto fail; |
199 | } |
200 | |
201 | ctx->output_frames = (AVHWFramesContext*)ctx->output_frames_ref->data; |
202 | |
203 | ctx->output_frames->format = AV_PIX_FMT_VAAPI; |
204 | ctx->output_frames->sw_format = ctx->output_format; |
205 | ctx->output_frames->width = ctx->output_width; |
206 | ctx->output_frames->height = ctx->output_height; |
207 | |
208 | // The number of output frames we need is determined by what follows |
209 | // the filter. If it's an encoder with complex frame reference |
210 | // structures then this could be very high. |
211 | ctx->output_frames->initial_pool_size = 10; |
212 | |
213 | err = av_hwframe_ctx_init(ctx->output_frames_ref); |
214 | if (err < 0) { |
215 | av_log(ctx, AV_LOG_ERROR, "Failed to initialise VAAPI frame " |
216 | "context for output: %d\n", err); |
217 | goto fail; |
218 | } |
219 | |
220 | va_frames = ctx->output_frames->hwctx; |
221 | |
222 | av_assert0(ctx->va_context == VA_INVALID_ID); |
223 | vas = vaCreateContext(ctx->hwctx->display, ctx->va_config, |
224 | ctx->output_width, ctx->output_height, |
225 | VA_PROGRESSIVE, |
226 | va_frames->surface_ids, va_frames->nb_surfaces, |
227 | &ctx->va_context); |
228 | if (vas != VA_STATUS_SUCCESS) { |
229 | av_log(ctx, AV_LOG_ERROR, "Failed to create processing pipeline " |
230 | "context: %d (%s).\n", vas, vaErrorStr(vas)); |
231 | return AVERROR(EIO); |
232 | } |
233 | |
234 | outlink->w = ctx->output_width; |
235 | outlink->h = ctx->output_height; |
236 | |
237 | outlink->hw_frames_ctx = av_buffer_ref(ctx->output_frames_ref); |
238 | if (!outlink->hw_frames_ctx) { |
239 | err = AVERROR(ENOMEM); |
240 | goto fail; |
241 | } |
242 | |
243 | av_freep(&hwconfig); |
244 | av_hwframe_constraints_free(&constraints); |
245 | return 0; |
246 | |
247 | fail: |
248 | av_buffer_unref(&ctx->output_frames_ref); |
249 | av_freep(&hwconfig); |
250 | av_hwframe_constraints_free(&constraints); |
251 | return err; |
252 | } |
253 | |
254 | static int vaapi_proc_colour_standard(enum AVColorSpace av_cs) |
255 | { |
256 | switch(av_cs) { |
257 | #define CS(av, va) case AVCOL_SPC_ ## av: return VAProcColorStandard ## va; |
258 | CS(BT709, BT709); |
259 | CS(BT470BG, BT601); |
260 | CS(SMPTE170M, SMPTE170M); |
261 | CS(SMPTE240M, SMPTE240M); |
262 | #undef CS |
263 | default: |
264 | return VAProcColorStandardNone; |
265 | } |
266 | } |
267 | |
268 | static int scale_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame) |
269 | { |
270 | AVFilterContext *avctx = inlink->dst; |
271 | AVFilterLink *outlink = avctx->outputs[0]; |
272 | ScaleVAAPIContext *ctx = avctx->priv; |
273 | AVFrame *output_frame = NULL; |
274 | VASurfaceID input_surface, output_surface; |
275 | VAProcPipelineParameterBuffer params; |
276 | VABufferID params_id; |
277 | VARectangle input_region; |
278 | VAStatus vas; |
279 | int err; |
280 | |
281 | av_log(ctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n", |
282 | av_get_pix_fmt_name(input_frame->format), |
283 | input_frame->width, input_frame->height, input_frame->pts); |
284 | |
285 | if (ctx->va_context == VA_INVALID_ID) |
286 | return AVERROR(EINVAL); |
287 | |
288 | input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3]; |
289 | av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale input.\n", |
290 | input_surface); |
291 | |
292 | output_frame = ff_get_video_buffer(outlink, ctx->output_width, |
293 | ctx->output_height); |
294 | if (!output_frame) { |
295 | err = AVERROR(ENOMEM); |
296 | goto fail; |
297 | } |
298 | |
299 | output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3]; |
300 | av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale output.\n", |
301 | output_surface); |
302 | |
303 | memset(¶ms, 0, sizeof(params)); |
304 | |
305 | // If there were top/left cropping, it could be taken into |
306 | // account here. |
307 | input_region = (VARectangle) { |
308 | .x = 0, |
309 | .y = 0, |
310 | .width = input_frame->width, |
311 | .height = input_frame->height, |
312 | }; |
313 | |
314 | params.surface = input_surface; |
315 | params.surface_region = &input_region; |
316 | params.surface_color_standard = |
317 | vaapi_proc_colour_standard(input_frame->colorspace); |
318 | |
319 | params.output_region = 0; |
320 | params.output_background_color = 0xff000000; |
321 | params.output_color_standard = params.surface_color_standard; |
322 | |
323 | params.pipeline_flags = 0; |
324 | params.filter_flags = VA_FILTER_SCALING_HQ; |
325 | |
326 | vas = vaBeginPicture(ctx->hwctx->display, |
327 | ctx->va_context, output_surface); |
328 | if (vas != VA_STATUS_SUCCESS) { |
329 | av_log(ctx, AV_LOG_ERROR, "Failed to attach new picture: " |
330 | "%d (%s).\n", vas, vaErrorStr(vas)); |
331 | err = AVERROR(EIO); |
332 | goto fail; |
333 | } |
334 | |
335 | vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context, |
336 | VAProcPipelineParameterBufferType, |
337 | sizeof(params), 1, ¶ms, ¶ms_id); |
338 | if (vas != VA_STATUS_SUCCESS) { |
339 | av_log(ctx, AV_LOG_ERROR, "Failed to create parameter buffer: " |
340 | "%d (%s).\n", vas, vaErrorStr(vas)); |
341 | err = AVERROR(EIO); |
342 | goto fail_after_begin; |
343 | } |
344 | av_log(ctx, AV_LOG_DEBUG, "Pipeline parameter buffer is %#x.\n", |
345 | params_id); |
346 | |
347 | vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context, |
348 | ¶ms_id, 1); |
349 | if (vas != VA_STATUS_SUCCESS) { |
350 | av_log(ctx, AV_LOG_ERROR, "Failed to render parameter buffer: " |
351 | "%d (%s).\n", vas, vaErrorStr(vas)); |
352 | err = AVERROR(EIO); |
353 | goto fail_after_begin; |
354 | } |
355 | |
356 | vas = vaEndPicture(ctx->hwctx->display, ctx->va_context); |
357 | if (vas != VA_STATUS_SUCCESS) { |
358 | av_log(ctx, AV_LOG_ERROR, "Failed to start picture processing: " |
359 | "%d (%s).\n", vas, vaErrorStr(vas)); |
360 | err = AVERROR(EIO); |
361 | goto fail_after_render; |
362 | } |
363 | |
364 | if (ctx->hwctx->driver_quirks & |
365 | AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) { |
366 | vas = vaDestroyBuffer(ctx->hwctx->display, params_id); |
367 | if (vas != VA_STATUS_SUCCESS) { |
368 | av_log(ctx, AV_LOG_ERROR, "Failed to free parameter buffer: " |
369 | "%d (%s).\n", vas, vaErrorStr(vas)); |
370 | // And ignore. |
371 | } |
372 | } |
373 | |
374 | av_frame_copy_props(output_frame, input_frame); |
375 | av_frame_free(&input_frame); |
376 | |
377 | av_log(ctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n", |
378 | av_get_pix_fmt_name(output_frame->format), |
379 | output_frame->width, output_frame->height, output_frame->pts); |
380 | |
381 | return ff_filter_frame(outlink, output_frame); |
382 | |
383 | // We want to make sure that if vaBeginPicture has been called, we also |
384 | // call vaRenderPicture and vaEndPicture. These calls may well fail or |
385 | // do something else nasty, but once we're in this failure case there |
386 | // isn't much else we can do. |
387 | fail_after_begin: |
388 | vaRenderPicture(ctx->hwctx->display, ctx->va_context, ¶ms_id, 1); |
389 | fail_after_render: |
390 | vaEndPicture(ctx->hwctx->display, ctx->va_context); |
391 | fail: |
392 | av_frame_free(&input_frame); |
393 | av_frame_free(&output_frame); |
394 | return err; |
395 | } |
396 | |
397 | static av_cold int scale_vaapi_init(AVFilterContext *avctx) |
398 | { |
399 | ScaleVAAPIContext *ctx = avctx->priv; |
400 | |
401 | ctx->va_config = VA_INVALID_ID; |
402 | ctx->va_context = VA_INVALID_ID; |
403 | ctx->valid_ids = 1; |
404 | |
405 | if (ctx->output_format_string) { |
406 | ctx->output_format = av_get_pix_fmt(ctx->output_format_string); |
407 | if (ctx->output_format == AV_PIX_FMT_NONE) { |
408 | av_log(ctx, AV_LOG_ERROR, "Invalid output format.\n"); |
409 | return AVERROR(EINVAL); |
410 | } |
411 | } else { |
412 | // Use the input format once that is configured. |
413 | ctx->output_format = AV_PIX_FMT_NONE; |
414 | } |
415 | |
416 | return 0; |
417 | } |
418 | |
419 | static av_cold void scale_vaapi_uninit(AVFilterContext *avctx) |
420 | { |
421 | ScaleVAAPIContext *ctx = avctx->priv; |
422 | |
423 | if (ctx->valid_ids) |
424 | scale_vaapi_pipeline_uninit(ctx); |
425 | |
426 | av_buffer_unref(&ctx->input_frames_ref); |
427 | av_buffer_unref(&ctx->output_frames_ref); |
428 | av_buffer_unref(&ctx->device_ref); |
429 | } |
430 | |
431 | |
#define OFFSET(x) offsetof(ScaleVAAPIContext, x)
#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
// User-visible options; "w"/"h" are expressions evaluated against the
// input link by ff_scale_eval_dimensions() (defaults keep the input size).
static const AVOption scale_vaapi_options[] = {
    { "w", "Output video width",
      OFFSET(w_expr), AV_OPT_TYPE_STRING, {.str = "iw"}, .flags = FLAGS },
    { "h", "Output video height",
      OFFSET(h_expr), AV_OPT_TYPE_STRING, {.str = "ih"}, .flags = FLAGS },
    { "format", "Output video format (software format of hardware frames)",
      OFFSET(output_format_string), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { NULL },
};
443 | |
// AVClass wiring for option handling and logging context names.
static const AVClass scale_vaapi_class = {
    .class_name = "scale_vaapi",
    .item_name  = av_default_item_name,
    .option     = scale_vaapi_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
450 | |
// Single video input pad; config_props captures the hw frames context.
static const AVFilterPad scale_vaapi_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = &scale_vaapi_filter_frame,
        .config_props = &scale_vaapi_config_input,
    },
    { NULL }
};
460 | |
// Single video output pad; config_props builds the VA pipeline.
static const AVFilterPad scale_vaapi_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = &scale_vaapi_config_output,
    },
    { NULL }
};
469 | |
// Filter registration; FF_FILTER_FLAG_HWFRAME_AWARE indicates this filter
// manages hw_frames_ctx on its links itself.
AVFilter ff_vf_scale_vaapi = {
    .name          = "scale_vaapi",
    .description   = NULL_IF_CONFIG_SMALL("Scale to/from VAAPI surfaces."),
    .priv_size     = sizeof(ScaleVAAPIContext),
    .init          = &scale_vaapi_init,
    .uninit        = &scale_vaapi_uninit,
    .query_formats = &scale_vaapi_query_formats,
    .inputs        = scale_vaapi_inputs,
    .outputs       = scale_vaapi_outputs,
    .priv_class    = &scale_vaapi_class,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
482 |