/*
 * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

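/**
 * @file
 * Perspective-correction filter: the x0/y0 .. x3/y3 option pairs give the
 * corners of a quadrilateral that is mapped onto the full frame (or, with
 * sense=destination, that the full frame is mapped onto), using bilinear
 * or bicubic resampling.
 *
 * Illustrative command line (the corner coordinates are made-up values):
 *   ffmpeg -i in.mp4 -vf "perspective=x0=30:y0=20:x1=W-20:y1=15:x2=10:y2=H-5:x3=W:y3=H" out.mp4
 */
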
#include "libavutil/avassert.h"
#include "libavutil/eval.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

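/*
 * Remapped source coordinates are kept in fixed point with SUB_PIXEL_BITS
 * fractional bits (1/256-pel precision); interpolation coefficients are
 * kept in COEFF_BITS fixed point.
 */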
#define SUB_PIXEL_BITS 8
#define SUB_PIXELS (1 << SUB_PIXEL_BITS)
#define COEFF_BITS 11

#define LINEAR 0
#define CUBIC 1

typedef struct PerspectiveContext {
    const AVClass *class;
    char *expr_str[4][2];
    double ref[4][2];
    int32_t (*pv)[2];
    int32_t coeff[SUB_PIXELS][4];
    int interpolation;
    int linesize[4];
    int height[4];
    int hsub, vsub;
    int nb_planes;
    int sense;
    int eval_mode;

    int (*perspective)(AVFilterContext *ctx,
                       void *arg, int job, int nb_jobs);
} PerspectiveContext;

#define OFFSET(x) offsetof(PerspectiveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

enum PERSPECTIVESense {
    PERSPECTIVE_SENSE_SOURCE      = 0, ///< coordinates give locations in source of corners of destination.
    PERSPECTIVE_SENSE_DESTINATION = 1, ///< coordinates give locations in destination of corners of source.
};

enum EvalMode {
    EVAL_MODE_INIT,
    EVAL_MODE_FRAME,
    EVAL_MODE_NB
};

static const AVOption perspective_options[] = {
    { "x0", "set top left x coordinate",     OFFSET(expr_str[0][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "y0", "set top left y coordinate",     OFFSET(expr_str[0][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "x1", "set top right x coordinate",    OFFSET(expr_str[1][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
    { "y1", "set top right y coordinate",    OFFSET(expr_str[1][1]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "x2", "set bottom left x coordinate",  OFFSET(expr_str[2][0]), AV_OPT_TYPE_STRING, {.str="0"}, 0, 0, FLAGS },
    { "y2", "set bottom left y coordinate",  OFFSET(expr_str[2][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
    { "x3", "set bottom right x coordinate", OFFSET(expr_str[3][0]), AV_OPT_TYPE_STRING, {.str="W"}, 0, 0, FLAGS },
    { "y3", "set bottom right y coordinate", OFFSET(expr_str[3][1]), AV_OPT_TYPE_STRING, {.str="H"}, 0, 0, FLAGS },
    { "interpolation", "set interpolation", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=LINEAR}, 0, 1, FLAGS, "interpolation" },
    {     "linear", "", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "interpolation" },
    {     "cubic",  "", 0, AV_OPT_TYPE_CONST, {.i64=CUBIC},  0, 0, FLAGS, "interpolation" },
    { "sense", "specify the sense of the coordinates", OFFSET(sense), AV_OPT_TYPE_INT, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 1, FLAGS, "sense"},
    {     "source", "specify locations in source to send to corners in destination",
              0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_SOURCE}, 0, 0, FLAGS, "sense"},
    {     "destination", "specify locations in destination to send corners of source",
              0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE_SENSE_DESTINATION}, 0, 0, FLAGS, "sense"},
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" },
    {     "init",  "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT},  .flags = FLAGS, .unit = "eval" },
    {     "frame", "eval expressions per-frame",                  0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" },

    { NULL }
};

AVFILTER_DEFINE_CLASS(perspective);

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

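/*
 * Cubic convolution kernel (Keys), here with A = -0.60 rather than the
 * Catmull-Rom choice A = -0.5:
 *   |d| < 1:       (A + 2)|d|^3 - (A + 3)|d|^2 + 1
 *   1 <= |d| < 2:  A|d|^3 - 5A|d|^2 + 8A|d| - 4A
 *   otherwise:     0
 */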
static inline double get_coeff(double d)
{
    double coeff, A = -0.60;

    d = fabs(d);

    if (d < 1.0)
        coeff = (1.0 - (A + 3.0) * d * d + (A + 2.0) * d * d * d);
    else if (d < 2.0)
        coeff = (-4.0 * A + 8.0 * A * d - 5.0 * A * d * d + A * d * d * d);
    else
        coeff = 0.0;

    return coeff;
}

static const char *const var_names[] = { "W", "H", "in", "on", NULL };
enum { VAR_W, VAR_H, VAR_IN, VAR_ON, VAR_VARS_NB };

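/*
 * Evaluate the corner expressions and derive the 3x3 homography
 * (entries x0..x8 below) that takes each output pixel (x, y) to a
 * sub-pixel input position:
 *   u = SUB_PIXELS * (x0*x + x1*y + x2) / (x6*x + x7*y + x8)
 *   v = SUB_PIXELS * (x3*x + x4*y + x5) / (x6*x + x7*y + x8)
 * The results are cached per pixel in s->pv.
 */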
static int calc_persp_luts(AVFilterContext *ctx, AVFilterLink *inlink)
{
    PerspectiveContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    double (*ref)[2] = s->ref;

    double values[VAR_VARS_NB] = { [VAR_W] = inlink->w, [VAR_H] = inlink->h,
                                   [VAR_IN] = inlink->frame_count_out + 1,
                                   [VAR_ON] = outlink->frame_count_in + 1 };
    const int h = values[VAR_H];
    const int w = values[VAR_W];
    double x0, x1, x2, x3, x4, x5, x6, x7, x8, q;
    double t0, t1, t2, t3;
    int x, y, i, j, ret;

    for (i = 0; i < 4; i++) {
        for (j = 0; j < 2; j++) {
            if (!s->expr_str[i][j])
                return AVERROR(EINVAL);
            ret = av_expr_parse_and_eval(&s->ref[i][j], s->expr_str[i][j],
                                         var_names, &values[0],
                                         NULL, NULL, NULL, NULL,
                                         0, 0, ctx);
            if (ret < 0)
                return ret;
        }
    }

    switch (s->sense) {
    case PERSPECTIVE_SENSE_SOURCE:
        x6 = ((ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
              (ref[2][1] - ref[3][1]) -
             ( ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
              (ref[2][0] - ref[3][0])) * h;
        x7 = ((ref[0][1] - ref[1][1] - ref[2][1] + ref[3][1]) *
              (ref[1][0] - ref[3][0]) -
             ( ref[0][0] - ref[1][0] - ref[2][0] + ref[3][0]) *
              (ref[1][1] - ref[3][1])) * w;
        q =  ( ref[1][0] - ref[3][0]) * (ref[2][1] - ref[3][1]) -
             ( ref[2][0] - ref[3][0]) * (ref[1][1] - ref[3][1]);

        x0 = q * (ref[1][0] - ref[0][0]) * h + x6 * ref[1][0];
        x1 = q * (ref[2][0] - ref[0][0]) * w + x7 * ref[2][0];
        x2 = q * ref[0][0] * w * h;
        x3 = q * (ref[1][1] - ref[0][1]) * h + x6 * ref[1][1];
        x4 = q * (ref[2][1] - ref[0][1]) * w + x7 * ref[2][1];
        x5 = q * ref[0][1] * w * h;
        x8 = q * w * h;
        break;
    case PERSPECTIVE_SENSE_DESTINATION:
        t0 = ref[0][0] * (ref[3][1] - ref[1][1]) +
             ref[1][0] * (ref[0][1] - ref[3][1]) +
             ref[3][0] * (ref[1][1] - ref[0][1]);
        t1 = ref[1][0] * (ref[2][1] - ref[3][1]) +
             ref[2][0] * (ref[3][1] - ref[1][1]) +
             ref[3][0] * (ref[1][1] - ref[2][1]);
        t2 = ref[0][0] * (ref[3][1] - ref[2][1]) +
             ref[2][0] * (ref[0][1] - ref[3][1]) +
             ref[3][0] * (ref[2][1] - ref[0][1]);
        t3 = ref[0][0] * (ref[1][1] - ref[2][1]) +
             ref[1][0] * (ref[2][1] - ref[0][1]) +
             ref[2][0] * (ref[0][1] - ref[1][1]);

        x0 = t0 * t1 * w * (ref[2][1] - ref[0][1]);
        x1 = t0 * t1 * w * (ref[0][0] - ref[2][0]);
        x2 = t0 * t1 * w * (ref[0][1] * ref[2][0] - ref[0][0] * ref[2][1]);
        x3 = t1 * t2 * h * (ref[1][1] - ref[0][1]);
        x4 = t1 * t2 * h * (ref[0][0] - ref[1][0]);
        x5 = t1 * t2 * h * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]);
        x6 = t1 * t2 * (ref[1][1] - ref[0][1]) +
             t0 * t3 * (ref[2][1] - ref[3][1]);
        x7 = t1 * t2 * (ref[0][0] - ref[1][0]) +
             t0 * t3 * (ref[3][0] - ref[2][0]);
        x8 = t1 * t2 * (ref[0][1] * ref[1][0] - ref[0][0] * ref[1][1]) +
             t0 * t3 * (ref[2][0] * ref[3][1] - ref[2][1] * ref[3][0]);
        break;
    default:
        av_assert0(0);
    }

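    /* Fill the remap LUT at luma resolution; the resamplers reuse it for
     * chroma planes by scaling coordinates with the subsampling shifts. */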
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            int u, v;

            u = lrint(SUB_PIXELS * (x0 * x + x1 * y + x2) /
                                   (x6 * x + x7 * y + x8));
            v = lrint(SUB_PIXELS * (x3 * x + x4 * y + x5) /
                                   (x6 * x + x7 * y + x8));

            s->pv[x + y * w][0] = u;
            s->pv[x + y * w][1] = v;
        }
    }

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    PerspectiveContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int h = inlink->h;
    int w = inlink->w;
    int i, j, ret;

    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
        return ret;

    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->height[0] = s->height[3] = inlink->h;

    s->pv = av_realloc_f(s->pv, w * h, 2 * sizeof(*s->pv));
    if (!s->pv)
        return AVERROR(ENOMEM);

    if (s->eval_mode == EVAL_MODE_INIT) {
        if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
            return ret;
        }
    }

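    /* Precompute all SUB_PIXELS phases of the 4-tap cubic kernel in
     * COEFF_BITS fixed point, normalizing each phase so its taps sum to
     * exactly 1 << COEFF_BITS. */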
    for (i = 0; i < SUB_PIXELS; i++) {
        double d = i / (double)SUB_PIXELS;
        double temp[4];
        double sum = 0;

        for (j = 0; j < 4; j++)
            temp[j] = get_coeff(j - d - 1);

        for (j = 0; j < 4; j++)
            sum += temp[j];

        for (j = 0; j < 4; j++)
            s->coeff[i][j] = lrint((1 << COEFF_BITS) * temp[j] / sum);
    }

    return 0;
}

typedef struct ThreadData {
    uint8_t *dst;
    int dst_linesize;
    uint8_t *src;
    int src_linesize;
    int w, h;
    int hsub, vsub;
} ThreadData;

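/*
 * Slice-threaded bicubic resampler: each output pixel blends a 4x4 source
 * neighbourhood with the separable kernel phases in s->coeff. The fast
 * path assumes the whole neighbourhood lies inside the image; the slow
 * path clamps sample coordinates to the borders.
 */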
static int resample_cubic(AVFilterContext *ctx, void *arg,
                          int job, int nb_jobs)
{
    PerspectiveContext *s = ctx->priv;
    ThreadData *td = arg;
    uint8_t *dst = td->dst;
    int dst_linesize = td->dst_linesize;
    uint8_t *src = td->src;
    int src_linesize = td->src_linesize;
    int w = td->w;
    int h = td->h;
    int hsub = td->hsub;
    int vsub = td->vsub;
    int start = (h * job) / nb_jobs;
    int end   = (h * (job+1)) / nb_jobs;
    const int linesize = s->linesize[0];
    int x, y;

    for (y = start; y < end; y++) {
        int sy = y << vsub;
        for (x = 0; x < w; x++) {
            int u, v, subU, subV, sum, sx;

            sx   = x << hsub;
            u    = s->pv[sx + sy * linesize][0] >> hsub;
            v    = s->pv[sx + sy * linesize][1] >> vsub;
            subU = u & (SUB_PIXELS - 1);
            subV = v & (SUB_PIXELS - 1);
            u  >>= SUB_PIXEL_BITS;
            v  >>= SUB_PIXEL_BITS;

            if (u > 0 && v > 0 && u < w - 2 && v < h - 2) {
                const int index = u + v * src_linesize;
                const int a = s->coeff[subU][0];
                const int b = s->coeff[subU][1];
                const int c = s->coeff[subU][2];
                const int d = s->coeff[subU][3];

                sum = s->coeff[subV][0] * (a * src[index - 1 -     src_linesize] + b * src[index - 0 -     src_linesize] +
                                           c * src[index + 1 -     src_linesize] + d * src[index + 2 -     src_linesize]) +
                      s->coeff[subV][1] * (a * src[index - 1                   ] + b * src[index - 0                   ] +
                                           c * src[index + 1                   ] + d * src[index + 2                   ]) +
                      s->coeff[subV][2] * (a * src[index - 1 +     src_linesize] + b * src[index - 0 +     src_linesize] +
                                           c * src[index + 1 +     src_linesize] + d * src[index + 2 +     src_linesize]) +
                      s->coeff[subV][3] * (a * src[index - 1 + 2 * src_linesize] + b * src[index - 0 + 2 * src_linesize] +
                                           c * src[index + 1 + 2 * src_linesize] + d * src[index + 2 + 2 * src_linesize]);
            } else {
                int dx, dy;

                sum = 0;

                for (dy = 0; dy < 4; dy++) {
                    int iy = v + dy - 1;

                    if (iy < 0)
                        iy = 0;
                    else if (iy >= h)
                        iy = h - 1;
                    for (dx = 0; dx < 4; dx++) {
                        int ix = u + dx - 1;

                        if (ix < 0)
                            ix = 0;
                        else if (ix >= w)
                            ix = w - 1;

                        sum += s->coeff[subU][dx] * s->coeff[subV][dy] * src[ix + iy * src_linesize];
                    }
                }
            }

            sum = (sum + (1 << (COEFF_BITS * 2 - 1))) >> (COEFF_BITS * 2);
            sum = av_clip_uint8(sum);
            dst[x + y * dst_linesize] = sum;
        }
    }
    return 0;
}

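/*
 * Slice-threaded bilinear resampler. The branches handle the interior
 * (2x2 blend), a horizontal or vertical border (1-D blend along the
 * remaining axis) and the corners (nearest sample), clamping
 * out-of-range coordinates to the nearest edge.
 */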
static int resample_linear(AVFilterContext *ctx, void *arg,
                           int job, int nb_jobs)
{
    PerspectiveContext *s = ctx->priv;
    ThreadData *td = arg;
    uint8_t *dst = td->dst;
    int dst_linesize = td->dst_linesize;
    uint8_t *src = td->src;
    int src_linesize = td->src_linesize;
    int w = td->w;
    int h = td->h;
    int hsub = td->hsub;
    int vsub = td->vsub;
    int start = (h * job) / nb_jobs;
    int end   = (h * (job+1)) / nb_jobs;
    const int linesize = s->linesize[0];
    int x, y;

    for (y = start; y < end; y++) {
        int sy = y << vsub;
        for (x = 0; x < w; x++) {
            int u, v, subU, subV, sum, sx, index, subUI, subVI;

            sx   = x << hsub;
            u    = s->pv[sx + sy * linesize][0] >> hsub;
            v    = s->pv[sx + sy * linesize][1] >> vsub;
            subU = u & (SUB_PIXELS - 1);
            subV = v & (SUB_PIXELS - 1);
            u  >>= SUB_PIXEL_BITS;
            v  >>= SUB_PIXEL_BITS;

            index = u + v * src_linesize;
            subUI = SUB_PIXELS - subU;
            subVI = SUB_PIXELS - subV;

            if ((unsigned)u < (unsigned)(w - 1)) {
                if ((unsigned)v < (unsigned)(h - 1)) {
                    sum = subVI * (subUI * src[index] + subU * src[index + 1]) +
                          subV  * (subUI * src[index + src_linesize] + subU * src[index + src_linesize + 1]);
                    sum = (sum + (1 << (SUB_PIXEL_BITS * 2 - 1))) >> (SUB_PIXEL_BITS * 2);
                } else {
                    if (v < 0)
                        v = 0;
                    else
                        v = h - 1;
                    index = u + v * src_linesize;
                    sum   = subUI * src[index] + subU * src[index + 1];
                    sum   = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
                }
            } else {
                if (u < 0)
                    u = 0;
                else
                    u = w - 1;
                if ((unsigned)v < (unsigned)(h - 1)) {
                    index = u + v * src_linesize;
                    sum   = subVI * src[index] + subV * src[index + src_linesize];
                    sum   = (sum + (1 << (SUB_PIXEL_BITS - 1))) >> SUB_PIXEL_BITS;
                } else {
                    if (v < 0)
                        v = 0;
                    else
                        v = h - 1;
                    index = u + v * src_linesize;
                    sum   = src[index];
                }
            }

            sum = av_clip_uint8(sum);
            dst[x + y * dst_linesize] = sum;
        }
    }
    return 0;
}

static av_cold int init(AVFilterContext *ctx)
{
    PerspectiveContext *s = ctx->priv;

    switch (s->interpolation) {
    case LINEAR: s->perspective = resample_linear; break;
    case CUBIC:  s->perspective = resample_cubic;  break;
    }

    return 0;
}

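/*
 * Run the selected resampler over every plane; chroma planes get their
 * subsampling shifts so the luma-resolution LUT can be indexed correctly.
 */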
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    PerspectiveContext *s = ctx->priv;
    AVFrame *out;
    int plane;
    int ret;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, frame);

    if (s->eval_mode == EVAL_MODE_FRAME) {
        if ((ret = calc_persp_luts(ctx, inlink)) < 0) {
            av_frame_free(&out);
            return ret;
        }
    }

    for (plane = 0; plane < s->nb_planes; plane++) {
        int hsub = plane == 1 || plane == 2 ? s->hsub : 0;
        int vsub = plane == 1 || plane == 2 ? s->vsub : 0;
        ThreadData td = {.dst = out->data[plane],
                         .dst_linesize = out->linesize[plane],
                         .src = frame->data[plane],
                         .src_linesize = frame->linesize[plane],
                         .w = s->linesize[plane],
                         .h = s->height[plane],
                         .hsub = hsub,
                         .vsub = vsub };
        ctx->internal->execute(ctx, s->perspective, &td, NULL, FFMIN(td.h, ff_filter_get_nb_threads(ctx)));
    }

    av_frame_free(&frame);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    PerspectiveContext *s = ctx->priv;

    av_freep(&s->pv);
}

static const AVFilterPad perspective_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad perspective_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_perspective = {
    .name          = "perspective",
    .description   = NULL_IF_CONFIG_SMALL("Correct the perspective of video."),
    .priv_size     = sizeof(PerspectiveContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = perspective_inputs,
    .outputs       = perspective_outputs,
    .priv_class    = &perspective_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
};