/*
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
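
/*
 * lut2 builds one lookup table per plane from the c0..c3 expressions and
 * applies it to corresponding pixel pairs taken from two synchronized
 * video inputs.
 *
 * A minimal illustrative invocation (hypothetical file names), highlighting
 * where two clips differ on the first component:
 *
 *   ffmpeg -i a.mp4 -i b.mp4 -lavfi "lut2=c0='abs(x-y)'" diff.mp4
 */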

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "drawutils.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "framesync.h"

static const char *const var_names[] = {
    "w",        ///< width of the input video
    "h",        ///< height of the input video
    "x",        ///< input value for the pixel from input #1
    "y",        ///< input value for the pixel from input #2
    "bdx",      ///< input #1 video bitdepth
    "bdy",      ///< input #2 video bitdepth
    NULL
};

enum var_name {
    VAR_W,
    VAR_H,
    VAR_X,
    VAR_Y,
    VAR_BITDEPTHX,
    VAR_BITDEPTHY,
    VAR_VARS_NB
};

typedef struct LUT2Context {
    const AVClass *class;

    char *comp_expr_str[4];

    AVExpr *comp_expr[4];
    double var_values[VAR_VARS_NB];
    uint16_t *lut[4];  ///< lookup table for each component
    int width[4], height[4];    ///< per-plane output dimensions
    int nb_planes;
    int depth, depthx, depthy;  ///< combined LUT index width and per-input bit depths

    void (*lut2)(struct LUT2Context *s, AVFrame *dst, AVFrame *srcx, AVFrame *srcy);

    FFFrameSync fs;
} LUT2Context;

#define OFFSET(x) offsetof(LUT2Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption lut2_options[] = {
    { "c0", "set component #0 expression", OFFSET(comp_expr_str[0]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c1", "set component #1 expression", OFFSET(comp_expr_str[1]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c2", "set component #2 expression", OFFSET(comp_expr_str[2]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { "c3", "set component #3 expression", OFFSET(comp_expr_str[3]), AV_OPT_TYPE_STRING, { .str = "x" }, .flags = FLAGS },
    { NULL }
};

static av_cold void uninit(AVFilterContext *ctx)
{
    LUT2Context *s = ctx->priv;
    int i;

    /* release the framesync state allocated in config_output */
    ff_framesync_uninit(&s->fs);

    for (i = 0; i < 4; i++) {
        av_expr_free(s->comp_expr[i]);
        s->comp_expr[i] = NULL;
        av_freep(&s->comp_expr_str[i]);
        av_freep(&s->lut[i]);
    }
}

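/*
 * The same format list is applied to every link, but negotiation can still
 * give the two inputs different formats from this list; config_output
 * rejects that case explicitly.
 */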
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP12,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_NONE
    };

    return ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
}

static int config_inputx(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int hsub = desc->log2_chroma_w;
    int vsub = desc->log2_chroma_h;

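    /* planes 1 and 2 have reduced dimensions for chroma-subsampled formats */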
    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    s->height[1] = s->height[2] = AV_CEIL_RSHIFT(inlink->h, vsub);
    s->height[0] = s->height[3] = inlink->h;
    s->width[1]  = s->width[2]  = AV_CEIL_RSHIFT(inlink->w, hsub);
    s->width[0]  = s->width[3]  = inlink->w;

    s->var_values[VAR_W] = inlink->w;
    s->var_values[VAR_H] = inlink->h;
    s->depthx = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHX] = s->depthx;

    return 0;
}

static int config_inputy(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LUT2Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    s->depthy = desc->comp[0].depth;
    s->var_values[VAR_BITDEPTHY] = s->depthy;

    return 0;
}

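/*
 * The per-plane LUT is a flattened 2D table: the output for the input pair
 * (x, y) is stored at index (y << depthx) | x, so each entry is addressed
 * by depthx + depthy bits (a 65536-entry table for two 8-bit inputs).
 */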
static void lut2_8bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint8_t *srcxx, *srcyy;
        uint8_t *dst;

        dst   = out->data[p];
        srcxx = srcx->data[p];
        srcyy = srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }

            dst   += out->linesize[p];
            srcxx += srcx->linesize[p];
            srcyy += srcy->linesize[p];
        }
    }
}

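/* 16-bit variant; AVFrame linesize is expressed in bytes, hence the
 * division by 2 when advancing the uint16_t row pointers. */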
static void lut2_16bit(struct LUT2Context *s, AVFrame *out, AVFrame *srcx, AVFrame *srcy)
{
    int p, y, x;

    for (p = 0; p < s->nb_planes; p++) {
        const uint16_t *lut = s->lut[p];
        const uint16_t *srcxx, *srcyy;
        uint16_t *dst;

        dst   = (uint16_t *)out->data[p];
        srcxx = (uint16_t *)srcx->data[p];
        srcyy = (uint16_t *)srcy->data[p];

        for (y = 0; y < s->height[p]; y++) {
            for (x = 0; x < s->width[p]; x++) {
                dst[x] = lut[(srcyy[x] << s->depthx) | srcxx[x]];
            }

            dst   += out->linesize[p] / 2;
            srcxx += srcx->linesize[p] / 2;
            srcyy += srcy->linesize[p] / 2;
        }
    }
}

static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    LUT2Context *s = fs->opaque;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out, *srcx, *srcy;
    int ret;

    if ((ret = ff_framesync_get_frame(&s->fs, 0, &srcx, 0)) < 0 ||
        (ret = ff_framesync_get_frame(&s->fs, 1, &srcy, 0)) < 0)
        return ret;

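    /* when the filter is disabled on the timeline, pass the first input
     * through unchanged */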
    if (ctx->is_disabled) {
        out = av_frame_clone(srcx);
        if (!out)
            return AVERROR(ENOMEM);
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, srcx);

        s->lut2(s, out, srcx, srcy);
    }

    out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base);

    return ff_filter_frame(outlink, out);
}

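/*
 * config_output checks that the two inputs agree in pixel format, size and
 * SAR, sets up frame synchronization, then allocates and fills one LUT per
 * plane: 1 << (depthx + depthy) uint16_t entries, i.e. up to 32 MiB per
 * plane for two 12-bit inputs.
 */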
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LUT2Context *s = ctx->priv;
    AVFilterLink *srcx = ctx->inputs[0];
    AVFilterLink *srcy = ctx->inputs[1];
    FFFrameSyncIn *in;
    int p, ret;

    s->depth = s->depthx + s->depthy;

    if (srcx->format != srcy->format) {
        av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
        return AVERROR(EINVAL);
    }
    if (srcx->w                       != srcy->w ||
        srcx->h                       != srcy->h ||
        srcx->sample_aspect_ratio.num != srcy->sample_aspect_ratio.num ||
        srcx->sample_aspect_ratio.den != srcy->sample_aspect_ratio.den) {
        av_log(ctx, AV_LOG_ERROR, "First input link %s parameters "
               "(size %dx%d, SAR %d:%d) do not match the corresponding "
               "second input link %s parameters (%dx%d, SAR %d:%d)\n",
               ctx->input_pads[0].name, srcx->w, srcx->h,
               srcx->sample_aspect_ratio.num,
               srcx->sample_aspect_ratio.den,
               ctx->input_pads[1].name,
               srcy->w, srcy->h,
               srcy->sample_aspect_ratio.num,
               srcy->sample_aspect_ratio.den);
        return AVERROR(EINVAL);
    }

    outlink->w = srcx->w;
    outlink->h = srcx->h;
    outlink->time_base = srcx->time_base;
    outlink->sample_aspect_ratio = srcx->sample_aspect_ratio;
    outlink->frame_rate = srcx->frame_rate;

    if ((ret = ff_framesync_init(&s->fs, ctx, 2)) < 0)
        return ret;

    in = s->fs.in;
    in[0].time_base = srcx->time_base;
    in[1].time_base = srcy->time_base;
    in[0].sync   = 1;
    in[0].before = EXT_STOP;
    in[0].after  = EXT_INFINITY;
    in[1].sync   = 1;
    in[1].before = EXT_STOP;
    in[1].after  = EXT_INFINITY;
    s->fs.opaque   = s;
    s->fs.on_event = process_frame;

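    /* the format check above guarantees depthx == depthy, so the combined
     * index width exceeds 16 bits exactly when the inputs are wider than
     * 8 bits per sample */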
    s->lut2 = s->depth > 16 ? lut2_16bit : lut2_8bit;

    for (p = 0; p < s->nb_planes; p++) {
        s->lut[p] = av_malloc_array(1 << s->depth, sizeof(uint16_t));
        if (!s->lut[p])
            return AVERROR(ENOMEM);
    }

    for (p = 0; p < s->nb_planes; p++) {
        double res;
        int x, y;

        /* create the parsed expression */
        av_expr_free(s->comp_expr[p]);
        s->comp_expr[p] = NULL;
        ret = av_expr_parse(&s->comp_expr[p], s->comp_expr_str[p],
                            var_names, NULL, NULL, NULL, NULL, 0, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "Error when parsing the expression '%s' for the component %d.\n",
                   s->comp_expr_str[p], p);
            return AVERROR(EINVAL);
        }

        /* compute the lut */
        for (y = 0; y < (1 << s->depthx); y++) {
            s->var_values[VAR_Y] = y;
            for (x = 0; x < (1 << s->depthx); x++) {
                s->var_values[VAR_X] = x;
                res = av_expr_eval(s->comp_expr[p], s->var_values, s);
                if (isnan(res)) {
                    av_log(ctx, AV_LOG_ERROR,
                           "Error when evaluating the expression '%s' for the values %d and %d for the component %d.\n",
                           s->comp_expr_str[p], x, y, p);
                    return AVERROR(EINVAL);
                }

                s->lut[p][(y << s->depthx) + x] = res;
            }
        }
    }

    return ff_framesync_configure(&s->fs);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    LUT2Context *s = inlink->dst->priv;
    return ff_framesync_filter_frame(&s->fs, inlink, buf);
}

static int request_frame(AVFilterLink *outlink)
{
    LUT2Context *s = outlink->src->priv;
    return ff_framesync_request_frame(&s->fs, outlink);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "srcx",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_inputx,
    },
    {
        .name         = "srcy",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_inputy,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFILTER_DEFINE_CLASS(lut2);

AVFilter ff_vf_lut2 = {
    .name          = "lut2",
    .description   = NULL_IF_CONFIG_SMALL("Compute and apply a lookup table from two video inputs."),
    .priv_size     = sizeof(LUT2Context),
    .priv_class    = &lut2_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};