path: root/libavfilter/vf_owdenoise.c (plain)
blob: e0a953fba2cd9ae008b298f80d222134a6bc560f
/*
 * Copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @todo try to change to int
 * @todo try lifting based implementation
 * @todo optimize optimize optimize
 * @todo hard thresholding
 * @todo use QP to decide filter strength
 * @todo wavelet normalization / least squares optimal signal vs. noise thresholds
 */

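/*
 * Overview (a summary of the code below): each plane is loaded into a float
 * working buffer, decomposed with an undecimated ("à trous") wavelet transform
 * over several levels, the detail coefficients are soft-thresholded by the
 * configured strength, and the plane is reconstructed, with ordered dithering
 * when writing back to 8-bit samples.
 */
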
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "internal.h"

typedef struct {
    const AVClass *class;
    double luma_strength;
    double chroma_strength;
    int depth;               ///< number of wavelet decomposition levels
    float *plane[16+1][4];   ///< float scratch planes: [level][subband]
    int linesize;            ///< line size of the float planes, in samples
    int hsub, vsub;          ///< chroma subsampling shifts (log2)
    int pixel_depth;         ///< bit depth of the input pixel format
} OWDenoiseContext;

#define OFFSET(x) offsetof(OWDenoiseContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption owdenoise_options[] = {
    { "depth",           "set depth",           OFFSET(depth),           AV_OPT_TYPE_INT,    {.i64 =   8}, 8,   16, FLAGS },
    { "luma_strength",   "set luma strength",   OFFSET(luma_strength),   AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
    { "ls",              "set luma strength",   OFFSET(luma_strength),   AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
    { "chroma_strength", "set chroma strength", OFFSET(chroma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
    { "cs",              "set chroma strength", OFFSET(chroma_strength), AV_OPT_TYPE_DOUBLE, {.dbl = 1.0}, 0, 1000, FLAGS },
    { NULL }
};
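
/*
 * Example usage (illustrative command line; file names are placeholders):
 *   ffmpeg -i in.mkv -vf "owdenoise=depth=8:ls=4:cs=8" out.mkv
 */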

AVFILTER_DEFINE_CLASS(owdenoise);

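/* 8x8 ordered-dither matrix (values 0..63), used when rounding the filtered
 * float samples back to 8-bit output. */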
DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
    {  0, 48, 12, 60,  3, 51, 15, 63 },
    { 32, 16, 44, 28, 35, 19, 47, 31 },
    {  8, 56,  4, 52, 11, 59,  7, 55 },
    { 40, 24, 36, 20, 43, 27, 39, 23 },
    {  2, 50, 14, 62,  1, 49, 13, 61 },
    { 34, 18, 46, 30, 33, 17, 45, 29 },
    { 10, 58,  6, 54,  9, 57,  5, 53 },
    { 42, 26, 38, 22, 41, 25, 37, 21 },
};

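/*
 * Analysis (coeff) and synthesis (icoeff) halves of the symmetric biorthogonal
 * 9/7-tap (CDF 9/7) wavelet filter bank, with the sqrt(2) normalization folded
 * into the taps. Only one half of each symmetric filter is stored, and the
 * high-pass half has only four explicit taps (the fifth defaults to zero);
 * decompose()/compose() mirror the taps around the center sample.
 */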
static const double coeff[2][5] = {
    {
         0.6029490182363579  * M_SQRT2,
         0.2668641184428723  * M_SQRT2,
        -0.07822326652898785 * M_SQRT2,
        -0.01686411844287495 * M_SQRT2,
         0.02674875741080976 * M_SQRT2,
    },{
         1.115087052456994   / M_SQRT2,
        -0.5912717631142470  / M_SQRT2,
        -0.05754352622849957 / M_SQRT2,
         0.09127176311424948 / M_SQRT2,
    }
};

static const double icoeff[2][5] = {
    {
         1.115087052456994   / M_SQRT2,
         0.5912717631142470  / M_SQRT2,
        -0.05754352622849957 / M_SQRT2,
        -0.09127176311424948 / M_SQRT2,
    },{
         0.6029490182363579  * M_SQRT2,
        -0.2668641184428723  * M_SQRT2,
        -0.07822326652898785 * M_SQRT2,
         0.01686411844287495 * M_SQRT2,
         0.02674875741080976 * M_SQRT2,
    }
};

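/*
 * 1-D analysis step: convolve one row or column (sample stride "linesize")
 * with the symmetric low-pass and high-pass filters, mirroring at the
 * borders, and write the two subbands at the same positions as the input
 * (no decimation). compose() below is the matching synthesis step; it
 * averages the low-pass and high-pass reconstructions.
 */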
static inline void decompose(float *dst_l, float *dst_h, const float *src,
                             int linesize, int w)
{
    int x, i;
    for (x = 0; x < w; x++) {
        double sum_l = src[x * linesize] * coeff[0][0];
        double sum_h = src[x * linesize] * coeff[1][0];
        for (i = 1; i <= 4; i++) {
            const double s = src[avpriv_mirror(x - i, w - 1) * linesize]
                           + src[avpriv_mirror(x + i, w - 1) * linesize];

            sum_l += coeff[0][i] * s;
            sum_h += coeff[1][i] * s;
        }
        dst_l[x * linesize] = sum_l;
        dst_h[x * linesize] = sum_h;
    }
}

static inline void compose(float *dst, const float *src_l, const float *src_h,
                           int linesize, int w)
{
    int x, i;
    for (x = 0; x < w; x++) {
        double sum_l = src_l[x * linesize] * icoeff[0][0];
        double sum_h = src_h[x * linesize] * icoeff[1][0];
        for (i = 1; i <= 4; i++) {
            const int x0 = avpriv_mirror(x - i, w - 1) * linesize;
            const int x1 = avpriv_mirror(x + i, w - 1) * linesize;

            sum_l += icoeff[0][i] * (src_l[x0] + src_l[x1]);
            sum_h += icoeff[1][i] * (src_h[x0] + src_h[x1]);
        }
        dst[x * linesize] = (sum_l + sum_h) * 0.5;
    }
}

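/*
 * Apply the 1-D transform along one axis of a plane. "step" implements the
 * undecimated ("à trous") scheme: at decomposition level l, step = 1 << l,
 * so the plane is processed as "step" interleaved sub-grids and the filter
 * taps are effectively spread step samples apart.
 */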
static inline void decompose2D(float *dst_l, float *dst_h, const float *src,
                               int xlinesize, int ylinesize,
                               int step, int w, int h)
{
    int y, x;
    for (y = 0; y < h; y++)
        for (x = 0; x < step; x++)
            decompose(dst_l + ylinesize*y + xlinesize*x,
                      dst_h + ylinesize*y + xlinesize*x,
                      src   + ylinesize*y + xlinesize*x,
                      step * xlinesize, (w - x + step - 1) / step);
}

static inline void compose2D(float *dst, const float *src_l, const float *src_h,
                             int xlinesize, int ylinesize,
                             int step, int w, int h)
{
    int y, x;
    for (y = 0; y < h; y++)
        for (x = 0; x < step; x++)
            compose(dst   + ylinesize*y + xlinesize*x,
                    src_l + ylinesize*y + xlinesize*x,
                    src_h + ylinesize*y + xlinesize*x,
                    step * xlinesize, (w - x + step - 1) / step);
}

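/*
 * One full 2-D decomposition level: filter horizontally into temp[0]/temp[1],
 * then vertically, producing four subbands in dst[0..3]; dst[0] is the
 * low-pass (LL) band that feeds the next level, dst[1..3] are detail bands.
 * compose2D2() is the inverse.
 */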
static void decompose2D2(float *dst[4], float *src, float *temp[2],
                         int linesize, int step, int w, int h)
{
    decompose2D(temp[0], temp[1], src,     1, linesize, step, w, h);
    decompose2D( dst[0],  dst[1], temp[0], linesize, 1, step, h, w);
    decompose2D( dst[2],  dst[3], temp[1], linesize, 1, step, h, w);
}

static void compose2D2(float *dst, float *src[4], float *temp[2],
                       int linesize, int step, int w, int h)
{
    compose2D(temp[0], src[0], src[1], linesize, 1, step, h, w);
    compose2D(temp[1], src[2], src[3], linesize, 1, step, h, w);
    compose2D(dst, temp[0], temp[1], 1, linesize, step, w, h);
}

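/*
 * Denoise one plane: load the samples into the float working plane, run
 * "depth" decomposition levels (clamped so that 1 << depth still fits the
 * plane), soft-threshold the detail coefficients of every level by
 * "strength", reconstruct, and store the result, with ordered dithering
 * when writing 8-bit output.
 */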
static void filter(OWDenoiseContext *s,
                   uint8_t *dst, int dst_linesize,
                   const uint8_t *src, int src_linesize,
                   int width, int height, double strength)
{
    int x, y, i, j, depth = s->depth;

    while (1<<depth > width || 1<<depth > height)
        depth--;

    /* load the source plane into the float working plane */
    if (s->pixel_depth <= 8) {
        for (y = 0; y < height; y++)
            for (x = 0; x < width; x++)
                s->plane[0][0][y*s->linesize + x] = src[y*src_linesize + x];
    } else {
        const uint16_t *src16 = (const uint16_t *)src;

        src_linesize /= 2;
        for (y = 0; y < height; y++)
            for (x = 0; x < width; x++)
                s->plane[0][0][y*s->linesize + x] = src16[y*src_linesize + x];
    }

    /* forward transform: one set of subbands per level */
    for (i = 0; i < depth; i++)
        decompose2D2(s->plane[i + 1], s->plane[i][0], s->plane[0] + 1, s->linesize, 1<<i, width, height);

    /* soft-threshold the detail bands (j = 1..3) of every level */
    for (i = 0; i < depth; i++) {
        for (j = 1; j < 4; j++) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++) {
                    double v = s->plane[i + 1][j][y*s->linesize + x];
                    if      (v >  strength) v -= strength;
                    else if (v < -strength) v += strength;
                    else                    v = 0;
                    s->plane[i + 1][j][x + y*s->linesize] = v;
                }
            }
        }
    }

    /* inverse transform */
    for (i = depth-1; i >= 0; i--)
        compose2D2(s->plane[i][0], s->plane[i + 1], s->plane[0] + 1, s->linesize, 1<<i, width, height);

    /* store the result, with ordered dithering for 8-bit output */
    if (s->pixel_depth <= 8) {
        for (y = 0; y < height; y++) {
            for (x = 0; x < width; x++) {
                i = s->plane[0][0][y*s->linesize + x] + dither[x&7][y&7]*(1.0/64) + 1.0/128; // yes the rounding is insane but optimal :)
                if ((unsigned)i > 255U) i = ~(i >> 31);
                dst[y*dst_linesize + x] = i;
            }
        }
    } else {
        uint16_t *dst16 = (uint16_t *)dst;

        dst_linesize /= 2;
        for (y = 0; y < height; y++) {
            for (x = 0; x < width; x++) {
                i = s->plane[0][0][y*s->linesize + x];
                dst16[y*dst_linesize + x] = i;
            }
        }
    }
}

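/*
 * Filter each plane whose strength is non-zero. When the input frame is
 * writable the planes are filtered in place; otherwise a new output frame is
 * allocated and any untouched plane (including alpha) is copied over.
 */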
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    OWDenoiseContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    const int cw = AV_CEIL_RSHIFT(inlink->w, s->hsub);
    const int ch = AV_CEIL_RSHIFT(inlink->h, s->vsub);

    if (av_frame_is_writable(in)) {
        out = in;

        if (s->luma_strength > 0)
            filter(s, out->data[0], out->linesize[0], in->data[0], in->linesize[0], inlink->w, inlink->h, s->luma_strength);
        if (s->chroma_strength > 0) {
            filter(s, out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, s->chroma_strength);
            filter(s, out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, s->chroma_strength);
        }
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);

        if (s->luma_strength > 0) {
            filter(s, out->data[0], out->linesize[0], in->data[0], in->linesize[0], inlink->w, inlink->h, s->luma_strength);
        } else {
            av_image_copy_plane(out->data[0], out->linesize[0], in ->data[0], in ->linesize[0], inlink->w, inlink->h);
        }
        if (s->chroma_strength > 0) {
            filter(s, out->data[1], out->linesize[1], in->data[1], in->linesize[1], cw, ch, s->chroma_strength);
            filter(s, out->data[2], out->linesize[2], in->data[2], in->linesize[2], cw, ch, s->chroma_strength);
        } else {
            /* untouched chroma planes are copied with their subsampled dimensions */
            av_image_copy_plane(out->data[1], out->linesize[1], in ->data[1], in ->linesize[1], cw, ch);
            av_image_copy_plane(out->data[2], out->linesize[2], in ->data[2], in ->linesize[2], cw, ch);
        }

        if (in->data[3])
            av_image_copy_plane(out->data[3], out->linesize[3],
                                in ->data[3], in ->linesize[3],
                                inlink->w, inlink->h);
        av_frame_free(&in);
    }

    return ff_filter_frame(outlink, out);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P,
        AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV440P10,
        AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static int config_input(AVFilterLink *inlink)
{
    int i, j;
    OWDenoiseContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int h = FFALIGN(inlink->h, 16);

    s->hsub = desc->log2_chroma_w;
    s->vsub = desc->log2_chroma_h;
    s->pixel_depth = desc->comp[0].depth;

    s->linesize = FFALIGN(inlink->w, 16);
    /* allocate one float scratch plane per (level, subband) pair */
    for (j = 0; j < 4; j++) {
        for (i = 0; i <= s->depth; i++) {
            s->plane[i][j] = av_malloc_array(s->linesize, h * sizeof(s->plane[0][0][0]));
            if (!s->plane[i][j])
                return AVERROR(ENOMEM);
        }
    }
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    int i, j;
    OWDenoiseContext *s = ctx->priv;

    for (j = 0; j < 4; j++)
        for (i = 0; i <= s->depth; i++)
            av_freep(&s->plane[i][j]);
}

static const AVFilterPad owdenoise_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad owdenoise_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_owdenoise = {
    .name          = "owdenoise",
    .description   = NULL_IF_CONFIG_SMALL("Denoise using wavelets."),
    .priv_size     = sizeof(OWDenoiseContext),
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = owdenoise_inputs,
    .outputs       = owdenoise_outputs,
    .priv_class    = &owdenoise_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};