path: root/libavfilter/af_stereotools.c (plain)
blob: 8ab184df11c01715881461777375fecba667fe2a
/*
 * Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"

typedef struct StereoToolsContext {
    const AVClass *class;

    int softclip;
    int mute_l;
    int mute_r;
    int phase_l;
    int phase_r;
    int mode;
    double slev;
    double sbal;
    double mlev;
    double mpan;
    double phase;
    double base;
    double delay;
    double balance_in;
    double balance_out;
    double phase_sin_coef;
    double phase_cos_coef;
    double sc_level;
    double inv_atan_shape;
    double level_in;
    double level_out;

    double *buffer;
    int length;
    int pos;
} StereoToolsContext;

#define OFFSET(x) offsetof(StereoToolsContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption stereotools_options[] = {
    { "level_in", "set level in", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
    { "level_out", "set level out", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
    { "balance_in", "set balance in", OFFSET(balance_in), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
    { "balance_out", "set balance out", OFFSET(balance_out), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
    { "softclip", "enable softclip", OFFSET(softclip), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
    { "mutel", "mute L", OFFSET(mute_l), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
    { "muter", "mute R", OFFSET(mute_r), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
    { "phasel", "phase L", OFFSET(phase_l), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
    { "phaser", "phase R", OFFSET(phase_r), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
    { "mode", "set stereo mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 6, A, "mode" },
    { "lr>lr", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A, "mode" },
    { "lr>ms", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A, "mode" },
    { "ms>lr", 0, 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, A, "mode" },
    { "lr>ll", 0, 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, A, "mode" },
    { "lr>rr", 0, 0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, A, "mode" },
    { "lr>l+r", 0, 0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, A, "mode" },
    { "lr>rl", 0, 0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, A, "mode" },
    { "slev", "set side level", OFFSET(slev), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
    { "sbal", "set side balance", OFFSET(sbal), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
    { "mlev", "set middle level", OFFSET(mlev), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
    { "mpan", "set middle pan", OFFSET(mpan), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
    { "base", "set stereo base", OFFSET(base), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
    { "delay", "set delay", OFFSET(delay), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -20, 20, A },
    { "sclevel", "set S/C level", OFFSET(sc_level), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 100, A },
    { "phase", "set stereo phase", OFFSET(phase), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 360, A },
    { NULL }
};
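
/*
 * Example command line (illustrative values): convert the L/R input to
 * mid/side and attenuate the side signal:
 *
 *     ffmpeg -i in.wav -af "stereotools=mode=lr>ms:slev=0.5" out.wav
 */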

AVFILTER_DEFINE_CLASS(stereotools);

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;
    int ret;

    if ((ret = ff_add_format                 (&formats, AV_SAMPLE_FMT_DBL  )) < 0 ||
        (ret = ff_set_common_formats         (ctx     , formats            )) < 0 ||
        (ret = ff_add_channel_layout         (&layout , AV_CH_LAYOUT_STEREO)) < 0 ||
        (ret = ff_set_common_channel_layouts (ctx     , layout             )) < 0)
        return ret;

    formats = ff_all_samplerates();
    return ff_set_common_samplerates(ctx, formats);
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    StereoToolsContext *s = ctx->priv;

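    /* Allocate a ring buffer holding 50 ms of interleaved stereo samples
     * (two buffer slots per sample frame), used for the delay option. */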
    s->length = 2 * inlink->sample_rate * 0.05;
    if (s->length <= 1 || s->length & 1) {
        av_log(ctx, AV_LOG_ERROR, "sample rate is too small\n");
        return AVERROR(EINVAL);
    }
    s->buffer = av_calloc(s->length, sizeof(*s->buffer));
    if (!s->buffer)
        return AVERROR(ENOMEM);

    s->inv_atan_shape = 1.0 / atan(s->sc_level);
    s->phase_cos_coef = cos(s->phase / 180 * M_PI);
    s->phase_sin_coef = sin(s->phase / 180 * M_PI);

    return 0;
}

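/*
 * Per-sample processing chain: input level and balance, optional soft
 * clipping, the selected stereo mode matrix, per-channel mute and phase
 * inversion, inter-channel delay, stereo base (width), phase rotation,
 * and finally output balance and level.
 */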
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    StereoToolsContext *s = ctx->priv;
    const double *src = (const double *)in->data[0];
    const double sb = s->base < 0 ? s->base * 0.5 : s->base;
    const double sbal = 1 + s->sbal;
    const double mpan = 1 + s->mpan;
    const double slev = s->slev;
    const double mlev = s->mlev;
    const double balance_in = s->balance_in;
    const double balance_out = s->balance_out;
    const double level_in = s->level_in;
    const double level_out = s->level_out;
    const double sc_level = s->sc_level;
    const double delay = s->delay;
    const int length = s->length;
    const int mute_l = s->mute_l;
    const int mute_r = s->mute_r;
    const int phase_l = s->phase_l;
    const int phase_r = s->phase_r;
    double *buffer = s->buffer;
    AVFrame *out;
    double *dst;
    int nbuf = inlink->sample_rate * (fabs(delay) / 1000.);
    int n;

    nbuf -= nbuf % 2;
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

    for (n = 0; n < in->nb_samples; n++, src += 2, dst += 2) {
        double L = src[0], R = src[1], l, r, m, S;

        L *= level_in;
        R *= level_in;

        L *= 1. - FFMAX(0., balance_in);
        R *= 1. + FFMIN(0., balance_in);

        if (s->softclip) {
            R = s->inv_atan_shape * atan(R * sc_level);
            L = s->inv_atan_shape * atan(L * sc_level);
        }

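        /*
         * Stereo mode matrix selected with the "mode" option:
         * 0 lr>lr, 1 lr>ms, 2 ms>lr, 3 lr>ll, 4 lr>rr, 5 lr>l+r, 6 lr>rl.
         */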
        switch (s->mode) {
        case 0:
            m = (L + R) * 0.5;
            S = (L - R) * 0.5;
            l = m * mlev * FFMIN(1., 2. - mpan) + S * slev * FFMIN(1., 2. - sbal);
            r = m * mlev * FFMIN(1., mpan) - S * slev * FFMIN(1., sbal);
            L = l;
            R = r;
            break;
        case 1:
            l = L * FFMIN(1., 2. - sbal);
            r = R * FFMIN(1., sbal);
            L = 0.5 * (l + r) * mlev;
            R = 0.5 * (l - r) * slev;
            break;
        case 2:
            l = L * mlev * FFMIN(1., 2. - mpan) + R * slev * FFMIN(1., 2. - sbal);
            r = L * mlev * FFMIN(1., mpan) - R * slev * FFMIN(1., sbal);
            L = l;
            R = r;
            break;
        case 3:
            R = L;
            break;
        case 4:
            L = R;
            break;
        case 5:
            L = (L + R) / 2;
            R = L;
            break;
        case 6:
            l = L;
            L = R;
            R = l;
            m = (L + R) * 0.5;
            S = (L - R) * 0.5;
            l = m * mlev * FFMIN(1., 2. - mpan) + S * slev * FFMIN(1., 2. - sbal);
            r = m * mlev * FFMIN(1., mpan) - S * slev * FFMIN(1., sbal);
            L = l;
            R = r;
            break;
        }

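        /* Mute a channel by multiplying with 0; invert its phase by
         * multiplying with -1 when phasel/phaser is set. */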
        L *= 1. - mute_l;
        R *= 1. - mute_r;

        L *= (2. * (1. - phase_l)) - 1.;
        R *= (2. * (1. - phase_r)) - 1.;

        buffer[s->pos  ] = L;
        buffer[s->pos+1] = R;

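        /* Read the delayed sample from the ring buffer: a positive "delay"
         * (in milliseconds) delays the right channel, a negative one the left. */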
        if (delay > 0.) {
            R = buffer[(s->pos - (int)nbuf + 1 + length) % length];
        } else if (delay < 0.) {
            L = buffer[(s->pos - (int)nbuf + length) % length];
        }

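        /* Stereo base (width): base > 0 emphasizes the difference between
         * the channels, base < 0 blends them towards mono. */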
        l = L + sb * L - sb * R;
        r = R + sb * R - sb * L;

        L = l;
        R = r;

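        /* Rotate the stereo image by the "phase" angle using the sin/cos
         * coefficients precomputed in config_input(). */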
        l = L * s->phase_cos_coef - R * s->phase_sin_coef;
        r = L * s->phase_sin_coef + R * s->phase_cos_coef;

        L = l;
        R = r;

        s->pos = (s->pos + 2) % s->length;

        L *= 1. - FFMAX(0., balance_out);
        R *= 1. + FFMIN(0., balance_out);

        L *= level_out;
        R *= level_out;

        dst[0] = L;
        dst[1] = R;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    StereoToolsContext *s = ctx->priv;

    av_freep(&s->buffer);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_stereotools = {
    .name           = "stereotools",
    .description    = NULL_IF_CONFIG_SMALL("Apply various stereo tools."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(StereoToolsContext),
    .priv_class     = &stereotools_class,
    .uninit         = uninit,
    .inputs         = inputs,
    .outputs        = outputs,
};