blob: aadc5c1c2a07af2eb9d9536545c113f7e86ff141
1 | /* |
2 | * Copyright (c) 2012 Stefano Sabatini |
3 | * |
4 | * This file is part of FFmpeg. |
5 | * |
6 | * FFmpeg is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2.1 of the License, or (at your option) any later version. |
10 | * |
11 | * FFmpeg is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with FFmpeg; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
19 | */ |
20 | |
21 | /** |
22 | * @file |
23 | * audio to video multimedia filter |
24 | */ |
25 | |
26 | #include "libavutil/avassert.h" |
27 | #include "libavutil/avstring.h" |
28 | #include "libavutil/channel_layout.h" |
29 | #include "libavutil/opt.h" |
30 | #include "libavutil/parseutils.h" |
31 | #include "avfilter.h" |
32 | #include "formats.h" |
33 | #include "audio.h" |
34 | #include "video.h" |
35 | #include "internal.h" |
36 | |
/* Display modes selectable via the "mode" option. */
enum ShowWavesMode {
    MODE_POINT,         // one point per sample
    MODE_LINE,          // vertical line from the band center to the sample
    MODE_P2P,           // points joined by lines between consecutive samples
    MODE_CENTERED_LINE, // vertical line centered on the band axis
    MODE_NB,            // number of modes, not a selectable value
};
44 | |
/* Amplitude scales selectable via the "scale" option. */
enum ShowWavesScale {
    SCALE_LIN,  // linear
    SCALE_LOG,  // logarithmic
    SCALE_SQRT, // square root
    SCALE_CBRT, // cubic root
    SCALE_NB,   // number of scales, not a selectable value
};
52 | |
/* Node of the singly linked list used by showwavespic to queue all
 * incoming audio frames until EOF, when they are rendered at once. */
struct frame_node {
    AVFrame *frame;          // queued (owned) audio frame
    struct frame_node *next; // next node, NULL at the tail
};
57 | |
typedef struct {
    const AVClass *class;
    int w, h;           // output video size in pixels ("size" option)
    AVRational rate;    // output frame rate ("rate" option)
    char *colors;       // '|'-separated per-channel color list
    int buf_idx;        // output column currently being drawn
    int16_t *buf_idy;   /* y coordinate of previous sample for each channel */
    AVFrame *outpicref; // video frame currently being filled, NULL if none
    int n;              // number of input samples folded into one column
    int pixstep;        // bytes per output pixel (1 gray, 4 RGBA)
    int sample_count_mod; // samples consumed so far in the current column
    int mode;           ///< ShowWavesMode
    int scale;          ///< ShowWavesScale
    int split_channels; // draw each channel in its own horizontal band
    uint8_t *fg;        // per-channel foreground color, 4 bytes per channel

    /* maps a sample to a y coordinate (or to a height in centered mode) */
    int (*get_h)(int16_t sample, int height);
    /* draws one sample into the current output column */
    void (*draw_sample)(uint8_t *buf, int height, int linesize,
                        int16_t *prev_y, const uint8_t color[4], int h);

    /* single picture */
    int single_pic;                  // set for the showwavespic variant
    struct frame_node *audio_frames; // queued input frames (single_pic only)
    struct frame_node *last_frame;   // tail of audio_frames for O(1) append
    int64_t total_samples;           // queued per-channel sample count
    int64_t *sum; /* abs sum of the samples per channel */
} ShowWavesContext;
85 | |
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* Options of the realtime "showwaves" filter; the single-picture variant
 * has its own reduced table further down. */
static const AVOption showwaves_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
        { "point", "draw a point for each sample",         0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT},         .flags=FLAGS, .unit="mode"},
        { "line",  "draw a line for each sample",          0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE},          .flags=FLAGS, .unit="mode"},
        { "p2p",   "draw a line between samples",          0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P},           .flags=FLAGS, .unit="mode"},
        { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
    /* n == 0 means "derive from sample rate, width and frame rate" */
    { "n",    "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "r",    "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
        { "lin", "linear",         0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN},  .flags=FLAGS, .unit="scale"},
        { "log", "logarithmic",    0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG},  .flags=FLAGS, .unit="scale"},
        { "sqrt", "square root",   0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
        { "cbrt", "cubic root",    0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
    { NULL }
};
109 | |
110 | AVFILTER_DEFINE_CLASS(showwaves); |
111 | |
112 | static av_cold void uninit(AVFilterContext *ctx) |
113 | { |
114 | ShowWavesContext *showwaves = ctx->priv; |
115 | |
116 | av_frame_free(&showwaves->outpicref); |
117 | av_freep(&showwaves->buf_idy); |
118 | av_freep(&showwaves->fg); |
119 | |
120 | if (showwaves->single_pic) { |
121 | struct frame_node *node = showwaves->audio_frames; |
122 | while (node) { |
123 | struct frame_node *tmp = node; |
124 | |
125 | node = node->next; |
126 | av_frame_free(&tmp->frame); |
127 | av_freep(&tmp); |
128 | } |
129 | av_freep(&showwaves->sum); |
130 | showwaves->last_frame = NULL; |
131 | } |
132 | } |
133 | |
static int query_formats(AVFilterContext *ctx)
{
    /* Declare the supported formats on both ends of the filter:
     * s16 interleaved audio (any layout/rate) in, RGBA or GRAY8 video out.
     * The ff_*_ref() helpers take ownership of the lists, so nothing is
     * freed here on the error paths. */
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
    int ret;

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
        return ret;

    layouts = ff_all_channel_layouts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
        return ret;

    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}
164 | |
/* Linear mapping of a sample to a y coordinate: silence sits at the band
 * center (height/2); positive samples map to smaller y (drawn higher). */
static int get_lin_h(int16_t sample, int height)
{
    return height/2 - av_rescale(sample, height/2, INT16_MAX);
}
169 | |
/* Linear mapping of |sample| to a line height in [0, height], used by the
 * centered-line mode. */
static int get_lin_h2(int16_t sample, int height)
{
    return av_rescale(FFABS(sample), height, INT16_MAX);
}
174 | |
/* Logarithmic mapping of a sample to a y coordinate around the band
 * center; the sign of the sample picks the direction from the center. */
static int get_log_h(int16_t sample, int height)
{
    const int    mid = height / 2;
    const double mag = log10(1 + FFABS(sample)) * mid / log10(1 + INT16_MAX);

    return mid - FFSIGN(sample) * mag;
}
179 | |
/* Logarithmic mapping of |sample| to a line height in [0, height],
 * used by the centered-line mode. */
static int get_log_h2(int16_t sample, int height)
{
    const double num = log10(1 + FFABS(sample));
    const double den = log10(1 + INT16_MAX);

    return num * height / den;
}
184 | |
/* Square-root mapping of a sample to a y coordinate around the band
 * center; the sign of the sample picks the direction from the center. */
static int get_sqrt_h(int16_t sample, int height)
{
    const int    mid = height / 2;
    const double mag = sqrt(FFABS(sample)) * mid / sqrt(INT16_MAX);

    return mid - FFSIGN(sample) * mag;
}
189 | |
/* Square-root mapping of |sample| to a line height in [0, height],
 * used by the centered-line mode. */
static int get_sqrt_h2(int16_t sample, int height)
{
    const double num = sqrt(FFABS(sample));
    const double den = sqrt(INT16_MAX);

    return num * height / den;
}
194 | |
/* Cubic-root mapping of a sample to a y coordinate around the band
 * center; the sign of the sample picks the direction from the center. */
static int get_cbrt_h(int16_t sample, int height)
{
    const int    mid = height / 2;
    const double mag = cbrt(FFABS(sample)) * mid / cbrt(INT16_MAX);

    return mid - FFSIGN(sample) * mag;
}
199 | |
/* Cubic-root mapping of |sample| to a line height in [0, height],
 * used by the centered-line mode. */
static int get_cbrt_h2(int16_t sample, int height)
{
    const double num = cbrt(FFABS(sample));
    const double den = cbrt(INT16_MAX);

    return num * height / den;
}
204 | |
static void draw_sample_point_rgba(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    /* Plot a single RGBA pixel at row h of the current column; rows
     * outside [0, height) are silently dropped. Components are summed,
     * so overlapping channels accumulate additively. prev_y is unused. */
    uint8_t *px;
    int c;

    if (h < 0 || h >= height)
        return;
    px = buf + h * linesize;
    for (c = 0; c < 4; c++)
        px[c] += color[c];
}
216 | |
static void draw_sample_line_rgba(uint8_t *buf, int height, int linesize,
                                  int16_t *prev_y,
                                  const uint8_t color[4], int h)
{
    /* Draw a vertical RGBA line in the current column between the band
     * center (height/2) and the sample's y coordinate h; components are
     * summed so overlapping channels blend additively. prev_y is unused
     * in this mode. */
    int k;
    int start   = height/2;
    int end = av_clip(h, 0, height-1);
    /* NOTE(review): FFSWAP with int16_t truncates start/end if the
     * output height exceeds INT16_MAX — confirm whether such sizes are
     * reachable through the "size" option. */
    if (start > end)
        FFSWAP(int16_t, start, end);
    for (k = start; k < end; k++) {
        buf[k * linesize + 0] += color[0];
        buf[k * linesize + 1] += color[1];
        buf[k * linesize + 2] += color[2];
        buf[k * linesize + 3] += color[3];
    }
}
233 | |
static void draw_sample_p2p_rgba(uint8_t *buf, int height, int linesize,
                                 int16_t *prev_y,
                                 const uint8_t color[4], int h)
{
    /* Point-to-point RGBA mode: plot the sample at row h and connect it
     * to the previous sample's row (*prev_y) with a vertical segment,
     * excluding both endpoints. *prev_y is updated to h on every call,
     * even when h falls outside the visible range. */
    int k;
    if (h >= 0 && h < height) {
        buf[h * linesize + 0] += color[0];
        buf[h * linesize + 1] += color[1];
        buf[h * linesize + 2] += color[2];
        buf[h * linesize + 3] += color[3];
        /* NOTE(review): *prev_y == 0 doubles as "no previous sample", so
         * a genuine previous sample at row 0 is never connected — confirm
         * this is acceptable. */
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++) {
                buf[k * linesize + 0] += color[0];
                buf[k * linesize + 1] += color[1];
                buf[k * linesize + 2] += color[2];
                buf[k * linesize + 3] += color[3];
            }
        }
    }
    *prev_y = h;
}
259 | |
static void draw_sample_cline_rgba(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    /* Centered-line RGBA mode: paint a vertical segment of h rows
     * centered in the channel band. Here h is an amplitude (produced by
     * one of the *_h2 mappers), not a y coordinate. prev_y is unused. */
    const int top    = (height - h) / 2;
    const int bottom = top + h;
    int row, c;

    for (row = top; row < bottom; row++) {
        uint8_t *px = buf + row * linesize;
        for (c = 0; c < 4; c++)
            px[c] += color[c];
    }
}
274 | |
static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    /* Plot one gray pixel at row h of the current column; out-of-range
     * rows are dropped. Only color[0] is used for gray output, and it is
     * accumulated so overlapping channels add up. prev_y is unused. */
    const int visible = h >= 0 && h < height;

    if (visible)
        buf[h * linesize] += color[0];
}
282 | |
static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
                                  int16_t *prev_y,
                                  const uint8_t color[4], int h)
{
    /* Gray variant of the line mode: paint rows between the band center
     * (height/2) and the sample's y coordinate h, accumulating color[0].
     * prev_y is unused in this mode. */
    int k;
    int start   = height/2;
    int end = av_clip(h, 0, height-1);
    /* NOTE(review): FFSWAP with int16_t truncates start/end if the
     * output height exceeds INT16_MAX — same caveat as the RGBA path. */
    if (start > end)
        FFSWAP(int16_t, start, end);
    for (k = start; k < end; k++)
        buf[k * linesize] += color[0];
}
295 | |
static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
                                 int16_t *prev_y,
                                 const uint8_t color[4], int h)
{
    /* Point-to-point gray mode: plot the sample at row h and connect it
     * to the previous sample's row (*prev_y) with a vertical segment,
     * excluding both endpoints. *prev_y is updated to h on every call,
     * even when h falls outside the visible range. */
    int k;
    if (h >= 0 && h < height) {
        buf[h * linesize] += color[0];
        /* NOTE(review): *prev_y == 0 doubles as "no previous sample" —
         * same caveat as the RGBA path. */
        if (*prev_y && h != *prev_y) {
            int start = *prev_y;
            int end = av_clip(h, 0, height-1);
            if (start > end)
                FFSWAP(int16_t, start, end);
            for (k = start + 1; k < end; k++)
                buf[k * linesize] += color[0];
        }
    }
    *prev_y = h;
}
314 | |
static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
                                   int16_t *prev_y,
                                   const uint8_t color[4], int h)
{
    /* Centered-line gray mode: paint a vertical segment of h rows
     * centered in the channel band, accumulating color[0]. Here h is an
     * amplitude, not a y coordinate. prev_y is unused. */
    const int top    = (height - h) / 2;
    const int bottom = top + h;
    int row;

    for (row = top; row < bottom; row++)
        buf[row * linesize] += color[0];
}
325 | |
static int config_output(AVFilterLink *outlink)
{
    /* Configure the video output: derive the samples-per-column count and
     * the output frame rate, select the drawing and amplitude-mapping
     * callbacks from the pixel format / mode / scale, and precompute the
     * per-channel foreground colors. Returns 0 or a negative AVERROR. */
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int nb_channels = inlink->channels;
    char *colors, *saveptr = NULL;
    uint8_t x;
    int ch;

    /* single-picture mode folds samples per column itself, so draw 1:1 */
    if (showwaves->single_pic)
        showwaves->n = 1;

    /* if n was not set by the user, pick it so that w columns cover one
     * output-frame duration worth of input samples (rounded) */
    if (!showwaves->n)
        showwaves->n = FFMAX(1, ((double)inlink->sample_rate / (showwaves->w * av_q2d(showwaves->rate))) + 0.5);

    showwaves->buf_idx = 0;
    if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
        av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
        return AVERROR(ENOMEM);
    }
    outlink->w = showwaves->w;
    outlink->h = showwaves->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    /* effective rate: sample_rate / (n samples per column * w columns) */
    outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
                                   (AVRational){showwaves->w,1});

    av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
           showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);

    /* select the per-sample drawing callback from pixel format and mode */
    switch (outlink->format) {
    case AV_PIX_FMT_GRAY8:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_gray; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_gray;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_gray;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 1;
        break;
    case AV_PIX_FMT_RGBA:
        switch (showwaves->mode) {
        case MODE_POINT:         showwaves->draw_sample = draw_sample_point_rgba; break;
        case MODE_LINE:          showwaves->draw_sample = draw_sample_line_rgba;  break;
        case MODE_P2P:           showwaves->draw_sample = draw_sample_p2p_rgba;   break;
        case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_rgba; break;
        default:
            return AVERROR_BUG;
        }
        showwaves->pixstep = 4;
        break;
    }

    /* select the amplitude mapper; centered-line mode uses the *_h2
     * variants, which return a height instead of a y coordinate */
    switch (showwaves->scale) {
    case SCALE_LIN:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_lin_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_LOG:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_log_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_SQRT:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_sqrt_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_sqrt_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    case SCALE_CBRT:
        switch (showwaves->mode) {
        case MODE_POINT:
        case MODE_LINE:
        case MODE_P2P:           showwaves->get_h = get_cbrt_h;  break;
        case MODE_CENTERED_LINE: showwaves->get_h = get_cbrt_h2; break;
        default:
            return AVERROR_BUG;
        }
        break;
    }

    showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
    if (!showwaves->fg)
        return AVERROR(ENOMEM);

    colors = av_strdup(showwaves->colors);
    if (!colors)
        return AVERROR(ENOMEM);

    /* multiplication factor, pre-computed to avoid in-loop divisions */
    /* NOTE(review): x can become 0 when n * nb_channels > 255, which
     * renders the waveform invisible — confirm this is acceptable. */
    x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
    if (outlink->format == AV_PIX_FMT_RGBA) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };

        for (ch = 0; ch < nb_channels; ch++) {
            char *color;

            /* colors are '|' or space separated; channels past the end of
             * the list reuse the last parsed color */
            color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
            if (color)
                av_parse_color(fg, color, -1, ctx);
            showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
            showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
            showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
            showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
        }
    } else {
        /* gray output only reads the first component of each fg group */
        for (ch = 0; ch < nb_channels; ch++)
            showwaves->fg[4 * ch + 0] = x;
    }
    av_free(colors);

    return 0;
}
457 | |
458 | inline static int push_frame(AVFilterLink *outlink) |
459 | { |
460 | AVFilterContext *ctx = outlink->src; |
461 | AVFilterLink *inlink = ctx->inputs[0]; |
462 | ShowWavesContext *showwaves = outlink->src->priv; |
463 | int nb_channels = inlink->channels; |
464 | int ret, i; |
465 | |
466 | ret = ff_filter_frame(outlink, showwaves->outpicref); |
467 | showwaves->outpicref = NULL; |
468 | showwaves->buf_idx = 0; |
469 | for (i = 0; i < nb_channels; i++) |
470 | showwaves->buf_idy[i] = 0; |
471 | return ret; |
472 | } |
473 | |
474 | static int push_single_pic(AVFilterLink *outlink) |
475 | { |
476 | AVFilterContext *ctx = outlink->src; |
477 | AVFilterLink *inlink = ctx->inputs[0]; |
478 | ShowWavesContext *showwaves = ctx->priv; |
479 | int64_t n = 0, max_samples = showwaves->total_samples / outlink->w; |
480 | AVFrame *out = showwaves->outpicref; |
481 | struct frame_node *node; |
482 | const int nb_channels = inlink->channels; |
483 | const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h; |
484 | const int linesize = out->linesize[0]; |
485 | const int pixstep = showwaves->pixstep; |
486 | int col = 0; |
487 | int64_t *sum = showwaves->sum; |
488 | |
489 | if (max_samples == 0) { |
490 | av_log(ctx, AV_LOG_ERROR, "Too few samples\n"); |
491 | return AVERROR(EINVAL); |
492 | } |
493 | |
494 | av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", max_samples); |
495 | |
496 | memset(sum, 0, nb_channels); |
497 | |
498 | for (node = showwaves->audio_frames; node; node = node->next) { |
499 | int i; |
500 | const AVFrame *frame = node->frame; |
501 | const int16_t *p = (const int16_t *)frame->data[0]; |
502 | |
503 | for (i = 0; i < frame->nb_samples; i++) { |
504 | int ch; |
505 | |
506 | for (ch = 0; ch < nb_channels; ch++) |
507 | sum[ch] += abs(p[ch + i*nb_channels]) << 1; |
508 | if (n++ == max_samples) { |
509 | for (ch = 0; ch < nb_channels; ch++) { |
510 | int16_t sample = sum[ch] / max_samples; |
511 | uint8_t *buf = out->data[0] + col * pixstep; |
512 | int h; |
513 | |
514 | if (showwaves->split_channels) |
515 | buf += ch*ch_height*linesize; |
516 | av_assert0(col < outlink->w); |
517 | h = showwaves->get_h(sample, ch_height); |
518 | showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h); |
519 | sum[ch] = 0; |
520 | } |
521 | col++; |
522 | n = 0; |
523 | } |
524 | } |
525 | } |
526 | |
527 | return push_frame(outlink); |
528 | } |
529 | |
530 | |
531 | static int request_frame(AVFilterLink *outlink) |
532 | { |
533 | ShowWavesContext *showwaves = outlink->src->priv; |
534 | AVFilterLink *inlink = outlink->src->inputs[0]; |
535 | int ret; |
536 | |
537 | ret = ff_request_frame(inlink); |
538 | if (ret == AVERROR_EOF && showwaves->outpicref) { |
539 | if (showwaves->single_pic) |
540 | push_single_pic(outlink); |
541 | else |
542 | push_frame(outlink); |
543 | } |
544 | |
545 | return ret; |
546 | } |
547 | |
static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
                           const AVFilterLink *inlink, AVFilterLink *outlink,
                           const AVFrame *in)
{
    /* Lazily allocate and black-fill the next output video frame if none
     * is pending. Its pts is the input frame's pts shifted by the offset
     * of sample *p within the input buffer (in interleaved s16 frames),
     * rescaled to the output time base. Returns 0 or AVERROR(ENOMEM). */
    if (!showwaves->outpicref) {
        int j;
        AVFrame *out = showwaves->outpicref =
            ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        out->width = outlink->w;
        out->height = outlink->h;
        out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
                                          av_make_q(1, inlink->sample_rate),
                                          outlink->time_base);
        /* clear the picture; samples are drawn additively on top */
        for (j = 0; j < outlink->h; j++)
            memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
    }
    return 0;
}
568 | |
569 | static av_cold int init(AVFilterContext *ctx) |
570 | { |
571 | ShowWavesContext *showwaves = ctx->priv; |
572 | |
573 | if (!strcmp(ctx->filter->name, "showwavespic")) { |
574 | showwaves->single_pic = 1; |
575 | showwaves->mode = MODE_CENTERED_LINE; |
576 | } |
577 | |
578 | return 0; |
579 | } |
580 | |
581 | #if CONFIG_SHOWWAVES_FILTER |
582 | |
static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    /* Realtime path: draw every sample of the input frame into the
     * current output picture, advancing one output column every n
     * samples, and push a video frame each time the picture is full
     * (buf_idx reaches w). Takes ownership of insamples. Returns 0 or a
     * negative AVERROR. */
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outpicref = showwaves->outpicref;
    int16_t *p = (int16_t *)insamples->data[0];
    int nb_channels = inlink->channels;
    int i, j, ret = 0;
    const int pixstep = showwaves->pixstep;
    const int n = showwaves->n;
    const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;

    /* draw data in the buffer */
    for (i = 0; i < nb_samples; i++) {

        /* (re)create the output frame if the previous one was pushed */
        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;
        outpicref = showwaves->outpicref;

        /* draw this sample for each channel, splitting the picture into
         * per-channel bands when split_channels is set */
        for (j = 0; j < nb_channels; j++) {
            uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
            const int linesize = outpicref->linesize[0];
            int h;

            if (showwaves->split_channels)
                buf += j*ch_height*linesize;
            h = showwaves->get_h(*p++, ch_height);
            showwaves->draw_sample(buf, ch_height, linesize,
                                   &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
        }

        /* advance to the next column after n samples */
        showwaves->sample_count_mod++;
        if (showwaves->sample_count_mod == n) {
            showwaves->sample_count_mod = 0;
            showwaves->buf_idx++;
        }
        if (showwaves->buf_idx == showwaves->w)
            if ((ret = push_frame(outlink)) < 0)
                break;
        outpicref = showwaves->outpicref;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
632 | |
/* Pad and filter definitions for the realtime "showwaves" filter. */
static const AVFilterPad showwaves_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = showwaves_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showwaves_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwaves = {
    .name          = "showwaves",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwaves_inputs,
    .outputs       = showwaves_outputs,
    .priv_class    = &showwaves_class,
};
663 | |
664 | #endif // CONFIG_SHOWWAVES_FILTER |
665 | |
666 | #if CONFIG_SHOWWAVESPIC_FILTER |
667 | |
/* Benign redefinition: replacement text is identical to the definitions
 * above, which C permits without an #undef. */
#define OFFSET(x) offsetof(ShowWavesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* Options of the single-picture "showwavespic" filter: same as showwaves
 * minus "mode", "n" and "rate", which are fixed in this variant. */
static const AVOption showwavespic_options[] = {
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "s",    "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
    { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
    { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
        { "lin", "linear",       0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN},  .flags=FLAGS, .unit="scale"},
        { "log", "logarithmic",  0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG},  .flags=FLAGS, .unit="scale"},
        { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
        { "cbrt", "cubic root",  0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
    { NULL }
};
683 | |
684 | AVFILTER_DEFINE_CLASS(showwavespic); |
685 | |
686 | static int showwavespic_config_input(AVFilterLink *inlink) |
687 | { |
688 | AVFilterContext *ctx = inlink->dst; |
689 | ShowWavesContext *showwaves = ctx->priv; |
690 | |
691 | if (showwaves->single_pic) { |
692 | showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum)); |
693 | if (!showwaves->sum) |
694 | return AVERROR(ENOMEM); |
695 | } |
696 | |
697 | return 0; |
698 | } |
699 | |
static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    /* Single-picture path: instead of drawing immediately, append every
     * input frame to the audio_frames list; push_single_pic() renders
     * them all at EOF. Takes ownership of insamples (queued on success,
     * freed on failure). Returns 0 or a negative AVERROR. */
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowWavesContext *showwaves = ctx->priv;
    int16_t *p = (int16_t *)insamples->data[0];
    int ret = 0;

    if (showwaves->single_pic) {
        struct frame_node *f;

        /* make sure the (single) output picture exists; its pts derives
         * from the first queued samples */
        ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
        if (ret < 0)
            goto end;

        /* queue the audio frame */
        f = av_malloc(sizeof(*f));
        if (!f) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        f->frame = insamples;
        f->next = NULL;
        if (!showwaves->last_frame) {
            showwaves->audio_frames =
            showwaves->last_frame   = f;
        } else {
            showwaves->last_frame->next = f;
            showwaves->last_frame = f;
        }
        showwaves->total_samples += insamples->nb_samples;

        return 0;
    }

end:
    av_frame_free(&insamples);
    return ret;
}
739 | |
/* Pad and filter definitions for the single-picture "showwavespic"
 * filter; it shares init/uninit/config_output with showwaves. */
static const AVFilterPad showwavespic_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = showwavespic_config_input,
        .filter_frame = showwavespic_filter_frame,
    },
    { NULL }
};

static const AVFilterPad showwavespic_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_avf_showwavespic = {
    .name          = "showwavespic",
    .description   = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(ShowWavesContext),
    .inputs        = showwavespic_inputs,
    .outputs       = showwavespic_outputs,
    .priv_class    = &showwavespic_class,
};
771 | |
772 | #endif // CONFIG_SHOWWAVESPIC_FILTER |
773 |