blob: 5bf71b38edb68db5e10625400d94ed46b25a7bab
1 | /* |
2 | * Copyright (c) 2015 Derek Buitenhuis |
3 | * |
4 | * This file is part of FFmpeg. |
5 | * |
6 | * FFmpeg is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2.1 of the License, or (at your option) any later version. |
10 | * |
11 | * FFmpeg is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with FFmpeg; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
19 | */ |
20 | |
21 | #include "libavutil/opt.h" |
22 | #include "avfilter.h" |
23 | #include "formats.h" |
24 | #include "internal.h" |
25 | #include "video.h" |
26 | |
27 | #define DEFAULT_LENGTH 300 |
28 | |
typedef struct ReverseContext {
    int nb_frames;            // number of frames currently buffered in frames[]
    AVFrame **frames;         // FIFO of input frames, played back in reverse at EOF
    unsigned int frames_size; // allocated size of frames[] in bytes (av_fast_realloc bookkeeping)
    unsigned int pts_size;    // allocated size of pts[] in bytes (av_fast_realloc bookkeeping)
    int64_t *pts;             // original input timestamps, reused in forward order on output
    int flush_idx;            // next index into pts[] to assign while flushing
} ReverseContext;
37 | |
38 | static av_cold int init(AVFilterContext *ctx) |
39 | { |
40 | ReverseContext *s = ctx->priv; |
41 | |
42 | s->pts = av_fast_realloc(NULL, &s->pts_size, |
43 | DEFAULT_LENGTH * sizeof(*(s->pts))); |
44 | if (!s->pts) |
45 | return AVERROR(ENOMEM); |
46 | |
47 | s->frames = av_fast_realloc(NULL, &s->frames_size, |
48 | DEFAULT_LENGTH * sizeof(*(s->frames))); |
49 | if (!s->frames) { |
50 | av_freep(&s->pts); |
51 | return AVERROR(ENOMEM); |
52 | } |
53 | |
54 | return 0; |
55 | } |
56 | |
57 | static av_cold void uninit(AVFilterContext *ctx) |
58 | { |
59 | ReverseContext *s = ctx->priv; |
60 | |
61 | av_freep(&s->pts); |
62 | av_freep(&s->frames); |
63 | } |
64 | |
65 | static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
66 | { |
67 | AVFilterContext *ctx = inlink->dst; |
68 | ReverseContext *s = ctx->priv; |
69 | void *ptr; |
70 | |
71 | if (s->nb_frames + 1 > s->pts_size / sizeof(*(s->pts))) { |
72 | ptr = av_fast_realloc(s->pts, &s->pts_size, s->pts_size * 2); |
73 | if (!ptr) |
74 | return AVERROR(ENOMEM); |
75 | s->pts = ptr; |
76 | } |
77 | |
78 | if (s->nb_frames + 1 > s->frames_size / sizeof(*(s->frames))) { |
79 | ptr = av_fast_realloc(s->frames, &s->frames_size, s->frames_size * 2); |
80 | if (!ptr) |
81 | return AVERROR(ENOMEM); |
82 | s->frames = ptr; |
83 | } |
84 | |
85 | s->frames[s->nb_frames] = in; |
86 | s->pts[s->nb_frames] = in->pts; |
87 | s->nb_frames++; |
88 | |
89 | return 0; |
90 | } |
91 | |
92 | #if CONFIG_REVERSE_FILTER |
93 | |
94 | static int request_frame(AVFilterLink *outlink) |
95 | { |
96 | AVFilterContext *ctx = outlink->src; |
97 | ReverseContext *s = ctx->priv; |
98 | int ret; |
99 | |
100 | ret = ff_request_frame(ctx->inputs[0]); |
101 | |
102 | if (ret == AVERROR_EOF && s->nb_frames > 0) { |
103 | AVFrame *out = s->frames[s->nb_frames - 1]; |
104 | out->pts = s->pts[s->flush_idx++]; |
105 | ret = ff_filter_frame(outlink, out); |
106 | s->nb_frames--; |
107 | } |
108 | |
109 | return ret; |
110 | } |
111 | |
/* Video input pad: every incoming frame is buffered by filter_frame(). */
static const AVFilterPad reverse_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
120 | |
/* Video output pad: frames are emitted in reverse order at EOF. */
static const AVFilterPad reverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
    { NULL }
};
129 | |
/* Filter definition for the video "reverse" filter. */
AVFilter ff_vf_reverse = {
    .name        = "reverse",
    .description = NULL_IF_CONFIG_SMALL("Reverse a clip."),
    .priv_size   = sizeof(ReverseContext),
    .init        = init,
    .uninit      = uninit,
    .inputs      = reverse_inputs,
    .outputs     = reverse_outputs,
};
139 | |
140 | #endif /* CONFIG_REVERSE_FILTER */ |
141 | |
142 | #if CONFIG_AREVERSE_FILTER |
143 | |
144 | static int query_formats(AVFilterContext *ctx) |
145 | { |
146 | AVFilterFormats *formats; |
147 | AVFilterChannelLayouts *layouts; |
148 | int ret; |
149 | |
150 | layouts = ff_all_channel_counts(); |
151 | if (!layouts) |
152 | return AVERROR(ENOMEM); |
153 | ret = ff_set_common_channel_layouts(ctx, layouts); |
154 | if (ret < 0) |
155 | return ret; |
156 | |
157 | ret = ff_set_common_formats(ctx, ff_planar_sample_fmts()); |
158 | if (ret < 0) |
159 | return ret; |
160 | |
161 | formats = ff_all_samplerates(); |
162 | if (!formats) |
163 | return AVERROR(ENOMEM); |
164 | return ff_set_common_samplerates(ctx, formats); |
165 | } |
166 | |
/**
 * Output side of the audio "areverse" filter.
 *
 * Once the input hits EOF, each request pops the newest buffered audio
 * frame, reverses the sample order in place within each plane (the input
 * pad requests writable frames via .needs_writable), restamps it with the
 * original timestamps reused in forward order, and sends it downstream.
 * Only planar formats appear here — query_formats() restricts
 * negotiation to ff_planar_sample_fmts().
 */
static int areverse_request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret, p, i, j;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts = s->pts[s->flush_idx++];

        /* In-place two-pointer reversal of each channel plane; the
         * switch picks the element width matching the sample format. */
        for (p = 0; p < outlink->channels; p++) {
            switch (outlink->format) {
            case AV_SAMPLE_FMT_U8P: {
                uint8_t *dst = (uint8_t *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(uint8_t, dst[i], dst[j]);
            }
            break;
            case AV_SAMPLE_FMT_S16P: {
                int16_t *dst = (int16_t *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(int16_t, dst[i], dst[j]);
            }
            break;
            case AV_SAMPLE_FMT_S32P: {
                int32_t *dst = (int32_t *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(int32_t, dst[i], dst[j]);
            }
            break;
            case AV_SAMPLE_FMT_FLTP: {
                float *dst = (float *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(float, dst[i], dst[j]);
            }
            break;
            case AV_SAMPLE_FMT_DBLP: {
                double *dst = (double *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(double, dst[i], dst[j]);
            }
            break;
            }
        }

        ret = ff_filter_frame(outlink, out);
        s->nb_frames--;
    }

    return ret;
}
220 | |
/* Audio input pad: frames are buffered by filter_frame().
 * needs_writable guarantees the buffered frames can be reversed in
 * place at flush time. */
static const AVFilterPad areverse_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};
230 | |
/* Audio output pad: frames are emitted in reverse order at EOF. */
static const AVFilterPad areverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = areverse_request_frame,
    },
    { NULL }
};
239 | |
/* Filter definition for the audio "areverse" filter. */
AVFilter ff_af_areverse = {
    .name          = "areverse",
    .description   = NULL_IF_CONFIG_SMALL("Reverse an audio clip."),
    .query_formats = query_formats,
    .priv_size     = sizeof(ReverseContext),
    .init          = init,
    .uninit        = uninit,
    .inputs        = areverse_inputs,
    .outputs       = areverse_outputs,
};
250 | |
251 | #endif /* CONFIG_AREVERSE_FILTER */ |
252 |