blob: 1a517bfc958215de61ab6abd41f5fa0ebff1b693
1 | /* |
2 | * This file is part of FFmpeg. |
3 | * |
4 | * FFmpeg is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU Lesser General Public |
6 | * License as published by the Free Software Foundation; either |
7 | * version 2.1 of the License, or (at your option) any later version. |
8 | * |
9 | * FFmpeg is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | * Lesser General Public License for more details. |
13 | * |
14 | * You should have received a copy of the GNU Lesser General Public |
15 | * License along with FFmpeg; if not, write to the Free Software |
16 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ |
18 | |
19 | #include "libavutil/avstring.h" |
20 | #include "libavutil/internal.h" |
21 | #include "libavutil/opt.h" |
22 | #include "avfilter.h" |
23 | #include "audio.h" |
24 | #include "formats.h" |
25 | #include "framesync.h" |
26 | #include "internal.h" |
27 | #include "video.h" |
28 | |
typedef struct StreamSelectContext {
    const AVClass *class;
    int nb_inputs;      // number of input pads ("inputs" option)
    char *map_str;      // "map" option: list of input indexes, one per output
    int *map;           // map[out_idx] = index of the input pad feeding that output
    int nb_map;         // number of valid entries in map (== number of outputs)
    int is_audio;       // 1 for astreamselect, 0 for streamselect
    int64_t *last_pts;  // per-input pts of the last forwarded frame (audio dedup)
    AVFrame **frames;   // scratch array of synced input frames, one per input
    FFFrameSync fs;     // framesync state synchronizing all inputs
} StreamSelectContext;
40 | |
#define OFFSET(x) offsetof(StreamSelectContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
/* Shared by both streamselect and astreamselect (see alias below). */
static const AVOption streamselect_options[] = {
    { "inputs", "number of input streams", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT_MAX, .flags=FLAGS },
    { "map", "input indexes to remap to outputs", OFFSET(map_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags=FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(streamselect);
50 | |
51 | static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
52 | { |
53 | StreamSelectContext *s = inlink->dst->priv; |
54 | return ff_framesync_filter_frame(&s->fs, inlink, in); |
55 | } |
56 | |
/* Framesync event callback: runs once every input has a frame for the
 * current sync point.  For each output pad, clones the frame of the input
 * it is mapped to and forwards it with a pts rescaled from the sync point. */
static int process_frame(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    StreamSelectContext *s = fs->opaque;
    AVFrame **in = s->frames;
    int i, j, ret = 0;

    /* Fetch (without consuming) the current frame from every input. */
    for (i = 0; i < ctx->nb_inputs; i++) {
        if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0)
            return ret;
    }

    for (j = 0; j < ctx->nb_inputs; j++) {
        for (i = 0; i < s->nb_map; i++) {
            if (s->map[i] == j) {
                AVFrame *out;

                /* Audio: framesync may report the same input frame at several
                 * sync points; skip resending it when the pts did not advance,
                 * except for the very first frame on this output. */
                if (s->is_audio && s->last_pts[j] == in[j]->pts &&
                    ctx->outputs[i]->frame_count_in > 0)
                    continue;
                out = av_frame_clone(in[j]);
                if (!out)
                    return AVERROR(ENOMEM);

                /* Output timestamp comes from the sync point, not the input frame. */
                out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, ctx->outputs[i]->time_base);
                s->last_pts[j] = in[j]->pts;
                ret = ff_filter_frame(ctx->outputs[i], out);
                if (ret < 0)
                    return ret;
            }
        }
    }

    return ret;
}
92 | |
93 | static int request_frame(AVFilterLink *outlink) |
94 | { |
95 | StreamSelectContext *s = outlink->src->priv; |
96 | return ff_framesync_request_frame(&s->fs, outlink); |
97 | } |
98 | |
/* Copy the properties of the mapped input link onto this output link, and
 * on the first call also set up the framesync context shared by all pads. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    StreamSelectContext *s = ctx->priv;
    const int outlink_idx = FF_OUTLINK_IDX(outlink);
    const int inlink_idx = s->map[outlink_idx];
    AVFilterLink *inlink = ctx->inputs[inlink_idx];
    FFFrameSyncIn *in;
    int i, ret;

    av_log(ctx, AV_LOG_VERBOSE, "config output link %d "
           "with settings from input link %d\n",
           outlink_idx, inlink_idx);

    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        outlink->w = inlink->w;
        outlink->h = inlink->h;
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
        outlink->frame_rate = inlink->frame_rate;
        break;
    case AVMEDIA_TYPE_AUDIO:
        outlink->sample_rate = inlink->sample_rate;
        outlink->channels = inlink->channels;
        outlink->channel_layout = inlink->channel_layout;
        break;
    }

    outlink->time_base = inlink->time_base;
    outlink->format = inlink->format;

    /* config_output() runs once per output pad, but framesync is shared:
     * the opaque pointer doubles as an "already initialized" flag. */
    if (s->fs.opaque == s)
        return 0;

    if ((ret = ff_framesync_init(&s->fs, ctx, ctx->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    /* All inputs are synchronized; EXT_STOP ends processing as soon as
     * any input reaches EOF (no extension past stream boundaries). */
    for (i = 0; i < ctx->nb_inputs; i++) {
        in[i].time_base = ctx->inputs[i]->time_base;
        in[i].sync = 1;
        in[i].before = EXT_STOP;
        in[i].after = EXT_STOP;
    }

    s->frames = av_calloc(ctx->nb_inputs, sizeof(*s->frames));
    if (!s->frames)
        return AVERROR(ENOMEM);

    return ff_framesync_configure(&s->fs);
}
153 | |
154 | static int parse_definition(AVFilterContext *ctx, int nb_pads, void *filter_frame, int is_audio) |
155 | { |
156 | const int is_input = !!filter_frame; |
157 | const char *padtype = is_input ? "in" : "out"; |
158 | int i = 0, ret = 0; |
159 | |
160 | for (i = 0; i < nb_pads; i++) { |
161 | AVFilterPad pad = { 0 }; |
162 | |
163 | pad.type = is_audio ? AVMEDIA_TYPE_AUDIO : AVMEDIA_TYPE_VIDEO; |
164 | |
165 | pad.name = av_asprintf("%sput%d", padtype, i); |
166 | if (!pad.name) |
167 | return AVERROR(ENOMEM); |
168 | |
169 | av_log(ctx, AV_LOG_DEBUG, "Add %s pad %s\n", padtype, pad.name); |
170 | |
171 | if (is_input) { |
172 | pad.filter_frame = filter_frame; |
173 | ret = ff_insert_inpad(ctx, i, &pad); |
174 | } else { |
175 | pad.config_props = config_output; |
176 | pad.request_frame = request_frame; |
177 | ret = ff_insert_outpad(ctx, i, &pad); |
178 | } |
179 | |
180 | if (ret < 0) { |
181 | av_freep(&pad.name); |
182 | return ret; |
183 | } |
184 | } |
185 | |
186 | return 0; |
187 | } |
188 | |
189 | static int parse_mapping(AVFilterContext *ctx, const char *map) |
190 | { |
191 | StreamSelectContext *s = ctx->priv; |
192 | int *new_map; |
193 | int new_nb_map = 0; |
194 | |
195 | if (!map) { |
196 | av_log(ctx, AV_LOG_ERROR, "mapping definition is not set\n"); |
197 | return AVERROR(EINVAL); |
198 | } |
199 | |
200 | new_map = av_calloc(s->nb_inputs, sizeof(*new_map)); |
201 | if (!new_map) |
202 | return AVERROR(ENOMEM); |
203 | |
204 | while (1) { |
205 | char *p; |
206 | const int n = strtol(map, &p, 0); |
207 | |
208 | av_log(ctx, AV_LOG_DEBUG, "n=%d map=%p p=%p\n", n, map, p); |
209 | |
210 | if (map == p) |
211 | break; |
212 | map = p; |
213 | |
214 | if (new_nb_map >= s->nb_inputs) { |
215 | av_log(ctx, AV_LOG_ERROR, "Unable to map more than the %d " |
216 | "input pads available\n", s->nb_inputs); |
217 | av_free(new_map); |
218 | return AVERROR(EINVAL); |
219 | } |
220 | |
221 | if (n < 0 || n >= ctx->nb_inputs) { |
222 | av_log(ctx, AV_LOG_ERROR, "Input stream index %d doesn't exist " |
223 | "(there is only %d input streams defined)\n", |
224 | n, s->nb_inputs); |
225 | av_free(new_map); |
226 | return AVERROR(EINVAL); |
227 | } |
228 | |
229 | av_log(ctx, AV_LOG_VERBOSE, "Map input stream %d to output stream %d\n", n, new_nb_map); |
230 | new_map[new_nb_map++] = n; |
231 | } |
232 | |
233 | if (!new_nb_map) { |
234 | av_log(ctx, AV_LOG_ERROR, "invalid mapping\n"); |
235 | av_free(new_map); |
236 | return AVERROR(EINVAL); |
237 | } |
238 | |
239 | av_freep(&s->map); |
240 | s->map = new_map; |
241 | s->nb_map = new_nb_map; |
242 | |
243 | av_log(ctx, AV_LOG_VERBOSE, "%d map set\n", s->nb_map); |
244 | |
245 | return 0; |
246 | } |
247 | |
248 | static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, |
249 | char *res, int res_len, int flags) |
250 | { |
251 | if (!strcmp(cmd, "map")) { |
252 | int ret = parse_mapping(ctx, args); |
253 | |
254 | if (ret < 0) |
255 | return ret; |
256 | return avfilter_config_links(ctx); |
257 | } |
258 | return AVERROR(ENOSYS); |
259 | } |
260 | |
261 | static av_cold int init(AVFilterContext *ctx) |
262 | { |
263 | StreamSelectContext *s = ctx->priv; |
264 | int ret, nb_outputs = 0; |
265 | char *map = s->map_str; |
266 | |
267 | if (!strcmp(ctx->filter->name, "astreamselect")) |
268 | s->is_audio = 1; |
269 | |
270 | for (; map;) { |
271 | char *p; |
272 | |
273 | strtol(map, &p, 0); |
274 | if (map == p) |
275 | break; |
276 | nb_outputs++; |
277 | map = p; |
278 | } |
279 | |
280 | s->last_pts = av_calloc(s->nb_inputs, sizeof(*s->last_pts)); |
281 | if (!s->last_pts) |
282 | return AVERROR(ENOMEM); |
283 | |
284 | if ((ret = parse_definition(ctx, s->nb_inputs, filter_frame, s->is_audio)) < 0 || |
285 | (ret = parse_definition(ctx, nb_outputs, NULL, s->is_audio)) < 0) |
286 | return ret; |
287 | |
288 | av_log(ctx, AV_LOG_DEBUG, "Configured with %d inpad and %d outpad\n", |
289 | ctx->nb_inputs, ctx->nb_outputs); |
290 | |
291 | return parse_mapping(ctx, s->map_str); |
292 | } |
293 | |
294 | static av_cold void uninit(AVFilterContext *ctx) |
295 | { |
296 | StreamSelectContext *s = ctx->priv; |
297 | |
298 | av_freep(&s->last_pts); |
299 | av_freep(&s->map); |
300 | av_freep(&s->frames); |
301 | ff_framesync_uninit(&s->fs); |
302 | } |
303 | |
304 | static int query_formats(AVFilterContext *ctx) |
305 | { |
306 | AVFilterFormats *formats, *rates = NULL; |
307 | AVFilterChannelLayouts *layouts = NULL; |
308 | int ret, i; |
309 | |
310 | for (i = 0; i < ctx->nb_inputs; i++) { |
311 | formats = ff_all_formats(ctx->inputs[i]->type); |
312 | if ((ret = ff_set_common_formats(ctx, formats)) < 0) |
313 | return ret; |
314 | |
315 | if (ctx->inputs[i]->type == AVMEDIA_TYPE_AUDIO) { |
316 | rates = ff_all_samplerates(); |
317 | if ((ret = ff_set_common_samplerates(ctx, rates)) < 0) |
318 | return ret; |
319 | layouts = ff_all_channel_counts(); |
320 | if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0) |
321 | return ret; |
322 | } |
323 | } |
324 | |
325 | return 0; |
326 | } |
327 | |
/* Video variant; pads are created dynamically in init() from the options. */
AVFilter ff_vf_streamselect = {
    .name            = "streamselect",
    .description     = NULL_IF_CONFIG_SMALL("Select video streams"),
    .init            = init,
    .query_formats   = query_formats,
    .process_command = process_command,
    .uninit          = uninit,
    .priv_size       = sizeof(StreamSelectContext),
    .priv_class      = &streamselect_class,
    .flags           = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
339 | |
/* Audio variant shares the option table and all callbacks; the filter
 * name is what switches is_audio on in init(). */
#define astreamselect_options streamselect_options
AVFILTER_DEFINE_CLASS(astreamselect);

AVFilter ff_af_astreamselect = {
    .name            = "astreamselect",
    .description     = NULL_IF_CONFIG_SMALL("Select audio streams"),
    .init            = init,
    .query_formats   = query_formats,
    .process_command = process_command,
    .uninit          = uninit,
    .priv_size       = sizeof(StreamSelectContext),
    .priv_class      = &astreamselect_class,
    .flags           = AVFILTER_FLAG_DYNAMIC_INPUTS | AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};
354 |