blob: 0f87b5439acd0819a5b4ba380a82377a21652d64
1 | /* |
2 | * Copyright (c) 2011 Stefano Sabatini |
3 | * |
4 | * This file is part of FFmpeg. |
5 | * |
6 | * FFmpeg is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2.1 of the License, or (at your option) any later version. |
10 | * |
11 | * FFmpeg is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with FFmpeg; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
19 | */ |
20 | |
21 | /** |
22 | * @file |
23 | * buffer sink |
24 | */ |
25 | |
26 | #include "libavutil/avassert.h" |
27 | #include "libavutil/channel_layout.h" |
28 | #include "libavutil/common.h" |
29 | #include "libavutil/internal.h" |
30 | #include "libavutil/opt.h" |
31 | |
32 | #define FF_INTERNAL_FIELDS 1 |
33 | #include "framequeue.h" |
34 | |
35 | #include "audio.h" |
36 | #include "avfilter.h" |
37 | #include "buffersink.h" |
38 | #include "filters.h" |
39 | #include "internal.h" |
40 | |
typedef struct BufferSinkContext {
    const AVClass *class;
    unsigned warning_limit;         ///< queued-frame count that triggers a warning; multiplied by 10 each time it fires

    /* only used for video */
    enum AVPixelFormat *pixel_fmts;           ///< list of accepted pixel formats, must be terminated with -1
    int pixel_fmts_size;                      ///< byte size of pixel_fmts, as set by AV_OPT_TYPE_BINARY

    /* only used for audio */
    enum AVSampleFormat *sample_fmts;       ///< list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
    int sample_fmts_size;                   ///< byte size of sample_fmts, as set by AV_OPT_TYPE_BINARY
    int64_t *channel_layouts;               ///< list of accepted channel layouts, terminated by -1
    int channel_layouts_size;               ///< byte size of channel_layouts
    int *channel_counts;                    ///< list of accepted channel counts, terminated by -1
    int channel_counts_size;                ///< byte size of channel_counts
    int all_channel_counts;                 ///< when non-zero, accept any channel count (conflicts with explicit lists)
    int *sample_rates;                      ///< list of accepted sample rates, terminated by -1
    int sample_rates_size;                  ///< byte size of sample_rates

    AVFrame *peeked_frame;                  ///< frame cached by a previous AV_BUFFERSINK_FLAG_PEEK call, still owned by the link
} BufferSinkContext;
62 | |
/* Element count of an option-supplied binary list: the matching
 * "<list>_size" field holds the byte size written by AV_OPT_TYPE_BINARY. */
#define NB_ITEMS(list) (list ## _size / sizeof(*list))
/* NOTE(review): FIFO_INIT_* appear unused in this file — confirm before removal. */
#define FIFO_INIT_SIZE 8
#define FIFO_INIT_ELEMENT_SIZE sizeof(void *)
66 | |
/**
 * Get a frame from the sink; equivalent to
 * av_buffersink_get_frame_flags() with no flags set.
 */
int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
{
    return av_buffersink_get_frame_flags(ctx, frame, 0);
}
71 | |
72 | static int return_or_keep_frame(BufferSinkContext *buf, AVFrame *out, AVFrame *in, int flags) |
73 | { |
74 | if ((flags & AV_BUFFERSINK_FLAG_PEEK)) { |
75 | buf->peeked_frame = in; |
76 | return out ? av_frame_ref(out, in) : 0; |
77 | } else { |
78 | av_assert1(out); |
79 | buf->peeked_frame = NULL; |
80 | av_frame_move_ref(out, in); |
81 | av_frame_free(&in); |
82 | return 0; |
83 | } |
84 | } |
85 | |
/**
 * Shared implementation of av_buffersink_get_frame_flags() and
 * av_buffersink_get_samples().
 *
 * @param samples 0 to accept frames of any size, otherwise the exact
 *                number of samples to consume from the link per call
 * @return 0 on success, the link status (e.g. AVERROR_EOF) once the input
 *         is finished, AVERROR(EAGAIN) when NO_REQUEST is set and nothing
 *         is queued, or another negative AVERROR code
 */
static int get_frame_internal(AVFilterContext *ctx, AVFrame *frame, int flags, int samples)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int status, ret;
    AVFrame *cur_frame;
    int64_t pts;

    /* a previous PEEK call left a frame cached: serve (or consume) it first */
    if (buf->peeked_frame)
        return return_or_keep_frame(buf, frame, buf->peeked_frame, flags);

    /* NB: the order of the branches below matters — consume first, then
     * acknowledge a pending status, then decide whether to drive the graph */
    while (1) {
        ret = samples ? ff_inlink_consume_samples(inlink, samples, samples, &cur_frame) :
                        ff_inlink_consume_frame(inlink, &cur_frame);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            /* TODO return the frame instead of copying it */
            return return_or_keep_frame(buf, frame, cur_frame, flags);
        } else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
            /* nothing queued and the link carries a status (e.g. EOF) */
            return status;
        } else if ((flags & AV_BUFFERSINK_FLAG_NO_REQUEST)) {
            /* caller asked us not to drive the graph */
            return AVERROR(EAGAIN);
        } else if (inlink->frame_wanted_out) {
            /* a request is already pending: run one graph step and retry */
            ret = ff_filter_graph_run_once(ctx->graph);
            if (ret < 0)
                return ret;
        } else {
            ff_inlink_request_frame(inlink);
        }
    }
}
118 | |
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
    /* min_samples is non-zero only after av_buffersink_set_frame_size(),
     * in which case audio is pulled in fixed-size chunks */
    return get_frame_internal(ctx, frame, flags, ctx->inputs[0]->min_samples);
}
123 | |
/* Get exactly nb_samples audio samples from the sink (no flags supported). */
int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx,
                                                  AVFrame *frame, int nb_samples)
{
    return get_frame_internal(ctx, frame, 0, nb_samples);
}
129 | |
130 | AVBufferSinkParams *av_buffersink_params_alloc(void) |
131 | { |
132 | static const int pixel_fmts[] = { AV_PIX_FMT_NONE }; |
133 | AVBufferSinkParams *params = av_malloc(sizeof(AVBufferSinkParams)); |
134 | if (!params) |
135 | return NULL; |
136 | |
137 | params->pixel_fmts = pixel_fmts; |
138 | return params; |
139 | } |
140 | |
141 | AVABufferSinkParams *av_abuffersink_params_alloc(void) |
142 | { |
143 | AVABufferSinkParams *params = av_mallocz(sizeof(AVABufferSinkParams)); |
144 | |
145 | if (!params) |
146 | return NULL; |
147 | return params; |
148 | } |
149 | |
/* Initialization shared by the video and audio sinks. */
static av_cold int common_init(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;

    /* first "too many buffers queued" warning fires at 100 frames;
     * the threshold then grows tenfold on each hit (see activate()) */
    buf->warning_limit = 100;
    return 0;
}
157 | |
158 | static int activate(AVFilterContext *ctx) |
159 | { |
160 | BufferSinkContext *buf = ctx->priv; |
161 | |
162 | if (buf->warning_limit && |
163 | ff_framequeue_queued_frames(&ctx->inputs[0]->fifo) >= buf->warning_limit) { |
164 | av_log(ctx, AV_LOG_WARNING, |
165 | "%d buffers queued in %s, something may be wrong.\n", |
166 | buf->warning_limit, |
167 | (char *)av_x_if_null(ctx->name, ctx->filter->name)); |
168 | buf->warning_limit *= 10; |
169 | } |
170 | |
171 | /* The frame is queued, the rest is up to get_frame_internal */ |
172 | return 0; |
173 | } |
174 | |
175 | void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size) |
176 | { |
177 | AVFilterLink *inlink = ctx->inputs[0]; |
178 | |
179 | inlink->min_samples = inlink->max_samples = |
180 | inlink->partial_buf_size = frame_size; |
181 | } |
182 | |
/* Define a public accessor returning one property of the sink's input link.
 * The assert guards against calling these on a filter that is not a
 * buffersink (both sinks share the same activate callback). */
#define MAKE_AVFILTERLINK_ACCESSOR(type, field) \
type av_buffersink_get_##field(const AVFilterContext *ctx) { \
    av_assert0(ctx->filter->activate == activate); \
    return ctx->inputs[0]->field; \
}

/* common to video and audio */
MAKE_AVFILTERLINK_ACCESSOR(enum AVMediaType , type               )
MAKE_AVFILTERLINK_ACCESSOR(AVRational       , time_base          )
MAKE_AVFILTERLINK_ACCESSOR(int              , format             )

/* video only */
MAKE_AVFILTERLINK_ACCESSOR(AVRational       , frame_rate         )
MAKE_AVFILTERLINK_ACCESSOR(int              , w                  )
MAKE_AVFILTERLINK_ACCESSOR(int              , h                  )
MAKE_AVFILTERLINK_ACCESSOR(AVRational       , sample_aspect_ratio)

/* audio only */
MAKE_AVFILTERLINK_ACCESSOR(int              , channels           )
MAKE_AVFILTERLINK_ACCESSOR(uint64_t         , channel_layout     )
MAKE_AVFILTERLINK_ACCESSOR(int              , sample_rate        )

MAKE_AVFILTERLINK_ACCESSOR(AVBufferRef *    , hw_frames_ctx      )
203 | |
204 | static av_cold int vsink_init(AVFilterContext *ctx, void *opaque) |
205 | { |
206 | BufferSinkContext *buf = ctx->priv; |
207 | AVBufferSinkParams *params = opaque; |
208 | int ret; |
209 | |
210 | if (params) { |
211 | if ((ret = av_opt_set_int_list(buf, "pix_fmts", params->pixel_fmts, AV_PIX_FMT_NONE, 0)) < 0) |
212 | return ret; |
213 | } |
214 | |
215 | return common_init(ctx); |
216 | } |
217 | |
/* Validate that an option-supplied binary list has a byte size that is a
 * whole multiple of its element size; returns AVERROR(EINVAL) from the
 * *calling* function otherwise. Requires `ctx` and `buf` in scope. */
#define CHECK_LIST_SIZE(field) \
    if (buf->field ## _size % sizeof(*buf->field)) { \
        av_log(ctx, AV_LOG_ERROR, "Invalid size for " #field ": %d, " \
               "should be multiple of %d\n", \
               buf->field ## _size, (int)sizeof(*buf->field)); \
        return AVERROR(EINVAL); \
    }
225 | static int vsink_query_formats(AVFilterContext *ctx) |
226 | { |
227 | BufferSinkContext *buf = ctx->priv; |
228 | AVFilterFormats *formats = NULL; |
229 | unsigned i; |
230 | int ret; |
231 | |
232 | CHECK_LIST_SIZE(pixel_fmts) |
233 | if (buf->pixel_fmts_size) { |
234 | for (i = 0; i < NB_ITEMS(buf->pixel_fmts); i++) |
235 | if ((ret = ff_add_format(&formats, buf->pixel_fmts[i])) < 0) |
236 | return ret; |
237 | if ((ret = ff_set_common_formats(ctx, formats)) < 0) |
238 | return ret; |
239 | } else { |
240 | if ((ret = ff_default_query_formats(ctx)) < 0) |
241 | return ret; |
242 | } |
243 | |
244 | return 0; |
245 | } |
246 | |
247 | static av_cold int asink_init(AVFilterContext *ctx, void *opaque) |
248 | { |
249 | BufferSinkContext *buf = ctx->priv; |
250 | AVABufferSinkParams *params = opaque; |
251 | int ret; |
252 | |
253 | if (params) { |
254 | if ((ret = av_opt_set_int_list(buf, "sample_fmts", params->sample_fmts, AV_SAMPLE_FMT_NONE, 0)) < 0 || |
255 | (ret = av_opt_set_int_list(buf, "sample_rates", params->sample_rates, -1, 0)) < 0 || |
256 | (ret = av_opt_set_int_list(buf, "channel_layouts", params->channel_layouts, -1, 0)) < 0 || |
257 | (ret = av_opt_set_int_list(buf, "channel_counts", params->channel_counts, -1, 0)) < 0 || |
258 | (ret = av_opt_set_int(buf, "all_channel_counts", params->all_channel_counts, 0)) < 0) |
259 | return ret; |
260 | } |
261 | return common_init(ctx); |
262 | } |
263 | |
/**
 * Query-formats callback for the audio sink: advertise the user-supplied
 * sample formats, channel layouts/counts and sample rates. Lists that were
 * not supplied are simply not constrained.
 */
static int asink_query_formats(AVFilterContext *ctx)
{
    BufferSinkContext *buf = ctx->priv;
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    unsigned i;
    int ret;

    CHECK_LIST_SIZE(sample_fmts)
    CHECK_LIST_SIZE(sample_rates)
    CHECK_LIST_SIZE(channel_layouts)
    CHECK_LIST_SIZE(channel_counts)

    if (buf->sample_fmts_size) {
        for (i = 0; i < NB_ITEMS(buf->sample_fmts); i++)
            if ((ret = ff_add_format(&formats, buf->sample_fmts[i])) < 0)
                return ret;
        /* ownership of formats passes to the link system here */
        if ((ret = ff_set_common_formats(ctx, formats)) < 0)
            return ret;
    }

    if (buf->channel_layouts_size || buf->channel_counts_size ||
        buf->all_channel_counts) {
        for (i = 0; i < NB_ITEMS(buf->channel_layouts); i++)
            if ((ret = ff_add_channel_layout(&layouts, buf->channel_layouts[i])) < 0)
                return ret;
        for (i = 0; i < NB_ITEMS(buf->channel_counts); i++)
            if ((ret = ff_add_channel_layout(&layouts, FF_COUNT2LAYOUT(buf->channel_counts[i]))) < 0)
                return ret;
        if (buf->all_channel_counts) {
            /* an explicit list takes precedence over all_channel_counts */
            if (layouts)
                av_log(ctx, AV_LOG_WARNING,
                       "Conflicting all_channel_counts and list in options\n");
            else if (!(layouts = ff_all_channel_counts()))
                return AVERROR(ENOMEM);
        }
        if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
            return ret;
    }

    if (buf->sample_rates_size) {
        /* formats was handed off above; start a fresh list for rates */
        formats = NULL;
        for (i = 0; i < NB_ITEMS(buf->sample_rates); i++)
            if ((ret = ff_add_format(&formats, buf->sample_rates[i])) < 0)
                return ret;
        if ((ret = ff_set_common_samplerates(ctx, formats)) < 0)
            return ret;
    }

    return 0;
}
315 | |
316 | #define OFFSET(x) offsetof(BufferSinkContext, x) |
317 | #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM |
318 | static const AVOption buffersink_options[] = { |
319 | { "pix_fmts", "set the supported pixel formats", OFFSET(pixel_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS }, |
320 | { NULL }, |
321 | }; |
322 | #undef FLAGS |
323 | #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM |
324 | static const AVOption abuffersink_options[] = { |
325 | { "sample_fmts", "set the supported sample formats", OFFSET(sample_fmts), AV_OPT_TYPE_BINARY, .flags = FLAGS }, |
326 | { "sample_rates", "set the supported sample rates", OFFSET(sample_rates), AV_OPT_TYPE_BINARY, .flags = FLAGS }, |
327 | { "channel_layouts", "set the supported channel layouts", OFFSET(channel_layouts), AV_OPT_TYPE_BINARY, .flags = FLAGS }, |
328 | { "channel_counts", "set the supported channel counts", OFFSET(channel_counts), AV_OPT_TYPE_BINARY, .flags = FLAGS }, |
329 | { "all_channel_counts", "accept all channel counts", OFFSET(all_channel_counts), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS }, |
330 | { NULL }, |
331 | }; |
332 | #undef FLAGS |
333 | |
/* Generate the AVClass (buffersink_class / abuffersink_class) tying the
 * option tables above to BufferSinkContext. */
AVFILTER_DEFINE_CLASS(buffersink);
AVFILTER_DEFINE_CLASS(abuffersink);
336 | |
/* Single video input pad; the sink has no outputs. */
static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

/* Video buffersink filter definition. */
AVFilter ff_vsink_buffer = {
    .name          = "buffersink",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
    .priv_size     = sizeof(BufferSinkContext),
    .priv_class    = &buffersink_class,
    .init_opaque   = vsink_init,

    .query_formats = vsink_query_formats,
    .activate      = activate,
    .inputs        = avfilter_vsink_buffer_inputs,
    .outputs       = NULL,
};
357 | |
/* Single audio input pad; the sink has no outputs. */
static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Audio buffersink filter definition. */
AVFilter ff_asink_abuffer = {
    .name          = "abuffersink",
    .description   = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
    .priv_class    = &abuffersink_class,
    .priv_size     = sizeof(BufferSinkContext),
    .init_opaque   = asink_init,

    .query_formats = asink_query_formats,
    .activate      = activate,
    .inputs        = avfilter_asink_abuffer_inputs,
    .outputs       = NULL,
};
378 |