blob: ecfb872ed80b7904966ab1c930accfa96af258c7
1 | /* |
2 | * filter layer |
3 | * Copyright (c) 2007 Bobby Bingham |
4 | * |
5 | * This file is part of FFmpeg. |
6 | * |
7 | * FFmpeg is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU Lesser General Public |
9 | * License as published by the Free Software Foundation; either |
10 | * version 2.1 of the License, or (at your option) any later version. |
11 | * |
12 | * FFmpeg is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | * Lesser General Public License for more details. |
16 | * |
17 | * You should have received a copy of the GNU Lesser General Public |
18 | * License along with FFmpeg; if not, write to the Free Software |
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
20 | */ |
21 | |
22 | #include "libavutil/atomic.h" |
23 | #include "libavutil/avassert.h" |
24 | #include "libavutil/avstring.h" |
25 | #include "libavutil/buffer.h" |
26 | #include "libavutil/channel_layout.h" |
27 | #include "libavutil/common.h" |
28 | #include "libavutil/eval.h" |
29 | #include "libavutil/hwcontext.h" |
30 | #include "libavutil/imgutils.h" |
31 | #include "libavutil/internal.h" |
32 | #include "libavutil/opt.h" |
33 | #include "libavutil/pixdesc.h" |
34 | #include "libavutil/rational.h" |
35 | #include "libavutil/samplefmt.h" |
36 | |
37 | #define FF_INTERNAL_FIELDS 1 |
38 | #include "framequeue.h" |
39 | |
40 | #include "audio.h" |
41 | #include "avfilter.h" |
42 | #include "filters.h" |
43 | #include "formats.h" |
44 | #include "internal.h" |
45 | |
46 | #include "libavutil/ffversion.h" |
47 | const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION; |
48 | |
49 | void ff_tlog_ref(void *ctx, AVFrame *ref, int end) |
50 | { |
51 | av_unused char buf[16]; |
52 | ff_tlog(ctx, |
53 | "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64, |
54 | ref, ref->buf, ref->data[0], |
55 | ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3], |
56 | ref->pts, av_frame_get_pkt_pos(ref)); |
57 | |
58 | if (ref->width) { |
59 | ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c", |
60 | ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den, |
61 | ref->width, ref->height, |
62 | !ref->interlaced_frame ? 'P' : /* Progressive */ |
63 | ref->top_field_first ? 'T' : 'B', /* Top / Bottom */ |
64 | ref->key_frame, |
65 | av_get_picture_type_char(ref->pict_type)); |
66 | } |
67 | if (ref->nb_samples) { |
68 | ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d", |
69 | ref->channel_layout, |
70 | ref->nb_samples, |
71 | ref->sample_rate); |
72 | } |
73 | |
74 | ff_tlog(ctx, "]%s", end ? "\n" : ""); |
75 | } |
76 | |
/* Return the compile-time libavfilter version number.  The assertion
 * documents that this is an FFmpeg build: FFmpeg sets the micro version
 * to >= 100 to distinguish itself from Libav. */
unsigned avfilter_version(void)
{
    av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
    return LIBAVFILTER_VERSION_INT;
}
82 | |
/* Return the ./configure command line this build was compiled with. */
const char *avfilter_configuration(void)
{
    return FFMPEG_CONFIGURATION;
}
87 | |
/* Return the license string of libavfilter.  The full prefixed literal
 * "libavfilter license: <license>" is embedded in the binary (handy for
 * identifying the build by grepping the object file); the pointer
 * arithmetic skips past the prefix so callers receive only the license
 * name itself. */
const char *avfilter_license(void)
{
#define LICENSE_PREFIX "libavfilter license: "
    return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
93 | |
94 | void ff_command_queue_pop(AVFilterContext *filter) |
95 | { |
96 | AVFilterCommand *c= filter->command_queue; |
97 | av_freep(&c->arg); |
98 | av_freep(&c->command); |
99 | filter->command_queue= c->next; |
100 | av_free(c); |
101 | } |
102 | |
/**
 * Insert a new pad (and a NULL link slot) at position idx of a filter's
 * pad/link arrays, growing both arrays by one.
 *
 * @param idx        requested insertion index; clamped to the current count
 * @param count      in/out number of pads; incremented on success
 * @param padidx_off byte offset within AVFilterLink of the srcpad/dstpad
 *                   index field that must be bumped for shifted links
 * @param pads       in/out pad array (reallocated)
 * @param links      in/out link array (reallocated)
 * @param newpad     pad to copy into the new slot
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
                  AVFilterPad **pads, AVFilterLink ***links,
                  AVFilterPad *newpad)
{
    AVFilterLink **newlinks;
    AVFilterPad *newpads;
    unsigned i;

    idx = FFMIN(idx, *count);

    /* Grow both arrays first; on partial failure keep whichever
     * reallocation succeeded (harmless: count is unchanged) and bail. */
    newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
    newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
    if (newpads)
        *pads = newpads;
    if (newlinks)
        *links = newlinks;
    if (!newpads || !newlinks)
        return AVERROR(ENOMEM);

    /* Shift the tail up by one slot and drop the new pad in. */
    memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
    memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
    memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
    (*links)[idx] = NULL;

    (*count)++;
    /* Every link after the insertion point now sits one pad further along;
     * patch its stored pad index via the caller-supplied field offset. */
    for (i = idx + 1; i < *count; i++)
        if ((*links)[i])
            (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;

    return 0;
}
134 | |
/**
 * Link output pad srcpad of filter src to input pad dstpad of filter dst.
 * Both filters must already belong to the same graph; both pads must be
 * unconnected and carry the same media type.
 * @return 0 on success, AVERROR(EINVAL) on bad pads or type mismatch,
 *         AVERROR(ENOMEM) on allocation failure
 */
int avfilter_link(AVFilterContext *src, unsigned srcpad,
                  AVFilterContext *dst, unsigned dstpad)
{
    AVFilterLink *link;

    av_assert0(src->graph);
    av_assert0(dst->graph);
    av_assert0(src->graph == dst->graph);

    /* Reject out-of-range pad indices and pads that are already linked. */
    if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
        src->outputs[srcpad] || dst->inputs[dstpad])
        return AVERROR(EINVAL);

    if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
        av_log(src, AV_LOG_ERROR,
               "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
               src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
               dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
        return AVERROR(EINVAL);
    }

    link = av_mallocz(sizeof(*link));
    if (!link)
        return AVERROR(ENOMEM);

    /* The same link object is referenced from both endpoints. */
    src->outputs[srcpad] = dst->inputs[dstpad] = link;

    link->src = src;
    link->dst = dst;
    link->srcpad = &src->output_pads[srcpad];
    link->dstpad = &dst->input_pads[dstpad];
    link->type = src->output_pads[srcpad].type;
    /* -1 doubles as "unset" for both pixel and sample formats, which the
     * assertion verifies at runtime. */
    av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
    link->format = -1;
    ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);

    return 0;
}
173 | |
174 | void avfilter_link_free(AVFilterLink **link) |
175 | { |
176 | if (!*link) |
177 | return; |
178 | |
179 | av_frame_free(&(*link)->partial_buf); |
180 | ff_framequeue_free(&(*link)->fifo); |
181 | ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool); |
182 | |
183 | av_freep(link); |
184 | } |
185 | |
/* Public accessor for the channel count negotiated on an (audio) link. */
int avfilter_link_get_channels(AVFilterLink *link)
{
    return link->channels;
}
190 | |
191 | void ff_filter_set_ready(AVFilterContext *filter, unsigned priority) |
192 | { |
193 | filter->ready = FFMAX(filter->ready, priority); |
194 | } |
195 | |
196 | /** |
197 | * Clear frame_blocked_in on all outputs. |
198 | * This is necessary whenever something changes on input. |
199 | */ |
200 | static void filter_unblock(AVFilterContext *filter) |
201 | { |
202 | unsigned i; |
203 | |
204 | for (i = 0; i < filter->nb_outputs; i++) |
205 | filter->outputs[i]->frame_blocked_in = 0; |
206 | } |
207 | |
208 | |
/**
 * Set the status (normally an AVERROR code such as AVERROR_EOF) on the
 * input side of a link.  A status may only be set once (asserted); setting
 * the same status twice is a no-op.  Clears pending frame requests and
 * blockage and wakes the destination filter so it can acknowledge.
 */
void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
{
    if (link->status_in == status)
        return;
    av_assert0(!link->status_in);
    link->status_in = status;
    link->status_in_pts = pts;
    link->frame_wanted_out = 0;
    link->frame_blocked_in = 0;
    filter_unblock(link->dst);
    /* 200: higher priority than a plain frame request (100). */
    ff_filter_set_ready(link->dst, 200);
}
221 | |
/**
 * Set the status on the output side of a link, i.e. acknowledge that the
 * destination has seen the end of the stream.  Must not be called while a
 * frame is still wanted or after a status was already set (asserted).
 * An optional pts updates the link's current timestamp.
 */
void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
{
    av_assert0(!link->frame_wanted_out);
    av_assert0(!link->status_out);
    link->status_out = status;
    if (pts != AV_NOPTS_VALUE)
        ff_update_link_current_pts(link, pts);
    filter_unblock(link->dst);
    /* Wake the source: it no longer needs to produce for this link. */
    ff_filter_set_ready(link->src, 200);
}
232 | |
/* Legacy public wrapper: "closing" a link maps to setting AVERROR_EOF
 * (or 0 to clear) on its output side, with no timestamp update. */
void avfilter_link_set_closed(AVFilterLink *link, int closed)
{
    ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
}
237 | |
/**
 * Insert filter filt in the middle of an existing link: the link's
 * original destination is re-attached to filt's output filt_dstpad_idx,
 * and the link itself is re-pointed at filt's input filt_srcpad_idx.
 * On failure the original link is restored.
 * @return 0 on success, a negative AVERROR code otherwise
 */
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
                           unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
{
    int ret;
    /* Recover the pad index from the pad pointer by array arithmetic. */
    unsigned dstpad_idx = link->dstpad - link->dst->input_pads;

    av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
           "between the filter '%s' and the filter '%s'\n",
           filt->name, link->src->name, link->dst->name);

    /* Temporarily detach the old destination so avfilter_link() sees a
     * free input pad there. */
    link->dst->inputs[dstpad_idx] = NULL;
    if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
        /* failed to link output filter to new filter */
        link->dst->inputs[dstpad_idx] = link;
        return ret;
    }

    /* re-hookup the link to the new destination filter we inserted */
    link->dst = filt;
    link->dstpad = &filt->input_pads[filt_srcpad_idx];
    filt->inputs[filt_srcpad_idx] = link;

    /* if any information on supported media formats already exists on the
     * link, we need to preserve that */
    if (link->out_formats)
        ff_formats_changeref(&link->out_formats,
                             &filt->outputs[filt_dstpad_idx]->out_formats);
    if (link->out_samplerates)
        ff_formats_changeref(&link->out_samplerates,
                             &filt->outputs[filt_dstpad_idx]->out_samplerates);
    if (link->out_channel_layouts)
        ff_channel_layouts_changeref(&link->out_channel_layouts,
                                     &filt->outputs[filt_dstpad_idx]->out_channel_layouts);

    return 0;
}
274 | |
/**
 * Configure all input links of a filter, recursing upstream so that every
 * source is configured before its consumers.  Each link goes through the
 * init_state machine: UNINIT -> STARTINIT (in progress, used to detect
 * cycles) -> INIT (done).  Runs the source pad's config_props, inherits
 * unset properties (time base, SAR, frame rate, dimensions) from the
 * source filter's first input, propagates hw_frames_ctx for filters that
 * are not hwframe-aware, then runs the destination pad's config_props.
 * @return 0 on success, a negative AVERROR code on failure
 */
int avfilter_config_links(AVFilterContext *filter)
{
    int (*config_link)(AVFilterLink *);
    unsigned i;
    int ret;

    for (i = 0; i < filter->nb_inputs; i ++) {
        AVFilterLink *link = filter->inputs[i];
        AVFilterLink *inlink;

        if (!link) continue;
        if (!link->src || !link->dst) {
            av_log(filter, AV_LOG_ERROR,
                   "Not all input and output are properly linked (%d).\n", i);
            return AVERROR(EINVAL);
        }

        /* Properties default to those of the source filter's first input. */
        inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
        link->current_pts =
        link->current_pts_us = AV_NOPTS_VALUE;

        switch (link->init_state) {
        case AVLINK_INIT:
            continue;
        case AVLINK_STARTINIT:
            /* Already being configured further up the recursion: cycle. */
            av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
            return 0;
        case AVLINK_UNINIT:
            link->init_state = AVLINK_STARTINIT;

            /* Configure upstream first so inherited values are final. */
            if ((ret = avfilter_config_links(link->src)) < 0)
                return ret;

            if (!(config_link = link->srcpad->config_props)) {
                if (link->src->nb_inputs != 1) {
                    av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
                                                    "with more than one input "
                                                    "must set config_props() "
                                                    "callbacks on all outputs\n");
                    return AVERROR(EINVAL);
                }
            } else if ((ret = config_link(link)) < 0) {
                av_log(link->src, AV_LOG_ERROR,
                       "Failed to configure output pad on %s\n",
                       link->src->name);
                return ret;
            }

            switch (link->type) {
            case AVMEDIA_TYPE_VIDEO:
                if (!link->time_base.num && !link->time_base.den)
                    link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;

                if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
                    link->sample_aspect_ratio = inlink ?
                        inlink->sample_aspect_ratio : (AVRational){1,1};

                if (inlink) {
                    if (!link->frame_rate.num && !link->frame_rate.den)
                        link->frame_rate = inlink->frame_rate;
                    if (!link->w)
                        link->w = inlink->w;
                    if (!link->h)
                        link->h = inlink->h;
                } else if (!link->w || !link->h) {
                    av_log(link->src, AV_LOG_ERROR,
                           "Video source filters must set their output link's "
                           "width and height\n");
                    return AVERROR(EINVAL);
                }
                break;

            case AVMEDIA_TYPE_AUDIO:
                if (inlink) {
                    if (!link->time_base.num && !link->time_base.den)
                        link->time_base = inlink->time_base;
                }

                /* Last resort for audio: one tick per sample. */
                if (!link->time_base.num && !link->time_base.den)
                    link->time_base = (AVRational) {1, link->sample_rate};
            }

            /* Pass hardware frames context through filters that do not
             * declare themselves hwframe-aware. */
            if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
                !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
                av_assert0(!link->hw_frames_ctx &&
                           "should not be set by non-hwframe-aware filter");
                link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
                if (!link->hw_frames_ctx)
                    return AVERROR(ENOMEM);
            }

            if ((config_link = link->dstpad->config_props))
                if ((ret = config_link(link)) < 0) {
                    av_log(link->dst, AV_LOG_ERROR,
                           "Failed to configure input pad on %s\n",
                           link->dst->name);
                    return ret;
                }

            link->init_state = AVLINK_INIT;
        }
    }

    return 0;
}
380 | |
/**
 * Trace-log the negotiated properties of a link: size/format and endpoint
 * filter names for video, sample rate/channel layout/format for audio.
 * @param end if nonzero, terminate the trace line with '\n'
 */
void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
{
    if (link->type == AVMEDIA_TYPE_VIDEO) {
        ff_tlog(ctx,
                "link[%p s:%dx%d fmt:%s %s->%s]%s",
                link, link->w, link->h,
                av_get_pix_fmt_name(link->format),
                link->src ? link->src->filter->name : "",
                link->dst ? link->dst->filter->name : "",
                end ? "\n" : "");
    } else {
        char buf[128];
        av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);

        ff_tlog(ctx,
                "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
                link, (int)link->sample_rate, buf,
                av_get_sample_fmt_name(link->format),
                link->src ? link->src->filter->name : "",
                link->dst ? link->dst->filter->name : "",
                end ? "\n" : "");
    }
}
404 | |
/**
 * Request a frame on a link (legacy, non-activate scheduling only —
 * asserted).  If the stream already ended on the output side, return that
 * status; if it ended on the input side, either let queued frames drain
 * or forward the status to the output side.  Otherwise mark the frame as
 * wanted and wake the source filter.
 * @return 0 on success or a negative status code
 */
int ff_request_frame(AVFilterLink *link)
{
    FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);

    av_assert1(!link->dst->filter->activate);
    if (link->status_out)
        return link->status_out;
    if (link->status_in) {
        if (ff_framequeue_queued_frames(&link->fifo)) {
            /* Frames remain buffered: the destination is already ready to
             * consume them, no new request is needed. */
            av_assert1(!link->frame_wanted_out);
            av_assert1(link->dst->ready >= 300);
            return 0;
        } else {
            /* Acknowledge status change. Filters using ff_request_frame() will
               handle the change automatically. Filters can also check the
               status directly but none do yet. */
            ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
            return link->status_out;
        }
    }
    link->frame_wanted_out = 1;
    ff_filter_set_ready(link->src, 100);
    return 0;
}
429 | |
/**
 * Forward a frame request into the source filter of a link, either via
 * its request_frame callback or, failing that, by requesting from the
 * filter's own first input.  Errors other than EAGAIN are recorded as the
 * link's input status; EOF is translated to 0 since end-of-stream is a
 * normal outcome for the caller.
 */
static int ff_request_frame_to_filter(AVFilterLink *link)
{
    int ret = -1;

    FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
    /* Assume the filter is blocked, let the method clear it if not */
    link->frame_blocked_in = 1;
    if (link->srcpad->request_frame)
        ret = link->srcpad->request_frame(link);
    else if (link->src->inputs[0])
        ret = ff_request_frame(link->src->inputs[0]);
    if (ret < 0) {
        if (ret != AVERROR(EAGAIN) && ret != link->status_in)
            ff_avfilter_link_set_in_status(link, ret, AV_NOPTS_VALUE);
        if (ret == AVERROR_EOF)
            ret = 0;
    }
    return ret;
}
449 | |
/**
 * Poll how many frames are immediately available on a link: delegate to
 * the source pad's poll_frame callback if present, otherwise recurse into
 * all of the source filter's inputs and return the minimum.
 * NOTE(review): for a source filter with no inputs and no poll_frame
 * callback this returns INT_MAX — presumably callers never hit that
 * combination; verify before relying on it.
 */
int ff_poll_frame(AVFilterLink *link)
{
    int i, min = INT_MAX;

    if (link->srcpad->poll_frame)
        return link->srcpad->poll_frame(link);

    for (i = 0; i < link->src->nb_inputs; i++) {
        int val;
        if (!link->src->inputs[i])
            return AVERROR(EINVAL);
        val = ff_poll_frame(link->src->inputs[i]);
        min = FFMIN(min, val);
    }

    return min;
}
467 | |
/* Variables available in a filter's timeline ("enable") expression.
 * The order must match the VAR_* enum below, which indexes the
 * AVFilterContext.var_values array. */
static const char *const var_names[] = {
    "t",   // frame timestamp, in seconds
    "n",   // frame count
    "pos", // byte position of the frame in its input file
    "w",   // frame width (video)
    "h",   // frame height (video)
    NULL
};

enum {
    VAR_T,
    VAR_N,
    VAR_POS,
    VAR_W,
    VAR_H,
    VAR_VARS_NB  // number of variables / size of var_values
};
485 | |
/**
 * Parse and install a new timeline "enable" expression on a filter.
 * The filter must declare AVFILTER_FLAG_SUPPORT_TIMELINE.  The var_values
 * array is allocated lazily on first use.  The previous expression and
 * string are replaced only after a successful parse, so a failed update
 * leaves the filter's state untouched.
 * @return 0 on success, a negative AVERROR code otherwise
 */
static int set_enable_expr(AVFilterContext *ctx, const char *expr)
{
    int ret;
    char *expr_dup;
    AVExpr *old = ctx->enable;

    if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
        av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
               "with filter '%s'\n", ctx->filter->name);
        return AVERROR_PATCHWELCOME;
    }

    expr_dup = av_strdup(expr);
    if (!expr_dup)
        return AVERROR(ENOMEM);

    if (!ctx->var_values) {
        ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
        if (!ctx->var_values) {
            av_free(expr_dup);
            return AVERROR(ENOMEM);
        }
    }

    /* On failure av_expr_parse() leaves *expr untouched, so ctx->enable
     * still holds the old (valid) expression on the error path. */
    ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
                        NULL, NULL, NULL, NULL, 0, ctx->priv);
    if (ret < 0) {
        av_log(ctx->priv, AV_LOG_ERROR,
               "Error when evaluating the expression '%s' for enable\n",
               expr_dup);
        av_free(expr_dup);
        return ret;
    }

    av_expr_free(old);
    av_free(ctx->enable_str);
    ctx->enable_str = expr_dup;
    return 0;
}
525 | |
/**
 * Record the latest timestamp seen on a link, both in the link's own time
 * base and rescaled to microseconds, and re-sort the graph's request heap
 * if the link participates in it.  AV_NOPTS_VALUE is ignored.
 */
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
{
    if (pts == AV_NOPTS_VALUE)
        return;
    link->current_pts = pts;
    link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
    /* TODO use duration */
    if (link->graph && link->age_index >= 0)
        ff_avfilter_graph_update_heap(link->graph, link);
}
536 | |
537 | int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags) |
538 | { |
539 | if(!strcmp(cmd, "ping")){ |
540 | char local_res[256] = {0}; |
541 | |
542 | if (!res) { |
543 | res = local_res; |
544 | res_len = sizeof(local_res); |
545 | } |
546 | av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name); |
547 | if (res == local_res) |
548 | av_log(filter, AV_LOG_INFO, "%s", res); |
549 | return 0; |
550 | }else if(!strcmp(cmd, "enable")) { |
551 | return set_enable_expr(filter, arg); |
552 | }else if(filter->filter->process_command) { |
553 | return filter->filter->process_command(filter, cmd, arg, res, res_len, flags); |
554 | } |
555 | return AVERROR(ENOSYS); |
556 | } |
557 | |
/* Head of the global singly-linked list of registered filters, and a
 * pointer to the `next` field where the next registration is appended. */
static AVFilter *first_filter;
static AVFilter **last_filter = &first_filter;
560 | |
561 | #if !FF_API_NOCONST_GET_NAME |
562 | const |
563 | #endif |
564 | AVFilter *avfilter_get_by_name(const char *name) |
565 | { |
566 | const AVFilter *f = NULL; |
567 | |
568 | if (!name) |
569 | return NULL; |
570 | |
571 | while ((f = avfilter_next(f))) |
572 | if (!strcmp(f->name, name)) |
573 | return (AVFilter *)f; |
574 | |
575 | return NULL; |
576 | } |
577 | |
/**
 * Append a filter to the global registry.  Thread-safe via a lock-free
 * append: walk from the cached tail and compare-and-swap the filter into
 * the first NULL `next` slot, retrying past entries other threads insert
 * concurrently.
 * @return 0 (always succeeds)
 */
int avfilter_register(AVFilter *filter)
{
    AVFilter **f = last_filter;

    /* the filter must select generic or internal exclusively */
    av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);

    filter->next = NULL;

    /* CAS publishes the new tail; on contention, advance and retry. */
    while(*f || avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
        f = &(*f)->next;
    last_filter = &filter->next;

    return 0;
}
593 | |
594 | const AVFilter *avfilter_next(const AVFilter *prev) |
595 | { |
596 | return prev ? prev->next : first_filter; |
597 | } |
598 | |
#if FF_API_OLD_FILTER_REGISTER
/* Deprecated registry iterator kept for ABI compatibility; exposes the
 * internal next-pointer chain directly. */
AVFilter **av_filter_next(AVFilter **filter)
{
    return filter ? &(*filter)->next : &first_filter;
}

/* Deprecated no-op: the filter registry needs no global teardown. */
void avfilter_uninit(void)
{
}
#endif
609 | |
610 | int avfilter_pad_count(const AVFilterPad *pads) |
611 | { |
612 | int count; |
613 | |
614 | if (!pads) |
615 | return 0; |
616 | |
617 | for (count = 0; pads->name; count++) |
618 | pads++; |
619 | return count; |
620 | } |
621 | |
/* AVClass item_name callback: prefer the instance name, fall back to the
 * filter's type name when the context is unnamed. */
static const char *default_filter_name(void *filter_ctx)
{
    AVFilterContext *ctx = filter_ctx;
    return ctx->name ? ctx->name : ctx->filter->name;
}
627 | |
628 | static void *filter_child_next(void *obj, void *prev) |
629 | { |
630 | AVFilterContext *ctx = obj; |
631 | if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv) |
632 | return ctx->priv; |
633 | return NULL; |
634 | } |
635 | |
/* AVClass child_class_next callback: iterate over the priv_class of every
 * registered filter, resuming after the filter whose class is prev. */
static const AVClass *filter_child_class_next(const AVClass *prev)
{
    const AVFilter *f = NULL;

    /* find the filter that corresponds to prev */
    while (prev && (f = avfilter_next(f)))
        if (f->priv_class == prev)
            break;

    /* could not find filter corresponding to prev */
    if (prev && !f)
        return NULL;

    /* find next filter with specific options */
    while ((f = avfilter_next(f)))
        if (f->priv_class)
            return f->priv_class;

    return NULL;
}
656 | |
#define OFFSET(x) offsetof(AVFilterContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
/* Generic AVOptions shared by every filter context (threading, timeline
 * enable expression); filter-specific options live in each priv_class. */
static const AVOption avfilter_options[] = {
    { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
      { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
    { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
    { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
    { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
      { .i64 = 0 }, 0, INT_MAX, FLAGS },  // 0 = use the graph's thread count
    { NULL },
};
668 | |
/* AVClass describing AVFilterContext for the AVOptions/logging systems;
 * child callbacks expose each filter's private options. */
static const AVClass avfilter_class = {
    .class_name = "AVFilter",
    .item_name  = default_filter_name,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
    .child_next = filter_child_next,
    .child_class_next = filter_child_class_next,
    .option           = avfilter_options,
};
678 | |
679 | static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg, |
680 | int *ret, int nb_jobs) |
681 | { |
682 | int i; |
683 | |
684 | for (i = 0; i < nb_jobs; i++) { |
685 | int r = func(ctx, arg, i, nb_jobs); |
686 | if (ret) |
687 | ret[i] = r; |
688 | } |
689 | return 0; |
690 | } |
691 | |
/**
 * Allocate and minimally initialize a filter context for the given filter
 * type: private data, option defaults, internal state with the default
 * (sequential) execute callback, and per-pad arrays copied from the
 * filter's static pad descriptions.
 * @param inst_name optional instance name (duplicated)
 * @return the new context, or NULL on allocation failure
 */
AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
{
    AVFilterContext *ret;

    if (!filter)
        return NULL;

    ret = av_mallocz(sizeof(AVFilterContext));
    if (!ret)
        return NULL;

    ret->av_class = &avfilter_class;
    ret->filter   = filter;
    ret->name     = inst_name ? av_strdup(inst_name) : NULL;
    if (filter->priv_size) {
        ret->priv     = av_mallocz(filter->priv_size);
        if (!ret->priv)
            goto err;
    }

    av_opt_set_defaults(ret);
    if (filter->priv_class) {
        /* The private struct's first member must be the AVClass pointer
         * for the options/logging systems to work on it. */
        *(const AVClass**)ret->priv = filter->priv_class;
        av_opt_set_defaults(ret->priv);
    }

    ret->internal = av_mallocz(sizeof(*ret->internal));
    if (!ret->internal)
        goto err;
    ret->internal->execute = default_execute;

    /* Per-instance copies of the pads, plus NULL-initialized link slots. */
    ret->nb_inputs = avfilter_pad_count(filter->inputs);
    if (ret->nb_inputs ) {
        ret->input_pads   = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
        if (!ret->input_pads)
            goto err;
        memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
        ret->inputs       = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
        if (!ret->inputs)
            goto err;
    }

    ret->nb_outputs = avfilter_pad_count(filter->outputs);
    if (ret->nb_outputs) {
        ret->output_pads  = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
        if (!ret->output_pads)
            goto err;
        memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
        ret->outputs      = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
        if (!ret->outputs)
            goto err;
    }

    return ret;

err:
    /* Unified cleanup: av_freep() tolerates fields never allocated. */
    av_freep(&ret->inputs);
    av_freep(&ret->input_pads);
    ret->nb_inputs = 0;
    av_freep(&ret->outputs);
    av_freep(&ret->output_pads);
    ret->nb_outputs = 0;
    av_freep(&ret->priv);
    av_freep(&ret->internal);
    av_free(ret);
    return NULL;
}
759 | |
#if FF_API_AVFILTER_OPEN
/* Deprecated public constructor; thin wrapper over ff_filter_alloc(). */
int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name)
{
    *filter_ctx = ff_filter_alloc(filter, inst_name);
    return *filter_ctx ? 0 : AVERROR(ENOMEM);
}
#endif
767 | |
/* Detach a link from both of its endpoint filters (clearing their slot
 * pointers), drop all format/samplerate/channel-layout negotiation
 * references and the hw frames context, then free the link itself. */
static void free_link(AVFilterLink *link)
{
    if (!link)
        return;

    /* Pad-pointer arithmetic recovers the slot index at each endpoint. */
    if (link->src)
        link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
    if (link->dst)
        link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;

    av_buffer_unref(&link->hw_frames_ctx);

    ff_formats_unref(&link->in_formats);
    ff_formats_unref(&link->out_formats);
    ff_formats_unref(&link->in_samplerates);
    ff_formats_unref(&link->out_samplerates);
    ff_channel_layouts_unref(&link->in_channel_layouts);
    ff_channel_layouts_unref(&link->out_channel_layouts);
    avfilter_link_free(&link);
}
788 | |
/**
 * Destroy a filter context: detach it from its graph, run the filter's
 * uninit callback, free every link, and release options, pads, pending
 * commands, the enable expression and all owned memory.
 * A NULL filter is a no-op.
 */
void avfilter_free(AVFilterContext *filter)
{
    int i;

    if (!filter)
        return;

    if (filter->graph)
        ff_filter_graph_remove_filter(filter->graph, filter);

    /* Let the filter release its own resources before links go away. */
    if (filter->filter->uninit)
        filter->filter->uninit(filter);

    for (i = 0; i < filter->nb_inputs; i++) {
        free_link(filter->inputs[i]);
    }
    for (i = 0; i < filter->nb_outputs; i++) {
        free_link(filter->outputs[i]);
    }

    if (filter->filter->priv_class)
        av_opt_free(filter->priv);

    av_buffer_unref(&filter->hw_device_ctx);

    av_freep(&filter->name);
    av_freep(&filter->input_pads);
    av_freep(&filter->output_pads);
    av_freep(&filter->inputs);
    av_freep(&filter->outputs);
    av_freep(&filter->priv);
    while(filter->command_queue){
        ff_command_queue_pop(filter);
    }
    av_opt_free(filter);
    av_expr_free(filter->enable);
    filter->enable = NULL;
    av_freep(&filter->var_values);
    av_freep(&filter->internal);
    av_free(filter);
}
830 | |
831 | int ff_filter_get_nb_threads(AVFilterContext *ctx) |
832 | { |
833 | if (ctx->nb_threads > 0) |
834 | return FFMIN(ctx->nb_threads, ctx->graph->nb_threads); |
835 | return ctx->graph->nb_threads; |
836 | } |
837 | |
/**
 * Parse a filter argument string of the form "v1:v2:key=v3:...".
 * Leading values without "key=" are matched positionally ("shorthand")
 * against the private context's options in declaration order; once an
 * explicit key appears, all remaining shorthand is discarded.  Options
 * recognized on the generic filter context are applied there; the rest
 * are applied to the private context and also collected into *options.
 * @return the number of options parsed, or a negative AVERROR code
 */
static int process_options(AVFilterContext *ctx, AVDictionary **options,
                           const char *args)
{
    const AVOption *o = NULL;
    int ret, count = 0;
    char *av_uninit(parsed_key), *av_uninit(value);
    const char *key;
    int offset= -1;

    if (!args)
        return 0;

    while (*args) {
        const char *shorthand = NULL;

        /* Advance to the next distinct option for shorthand matching,
         * skipping constants and aliases sharing the previous offset. */
        o = av_opt_next(ctx->priv, o);
        if (o) {
            if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
                continue;
            offset = o->offset;
            shorthand = o->name;
        }

        ret = av_opt_get_key_value(&args, "=", ":",
                                   shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
                                   &parsed_key, &value);
        if (ret < 0) {
            if (ret == AVERROR(EINVAL))
                av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
            else
                av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
                       av_err2str(ret));
            return ret;
        }
        if (*args)
            args++;
        if (parsed_key) {
            key = parsed_key;
            while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
        } else {
            key = shorthand;
        }

        av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);

        if (av_opt_find(ctx, key, NULL, 0, 0)) {
            /* Generic AVFilterContext option (thread_type, enable, ...). */
            ret = av_opt_set(ctx, key, value, 0);
            if (ret < 0) {
                av_free(value);
                av_free(parsed_key);
                return ret;
            }
        } else {
            av_dict_set(options, key, value, 0);
            if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
                /* Tolerate failures for options that a child object might
                 * still accept later; hard-fail only if truly unknown. */
                if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
                    if (ret == AVERROR_OPTION_NOT_FOUND)
                        av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
                    av_free(value);
                    av_free(parsed_key);
                    return ret;
                }
            }
        }

        av_free(value);
        av_free(parsed_key);
        count++;
    }

    if (ctx->enable_str) {
        ret = set_enable_expr(ctx, ctx->enable_str);
        if (ret < 0)
            return ret;
    }
    return count;
}
915 | |
#if FF_API_AVFILTER_INIT_FILTER
/* Deprecated initializer; opaque is ignored, delegates to the string
 * based API. */
int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque)
{
    return avfilter_init_str(filter, args);
}
#endif
922 | |
/**
 * Initialize a filter from an options dictionary: apply generic options
 * to the context, decide whether slice threading can be used (requires
 * the filter, the graph and the context to all allow it and the graph to
 * provide a thread_execute callback), apply remaining options to the
 * private context, then run the filter's init callback (init_opaque,
 * init or init_dict, in that precedence order).
 * @return 0 on success, a negative AVERROR code otherwise
 */
int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
{
    int ret = 0;

    ret = av_opt_set_dict(ctx, options);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
        return ret;
    }

    if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
        ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
        ctx->graph->internal->thread_execute) {
        ctx->thread_type = AVFILTER_THREAD_SLICE;
        ctx->internal->execute = ctx->graph->internal->thread_execute;
    } else {
        ctx->thread_type = 0;
    }

    if (ctx->filter->priv_class) {
        ret = av_opt_set_dict(ctx->priv, options);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
            return ret;
        }
    }

    if (ctx->filter->init_opaque)
        ret = ctx->filter->init_opaque(ctx, NULL);
    else if (ctx->filter->init)
        ret = ctx->filter->init(ctx);
    else if (ctx->filter->init_dict)
        ret = ctx->filter->init_dict(ctx, options);

    return ret;
}
959 | |
960 | int avfilter_init_str(AVFilterContext *filter, const char *args) |
961 | { |
962 | AVDictionary *options = NULL; |
963 | AVDictionaryEntry *e; |
964 | int ret = 0; |
965 | |
966 | if (args && *args) { |
967 | if (!filter->filter->priv_class) { |
968 | av_log(filter, AV_LOG_ERROR, "This filter does not take any " |
969 | "options, but options were provided: %s.\n", args); |
970 | return AVERROR(EINVAL); |
971 | } |
972 | |
973 | #if FF_API_OLD_FILTER_OPTS || FF_API_OLD_FILTER_OPTS_ERROR |
974 | if ( !strcmp(filter->filter->name, "format") || |
975 | !strcmp(filter->filter->name, "noformat") || |
976 | !strcmp(filter->filter->name, "frei0r") || |
977 | !strcmp(filter->filter->name, "frei0r_src") || |
978 | !strcmp(filter->filter->name, "ocv") || |
979 | !strcmp(filter->filter->name, "pan") || |
980 | !strcmp(filter->filter->name, "pp") || |
981 | !strcmp(filter->filter->name, "aevalsrc")) { |
982 | /* a hack for compatibility with the old syntax |
983 | * replace colons with |s */ |
984 | char *copy = av_strdup(args); |
985 | char *p = copy; |
986 | int nb_leading = 0; // number of leading colons to skip |
987 | int deprecated = 0; |
988 | |
989 | if (!copy) { |
990 | ret = AVERROR(ENOMEM); |
991 | goto fail; |
992 | } |
993 | |
994 | if (!strcmp(filter->filter->name, "frei0r") || |
995 | !strcmp(filter->filter->name, "ocv")) |
996 | nb_leading = 1; |
997 | else if (!strcmp(filter->filter->name, "frei0r_src")) |
998 | nb_leading = 3; |
999 | |
1000 | while (nb_leading--) { |
1001 | p = strchr(p, ':'); |
1002 | if (!p) { |
1003 | p = copy + strlen(copy); |
1004 | break; |
1005 | } |
1006 | p++; |
1007 | } |
1008 | |
1009 | deprecated = strchr(p, ':') != NULL; |
1010 | |
1011 | if (!strcmp(filter->filter->name, "aevalsrc")) { |
1012 | deprecated = 0; |
1013 | while ((p = strchr(p, ':')) && p[1] != ':') { |
1014 | const char *epos = strchr(p + 1, '='); |
1015 | const char *spos = strchr(p + 1, ':'); |
1016 | const int next_token_is_opt = epos && (!spos || epos < spos); |
1017 | if (next_token_is_opt) { |
1018 | p++; |
1019 | break; |
1020 | } |
1021 | /* next token does not contain a '=', assume a channel expression */ |
1022 | deprecated = 1; |
1023 | *p++ = '|'; |
1024 | } |
1025 | if (p && *p == ':') { // double sep '::' found |
1026 | deprecated = 1; |
1027 | memmove(p, p + 1, strlen(p)); |
1028 | } |
1029 | } else |
1030 | while ((p = strchr(p, ':'))) |
1031 | *p++ = '|'; |
1032 | |
1033 | #if FF_API_OLD_FILTER_OPTS |
1034 | if (deprecated) |
1035 | av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use " |
1036 | "'|' to separate the list items.\n"); |
1037 | |
1038 | av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy); |
1039 | ret = process_options(filter, &options, copy); |
1040 | #else |
1041 | if (deprecated) { |
1042 | av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use " |
1043 | "'|' to separate the list items ('%s' instead of '%s')\n", |
1044 | copy, args); |
1045 | ret = AVERROR(EINVAL); |
1046 | } else { |
1047 | ret = process_options(filter, &options, copy); |
1048 | } |
1049 | #endif |
1050 | av_freep(©); |
1051 | |
1052 | if (ret < 0) |
1053 | goto fail; |
1054 | } else |
1055 | #endif |
1056 | { |
1057 | ret = process_options(filter, &options, args); |
1058 | if (ret < 0) |
1059 | goto fail; |
1060 | } |
1061 | } |
1062 | |
1063 | ret = avfilter_init_dict(filter, &options); |
1064 | if (ret < 0) |
1065 | goto fail; |
1066 | |
1067 | if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) { |
1068 | av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key); |
1069 | ret = AVERROR_OPTION_NOT_FOUND; |
1070 | goto fail; |
1071 | } |
1072 | |
1073 | fail: |
1074 | av_dict_free(&options); |
1075 | |
1076 | return ret; |
1077 | } |
1078 | |
1079 | const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx) |
1080 | { |
1081 | return pads[pad_idx].name; |
1082 | } |
1083 | |
1084 | enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx) |
1085 | { |
1086 | return pads[pad_idx].type; |
1087 | } |
1088 | |
1089 | static int default_filter_frame(AVFilterLink *link, AVFrame *frame) |
1090 | { |
1091 | return ff_filter_frame(link->dst->outputs[0], frame); |
1092 | } |
1093 | |
/* Deliver one frame to the destination pad of a link: make it writable if
 * the pad requires it, process pending commands, evaluate the timeline
 * (enable) expression, then invoke the pad's filter_frame() callback.
 * Ownership of frame is transferred; it is freed on the failure path. */
static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
{
    int (*filter_frame)(AVFilterLink *, AVFrame *);
    AVFilterContext *dstctx = link->dst;
    AVFilterPad *dst = link->dstpad;
    int ret;

    /* Pads without a filter_frame() callback get the pass-through default. */
    if (!(filter_frame = dst->filter_frame))
        filter_frame = default_filter_frame;

    if (dst->needs_writable) {
        ret = ff_inlink_make_frame_writable(link, &frame);
        if (ret < 0)
            goto fail;
    }

    ff_inlink_process_commands(link, frame);
    dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);

    /* A timeline-disabled filter with generic timeline support simply
       passes the frame through instead of processing it. */
    if (dstctx->is_disabled &&
        (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
        filter_frame = default_filter_frame;
    ret = filter_frame(link, frame);
    link->frame_count_out++;
    return ret;

fail:
    av_frame_free(&frame);
    return ret;
}
1124 | |
/* Send a frame from the source side of a link: validate its properties
 * against the negotiated link parameters, queue it in the link fifo and
 * mark the destination filter ready. Takes ownership of frame (freed on
 * error). Audio property changes are rejected; video mismatches are only
 * asserted, with a whitelist of filters known to handle them. */
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
{
    int ret;
    FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);

    /* Consistency checks */
    if (link->type == AVMEDIA_TYPE_VIDEO) {
        /* These filters are known to accept mid-stream parameter changes,
           so they are exempt from the assertions below. */
        if (strcmp(link->dst->filter->name, "buffersink") &&
            strcmp(link->dst->filter->name, "format") &&
            strcmp(link->dst->filter->name, "idet") &&
            strcmp(link->dst->filter->name, "null") &&
            strcmp(link->dst->filter->name, "scale")) {
            av_assert1(frame->format == link->format);
            av_assert1(frame->width == link->w);
            av_assert1(frame->height == link->h);
        }
    } else {
        if (frame->format != link->format) {
            av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
            goto error;
        }
        if (av_frame_get_channels(frame) != link->channels) {
            av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
            goto error;
        }
        if (frame->channel_layout != link->channel_layout) {
            av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
            goto error;
        }
        if (frame->sample_rate != link->sample_rate) {
            av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
            goto error;
        }
    }

    /* A frame arriving satisfies any pending request and unblocks the link. */
    link->frame_blocked_in = link->frame_wanted_out = 0;
    link->frame_count_in++;
    filter_unblock(link->dst);
    ret = ff_framequeue_add(&link->fifo, frame);
    if (ret < 0) {
        av_frame_free(&frame);
        return ret;
    }
    ff_filter_set_ready(link->dst, 300);
    return 0;

error:
    av_frame_free(&frame);
    return AVERROR_PATCHWELCOME;
}
1175 | |
1176 | static int samples_ready(AVFilterLink *link, unsigned min) |
1177 | { |
1178 | return ff_framequeue_queued_frames(&link->fifo) && |
1179 | (ff_framequeue_queued_samples(&link->fifo) >= min || |
1180 | link->status_in); |
1181 | } |
1182 | |
/* Remove between min and max samples from the link fifo and return them in
 * *rframe, merging whole frames and splitting a partial one if necessary.
 * Returns 0 on success, a negative AVERROR on allocation failure. */
static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
                        AVFrame **rframe)
{
    AVFrame *frame0, *frame, *buf;
    unsigned nb_samples, nb_frames, i, p;
    int ret;

    /* Note: this function relies on no format changes and must only be
       called with enough samples. */
    av_assert1(samples_ready(link, link->min_samples));
    frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
    /* Fast path: the first queued frame alone fits the request. */
    if (frame->nb_samples >= min && frame->nb_samples < max) {
        *rframe = ff_framequeue_take(&link->fifo);
        return 0;
    }
    /* Count how many whole frames fit within max samples. */
    nb_frames = 0;
    nb_samples = 0;
    while (1) {
        if (nb_samples + frame->nb_samples > max) {
            /* Whole frames alone do not reach min: take exactly max samples,
               splitting the next frame (tail copy below). */
            if (nb_samples < min)
                nb_samples = max;
            break;
        }
        nb_samples += frame->nb_samples;
        nb_frames++;
        if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
            break;
        frame = ff_framequeue_peek(&link->fifo, nb_frames);
    }

    buf = ff_get_audio_buffer(link, nb_samples);
    if (!buf)
        return AVERROR(ENOMEM);
    ret = av_frame_copy_props(buf, frame0);
    if (ret < 0) {
        av_frame_free(&buf);
        return ret;
    }
    buf->pts = frame0->pts;

    /* Copy the whole frames into the output buffer. */
    p = 0;
    for (i = 0; i < nb_frames; i++) {
        frame = ff_framequeue_take(&link->fifo);
        av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
                        frame->nb_samples, link->channels, link->format);
        p += frame->nb_samples;
        av_frame_free(&frame);
    }
    /* Copy the remaining samples from the beginning of the next frame and
       skip them in the queue so they are not returned twice. */
    if (p < nb_samples) {
        unsigned n = nb_samples - p;
        frame = ff_framequeue_peek(&link->fifo, 0);
        av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
                        link->channels, link->format);
        ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
    }

    *rframe = buf;
    return 0;
}
1242 | |
/* Consume one frame (or batch of samples) from a link's fifo and deliver it
 * to the destination filter through the legacy filter_frame() path. */
static int ff_filter_frame_to_filter(AVFilterLink *link)
{
    AVFrame *frame = NULL;
    AVFilterContext *dst = link->dst;
    int ret;

    av_assert1(ff_framequeue_queued_frames(&link->fifo));
    /* Audio links with min_samples consume by sample count, others by frame. */
    ret = link->min_samples ?
          ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
          ff_inlink_consume_frame(link, &frame);
    /* Frames were queued, so consumption cannot legitimately return 0. */
    av_assert1(ret);
    if (ret < 0) {
        av_assert1(!frame);
        return ret;
    }
    /* The filter will soon have received a new frame, that may allow it to
       produce one or more: unblock its outputs. */
    filter_unblock(dst);
    /* AVFilterPad.filter_frame() expect frame_count_out to have the value
       before the frame; ff_filter_frame_framed() will re-increment it. */
    link->frame_count_out--;
    ret = ff_filter_frame_framed(link, frame);
    if (ret < 0 && ret != link->status_out) {
        ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
    } else {
        /* Run once again, to see if several frames were available, or if
           the input status has also changed, or any other reason. */
        ff_filter_set_ready(dst, 300);
    }
    return ret;
}
1274 | |
/* Propagate an input status change (EOF/error) of a legacy filter by
 * requesting frames on its outputs, round-robin, until the input status is
 * acknowledged (status_out set) or every output is itself closed. */
static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
{
    unsigned out = 0, progress = 0;
    int ret;

    av_assert0(!in->status_out);
    if (!filter->nb_outputs) {
        /* not necessary with the current API and sinks */
        return 0;
    }
    while (!in->status_out) {
        /* Only request on outputs that are still open. */
        if (!filter->outputs[out]->status_in) {
            progress++;
            ret = ff_request_frame_to_filter(filter->outputs[out]);
            if (ret < 0)
                return ret;
        }
        /* Wrapped around all outputs: check that at least one request was
           made this pass, otherwise we would loop forever. */
        if (++out == filter->nb_outputs) {
            if (!progress) {
                /* Every output already closed: input no longer interesting
                   (example: overlay in shortest mode, other input closed). */
                ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
                return 0;
            }
            progress = 0;
            out = 0;
        }
    }
    ff_filter_set_ready(filter, 200);
    return 0;
}
1306 | |
1307 | #define FFERROR_NOT_READY FFERRTAG('N','R','D','Y') |
1308 | |
1309 | static int ff_filter_activate_default(AVFilterContext *filter) |
1310 | { |
1311 | unsigned i; |
1312 | |
1313 | for (i = 0; i < filter->nb_inputs; i++) { |
1314 | if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) { |
1315 | return ff_filter_frame_to_filter(filter->inputs[i]); |
1316 | } |
1317 | } |
1318 | for (i = 0; i < filter->nb_inputs; i++) { |
1319 | if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) { |
1320 | av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo)); |
1321 | return forward_status_change(filter, filter->inputs[i]); |
1322 | } |
1323 | } |
1324 | for (i = 0; i < filter->nb_outputs; i++) { |
1325 | if (filter->outputs[i]->frame_wanted_out && |
1326 | !filter->outputs[i]->frame_blocked_in) { |
1327 | return ff_request_frame_to_filter(filter->outputs[i]); |
1328 | } |
1329 | } |
1330 | return FFERROR_NOT_READY; |
1331 | } |
1332 | |
1333 | /* |
1334 | Filter scheduling and activation |
1335 | |
1336 | When a filter is activated, it must: |
1337 | - if possible, output a frame; |
1338 | - else, if relevant, forward the input status change; |
1339 | - else, check outputs for wanted frames and forward the requests. |
1340 | |
1341 | The following AVFilterLink fields are used for activation: |
1342 | |
1343 | - frame_wanted_out: |
1344 | |
1345 | This field indicates if a frame is needed on this input of the |
1346 | destination filter. A positive value indicates that a frame is needed |
1347 | to process queued frames or internal data or to satisfy the |
1348 | application; a zero value indicates that a frame is not especially |
1349 | needed but could be processed anyway; a negative value indicates that a |
1350 | frame would just be queued. |
1351 | |
1352 | It is set by filters using ff_request_frame() or ff_request_no_frame(), |
1353 | when requested by the application through a specific API or when it is |
1354 | set on one of the outputs. |
1355 | |
1356 | It is cleared when a frame is sent from the source using |
1357 | ff_filter_frame(). |
1358 | |
1359 | It is also cleared when a status change is sent from the source using |
1360 | ff_avfilter_link_set_in_status(). |
1361 | |
1362 | - frame_blocked_in: |
1363 | |
1364 | This field means that the source filter can not generate a frame as is. |
1365 | Its goal is to avoid repeatedly calling the request_frame() method on |
1366 | the same link. |
1367 | |
1368 | It is set by the framework on all outputs of a filter before activating it. |
1369 | |
1370 | It is automatically cleared by ff_filter_frame(). |
1371 | |
1372 | It is also automatically cleared by ff_avfilter_link_set_in_status(). |
1373 | |
1374 | It is also cleared on all outputs (using filter_unblock()) when |
1375 | something happens on an input: processing a frame or changing the |
1376 | status. |
1377 | |
1378 | - fifo: |
1379 | |
1380 | Contains the frames queued on a filter input. If it contains frames and |
1381 | frame_wanted_out is not set, then the filter can be activated. If that |
     results in the filter not being able to use these frames, the filter
     must set frame_wanted_out to ask for more frames.
1384 | |
1385 | - status_in and status_in_pts: |
1386 | |
1387 | Status (EOF or error code) of the link and timestamp of the status |
1388 | change (in link time base, same as frames) as seen from the input of |
1389 | the link. The status change is considered happening after the frames |
1390 | queued in fifo. |
1391 | |
1392 | It is set by the source filter using ff_avfilter_link_set_in_status(). |
1393 | |
1394 | - status_out: |
1395 | |
1396 | Status of the link as seen from the output of the link. The status |
1397 | change is considered having already happened. |
1398 | |
1399 | It is set by the destination filter using |
1400 | ff_avfilter_link_set_out_status(). |
1401 | |
1402 | Filters are activated according to the ready field, set using the |
1403 | ff_filter_set_ready(). Eventually, a priority queue will be used. |
1404 | ff_filter_set_ready() is called whenever anything could cause progress to |
1405 | be possible. Marking a filter ready when it is not is not a problem, |
1406 | except for the small overhead it causes. |
1407 | |
1408 | Conditions that cause a filter to be marked ready are: |
1409 | |
1410 | - frames added on an input link; |
1411 | |
1412 | - changes in the input or output status of an input link; |
1413 | |
1414 | - requests for a frame on an output link; |
1415 | |
1416 | - after any actual processing using the legacy methods (filter_frame(), |
1417 | and request_frame() to acknowledge status changes), to run once more |
1418 | and check if enough input was present for several frames. |
1419 | |
   Examples of scenarios to consider:
1421 | |
1422 | - buffersrc: activate if frame_wanted_out to notify the application; |
1423 | activate when the application adds a frame to push it immediately. |
1424 | |
1425 | - testsrc: activate only if frame_wanted_out to produce and push a frame. |
1426 | |
1427 | - concat (not at stitch points): can process a frame on any output. |
1428 | Activate if frame_wanted_out on output to forward on the corresponding |
1429 | input. Activate when a frame is present on input to process it |
1430 | immediately. |
1431 | |
1432 | - framesync: needs at least one frame on each input; extra frames on the |
1433 | wrong input will accumulate. When a frame is first added on one input, |
1434 | set frame_wanted_out<0 on it to avoid getting more (would trigger |
1435 | testsrc) and frame_wanted_out>0 on the other to allow processing it. |
1436 | |
1437 | Activation of old filters: |
1438 | |
1439 | In order to activate a filter implementing the legacy filter_frame() and |
1440 | request_frame() methods, perform the first possible of the following |
1441 | actions: |
1442 | |
1443 | - If an input has frames in fifo and frame_wanted_out == 0, dequeue a |
1444 | frame and call filter_frame(). |
1445 | |
     Rationale: filter frames as soon as possible instead of leaving them
1447 | queued; frame_wanted_out < 0 is not possible since the old API does not |
1448 | set it nor provides any similar feedback; frame_wanted_out > 0 happens |
1449 | when min_samples > 0 and there are not enough samples queued. |
1450 | |
1451 | - If an input has status_in set but not status_out, try to call |
1452 | request_frame() on one of the outputs in the hope that it will trigger |
1453 | request_frame() on the input with status_in and acknowledge it. This is |
1454 | awkward and fragile, filters with several inputs or outputs should be |
1455 | updated to direct activation as soon as possible. |
1456 | |
1457 | - If an output has frame_wanted_out > 0 and not frame_blocked_in, call |
1458 | request_frame(). |
1459 | |
1460 | Rationale: checking frame_blocked_in is necessary to avoid requesting |
1461 | repeatedly on a blocked input if another is not blocked (example: |
1462 | [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2). |
1463 | |
1464 | TODO: respect needs_fifo and remove auto-inserted fifos. |
1465 | |
1466 | */ |
1467 | |
1468 | int ff_filter_activate(AVFilterContext *filter) |
1469 | { |
1470 | int ret; |
1471 | |
1472 | /* Generic timeline support is not yet implemented but should be easy */ |
1473 | av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC && |
1474 | filter->filter->activate)); |
1475 | filter->ready = 0; |
1476 | ret = filter->filter->activate ? filter->filter->activate(filter) : |
1477 | ff_filter_activate_default(filter); |
1478 | if (ret == FFERROR_NOT_READY) |
1479 | ret = 0; |
1480 | return ret; |
1481 | } |
1482 | |
/* Test and acknowledge a pending input status change on a link.
 * Returns 1 (setting *rstatus and *rpts) when the change is acknowledged
 * now, 0 when there is nothing to acknowledge: frames still queued, the
 * change was already acknowledged, or no change is pending. */
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
{
    *rpts = link->current_pts;
    /* Queued frames must be consumed before the status change is visible. */
    if (ff_framequeue_queued_frames(&link->fifo))
        return *rstatus = 0;
    /* Already acknowledged earlier: report the same status, but return 0. */
    if (link->status_out)
        return *rstatus = link->status_out;
    if (!link->status_in)
        return *rstatus = 0;
    /* Acknowledge: copy status_in to status_out and advance current_pts. */
    *rstatus = link->status_out = link->status_in;
    ff_update_link_current_pts(link, link->status_in_pts);
    *rpts = link->current_pts;
    return 1;
}
1497 | |
1498 | int ff_inlink_check_available_frame(AVFilterLink *link) |
1499 | { |
1500 | return ff_framequeue_queued_frames(&link->fifo) > 0; |
1501 | } |
1502 | |
1503 | int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min) |
1504 | { |
1505 | uint64_t samples = ff_framequeue_queued_samples(&link->fifo); |
1506 | av_assert1(min); |
1507 | return samples >= min || (link->status_in && samples); |
1508 | } |
1509 | |
/* Common bookkeeping when a frame is consumed from a link: advance the
 * link's current_pts, process queued commands, evaluate the timeline
 * (enable) expression and count the frame as output. */
static void consume_update(AVFilterLink *link, const AVFrame *frame)
{
    ff_update_link_current_pts(link, frame->pts);
    ff_inlink_process_commands(link, frame);
    link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
    link->frame_count_out++;
}
1517 | |
1518 | int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe) |
1519 | { |
1520 | AVFrame *frame; |
1521 | |
1522 | *rframe = NULL; |
1523 | if (!ff_inlink_check_available_frame(link)) |
1524 | return 0; |
1525 | frame = ff_framequeue_take(&link->fifo); |
1526 | consume_update(link, frame); |
1527 | *rframe = frame; |
1528 | return 1; |
1529 | } |
1530 | |
1531 | int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, |
1532 | AVFrame **rframe) |
1533 | { |
1534 | AVFrame *frame; |
1535 | int ret; |
1536 | |
1537 | av_assert1(min); |
1538 | *rframe = NULL; |
1539 | if (!ff_inlink_check_available_samples(link, min)) |
1540 | return 0; |
1541 | if (link->status_in) |
1542 | min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo)); |
1543 | ret = take_samples(link, min, link->max_samples, &frame); |
1544 | if (ret < 0) |
1545 | return ret; |
1546 | consume_update(link, frame); |
1547 | *rframe = frame; |
1548 | return 1; |
1549 | } |
1550 | |
/* Ensure *rframe is writable, replacing it with a freshly allocated copy
 * (properties and data) when it is not. On success the possibly-new frame
 * is stored back in *rframe and the old one is freed; on failure *rframe
 * is left untouched. Returns 0 on success, a negative AVERROR otherwise. */
int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
{
    AVFrame *frame = *rframe;
    AVFrame *out;
    int ret;

    /* Nothing to do if the frame's buffers are not shared. */
    if (av_frame_is_writable(frame))
        return 0;
    av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

    /* Allocate a buffer matching the link parameters. */
    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        out = ff_get_video_buffer(link, link->w, link->h);
        break;
    case AVMEDIA_TYPE_AUDIO:
        out = ff_get_audio_buffer(link, frame->nb_samples);
        break;
    default:
        return AVERROR(EINVAL);
    }
    if (!out)
        return AVERROR(ENOMEM);

    ret = av_frame_copy_props(out, frame);
    if (ret < 0) {
        av_frame_free(&out);
        return ret;
    }

    /* Copy the payload; the media type was validated above. */
    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
                      frame->format, frame->width, frame->height);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_samples_copy(out->extended_data, frame->extended_data,
                        0, 0, frame->nb_samples,
                        av_frame_get_channels(frame),
                        frame->format);
        break;
    default:
        av_assert0(!"reached");
    }

    av_frame_free(&frame);
    *rframe = out;
    return 0;
}
1599 | |
/* Execute and pop all queued commands whose time has been reached by the
 * given frame's pts (converted to seconds via the link time base). */
int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
{
    AVFilterCommand *cmd = link->dst->command_queue;

    while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
        av_log(link->dst, AV_LOG_DEBUG,
               "Processing command time:%f command:%s arg:%s\n",
               cmd->time, cmd->command, cmd->arg);
        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
        ff_command_queue_pop(link->dst);
        cmd= link->dst->command_queue;
    }
    return 0;
}
1614 | |
/* Evaluate the destination filter's timeline (enable) expression for the
 * given frame. Returns 1 (enabled) when no expression is set or when the
 * expression evaluates to a magnitude >= 0.5, 0 otherwise. */
int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
{
    AVFilterContext *dstctx = link->dst;
    int64_t pts = frame->pts;
    int64_t pos = av_frame_get_pkt_pos(frame);

    if (!dstctx->enable_str)
        return 1;

    /* Expose frame number, time, dimensions and byte position to the
       expression; unknown values become NAN. */
    dstctx->var_values[VAR_N] = link->frame_count_out;
    dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
    dstctx->var_values[VAR_W] = link->w;
    dstctx->var_values[VAR_H] = link->h;
    dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;

    return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
}
1632 | |
/* Mark a frame as wanted on the link and wake up the source filter.
 * Must not be called on a link whose status has already changed. */
void ff_inlink_request_frame(AVFilterLink *link)
{
    av_assert1(!link->status_in);
    av_assert1(!link->status_out);
    link->frame_wanted_out = 1;
    ff_filter_set_ready(link->src, 100);
}
1640 | |
/* Return the AVClass for AVFilterContext (for av_opt_* introspection). */
const AVClass *avfilter_get_class(void)
{
    return &avfilter_class;
}
1645 |