/*
 * Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
 *               2010      James Darnley <james.darnley@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/cpu.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "yadif.h"

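/* Per-plane job description handed to each slice-threading worker. */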
typedef struct ThreadData {
    AVFrame *frame;
    int plane;
    int w, h;
    int parity;
    int tff;
} ThreadData;

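/* CHECK(j) scores the edge-directed candidate with slope j: it sums the
 * absolute differences of three pixel pairs taken along that diagonal between
 * the line above (mrefs) and the line below (prefs).  If the score beats the
 * current best, the spatial prediction is replaced by the average along that
 * diagonal.  The braces opened here are intentionally left unbalanced; FILTER
 * closes them after the CHECK(-2)/CHECK(2) calls. */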
#define CHECK(j)\
    {   int score = FFABS(cur[mrefs - 1 + (j)] - cur[prefs - 1 - (j)])\
                  + FFABS(cur[mrefs  +(j)] - cur[prefs  -(j)])\
                  + FFABS(cur[mrefs + 1 + (j)] - cur[prefs + 1 - (j)]);\
        if (score < spatial_score) {\
            spatial_score= score;\
            spatial_pred= (cur[mrefs  +(j)] + cur[prefs  -(j)])>>1;\

/* The is_not_edge argument here controls when the code will enter a branch
 * which reads up to and including x-3 and x+3. */

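/* For each missing pixel x: c and e are the pixels directly above and below
 * in the current field, d is the average of the two neighbouring fields at
 * the same position, and b/f are that average two lines above/below.  diff
 * estimates how far the output may deviate from the temporal prediction d;
 * it starts from the largest of three temporal differences and, unless mode
 * bit 1 disables the spatial interlacing check, is widened using b, c, e, f.
 * The spatial prediction (plain or edge-directed via CHECK) is then clamped
 * to d +/- diff. */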
#define FILTER(start, end, is_not_edge) \
    for (x = start; x < end; x++) { \
        int c = cur[mrefs]; \
        int d = (prev2[0] + next2[0])>>1; \
        int e = cur[prefs]; \
        int temporal_diff0 = FFABS(prev2[0] - next2[0]); \
        int temporal_diff1 =(FFABS(prev[mrefs] - c) + FFABS(prev[prefs] - e) )>>1; \
        int temporal_diff2 =(FFABS(next[mrefs] - c) + FFABS(next[prefs] - e) )>>1; \
        int diff = FFMAX3(temporal_diff0 >> 1, temporal_diff1, temporal_diff2); \
        int spatial_pred = (c+e) >> 1; \
 \
        if (is_not_edge) {\
            int spatial_score = FFABS(cur[mrefs - 1] - cur[prefs - 1]) + FFABS(c-e) \
                              + FFABS(cur[mrefs + 1] - cur[prefs + 1]) - 1; \
            CHECK(-1) CHECK(-2) }} }} \
            CHECK( 1) CHECK( 2) }} }} \
        }\
 \
        if (!(mode&2)) { \
            int b = (prev2[2 * mrefs] + next2[2 * mrefs])>>1; \
            int f = (prev2[2 * prefs] + next2[2 * prefs])>>1; \
            int max = FFMAX3(d - e, d - c, FFMIN(b - c, f - e)); \
            int min = FFMIN3(d - e, d - c, FFMAX(b - c, f - e)); \
 \
            diff = FFMAX3(diff, min, -max); \
        } \
 \
        if (spatial_pred > d + diff) \
            spatial_pred = d + diff; \
        else if (spatial_pred < d - diff) \
            spatial_pred = d - diff; \
 \
        dst[0] = spatial_pred; \
 \
        dst++; \
        cur++; \
        prev++; \
        next++; \
        prev2++; \
        next2++; \
    }

static void filter_line_c(void *dst1,
                          void *prev1, void *cur1, void *next1,
                          int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    /* The function is called with the pointers already pointing to data[3] and
     * with 6 subtracted from the width.  This allows the FILTER macro to be
     * called so that it processes all the pixels normally.  A constant value of
     * true for is_not_edge lets the compiler ignore the if statement. */
    FILTER(0, w, 1)
}

#define MAX_ALIGN 8
static void filter_edges(void *dst1, void *prev1, void *cur1, void *next1,
                         int w, int prefs, int mrefs, int parity, int mode)
{
    uint8_t *dst  = dst1;
    uint8_t *prev = prev1;
    uint8_t *cur  = cur1;
    uint8_t *next = next1;
    int x;
    uint8_t *prev2 = parity ? prev : cur ;
    uint8_t *next2 = parity ? cur  : next;

    /* Only edge pixels need to be processed here.  A constant value of false
     * for is_not_edge should let the compiler ignore the whole branch. */
    FILTER(0, 3, 0)

    dst   = (uint8_t*)dst1  + w - (MAX_ALIGN-1);
    prev  = (uint8_t*)prev1 + w - (MAX_ALIGN-1);
    cur   = (uint8_t*)cur1  + w - (MAX_ALIGN-1);
    next  = (uint8_t*)next1 + w - (MAX_ALIGN-1);
    prev2 = (uint8_t*)(parity ? prev : cur);
    next2 = (uint8_t*)(parity ? cur  : next);

    FILTER(w - (MAX_ALIGN-1), w - 3, 1)
    FILTER(w - 3, w, 0)
}

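/* 16-bit variants of the two functions above.  They index uint16_t samples,
 * so the byte strides passed in as prefs/mrefs are halved to element strides,
 * and MAX_ALIGN is divided by the 2-byte sample size when offsetting. */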
static void filter_line_c_16bit(void *dst1,
                                void *prev1, void *cur1, void *next1,
                                int w, int prefs, int mrefs, int parity,
                                int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    mrefs /= 2;
    prefs /= 2;

    FILTER(0, w, 1)
}

static void filter_edges_16bit(void *dst1, void *prev1, void *cur1, void *next1,
                               int w, int prefs, int mrefs, int parity, int mode)
{
    uint16_t *dst  = dst1;
    uint16_t *prev = prev1;
    uint16_t *cur  = cur1;
    uint16_t *next = next1;
    int x;
    uint16_t *prev2 = parity ? prev : cur ;
    uint16_t *next2 = parity ? cur  : next;
    mrefs /= 2;
    prefs /= 2;

    FILTER(0, 3, 0)

    dst   = (uint16_t*)dst1  + w - (MAX_ALIGN/2-1);
    prev  = (uint16_t*)prev1 + w - (MAX_ALIGN/2-1);
    cur   = (uint16_t*)cur1  + w - (MAX_ALIGN/2-1);
    next  = (uint16_t*)next1 + w - (MAX_ALIGN/2-1);
    prev2 = (uint16_t*)(parity ? prev : cur);
    next2 = (uint16_t*)(parity ? cur  : next);

    FILTER(w - (MAX_ALIGN/2-1), w - 3, 1)
    FILTER(w - 3, w, 0)
}

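/* Worker for slice threading: deinterlaces one horizontal band of a plane.
 * Lines whose parity matches the field being reconstructed are filtered;
 * the remaining lines are copied verbatim from the current frame. */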
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    YADIFContext *s = ctx->priv;
    ThreadData *td  = arg;
    int refs = s->cur->linesize[td->plane];
    int df = (s->csp->comp[td->plane].depth + 7) / 8;
    int pix_3 = 3 * df;
    int slice_start = (td->h *  jobnr   ) / nb_jobs;
    int slice_end   = (td->h * (jobnr+1)) / nb_jobs;
    int y;

    /* filtering reads 3 pixels to the left/right; to avoid invalid reads,
     * we need to call the c variant which avoids this for border pixels
     */
    for (y = slice_start; y < slice_end; y++) {
        if ((y ^ td->parity) & 1) {
            uint8_t *prev = &s->prev->data[td->plane][y * refs];
            uint8_t *cur  = &s->cur ->data[td->plane][y * refs];
            uint8_t *next = &s->next->data[td->plane][y * refs];
            uint8_t *dst  = &td->frame->data[td->plane][y * td->frame->linesize[td->plane]];
            int     mode  = y == 1 || y + 2 == td->h ? 2 : s->mode;
            s->filter_line(dst + pix_3, prev + pix_3, cur + pix_3,
                           next + pix_3, td->w - (3 + MAX_ALIGN/df-1),
                           y + 1 < td->h ? refs : -refs,
                           y ? -refs : refs,
                           td->parity ^ td->tff, mode);
            s->filter_edges(dst, prev, cur, next, td->w,
                            y + 1 < td->h ? refs : -refs,
                            y ? -refs : refs,
                            td->parity ^ td->tff, mode);
        } else {
            memcpy(&td->frame->data[td->plane][y * td->frame->linesize[td->plane]],
                   &s->cur->data[td->plane][y * refs], td->w * df);
        }
    }
    return 0;
}

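/* Deinterlace all planes of dstpic, dispatching filter_slice() jobs over the
 * filter's thread pool. */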
static void filter(AVFilterContext *ctx, AVFrame *dstpic,
                   int parity, int tff)
{
    YADIFContext *yadif = ctx->priv;
    ThreadData td = { .frame = dstpic, .parity = parity, .tff = tff };
    int i;

    for (i = 0; i < yadif->csp->nb_components; i++) {
        int w = dstpic->width;
        int h = dstpic->height;

        if (i == 1 || i == 2) {
            w = AV_CEIL_RSHIFT(w, yadif->csp->log2_chroma_w);
            h = AV_CEIL_RSHIFT(h, yadif->csp->log2_chroma_h);
        }

        td.w     = w;
        td.h     = h;
        td.plane = i;

        ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(h, ff_filter_get_nb_threads(ctx)));
    }

    emms_c();
}

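/* Produce one output frame from the current field triplet.  is_second is set
 * for the extra frame emitted in the send_field modes, which is built from
 * the opposite field parity of the same source frame. */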
static int return_frame(AVFilterContext *ctx, int is_second)
{
    YADIFContext *yadif = ctx->priv;
    AVFilterLink *link  = ctx->outputs[0];
    int tff, ret;

    if (yadif->parity == -1) {
        tff = yadif->cur->interlaced_frame ?
              yadif->cur->top_field_first : 1;
    } else {
        tff = yadif->parity ^ 1;
    }

    if (is_second) {
        yadif->out = ff_get_video_buffer(link, link->w, link->h);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        av_frame_copy_props(yadif->out, yadif->cur);
        yadif->out->interlaced_frame = 0;
    }

    filter(ctx, yadif->out, tff ^ !is_second, tff);

    if (is_second) {
        int64_t cur_pts  = yadif->cur->pts;
        int64_t next_pts = yadif->next->pts;

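        /* The output timebase is twice as fine as the input (see
         * config_props), so input PTS values map to 2*pts at the output;
         * cur_pts + next_pts therefore places the second field halfway
         * between the current and the next input frame. */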
        if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
            yadif->out->pts = cur_pts + next_pts;
        } else {
            yadif->out->pts = AV_NOPTS_VALUE;
        }
    }
    ret = ff_filter_frame(ctx->outputs[0], yadif->out);

    yadif->frame_pending = (yadif->mode&1) && !is_second;
    return ret;
}

static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b)
{
    int i;
    for (i = 0; i < yadif->csp->nb_components; i++)
        if (a->linesize[i] != b->linesize[i])
            return 1;
    return 0;
}

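/* Re-allocate f with the link's default buffer layout and copy its contents,
 * so that all frames in the prev/cur/next history share identical strides. */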
static void fixstride(AVFilterLink *link, AVFrame *f)
{
    AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
    if(!dst)
        return;
    av_frame_copy_props(dst, f);
    av_image_copy(dst->data, dst->linesize,
                  (const uint8_t **)f->data, f->linesize,
                  dst->format, dst->width, dst->height);
    av_frame_unref(f);
    av_frame_move_ref(f, dst);
    av_frame_free(&dst);
}

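/* Input callback: rotate the prev/cur/next frame history, make the strides
 * consistent, and either pass frames straight through (filter disabled, or
 * deint=interlaced and the frame is progressive) or emit the first
 * deinterlaced frame for the current field pair. */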
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    YADIFContext *yadif = ctx->priv;

    av_assert0(frame);

    if (yadif->frame_pending)
        return_frame(ctx, 1);

    if (yadif->prev)
        av_frame_free(&yadif->prev);
    yadif->prev = yadif->cur;
    yadif->cur  = yadif->next;
    yadif->next = frame;

    if (!yadif->cur &&
        !(yadif->cur = av_frame_clone(yadif->next)))
        return AVERROR(ENOMEM);

    if (checkstride(yadif, yadif->next, yadif->cur)) {
        av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
        fixstride(link, yadif->next);
    }
    if (checkstride(yadif, yadif->next, yadif->cur))
        fixstride(link, yadif->cur);
    if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))
        fixstride(link, yadif->prev);
    if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) {
        av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
        return -1;
    }

    if (!yadif->prev)
        return 0;

    if ((yadif->deint && !yadif->cur->interlaced_frame) ||
        ctx->is_disabled ||
        (yadif->deint && !yadif->prev->interlaced_frame && yadif->prev->repeat_pict) ||
        (yadif->deint && !yadif->next->interlaced_frame && yadif->next->repeat_pict)
    ) {
        yadif->out = av_frame_clone(yadif->cur);
        if (!yadif->out)
            return AVERROR(ENOMEM);

        av_frame_free(&yadif->prev);
        if (yadif->out->pts != AV_NOPTS_VALUE)
            yadif->out->pts *= 2;
        return ff_filter_frame(ctx->outputs[0], yadif->out);
    }

    yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
    if (!yadif->out)
        return AVERROR(ENOMEM);

    av_frame_copy_props(yadif->out, yadif->cur);
    yadif->out->interlaced_frame = 0;

    if (yadif->out->pts != AV_NOPTS_VALUE)
        yadif->out->pts *= 2;

    return return_frame(ctx, 0);
}

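/* Output callback: emit a pending second field if one is queued, otherwise
 * pull a new frame from the input.  On EOF the last frame is fed through
 * filter_frame() once more with an extrapolated PTS so the final field pair
 * gets flushed. */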
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *yadif = ctx->priv;
    int ret;

    if (yadif->frame_pending) {
        return_frame(ctx, 1);
        return 0;
    }

    if (yadif->eof)
        return AVERROR_EOF;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && yadif->cur) {
        AVFrame *next = av_frame_clone(yadif->next);

        if (!next)
            return AVERROR(ENOMEM);

        next->pts = yadif->next->pts * 2 - yadif->cur->pts;

        filter_frame(ctx->inputs[0], next);
        yadif->eof = 1;
    } else if (ret < 0) {
        return ret;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    YADIFContext *yadif = ctx->priv;

    av_frame_free(&yadif->prev);
    av_frame_free(&yadif->cur );
    av_frame_free(&yadif->next);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUV420P9,
        AV_PIX_FMT_YUV422P9,
        AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10,
        AV_PIX_FMT_YUV422P10,
        AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV422P12,
        AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV420P14,
        AV_PIX_FMT_YUV422P14,
        AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUV422P16,
        AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUVA422P,
        AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_GBRP9,
        AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12,
        AV_PIX_FMT_GBRP14,
        AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

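/* Configure the output link: same dimensions as the input, a timebase twice
 * as fine (every input frame may yield two output frames), and a doubled
 * frame rate in the send_field modes.  Also selects the 8- or 16-bit line
 * filters and lets the x86 code override them with SIMD versions. */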
static int config_props(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *s = ctx->priv;

    link->time_base.num = ctx->inputs[0]->time_base.num;
    link->time_base.den = ctx->inputs[0]->time_base.den * 2;
    link->w             = ctx->inputs[0]->w;
    link->h             = ctx->inputs[0]->h;

    if(s->mode & 1)
        link->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
                                    (AVRational){2, 1});

    if (link->w < 3 || link->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
        return AVERROR(EINVAL);
    }

    s->csp = av_pix_fmt_desc_get(link->format);
    if (s->csp->comp[0].depth > 8) {
        s->filter_line  = filter_line_c_16bit;
        s->filter_edges = filter_edges_16bit;
    } else {
        s->filter_line  = filter_line_c;
        s->filter_edges = filter_edges;
    }

    if (ARCH_X86)
        ff_yadif_init_x86(s);

    return 0;
}


#define OFFSET(x) offsetof(YADIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }

static const AVOption yadif_options[] = {
    { "mode",   "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"},
    CONST("send_frame",           "send one frame for each frame",                                     YADIF_MODE_SEND_FRAME,           "mode"),
    CONST("send_field",           "send one frame for each field",                                     YADIF_MODE_SEND_FIELD,           "mode"),
    CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"),
    CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"),

    { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
    CONST("tff",  "assume top field first",    YADIF_PARITY_TFF,  "parity"),
    CONST("bff",  "assume bottom field first", YADIF_PARITY_BFF,  "parity"),
    CONST("auto", "auto detect parity",        YADIF_PARITY_AUTO, "parity"),

    { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
    CONST("all",        "deinterlace all frames",                       YADIF_DEINT_ALL,        "deint"),
    CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"),

    { NULL }
};

AVFILTER_DEFINE_CLASS(yadif);

static const AVFilterPad avfilter_vf_yadif_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_yadif_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_props,
    },
    { NULL }
};

AVFilter ff_vf_yadif = {
    .name          = "yadif",
    .description   = NULL_IF_CONFIG_SMALL("Deinterlace the input image."),
    .priv_size     = sizeof(YADIFContext),
    .priv_class    = &yadif_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_yadif_inputs,
    .outputs       = avfilter_vf_yadif_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};