blob: a5fc1b7403cc161eeb482e73bde3069502692e93
1 | /* |
2 | * Copyright (c) 2013 Paul B Mahol |
3 | * |
4 | * This file is part of FFmpeg. |
5 | * |
6 | * FFmpeg is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2.1 of the License, or (at your option) any later version. |
10 | * |
11 | * FFmpeg is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with FFmpeg; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
19 | */ |
20 | |
21 | #include "libavutil/imgutils.h" |
22 | #include "libavutil/opt.h" |
23 | #include "libavutil/pixdesc.h" |
24 | #include "avfilter.h" |
25 | #include "internal.h" |
26 | |
/* Per-instance state: weaves pairs of successive input field frames
 * into single interlaced output frames of twice the height. */
typedef struct WeaveContext {
    const AVClass *class;
    int first_field;     // 0: buffered ("prev") field goes on top; 1: bottom
    int nb_planes;       // plane count of the negotiated input pixel format
    int planeheight[4];  // per-plane height of one input field
    int linesize[4];     // per-plane byte width to copy per row

    AVFrame *prev;       // first field of the current pair, awaiting its mate
} WeaveContext;
36 | |
#define OFFSET(x) offsetof(WeaveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

/* "first_field" chooses which field of each pair is woven into the top
 * scanlines of the output frame (top/t = 0, bottom/b = 1). */
static const AVOption weave_options[] = {
    { "first_field", "set first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"},
    { "top",     "set top field first",               0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    { "t",       "set top field first",               0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    { "bottom",  "set bottom field first",            0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    { "b",       "set bottom field first",            0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    { NULL }
};

AVFILTER_DEFINE_CLASS(weave);
50 | |
51 | static int config_props_output(AVFilterLink *outlink) |
52 | { |
53 | AVFilterContext *ctx = outlink->src; |
54 | WeaveContext *s = ctx->priv; |
55 | AVFilterLink *inlink = ctx->inputs[0]; |
56 | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); |
57 | int ret; |
58 | |
59 | outlink->time_base.num = inlink->time_base.num * 2; |
60 | outlink->time_base.den = inlink->time_base.den; |
61 | outlink->frame_rate.num = inlink->frame_rate.num; |
62 | outlink->frame_rate.den = inlink->frame_rate.den * 2; |
63 | outlink->w = inlink->w; |
64 | outlink->h = inlink->h * 2; |
65 | |
66 | if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0) |
67 | return ret; |
68 | |
69 | s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h); |
70 | s->planeheight[0] = s->planeheight[3] = inlink->h; |
71 | |
72 | s->nb_planes = av_pix_fmt_count_planes(inlink->format); |
73 | |
74 | return 0; |
75 | } |
76 | |
77 | static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
78 | { |
79 | AVFilterContext *ctx = inlink->dst; |
80 | WeaveContext *s = ctx->priv; |
81 | AVFilterLink *outlink = ctx->outputs[0]; |
82 | AVFrame *out; |
83 | int i; |
84 | |
85 | if (!s->prev) { |
86 | s->prev = in; |
87 | return 0; |
88 | } |
89 | |
90 | out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
91 | if (!out) { |
92 | av_frame_free(&in); |
93 | av_frame_free(&s->prev); |
94 | return AVERROR(ENOMEM); |
95 | } |
96 | av_frame_copy_props(out, in); |
97 | |
98 | for (i = 0; i < s->nb_planes; i++) { |
99 | av_image_copy_plane(out->data[i] + out->linesize[i] * s->first_field, |
100 | out->linesize[i] * 2, |
101 | in->data[i], in->linesize[i], |
102 | s->linesize[i], s->planeheight[i]); |
103 | av_image_copy_plane(out->data[i] + out->linesize[i] * !s->first_field, |
104 | out->linesize[i] * 2, |
105 | s->prev->data[i], s->prev->linesize[i], |
106 | s->linesize[i], s->planeheight[i]); |
107 | } |
108 | |
109 | out->pts = in->pts / 2; |
110 | out->interlaced_frame = 1; |
111 | out->top_field_first = !s->first_field; |
112 | |
113 | av_frame_free(&in); |
114 | av_frame_free(&s->prev); |
115 | return ff_filter_frame(outlink, out); |
116 | } |
117 | |
/* Release the buffered field, if the stream ended on an odd frame count. */
static av_cold void uninit(AVFilterContext *ctx)
{
    WeaveContext *s = ctx->priv;

    av_frame_free(&s->prev);
}
124 | |
/* Single video input; frames arrive through filter_frame(). */
static const AVFilterPad weave_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
133 | |
/* Single video output; geometry and timing set in config_props_output(). */
static const AVFilterPad weave_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_props_output,
    },
    { NULL }
};
142 | |
/* Filter registration entry for "weave". */
AVFilter ff_vf_weave = {
    .name          = "weave",
    .description   = NULL_IF_CONFIG_SMALL("Weave input video fields into frames."),
    .priv_size     = sizeof(WeaveContext),
    .priv_class    = &weave_class,
    .uninit        = uninit,
    .inputs        = weave_inputs,
    .outputs       = weave_outputs,
};
152 |