blob: c21104320d1365f42fa229602330737e9abcd02d
1 | /* |
2 | * Copyright (c) 2013 Paul B Mahol |
3 | * |
4 | * This file is part of FFmpeg. |
5 | * |
6 | * FFmpeg is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2.1 of the License, or (at your option) any later version. |
10 | * |
11 | * FFmpeg is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with FFmpeg; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
19 | */ |
20 | |
21 | #include "libavutil/avassert.h" |
22 | #include "libavutil/avstring.h" |
23 | #include "libavutil/imgutils.h" |
24 | #include "libavutil/opt.h" |
25 | #include "libavutil/pixdesc.h" |
26 | #include "avfilter.h" |
27 | #include "internal.h" |
28 | #include "framesync.h" |
29 | |
/* Per-input plane geometry, gathered in config_output() and used only to
 * validate that every mapped source plane matches its output plane. */
typedef struct InputParam {
    int depth[4];        // bit depth of each component of the input format
    int nb_planes;       // plane count of the input pixel format
    int planewidth[4];   // plane width in bytes (doubled for >8-bit depths)
    int planeheight[4];  // plane height in lines
} InputParam;
36 | |
typedef struct MergePlanesContext {
    const AVClass *class;
    int64_t mapping;                   // "mapping" option: packed hex digits, one pair per output plane
    const enum AVPixelFormat out_fmt;  // "format" option: requested output pixel format
    int nb_inputs;                     // number of input pads, derived from the mapping in init()
    int nb_planes;                     // plane count of out_fmt
    int planewidth[4];                 // output plane width in bytes (doubled for >8-bit depths)
    int planeheight[4];                // output plane height in lines
    int map[4][2];                     // per output plane: [0] = source plane, [1] = source input
    const AVPixFmtDescriptor *outdesc; // descriptor of out_fmt

    FFFrameSync fs;                    // synchronizes frames across all inputs
} MergePlanesContext;
50 | |
#define OFFSET(x) offsetof(MergePlanesContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption mergeplanes_options[] = {
    /* One hex digit pair per output plane, least-significant pair = last
     * plane; low nibble = source plane, high nibble = source input (see
     * the parsing loop in init()).  Max 0x33333333 keeps every nibble <= 3. */
    { "mapping", "set input to output plane mapping", OFFSET(mapping), AV_OPT_TYPE_INT, {.i64=0}, 0, 0x33333333, FLAGS },
    { "format", "set output pixel format", OFFSET(out_fmt), AV_OPT_TYPE_PIXEL_FMT, {.i64=AV_PIX_FMT_YUVA444P}, 0, INT_MAX, .flags=FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(mergeplanes);
60 | |
61 | static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
62 | { |
63 | MergePlanesContext *s = inlink->dst->priv; |
64 | return ff_framesync_filter_frame(&s->fs, inlink, in); |
65 | } |
66 | |
67 | static av_cold int init(AVFilterContext *ctx) |
68 | { |
69 | MergePlanesContext *s = ctx->priv; |
70 | int64_t m = s->mapping; |
71 | int i, ret; |
72 | |
73 | s->outdesc = av_pix_fmt_desc_get(s->out_fmt); |
74 | if (!(s->outdesc->flags & AV_PIX_FMT_FLAG_PLANAR) || |
75 | s->outdesc->nb_components < 2) { |
76 | av_log(ctx, AV_LOG_ERROR, "Only planar formats with more than one component are supported.\n"); |
77 | return AVERROR(EINVAL); |
78 | } |
79 | s->nb_planes = av_pix_fmt_count_planes(s->out_fmt); |
80 | |
81 | for (i = s->nb_planes - 1; i >= 0; i--) { |
82 | s->map[i][0] = m & 0xf; |
83 | m >>= 4; |
84 | s->map[i][1] = m & 0xf; |
85 | m >>= 4; |
86 | |
87 | if (s->map[i][0] > 3 || s->map[i][1] > 3) { |
88 | av_log(ctx, AV_LOG_ERROR, "Mapping with out of range input and/or plane number.\n"); |
89 | return AVERROR(EINVAL); |
90 | } |
91 | |
92 | s->nb_inputs = FFMAX(s->nb_inputs, s->map[i][1] + 1); |
93 | } |
94 | |
95 | av_assert0(s->nb_inputs && s->nb_inputs <= 4); |
96 | |
97 | for (i = 0; i < s->nb_inputs; i++) { |
98 | AVFilterPad pad = { 0 }; |
99 | |
100 | pad.type = AVMEDIA_TYPE_VIDEO; |
101 | pad.name = av_asprintf("in%d", i); |
102 | if (!pad.name) |
103 | return AVERROR(ENOMEM); |
104 | pad.filter_frame = filter_frame; |
105 | |
106 | if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0){ |
107 | av_freep(&pad.name); |
108 | return ret; |
109 | } |
110 | } |
111 | |
112 | return 0; |
113 | } |
114 | |
115 | static int query_formats(AVFilterContext *ctx) |
116 | { |
117 | MergePlanesContext *s = ctx->priv; |
118 | AVFilterFormats *formats = NULL; |
119 | int i, ret; |
120 | |
121 | s->outdesc = av_pix_fmt_desc_get(s->out_fmt); |
122 | for (i = 0; av_pix_fmt_desc_get(i); i++) { |
123 | const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i); |
124 | if (desc->comp[0].depth == s->outdesc->comp[0].depth && |
125 | (desc->comp[0].depth <= 8 || (desc->flags & AV_PIX_FMT_FLAG_BE) == (s->outdesc->flags & AV_PIX_FMT_FLAG_BE)) && |
126 | av_pix_fmt_count_planes(i) == desc->nb_components && |
127 | (ret = ff_add_format(&formats, i)) < 0) |
128 | return ret; |
129 | } |
130 | |
131 | for (i = 0; i < s->nb_inputs; i++) |
132 | if ((ret = ff_formats_ref(formats, &ctx->inputs[i]->out_formats)) < 0) |
133 | return ret; |
134 | |
135 | formats = NULL; |
136 | if ((ret = ff_add_format(&formats, s->out_fmt)) < 0 || |
137 | (ret = ff_formats_ref(formats, &ctx->outputs[0]->in_formats)) < 0) |
138 | return ret; |
139 | |
140 | return 0; |
141 | } |
142 | |
143 | static int process_frame(FFFrameSync *fs) |
144 | { |
145 | AVFilterContext *ctx = fs->parent; |
146 | AVFilterLink *outlink = ctx->outputs[0]; |
147 | MergePlanesContext *s = fs->opaque; |
148 | AVFrame *in[4] = { NULL }; |
149 | AVFrame *out; |
150 | int i, ret; |
151 | |
152 | for (i = 0; i < s->nb_inputs; i++) { |
153 | if ((ret = ff_framesync_get_frame(&s->fs, i, &in[i], 0)) < 0) |
154 | return ret; |
155 | } |
156 | |
157 | out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
158 | if (!out) |
159 | return AVERROR(ENOMEM); |
160 | out->pts = av_rescale_q(s->fs.pts, s->fs.time_base, outlink->time_base); |
161 | |
162 | for (i = 0; i < s->nb_planes; i++) { |
163 | const int input = s->map[i][1]; |
164 | const int plane = s->map[i][0]; |
165 | |
166 | av_image_copy_plane(out->data[i], out->linesize[i], |
167 | in[input]->data[plane], in[input]->linesize[plane], |
168 | s->planewidth[i], s->planeheight[i]); |
169 | } |
170 | |
171 | return ff_filter_frame(outlink, out); |
172 | } |
173 | |
/* Configure the output link: take geometry/timing from the first input,
 * verify that every input shares the output SAR, validate each mapped
 * source plane against the corresponding output plane (depth, width,
 * height), and finish frame-sync setup.  Returns 0 or AVERROR(EINVAL). */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MergePlanesContext *s = ctx->priv;
    InputParam inputsp[4];
    FFFrameSyncIn *in;
    int i, ret;

    if ((ret = ff_framesync_init(&s->fs, ctx, s->nb_inputs)) < 0)
        return ret;

    in = s->fs.in;
    s->fs.opaque = s;
    s->fs.on_event = process_frame;

    /* The first input dictates the output dimensions and timing. */
    outlink->w = ctx->inputs[0]->w;
    outlink->h = ctx->inputs[0]->h;
    outlink->time_base = ctx->inputs[0]->time_base;
    outlink->frame_rate = ctx->inputs[0]->frame_rate;
    outlink->sample_aspect_ratio = ctx->inputs[0]->sample_aspect_ratio;

    /* Plane widths are kept in BYTES (doubled for >8-bit depths) so they
     * can be passed straight to av_image_copy_plane() in process_frame().
     * Planes 1/2 are chroma-subsampled; planes 0/3 (luma/alpha) are not. */
    s->planewidth[1]  =
    s->planewidth[2]  = AV_CEIL_RSHIFT(((s->outdesc->comp[1].depth > 8) + 1) * outlink->w, s->outdesc->log2_chroma_w);
    s->planewidth[0]  =
    s->planewidth[3]  = ((s->outdesc->comp[0].depth > 8) + 1) * outlink->w;
    s->planeheight[1] =
    s->planeheight[2] = AV_CEIL_RSHIFT(outlink->h, s->outdesc->log2_chroma_h);
    s->planeheight[0] =
    s->planeheight[3] = outlink->h;

    for (i = 0; i < s->nb_inputs; i++) {
        InputParam *inputp = &inputsp[i];
        AVFilterLink *inlink = ctx->inputs[i];
        const AVPixFmtDescriptor *indesc = av_pix_fmt_desc_get(inlink->format);
        int j;

        /* All inputs must agree with the output's sample aspect ratio. */
        if (outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
            outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
            av_log(ctx, AV_LOG_ERROR, "input #%d link %s SAR %d:%d "
                                      "does not match output link %s SAR %d:%d\n",
                                      i, ctx->input_pads[i].name,
                                      inlink->sample_aspect_ratio.num,
                                      inlink->sample_aspect_ratio.den,
                                      ctx->output_pads[0].name,
                                      outlink->sample_aspect_ratio.num,
                                      outlink->sample_aspect_ratio.den);
            return AVERROR(EINVAL);
        }

        /* Same byte-width convention as for the output planes above. */
        inputp->planewidth[1]  =
        inputp->planewidth[2]  = AV_CEIL_RSHIFT(((indesc->comp[1].depth > 8) + 1) * inlink->w, indesc->log2_chroma_w);
        inputp->planewidth[0]  =
        inputp->planewidth[3]  = ((indesc->comp[0].depth > 8) + 1) * inlink->w;
        inputp->planeheight[1] =
        inputp->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, indesc->log2_chroma_h);
        inputp->planeheight[0] =
        inputp->planeheight[3] = inlink->h;
        inputp->nb_planes = av_pix_fmt_count_planes(inlink->format);

        for (j = 0; j < inputp->nb_planes; j++)
            inputp->depth[j] = indesc->comp[j].depth;

        /* Sync every input; stop at the shortest stream (EXT_STOP). */
        in[i].time_base = inlink->time_base;
        in[i].sync   = 1;
        in[i].before = EXT_STOP;
        in[i].after  = EXT_STOP;
    }

    /* Every mapped source plane must exist and match the output plane's
     * depth and exact byte width / line height. */
    for (i = 0; i < s->nb_planes; i++) {
        const int input = s->map[i][1];
        const int plane = s->map[i][0];
        InputParam *inputp = &inputsp[input];

        if (plane + 1 > inputp->nb_planes) {
            av_log(ctx, AV_LOG_ERROR, "input %d does not have %d plane\n",
                                      input, plane);
            goto fail;
        }
        if (s->outdesc->comp[i].depth != inputp->depth[plane]) {
            av_log(ctx, AV_LOG_ERROR, "output plane %d depth %d does not "
                                      "match input %d plane %d depth %d\n",
                                      i, s->outdesc->comp[i].depth,
                                      input, plane, inputp->depth[plane]);
            goto fail;
        }
        if (s->planewidth[i] != inputp->planewidth[plane]) {
            av_log(ctx, AV_LOG_ERROR, "output plane %d width %d does not "
                                      "match input %d plane %d width %d\n",
                                      i, s->planewidth[i],
                                      input, plane, inputp->planewidth[plane]);
            goto fail;
        }
        if (s->planeheight[i] != inputp->planeheight[plane]) {
            av_log(ctx, AV_LOG_ERROR, "output plane %d height %d does not "
                                      "match input %d plane %d height %d\n",
                                      i, s->planeheight[i],
                                      input, plane, inputp->planeheight[plane]);
            goto fail;
        }
    }

    return ff_framesync_configure(&s->fs);
fail:
    /* The framesync context is torn down later in uninit(). */
    return AVERROR(EINVAL);
}
279 | |
280 | static int request_frame(AVFilterLink *outlink) |
281 | { |
282 | MergePlanesContext *s = outlink->src->priv; |
283 | return ff_framesync_request_frame(&s->fs, outlink); |
284 | } |
285 | |
286 | static av_cold void uninit(AVFilterContext *ctx) |
287 | { |
288 | MergePlanesContext *s = ctx->priv; |
289 | int i; |
290 | |
291 | ff_framesync_uninit(&s->fs); |
292 | |
293 | for (i = 0; i < ctx->nb_inputs; i++) |
294 | av_freep(&ctx->input_pads[i].name); |
295 | } |
296 | |
/* Single video output; inputs are created dynamically in init(). */
static const AVFilterPad mergeplanes_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
306 | |
AVFilter ff_vf_mergeplanes = {
    .name          = "mergeplanes",
    .description   = NULL_IF_CONFIG_SMALL("Merge planes."),
    .priv_size     = sizeof(MergePlanesContext),
    .priv_class    = &mergeplanes_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = NULL, // input pads are inserted at runtime by init()
    .outputs       = mergeplanes_outputs,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
319 |