path: root/libavfilter/vf_framepack.c
blob: a5cd9540b97ce73c1f19b308540de3d65fd88b46
/*
 * Copyright (c) 2013 Vittorio Giovara
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Generate a frame packed video, by combining two views in a single surface.
 */
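
/*
 * Illustrative usage (editorial sketch, not part of the original file): the
 * filter takes a "left" and a "right" video input and emits a single packed
 * output, selected with the "format" option. File names below are
 * placeholders:
 *
 *   ffmpeg -i left.mp4 -i right.mp4 \
 *          -filter_complex "[0:v][1:v]framepack=format=tab[out]" \
 *          -map "[out]" packed.mp4
 */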

#include <string.h>

#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/rational.h"
#include "libavutil/stereo3d.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define LEFT  0
#define RIGHT 1

typedef struct FramepackContext {
    const AVClass *class;

    const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format

    enum AVStereo3DType format;         ///< output frame packing type

    AVFrame *input_views[2];            ///< input frames

    int64_t double_pts;                 ///< new pts for frameseq mode
} FramepackContext;

static const enum AVPixelFormat formats_supported[] = {
    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_NONE
};

static int query_formats(AVFilterContext *ctx)
{
    // this will ensure that formats are the same on all pads
    AVFilterFormats *fmts_list = ff_make_format_list(formats_supported);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static av_cold void framepack_uninit(AVFilterContext *ctx)
{
    FramepackContext *s = ctx->priv;

    // clean any leftover frame
    av_frame_free(&s->input_views[LEFT]);
    av_frame_free(&s->input_views[RIGHT]);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = outlink->src->priv;

    int width             = ctx->inputs[LEFT]->w;
    int height            = ctx->inputs[LEFT]->h;
    AVRational time_base  = ctx->inputs[LEFT]->time_base;
    AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;

    // check size and fps match on the other input
    if (width  != ctx->inputs[RIGHT]->w ||
        height != ctx->inputs[RIGHT]->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right sizes differ (%dx%d vs %dx%d).\n",
               width, height,
               ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right time bases differ (%d/%d vs %d/%d).\n",
               time_base.num, time_base.den,
               ctx->inputs[RIGHT]->time_base.num,
               ctx->inputs[RIGHT]->time_base.den);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right framerates differ (%d/%d vs %d/%d).\n",
               frame_rate.num, frame_rate.den,
               ctx->inputs[RIGHT]->frame_rate.num,
               ctx->inputs[RIGHT]->frame_rate.den);
        return AVERROR_INVALIDDATA;
    }

    s->pix_desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->pix_desc)
        return AVERROR_BUG;

    // modify output properties as needed
    switch (s->format) {
    case AV_STEREO3D_FRAMESEQUENCE:
        time_base.den  *= 2;
        frame_rate.num *= 2;

        s->double_pts = AV_NOPTS_VALUE;
        break;
    case AV_STEREO3D_COLUMNS:
    case AV_STEREO3D_SIDEBYSIDE:
        width *= 2;
        break;
    case AV_STEREO3D_LINES:
    case AV_STEREO3D_TOPBOTTOM:
        height *= 2;
        break;
    default:
        av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.\n");
        return AVERROR_INVALIDDATA;
    }

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    return 0;
}
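
/*
 * Worked example for config_output() (numbers are illustrative): with two
 * 1280x720 inputs at 25 fps, "sbs"/"columns" configure a 2560x720 output,
 * "tab"/"lines" a 1280x1440 output, while "frameseq" keeps 1280x720 but
 * doubles frame_rate.num (25 -> 50 fps) and time_base.den so consecutive
 * views can carry consecutive timestamps.
 */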

static void horizontal_frame_pack(AVFilterLink *outlink,
                                  AVFrame *out,
                                  int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = ctx->priv;
    int i, plane;

    if (interleaved) {
        const uint8_t *leftp  = s->input_views[LEFT]->data[0];
        const uint8_t *rightp = s->input_views[RIGHT]->data[0];
        uint8_t *dstp         = out->data[0];
        int length = out->width / 2;
        int lines  = out->height;

        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
            if (plane == 1 || plane == 2) {
                length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
                lines  = AV_CEIL_RSHIFT(out->height,    s->pix_desc->log2_chroma_h);
            }
            for (i = 0; i < lines; i++) {
                int j;
                leftp  = s->input_views[LEFT]->data[plane] +
                         s->input_views[LEFT]->linesize[plane] * i;
                rightp = s->input_views[RIGHT]->data[plane] +
                         s->input_views[RIGHT]->linesize[plane] * i;
                dstp   = out->data[plane] + out->linesize[plane] * i;
                for (j = 0; j < length; j++) {
                    // interpolate chroma as necessary
                    if ((s->pix_desc->log2_chroma_w ||
                         s->pix_desc->log2_chroma_h) &&
                        (plane == 1 || plane == 2)) {
                        *dstp++ = (*leftp + *rightp) / 2;
                        *dstp++ = (*leftp + *rightp) / 2;
                    } else {
                        *dstp++ = *leftp;
                        *dstp++ = *rightp;
                    }
                    leftp += 1;
                    rightp += 1;
                }
            }
        }
    } else {
        for (i = 0; i < 2; i++) {
            const uint8_t *src[4];
            uint8_t *dst[4];
            int sub_w = s->input_views[i]->width >> s->pix_desc->log2_chroma_w;

            src[0] = s->input_views[i]->data[0];
            src[1] = s->input_views[i]->data[1];
            src[2] = s->input_views[i]->data[2];

            dst[0] = out->data[0] + i * s->input_views[i]->width;
            dst[1] = out->data[1] + i * sub_w;
            dst[2] = out->data[2] + i * sub_w;

            av_image_copy(dst, out->linesize, src, s->input_views[i]->linesize,
                          s->input_views[i]->format,
                          s->input_views[i]->width,
                          s->input_views[i]->height);
        }
    }
}
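
/*
 * Layout note for horizontal_frame_pack(): in non-interleaved ("sbs") mode
 * the left view is copied into the left half of the output and the right
 * view into the right half. In interleaved ("columns") mode output columns
 * alternate left/right samples, and for subsampled chroma planes each output
 * pair is the average of the co-located left and right chroma samples.
 */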

static void vertical_frame_pack(AVFilterLink *outlink,
                                AVFrame *out,
                                int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = ctx->priv;
    int i;

    for (i = 0; i < 2; i++) {
        const uint8_t *src[4];
        uint8_t *dst[4];
        int linesizes[4];
        int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;

        src[0] = s->input_views[i]->data[0];
        src[1] = s->input_views[i]->data[1];
        src[2] = s->input_views[i]->data[2];

        dst[0] = out->data[0] + i * out->linesize[0] *
                 (interleaved + s->input_views[i]->height * (1 - interleaved));
        dst[1] = out->data[1] + i * out->linesize[1] *
                 (interleaved + sub_h * (1 - interleaved));
        dst[2] = out->data[2] + i * out->linesize[2] *
                 (interleaved + sub_h * (1 - interleaved));

        linesizes[0] = out->linesize[0] +
                       interleaved * out->linesize[0];
        linesizes[1] = out->linesize[1] +
                       interleaved * out->linesize[1];
        linesizes[2] = out->linesize[2] +
                       interleaved * out->linesize[2];

        av_image_copy(dst, linesizes, src, s->input_views[i]->linesize,
                      s->input_views[i]->format,
                      s->input_views[i]->width,
                      s->input_views[i]->height);
    }
}
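
/*
 * Layout note for vertical_frame_pack(): in non-interleaved ("tab") mode the
 * right view starts one full (chroma-subsampled, for chroma planes) picture
 * height below the left view. In interleaved ("lines") mode the right view
 * starts one row down and the destination linesize is doubled, so each view
 * lands on every other output line.
 */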

static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
                                                AVFrame *dst)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = ctx->priv;
    switch (s->format) {
    case AV_STEREO3D_SIDEBYSIDE:
        horizontal_frame_pack(outlink, dst, 0);
        break;
    case AV_STEREO3D_COLUMNS:
        horizontal_frame_pack(outlink, dst, 1);
        break;
    case AV_STEREO3D_TOPBOTTOM:
        vertical_frame_pack(outlink, dst, 0);
        break;
    case AV_STEREO3D_LINES:
        vertical_frame_pack(outlink, dst, 1);
        break;
    }
}

static int try_push_frame(AVFilterContext *ctx);

static int filter_frame_left(AVFilterLink *inlink, AVFrame *frame)
{
    FramepackContext *s = inlink->dst->priv;
    s->input_views[LEFT] = frame;
    return try_push_frame(inlink->dst);
}

static int filter_frame_right(AVFilterLink *inlink, AVFrame *frame)
{
    FramepackContext *s = inlink->dst->priv;
    s->input_views[RIGHT] = frame;
    return try_push_frame(inlink->dst);
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = ctx->priv;
    int ret, i;

    /* get a frame on either input; stop as soon as one video ends */
    for (i = 0; i < 2; i++) {
        if (!s->input_views[i]) {
            ret = ff_request_frame(ctx->inputs[i]);
            if (ret < 0)
                return ret;
        }
    }
    return 0;
}

static int try_push_frame(AVFilterContext *ctx)
{
    FramepackContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVStereo3D *stereo;
    int ret, i;

    if (!(s->input_views[0] && s->input_views[1]))
        return 0;
    if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
        if (s->double_pts == AV_NOPTS_VALUE)
            s->double_pts = s->input_views[LEFT]->pts;

        for (i = 0; i < 2; i++) {
            // set correct timestamps
            s->input_views[i]->pts = s->double_pts++;

            // set stereo3d side data
            stereo = av_stereo3d_create_side_data(s->input_views[i]);
            if (!stereo)
                return AVERROR(ENOMEM);
            stereo->type = s->format;

            // filter the frame and immediately relinquish its pointer
            ret = ff_filter_frame(outlink, s->input_views[i]);
            s->input_views[i] = NULL;
            if (ret < 0)
                return ret;
        }
        return ret;
    } else {
        AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!dst)
            return AVERROR(ENOMEM);

        spatial_frame_pack(outlink, dst);

        // copy frame properties from the original (left) frame
        ret = av_frame_copy_props(dst, s->input_views[LEFT]);
        if (ret < 0) {
            av_frame_free(&dst);
            return ret;
        }

        for (i = 0; i < 2; i++)
            av_frame_free(&s->input_views[i]);

        // set stereo3d side data
        stereo = av_stereo3d_create_side_data(dst);
        if (!stereo) {
            av_frame_free(&dst);
            return AVERROR(ENOMEM);
        }
        stereo->type = s->format;

        return ff_filter_frame(outlink, dst);
    }
}
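
/*
 * Timestamp note for frame-sequential output: double_pts is seeded from the
 * first left frame's pts and incremented once per emitted view, so the two
 * views of each pair get consecutive pts in the doubled output time base
 * (this lines up with the input timing when input pts increase by one per
 * frame).
 */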

#define OFFSET(x) offsetof(FramepackContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
static const AVOption framepack_options[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = V, .unit = "format" },
    { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { NULL },
};

AVFILTER_DEFINE_CLASS(framepack);

static const AVFilterPad framepack_inputs[] = {
    {
        .name         = "left",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_left,
        .needs_fifo   = 1,
    },
    {
        .name         = "right",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_right,
        .needs_fifo   = 1,
    },
    { NULL }
};

static const AVFilterPad framepack_outputs[] = {
    {
        .name          = "packed",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_framepack = {
    .name          = "framepack",
    .description   = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
    .priv_size     = sizeof(FramepackContext),
    .priv_class    = &framepack_class,
    .query_formats = query_formats,
    .inputs        = framepack_inputs,
    .outputs       = framepack_outputs,
    .uninit        = framepack_uninit,
};