blob: 53a0700e37a958d849ffbf503c28ddd9f663be3c
1 | /* |
2 | * Copyright (c) 2013 Paul B Mahol |
3 | * |
4 | * This file is part of FFmpeg. |
5 | * |
6 | * FFmpeg is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2.1 of the License, or (at your option) any later version. |
10 | * |
11 | * FFmpeg is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with FFmpeg; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
19 | */ |
20 | |
21 | #include "libavutil/avassert.h" |
22 | #include "libavutil/eval.h" |
23 | #include "libavutil/opt.h" |
24 | #include "libavutil/pixdesc.h" |
25 | #include "avfilter.h" |
26 | #include "formats.h" |
27 | #include "internal.h" |
28 | #include "video.h" |
29 | #include "libswscale/swscale.h" |
30 | |
31 | static const char *const var_names[] = { |
32 | "in_w", "iw", |
33 | "in_h", "ih", |
34 | "out_w", "ow", |
35 | "out_h", "oh", |
36 | "in", |
37 | "on", |
38 | "duration", |
39 | "pduration", |
40 | "time", |
41 | "frame", |
42 | "zoom", |
43 | "pzoom", |
44 | "x", "px", |
45 | "y", "py", |
46 | "a", |
47 | "sar", |
48 | "dar", |
49 | "hsub", |
50 | "vsub", |
51 | NULL |
52 | }; |
53 | |
54 | enum var_name { |
55 | VAR_IN_W, VAR_IW, |
56 | VAR_IN_H, VAR_IH, |
57 | VAR_OUT_W, VAR_OW, |
58 | VAR_OUT_H, VAR_OH, |
59 | VAR_IN, |
60 | VAR_ON, |
61 | VAR_DURATION, |
62 | VAR_PDURATION, |
63 | VAR_TIME, |
64 | VAR_FRAME, |
65 | VAR_ZOOM, |
66 | VAR_PZOOM, |
67 | VAR_X, VAR_PX, |
68 | VAR_Y, VAR_PY, |
69 | VAR_A, |
70 | VAR_SAR, |
71 | VAR_DAR, |
72 | VAR_HSUB, |
73 | VAR_VSUB, |
74 | VARS_NB |
75 | }; |
76 | |
77 | typedef struct ZPcontext { |
78 | const AVClass *class; |
79 | char *zoom_expr_str; |
80 | char *x_expr_str; |
81 | char *y_expr_str; |
82 | char *duration_expr_str; |
83 | int w, h; |
84 | double x, y; |
85 | double prev_zoom; |
86 | int prev_nb_frames; |
87 | struct SwsContext *sws; |
88 | int64_t frame_count; |
89 | const AVPixFmtDescriptor *desc; |
90 | AVFrame *in; |
91 | double var_values[VARS_NB]; |
92 | int nb_frames; |
93 | int current_frame; |
94 | int finished; |
95 | AVRational framerate; |
96 | } ZPContext; |
97 | |
98 | #define OFFSET(x) offsetof(ZPContext, x) |
99 | #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM |
100 | static const AVOption zoompan_options[] = { |
101 | { "zoom", "set the zoom expression", OFFSET(zoom_expr_str), AV_OPT_TYPE_STRING, {.str = "1" }, .flags = FLAGS }, |
102 | { "z", "set the zoom expression", OFFSET(zoom_expr_str), AV_OPT_TYPE_STRING, {.str = "1" }, .flags = FLAGS }, |
103 | { "x", "set the x expression", OFFSET(x_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags = FLAGS }, |
104 | { "y", "set the y expression", OFFSET(y_expr_str), AV_OPT_TYPE_STRING, {.str="0"}, .flags = FLAGS }, |
105 | { "d", "set the duration expression", OFFSET(duration_expr_str), AV_OPT_TYPE_STRING, {.str="90"}, .flags = FLAGS }, |
106 | { "s", "set the output image size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, .flags = FLAGS }, |
107 | { "fps", "set the output framerate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 0, INT_MAX, .flags = FLAGS }, |
108 | { NULL } |
109 | }; |
110 | |
111 | AVFILTER_DEFINE_CLASS(zoompan); |
112 | |
113 | static av_cold int init(AVFilterContext *ctx) |
114 | { |
115 | ZPContext *s = ctx->priv; |
116 | |
117 | s->prev_zoom = 1; |
118 | return 0; |
119 | } |
120 | |
121 | static int config_output(AVFilterLink *outlink) |
122 | { |
123 | AVFilterContext *ctx = outlink->src; |
124 | ZPContext *s = ctx->priv; |
125 | |
126 | outlink->w = s->w; |
127 | outlink->h = s->h; |
128 | outlink->time_base = av_inv_q(s->framerate); |
129 | outlink->frame_rate = s->framerate; |
130 | s->desc = av_pix_fmt_desc_get(outlink->format); |
131 | |
132 | return 0; |
133 | } |
134 | |
135 | static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_values, int i, |
136 | double *zoom, double *dx, double *dy) |
137 | { |
138 | ZPContext *s = ctx->priv; |
139 | AVFilterLink *outlink = ctx->outputs[0]; |
140 | int64_t pts = s->frame_count; |
141 | int k, x, y, w, h, ret = 0; |
142 | uint8_t *input[4]; |
143 | int px[4], py[4]; |
144 | AVFrame *out; |
145 | |
146 | var_values[VAR_PX] = s->x; |
147 | var_values[VAR_PY] = s->y; |
148 | var_values[VAR_PZOOM] = s->prev_zoom; |
149 | var_values[VAR_PDURATION] = s->prev_nb_frames; |
150 | var_values[VAR_TIME] = pts * av_q2d(outlink->time_base); |
151 | var_values[VAR_FRAME] = i; |
152 | var_values[VAR_ON] = outlink->frame_count_in + 1; |
153 | if ((ret = av_expr_parse_and_eval(zoom, s->zoom_expr_str, |
154 | var_names, var_values, |
155 | NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) |
156 | return ret; |
157 | |
158 | *zoom = av_clipd(*zoom, 1, 10); |
159 | var_values[VAR_ZOOM] = *zoom; |
160 | w = in->width * (1.0 / *zoom); |
161 | h = in->height * (1.0 / *zoom); |
162 | |
163 | if ((ret = av_expr_parse_and_eval(dx, s->x_expr_str, |
164 | var_names, var_values, |
165 | NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) |
166 | return ret; |
167 | x = *dx = av_clipd(*dx, 0, FFMAX(in->width - w, 0)); |
168 | var_values[VAR_X] = *dx; |
169 | x &= ~((1 << s->desc->log2_chroma_w) - 1); |
170 | |
171 | if ((ret = av_expr_parse_and_eval(dy, s->y_expr_str, |
172 | var_names, var_values, |
173 | NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) |
174 | return ret; |
175 | y = *dy = av_clipd(*dy, 0, FFMAX(in->height - h, 0)); |
176 | var_values[VAR_Y] = *dy; |
177 | y &= ~((1 << s->desc->log2_chroma_h) - 1); |
178 | |
179 | out = ff_get_video_buffer(outlink, outlink->w, outlink->h); |
180 | if (!out) { |
181 | ret = AVERROR(ENOMEM); |
182 | return ret; |
183 | } |
184 | |
185 | px[1] = px[2] = AV_CEIL_RSHIFT(x, s->desc->log2_chroma_w); |
186 | px[0] = px[3] = x; |
187 | |
188 | py[1] = py[2] = AV_CEIL_RSHIFT(y, s->desc->log2_chroma_h); |
189 | py[0] = py[3] = y; |
190 | |
191 | s->sws = sws_alloc_context(); |
192 | if (!s->sws) { |
193 | ret = AVERROR(ENOMEM); |
194 | goto error; |
195 | } |
196 | |
197 | for (k = 0; in->data[k]; k++) |
198 | input[k] = in->data[k] + py[k] * in->linesize[k] + px[k]; |
199 | |
200 | av_opt_set_int(s->sws, "srcw", w, 0); |
201 | av_opt_set_int(s->sws, "srch", h, 0); |
202 | av_opt_set_int(s->sws, "src_format", in->format, 0); |
203 | av_opt_set_int(s->sws, "dstw", outlink->w, 0); |
204 | av_opt_set_int(s->sws, "dsth", outlink->h, 0); |
205 | av_opt_set_int(s->sws, "dst_format", outlink->format, 0); |
206 | av_opt_set_int(s->sws, "sws_flags", SWS_BICUBIC, 0); |
207 | |
208 | if ((ret = sws_init_context(s->sws, NULL, NULL)) < 0) |
209 | goto error; |
210 | |
211 | sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize, 0, h, out->data, out->linesize); |
212 | |
213 | out->pts = pts; |
214 | s->frame_count++; |
215 | |
216 | ret = ff_filter_frame(outlink, out); |
217 | sws_freeContext(s->sws); |
218 | s->sws = NULL; |
219 | s->current_frame++; |
220 | return ret; |
221 | error: |
222 | av_frame_free(&out); |
223 | return ret; |
224 | } |
225 | |
226 | static int filter_frame(AVFilterLink *inlink, AVFrame *in) |
227 | { |
228 | AVFilterContext *ctx = inlink->dst; |
229 | AVFilterLink *outlink = ctx->outputs[0]; |
230 | ZPContext *s = ctx->priv; |
231 | double nb_frames; |
232 | int ret; |
233 | |
234 | av_assert0(s->in == NULL); |
235 | |
236 | s->finished = 0; |
237 | s->var_values[VAR_IN_W] = s->var_values[VAR_IW] = in->width; |
238 | s->var_values[VAR_IN_H] = s->var_values[VAR_IH] = in->height; |
239 | s->var_values[VAR_OUT_W] = s->var_values[VAR_OW] = s->w; |
240 | s->var_values[VAR_OUT_H] = s->var_values[VAR_OH] = s->h; |
241 | s->var_values[VAR_IN] = inlink->frame_count_out + 1; |
242 | s->var_values[VAR_ON] = outlink->frame_count_in + 1; |
243 | s->var_values[VAR_PX] = s->x; |
244 | s->var_values[VAR_PY] = s->y; |
245 | s->var_values[VAR_X] = 0; |
246 | s->var_values[VAR_Y] = 0; |
247 | s->var_values[VAR_PZOOM] = s->prev_zoom; |
248 | s->var_values[VAR_ZOOM] = 1; |
249 | s->var_values[VAR_PDURATION] = s->prev_nb_frames; |
250 | s->var_values[VAR_A] = (double) in->width / in->height; |
251 | s->var_values[VAR_SAR] = inlink->sample_aspect_ratio.num ? |
252 | (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1; |
253 | s->var_values[VAR_DAR] = s->var_values[VAR_A] * s->var_values[VAR_SAR]; |
254 | s->var_values[VAR_HSUB] = 1 << s->desc->log2_chroma_w; |
255 | s->var_values[VAR_VSUB] = 1 << s->desc->log2_chroma_h; |
256 | |
257 | if ((ret = av_expr_parse_and_eval(&nb_frames, s->duration_expr_str, |
258 | var_names, s->var_values, |
259 | NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) { |
260 | av_frame_free(&in); |
261 | return ret; |
262 | } |
263 | |
264 | s->var_values[VAR_DURATION] = s->nb_frames = nb_frames; |
265 | s->in = in; |
266 | |
267 | return 0; |
268 | } |
269 | |
270 | static int request_frame(AVFilterLink *outlink) |
271 | { |
272 | AVFilterContext *ctx = outlink->src; |
273 | ZPContext *s = ctx->priv; |
274 | AVFrame *in = s->in; |
275 | double zoom=-1, dx=-1, dy=-1; |
276 | int ret = -1; |
277 | |
278 | if (in) { |
279 | ret = output_single_frame(ctx, in, s->var_values, s->current_frame, |
280 | &zoom, &dx, &dy); |
281 | if (ret < 0) |
282 | goto fail; |
283 | } |
284 | |
285 | if (s->current_frame >= s->nb_frames) { |
286 | if (dx != -1) |
287 | s->x = dx; |
288 | if (dy != -1) |
289 | s->y = dy; |
290 | if (zoom != -1) |
291 | s->prev_zoom = zoom; |
292 | s->prev_nb_frames = s->nb_frames; |
293 | s->nb_frames = 0; |
294 | s->current_frame = 0; |
295 | av_frame_free(&s->in); |
296 | s->finished = 1; |
297 | ret = ff_request_frame(ctx->inputs[0]); |
298 | } |
299 | |
300 | fail: |
301 | sws_freeContext(s->sws); |
302 | s->sws = NULL; |
303 | |
304 | return ret; |
305 | } |
306 | |
307 | static int poll_frame(AVFilterLink *link) |
308 | { |
309 | ZPContext *s = link->src->priv; |
310 | return s->nb_frames - s->current_frame; |
311 | } |
312 | |
313 | static int query_formats(AVFilterContext *ctx) |
314 | { |
315 | static const enum AVPixelFormat pix_fmts[] = { |
316 | AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, |
317 | AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, |
318 | AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, |
319 | AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA422P, |
320 | AV_PIX_FMT_YUVA420P, |
321 | AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, |
322 | AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, |
323 | AV_PIX_FMT_YUVJ411P, |
324 | AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, |
325 | AV_PIX_FMT_GRAY8, |
326 | AV_PIX_FMT_NONE |
327 | }; |
328 | |
329 | AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts); |
330 | if (!fmts_list) |
331 | return AVERROR(ENOMEM); |
332 | return ff_set_common_formats(ctx, fmts_list); |
333 | } |
334 | |
335 | static av_cold void uninit(AVFilterContext *ctx) |
336 | { |
337 | ZPContext *s = ctx->priv; |
338 | |
339 | sws_freeContext(s->sws); |
340 | s->sws = NULL; |
341 | } |
342 | |
343 | static const AVFilterPad inputs[] = { |
344 | { |
345 | .name = "default", |
346 | .type = AVMEDIA_TYPE_VIDEO, |
347 | .filter_frame = filter_frame, |
348 | .needs_fifo = 1, |
349 | }, |
350 | { NULL } |
351 | }; |
352 | |
353 | static const AVFilterPad outputs[] = { |
354 | { |
355 | .name = "default", |
356 | .type = AVMEDIA_TYPE_VIDEO, |
357 | .config_props = config_output, |
358 | .poll_frame = poll_frame, |
359 | .request_frame = request_frame, |
360 | }, |
361 | { NULL } |
362 | }; |
363 | |
364 | AVFilter ff_vf_zoompan = { |
365 | .name = "zoompan", |
366 | .description = NULL_IF_CONFIG_SMALL("Apply Zoom & Pan effect."), |
367 | .priv_size = sizeof(ZPContext), |
368 | .priv_class = &zoompan_class, |
369 | .init = init, |
370 | .uninit = uninit, |
371 | .query_formats = query_formats, |
372 | .inputs = inputs, |
373 | .outputs = outputs, |
374 | .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, |
375 | }; |
376 |