path: root/libavfilter/vf_framerate.c (plain)
blob: dc8b05f40f261d74ece937385e1f6da765bf003c
/*
 * Copyright (C) 2012 Mark Himsley
 *
 * get_scene_score() Copyright (c) 2011 Stefano Sabatini
 * taken from libavfilter/vf_select.c
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * filter for upsampling or downsampling a progressive source
 */
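
/*
 * Illustrative usage (added comment, not part of the original source): the
 * filter is configured through the AVOptions declared below, for example to
 * convert a progressive clip to 50 frames per second:
 *
 *     ffmpeg -i input.mkv -filter:v framerate=fps=50 output.mkv
 *
 * The file names here are placeholders; the scene, interp_start and
 * interp_end options below tune the blending behaviour.
 */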

#define DEBUG

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixelutils.h"

#include "avfilter.h"
#include "internal.h"
#include "video.h"

#define N_SRCE 3

typedef struct FrameRateContext {
    const AVClass *class;
    // parameters
    AVRational dest_frame_rate; ///< output frames per second
    int flags; ///< flags affecting frame rate conversion algorithm
    double scene_score; ///< score that denotes a scene change has happened
    int interp_start; ///< start of range to apply linear interpolation
    int interp_end; ///< end of range to apply linear interpolation

    int line_size[4]; ///< bytes of pixel data per line for each plane
    int vsub;

    int frst, next, prev, crnt, last;
    int pending_srce_frames; ///< how many input frames are still waiting to be processed
    int flush; ///< are we flushing final frames
    int pending_end_frame; ///< flag indicating we are waiting to call filter_frame()

    AVRational srce_time_base; ///< timebase of source

    AVRational dest_time_base; ///< timebase of destination
    int32_t dest_frame_num;
    int64_t last_dest_frame_pts; ///< pts of the last frame output
    int64_t average_srce_pts_dest_delta;///< average input pts delta converted from input rate to output rate
    int64_t average_dest_pts_delta; ///< calculated average output pts delta

    av_pixelutils_sad_fn sad; ///< Sum of the absolute difference function (scene detect only)
    double prev_mafd; ///< previous MAFD (scene detect only)

    AVFrame *srce[N_SRCE]; ///< buffered source frames
    int64_t srce_pts_dest[N_SRCE]; ///< pts for source frames scaled to output timebase
    int64_t pts; ///< pts of frame we are working on

    int (*blend_frames)(AVFilterContext *ctx, float interpolate,
                        AVFrame *copy_src1, AVFrame *copy_src2);
    int max;
    int bitdepth;
    AVFrame *work;
} FrameRateContext;

#define OFFSET(x) offsetof(FrameRateContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
#define FRAMERATE_FLAG_SCD 01

static const AVOption framerate_options[] = {
    {"fps", "required output frames per second rate", OFFSET(dest_frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str="50"}, 0, INT_MAX, V|F },

    {"interp_start", "point to start linear interpolation", OFFSET(interp_start), AV_OPT_TYPE_INT, {.i64=15}, 0, 255, V|F },
    {"interp_end", "point to end linear interpolation", OFFSET(interp_end), AV_OPT_TYPE_INT, {.i64=240}, 0, 255, V|F },
    {"scene", "scene change level", OFFSET(scene_score), AV_OPT_TYPE_DOUBLE, {.dbl=7.0}, 0, INT_MAX, V|F },

    {"flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=1}, 0, INT_MAX, V|F, "flags" },
    {"scene_change_detect", "enable scene change detection", 0, AV_OPT_TYPE_CONST, {.i64=FRAMERATE_FLAG_SCD}, INT_MIN, INT_MAX, V|F, "flags" },
    {"scd", "enable scene change detection", 0, AV_OPT_TYPE_CONST, {.i64=FRAMERATE_FLAG_SCD}, INT_MIN, INT_MAX, V|F, "flags" },

    {NULL}
};

AVFILTER_DEFINE_CLASS(framerate);

static void next_source(AVFilterContext *ctx)
{
    FrameRateContext *s = ctx->priv;
    int i;

    ff_dlog(ctx, "next_source()\n");

    if (s->srce[s->last] && s->srce[s->last] != s->srce[s->last-1]) {
        ff_dlog(ctx, "next_source() unlink %d\n", s->last);
        av_frame_free(&s->srce[s->last]);
    }
    for (i = s->last; i > s->frst; i--) {
        ff_dlog(ctx, "next_source() copy %d to %d\n", i - 1, i);
        s->srce[i] = s->srce[i - 1];
    }
    ff_dlog(ctx, "next_source() make %d null\n", s->frst);
    s->srce[s->frst] = NULL;
}

static av_always_inline int64_t sad_8x8_16(const uint16_t *src1, ptrdiff_t stride1,
                                           const uint16_t *src2, ptrdiff_t stride2)
{
    int sum = 0;
    int x, y;

    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++)
            sum += FFABS(src1[x] - src2[x]);
        src1 += stride1;
        src2 += stride2;
    }
    return sum;
}

static double get_scene_score16(AVFilterContext *ctx, AVFrame *crnt, AVFrame *next)
{
    FrameRateContext *s = ctx->priv;
    double ret = 0;

    ff_dlog(ctx, "get_scene_score16()\n");

    if (crnt &&
        crnt->height == next->height &&
        crnt->width == next->width) {
        int x, y;
        int64_t sad;
        double mafd, diff;
        const uint16_t *p1 = (const uint16_t *)crnt->data[0];
        const uint16_t *p2 = (const uint16_t *)next->data[0];
        const int p1_linesize = crnt->linesize[0] / 2;
        const int p2_linesize = next->linesize[0] / 2;

        ff_dlog(ctx, "get_scene_score16() process\n");

        for (sad = y = 0; y < crnt->height; y += 8) {
            for (x = 0; x < p1_linesize; x += 8) {
                sad += sad_8x8_16(p1 + y * p1_linesize + x,
                                  p1_linesize,
                                  p2 + y * p2_linesize + x,
                                  p2_linesize);
            }
        }
        mafd = sad / (crnt->height * crnt->width * 3);
        diff = fabs(mafd - s->prev_mafd);
        ret = av_clipf(FFMIN(mafd, diff), 0, 100.0);
        s->prev_mafd = mafd;
    }
    ff_dlog(ctx, "get_scene_score16() result is:%f\n", ret);
    return ret;
}

static double get_scene_score(AVFilterContext *ctx, AVFrame *crnt, AVFrame *next)
{
    FrameRateContext *s = ctx->priv;
    double ret = 0;

    ff_dlog(ctx, "get_scene_score()\n");

    if (crnt &&
        crnt->height == next->height &&
        crnt->width == next->width) {
        int x, y;
        int64_t sad;
        double mafd, diff;
        uint8_t *p1 = crnt->data[0];
        uint8_t *p2 = next->data[0];
        const int p1_linesize = crnt->linesize[0];
        const int p2_linesize = next->linesize[0];

        ff_dlog(ctx, "get_scene_score() process\n");

        for (sad = y = 0; y < crnt->height; y += 8) {
            for (x = 0; x < p1_linesize; x += 8) {
                sad += s->sad(p1 + y * p1_linesize + x,
                              p1_linesize,
                              p2 + y * p2_linesize + x,
                              p2_linesize);
            }
        }
        emms_c();
        mafd = sad / (crnt->height * crnt->width * 3);
        diff = fabs(mafd - s->prev_mafd);
        ret = av_clipf(FFMIN(mafd, diff), 0, 100.0);
        s->prev_mafd = mafd;
    }
    ff_dlog(ctx, "get_scene_score() result is:%f\n", ret);
    return ret;
}

static int blend_frames16(AVFilterContext *ctx, float interpolate,
                          AVFrame *copy_src1, AVFrame *copy_src2)
{
    FrameRateContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    double interpolate_scene_score = 0;

    if ((s->flags & FRAMERATE_FLAG_SCD) && copy_src2) {
        interpolate_scene_score = get_scene_score16(ctx, copy_src1, copy_src2);
        ff_dlog(ctx, "blend_frames16() interpolate scene score:%f\n", interpolate_scene_score);
    }
    // decide if the shot-change detection allows us to blend two frames
    if (interpolate_scene_score < s->scene_score && copy_src2) {
        uint16_t src2_factor = fabsf(interpolate) * (1 << (s->bitdepth - 8));
        uint16_t src1_factor = s->max - src2_factor;
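        /*
         * Note (added comment, not in the original source): "interpolate"
         * arrives scaled to 0..256 (8-bit precision), so it is rescaled here
         * to the full range of the source bit depth. For example, assuming a
         * 10-bit source, s->bitdepth is 10, s->max is 1024, and an
         * interpolate value of 128 (half way) gives src2_factor = 512 and
         * src1_factor = 512.
         */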
        const int half = s->max / 2;
        const int uv = (s->max + 1) * half;
        const int shift = s->bitdepth;
        int plane, line, pixel;

        // get work-space for output frame
        s->work = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->work)
            return AVERROR(ENOMEM);

        av_frame_copy_props(s->work, s->srce[s->crnt]);

        ff_dlog(ctx, "blend_frames16() INTERPOLATE to create work frame\n");
        for (plane = 0; plane < 4 && copy_src1->data[plane] && copy_src2->data[plane]; plane++) {
            int cpy_line_width = s->line_size[plane];
            const uint16_t *cpy_src1_data = (const uint16_t *)copy_src1->data[plane];
            int cpy_src1_line_size = copy_src1->linesize[plane] / 2;
            const uint16_t *cpy_src2_data = (const uint16_t *)copy_src2->data[plane];
            int cpy_src2_line_size = copy_src2->linesize[plane] / 2;
            int cpy_src_h = (plane > 0 && plane < 3) ? (copy_src1->height >> s->vsub) : (copy_src1->height);
            uint16_t *cpy_dst_data = (uint16_t *)s->work->data[plane];
            int cpy_dst_line_size = s->work->linesize[plane] / 2;

            if (plane < 1 || plane > 2) {
                // luma or alpha
                for (line = 0; line < cpy_src_h; line++) {
                    for (pixel = 0; pixel < cpy_line_width; pixel++)
                        cpy_dst_data[pixel] = ((cpy_src1_data[pixel] * src1_factor) + (cpy_src2_data[pixel] * src2_factor) + half) >> shift;
                    cpy_src1_data += cpy_src1_line_size;
                    cpy_src2_data += cpy_src2_line_size;
                    cpy_dst_data += cpy_dst_line_size;
                }
            } else {
                // chroma
                for (line = 0; line < cpy_src_h; line++) {
                    for (pixel = 0; pixel < cpy_line_width; pixel++) {
                        cpy_dst_data[pixel] = (((cpy_src1_data[pixel] - half) * src1_factor) + ((cpy_src2_data[pixel] - half) * src2_factor) + uv) >> shift;
                    }
                    cpy_src1_data += cpy_src1_line_size;
                    cpy_src2_data += cpy_src2_line_size;
                    cpy_dst_data += cpy_dst_line_size;
                }
            }
        }
        return 1;
    }
    return 0;
}

static int blend_frames8(AVFilterContext *ctx, float interpolate,
                         AVFrame *copy_src1, AVFrame *copy_src2)
{
    FrameRateContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    double interpolate_scene_score = 0;

    if ((s->flags & FRAMERATE_FLAG_SCD) && copy_src2) {
        interpolate_scene_score = get_scene_score(ctx, copy_src1, copy_src2);
        ff_dlog(ctx, "blend_frames8() interpolate scene score:%f\n", interpolate_scene_score);
    }
    // decide if the shot-change detection allows us to blend two frames
    if (interpolate_scene_score < s->scene_score && copy_src2) {
        uint16_t src2_factor = fabsf(interpolate);
        uint16_t src1_factor = 256 - src2_factor;
        int plane, line, pixel;

        // get work-space for output frame
        s->work = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!s->work)
            return AVERROR(ENOMEM);

        av_frame_copy_props(s->work, s->srce[s->crnt]);

        ff_dlog(ctx, "blend_frames8() INTERPOLATE to create work frame\n");
        for (plane = 0; plane < 4 && copy_src1->data[plane] && copy_src2->data[plane]; plane++) {
            int cpy_line_width = s->line_size[plane];
            uint8_t *cpy_src1_data = copy_src1->data[plane];
            int cpy_src1_line_size = copy_src1->linesize[plane];
            uint8_t *cpy_src2_data = copy_src2->data[plane];
            int cpy_src2_line_size = copy_src2->linesize[plane];
            int cpy_src_h = (plane > 0 && plane < 3) ? (copy_src1->height >> s->vsub) : (copy_src1->height);
            uint8_t *cpy_dst_data = s->work->data[plane];
            int cpy_dst_line_size = s->work->linesize[plane];
            if (plane < 1 || plane > 2) {
                // luma or alpha
                for (line = 0; line < cpy_src_h; line++) {
                    for (pixel = 0; pixel < cpy_line_width; pixel++) {
                        // integer version of (src1 * src1_factor) + (src2 * src2_factor) + 0.5
                        // 0.5 is for rounding
                        // 128 is the integer representation of 0.5 << 8
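                        // Worked example (added comment, not in the original source):
                        // with src1 = 100, src2 = 200 and interpolate = 64, the factors are
                        // src1_factor = 192 and src2_factor = 64, so the result is
                        // (100*192 + 200*64 + 128) >> 8 = 32128 >> 8 = 125.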
                        cpy_dst_data[pixel] = ((cpy_src1_data[pixel] * src1_factor) + (cpy_src2_data[pixel] * src2_factor) + 128) >> 8;
                    }
                    cpy_src1_data += cpy_src1_line_size;
                    cpy_src2_data += cpy_src2_line_size;
                    cpy_dst_data += cpy_dst_line_size;
                }
            } else {
                // chroma
                for (line = 0; line < cpy_src_h; line++) {
                    for (pixel = 0; pixel < cpy_line_width; pixel++) {
                        // as above
                        // because U and V are based around 128 we have to subtract 128 from the components.
                        // 32896 is the integer representation of 128.5 << 8
                        cpy_dst_data[pixel] = (((cpy_src1_data[pixel] - 128) * src1_factor) + ((cpy_src2_data[pixel] - 128) * src2_factor) + 32896) >> 8;
                    }
                    cpy_src1_data += cpy_src1_line_size;
                    cpy_src2_data += cpy_src2_line_size;
                    cpy_dst_data += cpy_dst_line_size;
                }
            }
        }
        return 1;
    }
    return 0;
}

static int process_work_frame(AVFilterContext *ctx, int stop)
{
    FrameRateContext *s = ctx->priv;
    int64_t work_next_pts;
    AVFrame *copy_src1;
    float interpolate;

    ff_dlog(ctx, "process_work_frame()\n");

    ff_dlog(ctx, "process_work_frame() pending_input_frames %d\n", s->pending_srce_frames);

    if (s->srce[s->prev]) ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64"\n", s->srce[s->prev]->pts);
    if (s->srce[s->crnt]) ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64"\n", s->srce[s->crnt]->pts);
    if (s->srce[s->next]) ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64"\n", s->srce[s->next]->pts);

    if (!s->srce[s->crnt]) {
        // the filter cannot do anything
        ff_dlog(ctx, "process_work_frame() no current frame cached: move on to next frame, do not output a frame\n");
        next_source(ctx);
        return 0;
    }

    work_next_pts = s->pts + s->average_dest_pts_delta;

    ff_dlog(ctx, "process_work_frame() work crnt pts:%"PRId64"\n", s->pts);
    ff_dlog(ctx, "process_work_frame() work next pts:%"PRId64"\n", work_next_pts);
    if (s->srce[s->prev])
        ff_dlog(ctx, "process_work_frame() srce prev pts:%"PRId64" at dest time base:%u/%u\n",
                s->srce_pts_dest[s->prev], s->dest_time_base.num, s->dest_time_base.den);
    if (s->srce[s->crnt])
        ff_dlog(ctx, "process_work_frame() srce crnt pts:%"PRId64" at dest time base:%u/%u\n",
                s->srce_pts_dest[s->crnt], s->dest_time_base.num, s->dest_time_base.den);
    if (s->srce[s->next])
        ff_dlog(ctx, "process_work_frame() srce next pts:%"PRId64" at dest time base:%u/%u\n",
                s->srce_pts_dest[s->next], s->dest_time_base.num, s->dest_time_base.den);

    av_assert0(s->srce[s->next]);

    // should filter be skipping input frame (output frame rate is lower than input frame rate)
    if (!s->flush && s->pts >= s->srce_pts_dest[s->next]) {
        ff_dlog(ctx, "process_work_frame() work crnt pts >= srce next pts: SKIP FRAME, move on to next frame, do not output a frame\n");
        next_source(ctx);
        s->pending_srce_frames--;
        return 0;
    }

    // calculate interpolation
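    // Added note (not in the original source): "interpolate" is the position of
    // the output pts between the current and next source frame, scaled to 0..256.
    // For example, an output pts exactly half way between the two source frames
    // gives interpolate = 128, which (with the default interp_start=15 and
    // interp_end=240) selects the blending path below.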
    interpolate = ((s->pts - s->srce_pts_dest[s->crnt]) * 256.0 / s->average_srce_pts_dest_delta);
    ff_dlog(ctx, "process_work_frame() interpolate:%f/256\n", interpolate);
    copy_src1 = s->srce[s->crnt];
    if (interpolate > s->interp_end) {
        ff_dlog(ctx, "process_work_frame() source is:NEXT\n");
        copy_src1 = s->srce[s->next];
    }
    if (s->srce[s->prev] && interpolate < -s->interp_end) {
        ff_dlog(ctx, "process_work_frame() source is:PREV\n");
        copy_src1 = s->srce[s->prev];
    }

    // decide whether to blend two frames
    if ((interpolate >= s->interp_start && interpolate <= s->interp_end) || (interpolate <= -s->interp_start && interpolate >= -s->interp_end)) {
        AVFrame *copy_src2;

        if (interpolate > 0) {
            ff_dlog(ctx, "process_work_frame() interpolate source is:NEXT\n");
            copy_src2 = s->srce[s->next];
        } else {
            ff_dlog(ctx, "process_work_frame() interpolate source is:PREV\n");
            copy_src2 = s->srce[s->prev];
        }
        if (s->blend_frames(ctx, interpolate, copy_src1, copy_src2))
            goto copy_done;
        else
            ff_dlog(ctx, "process_work_frame() CUT - DON'T INTERPOLATE\n");
    }

    ff_dlog(ctx, "process_work_frame() COPY to the work frame\n");
    // copy the frame we decided is our base source
    s->work = av_frame_clone(copy_src1);
    if (!s->work)
        return AVERROR(ENOMEM);

copy_done:
    s->work->pts = s->pts;

    // should filter be re-using input frame (output frame rate is higher than input frame rate)
    if (!s->flush && (work_next_pts + s->average_dest_pts_delta) < (s->srce_pts_dest[s->crnt] + s->average_srce_pts_dest_delta)) {
        ff_dlog(ctx, "process_work_frame() REPEAT FRAME\n");
    } else {
        ff_dlog(ctx, "process_work_frame() CONSUME FRAME, move to next frame\n");
        s->pending_srce_frames--;
        next_source(ctx);
    }
    ff_dlog(ctx, "process_work_frame() output a frame\n");
    s->dest_frame_num++;
    if (stop)
        s->pending_end_frame = 0;
    s->last_dest_frame_pts = s->work->pts;

    return 1;
}

static void set_srce_frame_dest_pts(AVFilterContext *ctx)
{
    FrameRateContext *s = ctx->priv;

    ff_dlog(ctx, "set_srce_frame_dest_pts()\n");

    // scale the input pts from the timebase difference between input and output
    if (s->srce[s->prev])
        s->srce_pts_dest[s->prev] = av_rescale_q(s->srce[s->prev]->pts, s->srce_time_base, s->dest_time_base);
    if (s->srce[s->crnt])
        s->srce_pts_dest[s->crnt] = av_rescale_q(s->srce[s->crnt]->pts, s->srce_time_base, s->dest_time_base);
    if (s->srce[s->next])
        s->srce_pts_dest[s->next] = av_rescale_q(s->srce[s->next]->pts, s->srce_time_base, s->dest_time_base);
}

static void set_work_frame_pts(AVFilterContext *ctx)
{
    FrameRateContext *s = ctx->priv;
    int64_t pts, average_srce_pts_delta = 0;

    ff_dlog(ctx, "set_work_frame_pts()\n");

    av_assert0(s->srce[s->next]);
    av_assert0(s->srce[s->crnt]);

    ff_dlog(ctx, "set_work_frame_pts() srce crnt pts:%"PRId64"\n", s->srce[s->crnt]->pts);
    ff_dlog(ctx, "set_work_frame_pts() srce next pts:%"PRId64"\n", s->srce[s->next]->pts);
    if (s->srce[s->prev])
        ff_dlog(ctx, "set_work_frame_pts() srce prev pts:%"PRId64"\n", s->srce[s->prev]->pts);

    average_srce_pts_delta = s->average_srce_pts_dest_delta;
    ff_dlog(ctx, "set_work_frame_pts() initial average srce pts:%"PRId64"\n", average_srce_pts_delta);

    set_srce_frame_dest_pts(ctx);

    // calculate the PTS delta
    if ((pts = (s->srce_pts_dest[s->next] - s->srce_pts_dest[s->crnt]))) {
        average_srce_pts_delta = average_srce_pts_delta ? ((average_srce_pts_delta + pts) >> 1) : pts;
    } else if (s->srce[s->prev] && (pts = (s->srce_pts_dest[s->crnt] - s->srce_pts_dest[s->prev]))) {
        average_srce_pts_delta = average_srce_pts_delta ? ((average_srce_pts_delta + pts) >> 1) : pts;
    }

    s->average_srce_pts_dest_delta = average_srce_pts_delta;
    ff_dlog(ctx, "set_work_frame_pts() average srce pts:%"PRId64"\n", average_srce_pts_delta);
    ff_dlog(ctx, "set_work_frame_pts() average srce pts:%"PRId64" at dest time base:%u/%u\n",
            s->average_srce_pts_dest_delta, s->dest_time_base.num, s->dest_time_base.den);

    if (ctx->inputs[0] && !s->average_dest_pts_delta) {
        int64_t d = av_q2d(av_inv_q(av_mul_q(s->dest_time_base, s->dest_frame_rate)));
        s->average_dest_pts_delta = d;
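        // Added note (not in the original source): this computes the output pts
        // step as 1 / (dest_time_base * dest_frame_rate). For example, with a
        // destination time base of 1/50 and an output rate of 50/1 the delta is
        // 1 tick per output frame.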
        ff_dlog(ctx, "set_work_frame_pts() average dest pts delta:%"PRId64"\n", s->average_dest_pts_delta);
    }

    if (!s->dest_frame_num) {
        s->pts = s->last_dest_frame_pts = s->srce_pts_dest[s->crnt];
    } else {
        s->pts = s->last_dest_frame_pts + s->average_dest_pts_delta;
    }

    ff_dlog(ctx, "set_work_frame_pts() calculated pts:%"PRId64" at dest time base:%u/%u\n",
            s->pts, s->dest_time_base.num, s->dest_time_base.den);
}

static av_cold int init(AVFilterContext *ctx)
{
    FrameRateContext *s = ctx->priv;

    s->dest_frame_num = 0;

    s->crnt = (N_SRCE)>>1;
    s->last = N_SRCE - 1;

    s->next = s->crnt - 1;
    s->prev = s->crnt + 1;
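
    // Added note (not in the original source): with N_SRCE == 3 this leaves
    // crnt == 1, last == prev == 2 and next == frst == 0 (frst stays at its
    // zero-initialised value), so new frames enter the srce[] buffer at
    // index 0 and age towards index 2 as next_source() shifts them along.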

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FrameRateContext *s = ctx->priv;
    int i;

    for (i = s->frst; i < s->last; i++) {
        if (s->srce[i] && (s->srce[i] != s->srce[i + 1]))
            av_frame_free(&s->srce[i]);
    }
    av_frame_free(&s->srce[s->last]);
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12,
        AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FrameRateContext *s = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(inlink->format);
    int plane;

    for (plane = 0; plane < 4; plane++) {
        s->line_size[plane] = av_image_get_linesize(inlink->format, inlink->w,
                                                    plane);
    }

    s->bitdepth = pix_desc->comp[0].depth;
    s->vsub = pix_desc->log2_chroma_h;

    s->sad = av_pixelutils_get_sad_fn(3, 3, 2, s); // 8x8 both sources aligned
    if (!s->sad)
        return AVERROR(EINVAL);

    s->srce_time_base = inlink->time_base;

    if (s->bitdepth == 8)
        s->blend_frames = blend_frames8;
    else
        s->blend_frames = blend_frames16;
    s->max = 1 << (s->bitdepth);

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    int ret;
    AVFilterContext *ctx = inlink->dst;
    FrameRateContext *s = ctx->priv;

    // we have one new frame
    s->pending_srce_frames++;

    if (inpicref->interlaced_frame)
        av_log(ctx, AV_LOG_WARNING, "Interlaced frame found - the output will not be correct.\n");

    // store the pointer to the new frame
    av_frame_free(&s->srce[s->frst]);
    s->srce[s->frst] = inpicref;

    if (!s->pending_end_frame && s->srce[s->crnt]) {
        set_work_frame_pts(ctx);
        s->pending_end_frame = 1;
    } else {
        set_srce_frame_dest_pts(ctx);
    }

    ret = process_work_frame(ctx, 1);
    if (ret < 0)
        return ret;
    return ret ? ff_filter_frame(ctx->outputs[0], s->work) : 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FrameRateContext *s = ctx->priv;
    int exact;

    ff_dlog(ctx, "config_output()\n");

    ff_dlog(ctx,
            "config_output() input time base:%u/%u (%f)\n",
            ctx->inputs[0]->time_base.num, ctx->inputs[0]->time_base.den,
            av_q2d(ctx->inputs[0]->time_base));

    // make sure timebase is small enough to hold the framerate

    exact = av_reduce(&s->dest_time_base.num, &s->dest_time_base.den,
                      av_gcd((int64_t)s->srce_time_base.num * s->dest_frame_rate.num,
                             (int64_t)s->srce_time_base.den * s->dest_frame_rate.den ),
                      (int64_t)s->srce_time_base.den * s->dest_frame_rate.num, INT_MAX);
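
    // Added note (not in the original source): this chooses an output time base
    // fine enough for both the input time base and the requested rate. For
    // example, an input time base of 1/25 with fps=50/1 reduces to
    // gcd(1*50, 25*1) / (25*50) = 25/1250 = 1/50.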

    av_log(ctx, AV_LOG_INFO,
           "time base:%u/%u -> %u/%u exact:%d\n",
           s->srce_time_base.num, s->srce_time_base.den,
           s->dest_time_base.num, s->dest_time_base.den, exact);
    if (!exact) {
        av_log(ctx, AV_LOG_WARNING, "Timebase conversion is not exact\n");
    }

    outlink->frame_rate = s->dest_frame_rate;
    outlink->time_base = s->dest_time_base;

    ff_dlog(ctx,
            "config_output() output time base:%u/%u (%f) w:%d h:%d\n",
            outlink->time_base.num, outlink->time_base.den,
            av_q2d(outlink->time_base),
            outlink->w, outlink->h);


    av_log(ctx, AV_LOG_INFO, "fps -> fps:%u/%u scene score:%f interpolate start:%d end:%d\n",
           s->dest_frame_rate.num, s->dest_frame_rate.den,
           s->scene_score, s->interp_start, s->interp_end);

    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FrameRateContext *s = ctx->priv;
    int ret, i;

    ff_dlog(ctx, "request_frame()\n");

    // if there is no "next" frame AND we are not in flush then get one from our input filter
    if (!s->srce[s->frst] && !s->flush)
        goto request;

    ff_dlog(ctx, "request_frame() REPEAT or FLUSH\n");

    if (s->pending_srce_frames <= 0) {
        ff_dlog(ctx, "request_frame() nothing else to do, return:EOF\n");
        return AVERROR_EOF;
    }

    // otherwise, make brand-new frame and pass to our output filter
    ff_dlog(ctx, "request_frame() FLUSH\n");

    // back fill at end of file when source has no more frames
    for (i = s->last; i > s->frst; i--) {
        if (!s->srce[i - 1] && s->srce[i]) {
            ff_dlog(ctx, "request_frame() copy:%d to:%d\n", i, i - 1);
            s->srce[i - 1] = s->srce[i];
        }
    }

    set_work_frame_pts(ctx);
    ret = process_work_frame(ctx, 0);
    if (ret < 0)
        return ret;
    if (ret)
        return ff_filter_frame(ctx->outputs[0], s->work);

request:
    ff_dlog(ctx, "request_frame() call source's request_frame()\n");
    ret = ff_request_frame(ctx->inputs[0]);
    if (ret < 0 && (ret != AVERROR_EOF)) {
        ff_dlog(ctx, "request_frame() source's request_frame() returned error:%d\n", ret);
        return ret;
    } else if (ret == AVERROR_EOF) {
        s->flush = 1;
    }
    ff_dlog(ctx, "request_frame() source's request_frame() returned:%d\n", ret);
    return 0;
}

static const AVFilterPad framerate_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_input,
        .filter_frame  = filter_frame,
    },
    { NULL }
};

static const AVFilterPad framerate_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_vf_framerate = {
    .name          = "framerate",
    .description   = NULL_IF_CONFIG_SMALL("Upsamples or downsamples progressive source between specified frame rates."),
    .priv_size     = sizeof(FrameRateContext),
    .priv_class    = &framerate_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = framerate_inputs,
    .outputs       = framerate_outputs,
};