/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"

#include "avcodec.h"
#include "internal.h"
#include "videodsp.h"
#include "vp9data.h"
#include "vp9dec.h"

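// Fix up the intra prediction mode for the current transform block depending
// on which neighbouring edges (top/left) actually exist, and gather the edge
// pixels the predictor needs: the top edge (plus top-left/top-right where
// required) into *a and the left edge into l[]. Missing pixels are extended
// from the available ones or synthesized as fixed mid-grey values. Returns
// the possibly substituted mode.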
static av_always_inline int check_intra_mode(VP9Context *s, int mode, uint8_t **a,
                                             uint8_t *dst_edge, ptrdiff_t stride_edge,
                                             uint8_t *dst_inner, ptrdiff_t stride_inner,
                                             uint8_t *l, int col, int x, int w,
                                             int row, int y, enum TxfmMode tx,
                                             int p, int ss_h, int ss_v, int bytesperpixel)
{
    int have_top = row > 0 || y > 0;
    int have_left = col > s->tile_col_start || x > 0;
    int have_right = x < w - 1;
    int bpp = s->s.h.bpp;
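    // Fallback table: each mode is remapped depending on which of the
    // left/top edges is actually available, e.g. VERT_PRED without a top
    // edge degrades to DC_127_PRED. Indexed as [mode][have_left][have_top].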
    static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = {
        [VERT_PRED]            = { { DC_127_PRED,          VERT_PRED            },
                                   { DC_127_PRED,          VERT_PRED            } },
        [HOR_PRED]             = { { DC_129_PRED,          DC_129_PRED          },
                                   { HOR_PRED,             HOR_PRED             } },
        [DC_PRED]              = { { DC_128_PRED,          TOP_DC_PRED          },
                                   { LEFT_DC_PRED,         DC_PRED              } },
        [DIAG_DOWN_LEFT_PRED]  = { { DC_127_PRED,          DIAG_DOWN_LEFT_PRED  },
                                   { DC_127_PRED,          DIAG_DOWN_LEFT_PRED  } },
        [DIAG_DOWN_RIGHT_PRED] = { { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED },
                                   { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED } },
        [VERT_RIGHT_PRED]      = { { VERT_RIGHT_PRED,      VERT_RIGHT_PRED      },
                                   { VERT_RIGHT_PRED,      VERT_RIGHT_PRED      } },
        [HOR_DOWN_PRED]        = { { HOR_DOWN_PRED,        HOR_DOWN_PRED        },
                                   { HOR_DOWN_PRED,        HOR_DOWN_PRED        } },
        [VERT_LEFT_PRED]       = { { DC_127_PRED,          VERT_LEFT_PRED       },
                                   { DC_127_PRED,          VERT_LEFT_PRED       } },
        [HOR_UP_PRED]          = { { DC_129_PRED,          DC_129_PRED          },
                                   { HOR_UP_PRED,          HOR_UP_PRED          } },
        [TM_VP8_PRED]          = { { DC_129_PRED,          VERT_PRED            },
                                   { HOR_PRED,             TM_VP8_PRED          } },
    };
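    // Which edge pixels each (post-substitution) mode reads. invert_left
    // flips the storage order of the left column in l[]: normally it is
    // filled bottom-to-top, HOR_UP_PRED wants it top-to-bottom.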
    static const struct {
        uint8_t needs_left:1;
        uint8_t needs_top:1;
        uint8_t needs_topleft:1;
        uint8_t needs_topright:1;
        uint8_t invert_left:1;
    } edges[N_INTRA_PRED_MODES] = {
        [VERT_PRED]            = { .needs_top  = 1 },
        [HOR_PRED]             = { .needs_left = 1 },
        [DC_PRED]              = { .needs_top  = 1, .needs_left = 1 },
        [DIAG_DOWN_LEFT_PRED]  = { .needs_top  = 1, .needs_topright = 1 },
        [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [VERT_RIGHT_PRED]      = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [HOR_DOWN_PRED]        = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [VERT_LEFT_PRED]       = { .needs_top  = 1, .needs_topright = 1 },
        [HOR_UP_PRED]          = { .needs_left = 1, .invert_left = 1 },
        [TM_VP8_PRED]          = { .needs_left = 1, .needs_top = 1,
                                   .needs_topleft = 1 },
        [LEFT_DC_PRED]         = { .needs_left = 1 },
        [TOP_DC_PRED]          = { .needs_top  = 1 },
        [DC_128_PRED]          = { 0 },
        [DC_127_PRED]          = { 0 },
        [DC_129_PRED]          = { 0 }
    };

    av_assert2(mode >= 0 && mode < 10);
    mode = mode_conv[mode][have_left][have_top];
    if (edges[mode].needs_top) {
        uint8_t *top, *topleft;
        int n_px_need = 4 << tx, n_px_have = (((s->cols - col) << !ss_h) - x) * 4;
        int n_px_need_tr = 0;

        if (tx == TX_4X4 && edges[mode].needs_topright && have_right)
            n_px_need_tr = 4;

        // if top of sb64-row, use s->intra_pred_data[] instead of
        // dst[-stride] for intra prediction (it contains pre- instead of
        // post-loopfilter data)
        if (have_top) {
            top = !(row & 7) && !y ?
                s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
                y == 0 ? &dst_edge[-stride_edge] : &dst_inner[-stride_inner];
            if (have_left)
                topleft = !(row & 7) && !y ?
                    s->intra_pred_data[p] + (col * (8 >> ss_h) + x * 4) * bytesperpixel :
                    y == 0 || x == 0 ? &dst_edge[-stride_edge] :
                    &dst_inner[-stride_inner];
        }

        if (have_top &&
            (!edges[mode].needs_topleft || (have_left && top == topleft)) &&
            (tx != TX_4X4 || !edges[mode].needs_topright || have_right) &&
            n_px_need + n_px_need_tr <= n_px_have) {
            *a = top;
        } else {
            if (have_top) {
                if (n_px_need <= n_px_have) {
                    memcpy(*a, top, n_px_need * bytesperpixel);
                } else {
#define memset_bpp(c, i1, v, i2, num) do { \
    if (bytesperpixel == 1) { \
        memset(&(c)[(i1)], (v)[(i2)], (num)); \
    } else { \
        int n, val = AV_RN16A(&(v)[(i2) * 2]); \
        for (n = 0; n < (num); n++) { \
            AV_WN16A(&(c)[((i1) + n) * 2], val); \
        } \
    } \
} while (0)
                    memcpy(*a, top, n_px_have * bytesperpixel);
                    memset_bpp(*a, n_px_have, (*a), n_px_have - 1, n_px_need - n_px_have);
                }
            } else {
#define memset_val(c, val, num) do { \
    if (bytesperpixel == 1) { \
        memset((c), (val), (num)); \
    } else { \
        int n; \
        for (n = 0; n < (num); n++) { \
            AV_WN16A(&(c)[n * 2], (val)); \
        } \
    } \
} while (0)
                memset_val(*a, (128 << (bpp - 8)) - 1, n_px_need);
            }
            if (edges[mode].needs_topleft) {
                if (have_left && have_top) {
#define assign_bpp(c, i1, v, i2) do { \
    if (bytesperpixel == 1) { \
        (c)[(i1)] = (v)[(i2)]; \
    } else { \
        AV_COPY16(&(c)[(i1) * 2], &(v)[(i2) * 2]); \
    } \
} while (0)
                    assign_bpp(*a, -1, topleft, -1);
                } else {
#define assign_val(c, i, v) do { \
    if (bytesperpixel == 1) { \
        (c)[(i)] = (v); \
    } else { \
        AV_WN16A(&(c)[(i) * 2], (v)); \
    } \
} while (0)
                    assign_val((*a), -1, (128 << (bpp - 8)) + (have_top ? +1 : -1));
                }
            }
            if (tx == TX_4X4 && edges[mode].needs_topright) {
                if (have_top && have_right &&
                    n_px_need + n_px_need_tr <= n_px_have) {
                    memcpy(&(*a)[4 * bytesperpixel], &top[4 * bytesperpixel], 4 * bytesperpixel);
                } else {
                    memset_bpp(*a, 4, *a, 3, 4);
                }
            }
        }
    }
    if (edges[mode].needs_left) {
        if (have_left) {
            int n_px_need = 4 << tx, i, n_px_have = (((s->rows - row) << !ss_v) - y) * 4;
            uint8_t *dst = x == 0 ? dst_edge : dst_inner;
            ptrdiff_t stride = x == 0 ? stride_edge : stride_inner;

            if (edges[mode].invert_left) {
                if (n_px_need <= n_px_have) {
                    for (i = 0; i < n_px_need; i++)
                        assign_bpp(l, i, &dst[i * stride], -1);
                } else {
                    for (i = 0; i < n_px_have; i++)
                        assign_bpp(l, i, &dst[i * stride], -1);
                    memset_bpp(l, n_px_have, l, n_px_have - 1, n_px_need - n_px_have);
                }
            } else {
                if (n_px_need <= n_px_have) {
                    for (i = 0; i < n_px_need; i++)
                        assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
                } else {
                    for (i = 0; i < n_px_have; i++)
                        assign_bpp(l, n_px_need - 1 - i, &dst[i * stride], -1);
                    memset_bpp(l, 0, l, n_px_need - n_px_have, n_px_need - n_px_have);
                }
            }
        } else {
            memset_val(l, (128 << (bpp - 8)) + 1, 4 << tx);
        }
    }

    return mode;
}

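// Reconstruct an intra-coded block: for every transform block of the luma
// plane, then of both chroma planes, fix up the mode and edge pixels via
// check_intra_mode(), run the intra predictor, and, if there are coded
// coefficients (eob != 0), add the inverse transform on top of the
// prediction.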
static av_always_inline void intra_recon(AVCodecContext *avctx, ptrdiff_t y_off,
                                         ptrdiff_t uv_off, int bytesperpixel)
{
    VP9Context *s = avctx->priv_data;
    VP9Block *b = s->b;
    int row = s->row, col = s->col;
    int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
    int h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
    int end_x = FFMIN(2 * (s->cols - col), w4);
    int end_y = FFMIN(2 * (s->rows - row), h4);
    int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
    int uvstep1d = 1 << b->uvtx, p;
    uint8_t *dst = s->dst[0], *dst_r = s->s.frames[CUR_FRAME].tf.f->data[0] + y_off;
    LOCAL_ALIGNED_32(uint8_t, a_buf, [96]);
    LOCAL_ALIGNED_32(uint8_t, l, [64]);

    for (n = 0, y = 0; y < end_y; y += step1d) {
        uint8_t *ptr = dst, *ptr_r = dst_r;
        for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d * bytesperpixel,
             ptr_r += 4 * step1d * bytesperpixel, n += step) {
            int mode = b->mode[b->bs > BS_8x8 && b->tx == TX_4X4 ?
                               y * 2 + x : 0];
            uint8_t *a = &a_buf[32];
            enum TxfmType txtp = ff_vp9_intra_txfm_type[mode];
            int eob = b->skip ? 0 : b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];

            mode = check_intra_mode(s, mode, &a, ptr_r,
                                    s->s.frames[CUR_FRAME].tf.f->linesize[0],
                                    ptr, s->y_stride, l,
                                    col, x, w4, row, y, b->tx, 0, 0, 0, bytesperpixel);
            s->dsp.intra_pred[b->tx][mode](ptr, s->y_stride, l, a);
            if (eob)
                s->dsp.itxfm_add[tx][txtp](ptr, s->y_stride,
                                           s->block + 16 * n * bytesperpixel, eob);
        }
        dst_r += 4 * step1d * s->s.frames[CUR_FRAME].tf.f->linesize[0];
        dst += 4 * step1d * s->y_stride;
    }

    // U/V
    w4 >>= s->ss_h;
    end_x >>= s->ss_h;
    end_y >>= s->ss_v;
    step = 1 << (b->uvtx * 2);
    for (p = 0; p < 2; p++) {
        dst = s->dst[1 + p];
        dst_r = s->s.frames[CUR_FRAME].tf.f->data[1 + p] + uv_off;
        for (n = 0, y = 0; y < end_y; y += uvstep1d) {
            uint8_t *ptr = dst, *ptr_r = dst_r;
            for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d * bytesperpixel,
                 ptr_r += 4 * uvstep1d * bytesperpixel, n += step) {
                int mode = b->uvmode;
                uint8_t *a = &a_buf[32];
                int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];

                mode = check_intra_mode(s, mode, &a, ptr_r,
                                        s->s.frames[CUR_FRAME].tf.f->linesize[1],
                                        ptr, s->uv_stride, l, col, x, w4, row, y,
                                        b->uvtx, p + 1, s->ss_h, s->ss_v, bytesperpixel);
                s->dsp.intra_pred[b->uvtx][mode](ptr, s->uv_stride, l, a);
                if (eob)
                    s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride,
                                                    s->uvblock[p] + 16 * n * bytesperpixel, eob);
            }
            dst_r += 4 * uvstep1d * s->s.frames[CUR_FRAME].tf.f->linesize[1];
            dst += 4 * uvstep1d * s->uv_stride;
        }
    }
}

void ff_vp9_intra_recon_8bpp(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off)
{
    intra_recon(avctx, y_off, uv_off, 1);
}

void ff_vp9_intra_recon_16bpp(AVCodecContext *avctx, ptrdiff_t y_off, ptrdiff_t uv_off)
{
    intra_recon(avctx, y_off, uv_off, 2);
}

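// Luma motion compensation without reference frame scaling. The mv is in
// 1/8-pel units; its low three bits select the subpel filter phase. When the
// 8-tap filter (3 pixels on one side of the position, 4 on the other) would
// read outside the reference frame, the edge is replicated into
// edge_emu_buffer first.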
static av_always_inline void mc_luma_unscaled(VP9Context *s, vp9_mc_func (*mc)[2],
                                              uint8_t *dst, ptrdiff_t dst_stride,
                                              const uint8_t *ref, ptrdiff_t ref_stride,
                                              ThreadFrame *ref_frame,
                                              ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
                                              int bw, int bh, int w, int h, int bytesperpixel)
{
    int mx = mv->x, my = mv->y, th;

    y += my >> 3;
    x += mx >> 3;
    ref += y * ref_stride + x * bytesperpixel;
    mx &= 7;
    my &= 7;
    // FIXME bilinear filter only needs 0/1 pixels, not 3/4
    // we use +7 because the last 7 pixels of each sbrow can be changed in
    // the longest loopfilter of the next sbrow
    th = (y + bh + 4 * !!my + 7) >> 6;
    ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
    // The arm/aarch64 _hv filters read one more row than is actually
    // needed, so switch to emulated edge one pixel sooner vertically
    // (!!my * 5) than horizontally (!!mx * 4).
    if (x < !!mx * 3 || y < !!my * 3 ||
        x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                 ref - !!my * 3 * ref_stride - !!mx * 3 * bytesperpixel,
                                 160, ref_stride,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        ref_stride = 160;
    }
    mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
}

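// Chroma counterpart of mc_luma_unscaled(). The luma mv is doubled in each
// non-subsampled dimension so that mx/my end up in 1/16-pel units of the
// chroma plane. U and V are filtered separately; they can share
// edge_emu_buffer because each emulated block is consumed by its mc call
// before the buffer is reused.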
static av_always_inline void mc_chroma_unscaled(VP9Context *s, vp9_mc_func (*mc)[2],
                                                uint8_t *dst_u, uint8_t *dst_v,
                                                ptrdiff_t dst_stride,
                                                const uint8_t *ref_u, ptrdiff_t src_stride_u,
                                                const uint8_t *ref_v, ptrdiff_t src_stride_v,
                                                ThreadFrame *ref_frame,
                                                ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
                                                int bw, int bh, int w, int h, int bytesperpixel)
{
    int mx = mv->x * (1 << !s->ss_h), my = mv->y * (1 << !s->ss_v), th;

    y += my >> 4;
    x += mx >> 4;
    ref_u += y * src_stride_u + x * bytesperpixel;
    ref_v += y * src_stride_v + x * bytesperpixel;
    mx &= 15;
    my &= 15;
    // FIXME bilinear filter only needs 0/1 pixels, not 3/4
    // we use +7 because the last 7 pixels of each sbrow can be changed in
    // the longest loopfilter of the next sbrow
    th = (y + bh + 4 * !!my + 7) >> (6 - s->ss_v);
    ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
    // The arm/aarch64 _hv filters read one more row than is actually
    // needed, so switch to emulated edge one pixel sooner vertically
    // (!!my * 5) than horizontally (!!mx * 4).
    if (x < !!mx * 3 || y < !!my * 3 ||
        x + !!mx * 4 > w - bw || y + !!my * 5 > h - bh) {
        s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                 ref_u - !!my * 3 * src_stride_u - !!mx * 3 * bytesperpixel,
                                 160, src_stride_u,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref_u = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        mc[!!mx][!!my](dst_u, dst_stride, ref_u, 160, bh, mx, my);

        s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                 ref_v - !!my * 3 * src_stride_v - !!mx * 3 * bytesperpixel,
                                 160, src_stride_v,
                                 bw + !!mx * 7, bh + !!my * 7,
                                 x - !!mx * 3, y - !!my * 3, w, h);
        ref_v = s->edge_emu_buffer + !!my * 3 * 160 + !!mx * 3 * bytesperpixel;
        mc[!!mx][!!my](dst_v, dst_stride, ref_v, 160, bh, mx, my);
    } else {
        mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my);
        mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my);
    }
}

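// Bind the unscaled helpers to the generic names used by vp9_mc_template.c
// and instantiate the inter prediction code twice, once per pixel depth.
// The px/py/pw/ph and i arguments only matter for the scaled variants
// further down; here they are simply dropped.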
#define mc_luma_dir(s, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
                    px, py, pw, ph, bw, bh, w, h, i) \
    mc_luma_unscaled(s, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
                     mv, bw, bh, w, h, bytesperpixel)
#define mc_chroma_dir(s, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                      row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
    mc_chroma_unscaled(s, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                       row, col, mv, bw, bh, w, h, bytesperpixel)
#define SCALED 0
#define FN(x) x##_8bpp
#define BYTES_PER_PIXEL 1
#include "vp9_mc_template.c"
#undef FN
#undef BYTES_PER_PIXEL
#define FN(x) x##_16bpp
#define BYTES_PER_PIXEL 2
#include "vp9_mc_template.c"
#undef mc_luma_dir
#undef mc_chroma_dir
#undef FN
#undef BYTES_PER_PIXEL
#undef SCALED

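// Luma motion compensation with reference frame scaling. If the reference
// happens to have the same size as the current frame, fall through to the
// unscaled path; otherwise the position and mv are mapped through the
// per-reference 14-bit fixed-point scale factors and the scaled subpel
// filters step through the source by step[] 1/16-pel units per output pixel.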
static av_always_inline void mc_luma_scaled(VP9Context *s, vp9_scaled_mc_func smc,
                                            vp9_mc_func (*mc)[2],
                                            uint8_t *dst, ptrdiff_t dst_stride,
                                            const uint8_t *ref, ptrdiff_t ref_stride,
                                            ThreadFrame *ref_frame,
                                            ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
                                            int px, int py, int pw, int ph,
                                            int bw, int bh, int w, int h, int bytesperpixel,
                                            const uint16_t *scale, const uint8_t *step)
{
    if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
        s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
        mc_luma_unscaled(s, mc, dst, dst_stride, ref, ref_stride, ref_frame,
                         y, x, in_mv, bw, bh, w, h, bytesperpixel);
    } else {
#define scale_mv(n, dim) (((int64_t)(n) * scale[dim]) >> 14)
        int mx, my;
        int refbw_m1, refbh_m1;
        int th;
        VP56mv mv;

        mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
        mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
        // BUG libvpx seems to scale the two components separately. This introduces
        // rounding errors but we have to reproduce them to be exactly compatible
        // with the output from libvpx...
        mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
        my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);

        y = my >> 4;
        x = mx >> 4;
        ref += y * ref_stride + x * bytesperpixel;
        mx &= 15;
        my &= 15;
        refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
        refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
        // FIXME bilinear filter only needs 0/1 pixels, not 3/4
        // we use +7 because the last 7 pixels of each sbrow can be changed in
        // the longest loopfilter of the next sbrow
        th = (y + refbh_m1 + 4 + 7) >> 6;
        ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
        // The arm/aarch64 _hv filters read one more row than is actually
        // needed, so switch to emulated edge one pixel sooner vertically
        // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
        if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                     ref - 3 * ref_stride - 3 * bytesperpixel,
                                     288, ref_stride,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref = s->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            ref_stride = 288;
        }
        smc(dst, dst_stride, ref, ref_stride, bh, mx, my, step[0], step[1]);
    }
}

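// Chroma counterpart of mc_luma_scaled(). Clipping and scaling of the mv is
// done per component; in subsampled dimensions the arithmetic deliberately
// reproduces a libvpx rounding bug (see the BUG links below) so the output
// stays bit-exact with libvpx.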
static av_always_inline void mc_chroma_scaled(VP9Context *s, vp9_scaled_mc_func smc,
                                              vp9_mc_func (*mc)[2],
                                              uint8_t *dst_u, uint8_t *dst_v,
                                              ptrdiff_t dst_stride,
                                              const uint8_t *ref_u, ptrdiff_t src_stride_u,
                                              const uint8_t *ref_v, ptrdiff_t src_stride_v,
                                              ThreadFrame *ref_frame,
                                              ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
                                              int px, int py, int pw, int ph,
                                              int bw, int bh, int w, int h, int bytesperpixel,
                                              const uint16_t *scale, const uint8_t *step)
{
    if (s->s.frames[CUR_FRAME].tf.f->width == ref_frame->f->width &&
        s->s.frames[CUR_FRAME].tf.f->height == ref_frame->f->height) {
        mc_chroma_unscaled(s, mc, dst_u, dst_v, dst_stride, ref_u, src_stride_u,
                           ref_v, src_stride_v, ref_frame,
                           y, x, in_mv, bw, bh, w, h, bytesperpixel);
    } else {
        int mx, my;
        int refbw_m1, refbh_m1;
        int th;
        VP56mv mv;

        if (s->ss_h) {
            // BUG https://code.google.com/p/webm/issues/detail?id=820
            mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 16, (s->cols * 4 - x + px + 3) * 16);
            mx = scale_mv(mv.x, 0) + (scale_mv(x * 16, 0) & ~15) + (scale_mv(x * 32, 0) & 15);
        } else {
            mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
            mx = scale_mv(mv.x * 2, 0) + scale_mv(x * 16, 0);
        }
        if (s->ss_v) {
            // BUG https://code.google.com/p/webm/issues/detail?id=820
            mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 16, (s->rows * 4 - y + py + 3) * 16);
            my = scale_mv(mv.y, 1) + (scale_mv(y * 16, 1) & ~15) + (scale_mv(y * 32, 1) & 15);
        } else {
            mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
            my = scale_mv(mv.y * 2, 1) + scale_mv(y * 16, 1);
        }
#undef scale_mv
        y = my >> 4;
        x = mx >> 4;
        ref_u += y * src_stride_u + x * bytesperpixel;
        ref_v += y * src_stride_v + x * bytesperpixel;
        mx &= 15;
        my &= 15;
        refbw_m1 = ((bw - 1) * step[0] + mx) >> 4;
        refbh_m1 = ((bh - 1) * step[1] + my) >> 4;
        // FIXME bilinear filter only needs 0/1 pixels, not 3/4
        // we use +7 because the last 7 pixels of each sbrow can be changed in
        // the longest loopfilter of the next sbrow
        th = (y + refbh_m1 + 4 + 7) >> (6 - s->ss_v);
        ff_thread_await_progress(ref_frame, FFMAX(th, 0), 0);
        // The arm/aarch64 _hv filters read one more row than is actually
        // needed, so switch to emulated edge one pixel sooner vertically
        // (y + 5 >= h - refbh_m1) than horizontally (x + 4 >= w - refbw_m1).
        if (x < 3 || y < 3 || x + 4 >= w - refbw_m1 || y + 5 >= h - refbh_m1) {
            s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                     ref_u - 3 * src_stride_u - 3 * bytesperpixel,
                                     288, src_stride_u,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref_u = s->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            smc(dst_u, dst_stride, ref_u, 288, bh, mx, my, step[0], step[1]);

            s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
                                     ref_v - 3 * src_stride_v - 3 * bytesperpixel,
                                     288, src_stride_v,
                                     refbw_m1 + 8, refbh_m1 + 8,
                                     x - 3, y - 3, w, h);
            ref_v = s->edge_emu_buffer + 3 * 288 + 3 * bytesperpixel;
            smc(dst_v, dst_stride, ref_v, 288, bh, mx, my, step[0], step[1]);
        } else {
            smc(dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my, step[0], step[1]);
            smc(dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my, step[0], step[1]);
        }
    }
}

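// Same template instantiation as for the unscaled case, this time binding
// the scaled helpers, which additionally consume the prediction block
// position/size (px/py/pw/ph) and the per-reference mv scale/step tables.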
#define mc_luma_dir(s, mc, dst, dst_ls, src, src_ls, tref, row, col, mv, \
                    px, py, pw, ph, bw, bh, w, h, i) \
    mc_luma_scaled(s, s->dsp.s##mc, s->dsp.mc, dst, dst_ls, src, src_ls, tref, row, col, \
                   mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
                   s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
#define mc_chroma_dir(s, mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                      row, col, mv, px, py, pw, ph, bw, bh, w, h, i) \
    mc_chroma_scaled(s, s->dsp.s##mc, s->dsp.mc, dstu, dstv, dst_ls, srcu, srcu_ls, srcv, srcv_ls, tref, \
                     row, col, mv, px, py, pw, ph, bw, bh, w, h, bytesperpixel, \
                     s->mvscale[b->ref[i]], s->mvstep[b->ref[i]])
#define SCALED 1
#define FN(x) x##_scaled_8bpp
#define BYTES_PER_PIXEL 1
#include "vp9_mc_template.c"
#undef FN
#undef BYTES_PER_PIXEL
#define FN(x) x##_scaled_16bpp
#define BYTES_PER_PIXEL 2
#include "vp9_mc_template.c"
#undef mc_luma_dir
#undef mc_chroma_dir
#undef FN
#undef BYTES_PER_PIXEL
#undef SCALED

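// Reconstruct an inter-coded block: run inter prediction (the scaled
// variants if either used reference needs resampling), then, unless the
// block is skipped, add the residual inverse transforms, mirroring the loop
// structure of intra_recon().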
static av_always_inline void inter_recon(AVCodecContext *avctx, int bytesperpixel)
{
    VP9Context *s = avctx->priv_data;
    VP9Block *b = s->b;
    int row = s->row, col = s->col;

    if (s->mvscale[b->ref[0]][0] || (b->comp && s->mvscale[b->ref[1]][0])) {
        if (bytesperpixel == 1) {
            inter_pred_scaled_8bpp(avctx);
        } else {
            inter_pred_scaled_16bpp(avctx);
        }
    } else {
        if (bytesperpixel == 1) {
            inter_pred_8bpp(avctx);
        } else {
            inter_pred_16bpp(avctx);
        }
    }

    if (!b->skip) {
        /* mostly copied from intra_recon() */

        int w4 = ff_vp9_bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
        int h4 = ff_vp9_bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
        int end_x = FFMIN(2 * (s->cols - col), w4);
        int end_y = FFMIN(2 * (s->rows - row), h4);
        int tx = 4 * s->s.h.lossless + b->tx, uvtx = b->uvtx + 4 * s->s.h.lossless;
        int uvstep1d = 1 << b->uvtx, p;
        uint8_t *dst = s->dst[0];

        // y itxfm add
        for (n = 0, y = 0; y < end_y; y += step1d) {
            uint8_t *ptr = dst;
            for (x = 0; x < end_x; x += step1d,
                 ptr += 4 * step1d * bytesperpixel, n += step) {
                int eob = b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];

                if (eob)
                    s->dsp.itxfm_add[tx][DCT_DCT](ptr, s->y_stride,
                                                  s->block + 16 * n * bytesperpixel, eob);
            }
            dst += 4 * s->y_stride * step1d;
        }

        // uv itxfm add
        end_x >>= s->ss_h;
        end_y >>= s->ss_v;
        step = 1 << (b->uvtx * 2);
        for (p = 0; p < 2; p++) {
            dst = s->dst[p + 1];
            for (n = 0, y = 0; y < end_y; y += uvstep1d) {
                uint8_t *ptr = dst;
                for (x = 0; x < end_x; x += uvstep1d,
                     ptr += 4 * uvstep1d * bytesperpixel, n += step) {
                    int eob = b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];

                    if (eob)
                        s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, s->uv_stride,
                                                        s->uvblock[p] + 16 * n * bytesperpixel, eob);
                }
                dst += 4 * uvstep1d * s->uv_stride;
            }
        }
    }
}

void ff_vp9_inter_recon_8bpp(AVCodecContext *avctx)
{
    inter_recon(avctx, 1);
}

void ff_vp9_inter_recon_16bpp(AVCodecContext *avctx)
{
    inter_recon(avctx, 2);
}