/*
 * VC-1 and WMV3 decoder
 * Copyright (c) 2011 Mashiat Sarker Shakkhar
 * Copyright (c) 2006-2007 Konstantin Shishkov
 * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * VC-1 and WMV3 block decoding routines
 */

#include "avcodec.h"
#include "h264chroma.h"
#include "mathops.h"
#include "mpegvideo.h"
#include "vc1.h"

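/*
 * Range reduction (rangeredfrm): the helpers below compress reference samples
 * to half range around 128, e.g. 0 -> 64, 128 -> 128, 255 -> 191, so that the
 * reference block matches a range-reduced current picture before prediction.
 */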
static av_always_inline void vc1_scale_luma(uint8_t *srcY,
                                            int k, int linesize)
{
    int i, j;
    for (j = 0; j < k; j++) {
        for (i = 0; i < k; i++)
            srcY[i] = ((srcY[i] - 128) >> 1) + 128;
        srcY += linesize;
    }
}

static av_always_inline void vc1_scale_chroma(uint8_t *srcU, uint8_t *srcV,
                                              int k, int uvlinesize)
{
    int i, j;
    for (j = 0; j < k; j++) {
        for (i = 0; i < k; i++) {
            srcU[i] = ((srcU[i] - 128) >> 1) + 128;
            srcV[i] = ((srcV[i] - 128) >> 1) + 128;
        }
        srcU += uvlinesize;
        srcV += uvlinesize;
    }
}
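
/*
 * Intensity compensation: remap reference samples through lookup tables
 * (luty/lutuv). lut1 is applied to even rows and lut2 to odd rows so that,
 * in frame coding, top- and bottom-field lines can use different tables;
 * in field mode the callers pass the same table twice.
 */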
static av_always_inline void vc1_lut_scale_luma(uint8_t *srcY,
                                                uint8_t *lut1, uint8_t *lut2,
                                                int k, int linesize)
{
    int i, j;

    for (j = 0; j < k; j += 2) {
        for (i = 0; i < k; i++)
            srcY[i] = lut1[srcY[i]];
        srcY += linesize;

        if (j + 1 == k)
            break;

        for (i = 0; i < k; i++)
            srcY[i] = lut2[srcY[i]];
        srcY += linesize;
    }
}

static av_always_inline void vc1_lut_scale_chroma(uint8_t *srcU, uint8_t *srcV,
                                                  uint8_t *lut1, uint8_t *lut2,
                                                  int k, int uvlinesize)
{
    int i, j;

    for (j = 0; j < k; j += 2) {
        for (i = 0; i < k; i++) {
            srcU[i] = lut1[srcU[i]];
            srcV[i] = lut1[srcV[i]];
        }
        srcU += uvlinesize;
        srcV += uvlinesize;

        if (j + 1 == k)
            break;

        for (i = 0; i < k; i++) {
            srcU[i] = lut2[srcU[i]];
            srcV[i] = lut2[srcV[i]];
        }
        srcU += uvlinesize;
        srcV += uvlinesize;
    }
}

static const uint8_t popcount4[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4 };

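/*
 * get_luma_mv()/get_chroma_mv(): derive a single MV for the macroblock from
 * the four luma block MVs. idx is a 4-bit mask (one bit per block) marking,
 * respectively, blocks that reference the opposite field and blocks that are
 * inter coded; popcount4[] counts the marked blocks. Depending on that count
 * the result is the component-wise median of all four MVs, the median of
 * three of them, or the average of the pair selected via index2[].
 */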
static av_always_inline int get_luma_mv(VC1Context *v, int dir, int16_t *tx, int16_t *ty)
{
    MpegEncContext *s = &v->s;
    int idx = v->mv_f[dir][s->block_index[0] + v->blocks_off] |
             (v->mv_f[dir][s->block_index[1] + v->blocks_off] << 1) |
             (v->mv_f[dir][s->block_index[2] + v->blocks_off] << 2) |
             (v->mv_f[dir][s->block_index[3] + v->blocks_off] << 3);
    static const uint8_t index2[16] = { 0, 0, 0, 0x23, 0, 0x13, 0x03, 0, 0, 0x12, 0x02, 0, 0x01, 0, 0, 0 };
    int opp_count = popcount4[idx];

    switch (opp_count) {
    case 0:
    case 4:
        *tx = median4(s->mv[dir][0][0], s->mv[dir][1][0], s->mv[dir][2][0], s->mv[dir][3][0]);
        *ty = median4(s->mv[dir][0][1], s->mv[dir][1][1], s->mv[dir][2][1], s->mv[dir][3][1]);
        break;
    case 1:
        *tx = mid_pred(s->mv[dir][idx < 2][0], s->mv[dir][1 + (idx < 4)][0], s->mv[dir][2 + (idx < 8)][0]);
        *ty = mid_pred(s->mv[dir][idx < 2][1], s->mv[dir][1 + (idx < 4)][1], s->mv[dir][2 + (idx < 8)][1]);
        break;
    case 3:
        *tx = mid_pred(s->mv[dir][idx > 0xd][0], s->mv[dir][1 + (idx > 0xb)][0], s->mv[dir][2 + (idx > 0x7)][0]);
        *ty = mid_pred(s->mv[dir][idx > 0xd][1], s->mv[dir][1 + (idx > 0xb)][1], s->mv[dir][2 + (idx > 0x7)][1]);
        break;
    case 2:
        *tx = (s->mv[dir][index2[idx] >> 4][0] + s->mv[dir][index2[idx] & 0xf][0]) / 2;
        *ty = (s->mv[dir][index2[idx] >> 4][1] + s->mv[dir][index2[idx] & 0xf][1]) / 2;
        break;
    }
    return opp_count;
}

static av_always_inline int get_chroma_mv(VC1Context *v, int dir, int16_t *tx, int16_t *ty)
{
    MpegEncContext *s = &v->s;
    int idx = !v->mb_type[0][s->block_index[0]] |
             (!v->mb_type[0][s->block_index[1]] << 1) |
             (!v->mb_type[0][s->block_index[2]] << 2) |
             (!v->mb_type[0][s->block_index[3]] << 3);
    static const uint8_t index2[16] = { 0, 0, 0, 0x01, 0, 0x02, 0x12, 0, 0, 0x03, 0x13, 0, 0x23, 0, 0, 0 };
    int valid_count = popcount4[idx];

    switch (valid_count) {
    case 4:
        *tx = median4(s->mv[dir][0][0], s->mv[dir][1][0], s->mv[dir][2][0], s->mv[dir][3][0]);
        *ty = median4(s->mv[dir][0][1], s->mv[dir][1][1], s->mv[dir][2][1], s->mv[dir][3][1]);
        break;
    case 3:
        *tx = mid_pred(s->mv[dir][idx > 0xd][0], s->mv[dir][1 + (idx > 0xb)][0], s->mv[dir][2 + (idx > 0x7)][0]);
        *ty = mid_pred(s->mv[dir][idx > 0xd][1], s->mv[dir][1 + (idx > 0xb)][1], s->mv[dir][2 + (idx > 0x7)][1]);
        break;
    case 2:
        *tx = (s->mv[dir][index2[idx] >> 4][0] + s->mv[dir][index2[idx] & 0xf][0]) / 2;
        *ty = (s->mv[dir][index2[idx] >> 4][1] + s->mv[dir][index2[idx] & 0xf][1]) / 2;
        break;
    default:
        return 0;
    }
    return valid_count;
}

/** Do motion compensation over 1 macroblock
 * Mostly adapted hpel_motion and qpel_motion from mpegvideo.c
 */
void ff_vc1_mc_1mv(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    int i;
    uint8_t (*luty)[256], (*lutuv)[256];
    int use_ic;

    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f->data[0])
        return;

    mx = s->mv[dir][0][0];
    my = s->mv[dir][0][1];

    // store motion vectors for further use in B-frames
    if (s->pict_type == AV_PICTURE_TYPE_P) {
        for (i = 0; i < 4; i++) {
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[i] + v->blocks_off][1] = my;
        }
    }

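    /*
     * Derive the chroma MV from the luma MV: halve it, but bump quarter-pel
     * values whose fractional part is 3, so that e.g. mx = 7 (1.75 luma pels)
     * gives uvmx = 4 (1.0 chroma pel) while mx = 5 gives uvmx = 2 (0.5).
     */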
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    if (v->field_mode &&
        v->cur_field_type != v->ref_field_type[dir]) {
        my   = my   - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
    }

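    /*
     * fastuvmc: round odd chroma MV components one quarter-pel towards zero,
     * restricting chroma interpolation to half-pel positions
     * (e.g. uvmx = 5 -> 4, uvmx = -5 -> -4).
     */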
    // fastuvmc shall be ignored for interlaced frame picture
    if (v->fastuvmc && (v->fcm != ILACE_FRAME)) {
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }
    if (!dir) {
        if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
            srcY = s->current_picture.f->data[0];
            srcU = s->current_picture.f->data[1];
            srcV = s->current_picture.f->data[2];
            luty   = v->curr_luty;
            lutuv  = v->curr_lutuv;
            use_ic = *v->curr_use_ic;
        } else {
            srcY = s->last_picture.f->data[0];
            srcU = s->last_picture.f->data[1];
            srcV = s->last_picture.f->data[2];
            luty   = v->last_luty;
            lutuv  = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
    } else {
        srcY = s->next_picture.f->data[0];
        srcU = s->next_picture.f->data[1];
        srcV = s->next_picture.f->data[2];
        luty   = v->next_luty;
        lutuv  = v->next_lutuv;
        use_ic = v->next_use_ic;
    }

    if (!srcY || !srcU) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    src_x   = s->mb_x * 16 + (mx   >> 2);
    src_y   = s->mb_y * 16 + (my   >> 2);
    uvsrc_x = s->mb_x *  8 + (uvmx >> 2);
    uvsrc_y = s->mb_y *  8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
        src_y   = av_clip(  src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
    } else {
        src_x   = av_clip(  src_x, -17, s->avctx->coded_width);
        src_y   = av_clip(  src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y   * s->linesize   + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    if (v->field_mode && v->ref_field_type[dir]) {
        srcY += s->current_picture_ptr->f->linesize[0];
        srcU += s->current_picture_ptr->f->linesize[1];
        srcV += s->current_picture_ptr->f->linesize[2];
    }

    /* for grayscale we should not try to read from unknown area */
    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY) {
        srcU = s->sc.edge_emu_buffer + 18 * s->linesize;
        srcV = s->sc.edge_emu_buffer + 18 * s->linesize;
    }

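    /*
     * If the MV points (partly) outside the area covered by valid edge-padded
     * reference samples, or the source block has to be rescaled first (range
     * reduction or intensity compensation), copy it into edge_emu_buffer with
     * replicated borders and work on that copy.
     */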
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 22 || v_edge_pos < 22
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx&3) - 16 - s->mspel * 3
        || (unsigned)(src_y - 1)        > v_edge_pos    - (my&3) - 16 - 3) {
        uint8_t *ubuf = s->sc.edge_emu_buffer + 19 * s->linesize;
        uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
        const int k = 17 + s->mspel * 2;

        srcY -= s->mspel * (1 + s->linesize);
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 k, k,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->sc.edge_emu_buffer;
        s->vdsp.emulated_edge_mc(ubuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(vbuf, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = ubuf;
        srcV = vbuf;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            vc1_scale_luma(srcY, k, s->linesize);
            vc1_scale_chroma(srcU, srcV, 9, s->uvlinesize);
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            vc1_lut_scale_luma(srcY,
                               luty[v->field_mode ? v->ref_field_type[dir] : ((0 + src_y - s->mspel) & 1)],
                               luty[v->field_mode ? v->ref_field_type[dir] : ((1 + src_y - s->mspel) & 1)],
                               k, s->linesize);
            vc1_lut_scale_chroma(srcU, srcV,
                                 lutuv[v->field_mode ? v->ref_field_type[dir] : ((0 + uvsrc_y) & 1)],
                                 lutuv[v->field_mode ? v->ref_field_type[dir] : ((1 + uvsrc_y) & 1)],
                                 9, s->uvlinesize);
        }
        srcY += s->mspel * (1 + s->linesize);
    }

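    /*
     * Luma MC: dxy packs the two 2-bit quarter-pel fractions to pick one of
     * the 16 mspel filters; without mspel only the half-pel bits are used to
     * pick one of the 4 hpel functions.
     */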
    if (s->mspel) {
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.put_vc1_mspel_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            s->hdsp.put_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
        else
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
    }

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}

/** Do motion compensation for 4-MV macroblock - luminance block
 */
void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
{
    MpegEncContext *s = &v->s;
    uint8_t *srcY;
    int dxy, mx, my, src_x, src_y;
    int off;
    int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*luty)[256];
    int use_ic;

    if ((!v->field_mode ||
         (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
        !v->s.last_picture.f->data[0])
        return;

    mx = s->mv[dir][n][0];
    my = s->mv[dir][n][1];

    if (!dir) {
        if (v->field_mode && (v->cur_field_type != v->ref_field_type[dir]) && v->second_field) {
            srcY = s->current_picture.f->data[0];
            luty = v->curr_luty;
            use_ic = *v->curr_use_ic;
        } else {
            srcY = s->last_picture.f->data[0];
            luty = v->last_luty;
            use_ic = v->last_use_ic;
        }
    } else {
        srcY = s->next_picture.f->data[0];
        luty = v->next_luty;
        use_ic = v->next_use_ic;
    }

    if (!srcY) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    if (v->field_mode) {
        if (v->cur_field_type != v->ref_field_type[dir])
            my = my - 2 + 4 * v->cur_field_type;
    }

    if (s->pict_type == AV_PICTURE_TYPE_P && n == 3 && v->field_mode) {
        int opp_count = get_luma_mv(v, 0,
                                    &s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0],
                                    &s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1]);
        int k, f = opp_count > 2;
        for (k = 0; k < 4; k++)
            v->mv_f[1][s->block_index[k] + v->blocks_off] = f;
    }

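    /*
     * Interlaced frame pictures: pull the MV back so the referenced position
     * (qx in luma columns, qy in field lines) stays within the legal range
     * around the coded picture; only whole-pel multiples are subtracted, so
     * the fractional part of the MV is preserved.
     */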
    if (v->fcm == ILACE_FRAME) { // not sure if needed for other types of picture
        int qx, qy;
        int width  = s->avctx->coded_width;
        int height = s->avctx->coded_height >> 1;
        if (s->pict_type == AV_PICTURE_TYPE_P) {
            s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][0] = mx;
            s->current_picture.motion_val[1][s->block_index[n] + v->blocks_off][1] = my;
        }
        qx = (s->mb_x * 16) + (mx >> 2);
        qy = (s->mb_y *  8) + (my >> 3);

        if (qx < -17)
            mx -= 4 * (qx + 17);
        else if (qx > width)
            mx -= 4 * (qx - width);
        if (qy < -18)
            my -= 8 * (qy + 18);
        else if (qy > height + 1)
            my -= 8 * (qy - height - 1);
    }

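    /*
     * off is the offset of 8x8 luma sub-block n inside the 16x16 destination
     * macroblock: blocks 0/1 on top, 2/3 eight lines down. With field block
     * MVs in interlaced frames, blocks 2/3 instead start one line down and
     * the MC below uses a doubled stride, so each block covers one field.
     */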
    if ((v->fcm == ILACE_FRAME) && fieldmv)
        off = ((n > 1) ? s->linesize : 0) + (n & 1) * 8;
    else
        off = s->linesize * 4 * (n & 2) + (n & 1) * 8;

    src_x = s->mb_x * 16 + (n & 1) * 8 + (mx >> 2);
    if (!fieldmv)
        src_y = s->mb_y * 16 + (n & 2) * 4 + (my >> 2);
    else
        src_y = s->mb_y * 16 + ((n > 1) ? 1 : 0) + (my >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x = av_clip(src_x, -16, s->mb_width  * 16);
        src_y = av_clip(src_y, -16, s->mb_height * 16);
    } else {
        src_x = av_clip(src_x, -17, s->avctx->coded_width);
        if (v->fcm == ILACE_FRAME) {
            if (src_y & 1)
                src_y = av_clip(src_y, -17, s->avctx->coded_height + 1);
            else
                src_y = av_clip(src_y, -18, s->avctx->coded_height);
        } else {
            src_y = av_clip(src_y, -18, s->avctx->coded_height + 1);
        }
    }

    srcY += src_y * s->linesize + src_x;
    if (v->field_mode && v->ref_field_type[dir])
        srcY += s->current_picture_ptr->f->linesize[0];

    if (fieldmv) {
        if (!(src_y & 1))
            v_edge_pos--;
        else
            src_y -= (src_y < 4);
    }
    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 13 || v_edge_pos < 23
        || (unsigned)(src_x - s->mspel) > s->h_edge_pos - (mx & 3) - 8 - s->mspel * 2
        || (unsigned)(src_y - (s->mspel << fieldmv)) > v_edge_pos - (my & 3) - ((8 + s->mspel * 2) << fieldmv)) {
        const int k = 9 + s->mspel * 2;

        srcY -= s->mspel * (1 + (s->linesize << fieldmv));
        /* check emulate edge stride and offset */
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 k, k << fieldmv,
                                 src_x - s->mspel, src_y - (s->mspel << fieldmv),
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->sc.edge_emu_buffer;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            vc1_scale_luma(srcY, k, s->linesize << fieldmv);
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            vc1_lut_scale_luma(srcY,
                               luty[v->field_mode ? v->ref_field_type[dir] : (((0<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1)],
                               luty[v->field_mode ? v->ref_field_type[dir] : (((1<<fieldmv)+src_y - (s->mspel << fieldmv)) & 1)],
                               k, s->linesize << fieldmv);
        }
        srcY += s->mspel * (1 + (s->linesize << fieldmv));
    }

    if (s->mspel) {
        dxy = ((my & 3) << 2) | (mx & 3);
        if (avg)
            v->vc1dsp.avg_vc1_mspel_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
        else
            v->vc1dsp.put_vc1_mspel_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize << fieldmv, v->rnd);
    } else { // hpel mc - always used for luma
        dxy = (my & 2) | ((mx & 2) >> 1);
        if (!v->rnd)
            s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
        else
            s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
    }
}

/** Do motion compensation for 4-MV macroblock - both chroma blocks
 */
void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvmx, uvmy, uvsrc_x, uvsrc_y;
    int16_t tx, ty;
    int chroma_ref_type;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    uint8_t (*lutuv)[256];
    int use_ic;

    if (!v->field_mode && !v->s.last_picture.f->data[0])
        return;
    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

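    /*
     * The chroma MV is derived from the four luma MVs (see get_chroma_mv());
     * if fewer than two luma blocks are inter coded, no chroma MC is done and
     * the stored MV is zeroed. With two references (field pictures), the
     * chroma block follows whichever field the majority of luma blocks
     * reference (ties go to the current field).
     */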
    /* calculate chroma MV vector from four luma MVs */
    if (!v->field_mode || !v->numref) {
        int valid_count = get_chroma_mv(v, dir, &tx, &ty);
        if (!valid_count) {
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = 0;
            s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            return; //no need to do MC for intra blocks
        }
        chroma_ref_type = v->ref_field_type[dir];
    } else {
        int opp_count = get_luma_mv(v, dir, &tx, &ty);
        chroma_ref_type = v->cur_field_type ^ (opp_count > 2);
    }
    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
        return;
    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
    s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
    uvmx = (tx + ((tx & 3) == 3)) >> 1;
    uvmy = (ty + ((ty & 3) == 3)) >> 1;

    v->luma_mv[s->mb_x][0] = uvmx;
    v->luma_mv[s->mb_x][1] = uvmy;

    if (v->fastuvmc) {
        uvmx = uvmx + ((uvmx < 0) ? (uvmx & 1) : -(uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? (uvmy & 1) : -(uvmy & 1));
    }
    // Field conversion bias
    if (v->cur_field_type != chroma_ref_type)
        uvmy += 2 - 4 * chroma_ref_type;

    uvsrc_x = s->mb_x * 8 + (uvmx >> 2);
    uvsrc_y = s->mb_y * 8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        uvsrc_x = av_clip(uvsrc_x, -8, s->mb_width  * 8);
        uvsrc_y = av_clip(uvsrc_y, -8, s->mb_height * 8);
    } else {
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
    }

    if (!dir) {
        if (v->field_mode && (v->cur_field_type != chroma_ref_type) && v->second_field) {
            srcU = s->current_picture.f->data[1];
            srcV = s->current_picture.f->data[2];
            lutuv  = v->curr_lutuv;
            use_ic = *v->curr_use_ic;
        } else {
            srcU = s->last_picture.f->data[1];
            srcV = s->last_picture.f->data[2];
            lutuv  = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
    } else {
        srcU = s->next_picture.f->data[1];
        srcV = s->next_picture.f->data[2];
        lutuv  = v->next_lutuv;
        use_ic = v->next_use_ic;
    }

    if (!srcU) {
        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
        return;
    }

    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    if (v->field_mode) {
        if (chroma_ref_type) {
            srcU += s->current_picture_ptr->f->linesize[1];
            srcV += s->current_picture_ptr->f->linesize[2];
        }
    }

    if (v->rangeredfrm || use_ic
        || s->h_edge_pos < 18 || v_edge_pos < 18
        || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 9
        || (unsigned)uvsrc_y > (v_edge_pos    >> 1) - 9) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1, uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = s->sc.edge_emu_buffer;
        srcV = s->sc.edge_emu_buffer + 16;

        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            vc1_scale_chroma(srcU, srcV, 9, s->uvlinesize);
        }
        /* if we deal with intensity compensation we need to scale source blocks */
        if (use_ic) {
            vc1_lut_scale_chroma(srcU, srcV,
                                 lutuv[v->field_mode ? chroma_ref_type : ((0 + uvsrc_y) & 1)],
                                 lutuv[v->field_mode ? chroma_ref_type : ((1 + uvsrc_y) & 1)],
                                 9, s->uvlinesize);
        }
    }

    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->put_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}

/** Do motion compensation for 4-MV interlaced frame chroma macroblock (both U and V)
 */
void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcU, *srcV;
    int uvsrc_x, uvsrc_y;
    int uvmx_field[4], uvmy_field[4];
    int i, off, tx, ty;
    int fieldmv = v->blk_mv_type[s->block_index[0]];
    static const uint8_t s_rndtblfield[16] = { 0, 0, 1, 2, 4, 4, 5, 6, 2, 2, 3, 8, 6, 6, 7, 12 };
    int v_dist = fieldmv ? 1 : 4; // vertical offset for lower sub-blocks
    int v_edge_pos = s->v_edge_pos >> 1;
    int use_ic;
    uint8_t (*lutuv)[256];

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

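    /*
     * Each luma block has its own MV, so each 4x4 chroma sub-block is
     * predicted separately. The horizontal component uses the usual halving
     * rule; vertically, field block MVs are mapped through s_rndtblfield,
     * a special rounding table (per the VC-1 interlaced-frame rules) instead
     * of the plain halving.
     */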
    for (i = 0; i < 4; i++) {
        int d = i < 2 ? dir : dir2;
        tx = s->mv[d][i][0];
        uvmx_field[i] = (tx + ((tx & 3) == 3)) >> 1;
        ty = s->mv[d][i][1];
        if (fieldmv)
            uvmy_field[i] = (ty >> 4) * 8 + s_rndtblfield[ty & 0xF];
        else
            uvmy_field[i] = (ty + ((ty & 3) == 3)) >> 1;
    }

    for (i = 0; i < 4; i++) {
        off = (i & 1) * 4 + ((i & 2) ? v_dist * s->uvlinesize : 0);
        uvsrc_x = s->mb_x * 8 + (i & 1) * 4 + (uvmx_field[i] >> 2);
        uvsrc_y = s->mb_y * 8 + ((i & 2) ? v_dist : 0) + (uvmy_field[i] >> 2);
        // FIXME: implement proper pull-back (see vc1cropmv.c, vc1CROPMV_ChromaPullBack())
        uvsrc_x = av_clip(uvsrc_x, -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y, -8, s->avctx->coded_height >> 1);
        if (i < 2 ? dir : dir2) {
            srcU = s->next_picture.f->data[1];
            srcV = s->next_picture.f->data[2];
            lutuv  = v->next_lutuv;
            use_ic = v->next_use_ic;
        } else {
            srcU = s->last_picture.f->data[1];
            srcV = s->last_picture.f->data[2];
            lutuv  = v->last_lutuv;
            use_ic = v->last_use_ic;
        }
        if (!srcU)
            return;
        srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
        srcV += uvsrc_y * s->uvlinesize + uvsrc_x;
        uvmx_field[i] = (uvmx_field[i] & 3) << 1;
        uvmy_field[i] = (uvmy_field[i] & 3) << 1;

        if (fieldmv) {
            if (!(uvsrc_y & 1))
                v_edge_pos = (s->v_edge_pos >> 1) - 1;
            else
                uvsrc_y -= (uvsrc_y < 2);
        }
        if (use_ic
            || s->h_edge_pos < 10 || v_edge_pos < (5 << fieldmv)
            || (unsigned)uvsrc_x > (s->h_edge_pos >> 1) - 5
            || (unsigned)uvsrc_y > v_edge_pos - (5 << fieldmv)) {
            s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcU,
                                     s->uvlinesize, s->uvlinesize,
                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, v_edge_pos);
            s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer + 16, srcV,
                                     s->uvlinesize, s->uvlinesize,
                                     5, (5 << fieldmv), uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, v_edge_pos);
            srcU = s->sc.edge_emu_buffer;
            srcV = s->sc.edge_emu_buffer + 16;

            /* if we deal with intensity compensation we need to scale source blocks */
            if (use_ic) {
                vc1_lut_scale_chroma(srcU, srcV,
                                     lutuv[(uvsrc_y + (0 << fieldmv)) & 1],
                                     lutuv[(uvsrc_y + (1 << fieldmv)) & 1],
                                     5, s->uvlinesize << fieldmv);
            }
        }
        if (avg) {
            if (!v->rnd) {
                h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                h264chroma->avg_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            } else {
                v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            }
        } else {
            if (!v->rnd) {
                h264chroma->put_h264_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                h264chroma->put_h264_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            } else {
                v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[1] + off, srcU, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
                v->vc1dsp.put_no_rnd_vc1_chroma_pixels_tab[1](s->dest[2] + off, srcV, s->uvlinesize << fieldmv, 4, uvmx_field[i], uvmy_field[i]);
            }
        }
    }
}

/** Motion compensation for direct or interpolated blocks in B-frames
 */
void ff_vc1_interp_mc(VC1Context *v)
{
    MpegEncContext *s = &v->s;
    H264ChromaContext *h264chroma = &v->h264chroma;
    uint8_t *srcY, *srcU, *srcV;
    int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
    int v_edge_pos = s->v_edge_pos >> v->field_mode;
    int use_ic = v->next_use_ic;

    if (!v->field_mode && !v->s.next_picture.f->data[0])
        return;

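    /*
     * By this point s->dest[] already holds the forward prediction written by
     * ff_vc1_mc_1mv(); everything below fetches the prediction from the next
     * (backward) reference and averages it in via the avg_* DSP functions.
     */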
    mx   = s->mv[1][0][0];
    my   = s->mv[1][0][1];
    uvmx = (mx + ((mx & 3) == 3)) >> 1;
    uvmy = (my + ((my & 3) == 3)) >> 1;
    if (v->field_mode && v->cur_field_type != v->ref_field_type[1]) {
        my   = my   - 2 + 4 * v->cur_field_type;
        uvmy = uvmy - 2 + 4 * v->cur_field_type;
    }
    if (v->fastuvmc) {
        uvmx = uvmx + ((uvmx < 0) ? -(uvmx & 1) : (uvmx & 1));
        uvmy = uvmy + ((uvmy < 0) ? -(uvmy & 1) : (uvmy & 1));
    }
    srcY = s->next_picture.f->data[0];
    srcU = s->next_picture.f->data[1];
    srcV = s->next_picture.f->data[2];

    src_x   = s->mb_x * 16 + (mx   >> 2);
    src_y   = s->mb_y * 16 + (my   >> 2);
    uvsrc_x = s->mb_x *  8 + (uvmx >> 2);
    uvsrc_y = s->mb_y *  8 + (uvmy >> 2);

    if (v->profile != PROFILE_ADVANCED) {
        src_x   = av_clip(  src_x, -16, s->mb_width  * 16);
        src_y   = av_clip(  src_y, -16, s->mb_height * 16);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->mb_width  *  8);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->mb_height *  8);
    } else {
        src_x   = av_clip(  src_x, -17, s->avctx->coded_width);
        src_y   = av_clip(  src_y, -18, s->avctx->coded_height + 1);
        uvsrc_x = av_clip(uvsrc_x,  -8, s->avctx->coded_width  >> 1);
        uvsrc_y = av_clip(uvsrc_y,  -8, s->avctx->coded_height >> 1);
    }

    srcY += src_y   * s->linesize   + src_x;
    srcU += uvsrc_y * s->uvlinesize + uvsrc_x;
    srcV += uvsrc_y * s->uvlinesize + uvsrc_x;

    if (v->field_mode && v->ref_field_type[1]) {
        srcY += s->current_picture_ptr->f->linesize[0];
        srcU += s->current_picture_ptr->f->linesize[1];
        srcV += s->current_picture_ptr->f->linesize[2];
    }

    /* for grayscale we should not try to read from unknown area */
    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY) {
        srcU = s->sc.edge_emu_buffer + 18 * s->linesize;
        srcV = s->sc.edge_emu_buffer + 18 * s->linesize;
    }

    if (v->rangeredfrm || s->h_edge_pos < 22 || v_edge_pos < 22 || use_ic
        || (unsigned)(src_x - 1) > s->h_edge_pos - (mx & 3) - 16 - 3
        || (unsigned)(src_y - 1) > v_edge_pos    - (my & 3) - 16 - 3) {
        uint8_t *ubuf = s->sc.edge_emu_buffer + 19 * s->linesize;
        uint8_t *vbuf = ubuf + 9 * s->uvlinesize;
        const int k = 17 + s->mspel * 2;

        srcY -= s->mspel * (1 + s->linesize);
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, srcY,
                                 s->linesize, s->linesize,
                                 k, k,
                                 src_x - s->mspel, src_y - s->mspel,
                                 s->h_edge_pos, v_edge_pos);
        srcY = s->sc.edge_emu_buffer;
        s->vdsp.emulated_edge_mc(ubuf, srcU,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        s->vdsp.emulated_edge_mc(vbuf, srcV,
                                 s->uvlinesize, s->uvlinesize,
                                 8 + 1, 8 + 1,
                                 uvsrc_x, uvsrc_y,
                                 s->h_edge_pos >> 1, v_edge_pos >> 1);
        srcU = ubuf;
        srcV = vbuf;
        /* if we deal with range reduction we need to scale source blocks */
        if (v->rangeredfrm) {
            vc1_scale_luma(srcY, k, s->linesize);
            vc1_scale_chroma(srcU, srcV, 9, s->uvlinesize);
        }

        if (use_ic) {
            uint8_t (*luty )[256] = v->next_luty;
            uint8_t (*lutuv)[256] = v->next_lutuv;
            vc1_lut_scale_luma(srcY,
                               luty[v->field_mode ? v->ref_field_type[1] : ((0+src_y - s->mspel) & 1)],
                               luty[v->field_mode ? v->ref_field_type[1] : ((1+src_y - s->mspel) & 1)],
                               k, s->linesize);
            vc1_lut_scale_chroma(srcU, srcV,
                                 lutuv[v->field_mode ? v->ref_field_type[1] : ((0+uvsrc_y) & 1)],
                                 lutuv[v->field_mode ? v->ref_field_type[1] : ((1+uvsrc_y) & 1)],
                                 9, s->uvlinesize);
        }
        srcY += s->mspel * (1 + s->linesize);
    }

    if (s->mspel) {
        dxy = ((my & 3) << 2) | (mx & 3);
        v->vc1dsp.avg_vc1_mspel_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, v->rnd);
    } else { // hpel mc
        dxy = (my & 2) | ((mx & 2) >> 1);

        if (!v->rnd)
            s->hdsp.avg_pixels_tab[0][dxy](s->dest[0], srcY, s->linesize, 16);
        else
            s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0], srcY, s->linesize, 16);
    }

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;
    /* Chroma MC always uses qpel bilinear */
    uvmx = (uvmx & 3) << 1;
    uvmy = (uvmy & 3) << 1;
    if (!v->rnd) {
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        h264chroma->avg_h264_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    } else {
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[1], srcU, s->uvlinesize, 8, uvmx, uvmy);
        v->vc1dsp.avg_no_rnd_vc1_chroma_pixels_tab[0](s->dest[2], srcV, s->uvlinesize, 8, uvmx, uvmy);
    }
}