blob: 54712f6b7a32ac0874205349417292189819ba73
1 | /* |
2 | * VC-1 and WMV3 decoder |
3 | * Copyright (c) 2011 Mashiat Sarker Shakkhar |
4 | * Copyright (c) 2006-2007 Konstantin Shishkov |
5 | * Partly based on vc9.c (c) 2005 Anonymous, Alex Beregszaszi, Michael Niedermayer |
6 | * |
7 | * This file is part of FFmpeg. |
8 | * |
9 | * FFmpeg is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU Lesser General Public |
11 | * License as published by the Free Software Foundation; either |
12 | * version 2.1 of the License, or (at your option) any later version. |
13 | * |
14 | * FFmpeg is distributed in the hope that it will be useful, |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
17 | * Lesser General Public License for more details. |
18 | * |
19 | * You should have received a copy of the GNU Lesser General Public |
20 | * License along with FFmpeg; if not, write to the Free Software |
21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
22 | */ |
23 | |
24 | /** |
25 | * @file |
26 | * VC-1 and WMV3 block decoding routines |
27 | */ |
28 | |
29 | #include "mathops.h" |
30 | #include "mpegutils.h" |
31 | #include "mpegvideo.h" |
32 | #include "vc1.h" |
33 | #include "vc1_pred.h" |
34 | #include "vc1data.h" |
35 | |
36 | static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir) |
37 | { |
38 | int scaledvalue, refdist; |
39 | int scalesame1, scalesame2; |
40 | int scalezone1_x, zone1offset_x; |
41 | int table_index = dir ^ v->second_field; |
42 | |
43 | if (v->s.pict_type != AV_PICTURE_TYPE_B) |
44 | refdist = v->refdist; |
45 | else |
46 | refdist = dir ? v->brfd : v->frfd; |
47 | if (refdist > 3) |
48 | refdist = 3; |
49 | scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist]; |
50 | scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist]; |
51 | scalezone1_x = ff_vc1_field_mvpred_scales[table_index][3][refdist]; |
52 | zone1offset_x = ff_vc1_field_mvpred_scales[table_index][5][refdist]; |
53 | |
54 | if (FFABS(n) > 255) |
55 | scaledvalue = n; |
56 | else { |
57 | if (FFABS(n) < scalezone1_x) |
58 | scaledvalue = (n * scalesame1) >> 8; |
59 | else { |
60 | if (n < 0) |
61 | scaledvalue = ((n * scalesame2) >> 8) - zone1offset_x; |
62 | else |
63 | scaledvalue = ((n * scalesame2) >> 8) + zone1offset_x; |
64 | } |
65 | } |
66 | return av_clip(scaledvalue, -v->range_x, v->range_x - 1); |
67 | } |
68 | |
69 | static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir) |
70 | { |
71 | int scaledvalue, refdist; |
72 | int scalesame1, scalesame2; |
73 | int scalezone1_y, zone1offset_y; |
74 | int table_index = dir ^ v->second_field; |
75 | |
76 | if (v->s.pict_type != AV_PICTURE_TYPE_B) |
77 | refdist = v->refdist; |
78 | else |
79 | refdist = dir ? v->brfd : v->frfd; |
80 | if (refdist > 3) |
81 | refdist = 3; |
82 | scalesame1 = ff_vc1_field_mvpred_scales[table_index][1][refdist]; |
83 | scalesame2 = ff_vc1_field_mvpred_scales[table_index][2][refdist]; |
84 | scalezone1_y = ff_vc1_field_mvpred_scales[table_index][4][refdist]; |
85 | zone1offset_y = ff_vc1_field_mvpred_scales[table_index][6][refdist]; |
86 | |
87 | if (FFABS(n) > 63) |
88 | scaledvalue = n; |
89 | else { |
90 | if (FFABS(n) < scalezone1_y) |
91 | scaledvalue = (n * scalesame1) >> 8; |
92 | else { |
93 | if (n < 0) |
94 | scaledvalue = ((n * scalesame2) >> 8) - zone1offset_y; |
95 | else |
96 | scaledvalue = ((n * scalesame2) >> 8) + zone1offset_y; |
97 | } |
98 | } |
99 | |
100 | if (v->cur_field_type && !v->ref_field_type[dir]) |
101 | return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2); |
102 | else |
103 | return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1); |
104 | } |
105 | |
106 | static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */) |
107 | { |
108 | int scalezone1_x, zone1offset_x; |
109 | int scaleopp1, scaleopp2, brfd; |
110 | int scaledvalue; |
111 | |
112 | brfd = FFMIN(v->brfd, 3); |
113 | scalezone1_x = ff_vc1_b_field_mvpred_scales[3][brfd]; |
114 | zone1offset_x = ff_vc1_b_field_mvpred_scales[5][brfd]; |
115 | scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd]; |
116 | scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd]; |
117 | |
118 | if (FFABS(n) > 255) |
119 | scaledvalue = n; |
120 | else { |
121 | if (FFABS(n) < scalezone1_x) |
122 | scaledvalue = (n * scaleopp1) >> 8; |
123 | else { |
124 | if (n < 0) |
125 | scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_x; |
126 | else |
127 | scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_x; |
128 | } |
129 | } |
130 | return av_clip(scaledvalue, -v->range_x, v->range_x - 1); |
131 | } |
132 | |
133 | static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir) |
134 | { |
135 | int scalezone1_y, zone1offset_y; |
136 | int scaleopp1, scaleopp2, brfd; |
137 | int scaledvalue; |
138 | |
139 | brfd = FFMIN(v->brfd, 3); |
140 | scalezone1_y = ff_vc1_b_field_mvpred_scales[4][brfd]; |
141 | zone1offset_y = ff_vc1_b_field_mvpred_scales[6][brfd]; |
142 | scaleopp1 = ff_vc1_b_field_mvpred_scales[1][brfd]; |
143 | scaleopp2 = ff_vc1_b_field_mvpred_scales[2][brfd]; |
144 | |
145 | if (FFABS(n) > 63) |
146 | scaledvalue = n; |
147 | else { |
148 | if (FFABS(n) < scalezone1_y) |
149 | scaledvalue = (n * scaleopp1) >> 8; |
150 | else { |
151 | if (n < 0) |
152 | scaledvalue = ((n * scaleopp2) >> 8) - zone1offset_y; |
153 | else |
154 | scaledvalue = ((n * scaleopp2) >> 8) + zone1offset_y; |
155 | } |
156 | } |
157 | if (v->cur_field_type && !v->ref_field_type[dir]) { |
158 | return av_clip(scaledvalue, -v->range_y / 2 + 1, v->range_y / 2); |
159 | } else { |
160 | return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1); |
161 | } |
162 | } |
163 | |
164 | static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */, |
165 | int dim, int dir) |
166 | { |
167 | int brfd, scalesame; |
168 | int hpel = 1 - v->s.quarter_sample; |
169 | |
170 | n >>= hpel; |
171 | if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) { |
172 | if (dim) |
173 | n = scaleforsame_y(v, i, n, dir) * (1 << hpel); |
174 | else |
175 | n = scaleforsame_x(v, n, dir) * (1 << hpel); |
176 | return n; |
177 | } |
178 | brfd = FFMIN(v->brfd, 3); |
179 | scalesame = ff_vc1_b_field_mvpred_scales[0][brfd]; |
180 | |
181 | n = (n * scalesame >> 8) << hpel; |
182 | return n; |
183 | } |
184 | |
185 | static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */, |
186 | int dim, int dir) |
187 | { |
188 | int refdist, scaleopp; |
189 | int hpel = 1 - v->s.quarter_sample; |
190 | |
191 | n >>= hpel; |
192 | if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) { |
193 | if (dim) |
194 | n = scaleforopp_y(v, n, dir) << hpel; |
195 | else |
196 | n = scaleforopp_x(v, n) << hpel; |
197 | return n; |
198 | } |
199 | if (v->s.pict_type != AV_PICTURE_TYPE_B) |
200 | refdist = FFMIN(v->refdist, 3); |
201 | else |
202 | refdist = dir ? v->brfd : v->frfd; |
203 | scaleopp = ff_vc1_field_mvpred_scales[dir ^ v->second_field][0][refdist]; |
204 | |
205 | n = (n * scaleopp >> 8) * (1 << hpel); |
206 | return n; |
207 | } |
208 | |
209 | /** Predict and set motion vector |
210 | */ |
211 | void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y, |
212 | int mv1, int r_x, int r_y, uint8_t* is_intra, |
213 | int pred_flag, int dir) |
214 | { |
215 | MpegEncContext *s = &v->s; |
216 | int xy, wrap, off = 0; |
217 | int16_t *A, *B, *C; |
218 | int px, py; |
219 | int sum; |
220 | int mixedmv_pic, num_samefield = 0, num_oppfield = 0; |
221 | int opposite, a_f, b_f, c_f; |
222 | int16_t field_predA[2]; |
223 | int16_t field_predB[2]; |
224 | int16_t field_predC[2]; |
225 | int a_valid, b_valid, c_valid; |
226 | int hybridmv_thresh, y_bias = 0; |
227 | |
228 | if (v->mv_mode == MV_PMODE_MIXED_MV || |
229 | ((v->mv_mode == MV_PMODE_INTENSITY_COMP) && (v->mv_mode2 == MV_PMODE_MIXED_MV))) |
230 | mixedmv_pic = 1; |
231 | else |
232 | mixedmv_pic = 0; |
233 | /* scale MV difference to be quad-pel */ |
234 | if (!s->quarter_sample) { |
235 | dmv_x *= 2; |
236 | dmv_y *= 2; |
237 | } |
238 | |
239 | wrap = s->b8_stride; |
240 | xy = s->block_index[n]; |
241 | |
242 | if (s->mb_intra) { |
243 | s->mv[0][n][0] = s->current_picture.motion_val[0][xy + v->blocks_off][0] = 0; |
244 | s->mv[0][n][1] = s->current_picture.motion_val[0][xy + v->blocks_off][1] = 0; |
245 | s->current_picture.motion_val[1][xy + v->blocks_off][0] = 0; |
246 | s->current_picture.motion_val[1][xy + v->blocks_off][1] = 0; |
247 | if (mv1) { /* duplicate motion data for 1-MV block */ |
248 | s->current_picture.motion_val[0][xy + 1 + v->blocks_off][0] = 0; |
249 | s->current_picture.motion_val[0][xy + 1 + v->blocks_off][1] = 0; |
250 | s->current_picture.motion_val[0][xy + wrap + v->blocks_off][0] = 0; |
251 | s->current_picture.motion_val[0][xy + wrap + v->blocks_off][1] = 0; |
252 | s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][0] = 0; |
253 | s->current_picture.motion_val[0][xy + wrap + 1 + v->blocks_off][1] = 0; |
254 | v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0; |
255 | s->current_picture.motion_val[1][xy + 1 + v->blocks_off][0] = 0; |
256 | s->current_picture.motion_val[1][xy + 1 + v->blocks_off][1] = 0; |
257 | s->current_picture.motion_val[1][xy + wrap][0] = 0; |
258 | s->current_picture.motion_val[1][xy + wrap + v->blocks_off][1] = 0; |
259 | s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][0] = 0; |
260 | s->current_picture.motion_val[1][xy + wrap + 1 + v->blocks_off][1] = 0; |
261 | } |
262 | return; |
263 | } |
264 | |
265 | C = s->current_picture.motion_val[dir][xy - 1 + v->blocks_off]; |
266 | A = s->current_picture.motion_val[dir][xy - wrap + v->blocks_off]; |
267 | if (mv1) { |
268 | if (v->field_mode && mixedmv_pic) |
269 | off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2; |
270 | else |
271 | off = (s->mb_x == (s->mb_width - 1)) ? -1 : 2; |
272 | } else { |
273 | //in 4-MV mode different blocks have different B predictor position |
274 | switch (n) { |
275 | case 0: |
276 | off = (s->mb_x > 0) ? -1 : 1; |
277 | break; |
278 | case 1: |
279 | off = (s->mb_x == (s->mb_width - 1)) ? -1 : 1; |
280 | break; |
281 | case 2: |
282 | off = 1; |
283 | break; |
284 | case 3: |
285 | off = -1; |
286 | } |
287 | } |
288 | B = s->current_picture.motion_val[dir][xy - wrap + off + v->blocks_off]; |
289 | |
290 | a_valid = !s->first_slice_line || (n == 2 || n == 3); |
291 | b_valid = a_valid && (s->mb_width > 1); |
292 | c_valid = s->mb_x || (n == 1 || n == 3); |
293 | if (v->field_mode) { |
294 | a_valid = a_valid && !is_intra[xy - wrap]; |
295 | b_valid = b_valid && !is_intra[xy - wrap + off]; |
296 | c_valid = c_valid && !is_intra[xy - 1]; |
297 | } |
298 | |
299 | if (a_valid) { |
300 | a_f = v->mv_f[dir][xy - wrap + v->blocks_off]; |
301 | num_oppfield += a_f; |
302 | num_samefield += 1 - a_f; |
303 | field_predA[0] = A[0]; |
304 | field_predA[1] = A[1]; |
305 | } else { |
306 | field_predA[0] = field_predA[1] = 0; |
307 | a_f = 0; |
308 | } |
309 | if (b_valid) { |
310 | b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off]; |
311 | num_oppfield += b_f; |
312 | num_samefield += 1 - b_f; |
313 | field_predB[0] = B[0]; |
314 | field_predB[1] = B[1]; |
315 | } else { |
316 | field_predB[0] = field_predB[1] = 0; |
317 | b_f = 0; |
318 | } |
319 | if (c_valid) { |
320 | c_f = v->mv_f[dir][xy - 1 + v->blocks_off]; |
321 | num_oppfield += c_f; |
322 | num_samefield += 1 - c_f; |
323 | field_predC[0] = C[0]; |
324 | field_predC[1] = C[1]; |
325 | } else { |
326 | field_predC[0] = field_predC[1] = 0; |
327 | c_f = 0; |
328 | } |
329 | |
330 | if (v->field_mode) { |
331 | if (!v->numref) |
332 | // REFFIELD determines if the last field or the second-last field is |
333 | // to be used as reference |
334 | opposite = 1 - v->reffield; |
335 | else { |
336 | if (num_samefield <= num_oppfield) |
337 | opposite = 1 - pred_flag; |
338 | else |
339 | opposite = pred_flag; |
340 | } |
341 | } else |
342 | opposite = 0; |
343 | if (opposite) { |
344 | if (a_valid && !a_f) { |
345 | field_predA[0] = scaleforopp(v, field_predA[0], 0, dir); |
346 | field_predA[1] = scaleforopp(v, field_predA[1], 1, dir); |
347 | } |
348 | if (b_valid && !b_f) { |
349 | field_predB[0] = scaleforopp(v, field_predB[0], 0, dir); |
350 | field_predB[1] = scaleforopp(v, field_predB[1], 1, dir); |
351 | } |
352 | if (c_valid && !c_f) { |
353 | field_predC[0] = scaleforopp(v, field_predC[0], 0, dir); |
354 | field_predC[1] = scaleforopp(v, field_predC[1], 1, dir); |
355 | } |
356 | v->mv_f[dir][xy + v->blocks_off] = 1; |
357 | v->ref_field_type[dir] = !v->cur_field_type; |
358 | } else { |
359 | if (a_valid && a_f) { |
360 | field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir); |
361 | field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir); |
362 | } |
363 | if (b_valid && b_f) { |
364 | field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir); |
365 | field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir); |
366 | } |
367 | if (c_valid && c_f) { |
368 | field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir); |
369 | field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir); |
370 | } |
371 | v->mv_f[dir][xy + v->blocks_off] = 0; |
372 | v->ref_field_type[dir] = v->cur_field_type; |
373 | } |
374 | |
375 | if (a_valid) { |
376 | px = field_predA[0]; |
377 | py = field_predA[1]; |
378 | } else if (c_valid) { |
379 | px = field_predC[0]; |
380 | py = field_predC[1]; |
381 | } else if (b_valid) { |
382 | px = field_predB[0]; |
383 | py = field_predB[1]; |
384 | } else { |
385 | px = 0; |
386 | py = 0; |
387 | } |
388 | |
389 | if (num_samefield + num_oppfield > 1) { |
390 | px = mid_pred(field_predA[0], field_predB[0], field_predC[0]); |
391 | py = mid_pred(field_predA[1], field_predB[1], field_predC[1]); |
392 | } |
393 | |
394 | /* Pullback MV as specified in 8.3.5.3.4 */ |
395 | if (!v->field_mode) { |
396 | int qx, qy, X, Y; |
397 | int MV = mv1 ? -60 : -28; |
398 | qx = (s->mb_x << 6) + ((n == 1 || n == 3) ? 32 : 0); |
399 | qy = (s->mb_y << 6) + ((n == 2 || n == 3) ? 32 : 0); |
400 | X = (s->mb_width << 6) - 4; |
401 | Y = (s->mb_height << 6) - 4; |
402 | if (qx + px < MV) px = MV - qx; |
403 | if (qy + py < MV) py = MV - qy; |
404 | if (qx + px > X) px = X - qx; |
405 | if (qy + py > Y) py = Y - qy; |
406 | } |
407 | |
408 | if (!v->field_mode || s->pict_type != AV_PICTURE_TYPE_B) { |
409 | /* Calculate hybrid prediction as specified in 8.3.5.3.5 (also 10.3.5.4.3.5) */ |
410 | hybridmv_thresh = 32; |
411 | if (a_valid && c_valid) { |
412 | if (is_intra[xy - wrap]) |
413 | sum = FFABS(px) + FFABS(py); |
414 | else |
415 | sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]); |
416 | if (sum > hybridmv_thresh) { |
417 | if (get_bits1(&s->gb)) { // read HYBRIDPRED bit |
418 | px = field_predA[0]; |
419 | py = field_predA[1]; |
420 | } else { |
421 | px = field_predC[0]; |
422 | py = field_predC[1]; |
423 | } |
424 | } else { |
425 | if (is_intra[xy - 1]) |
426 | sum = FFABS(px) + FFABS(py); |
427 | else |
428 | sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]); |
429 | if (sum > hybridmv_thresh) { |
430 | if (get_bits1(&s->gb)) { |
431 | px = field_predA[0]; |
432 | py = field_predA[1]; |
433 | } else { |
434 | px = field_predC[0]; |
435 | py = field_predC[1]; |
436 | } |
437 | } |
438 | } |
439 | } |
440 | } |
441 | |
442 | if (v->field_mode && v->numref) |
443 | r_y >>= 1; |
444 | if (v->field_mode && v->cur_field_type && v->ref_field_type[dir] == 0) |
445 | y_bias = 1; |
446 | /* store MV using signed modulus of MV range defined in 4.11 */ |
447 | s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x; |
448 | s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1] = ((py + dmv_y + r_y - y_bias) & ((r_y << 1) - 1)) - r_y + y_bias; |
449 | if (mv1) { /* duplicate motion data for 1-MV block */ |
450 | s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0]; |
451 | s->current_picture.motion_val[dir][xy + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1]; |
452 | s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0]; |
453 | s->current_picture.motion_val[dir][xy + wrap + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1]; |
454 | s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][0] = s->current_picture.motion_val[dir][xy + v->blocks_off][0]; |
455 | s->current_picture.motion_val[dir][xy + wrap + 1 + v->blocks_off][1] = s->current_picture.motion_val[dir][xy + v->blocks_off][1]; |
456 | v->mv_f[dir][xy + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off]; |
457 | v->mv_f[dir][xy + wrap + v->blocks_off] = v->mv_f[dir][xy + wrap + 1 + v->blocks_off] = v->mv_f[dir][xy + v->blocks_off]; |
458 | } |
459 | } |
460 | |
/** Predict and set motion vector for interlaced frame picture MBs
 *
 * Candidates: A = left, B = above, C = above-right (above-left at the
 * right edge). Frame-MV and field-MV neighbours are mixed by averaging
 * the two field MVs when the candidate's MV type differs from the
 * current block's. For field-MV blocks, bit 2 of the y component
 * (A[1] & 4 etc.) distinguishes same-field from opposite-field MVs.
 *
 * @param n        luma block index within the MB (0..3)
 * @param dmv_x    differential MV, horizontal component
 * @param dmv_y    differential MV, vertical component
 * @param mvn      MV count/type: 1 = 1-MV MB, 2 = 2-field-MV MB, else 4-MV
 * @param r_x, r_y MV range used for the signed-modulus wrap
 * @param is_intra neighbour intra flags (unused here; v->is_intra is used)
 * @param dir      prediction direction: 0 forward, 1 backward
 */
void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                          int mvn, int r_x, int r_y, uint8_t* is_intra, int dir)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int A[2], B[2], C[2];
    int px = 0, py = 0;
    int a_valid = 0, b_valid = 0, c_valid = 0;
    int field_a, field_b, field_c; // 0: same, 1: opposite
    int total_valid, num_samefield, num_oppfield;
    int pos_c, pos_b, n_adj;

    wrap = s->b8_stride;
    xy = s->block_index[n];

    if (s->mb_intra) {
        /* intra block: zero both directions' MVs */
        s->mv[0][n][0] = s->current_picture.motion_val[0][xy][0] = 0;
        s->mv[0][n][1] = s->current_picture.motion_val[0][xy][1] = 0;
        s->current_picture.motion_val[1][xy][0] = 0;
        s->current_picture.motion_val[1][xy][1] = 0;
        if (mvn == 1) { /* duplicate motion data for 1-MV block */
            s->current_picture.motion_val[0][xy + 1][0] = 0;
            s->current_picture.motion_val[0][xy + 1][1] = 0;
            s->current_picture.motion_val[0][xy + wrap][0] = 0;
            s->current_picture.motion_val[0][xy + wrap][1] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[0][xy + wrap + 1][1] = 0;
            v->luma_mv[s->mb_x][0] = v->luma_mv[s->mb_x][1] = 0;
            s->current_picture.motion_val[1][xy + 1][0] = 0;
            s->current_picture.motion_val[1][xy + 1][1] = 0;
            s->current_picture.motion_val[1][xy + wrap][0] = 0;
            s->current_picture.motion_val[1][xy + wrap][1] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][0] = 0;
            s->current_picture.motion_val[1][xy + wrap + 1][1] = 0;
        }
        return;
    }

    /* row offset to the other-field partner block (top blocks pair down, bottom up) */
    off = ((n == 0) || (n == 1)) ? 1 : -1;
    /* predict A */
    if (s->mb_x || (n == 1) || (n == 3)) {
        if ((v->blk_mv_type[xy]) // current block (MB) has a field MV
            || (!v->blk_mv_type[xy] && !v->blk_mv_type[xy - 1])) { // or both have frame MV
            A[0] = s->current_picture.motion_val[dir][xy - 1][0];
            A[1] = s->current_picture.motion_val[dir][xy - 1][1];
            a_valid = 1;
        } else { // current block has frame mv and cand. has field MV (so average)
            A[0] = (s->current_picture.motion_val[dir][xy - 1][0]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][0] + 1) >> 1;
            A[1] = (s->current_picture.motion_val[dir][xy - 1][1]
                    + s->current_picture.motion_val[dir][xy - 1 + off * wrap][1] + 1) >> 1;
            a_valid = 1;
        }
        /* left-column blocks (n even) cannot use an intra left-neighbour MB */
        if (!(n & 1) && v->is_intra[s->mb_x - 1]) {
            a_valid = 0;
            A[0] = A[1] = 0;
        }
    } else
        A[0] = A[1] = 0;
    /* Predict B and C */
    B[0] = B[1] = C[0] = C[1] = 0;
    if (n == 0 || n == 1 || v->blk_mv_type[xy]) {
        if (!s->first_slice_line) {
            if (!v->is_intra[s->mb_x - s->mb_stride]) {
                b_valid = 1;
                n_adj = n | 2;
                pos_b = s->block_index[n_adj] - 2 * wrap;
                if (v->blk_mv_type[pos_b] && v->blk_mv_type[xy]) {
                    /* both field MV: pick the same-parity block above */
                    n_adj = (n & 2) | (n & 1);
                }
                B[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][0];
                B[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap][1];
                if (v->blk_mv_type[pos_b] && !v->blk_mv_type[xy]) {
                    /* candidate is field MV, current is frame MV: average the two fields */
                    B[0] = (B[0] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][0] + 1) >> 1;
                    B[1] = (B[1] + s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap][1] + 1) >> 1;
                }
            }
            if (s->mb_width > 1) {
                if (!v->is_intra[s->mb_x - s->mb_stride + 1]) {
                    c_valid = 1;
                    n_adj = 2;
                    pos_c = s->block_index[2] - 2 * wrap + 2;
                    if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                        n_adj = n & 2;
                    }
                    C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][0];
                    C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap + 2][1];
                    if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                        /* field-MV candidate vs frame-MV current: average fields */
                        C[0] = (1 + C[0] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][0])) >> 1;
                        C[1] = (1 + C[1] + (s->current_picture.motion_val[dir][s->block_index[n_adj ^ 2] - 2 * wrap + 2][1])) >> 1;
                    }
                    /* right edge: C comes from the above-left MB instead */
                    if (s->mb_x == s->mb_width - 1) {
                        if (!v->is_intra[s->mb_x - s->mb_stride - 1]) {
                            c_valid = 1;
                            n_adj = 3;
                            pos_c = s->block_index[3] - 2 * wrap - 2;
                            if (v->blk_mv_type[pos_c] && v->blk_mv_type[xy]) {
                                n_adj = n | 1;
                            }
                            C[0] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][0];
                            C[1] = s->current_picture.motion_val[dir][s->block_index[n_adj] - 2 * wrap - 2][1];
                            if (v->blk_mv_type[pos_c] && !v->blk_mv_type[xy]) {
                                C[0] = (1 + C[0] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][0]) >> 1;
                                C[1] = (1 + C[1] + s->current_picture.motion_val[dir][s->block_index[1] - 2 * wrap - 2][1]) >> 1;
                            }
                        } else
                            c_valid = 0;
                    }
                }
            }
        }
    } else {
        /* bottom blocks of a frame-MV MB predict from the MB's own top blocks */
        pos_b = s->block_index[1];
        b_valid = 1;
        B[0] = s->current_picture.motion_val[dir][pos_b][0];
        B[1] = s->current_picture.motion_val[dir][pos_b][1];
        pos_c = s->block_index[0];
        c_valid = 1;
        C[0] = s->current_picture.motion_val[dir][pos_c][0];
        C[1] = s->current_picture.motion_val[dir][pos_c][1];
    }

    total_valid = a_valid + b_valid + c_valid;
    // check if predictor A is out of bounds
    if (!s->mb_x && !(n == 1 || n == 3)) {
        A[0] = A[1] = 0;
    }
    // check if predictor B is out of bounds
    if ((s->first_slice_line && v->blk_mv_type[xy]) || (s->first_slice_line && !(n & 2))) {
        B[0] = B[1] = C[0] = C[1] = 0;
    }
    if (!v->blk_mv_type[xy]) {
        /* frame-MV current block: plain median / fallback selection */
        if (s->mb_width == 1) {
            px = B[0];
            py = B[1];
        } else {
            if (total_valid >= 2) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (total_valid) {
                if (a_valid) { px = A[0]; py = A[1]; }
                else if (b_valid) { px = B[0]; py = B[1]; }
                else { px = C[0]; py = C[1]; }
            }
        }
    } else {
        /* field-MV current block: classify candidates by field parity
         * (bit 2 of the y component encodes same/opposite field) */
        if (a_valid)
            field_a = (A[1] & 4) ? 1 : 0;
        else
            field_a = 0;
        if (b_valid)
            field_b = (B[1] & 4) ? 1 : 0;
        else
            field_b = 0;
        if (c_valid)
            field_c = (C[1] & 4) ? 1 : 0;
        else
            field_c = 0;

        num_oppfield = field_a + field_b + field_c;
        num_samefield = total_valid - num_oppfield;
        if (total_valid == 3) {
            if ((num_samefield == 3) || (num_oppfield == 3)) {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            } else if (num_samefield >= num_oppfield) {
                /* take one MV from same field set depending on priority
                   the check for B may not be necessary */
                px = !field_a ? A[0] : B[0];
                py = !field_a ? A[1] : B[1];
            } else {
                px = field_a ? A[0] : B[0];
                py = field_a ? A[1] : B[1];
            }
        } else if (total_valid == 2) {
            if (num_samefield >= num_oppfield) {
                if (!field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else if (!field_b && b_valid) {
                    px = B[0];
                    py = B[1];
                } else /*if (c_valid)*/ {
                    av_assert1(c_valid);
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (field_a && a_valid) {
                    px = A[0];
                    py = A[1];
                } else /*if (field_b && b_valid)*/ {
                    av_assert1(field_b && b_valid);
                    px = B[0];
                    py = B[1];
                }
            }
        } else if (total_valid == 1) {
            px = (a_valid) ? A[0] : ((b_valid) ? B[0] : C[0]);
            py = (a_valid) ? A[1] : ((b_valid) ? B[1] : C[1]);
        }
    }

    /* store MV using signed modulus of MV range defined in 4.11 */
    s->mv[dir][n][0] = s->current_picture.motion_val[dir][xy][0] = ((px + dmv_x + r_x) & ((r_x << 1) - 1)) - r_x;
    s->mv[dir][n][1] = s->current_picture.motion_val[dir][xy][1] = ((py + dmv_y + r_y) & ((r_y << 1) - 1)) - r_y;
    if (mvn == 1) { /* duplicate motion data for 1-MV block */
        s->current_picture.motion_val[dir][xy + 1 ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1 ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap ][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap ][1] = s->current_picture.motion_val[dir][xy][1];
        s->current_picture.motion_val[dir][xy + wrap + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + wrap + 1][1] = s->current_picture.motion_val[dir][xy][1];
    } else if (mvn == 2) { /* duplicate motion data for 2-Field MV block */
        s->current_picture.motion_val[dir][xy + 1][0] = s->current_picture.motion_val[dir][xy][0];
        s->current_picture.motion_val[dir][xy + 1][1] = s->current_picture.motion_val[dir][xy][1];
        s->mv[dir][n + 1][0] = s->mv[dir][n][0];
        s->mv[dir][n + 1][1] = s->mv[dir][n][1];
    }
}
683 | |
/**
 * Predict and set motion vectors for B-frame macroblocks (non-field mode).
 *
 * Always derives the direct-mode forward/backward MVs by scaling the
 * co-located MV of the next picture with BFRACTION; for non-direct MBs it
 * then performs median prediction with pullback for the forward and/or
 * backward direction as selected by @p mvtype, and stores the final MVs
 * using the signed modulus of the MV range.
 *
 * @param dmv_x  differential MVs, horizontal: [0] forward, [1] backward
 * @param dmv_y  differential MVs, vertical:   [0] forward, [1] backward
 * @param direct nonzero for direct-mode MB (no differential applied)
 * @param mvtype BMV_TYPE_FORWARD / BACKWARD / INTERPOLATED
 */
void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                      int direct, int mvtype)
{
    MpegEncContext *s = &v->s;
    int xy, wrap, off = 0;
    int16_t *A, *B, *C;
    int px, py;
    int sum;
    int r_x, r_y;
    const uint8_t *is_intra = v->mb_type[0];

    av_assert0(!v->field_mode);

    r_x = v->range_x;
    r_y = v->range_y;
    /* scale MV difference to be quad-pel */
    if (!s->quarter_sample) {
        dmv_x[0] *= 2;
        dmv_y[0] *= 2;
        dmv_x[1] *= 2;
        dmv_y[1] *= 2;
    }

    wrap = s->b8_stride;
    xy = s->block_index[0];

    if (s->mb_intra) {
        /* intra MB: zero both directions and return */
        s->current_picture.motion_val[0][xy][0] =
        s->current_picture.motion_val[0][xy][1] =
        s->current_picture.motion_val[1][xy][0] =
        s->current_picture.motion_val[1][xy][1] = 0;
        return;
    }
    if (direct && s->next_picture_ptr->field_picture)
        av_log(s->avctx, AV_LOG_WARNING, "Mixed frame/field direct mode not supported\n");

    /* direct-mode MVs: scale the co-located MV of the next picture by BFRACTION */
    s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 0, s->quarter_sample);
    s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 0, s->quarter_sample);
    s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][xy][0], v->bfraction, 1, s->quarter_sample);
    s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][xy][1], v->bfraction, 1, s->quarter_sample);

    /* Pullback predicted motion vectors as specified in 8.4.5.4 */
    s->mv[0][0][0] = av_clip(s->mv[0][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
    s->mv[0][0][1] = av_clip(s->mv[0][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    s->mv[1][0][0] = av_clip(s->mv[1][0][0], -60 - (s->mb_x << 6), (s->mb_width << 6) - 4 - (s->mb_x << 6));
    s->mv[1][0][1] = av_clip(s->mv[1][0][1], -60 - (s->mb_y << 6), (s->mb_height << 6) - 4 - (s->mb_y << 6));
    if (direct) {
        s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
        s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
        s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
        s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
        return;
    }

    /* forward prediction: median of A (above), B (above-diagonal), C (left) */
    if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C = s->current_picture.motion_val[0][xy - 2];
        A = s->current_picture.motion_val[0][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[0][xy - wrap * 2 + off];

        if (!s->mb_x) C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            /* simple/main profiles use 32-unit MB coords, advanced uses 64 */
            int sh = v->profile < PROFILE_ADVANCED ? 5 : 6;
            int MV = 4 - (1 << sh);
            qx = (s->mb_x << sh);
            qy = (s->mb_y << sh);
            X = (s->mb_width << sh) - 4;
            Y = (s->mb_height << sh) - 4;
            if (qx + px < MV) px = MV - qx;
            if (qy + py < MV) py = MV - qy;
            if (qx + px > X) px = X - qx;
            if (qy + py > Y) py = Y - qy;
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: intentionally disabled via the constant 0 — hybrid prediction
         * is not applied for B frames; code kept for reference. */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */
        s->mv[0][0][0] = ((px + dmv_x[0] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    /* backward prediction: same procedure using the backward MV plane */
    if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
        C = s->current_picture.motion_val[1][xy - 2];
        A = s->current_picture.motion_val[1][xy - wrap * 2];
        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
        B = s->current_picture.motion_val[1][xy - wrap * 2 + off];

        if (!s->mb_x)
            C[0] = C[1] = 0;
        if (!s->first_slice_line) { // predictor A is not out of bounds
            if (s->mb_width == 1) {
                px = A[0];
                py = A[1];
            } else {
                px = mid_pred(A[0], B[0], C[0]);
                py = mid_pred(A[1], B[1], C[1]);
            }
        } else if (s->mb_x) { // predictor C is not out of bounds
            px = C[0];
            py = C[1];
        } else {
            px = py = 0;
        }
        /* Pullback MV as specified in 8.3.5.3.4 */
        {
            int qx, qy, X, Y;
            int sh = v->profile < PROFILE_ADVANCED ? 5 : 6;
            int MV = 4 - (1 << sh);
            qx = (s->mb_x << sh);
            qy = (s->mb_y << sh);
            X = (s->mb_width << sh) - 4;
            Y = (s->mb_height << sh) - 4;
            if (qx + px < MV) px = MV - qx;
            if (qy + py < MV) py = MV - qy;
            if (qx + px > X) px = X - qx;
            if (qy + py > Y) py = Y - qy;
        }
        /* Calculate hybrid prediction as specified in 8.3.5.3.5 */
        /* NOTE: intentionally disabled via the constant 0 — see forward path. */
        if (0 && !s->first_slice_line && s->mb_x) {
            if (is_intra[xy - wrap])
                sum = FFABS(px) + FFABS(py);
            else
                sum = FFABS(px - A[0]) + FFABS(py - A[1]);
            if (sum > 32) {
                if (get_bits1(&s->gb)) {
                    px = A[0];
                    py = A[1];
                } else {
                    px = C[0];
                    py = C[1];
                }
            } else {
                if (is_intra[xy - 2])
                    sum = FFABS(px) + FFABS(py);
                else
                    sum = FFABS(px - C[0]) + FFABS(py - C[1]);
                if (sum > 32) {
                    if (get_bits1(&s->gb)) {
                        px = A[0];
                        py = A[1];
                    } else {
                        px = C[0];
                        py = C[1];
                    }
                }
            }
        }
        /* store MV using signed modulus of MV range defined in 4.11 */

        s->mv[1][0][0] = ((px + dmv_x[1] + r_x) & ((r_x << 1) - 1)) - r_x;
        s->mv[1][0][1] = ((py + dmv_y[1] + r_y) & ((r_y << 1) - 1)) - r_y;
    }
    s->current_picture.motion_val[0][xy][0] = s->mv[0][0][0];
    s->current_picture.motion_val[0][xy][1] = s->mv[0][0][1];
    s->current_picture.motion_val[1][xy][0] = s->mv[1][0][0];
    s->current_picture.motion_val[1][xy][1] = s->mv[1][0][1];
}
883 | |
884 | void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y, |
885 | int mv1, int *pred_flag) |
886 | { |
887 | int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0; |
888 | MpegEncContext *s = &v->s; |
889 | int mb_pos = s->mb_x + s->mb_y * s->mb_stride; |
890 | |
891 | if (v->bmvtype == BMV_TYPE_DIRECT) { |
892 | int total_opp, k, f; |
893 | if (s->next_picture.mb_type[mb_pos + v->mb_off] != MB_TYPE_INTRA) { |
894 | s->mv[0][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0], |
895 | v->bfraction, 0, s->quarter_sample); |
896 | s->mv[0][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1], |
897 | v->bfraction, 0, s->quarter_sample); |
898 | s->mv[1][0][0] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][0], |
899 | v->bfraction, 1, s->quarter_sample); |
900 | s->mv[1][0][1] = scale_mv(s->next_picture.motion_val[1][s->block_index[0] + v->blocks_off][1], |
901 | v->bfraction, 1, s->quarter_sample); |
902 | |
903 | total_opp = v->mv_f_next[0][s->block_index[0] + v->blocks_off] |
904 | + v->mv_f_next[0][s->block_index[1] + v->blocks_off] |
905 | + v->mv_f_next[0][s->block_index[2] + v->blocks_off] |
906 | + v->mv_f_next[0][s->block_index[3] + v->blocks_off]; |
907 | f = (total_opp > 2) ? 1 : 0; |
908 | } else { |
909 | s->mv[0][0][0] = s->mv[0][0][1] = 0; |
910 | s->mv[1][0][0] = s->mv[1][0][1] = 0; |
911 | f = 0; |
912 | } |
913 | v->ref_field_type[0] = v->ref_field_type[1] = v->cur_field_type ^ f; |
914 | for (k = 0; k < 4; k++) { |
915 | s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][0] = s->mv[0][0][0]; |
916 | s->current_picture.motion_val[0][s->block_index[k] + v->blocks_off][1] = s->mv[0][0][1]; |
917 | s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][0] = s->mv[1][0][0]; |
918 | s->current_picture.motion_val[1][s->block_index[k] + v->blocks_off][1] = s->mv[1][0][1]; |
919 | v->mv_f[0][s->block_index[k] + v->blocks_off] = f; |
920 | v->mv_f[1][s->block_index[k] + v->blocks_off] = f; |
921 | } |
922 | return; |
923 | } |
924 | if (v->bmvtype == BMV_TYPE_INTERPOLATED) { |
925 | ff_vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0); |
926 | ff_vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1); |
927 | return; |
928 | } |
929 | if (dir) { // backward |
930 | ff_vc1_pred_mv(v, n, dmv_x[1], dmv_y[1], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[1], 1); |
931 | if (n == 3 || mv1) { |
932 | ff_vc1_pred_mv(v, 0, dmv_x[0], dmv_y[0], 1, v->range_x, v->range_y, v->mb_type[0], 0, 0); |
933 | } |
934 | } else { // forward |
935 | ff_vc1_pred_mv(v, n, dmv_x[0], dmv_y[0], mv1, v->range_x, v->range_y, v->mb_type[0], pred_flag[0], 0); |
936 | if (n == 3 || mv1) { |
937 | ff_vc1_pred_mv(v, 0, dmv_x[1], dmv_y[1], 1, v->range_x, v->range_y, v->mb_type[0], 0, 1); |
938 | } |
939 | } |
940 | } |
941 |