blob: c913504a657f228be18600605fbdc7a3e767334b
1 | /* |
2 | * Copyright (c) 2000,2001 Fabrice Bellard |
3 | * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> |
4 | * |
5 | * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at> |
6 | * |
7 | * This file is part of FFmpeg. |
8 | * |
9 | * FFmpeg is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU Lesser General Public |
11 | * License as published by the Free Software Foundation; either |
12 | * version 2.1 of the License, or (at your option) any later version. |
13 | * |
14 | * FFmpeg is distributed in the hope that it will be useful, |
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
17 | * Lesser General Public License for more details. |
18 | * |
19 | * You should have received a copy of the GNU Lesser General Public |
20 | * License along with FFmpeg; if not, write to the Free Software |
21 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
22 | */ |
23 | |
24 | #include <string.h> |
25 | |
26 | #include "libavutil/avassert.h" |
27 | #include "libavutil/internal.h" |
28 | #include "avcodec.h" |
29 | #include "h261.h" |
30 | #include "mpegutils.h" |
31 | #include "mpegvideo.h" |
32 | #include "mjpegenc.h" |
33 | #include "msmpeg4.h" |
34 | #include "qpeldsp.h" |
35 | #include "wmv2.h" |
36 | #include <limits.h> |
37 | |
/**
 * MPEG-4 global motion compensation for the single-warp-point case ("GMC1"):
 * with one warp point the transform degenerates to a pure translation, so
 * each plane is predicted with one fractional-pel shift of the reference.
 *
 * @param s           codec context; reads sprite_offset, sprite_warping_accuracy,
 *                    mb_x/mb_y, edge positions, and the gmc1/hpel DSP tables
 * @param dest_y      luma destination (16x16)
 * @param dest_cb     Cb destination (8x8)
 * @param dest_cr     Cr destination (8x8)
 * @param ref_picture array[3] of pointers to the reference picture planes
 */
static void gmc1_motion(MpegEncContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t **ref_picture)
{
    uint8_t *ptr;
    int src_x, src_y, motion_x, motion_y;
    ptrdiff_t offset, linesize, uvlinesize;
    int emu = 0;

    motion_x = s->sprite_offset[0][0];
    motion_y = s->sprite_offset[0][1];
    /* integer part of the luma source position */
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy + 1));
    /* rescale the fractional part to 1/16-pel regardless of accuracy */
    motion_x *= 1 << (3 - s->sprite_warping_accuracy);
    motion_y *= 1 << (3 - s->sprite_warping_accuracy);
    /* clip against the frame; at the right/bottom border drop the fraction */
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x = 0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y = 0;

    linesize   = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + src_y * linesize + src_x;

    /* 17x17 source pixels are needed (16 + 1 for interpolation); fall back
     * to the edge emulation buffer when the block reads outside the padded
     * reference */
    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - 17, 0) ||
        (unsigned)src_y >= FFMAX(s->v_edge_pos - 17, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 linesize, linesize,
                                 17, 17,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }

    if ((motion_x | motion_y) & 7) {
        /* true fractional position: dedicated GMC1 interpolation,
         * done as two 8x16 halves */
        s->mdsp.gmc1(dest_y, ptr, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
        s->mdsp.gmc1(dest_y + 8, ptr + 8, linesize, 16,
                     motion_x & 15, motion_y & 15, 128 - s->no_rounding);
    } else {
        int dxy;

        /* only half-pel components remain: plain (no-)rounding hpel copy */
        dxy = ((motion_x >> 3) & 1) | ((motion_y >> 2) & 2);
        if (s->no_rounding) {
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        } else {
            s->hdsp.put_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
        return;

    /* same procedure for chroma at half resolution */
    motion_x = s->sprite_offset[1][0];
    motion_y = s->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy + 1));
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy + 1));
    motion_x *= 1 << (3 - s->sprite_warping_accuracy);
    motion_y *= 1 << (3 - s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -8, s->width >> 1);
    if (src_x == s->width >> 1)
        motion_x = 0;
    src_y = av_clip(src_y, -8, s->height >> 1);
    if (src_y == s->height >> 1)
        motion_y = 0;

    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    /* 9x9 chroma source needed (8 + 1 for interpolation) */
    if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - 9, 0) ||
        (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - 9, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;   /* remember: Cr uses the same source position */
    }
    s->mdsp.gmc1(dest_cb, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 uvlinesize, uvlinesize,
                                 9, 9,
                                 src_x, src_y,
                                 s->h_edge_pos >> 1, s->v_edge_pos >> 1);
        ptr = s->sc.edge_emu_buffer;
    }
    s->mdsp.gmc1(dest_cr, ptr, uvlinesize, 8,
                 motion_x & 15, motion_y & 15, 128 - s->no_rounding);
}
134 | |
135 | static void gmc_motion(MpegEncContext *s, |
136 | uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, |
137 | uint8_t **ref_picture) |
138 | { |
139 | uint8_t *ptr; |
140 | int linesize, uvlinesize; |
141 | const int a = s->sprite_warping_accuracy; |
142 | int ox, oy; |
143 | |
144 | linesize = s->linesize; |
145 | uvlinesize = s->uvlinesize; |
146 | |
147 | ptr = ref_picture[0]; |
148 | |
149 | ox = s->sprite_offset[0][0] + s->sprite_delta[0][0] * s->mb_x * 16 + |
150 | s->sprite_delta[0][1] * s->mb_y * 16; |
151 | oy = s->sprite_offset[0][1] + s->sprite_delta[1][0] * s->mb_x * 16 + |
152 | s->sprite_delta[1][1] * s->mb_y * 16; |
153 | |
154 | s->mdsp.gmc(dest_y, ptr, linesize, 16, |
155 | ox, oy, |
156 | s->sprite_delta[0][0], s->sprite_delta[0][1], |
157 | s->sprite_delta[1][0], s->sprite_delta[1][1], |
158 | a + 1, (1 << (2 * a + 1)) - s->no_rounding, |
159 | s->h_edge_pos, s->v_edge_pos); |
160 | s->mdsp.gmc(dest_y + 8, ptr, linesize, 16, |
161 | ox + s->sprite_delta[0][0] * 8, |
162 | oy + s->sprite_delta[1][0] * 8, |
163 | s->sprite_delta[0][0], s->sprite_delta[0][1], |
164 | s->sprite_delta[1][0], s->sprite_delta[1][1], |
165 | a + 1, (1 << (2 * a + 1)) - s->no_rounding, |
166 | s->h_edge_pos, s->v_edge_pos); |
167 | |
168 | if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY) |
169 | return; |
170 | |
171 | ox = s->sprite_offset[1][0] + s->sprite_delta[0][0] * s->mb_x * 8 + |
172 | s->sprite_delta[0][1] * s->mb_y * 8; |
173 | oy = s->sprite_offset[1][1] + s->sprite_delta[1][0] * s->mb_x * 8 + |
174 | s->sprite_delta[1][1] * s->mb_y * 8; |
175 | |
176 | ptr = ref_picture[1]; |
177 | s->mdsp.gmc(dest_cb, ptr, uvlinesize, 8, |
178 | ox, oy, |
179 | s->sprite_delta[0][0], s->sprite_delta[0][1], |
180 | s->sprite_delta[1][0], s->sprite_delta[1][1], |
181 | a + 1, (1 << (2 * a + 1)) - s->no_rounding, |
182 | (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1); |
183 | |
184 | ptr = ref_picture[2]; |
185 | s->mdsp.gmc(dest_cr, ptr, uvlinesize, 8, |
186 | ox, oy, |
187 | s->sprite_delta[0][0], s->sprite_delta[0][1], |
188 | s->sprite_delta[1][0], s->sprite_delta[1][1], |
189 | a + 1, (1 << (2 * a + 1)) - s->no_rounding, |
190 | (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1); |
191 | } |
192 | |
193 | static inline int hpel_motion(MpegEncContext *s, |
194 | uint8_t *dest, uint8_t *src, |
195 | int src_x, int src_y, |
196 | op_pixels_func *pix_op, |
197 | int motion_x, int motion_y) |
198 | { |
199 | int dxy = 0; |
200 | int emu = 0; |
201 | |
202 | src_x += motion_x >> 1; |
203 | src_y += motion_y >> 1; |
204 | |
205 | /* WARNING: do no forget half pels */ |
206 | src_x = av_clip(src_x, -16, s->width); // FIXME unneeded for emu? |
207 | if (src_x != s->width) |
208 | dxy |= motion_x & 1; |
209 | src_y = av_clip(src_y, -16, s->height); |
210 | if (src_y != s->height) |
211 | dxy |= (motion_y & 1) << 1; |
212 | src += src_y * s->linesize + src_x; |
213 | |
214 | if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 7, 0) || |
215 | (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 1) - 7, 0)) { |
216 | s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src, |
217 | s->linesize, s->linesize, |
218 | 9, 9, |
219 | src_x, src_y, |
220 | s->h_edge_pos, s->v_edge_pos); |
221 | src = s->sc.edge_emu_buffer; |
222 | emu = 1; |
223 | } |
224 | pix_op[dxy](dest, src, s->linesize, 8); |
225 | return emu; |
226 | } |
227 | |
/**
 * Half-pel motion compensation of one macroblock (or one field of it) for all
 * three planes, handling every chroma subsampling and the codec-specific
 * chroma MV derivations. Always inlined so the is_mpeg12 constant folds into
 * two specializations.
 *
 * @param field_based  1 when operating on a single field (doubled strides)
 * @param bottom_field 1 to write the bottom field lines of the destination
 * @param field_select 1 to read the bottom field lines of the reference
 * @param h            height in destination lines (16 for frame, 8 per field)
 * @param is_mpeg12    compile-time constant selecting the MPEG-1/2 path
 */
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                          uint8_t *dest_y,
                          uint8_t *dest_cb,
                          uint8_t *dest_cr,
                          int field_based,
                          int bottom_field,
                          int field_select,
                          uint8_t **ref_picture,
                          op_pixels_func (*pix_op)[4],
                          int motion_x,
                          int motion_y,
                          int h,
                          int is_mpeg12,
                          int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t uvlinesize, linesize;

    /* field mode: half the vertical range, doubled strides */
    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    /* luma half-pel bits and integer source position */
    dxy   = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x * 16 + (motion_x >> 1);
    src_y = (mb_y << (4 - field_based)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if ((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based) {
            /* buggy-encoder workaround: round chroma MV towards odd */
            mx = (motion_x >> 1) | (motion_x & 1);
            my = motion_y >> 1;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            /* H.263: chroma uses the luma position halved, keeping the
             * quarter-resolution fractional bits */
            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x >> 1;
            uvsrc_y = src_y >> 1;
        }
    // Even chroma mv's are full pel in H261
    } else if (!is_mpeg12 && s->out_format == FMT_H261) {
        mx = motion_x / 4;
        my = motion_y / 4;
        uvdxy = 0;
        uvsrc_x = s->mb_x * 8 + mx;
        uvsrc_y = mb_y * 8 + my;
    } else {
        if (s->chroma_y_shift) {
            /* 4:2:0: halve the MV in both directions */
            mx = motion_x / 2;
            my = motion_y / 2;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x * 8 + (mx >> 1);
            uvsrc_y = (mb_y << (3 - field_based)) + (my >> 1);
        } else {
            if (s->chroma_x_shift) {
                // Chroma422: horizontal subsampling only
                mx = motion_x / 2;
                uvdxy = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x * 8 + (mx >> 1);
                uvsrc_y = src_y;
            } else {
                // Chroma444: chroma mirrors luma exactly
                uvdxy = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 1) - 15   , 0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 1) - h + 1, 0)) {
        if (is_mpeg12 ||
            s->codec_id == AV_CODEC_ID_MPEG2VIDEO ||
            s->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* MPEG-1/2 MVs must stay inside the picture; skip instead of
             * reading out of bounds */
            av_log(s->avctx, AV_LOG_DEBUG,
                   "MPEG motion vector out of boundary (%d %d)\n", src_x,
                   src_y);
            return;
        }
        /* emulate edges; field coordinates are converted back to frame
         * coordinates for the emulation call */
        src_y = (unsigned)src_y << field_based;
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y,
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            /* chroma scratch areas live behind the 18-line luma area */
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            uvsrc_y = (unsigned)uvsrc_y << field_based;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y,
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    /* FIXME use this for field pix too instead of the obnoxious hack which
     * changes picture.data */
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y  += s->linesize;
        ptr_cb += s->uvlinesize;
        ptr_cr += s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        /* pix_op is indexed by block size: [0] = 16-wide, [1] = 8-wide */
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
            (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if (!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
        s->out_format == FMT_H261) {
        ff_h261_loop_filter(s);
    }
}
368 | /* apply one mpeg motion vector to the three components */ |
369 | static void mpeg_motion(MpegEncContext *s, |
370 | uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, |
371 | int field_select, uint8_t **ref_picture, |
372 | op_pixels_func (*pix_op)[4], |
373 | int motion_x, int motion_y, int h, int mb_y) |
374 | { |
375 | #if !CONFIG_SMALL |
376 | if (s->out_format == FMT_MPEG1) |
377 | mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0, |
378 | field_select, ref_picture, pix_op, |
379 | motion_x, motion_y, h, 1, mb_y); |
380 | else |
381 | #endif |
382 | mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 0, 0, |
383 | field_select, ref_picture, pix_op, |
384 | motion_x, motion_y, h, 0, mb_y); |
385 | } |
386 | |
387 | static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y, |
388 | uint8_t *dest_cb, uint8_t *dest_cr, |
389 | int bottom_field, int field_select, |
390 | uint8_t **ref_picture, |
391 | op_pixels_func (*pix_op)[4], |
392 | int motion_x, int motion_y, int h, int mb_y) |
393 | { |
394 | #if !CONFIG_SMALL |
395 | if (s->out_format == FMT_MPEG1) |
396 | mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1, |
397 | bottom_field, field_select, ref_picture, pix_op, |
398 | motion_x, motion_y, h, 1, mb_y); |
399 | else |
400 | #endif |
401 | mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, 1, |
402 | bottom_field, field_select, ref_picture, pix_op, |
403 | motion_x, motion_y, h, 0, mb_y); |
404 | } |
405 | |
// FIXME: SIMDify, avg variant, 16x16 version
/**
 * Overlapped block motion compensation blend of one 8x8 block.
 * src holds five predictions of the same 8x8 area: [0] = own MV ("mid"),
 * [1] = top neighbour's MV, [2] = left, [3] = right, [4] = bottom.
 * Each output pixel is a fixed weighted sum (weights total 8, +4 for
 * rounding, >>3); the weight tables favour the neighbour nearest to the
 * pixel, as specified for H.263 Annex F style OBMC.
 */
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
{
    int x;
    uint8_t *const top    = src[1];
    uint8_t *const left   = src[2];
    uint8_t *const mid    = src[0];
    uint8_t *const right  = src[3];
    uint8_t *const bottom = src[4];
/* one pixel: weights t,l,m,r,b must sum to 8; +4 rounds before >>3 */
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
/* same weights applied to a 2x2 group of pixels */
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x = 0;
    /* row 0: top weight strongest, left/right fade in at the corners */
    OBMC_FILTER (x    , 2, 2, 4, 0, 0);
    OBMC_FILTER (x + 1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x + 4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x + 7, 2, 0, 4, 2, 0);
    x += stride;
    /* row 1: only the outer columns differ from the 2x2 groups above */
    OBMC_FILTER (x    , 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x + 6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x + 7, 1, 0, 5, 2, 0);
    x += stride;
    /* rows 2-3 */
    OBMC_FILTER4(x    , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x + 2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x + 4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x + 6, 1, 0, 5, 2, 0);
    x += 2 * stride;
    /* rows 4-5: bottom neighbour starts contributing */
    OBMC_FILTER4(x    , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x + 4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x + 6, 0, 0, 5, 2, 1);
    x += 2*stride;
    /* row 6 */
    OBMC_FILTER (x    , 0, 2, 5, 0, 1);
    OBMC_FILTER (x + 1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x + 2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x + 4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x + 7, 0, 0, 5, 2, 1);
    x += stride;
    /* row 7: bottom weight strongest */
    OBMC_FILTER (x    , 0, 2, 4, 0, 2);
    OBMC_FILTER (x + 1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x + 6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x + 7, 0, 0, 4, 2, 2);
}
458 | |
459 | /* obmc for 1 8x8 luma block */ |
460 | static inline void obmc_motion(MpegEncContext *s, |
461 | uint8_t *dest, uint8_t *src, |
462 | int src_x, int src_y, |
463 | op_pixels_func *pix_op, |
464 | int16_t mv[5][2] /* mid top left right bottom */) |
465 | #define MID 0 |
466 | { |
467 | int i; |
468 | uint8_t *ptr[5]; |
469 | |
470 | av_assert2(s->quarter_sample == 0); |
471 | |
472 | for (i = 0; i < 5; i++) { |
473 | if (i && mv[i][0] == mv[MID][0] && mv[i][1] == mv[MID][1]) { |
474 | ptr[i] = ptr[MID]; |
475 | } else { |
476 | ptr[i] = s->sc.obmc_scratchpad + 8 * (i & 1) + |
477 | s->linesize * 8 * (i >> 1); |
478 | hpel_motion(s, ptr[i], src, src_x, src_y, pix_op, |
479 | mv[i][0], mv[i][1]); |
480 | } |
481 | } |
482 | |
483 | put_obmc(dest, ptr, s->linesize); |
484 | } |
485 | |
/**
 * Quarter-pel motion compensation of one macroblock (or field): qpel for
 * luma, half-pel for chroma after rounding the MV down to chroma precision.
 *
 * @param field_based  1 when operating on a single field (doubled strides)
 * @param bottom_field 1 to write the bottom field lines of the destination
 * @param field_select 1 to read the bottom field lines of the reference
 * @param h            height in destination lines
 */
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
                               int field_based, int bottom_field,
                               int field_select, uint8_t **ref_picture,
                               op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
    ptrdiff_t linesize, uvlinesize;

    /* luma quarter-pel fraction and integer source position */
    dxy = ((motion_y & 3) << 2) | (motion_x & 3);

    src_x = s->mb_x * 16 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->linesize   << field_based;
    uvlinesize = s->uvlinesize << field_based;

    /* derive the chroma MV at half the luma precision; two buggy-encoder
     * workarounds use alternative roundings */
    if (field_based) {
        mx = motion_x / 2;
        my = motion_y >> 1;
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA2) {
        static const int rtab[8] = { 0, 0, 1, 1, 0, 0, 0, 1 };
        mx = (motion_x >> 1) + rtab[motion_x & 7];
        my = (motion_y >> 1) + rtab[motion_y & 7];
    } else if (s->workaround_bugs & FF_BUG_QPEL_CHROMA) {
        mx = (motion_x >> 1) | (motion_x & 1);
        my = (motion_y >> 1) | (motion_y & 1);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
    }
    /* round to half-pel, keeping any odd fraction as the half-pel bit */
    mx = (mx >> 1) | (mx & 1);
    my = (my >> 1) | (my & 1);

    uvdxy = (mx & 1) | ((my & 1) << 1);
    mx >>= 1;
    my >>= 1;

    uvsrc_x = s->mb_x * 8 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* 17x17 luma source needed (16 + 1 for interpolation) */
    if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 15   , 0) ||
        (unsigned)src_y >= FFMAX(   v_edge_pos - (motion_y & 3) - h + 1, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 s->linesize, s->linesize,
                                 17, 17 + field_based,
                                 src_x, src_y * (1 << field_based),
                                 s->h_edge_pos, s->v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            /* chroma scratch areas live behind the 18-line luma area */
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf = ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     s->uvlinesize, s->uvlinesize,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y * (1 << field_based),
                                     s->h_edge_pos >> 1, s->v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    if (!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else {
        if (bottom_field) {
            dest_y  += s->linesize;
            dest_cb += s->uvlinesize;
            dest_cr += s->uvlinesize;
        }

        if (field_select) {
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        // damn interlaced mode
        // FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y, ptr_y, linesize);
        qpix_op[1][dxy](dest_y + 8, ptr_y + 8, linesize);
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        /* chroma is half-pel only */
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}
589 | |
590 | /** |
591 | * H.263 chroma 4mv motion compensation. |
592 | */ |
593 | static void chroma_4mv_motion(MpegEncContext *s, |
594 | uint8_t *dest_cb, uint8_t *dest_cr, |
595 | uint8_t **ref_picture, |
596 | op_pixels_func *pix_op, |
597 | int mx, int my) |
598 | { |
599 | uint8_t *ptr; |
600 | int src_x, src_y, dxy, emu = 0; |
601 | ptrdiff_t offset; |
602 | |
603 | /* In case of 8X8, we construct a single chroma motion vector |
604 | * with a special rounding */ |
605 | mx = ff_h263_round_chroma(mx); |
606 | my = ff_h263_round_chroma(my); |
607 | |
608 | dxy = ((my & 1) << 1) | (mx & 1); |
609 | mx >>= 1; |
610 | my >>= 1; |
611 | |
612 | src_x = s->mb_x * 8 + mx; |
613 | src_y = s->mb_y * 8 + my; |
614 | src_x = av_clip(src_x, -8, (s->width >> 1)); |
615 | if (src_x == (s->width >> 1)) |
616 | dxy &= ~1; |
617 | src_y = av_clip(src_y, -8, (s->height >> 1)); |
618 | if (src_y == (s->height >> 1)) |
619 | dxy &= ~2; |
620 | |
621 | offset = src_y * s->uvlinesize + src_x; |
622 | ptr = ref_picture[1] + offset; |
623 | if ((unsigned)src_x >= FFMAX((s->h_edge_pos >> 1) - (dxy & 1) - 7, 0) || |
624 | (unsigned)src_y >= FFMAX((s->v_edge_pos >> 1) - (dxy >> 1) - 7, 0)) { |
625 | s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr, |
626 | s->uvlinesize, s->uvlinesize, |
627 | 9, 9, src_x, src_y, |
628 | s->h_edge_pos >> 1, s->v_edge_pos >> 1); |
629 | ptr = s->sc.edge_emu_buffer; |
630 | emu = 1; |
631 | } |
632 | pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8); |
633 | |
634 | ptr = ref_picture[2] + offset; |
635 | if (emu) { |
636 | s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr, |
637 | s->uvlinesize, s->uvlinesize, |
638 | 9, 9, src_x, src_y, |
639 | s->h_edge_pos >> 1, s->v_edge_pos >> 1); |
640 | ptr = s->sc.edge_emu_buffer; |
641 | } |
642 | pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8); |
643 | } |
644 | |
645 | static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir) |
646 | { |
647 | /* fetch pixels for estimated mv 4 macroblocks ahead |
648 | * optimized for 64byte cache lines */ |
649 | const int shift = s->quarter_sample ? 2 : 1; |
650 | const int mx = (s->mv[dir][0][0] >> shift) + 16 * s->mb_x + 8; |
651 | const int my = (s->mv[dir][0][1] >> shift) + 16 * s->mb_y; |
652 | int off = mx + (my + (s->mb_x & 3) * 4) * s->linesize + 64; |
653 | |
654 | s->vdsp.prefetch(pix[0] + off, s->linesize, 4); |
655 | off = (mx >> 1) + ((my >> 1) + (s->mb_x & 7)) * s->uvlinesize + 64; |
656 | s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2); |
657 | } |
658 | |
/**
 * OBMC for one whole macroblock: gather the motion vectors of the current
 * four 8x8 blocks and their neighbours into a 4x4 cache (with border
 * replication where neighbours are missing or intra), run obmc_motion() on
 * each luma 8x8 block, then do normal 4MV chroma compensation with the
 * summed vector.
 */
static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4])
{
    /* mv_cache[row][col]: rows 1-2 / cols 1-2 are the current MB's four
     * blocks; row 0, col 0, col 3 and row 3 hold neighbour MVs */
    LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
    Picture *cur_frame = &s->current_picture;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    const int xy         = mb_x + mb_y * s->mb_stride;
    const int mot_stride = s->b8_stride;
    const int mot_xy     = mb_x * 2 + mb_y * 2 * mot_stride;
    int mx, my, i;

    av_assert2(!s->mb_skipped);

    /* current macroblock's four block MVs */
    AV_COPY32(mv_cache[1][1], cur_frame->motion_val[0][mot_xy]);
    AV_COPY32(mv_cache[1][2], cur_frame->motion_val[0][mot_xy + 1]);

    AV_COPY32(mv_cache[2][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[2][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    /* bottom neighbour row: deliberately duplicates the MB's own bottom
     * row (the row below is not used as a distinct neighbour here) */
    AV_COPY32(mv_cache[3][1],
              cur_frame->motion_val[0][mot_xy + mot_stride]);
    AV_COPY32(mv_cache[3][2],
              cur_frame->motion_val[0][mot_xy + mot_stride + 1]);

    /* top neighbours: replicate own MVs at the picture border or when the
     * neighbour macroblock is intra */
    if (mb_y == 0 || IS_INTRA(cur_frame->mb_type[xy - s->mb_stride])) {
        AV_COPY32(mv_cache[0][1], mv_cache[1][1]);
        AV_COPY32(mv_cache[0][2], mv_cache[1][2]);
    } else {
        AV_COPY32(mv_cache[0][1],
                  cur_frame->motion_val[0][mot_xy - mot_stride]);
        AV_COPY32(mv_cache[0][2],
                  cur_frame->motion_val[0][mot_xy - mot_stride + 1]);
    }

    /* left neighbours */
    if (mb_x == 0 || IS_INTRA(cur_frame->mb_type[xy - 1])) {
        AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
        AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
    } else {
        AV_COPY32(mv_cache[1][0], cur_frame->motion_val[0][mot_xy - 1]);
        AV_COPY32(mv_cache[2][0],
                  cur_frame->motion_val[0][mot_xy - 1 + mot_stride]);
    }

    /* right neighbours */
    if (mb_x + 1 >= s->mb_width || IS_INTRA(cur_frame->mb_type[xy + 1])) {
        AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
        AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
    } else {
        AV_COPY32(mv_cache[1][3], cur_frame->motion_val[0][mot_xy + 2]);
        AV_COPY32(mv_cache[2][3],
                  cur_frame->motion_val[0][mot_xy + 2 + mot_stride]);
    }

    mx = 0;
    my = 0;
    for (i = 0; i < 4; i++) {
        const int x = (i & 1) + 1;
        const int y = (i >> 1) + 1;
        /* mid, top, left, right, bottom — the order obmc_motion() expects */
        int16_t mv[5][2] = {
            { mv_cache[y][x][0],     mv_cache[y][x][1]     },
            { mv_cache[y - 1][x][0], mv_cache[y - 1][x][1] },
            { mv_cache[y][x - 1][0], mv_cache[y][x - 1][1] },
            { mv_cache[y][x + 1][0], mv_cache[y][x + 1][1] },
            { mv_cache[y + 1][x][0], mv_cache[y + 1][x][1] }
        };
        // FIXME cleanup
        obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                    ref_picture[0],
                    mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >> 1) * 8,
                    pix_op[1],
                    mv);

        /* accumulate for the derived chroma vector */
        mx += mv[0][0];
        my += mv[0][1];
    }
    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1],
                          mx, my);
}
745 | |
/**
 * 4MV (MV_TYPE_8X8) motion compensation: one independent vector per 8x8
 * luma block, quarter-pel or half-pel depending on s->quarter_sample, plus
 * a single derived chroma vector.
 *
 * @param dir direction (0->forward, 1->backward); selects s->mv[dir]
 */
static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_y,
                             uint8_t *dest_cb,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t **ref_picture,
                             qpel_mc_func (*qpix_op)[16],
                             op_pixels_func (*pix_op)[4])
{
    int dxy, mx, my, src_x, src_y;
    int i;
    int mb_x = s->mb_x;
    int mb_y = s->mb_y;
    uint8_t *ptr, *dest;

    /* mx/my accumulate the four luma vectors for the chroma derivation */
    mx = 0;
    my = 0;
    if (s->quarter_sample) {
        for (i = 0; i < 4; i++) {
            int motion_x = s->mv[dir][i][0];
            int motion_y = s->mv[dir][i][1];

            dxy = ((motion_y & 3) << 2) | (motion_x & 3);
            src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
            src_y = mb_y * 16 + (motion_y >> 2) + (i >> 1) * 8;

            /* WARNING: do no forget half pels */
            src_x = av_clip(src_x, -16, s->width);
            if (src_x == s->width)
                dxy &= ~3;   /* drop the horizontal fraction at the border */
            src_y = av_clip(src_y, -16, s->height);
            if (src_y == s->height)
                dxy &= ~12;  /* drop the vertical fraction at the border */

            ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
            /* 9x9 source needed (8 + 1 for interpolation) */
            if ((unsigned)src_x >= FFMAX(s->h_edge_pos - (motion_x & 3) - 7, 0) ||
                (unsigned)src_y >= FFMAX(s->v_edge_pos - (motion_y & 3) - 7, 0)) {
                s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                         s->linesize, s->linesize,
                                         9, 9,
                                         src_x, src_y,
                                         s->h_edge_pos,
                                         s->v_edge_pos);
                ptr = s->sc.edge_emu_buffer;
            }
            dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
            qpix_op[1][dxy](dest, ptr, s->linesize);

            /* quarter-pel vectors are halved before the chroma rounding */
            mx += s->mv[dir][i][0] / 2;
            my += s->mv[dir][i][1] / 2;
        }
    } else {
        for (i = 0; i < 4; i++) {
            hpel_motion(s,
                        dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8,
                        mb_y * 16 + (i >> 1) * 8,
                        pix_op[1],
                        s->mv[dir][i][0],
                        s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }
    }

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
        chroma_4mv_motion(s, dest_cb, dest_cr,
                          ref_picture, pix_op[1], mx, my);
}
817 | |
818 | /** |
819 | * motion compensation of a single macroblock |
820 | * @param s context |
821 | * @param dest_y luma destination pointer |
822 | * @param dest_cb chroma cb/u destination pointer |
823 | * @param dest_cr chroma cr/v destination pointer |
824 | * @param dir direction (0->forward, 1->backward) |
825 | * @param ref_picture array[3] of pointers to the 3 planes of the reference picture |
826 | * @param pix_op halfpel motion compensation function (average or put normally) |
827 | * @param qpix_op qpel motion compensation function (average or put normally) |
828 | * the motion vectors are taken from s->mv and the MV type from s->mv_type |
829 | */ |
static av_always_inline void mpv_motion_internal(MpegEncContext *s,
                                                 uint8_t *dest_y,
                                                 uint8_t *dest_cb,
                                                 uint8_t *dest_cr,
                                                 int dir,
                                                 uint8_t **ref_picture,
                                                 op_pixels_func (*pix_op)[4],
                                                 qpel_mc_func (*qpix_op)[16],
                                                 int is_mpeg12)
{
    int i;
    int mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    /* OBMC replaces the normal MC entirely (H.263-family P frames only) */
    if (!is_mpeg12 && s->obmc && s->pict_type != AV_PICTURE_TYPE_B) {
        apply_obmc(s, dest_y, dest_cb, dest_cr, ref_picture, pix_op);
        return;
    }

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        if (s->mcsel) {
            /* global motion compensation (MPEG-4 sprites) */
            if (s->real_sprite_warping_points == 1) {
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            } else {
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                           ref_picture);
            }
        } else if (!is_mpeg12 && s->quarter_sample) {
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else if (!is_mpeg12 && (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) &&
                   s->mspel && s->codec_id == AV_CODEC_ID_WMV2) {
            /* WMV2 uses its own ("mspel") interpolation */
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 16);
        } else {
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y);
        }
        break;
    case MV_TYPE_8X8:
        /* 4MV is never used by MPEG-1/2 */
        if (!is_mpeg12)
            apply_8x8(s, dest_y, dest_cb, dest_cr,
                      dir, ref_picture, qpix_op, pix_op);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if (!is_mpeg12 && s->quarter_sample) {
                for (i = 0; i < 2; i++)
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
            } else {
                /* top field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  0, s->field_select[dir][0],
                                  ref_picture, pix_op,
                                  s->mv[dir][0][0], s->mv[dir][0][1], 8, mb_y);
                /* bottom field */
                mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                  1, s->field_select[dir][1],
                                  ref_picture, pix_op,
                                  s->mv[dir][1][0], s->mv[dir][1][1], 8, mb_y);
            }
        } else {
            /* field picture: same-frame references (or a missing reference)
             * must come from the current picture instead */
            if (   s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field
                || !ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, each covering a 16x8 half of the macroblock */
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;

            if ((s->picture_structure == s->field_select[dir][i] + 1
                || s->pict_type == AV_PICTURE_TYPE_B || s->first_field) && ref_picture[0]) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16 * i,
                        8, mb_y >> 1);

            dest_y  += 16 * s->linesize;
            dest_cb += (16 >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (16 >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: predictions from both parities are averaged */
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++)
                    mpeg_motion_field(s, dest_y, dest_cb, dest_cr,
                                      j, j ^ i, ref_picture, pix_op,
                                      s->mv[dir][2 * i + j][0],
                                      s->mv[dir][2 * i + j][1], 8, mb_y);
                pix_op = s->hdsp.avg_pixels_tab;
            }
        } else {
            if (!ref_picture[0]) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            for (i = 0; i < 2; i++) {
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            s->picture_structure != i + 1,
                            ref_picture, pix_op,
                            s->mv[dir][2 * i][0], s->mv[dir][2 * i][1],
                            16, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->hdsp.avg_pixels_tab;

                /* opposite parity is always in the same frame if this is
                 * second field */
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default: av_assert2(0);
    }
}
971 | |
972 | void ff_mpv_motion(MpegEncContext *s, |
973 | uint8_t *dest_y, uint8_t *dest_cb, |
974 | uint8_t *dest_cr, int dir, |
975 | uint8_t **ref_picture, |
976 | op_pixels_func (*pix_op)[4], |
977 | qpel_mc_func (*qpix_op)[16]) |
978 | { |
979 | #if !CONFIG_SMALL |
980 | if (s->out_format == FMT_MPEG1) |
981 | mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir, |
982 | ref_picture, pix_op, qpix_op, 1); |
983 | else |
984 | #endif |
985 | mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir, |
986 | ref_picture, pix_op, qpix_op, 0); |
987 | } |
988 |