1/*
2 * Error resilience / concealment
3 *
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 *
6 * This file is part of FFmpeg.
7 *
8 * FFmpeg is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * FFmpeg is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with FFmpeg; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23/**
24 * @file
25 * Error resilience / concealment.
26 */
27
28#include <limits.h>
29
30#include "libavutil/atomic.h"
31#include "libavutil/internal.h"
32#include "avcodec.h"
33#include "error_resilience.h"
34#include "me_cmp.h"
35#include "mpegutils.h"
36#include "mpegvideo.h"
37#include "rectangle.h"
38#include "thread.h"
39#include "version.h"
40
41/**
42 * @param stride the number of MVs to get to the next row
43 * @param mv_step the number of MVs per row or column in a macroblock
44 */
45static void set_mv_strides(ERContext *s, ptrdiff_t *mv_step, ptrdiff_t *stride)
46{
47 if (s->avctx->codec_id == AV_CODEC_ID_H264) {
48 av_assert0(s->quarter_sample);
49 *mv_step = 4;
50 *stride = s->mb_width * 4;
51 } else {
52 *mv_step = 2;
53 *stride = s->b8_stride;
54 }
55}
56
57/**
58 * Replace the current MB with a flat dc-only version.
59 */
60static void put_dc(ERContext *s, uint8_t *dest_y, uint8_t *dest_cb,
61 uint8_t *dest_cr, int mb_x, int mb_y)
62{
63 int *linesize = s->cur_pic.f->linesize;
64 int dc, dcu, dcv, y, i;
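    /* The per-block DC values in dc_val[] are stored scaled by 8, hence the
     * clip to 2040 (255 * 8) and the division by 8 when writing pixels. */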
65 for (i = 0; i < 4; i++) {
66 dc = s->dc_val[0][mb_x * 2 + (i & 1) + (mb_y * 2 + (i >> 1)) * s->b8_stride];
67 if (dc < 0)
68 dc = 0;
69 else if (dc > 2040)
70 dc = 2040;
71 for (y = 0; y < 8; y++) {
72 int x;
73 for (x = 0; x < 8; x++)
74 dest_y[x + (i & 1) * 8 + (y + (i >> 1) * 8) * linesize[0]] = dc / 8;
75 }
76 }
77 dcu = s->dc_val[1][mb_x + mb_y * s->mb_stride];
78 dcv = s->dc_val[2][mb_x + mb_y * s->mb_stride];
79 if (dcu < 0)
80 dcu = 0;
81 else if (dcu > 2040)
82 dcu = 2040;
83 if (dcv < 0)
84 dcv = 0;
85 else if (dcv > 2040)
86 dcv = 2040;
87
88 if (dest_cr)
89 for (y = 0; y < 8; y++) {
90 int x;
91 for (x = 0; x < 8; x++) {
92 dest_cb[x + y * linesize[1]] = dcu / 8;
93 dest_cr[x + y * linesize[2]] = dcv / 8;
94 }
95 }
96}
97
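/**
 * Filter a DC plane in place with a (-1, 8, -1) / 6 kernel, first along each
 * row and then along each column; the multiply by 10923 and shift by 16
 * approximate the division by 6 with rounding.
 */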
98static void filter181(int16_t *data, int width, int height, ptrdiff_t stride)
99{
100 int x, y;
101
102 /* horizontal filter */
103 for (y = 1; y < height - 1; y++) {
104 int prev_dc = data[0 + y * stride];
105
106 for (x = 1; x < width - 1; x++) {
107 int dc;
108 dc = -prev_dc +
109 data[x + y * stride] * 8 -
110 data[x + 1 + y * stride];
111 dc = (dc * 10923 + 32768) >> 16;
112 prev_dc = data[x + y * stride];
113 data[x + y * stride] = dc;
114 }
115 }
116
117 /* vertical filter */
118 for (x = 1; x < width - 1; x++) {
119 int prev_dc = data[x];
120
121 for (y = 1; y < height - 1; y++) {
122 int dc;
123
124 dc = -prev_dc +
125 data[x + y * stride] * 8 -
126 data[x + (y + 1) * stride];
127 dc = (dc * 10923 + 32768) >> 16;
128 prev_dc = data[x + y * stride];
129 data[x + y * stride] = dc;
130 }
131 }
132}
133
134/**
135 * guess the dc of blocks which do not have an undamaged dc
136 * @param w width in 8 pixel blocks
137 * @param h height in 8 pixel blocks
138 */
139static void guess_dc(ERContext *s, int16_t *dc, int w,
140 int h, ptrdiff_t stride, int is_luma)
141{
142 int b_x, b_y;
143 int16_t (*col )[4] = av_malloc_array(stride, h*sizeof( int16_t)*4);
144 uint32_t (*dist)[4] = av_malloc_array(stride, h*sizeof(uint32_t)*4);
145
146 if(!col || !dist) {
147 av_log(s->avctx, AV_LOG_ERROR, "guess_dc() is out of memory\n");
148 goto fail;
149 }
150
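    /* For every 8x8 block, scan its row (both directions) and its column
     * (both directions) for the nearest block that is inter or has an
     * undamaged DC; record that DC in col[] and its distance in dist[] for
     * each of the four directions. */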
151 for(b_y=0; b_y<h; b_y++){
152 int color= 1024;
153 int distance= -1;
154 for(b_x=0; b_x<w; b_x++){
155 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
156 int error_j= s->error_status_table[mb_index_j];
157 int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
158 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
159 color= dc[b_x + b_y*stride];
160 distance= b_x;
161 }
162 col [b_x + b_y*stride][1]= color;
163 dist[b_x + b_y*stride][1]= distance >= 0 ? b_x-distance : 9999;
164 }
165 color= 1024;
166 distance= -1;
167 for(b_x=w-1; b_x>=0; b_x--){
168 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
169 int error_j= s->error_status_table[mb_index_j];
170 int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
171 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
172 color= dc[b_x + b_y*stride];
173 distance= b_x;
174 }
175 col [b_x + b_y*stride][0]= color;
176 dist[b_x + b_y*stride][0]= distance >= 0 ? distance-b_x : 9999;
177 }
178 }
179 for(b_x=0; b_x<w; b_x++){
180 int color= 1024;
181 int distance= -1;
182 for(b_y=0; b_y<h; b_y++){
183 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
184 int error_j= s->error_status_table[mb_index_j];
185 int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
186 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
187 color= dc[b_x + b_y*stride];
188 distance= b_y;
189 }
190 col [b_x + b_y*stride][3]= color;
191 dist[b_x + b_y*stride][3]= distance >= 0 ? b_y-distance : 9999;
192 }
193 color= 1024;
194 distance= -1;
195 for(b_y=h-1; b_y>=0; b_y--){
196 int mb_index_j= (b_x>>is_luma) + (b_y>>is_luma)*s->mb_stride;
197 int error_j= s->error_status_table[mb_index_j];
198 int intra_j = IS_INTRA(s->cur_pic.mb_type[mb_index_j]);
199 if(intra_j==0 || !(error_j&ER_DC_ERROR)){
200 color= dc[b_x + b_y*stride];
201 distance= b_y;
202 }
203 col [b_x + b_y*stride][2]= color;
204 dist[b_x + b_y*stride][2]= distance >= 0 ? distance-b_y : 9999;
205 }
206 }
207
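    /* Replace each damaged intra DC with an average of the four collected
     * neighbours, each weighted by the inverse of its distance. */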
208 for (b_y = 0; b_y < h; b_y++) {
209 for (b_x = 0; b_x < w; b_x++) {
210 int mb_index, error, j;
211 int64_t guess, weight_sum;
212 mb_index = (b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride;
213 error = s->error_status_table[mb_index];
214
215 if (IS_INTER(s->cur_pic.mb_type[mb_index]))
216 continue; // inter
217 if (!(error & ER_DC_ERROR))
218 continue; // dc-ok
219
220 weight_sum = 0;
221 guess = 0;
222 for (j = 0; j < 4; j++) {
223 int64_t weight = 256 * 256 * 256 * 16 / FFMAX(dist[b_x + b_y*stride][j], 1);
224 guess += weight*(int64_t)col[b_x + b_y*stride][j];
225 weight_sum += weight;
226 }
227 guess = (guess + weight_sum / 2) / weight_sum;
228 dc[b_x + b_y * stride] = guess;
229 }
230 }
231
232fail:
233 av_freep(&col);
234 av_freep(&dist);
235}
236
237/**
238 * simple horizontal deblocking filter used for error resilience
239 * @param w width in 8 pixel blocks
240 * @param h height in 8 pixel blocks
241 */
242static void h_block_filter(ERContext *s, uint8_t *dst, int w,
243 int h, ptrdiff_t stride, int is_luma)
244{
245 int b_x, b_y;
246 ptrdiff_t mvx_stride, mvy_stride;
247 const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
248 set_mv_strides(s, &mvx_stride, &mvy_stride);
249 mvx_stride >>= is_luma;
250 mvy_stride *= mvx_stride;
251
252 for (b_y = 0; b_y < h; b_y++) {
253 for (b_x = 0; b_x < w - 1; b_x++) {
254 int y;
255 int left_status = s->error_status_table[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
256 int right_status = s->error_status_table[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride];
257 int left_intra = IS_INTRA(s->cur_pic.mb_type[( b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
258 int right_intra = IS_INTRA(s->cur_pic.mb_type[((b_x + 1) >> is_luma) + (b_y >> is_luma) * s->mb_stride]);
259 int left_damage = left_status & ER_MB_ERROR;
260 int right_damage = right_status & ER_MB_ERROR;
261 int offset = b_x * 8 + b_y * stride * 8;
262 int16_t *left_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
263 int16_t *right_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * (b_x + 1)];
264 if (!(left_damage || right_damage))
265 continue; // both undamaged
266 if ((!left_intra) && (!right_intra) &&
267 FFABS(left_mv[0] - right_mv[0]) +
268 FFABS(left_mv[1] + right_mv[1]) < 2)
269 continue;
270
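            /* For each of the 8 rows, derive a correction d from the step
             * across the block edge (b) minus the average of the steps just
             * inside each block (a, c), and spread a tapered fraction of d
             * over the 4 pixels on the damaged side(s) of the edge. */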
271 for (y = 0; y < 8; y++) {
272 int a, b, c, d;
273
274 a = dst[offset + 7 + y * stride] - dst[offset + 6 + y * stride];
275 b = dst[offset + 8 + y * stride] - dst[offset + 7 + y * stride];
276 c = dst[offset + 9 + y * stride] - dst[offset + 8 + y * stride];
277
278 d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
279 d = FFMAX(d, 0);
280 if (b < 0)
281 d = -d;
282
283 if (d == 0)
284 continue;
285
286 if (!(left_damage && right_damage))
287 d = d * 16 / 9;
288
289 if (left_damage) {
290 dst[offset + 7 + y * stride] = cm[dst[offset + 7 + y * stride] + ((d * 7) >> 4)];
291 dst[offset + 6 + y * stride] = cm[dst[offset + 6 + y * stride] + ((d * 5) >> 4)];
292 dst[offset + 5 + y * stride] = cm[dst[offset + 5 + y * stride] + ((d * 3) >> 4)];
293 dst[offset + 4 + y * stride] = cm[dst[offset + 4 + y * stride] + ((d * 1) >> 4)];
294 }
295 if (right_damage) {
296 dst[offset + 8 + y * stride] = cm[dst[offset + 8 + y * stride] - ((d * 7) >> 4)];
297 dst[offset + 9 + y * stride] = cm[dst[offset + 9 + y * stride] - ((d * 5) >> 4)];
298 dst[offset + 10+ y * stride] = cm[dst[offset + 10 + y * stride] - ((d * 3) >> 4)];
299 dst[offset + 11+ y * stride] = cm[dst[offset + 11 + y * stride] - ((d * 1) >> 4)];
300 }
301 }
302 }
303 }
304}
305
306/**
307 * simple vertical deblocking filter used for error resilience
308 * @param w width in 8 pixel blocks
309 * @param h height in 8 pixel blocks
310 */
311static void v_block_filter(ERContext *s, uint8_t *dst, int w, int h,
312 ptrdiff_t stride, int is_luma)
313{
314 int b_x, b_y;
315 ptrdiff_t mvx_stride, mvy_stride;
316 const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
317 set_mv_strides(s, &mvx_stride, &mvy_stride);
318 mvx_stride >>= is_luma;
319 mvy_stride *= mvx_stride;
320
321 for (b_y = 0; b_y < h - 1; b_y++) {
322 for (b_x = 0; b_x < w; b_x++) {
323 int x;
324 int top_status = s->error_status_table[(b_x >> is_luma) + (b_y >> is_luma) * s->mb_stride];
325 int bottom_status = s->error_status_table[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride];
326 int top_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ( b_y >> is_luma) * s->mb_stride]);
327 int bottom_intra = IS_INTRA(s->cur_pic.mb_type[(b_x >> is_luma) + ((b_y + 1) >> is_luma) * s->mb_stride]);
328 int top_damage = top_status & ER_MB_ERROR;
329 int bottom_damage = bottom_status & ER_MB_ERROR;
330 int offset = b_x * 8 + b_y * stride * 8;
331
332 int16_t *top_mv = s->cur_pic.motion_val[0][mvy_stride * b_y + mvx_stride * b_x];
333 int16_t *bottom_mv = s->cur_pic.motion_val[0][mvy_stride * (b_y + 1) + mvx_stride * b_x];
334
335 if (!(top_damage || bottom_damage))
336 continue; // both undamaged
337
338 if ((!top_intra) && (!bottom_intra) &&
339 FFABS(top_mv[0] - bottom_mv[0]) +
340 FFABS(top_mv[1] + bottom_mv[1]) < 2)
341 continue;
342
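            /* Same correction as in h_block_filter(), applied across the
             * horizontal edge between vertically adjacent blocks. */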
343 for (x = 0; x < 8; x++) {
344 int a, b, c, d;
345
346 a = dst[offset + x + 7 * stride] - dst[offset + x + 6 * stride];
347 b = dst[offset + x + 8 * stride] - dst[offset + x + 7 * stride];
348 c = dst[offset + x + 9 * stride] - dst[offset + x + 8 * stride];
349
350 d = FFABS(b) - ((FFABS(a) + FFABS(c) + 1) >> 1);
351 d = FFMAX(d, 0);
352 if (b < 0)
353 d = -d;
354
355 if (d == 0)
356 continue;
357
358 if (!(top_damage && bottom_damage))
359 d = d * 16 / 9;
360
361 if (top_damage) {
362 dst[offset + x + 7 * stride] = cm[dst[offset + x + 7 * stride] + ((d * 7) >> 4)];
363 dst[offset + x + 6 * stride] = cm[dst[offset + x + 6 * stride] + ((d * 5) >> 4)];
364 dst[offset + x + 5 * stride] = cm[dst[offset + x + 5 * stride] + ((d * 3) >> 4)];
365 dst[offset + x + 4 * stride] = cm[dst[offset + x + 4 * stride] + ((d * 1) >> 4)];
366 }
367 if (bottom_damage) {
368 dst[offset + x + 8 * stride] = cm[dst[offset + x + 8 * stride] - ((d * 7) >> 4)];
369 dst[offset + x + 9 * stride] = cm[dst[offset + x + 9 * stride] - ((d * 5) >> 4)];
370 dst[offset + x + 10 * stride] = cm[dst[offset + x + 10 * stride] - ((d * 3) >> 4)];
371 dst[offset + x + 11 * stride] = cm[dst[offset + x + 11 * stride] - ((d * 1) >> 4)];
372 }
373 }
374 }
375 }
376}
377
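/* Per-macroblock state used by guess_mv(): FROZEN blocks have a trusted MV
 * (intra, undamaged inter, or already repaired), LISTED blocks are queued for
 * repair, and CHANGED/UNCHANGED record whether the last pass altered the MV. */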
378#define MV_FROZEN 8
379#define MV_CHANGED 4
380#define MV_UNCHANGED 2
381#define MV_LISTED 1
382static av_always_inline void add_blocklist(int (*blocklist)[2], int *blocklist_length, uint8_t *fixed, int mb_x, int mb_y, int mb_xy)
383{
384 if (fixed[mb_xy])
385 return;
386 fixed[mb_xy] = MV_LISTED;
387 blocklist[ *blocklist_length ][0] = mb_x;
388 blocklist[(*blocklist_length)++][1] = mb_y;
389}
390
391static void guess_mv(ERContext *s)
392{
393 int (*blocklist)[2], (*next_blocklist)[2];
394 uint8_t *fixed;
395 const ptrdiff_t mb_stride = s->mb_stride;
396 const int mb_width = s->mb_width;
397 int mb_height = s->mb_height;
398 int i, depth, num_avail;
399 int mb_x, mb_y;
400 ptrdiff_t mot_step, mot_stride;
401 int blocklist_length, next_blocklist_length;
402
403 if (s->last_pic.f && s->last_pic.f->data[0])
404 mb_height = FFMIN(mb_height, (s->last_pic.f->height+15)>>4);
405 if (s->next_pic.f && s->next_pic.f->data[0])
406 mb_height = FFMIN(mb_height, (s->next_pic.f->height+15)>>4);
407
408 blocklist = (int (*)[2])s->er_temp_buffer;
409 next_blocklist = blocklist + s->mb_stride * s->mb_height;
410 fixed = (uint8_t *)(next_blocklist + s->mb_stride * s->mb_height);
411
412 set_mv_strides(s, &mot_step, &mot_stride);
413
414 num_avail = 0;
415 if (s->last_pic.motion_val[0])
416 ff_thread_await_progress(s->last_pic.tf, mb_height-1, 0);
417 for (i = 0; i < mb_width * mb_height; i++) {
418 const int mb_xy = s->mb_index2xy[i];
419 int f = 0;
420 int error = s->error_status_table[mb_xy];
421
422 if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
423 f = MV_FROZEN; // intra // FIXME check
424 if (!(error & ER_MV_ERROR))
425 f = MV_FROZEN; // inter with undamaged MV
426
427 fixed[mb_xy] = f;
428 if (f == MV_FROZEN)
429 num_avail++;
430 else if(s->last_pic.f->data[0] && s->last_pic.motion_val[0]){
431 const int mb_y= mb_xy / s->mb_stride;
432 const int mb_x= mb_xy % s->mb_stride;
433 const int mot_index= (mb_x + mb_y*mot_stride) * mot_step;
434 s->cur_pic.motion_val[0][mot_index][0]= s->last_pic.motion_val[0][mot_index][0];
435 s->cur_pic.motion_val[0][mot_index][1]= s->last_pic.motion_val[0][mot_index][1];
436 s->cur_pic.ref_index[0][4*mb_xy] = s->last_pic.ref_index[0][4*mb_xy];
437 }
438 }
439
440 if ((!(s->avctx->error_concealment&FF_EC_GUESS_MVS)) ||
441 num_avail <= mb_width / 2) {
442 for (mb_y = 0; mb_y < mb_height; mb_y++) {
443 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
444 const int mb_xy = mb_x + mb_y * s->mb_stride;
445 int mv_dir = (s->last_pic.f && s->last_pic.f->data[0]) ? MV_DIR_FORWARD : MV_DIR_BACKWARD;
446
447 if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
448 continue;
449 if (!(s->error_status_table[mb_xy] & ER_MV_ERROR))
450 continue;
451
452 s->mv[0][0][0] = 0;
453 s->mv[0][0][1] = 0;
454 s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
455 mb_x, mb_y, 0, 0);
456 }
457 }
458 return;
459 }
460
461 blocklist_length = 0;
462 for (mb_y = 0; mb_y < mb_height; mb_y++) {
463 for (mb_x = 0; mb_x < mb_width; mb_x++) {
464 const int mb_xy = mb_x + mb_y * mb_stride;
465 if (fixed[mb_xy] == MV_FROZEN) {
466 if (mb_x) add_blocklist(blocklist, &blocklist_length, fixed, mb_x - 1, mb_y, mb_xy - 1);
467 if (mb_y) add_blocklist(blocklist, &blocklist_length, fixed, mb_x, mb_y - 1, mb_xy - mb_stride);
468 if (mb_x+1 < mb_width) add_blocklist(blocklist, &blocklist_length, fixed, mb_x + 1, mb_y, mb_xy + 1);
469 if (mb_y+1 < mb_height) add_blocklist(blocklist, &blocklist_length, fixed, mb_x, mb_y + 1, mb_xy + mb_stride);
470 }
471 }
472 }
473
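    /* Repair damaged MVs in waves spreading out from trusted blocks: for each
     * listed block, try the MVs of its fixed neighbours plus their mean and
     * median, the zero MV and the block's previous MV, decode with each
     * candidate and keep the one with the smallest pixel difference along the
     * borders to fixed neighbours.  Blocks fixed in one round become frozen
     * and seed the next round's list. */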
474 for (depth = 0; ; depth++) {
475 int changed, pass, none_left;
476 int blocklist_index;
477
478 none_left = 1;
479 changed = 1;
480 for (pass = 0; (changed || pass < 2) && pass < 10; pass++) {
481 int score_sum = 0;
482
483 changed = 0;
484 for (blocklist_index = 0; blocklist_index < blocklist_length; blocklist_index++) {
485 const int mb_x = blocklist[blocklist_index][0];
486 const int mb_y = blocklist[blocklist_index][1];
487 const int mb_xy = mb_x + mb_y * mb_stride;
488 int mv_predictor[8][2];
489 int ref[8];
490 int pred_count;
491 int j;
492 int best_score;
493 int best_pred;
494 int mot_index;
495 int prev_x, prev_y, prev_ref;
496
497 if ((mb_x ^ mb_y ^ pass) & 1)
498 continue;
499 av_assert2(fixed[mb_xy] != MV_FROZEN);
500
501
502 av_assert1(!IS_INTRA(s->cur_pic.mb_type[mb_xy]));
503 av_assert1(s->last_pic.f && s->last_pic.f->data[0]);
504
505 j = 0;
506 if (mb_x > 0)
507 j |= fixed[mb_xy - 1];
508 if (mb_x + 1 < mb_width)
509 j |= fixed[mb_xy + 1];
510 if (mb_y > 0)
511 j |= fixed[mb_xy - mb_stride];
512 if (mb_y + 1 < mb_height)
513 j |= fixed[mb_xy + mb_stride];
514
515 av_assert2(j & MV_FROZEN);
516
517 if (!(j & MV_CHANGED) && pass > 1)
518 continue;
519
520 none_left = 0;
521 pred_count = 0;
522 mot_index = (mb_x + mb_y * mot_stride) * mot_step;
523
524 if (mb_x > 0 && fixed[mb_xy - 1] > 1) {
525 mv_predictor[pred_count][0] =
526 s->cur_pic.motion_val[0][mot_index - mot_step][0];
527 mv_predictor[pred_count][1] =
528 s->cur_pic.motion_val[0][mot_index - mot_step][1];
529 ref[pred_count] =
530 s->cur_pic.ref_index[0][4 * (mb_xy - 1)];
531 pred_count++;
532 }
533 if (mb_x + 1 < mb_width && fixed[mb_xy + 1] > 1) {
534 mv_predictor[pred_count][0] =
535 s->cur_pic.motion_val[0][mot_index + mot_step][0];
536 mv_predictor[pred_count][1] =
537 s->cur_pic.motion_val[0][mot_index + mot_step][1];
538 ref[pred_count] =
539 s->cur_pic.ref_index[0][4 * (mb_xy + 1)];
540 pred_count++;
541 }
542 if (mb_y > 0 && fixed[mb_xy - mb_stride] > 1) {
543 mv_predictor[pred_count][0] =
544 s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][0];
545 mv_predictor[pred_count][1] =
546 s->cur_pic.motion_val[0][mot_index - mot_stride * mot_step][1];
547 ref[pred_count] =
548 s->cur_pic.ref_index[0][4 * (mb_xy - s->mb_stride)];
549 pred_count++;
550 }
551 if (mb_y + 1<mb_height && fixed[mb_xy + mb_stride] > 1) {
552 mv_predictor[pred_count][0] =
553 s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][0];
554 mv_predictor[pred_count][1] =
555 s->cur_pic.motion_val[0][mot_index + mot_stride * mot_step][1];
556 ref[pred_count] =
557 s->cur_pic.ref_index[0][4 * (mb_xy + s->mb_stride)];
558 pred_count++;
559 }
560 if (pred_count == 0)
561 continue;
562
563 if (pred_count > 1) {
564 int sum_x = 0, sum_y = 0, sum_r = 0;
565 int max_x, max_y, min_x, min_y, max_r, min_r;
566
567 for (j = 0; j < pred_count; j++) {
568 sum_x += mv_predictor[j][0];
569 sum_y += mv_predictor[j][1];
570 sum_r += ref[j];
571 if (j && ref[j] != ref[j - 1])
572 goto skip_mean_and_median;
573 }
574
575 /* mean */
576 mv_predictor[pred_count][0] = sum_x / j;
577 mv_predictor[pred_count][1] = sum_y / j;
578 ref[pred_count] = sum_r / j;
579
580 /* median */
581 if (pred_count >= 3) {
582 min_y = min_x = min_r = 99999;
583 max_y = max_x = max_r = -99999;
584 } else {
585 min_x = min_y = max_x = max_y = min_r = max_r = 0;
586 }
587 for (j = 0; j < pred_count; j++) {
588 max_x = FFMAX(max_x, mv_predictor[j][0]);
589 max_y = FFMAX(max_y, mv_predictor[j][1]);
590 max_r = FFMAX(max_r, ref[j]);
591 min_x = FFMIN(min_x, mv_predictor[j][0]);
592 min_y = FFMIN(min_y, mv_predictor[j][1]);
593 min_r = FFMIN(min_r, ref[j]);
594 }
595 mv_predictor[pred_count + 1][0] = sum_x - max_x - min_x;
596 mv_predictor[pred_count + 1][1] = sum_y - max_y - min_y;
597 ref[pred_count + 1] = sum_r - max_r - min_r;
598
599 if (pred_count == 4) {
600 mv_predictor[pred_count + 1][0] /= 2;
601 mv_predictor[pred_count + 1][1] /= 2;
602 ref[pred_count + 1] /= 2;
603 }
604 pred_count += 2;
605 }
606
607skip_mean_and_median:
608 /* zero MV */
609 mv_predictor[pred_count][0] =
610 mv_predictor[pred_count][1] =
611 ref[pred_count] = 0;
612 pred_count++;
613
614 prev_x = s->cur_pic.motion_val[0][mot_index][0];
615 prev_y = s->cur_pic.motion_val[0][mot_index][1];
616 prev_ref = s->cur_pic.ref_index[0][4 * mb_xy];
617
618 /* last MV */
619 mv_predictor[pred_count][0] = prev_x;
620 mv_predictor[pred_count][1] = prev_y;
621 ref[pred_count] = prev_ref;
622 pred_count++;
623
624 best_pred = 0;
625 best_score = 256 * 256 * 256 * 64;
626 for (j = 0; j < pred_count; j++) {
627 int *linesize = s->cur_pic.f->linesize;
628 int score = 0;
629 uint8_t *src = s->cur_pic.f->data[0] +
630 mb_x * 16 + mb_y * 16 * linesize[0];
631
632 s->cur_pic.motion_val[0][mot_index][0] =
633 s->mv[0][0][0] = mv_predictor[j][0];
634 s->cur_pic.motion_val[0][mot_index][1] =
635 s->mv[0][0][1] = mv_predictor[j][1];
636
637 // predictor intra or otherwise not available
638 if (ref[j] < 0)
639 continue;
640
641 s->decode_mb(s->opaque, ref[j], MV_DIR_FORWARD,
642 MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);
643
644 if (mb_x > 0 && fixed[mb_xy - 1] > 1) {
645 int k;
646 for (k = 0; k < 16; k++)
647 score += FFABS(src[k * linesize[0] - 1] -
648 src[k * linesize[0]]);
649 }
650 if (mb_x + 1 < mb_width && fixed[mb_xy + 1] > 1) {
651 int k;
652 for (k = 0; k < 16; k++)
653 score += FFABS(src[k * linesize[0] + 15] -
654 src[k * linesize[0] + 16]);
655 }
656 if (mb_y > 0 && fixed[mb_xy - mb_stride] > 1) {
657 int k;
658 for (k = 0; k < 16; k++)
659 score += FFABS(src[k - linesize[0]] - src[k]);
660 }
661 if (mb_y + 1 < mb_height && fixed[mb_xy + mb_stride] > 1) {
662 int k;
663 for (k = 0; k < 16; k++)
664 score += FFABS(src[k + linesize[0] * 15] -
665 src[k + linesize[0] * 16]);
666 }
667
668 if (score <= best_score) { // <= will favor the last MV
669 best_score = score;
670 best_pred = j;
671 }
672 }
673 score_sum += best_score;
674 s->mv[0][0][0] = mv_predictor[best_pred][0];
675 s->mv[0][0][1] = mv_predictor[best_pred][1];
676
677 for (i = 0; i < mot_step; i++)
678 for (j = 0; j < mot_step; j++) {
679 s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][0] = s->mv[0][0][0];
680 s->cur_pic.motion_val[0][mot_index + i + j * mot_stride][1] = s->mv[0][0][1];
681 }
682
683 s->decode_mb(s->opaque, ref[best_pred], MV_DIR_FORWARD,
684 MV_TYPE_16X16, &s->mv, mb_x, mb_y, 0, 0);
685
686
687 if (s->mv[0][0][0] != prev_x || s->mv[0][0][1] != prev_y) {
688 fixed[mb_xy] = MV_CHANGED;
689 changed++;
690 } else
691 fixed[mb_xy] = MV_UNCHANGED;
692 }
693 }
694
695 if (none_left)
696 return;
697
698 next_blocklist_length = 0;
699
700 for (blocklist_index = 0; blocklist_index < blocklist_length; blocklist_index++) {
701 const int mb_x = blocklist[blocklist_index][0];
702 const int mb_y = blocklist[blocklist_index][1];
703 const int mb_xy = mb_x + mb_y * mb_stride;
704
705 if (fixed[mb_xy] & (MV_CHANGED|MV_UNCHANGED|MV_FROZEN)) {
706 fixed[mb_xy] = MV_FROZEN;
707 if (mb_x > 0)
708 add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x - 1, mb_y, mb_xy - 1);
709 if (mb_y > 0)
710 add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x, mb_y - 1, mb_xy - mb_stride);
711 if (mb_x + 1 < mb_width)
712 add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x + 1, mb_y, mb_xy + 1);
713 if (mb_y + 1 < mb_height)
714 add_blocklist(next_blocklist, &next_blocklist_length, fixed, mb_x, mb_y + 1, mb_xy + mb_stride);
715 }
716 }
717 av_assert0(next_blocklist_length <= mb_height * mb_width);
718 FFSWAP(int , blocklist_length, next_blocklist_length);
719 FFSWAP(void*, blocklist, next_blocklist);
720 }
721}
722
723static int is_intra_more_likely(ERContext *s)
724{
725 int is_intra_likely, i, j, undamaged_count, skip_amount, mb_x, mb_y;
726
727 if (!s->last_pic.f || !s->last_pic.f->data[0])
728 return 1; // no previous frame available -> use spatial prediction
729
730 if (s->avctx->error_concealment & FF_EC_FAVOR_INTER)
731 return 0;
732
733 undamaged_count = 0;
734 for (i = 0; i < s->mb_num; i++) {
735 const int mb_xy = s->mb_index2xy[i];
736 const int error = s->error_status_table[mb_xy];
737 if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
738 undamaged_count++;
739 }
740
741 if (undamaged_count < 5)
742 return 0; // almost all MBs damaged -> use temporal prediction
743
744 // skip the mecc.sad() check below, which requires access to the image
745 if (CONFIG_XVMC &&
746 s->avctx->hwaccel && s->avctx->hwaccel->decode_mb &&
747 s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I)
748 return 1;
749
750 skip_amount = FFMAX(undamaged_count / 50, 1); // check only up to 50 MBs
751 is_intra_likely = 0;
752
753 j = 0;
754 for (mb_y = 0; mb_y < s->mb_height - 1; mb_y++) {
755 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
756 int error;
757 const int mb_xy = mb_x + mb_y * s->mb_stride;
758
759 error = s->error_status_table[mb_xy];
760 if ((error & ER_DC_ERROR) && (error & ER_MV_ERROR))
761 continue; // skip damaged
762
763 j++;
764 // skip a few to speed things up
765 if ((j % skip_amount) != 0)
766 continue;
767
768 if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_I) {
769 int *linesize = s->cur_pic.f->linesize;
770 uint8_t *mb_ptr = s->cur_pic.f->data[0] +
771 mb_x * 16 + mb_y * 16 * linesize[0];
772 uint8_t *last_mb_ptr = s->last_pic.f->data[0] +
773 mb_x * 16 + mb_y * 16 * linesize[0];
774
775 if (s->avctx->codec_id == AV_CODEC_ID_H264) {
776 // FIXME
777 } else {
778 ff_thread_await_progress(s->last_pic.tf, mb_y, 0);
779 }
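                /* Add the SAD against the co-located MB of the previous frame
                 * and subtract the SAD between that MB and the one 16 lines
                 * below it in the previous frame: if the temporal difference
                 * dominates the spatial one overall, intra concealment is
                 * preferred (positive return value). */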
780 is_intra_likely += s->mecc.sad[0](NULL, last_mb_ptr, mb_ptr,
781 linesize[0], 16);
782 // FIXME need await_progress() here
783 is_intra_likely -= s->mecc.sad[0](NULL, last_mb_ptr,
784 last_mb_ptr + linesize[0] * 16,
785 linesize[0], 16);
786 } else {
787 if (IS_INTRA(s->cur_pic.mb_type[mb_xy]))
788 is_intra_likely++;
789 else
790 is_intra_likely--;
791 }
792 }
793 }
794// av_log(NULL, AV_LOG_ERROR, "is_intra_likely: %d type:%d\n", is_intra_likely, s->pict_type);
795 return is_intra_likely > 0;
796}
797
798void ff_er_frame_start(ERContext *s)
799{
800 if (!s->avctx->error_concealment)
801 return;
802
803 if (!s->mecc_inited) {
804 ff_me_cmp_init(&s->mecc, s->avctx);
805 s->mecc_inited = 1;
806 }
807
808 memset(s->error_status_table, ER_MB_ERROR | VP_START | ER_MB_END,
809 s->mb_stride * s->mb_height * sizeof(uint8_t));
810 s->error_count = 3 * s->mb_num;
811 s->error_occurred = 0;
812}
813
814static int er_supported(ERContext *s)
815{
816 if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice ||
817#if FF_API_CAP_VDPAU
818 s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU ||
819#endif
820 !s->cur_pic.f ||
821 s->cur_pic.field_picture
822 )
823 return 0;
824 return 1;
825}
826
827/**
828 * Add a slice.
829 * @param endx x component of the last macroblock, can be -1
830 * for the last of the previous line
831 * @param status the status at the end (ER_MV_END, ER_AC_ERROR, ...), it is
832 * assumed that no earlier end or error of the same type occurred
833 */
834void ff_er_add_slice(ERContext *s, int startx, int starty,
835 int endx, int endy, int status)
836{
837 const int start_i = av_clip(startx + starty * s->mb_width, 0, s->mb_num - 1);
838 const int end_i = av_clip(endx + endy * s->mb_width, 0, s->mb_num);
839 const int start_xy = s->mb_index2xy[start_i];
840 const int end_xy = s->mb_index2xy[end_i];
841 int mask = -1;
842
843 if (s->avctx->hwaccel && s->avctx->hwaccel->decode_slice)
844 return;
845
846 if (start_i > end_i || start_xy > end_xy) {
847 av_log(s->avctx, AV_LOG_ERROR,
848 "internal error, slice end before start\n");
849 return;
850 }
851
852 if (!s->avctx->error_concealment)
853 return;
854
855 mask &= ~VP_START;
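    /* error_count starts at 3 * mb_num (an AC, a DC and an MV partition per
     * macroblock); every partition type reported here subtracts the number of
     * macroblocks the slice covers, so a cleanly decoded frame reaches zero
     * and ff_er_frame_end() returns early. */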
856 if (status & (ER_AC_ERROR | ER_AC_END)) {
857 mask &= ~(ER_AC_ERROR | ER_AC_END);
858 avpriv_atomic_int_add_and_fetch(&s->error_count, start_i - end_i - 1);
859 }
860 if (status & (ER_DC_ERROR | ER_DC_END)) {
861 mask &= ~(ER_DC_ERROR | ER_DC_END);
862 avpriv_atomic_int_add_and_fetch(&s->error_count, start_i - end_i - 1);
863 }
864 if (status & (ER_MV_ERROR | ER_MV_END)) {
865 mask &= ~(ER_MV_ERROR | ER_MV_END);
866 avpriv_atomic_int_add_and_fetch(&s->error_count, start_i - end_i - 1);
867 }
868
869 if (status & ER_MB_ERROR) {
870 s->error_occurred = 1;
871 avpriv_atomic_int_set(&s->error_count, INT_MAX);
872 }
873
874 if (mask == ~0x7F) {
875 memset(&s->error_status_table[start_xy], 0,
876 (end_xy - start_xy) * sizeof(uint8_t));
877 } else {
878 int i;
879 for (i = start_xy; i < end_xy; i++)
880 s->error_status_table[i] &= mask;
881 }
882
883 if (end_i == s->mb_num)
884 avpriv_atomic_int_set(&s->error_count, INT_MAX);
885 else {
886 s->error_status_table[end_xy] &= mask;
887 s->error_status_table[end_xy] |= status;
888 }
889
890 s->error_status_table[start_xy] |= VP_START;
891
892 if (start_xy > 0 && !(s->avctx->active_thread_type & FF_THREAD_SLICE) &&
893 er_supported(s) && s->avctx->skip_top * s->mb_width < start_i) {
894 int prev_status = s->error_status_table[s->mb_index2xy[start_i - 1]];
895
896 prev_status &= ~ VP_START;
897 if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END)) {
898 s->error_occurred = 1;
899 avpriv_atomic_int_set(&s->error_count, INT_MAX);
900 }
901 }
902}
903
904void ff_er_frame_end(ERContext *s)
905{
906 int *linesize = NULL;
907 int i, mb_x, mb_y, error, error_type, dc_error, mv_error, ac_error;
908 int distance;
909 int threshold_part[4] = { 100, 100, 100 };
910 int threshold = 50;
911 int is_intra_likely;
912 int size = s->b8_stride * 2 * s->mb_height;
913
914 /* We do not support ER of field pictures yet,
915 * though it should not crash if enabled. */
916 if (!s->avctx->error_concealment || s->error_count == 0 ||
917 s->avctx->lowres ||
918 !er_supported(s) ||
919 s->error_count == 3 * s->mb_width *
920 (s->avctx->skip_top + s->avctx->skip_bottom)) {
921 return;
922 }
923 linesize = s->cur_pic.f->linesize;
924 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
925 int status = s->error_status_table[mb_x + (s->mb_height - 1) * s->mb_stride];
926 if (status != 0x7F)
927 break;
928 }
929
930 if ( mb_x == s->mb_width
931 && s->avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO
932 && (FFALIGN(s->avctx->height, 16)&16)
933 && s->error_count == 3 * s->mb_width * (s->avctx->skip_top + s->avctx->skip_bottom + 1)
934 ) {
935 av_log(s->avctx, AV_LOG_DEBUG, "ignoring last missing slice\n");
936 return;
937 }
938
939 if (s->last_pic.f) {
940 if (s->last_pic.f->width != s->cur_pic.f->width ||
941 s->last_pic.f->height != s->cur_pic.f->height ||
942 s->last_pic.f->format != s->cur_pic.f->format) {
943 av_log(s->avctx, AV_LOG_WARNING, "Cannot use previous picture in error concealment\n");
944 memset(&s->last_pic, 0, sizeof(s->last_pic));
945 }
946 }
947 if (s->next_pic.f) {
948 if (s->next_pic.f->width != s->cur_pic.f->width ||
949 s->next_pic.f->height != s->cur_pic.f->height ||
950 s->next_pic.f->format != s->cur_pic.f->format) {
951 av_log(s->avctx, AV_LOG_WARNING, "Cannot use next picture in error concealment\n");
952 memset(&s->next_pic, 0, sizeof(s->next_pic));
953 }
954 }
955
956 if (!s->cur_pic.motion_val[0] || !s->cur_pic.ref_index[0]) {
957 av_log(s->avctx, AV_LOG_ERROR, "Warning MVs not available\n");
958
959 for (i = 0; i < 2; i++) {
960 s->ref_index_buf[i] = av_buffer_allocz(s->mb_stride * s->mb_height * 4 * sizeof(uint8_t));
961 s->motion_val_buf[i] = av_buffer_allocz((size + 4) * 2 * sizeof(uint16_t));
962 if (!s->ref_index_buf[i] || !s->motion_val_buf[i])
963 break;
964 s->cur_pic.ref_index[i] = s->ref_index_buf[i]->data;
965 s->cur_pic.motion_val[i] = (int16_t (*)[2])s->motion_val_buf[i]->data + 4;
966 }
967 if (i < 2) {
968 for (i = 0; i < 2; i++) {
969 av_buffer_unref(&s->ref_index_buf[i]);
970 av_buffer_unref(&s->motion_val_buf[i]);
971 s->cur_pic.ref_index[i] = NULL;
972 s->cur_pic.motion_val[i] = NULL;
973 }
974 return;
975 }
976 }
977
978 if (s->avctx->debug & FF_DEBUG_ER) {
979 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
980 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
981 int status = s->error_status_table[mb_x + mb_y * s->mb_stride];
982
983 av_log(s->avctx, AV_LOG_DEBUG, "%2X ", status);
984 }
985 av_log(s->avctx, AV_LOG_DEBUG, "\n");
986 }
987 }
988
989#if 1
990 /* handle overlapping slices */
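    /* Walking backwards per partition type: a macroblock is flagged as
     * damaged if no end-or-error marker for that type follows it before the
     * next slice start, which happens when an overlapping slice cut the
     * previous one short. */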
991 for (error_type = 1; error_type <= 3; error_type++) {
992 int end_ok = 0;
993
994 for (i = s->mb_num - 1; i >= 0; i--) {
995 const int mb_xy = s->mb_index2xy[i];
996 int error = s->error_status_table[mb_xy];
997
998 if (error & (1 << error_type))
999 end_ok = 1;
1000 if (error & (8 << error_type))
1001 end_ok = 1;
1002
1003 if (!end_ok)
1004 s->error_status_table[mb_xy] |= 1 << error_type;
1005
1006 if (error & VP_START)
1007 end_ok = 0;
1008 }
1009 }
1010#endif
1011#if 1
1012 /* handle slices with partitions of different length */
1013 if (s->partitioned_frame) {
1014 int end_ok = 0;
1015
1016 for (i = s->mb_num - 1; i >= 0; i--) {
1017 const int mb_xy = s->mb_index2xy[i];
1018 int error = s->error_status_table[mb_xy];
1019
1020 if (error & ER_AC_END)
1021 end_ok = 0;
1022 if ((error & ER_MV_END) ||
1023 (error & ER_DC_END) ||
1024 (error & ER_AC_ERROR))
1025 end_ok = 1;
1026
1027 if (!end_ok)
1028 s->error_status_table[mb_xy]|= ER_AC_ERROR;
1029
1030 if (error & VP_START)
1031 end_ok = 0;
1032 }
1033 }
1034#endif
1035 /* handle missing slices */
1036 if (s->avctx->err_recognition & AV_EF_EXPLODE) {
1037 int end_ok = 1;
1038
1039 // FIXME + 100 hack
1040 for (i = s->mb_num - 2; i >= s->mb_width + 100; i--) {
1041 const int mb_xy = s->mb_index2xy[i];
1042 int error1 = s->error_status_table[mb_xy];
1043 int error2 = s->error_status_table[s->mb_index2xy[i + 1]];
1044
1045 if (error1 & VP_START)
1046 end_ok = 1;
1047
1048 if (error2 == (VP_START | ER_MB_ERROR | ER_MB_END) &&
1049 error1 != (VP_START | ER_MB_ERROR | ER_MB_END) &&
1050 ((error1 & ER_AC_END) || (error1 & ER_DC_END) ||
1051 (error1 & ER_MV_END))) {
1052 // end & uninit
1053 end_ok = 0;
1054 }
1055
1056 if (!end_ok)
1057 s->error_status_table[mb_xy] |= ER_MB_ERROR;
1058 }
1059 }
1060
1061#if 1
1062 /* backward mark errors */
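    /* An error is usually detected some macroblocks after the bitstream
     * actually broke, so spread each error flag backwards over up to
     * `threshold` preceding non-skipped macroblocks, without crossing a
     * slice start. */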
1063 distance = 9999999;
1064 for (error_type = 1; error_type <= 3; error_type++) {
1065 for (i = s->mb_num - 1; i >= 0; i--) {
1066 const int mb_xy = s->mb_index2xy[i];
1067 int error = s->error_status_table[mb_xy];
1068
1069 if (!s->mbskip_table || !s->mbskip_table[mb_xy]) // FIXME partition specific
1070 distance++;
1071 if (error & (1 << error_type))
1072 distance = 0;
1073
1074 if (s->partitioned_frame) {
1075 if (distance < threshold_part[error_type - 1])
1076 s->error_status_table[mb_xy] |= 1 << error_type;
1077 } else {
1078 if (distance < threshold)
1079 s->error_status_table[mb_xy] |= 1 << error_type;
1080 }
1081
1082 if (error & VP_START)
1083 distance = 9999999;
1084 }
1085 }
1086#endif
1087
1088 /* forward mark errors */
1089 error = 0;
1090 for (i = 0; i < s->mb_num; i++) {
1091 const int mb_xy = s->mb_index2xy[i];
1092 int old_error = s->error_status_table[mb_xy];
1093
1094 if (old_error & VP_START) {
1095 error = old_error & ER_MB_ERROR;
1096 } else {
1097 error |= old_error & ER_MB_ERROR;
1098 s->error_status_table[mb_xy] |= error;
1099 }
1100 }
1101#if 1
1102 /* handle not partitioned case */
1103 if (!s->partitioned_frame) {
1104 for (i = 0; i < s->mb_num; i++) {
1105 const int mb_xy = s->mb_index2xy[i];
1106 int error = s->error_status_table[mb_xy];
1107 if (error & ER_MB_ERROR)
1108 error |= ER_MB_ERROR;
1109 s->error_status_table[mb_xy] = error;
1110 }
1111 }
1112#endif
1113
1114 dc_error = ac_error = mv_error = 0;
1115 for (i = 0; i < s->mb_num; i++) {
1116 const int mb_xy = s->mb_index2xy[i];
1117 int error = s->error_status_table[mb_xy];
1118 if (error & ER_DC_ERROR)
1119 dc_error++;
1120 if (error & ER_AC_ERROR)
1121 ac_error++;
1122 if (error & ER_MV_ERROR)
1123 mv_error++;
1124 }
1125 av_log(s->avctx, AV_LOG_INFO, "concealing %d DC, %d AC, %d MV errors in %c frame\n",
1126 dc_error, ac_error, mv_error, av_get_picture_type_char(s->cur_pic.f->pict_type));
1127
1128 is_intra_likely = is_intra_more_likely(s);
1129
1130 /* set unknown mb-type to most likely */
1131 for (i = 0; i < s->mb_num; i++) {
1132 const int mb_xy = s->mb_index2xy[i];
1133 int error = s->error_status_table[mb_xy];
1134 if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
1135 continue;
1136
1137 if (is_intra_likely)
1138 s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
1139 else
1140 s->cur_pic.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
1141 }
1142
1143 // change inter to intra blocks if no reference frames are available
1144 if (!(s->last_pic.f && s->last_pic.f->data[0]) &&
1145 !(s->next_pic.f && s->next_pic.f->data[0]))
1146 for (i = 0; i < s->mb_num; i++) {
1147 const int mb_xy = s->mb_index2xy[i];
1148 if (!IS_INTRA(s->cur_pic.mb_type[mb_xy]))
1149 s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
1150 }
1151
1152 /* handle inter blocks with damaged AC */
1153 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1154 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1155 const int mb_xy = mb_x + mb_y * s->mb_stride;
1156 const int mb_type = s->cur_pic.mb_type[mb_xy];
1157 const int dir = !(s->last_pic.f && s->last_pic.f->data[0]);
1158 const int mv_dir = dir ? MV_DIR_BACKWARD : MV_DIR_FORWARD;
1159 int mv_type;
1160
1161 int error = s->error_status_table[mb_xy];
1162
1163 if (IS_INTRA(mb_type))
1164 continue; // intra
1165 if (error & ER_MV_ERROR)
1166 continue; // inter with damaged MV
1167 if (!(error & ER_AC_ERROR))
1168 continue; // undamaged inter
1169
1170 if (IS_8X8(mb_type)) {
1171 int mb_index = mb_x * 2 + mb_y * 2 * s->b8_stride;
1172 int j;
1173 mv_type = MV_TYPE_8X8;
1174 for (j = 0; j < 4; j++) {
1175 s->mv[0][j][0] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][0];
1176 s->mv[0][j][1] = s->cur_pic.motion_val[dir][mb_index + (j & 1) + (j >> 1) * s->b8_stride][1];
1177 }
1178 } else {
1179 mv_type = MV_TYPE_16X16;
1180 s->mv[0][0][0] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][0];
1181 s->mv[0][0][1] = s->cur_pic.motion_val[dir][mb_x * 2 + mb_y * 2 * s->b8_stride][1];
1182 }
1183
1184 s->decode_mb(s->opaque, 0 /* FIXME H.264 partitioned slices need this set */,
1185 mv_dir, mv_type, &s->mv, mb_x, mb_y, 0, 0);
1186 }
1187 }
1188
1189 /* guess MVs */
1190 if (s->cur_pic.f->pict_type == AV_PICTURE_TYPE_B) {
1191 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1192 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1193 int xy = mb_x * 2 + mb_y * 2 * s->b8_stride;
1194 const int mb_xy = mb_x + mb_y * s->mb_stride;
1195 const int mb_type = s->cur_pic.mb_type[mb_xy];
1196 int mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
1197
1198 int error = s->error_status_table[mb_xy];
1199
1200 if (IS_INTRA(mb_type))
1201 continue;
1202 if (!(error & ER_MV_ERROR))
1203 continue; // inter with undamaged MV
1204 if (!(error & ER_AC_ERROR))
1205 continue; // undamaged inter
1206
1207 if (!(s->last_pic.f && s->last_pic.f->data[0]))
1208 mv_dir &= ~MV_DIR_FORWARD;
1209 if (!(s->next_pic.f && s->next_pic.f->data[0]))
1210 mv_dir &= ~MV_DIR_BACKWARD;
1211
1212 if (s->pp_time) {
1213 int time_pp = s->pp_time;
1214 int time_pb = s->pb_time;
1215
1216 av_assert0(s->avctx->codec_id != AV_CODEC_ID_H264);
1217 ff_thread_await_progress(s->next_pic.tf, mb_y, 0);
1218
1219 s->mv[0][0][0] = s->next_pic.motion_val[0][xy][0] * time_pb / time_pp;
1220 s->mv[0][0][1] = s->next_pic.motion_val[0][xy][1] * time_pb / time_pp;
1221 s->mv[1][0][0] = s->next_pic.motion_val[0][xy][0] * (time_pb - time_pp) / time_pp;
1222 s->mv[1][0][1] = s->next_pic.motion_val[0][xy][1] * (time_pb - time_pp) / time_pp;
1223 } else {
1224 s->mv[0][0][0] = 0;
1225 s->mv[0][0][1] = 0;
1226 s->mv[1][0][0] = 0;
1227 s->mv[1][0][1] = 0;
1228 }
1229
1230 s->decode_mb(s->opaque, 0, mv_dir, MV_TYPE_16X16, &s->mv,
1231 mb_x, mb_y, 0, 0);
1232 }
1233 }
1234 } else
1235 guess_mv(s);
1236
1237 /* the filters below manipulate raw image, skip them */
1238 if (CONFIG_XVMC && s->avctx->hwaccel && s->avctx->hwaccel->decode_mb)
1239 goto ec_clean;
1240 /* fill DC for inter blocks */
1241 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1242 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1243 int dc, dcu, dcv, y, n;
1244 int16_t *dc_ptr;
1245 uint8_t *dest_y, *dest_cb, *dest_cr;
1246 const int mb_xy = mb_x + mb_y * s->mb_stride;
1247 const int mb_type = s->cur_pic.mb_type[mb_xy];
1248
1249 // error = s->error_status_table[mb_xy];
1250
1251 if (IS_INTRA(mb_type) && s->partitioned_frame)
1252 continue;
1253 // if (error & ER_MV_ERROR)
1254 // continue; // inter data damaged FIXME is this good?
1255
1256 dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
1257 dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1];
1258 dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2];
1259
1260 dc_ptr = &s->dc_val[0][mb_x * 2 + mb_y * 2 * s->b8_stride];
1261 for (n = 0; n < 4; n++) {
1262 dc = 0;
1263 for (y = 0; y < 8; y++) {
1264 int x;
1265 for (x = 0; x < 8; x++)
1266 dc += dest_y[x + (n & 1) * 8 +
1267 (y + (n >> 1) * 8) * linesize[0]];
1268 }
1269 dc_ptr[(n & 1) + (n >> 1) * s->b8_stride] = (dc + 4) >> 3;
1270 }
1271
1272 if (!s->cur_pic.f->data[2])
1273 continue;
1274
1275 dcu = dcv = 0;
1276 for (y = 0; y < 8; y++) {
1277 int x;
1278 for (x = 0; x < 8; x++) {
1279 dcu += dest_cb[x + y * linesize[1]];
1280 dcv += dest_cr[x + y * linesize[2]];
1281 }
1282 }
1283 s->dc_val[1][mb_x + mb_y * s->mb_stride] = (dcu + 4) >> 3;
1284 s->dc_val[2][mb_x + mb_y * s->mb_stride] = (dcv + 4) >> 3;
1285 }
1286 }
1287#if 1
1288 /* guess DC for damaged blocks */
1289 guess_dc(s, s->dc_val[0], s->mb_width*2, s->mb_height*2, s->b8_stride, 1);
1290 guess_dc(s, s->dc_val[1], s->mb_width , s->mb_height , s->mb_stride, 0);
1291 guess_dc(s, s->dc_val[2], s->mb_width , s->mb_height , s->mb_stride, 0);
1292#endif
1293
1294 /* filter luma DC */
1295 filter181(s->dc_val[0], s->mb_width * 2, s->mb_height * 2, s->b8_stride);
1296
1297#if 1
1298 /* render DC only intra */
1299 for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
1300 for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
1301 uint8_t *dest_y, *dest_cb, *dest_cr;
1302 const int mb_xy = mb_x + mb_y * s->mb_stride;
1303 const int mb_type = s->cur_pic.mb_type[mb_xy];
1304
1305 int error = s->error_status_table[mb_xy];
1306
1307 if (IS_INTER(mb_type))
1308 continue;
1309 if (!(error & ER_AC_ERROR))
1310 continue; // undamaged
1311
1312 dest_y = s->cur_pic.f->data[0] + mb_x * 16 + mb_y * 16 * linesize[0];
1313 dest_cb = s->cur_pic.f->data[1] + mb_x * 8 + mb_y * 8 * linesize[1];
1314 dest_cr = s->cur_pic.f->data[2] + mb_x * 8 + mb_y * 8 * linesize[2];
1315 if (!s->cur_pic.f->data[2])
1316 dest_cb = dest_cr = NULL;
1317
1318 put_dc(s, dest_y, dest_cb, dest_cr, mb_x, mb_y);
1319 }
1320 }
1321#endif
1322
1323 if (s->avctx->error_concealment & FF_EC_DEBLOCK) {
1324 /* filter horizontal block boundaries */
1325 h_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
1326 s->mb_height * 2, linesize[0], 1);
1327
1328 /* filter vertical block boundaries */
1329 v_block_filter(s, s->cur_pic.f->data[0], s->mb_width * 2,
1330 s->mb_height * 2, linesize[0], 1);
1331
1332 if (s->cur_pic.f->data[2]) {
1333 h_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
1334 s->mb_height, linesize[1], 0);
1335 h_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
1336 s->mb_height, linesize[2], 0);
1337 v_block_filter(s, s->cur_pic.f->data[1], s->mb_width,
1338 s->mb_height, linesize[1], 0);
1339 v_block_filter(s, s->cur_pic.f->data[2], s->mb_width,
1340 s->mb_height, linesize[2], 0);
1341 }
1342 }
1343
1344ec_clean:
1345 /* clean a few tables */
1346 for (i = 0; i < s->mb_num; i++) {
1347 const int mb_xy = s->mb_index2xy[i];
1348 int error = s->error_status_table[mb_xy];
1349
1350 if (s->mbskip_table && s->cur_pic.f->pict_type != AV_PICTURE_TYPE_B &&
1351 (error & (ER_DC_ERROR | ER_MV_ERROR | ER_AC_ERROR))) {
1352 s->mbskip_table[mb_xy] = 0;
1353 }
1354 if (s->mbintra_table)
1355 s->mbintra_table[mb_xy] = 1;
1356 }
1357
1358 for (i = 0; i < 2; i++) {
1359 av_buffer_unref(&s->ref_index_buf[i]);
1360 av_buffer_unref(&s->motion_val_buf[i]);
1361 s->cur_pic.ref_index[i] = NULL;
1362 s->cur_pic.motion_val[i] = NULL;
1363 }
1364
1365 memset(&s->cur_pic, 0, sizeof(ERPicture));
1366 memset(&s->last_pic, 0, sizeof(ERPicture));
1367 memset(&s->next_pic, 0, sizeof(ERPicture));
1368}
1369