/*
 * Copyright (C) 2004 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (C) 2006 Robert Edele <yartrebo@earthlink.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_SNOW_H
#define AVCODEC_SNOW_H

#include "libavutil/motion_vector.h"

#include "hpeldsp.h"
#include "me_cmp.h"
#include "qpeldsp.h"
#include "snow_dwt.h"

#include "rangecoder.h"
#include "mathops.h"

#define FF_MPV_OFFSET(x) (offsetof(MpegEncContext, x) + offsetof(SnowContext, m))
#include "mpegvideo.h"
#include "h264qpel.h"

#define MID_STATE 128

#define MAX_PLANES 4
#define QSHIFT 5
#define QROOT (1<<QSHIFT)
#define LOSSLESS_QLOG -128
#define FRAC_BITS 4
#define MAX_REF_FRAMES 8

#define LOG2_OBMC_MAX 8
#define OBMC_MAX (1<<(LOG2_OBMC_MAX))
typedef struct BlockNode{
    int16_t mx;                 ///< Motion vector component X, see mv_scale
    int16_t my;                 ///< Motion vector component Y, see mv_scale
    uint8_t ref;                ///< Reference frame index
    uint8_t color[3];           ///< Color for intra
    uint8_t type;               ///< Bitfield of BLOCK_*
//#define TYPE_SPLIT    1
#define BLOCK_INTRA   1         ///< Intra block, inter otherwise
#define BLOCK_OPT     2         ///< Block needs no checks in this round of iterative motion estimation
//#define TYPE_NOCOLOR  4
    uint8_t level; //FIXME merge into type?
}BlockNode;

static const BlockNode null_block= { //FIXME add border maybe
    .color= {128,128,128},
    .mx= 0,
    .my= 0,
    .ref= 0,
    .type= 0,
    .level= 0,
};

#define LOG2_MB_SIZE 4
#define MB_SIZE (1<<LOG2_MB_SIZE)
#define ENCODER_EXTRA_BITS 4
#define HTAPS_MAX 8

typedef struct x_and_coeff{
    int16_t x;
    uint16_t coeff;
} x_and_coeff;

typedef struct SubBand{
    int level;
    int stride;
    int width;
    int height;
    int qlog;                   ///< log(qscale)/log[2^(1/6)]
    DWTELEM *buf;
    IDWTELEM *ibuf;
    int buf_x_offset;
    int buf_y_offset;
    int stride_line;            ///< Stride measured in lines, not pixels.
    x_and_coeff * x_coeff;
    struct SubBand *parent;
    uint8_t state[/*7*2*/ 7 + 512][32];
}SubBand;

typedef struct Plane{
    int width;
    int height;
    SubBand band[MAX_DECOMPOSITIONS][4];

    int htaps;
    int8_t hcoeff[HTAPS_MAX/2];
    int diag_mc;
    int fast_mc;

    int last_htaps;
    int8_t last_hcoeff[HTAPS_MAX/2];
    int last_diag_mc;
}Plane;

typedef struct SnowContext{
    AVClass *class;
    AVCodecContext *avctx;
    RangeCoder c;
    MECmpContext mecc;
    HpelDSPContext hdsp;
    QpelDSPContext qdsp;
    VideoDSPContext vdsp;
    H264QpelContext h264qpel;
    MpegvideoEncDSPContext mpvencdsp;
    SnowDWTContext dwt;
    AVFrame *input_picture;              ///< new_picture with the internal linesizes
    AVFrame *current_picture;
    AVFrame *last_picture[MAX_REF_FRAMES];
    uint8_t *halfpel_plane[MAX_REF_FRAMES][4][4];
    AVFrame *mconly_picture;
//     uint8_t q_context[16];
    uint8_t header_state[32];
    uint8_t block_state[128 + 32*128];
    int keyframe;
    int always_reset;
    int version;
    int spatial_decomposition_type;
    int last_spatial_decomposition_type;
    int temporal_decomposition_type;
    int spatial_decomposition_count;
    int last_spatial_decomposition_count;
    int temporal_decomposition_count;
    int max_ref_frames;
    int ref_frames;
    int16_t (*ref_mvs[MAX_REF_FRAMES])[2];
    uint32_t *ref_scores[MAX_REF_FRAMES];
    DWTELEM *spatial_dwt_buffer;
    DWTELEM *temp_dwt_buffer;
    IDWTELEM *spatial_idwt_buffer;
    IDWTELEM *temp_idwt_buffer;
    int *run_buffer;
    int colorspace_type;
    int chroma_h_shift;
    int chroma_v_shift;
    int spatial_scalability;
    int qlog;
    int last_qlog;
    int lambda;
    int lambda2;
    int pass1_rc;
    int mv_scale;
    int last_mv_scale;
    int qbias;
    int last_qbias;
#define QBIAS_SHIFT 3
    int b_width;
    int b_height;
    int block_max_depth;
    int last_block_max_depth;
    int nb_planes;
    Plane plane[MAX_PLANES];
    BlockNode *block;
#define ME_CACHE_SIZE 1024
    unsigned me_cache[ME_CACHE_SIZE];
    unsigned me_cache_generation;
    slice_buffer sb;
    int memc_only;
    int no_bitstream;
    int intra_penalty;
    int motion_est;
    int iterative_dia_size;
    int scenechange_threshold;
    MpegEncContext m; // needed for motion estimation; should not be used for anything else. The plan is to eventually make motion estimation independent of MpegEncContext, at which point this field will be removed (FIXME/XXX)

    uint8_t *scratchbuf;
    uint8_t *emu_edge_buffer;

    AVMotionVector *avmv;
    int avmv_index;
    uint64_t encoding_error[AV_NUM_DATA_POINTERS];

    int pred;
}SnowContext;

/* Tables */
extern const uint8_t * const ff_obmc_tab[4];
extern uint8_t ff_qexp[QROOT];
extern int ff_scale_mv_ref[MAX_REF_FRAMES][MAX_REF_FRAMES];

/* C bits used by mmx/sse2/altivec */
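/**
 * Prepare in-place interleaving of the low- and high-pass halves of one line
 * for the horizontal inverse DWT: initializes *i for
 * snow_interleave_line_footer() and handles the odd-width tail sample.
 */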
static av_always_inline void snow_interleave_line_header(int * i, int width, IDWTELEM * low, IDWTELEM * high){
    (*i) = (width) - 2;

    if (width & 1){
        low[(*i)+1] = low[((*i)+1)>>1];
        (*i)--;
    }
}

static av_always_inline void snow_interleave_line_footer(int * i, IDWTELEM * low, IDWTELEM * high){
    for (; (*i)>=0; (*i)-=2){
        low[(*i)+1] = high[(*i)>>1];
        low[*i] = low[(*i)>>1];
    }
}
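/**
 * Scalar lead-out of a horizontal lifting step: processes the samples the
 * SIMD main loop left over and, when needed, the mirrored right-edge sample.
 * The liftS variant below does the same for the scaled lifting step.
 */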
static av_always_inline void snow_horizontal_compose_lift_lead_out(int i, IDWTELEM * dst, IDWTELEM * src, IDWTELEM * ref, int width, int w, int lift_high, int mul, int add, int shift){
    for(; i<w; i++){
        dst[i] = src[i] - ((mul * (ref[i] + ref[i + 1]) + add) >> shift);
    }

    if((width^lift_high)&1){
        dst[w] = src[w] - ((mul * 2 * ref[w] + add) >> shift);
    }
}

static av_always_inline void snow_horizontal_compose_liftS_lead_out(int i, IDWTELEM * dst, IDWTELEM * src, IDWTELEM * ref, int width, int w){
    for(; i<w; i++){
        dst[i] = src[i] + ((ref[i] + ref[(i+1)]+W_BO + 4 * src[i]) >> W_BS);
    }

    if(width&1){
        dst[w] = src[w] + ((2 * ref[w] + W_BO + 4 * src[w]) >> W_BS);
    }
}

/* common code */

int ff_snow_common_init(AVCodecContext *avctx);
int ff_snow_common_init_after_header(AVCodecContext *avctx);
void ff_snow_common_end(SnowContext *s);
void ff_snow_release_buffer(AVCodecContext *avctx);
void ff_snow_reset_contexts(SnowContext *s);
int ff_snow_alloc_blocks(SnowContext *s);
int ff_snow_frame_start(SnowContext *s);
void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, ptrdiff_t stride,
                        int sx, int sy, int b_w, int b_h, const BlockNode *block,
                        int plane_index, int w, int h);
int ff_snow_get_buffer(SnowContext *s, AVFrame *frame);

/* common inline functions */
//XXX doublecheck all of them should stay inlined
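/**
 * Median-predict a motion vector from the left, top and top-right neighbours.
 * With more than one reference frame in use, the neighbour vectors are first
 * rescaled to the current block's reference via ff_scale_mv_ref
 * (8-bit fixed-point factors).
 */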
static inline void pred_mv(SnowContext *s, int *mx, int *my, int ref,
                           const BlockNode *left, const BlockNode *top, const BlockNode *tr){
    if(s->ref_frames == 1){
        *mx = mid_pred(left->mx, top->mx, tr->mx);
        *my = mid_pred(left->my, top->my, tr->my);
    }else{
        const int *scale = ff_scale_mv_ref[ref];
        *mx = mid_pred((left->mx * scale[left->ref] + 128) >>8,
                       (top ->mx * scale[top ->ref] + 128) >>8,
                       (tr  ->mx * scale[tr  ->ref] + 128) >>8);
        *my = mid_pred((left->my * scale[left->ref] + 128) >>8,
                       (top ->my * scale[top ->ref] + 128) >>8,
                       (tr  ->my * scale[tr  ->ref] + 128) >>8);
    }
}
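/**
 * Return nonzero if two block nodes yield the same prediction:
 * equal colour for two intra blocks, otherwise equal mv/ref and intra flag.
 */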
static av_always_inline int same_block(BlockNode *a, BlockNode *b){
    if((a->type&BLOCK_INTRA) && (b->type&BLOCK_INTRA)){
        return !((a->color[0] - b->color[0]) | (a->color[1] - b->color[1]) | (a->color[2] - b->color[2]));
    }else{
        return !((a->mx - b->mx) | (a->my - b->my) | (a->ref - b->ref) | ((a->type ^ b->type)&BLOCK_INTRA));
    }
}

//FIXME name cleanup (b_w, block_w, b_width stuff)
//XXX should we really inline it?
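/**
 * Overlapped block motion compensation for one block position: predict from
 * the four surrounding block nodes (reusing identical predictions), weight
 * them with the OBMC window obmc, and either add the rounded, clipped result
 * to dst8 or subtract it from the IDWTELEM buffer dst.
 */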
static av_always_inline void add_yblock(SnowContext *s, int sliced, slice_buffer *sb, IDWTELEM *dst, uint8_t *dst8, const uint8_t *obmc, int src_x, int src_y, int b_w, int b_h, int w, int h, int dst_stride, int src_stride, int obmc_stride, int b_x, int b_y, int add, int offset_dst, int plane_index){
    const int b_width = s->b_width  << s->block_max_depth;
    const int b_height= s->b_height << s->block_max_depth;
    const int b_stride= b_width;
    BlockNode *lt= &s->block[b_x + b_y*b_stride];
    BlockNode *rt= lt+1;
    BlockNode *lb= lt+b_stride;
    BlockNode *rb= lb+1;
    uint8_t *block[4];
    // When src_stride is large enough, it is possible to interleave the blocks.
    // Otherwise the blocks are written sequentially in the tmp buffer.
    int tmp_step= src_stride >= 7*MB_SIZE ? MB_SIZE : MB_SIZE*src_stride;
    uint8_t *tmp = s->scratchbuf;
    uint8_t *ptmp;
    int x,y;

    if(b_x<0){
        lt= rt;
        lb= rb;
    }else if(b_x + 1 >= b_width){
        rt= lt;
        rb= lb;
    }
    if(b_y<0){
        lt= lb;
        rt= rb;
    }else if(b_y + 1 >= b_height){
        lb= lt;
        rb= rt;
    }

    if(src_x<0){ //FIXME merge with prev & always round internal width up to *16
        obmc -= src_x;
        b_w += src_x;
        if(!sliced && !offset_dst)
            dst -= src_x;
        src_x=0;
    }
    if(src_x + b_w > w){
        b_w = w - src_x;
    }
    if(src_y<0){
        obmc -= src_y*obmc_stride;
        b_h += src_y;
        if(!sliced && !offset_dst)
            dst -= src_y*dst_stride;
        src_y=0;
    }
    if(src_y + b_h> h){
        b_h = h - src_y;
    }

    if(b_w<=0 || b_h<=0) return;

    if(!sliced && offset_dst)
        dst += src_x + src_y*dst_stride;
    dst8+= src_x + src_y*src_stride;
//    src += src_x + src_y*src_stride;

    ptmp= tmp + 3*tmp_step;
    block[0]= ptmp;
    ptmp+=tmp_step;
    ff_snow_pred_block(s, block[0], tmp, src_stride, src_x, src_y, b_w, b_h, lt, plane_index, w, h);

    if(same_block(lt, rt)){
        block[1]= block[0];
    }else{
        block[1]= ptmp;
        ptmp+=tmp_step;
        ff_snow_pred_block(s, block[1], tmp, src_stride, src_x, src_y, b_w, b_h, rt, plane_index, w, h);
    }

    if(same_block(lt, lb)){
        block[2]= block[0];
    }else if(same_block(rt, lb)){
        block[2]= block[1];
    }else{
        block[2]= ptmp;
        ptmp+=tmp_step;
        ff_snow_pred_block(s, block[2], tmp, src_stride, src_x, src_y, b_w, b_h, lb, plane_index, w, h);
    }

    if(same_block(lt, rb) ){
        block[3]= block[0];
    }else if(same_block(rt, rb)){
        block[3]= block[1];
    }else if(same_block(lb, rb)){
        block[3]= block[2];
    }else{
        block[3]= ptmp;
        ff_snow_pred_block(s, block[3], tmp, src_stride, src_x, src_y, b_w, b_h, rb, plane_index, w, h);
    }
    if(sliced){
        s->dwt.inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    }else{
        for(y=0; y<b_h; y++){
            //FIXME ugly misuse of obmc_stride
            const uint8_t *obmc1= obmc + y*obmc_stride;
            const uint8_t *obmc2= obmc1+ (obmc_stride>>1);
            const uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
            const uint8_t *obmc4= obmc3+ (obmc_stride>>1);
            for(x=0; x<b_w; x++){
                int v=   obmc1[x] * block[3][x + y*src_stride]
                        +obmc2[x] * block[2][x + y*src_stride]
                        +obmc3[x] * block[1][x + y*src_stride]
                        +obmc4[x] * block[0][x + y*src_stride];

                v <<= 8 - LOG2_OBMC_MAX;
                if(FRAC_BITS != 8){
                    v >>= 8 - FRAC_BITS;
                }
                if(add){
                    v += dst[x + y*dst_stride];
                    v = (v + (1<<(FRAC_BITS-1))) >> FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*src_stride] = v;
                }else{
                    dst[x + y*dst_stride] -= v;
                }
            }
        }
    }
}
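/**
 * Motion-compensate one row of macroblocks of a plane using add_yblock().
 * For keyframes (or with the debug flag set) no motion compensation is done;
 * only the +128 sample bias is added or removed.
 */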
static av_always_inline void predict_slice(SnowContext *s, IDWTELEM *buf, int plane_index, int add, int mb_y){
    Plane *p= &s->plane[plane_index];
    const int mb_w= s->b_width  << s->block_max_depth;
    const int mb_h= s->b_height << s->block_max_depth;
    int x, y, mb_x;
    int block_size = MB_SIZE >> s->block_max_depth;
    int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
    int block_h    = plane_index ? block_size>>s->chroma_v_shift : block_size;
    const uint8_t *obmc  = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
    const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
    int ref_stride= s->current_picture->linesize[plane_index];
    uint8_t *dst8= s->current_picture->data[plane_index];
    int w= p->width;
    int h= p->height;
    av_assert2(s->chroma_h_shift == s->chroma_v_shift); // obmc params assume squares
    if(s->keyframe || (s->avctx->debug&512)){
        if(mb_y==mb_h)
            return;

        if(add){
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
                for(x=0; x<w; x++){
                    int v= buf[x + y*w] + (128<<FRAC_BITS) + (1<<(FRAC_BITS-1));
                    v >>= FRAC_BITS;
                    if(v&(~255)) v= ~(v>>31);
                    dst8[x + y*ref_stride]= v;
                }
            }
        }else{
            for(y=block_h*mb_y; y<FFMIN(h,block_h*(mb_y+1)); y++){
                for(x=0; x<w; x++){
                    buf[x + y*w]-= 128<<FRAC_BITS;
                }
            }
        }

        return;
    }

    for(mb_x=0; mb_x<=mb_w; mb_x++){
        add_yblock(s, 0, NULL, buf, dst8, obmc,
                   block_w*mb_x - block_w/2,
                   block_h*mb_y - block_h/2,
                   block_w, block_h,
                   w, h,
                   w, ref_stride, obmc_stride,
                   mb_x - 1, mb_y - 1,
                   add, 1, plane_index);
    }
}
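/**
 * Apply predict_slice() to every block row of the plane.
 */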
static av_always_inline void predict_plane(SnowContext *s, IDWTELEM *buf, int plane_index, int add){
    const int mb_h= s->b_height << s->block_max_depth;
    int mb_y;
    for(mb_y=0; mb_y<=mb_h; mb_y++)
        predict_slice(s, buf, plane_index, add, mb_y);
}
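/**
 * Write one BlockNode into all leaf entries of the block array covered by a
 * node at the given level, i.e. a (1 << rem_depth) x (1 << rem_depth) square.
 */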
static inline void set_blocks(SnowContext *s, int level, int x, int y, int l, int cb, int cr, int mx, int my, int ref, int type){
    const int w= s->b_width << s->block_max_depth;
    const int rem_depth= s->block_max_depth - level;
    const int index= (x + y*w) << rem_depth;
    const int block_w= 1<<rem_depth;
    const int block_h= 1<<rem_depth; //FIXME "w!=h"
    BlockNode block;
    int i,j;

    block.color[0]= l;
    block.color[1]= cb;
    block.color[2]= cr;
    block.mx= mx;
    block.my= my;
    block.ref= ref;
    block.type= type;
    block.level= level;

    for(j=0; j<block_h; j++){
        for(i=0; i<block_w; i++){
            s->block[index + i + j*w]= block;
        }
    }
}
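/**
 * Set up the MotionEstContext source and reference plane pointers for motion
 * estimation of the block at (x, y); the reference planes are offset to the
 * block position, with chroma scaled by the subsampling shift.
 */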
static inline void init_ref(MotionEstContext *c, uint8_t *src[3], uint8_t *ref[3], uint8_t *ref2[3], int x, int y, int ref_index){
    SnowContext *s = c->avctx->priv_data;
    const int offset[3]= {
          y*c->  stride + x,
        ((y*c->uvstride + x)>>s->chroma_h_shift),
        ((y*c->uvstride + x)>>s->chroma_h_shift),
    };
    int i;
    for(i=0; i<3; i++){
        c->src[0][i]= src [i];
        c->ref[0][i]= ref [i] + offset[i];
    }
    av_assert2(!ref_index);
}

/* bitstream functions */

extern const int8_t ff_quant3bA[256];

#define QEXPSHIFT (7-FRAC_BITS+8) //FIXME try to change this to 0
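/**
 * Write an (optionally signed) integer with the range coder: a zero flag,
 * the exponent in unary with one context per position (shared above 9),
 * the remaining mantissa bits, and finally the sign.
 */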
static inline void put_symbol(RangeCoder *c, uint8_t *state, int v, int is_signed){
    int i;

    if(v){
        const int a= FFABS(v);
        const int e= av_log2(a);
        const int el= FFMIN(e, 10);
        put_rac(c, state+0, 0);

        for(i=0; i<el; i++){
            put_rac(c, state+1+i, 1);  //1..10
        }
        for(; i<e; i++){
            put_rac(c, state+1+9, 1);  //1..10
        }
        put_rac(c, state+1+FFMIN(i,9), 0);

        for(i=e-1; i>=el; i--){
            put_rac(c, state+22+9, (a>>i)&1); //22..31
        }
        for(; i>=0; i--){
            put_rac(c, state+22+i, (a>>i)&1); //22..31
        }

        if(is_signed)
            put_rac(c, state+11 + el, v < 0); //11..21
    }else{
        put_rac(c, state+0, 1);
    }
}
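/**
 * Inverse of put_symbol(): read the zero flag, unary exponent, mantissa bits
 * and sign; returns AVERROR_INVALIDDATA if the exponent exceeds 31.
 */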
static inline int get_symbol(RangeCoder *c, uint8_t *state, int is_signed){
    if(get_rac(c, state+0))
        return 0;
    else{
        int i, e, a;
        e= 0;
        while(get_rac(c, state+1 + FFMIN(e,9))){ //1..10
            e++;
            if (e > 31)
                return AVERROR_INVALIDDATA;
        }

        a= 1;
        for(i=e-1; i>=0; i--){
            a += a + get_rac(c, state+22 + FFMIN(i,9)); //22..31
        }

        e= -(is_signed && get_rac(c, state+11 + FFMIN(e,10))); //11..21
        return (a^e)-e;
    }
}
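/**
 * Write a non-negative integer with an adaptive escape code: while v is at
 * least the current threshold r (1 << log2, or 1 while log2 is negative),
 * emit a 1, subtract r and grow the threshold; then emit a 0 followed by the
 * remaining bits of v.
 */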
static inline void put_symbol2(RangeCoder *c, uint8_t *state, int v, int log2){
    int i;
    int r= log2>=0 ? 1<<log2 : 1;

    av_assert2(v>=0);
    av_assert2(log2>=-4);

    while(v >= r){
        put_rac(c, state+4+log2, 1);
        v -= r;
        log2++;
        if(log2>0) r+=r;
    }
    put_rac(c, state+4+log2, 0);

    for(i=log2-1; i>=0; i--){
        put_rac(c, state+31-i, (v>>i)&1);
    }
}
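/**
 * Inverse of put_symbol2(): accumulate the escape thresholds while 1 bits are
 * read (capped at log2 == 28), then read the remaining bits of the value.
 */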
static inline int get_symbol2(RangeCoder *c, uint8_t *state, int log2){
    int i;
    int r= log2>=0 ? 1<<log2 : 1;
    int v=0;

    av_assert2(log2>=-4);

    while(log2<28 && get_rac(c, state+4+log2)){
        v+= r;
        log2++;
        if(log2>0) r+=r;
    }

    for(i=log2-1; i>=0; i--){
        v+= get_rac(c, state+31-i)<<i;
    }

    return v;
}
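/**
 * Decode the coefficients of one subband into the run-length x_and_coeff
 * list: significance is coded with the left/top/top-right neighbours and the
 * co-located parent coefficient as context, zero runs are skipped via coded
 * run lengths, and each row is terminated by an end marker.
 */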
static inline void unpack_coeffs(SnowContext *s, SubBand *b, SubBand * parent, int orientation){
    const int w= b->width;
    const int h= b->height;
    int x,y;

    int run, runs;
    x_and_coeff *xc= b->x_coeff;
    x_and_coeff *prev_xc= NULL;
    x_and_coeff *prev2_xc= xc;
    x_and_coeff *parent_xc= parent ? parent->x_coeff : NULL;
    x_and_coeff *prev_parent_xc= parent_xc;

    runs= get_symbol2(&s->c, b->state[30], 0);
    if(runs-- > 0) run= get_symbol2(&s->c, b->state[1], 3);
    else           run= INT_MAX;

    for(y=0; y<h; y++){
        int v=0;
        int lt=0, t=0, rt=0;

        if(y && prev_xc->x == 0){
            rt= prev_xc->coeff;
        }
        for(x=0; x<w; x++){
            int p=0;
            const int l= v;

            lt= t; t= rt;

            if(y){
                if(prev_xc->x <= x)
                    prev_xc++;
                if(prev_xc->x == x + 1)
                    rt= prev_xc->coeff;
                else
                    rt=0;
            }
            if(parent_xc){
                if(x>>1 > parent_xc->x){
                    parent_xc++;
                }
                if(x>>1 == parent_xc->x){
                    p= parent_xc->coeff;
                }
            }
            if(/*ll|*/l|lt|t|rt|p){
                int context= av_log2(/*FFABS(ll) + */3*(l>>1) + (lt>>1) + (t&~1) + (rt>>1) + (p>>1));

                v=get_rac(&s->c, &b->state[0][context]);
                if(v){
                    v= 2*(get_symbol2(&s->c, b->state[context + 2], context-4) + 1);
                    v+=get_rac(&s->c, &b->state[0][16 + 1 + 3 + ff_quant3bA[l&0xFF] + 3*ff_quant3bA[t&0xFF]]);
                    if ((uint16_t)v != v) {
                        av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
                        v = 1;
                    }
                    xc->x=x;
                    (xc++)->coeff= v;
                }
            }else{
                if(!run){
                    if(runs-- > 0) run= get_symbol2(&s->c, b->state[1], 3);
                    else           run= INT_MAX;
                    v= 2*(get_symbol2(&s->c, b->state[0 + 2], 0-4) + 1);
                    v+=get_rac(&s->c, &b->state[0][16 + 1 + 3]);
                    if ((uint16_t)v != v) {
                        av_log(s->avctx, AV_LOG_ERROR, "Coefficient damaged\n");
                        v = 1;
                    }

                    xc->x=x;
                    (xc++)->coeff= v;
                }else{
                    int max_run;
                    run--;
                    v=0;
                    av_assert2(run >= 0);
                    if(y) max_run= FFMIN(run, prev_xc->x - x - 2);
                    else  max_run= FFMIN(run, w-x-1);
                    if(parent_xc)
                        max_run= FFMIN(max_run, 2*parent_xc->x - x - 1);
                    av_assert2(max_run >= 0 && max_run <= run);

                    x+= max_run;
                    run-= max_run;
                }
            }
        }
        (xc++)->x= w+1; //end marker
        prev_xc= prev2_xc;
        prev2_xc= xc;

        if(parent_xc){
            if(y&1){
                while(parent_xc->x != parent->width+1)
                    parent_xc++;
                parent_xc++;
                prev_parent_xc= parent_xc;
            }else{
                parent_xc= prev_parent_xc;
            }
        }
    }

    (xc++)->x= w+1; //end marker
}

#endif /* AVCODEC_SNOW_H */