libavcodec/svq3.c
1/*
2 * Copyright (c) 2003 The FFmpeg Project
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21/*
22 * How to use this decoder:
23 * SVQ3 data is transported within Apple QuickTime files. QuickTime files
24 * have stsd atoms to describe media trak properties. A stsd atom for a
25 * video trak contains 1 or more ImageDescription atoms. These atoms begin
26 * with the 4-byte length of the atom followed by the codec fourcc. Some
27 * decoders need information in this atom to operate correctly. Such
28 * is the case with SVQ3. In order to get the best use out of this decoder,
29 * the calling app must make the SVQ3 ImageDescription atom available
30 * via the AVCodecContext's extradata[_size] field:
31 *
32 * AVCodecContext.extradata = pointer to ImageDescription, first characters
33 * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34 * AVCodecContext.extradata_size = size of ImageDescription atom memory
35 * buffer (which will be the same as the ImageDescription atom size field
36 * from the QT file, minus 4 bytes since the length is missing)
37 *
38 * You will know you have these parameters passed correctly when the decoder
39 * correctly decodes this file:
40 * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41 */
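/*
 * Illustrative sketch only (not taken from this file): a calling application
 * that has located the ImageDescription payload could hand it to the decoder
 * roughly as follows. `imagedesc` and `imagedesc_size` are hypothetical names
 * for a pointer to the payload (starting with 'SVQ3') and its size in bytes.
 *
 *     avctx->extradata = av_mallocz(imagedesc_size + AV_INPUT_BUFFER_PADDING_SIZE);
 *     if (!avctx->extradata)
 *         return AVERROR(ENOMEM);
 *     memcpy(avctx->extradata, imagedesc, imagedesc_size);
 *     avctx->extradata_size = imagedesc_size;
 */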
42
43#include <inttypes.h>
44
45#include "libavutil/attributes.h"
46#include "internal.h"
47#include "avcodec.h"
48#include "mpegutils.h"
49#include "h264dec.h"
50#include "h264data.h"
51#include "golomb.h"
52#include "hpeldsp.h"
53#include "mathops.h"
54#include "rectangle.h"
55#include "tpeldsp.h"
56
57#if CONFIG_ZLIB
58#include <zlib.h>
59#endif
60
61#include "svq1.h"
62
63/**
64 * @file
65 * svq3 decoder.
66 */
67
68typedef struct SVQ3Frame {
69 AVFrame *f;
70
71 AVBufferRef *motion_val_buf[2];
72 int16_t (*motion_val[2])[2];
73
74 AVBufferRef *mb_type_buf;
75 uint32_t *mb_type;
76
77
78 AVBufferRef *ref_index_buf[2];
79 int8_t *ref_index[2];
80} SVQ3Frame;
81
82typedef struct SVQ3Context {
83 AVCodecContext *avctx;
84
85 H264DSPContext h264dsp;
86 H264PredContext hpc;
87 HpelDSPContext hdsp;
88 TpelDSPContext tdsp;
89 VideoDSPContext vdsp;
90
91 SVQ3Frame *cur_pic;
92 SVQ3Frame *next_pic;
93 SVQ3Frame *last_pic;
94 GetBitContext gb;
95 GetBitContext gb_slice;
96 uint8_t *slice_buf;
97 int slice_size;
98 int halfpel_flag;
99 int thirdpel_flag;
100 int has_watermark;
101 uint32_t watermark_key;
102 uint8_t *buf;
103 int buf_size;
104 int adaptive_quant;
105 int next_p_frame_damaged;
106 int h_edge_pos;
107 int v_edge_pos;
108 int last_frame_output;
109 int slice_num;
110 int qscale;
111 int cbp;
112 int frame_num;
113 int frame_num_offset;
114 int prev_frame_num_offset;
115 int prev_frame_num;
116
117 enum AVPictureType pict_type;
118 enum AVPictureType slice_type;
119 int low_delay;
120
121 int mb_x, mb_y;
122 int mb_xy;
123 int mb_width, mb_height;
124 int mb_stride, mb_num;
125 int b_stride;
126
127 uint32_t *mb2br_xy;
128
129 int chroma_pred_mode;
130 int intra16x16_pred_mode;
131
132 int8_t intra4x4_pred_mode_cache[5 * 8];
133 int8_t (*intra4x4_pred_mode);
134
135 unsigned int top_samples_available;
136 unsigned int topright_samples_available;
137 unsigned int left_samples_available;
138
139 uint8_t *edge_emu_buffer;
140
141 DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
142 DECLARE_ALIGNED(8, int8_t, ref_cache)[2][5 * 8];
143 DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
144 DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
145 DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
146 uint32_t dequant4_coeff[QP_MAX_NUM + 1][16];
147 int block_offset[2 * (16 * 3)];
148} SVQ3Context;
149
150#define FULLPEL_MODE 1
151#define HALFPEL_MODE 2
152#define THIRDPEL_MODE 3
153#define PREDICT_MODE 4
154
155/* dual scan (from some older H.264 draft)
156 * o-->o-->o   o
157 *         |  /|
158 * o   o   o / o
159 * | / |   |/  |
160 * o   o   o   o
161 *   /
162 * o-->o-->o-->o
163 */
164static const uint8_t svq3_scan[16] = {
165 0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
166 2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
167 0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
168 0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
169};
170
171static const uint8_t luma_dc_zigzag_scan[16] = {
172 0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
173 3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
174 1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
175 3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
176};
177
178static const uint8_t svq3_pred_0[25][2] = {
179 { 0, 0 },
180 { 1, 0 }, { 0, 1 },
181 { 0, 2 }, { 1, 1 }, { 2, 0 },
182 { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
183 { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
184 { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
185 { 2, 4 }, { 3, 3 }, { 4, 2 },
186 { 4, 3 }, { 3, 4 },
187 { 4, 4 }
188};
189
190static const int8_t svq3_pred_1[6][6][5] = {
191 { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
192 { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
193 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
194 { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
195 { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
196 { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
197 { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
198 { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
199 { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
200 { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
201 { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
202 { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
203};
204
205static const struct {
206 uint8_t run;
207 uint8_t level;
208} svq3_dct_tables[2][16] = {
209 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
210 { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
211 { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
212 { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
213};
214
215static const uint32_t svq3_dequant_coeff[32] = {
216 3881, 4351, 4890, 5481, 6154, 6914, 7761, 8718,
217 9781, 10987, 12339, 13828, 15523, 17435, 19561, 21873,
218 24552, 27656, 30847, 34870, 38807, 43747, 49103, 54683,
219 61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
220};
221
222static int svq3_decode_end(AVCodecContext *avctx);
223
224static void svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
225{
226 const int qmul = svq3_dequant_coeff[qp];
227#define stride 16
228 int i;
229 int temp[16];
230 static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
231
232 for (i = 0; i < 4; i++) {
233 const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
234 const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
235 const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
236 const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
237
238 temp[4 * i + 0] = z0 + z3;
239 temp[4 * i + 1] = z1 + z2;
240 temp[4 * i + 2] = z1 - z2;
241 temp[4 * i + 3] = z0 - z3;
242 }
243
244 for (i = 0; i < 4; i++) {
245 const int offset = x_offset[i];
246 const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
247 const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
248 const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
249 const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
250
251 output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
252 output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
253 output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
254 output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
255 }
256}
257#undef stride
258
259static void svq3_add_idct_c(uint8_t *dst, int16_t *block,
260 int stride, int qp, int dc)
261{
262 const int qmul = svq3_dequant_coeff[qp];
263 int i;
264
265 if (dc) {
266 dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
267 : qmul * (block[0] >> 3) / 2);
268 block[0] = 0;
269 }
270
271 for (i = 0; i < 4; i++) {
272 const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
273 const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
274 const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
275 const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
276
277 block[0 + 4 * i] = z0 + z3;
278 block[1 + 4 * i] = z1 + z2;
279 block[2 + 4 * i] = z1 - z2;
280 block[3 + 4 * i] = z0 - z3;
281 }
282
283 for (i = 0; i < 4; i++) {
284 const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
285 const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
286 const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
287 const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
288 const int rr = (dc + 0x80000);
289
290 dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
291 dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
292 dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
293 dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
294 }
295
296 memset(block, 0, 16 * sizeof(int16_t));
297}
298
299static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
300 int index, const int type)
301{
302 static const uint8_t *const scan_patterns[4] = {
303 luma_dc_zigzag_scan, ff_zigzag_scan, svq3_scan, ff_h264_chroma_dc_scan
304 };
305
306 int run, level, sign, limit;
307 unsigned vlc;
308 const int intra = 3 * type >> 2;
309 const uint8_t *const scan = scan_patterns[type];
310
311 for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
312 for (; (vlc = get_interleaved_ue_golomb(gb)) != 0; index++) {
313 if ((int32_t)vlc < 0)
314 return -1;
315
316 sign = (vlc & 1) ? 0 : -1;
317 vlc = vlc + 1 >> 1;
318
319 if (type == 3) {
320 if (vlc < 3) {
321 run = 0;
322 level = vlc;
323 } else if (vlc < 4) {
324 run = 1;
325 level = 1;
326 } else {
327 run = vlc & 0x3;
328 level = (vlc + 9 >> 2) - run;
329 }
330 } else {
331 if (vlc < 16U) {
332 run = svq3_dct_tables[intra][vlc].run;
333 level = svq3_dct_tables[intra][vlc].level;
334 } else if (intra) {
335 run = vlc & 0x7;
336 level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
337 } else {
338 run = vlc & 0xF;
339 level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
340 }
341 }
342
343
344 if ((index += run) >= limit)
345 return -1;
346
347 block[scan[index]] = (level ^ sign) - sign;
348 }
349
350 if (type != 2) {
351 break;
352 }
353 }
354
355 return 0;
356}
357
358static av_always_inline int
359svq3_fetch_diagonal_mv(const SVQ3Context *s, const int16_t **C,
360 int i, int list, int part_width)
361{
362 const int topright_ref = s->ref_cache[list][i - 8 + part_width];
363
364 if (topright_ref != PART_NOT_AVAILABLE) {
365 *C = s->mv_cache[list][i - 8 + part_width];
366 return topright_ref;
367 } else {
368 *C = s->mv_cache[list][i - 8 - 1];
369 return s->ref_cache[list][i - 8 - 1];
370 }
371}
372
373/**
374 * Get the predicted MV.
375 * @param n the block index
376 * @param part_width the width of the partition (4, 8, 16) -> (1, 2, 4)
377 * @param mx the x component of the predicted motion vector
378 * @param my the y component of the predicted motion vector
379 */
380static av_always_inline void svq3_pred_motion(const SVQ3Context *s, int n,
381 int part_width, int list,
382 int ref, int *const mx, int *const my)
383{
384 const int index8 = scan8[n];
385 const int top_ref = s->ref_cache[list][index8 - 8];
386 const int left_ref = s->ref_cache[list][index8 - 1];
387 const int16_t *const A = s->mv_cache[list][index8 - 1];
388 const int16_t *const B = s->mv_cache[list][index8 - 8];
389 const int16_t *C;
390 int diagonal_ref, match_count;
391
392/* mv_cache
393 * B . . A T T T T
394 * U . . L . . , .
395 * U . . L . . . .
396 * U . . L . . , .
397 * . . . L . . . .
398 */
399
400 diagonal_ref = svq3_fetch_diagonal_mv(s, &C, index8, list, part_width);
401 match_count = (diagonal_ref == ref) + (top_ref == ref) + (left_ref == ref);
402 if (match_count > 1) { //most common
403 *mx = mid_pred(A[0], B[0], C[0]);
404 *my = mid_pred(A[1], B[1], C[1]);
405 } else if (match_count == 1) {
406 if (left_ref == ref) {
407 *mx = A[0];
408 *my = A[1];
409 } else if (top_ref == ref) {
410 *mx = B[0];
411 *my = B[1];
412 } else {
413 *mx = C[0];
414 *my = C[1];
415 }
416 } else {
417 if (top_ref == PART_NOT_AVAILABLE &&
418 diagonal_ref == PART_NOT_AVAILABLE &&
419 left_ref != PART_NOT_AVAILABLE) {
420 *mx = A[0];
421 *my = A[1];
422 } else {
423 *mx = mid_pred(A[0], B[0], C[0]);
424 *my = mid_pred(A[1], B[1], C[1]);
425 }
426 }
427}
428
429static inline void svq3_mc_dir_part(SVQ3Context *s,
430 int x, int y, int width, int height,
431 int mx, int my, int dxy,
432 int thirdpel, int dir, int avg)
433{
434 const SVQ3Frame *pic = (dir == 0) ? s->last_pic : s->next_pic;
435 uint8_t *src, *dest;
436 int i, emu = 0;
437 int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2
438 int linesize = s->cur_pic->f->linesize[0];
439 int uvlinesize = s->cur_pic->f->linesize[1];
440
441 mx += x;
442 my += y;
443
444 if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
445 my < 0 || my >= s->v_edge_pos - height - 1) {
446 emu = 1;
447 mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
448 my = av_clip(my, -16, s->v_edge_pos - height + 15);
449 }
450
451 /* form component predictions */
452 dest = s->cur_pic->f->data[0] + x + y * linesize;
453 src = pic->f->data[0] + mx + my * linesize;
454
455 if (emu) {
456 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
457 linesize, linesize,
458 width + 1, height + 1,
459 mx, my, s->h_edge_pos, s->v_edge_pos);
460 src = s->edge_emu_buffer;
461 }
462 if (thirdpel)
463 (avg ? s->tdsp.avg_tpel_pixels_tab
464 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, linesize,
465 width, height);
466 else
467 (avg ? s->hdsp.avg_pixels_tab
468 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, linesize,
469 height);
470
471 if (!(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
472 mx = mx + (mx < (int) x) >> 1;
473 my = my + (my < (int) y) >> 1;
474 width = width >> 1;
475 height = height >> 1;
476 blocksize++;
477
478 for (i = 1; i < 3; i++) {
479 dest = s->cur_pic->f->data[i] + (x >> 1) + (y >> 1) * uvlinesize;
480 src = pic->f->data[i] + mx + my * uvlinesize;
481
482 if (emu) {
483 s->vdsp.emulated_edge_mc(s->edge_emu_buffer, src,
484 uvlinesize, uvlinesize,
485 width + 1, height + 1,
486 mx, my, (s->h_edge_pos >> 1),
487 s->v_edge_pos >> 1);
488 src = s->edge_emu_buffer;
489 }
490 if (thirdpel)
491 (avg ? s->tdsp.avg_tpel_pixels_tab
492 : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
493 uvlinesize,
494 width, height);
495 else
496 (avg ? s->hdsp.avg_pixels_tab
497 : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
498 uvlinesize,
499 height);
500 }
501 }
502}
503
504static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
505 int dir, int avg)
506{
507 int i, j, k, mx, my, dx, dy, x, y;
508 const int part_width = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
509 const int part_height = 16 >> ((unsigned)(size + 1) / 3);
510 const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
511 const int h_edge_pos = 6 * (s->h_edge_pos - part_width) - extra_width;
512 const int v_edge_pos = 6 * (s->v_edge_pos - part_height) - extra_width;
513
514 for (i = 0; i < 16; i += part_height)
515 for (j = 0; j < 16; j += part_width) {
516 const int b_xy = (4 * s->mb_x + (j >> 2)) +
517 (4 * s->mb_y + (i >> 2)) * s->b_stride;
518 int dxy;
519 x = 16 * s->mb_x + j;
520 y = 16 * s->mb_y + i;
521 k = (j >> 2 & 1) + (i >> 1 & 2) +
522 (j >> 1 & 4) + (i & 8);
523
524 if (mode != PREDICT_MODE) {
525 svq3_pred_motion(s, k, part_width >> 2, dir, 1, &mx, &my);
526 } else {
527 mx = s->next_pic->motion_val[0][b_xy][0] << 1;
528 my = s->next_pic->motion_val[0][b_xy][1] << 1;
529
530 if (dir == 0) {
531 mx = mx * s->frame_num_offset /
532 s->prev_frame_num_offset + 1 >> 1;
533 my = my * s->frame_num_offset /
534 s->prev_frame_num_offset + 1 >> 1;
535 } else {
536 mx = mx * (s->frame_num_offset - s->prev_frame_num_offset) /
537 s->prev_frame_num_offset + 1 >> 1;
538 my = my * (s->frame_num_offset - s->prev_frame_num_offset) /
539 s->prev_frame_num_offset + 1 >> 1;
540 }
541 }
542
543 /* clip motion vector prediction to frame border */
544 mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
545 my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);
546
547 /* get (optional) motion vector differential */
548 if (mode == PREDICT_MODE) {
549 dx = dy = 0;
550 } else {
551 dy = get_interleaved_se_golomb(&s->gb_slice);
552 dx = get_interleaved_se_golomb(&s->gb_slice);
553
554 if (dx == INVALID_VLC || dy == INVALID_VLC) {
555 av_log(s->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
556 return -1;
557 }
558 }
559
560 /* compute motion vector */
561 if (mode == THIRDPEL_MODE) {
562 int fx, fy;
563 mx = (mx + 1 >> 1) + dx;
564 my = (my + 1 >> 1) + dy;
565 fx = (unsigned)(mx + 0x3000) / 3 - 0x1000;
566 fy = (unsigned)(my + 0x3000) / 3 - 0x1000;
567 dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);
568
569 svq3_mc_dir_part(s, x, y, part_width, part_height,
570 fx, fy, dxy, 1, dir, avg);
571 mx += mx;
572 my += my;
573 } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
574 mx = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
575 my = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
576 dxy = (mx & 1) + 2 * (my & 1);
577
578 svq3_mc_dir_part(s, x, y, part_width, part_height,
579 mx >> 1, my >> 1, dxy, 0, dir, avg);
580 mx *= 3;
581 my *= 3;
582 } else {
583 mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
584 my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;
585
586 svq3_mc_dir_part(s, x, y, part_width, part_height,
587 mx, my, 0, 0, dir, avg);
588 mx *= 6;
589 my *= 6;
590 }
591
592 /* update mv_cache */
593 if (mode != PREDICT_MODE) {
594 int32_t mv = pack16to32(mx, my);
595
596 if (part_height == 8 && i < 8) {
597 AV_WN32A(s->mv_cache[dir][scan8[k] + 1 * 8], mv);
598
599 if (part_width == 8 && j < 8)
600 AV_WN32A(s->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
601 }
602 if (part_width == 8 && j < 8)
603 AV_WN32A(s->mv_cache[dir][scan8[k] + 1], mv);
604 if (part_width == 4 || part_height == 4)
605 AV_WN32A(s->mv_cache[dir][scan8[k]], mv);
606 }
607
608 /* write back motion vectors */
609 fill_rectangle(s->cur_pic->motion_val[dir][b_xy],
610 part_width >> 2, part_height >> 2, s->b_stride,
611 pack16to32(mx, my), 4);
612 }
613
614 return 0;
615}
616
617static av_always_inline void hl_decode_mb_idct_luma(SVQ3Context *s,
618 int mb_type, const int *block_offset,
619 int linesize, uint8_t *dest_y)
620{
621 int i;
622 if (!IS_INTRA4x4(mb_type)) {
623 for (i = 0; i < 16; i++)
624 if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
625 uint8_t *const ptr = dest_y + block_offset[i];
626 svq3_add_idct_c(ptr, s->mb + i * 16, linesize,
627 s->qscale, IS_INTRA(mb_type) ? 1 : 0);
628 }
629 }
630}
631
632static av_always_inline void hl_decode_mb_predict_luma(SVQ3Context *s,
633 int mb_type,
634 const int *block_offset,
635 int linesize,
636 uint8_t *dest_y)
637{
638 int i;
639 int qscale = s->qscale;
640
641 if (IS_INTRA4x4(mb_type)) {
642 for (i = 0; i < 16; i++) {
643 uint8_t *const ptr = dest_y + block_offset[i];
644 const int dir = s->intra4x4_pred_mode_cache[scan8[i]];
645
646 uint8_t *topright;
647 int nnz, tr;
648 if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
649 const int topright_avail = (s->topright_samples_available << i) & 0x8000;
650 av_assert2(s->mb_y || linesize <= block_offset[i]);
651 if (!topright_avail) {
652 tr = ptr[3 - linesize] * 0x01010101u;
653 topright = (uint8_t *)&tr;
654 } else
655 topright = ptr + 4 - linesize;
656 } else
657 topright = NULL;
658
659 s->hpc.pred4x4[dir](ptr, topright, linesize);
660 nnz = s->non_zero_count_cache[scan8[i]];
661 if (nnz) {
662 svq3_add_idct_c(ptr, s->mb + i * 16, linesize, qscale, 0);
663 }
664 }
665 } else {
666 s->hpc.pred16x16[s->intra16x16_pred_mode](dest_y, linesize);
667 svq3_luma_dc_dequant_idct_c(s->mb, s->mb_luma_dc[0], qscale);
668 }
669}
670
671static void hl_decode_mb(SVQ3Context *s)
672{
673 const int mb_x = s->mb_x;
674 const int mb_y = s->mb_y;
675 const int mb_xy = s->mb_xy;
676 const int mb_type = s->cur_pic->mb_type[mb_xy];
677 uint8_t *dest_y, *dest_cb, *dest_cr;
678 int linesize, uvlinesize;
679 int i, j;
680 const int *block_offset = &s->block_offset[0];
681 const int block_h = 16 >> 1;
682
683 linesize = s->cur_pic->f->linesize[0];
684 uvlinesize = s->cur_pic->f->linesize[1];
685
686 dest_y = s->cur_pic->f->data[0] + (mb_x + mb_y * linesize) * 16;
687 dest_cb = s->cur_pic->f->data[1] + mb_x * 8 + mb_y * uvlinesize * block_h;
688 dest_cr = s->cur_pic->f->data[2] + mb_x * 8 + mb_y * uvlinesize * block_h;
689
690 s->vdsp.prefetch(dest_y + (s->mb_x & 3) * 4 * linesize + 64, linesize, 4);
691 s->vdsp.prefetch(dest_cb + (s->mb_x & 7) * uvlinesize + 64, dest_cr - dest_cb, 2);
692
693 if (IS_INTRA(mb_type)) {
694 s->hpc.pred8x8[s->chroma_pred_mode](dest_cb, uvlinesize);
695 s->hpc.pred8x8[s->chroma_pred_mode](dest_cr, uvlinesize);
696
697 hl_decode_mb_predict_luma(s, mb_type, block_offset, linesize, dest_y);
698 }
699
700 hl_decode_mb_idct_luma(s, mb_type, block_offset, linesize, dest_y);
701
702 if (s->cbp & 0x30) {
703 uint8_t *dest[2] = { dest_cb, dest_cr };
704 s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 1,
705 s->dequant4_coeff[4][0]);
706 s->h264dsp.h264_chroma_dc_dequant_idct(s->mb + 16 * 16 * 2,
707 s->dequant4_coeff[4][0]);
708 for (j = 1; j < 3; j++) {
709 for (i = j * 16; i < j * 16 + 4; i++)
710 if (s->non_zero_count_cache[scan8[i]] || s->mb[i * 16]) {
711 uint8_t *const ptr = dest[j - 1] + block_offset[i];
712 svq3_add_idct_c(ptr, s->mb + i * 16,
713 uvlinesize, ff_h264_chroma_qp[0][s->qscale + 12] - 12, 2);
714 }
715 }
716 }
717}
718
719static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
720{
721 int i, j, k, m, dir, mode;
722 int cbp = 0;
723 uint32_t vlc;
724 int8_t *top, *left;
725 const int mb_xy = s->mb_xy;
726 const int b_xy = 4 * s->mb_x + 4 * s->mb_y * s->b_stride;
727
728 s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
729 s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
730 s->topright_samples_available = 0xFFFF;
731
732 if (mb_type == 0) { /* SKIP */
733 if (s->pict_type == AV_PICTURE_TYPE_P ||
734 s->next_pic->mb_type[mb_xy] == -1) {
735 svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
736 0, 0, 0, 0, 0, 0);
737
738 if (s->pict_type == AV_PICTURE_TYPE_B)
739 svq3_mc_dir_part(s, 16 * s->mb_x, 16 * s->mb_y, 16, 16,
740 0, 0, 0, 0, 1, 1);
741
742 mb_type = MB_TYPE_SKIP;
743 } else {
744 mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
745 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
746 return -1;
747 if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
748 return -1;
749
750 mb_type = MB_TYPE_16x16;
751 }
752 } else if (mb_type < 8) { /* INTER */
753 if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&s->gb_slice))
754 mode = THIRDPEL_MODE;
755 else if (s->halfpel_flag &&
756 s->thirdpel_flag == !get_bits1(&s->gb_slice))
757 mode = HALFPEL_MODE;
758 else
759 mode = FULLPEL_MODE;
760
761 /* fill caches */
762 /* note ref_cache should contain here:
763 * ????????
764 * ???11111
765 * N??11111
766 * N??11111
767 * N??11111
768 */
769
770 for (m = 0; m < 2; m++) {
771 if (s->mb_x > 0 && s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6] != -1) {
772 for (i = 0; i < 4; i++)
773 AV_COPY32(s->mv_cache[m][scan8[0] - 1 + i * 8],
774 s->cur_pic->motion_val[m][b_xy - 1 + i * s->b_stride]);
775 } else {
776 for (i = 0; i < 4; i++)
777 AV_ZERO32(s->mv_cache[m][scan8[0] - 1 + i * 8]);
778 }
779 if (s->mb_y > 0) {
780 memcpy(s->mv_cache[m][scan8[0] - 1 * 8],
781 s->cur_pic->motion_val[m][b_xy - s->b_stride],
782 4 * 2 * sizeof(int16_t));
783 memset(&s->ref_cache[m][scan8[0] - 1 * 8],
784 (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);
785
786 if (s->mb_x < s->mb_width - 1) {
787 AV_COPY32(s->mv_cache[m][scan8[0] + 4 - 1 * 8],
788 s->cur_pic->motion_val[m][b_xy - s->b_stride + 4]);
789 s->ref_cache[m][scan8[0] + 4 - 1 * 8] =
790 (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride + 1] + 6] == -1 ||
791 s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
792 } else
793 s->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
794 if (s->mb_x > 0) {
795 AV_COPY32(s->mv_cache[m][scan8[0] - 1 - 1 * 8],
796 s->cur_pic->motion_val[m][b_xy - s->b_stride - 1]);
797 s->ref_cache[m][scan8[0] - 1 - 1 * 8] =
798 (s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
799 } else
800 s->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
801 } else
802 memset(&s->ref_cache[m][scan8[0] - 1 * 8 - 1],
803 PART_NOT_AVAILABLE, 8);
804
805 if (s->pict_type != AV_PICTURE_TYPE_B)
806 break;
807 }
808
809 /* decode motion vector(s) and form prediction(s) */
810 if (s->pict_type == AV_PICTURE_TYPE_P) {
811 if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
812 return -1;
813 } else { /* AV_PICTURE_TYPE_B */
814 if (mb_type != 2) {
815 if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
816 return -1;
817 } else {
818 for (i = 0; i < 4; i++)
819 memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
820 0, 4 * 2 * sizeof(int16_t));
821 }
822 if (mb_type != 1) {
823 if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
824 return -1;
825 } else {
826 for (i = 0; i < 4; i++)
827 memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
828 0, 4 * 2 * sizeof(int16_t));
829 }
830 }
831
832 mb_type = MB_TYPE_16x16;
833 } else if (mb_type == 8 || mb_type == 33) { /* INTRA4x4 */
834 int8_t *i4x4 = s->intra4x4_pred_mode + s->mb2br_xy[s->mb_xy];
835 int8_t *i4x4_cache = s->intra4x4_pred_mode_cache;
836
837 memset(s->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));
838
839 if (mb_type == 8) {
840 if (s->mb_x > 0) {
841 for (i = 0; i < 4; i++)
842 s->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - 1] + 6 - i];
843 if (s->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
844 s->left_samples_available = 0x5F5F;
845 }
846 if (s->mb_y > 0) {
847 s->intra4x4_pred_mode_cache[4 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 0];
848 s->intra4x4_pred_mode_cache[5 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 1];
849 s->intra4x4_pred_mode_cache[6 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 2];
850 s->intra4x4_pred_mode_cache[7 + 8 * 0] = s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride] + 3];
851
852 if (s->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
853 s->top_samples_available = 0x33FF;
854 }
855
856 /* decode prediction codes for luma blocks */
857 for (i = 0; i < 16; i += 2) {
858 vlc = get_interleaved_ue_golomb(&s->gb_slice);
859
860 if (vlc >= 25U) {
861 av_log(s->avctx, AV_LOG_ERROR,
862 "luma prediction:%"PRIu32"\n", vlc);
863 return -1;
864 }
865
866 left = &s->intra4x4_pred_mode_cache[scan8[i] - 1];
867 top = &s->intra4x4_pred_mode_cache[scan8[i] - 8];
868
869 left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
870 left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];
871
872 if (left[1] == -1 || left[2] == -1) {
873 av_log(s->avctx, AV_LOG_ERROR, "weird prediction\n");
874 return -1;
875 }
876 }
877 } else { /* mb_type == 33, DC_128_PRED block type */
878 for (i = 0; i < 4; i++)
879 memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
880 }
881
882 AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
883 i4x4[4] = i4x4_cache[7 + 8 * 3];
884 i4x4[5] = i4x4_cache[7 + 8 * 2];
885 i4x4[6] = i4x4_cache[7 + 8 * 1];
886
887 if (mb_type == 8) {
888 ff_h264_check_intra4x4_pred_mode(s->intra4x4_pred_mode_cache,
889 s->avctx, s->top_samples_available,
890 s->left_samples_available);
891
892 s->top_samples_available = (s->mb_y == 0) ? 0x33FF : 0xFFFF;
893 s->left_samples_available = (s->mb_x == 0) ? 0x5F5F : 0xFFFF;
894 } else {
895 for (i = 0; i < 4; i++)
896 memset(&s->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);
897
898 s->top_samples_available = 0x33FF;
899 s->left_samples_available = 0x5F5F;
900 }
901
902 mb_type = MB_TYPE_INTRA4x4;
903 } else { /* INTRA16x16 */
904 dir = ff_h264_i_mb_type_info[mb_type - 8].pred_mode;
905 dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;
906
907 if ((s->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
908 s->left_samples_available, dir, 0)) < 0) {
909 av_log(s->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
910 return s->intra16x16_pred_mode;
911 }
912
913 cbp = ff_h264_i_mb_type_info[mb_type - 8].cbp;
914 mb_type = MB_TYPE_INTRA16x16;
915 }
916
917 if (!IS_INTER(mb_type) && s->pict_type != AV_PICTURE_TYPE_I) {
918 for (i = 0; i < 4; i++)
919 memset(s->cur_pic->motion_val[0][b_xy + i * s->b_stride],
920 0, 4 * 2 * sizeof(int16_t));
921 if (s->pict_type == AV_PICTURE_TYPE_B) {
922 for (i = 0; i < 4; i++)
923 memset(s->cur_pic->motion_val[1][b_xy + i * s->b_stride],
924 0, 4 * 2 * sizeof(int16_t));
925 }
926 }
927 if (!IS_INTRA4x4(mb_type)) {
928 memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy], DC_PRED, 8);
929 }
930 if (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B) {
931 memset(s->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
932 }
933
934 if (!IS_INTRA16x16(mb_type) &&
935 (!IS_SKIP(mb_type) || s->pict_type == AV_PICTURE_TYPE_B)) {
936 if ((vlc = get_interleaved_ue_golomb(&s->gb_slice)) >= 48U){
937 av_log(s->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
938 return -1;
939 }
940
941 cbp = IS_INTRA(mb_type) ? ff_h264_golomb_to_intra4x4_cbp[vlc]
942 : ff_h264_golomb_to_inter_cbp[vlc];
943 }
944 if (IS_INTRA16x16(mb_type) ||
945 (s->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
946 s->qscale += get_interleaved_se_golomb(&s->gb_slice);
947
948 if (s->qscale > 31u) {
949 av_log(s->avctx, AV_LOG_ERROR, "qscale:%d\n", s->qscale);
950 return -1;
951 }
952 }
953 if (IS_INTRA16x16(mb_type)) {
954 AV_ZERO128(s->mb_luma_dc[0] + 0);
955 AV_ZERO128(s->mb_luma_dc[0] + 8);
956 if (svq3_decode_block(&s->gb_slice, s->mb_luma_dc[0], 0, 1)) {
957 av_log(s->avctx, AV_LOG_ERROR,
958 "error while decoding intra luma dc\n");
959 return -1;
960 }
961 }
962
963 if (cbp) {
964 const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
965 const int type = ((s->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);
966
967 for (i = 0; i < 4; i++)
968 if ((cbp & (1 << i))) {
969 for (j = 0; j < 4; j++) {
970 k = index ? (1 * (j & 1) + 2 * (i & 1) +
971 2 * (j & 2) + 4 * (i & 2))
972 : (4 * i + j);
973 s->non_zero_count_cache[scan8[k]] = 1;
974
975 if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], index, type)) {
976 av_log(s->avctx, AV_LOG_ERROR,
977 "error while decoding block\n");
978 return -1;
979 }
980 }
981 }
982
983 if ((cbp & 0x30)) {
984 for (i = 1; i < 3; ++i)
985 if (svq3_decode_block(&s->gb_slice, &s->mb[16 * 16 * i], 0, 3)) {
986 av_log(s->avctx, AV_LOG_ERROR,
987 "error while decoding chroma dc block\n");
988 return -1;
989 }
990
991 if ((cbp & 0x20)) {
992 for (i = 1; i < 3; i++) {
993 for (j = 0; j < 4; j++) {
994 k = 16 * i + j;
995 s->non_zero_count_cache[scan8[k]] = 1;
996
997 if (svq3_decode_block(&s->gb_slice, &s->mb[16 * k], 1, 1)) {
998 av_log(s->avctx, AV_LOG_ERROR,
999 "error while decoding chroma ac block\n");
1000 return -1;
1001 }
1002 }
1003 }
1004 }
1005 }
1006 }
1007
1008 s->cbp = cbp;
1009 s->cur_pic->mb_type[mb_xy] = mb_type;
1010
1011 if (IS_INTRA(mb_type))
1012 s->chroma_pred_mode = ff_h264_check_intra_pred_mode(s->avctx, s->top_samples_available,
1013 s->left_samples_available, DC_PRED8x8, 1);
1014
1015 return 0;
1016}
1017
1018static int svq3_decode_slice_header(AVCodecContext *avctx)
1019{
1020 SVQ3Context *s = avctx->priv_data;
1021 const int mb_xy = s->mb_xy;
1022 int i, header;
1023 unsigned slice_id;
1024
1025 header = get_bits(&s->gb, 8);
1026
1027 if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
1028 /* TODO: what? */
1029 av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
1030 return -1;
1031 } else {
1032 int slice_bits, slice_bytes, slice_length;
1033 int length = header >> 5 & 3;
1034
1035 slice_length = show_bits(&s->gb, 8 * length);
1036 slice_bits = slice_length * 8;
1037 slice_bytes = slice_length + length - 1;
1038
1039 if (8LL*slice_bytes > get_bits_left(&s->gb)) {
1040 av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
1041 return -1;
1042 }
1043
1044 skip_bits(&s->gb, 8);
1045
1046 av_fast_malloc(&s->slice_buf, &s->slice_size, slice_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
1047 if (!s->slice_buf)
1048 return AVERROR(ENOMEM);
1049
1050 memcpy(s->slice_buf, s->gb.buffer + s->gb.index / 8, slice_bytes);
1051
1052 init_get_bits(&s->gb_slice, s->slice_buf, slice_bits);
1053
1054 if (s->watermark_key) {
1055 uint32_t header = AV_RL32(&s->gb_slice.buffer[1]);
1056 AV_WL32(&s->gb_slice.buffer[1], header ^ s->watermark_key);
1057 }
1058 if (length > 0) {
1059 memmove(s->slice_buf, &s->slice_buf[slice_length], length - 1);
1060 }
1061 skip_bits_long(&s->gb, slice_bytes * 8);
1062 }
1063
1064 if ((slice_id = get_interleaved_ue_golomb(&s->gb_slice)) >= 3) {
1065 av_log(s->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
1066 return -1;
1067 }
1068 if (get_bits1(&s->gb_slice)) {
1069 avpriv_report_missing_feature(s->avctx, "Media key encryption");
1070 return AVERROR_PATCHWELCOME;
1071 }
1072
1073 s->slice_type = ff_h264_golomb_to_pict_type[slice_id];
1074
1075 if ((header & 0x9F) == 2) {
1076 i = (s->mb_num < 64) ? 5 : av_log2(s->mb_num - 1);
1077 get_bits(&s->gb_slice, i);
1078 }
1079
1080 s->slice_num = get_bits(&s->gb_slice, 8);
1081 s->qscale = get_bits(&s->gb_slice, 5);
1082 s->adaptive_quant = get_bits1(&s->gb_slice);
1083
1084 /* unknown fields */
1085 skip_bits1(&s->gb_slice);
1086
1087 if (s->has_watermark)
1088 skip_bits1(&s->gb_slice);
1089
1090 skip_bits1(&s->gb_slice);
1091 skip_bits(&s->gb_slice, 2);
1092
1093 if (skip_1stop_8data_bits(&s->gb_slice) < 0)
1094 return AVERROR_INVALIDDATA;
1095
1096 /* reset intra predictors and invalidate motion vector references */
1097 if (s->mb_x > 0) {
1098 memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - 1] + 3,
1099 -1, 4 * sizeof(int8_t));
1100 memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_x],
1101 -1, 8 * sizeof(int8_t) * s->mb_x);
1102 }
1103 if (s->mb_y > 0) {
1104 memset(s->intra4x4_pred_mode + s->mb2br_xy[mb_xy - s->mb_stride],
1105 -1, 8 * sizeof(int8_t) * (s->mb_width - s->mb_x));
1106
1107 if (s->mb_x > 0)
1108 s->intra4x4_pred_mode[s->mb2br_xy[mb_xy - s->mb_stride - 1] + 3] = -1;
1109 }
1110
1111 return 0;
1112}
1113
1114static void init_dequant4_coeff_table(SVQ3Context *s)
1115{
1116 int q, x;
1117 const int max_qp = 51;
1118
1119 for (q = 0; q < max_qp + 1; q++) {
1120 int shift = ff_h264_quant_div6[q] + 2;
1121 int idx = ff_h264_quant_rem6[q];
1122 for (x = 0; x < 16; x++)
1123 s->dequant4_coeff[q][(x >> 2) | ((x << 2) & 0xF)] =
1124 ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] * 16) << shift;
1125 }
1126}
1127
1128static av_cold int svq3_decode_init(AVCodecContext *avctx)
1129{
1130 SVQ3Context *s = avctx->priv_data;
1131 int m, x, y;
1132 unsigned char *extradata;
1133 unsigned char *extradata_end;
1134 unsigned int size;
1135 int marker_found = 0;
1136 int ret;
1137
1138 s->cur_pic = av_mallocz(sizeof(*s->cur_pic));
1139 s->last_pic = av_mallocz(sizeof(*s->last_pic));
1140 s->next_pic = av_mallocz(sizeof(*s->next_pic));
1141 if (!s->next_pic || !s->last_pic || !s->cur_pic) {
1142 ret = AVERROR(ENOMEM);
1143 goto fail;
1144 }
1145
1146 s->cur_pic->f = av_frame_alloc();
1147 s->last_pic->f = av_frame_alloc();
1148 s->next_pic->f = av_frame_alloc();
1149 if (!s->cur_pic->f || !s->last_pic->f || !s->next_pic->f)
1150 return AVERROR(ENOMEM);
1151
1152 ff_h264dsp_init(&s->h264dsp, 8, 1);
1153 ff_h264_pred_init(&s->hpc, AV_CODEC_ID_SVQ3, 8, 1);
1154 ff_videodsp_init(&s->vdsp, 8);
1155
1156
1157 avctx->bits_per_raw_sample = 8;
1158
1159 ff_hpeldsp_init(&s->hdsp, avctx->flags);
1160 ff_tpeldsp_init(&s->tdsp);
1161
1162 avctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
1163 avctx->color_range = AVCOL_RANGE_JPEG;
1164
1165 s->avctx = avctx;
1166 s->halfpel_flag = 1;
1167 s->thirdpel_flag = 1;
1168 s->has_watermark = 0;
1169
1170 /* prowl for the "SEQH" marker in the extradata */
1171 extradata = (unsigned char *)avctx->extradata;
1172 extradata_end = avctx->extradata + avctx->extradata_size;
1173 if (extradata) {
1174 for (m = 0; m + 8 < avctx->extradata_size; m++) {
1175 if (!memcmp(extradata, "SEQH", 4)) {
1176 marker_found = 1;
1177 break;
1178 }
1179 extradata++;
1180 }
1181 }
1182
1183 /* if a match was found, parse the extra data */
1184 if (marker_found) {
1185 GetBitContext gb;
1186 int frame_size_code;
1187 int unk0, unk1, unk2, unk3, unk4;
1188
1189 size = AV_RB32(&extradata[4]);
1190 if (size > extradata_end - extradata - 8) {
1191 ret = AVERROR_INVALIDDATA;
1192 goto fail;
1193 }
1194 init_get_bits(&gb, extradata + 8, size * 8);
1195
1196 /* 'frame size code' and optional 'width, height' */
1197 frame_size_code = get_bits(&gb, 3);
1198 switch (frame_size_code) {
1199 case 0:
1200 avctx->width = 160;
1201 avctx->height = 120;
1202 break;
1203 case 1:
1204 avctx->width = 128;
1205 avctx->height = 96;
1206 break;
1207 case 2:
1208 avctx->width = 176;
1209 avctx->height = 144;
1210 break;
1211 case 3:
1212 avctx->width = 352;
1213 avctx->height = 288;
1214 break;
1215 case 4:
1216 avctx->width = 704;
1217 avctx->height = 576;
1218 break;
1219 case 5:
1220 avctx->width = 240;
1221 avctx->height = 180;
1222 break;
1223 case 6:
1224 avctx->width = 320;
1225 avctx->height = 240;
1226 break;
1227 case 7:
1228 avctx->width = get_bits(&gb, 12);
1229 avctx->height = get_bits(&gb, 12);
1230 break;
1231 }
1232
1233 s->halfpel_flag = get_bits1(&gb);
1234 s->thirdpel_flag = get_bits1(&gb);
1235
1236 /* unknown fields */
1237 unk0 = get_bits1(&gb);
1238 unk1 = get_bits1(&gb);
1239 unk2 = get_bits1(&gb);
1240 unk3 = get_bits1(&gb);
1241
1242 s->low_delay = get_bits1(&gb);
1243
1244 /* unknown field */
1245 unk4 = get_bits1(&gb);
1246
1247 av_log(avctx, AV_LOG_DEBUG, "Unknown fields %d %d %d %d %d\n",
1248 unk0, unk1, unk2, unk3, unk4);
1249
1250 if (skip_1stop_8data_bits(&gb) < 0) {
1251 ret = AVERROR_INVALIDDATA;
1252 goto fail;
1253 }
1254
1255 s->has_watermark = get_bits1(&gb);
1256 avctx->has_b_frames = !s->low_delay;
1257 if (s->has_watermark) {
1258#if CONFIG_ZLIB
1259 unsigned watermark_width = get_interleaved_ue_golomb(&gb);
1260 unsigned watermark_height = get_interleaved_ue_golomb(&gb);
1261 int u1 = get_interleaved_ue_golomb(&gb);
1262 int u2 = get_bits(&gb, 8);
1263 int u3 = get_bits(&gb, 2);
1264 int u4 = get_interleaved_ue_golomb(&gb);
1265 unsigned long buf_len = watermark_width *
1266 watermark_height * 4;
1267 int offset = get_bits_count(&gb) + 7 >> 3;
1268 uint8_t *buf;
1269
1270 if (watermark_height <= 0 ||
1271 (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
1272 ret = -1;
1273 goto fail;
1274 }
1275
1276 buf = av_malloc(buf_len);
1277 if (!buf) {
1278 ret = AVERROR(ENOMEM);
1279 goto fail;
1280 }
1281 av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
1282 watermark_width, watermark_height);
1283 av_log(avctx, AV_LOG_DEBUG,
1284 "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
1285 u1, u2, u3, u4, offset);
1286 if (uncompress(buf, &buf_len, extradata + 8 + offset,
1287 size - offset) != Z_OK) {
1288 av_log(avctx, AV_LOG_ERROR,
1289 "could not uncompress watermark logo\n");
1290 av_free(buf);
1291 ret = -1;
1292 goto fail;
1293 }
1294 s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
1295 s->watermark_key = s->watermark_key << 16 | s->watermark_key;
1296 av_log(avctx, AV_LOG_DEBUG,
1297 "watermark key %#"PRIx32"\n", s->watermark_key);
1298 av_free(buf);
1299#else
1300 av_log(avctx, AV_LOG_ERROR,
1301 "this svq3 file contains watermark which need zlib support compiled in\n");
1302 ret = -1;
1303 goto fail;
1304#endif
1305 }
1306 }
1307
1308 s->mb_width = (avctx->width + 15) / 16;
1309 s->mb_height = (avctx->height + 15) / 16;
1310 s->mb_stride = s->mb_width + 1;
1311 s->mb_num = s->mb_width * s->mb_height;
1312 s->b_stride = 4 * s->mb_width;
1313 s->h_edge_pos = s->mb_width * 16;
1314 s->v_edge_pos = s->mb_height * 16;
1315
1316 s->intra4x4_pred_mode = av_mallocz(s->mb_stride * 2 * 8);
1317 if (!s->intra4x4_pred_mode)
1318 return AVERROR(ENOMEM);
1319
1320 s->mb2br_xy = av_mallocz(s->mb_stride * (s->mb_height + 1) *
1321 sizeof(*s->mb2br_xy));
1322 if (!s->mb2br_xy)
1323 return AVERROR(ENOMEM);
1324
1325 for (y = 0; y < s->mb_height; y++)
1326 for (x = 0; x < s->mb_width; x++) {
1327 const int mb_xy = x + y * s->mb_stride;
1328
1329 s->mb2br_xy[mb_xy] = 8 * (mb_xy % (2 * s->mb_stride));
1330 }
1331
1332 init_dequant4_coeff_table(s);
1333
1334 return 0;
1335fail:
1336 svq3_decode_end(avctx);
1337 return ret;
1338}
1339
1340static void free_picture(AVCodecContext *avctx, SVQ3Frame *pic)
1341{
1342 int i;
1343 for (i = 0; i < 2; i++) {
1344 av_buffer_unref(&pic->motion_val_buf[i]);
1345 av_buffer_unref(&pic->ref_index_buf[i]);
1346 }
1347 av_buffer_unref(&pic->mb_type_buf);
1348
1349 av_frame_unref(pic->f);
1350}
1351
1352static int get_buffer(AVCodecContext *avctx, SVQ3Frame *pic)
1353{
1354 SVQ3Context *s = avctx->priv_data;
1355 const int big_mb_num = s->mb_stride * (s->mb_height + 1) + 1;
1356 const int mb_array_size = s->mb_stride * s->mb_height;
1357 const int b4_stride = s->mb_width * 4 + 1;
1358 const int b4_array_size = b4_stride * s->mb_height * 4;
1359 int ret;
1360
1361 if (!pic->motion_val_buf[0]) {
1362 int i;
1363
1364 pic->mb_type_buf = av_buffer_allocz((big_mb_num + s->mb_stride) * sizeof(uint32_t));
1365 if (!pic->mb_type_buf)
1366 return AVERROR(ENOMEM);
1367 pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * s->mb_stride + 1;
1368
1369 for (i = 0; i < 2; i++) {
1370 pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1371 pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1372 if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1373 ret = AVERROR(ENOMEM);
1374 goto fail;
1375 }
1376
1377 pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1378 pic->ref_index[i] = pic->ref_index_buf[i]->data;
1379 }
1380 }
1381
1382 ret = ff_get_buffer(avctx, pic->f,
1383 (s->pict_type != AV_PICTURE_TYPE_B) ?
1384 AV_GET_BUFFER_FLAG_REF : 0);
1385 if (ret < 0)
1386 goto fail;
1387
1388 if (!s->edge_emu_buffer) {
1389 s->edge_emu_buffer = av_mallocz_array(pic->f->linesize[0], 17);
1390 if (!s->edge_emu_buffer)
1391 return AVERROR(ENOMEM);
1392 }
1393
1394 return 0;
1395fail:
1396 free_picture(avctx, pic);
1397 return ret;
1398}
1399
1400static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1401 int *got_frame, AVPacket *avpkt)
1402{
1403 SVQ3Context *s = avctx->priv_data;
1404 int buf_size = avpkt->size;
1405 int left;
1406 uint8_t *buf;
1407 int ret, m, i;
1408
1409 /* special case for last picture */
1410 if (buf_size == 0) {
1411 if (s->next_pic->f->data[0] && !s->low_delay && !s->last_frame_output) {
1412 ret = av_frame_ref(data, s->next_pic->f);
1413 if (ret < 0)
1414 return ret;
1415 s->last_frame_output = 1;
1416 *got_frame = 1;
1417 }
1418 return 0;
1419 }
1420
1421 s->mb_x = s->mb_y = s->mb_xy = 0;
1422
1423 if (s->watermark_key) {
1424 av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1425 if (!s->buf)
1426 return AVERROR(ENOMEM);
1427 memcpy(s->buf, avpkt->data, buf_size);
1428 buf = s->buf;
1429 } else {
1430 buf = avpkt->data;
1431 }
1432
1433 ret = init_get_bits(&s->gb, buf, 8 * buf_size);
1434 if (ret < 0)
1435 return ret;
1436
1437 if (svq3_decode_slice_header(avctx))
1438 return -1;
1439
1440 s->pict_type = s->slice_type;
1441
1442 if (s->pict_type != AV_PICTURE_TYPE_B)
1443 FFSWAP(SVQ3Frame*, s->next_pic, s->last_pic);
1444
1445 av_frame_unref(s->cur_pic->f);
1446
1447 /* for skipping the frame */
1448 s->cur_pic->f->pict_type = s->pict_type;
1449 s->cur_pic->f->key_frame = (s->pict_type == AV_PICTURE_TYPE_I);
1450
1451 ret = get_buffer(avctx, s->cur_pic);
1452 if (ret < 0)
1453 return ret;
1454
1455 for (i = 0; i < 16; i++) {
1456 s->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1457 s->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
1458 }
1459 for (i = 0; i < 16; i++) {
1460 s->block_offset[16 + i] =
1461 s->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1462 s->block_offset[48 + 16 + i] =
1463 s->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * s->cur_pic->f->linesize[1] * ((scan8[i] - scan8[0]) >> 3);
1464 }
1465
1466 if (s->pict_type != AV_PICTURE_TYPE_I) {
1467 if (!s->last_pic->f->data[0]) {
1468 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1469 av_frame_unref(s->last_pic->f);
1470 ret = get_buffer(avctx, s->last_pic);
1471 if (ret < 0)
1472 return ret;
1473 memset(s->last_pic->f->data[0], 0, avctx->height * s->last_pic->f->linesize[0]);
1474 memset(s->last_pic->f->data[1], 0x80, (avctx->height / 2) *
1475 s->last_pic->f->linesize[1]);
1476 memset(s->last_pic->f->data[2], 0x80, (avctx->height / 2) *
1477 s->last_pic->f->linesize[2]);
1478 }
1479
1480 if (s->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f->data[0]) {
1481 av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1482 av_frame_unref(s->next_pic->f);
1483 ret = get_buffer(avctx, s->next_pic);
1484 if (ret < 0)
1485 return ret;
1486 memset(s->next_pic->f->data[0], 0, avctx->height * s->next_pic->f->linesize[0]);
1487 memset(s->next_pic->f->data[1], 0x80, (avctx->height / 2) *
1488 s->next_pic->f->linesize[1]);
1489 memset(s->next_pic->f->data[2], 0x80, (avctx->height / 2) *
1490 s->next_pic->f->linesize[2]);
1491 }
1492 }
1493
1494 if (avctx->debug & FF_DEBUG_PICT_INFO)
1495 av_log(s->avctx, AV_LOG_DEBUG,
1496 "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1497 av_get_picture_type_char(s->pict_type),
1498 s->halfpel_flag, s->thirdpel_flag,
1499 s->adaptive_quant, s->qscale, s->slice_num);
1500
1501 if (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B ||
1502 avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I ||
1503 avctx->skip_frame >= AVDISCARD_ALL)
1504 return 0;
1505
1506 if (s->next_p_frame_damaged) {
1507 if (s->pict_type == AV_PICTURE_TYPE_B)
1508 return 0;
1509 else
1510 s->next_p_frame_damaged = 0;
1511 }
1512
1513 if (s->pict_type == AV_PICTURE_TYPE_B) {
1514 s->frame_num_offset = s->slice_num - s->prev_frame_num;
1515
1516 if (s->frame_num_offset < 0)
1517 s->frame_num_offset += 256;
1518 if (s->frame_num_offset == 0 ||
1519 s->frame_num_offset >= s->prev_frame_num_offset) {
1520 av_log(s->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1521 return -1;
1522 }
1523 } else {
1524 s->prev_frame_num = s->frame_num;
1525 s->frame_num = s->slice_num;
1526 s->prev_frame_num_offset = s->frame_num - s->prev_frame_num;
1527
1528 if (s->prev_frame_num_offset < 0)
1529 s->prev_frame_num_offset += 256;
1530 }
1531
1532 for (m = 0; m < 2; m++) {
1533 int i;
1534 for (i = 0; i < 4; i++) {
1535 int j;
1536 for (j = -1; j < 4; j++)
1537 s->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1538 if (i < 3)
1539 s->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1540 }
1541 }
1542
1543 for (s->mb_y = 0; s->mb_y < s->mb_height; s->mb_y++) {
1544 for (s->mb_x = 0; s->mb_x < s->mb_width; s->mb_x++) {
1545 unsigned mb_type;
1546 s->mb_xy = s->mb_x + s->mb_y * s->mb_stride;
1547
1548 if ((get_bits_left(&s->gb_slice)) <= 7) {
1549 if (((get_bits_count(&s->gb_slice) & 7) == 0 ||
1550 show_bits(&s->gb_slice, get_bits_left(&s->gb_slice) & 7) == 0)) {
1551
1552 if (svq3_decode_slice_header(avctx))
1553 return -1;
1554 }
1555 if (s->slice_type != s->pict_type) {
1556 avpriv_request_sample(avctx, "non constant slice type");
1557 }
1558 /* TODO: support s->mb_skip_run */
1559 }
1560
1561 mb_type = get_interleaved_ue_golomb(&s->gb_slice);
1562
1563 if (s->pict_type == AV_PICTURE_TYPE_I)
1564 mb_type += 8;
1565 else if (s->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1566 mb_type += 4;
1567 if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1568 av_log(s->avctx, AV_LOG_ERROR,
1569 "error while decoding MB %d %d\n", s->mb_x, s->mb_y);
1570 return -1;
1571 }
1572
1573 if (mb_type != 0 || s->cbp)
1574 hl_decode_mb(s);
1575
1576 if (s->pict_type != AV_PICTURE_TYPE_B && !s->low_delay)
1577 s->cur_pic->mb_type[s->mb_x + s->mb_y * s->mb_stride] =
1578 (s->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1579 }
1580
1581 ff_draw_horiz_band(avctx, s->cur_pic->f,
1582 s->last_pic->f->data[0] ? s->last_pic->f : NULL,
1583 16 * s->mb_y, 16, PICT_FRAME, 0,
1584 s->low_delay);
1585 }
1586
1587 left = buf_size*8 - get_bits_count(&s->gb_slice);
1588
1589 if (s->mb_y != s->mb_height || s->mb_x != s->mb_width) {
1590 av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, s->mb_y, s->mb_x, left);
1591 //av_hex_dump(stderr, buf+buf_size-8, 8);
1592 }
1593
1594 if (left < 0) {
1595 av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1596 return -1;
1597 }
1598
1599 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
1600 ret = av_frame_ref(data, s->cur_pic->f);
1601 else if (s->last_pic->f->data[0])
1602 ret = av_frame_ref(data, s->last_pic->f);
1603 if (ret < 0)
1604 return ret;
1605
1606 /* Do not output the last pic after seeking. */
1607 if (s->last_pic->f->data[0] || s->low_delay)
1608 *got_frame = 1;
1609
1610 if (s->pict_type != AV_PICTURE_TYPE_B) {
1611 FFSWAP(SVQ3Frame*, s->cur_pic, s->next_pic);
1612 } else {
1613 av_frame_unref(s->cur_pic->f);
1614 }
1615
1616 return buf_size;
1617}
1618
1619static av_cold int svq3_decode_end(AVCodecContext *avctx)
1620{
1621 SVQ3Context *s = avctx->priv_data;
1622
1623 free_picture(avctx, s->cur_pic);
1624 free_picture(avctx, s->next_pic);
1625 free_picture(avctx, s->last_pic);
1626 av_frame_free(&s->cur_pic->f);
1627 av_frame_free(&s->next_pic->f);
1628 av_frame_free(&s->last_pic->f);
1629 av_freep(&s->cur_pic);
1630 av_freep(&s->next_pic);
1631 av_freep(&s->last_pic);
1632 av_freep(&s->slice_buf);
1633 av_freep(&s->intra4x4_pred_mode);
1634 av_freep(&s->edge_emu_buffer);
1635 av_freep(&s->mb2br_xy);
1636
1637
1638 av_freep(&s->buf);
1639 s->buf_size = 0;
1640
1641 return 0;
1642}
1643
1644AVCodec ff_svq3_decoder = {
1645 .name = "svq3",
1646 .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1647 .type = AVMEDIA_TYPE_VIDEO,
1648 .id = AV_CODEC_ID_SVQ3,
1649 .priv_data_size = sizeof(SVQ3Context),
1650 .init = svq3_decode_init,
1651 .close = svq3_decode_end,
1652 .decode = svq3_decode_frame,
1653 .capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
1654 AV_CODEC_CAP_DR1 |
1655 AV_CODEC_CAP_DELAY,
1656 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1657 AV_PIX_FMT_NONE},
1658};
1659