path: root/libavcodec/truemotion2.c (plain)
blob: 245a32a8d769d2288926a975f992eeda826a64d8
/*
 * Duck/ON2 TrueMotion 2 Decoder
 * Copyright (c) 2005 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Duck TrueMotion2 decoder.
 */

#include <inttypes.h>

#include "avcodec.h"
#include "bswapdsp.h"
#include "bytestream.h"
#include "get_bits.h"
#include "internal.h"

#define TM2_ESCAPE 0x80000000
#define TM2_DELTAS 64

/* Huffman-coded streams of different types of blocks */
enum TM2_STREAMS {
    TM2_C_HI = 0,
    TM2_C_LO,
    TM2_L_HI,
    TM2_L_LO,
    TM2_UPD,
    TM2_MOT,
    TM2_TYPE,
    TM2_NUM_STREAMS
};

/* Block types */
enum TM2_BLOCKS {
    TM2_HI_RES = 0,
    TM2_MED_RES,
    TM2_LOW_RES,
    TM2_NULL_RES,
    TM2_UPDATE,
    TM2_STILL,
    TM2_MOTION
};

typedef struct TM2Context {
    AVCodecContext *avctx;
    AVFrame *pic;

    GetBitContext gb;
    BswapDSPContext bdsp;

    uint8_t *buffer;
    int buffer_size;

    /* TM2 streams */
    int *tokens[TM2_NUM_STREAMS];
    int tok_lens[TM2_NUM_STREAMS];
    int tok_ptrs[TM2_NUM_STREAMS];
    int deltas[TM2_NUM_STREAMS][TM2_DELTAS];
    /* for blocks decoding */
    int D[4];
    int CD[4];
    int *last;
    int *clast;

    /* data for current and previous frame */
    int *Y1_base, *U1_base, *V1_base, *Y2_base, *U2_base, *V2_base;
    int *Y1, *U1, *V1, *Y2, *U2, *V2;
    int y_stride, uv_stride;
    int cur;
} TM2Context;

/**
 * Huffman codes for each of the streams
 */
typedef struct TM2Codes {
    VLC vlc;     ///< table for FFmpeg bitstream reader
    int bits;
    int *recode; ///< table for converting from code indexes to values
    int length;
} TM2Codes;

/**
 * structure for gathering Huffman code information
 */
typedef struct TM2Huff {
    int val_bits;   ///< length of literal
    int max_bits;   ///< maximum length of code
    int min_bits;   ///< minimum length of code
    int nodes;      ///< total number of nodes in tree
    int num;        ///< current number filled
    int max_num;    ///< total number of codes
    int *nums;      ///< literals
    uint32_t *bits; ///< codes
    int *lens;      ///< codelengths
} TM2Huff;

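/*
 * Read a Huffman tree from the bitstream.
 *
 * Each node is prefixed by one flag bit: 0 means a leaf carrying a
 * val_bits-wide literal, 1 means an internal node whose two children are
 * read recursively with the code prefix extended by 0 and 1. A degenerate
 * single-leaf tree gets a code length of 1 instead of 0.
 */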
static int tm2_read_tree(TM2Context *ctx, uint32_t prefix, int length, TM2Huff *huff)
{
    int ret;
    if (length > huff->max_bits) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Tree exceeded its given depth (%i)\n",
               huff->max_bits);
        return AVERROR_INVALIDDATA;
    }

    if (!get_bits1(&ctx->gb)) { /* literal */
        if (length == 0) {
            length = 1;
        }
        if (huff->num >= huff->max_num) {
            av_log(ctx->avctx, AV_LOG_DEBUG, "Too many literals\n");
            return AVERROR_INVALIDDATA;
        }
        huff->nums[huff->num] = get_bits_long(&ctx->gb, huff->val_bits);
        huff->bits[huff->num] = prefix;
        huff->lens[huff->num] = length;
        huff->num++;
        return 0;
    } else { /* non-terminal node */
        if ((ret = tm2_read_tree(ctx, prefix << 1, length + 1, huff)) < 0)
            return ret;
        if ((ret = tm2_read_tree(ctx, (prefix << 1) | 1, length + 1, huff)) < 0)
            return ret;
    }
    return 0;
}

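/*
 * Parse the per-stream Huffman table header (literal width, maximum and
 * minimum code length, node count), read the tree itself and build a VLC
 * table plus the recode[] array that maps VLC indexes back to literals.
 * A tree with n nodes has exactly (n + 1) / 2 leaves, which is the
 * expected number of codes.
 */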
static int tm2_build_huff_table(TM2Context *ctx, TM2Codes *code)
{
    TM2Huff huff;
    int res = 0;

    huff.val_bits = get_bits(&ctx->gb, 5);
    huff.max_bits = get_bits(&ctx->gb, 5);
    huff.min_bits = get_bits(&ctx->gb, 5);
    huff.nodes    = get_bits_long(&ctx->gb, 17);
    huff.num      = 0;

    /* check for correct code parameters */
    if ((huff.val_bits < 1) || (huff.val_bits > 32) ||
        (huff.max_bits < 0) || (huff.max_bits > 25)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect tree parameters - literal "
               "length: %i, max code length: %i\n", huff.val_bits, huff.max_bits);
        return AVERROR_INVALIDDATA;
    }
    if ((huff.nodes <= 0) || (huff.nodes > 0x10000)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of Huffman tree "
               "nodes: %i\n", huff.nodes);
        return AVERROR_INVALIDDATA;
    }
    /* one-node tree */
    if (huff.max_bits == 0)
        huff.max_bits = 1;

    /* allocate space for codes - it is exactly ceil(nodes / 2) entries */
    huff.max_num = (huff.nodes + 1) >> 1;
    huff.nums    = av_calloc(huff.max_num, sizeof(int));
    huff.bits    = av_calloc(huff.max_num, sizeof(uint32_t));
    huff.lens    = av_calloc(huff.max_num, sizeof(int));

    if (!huff.nums || !huff.bits || !huff.lens) {
        res = AVERROR(ENOMEM);
        goto out;
    }

    res = tm2_read_tree(ctx, 0, 0, &huff);

    if (huff.num != huff.max_num) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Got fewer codes than expected: %i of %i\n",
               huff.num, huff.max_num);
        res = AVERROR_INVALIDDATA;
    }

    /* convert codes to vlc_table */
    if (res >= 0) {
        int i;

        res = init_vlc(&code->vlc, huff.max_bits, huff.max_num,
                       huff.lens, sizeof(int), sizeof(int),
                       huff.bits, sizeof(uint32_t), sizeof(uint32_t), 0);
        if (res < 0)
            av_log(ctx->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
        else {
            code->bits   = huff.max_bits;
            code->length = huff.max_num;
            code->recode = av_malloc_array(code->length, sizeof(int));
            if (!code->recode) {
                res = AVERROR(ENOMEM);
                goto out;
            }
            for (i = 0; i < code->length; i++)
                code->recode[i] = huff.nums[i];
        }
    }

out:
    /* free allocated memory */
    av_free(huff.nums);
    av_free(huff.bits);
    av_free(huff.lens);

    return res;
}

static void tm2_free_codes(TM2Codes *code)
{
    av_free(code->recode);
    if (code->vlc.table)
        ff_free_vlc(&code->vlc);
}

static inline int tm2_get_token(GetBitContext *gb, TM2Codes *code)
{
    int val;
    val = get_vlc2(gb, code->vlc.table, code->bits, 1);
    if (val < 0)
        return -1;
    return code->recode[val];
}

#define TM2_OLD_HEADER_MAGIC 0x00000100
#define TM2_NEW_HEADER_MAGIC 0x00000101

static inline int tm2_read_header(TM2Context *ctx, const uint8_t *buf)
{
    uint32_t magic = AV_RL32(buf);

    switch (magic) {
    case TM2_OLD_HEADER_MAGIC:
        avpriv_request_sample(ctx->avctx, "Old TM2 header");
        return 0;
    case TM2_NEW_HEADER_MAGIC:
        return 0;
    default:
        av_log(ctx->avctx, AV_LOG_ERROR, "Not a TM2 header: 0x%08"PRIX32"\n",
               magic);
        return AVERROR_INVALIDDATA;
    }
}

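/*
 * Read the delta table for one stream: a 9-bit count d (1..TM2_DELTAS)
 * followed by a 5-bit width mb, then d two's-complement values of mb bits
 * each. The remaining table entries are zeroed.
 */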
static int tm2_read_deltas(TM2Context *ctx, int stream_id)
{
    int d, mb;
    int i, v;

    d  = get_bits(&ctx->gb, 9);
    mb = get_bits(&ctx->gb, 5);

    av_assert2(mb < 32);
    if ((d < 1) || (d > TM2_DELTAS) || (mb < 1)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect delta table: %i deltas x %i bits\n", d, mb);
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < d; i++) {
        v = get_bits_long(&ctx->gb, mb);
        if (v & (1 << (mb - 1)))
            ctx->deltas[stream_id][i] = v - (1 << mb);
        else
            ctx->deltas[stream_id][i] = v;
    }
    for (; i < TM2_DELTAS; i++)
        ctx->deltas[stream_id][i] = 0;

    return 0;
}

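/*
 * Parse one Huffman-coded token stream. The layout is: a 32-bit length in
 * dwords, a token count (its low bit flags an optional delta table), the
 * optional delta table, one unused dword (possibly escaped), the Huffman
 * tree, and finally either the coded tokens or, if the last length word is
 * zero, a single code repeated for every token. On success the stream size
 * in bytes (len * 4 + 4) is returned so the caller can advance to the next
 * stream.
 */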
static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size)
{
    int i, ret;
    int skip = 0;
    int len, toks, pos;
    TM2Codes codes;
    GetByteContext gb;

    if (buf_size < 4) {
        av_log(ctx->avctx, AV_LOG_ERROR, "not enough space for len left\n");
        return AVERROR_INVALIDDATA;
    }

    /* get stream length in dwords */
    bytestream2_init(&gb, buf, buf_size);
    len  = bytestream2_get_be32(&gb);
    skip = len * 4 + 4;

    if (len == 0)
        return 4;

    if (len >= INT_MAX / 4 - 1 || len < 0 || skip > buf_size) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Error, invalid stream size.\n");
        return AVERROR_INVALIDDATA;
    }

    toks = bytestream2_get_be32(&gb);
    if (toks & 1) {
        len = bytestream2_get_be32(&gb);
        if (len == TM2_ESCAPE) {
            len = bytestream2_get_be32(&gb);
        }
        if (len > 0) {
            pos = bytestream2_tell(&gb);
            if (skip <= pos)
                return AVERROR_INVALIDDATA;
            init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
            if ((ret = tm2_read_deltas(ctx, stream_id)) < 0)
                return ret;
            bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
        }
    }
    /* skip unused fields */
    len = bytestream2_get_be32(&gb);
    if (len == TM2_ESCAPE) { /* some unknown length - could be escaped too */
        bytestream2_skip(&gb, 8); /* unused by decoder */
    } else {
        bytestream2_skip(&gb, 4); /* unused by decoder */
    }

    pos = bytestream2_tell(&gb);
    if (skip <= pos)
        return AVERROR_INVALIDDATA;
    init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
    if ((ret = tm2_build_huff_table(ctx, &codes)) < 0)
        return ret;
    bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);

    toks >>= 1;
    /* check if we have sane number of tokens */
    if ((toks < 0) || (toks > 0xFFFFFF)) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    ret = av_reallocp_array(&ctx->tokens[stream_id], toks, sizeof(int));
    if (ret < 0) {
        ctx->tok_lens[stream_id] = 0;
        goto end;
    }
    ctx->tok_lens[stream_id] = toks;
    len = bytestream2_get_be32(&gb);
    if (len > 0) {
        pos = bytestream2_tell(&gb);
        if (skip <= pos) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }
        init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
        for (i = 0; i < toks; i++) {
            if (get_bits_left(&ctx->gb) <= 0) {
                av_log(ctx->avctx, AV_LOG_ERROR, "Incorrect number of tokens: %i\n", toks);
                ret = AVERROR_INVALIDDATA;
                goto end;
            }
            ctx->tokens[stream_id][i] = tm2_get_token(&ctx->gb, &codes);
            if ((stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) ||
                ctx->tokens[stream_id][i] < 0) {
                av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
                       ctx->tokens[stream_id][i], stream_id, i);
                ret = AVERROR_INVALIDDATA;
                goto end;
            }
        }
    } else {
        for (i = 0; i < toks; i++) {
            ctx->tokens[stream_id][i] = codes.recode[0];
            if (stream_id <= TM2_MOT && ctx->tokens[stream_id][i] >= TM2_DELTAS) {
                av_log(ctx->avctx, AV_LOG_ERROR, "Invalid delta token index %d for type %d, n=%d\n",
                       ctx->tokens[stream_id][i], stream_id, i);
                ret = AVERROR_INVALIDDATA;
                goto end;
            }
        }
    }

    ret = skip;

end:
    tm2_free_codes(&codes);
    return ret;
}

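/*
 * Fetch the next token from the given stream. For the delta-coded streams
 * (TM2_C_HI .. TM2_MOT) the token is an index into that stream's delta
 * table; for TM2_TYPE it is returned as-is. Out-of-bounds reads return 0.
 */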
static inline int GET_TOK(TM2Context *ctx, int type)
{
    if (ctx->tok_ptrs[type] >= ctx->tok_lens[type]) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Read token from stream %i out of bounds (%i>=%i)\n", type, ctx->tok_ptrs[type], ctx->tok_lens[type]);
        return 0;
    }
    if (type <= TM2_MOT) {
        if (ctx->tokens[type][ctx->tok_ptrs[type]] >= TM2_DELTAS) {
            av_log(ctx->avctx, AV_LOG_ERROR, "token %d is too large\n", ctx->tokens[type][ctx->tok_ptrs[type]]);
            return 0;
        }
        return ctx->deltas[type][ctx->tokens[type][ctx->tok_ptrs[type]++]];
    }
    return ctx->tokens[type][ctx->tok_ptrs[type]++];
}

/* blocks decoding routines */

/* common Y, U, V pointers initialisation */
#define TM2_INIT_POINTERS() \
    int *last, *clast; \
    int *Y, *U, *V; \
    int Ystride, Ustride, Vstride; \
\
    Ystride = ctx->y_stride; \
    Vstride = ctx->uv_stride; \
    Ustride = ctx->uv_stride; \
    Y = (ctx->cur ? ctx->Y2 : ctx->Y1) + by * 4 * Ystride + bx * 4; \
    V = (ctx->cur ? ctx->V2 : ctx->V1) + by * 2 * Vstride + bx * 2; \
    U = (ctx->cur ? ctx->U2 : ctx->U1) + by * 2 * Ustride + bx * 2; \
    last  = ctx->last  + bx * 4; \
    clast = ctx->clast + bx * 4;

#define TM2_INIT_POINTERS_2() \
    int *Yo, *Uo, *Vo; \
    int oYstride, oUstride, oVstride; \
\
    TM2_INIT_POINTERS(); \
    oYstride = Ystride; \
    oVstride = Vstride; \
    oUstride = Ustride; \
    Yo = (ctx->cur ? ctx->Y1 : ctx->Y2) + by * 4 * oYstride + bx * 4; \
    Vo = (ctx->cur ? ctx->V1 : ctx->V2) + by * 2 * oVstride + bx * 2; \
    Uo = (ctx->cur ? ctx->U1 : ctx->U2) + by * 2 * oUstride + bx * 2;

/* recalculate last and delta values for next blocks */
#define TM2_RECALC_BLOCK(CHR, stride, last, CD) { \
    CD[0] = CHR[1] - last[1]; \
    CD[1] = (int)CHR[stride + 1] - (int)CHR[1]; \
    last[0] = (int)CHR[stride + 0]; \
    last[1] = (int)CHR[stride + 1]; }

/* common operations - add deltas to 4x4 block of luma or 2x2 blocks of chroma */
static inline void tm2_apply_deltas(TM2Context *ctx, int *Y, int stride, int *deltas, int *last)
{
    int ct, d;
    int i, j;

    for (j = 0; j < 4; j++) {
        ct = ctx->D[j];
        for (i = 0; i < 4; i++) {
            d = deltas[i + j * 4];
            ct += d;
            last[i] += ct;
            Y[i] = av_clip_uint8(last[i]);
        }
        Y += stride;
        ctx->D[j] = ct;
    }
}

static inline void tm2_high_chroma(int *data, int stride, int *last, int *CD, int *deltas)
{
    int i, j;
    for (j = 0; j < 2; j++) {
        for (i = 0; i < 2; i++) {
            CD[j] += deltas[i + j * 2];
            last[i] += CD[j];
            data[i] = last[i];
        }
        data += stride;
    }
}

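/*
 * Low-resolution chroma: the callers supply at most one coded delta for the
 * 2x2 block, so the predictor for the first column is reconstructed from the
 * right column of the block to the left (clast[-3], zero for the first block
 * in a row) and the accumulated column deltas are redistributed before the
 * regular high-resolution pass runs.
 */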
static inline void tm2_low_chroma(int *data, int stride, int *clast, int *CD, int *deltas, int bx)
{
    int t;
    int l;
    int prev;

    if (bx > 0)
        prev = clast[-3];
    else
        prev = 0;
    t        = (CD[0] + CD[1]) >> 1;
    l        = (prev - CD[0] - CD[1] + clast[1]) >> 1;
    CD[1]    = CD[0] + CD[1] - t;
    CD[0]    = t;
    clast[0] = l;

    tm2_high_chroma(data, stride, clast, CD, deltas);
}

static inline void tm2_hi_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* hi-res chroma */
    for (i = 0; i < 4; i++) {
        deltas[i]     = GET_TOK(ctx, TM2_C_HI);
        deltas[i + 4] = GET_TOK(ctx, TM2_C_HI);
    }
    tm2_high_chroma(U, Ustride, clast, ctx->CD, deltas);
    tm2_high_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas + 4);

    /* hi-res luma */
    for (i = 0; i < 16; i++)
        deltas[i] = GET_TOK(ctx, TM2_L_HI);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

static inline void tm2_med_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* low-res chroma */
    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* hi-res luma */
    for (i = 0; i < 16; i++)
        deltas[i] = GET_TOK(ctx, TM2_L_HI);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

static inline void tm2_low_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int t1, t2;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* low-res chroma */
    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = GET_TOK(ctx, TM2_C_LO);
    deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* low-res luma */
    for (i = 0; i < 16; i++)
        deltas[i] = 0;

    deltas[ 0] = GET_TOK(ctx, TM2_L_LO);
    deltas[ 2] = GET_TOK(ctx, TM2_L_LO);
    deltas[ 8] = GET_TOK(ctx, TM2_L_LO);
    deltas[10] = GET_TOK(ctx, TM2_L_LO);

    if (bx > 0)
        last[0] = (last[-1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3] + last[1]) >> 1;
    else
        last[0] = (last[1] - ctx->D[0] - ctx->D[1] - ctx->D[2] - ctx->D[3]) >> 1;
    last[2] = (last[1] + last[3]) >> 1;

    t1 = ctx->D[0] + ctx->D[1];
    ctx->D[0] = t1 >> 1;
    ctx->D[1] = t1 - (t1 >> 1);
    t2 = ctx->D[2] + ctx->D[3];
    ctx->D[2] = t2 >> 1;
    ctx->D[3] = t2 - (t2 >> 1);

    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

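/*
 * Null-resolution block: no tokens are consumed at all. Chroma is predicted
 * with zero deltas and the four luma predictors are re-interpolated between
 * the left neighbour and the block's previous right edge before the zero
 * deltas are applied.
 */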
static inline void tm2_null_res_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i;
    int ct;
    int left, right, diff;
    int deltas[16];
    TM2_INIT_POINTERS();

    /* null chroma */
    deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(U, Ustride, clast, ctx->CD, deltas, bx);

    deltas[0] = deltas[1] = deltas[2] = deltas[3] = 0;
    tm2_low_chroma(V, Vstride, clast + 2, ctx->CD + 2, deltas, bx);

    /* null luma */
    for (i = 0; i < 16; i++)
        deltas[i] = 0;

    ct = ctx->D[0] + ctx->D[1] + ctx->D[2] + ctx->D[3];

    if (bx > 0)
        left = last[-1] - ct;
    else
        left = 0;

    right   = last[3];
    diff    = right - left;
    last[0] = left + (diff >> 2);
    last[1] = left + (diff >> 1);
    last[2] = right - (diff >> 2);
    last[3] = right;
    {
        int tp = left;

        ctx->D[0] = (tp + (ct >> 2)) - left;
        left     += ctx->D[0];
        ctx->D[1] = (tp + (ct >> 1)) - left;
        left     += ctx->D[1];
        ctx->D[2] = ((tp + ct) - (ct >> 2)) - left;
        left     += ctx->D[2];
        ctx->D[3] = (tp + ct) - left;
    }
    tm2_apply_deltas(ctx, Y, Ystride, deltas, last);
}

static inline void tm2_still_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    TM2_INIT_POINTERS_2();

    /* update chroma */
    for (j = 0; j < 2; j++) {
        for (i = 0; i < 2; i++) {
            U[i] = Uo[i];
            V[i] = Vo[i];
        }
        U  += Ustride;
        V  += Vstride;
        Uo += oUstride;
        Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* update deltas */
    ctx->D[0] = Yo[3] - last[3];
    ctx->D[1] = Yo[3 + oYstride] - Yo[3];
    ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
    ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];

    for (j = 0; j < 4; j++) {
        for (i = 0; i < 4; i++) {
            Y[i]    = Yo[i];
            last[i] = Yo[i];
        }
        Y  += Ystride;
        Yo += oYstride;
    }
}

static inline void tm2_update_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    int d;
    TM2_INIT_POINTERS_2();

    /* update chroma */
    for (j = 0; j < 2; j++) {
        for (i = 0; i < 2; i++) {
            U[i] = Uo[i] + GET_TOK(ctx, TM2_UPD);
            V[i] = Vo[i] + GET_TOK(ctx, TM2_UPD);
        }
        U  += Ustride;
        V  += Vstride;
        Uo += oUstride;
        Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* update deltas */
    ctx->D[0] = Yo[3] - last[3];
    ctx->D[1] = Yo[3 + oYstride] - Yo[3];
    ctx->D[2] = Yo[3 + oYstride * 2] - Yo[3 + oYstride];
    ctx->D[3] = Yo[3 + oYstride * 3] - Yo[3 + oYstride * 2];

    for (j = 0; j < 4; j++) {
        d = last[3];
        for (i = 0; i < 4; i++) {
            Y[i]    = Yo[i] + GET_TOK(ctx, TM2_UPD);
            last[i] = Y[i];
        }
        ctx->D[j] = last[3] - d;
        Y  += Ystride;
        Yo += oYstride;
    }
}

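/*
 * Motion block: two TM2_MOT tokens give a motion vector into the previous
 * frame; the referenced 4x4 luma / 2x2 chroma area is copied and the running
 * predictors are recomputed from the copied pixels.
 */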
static inline void tm2_motion_block(TM2Context *ctx, AVFrame *pic, int bx, int by)
{
    int i, j;
    int mx, my;
    TM2_INIT_POINTERS_2();

    mx = GET_TOK(ctx, TM2_MOT);
    my = GET_TOK(ctx, TM2_MOT);
    mx = av_clip(mx, -(bx * 4 + 4), ctx->avctx->width  - bx * 4);
    my = av_clip(my, -(by * 4 + 4), ctx->avctx->height - by * 4);

    if (4 * bx + mx < 0 || 4 * by + my < 0 ||
        4 * bx + mx + 4 > ctx->avctx->width ||
        4 * by + my + 4 > ctx->avctx->height) {
        av_log(ctx->avctx, AV_LOG_ERROR, "MV out of picture\n");
        return;
    }

    Yo += my * oYstride + mx;
    Uo += (my >> 1) * oUstride + (mx >> 1);
    Vo += (my >> 1) * oVstride + (mx >> 1);

    /* copy chroma */
    for (j = 0; j < 2; j++) {
        for (i = 0; i < 2; i++) {
            U[i] = Uo[i];
            V[i] = Vo[i];
        }
        U  += Ustride;
        V  += Vstride;
        Uo += oUstride;
        Vo += oVstride;
    }
    U -= Ustride * 2;
    V -= Vstride * 2;
    TM2_RECALC_BLOCK(U, Ustride, clast, ctx->CD);
    TM2_RECALC_BLOCK(V, Vstride, (clast + 2), (ctx->CD + 2));

    /* copy luma */
    for (j = 0; j < 4; j++) {
        for (i = 0; i < 4; i++) {
            Y[i] = Yo[i];
        }
        Y  += Ystride;
        Yo += oYstride;
    }
    /* calculate deltas */
    Y -= Ystride * 4;
    ctx->D[0] = Y[3] - last[3];
    ctx->D[1] = Y[3 + Ystride] - Y[3];
    ctx->D[2] = Y[3 + Ystride * 2] - Y[3 + Ystride];
    ctx->D[3] = Y[3 + Ystride * 3] - Y[3 + Ystride * 2];
    for (i = 0; i < 4; i++)
        last[i] = Y[i + Ystride * 3];
}

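/*
 * Decode all 4x4 blocks of a frame in raster order, dispatching on the
 * TM2_TYPE token of each block, then convert the planar int buffers to the
 * packed output (stored as y + v, y, y + u) and extend the plane edges for
 * the next frame's predictors. Returns 1 if the frame used only intra block
 * types (i.e. it is a keyframe), 0 otherwise.
 */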
static int tm2_decode_blocks(TM2Context *ctx, AVFrame *p)
{
    int i, j;
    int w = ctx->avctx->width, h = ctx->avctx->height, bw = w >> 2, bh = h >> 2, cw = w >> 1;
    int type;
    int keyframe = 1;
    int *Y, *U, *V;
    uint8_t *dst;

    for (i = 0; i < TM2_NUM_STREAMS; i++)
        ctx->tok_ptrs[i] = 0;

    if (ctx->tok_lens[TM2_TYPE] < bw * bh) {
        av_log(ctx->avctx, AV_LOG_ERROR, "Got %i tokens for %i blocks\n",
               ctx->tok_lens[TM2_TYPE], bw * bh);
        return AVERROR_INVALIDDATA;
    }

    memset(ctx->last,  0, 4 * bw * sizeof(int));
    memset(ctx->clast, 0, 4 * bw * sizeof(int));

    for (j = 0; j < bh; j++) {
        memset(ctx->D,  0, 4 * sizeof(int));
        memset(ctx->CD, 0, 4 * sizeof(int));
        for (i = 0; i < bw; i++) {
            type = GET_TOK(ctx, TM2_TYPE);
            switch (type) {
            case TM2_HI_RES:
                tm2_hi_res_block(ctx, p, i, j);
                break;
            case TM2_MED_RES:
                tm2_med_res_block(ctx, p, i, j);
                break;
            case TM2_LOW_RES:
                tm2_low_res_block(ctx, p, i, j);
                break;
            case TM2_NULL_RES:
                tm2_null_res_block(ctx, p, i, j);
                break;
            case TM2_UPDATE:
                tm2_update_block(ctx, p, i, j);
                keyframe = 0;
                break;
            case TM2_STILL:
                tm2_still_block(ctx, p, i, j);
                keyframe = 0;
                break;
            case TM2_MOTION:
                tm2_motion_block(ctx, p, i, j);
                keyframe = 0;
                break;
            default:
                av_log(ctx->avctx, AV_LOG_ERROR, "Skipping unknown block type %i\n", type);
            }
        }
    }

    /* copy data from our buffer to AVFrame */
    Y   = (ctx->cur ? ctx->Y2 : ctx->Y1);
    U   = (ctx->cur ? ctx->U2 : ctx->U1);
    V   = (ctx->cur ? ctx->V2 : ctx->V1);
    dst = p->data[0];
    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            int y = Y[i], u = U[i >> 1], v = V[i >> 1];
            dst[3 * i + 0] = av_clip_uint8(y + v);
            dst[3 * i + 1] = av_clip_uint8(y);
            dst[3 * i + 2] = av_clip_uint8(y + u);
        }

        /* horizontal edge extension */
        Y[-4]    = Y[-3]    = Y[-2]    = Y[-1] = Y[0];
        Y[w + 3] = Y[w + 2] = Y[w + 1] = Y[w]  = Y[w - 1];

        /* vertical edge extension */
        if (j == 0) {
            memcpy(Y - 4 - 1 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 - 2 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 - 3 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 - 4 * ctx->y_stride, Y - 4, ctx->y_stride);
        } else if (j == h - 1) {
            memcpy(Y - 4 + 1 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 + 2 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 + 3 * ctx->y_stride, Y - 4, ctx->y_stride);
            memcpy(Y - 4 + 4 * ctx->y_stride, Y - 4, ctx->y_stride);
        }

        Y += ctx->y_stride;
        if (j & 1) {
            /* horizontal edge extension */
            U[-2] = U[-1] = U[0];
            V[-2] = V[-1] = V[0];
            U[cw + 1] = U[cw] = U[cw - 1];
            V[cw + 1] = V[cw] = V[cw - 1];

            /* vertical edge extension */
            if (j == 1) {
                memcpy(U - 2 - 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
                memcpy(V - 2 - 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
                memcpy(U - 2 - 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
                memcpy(V - 2 - 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
            } else if (j == h - 1) {
                memcpy(U - 2 + 1 * ctx->uv_stride, U - 2, ctx->uv_stride);
                memcpy(V - 2 + 1 * ctx->uv_stride, V - 2, ctx->uv_stride);
                memcpy(U - 2 + 2 * ctx->uv_stride, U - 2, ctx->uv_stride);
                memcpy(V - 2 + 2 * ctx->uv_stride, V - 2, ctx->uv_stride);
            }

            U += ctx->uv_stride;
            V += ctx->uv_stride;
        }
        dst += p->linesize[0];
    }

    return keyframe;
}

static const int tm2_stream_order[TM2_NUM_STREAMS] = {
    TM2_C_HI, TM2_C_LO, TM2_L_HI, TM2_L_LO, TM2_UPD, TM2_MOT, TM2_TYPE
};

#define TM2_HEADER_SIZE 40

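/*
 * Decode a packet: byte-swap the input into a scratch buffer, check the
 * stream magic (the rest of the 40-byte header is skipped), parse the seven
 * token streams in tm2_stream_order and reconstruct the frame with
 * tm2_decode_blocks. The two internal frame buffers are swapped after every
 * call.
 */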
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    TM2Context * const l = avctx->priv_data;
    const uint8_t *buf   = avpkt->data;
    int buf_size         = avpkt->size & ~3;
    AVFrame * const p    = l->pic;
    int offset           = TM2_HEADER_SIZE;
    int i, t, ret;

    av_fast_padded_malloc(&l->buffer, &l->buffer_size, buf_size);
    if (!l->buffer) {
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
        return AVERROR(ENOMEM);
    }

    if ((ret = ff_reget_buffer(avctx, p)) < 0)
        return ret;

    l->bdsp.bswap_buf((uint32_t *) l->buffer, (const uint32_t *) buf,
                      buf_size >> 2);

    if ((ret = tm2_read_header(l, l->buffer)) < 0) {
        return ret;
    }

    for (i = 0; i < TM2_NUM_STREAMS; i++) {
        if (offset >= buf_size) {
            av_log(avctx, AV_LOG_ERROR, "no space for tm2_read_stream\n");
            return AVERROR_INVALIDDATA;
        }

        t = tm2_read_stream(l, l->buffer + offset, tm2_stream_order[i],
                            buf_size - offset);
        if (t < 0) {
            int j = tm2_stream_order[i];
            memset(l->tokens[j], 0, sizeof(**l->tokens) * l->tok_lens[j]);
            return t;
        }
        offset += t;
    }
    p->key_frame = tm2_decode_blocks(l, p);
    if (p->key_frame)
        p->pict_type = AV_PICTURE_TYPE_I;
    else
        p->pict_type = AV_PICTURE_TYPE_P;

    l->cur = !l->cur;
    *got_frame = 1;
    ret = av_frame_ref(data, l->pic);

    return (ret < 0) ? ret : buf_size;
}

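/*
 * Allocate the per-context buffers: the int-valued Y/U/V planes for the two
 * internal frames (with a 4-pixel luma / 2-pixel chroma border on every
 * side) plus the per-row predictor arrays 'last' and 'clast'.
 */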
static av_cold int decode_init(AVCodecContext *avctx)
{
    TM2Context * const l = avctx->priv_data;
    int i, w = avctx->width, h = avctx->height;

    if ((avctx->width & 3) || (avctx->height & 3)) {
        av_log(avctx, AV_LOG_ERROR, "Width and height must be multiple of 4\n");
        return AVERROR(EINVAL);
    }

    l->avctx       = avctx;
    avctx->pix_fmt = AV_PIX_FMT_BGR24;

    l->pic = av_frame_alloc();
    if (!l->pic)
        return AVERROR(ENOMEM);

    ff_bswapdsp_init(&l->bdsp);

    l->last  = av_malloc_array(w >> 2, 4 * sizeof(*l->last));
    l->clast = av_malloc_array(w >> 2, 4 * sizeof(*l->clast));

    for (i = 0; i < TM2_NUM_STREAMS; i++) {
        l->tokens[i]   = NULL;
        l->tok_lens[i] = 0;
    }

    w += 8;
    h += 8;
    l->Y1_base  = av_calloc(w * h, sizeof(*l->Y1_base));
    l->Y2_base  = av_calloc(w * h, sizeof(*l->Y2_base));
    l->y_stride = w;
    w = (w + 1) >> 1;
    h = (h + 1) >> 1;
    l->U1_base   = av_calloc(w * h, sizeof(*l->U1_base));
    l->V1_base   = av_calloc(w * h, sizeof(*l->V1_base));
    l->U2_base   = av_calloc(w * h, sizeof(*l->U2_base));
    l->V2_base   = av_calloc(w * h, sizeof(*l->V2_base));
    l->uv_stride = w;
    l->cur       = 0;
    if (!l->Y1_base || !l->Y2_base || !l->U1_base ||
        !l->V1_base || !l->U2_base || !l->V2_base ||
        !l->last    || !l->clast) {
        av_freep(&l->Y1_base);
        av_freep(&l->Y2_base);
        av_freep(&l->U1_base);
        av_freep(&l->U2_base);
        av_freep(&l->V1_base);
        av_freep(&l->V2_base);
        av_freep(&l->last);
        av_freep(&l->clast);
        av_frame_free(&l->pic);
        return AVERROR(ENOMEM);
    }
    l->Y1 = l->Y1_base + l->y_stride  * 4 + 4;
    l->Y2 = l->Y2_base + l->y_stride  * 4 + 4;
    l->U1 = l->U1_base + l->uv_stride * 2 + 2;
    l->U2 = l->U2_base + l->uv_stride * 2 + 2;
    l->V1 = l->V1_base + l->uv_stride * 2 + 2;
    l->V2 = l->V2_base + l->uv_stride * 2 + 2;

    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    TM2Context * const l = avctx->priv_data;
    int i;

    av_free(l->last);
    av_free(l->clast);
    for (i = 0; i < TM2_NUM_STREAMS; i++)
        av_freep(&l->tokens[i]);
    if (l->Y1) {
        av_freep(&l->Y1_base);
        av_freep(&l->U1_base);
        av_freep(&l->V1_base);
        av_freep(&l->Y2_base);
        av_freep(&l->U2_base);
        av_freep(&l->V2_base);
    }
    av_freep(&l->buffer);
    l->buffer_size = 0;

    av_frame_free(&l->pic);

    return 0;
}

AVCodec ff_truemotion2_decoder = {
    .name           = "truemotion2",
    .long_name      = NULL_IF_CONFIG_SMALL("Duck TrueMotion 2.0"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_TRUEMOTION2,
    .priv_data_size = sizeof(TM2Context),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};