blob: cab3129f8f387a1b905a07562cf51357c5e93edf
1 | /* |
2 | * Packed Animation File video decoder |
3 | * Copyright (c) 2012 Paul B Mahol |
4 | * |
5 | * This file is part of FFmpeg. |
6 | * |
7 | * FFmpeg is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU Lesser General Public |
9 | * License as published by the Free Software Foundation; either |
10 | * version 2.1 of the License, or (at your option) any later version. |
11 | * |
12 | * FFmpeg is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | * Lesser General Public License for more details. |
16 | * |
17 | * You should have received a copy of the GNU Lesser General Public |
18 | * License along with FFmpeg; if not, write to the Free Software |
19 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
20 | */ |
21 | |
22 | #include "libavutil/imgutils.h" |
23 | |
24 | #include "avcodec.h" |
25 | #include "bytestream.h" |
26 | #include "copy_block.h" |
27 | #include "internal.h" |
28 | |
29 | |
/* For each of the 16 possible 4-bit opcodes, the ordered sequence of
 * primitive operations applied to one 4x4 output block. The values are
 * the 'code' cases handled in decode_0's switch (2-7); 0 terminates a
 * sequence. */
static const uint8_t block_sequences[16][8] = {
    { 0, 0, 0, 0, 0, 0, 0, 0 },
    { 2, 0, 0, 0, 0, 0, 0, 0 },
    { 5, 7, 0, 0, 0, 0, 0, 0 },
    { 5, 0, 0, 0, 0, 0, 0, 0 },
    { 6, 0, 0, 0, 0, 0, 0, 0 },
    { 5, 7, 5, 7, 0, 0, 0, 0 },
    { 5, 7, 5, 0, 0, 0, 0, 0 },
    { 5, 7, 6, 0, 0, 0, 0, 0 },
    { 5, 5, 0, 0, 0, 0, 0, 0 },
    { 3, 0, 0, 0, 0, 0, 0, 0 },
    { 6, 6, 0, 0, 0, 0, 0, 0 },
    { 2, 4, 0, 0, 0, 0, 0, 0 },
    { 2, 4, 5, 7, 0, 0, 0, 0 },
    { 2, 4, 5, 0, 0, 0, 0, 0 },
    { 2, 4, 6, 0, 0, 0, 0, 0 },
    { 2, 4, 5, 7, 5, 7, 0, 0 },
};
48 | |
typedef struct PAFVideoDecContext {
    AVFrame *pic;      // reusable output frame (PAL8)
    GetByteContext gb; // bytestream reader over the current packet

    int width;         // coded width, multiple of 4
    int height;        // coded height, multiple of 4

    int current_frame; // index (0-3) of the page currently decoded into
    uint8_t *frame[4]; // four 8-bit work pages, also used as references
    int frame_size;    // allocated size of each page (height padded to 256)
    int video_size;    // width * height, the visible pixel count

    uint8_t *opcodes;  // NOTE(review): not referenced anywhere in this file — confirm before removing
} PAFVideoDecContext;
63 | |
64 | static av_cold int paf_video_close(AVCodecContext *avctx) |
65 | { |
66 | PAFVideoDecContext *c = avctx->priv_data; |
67 | int i; |
68 | |
69 | av_frame_free(&c->pic); |
70 | |
71 | for (i = 0; i < 4; i++) |
72 | av_freep(&c->frame[i]); |
73 | |
74 | return 0; |
75 | } |
76 | |
77 | static av_cold int paf_video_init(AVCodecContext *avctx) |
78 | { |
79 | PAFVideoDecContext *c = avctx->priv_data; |
80 | int i; |
81 | |
82 | c->width = avctx->width; |
83 | c->height = avctx->height; |
84 | |
85 | if (avctx->height & 3 || avctx->width & 3) { |
86 | av_log(avctx, AV_LOG_ERROR, |
87 | "width %d and height %d must be multiplie of 4.\n", |
88 | avctx->width, avctx->height); |
89 | return AVERROR_INVALIDDATA; |
90 | } |
91 | |
92 | avctx->pix_fmt = AV_PIX_FMT_PAL8; |
93 | |
94 | c->pic = av_frame_alloc(); |
95 | if (!c->pic) |
96 | return AVERROR(ENOMEM); |
97 | |
98 | c->frame_size = avctx->width * FFALIGN(avctx->height, 256); |
99 | c->video_size = avctx->width * avctx->height; |
100 | for (i = 0; i < 4; i++) { |
101 | c->frame[i] = av_mallocz(c->frame_size); |
102 | if (!c->frame[i]) { |
103 | paf_video_close(avctx); |
104 | return AVERROR(ENOMEM); |
105 | } |
106 | } |
107 | |
108 | return 0; |
109 | } |
110 | |
111 | static void read4x4block(PAFVideoDecContext *c, uint8_t *dst, int width) |
112 | { |
113 | int i; |
114 | |
115 | for (i = 0; i < 4; i++) { |
116 | bytestream2_get_buffer(&c->gb, dst, 4); |
117 | dst += width; |
118 | } |
119 | } |
120 | |
/* Paint color into a 2x4 pixel area under the control of an 8-bit mask:
 * the upper nibble selects pixels in the top row (bit 7 = leftmost),
 * the lower nibble selects pixels in the row below (bit 3 = leftmost). */
static void copy_color_mask(uint8_t *dst, int width, uint8_t mask, uint8_t color)
{
    int bit;

    for (bit = 0; bit < 4; bit++) {
        if (mask & (0x80 >> bit))
            dst[bit] = color;
        if (mask & (0x08 >> bit))
            dst[width + bit] = color;
    }
}
132 | |
/* Copy pixels from src into a 2x4 area under an 8-bit mask: the upper
 * nibble gates the top row (bit 7 = leftmost), the lower nibble gates
 * the row below (bit 3 = leftmost). Both buffers use the same stride. */
static void copy_src_mask(uint8_t *dst, int width, uint8_t mask, const uint8_t *src)
{
    int bit;

    for (bit = 0; bit < 4; bit++) {
        if (mask & (0x80 >> bit))
            dst[bit] = src[bit];
        if (mask & (0x08 >> bit))
            dst[width + bit] = src[width + bit];
    }
}
144 | |
145 | static void set_src_position(PAFVideoDecContext *c, |
146 | const uint8_t **p, |
147 | const uint8_t **pend) |
148 | { |
149 | int val = bytestream2_get_be16(&c->gb); |
150 | int page = val >> 14; |
151 | int x = (val & 0x7F); |
152 | int y = ((val >> 7) & 0x7F); |
153 | |
154 | *p = c->frame[page] + x * 2 + y * 2 * c->width; |
155 | *pend = c->frame[page] + c->frame_size; |
156 | } |
157 | |
158 | static int decode_0(PAFVideoDecContext *c, uint8_t *pkt, uint8_t code) |
159 | { |
160 | uint32_t opcode_size, offset; |
161 | uint8_t *dst, *dend, mask = 0, color = 0; |
162 | const uint8_t *src, *send, *opcodes; |
163 | int i, j, op = 0; |
164 | |
165 | i = bytestream2_get_byte(&c->gb); |
166 | if (i) { |
167 | if (code & 0x10) { |
168 | int align; |
169 | |
170 | align = bytestream2_tell(&c->gb) & 3; |
171 | if (align) |
172 | bytestream2_skip(&c->gb, 4 - align); |
173 | } |
174 | do { |
175 | int page, val, x, y; |
176 | val = bytestream2_get_be16(&c->gb); |
177 | page = val >> 14; |
178 | x = (val & 0x7F) * 2; |
179 | y = ((val >> 7) & 0x7F) * 2; |
180 | dst = c->frame[page] + x + y * c->width; |
181 | dend = c->frame[page] + c->frame_size; |
182 | offset = (x & 0x7F) * 2; |
183 | j = bytestream2_get_le16(&c->gb) + offset; |
184 | do { |
185 | offset++; |
186 | if (dst + 3 * c->width + 4 > dend) |
187 | return AVERROR_INVALIDDATA; |
188 | read4x4block(c, dst, c->width); |
189 | if ((offset & 0x3F) == 0) |
190 | dst += c->width * 3; |
191 | dst += 4; |
192 | } while (offset < j); |
193 | } while (--i); |
194 | } |
195 | |
196 | dst = c->frame[c->current_frame]; |
197 | dend = c->frame[c->current_frame] + c->frame_size; |
198 | do { |
199 | set_src_position(c, &src, &send); |
200 | if ((src + 3 * c->width + 4 > send) || |
201 | (dst + 3 * c->width + 4 > dend)) |
202 | return AVERROR_INVALIDDATA; |
203 | copy_block4(dst, src, c->width, c->width, 4); |
204 | i++; |
205 | if ((i & 0x3F) == 0) |
206 | dst += c->width * 3; |
207 | dst += 4; |
208 | } while (i < c->video_size / 16); |
209 | |
210 | opcode_size = bytestream2_get_le16(&c->gb); |
211 | bytestream2_skip(&c->gb, 2); |
212 | |
213 | if (bytestream2_get_bytes_left(&c->gb) < opcode_size) |
214 | return AVERROR_INVALIDDATA; |
215 | |
216 | opcodes = pkt + bytestream2_tell(&c->gb); |
217 | bytestream2_skipu(&c->gb, opcode_size); |
218 | |
219 | dst = c->frame[c->current_frame]; |
220 | |
221 | for (i = 0; i < c->height; i += 4, dst += c->width * 3) |
222 | for (j = 0; j < c->width; j += 4, dst += 4) { |
223 | int opcode, k = 0; |
224 | if (op > opcode_size) |
225 | return AVERROR_INVALIDDATA; |
226 | if (j & 4) { |
227 | opcode = opcodes[op] & 15; |
228 | op++; |
229 | } else { |
230 | opcode = opcodes[op] >> 4; |
231 | } |
232 | |
233 | while (block_sequences[opcode][k]) { |
234 | offset = c->width * 2; |
235 | code = block_sequences[opcode][k++]; |
236 | |
237 | switch (code) { |
238 | case 2: |
239 | offset = 0; |
240 | case 3: |
241 | color = bytestream2_get_byte(&c->gb); |
242 | case 4: |
243 | mask = bytestream2_get_byte(&c->gb); |
244 | copy_color_mask(dst + offset, c->width, mask, color); |
245 | break; |
246 | case 5: |
247 | offset = 0; |
248 | case 6: |
249 | set_src_position(c, &src, &send); |
250 | case 7: |
251 | if (src + offset + c->width + 4 > send) |
252 | return AVERROR_INVALIDDATA; |
253 | mask = bytestream2_get_byte(&c->gb); |
254 | copy_src_mask(dst + offset, c->width, mask, src + offset); |
255 | break; |
256 | } |
257 | } |
258 | } |
259 | |
260 | return 0; |
261 | } |
262 | |
/*
 * Decode one PAF video packet into an AVFrame.
 *
 * The first packet byte is a header: bit 5 marks a keyframe (all pages
 * and the palette are cleared), bit 6 marks a palette update, and the
 * low nibble selects the coding method for the pixel data.
 *
 * Returns the number of bytes consumed (pkt->size) on success or a
 * negative AVERROR code on failure.
 */
static int paf_video_decode(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *pkt)
{
    PAFVideoDecContext *c = avctx->priv_data;
    uint8_t code, *dst, *end;
    int i, frame, ret;

    if ((ret = ff_reget_buffer(avctx, c->pic)) < 0)
        return ret;

    bytestream2_init(&c->gb, pkt->data, pkt->size);

    code = bytestream2_get_byte(&c->gb);
    if (code & 0x20) { // frame is keyframe
        for (i = 0; i < 4; i++)
            memset(c->frame[i], 0, c->frame_size);

        memset(c->pic->data[1], 0, AVPALETTE_SIZE);
        c->current_frame = 0;
        c->pic->key_frame = 1;
        c->pic->pict_type = AV_PICTURE_TYPE_I;
    } else {
        c->pic->key_frame = 0;
        c->pic->pict_type = AV_PICTURE_TYPE_P;
    }

    if (code & 0x40) { // palette update
        uint32_t *out = (uint32_t *)c->pic->data[1];
        int index, count;

        index = bytestream2_get_byte(&c->gb);
        count = bytestream2_get_byte(&c->gb) + 1;

        if (index + count > 256)
            return AVERROR_INVALIDDATA;
        if (bytestream2_get_bytes_left(&c->gb) < 3 * count)
            return AVERROR_INVALIDDATA;

        out += index;
        for (i = 0; i < count; i++) {
            unsigned r, g, b;

            // Components are 6-bit; expand to 8 bits by replicating
            // the two top bits into the two bottom bits.
            r = bytestream2_get_byteu(&c->gb);
            r = r << 2 | r >> 4;
            g = bytestream2_get_byteu(&c->gb);
            g = g << 2 | g >> 4;
            b = bytestream2_get_byteu(&c->gb);
            b = b << 2 | b >> 4;
            *out++ = (0xFFU << 24) | (r << 16) | (g << 8) | b; // opaque ARGB
        }
        c->pic->palette_has_changed = 1;
    }

    switch (code & 0x0F) {
    case 0:
        /* Block-based motion compensation using 4x4 blocks with either
         * horizontal or vertical vectors; might incorporate VQ as well. */
        if ((ret = decode_0(c, pkt->data, code)) < 0)
            return ret;
        break;
    case 1:
        /* Uncompressed data. This mode specifies that (width * height) bytes
         * should be copied directly from the encoded buffer into the output. */
        dst = c->frame[c->current_frame];
        // possibly chunk length data
        bytestream2_skip(&c->gb, 2);
        if (bytestream2_get_bytes_left(&c->gb) < c->video_size)
            return AVERROR_INVALIDDATA;
        bytestream2_get_bufferu(&c->gb, dst, c->video_size);
        break;
    case 2:
        /* Copy reference frame: Consume the next byte in the stream as the
         * reference frame (which should be 0, 1, 2, or 3, and should not be
         * the same as the current frame number). */
        frame = bytestream2_get_byte(&c->gb);
        if (frame > 3)
            return AVERROR_INVALIDDATA;
        if (frame != c->current_frame)
            memcpy(c->frame[c->current_frame], c->frame[frame], c->frame_size);
        break;
    case 4:
        /* Run length encoding.*/
        dst = c->frame[c->current_frame];
        end = dst + c->video_size;

        bytestream2_skip(&c->gb, 2);

        while (dst < end) {
            int8_t code; // NB: intentionally shadows the header byte; signed for run/copy distinction
            int count;

            // Need at least the code byte plus one payload byte.
            if (bytestream2_get_bytes_left(&c->gb) < 2)
                return AVERROR_INVALIDDATA;

            code = bytestream2_get_byteu(&c->gb);
            count = FFABS(code) + 1;

            if (dst + count > end)
                return AVERROR_INVALIDDATA;
            if (code < 0)
                // negative: run of a single byte repeated count times
                memset(dst, bytestream2_get_byteu(&c->gb), count);
            else
                // non-negative: copy count literal bytes
                bytestream2_get_buffer(&c->gb, dst, count);
            dst += count;
        }
        break;
    default:
        avpriv_request_sample(avctx, "unknown/invalid code");
        return AVERROR_INVALIDDATA;
    }

    // The work page is tightly packed (stride == width); blit it into
    // the output frame, which may have a larger linesize.
    av_image_copy_plane(c->pic->data[0], c->pic->linesize[0],
                        c->frame[c->current_frame], c->width,
                        c->width, c->height);

    // Rotate through the four pages so this frame becomes a reference.
    c->current_frame = (c->current_frame + 1) & 3;
    if ((ret = av_frame_ref(data, c->pic)) < 0)
        return ret;

    *got_frame = 1;

    return pkt->size;
}
386 | |
/* Decoder registration for Amazing Studio Packed Animation File video. */
AVCodec ff_paf_video_decoder = {
    .name           = "paf_video",
    .long_name      = NULL_IF_CONFIG_SMALL("Amazing Studio Packed Animation File Video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PAF_VIDEO,
    .priv_data_size = sizeof(PAFVideoDecContext),
    .init           = paf_video_init,
    .close          = paf_video_close,
    .decode         = paf_video_decode,
    .capabilities   = AV_CODEC_CAP_DR1,
};
398 |