blob: ce5b4a0d3e208c59380a32e5a8fdd800bcf5cb9b
1 | /* |
2 | * Monkey's Audio lossless audio decoder |
3 | * Copyright (c) 2007 Benjamin Zores <ben@geexbox.org> |
4 | * based upon libdemac from Dave Chapman. |
5 | * |
6 | * This file is part of FFmpeg. |
7 | * |
8 | * FFmpeg is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU Lesser General Public |
10 | * License as published by the Free Software Foundation; either |
11 | * version 2.1 of the License, or (at your option) any later version. |
12 | * |
13 | * FFmpeg is distributed in the hope that it will be useful, |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | * Lesser General Public License for more details. |
17 | * |
18 | * You should have received a copy of the GNU Lesser General Public |
19 | * License along with FFmpeg; if not, write to the Free Software |
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
21 | */ |
22 | #include <stdio.h> |
23 | #include <stdlib.h> |
24 | #include <limits.h> |
25 | #include <stdint.h> |
26 | #include "Ape_decoder.h" |
27 | #include "../../amadec/adec-armdec-mgt.h" |
28 | #include <android/log.h> |
29 | #ifdef __ARM_HAVE_NEON |
30 | #include <arm_neon.h> |
31 | #endif |
32 | #include <sys/time.h> |
33 | |
#define LOG_TAG "ApeDecoder"
#define audio_codec_print(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)

/* Module-wide state: this decoder is single-instance (one stream at a time). */
static APEIOBuf apeiobuf = {0};          /* I/O staging buffer for the decode loop */
static APE_Decoder_t *apedec;            /* the active decoder instance */
static int read_buffersize_per_time = 102400 ; //100k -- bytes consumed from input per read pass
ape_extra_data headinfo ;                /* APE header info handed in by the caller */
41 | |
42 | //#include< |
43 | /** |
44 | * @file libavcodec/apedec.c |
45 | * Monkey's Audio lossless audio decoder |
46 | */ |
47 | |
48 | |
/** @} */
#define ALT_BITSTREAM_READER_LE   /* input words are little-endian (FFmpeg convention) */
#define APE_FILTER_LEVELS 3       /* max cascaded adaptive-filter stages per compression level */
52 | |
/** Filter orders depending on compression level */
/* Row index = compression_level / 1000 - 1 (see ape_decode_init); a zero
   entry terminates the list of active filter stages for that level. */
static const uint16_t ape_filter_orders[5][APE_FILTER_LEVELS] = {
    { 0, 0, 0 },
    { 16, 0, 0 },
    { 64, 0, 0 },
    { 32, 256, 0 },
    { 16, 256, 1280 }
};

/** Filter fraction bits depending on compression level */
/* Fixed-point fractional bits used by the matching filter stage above. */
static const uint8_t ape_filter_fracbits[5][APE_FILTER_LEVELS] = {
    { 0, 0, 0 },
    { 11, 0, 0 },
    { 11, 0, 0 },
    { 10, 13, 0 },
    { 11, 13, 15 }
};
70 | |
/** Read a 32-bit big-endian value from *ptr and advance it by 4 bytes. */
static inline uint32_t bytestream_get_be32(const uint8_t** ptr)
{
    const uint8_t *p = *ptr;
    uint32_t value = ((uint32_t)p[0] << 24) |
                     ((uint32_t)p[1] << 16) |
                     ((uint32_t)p[2] << 8)  |
                      (uint32_t)p[3];
    *ptr = p + 4;
    return value;
}
/** Read one byte from *ptr and advance it. */
static inline uint8_t bytestream_get_byte(const uint8_t** ptr)
{
    return *(*ptr)++;
}
85 | |
/** Thin allocation wrapper; keeps the codec's allocator swappable. */
void * dsp_malloc(int size)
{
    void *block = malloc(size);
    return block;
}
90 | |
/** Release a block obtained from dsp_malloc()/dsp_realloc(). */
void dsp_free(void * buf)
{
    free(buf);
}
95 | |
/** Thin realloc wrapper; NULL ptr behaves like dsp_malloc(size). */
void * dsp_realloc(void *ptr, size_t size)
{
    return realloc(ptr, size);
}
101 | |
102 | |
103 | /***build a new ape decoder instance***/ |
104 | APE_Decoder_t* ape_decoder_new(void* ape_head_context) |
105 | { |
106 | APE_Decoder_t *decoder; |
107 | |
108 | decoder = (APE_Decoder_t*)dsp_malloc(sizeof(APE_Decoder_t)); |
109 | memset(decoder, 0, sizeof(APE_Decoder_t)); |
110 | if (!decoder) { |
111 | audio_codec_print("====malloc failed 1\n"); |
112 | return 0; |
113 | } |
114 | |
115 | decoder->public_data = (APE_Codec_Public_t*)dsp_malloc(sizeof(APE_Codec_Public_t)); |
116 | if (decoder->public_data == 0) { |
117 | audio_codec_print("====malloc failed 2\n"); |
118 | dsp_free(decoder); |
119 | return 0; |
120 | } else { |
121 | memset(decoder->public_data, 0, sizeof(APE_Codec_Public_t)); |
122 | } |
123 | |
124 | |
125 | decoder->private_data = (APE_COdec_Private_t*)dsp_malloc(sizeof(APE_COdec_Private_t)); |
126 | if (decoder->private_data == 0) { |
127 | audio_codec_print("====malloc failed 3\n"); |
128 | dsp_free(decoder->public_data); |
129 | dsp_free(decoder); |
130 | return 0; |
131 | } |
132 | memset(decoder->private_data, 0, sizeof(APE_COdec_Private_t)); |
133 | |
134 | decoder->public_data->current_decoding_frame = 0; |
135 | decoder->public_data->ape_header_context = ape_head_context; |
136 | |
137 | return decoder; |
138 | } |
139 | |
140 | void ape_decoder_delete(APE_Decoder_t *decoder) |
141 | { |
142 | APE_COdec_Private_t *s = decoder->private_data; |
143 | int i = 0; |
144 | if (decoder->private_data->data) { |
145 | dsp_free(decoder->private_data->data); |
146 | decoder->private_data->data = NULL; |
147 | } |
148 | if (decoder->private_data->filterbuf[0]) { |
149 | for (i = 0; i < APE_FILTER_LEVELS; i++) { |
150 | if (s->filterbuf[i]) { |
151 | dsp_free(s->filterbuf[i]); |
152 | } |
153 | } |
154 | } |
155 | if (decoder->private_data) { |
156 | dsp_free(decoder->private_data); |
157 | decoder->private_data = NULL; |
158 | } |
159 | if (decoder->public_data) { |
160 | dsp_free(decoder->public_data); |
161 | decoder->public_data = NULL; |
162 | |
163 | } |
164 | dsp_free(decoder); |
165 | decoder = NULL ; |
166 | return ; |
167 | } |
168 | // TODO: dsputilize |
169 | |
170 | APE_Decode_status_t ape_decode_init(APE_Decoder_t *avctx) |
171 | { |
172 | APE_COdec_Private_t *s = avctx->private_data; |
173 | APE_Codec_Public_t *p = avctx->public_data; |
174 | ape_extra_data *apecontext = (ape_extra_data *) avctx->public_data->ape_header_context; |
175 | int i; |
176 | audio_codec_print("===param==bps:%d channel:%d \n", apecontext->bps, apecontext->channels); |
177 | if (apecontext->bps != 16) { |
178 | audio_codec_print("OOnly 16-bit samples are supported\n"); |
179 | return APE_DECODE_INIT_ERROR; |
180 | } |
181 | if (apecontext->channels > 2) { |
182 | audio_codec_print("Only mono and stereo is supported\n"); |
183 | return APE_DECODE_INIT_ERROR; |
184 | } |
185 | s->APE_Decoder = avctx; |
186 | s->channels = apecontext->channels; |
187 | s->fileversion = apecontext->fileversion; |
188 | s->compression_level = apecontext->compressiontype; |
189 | s->flags = apecontext->formatflags; |
190 | /** some public parameter **/ |
191 | p->bits_per_sample = apecontext->bps; |
192 | p->sample_rate = apecontext->samplerate; |
193 | if (s->compression_level % 1000 || s->compression_level > COMPRESSION_LEVEL_INSANE) { |
194 | audio_codec_print("Incorrect compression level %d\n", s->compression_level); |
195 | return APE_DECODE_INIT_ERROR; |
196 | } |
197 | s->fset = s->compression_level / 1000 - 1; |
198 | for (i = 0; i < APE_FILTER_LEVELS; i++) { |
199 | if (!ape_filter_orders[s->fset][i]) { |
200 | break; |
201 | } |
202 | s->filterbuf[i] = (int16_t*)dsp_malloc((ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4); |
203 | if (s->filterbuf[i] == NULL) { |
204 | audio_codec_print("s->filterbuf[i] malloc error size:%d %d %d \n", (ape_filter_orders[s->fset][i] * 3 + HISTORY_SIZE) * 4, ape_filter_orders[s->fset][i], HISTORY_SIZE); |
205 | } |
206 | } |
207 | return APE_DECODE_INIT_FINISH; |
208 | } |
209 | |
210 | static int ape_decode_close(APE_Decoder_t * avctx) |
211 | { |
212 | APE_COdec_Private_t *s = avctx->private_data; |
213 | int i; |
214 | |
215 | for (i = 0; i < APE_FILTER_LEVELS; i++) { |
216 | dsp_free(&s->filterbuf[i]); |
217 | } |
218 | |
219 | return 0; |
220 | } |
221 | |
222 | /** |
223 | * @defgroup rangecoder APE range decoder |
224 | * @{ |
225 | */ |
226 | |
#define CODE_BITS 32                                    /* working width of the range coder */
#define TOP_VALUE ((unsigned int)1 << (CODE_BITS-1))    /* upper bound of the coding range */
#define SHIFT_BITS (CODE_BITS - 9)                      /* shift used to extract the top byte */
#define EXTRA_BITS ((CODE_BITS-2) % 8 + 1)              /* bits taken from the very first input byte */
#define BOTTOM_VALUE (TOP_VALUE >> 8)                   /* renormalization threshold */
232 | |
233 | /** Start the decoder */ |
234 | static inline void range_start_decoding(APE_COdec_Private_t * ctx) |
235 | { |
236 | ctx->rc.buffer = bytestream_get_byte(&ctx->ptr); |
237 | ctx->rc.low = ctx->rc.buffer >> (8 - EXTRA_BITS); |
238 | ctx->rc.range = (uint32_t) 1 << EXTRA_BITS; |
239 | } |
240 | |
/** Perform normalization */
/* Refill the range coder: while the range has shrunk below one byte of
 * precision, shift in the next input byte.  NOTE: ptr is advanced even
 * past data_end — only the load itself is guarded — so bytes beyond the
 * end are read as zero. */
static inline void range_dec_normalize(APE_COdec_Private_t * ctx)
{
    while (ctx->rc.range <= BOTTOM_VALUE) {
        ctx->rc.buffer <<= 8;
        if (ctx->ptr < ctx->data_end) {
            ctx->rc.buffer += *ctx->ptr;
        }
        ctx->ptr++;
        ctx->rc.low = (ctx->rc.low << 8) | ((ctx->rc.buffer >> 1) & 0xFF);
        ctx->rc.range <<= 8;
        if (ctx->rc.range == 0) { //in error condition.if == 0,no chance return,so added
            return;
        }
    }
}
257 | |
/**
 * Calculate culmulative frequency for next symbol. Does NO update!
 * @param ctx decoder context
 * @param tot_f is the total frequency or (code_value)1<<shift
 * @return the culmulative frequency
 */
static inline int range_decode_culfreq(APE_COdec_Private_t * ctx, int tot_f)
{
    range_dec_normalize(ctx);
    /* rc.help caches range/tot_f; range_decode_update() reuses it. */
    ctx->rc.help = ctx->rc.range / tot_f;
    return ctx->rc.low / ctx->rc.help;
}
270 | |
/**
 * Decode value with given size in bits
 * @param ctx decoder context
 * @param shift number of bits to decode
 */
static inline int range_decode_culshift(APE_COdec_Private_t * ctx, int shift)
{
    range_dec_normalize(ctx);
    /* Same as range_decode_culfreq() with tot_f == 1<<shift, but cheaper. */
    ctx->rc.help = ctx->rc.range >> shift;
    return ctx->rc.low / ctx->rc.help;
}
282 | |
283 | |
/**
 * Update decoding state
 * @param ctx decoder context
 * @param sy_f the interval length (frequency of the symbol)
 * @param lt_f the lower end (frequency sum of < symbols)
 */
static inline void range_decode_update(APE_COdec_Private_t * ctx, int sy_f, int lt_f)
{
    /* Narrow the coding interval to the decoded symbol's sub-range;
       rc.help was set by the preceding culfreq/culshift call. */
    ctx->rc.low -= ctx->rc.help * lt_f;
    ctx->rc.range = ctx->rc.help * sy_f;
}
295 | |
296 | /** Decode n bits (n <= 16) without modelling */ |
297 | static inline int range_decode_bits(APE_COdec_Private_t * ctx, int n) |
298 | { |
299 | int sym = range_decode_culshift(ctx, n); |
300 | range_decode_update(ctx, 1, sym); |
301 | return sym; |
302 | } |
303 | |
304 | |
#define MODEL_ELEMENTS 64   /* symbols per model; MODEL_ELEMENTS-1 is the escape symbol */

/**
 * Fixed probabilities for symbols in Monkey Audio version 3.97
 * (cumulative frequencies out of 1<<16; used by range_get_symbol).
 */
static const uint16_t counts_3970[22] = {
    0, 14824, 28224, 39348, 47855, 53994, 58171, 60926,
    62682, 63786, 64463, 64878, 65126, 65276, 65365, 65419,
    65450, 65469, 65480, 65487, 65491, 65493,
};

/**
 * Probability ranges for symbols in Monkey Audio version 3.97
 * (first differences of counts_3970).
 */
static const uint16_t counts_diff_3970[21] = {
    14824, 13400, 11124, 8507, 6139, 4177, 2755, 1756,
    1104, 677, 415, 248, 150, 89, 54, 31,
    19, 11, 7, 4, 2,
};

/**
 * Fixed probabilities for symbols in Monkey Audio version 3.98
 */
static const uint16_t counts_3980[22] = {
    0, 19578, 36160, 48417, 56323, 60899, 63265, 64435,
    64971, 65232, 65351, 65416, 65447, 65466, 65476, 65482,
    65485, 65488, 65490, 65491, 65492, 65493,
};

/**
 * Probability ranges for symbols in Monkey Audio version 3.98
 * (first differences of counts_3980).
 */
static const uint16_t counts_diff_3980[21] = {
    19578, 16582, 12257, 7906, 4576, 2366, 1170, 536,
    261, 119, 65, 31, 19, 10, 6, 3,
    3, 2, 1, 1, 1,
};
342 | |
/**
 * Decode symbol
 * @param ctx decoder context
 * @param counts probability range start position
 * @param counts_diff probability range widths
 */
static inline int range_get_symbol(APE_COdec_Private_t * ctx,
                                   const uint16_t counts[],
                                   const uint16_t counts_diff[])
{
    int symbol, cf;

    cf = range_decode_culshift(ctx, 16);

    /* Frequencies above 65492 fall in the flat tail of the model: map them
       directly to the top symbols without a table walk. */
    if (cf > 65492) {
        symbol = cf - 65535 + 63;
        range_decode_update(ctx, 1, cf);
        if (cf > 65535) {
            ctx->error = 1;   /* out-of-range frequency: corrupt input */
        }
        return symbol;
    }
    /* figure out the symbol inefficiently; a binary search would be much better */
    for (symbol = 0; counts[symbol + 1] <= cf; symbol++) {
        ;
    }

    range_decode_update(ctx, counts_diff[symbol], counts[symbol]);

    return symbol;
}
374 | /** @} */ // group rangecoder |
375 | |
376 | static inline void update_rice(APERice *rice, int x) |
377 | { |
378 | int lim = rice->k ? (1 << (rice->k + 4)) : 0; |
379 | rice->ksum += ((x + 1) / 2) - ((rice->ksum + 16) >> 5); |
380 | |
381 | if (rice->ksum < lim) { |
382 | rice->k--; |
383 | } else if (rice->ksum >= (1 << (rice->k + 5))) { |
384 | rice->k++; |
385 | } |
386 | } |
387 | |
/**
 * Decode one residual via the range coder and an adaptive Rice model.
 * Files older than version 3990 use a Rice escape scheme; newer files use
 * a pivot scheme driven by the running magnitude sum (ksum).
 * @return the decoded residual as a signed value
 */
static inline int ape_decode_value(APE_COdec_Private_t * ctx, APERice *rice)
{
    int x, overflow;

    if (ctx->fileversion < 3990) {
        int tmpk;

        overflow = range_get_symbol(ctx, counts_3970, counts_diff_3970);

        if (overflow == (MODEL_ELEMENTS - 1)) {
            /* escape symbol: the Rice parameter is coded explicitly in 5 bits */
            tmpk = range_decode_bits(ctx, 5);
            overflow = 0;
        } else {
            tmpk = (rice->k < 1) ? 0 : rice->k - 1;
        }

        if (tmpk <= 16) {
            x = range_decode_bits(ctx, tmpk);
        } else {
            /* wide values are read as two 16-bit halves */
            x = range_decode_bits(ctx, 16);
            x |= (range_decode_bits(ctx, tmpk - 16) << 16);
        }
        x += overflow << tmpk;
    } else {
        int base, pivot;

        pivot = rice->ksum >> 5;
        if (pivot == 0) {
            pivot = 1;
        }

        overflow = range_get_symbol(ctx, counts_3980, counts_diff_3980);

        if (overflow == (MODEL_ELEMENTS - 1)) {
            /* escape symbol: overflow coded explicitly as 32 bits */
            overflow = range_decode_bits(ctx, 16) << 16;
            overflow |= range_decode_bits(ctx, 16);
        }

        base = range_decode_culfreq(ctx, pivot);
        range_decode_update(ctx, 1, base);

        x = base + overflow * pivot;
    }

    update_rice(rice, x);

    /* Convert to signed */
    if (x & 1) {
        return (x >> 1) + 1;
    } else {
        return -(x >> 1);
    }
}
441 | |
/**
 * Entropy-decode a batch of residuals into ctx->decoded0 (and ->decoded1
 * when stereo is set).  Silent frames are simply zero-filled.  When the
 * whole frame has been consumed, the range coder is normalized so every
 * input byte is accounted for.
 */
static void entropy_decode(APE_COdec_Private_t * ctx, int blockstodecode, int stereo)
{
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    ctx->blocksdecoded = blockstodecode;

    if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) {
        /* We are pure silence, just memset the output buffer. */
        memset(decoded0, 0, blockstodecode * sizeof(int32_t));
        memset(decoded1, 0, blockstodecode * sizeof(int32_t));
    } else {
        while (blockstodecode--) {
            *decoded0++ = ape_decode_value(ctx, &ctx->riceY);
            if (stereo) {
                *decoded1++ = ape_decode_value(ctx, &ctx->riceX);
            }
        }
    }

    if (ctx->blocksdecoded == ctx->currentframeblocks) {
        range_dec_normalize(ctx); /* normalize to use up all bytes */
    }
}
466 | |
/**
 * Prepare the entropy decoder for a new frame: read the frame CRC and
 * optional flags, reset the Rice models, and start the range coder.
 */
static void init_entropy_decoder(APE_COdec_Private_t * ctx)
{
    /* Read the CRC */
    ctx->CRC = bytestream_get_be32(&ctx->ptr);

    /* Read the frame flags if they exist */
    ctx->frameflags = 0;
    if ((ctx->fileversion > 3820) && (ctx->CRC & 0x80000000)) {
        /* top CRC bit signals that a flags word follows */
        ctx->CRC &= ~0x80000000;

        ctx->frameflags = bytestream_get_be32(&ctx->ptr);
    }

    /* Keep a count of the blocks decoded in this frame */
    ctx->blocksdecoded = 0;

    /* Initialize the rice structs */
    ctx->riceX.k = 10;
    ctx->riceX.ksum = (1 << ctx->riceX.k) * 16;
    ctx->riceY.k = 10;
    ctx->riceY.ksum = (1 << ctx->riceY.k) * 16;

    /* The first 8 bits of input are ignored. */
    ctx->ptr++;

    range_start_decoding(ctx);
}
494 | |
/* Starting predictor coefficients for channel A (both stereo filters). */
static const int32_t initial_coeffs[4] = {
    360, 317, -109, 98
};
498 | |
499 | static void init_predictor_decoder(APE_COdec_Private_t * ctx) |
500 | { |
501 | APEPredictor *p = &ctx->predictor; |
502 | |
503 | /* Zero the history buffers */ |
504 | memset(p->historybuffer, 0, PREDICTOR_SIZE * sizeof(int32_t)); |
505 | p->buf = p->historybuffer; |
506 | |
507 | /* Initialize and zero the coefficients */ |
508 | memcpy(p->coeffsA[0], initial_coeffs, sizeof(initial_coeffs)); |
509 | memcpy(p->coeffsA[1], initial_coeffs, sizeof(initial_coeffs)); |
510 | memset(p->coeffsB, 0, sizeof(p->coeffsB)); |
511 | |
512 | p->filterA[0] = p->filterA[1] = 0; |
513 | p->filterB[0] = p->filterB[1] = 0; |
514 | p->lastA[0] = p->lastA[1] = 0; |
515 | } |
516 | |
/** Get inverse sign of integer (-1 for positive, 1 for negative and 0 for zero) */
static inline int APESIGN(int32_t x)
{
    if (x > 0) {
        return -1;
    }
    if (x < 0) {
        return 1;
    }
    return 0;
}
522 | |
/**
 * Advance one channel of the stereo predictor by one sample.
 * @param p        predictor state
 * @param decoded  the entropy-decoded residual for this sample
 * @param filter   channel selector (0 = Y, 1 = X)
 * @param delayA/B history write positions, adaptA/B sign-history positions
 * @return the reconstructed (filtered) sample value
 */
static int predictor_update_filter(APEPredictor *p, const int decoded, const int filter, const int delayA, const int delayB, const int adaptA, const int adaptB)
{
    int32_t predictionA, predictionB;

    /* Stage A history: last output and its first difference, plus signs. */
    p->buf[delayA] = p->lastA[filter];
    p->buf[adaptA] = APESIGN(p->buf[delayA]);
    p->buf[delayA - 1] = p->buf[delayA] - p->buf[delayA - 1];
    p->buf[adaptA - 1] = APESIGN(p->buf[delayA - 1]);

    predictionA = p->buf[delayA ] * p->coeffsA[filter][0] +
                  p->buf[delayA - 1] * p->coeffsA[filter][1] +
                  p->buf[delayA - 2] * p->coeffsA[filter][2] +
                  p->buf[delayA - 3] * p->coeffsA[filter][3];

    /* Apply a scaled first-order filter compression */
    p->buf[delayB] = p->filterA[filter ^ 1] - ((p->filterB[filter] * 31) >> 5);
    p->buf[adaptB] = APESIGN(p->buf[delayB]);
    p->buf[delayB - 1] = p->buf[delayB] - p->buf[delayB - 1];
    p->buf[adaptB - 1] = APESIGN(p->buf[delayB - 1]);
    p->filterB[filter] = p->filterA[filter ^ 1];

    predictionB = p->buf[delayB ] * p->coeffsB[filter][0] +
                  p->buf[delayB - 1] * p->coeffsB[filter][1] +
                  p->buf[delayB - 2] * p->coeffsB[filter][2] +
                  p->buf[delayB - 3] * p->coeffsB[filter][3] +
                  p->buf[delayB - 4] * p->coeffsB[filter][4];

    /* Reconstruct the sample, then smooth with a leaky integrator (31/32). */
    p->lastA[filter] = decoded + ((predictionA + (predictionB >> 1)) >> 10);
    p->filterA[filter] = p->lastA[filter] + ((p->filterA[filter] * 31) >> 5);

    if (!decoded) { // no need updating filter coefficients
        return p->filterA[filter];
    }

    /* Sign-sign LMS adaptation: step coefficients opposite the residual sign. */
    if (decoded > 0) {
        p->coeffsA[filter][0] -= p->buf[adaptA ];
        p->coeffsA[filter][1] -= p->buf[adaptA - 1];
        p->coeffsA[filter][2] -= p->buf[adaptA - 2];
        p->coeffsA[filter][3] -= p->buf[adaptA - 3];

        p->coeffsB[filter][0] -= p->buf[adaptB ];
        p->coeffsB[filter][1] -= p->buf[adaptB - 1];
        p->coeffsB[filter][2] -= p->buf[adaptB - 2];
        p->coeffsB[filter][3] -= p->buf[adaptB - 3];
        p->coeffsB[filter][4] -= p->buf[adaptB - 4];
    } else {
        p->coeffsA[filter][0] += p->buf[adaptA ];
        p->coeffsA[filter][1] += p->buf[adaptA - 1];
        p->coeffsA[filter][2] += p->buf[adaptA - 2];
        p->coeffsA[filter][3] += p->buf[adaptA - 3];

        p->coeffsB[filter][0] += p->buf[adaptB ];
        p->coeffsB[filter][1] += p->buf[adaptB - 1];
        p->coeffsB[filter][2] += p->buf[adaptB - 2];
        p->coeffsB[filter][3] += p->buf[adaptB - 3];
        p->coeffsB[filter][4] += p->buf[adaptB - 4];
    }
    return p->filterA[filter];
}
582 | |
/**
 * Run the stereo adaptive predictor over 'count' samples in place,
 * turning the residuals in ctx->decoded0/1 into reconstructed values.
 */
static void predictor_decode_stereo(APE_COdec_Private_t * ctx, int count)
{
    int32_t predictionA, predictionB;
    APEPredictor *p = &ctx->predictor;
    int32_t *decoded0 = ctx->decoded0;
    int32_t *decoded1 = ctx->decoded1;

    while (count--) {
        /* Predictor Y */
        predictionA = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB, YADAPTCOEFFSA, YADAPTCOEFFSB);
        predictionB = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB, XADAPTCOEFFSA, XADAPTCOEFFSB);
        *(decoded0++) = predictionA;
        *(decoded1++) = predictionB;

        /* Combined */
        p->buf++;

        /* Have we filled the history buffer? */
        if (p->buf == p->historybuffer + HISTORY_SIZE) {
            /* slide the live window back to the start of the buffer */
            memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t));
            p->buf = p->historybuffer;
        }
    }
}
607 | |
/**
 * Run the mono adaptive predictor over 'count' samples in place
 * (single-channel variant of predictor_decode_stereo, stage A only).
 */
static void predictor_decode_mono(APE_COdec_Private_t * ctx, int count)
{
    APEPredictor *p = &ctx->predictor;
    int32_t *decoded0 = ctx->decoded0;
    int32_t predictionA, currentA, A;

    currentA = p->lastA[0];

    while (count--) {
        A = *decoded0;

        /* History: last output and its first difference. */
        p->buf[YDELAYA] = currentA;
        p->buf[YDELAYA - 1] = p->buf[YDELAYA] - p->buf[YDELAYA - 1];

        predictionA = p->buf[YDELAYA ] * p->coeffsA[0][0] +
                      p->buf[YDELAYA - 1] * p->coeffsA[0][1] +
                      p->buf[YDELAYA - 2] * p->coeffsA[0][2] +
                      p->buf[YDELAYA - 3] * p->coeffsA[0][3];

        currentA = A + (predictionA >> 10);

        p->buf[YADAPTCOEFFSA] = APESIGN(p->buf[YDELAYA ]);
        p->buf[YADAPTCOEFFSA - 1] = APESIGN(p->buf[YDELAYA - 1]);

        /* Sign-sign LMS adaptation on the residual's sign. */
        if (A > 0) {
            p->coeffsA[0][0] -= p->buf[YADAPTCOEFFSA ];
            p->coeffsA[0][1] -= p->buf[YADAPTCOEFFSA - 1];
            p->coeffsA[0][2] -= p->buf[YADAPTCOEFFSA - 2];
            p->coeffsA[0][3] -= p->buf[YADAPTCOEFFSA - 3];
        } else if (A < 0) {
            p->coeffsA[0][0] += p->buf[YADAPTCOEFFSA ];
            p->coeffsA[0][1] += p->buf[YADAPTCOEFFSA - 1];
            p->coeffsA[0][2] += p->buf[YADAPTCOEFFSA - 2];
            p->coeffsA[0][3] += p->buf[YADAPTCOEFFSA - 3];
        }

        p->buf++;

        /* Have we filled the history buffer? */
        if (p->buf == p->historybuffer + HISTORY_SIZE) {
            memmove(p->historybuffer, p->buf, PREDICTOR_SIZE * sizeof(int32_t));
            p->buf = p->historybuffer;
        }

        /* Leaky integrator (31/32) smooths the reconstructed output. */
        p->filterA[0] = currentA + ((p->filterA[0] * 31) >> 5);
        *(decoded0++) = p->filterA[0];
    }

    p->lastA[0] = currentA;
}
658 | |
659 | static void do_init_filter(APEFilter *f, int16_t * buf, int order) |
660 | { |
661 | f->coeffs = buf; |
662 | f->historybuffer = buf + order; |
663 | f->delay = f->historybuffer + order * 2; |
664 | f->adaptcoeffs = f->historybuffer + order; |
665 | |
666 | memset(f->historybuffer, 0, (order * 2) * sizeof(int16_t)); |
667 | memset(f->coeffs, 0, order * sizeof(int16_t)); |
668 | f->avg = 0; |
669 | } |
670 | |
671 | static void init_filter(APE_COdec_Private_t * ctx, APEFilter *f, int16_t * buf, int order) |
672 | { |
673 | do_init_filter(&f[0], buf, order); |
674 | do_init_filter(&f[1], buf + order * 3 + HISTORY_SIZE, order); |
675 | } |
/** Dot product of two int16 vectors of the given order.
 *  Note: 'shift' is unused — scaling was disabled upstream. */
static int32_t scalarproduct_int16_c(int16_t * v1, int16_t * v2, int order, int shift)
{
    int res = 0;

#if !(defined __ARM_HAVE_NEON)
    int i;

    for (i = 0; i < order; i++) {
        res += v1[i] * v2[i];
    }
#else
    int j = order / 4;
    int k = order % 4;
    int32x4_t neonres = vdupq_n_s32(0);

    while (j--) {
        neonres = vmlal_s16(neonres, vld1_s16(v1), vld1_s16(v2));
        v1 += 4;
        v2 += 4;
    }

    while (k--) {
        res += (*v1++ * *v2++);
    }

    res += vgetq_lane_s32(neonres, 0) + vgetq_lane_s32(neonres, 1) +
           vgetq_lane_s32(neonres, 2) + vgetq_lane_s32(neonres, 3);
#endif
    return res;
}
/** Element-wise v1[i] += v2[i] for i in [0, order). */
static void add_int16_c(int16_t * v1, int16_t * v2, int order)
{
#if !(defined __ARM_HAVE_NEON)
    int i;

    for (i = 0; i < order; i++) {
        v1[i] += v2[i];
    }
#else
    int j = order / 8;
    int k = order % 8;
    int16x8_t neonv1;

    while (j--) {
        neonv1 = vaddq_s16(vld1q_s16(v1), vld1q_s16(v2));
        vst1q_s16(v1, neonv1);
        v1 += 8;
        v2 += 8;
    }

    while (k--) {
        *v1++ += *v2++;
    }
#endif
}
727 | |
/** Element-wise v1[i] -= v2[i] for i in [0, order). */
static void sub_int16_c(int16_t * v1, int16_t * v2, int order)
{
#if !(defined __ARM_HAVE_NEON)
    int i;

    for (i = 0; i < order; i++) {
        v1[i] -= v2[i];
    }
#else
    int j = order / 8;
    int k = order % 8;
    int16x8_t neonv1;

    while (j--) {
        neonv1 = vsubq_s16(vld1q_s16(v1), vld1q_s16(v2));
        vst1q_s16(v1, neonv1);
        v1 += 8;
        v2 += 8;
    }

    while (k--) {
        *v1++ -= *v2++;
    }
#endif
}
/** Clamp an int to the int16_t range [-32768, 32767]. */
static inline int16_t av_clip_int16(int a)
{
    if (a < -32768) {
        return -32768;
    }
    if (a > 32767) {
        return 32767;
    }
    return (int16_t)a;
}
/**
 * Run one adaptive FIR filter stage over 'count' samples in place.
 * Each step: predict from the delay history, adapt the coefficients by the
 * sign of the incoming residual, add the prediction back, and update the
 * clipped output history and adaptation terms.
 */
static inline void do_apply_filter(APE_COdec_Private_t * ctx, int version, APEFilter *f, int32_t *data, int count, int order, int fracbits)
{
    int res;
    int absres;

    while (count--) {
        /* round fixedpoint scalar product */
        res = (scalarproduct_int16_c(f->delay - order, f->coeffs, order, 0) + (1 << (fracbits - 1))) >> fracbits;

        /* sign-sign LMS: move coefficients opposite the residual's sign */
        if (*data < 0) {
            add_int16_c(f->coeffs, f->adaptcoeffs - order, order);
        } else if (*data > 0) {
            sub_int16_c(f->coeffs, f->adaptcoeffs - order, order);
        }

        res += *data;

        *data++ = res;

        /* Update the output history */
        *f->delay++ = av_clip_int16(res);

        if (version < 3980) {
            /* Version ??? to < 3.98 files (untested) */
            f->adaptcoeffs[0] = (res == 0) ? 0 : ((res >> 28) & 8) - 4;
            f->adaptcoeffs[-4] >>= 1;
            f->adaptcoeffs[-8] >>= 1;
        } else {
            /* Version 3.98 and later files */

            /* Update the adaption coefficients */
            absres = (res < 0 ? -res : res);

            /* step size scales with |res| relative to the running average */
            if (absres > (f->avg * 3)) {
                *f->adaptcoeffs = ((res >> 25) & 64) - 32;
            } else if (absres > (f->avg * 4) / 3) {
                *f->adaptcoeffs = ((res >> 26) & 32) - 16;
            } else if (absres > 0) {
                *f->adaptcoeffs = ((res >> 27) & 16) - 8;
            } else {
                *f->adaptcoeffs = 0;
            }

            f->avg += (absres - f->avg) / 16;

            f->adaptcoeffs[-1] >>= 1;
            f->adaptcoeffs[-2] >>= 1;
            f->adaptcoeffs[-8] >>= 1;
        }

        f->adaptcoeffs++;

        /* Have we filled the history buffer? */
        if (f->delay == f->historybuffer + HISTORY_SIZE + (order * 2)) {
            /* slide the live 2*order window back to the buffer start */
            memmove(f->historybuffer, f->delay - (order * 2),
                    (order * 2) * sizeof(int16_t));
            f->delay = f->historybuffer + order * 2;
            f->adaptcoeffs = f->historybuffer + order;
        }
    }
}
820 | |
821 | static void apply_filter(APE_COdec_Private_t * ctx, APEFilter *f, |
822 | int32_t * data0, int32_t * data1, |
823 | int count, int order, int fracbits) |
824 | { |
825 | do_apply_filter(ctx, ctx->fileversion, &f[0], data0, count, order, fracbits); |
826 | if (data1) { |
827 | do_apply_filter(ctx, ctx->fileversion, &f[1], data1, count, order, fracbits); |
828 | } |
829 | } |
830 | |
831 | static void ape_apply_filters(APE_COdec_Private_t * ctx, int32_t * decoded0, |
832 | int32_t * decoded1, int count) |
833 | { |
834 | int i; |
835 | |
836 | for (i = 0; i < APE_FILTER_LEVELS; i++) { |
837 | if (!ape_filter_orders[ctx->fset][i]) { |
838 | break; |
839 | } |
840 | apply_filter(ctx, ctx->filters[i], decoded0, decoded1, count, ape_filter_orders[ctx->fset][i], ape_filter_fracbits[ctx->fset][i]); |
841 | } |
842 | } |
843 | |
844 | static void init_frame_decoder(APE_COdec_Private_t * ctx) |
845 | { |
846 | int i; |
847 | init_entropy_decoder(ctx); |
848 | init_predictor_decoder(ctx); |
849 | |
850 | for (i = 0; i < APE_FILTER_LEVELS; i++) { |
851 | if (!ape_filter_orders[ctx->fset][i]) { |
852 | break; |
853 | } |
854 | init_filter(ctx, ctx->filters[i], ctx->filterbuf[i], ape_filter_orders[ctx->fset][i]); |
855 | } |
856 | } |
857 | |
858 | static void ape_unpack_mono(APE_COdec_Private_t * ctx, int count) |
859 | { |
860 | int32_t left; |
861 | int32_t *decoded0 = ctx->decoded0; |
862 | int32_t *decoded1 = ctx->decoded1; |
863 | |
864 | if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { |
865 | entropy_decode(ctx, count, 0); |
866 | /* We are pure silence, so we're done. */ |
867 | printf("pure silence mono\n"); |
868 | return; |
869 | } |
870 | |
871 | entropy_decode(ctx, count, 0); |
872 | ape_apply_filters(ctx, decoded0, NULL, count); |
873 | |
874 | /* Now apply the predictor decoding */ |
875 | predictor_decode_mono(ctx, count); |
876 | |
877 | /* Pseudo-stereo - just copy left channel to right channel */ |
878 | if (ctx->channels == 2) { |
879 | while (count--) { |
880 | left = *decoded0; |
881 | *(decoded1++) = *(decoded0++) = left; |
882 | } |
883 | } |
884 | } |
885 | |
886 | static void ape_unpack_stereo(APE_COdec_Private_t * ctx, int count) |
887 | { |
888 | int32_t left, right; |
889 | int32_t *decoded0 = ctx->decoded0; |
890 | int32_t *decoded1 = ctx->decoded1; |
891 | |
892 | if (ctx->frameflags & APE_FRAMECODE_STEREO_SILENCE) { |
893 | /* We are pure silence, so we're done. */ |
894 | printf("Function %s:pure silence stereo\n,""ape_unpack_stereo"); |
895 | return; |
896 | } |
897 | |
898 | entropy_decode(ctx, count, 1); |
899 | ape_apply_filters(ctx, decoded0, decoded1, count); |
900 | |
901 | /* Now apply the predictor decoding */ |
902 | predictor_decode_stereo(ctx, count); |
903 | |
904 | /* Decorrelate and scale to output depth */ |
905 | while (count--) { |
906 | left = *decoded1 - (*decoded0 / 2); |
907 | right = left + *decoded0; |
908 | |
909 | *(decoded0++) = left; |
910 | *(decoded1++) = right; |
911 | } |
912 | } |
/** Byte-swap w 32-bit words from src into dst (buffers may be the same). */
static void bswap_buf(uint32_t *dst, const uint32_t *src, int w)
{
    int i;

    for (i = 0; i < w; i++) {
        dst[i] = bswap_32(src[i]);
    }
}
931 | |
932 | APE_Decode_status_t ape_decode_frame(APE_Decoder_t * avctx, \ |
933 | void *data, int *data_size , \ |
934 | const unsigned char * buf, int buf_size) |
935 | { |
936 | |
937 | APE_COdec_Private_t *s = avctx->private_data; |
938 | //APEContext *apecontext =(APEContext *) avctx->public_data->ape_header_context; |
939 | int16_t *samples = data; |
940 | int nblocks; |
941 | int i, n; |
942 | int blockstodecode; |
943 | // unsigned *inputbuf; |
944 | if (buf_size == 0 && !s->samples) { |
945 | *data_size = 0; |
946 | printf("error parameter in:buf_size:%d\n", buf_size); |
947 | return APE_DECODE_ERROR_ABORT; |
948 | } |
949 | if (!s->samples) { //the new frame decode loop |
950 | if (s->data) { |
951 | s->data = dsp_realloc(s->data, (buf_size + 3) & ~3); |
952 | } else { |
953 | s->data = dsp_malloc((buf_size + 3) & ~3); |
954 | } |
955 | if (!s->data) { |
956 | printf("malloc for input frame failed,enlarge the mem pool!\r\n"); |
957 | } |
958 | bswap_buf((uint32_t*)s->data, (const uint32_t*)buf, buf_size >> 2); |
959 | s->ptr = s->last_ptr = s->data;//the current data position |
960 | s->data_end = s->data + buf_size; |
961 | nblocks = s->samples = bytestream_get_be32(&s->ptr);//the current frame block num |
962 | n = bytestream_get_be32(&s->ptr);//skip |
963 | if (n < 0 || n > 3) { |
964 | audio_codec_print("Incorrect offset passed:%d\n", n); |
965 | printf("current block num this frame is %d\n", nblocks); |
966 | s->data = NULL; |
967 | s->samples = 0; |
968 | return APE_DECODE_ERROR_ABORT; |
969 | } |
970 | s->ptr += n;// the begin of the data read loop |
971 | |
972 | s->currentframeblocks = nblocks; |
973 | buf += 4; |
974 | if (s->samples <= 0) { |
975 | printf("it seems that the samples num frame<= 0\n"); |
976 | *data_size = 0; |
977 | return APE_DECODE_ERROR_ABORT; |
978 | } |
979 | |
980 | //s->samples = apecontext->frames[apecontext->currentframe]->nblocks; |
981 | memset(s->decoded0, 0, sizeof(s->decoded0)); |
982 | memset(s->decoded1, 0, sizeof(s->decoded1)); |
983 | |
984 | /* Initialize the frame decoder */ |
985 | init_frame_decoder(s); |
986 | } |
987 | |
988 | if (!s->data) { |
989 | *data_size = 0; |
990 | printf("it seems that s->data== 0\n"); |
991 | return APE_DECODE_ERROR_ABORT; |
992 | } |
993 | |
994 | nblocks = s->samples; |
995 | blockstodecode = BLOCKS_PER_LOOP > nblocks ? nblocks : BLOCKS_PER_LOOP; |
996 | |
997 | s->error = 0; |
998 | |
999 | if ((s->channels == 1) || (s->frameflags & APE_FRAMECODE_PSEUDO_STEREO)) { |
1000 | ape_unpack_mono(s, blockstodecode); |
1001 | } else { |
1002 | ape_unpack_stereo(s, blockstodecode); |
1003 | } |
1004 | |
1005 | if (s->error || s->ptr > s->data_end) { |
1006 | s->samples = 0; |
1007 | *data_size = 0; |
1008 | printf("Error decoding frame.error num 0x%x.s->ptr 0x%x bigger s->data_end %x\n", s->error, s->ptr, s->data_end); |
1009 | return APE_DECODE_ERROR_ABORT; |
1010 | } |
1011 | |
1012 | for (i = 0; i < blockstodecode; i++) { |
1013 | *samples++ = s->decoded0[i]; |
1014 | if (s->channels == 2) { |
1015 | *samples++ = s->decoded1[i]; |
1016 | } |
1017 | } |
1018 | |
1019 | s->samples -= blockstodecode; |
1020 | |
1021 | *data_size = s->samples ? s->ptr - s->last_ptr : buf_size; |
1022 | s->last_ptr = s->ptr; |
1023 | return APE_DECODE_ONE_FRAME_FINISH; |
1024 | } |
1025 | |
1026 | //confirmed one frame |
1027 | int audio_dec_decode(audio_decoder_operations_t *adec_ops, char *outbuf, int *outlen, char *inbuf, int inlen) |
1028 | { |
1029 | unsigned char buffer[5]; |
1030 | unsigned current_framesize = 0; |
1031 | char extra_data = 8; |
1032 | unsigned int first_read = 0; |
1033 | apeiobuf.bytesLeft = 0; |
1034 | int nDecodedSize = 0; |
1035 | if (apeiobuf.bytesLeft == 0) { |
1036 | current_framesize = inlen;//sss |
1037 | apeiobuf.readPtr = inbuf; |
1038 | int buffersize_remain = current_framesize; |
1039 | unsigned char * read_buf_ptr = apeiobuf.readPtr; |
1040 | apeiobuf.bytesLeft += current_framesize; |
1041 | apedec->public_data->current_decoding_frame++; |
1042 | } |
1043 | |
1044 | if (apeiobuf.bytesLeft) { |
1045 | int err = 0; |
1046 | if ((err = ape_decode_frame(apedec, apeiobuf.outBuf, \ |
1047 | &apeiobuf.thislop_decoded_size, \ |
1048 | apeiobuf.readPtr, apeiobuf.bytesLeft)) != APE_DECODE_ONE_FRAME_FINISH) { |
1049 | audio_codec_print("apeiobuf.thislop_decoded_size=%d\n", apeiobuf.thislop_decoded_size); |
1050 | if (apeiobuf.thislop_decoded_size <= 0) { |
1051 | audio_codec_print("error id:%d happened when decoding ape frame\n", err); |
1052 | apeiobuf.bytesLeft = 0; |
1053 | } |
1054 | nDecodedSize = 0; |
1055 | } else { |
1056 | audio_codec_print("decode_one_frame_finished\n"); |
1057 | audio_codec_print("Enter into write_buffer operation\n"); |
1058 | int size = (apedec->private_data->blocksdecoded) * (apedec->private_data->channels) * 2; |
1059 | *outlen = size; |
1060 | nDecodedSize = apeiobuf.thislop_decoded_size; |
1061 | audio_codec_print("apedec->private_data->blocksdecoded=%d\n", apedec->private_data->blocksdecoded); |
1062 | audio_codec_print("apedec->private_data->channels=%d\n", apedec->private_data->channels); |
1063 | audio_codec_print(">>>>>>>>>>>>>>>>size = %d\n", size); |
1064 | memcpy(outbuf, (unsigned char*)apeiobuf.outBuf, size); |
1065 | } |
1066 | } |
1067 | return nDecodedSize; |
1068 | |
1069 | } |
1070 | |
1071 | #define DefaultReadSize 1024*10 //read count from kernel audio buf one time |
1072 | #define DefaultOutBufSize 1024*1024*2 |
1073 | int audio_dec_init(audio_decoder_operations_t *adec_ops) |
1074 | { |
1075 | int x = 1; |
1076 | char *p = (char *)&x; |
1077 | //audio_codec_print("\n\n[%s]BuildDate--%s BuildTime--%s", __FUNCTION__, __DATE__, __TIME__); |
1078 | if (*p == 1) { |
1079 | audio_codec_print("Little endian\n"); |
1080 | } else { |
1081 | audio_codec_print("Big endian\n"); |
1082 | } |
1083 | |
1084 | apedec = NULL; |
1085 | if (!apedec) { |
1086 | headinfo.bps = adec_ops->bps; |
1087 | headinfo.channels = adec_ops->channels; |
1088 | headinfo.samplerate = adec_ops->samplerate; |
1089 | headinfo.fileversion = ((*(adec_ops->extradata + 1)) << 8) | (*(adec_ops->extradata)); // the info below 3 row are based on ape.c encodec relatively |
1090 | headinfo.compressiontype = ((*(adec_ops->extradata + 3)) << 8) | (*(adec_ops->extradata + 2)); |
1091 | headinfo.formatflags = ((*(adec_ops->extradata + 5)) << 8) | (*(adec_ops->extradata + 4)); |
1092 | /*pass the ape header info to the decoder instance**/ |
1093 | apedec = ape_decoder_new((void*)&headinfo); |
1094 | |
1095 | } |
1096 | if (!apedec) { |
1097 | audio_codec_print("%s: FATAL ERROR creating the decoder instance\n", "ape"); |
1098 | return -1; |
1099 | } |
1100 | if (ape_decode_init(apedec) != APE_DECODE_INIT_FINISH) { |
1101 | audio_codec_print("%s: FATAL ERROR inititate the decoder instance\n", "ape"); |
1102 | return -1; |
1103 | } |
1104 | adec_ops->nInBufSize = DefaultReadSize; |
1105 | adec_ops->nOutBufSize = DefaultOutBufSize; |
1106 | audio_codec_print("ape_Init.\n"); |
1107 | return 0; |
1108 | } |
/* Release decoder resources.  Currently a no-op that always reports success.
 * NOTE(review): the instance created by audio_dec_init() (apedec) and the
 * buffer held in apeiobuf are never freed here — confirm whether the
 * framework expects this hook to release them. */
int audio_dec_release(audio_decoder_operations_t *adec_ops)
{
    return 0;
}
/* Query stream information.  Stub: pAudioInfo is left untouched and the
 * call always reports success. */
int audio_dec_getinfo(audio_decoder_operations_t *adec_ops, void *pAudioInfo)
{
    return 0;
}
1117 | |
1118 | |
1119 | |
1120 | |
1121 |