/*
** FAAD2 - Freeware Advanced Audio (AAC) Decoder including SBR decoding
** Copyright (C) 2003-2005 M. Bakker, Nero AG, http://www.nero.com
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
**
** Any non-GPL usage of this software or parts of this software is strictly
** forbidden.
**
** The "appropriate copyright message" mentioned in section 2c of the GPLv2
** must read: "Code from FAAD2 is copyright (c) Nero AG, www.nero.com"
**
** Commercial non-GPL licensing of this software is possible.
** For more info contact Nero AG through Mpeg4AAClicense@nero.com.
**
** $Id: sbr_qmf.c,v 1.32 2007/11/01 12:33:36 menno Exp $
**/
#include <stdlib.h>
#include "common.h"
#include "structs.h"

#ifdef SBR_DEC

#include <string.h>
#include "sbr_dct.h"
#include "sbr_qmf.h"
#include "sbr_qmf_c.h"
#include "sbr_syntax.h"

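/*
 * Analysis filterbank state.
 *
 * The 320-sample delay line x is stored twice, back to back ("double
 * ringbuffer"): every incoming sample is written at x_index + n and at
 * x_index + n + 320, so any 320-sample window starting at x_index can be
 * read as one contiguous block and the windowing loop needs no wraparound
 * tests.  qmfa_init allocates 2 * channels * 10 entries, i.e. 2 * 320 for
 * the usual channels == 32 case.
 */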
qmfa_info *qmfa_init(uint8_t channels)
{
    qmfa_info *qmfa = (qmfa_info*)faad_malloc(sizeof(qmfa_info));

    /* x is implemented as double ringbuffer */
    qmfa->x = (real_t*)faad_malloc(2 * channels * 10 * sizeof(real_t));
    memset(qmfa->x, 0, 2 * channels * 10 * sizeof(real_t));

    /* ringbuffer index */
    qmfa->x_index = 0;

    qmfa->channels = channels;

    return qmfa;
}

void qmfa_end(qmfa_info *qmfa)
{
    if (qmfa) {
        if (qmfa->x) {
            faad_free(qmfa->x);
        }
        faad_free(qmfa);
    }
}

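/*
 * 32-band QMF analysis (SBR tool of ISO/IEC 14496-3).  Per QMF time slot l:
 *   1. 32 new input samples are pushed, time-reversed, into the delay
 *      line x (each written twice, see qmfa_init);
 *   2. the 320-sample window is weighted by every other coefficient of
 *      the 640-entry prototype table qmf_c (sbr_qmf_c.h) and folded
 *      into u[0..63]:
 *        u[n] = sum_{j=0..4} x[x_index + n + 64*j] * qmf_c[2*(n + 64*j)]
 *   3. u is modulated into the subband samples X[l + offset][0..31];
 *      the high-quality path uses a complex DCT-IV kernel, the
 *      low-power path a real-valued 32-point DCT-III.
 * Only bands below kx are kept; the rest are zeroed.
 *
 * A minimal sketch of the windowing step (step 2), kept out of the build;
 * qmf_window_ref is a hypothetical helper for illustration, not decoder API:
 */
#if 0
static void qmf_window_ref(const real_t x[320], real_t u[64])
{
    int n, j;
    for (n = 0; n < 64; n++) {
        u[n] = 0;
        /* five polyphase taps, 64 samples apart, odd-indexed qmf_c skipped */
        for (j = 0; j < 5; j++)
            u[n] += MUL_F(x[n + 64 * j], qmf_c[2 * (n + 64 * j)]);
    }
}
#endif
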
void sbr_qmf_analysis_32(sbr_info *sbr, qmfa_info *qmfa, const real_t *input,
                         qmf_t X[MAX_NTSRHFG][64], uint8_t offset, uint8_t kx)
{
    ALIGN real_t u[64];
#ifndef SBR_LOW_POWER
    ALIGN real_t in_real[32], in_imag[32], out_real[32], out_imag[32];
#else
    ALIGN real_t y[32];
#endif
    uint32_t in = 0;
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++) {
        int16_t n;

        /* shift input buffer x */
        /* input buffer is not shifted anymore, x is implemented as double ringbuffer */
        //memmove(qmfa->x + 32, qmfa->x, (320-32)*sizeof(real_t));

        /* add new samples to input buffer x */
        for (n = 32 - 1; n >= 0; n--) {
#ifdef FIXED_POINT
            qmfa->x[qmfa->x_index + n] = qmfa->x[qmfa->x_index + n + 320] = (input[in++]) >> 4;
#else
            qmfa->x[qmfa->x_index + n] = qmfa->x[qmfa->x_index + n + 320] = input[in++];
#endif
        }

        /* window and summation to create array u */
        for (n = 0; n < 64; n++) {
            u[n] = MUL_F(qmfa->x[qmfa->x_index + n], qmf_c[2 * n]) +
                   MUL_F(qmfa->x[qmfa->x_index + n + 64], qmf_c[2 * (n + 64)]) +
                   MUL_F(qmfa->x[qmfa->x_index + n + 128], qmf_c[2 * (n + 128)]) +
                   MUL_F(qmfa->x[qmfa->x_index + n + 192], qmf_c[2 * (n + 192)]) +
                   MUL_F(qmfa->x[qmfa->x_index + n + 256], qmf_c[2 * (n + 256)]);
        }

        /* update ringbuffer index */
        qmfa->x_index -= 32;
        if (qmfa->x_index < 0) {
            qmfa->x_index = (320 - 32);
        }

        /* calculate the 32 subband samples and store them in X */
#ifdef SBR_LOW_POWER
        y[0] = u[48];
        for (n = 1; n < 16; n++) {
            y[n] = u[n + 48] + u[48 - n];
        }
        for (n = 16; n < 32; n++) {
            y[n] = -u[n - 16] + u[48 - n];
        }

        DCT3_32_unscaled(u, y);

        for (n = 0; n < 32; n++) {
            if (n < kx) {
#ifdef FIXED_POINT
                QMF_RE(X[l + offset][n]) = u[n] /*<< 1*/;
#else
                QMF_RE(X[l + offset][n]) = 2. * u[n];
#endif
            } else {
                QMF_RE(X[l + offset][n]) = 0;
            }
        }
#else

        // Reordering of data moved from DCT_IV to here
        in_imag[31] = u[1];
        in_real[0] = u[0];
        for (n = 1; n < 31; n++) {
            in_imag[31 - n] = u[n + 1];
            in_real[n] = -u[64 - n];
        }
        in_imag[0] = u[32];
        in_real[31] = -u[33];

        // dct4_kernel is a DCT_IV without the reordering, which is done before and after the FFT
        dct4_kernel(in_real, in_imag, out_real, out_imag);

        // Reordering of data moved from DCT_IV to here
        for (n = 0; n < 16; n++) {
            if (2 * n + 1 < kx) {
#ifdef FIXED_POINT
                QMF_RE(X[l + offset][2 * n]) = out_real[n];
                QMF_IM(X[l + offset][2 * n]) = out_imag[n];
                QMF_RE(X[l + offset][2 * n + 1]) = -out_imag[31 - n];
                QMF_IM(X[l + offset][2 * n + 1]) = -out_real[31 - n];
#else
                QMF_RE(X[l + offset][2 * n]) = 2. * out_real[n];
                QMF_IM(X[l + offset][2 * n]) = 2. * out_imag[n];
                QMF_RE(X[l + offset][2 * n + 1]) = -2. * out_imag[31 - n];
                QMF_IM(X[l + offset][2 * n + 1]) = -2. * out_real[31 - n];
#endif
            } else {
                if (2 * n < kx) {
#ifdef FIXED_POINT
                    QMF_RE(X[l + offset][2 * n]) = out_real[n];
                    QMF_IM(X[l + offset][2 * n]) = out_imag[n];
#else
                    QMF_RE(X[l + offset][2 * n]) = 2. * out_real[n];
                    QMF_IM(X[l + offset][2 * n]) = 2. * out_imag[n];
#endif
                } else {
                    QMF_RE(X[l + offset][2 * n]) = 0;
                    QMF_IM(X[l + offset][2 * n]) = 0;
                }
                QMF_RE(X[l + offset][2 * n + 1]) = 0;
                QMF_IM(X[l + offset][2 * n + 1]) = 0;
            }
        }
#endif
    }
}

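/*
 * Pre-twiddle factors for the 32-band high-quality synthesis filterbank:
 *   qmf32_pre_twiddle[k] = exp(-j * pi * (2k + 1) / 256)
 * stored as { cos, -sin } pairs in FRAC_CONST fixed/float format.  They
 * split the complex modulation of the synthesis bank into one DCT-IV and
 * one DST-IV of length 32; see sbr_qmf_synthesis_32 below.
 */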
static const complex_t qmf32_pre_twiddle[] = {
    { FRAC_CONST(0.999924701839145), FRAC_CONST(-0.012271538285720) },
    { FRAC_CONST(0.999322384588350), FRAC_CONST(-0.036807222941359) },
    { FRAC_CONST(0.998118112900149), FRAC_CONST(-0.061320736302209) },
    { FRAC_CONST(0.996312612182778), FRAC_CONST(-0.085797312344440) },
    { FRAC_CONST(0.993906970002356), FRAC_CONST(-0.110222207293883) },
    { FRAC_CONST(0.990902635427780), FRAC_CONST(-0.134580708507126) },
    { FRAC_CONST(0.987301418157858), FRAC_CONST(-0.158858143333861) },
    { FRAC_CONST(0.983105487431216), FRAC_CONST(-0.183039887955141) },
    { FRAC_CONST(0.978317370719628), FRAC_CONST(-0.207111376192219) },
    { FRAC_CONST(0.972939952205560), FRAC_CONST(-0.231058108280671) },
    { FRAC_CONST(0.966976471044852), FRAC_CONST(-0.254865659604515) },
    { FRAC_CONST(0.960430519415566), FRAC_CONST(-0.278519689385053) },
    { FRAC_CONST(0.953306040354194), FRAC_CONST(-0.302005949319228) },
    { FRAC_CONST(0.945607325380521), FRAC_CONST(-0.325310292162263) },
    { FRAC_CONST(0.937339011912575), FRAC_CONST(-0.348418680249435) },
    { FRAC_CONST(0.928506080473216), FRAC_CONST(-0.371317193951838) },
    { FRAC_CONST(0.919113851690058), FRAC_CONST(-0.393992040061048) },
    { FRAC_CONST(0.909167983090522), FRAC_CONST(-0.416429560097637) },
    { FRAC_CONST(0.898674465693954), FRAC_CONST(-0.438616238538528) },
    { FRAC_CONST(0.887639620402854), FRAC_CONST(-0.460538710958240) },
    { FRAC_CONST(0.876070094195407), FRAC_CONST(-0.482183772079123) },
    { FRAC_CONST(0.863972856121587), FRAC_CONST(-0.503538383725718) },
    { FRAC_CONST(0.851355193105265), FRAC_CONST(-0.524589682678469) },
    { FRAC_CONST(0.838224705554838), FRAC_CONST(-0.545324988422046) },
    { FRAC_CONST(0.824589302785025), FRAC_CONST(-0.565731810783613) },
    { FRAC_CONST(0.810457198252595), FRAC_CONST(-0.585797857456439) },
    { FRAC_CONST(0.795836904608884), FRAC_CONST(-0.605511041404326) },
    { FRAC_CONST(0.780737228572094), FRAC_CONST(-0.624859488142386) },
    { FRAC_CONST(0.765167265622459), FRAC_CONST(-0.643831542889791) },
    { FRAC_CONST(0.749136394523459), FRAC_CONST(-0.662415777590172) },
    { FRAC_CONST(0.732654271672413), FRAC_CONST(-0.680600997795453) },
    { FRAC_CONST(0.715730825283819), FRAC_CONST(-0.698376249408973) }
};

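/*
 * Synthesis filterbank state.  Like the analysis delay line, v is a
 * double ringbuffer: qmfs_init allocates 2 * channels * 20 entries,
 * i.e. 2 * 640 for the 32-band bank and 2 * 1280 for the 64-band bank.
 * Each value is written at v_index + n and at v_index + n + 640
 * (or + 1280), so every windowed read below is contiguous.
 */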
qmfs_info *qmfs_init(uint8_t channels)
{
    qmfs_info *qmfs = (qmfs_info*)faad_malloc(sizeof(qmfs_info));

    /* v is a double ringbuffer */
    qmfs->v = (real_t*)faad_malloc(2 * channels * 20 * sizeof(real_t));
    memset(qmfs->v, 0, 2 * channels * 20 * sizeof(real_t));

    qmfs->v_index = 0;

    qmfs->channels = channels;

    return qmfs;
}

void qmfs_end(qmfs_info *qmfs)
{
    if (qmfs) {
        if (qmfs->v) {
            faad_free(qmfs->v);
        }
        faad_free(qmfs);
    }
}

#ifdef SBR_LOW_POWER

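/*
 * Low-power SBR uses a real-valued filterbank: only QMF_RE() of X is
 * consumed.  Per time slot, the inputs are folded into x[] (sums) and
 * y[] (differences), a DCT-II produces the even output samples and a
 * DCT-IV the odd ones, the v buffer is extended by its symmetries, and
 * the output is windowed with the 640-tap prototype qmf_c (every other
 * coefficient for the 32-band bank).
 */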
void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    ALIGN real_t x[16];
    ALIGN real_t y[16];
    int32_t n, k, out = 0;
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++) {
        /* shift buffers */
        /* we are not shifting v, it is a double ringbuffer */
        //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));

        /* calculate 64 samples */
        for (k = 0; k < 16; k++) {
#ifdef FIXED_POINT
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][31 - k]));
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][31 - k]));
#else
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][31 - k])) / 32.0;
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][31 - k])) / 32.0;
#endif
        }

        /* even n samples */
        DCT2_16_unscaled(x, x);
        /* odd n samples */
        DCT4_16(y, y);

        for (n = 8; n < 24; n++) {
            qmfs->v[qmfs->v_index + n * 2] = qmfs->v[qmfs->v_index + 640 + n * 2] = x[n - 8];
            qmfs->v[qmfs->v_index + n * 2 + 1] = qmfs->v[qmfs->v_index + 640 + n * 2 + 1] = y[n - 8];
        }
        for (n = 0; n < 16; n++) {
            qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 640 + n] = qmfs->v[qmfs->v_index + 32 - n];
        }
        qmfs->v[qmfs->v_index + 48] = qmfs->v[qmfs->v_index + 640 + 48] = 0;
        for (n = 1; n < 16; n++) {
            qmfs->v[qmfs->v_index + 48 + n] = qmfs->v[qmfs->v_index + 640 + 48 + n] = -qmfs->v[qmfs->v_index + 48 - n];
        }

        /* calculate 32 output samples and window */
        for (k = 0; k < 32; k++) {
            output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 96 + k], qmf_c[64 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 128 + k], qmf_c[128 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 224 + k], qmf_c[192 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[256 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 352 + k], qmf_c[320 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 384 + k], qmf_c[384 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 480 + k], qmf_c[448 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[512 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 608 + k], qmf_c[576 + 2 * k]);
        }

        /* update the ringbuffer index */
        qmfs->v_index -= 64;
        if (qmfs->v_index < 0) {
            qmfs->v_index = (640 - 64);
        }
    }
}

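/*
 * 64-band counterpart of the low-power routine above: 32-point DCT-II /
 * DCT-IV, 128 new v entries per time slot, and full-stride qmf_c taps.
 */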
void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    ALIGN real_t x[64];
    ALIGN real_t y[64];
    int32_t n, k, out = 0;
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++) {
        /* shift buffers */
        /* we are not shifting v, it is a double ringbuffer */
        //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t));

        /* calculate 128 samples */
        for (k = 0; k < 32; k++) {
#ifdef FIXED_POINT
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][63 - k]));
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][63 - k]));
#else
            y[k] = (QMF_RE(X[l][k]) - QMF_RE(X[l][63 - k])) / 32.0;
            x[k] = (QMF_RE(X[l][k]) + QMF_RE(X[l][63 - k])) / 32.0;
#endif
        }

        /* even n samples */
        DCT2_32_unscaled(x, x);
        /* odd n samples */
        DCT4_32(y, y);

        for (n = 16; n < 48; n++) {
            qmfs->v[qmfs->v_index + n * 2] = qmfs->v[qmfs->v_index + 1280 + n * 2] = x[n - 16];
            qmfs->v[qmfs->v_index + n * 2 + 1] = qmfs->v[qmfs->v_index + 1280 + n * 2 + 1] = y[n - 16];
        }
        for (n = 0; n < 32; n++) {
            qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 1280 + n] = qmfs->v[qmfs->v_index + 64 - n];
        }
        qmfs->v[qmfs->v_index + 96] = qmfs->v[qmfs->v_index + 1280 + 96] = 0;
        for (n = 1; n < 32; n++) {
            qmfs->v[qmfs->v_index + 96 + n] = qmfs->v[qmfs->v_index + 1280 + 96 + n] = -qmfs->v[qmfs->v_index + 96 - n];
        }

        /* calculate 64 output samples and window */
        for (k = 0; k < 64; k++) {
            output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 192 + k], qmf_c[64 + k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[128 + k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 256 + 192 + k], qmf_c[128 + 64 + k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[256 + k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 512 + 192 + k], qmf_c[256 + 64 + k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 768 + k], qmf_c[384 + k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 768 + 192 + k], qmf_c[384 + 64 + k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 1024 + k], qmf_c[512 + k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 1024 + 192 + k], qmf_c[512 + 64 + k]);
        }

        /* update the ringbuffer index */
        qmfs->v_index -= 128;
        if (qmfs->v_index < 0) {
            qmfs->v_index = (1280 - 128);
        }
    }
}
#else
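/*
 * High-quality 32-band synthesis.  The complex inverse modulation is
 * factored into real transforms: each subband sample is pre-twiddled by
 * qmf32_pre_twiddle[k] = exp(-j*pi*(2k+1)/256) and scaled (1/64 in
 * float, >> 1 in fixed point), the twiddled real parts go through a
 * DCT-IV and the imaginary parts through a DST-IV, and the two results
 * are combined with sign flips into 64 new v entries per time slot.
 */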
void sbr_qmf_synthesis_32(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    ALIGN real_t x1[32], x2[32];
#ifndef FIXED_POINT
    real_t scale = 1.f / 64.f;
#endif
    int32_t n, k, out = 0;
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++) {
        /* shift buffer v */
        /* buffer is not shifted, we are using a ringbuffer */
        //memmove(qmfs->v + 64, qmfs->v, (640-64)*sizeof(real_t));

        /* calculate 64 samples */
        /* complex pre-twiddle */
        for (k = 0; k < 32; k++) {
            x1[k] = MUL_F(QMF_RE(X[l][k]), RE(qmf32_pre_twiddle[k])) - MUL_F(QMF_IM(X[l][k]), IM(qmf32_pre_twiddle[k]));
            x2[k] = MUL_F(QMF_IM(X[l][k]), RE(qmf32_pre_twiddle[k])) + MUL_F(QMF_RE(X[l][k]), IM(qmf32_pre_twiddle[k]));

#ifndef FIXED_POINT
            x1[k] *= scale;
            x2[k] *= scale;
#else
            x1[k] >>= 1;
            x2[k] >>= 1;
#endif
        }

        /* transform */
        DCT4_32(x1, x1);
        DST4_32(x2, x2);

        for (n = 0; n < 32; n++) {
            qmfs->v[qmfs->v_index + n] = qmfs->v[qmfs->v_index + 640 + n] = -x1[n] + x2[n];
            qmfs->v[qmfs->v_index + 63 - n] = qmfs->v[qmfs->v_index + 640 + 63 - n] = x1[n] + x2[n];
        }

        /* calculate 32 output samples and window */
        for (k = 0; k < 32; k++) {
            output[out++] = MUL_F(qmfs->v[qmfs->v_index + k], qmf_c[2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 96 + k], qmf_c[64 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 128 + k], qmf_c[128 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 224 + k], qmf_c[192 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 256 + k], qmf_c[256 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 352 + k], qmf_c[320 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 384 + k], qmf_c[384 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 480 + k], qmf_c[448 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 512 + k], qmf_c[512 + 2 * k]) +
                            MUL_F(qmfs->v[qmfs->v_index + 608 + k], qmf_c[576 + 2 * k]);
        }

        /* update ringbuffer index */
        qmfs->v_index -= 64;
        if (qmfs->v_index < 0) {
            qmfs->v_index = (640 - 64);
        }
    }
}

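/*
 * High-quality 64-band synthesis.  The 128-point inverse modulation is
 * computed as two FFT-based 32-point DCT-IV kernels (dct4_kernel): one
 * fed with the reordered real parts of X[l], the other with the
 * reversed, reordered imaginary parts.  Their outputs are interleaved
 * into 128 new v entries, which are then windowed with qmf_c.  With
 * PREFER_POINTERS the loops are written with post-incremented pointers
 * for targets that have auto-increment address generators.
 */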
void sbr_qmf_synthesis_64(sbr_info *sbr, qmfs_info *qmfs, qmf_t X[MAX_NTSRHFG][64],
                          real_t *output)
{
    // ALIGN real_t x1[64], x2[64];
#ifndef SBR_LOW_POWER
    ALIGN real_t in_real1[32], in_imag1[32], out_real1[32], out_imag1[32];
    ALIGN real_t in_real2[32], in_imag2[32], out_real2[32], out_imag2[32];
#endif
    qmf_t * pX;
    real_t * pring_buffer_1, * pring_buffer_3;
    // real_t * ptemp_1, * ptemp_2;
#ifdef PREFER_POINTERS
    // These pointers are used if the target platform has auto-increment address generators
    real_t * pring_buffer_2, * pring_buffer_4;
    real_t * pring_buffer_5, * pring_buffer_6;
    real_t * pring_buffer_7, * pring_buffer_8;
    real_t * pring_buffer_9, * pring_buffer_10;
    const real_t * pqmf_c_1, * pqmf_c_2, * pqmf_c_3, * pqmf_c_4;
    const real_t * pqmf_c_5, * pqmf_c_6, * pqmf_c_7, * pqmf_c_8;
    const real_t * pqmf_c_9, * pqmf_c_10;
#endif // #ifdef PREFER_POINTERS
#ifndef FIXED_POINT
    real_t scale = 1.f / 64.f;
#endif
    int32_t n, k, out = 0;
    uint8_t l;

    /* qmf subsample l */
    for (l = 0; l < sbr->numTimeSlotsRate; l++) {
        /* shift buffer v */
        /* buffer is not shifted, we use a double ringbuffer */
        //memmove(qmfs->v + 128, qmfs->v, (1280-128)*sizeof(real_t));

        /* calculate 128 samples */
#ifndef FIXED_POINT

        pX = X[l];

        in_imag1[31] = scale * QMF_RE(pX[1]);
        in_real1[0]  = scale * QMF_RE(pX[0]);
        in_imag2[31] = scale * QMF_IM(pX[63 - 1]);
        in_real2[0]  = scale * QMF_IM(pX[63 - 0]);
        for (k = 1; k < 31; k++) {
            in_imag1[31 - k] = scale * QMF_RE(pX[2 * k + 1]);
            in_real1[     k] = scale * QMF_RE(pX[2 * k    ]);
            in_imag2[31 - k] = scale * QMF_IM(pX[63 - (2 * k + 1)]);
            in_real2[     k] = scale * QMF_IM(pX[63 - (2 * k)]);
        }
        in_imag1[0]  = scale * QMF_RE(pX[63]);
        in_real1[31] = scale * QMF_RE(pX[62]);
        in_imag2[0]  = scale * QMF_IM(pX[63 - 63]);
        in_real2[31] = scale * QMF_IM(pX[63 - 62]);

#else

        pX = X[l];

        in_imag1[31] = QMF_RE(pX[1]) >> 1;
        in_real1[0]  = QMF_RE(pX[0]) >> 1;
        in_imag2[31] = QMF_IM(pX[62]) >> 1;
        in_real2[0]  = QMF_IM(pX[63]) >> 1;
        for (k = 1; k < 31; k++) {
            in_imag1[31 - k] = QMF_RE(pX[2 * k + 1]) >> 1;
            in_real1[     k] = QMF_RE(pX[2 * k    ]) >> 1;
            in_imag2[31 - k] = QMF_IM(pX[63 - (2 * k + 1)]) >> 1;
            in_real2[     k] = QMF_IM(pX[63 - (2 * k)]) >> 1;
        }
        in_imag1[0]  = QMF_RE(pX[63]) >> 1;
        in_real1[31] = QMF_RE(pX[62]) >> 1;
        in_imag2[0]  = QMF_IM(pX[0]) >> 1;
        in_real2[31] = QMF_IM(pX[1]) >> 1;

#endif

        // dct4_kernel is a DCT_IV without the reordering, which is done before and after the FFT
        dct4_kernel(in_real1, in_imag1, out_real1, out_imag1);
        dct4_kernel(in_real2, in_imag2, out_real2, out_imag2);

        pring_buffer_1 = qmfs->v + qmfs->v_index;
        pring_buffer_3 = pring_buffer_1 + 1280;
#ifdef PREFER_POINTERS
        pring_buffer_2 = pring_buffer_1 + 127;
        pring_buffer_4 = pring_buffer_1 + (1280 + 127);
#endif // #ifdef PREFER_POINTERS
        // ptemp_1 = x1;
        // ptemp_2 = x2;
#ifdef PREFER_POINTERS
        for (n = 0; n < 32; n++) {
            //real_t x1 = *ptemp_1++;
            //real_t x2 = *ptemp_2++;
            // pring_buffer_3 and pring_buffer_4 are needed only for the double ring buffer
            *pring_buffer_1++ = *pring_buffer_3++ = out_real2[n] - out_real1[n];
            *pring_buffer_2-- = *pring_buffer_4-- = out_real2[n] + out_real1[n];
            //x1 = *ptemp_1++;
            //x2 = *ptemp_2++;
            *pring_buffer_1++ = *pring_buffer_3++ = out_imag2[31 - n] + out_imag1[31 - n];
            *pring_buffer_2-- = *pring_buffer_4-- = out_imag2[31 - n] - out_imag1[31 - n];
        }
#else // #ifdef PREFER_POINTERS

        for (n = 0; n < 32; n++) {
            // pring_buffer_3 is needed only for the double ring buffer
            pring_buffer_1[2 * n]             = pring_buffer_3[2 * n]             = out_real2[n] - out_real1[n];
            pring_buffer_1[127 - 2 * n]       = pring_buffer_3[127 - 2 * n]       = out_real2[n] + out_real1[n];
            pring_buffer_1[2 * n + 1]         = pring_buffer_3[2 * n + 1]         = out_imag2[31 - n] + out_imag1[31 - n];
            pring_buffer_1[127 - (2 * n + 1)] = pring_buffer_3[127 - (2 * n + 1)] = out_imag2[31 - n] - out_imag1[31 - n];
        }

#endif // #ifdef PREFER_POINTERS

        pring_buffer_1 = qmfs->v + qmfs->v_index;
#ifdef PREFER_POINTERS
        pring_buffer_2 = pring_buffer_1 + 192;
        pring_buffer_3 = pring_buffer_1 + 256;
        pring_buffer_4 = pring_buffer_1 + (256 + 192);
        pring_buffer_5 = pring_buffer_1 + 512;
        pring_buffer_6 = pring_buffer_1 + (512 + 192);
        pring_buffer_7 = pring_buffer_1 + 768;
        pring_buffer_8 = pring_buffer_1 + (768 + 192);
        pring_buffer_9 = pring_buffer_1 + 1024;
        pring_buffer_10 = pring_buffer_1 + (1024 + 192);
        pqmf_c_1 = qmf_c;
        pqmf_c_2 = qmf_c + 64;
        pqmf_c_3 = qmf_c + 128;
        pqmf_c_4 = qmf_c + 192;
        pqmf_c_5 = qmf_c + 256;
        pqmf_c_6 = qmf_c + 320;
        pqmf_c_7 = qmf_c + 384;
        pqmf_c_8 = qmf_c + 448;
        pqmf_c_9 = qmf_c + 512;
        pqmf_c_10 = qmf_c + 576;
#endif // #ifdef PREFER_POINTERS

        /* calculate 64 output samples and window */
        for (k = 0; k < 64; k++) {
#ifdef PREFER_POINTERS
            output[out++] =
                MUL_F(*pring_buffer_1++, *pqmf_c_1++) +
                MUL_F(*pring_buffer_2++, *pqmf_c_2++) +
                MUL_F(*pring_buffer_3++, *pqmf_c_3++) +
                MUL_F(*pring_buffer_4++, *pqmf_c_4++) +
                MUL_F(*pring_buffer_5++, *pqmf_c_5++) +
                MUL_F(*pring_buffer_6++, *pqmf_c_6++) +
                MUL_F(*pring_buffer_7++, *pqmf_c_7++) +
                MUL_F(*pring_buffer_8++, *pqmf_c_8++) +
                MUL_F(*pring_buffer_9++, *pqmf_c_9++) +
                MUL_F(*pring_buffer_10++, *pqmf_c_10++);
#else // #ifdef PREFER_POINTERS
            output[out++] =
                MUL_F(pring_buffer_1[k + 0], qmf_c[k + 0]) +
                MUL_F(pring_buffer_1[k + 192], qmf_c[k + 64]) +
                MUL_F(pring_buffer_1[k + 256], qmf_c[k + 128]) +
                MUL_F(pring_buffer_1[k + (256 + 192)], qmf_c[k + 192]) +
                MUL_F(pring_buffer_1[k + 512], qmf_c[k + 256]) +
                MUL_F(pring_buffer_1[k + (512 + 192)], qmf_c[k + 320]) +
                MUL_F(pring_buffer_1[k + 768], qmf_c[k + 384]) +
                MUL_F(pring_buffer_1[k + (768 + 192)], qmf_c[k + 448]) +
                MUL_F(pring_buffer_1[k + 1024], qmf_c[k + 512]) +
                MUL_F(pring_buffer_1[k + (1024 + 192)], qmf_c[k + 576]);
#endif // #ifdef PREFER_POINTERS
        }

        /* update ringbuffer index */
        qmfs->v_index -= 128;
        if (qmfs->v_index < 0) {
            qmfs->v_index = (1280 - 128);
        }
    }
}
#endif /* SBR_LOW_POWER */

#endif /* SBR_DEC */