/*
 * libmad - MPEG audio decoder library
 * Copyright (C) 2000-2004 Underbit Technologies, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * $Id: fixed.h,v 1.38 2004/02/17 02:02:03 rob Exp $
 */

# ifndef LIBMAD_FIXED_H
# define LIBMAD_FIXED_H

# include "config.h"

# if SIZEOF_INT >= 4
typedef signed int mad_fixed_t;

typedef signed int mad_fixed64hi_t;
typedef unsigned int mad_fixed64lo_t;
# else
typedef signed long mad_fixed_t;

typedef signed long mad_fixed64hi_t;
typedef unsigned long mad_fixed64lo_t;
# endif

# if defined(_MSC_VER)
# define mad_fixed64_t signed __int64
# elif 1 || defined(__GNUC__)
# define mad_fixed64_t signed long long
# endif

# if defined(FPM_FLOAT)
typedef double mad_sample_t;
# else
typedef mad_fixed_t mad_sample_t;
# endif

/*
 * Fixed-point format: 0xABBBBBBB
 * A == whole part (sign + 3 bits)
 * B == fractional part (28 bits)
 *
 * Values are signed two's complement, so the effective range is:
 * 0x80000000 to 0x7fffffff
 * -8.0 to +7.9999999962747097015380859375
 *
 * The smallest representable value is:
 * 0x00000001 == 0.0000000037252902984619140625 (i.e. about 3.725e-9)
 *
 * 28 bits of fractional accuracy represent about
 * 8.6 digits of decimal accuracy.
 *
 * Fixed-point numbers can be added or subtracted as normal
 * integers, but multiplication requires shifting the 64-bit result
 * from 56 fractional bits back to 28 (and rounding.)
 *
 * Changing the definition of MAD_F_FRACBITS is only partially
 * supported, and must be done with care.
 */
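
/*
 * Worked example (for illustration): with 28 fractional bits, 1.0 is
 * represented as 0x10000000 and 0.5 as 0x08000000. Multiplying the raw
 * words 0x08000000 * 0x08000000 gives the 64-bit value 0x0040000000000000
 * with 56 fractional bits; shifting right by 28 yields 0x04000000,
 * i.e. 0.25 as expected.
 */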

# define MAD_F_FRACBITS 28

# if MAD_F_FRACBITS == 28
# define MAD_F(x) ((mad_fixed_t) (x##L))
# else
# if MAD_F_FRACBITS < 28
# warning "MAD_F_FRACBITS < 28"
# define MAD_F(x) ((mad_fixed_t) \
    (((x##L) + \
      (1L << (28 - MAD_F_FRACBITS - 1))) >> \
     (28 - MAD_F_FRACBITS)))
# elif MAD_F_FRACBITS > 28
# error "MAD_F_FRACBITS > 28 not currently supported"
# define MAD_F(x) ((mad_fixed_t) \
    ((x##L) << (MAD_F_FRACBITS - 28)))
# endif
# endif

# define MAD_F_MIN ((mad_fixed_t) -0x80000000L)
# define MAD_F_MAX ((mad_fixed_t) +0x7fffffffL)

# define MAD_F_ONE MAD_F(0x10000000)

# define mad_f_tofixed(x) ((mad_fixed_t) \
    ((x) * (double) (1L << MAD_F_FRACBITS) + 0.5))
# define mad_f_todouble(x) ((double) \
    ((x) / (double) (1L << MAD_F_FRACBITS)))

# define mad_f_intpart(x) ((x) >> MAD_F_FRACBITS)
# define mad_f_fracpart(x) ((x) & ((1L << MAD_F_FRACBITS) - 1))
    /* (x should be positive) */

# define mad_f_fromint(x) ((x) << MAD_F_FRACBITS)
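
/*
 * Illustrative values (assuming the default MAD_F_FRACBITS of 28):
 *
 *   mad_f_tofixed(0.5)                == 0x08000000
 *   mad_f_todouble(MAD_F_ONE)         == 1.0
 *   mad_f_fromint(2)                  == 0x20000000
 *   mad_f_intpart(MAD_F(0x18000000))  == 1            (1.5 -> whole part)
 *   mad_f_fracpart(MAD_F(0x18000000)) == 0x08000000   (1.5 -> 0.5)
 */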

# define mad_f_add(x, y) ((x) + (y))
# define mad_f_sub(x, y) ((x) - (y))

# if defined(FPM_FLOAT)
# error "FPM_FLOAT not yet supported"

# undef MAD_F
# define MAD_F(x) mad_f_todouble(x)

# define mad_f_mul(x, y) ((x) * (y))
# define mad_f_scale64

# undef ASO_ZEROCHECK

# elif defined(FPM_64BIT)

/*
 * This version should be the most accurate if 64-bit types are supported by
 * the compiler, although it may not be the most efficient.
 */
# if defined(OPT_ACCURACY)
# define mad_f_mul(x, y) \
    ((mad_fixed_t) \
     ((((mad_fixed64_t) (x) * (y)) + \
       (1L << (MAD_F_SCALEBITS - 1))) >> MAD_F_SCALEBITS))
# else
# define mad_f_mul(x, y) \
    ((mad_fixed_t) (((mad_fixed64_t) (x) * (y)) >> MAD_F_SCALEBITS))
# endif

# define MAD_F_SCALEBITS MAD_F_FRACBITS

/* --- Intel --------------------------------------------------------------- */

# elif defined(FPM_INTEL)

# if defined(_MSC_VER)
# pragma warning(push)
# pragma warning(disable: 4035)  /* no return value */
static __forceinline
mad_fixed_t mad_f_mul_inline(mad_fixed_t x, mad_fixed_t y)
{
  enum {
    fracbits = MAD_F_FRACBITS
  };

  __asm {
    mov eax, x
    imul y
    shrd eax, edx, fracbits
  }

  /* implicit return of eax */
}
# pragma warning(pop)

# define mad_f_mul mad_f_mul_inline
# define mad_f_scale64
# else
/*
 * This Intel version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
# define MAD_F_MLX(hi, lo, x, y) \
    asm ("imull %3" \
         : "=a" (lo), "=d" (hi) \
         : "%a" (x), "rm" (y) \
         : "cc")

# if defined(OPT_ACCURACY)
/*
 * This gives best accuracy but is not very fast.
 */
# define MAD_F_MLA(hi, lo, x, y) \
    ({ mad_fixed64hi_t __hi; \
       mad_fixed64lo_t __lo; \
       MAD_F_MLX(__hi, __lo, (x), (y)); \
       asm ("addl %2,%0\n\t" \
            "adcl %3,%1" \
            : "=rm" (lo), "=rm" (hi) \
            : "r" (__lo), "r" (__hi), "0" (lo), "1" (hi) \
            : "cc"); \
    })
# endif /* OPT_ACCURACY */

# if defined(OPT_ACCURACY)
/*
 * Surprisingly, this is faster than SHRD followed by ADC.
 */
# define mad_f_scale64(hi, lo) \
    ({ mad_fixed64hi_t __hi_; \
       mad_fixed64lo_t __lo_; \
       mad_fixed_t __result; \
       asm ("addl %4,%2\n\t" \
            "adcl %5,%3" \
            : "=rm" (__lo_), "=rm" (__hi_) \
            : "0" (lo), "1" (hi), \
              "ir" (1L << (MAD_F_SCALEBITS - 1)), "ir" (0) \
            : "cc"); \
       asm ("shrdl %3,%2,%1" \
            : "=rm" (__result) \
            : "0" (__lo_), "r" (__hi_), "I" (MAD_F_SCALEBITS) \
            : "cc"); \
       __result; \
    })
# elif defined(OPT_INTEL)
/*
 * Alternate Intel scaling that may or may not perform better.
 */
# define mad_f_scale64(hi, lo) \
    ({ mad_fixed_t __result; \
       asm ("shrl %3,%1\n\t" \
            "shll %4,%2\n\t" \
            "orl %2,%1" \
            : "=rm" (__result) \
            : "0" (lo), "r" (hi), \
              "I" (MAD_F_SCALEBITS), "I" (32 - MAD_F_SCALEBITS) \
            : "cc"); \
       __result; \
    })
# else
# define mad_f_scale64(hi, lo) \
    ({ mad_fixed_t __result; \
       asm ("shrdl %3,%2,%1" \
            : "=rm" (__result) \
            : "0" (lo), "r" (hi), "I" (MAD_F_SCALEBITS) \
            : "cc"); \
       __result; \
    })
# endif /* OPT_ACCURACY */

# define MAD_F_SCALEBITS MAD_F_FRACBITS
# endif

/* --- ARM ----------------------------------------------------------------- */

# elif defined(FPM_ARM)

/*
 * This ARM V4 version is as accurate as FPM_64BIT but much faster. The
 * least significant bit is properly rounded at no CPU cycle cost!
 */
# if 1
/*
 * This is faster than the default implementation via MAD_F_MLX() and
 * mad_f_scale64().
 */
# define mad_f_mul(x, y) \
    ({ mad_fixed64hi_t __hi; \
       mad_fixed64lo_t __lo; \
       mad_fixed_t __result; \
       asm ("smull %0, %1, %3, %4\n\t" \
            "movs %0, %0, lsr %5\n\t" \
            "adc %2, %0, %1, lsl %6" \
            : "=&r" (__lo), "=&r" (__hi), "=r" (__result) \
            : "%r" (x), "r" (y), \
              "M" (MAD_F_SCALEBITS), "M" (32 - MAD_F_SCALEBITS) \
            : "cc"); \
       __result; \
    })
# endif
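
/*
 * Note on the "free" rounding above (and in mad_f_scale64() below): the
 * "movs ..., lsr" leaves the last bit shifted out of the low word in the
 * carry flag, and the following "adc" adds that carry back in while
 * combining the high and low halves, so the product is rounded rather
 * than truncated.
 */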

# define MAD_F_MLX(hi, lo, x, y) \
    asm ("smull %0, %1, %2, %3" \
         : "=&r" (lo), "=&r" (hi) \
         : "%r" (x), "r" (y))

# define MAD_F_MLA(hi, lo, x, y) \
    asm ("smlal %0, %1, %2, %3" \
         : "+r" (lo), "+r" (hi) \
         : "%r" (x), "r" (y))

# define MAD_F_MLN(hi, lo) \
    asm ("rsbs %0, %2, #0\n\t" \
         "rsc %1, %3, #0" \
         : "=r" (lo), "=r" (hi) \
         : "0" (lo), "1" (hi) \
         : "cc")

# define mad_f_scale64(hi, lo) \
    ({ mad_fixed_t __result; \
       asm ("movs %0, %1, lsr %3\n\t" \
            "adc %0, %0, %2, lsl %4" \
            : "=&r" (__result) \
            : "r" (lo), "r" (hi), \
              "M" (MAD_F_SCALEBITS), "M" (32 - MAD_F_SCALEBITS) \
            : "cc"); \
       __result; \
    })

# define MAD_F_SCALEBITS MAD_F_FRACBITS

/* --- MIPS ---------------------------------------------------------------- */

# elif defined(FPM_MIPS)

/*
 * This MIPS version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
# define MAD_F_MLX(hi, lo, x, y) \
    asm ("mult %2,%3" \
         : "=l" (lo), "=h" (hi) \
         : "%r" (x), "r" (y))

# if defined(HAVE_MADD_ASM)
# define MAD_F_MLA(hi, lo, x, y) \
    asm ("madd %2,%3" \
         : "+l" (lo), "+h" (hi) \
         : "%r" (x), "r" (y))
# elif defined(HAVE_MADD16_ASM)
/*
 * This loses significant accuracy due to the 16-bit integer limit in the
 * multiply/accumulate instruction.
 */
# define MAD_F_ML0(hi, lo, x, y) \
    asm ("mult %2,%3" \
         : "=l" (lo), "=h" (hi) \
         : "%r" ((x) >> 12), "r" ((y) >> 16))
# define MAD_F_MLA(hi, lo, x, y) \
    asm ("madd16 %2,%3" \
         : "+l" (lo), "+h" (hi) \
         : "%r" ((x) >> 12), "r" ((y) >> 16))
# define MAD_F_MLZ(hi, lo) ((mad_fixed_t) (lo))
# endif

# if defined(OPT_SPEED)
# define mad_f_scale64(hi, lo) \
    ((mad_fixed_t) ((hi) << (32 - MAD_F_SCALEBITS)))
# define MAD_F_SCALEBITS MAD_F_FRACBITS
# endif

/* --- SPARC --------------------------------------------------------------- */

# elif defined(FPM_SPARC)

/*
 * This SPARC V8 version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
# define MAD_F_MLX(hi, lo, x, y) \
    asm ("smul %2, %3, %0\n\t" \
         "rd %%y, %1" \
         : "=r" (lo), "=r" (hi) \
         : "%r" (x), "rI" (y))

/* --- PowerPC ------------------------------------------------------------- */

# elif defined(FPM_PPC)

/*
 * This PowerPC version is fast and accurate; the disposition of the least
 * significant bit depends on OPT_ACCURACY via mad_f_scale64().
 */
# define MAD_F_MLX(hi, lo, x, y) \
    do { \
      asm ("mullw %0,%1,%2" \
           : "=r" (lo) \
           : "%r" (x), "r" (y)); \
      asm ("mulhw %0,%1,%2" \
           : "=r" (hi) \
           : "%r" (x), "r" (y)); \
    } \
    while (0)

# if defined(OPT_ACCURACY)
/*
 * This gives best accuracy but is not very fast.
 */
# define MAD_F_MLA(hi, lo, x, y) \
    ({ mad_fixed64hi_t __hi; \
       mad_fixed64lo_t __lo; \
       MAD_F_MLX(__hi, __lo, (x), (y)); \
       asm ("addc %0,%2,%3\n\t" \
            "adde %1,%4,%5" \
            : "=r" (lo), "=r" (hi) \
            : "%r" (lo), "r" (__lo), \
              "%r" (hi), "r" (__hi) \
            : "xer"); \
    })
# endif

# if defined(OPT_ACCURACY)
/*
 * This is slower than the truncating version below it.
 */
# define mad_f_scale64(hi, lo) \
    ({ mad_fixed_t __result, __round; \
       asm ("rotrwi %0,%1,%2" \
            : "=r" (__result) \
            : "r" (lo), "i" (MAD_F_SCALEBITS)); \
       asm ("extrwi %0,%1,1,0" \
            : "=r" (__round) \
            : "r" (__result)); \
       asm ("insrwi %0,%1,%2,0" \
            : "+r" (__result) \
            : "r" (hi), "i" (MAD_F_SCALEBITS)); \
       asm ("add %0,%1,%2" \
            : "=r" (__result) \
            : "%r" (__result), "r" (__round)); \
       __result; \
    })
# else
# define mad_f_scale64(hi, lo) \
    ({ mad_fixed_t __result; \
       asm ("rotrwi %0,%1,%2" \
            : "=r" (__result) \
            : "r" (lo), "i" (MAD_F_SCALEBITS)); \
       asm ("insrwi %0,%1,%2,0" \
            : "+r" (__result) \
            : "r" (hi), "i" (MAD_F_SCALEBITS)); \
       __result; \
    })
# endif

# define MAD_F_SCALEBITS MAD_F_FRACBITS

/* --- Default ------------------------------------------------------------- */

# elif defined(FPM_DEFAULT)

/*
 * This version is the most portable but it loses significant accuracy.
 * Furthermore, accuracy is biased against the second argument, so care
 * should be taken when ordering operands.
 *
 * The scale factors are constant as this is not used with SSO.
 *
 * Pre-rounding is required to stay within the limits of compliance.
 */
# if defined(OPT_SPEED)
# define mad_f_mul(x, y) (((x) >> 12) * ((y) >> 16))
# else
# define mad_f_mul(x, y) ((((x) + (1L << 11)) >> 12) * \
                          (((y) + (1L << 15)) >> 16))
# endif
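
/*
 * Illustrative arithmetic for the macros above: (x) >> 12 keeps 16
 * fractional bits and (y) >> 16 keeps 12, so their product again has
 * 28 fractional bits, but (y) has lost four more low-order bits than
 * (x); this is the bias against the second argument mentioned in the
 * comment. The non-OPT_SPEED variant adds half of the discarded range
 * to each operand first so the shifts round instead of truncating.
 */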

/* ------------------------------------------------------------------------- */

# else
# error "no FPM selected"
# endif

/* default implementations */

# if !defined(mad_f_mul)
# define mad_f_mul(x, y) \
    ({ register mad_fixed64hi_t __hi; \
       register mad_fixed64lo_t __lo; \
       MAD_F_MLX(__hi, __lo, (x), (y)); \
       mad_f_scale64(__hi, __lo); \
    })
# endif

# if !defined(MAD_F_MLA)
# define MAD_F_ML0(hi, lo, x, y) ((lo) = mad_f_mul((x), (y)))
# define MAD_F_MLA(hi, lo, x, y) ((lo) += mad_f_mul((x), (y)))
# define MAD_F_MLN(hi, lo) ((lo) = -(lo))
# define MAD_F_MLZ(hi, lo) ((void) (hi), (mad_fixed_t) (lo))
# endif

# if !defined(MAD_F_ML0)
# define MAD_F_ML0(hi, lo, x, y) MAD_F_MLX((hi), (lo), (x), (y))
# endif

# if !defined(MAD_F_MLN)
# define MAD_F_MLN(hi, lo) ((hi) = ((lo) = -(lo)) ? ~(hi) : -(hi))
# endif

# if !defined(MAD_F_MLZ)
# define MAD_F_MLZ(hi, lo) mad_f_scale64((hi), (lo))
# endif

# if !defined(mad_f_scale64)
# if defined(OPT_ACCURACY)
# define mad_f_scale64(hi, lo) \
    ((((mad_fixed_t) \
       (((hi) << (32 - (MAD_F_SCALEBITS - 1))) | \
        ((lo) >> (MAD_F_SCALEBITS - 1)))) + 1) >> 1)
# else
# define mad_f_scale64(hi, lo) \
    ((mad_fixed_t) \
     (((hi) << (32 - MAD_F_SCALEBITS)) | \
      ((lo) >> MAD_F_SCALEBITS)))
# endif
# define MAD_F_SCALEBITS MAD_F_FRACBITS
# endif
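
/*
 * Worked example for the truncating default with MAD_F_SCALEBITS == 28:
 * squaring MAD_F_ONE produces the 64-bit value 2^56, i.e. hi == 0x01000000
 * and lo == 0; mad_f_scale64() then yields
 * (0x01000000 << 4) | (0 >> 28) == 0x10000000, which is MAD_F_ONE again.
 */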

/* C routines */

mad_fixed_t mad_f_abs(mad_fixed_t);
mad_fixed_t mad_f_div(mad_fixed_t, mad_fixed_t);

# endif