/* $NetBSD: qp.c,v 1.11 2014/02/02 08:14:39 martin Exp $ */

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
     28   1.1     jmc 
     29   1.1     jmc #include <sys/cdefs.h>
     30   1.1     jmc #include <memory.h>
     31   1.1     jmc 
     32   1.1     jmc #include "milieu.h"
     33   1.1     jmc #include "softfloat.h"
     34   1.1     jmc 
     35  1.11  martin int printf(const char *, ...);
     36   1.1     jmc 
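/*
 * Quad-precision (IEEE binary128) soft-float glue.
 *
 * The _Qp_* routines below are the quad-float emulation entry points
 * that SPARC V9 compilers emit calls to for "long double" arithmetic;
 * each one is a thin wrapper around the matching softfloat float128
 * primitive.
 */
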
void _Qp_add(float128 *c, float128 *a, float128 *b);
int  _Qp_cmp(float128 *a, float128 *b);
int  _Qp_cmpe(float128 *a, float128 *b);
void _Qp_div(float128 *c, float128 *a, float128 *b);
void _Qp_dtoq(float128 *c, double a);
int  _Qp_feq(float128 *a, float128 *b);
int  _Qp_fge(float128 *a, float128 *b);
int  _Qp_fgt(float128 *a, float128 *b);
int  _Qp_fle(float128 *a, float128 *b);
int  _Qp_flt(float128 *a, float128 *b);
int  _Qp_fne(float128 *a, float128 *b);
void _Qp_itoq(float128 *c, int a);
void _Qp_mul(float128 *c, float128 *a, float128 *b);
void _Qp_neg(float128 *c, float128 *a);
double _Qp_qtod(float128 *a);
int _Qp_qtoi(float128 *a);
float _Qp_qtos(float128 *a);
unsigned int _Qp_qtoui(float128 *a);
unsigned long _Qp_qtoux(float128 *a);
long _Qp_qtox(float128 *a);
void _Qp_sqrt(float128 *c, float128 *a);
void _Qp_stoq(float128 *c, float a);
void _Qp_sub(float128 *c, float128 *a, float128 *b);
void _Qp_uitoq(float128 *c, unsigned int a);
void _Qp_uxtoq(float128 *c, unsigned long a);
void _Qp_xtoq(float128 *c, long a);


void
_Qp_add(float128 *c, float128 *a, float128 *b)
{
	*c = float128_add(*a, *b);
}


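/*
 * Three-way comparison: returns 0 if a == b, 1 if a < b and 2 if a > b.
 * Unordered operands (NaNs) do not get a distinct return value here;
 * they fall through to the "greater than" case.
 */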
int
_Qp_cmp(float128 *a, float128 *b)
{

	if (float128_eq(*a, *b))
		return 0;

	if (float128_le(*a, *b))
		return 1;

	return 2;
}


/*
 * XXX _Qp_cmpe is supposed to be the signalling variant of _Qp_cmp,
 * raising an invalid-operation exception for unordered operands; for
 * now it simply aliases _Qp_cmp.
 */
int
_Qp_cmpe(float128 *a, float128 *b)
{
	return _Qp_cmp(a, b);
}


void
_Qp_div(float128 *c, float128 *a, float128 *b)
{
	*c = float128_div(*a, *b);
}


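/*
 * Convert a native double to float128.  The double's bit pattern is
 * copied into a softfloat float64 with memcpy (a cast would convert
 * the value numerically instead of reinterpreting the bits) and then
 * widened with float64_to_float128().
 */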
void
_Qp_dtoq(float128 *c, double a)
{
	float64 _b;

	memcpy(&_b, &a, sizeof(float64));
	*c = float64_to_float128(_b);
}


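/*
 * Ordered comparisons.  softfloat provides eq/le/lt, so >= and > are
 * expressed by swapping the operands of le and lt, and != as the
 * negation of ==.
 */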
int
_Qp_feq(float128 *a, float128 *b)
{
	return float128_eq(*a, *b);
}


int
_Qp_fge(float128 *a, float128 *b)
{
	return float128_le(*b, *a);
}


int
_Qp_fgt(float128 *a, float128 *b)
{
	return float128_lt(*b, *a);
}


int
_Qp_fle(float128 *a, float128 *b)
{
	return float128_le(*a, *b);
}


int
_Qp_flt(float128 *a, float128 *b)
{
	return float128_lt(*a, *b);
}


int
_Qp_fne(float128 *a, float128 *b)
{
	return !float128_eq(*a, *b);
}


void
_Qp_itoq(float128 *c, int a)
{
	*c = int32_to_float128(a);
}


void
_Qp_mul(float128 *c, float128 *a, float128 *b)
{
	*c = float128_mul(*a, *b);
}


/*
 * XXX need corresponding softfloat functions (e.g. a float128 negate
 * and an unsigned 64-bit to float128 conversion); until then the
 * constants 0.0 and 1.0 are built by hand.  The initializers are the
 * { high, low } words of the IEEE binary128 bit pattern: all zeroes
 * for 0.0, a biased exponent of 0x3fff with a zero fraction for 1.0.
 */
static float128 __sf128_zero = {0x0000000000000000, 0x0000000000000000};
static float128 __sf128_one = {0x3fff000000000000, 0};

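/*
 * Negate by computing 0 - a, since softfloat has no float128 negate
 * primitive (see the XXX above).  A side effect is that negating a
 * zero yields +0 rather than -0 under the default rounding mode.
 */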
void
_Qp_neg(float128 *c, float128 *a)
{
	*c = float128_sub(__sf128_zero, *a);
}


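/*
 * Narrow float128 to double: convert to a softfloat float64, then
 * memcpy the result into a native double so its bits are
 * reinterpreted rather than converted a second time.
 */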
double
_Qp_qtod(float128 *a)
{
	float64 _c;
	double c;

	_c = float128_to_float64(*a);

	memcpy(&c, &_c, sizeof(double));

	return c;
}


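/*
 * float128 to 32-bit int, truncating toward zero (the rounding C
 * requires when casting to an integer type).
 */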
int
_Qp_qtoi(float128 *a)
{
	return float128_to_int32_round_to_zero(*a);
}


float
_Qp_qtos(float128 *a)
{
	float c;
	float32 _c;

	_c = float128_to_float32(*a);

	memcpy(&c, &_c, sizeof(_c));

	return c;
}


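/*
 * float128 to 64-bit integer conversions, all truncating toward zero.
 * _Qp_qtoui goes through the signed 64-bit conversion and then narrows
 * to 32 bits; _Qp_qtoux uses the unsigned 64-bit conversion directly.
 */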
unsigned int
_Qp_qtoui(float128 *a)
{
	return (unsigned int)float128_to_int64_round_to_zero(*a);
}


unsigned long
_Qp_qtoux(float128 *a)
{
	return (unsigned long)float128_to_uint64_round_to_zero(*a);
}


long
_Qp_qtox(float128 *a)
{
	return (long)float128_to_int64_round_to_zero(*a);
}


void
_Qp_sqrt(float128 *c, float128 *a)
{
	*c = float128_sqrt(*a);
}


void
_Qp_stoq(float128 *c, float a)
{
	float32 _a;

	memcpy(&_a, &a, sizeof(a));

	*c = float32_to_float128(_a);
}


void
_Qp_sub(float128 *c, float128 *a, float128 *b)
{
	*c = float128_sub(*a, *b);
}


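/*
 * A 32-bit unsigned value always fits in a signed 64-bit integer, so
 * the signed conversion below is exact.
 */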
void
_Qp_uitoq(float128 *c, unsigned int a)
{
	*c = int64_to_float128(a);
}


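/*
 * 64-bit unsigned to float128.  softfloat only provides a signed
 * int64 conversion here, so a value with its top bit set is converted
 * as a/2 (which is positive), doubled, and then the dropped low bit
 * is added back in.
 */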
void
_Qp_uxtoq(float128 *c, unsigned long a)
{
	if (a & 0x8000000000000000ULL) {
		/* a would not fit in a signed conversion */
		*c = int64_to_float128((long long)(a>>1));
		*c = float128_add(*c, *c);
		if (a & 1)
			*c = float128_add(*c, __sf128_one);
	} else {
		*c = int64_to_float128((long long)a);
	}
}


void
_Qp_xtoq(float128 *c, long a)
{
	*c = int64_to_float128((long long)a);
}