/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>

RCSID("$NetBSD: vfpdf.S,v 1.2 2013/06/23 06:19:55 matt Exp $")

/*
 * This file provides softfloat compatible routines which use VFP
 * instructions to do the actual work.  This should give near hard-float
 * performance while being compatible with soft-float code.
 *
 * This file implements the double precision floating point routines.
 */
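
/*
 * For example (an illustrative sketch, not assembled into the
 * library), a soft-float caller adds two doubles passed in r0:r1 and
 * r2:r3 and receives the sum back in r0:r1:
 */
#if 0
	bl	__adddf3		/* __aeabi_dadd under EABI */
	/* sum is now in r0:r1 */
#endif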

/*
 * Move the soft-float double arguments (r0:r1 and r2:r3) into d0/d1
 * and the result back out again, taking the word order of each
 * register pair from the endianness of the system.
 */
#ifdef	__ARMEL__
#define	vmov_arg0	vmov	d0, r0, r1
#define	vmov_arg1	vmov	d1, r2, r3
#define	vmov_ret	vmov	r0, r1, d0
#else
#define	vmov_arg0	vmov	d0, r1, r0
#define	vmov_arg1	vmov	d1, r3, r2
#define	vmov_ret	vmov	r1, r0, d0
#endif
#define	vmov_args	vmov_arg0; vmov_arg1
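
/*
 * For example, on a little-endian (__ARMEL__) system vmov_args expands
 * to
 *
 *	vmov	d0, r0, r1; vmov	d1, r2, r3
 *
 * moving both 64-bit soft-float arguments into d0 and d1.
 */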

#ifdef __ARM_EABI__
#define	__adddf3	__aeabi_dadd
#define	__divdf3	__aeabi_ddiv
#define	__muldf3	__aeabi_dmul
#define	__subdf3	__aeabi_dsub
#define	__negdf2	__aeabi_dneg
#define	__extendsfdf2	__aeabi_f2d
#define	__fixdfsi	__aeabi_d2iz
#define	__fixunsdfsi	__aeabi_d2uiz
#define	__floatsidf	__aeabi_i2d
#define	__floatunsidf	__aeabi_ui2d
#endif
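
/*
 * With the renames above, the ENTRY()s below define the __aeabi_*
 * names directly when building for EABI; otherwise the GCC-style
 * __*df3/__*df2 names are used as written.
 */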

ENTRY(__adddf3)
	vmov_args
	vadd.f64	d0, d0, d1
	vmov_ret
	RET
END(__adddf3)

ENTRY(__subdf3)
	vmov_args
	vsub.f64	d0, d0, d1
	vmov_ret
	RET
END(__subdf3)

#ifdef __ARM_EABI__
ENTRY(__aeabi_drsub)
	vmov_args
	vsub.f64	d0, d1, d0
	vmov_ret
	RET
END(__aeabi_drsub)
#endif

ENTRY(__muldf3)
	vmov_args
	vmul.f64	d0, d0, d1
	vmov_ret
	RET
END(__muldf3)

ENTRY(__divdf3)
	vmov_args
	vdiv.f64	d0, d0, d1
	vmov_ret
	RET
END(__divdf3)

ENTRY(__negdf2)
	vmov_arg0
	vneg.f64	d0, d0
	vmov_ret
	RET
END(__negdf2)

ENTRY(__extendsfdf2)
	vmov		s0, r0
	vcvt.f64.f32	d0, s0
	vmov_ret
	RET
END(__extendsfdf2)

ENTRY(__fixdfsi)
	vmov_arg0
	vcvt.s32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixdfsi)

ENTRY(__fixunsdfsi)
	vmov_arg0
	vcvt.u32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixunsdfsi)
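
/*
 * The two conversions above truncate toward zero: vcvt.{s,u}32.f64
 * rounds toward zero by default (VCVTR would use the FPSCR rounding
 * mode instead), matching C conversion semantics and the trailing "z"
 * in the AEABI names __aeabi_d2iz and __aeabi_d2uiz.
 */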

ENTRY(__floatsidf)
	vmov		s0, r0
	vcvt.f64.s32	d0, s0
	vmov_ret
	RET
END(__floatsidf)

ENTRY(__floatunsidf)
	vmov		s0, r0
	vcvt.f64.u32	d0, s0
	vmov_ret
	RET
END(__floatunsidf)

/*
 * Effect of a floating-point comparison on the condition flags.
 *      N Z C V
 * EQ = 0 1 1 0
 * LT = 1 0 0 0
 * GT = 0 0 1 0
 * UN = 0 0 1 1
 */
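
/*
 * Illustrative sketch (not assembled into the library): given the
 * table above, a caller of the flag-setting __aeabi_cdcmple below can
 * test "a <= b" with the unsigned "ls" condition, which is false for
 * the unordered case (C=1, Z=0), so NaNs fall through:
 */
#if 0
	bl	__aeabi_cdcmple
	bls	.Lle		/* hypothetical label; taken only when a <= b */
#endif
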
#ifdef __ARM_EABI__
ENTRY(__aeabi_cdcmpeq)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdcmpeq)

ENTRY(__aeabi_cdcmple)
	vmov_args
	vcmpe.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdcmple)

ENTRY(__aeabi_cdrcmple)
	vmov_args
	vcmpe.f64	d1, d0
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdrcmple)

ENTRY(__aeabi_dcmpeq)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #1		/* (a == b) */
	movne		r0, #0		/* (a != b) or unordered */
	RET
END(__aeabi_dcmpeq)

ENTRY(__aeabi_dcmplt)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movlt		r0, #1		/* (a < b) */
	movcs		r0, #0		/* (a >= b) or unordered */
	RET
END(__aeabi_dcmplt)

ENTRY(__aeabi_dcmple)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movls		r0, #1		/* (a <= b) */
	movhi		r0, #0		/* (a > b) or unordered */
	RET
END(__aeabi_dcmple)

ENTRY(__aeabi_dcmpge)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movge		r0, #1		/* (a >= b) */
	movlt		r0, #0		/* (a < b) or unordered */
	RET
END(__aeabi_dcmpge)

ENTRY(__aeabi_dcmpgt)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__aeabi_dcmpgt)

ENTRY(__aeabi_dcmpun)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* (isnan(a) || isnan(b)) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__aeabi_dcmpun)

#else
/* N set if a < b */
/* Z set if a == b */
/* C set if (a == b), (a > b), or unordered */
/* V set if unordered */

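/*
 * Illustrative sketch (not assembled into the library): soft-float
 * compiled code typically tests "a < b" by calling __ltdf2 and
 * checking for a negative result, which the routine below encodes
 * as -1 (and 0 otherwise):
 */
#if 0
	bl	__ltdf2
	cmp	r0, #0
	blt	.Llt		/* hypothetical label; taken only when a < b */
#endif
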
STRONG_ALIAS(__eqdf2, __nedf2)
ENTRY(__nedf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #0		/* (a == b) */
	movne		r0, #1		/* (a != b) or unordered */
	RET
END(__nedf2)

STRONG_ALIAS(__gedf2, __ltdf2)
ENTRY(__ltdf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* (a < b): return -1 */
	movpl		r0, #0		/* (a >= b) or unordered */
	RET
END(__ltdf2)

STRONG_ALIAS(__gtdf2, __ledf2)
ENTRY(__ledf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__ledf2)

ENTRY(__unorddf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__unorddf2)
#endif /* !__ARM_EABI__ */