/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>

RCSID("$NetBSD: vfpdf.S,v 1.5 2020/12/02 14:20:20 wiz Exp $")

/*
 * This file provides soft-float-compatible routines that use VFP
 * instructions to do the actual work.  This should give near hard-float
 * performance while remaining compatible with soft-float code.
 *
 * This file implements the double-precision floating-point routines.
 */

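/*
 * Calling convention, for reference (a summary, assuming the AAPCS
 * soft-float ABI): each double argument arrives in a core register
 * pair (r0/r1 for the first, r2/r3 for the second) and the result is
 * returned in r0/r1.  Each routine below therefore moves its operands
 * into d0/d1, executes one VFP instruction, and moves the result back.
 */
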
.fpu vfp

#ifdef	__ARMEL__
#define	vmov_arg0	vmov	d0, r0, r1
#define	vmov_arg1	vmov	d1, r2, r3
#define	vmov_ret	vmov	r0, r1, d0
#else
#define	vmov_arg0	vmov	d0, r1, r0
#define	vmov_arg1	vmov	d1, r3, r2
#define	vmov_ret	vmov	r1, r0, d0
#endif
#define	vmov_args	vmov_arg0; vmov_arg1

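/*
 * "vmov dN, rLo, rHi" writes rLo to the low half of dN and rHi to the
 * high half.  A double passed in a core register pair keeps its
 * least-significant word in the lower-numbered register on
 * little-endian, but its most-significant word there on big-endian,
 * hence the swapped operand order in the big-endian macros above.
 */
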
#ifdef __ARM_EABI__
#define	__adddf3	__aeabi_dadd
#define	__divdf3	__aeabi_ddiv
#define	__muldf3	__aeabi_dmul
#define	__subdf3	__aeabi_dsub
#define	__negdf2	__aeabi_dneg
#define	__extendsfdf2	__aeabi_f2d
#define	__fixdfsi	__aeabi_d2iz
#define	__fixunsdfsi	__aeabi_d2uiz
#define	__floatsidf	__aeabi_i2d
#define	__floatunsidf	__aeabi_ui2d
#endif

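/*
 * The ARM EABI run-time helpers use __aeabi_* names (e.g. __aeabi_dadd
 * rather than the traditional libgcc __adddf3), so the defines above
 * let the same bodies be built under either set of names.
 */
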
ENTRY(__adddf3)
	vmov_args
	vadd.f64	d0, d0, d1
	vmov_ret
	RET
END(__adddf3)

ENTRY(__subdf3)
	vmov_args
	vsub.f64	d0, d0, d1
	vmov_ret
	RET
END(__subdf3)

#ifdef __ARM_EABI__
ENTRY(__aeabi_drsub)
	vmov_args
	vsub.f64	d0, d1, d0
	vmov_ret
	RET
END(__aeabi_drsub)
#endif

ENTRY(__muldf3)
	vmov_args
	vmul.f64	d0, d0, d1
	vmov_ret
	RET
END(__muldf3)

ENTRY(__divdf3)
	vmov_args
	vdiv.f64	d0, d0, d1
	vmov_ret
	RET
END(__divdf3)

ENTRY(__negdf2)
	vmov_arg0
	vneg.f64	d0, d0
	vmov_ret
	RET
END(__negdf2)

ENTRY(__extendsfdf2)
	vmov		s0, r0
	vcvt.f64.f32	d0, s0
	vmov_ret
	RET
END(__extendsfdf2)

ENTRY(__fixdfsi)
	vmov_arg0
	vcvt.s32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixdfsi)

ENTRY(__fixunsdfsi)
	vmov_arg0
	vcvt.u32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixunsdfsi)

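/*
 * vcvt.s32.f64 and vcvt.u32.f64 round toward zero regardless of the
 * FPSCR rounding mode (vcvtr would honour it), matching the truncating
 * ("z") semantics of __aeabi_d2iz and __aeabi_d2uiz.
 */
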
ENTRY(__floatsidf)
	vmov		s0, r0
	vcvt.f64.s32	d0, s0
	vmov_ret
	RET
END(__floatsidf)

ENTRY(__floatunsidf)
	vmov		s0, r0
	vcvt.f64.u32	d0, s0
	vmov_ret
	RET
END(__floatunsidf)

/*
 * Effect of a floating point comparison on the condition flags.
 *      N Z C V
 * EQ = 0 1 1 0
 * LT = 1 0 0 0
 * GT = 0 0 1 0
 * UN = 0 0 1 1
 */
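/*
 * vcmp raises an Invalid Operation exception only for a signaling NaN
 * operand, while vcmpe raises it for any NaN; hence the equality
 * helper below uses vcmp and the ordered (<=) helpers use vcmpe.  The
 * "reversed" __aeabi_cdrcmple compares its operands in the opposite
 * order so that callers can evaluate >= and > with the same flag tests.
 */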
#ifdef __ARM_EABI__
ENTRY(__aeabi_cdcmpeq)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdcmpeq)

ENTRY(__aeabi_cdcmple)
	vmov_args
	vcmpe.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdcmple)

ENTRY(__aeabi_cdrcmple)
	vmov_args
	vcmpe.f64	d1, d0
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdrcmple)

ENTRY(__aeabi_dcmpeq)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #1		/* (a == b) */
	movne		r0, #0		/* (a != b) or unordered */
	RET
END(__aeabi_dcmpeq)

ENTRY(__aeabi_dcmplt)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movlt		r0, #1		/* (a < b) */
	movcs		r0, #0		/* (a >= b) or unordered */
	RET
END(__aeabi_dcmplt)

ENTRY(__aeabi_dcmple)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movls		r0, #1		/* (a <= b) */
	movhi		r0, #0		/* (a > b) or unordered */
	RET
END(__aeabi_dcmple)

ENTRY(__aeabi_dcmpge)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movge		r0, #1		/* (a >= b) */
	movlt		r0, #0		/* (a < b) or unordered */
	RET
END(__aeabi_dcmpge)

ENTRY(__aeabi_dcmpgt)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__aeabi_dcmpgt)

ENTRY(__aeabi_dcmpun)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* (isnan(a) || isnan(b)) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__aeabi_dcmpun)

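/*
 * For illustration, a compiler typically lowers a soft-float
 * comparison such as "if (a < b)" on doubles to roughly
 *
 *	bl	__aeabi_dcmplt
 *	cmp	r0, #0
 *	beq	1f		@ r0 == 0: (a >= b) or unordered
 *
 * i.e. each __aeabi_dcmp* helper returns 1 in r0 when the relation
 * holds and 0 otherwise.
 */
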
#else
/* N set if a < b */
/* Z set if a == b */
/* C set if a == b, a > b, or unordered */
/* V set if unordered */

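/*
 * Each STRONG_ALIAS pair below shares a single body; e.g. __eqdf2 and
 * __nedf2 both return 0 when the operands compare equal and 1
 * otherwise, which satisfies callers of either name because they only
 * test the result against zero.
 */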
STRONG_ALIAS(__eqdf2, __nedf2)
ENTRY(__nedf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #0		/* (a == b) */
	movne		r0, #1		/* (a != b) or unordered */
	RET
END(__nedf2)

STRONG_ALIAS(__gedf2, __ltdf2)
ENTRY(__ltdf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* (a < b) -> -1 */
	movpl		r0, #0		/* (a >= b) or unordered -> 0 */
	RET
END(__ltdf2)

STRONG_ALIAS(__gtdf2, __ledf2)
ENTRY(__ledf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__ledf2)

ENTRY(__unorddf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__unorddf2)
#endif /* !__ARM_EABI__ */