/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>
#include <arm/vfpreg.h>

RCSID("$NetBSD: vfpsf.S,v 1.5 2020/12/02 14:20:20 wiz Exp $")

/*
 * This file provides softfloat-compatible routines that use VFP
 * instructions to do the actual work.  This should give near hard-float
 * performance while remaining compatible with soft-float code.
 *
 * This file implements the single precision floating point routines.
 */
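
/*
 * Soft-float ABI note: float arguments and results are passed in the
 * core registers (the two single-precision operands in r0 and r1, a
 * double in r0/r1), so every routine below follows the same shape:
 * move the operands into VFP registers, issue one VFP instruction, and
 * move the result back, e.g.
 *
 *	vmov		s0, s1, r0, r1	-- copy a and b into s0 and s1
 *	vadd.f32	s0, s0, s1	-- do the arithmetic in the VFP
 *	vmov		r0, s0		-- return the result in r0
 */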

.fpu vfp

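/*
 * Under the ARM EABI these helpers are exported with their RTABI
 * (__aeabi_*) names, so map the traditional libgcc entry points used
 * below onto those names.
 */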
#ifdef __ARM_EABI__
#define	__addsf3	__aeabi_fadd
#define	__divsf3	__aeabi_fdiv
#define	__mulsf3	__aeabi_fmul
#define	__subsf3	__aeabi_fsub
#define	__negsf2	__aeabi_fneg
#define	__truncdfsf2	__aeabi_d2f
#define	__fixsfsi	__aeabi_f2iz
#define	__fixunssfsi	__aeabi_f2uiz
#define	__floatsisf	__aeabi_i2f
#define	__floatunsisf	__aeabi_ui2f
#endif

ENTRY(__addsf3)
	vmov		s0, s1, r0, r1
	vadd.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__addsf3)

ENTRY(__subsf3)
	vmov		s0, s1, r0, r1
	vsub.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__subsf3)

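/*
 * __aeabi_frsub is the RTABI's reversed subtraction: frsub(a, b)
 * returns (b - a), hence the swapped operands in the vsub below.
 */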
#ifdef __ARM_EABI__
ENTRY(__aeabi_frsub)
	vmov		s0, s1, r0, r1
	vsub.f32	s0, s1, s0
	vmov		r0, s0
	RET
END(__aeabi_frsub)
#endif

ENTRY(__mulsf3)
	vmov		s0, s1, r0, r1
	vmul.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__mulsf3)

ENTRY(__divsf3)
	vmov		s0, s1, r0, r1
	vdiv.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__divsf3)

ENTRY(__negsf2)
	vmov		s0, r0
	vneg.f32	s0, s0
	vmov		r0, s0
	RET
END(__negsf2)

ENTRY(__truncdfsf2)
#ifdef __ARMEL__
	vmov		d0, r0, r1	/* little endian: low word in r0 */
#else
	vmov		d0, r1, r0	/* big endian: low word in r1 */
#endif
	vcvt.f32.f64	s0, d0
	vmov		r0, s0
	RET
END(__truncdfsf2)

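/*
 * vcvt to an integer register rounds toward zero by default, which is
 * exactly the truncating semantics __aeabi_f2iz and __aeabi_f2uiz
 * (and C casts) require.
 */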
ENTRY(__fixsfsi)
	vmov		s0, r0
	vcvt.s32.f32	s0, s0
	vmov		r0, s0
	RET
END(__fixsfsi)

ENTRY(__fixunssfsi)
	vmov		s0, r0
	vcvt.u32.f32	s0, s0
	vmov		r0, s0
	RET
END(__fixunssfsi)

ENTRY(__floatsisf)
	vmov		s0, r0
	vcvt.f32.s32	s0, s0
	vmov		r0, s0
	RET
END(__floatsisf)

ENTRY(__floatunsisf)
	vmov		s0, r0
	vcvt.f32.u32	s0, s0
	vmov		r0, s0
	RET
END(__floatunsisf)

/*
 * Effect of a floating point comparison on the condition flags.
 *      N Z C V
 * EQ = 0 1 1 0
 * LT = 1 0 0 0
 * GT = 0 0 1 0
 * UN = 0 0 1 1
 */
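/*
 * After vmrs copies those flags into the APSR, the integer condition
 * codes used below therefore select these outcomes:
 *	eq -> EQ		ne -> LT,GT,UN
 *	mi -> LT		pl -> EQ,GT,UN
 *	vs -> UN		vc -> EQ,LT,GT
 *	gt -> GT		le -> EQ,LT,UN
 *	ge -> EQ,GT		lt -> LT,UN
 *	hi -> GT,UN		ls -> EQ,LT
 *	cs -> EQ,GT,UN
 */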
#ifdef __ARM_EABI__
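/*
 * The __aeabi_cfcmp* helpers return their result in the condition
 * flags themselves (callers branch on them directly); the __aeabi_fcmp*
 * helpers further below return 0 or 1 in r0 instead.
 */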
ENTRY(__aeabi_cfcmpeq)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cfcmpeq)

ENTRY(__aeabi_cfcmple)
	vmov		s0, s1, r0, r1
	vcmpe.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cfcmple)

ENTRY(__aeabi_cfrcmple)
	vmov		s0, s1, r0, r1
	vcmpe.f32	s1, s0
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cfrcmple)

ENTRY(__aeabi_fcmpeq)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #1		/* (a == b) */
	movne		r0, #0		/* (a != b) or unordered */
	RET
END(__aeabi_fcmpeq)

ENTRY(__aeabi_fcmplt)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movlt		r0, #1		/* (a < b) */
	movcs		r0, #0		/* (a >= b) or unordered */
	RET
END(__aeabi_fcmplt)

ENTRY(__aeabi_fcmple)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movls		r0, #1		/* (a <= b) */
	movhi		r0, #0		/* (a > b) or unordered */
	RET
END(__aeabi_fcmple)

ENTRY(__aeabi_fcmpge)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movge		r0, #1		/* (a >= b) */
	movlt		r0, #0		/* (a < b) or unordered */
	RET
END(__aeabi_fcmpge)

ENTRY(__aeabi_fcmpgt)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__aeabi_fcmpgt)

ENTRY(__aeabi_fcmpun)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* (isnan(a) || isnan(b)) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__aeabi_fcmpun)

#else
/* N set if a < b */
/* Z set if a == b */
/* C set if a == b, a > b, or unordered */
/* V set if unordered */

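/*
 * The traditional libgcc comparison helpers return an int whose
 * relation to zero encodes the answer: callers test the __eqsf2/__nesf2
 * result against zero, the __ltsf2/__gesf2 result for a negative value,
 * and the __lesf2/__gtsf2 result for a positive value.  Each pair can
 * therefore share one body, hence the strong aliases below.
 */
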
STRONG_ALIAS(__eqsf2, __nesf2)
ENTRY(__nesf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #0		/* !(a == b) */
	movne		r0, #1		/* !(a == b) */
	RET
END(__nesf2)

STRONG_ALIAS(__gesf2, __ltsf2)
ENTRY(__ltsf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* -(a < b) */
	movpl		r0, #0		/* -(a < b) */
	RET
END(__ltsf2)

STRONG_ALIAS(__gtsf2, __lesf2)
ENTRY(__lesf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a > b) */
	RET
END(__lesf2)

ENTRY(__unordsf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* isnan(a) || isnan(b) */
	RET
END(__unordsf2)
#endif /* !__ARM_EABI__ */