/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>

RCSID("$NetBSD: vfpdf.S,v 1.2.28.1 2018/07/28 04:37:23 pgoyette Exp $")
/*
 * This file provides softfloat compatible routines which use VFP instructions
 * to do the actual work.  This should give near hard-float performance while
 * being compatible with soft-float code.
 *
 * This file implements the double precision floating point routines.
 */

.fpu vfp

#ifdef	__ARMEL__
#define	vmov_arg0	vmov	d0, r0, r1
#define	vmov_arg1	vmov	d1, r2, r3
#define	vmov_ret	vmov	r0, r1, d0
#else
#define	vmov_arg0	vmov	d0, r1, r0
#define	vmov_arg1	vmov	d1, r3, r2
#define	vmov_ret	vmov	r1, r0, d0
#endif
#define	vmov_args	vmov_arg0; vmov_arg1

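/*
 * A note on the macros above, assuming the usual soft-float argument
 * convention: each double arrives in a pair of core registers (r0/r1
 * and r2/r3), with the word order of the 64-bit value following memory
 * endianness.  On little-endian ARM the low word is in r0, on
 * big-endian in r1, which is why the big-endian variants swap the
 * register order when moving values in and out of d0/d1.
 */
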
#ifdef __ARM_EABI__
#define	__adddf3	__aeabi_dadd
#define	__divdf3	__aeabi_ddiv
#define	__muldf3	__aeabi_dmul
#define	__subdf3	__aeabi_dsub
#define	__negdf2	__aeabi_dneg
#define	__extendsfdf2	__aeabi_f2d
#define	__fixdfsi	__aeabi_d2iz
#define	__fixunsdfsi	__aeabi_d2uiz
#define	__floatsidf	__aeabi_i2d
#define	__floatunsidf	__aeabi_ui2d
#endif

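/*
 * Under the ARM EABI the libgcc names above are remapped onto the
 * corresponding AEABI run-time helper names, which in C terms have
 * prototypes such as
 *
 *	double __aeabi_dadd(double, double);	// a + b
 *	int __aeabi_d2iz(double);		// convert, truncating
 *
 * so the ENTRY bodies below serve callers of either name.
 */
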
ENTRY(__adddf3)
	vmov_args
	vadd.f64	d0, d0, d1
	vmov_ret
	RET
END(__adddf3)

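/*
 * Each arithmetic stub here computes the C expression its libgcc name
 * implies, e.g. __adddf3(a, b) == a + b: the pattern is to move the
 * soft-float arguments into d0/d1, execute a single VFP instruction,
 * and move the result back into r0/r1.
 */
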
ENTRY(__subdf3)
	vmov_args
	vsub.f64	d0, d0, d1
	vmov_ret
	RET
END(__subdf3)

#ifdef __ARM_EABI__
ENTRY(__aeabi_drsub)
	vmov_args
	vsub.f64	d0, d1, d0
	vmov_ret
	RET
END(__aeabi_drsub)
#endif

ENTRY(__muldf3)
	vmov_args
	vmul.f64	d0, d0, d1
	vmov_ret
	RET
END(__muldf3)

ENTRY(__divdf3)
	vmov_args
	vdiv.f64	d0, d0, d1
	vmov_ret
	RET
END(__divdf3)

ENTRY(__negdf2)
	vmov_arg0
	vneg.f64	d0, d0
	vmov_ret
	RET
END(__negdf2)

ENTRY(__extendsfdf2)
	vmov		s0, r0
	vcvt.f64.f32	d0, s0
	vmov_ret
	RET
END(__extendsfdf2)

ENTRY(__fixdfsi)
	vmov_arg0
	vcvt.s32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixdfsi)

ENTRY(__fixunsdfsi)
	vmov_arg0
	vcvt.u32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixunsdfsi)

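/*
 * vcvt.s32.f64 and vcvt.u32.f64 round toward zero by default, which
 * matches C conversion semantics and the trailing "z" in the AEABI
 * names __aeabi_d2iz and __aeabi_d2uiz.
 */
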
ENTRY(__floatsidf)
	vmov		s0, r0
	vcvt.f64.s32	d0, s0
	vmov_ret
	RET
END(__floatsidf)

ENTRY(__floatunsidf)
	vmov		s0, r0
	vcvt.f64.u32	d0, s0
	vmov_ret
	RET
END(__floatunsidf)

/*
 * Effect of a floating point comparison on the condition flags.
 *      N Z C V
 * EQ = 0 1 1 0
 * LT = 1 0 0 0
 * GT = 0 0 1 0
 * UN = 0 0 1 1
 */
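/*
 * Worked example of reading the table: "lt" (N != V) holds for the LT
 * and UN rows, while "cs" (C set) holds for EQ, GT, and UN; so in
 * __aeabi_dcmplt below, movlt sets the result and the later movcs
 * clears it again for the unordered case, leaving 1 exactly when
 * a < b.
 */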
#ifdef __ARM_EABI__
ENTRY(__aeabi_cdcmpeq)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdcmpeq)

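/*
 * Note that __aeabi_cdcmpeq uses vcmp while __aeabi_cdcmple and
 * __aeabi_cdrcmple use vcmpe: the "e" form additionally raises
 * Invalid Operation for quiet NaN operands, which an ordered
 * comparison should do but an equality test must not.
 */
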
ENTRY(__aeabi_cdcmple)
	vmov_args
	vcmpe.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdcmple)

ENTRY(__aeabi_cdrcmple)
	vmov_args
	vcmpe.f64	d1, d0
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cdrcmple)

ENTRY(__aeabi_dcmpeq)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #1		/* (a == b) */
	movne		r0, #0		/* (a != b) or unordered */
	RET
END(__aeabi_dcmpeq)

ENTRY(__aeabi_dcmplt)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movlt		r0, #1		/* (a < b) */
	movcs		r0, #0		/* (a >= b) or unordered */
	RET
END(__aeabi_dcmplt)

ENTRY(__aeabi_dcmple)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movls		r0, #1		/* (a <= b) */
	movhi		r0, #0		/* (a > b) or unordered */
	RET
END(__aeabi_dcmple)

ENTRY(__aeabi_dcmpge)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movge		r0, #1		/* (a >= b) */
	movlt		r0, #0		/* (a < b) or unordered */
	RET
END(__aeabi_dcmpge)

ENTRY(__aeabi_dcmpgt)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__aeabi_dcmpgt)

ENTRY(__aeabi_dcmpun)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* (isnan(a) || isnan(b)) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__aeabi_dcmpun)

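/*
 * A sketch of how these boolean helpers are used: under the EABI a C
 * comparison such as (a < b) compiles to a call and a test of the
 * returned 0/1 value, e.g.
 *
 *	if (__aeabi_dcmplt(a, b))	// if (a < b)
 *		...
 */
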
#else
/* N set if a < b */
/* Z set if a == b */
/* C set if a == b, a > b, or unordered */
/* V set if unordered */

STRONG_ALIAS(__eqdf2, __nedf2)
ENTRY(__nedf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #0		/* (a == b) */
	movne		r0, #1		/* (a != b) or unordered */
	RET
END(__nedf2)

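/*
 * The STRONG_ALIAS pairs work because libgcc callers test each pair's
 * result against zero with the same sense, e.g. both __eqdf2 and
 * __nedf2 return zero iff a == b; the shared bodies trade away a
 * distinct result for the unordered case.
 */
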
STRONG_ALIAS(__gedf2, __ltdf2)
ENTRY(__ltdf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* (a < b): -1 */
	movpl		r0, #0		/* (a >= b) or unordered */
	RET
END(__ltdf2)

STRONG_ALIAS(__gtdf2, __ledf2)
ENTRY(__ledf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__ledf2)

ENTRY(__unorddf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__unorddf2)
#endif /* !__ARM_EABI__ */
    276