/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>

RCSID("$NetBSD: vfpdf.S,v 1.1 2013/01/28 17:04:40 matt Exp $")

/*
 * This file provides softfloat-compatible routines which use VFP
 * instructions to do the actual work.  This should give near hard-float
 * performance while remaining compatible with soft-float code.
 *
 * This file implements the double-precision floating-point routines.
 */

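/*
 * These entry points implement the standard libgcc soft-float support
 * names, so a double is passed and returned in core register pairs
 * rather than in VFP registers.  A sketch of the C-level signatures the
 * callers assume (declarations only):
 *
 *	double __adddf3(double, double);	arithmetic; likewise
 *						__subdf3/__muldf3/__divdf3
 *	double __negdf2(double);
 *	double __extendsfdf2(float);
 *	int32_t __fixdfsi(double);		uint32_t for __fixunsdfsi
 *	double __floatsidf(int32_t);		__floatunsidf for uint32_t
 *	int __nedf2(double, double);		and the other comparisons
 */
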
#ifdef	__ARMEL__
#define	vmov_arg0	vmov	d0, r0, r1
#define	vmov_arg1	vmov	d1, r2, r3
#define	vmov_ret	vmov	r0, r1, d0
#else
#define	vmov_arg0	vmov	d0, r1, r0
#define	vmov_arg1	vmov	d1, r3, r2
#define	vmov_ret	vmov	r1, r0, d0
#endif
#define	vmov_args	vmov_arg0; vmov_arg1

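/*
 * Why the operand order flips with endianness: the soft-float ABI hands
 * a double over as two 32-bit words in memory order, in r0/r1 (first
 * argument) and r2/r3 (second).  vmov dN, rlo, rhi loads the first core
 * register into the low half of the D register, so on a little-endian
 * system r0/r2 carry the low word of the double, while on big-endian
 * the pairing is reversed.  For example, 1.0 (bits 0x3ff0000000000000)
 * arrives as r0=0x00000000, r1=0x3ff00000 on ARMEL and the other way
 * around on ARMEB.
 */
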
ENTRY(__adddf3)
	vmov_args
	vadd.f64	d0, d0, d1
	vmov_ret
	RET
END(__adddf3)

ENTRY(__subdf3)
	vmov_args
	vsub.f64	d0, d0, d1
	vmov_ret
	RET
END(__subdf3)

ENTRY(__muldf3)
	vmov_args
	vmul.f64	d0, d0, d1
	vmov_ret
	RET
END(__muldf3)

ENTRY(__divdf3)
	vmov_args
	vdiv.f64	d0, d0, d1
	vmov_ret
	RET
END(__divdf3)

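/*
 * A minimal caller-side sketch: a soft-float compiler lowers double
 * arithmetic to calls to the routines above, e.g.
 *
 *	double q = a / b;	becomes roughly: bl __divdf3
 *
 * which is why these VFP-backed versions can transparently replace the
 * integer-only softfloat implementations for existing soft-float code.
 */
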
ENTRY(__negdf2)
	vmov_arg0
	vneg.f64	d0, d0
	vmov_ret
	RET
END(__negdf2)

ENTRY(__extendsfdf2)
	vmov		s0, r0
	vcvt.f64.f32	d0, s0
	vmov_ret
	RET
END(__extendsfdf2)

ENTRY(__fixdfsi)
	vmov_arg0
	vcvt.s32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixdfsi)

ENTRY(__fixunsdfsi)
	vmov_arg0
	vcvt.u32.f64	s0, d0
	vmov		r0, s0
	RET
END(__fixunsdfsi)

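/*
 * A note on the two fix routines above: the non-R form of vcvt rounds
 * toward zero regardless of the FPSCR rounding mode, which matches C
 * cast semantics, e.g. (int32_t)-1.9 == -1.  For out-of-range inputs
 * VFP saturates the result, and NaN converts to zero; plain C leaves
 * those cases undefined.
 */
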
ENTRY(__floatsidf)
	vmov		s0, r0
	vcvt.f64.s32	d0, s0
	vmov_ret
	RET
END(__floatsidf)

ENTRY(__floatunsidf)
	vmov		s0, r0
	vcvt.f64.u32	d0, s0
	vmov_ret
	RET
END(__floatunsidf)

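/*
 * The two float routines above are exact conversions: every 32-bit
 * integer fits in the 53-bit significand of a double, so
 * vcvt.f64.s32/vcvt.f64.u32 never need to round.  C-level view:
 *
 *	double d = (double)i;	becomes roughly: bl __floatsidf
 */
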
/*
 * After vcmp.f64 + vmrs, the FPSCR NZCV flags (copied into the APSR)
 * encode the comparison result:
 *	N set if a < b
 *	Z set if a == b
 *	C set if a == b, a > b, or unordered
 *	V set if unordered (at least one operand is a NaN)
 */

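/*
 * A sketch of the return-value convention these routines follow (the
 * standard libgcc one).  For ordered operands the caller compares the
 * returned int against zero with the operator the function names:
 *
 *	int __nedf2(double a, double b);	a != b  iff  result != 0
 *	int __ltdf2(double a, double b);	a <  b  iff  result <  0
 *	int __ledf2(double a, double b);	a <= b  iff  result <= 0
 *	int __unorddf2(double a, double b);	nonzero iff a, b unordered
 *
 * Each STRONG_ALIAS below pairs two predicates that can be read off the
 * same returned value.
 */
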
STRONG_ALIAS(__eqdf2, __nedf2)
ENTRY(__nedf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #0		/* !(a == b) */
	movne		r0, #1		/* !(a == b) */
	RET
END(__nedf2)

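/*
 * __eqdf2 can share this body because both functions promise a zero
 * result exactly when the operands are ordered and equal; unordered
 * operands leave Z clear, so NaNs fall into the movne case and return 1.
 */
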
STRONG_ALIAS(__gedf2, __ltdf2)
ENTRY(__ltdf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* -(a < b) */
	movpl		r0, #0		/* -(a < b) */
	RET
END(__ltdf2)

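/*
 * __gedf2 shares this body: both predicates can be decided from the
 * sign of the result, which is negative exactly when a < b.  For
 * unordered operands N is clear, so NaN inputs take the movpl path
 * and return 0.
 */
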
STRONG_ALIAS(__gtdf2, __ledf2)
ENTRY(__ledf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a > b) */
	RET
END(__ledf2)

ENTRY(__unorddf2)
	vmov_args
	vcmp.f64	d0, d1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* isnan(a) || isnan(b) */
	RET
END(__unorddf2)

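/*
 * A minimal usage sketch: under a soft-float ABI the compiler lowers
 * the C unordered-comparison builtins to this routine, e.g.
 *
 *	if (__builtin_isunordered(a, b))	becomes roughly:
 *						bl __unorddf2; cmp r0, #0
 */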