/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>
#include <arm/vfpreg.h>

RCSID("$NetBSD: vfpsf.S,v 1.1.4.2 2013/02/25 00:27:55 tls Exp $")

/*
 * This file provides softfloat-compatible routines which use VFP
 * instructions to do the actual work.  This should give near hard-float
 * performance while remaining compatible with soft-float code.
 *
 * This file implements the single-precision floating-point routines.
 */

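/*
 * All of these routines use the soft-float calling convention: float
 * operands arrive as raw bits in the core registers r0/r1 and the result
 * is returned in r0, so each routine just moves the bits into VFP
 * registers with "vmov", operates, and moves the result back.  A rough
 * C sketch of the arithmetic entry points, assuming the conventional
 * libgcc soft-float interface:
 *
 *	float __addsf3(float a, float b) { return a + b; }
 *	float __subsf3(float a, float b) { return a - b; }
 *	float __mulsf3(float a, float b) { return a * b; }
 *	float __divsf3(float a, float b) { return a / b; }
 *	float __negsf2(float a)          { return -a; }
 */
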
ENTRY(__addsf3)
	vmov		s0, s1, r0, r1
	vadd.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__addsf3)

ENTRY(__subsf3)
	vmov		s0, s1, r0, r1
	vsub.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__subsf3)

ENTRY(__mulsf3)
	vmov		s0, s1, r0, r1
	vmul.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__mulsf3)

ENTRY(__divsf3)
	vmov		s0, s1, r0, r1
	vdiv.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__divsf3)

ENTRY(__negsf2)
	vmov		s0, r0
	vneg.f32	s0, s0
	vmov		r0, s0
	RET
END(__negsf2)

ENTRY(__truncdfsf2)
#ifdef __ARMEL__
	vmov		d0, r0, r1
#else
	vmov		d0, r1, r0
#endif
	vcvt.f32.f64	s0, d0
	vmov		r0, s0
	RET
END(__truncdfsf2)

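/*
 * __truncdfsf2 behaves roughly like
 *
 *	float __truncdfsf2(double a) { return (float)a; }
 *
 * The #ifdef is needed because the soft-float double arrives as two
 * words in r0/r1 and the order of the halves follows the memory
 * endianness, so big-endian (non-__ARMEL__) builds swap the "vmov"
 * source operands.
 */
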
ENTRY(__fixsfsi)
	vmov		s0, r0
	vcvt.s32.f32	s0, s0
	vmov		r0, s0
	RET
END(__fixsfsi)

ENTRY(__fixunssfsi)
	vmov		s0, r0
	vcvt.u32.f32	s0, s0
	vmov		r0, s0
	RET
END(__fixunssfsi)

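/*
 * The float-to-integer conversions rely on "vcvt", which rounds toward
 * zero by default, matching C cast semantics.  Roughly (with the
 * fixed-width types from <stdint.h>):
 *
 *	int32_t  __fixsfsi(float a)    { return (int32_t)a; }
 *	uint32_t __fixunssfsi(float a) { return (uint32_t)a; }
 */
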
ENTRY(__floatsisf)
	vmov		s0, r0
	vcvt.f32.s32	s0, s0
	vmov		r0, s0
	RET
END(__floatsisf)

ENTRY(__floatunsisf)
	vmov		s0, r0
	vcvt.f32.u32	s0, s0
	vmov		r0, s0
	RET
END(__floatunsisf)

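/*
 * The integer-to-float conversions are the inverse operations.  Roughly:
 *
 *	float __floatsisf(int32_t a)    { return (float)a; }
 *	float __floatunsisf(uint32_t a) { return (float)a; }
 */
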
/* N set if compare result is < */
/* Z set if compare result is = */
/* C set if compare result is =, >, or UNORD */
/* V set if compare result is UNORD */

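/*
 * The comparison routines below copy those flags into the CPSR with
 * "vmrs APSR_nzcv, fpscr", after which the ordinary ARM condition codes
 * (eq, ne, mi, pl, gt, le, vs, vc) test the floating-point result.
 * An unordered compare (i.e. a NaN operand) yields N=0 Z=0 C=1 V=1, so
 * the "mi" and "gt" conditions are never taken for NaNs.
 */
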
STRONG_ALIAS(__eqsf2, __nesf2)
ENTRY(__nesf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #0		/* !(a == b) */
	movne		r0, #1		/* !(a == b) */
	RET
END(__nesf2)

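/*
 * __eqsf2 can alias __nesf2 because both are specified to return zero
 * when a == b and nonzero otherwise, including when either operand is a
 * NaN.  A rough C sketch:
 *
 *	int __nesf2(float a, float b) { return (a == b) ? 0 : 1; }
 */
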
STRONG_ALIAS(__gesf2, __ltsf2)
ENTRY(__ltsf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* -(a < b) */
	movpl		r0, #0		/* -(a < b) */
	RET
END(__ltsf2)

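/*
 * __gesf2 aliases __ltsf2; the shared body behaves roughly like
 *
 *	int __ltsf2(float a, float b) { return (a < b) ? -1 : 0; }
 *
 * An unordered compare leaves N clear, so NaN operands take the "pl"
 * path and produce 0.
 */
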
STRONG_ALIAS(__gtsf2, __lesf2)
ENTRY(__lesf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a > b) */
	RET
END(__lesf2)

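/*
 * __gtsf2 aliases __lesf2; the shared body behaves roughly like
 *
 *	int __lesf2(float a, float b) { return (a > b) ? 1 : 0; }
 *
 * Again, NaN operands fail the "gt" test and produce 0.
 */
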
ENTRY(__unordsf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* isnan(a) || isnan(b) */
	RET
END(__unordsf2)
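
/*
 * __unordsf2 returns nonzero iff either operand is a NaN.  The V flag
 * is set only by an unordered compare, so roughly (with isnan() from
 * <math.h>):
 *
 *	int __unordsf2(float a, float b) { return isnan(a) || isnan(b); }
 */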