/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>
#include <arm/vfpreg.h>

RCSID("$NetBSD: vfpsf.S,v 1.1 2013/01/28 17:04:40 matt Exp $")
/*
 * This file provides softfloat-compatible routines that use VFP instructions
 * to do the actual work.  This should give near hard-float performance while
 * remaining compatible with soft-float code.
 *
 * This file implements the single-precision floating point routines.
 */

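/*
 * Calling convention note (a sketch added for clarity, not taken from the
 * original source): these routines follow the soft-float ABI, so
 * single-precision operands arrive in core registers (r0, and r1 for
 * two-operand routines) and the result is returned in r0.  A compiler
 * targeting the soft-float ABI might lower "a + b" into a call sequence
 * roughly like:
 *
 *	@ bits of a in r0, bits of b in r1
 *	bl	__addsf3
 *	@ bits of (a + b) now in r0
 *
 * Each routine below simply moves the operands into VFP registers, issues
 * the matching VFP instruction, and moves the result back to core registers.
 */
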
ENTRY(__addsf3)
	vmov		s0, s1, r0, r1
	vadd.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__addsf3)

ENTRY(__subsf3)
	vmov		s0, s1, r0, r1
	vsub.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__subsf3)

ENTRY(__mulsf3)
	vmov		s0, s1, r0, r1
	vmul.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__mulsf3)

ENTRY(__divsf3)
	vmov		s0, s1, r0, r1
	vdiv.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__divsf3)

ENTRY(__negsf2)
	vmov		s0, r0
	vneg.f32	s0, s0
	vmov		r0, s0
	RET
END(__negsf2)

ENTRY(__truncdfsf2)
	/* The double operand arrives in r0/r1; its word order depends on endianness. */
#ifdef __ARMEL__
	vmov		d0, r0, r1
#else
	vmov		d0, r1, r0
#endif
	vcvt.f32.f64	s0, d0
	vmov		r0, s0
	RET
END(__truncdfsf2)

ENTRY(__fixsfsi)
	vmov		s0, r0
	vcvt.s32.f32	s0, s0
	vmov		r0, s0
	RET
END(__fixsfsi)

ENTRY(__fixunssfsi)
	vmov		s0, r0
	vcvt.u32.f32	s0, s0
	vmov		r0, s0
	RET
END(__fixunssfsi)

ENTRY(__floatsisf)
	vmov		s0, r0
	vcvt.f32.s32	s0, s0
	vmov		r0, s0
	RET
END(__floatsisf)

ENTRY(__floatunsisf)
	vmov		s0, r0
	vcvt.f32.u32	s0, s0
	vmov		r0, s0
	RET
END(__floatunsisf)

/* N set if the compare result is "less than" */
/* Z set if the compare result is "equal" */
/* C set if the compare result is "equal", "greater than", or "unordered" */
/* V set if the compare result is "unordered" */

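/*
 * For reference, the full NZCV mapping produced by vcmp.f32 + vmrs (a
 * summary of the ARM VFP compare semantics, added here for clarity):
 *
 *	outcome		N Z C V
 *	a == b		0 1 1 0
 *	a <  b		1 0 0 0
 *	a >  b		0 0 1 0
 *	unordered	0 0 1 1
 *
 * The comparison routines below pick condition codes (eq/ne, mi/pl, gt/le,
 * vs/vc) that decode exactly the outcome they care about.
 */
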
STRONG_ALIAS(__eqsf2, __nesf2)
/* Returns 0 if a == b, 1 otherwise (including unordered). */
ENTRY(__nesf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #0		/* !(a == b) */
	movne		r0, #1		/* !(a == b) */
	RET
END(__nesf2)

STRONG_ALIAS(__gesf2, __ltsf2)
/* Returns -1 if a < b, 0 otherwise (including unordered). */
ENTRY(__ltsf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* -(a < b) */
	movpl		r0, #0		/* -(a < b) */
	RET
END(__ltsf2)

STRONG_ALIAS(__gtsf2, __lesf2)
/* Returns 1 if a > b, 0 otherwise (including unordered). */
ENTRY(__lesf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a > b) */
	RET
END(__lesf2)

/* Returns 1 if either operand is a NaN, 0 otherwise. */
ENTRY(__unordsf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* isnan(a) || isnan(b) */
	RET
END(__unordsf2)