/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>
#include <arm/vfpreg.h>

RCSID("$NetBSD: vfpsf.S,v 1.2.6.2 2014/05/22 11:36:55 yamt Exp $")

/*
 * This file provides softfloat-compatible routines which use VFP
 * instructions to do the actual work.  This should give near hard-float
 * performance while remaining compatible with soft-float code.
 *
 * This file implements the single-precision floating point routines.
 */
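
/*
 * Calling-convention note (a sketch of the soft-float AAPCS rules these
 * routines rely on): single-precision arguments and results travel in
 * core registers, not VFP registers.  For example, a compiler targeting
 * the soft-float ABI lowers
 *
 *	float f(float a, float b) { return a + b; }
 *
 * to a call of __addsf3 (__aeabi_fadd under EABI) with a in r0, b in r1,
 * and the result expected back in r0.
 */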

#ifdef __ARM_EABI__
#define	__addsf3	__aeabi_fadd
#define	__divsf3	__aeabi_fdiv
#define	__mulsf3	__aeabi_fmul
#define	__subsf3	__aeabi_fsub
#define	__negsf2	__aeabi_fneg
#define	__truncdfsf2	__aeabi_d2f
#define	__fixsfsi	__aeabi_f2iz
#define	__fixunssfsi	__aeabi_f2uiz
#define	__floatsisf	__aeabi_i2f
#define	__floatunsisf	__aeabi_ui2f
#endif
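
/*
 * Under the ARM EABI the run-time ABI (RTABI) names these helpers
 * __aeabi_*; the defines above simply assemble each routine under its
 * RTABI name instead of the traditional libgcc name.
 */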

ENTRY(__addsf3)
	vmov		s0, s1, r0, r1
	vadd.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__addsf3)
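
/*
 * All of the two-operand arithmetic routines follow the pattern above:
 * a single "vmov s0, s1, r0, r1" copies both 32-bit arguments from the
 * core registers into a pair of VFP registers, the VFP instruction does
 * the arithmetic, and a final vmov copies the result back into r0.
 */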

ENTRY(__subsf3)
	vmov		s0, s1, r0, r1
	vsub.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__subsf3)

#ifdef __ARM_EABI__
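/*
 * __aeabi_frsub(a, b) is the RTABI's reversed subtract: it returns
 * b - a, hence the swapped operands in the vsub below.
 */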
ENTRY(__aeabi_frsub)
	vmov		s0, s1, r0, r1
	vsub.f32	s0, s1, s0
	vmov		r0, s0
	RET
END(__aeabi_frsub)
#endif

ENTRY(__mulsf3)
	vmov		s0, s1, r0, r1
	vmul.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__mulsf3)

ENTRY(__divsf3)
	vmov		s0, s1, r0, r1
	vdiv.f32	s0, s0, s1
	vmov		r0, s0
	RET
END(__divsf3)

ENTRY(__negsf2)
	vmov		s0, r0
	vneg.f32	s0, s0
	vmov		r0, s0
	RET
END(__negsf2)

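/*
 * A double passed under the soft-float ABI occupies r0/r1; which core
 * register holds the low word depends on endianness, so the vmov below
 * lists the low word first (r0 on little-endian, r1 on big-endian).
 */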
ENTRY(__truncdfsf2)
#ifdef __ARMEL__
	vmov		d0, r0, r1
#else
	vmov		d0, r1, r0
#endif
	vcvt.f32.f64	s0, d0
	vmov		r0, s0
	RET
END(__truncdfsf2)

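/*
 * The float-to-integer conversions below use the plain VCVT encoding,
 * which always rounds toward zero regardless of the FPSCR rounding mode.
 * That matches C cast semantics and the "z" in the __aeabi_f2iz and
 * __aeabi_f2uiz names.
 */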
ENTRY(__fixsfsi)
	vmov		s0, r0
	vcvt.s32.f32	s0, s0
	vmov		r0, s0
	RET
END(__fixsfsi)

ENTRY(__fixunssfsi)
	vmov		s0, r0
	vcvt.u32.f32	s0, s0
	vmov		r0, s0
	RET
END(__fixunssfsi)

ENTRY(__floatsisf)
	vmov		s0, r0
	vcvt.f32.s32	s0, s0
	vmov		r0, s0
	RET
END(__floatsisf)

ENTRY(__floatunsisf)
	vmov		s0, r0
	vcvt.f32.u32	s0, s0
	vmov		r0, s0
	RET
END(__floatunsisf)

/*
 * Effect of a floating point comparison on the condition flags.
 *	  N Z C V
 * EQ  =  0 1 1 0
 * LT  =  1 0 0 0
 * GT  =  0 0 1 0
 * UN  =  0 0 1 1
 */
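/*
 * The __aeabi_cfcmp* routines return their result only in these flags
 * (callers branch on them directly); the __aeabi_fcmp* routines fold the
 * flags into a 0/1 value in r0.  vcmpe is used where the RTABI wants an
 * Invalid Operation exception raised even for quiet NaN operands, vcmp
 * where NaNs must compare quietly.
 */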
#ifdef __ARM_EABI__
ENTRY(__aeabi_cfcmpeq)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cfcmpeq)

ENTRY(__aeabi_cfcmple)
	vmov		s0, s1, r0, r1
	vcmpe.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cfcmple)

ENTRY(__aeabi_cfrcmple)
	vmov		s0, s1, r0, r1
	vcmpe.f32	s1, s0
	vmrs		APSR_nzcv, fpscr
	RET
END(__aeabi_cfrcmple)

ENTRY(__aeabi_fcmpeq)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #1		/* (a == b) */
	movne		r0, #0		/* (a != b) or unordered */
	RET
END(__aeabi_fcmpeq)

ENTRY(__aeabi_fcmplt)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movlt		r0, #1		/* (a < b) */
	movcs		r0, #0		/* (a >= b) or unordered */
	RET
END(__aeabi_fcmplt)

ENTRY(__aeabi_fcmple)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movls		r0, #1		/* (a <= b) */
	movhi		r0, #0		/* (a > b) or unordered */
	RET
END(__aeabi_fcmple)

ENTRY(__aeabi_fcmpge)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movge		r0, #1		/* (a >= b) */
	movlt		r0, #0		/* (a < b) or unordered */
	RET
END(__aeabi_fcmpge)

ENTRY(__aeabi_fcmpgt)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__aeabi_fcmpgt)

ENTRY(__aeabi_fcmpun)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* (isnan(a) || isnan(b)) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__aeabi_fcmpun)

#else
/* N set if a < b */
/* Z set if a == b */
/* C set if a == b, a > b, or unordered */
/* V set if unordered */

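/*
 * The traditional libgcc comparison helpers return a three-way value:
 * as implemented here, __eqsf2/__nesf2 return zero iff a == b,
 * __gesf2/__ltsf2 return a negative value iff a < b, and
 * __gtsf2/__lesf2 return a positive value iff a > b.  Each pair can
 * therefore share one implementation, hence the STRONG_ALIASes below;
 * note that the aliased pair gives unordered operands the same result.
 */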
STRONG_ALIAS(__eqsf2, __nesf2)
ENTRY(__nesf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	moveq		r0, #0		/* (a == b) */
	movne		r0, #1		/* (a != b) or unordered */
	RET
END(__nesf2)

STRONG_ALIAS(__gesf2, __ltsf2)
ENTRY(__ltsf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	mvnmi		r0, #0		/* (a < b): return -1 */
	movpl		r0, #0		/* (a >= b) or unordered: return 0 */
	RET
END(__ltsf2)

STRONG_ALIAS(__gtsf2, __lesf2)
ENTRY(__lesf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movgt		r0, #1		/* (a > b) */
	movle		r0, #0		/* (a <= b) or unordered */
	RET
END(__lesf2)

ENTRY(__unordsf2)
	vmov		s0, s1, r0, r1
	vcmp.f32	s0, s1
	vmrs		APSR_nzcv, fpscr
	movvs		r0, #1		/* isnan(a) || isnan(b) */
	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__unordsf2)
#endif /* !__ARM_EABI__ */