/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arm/asm.h>
#include <arm/vfpreg.h>

RCSID("$NetBSD: vfpsf.S,v 1.2 2013/06/23 06:19:55 matt Exp $")

/*
 * This file provides soft-float compatible routines which use VFP
 * instructions to do the actual work.  This should give near hard-float
 * performance while being compatible with soft-float code.
 *
 * This file implements the single-precision floating point routines.
 */

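/*
 * Background (a sketch of the calling convention): under the soft-float
 * ABI the compiler lowers a floating-point expression such as "a + b" to
 * a library call that passes the operand bit patterns in r0/r1 and takes
 * the result from r0 (__addsf3 for addition, and so on).  Each routine
 * below therefore just moves the bits into VFP registers, executes the
 * hardware instruction, and moves the result back.
 *
 * Under the ARM EABI, the run-time ABI gives these helpers
 * __aeabi_-prefixed names, so the traditional libgcc names are remapped
 * first.
 */
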
#ifdef __ARM_EABI__
#define	__addsf3	__aeabi_fadd
#define	__divsf3	__aeabi_fdiv
#define	__mulsf3	__aeabi_fmul
#define	__subsf3	__aeabi_fsub
#define	__negsf2	__aeabi_fneg
#define	__truncdfsf2	__aeabi_d2f
#define	__fixsfsi	__aeabi_f2iz
#define	__fixunssfsi	__aeabi_f2uiz
#define	__floatsisf	__aeabi_i2f
#define	__floatunsisf	__aeabi_ui2f
#endif

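/*
 * Each two-operand arithmetic routine follows the same pattern: move the
 * operands from r0/r1 into s0/s1, perform the VFP operation, and move the
 * result back to r0.
 */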
ENTRY(__addsf3)
	vmov	s0, s1, r0, r1
	vadd.f32 s0, s0, s1
	vmov	r0, s0
	RET
END(__addsf3)

ENTRY(__subsf3)
	vmov	s0, s1, r0, r1
	vsub.f32 s0, s0, s1
	vmov	r0, s0
	RET
END(__subsf3)

#ifdef __ARM_EABI__
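/* EABI-only reversed subtract: returns b - a rather than a - b. */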
ENTRY(__aeabi_frsub)
	vmov	s0, s1, r0, r1
	vsub.f32 s0, s1, s0
	vmov	r0, s0
	RET
END(__aeabi_frsub)
#endif

ENTRY(__mulsf3)
	vmov	s0, s1, r0, r1
	vmul.f32 s0, s0, s1
	vmov	r0, s0
	RET
END(__mulsf3)

ENTRY(__divsf3)
	vmov	s0, s1, r0, r1
	vdiv.f32 s0, s0, s1
	vmov	r0, s0
	RET
END(__divsf3)

ENTRY(__negsf2)
	vmov	s0, r0
	vneg.f32 s0, s0
	vmov	r0, s0
	RET
END(__negsf2)

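/*
 * A double arrives in the r0/r1 pair under the soft-float ABI; which
 * register holds which half of the value depends on endianness, hence
 * the __ARMEL__ conditional below.
 */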
ENTRY(__truncdfsf2)
#ifdef __ARMEL__
	vmov	d0, r0, r1
#else
	vmov	d0, r1, r0
#endif
	vcvt.f32.f64 s0, d0
	vmov	r0, s0
	RET
END(__truncdfsf2)

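/*
 * vcvt into an integer destination rounds toward zero by default, which
 * is the truncation that the "z"-suffixed AEABI names (f2iz, f2uiz)
 * require.
 */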
ENTRY(__fixsfsi)
	vmov	s0, r0
	vcvt.s32.f32 s0, s0
	vmov	r0, s0
	RET
END(__fixsfsi)

ENTRY(__fixunssfsi)
	vmov	s0, r0
	vcvt.u32.f32 s0, s0
	vmov	r0, s0
	RET
END(__fixunssfsi)

ENTRY(__floatsisf)
	vmov	s0, r0
	vcvt.f32.s32 s0, s0
	vmov	r0, s0
	RET
END(__floatsisf)

ENTRY(__floatunsisf)
	vmov	s0, r0
	vcvt.f32.u32 s0, s0
	vmov	r0, s0
	RET
END(__floatunsisf)

/*
 * Effect of a floating-point comparison on the condition flags:
 *	  N Z C V
 * EQ =	  0 1 1 0
 * LT =	  1 0 0 0
 * GT =	  0 0 1 0
 * UN =	  0 0 1 1
 */
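/*
 * Note how unordered results are filtered out below: UN sets both C and
 * V, so, for example, in __aeabi_fcmplt the "movlt" also fires for UN
 * (N != V), but the following "movcs" then overwrites r0 with 0 because
 * C is set.
 */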
#ifdef __ARM_EABI__
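/*
 * The __aeabi_cfcmp* routines return their result in the PSR flags
 * rather than in r0.  __aeabi_cfcmpeq uses the quiet vcmp so that quiet
 * NaNs do not raise Invalid Operation; the cfcmple variants use the
 * signalling vcmpe, which the run-time ABI allows to trap on NaNs.
 */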
ENTRY(__aeabi_cfcmpeq)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	RET
END(__aeabi_cfcmpeq)

ENTRY(__aeabi_cfcmple)
	vmov	s0, s1, r0, r1
	vcmpe.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	RET
END(__aeabi_cfcmple)

ENTRY(__aeabi_cfrcmple)
	vmov	s0, s1, r0, r1
	vcmpe.f32 s1, s0
	vmrs	APSR_nzcv, fpscr
	RET
END(__aeabi_cfrcmple)

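/* The __aeabi_fcmp* predicates instead return a 0/1 boolean in r0. */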
ENTRY(__aeabi_fcmpeq)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	moveq	r0, #1		/* (a == b) */
	movne	r0, #0		/* (a != b) or unordered */
	RET
END(__aeabi_fcmpeq)

ENTRY(__aeabi_fcmplt)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	movlt	r0, #1		/* (a < b) */
	movcs	r0, #0		/* (a >= b) or unordered */
	RET
END(__aeabi_fcmplt)

ENTRY(__aeabi_fcmple)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	movls	r0, #1		/* (a <= b) */
	movhi	r0, #0		/* (a > b) or unordered */
	RET
END(__aeabi_fcmple)

ENTRY(__aeabi_fcmpge)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	movge	r0, #1		/* (a >= b) */
	movlt	r0, #0		/* (a < b) or unordered */
	RET
END(__aeabi_fcmpge)

ENTRY(__aeabi_fcmpgt)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	movgt	r0, #1		/* (a > b) */
	movle	r0, #0		/* (a <= b) or unordered */
	RET
END(__aeabi_fcmpgt)

ENTRY(__aeabi_fcmpun)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	movvs	r0, #1		/* (isnan(a) || isnan(b)) */
	movvc	r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__aeabi_fcmpun)

#else
/* N set if a < b */
/* Z set if a == b */
/* C set if a == b, a > b, or unordered */
/* V set if unordered */

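/*
 * The traditional libgcc comparison helpers only define their result
 * when neither operand is a NaN, which is what lets each pair of
 * predicates (eq/ne, ge/lt, gt/le) share a single body via STRONG_ALIAS.
 */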
STRONG_ALIAS(__eqsf2, __nesf2)
ENTRY(__nesf2)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	moveq	r0, #0		/* (a == b) */
	movne	r0, #1		/* (a != b) or unordered */
	RET
END(__nesf2)

STRONG_ALIAS(__gesf2, __ltsf2)
ENTRY(__ltsf2)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	mvnmi	r0, #0		/* (a < b) -> -1 */
	movpl	r0, #0		/* (a >= b) or unordered -> 0 */
	RET
END(__ltsf2)

STRONG_ALIAS(__gtsf2, __lesf2)
ENTRY(__lesf2)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	movgt	r0, #1		/* (a > b) */
	movle	r0, #0		/* (a <= b) or unordered */
	RET
END(__lesf2)

ENTRY(__unordsf2)
	vmov	s0, s1, r0, r1
	vcmp.f32 s0, s1
	vmrs	APSR_nzcv, fpscr
	movvs	r0, #1		/* isnan(a) || isnan(b) */
	movvc	r0, #0		/* !isnan(a) && !isnan(b) */
	RET
END(__unordsf2)
#endif /* !__ARM_EABI__ */