n_support.S revision 1.11 1 1.11 riastrad /* $NetBSD: n_support.S,v 1.11 2024/05/07 15:15:10 riastradh Exp $ */
2 1.1 ragge /*
3 1.1 ragge * Copyright (c) 1985, 1993
4 1.1 ragge * The Regents of the University of California. All rights reserved.
5 1.1 ragge *
6 1.1 ragge * Redistribution and use in source and binary forms, with or without
7 1.1 ragge * modification, are permitted provided that the following conditions
8 1.1 ragge * are met:
9 1.1 ragge * 1. Redistributions of source code must retain the above copyright
10 1.1 ragge * notice, this list of conditions and the following disclaimer.
11 1.1 ragge * 2. Redistributions in binary form must reproduce the above copyright
12 1.1 ragge * notice, this list of conditions and the following disclaimer in the
13 1.1 ragge * documentation and/or other materials provided with the distribution.
14 1.6 agc * 3. Neither the name of the University nor the names of its contributors
15 1.1 ragge * may be used to endorse or promote products derived from this software
16 1.1 ragge * without specific prior written permission.
17 1.1 ragge *
18 1.1 ragge * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 1.1 ragge * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 1.1 ragge * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 1.1 ragge * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 1.1 ragge * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 1.1 ragge * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 1.1 ragge * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 1.1 ragge * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 1.1 ragge * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 1.1 ragge * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 1.1 ragge * SUCH DAMAGE.
29 1.1 ragge *
30 1.1 ragge * @(#)support.s 8.1 (Berkeley) 6/4/93
31 1.1 ragge */
32 1.3 matt #include <machine/asm.h>
33 1.3 matt
/*
 * Weak aliases: the long-double entry points (logbl, copysignl) resolve
 * to the double-precision implementations below -- on the VAX, long
 * double has the same representation as double.
 */
34 1.7 martin WEAK_ALIAS(logbl,logb)
35 1.10 joerg WEAK_ALIAS(copysignl, _copysignl)
36 1.10 joerg WEAK_ALIAS(_copysignl, copysign)
37 1.7 martin 
38 1.3 matt .text
/* Embedded SCCS identification string (historical, kept for provenance). */
39 1.1 ragge _sccsid:
40 1.3 matt .asciz "@(#)support.s\t1.3 (Berkeley) 8/21/85; 8.1 (ucb.elefunt) 6/4/93"
41 1.1 ragge
42 1.1 ragge /*
43 1.1 ragge * copysign(x,y),
44 1.1 ragge * logb(x),
45 1.1 ragge * scalb(x,N),
46 1.1 ragge * finite(x),
47 1.1 ragge * drem(x,y),
48 1.1 ragge * Coded in vax assembly language by K.C. Ng, 3/14/85.
49 1.1 ragge * Revised by K.C. Ng on 4/9/85.
50 1.1 ragge */
51 1.1 ragge
52 1.1 ragge /*
53 1.3 matt * double copysign(double x,double y)
54 1.1 ragge */
55 1.3 matt
/*
 * copysign(x, y): return x with the sign bit of y.
 *
 * VAX D_float, first 16-bit word: bit 15 = sign, bits 14-7 = exponent,
 * bits 6-0 = high fraction bits.  A zero exponent field means x is 0 or
 * a reserved operand; in that case x is returned unchanged.
 * Args: x at 4(%ap) (quadword), y at 12(%ap).  Result in %r0:%r1.
 */
56 1.3 matt ENTRY(copysign, 0)
57 1.4 matt movq 4(%ap),%r0 # load x into %r0:%r1
58 1.4 matt bicw3 $0x807f,%r0,%r2 # extract the exponent field of x
59 1.1 ragge beql Lz # if zero or reserved op then return x
60 1.4 matt bicw3 $0x7fff,12(%ap),%r2 # copy the sign bit of y into %r2
61 1.4 matt bicw2 $0x8000,%r0 # replace x by |x|
62 1.4 matt bisw2 %r2,%r0 # copy the sign bit of y to x
63 1.1 ragge Lz: ret
64 1.11 riastrad END(copysign)
65 1.1 ragge
/*
 * copysignf(x, y): single-precision copysign.
 *
 * float args occupy one longword each: x at 4(%ap), y at 8(%ap).
 * The same first-word masks as the double version apply, since the
 * sign/exponent fields are tested with identical bit patterns.
 * A zero exponent field (x is 0 or reserved operand) returns x as-is.
 */
66 1.8 martin ENTRY(copysignf, 0)
67 1.9 martin movl 4(%ap),%r0 # load x into %r0
68 1.8 martin bicw3 $0x807f,%r0,%r2 # extract the exponent field of x
69 1.8 martin beql 1f # if zero or reserved op then return x
70 1.8 martin bicw3 $0x7fff,8(%ap),%r2 # copy the sign bit of y into %r2
71 1.8 martin bicw2 $0x8000,%r0 # replace x by |x|
72 1.8 martin bisw2 %r2,%r0 # copy the sign bit of y to x
73 1.8 martin 1: ret
74 1.11 riastrad END(copysignf)
75 1.8 martin
76 1.1 ragge /*
77 1.7 martin * float logbf(float x);
78 1.7 martin */
/*
 * logbf(x): widen the float argument to double on the stack, call the
 * double-precision logb, then narrow the result back to float.
 */
79 1.7 martin ENTRY(logbf, 0)
80 1.7 martin cvtfd 4(%ap),-(%sp) # push x widened to a double (2 longwords)
81 1.7 martin calls $2,_C_LABEL(logb) # call logb(x); pops the 2 longwords
82 1.7 martin cvtdf %r0,%r0 # narrow double result to float
83 1.7 martin ret
84 1.11 riastrad END(logbf)
85 1.7 martin
86 1.7 martin /*
87 1.3 matt * double logb(double x);
88 1.1 ragge */
/*
 * logb(x): return the unbiased binary exponent of x as a double.
 *
 * The exponent field (bits 14-7 of the first word) is extracted, shifted
 * down, and 129 is subtracted (D_float excess-128 bias plus one, because
 * the fraction is normalized to [0.5, 1)), so e.g. logb(1.0) == 0.
 * Special cases: x == 0 returns a huge negative value (-2147483647.0,
 * stored as a literal D_float bit pattern); a reserved operand is
 * returned unchanged.
 */
89 1.3 matt ENTRY(logb, 0)
90 1.4 matt bicl3 $0xffff807f,4(%ap),%r0 # extract the exponent field of x
91 1.1 ragge beql Ln # zero exponent: x is 0 or reserved op
92 1.4 matt ashl $-7,%r0,%r0 # shift field down: biased exponent
93 1.4 matt subl2 $129,%r0 # remove the bias
94 1.4 matt cvtld %r0,%r0 # return the answer in double
95 1.1 ragge ret
96 1.4 matt Ln: movq 4(%ap),%r0 # %r0:1 = x (zero or reserved op)
97 1.1 ragge bneq 1f # simply return if reserved op
98 1.4 matt movq $0x0000fe00ffffcfff,%r0 # D_float bit pattern for -2147483647.0
99 1.1 ragge 1: ret
100 1.11 riastrad END(logb)
101 1.1 ragge
102 1.1 ragge /*
103 1.3 matt * long finite(double x);
104 1.1 ragge */
/*
 * finite(x): return 1 unless x is the VAX reserved operand
 * (sign bit set, exponent field zero), in which case return 0.
 * Zero is finite (sign+exponent word 0x0000 != 0x8000, so it returns 1).
 * Only the first word of x is examined, so the float version finitef
 * can alias this entry point directly (non-G_float formats share the
 * first-word sign/exponent layout).
 */
105 1.5 matt #ifndef __GFLOAT__
106 1.5 matt .globl finitef
107 1.5 matt finitef = finite
108 1.5 matt #endif
109 1.3 matt ENTRY(finite, 0)
110 1.5 matt bicw3 $0x7f,4(%ap),%r0 # keep sign + exponent, drop fraction bits
111 1.4 matt cmpw %r0,$0x8000 # to see if x is the reserved op
112 1.4 matt beql 1f # if so, return FALSE (0)
113 1.4 matt movl $1,%r0 # else return TRUE (1)
114 1.4 matt ret
115 1.4 matt 1: clrl %r0
116 1.4 matt ret
117 1.11 riastrad END(finite)
118 1.1 ragge
119 1.3 matt /* int isnan(double x);
120 1.3 matt */
/*
 * Compiled out: the VAX floating formats have no NaN, so isnan would
 * always return 0.  Kept (disabled) for reference; a C or weak-alias
 * definition is presumably provided elsewhere -- TODO confirm.
 */
121 1.3 matt #if 0
122 1.3 matt ENTRY(isnan, 0)
123 1.4 matt clrl %r0 # no NaNs on the VAX: always 0
124 1.3 matt ret
125 1.3 matt #endif
126 1.3 matt
127 1.3 matt /* int isnanf(float x);
128 1.3 matt */
/* The VAX floating formats have no NaN, so this always returns 0. */
129 1.3 matt ENTRY(isnanf, 0)
130 1.4 matt clrl %r0 # return 0 unconditionally
131 1.3 matt ret
132 1.11 riastrad END(isnanf)
133 1.3 matt
134 1.1 ragge /*
135 1.1 ragge * double scalb(x,N)
136 1.1 ragge * double x; double N;
137 1.1 ragge */
/*
 * scalb(x, N): return x * 2**N by adding N to x's exponent field.
 *
 * N is passed as a double at 12(%ap) and truncated to an integer with
 * cvtdl.  |N| >= 300 (0x12c) cannot fit any D_float exponent, so those
 * cases short-circuit straight to overflow/underflow.  Otherwise N is
 * shifted left 7 to align with the exponent field (bits 14-7) and added;
 * if the resulting biased exponent is <= 0 the result underflows to 0,
 * and if it reaches the sign bit (>= 0x8000) it overflows.
 * Overflow reports ERANGE through infnan; if infnan returns, the sign
 * of the original x is re-attached to its result.
 * x == 0 or a reserved operand is returned unchanged.
 */
138 1.1 ragge .set ERANGE,34
139 1.3 matt 
140 1.3 matt ENTRY(scalb, 0)
141 1.4 matt movq 4(%ap),%r0 # load x into %r0:%r1
142 1.4 matt bicl3 $0xffff807f,%r0,%r3 # %r3 = exponent field of x
143 1.1 ragge beql ret1 # 0 or reserved operand
144 1.4 matt movq 12(%ap),%r4 # load N (a double) into %r4:%r5
145 1.4 matt cvtdl %r4, %r2 # %r2 = (int)N, truncated
146 1.4 matt cmpl %r2,$0x12c # N >= 300: exponent can't hold it
147 1.1 ragge bgeq ovfl
148 1.4 matt cmpl %r2,$-0x12c # N <= -300: guaranteed underflow
149 1.1 ragge bleq unfl
150 1.4 matt ashl $7,%r2,%r2 # align N with the exponent field
151 1.4 matt addl2 %r2,%r3 # tentative new exponent field
152 1.1 ragge bleq unfl # biased exponent <= 0: underflow
153 1.4 matt cmpl %r3,$0x8000 # spilled into the sign bit?
154 1.1 ragge bgeq ovfl
155 1.4 matt addl2 %r2,%r0 # commit: bump x's exponent by N
156 1.1 ragge ret
157 1.1 ragge ovfl: pushl $ERANGE
158 1.3 matt calls $1,_C_LABEL(infnan) # if it returns
159 1.4 matt bicw3 $0x7fff,4(%ap),%r2 # get the sign of input arg
160 1.4 matt bisw2 %r2,%r0 # re-attach the sign to %r0/1
161 1.1 ragge ret
162 1.4 matt unfl: movq $0,%r0 # underflow result is exactly 0
163 1.1 ragge ret1: ret
164 1.11 riastrad END(scalb)
165 1.1 ragge
166 1.1 ragge /*
167 1.1 ragge * DREM(X,Y)
168 1.1 ragge * RETURN X REM Y =X-N*Y, N=[X/Y] ROUNDED (ROUNDED TO EVEN IN THE HALF WAY CASE)
169 1.1 ragge * DOUBLE PRECISION (VAX D format 56 bits)
170 1.1 ragge * CODED IN VAX ASSEMBLY LANGUAGE BY K.C. NG, 4/8/85.
171 1.1 ragge */
/*
 * drem(x, y): return x REM y = x - n*y with n = round-to-nearest(x/y),
 * ties broken to even n.  D_float (56-bit fraction).
 *
 * Outline:
 *  - y == 0: report EDOM through infnan (reserved operand fault path).
 *  - If x or y is the reserved operand, propagate a reserved operand.
 *  - If y's exponent is tiny (<= 57), scale y up by 2**57 and remember
 *    the scaling in nx/nf so x can be rescaled before/after reduction
 *    without intermediate underflow.
 *  - Split y into y1 (low-order fraction bits cleared) + tail, so that
 *    n*y1 is computed exactly; the reduction subtracts n*y1 and
 *    n*(y-y1) separately to avoid rounding error.
 *  - Reduction loop: while x > y, form t = y (scaled up by
 *    k = xexp - yexp - 25 when k >= 0 so the quotient stays small),
 *    take n = trunc(x/t), and set x -= n*t exactly as above.
 *  - Tie handling: compare the remainder against y/2; on exact equality
 *    subtract y only if n is odd (round to even).
 *  - Restore the sign of x by xor, then rebuild the exponent: if the
 *    rescale by nf drives it to <= 0, the result underflows to zero.
 *
 * Locals (12 bytes of frame): -4(%fp) = nx (pending scale, exponent-field
 * units), -8(%fp) = nf (total scale to undo), -12(%fp) = sign of x.
 * Entry mask 0x0fc0 preserves %r6-%r11 for the caller.
 */
172 1.1 ragge .set EDOM,33
173 1.3 matt 
174 1.3 matt ENTRY(drem, 0x0fc0)
175 1.4 matt subl2 $12,%sp # allocate the three word locals
176 1.4 matt movq 4(%ap),%r0 #%r0=x
177 1.4 matt movq 12(%ap),%r2 #%r2=y
178 1.1 ragge jeql Rop #if y=0 then generate reserved op fault
179 1.4 matt bicw3 $0x007f,%r0,%r4 #check if x is Rop
180 1.4 matt cmpw %r4,$0x8000
181 1.1 ragge jeql Ret #if x is Rop then return Rop
182 1.4 matt bicl3 $0x007f,%r2,%r4 #check if y is Rop (NOTE(review): bicl3 here vs bicw3 for x; harmless, cmpw only examines the low word)
183 1.4 matt cmpw %r4,$0x8000
184 1.1 ragge jeql Ret #if y is Rop then return Rop
185 1.4 matt bicw2 $0x8000,%r2 #y := |y|
186 1.4 matt movw $0,-4(%fp) #-4(%fp) = nx := 0
187 1.4 matt cmpw %r2,$0x1c80 #yexp ? 57
188 1.1 ragge bgtr C1 #if yexp > 57 goto C1
189 1.4 matt addw2 $0x1c80,%r2 #scale up y by 2**57
190 1.4 matt movw $0x1c80,-4(%fp) #nx := 57 (exponent field)
191 1.1 ragge C1:
192 1.4 matt movw -4(%fp),-8(%fp) #-8(%fp) = nf := nx
193 1.4 matt bicw3 $0x7fff,%r0,-12(%fp) #-12(%fp) = sign of x
194 1.4 matt bicw2 $0x8000,%r0 #x := |x|
195 1.4 matt movq %r2,%r10 #y1 := y
196 1.4 matt bicl2 $0xffff07ff,%r11 #clear the last 27 bits of y1 so n*y1 is exact
197 1.1 ragge loop:
198 1.4 matt cmpd %r0,%r2 #x ? y
199 1.1 ragge bleq E1 #if x <= y goto E1
200 1.1 ragge /* begin argument reduction */
201 1.4 matt movq %r2,%r4 #t =y
202 1.4 matt movq %r10,%r6 #t1=y1
203 1.4 matt bicw3 $0x807f,%r0,%r8 #xexp= exponent of x
204 1.4 matt bicw3 $0x807f,%r2,%r9 #yexp= exponent of y
205 1.4 matt subw2 %r9,%r8 #xexp-yexp
206 1.4 matt subw2 $0x0c80,%r8 #k=xexp-yexp-25(exponent bit field)
207 1.1 ragge blss C2 #if k<0 goto C2
208 1.4 matt addw2 %r8,%r4 #t +=k
209 1.4 matt addw2 %r8,%r6 #t1+=k, scale up t and t1
210 1.1 ragge C2:
211 1.4 matt divd3 %r4,%r0,%r8 #x/t
212 1.4 matt cvtdl %r8,%r8 #n=[x/t] truncated
213 1.4 matt cvtld %r8,%r8 #float(n)
214 1.4 matt subd2 %r6,%r4 #t:=t-t1
215 1.4 matt muld2 %r8,%r4 #n*(t-t1)
216 1.4 matt muld2 %r8,%r6 #n*t1 (exact: y1's low fraction bits are zero)
217 1.4 matt subd2 %r6,%r0 #x-n*t1
218 1.4 matt subd2 %r4,%r0 #(x-n*t1)-n*(t-t1)
219 1.3 matt jbr loop
220 1.1 ragge E1:
221 1.4 matt movw -4(%fp),%r6 #%r6=nx
222 1.1 ragge beql C3 #if nx=0 goto C3
223 1.4 matt addw2 %r6,%r0 #x:=x*2**57 scale up x by nx
224 1.4 matt movw $0,-4(%fp) #clear nx so this happens only once
225 1.3 matt jbr loop
226 1.1 ragge C3:
227 1.4 matt movq %r2,%r4 #%r4 = y
228 1.4 matt subw2 $0x80,%r4 #%r4 = y/2 (decrement exponent field by 1)
229 1.4 matt cmpd %r0,%r4 #x:y/2
230 1.1 ragge blss E2 #if x < y/2 goto E2
231 1.1 ragge bgtr C4 #if x > y/2 goto C4
232 1.4 matt cvtdl %r8,%r8 #ifix(float(n)): half-way case, test parity of n
233 1.4 matt blbc %r8,E2 #if the last bit is zero (n even), goto E2
234 1.1 ragge C4:
235 1.4 matt subd2 %r2,%r0 #x-y
236 1.1 ragge E2:
237 1.4 matt xorw2 -12(%fp),%r0 #x^sign: restore the original sign of x
238 1.4 matt movw -8(%fp),%r6 #%r6=nf
239 1.4 matt bicw3 $0x807f,%r0,%r8 #%r8=exponent of x
240 1.4 matt bicw2 $0x7f80,%r0 #clear the exponent of x
241 1.4 matt subw2 %r6,%r8 #%r8=xexp-nf: undo the 2**57 scaling
242 1.1 ragge bgtr C5 #if xexp-nf is positive goto C5
243 1.4 matt movw $0,%r8 #clear %r8
244 1.4 matt movq $0,%r0 #x underflow to zero
245 1.1 ragge C5:
246 1.4 matt bisw2 %r8,%r0 /* put %r8 into x's exponent field */
247 1.1 ragge ret
248 1.1 ragge Rop: #Reserved operand
249 1.1 ragge pushl $EDOM
250 1.3 matt calls $1,_C_LABEL(infnan) #generate reserved op fault
251 1.1 ragge ret
252 1.1 ragge Ret:
253 1.4 matt movq $0x8000,%r0 #propagate reserved op (sign=1, exponent=0)
254 1.1 ragge ret
255 1.11 riastrad END(drem)
256