/*	$NetBSD: n_atan2.S,v 1.6 2003/08/07 16:44:44 agc Exp $	*/
/*
 * Copyright (c) 1985, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)atan2.s	8.1 (Berkeley) 6/4/93
 */

#include <machine/asm.h>

/*
 * ATAN2(Y,X)
 * RETURN ARG (X+iY)
 * VAX D FORMAT (56 BITS PRECISION)
 * CODED IN VAX ASSEMBLY LANGUAGE BY K.C. NG, 4/16/85;
 *
 *
 * Method :
 *	1. Reduce y to positive by atan2(y,x)=-atan2(-y,x).
 *	2. Reduce x to positive by (if x and y are unexceptional):
 *		ARG (x+iy) = arctan(y/x)	   ... if x > 0,
 *		ARG (x+iy) = pi - arctan[y/(-x)]   ... if x < 0,
 *	3. According to the integer k = 4t+0.25 truncated (where t = y/x),
 *	   the argument is further reduced to one of the following intervals
 *	   and the arctangent of y/x is evaluated by the corresponding
 *	   formula (sketched in C below):
 *
 *	   [0,7/16]	atan(y/x) = t - t^3*(a1+t^2*(a2+...(a10+t^2*a11)...))
 *	   [7/16,11/16]	atan(y/x) = atan(1/2) + atan( (y-x/2)/(x+y/2) )
 *	   [11/16,19/16]	atan(y/x) = atan( 1 ) + atan( (y-x)/(x+y) )
 *	   [19/16,39/16]	atan(y/x) = atan(3/2) + atan( (y-1.5x)/(x+1.5y) )
 *	   [39/16,INF]	atan(y/x) = atan(INF) + atan( -x/y )
 *
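 * The reduction in steps 2-3 can be pictured with the following rough C
 * sketch (illustration only, not part of this file: IEEE double stands in
 * for VAX D format, libm atan() stands in for the a1..a11 polynomial, and
 * the function name is ad hoc):
 *
 *	#include <math.h>
 *
 *	static double
 *	arg_reduce(double y, double x)	// assumes x > 0, y >= 0, both finite
 *	{
 *		double t = y / x, hi, s;
 *
 *		if (t < 7.0/16)       { hi = 0.0;       s = t; }
 *		else if (t < 11.0/16) { hi = atan(0.5); s = (y - x/2) / (x + y/2); }
 *		else if (t < 19.0/16) { hi = atan(1.0); s = (y - x) / (x + y); }
 *		else if (t < 39.0/16) { hi = atan(1.5); s = (y - 1.5*x) / (x + 1.5*y); }
 *		else                  { hi = M_PI_2;    s = -x / y; }
 *		// the assembly replaces atan(s) by s - s^3*(a1+s^2*(a2+...))
 *		return hi + atan(s);
 *	}
 *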
 * Special cases:
 * Notations: atan2(y,x) == ARG (x+iy) == ARG(x,y).
 *
 *	ARG( NAN , (anything) ) is NaN;
 *	ARG( (anything), NaN ) is NaN;
 *	ARG(+(anything but NaN), +-0) is +-0 ;
 *	ARG(-(anything but NaN), +-0) is +-PI ;
 *	ARG( 0, +-(anything but 0 and NaN) ) is +-PI/2;
 *	ARG( +INF,+-(anything but INF and NaN) ) is +-0 ;
 *	ARG( -INF,+-(anything but INF and NaN) ) is +-PI;
 *	ARG( +INF,+-INF ) is +-PI/4 ;
 *	ARG( -INF,+-INF ) is +-3PI/4;
 *	ARG( (anything but 0, NaN, and INF),+-INF ) is +-PI/2;
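 *
 * In rough C, the special-case dispatch above looks like the sketch below
 * (illustration only: IEEE NaN/Inf tests stand in for this file's VAX
 * reserved-operand and zero checks, and the function name is ad hoc):
 *
 *	#include <math.h>
 *
 *	static double
 *	arg_special(double x, double y)	// ARG(x,y) as tabulated above
 *	{
 *		if (isnan(x) || isnan(y))
 *			return x + y;		// NaN propagates
 *		if (y == 0.0)
 *			return signbit(x) ? copysign(M_PI, y) : y;
 *		if (x == 0.0)
 *			return copysign(M_PI_2, y);
 *		if (isinf(y))
 *			return copysign(isinf(x) ?
 *			    (x > 0 ? M_PI_4 : 3.0*M_PI_4) : M_PI_2, y);
 *		if (isinf(x))
 *			return x > 0 ? copysign(0.0, y) : copysign(M_PI, y);
 *		return atan2(y, x);		// unexceptional: see the reduction sketch
 *	}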
 *
 * Accuracy:
 *	atan2(y,x) returns the exact ARG(x+iy) nearly rounded.
 */

ENTRY(atan2, 0x0fc0)
	movq	4(%ap),%r2		# %r2 = y
	movq	12(%ap),%r4		# %r4 = x
	bicw3	$0x7f,%r2,%r0
	bicw3	$0x7f,%r4,%r1
	cmpw	%r0,$0x8000		# y is the reserved operand
	jeql	resop
	cmpw	%r1,$0x8000		# x is the reserved operand
	jeql	resop
	subl2	$8,%sp
	bicw3	$0x7fff,%r2,-4(%fp)	# copy y sign bit to -4(%fp)
	bicw3	$0x7fff,%r4,-8(%fp)	# copy x sign bit to -8(%fp)
	cmpd	%r4,$0x4080		# x = 1.0 ?
	bneq	xnot1
	movq	%r2,%r0
	bicw2	$0x8000,%r0		# t = |y|
	movq	%r0,%r2			# y = |y|
	jbr	begin
xnot1:
	bicw3	$0x807f,%r2,%r11	# yexp
	jeql	yeq0			# if y=0 goto yeq0
	bicw3	$0x807f,%r4,%r10	# xexp
	jeql	pio2			# if x=0 goto pio2
	subw2	%r10,%r11		# k = yexp - xexp
	cmpw	%r11,$0x2000		# k >= 64 (exp) ?
	jgeq	pio2			# atan2 = +-pi/2
	divd3	%r4,%r2,%r0		# t = y/x never overflow
	bicw2	$0x8000,%r0		# t > 0
	bicw2	$0xff80,%r2		# clear the exponent of y
	bicw2	$0xff80,%r4		# clear the exponent of x
	bisw2	$0x4080,%r2		# normalize y to [1,2)
	bisw2	$0x4080,%r4		# normalize x to [1,2)
	subw2	%r11,%r4		# scale x so that yexp-xexp=k
begin:
	cmpw	%r0,$0x411c		# t : 39/16
	jgeq	L50
	addl3	$0x180,%r0,%r10		# 8*t
	cvtrfl	%r10,%r10		# [8*t] rounded to int
	ashl	$-1,%r10,%r10		# [8*t]/2
	casel	%r10,$0,$4
L1:
	.word	L20-L1
	.word	L20-L1
	.word	L30-L1
	.word	L40-L1
	.word	L40-L1
L10:
	movq	$0xb4d9940f985e407b,%r6	# Hi=.98279372324732906796d0
	movq	$0x21b1879a3bc2a2fc,%r8	# Lo=-.17092002525602665777d-17
	subd3	%r4,%r2,%r0		# y-x
	addw2	$0x80,%r0		# 2(y-x)
	subd2	%r4,%r0			# 2(y-x)-x
	addw2	$0x80,%r4		# 2x
	movq	%r2,%r10
	addw2	$0x80,%r10		# 2y
	addd2	%r10,%r2		# 3y
	addd2	%r4,%r2			# 3y+2x
	divd2	%r2,%r0			# (2y-3x)/(2x+3y)
	jbr	L60
L20:
	cmpw	%r0,$0x3280		# t : 2**(-28)
	jlss	L80
	clrq	%r6			# Hi=%r6=0, Lo=%r8=0
	clrq	%r8
	jbr	L60
L30:
	movq	$0xda7b2b0d63383fed,%r6	# Hi=.46364760900080611433d0
	movq	$0xf0ea17b2bf912295,%r8	# Lo=.10147340032515978826d-17
	movq	%r2,%r0
	addw2	$0x80,%r0		# 2y
	subd2	%r4,%r0			# 2y-x
	addw2	$0x80,%r4		# 2x
	addd2	%r2,%r4			# 2x+y
	divd2	%r4,%r0			# (2y-x)/(2x+y)
	jbr	L60
L50:
	movq	$0x68c2a2210fda40c9,%r6	# Hi=1.5707963267948966135d0
	movq	$0x06e0145c26332326,%r8	# Lo=.22517417741562176079d-17
	cmpw	%r0,$0x5100		# y : 2**57
	bgeq	L90
	divd3	%r2,%r4,%r0		# x/y
	bisw2	$0x8000,%r0		# -x/y
	jbr	L60
L40:
	movq	$0x68c2a2210fda4049,%r6	# Hi=.78539816339744830676d0
	movq	$0x06e0145c263322a6,%r8	# Lo=.11258708870781088040d-17
	subd3	%r4,%r2,%r0		# y-x
	addd2	%r4,%r2			# y+x
	divd2	%r2,%r0			# (y-x)/(y+x)
L60:					# %r0 = s, the reduced argument
	movq	%r0,%r10		# save s
	muld2	%r0,%r0			# s*s
	polyd	%r0,$12,ptable		# s^2*(a1+s^2*(a2+...))
	muld2	%r10,%r0		# s^3*(a1+s^2*(a2+...))
	subd2	%r0,%r8			# Lo - s^3*(...)
	addd3	%r8,%r10,%r0		# s + (Lo - s^3*(...))
	addd2	%r6,%r0			# Hi + s + (Lo - s^3*(...))
L80:
	movw	-8(%fp),%r2		# sign of x
	bneq	pim			# x < 0: result is pi - %r0
	bisw2	-4(%fp),%r0		# return sign(y)*%r0
	ret
L90:					# t is huge: result is Hi = pi/2 (sign applied at L80)
	movq	%r6,%r0			# result = Hi
	jbr	L80
pim:
	subd3	%r0,$0x68c2a2210fda4149,%r0	# pi-t
	bisw2	-4(%fp),%r0		# apply sign(y)
	ret
yeq0:
	movw	-8(%fp),%r2		# sign of x
	beql	zero			# sign(x)=0: return 0; else return pi
	movq	$0x68c2a2210fda4149,%r0	# pi=3.1415926535897932270d0
	ret
zero:
	clrq	%r0			# return 0
	ret
pio2:
	movq	$0x68c2a2210fda40c9,%r0	# pi/2=1.5707963267948966135d0
	bisw2	-4(%fp),%r0		# return sign(y)*pi/2
	ret
resop:
	movq	$0x8000,%r0		# propagate the reserved operand
	ret

	_ALIGN_TEXT
ptable:					# polyd coefficient table, degree 12 in s^2:
					# highest-order coefficient first, constant term (0) last
	.quad	0xb50f5ce96e7abd60
	.quad	0x51e44a42c1073e02
	.quad	0x3487e3289643be35
	.quad	0xdb62066dffba3e54
	.quad	0xcf8e2d5199abbe70
	.quad	0x26f39cb884883e88
	.quad	0x135117d18998be9d
	.quad	0x602ce9742e883eba
	.quad	0xa35ad0be8e38bee3
	.quad	0xffac922249243f12
	.quad	0x7f14ccccccccbf4c
	.quad	0xaa8faaaaaaaa3faa
	.quad	0x0000000000000000