/*	$NetBSD: n_support.S,v 1.7 2014/02/03 21:22:21 martin Exp $	*/
/*
 * Copyright (c) 1985, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)support.s	8.1 (Berkeley) 6/4/93
 */
#include <machine/asm.h>

WEAK_ALIAS(logbl,logb)

	.text
_sccsid:
	.asciz "@(#)support.s\t1.3 (Berkeley) 8/21/85; 8.1 (ucb.elefunt) 6/4/93"

/*
 * copysign(x,y),
 * logb(x),
 * scalb(x,N),
 * finite(x),
 * drem(x,y),
 * Coded in vax assembly language by K.C. Ng,  3/14/85.
 * Revised by K.C. Ng on 4/9/85.
 */

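/*
 * VAX D_floating layout (first 16-bit word): bit 15 is the sign, bits
 * 14-7 hold the excess-128 exponent, and bits 6-0 hold the leading
 * fraction bits; a zero exponent field means zero (sign clear) or the
 * reserved operand (sign set).  The bit masks below follow from this:
 * clearing 0x807f leaves only the exponent field, clearing 0x7fff
 * leaves only the sign bit, and clearing 0x8000 takes |x|.
 */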
/*
 * double copysign(double x,double y)
 */
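/*
 * E.g. copysign(3.0, -0.5) = -3.0; if x is zero or the reserved
 * operand it is returned unchanged.
 */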

ENTRY(copysign, 0)
	movq	4(%ap),%r0		# load x into %r0
	bicw3	$0x807f,%r0,%r2		# extract the exponent of x
	beql	Lz			# if zero or reserved op then return x
	bicw3	$0x7fff,12(%ap),%r2	# copy the sign bit of y into %r2
	bicw2	$0x8000,%r0		# replace x by |x|
	bisw2	%r2,%r0			# copy the sign bit of y to x
Lz:	ret

/*
 * float logbf(float x);
 */
ENTRY(logbf, 0)
	cvtfd	4(%ap),-(%sp)
	calls	$2,_C_LABEL(logb)
	cvtdf	%r0,%r0
	ret

/*
 * double logb(double x);
 */
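/*
 * logb() returns the unbiased exponent of x as a double, e.g.
 * logb(8.0) = 3.0 and logb(0.5) = -1.0; logb(0) returns
 * -2147483647.0 and a reserved operand is returned unchanged.
 */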
ENTRY(logb, 0)
	bicl3	$0xffff807f,4(%ap),%r0	# extract the exponent of x
	beql	Ln
	ashl	$-7,%r0,%r0		# get the biased exponent
	subl2	$129,%r0		# get the unbiased exponent
	cvtld	%r0,%r0			# return the answer in double
	ret
Ln:	movq	4(%ap),%r0		# %r0:1 = x (zero or reserved op)
	bneq	1f			# simply return if reserved op
	movq	$0x0000fe00ffffcfff,%r0	# -2147483647.0
1:	ret

/*
 * long finite(double x);
 */
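/*
 * finite() returns 0 only for the reserved operand (sign set,
 * exponent field 0); any other pattern, including zero, yields 1.
 */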
#ifndef __GFLOAT__
	.globl finitef
finitef = finite
#endif
ENTRY(finite, 0)
	bicw3	$0x7f,4(%ap),%r0	# mask off the mantissa
	cmpw	%r0,$0x8000		# to see if x is the reserved op
	beql	1f			# if so, return FALSE (0)
	movl	$1,%r0			# else return TRUE (1)
	ret
1:	clrl	%r0
	ret

/* int isnan(double x);
 */
#if 0
ENTRY(isnan, 0)
	clrl	%r0
	ret
#endif

/* int isnanf(float x);
 */
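/*
 * The VAX has no NaN representation, so isnan()/isnanf() simply
 * return 0.
 */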
ENTRY(isnanf, 0)
	clrl	%r0
	ret

/*
 * double scalb(x,N)
 * double x; double N;
 */
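/*
 * scalb(x,N) returns x * 2**N by adding N to the exponent field,
 * e.g. scalb(3.0, 4.0) = 48.0; infnan(ERANGE) is called on exponent
 * overflow and zero is returned on underflow.
 */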
	.set	ERANGE,34

ENTRY(scalb, 0)
	movq	4(%ap),%r0
	bicl3	$0xffff807f,%r0,%r3
	beql	ret1			# 0 or reserved operand
	movq	12(%ap),%r4
	cvtdl	%r4,%r2
	cmpl	%r2,$0x12c
	bgeq	ovfl
	cmpl	%r2,$-0x12c
	bleq	unfl
	ashl	$7,%r2,%r2
	addl2	%r2,%r3
	bleq	unfl
	cmpl	%r3,$0x8000
	bgeq	ovfl
	addl2	%r2,%r0
	ret
ovfl:	pushl	$ERANGE
	calls	$1,_C_LABEL(infnan)	# if it returns
	bicw3	$0x7fff,4(%ap),%r2	# get the sign of input arg
	bisw2	%r2,%r0			# re-attach the sign to %r0/1
	ret
unfl:	movq	$0,%r0
ret1:	ret

/*
 * DREM(X,Y)
 * RETURN X REM Y =X-N*Y, N=[X/Y] ROUNDED (ROUNDED TO EVEN IN THE HALF WAY CASE)
 * DOUBLE PRECISION (VAX D format 56 bits)
 * CODED IN VAX ASSEMBLY LANGUAGE BY K.C. NG, 4/8/85.
 */
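/*
 * E.g. drem(5.0, 2.0) = 1.0 and drem(7.0, 2.0) = -1.0 (7/2 = 3.5
 * rounds to the even integer 4, so 7 - 4*2 = -1).
 */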
	.set	EDOM,33

ENTRY(drem, 0x0fc0)
	subl2	$12,%sp
	movq	4(%ap),%r0		#%r0=x
	movq	12(%ap),%r2		#%r2=y
	jeql	Rop			#if y=0 then generate reserved op fault
	bicw3	$0x007f,%r0,%r4		#check if x is Rop
	cmpw	%r4,$0x8000
	jeql	Ret			#if x is Rop then return Rop
	bicl3	$0x007f,%r2,%r4		#check if y is Rop
	cmpw	%r4,$0x8000
	jeql	Ret			#if y is Rop then return Rop
	bicw2	$0x8000,%r2		#y  := |y|
	movw	$0,-4(%fp)		#-4(%fp) = nx := 0
	cmpw	%r2,$0x1c80		#yexp ? 57
	bgtr	C1			#if yexp > 57 goto C1
	addw2	$0x1c80,%r2		#scale up y by 2**57
	movw	$0x1c80,-4(%fp)		#nx := 57 (exponent field)
C1:
	movw	-4(%fp),-8(%fp)		#-8(%fp) = nf := nx
	bicw3	$0x7fff,%r0,-12(%fp)	#-12(%fp) = sign of x
	bicw2	$0x8000,%r0		#x  := |x|
	movq	%r2,%r10		#y1 := y
	bicl2	$0xffff07ff,%r11	#clear the last 27 bits of y1
loop:
	cmpd	%r0,%r2			#x ? y
	bleq	E1			#if x <= y goto E1
 /* begin argument reduction */
	movq	%r2,%r4			#t =y
	movq	%r10,%r6		#t1=y1
	bicw3	$0x807f,%r0,%r8		#xexp= exponent of x
	bicw3	$0x807f,%r2,%r9		#yexp= exponent of y
	subw2	%r9,%r8			#xexp-yexp
	subw2	$0x0c80,%r8		#k=xexp-yexp-25(exponent bit field)
	blss	C2			#if k<0 goto C2
	addw2	%r8,%r4			#t +=k
	addw2	%r8,%r6			#t1+=k, scale up t and t1
C2:
	divd3	%r4,%r0,%r8		#x/t
	cvtdl	%r8,%r8			#n=[x/t] truncated
	cvtld	%r8,%r8			#float(n)
	subd2	%r6,%r4			#t:=t-t1
	muld2	%r8,%r4			#n*(t-t1)
	muld2	%r8,%r6			#n*t1
	subd2	%r6,%r0			#x-n*t1
	subd2	%r4,%r0			#(x-n*t1)-n*(t-t1)
	jbr	loop
E1:
	movw	-4(%fp),%r6		#%r6=nx
	beql	C3			#if nx=0 goto C3
	addw2	%r6,%r0			#x:=x*2**57 scale up x by nx
	movw	$0,-4(%fp)		#clear nx
	jbr	loop
C3:
	movq	%r2,%r4			#%r4 = y
	subw2	$0x80,%r4		#%r4 = y/2
	cmpd	%r0,%r4			#x:y/2
	blss	E2			#if x < y/2 goto E2
	bgtr	C4			#if x > y/2 goto C4
	cvtdl	%r8,%r8			#ifix(float(n))
	blbc	%r8,E2			#if the last bit is zero, goto E2
C4:
	subd2	%r2,%r0			#x-y
E2:
	xorw2	-12(%fp),%r0		#x^sign (exclusive or)
	movw	-8(%fp),%r6		#%r6=nf
	bicw3	$0x807f,%r0,%r8		#%r8=exponent of x
	bicw2	$0x7f80,%r0		#clear the exponent of x
	subw2	%r6,%r8			#%r8=xexp-nf
	bgtr	C5			#if xexp-nf is positive goto C5
	movw	$0,%r8			#clear %r8
	movq	$0,%r0			#x underflow to zero
C5:
	bisw2	%r8,%r0			/* put %r8 into x's exponent field */
	ret
Rop:					#Reserved operand
	pushl	$EDOM
	calls	$1,_C_LABEL(infnan)	#generate reserved op fault
	ret
Ret:
	movq	$0x8000,%r0		#propagate reserved op
	ret