/*	$NetBSD: n_support.S,v 1.12 2024/05/08 01:04:24 riastradh Exp $	*/
/*
 * Copyright (c) 1985, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)support.s	8.1 (Berkeley) 6/4/93
 */
#include <machine/asm.h>

WEAK_ALIAS(logbl,logb)
WEAK_ALIAS(copysignl, _copysignl)
WEAK_ALIAS(_copysignl, copysign)

	.text
_sccsid:
	.asciz "@(#)support.s\t1.3 (Berkeley) 8/21/85; 8.1 (ucb.elefunt) 6/4/93"

/*
 * copysign(x,y),
 * logb(x),
 * scalb(x,N),
 * finite(x),
 * drem(x,y),
 * Coded in vax assembly language by K.C. Ng,  3/14/85.
 * Revised by K.C. Ng on 4/9/85.
 */

/*
 * double copysign(double x,double y)
 */

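/*
 * Illustrative C sketch (not part of the original source): the routine
 * below works on the first 16-bit word of the VAX D_floating layout,
 * where bit 15 is the sign, bits 14:7 are the excess-128 exponent and
 * bits 6:0 are the leading mantissa bits.  The vax_copysign() helper is
 * hypothetical; assuming that in-memory layout, the logic is roughly:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static double vax_copysign(double x, double y)
 *	{
 *		uint16_t xw, yw;
 *
 *		memcpy(&xw, &x, sizeof(xw));	// sign | exp | hi-mantissa
 *		memcpy(&yw, &y, sizeof(yw));
 *		if ((xw & 0x7f80) == 0)		// exponent field of x is zero
 *			return x;		// zero or reserved operand
 *		xw = (xw & 0x7fff) | (yw & 0x8000);
 *		memcpy(&x, &xw, sizeof(xw));
 *		return x;
 *	}
 */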
ENTRY(copysign, 0)
	movq	4(%ap),%r0		# load x into %r0
	bicw3	$0x807f,%r0,%r2		# mask off the exponent of x
	beql	Lz			# if zero or reserved op then return x
	bicw3	$0x7fff,12(%ap),%r2	# copy the sign bit of y into %r2
	bicw2	$0x8000,%r0		# replace x by |x|
	bisw2	%r2,%r0			# copy the sign bit of y to x
Lz:	ret
END(copysign)

ENTRY(copysignf, 0)
	movl	4(%ap),%r0		# load x into %r0
	bicw3	$0x807f,%r0,%r2		# mask off the exponent of x
	beql	1f			# if zero or reserved op then return x
	bicw3	$0x7fff,8(%ap),%r2	# copy the sign bit of y into %r2
	bicw2	$0x8000,%r0		# replace x by |x|
	bisw2	%r2,%r0			# copy the sign bit of y to x
1:	ret
END(copysignf)

/*
 * float logbf(float x);
 */
ENTRY(logbf, 0)
	cvtfd   4(%ap),-(%sp)
	calls   $2,_C_LABEL(logb)
	cvtdf   %r0,%r0
	ret
END(logbf)

/*
 * double logb(double x);
 */
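/*
 * Illustrative C sketch (not part of the original source), assuming the
 * D_floating layout described above: the fraction f lies in [0.5, 1)
 * and x = f * 2^(e-128), so the exponent of x normalized to [1, 2) is
 * e - 129, which is what the assembly extracts.  The vax_logb() helper
 * is hypothetical:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static double vax_logb(double x)
 *	{
 *		uint16_t w0;
 *		uint64_t bits;
 *
 *		memcpy(&w0, &x, sizeof(w0));	// sign | exp | hi-mantissa
 *		memcpy(&bits, &x, sizeof(bits));
 *		if (((w0 >> 7) & 0xff) == 0)	// exponent field is zero
 *			return bits != 0 ? x : -2147483647.0;
 *		return (double)(((w0 >> 7) & 0xff) - 129);
 *	}
 */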
ENTRY(logb, 0)
	bicl3	$0xffff807f,4(%ap),%r0	# mask off the exponent of x
	beql    Ln
	ashl	$-7,%r0,%r0		# get the biased exponent
	subl2	$129,%r0		# get the unbiased exponent
	cvtld	%r0,%r0			# return the answer in double
	ret
Ln:	movq	4(%ap),%r0		# %r0:1 = x (zero or reserved op)
	bneq	1f			# simply return if reserved op
	movq 	$0x0000fe00ffffcfff,%r0  # -2147483647.0
1:	ret
END(logb)

/*
 * long finite(double x);
 */
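/*
 * Illustrative C sketch (not part of the original source): the only
 * non-finite D_floating datum is the reserved operand, which has the
 * sign bit set and a zero exponent field, so finite() only has to
 * reject that bit pattern.  The vax_finite() helper is hypothetical:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static int vax_finite(double x)
 *	{
 *		uint16_t w0;
 *
 *		memcpy(&w0, &x, sizeof(w0));	// sign | exp | hi-mantissa
 *		return (w0 & 0xff80) != 0x8000;	// reserved operand -> 0
 *	}
 */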
#ifndef __GFLOAT__
WEAK_ALIAS(finitef, _finitef)
STRONG_ALIAS(_finitef, _finite)
#endif
WEAK_ALIAS(finite, _finite)
ENTRY(_finite, 0)
	bicw3	$0x7f,4(%ap),%r0	# mask off the mantissa
	cmpw	%r0,$0x8000		# to see if x is the reserved op
	beql	1f			# if so, return FALSE (0)
	movl	$1,%r0			# else return TRUE (1)
	ret
1:	clrl	%r0
	ret
END(_finite)

/* int isnan(double x);
 */
#if 0
ENTRY(isnan, 0)
	clrl	%r0
	ret
#endif

/* int isnanf(float x);
 */
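/*
 * The VAX floating-point formats have no NaNs (the reserved operand
 * plays that role), so isnanf() can always return false.
 */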
ENTRY(isnanf, 0)
	clrl	%r0
	ret
END(isnanf)

/*
 * double scalb(x,N)
 * double x; double N;
 */
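/*
 * Illustrative C sketch (not part of the original source): scalb(x, N)
 * adds N to the excess-128 exponent field of x; if the new exponent
 * leaves the representable range the assembly either underflows to
 * zero or reports overflow through infnan(ERANGE) and re-attaches the
 * sign.  The vax_scalb() helper is hypothetical and skips that error
 * path; assuming the D_floating layout described above:
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static double vax_scalb(double x, double N)
 *	{
 *		uint16_t w0;
 *		int e, n = (int)N;
 *
 *		memcpy(&w0, &x, sizeof(w0));
 *		e = (w0 >> 7) & 0xff;		// current exponent field
 *		if (e == 0)
 *			return x;		// zero or reserved operand
 *		if (n <= -300 || e + n <= 0)
 *			return 0.0;		// underflow
 *		if (n >= 300 || e + n > 255)
 *			return x;		// overflow: asm calls infnan(ERANGE)
 *		w0 = (uint16_t)((w0 & 0x807f) | ((e + n) << 7));
 *		memcpy(&x, &w0, sizeof(w0));
 *		return x;
 *	}
 */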
	.set	ERANGE,34

ENTRY(scalb, 0)
	movq	4(%ap),%r0
	bicl3	$0xffff807f,%r0,%r3
	beql	ret1			# 0 or reserved operand
	movq	12(%ap),%r4
	cvtdl	%r4, %r2
	cmpl	%r2,$0x12c
	bgeq	ovfl
	cmpl	%r2,$-0x12c
	bleq	unfl
	ashl	$7,%r2,%r2
	addl2	%r2,%r3
	bleq	unfl
	cmpl	%r3,$0x8000
	bgeq	ovfl
	addl2	%r2,%r0
	ret
ovfl:	pushl	$ERANGE
	calls	$1,_C_LABEL(infnan)	# if it returns
	bicw3	$0x7fff,4(%ap),%r2	# get the sign of input arg
	bisw2	%r2,%r0			# re-attach the sign to %r0/1
	ret
unfl:	movq	$0,%r0
ret1:	ret
END(scalb)

/*
 * DREM(X,Y)
 * RETURN X REM Y =X-N*Y, N=[X/Y] ROUNDED (ROUNDED TO EVEN IN THE HALF WAY CASE)
 * DOUBLE PRECISION (VAX D format 56 bits)
 * CODED IN VAX ASSEMBLY LANGUAGE BY K.C. NG, 4/8/85.
 */
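/*
 * Illustrative C sketch (not part of the original source): with the
 * default round-to-nearest-even mode, rint(x/y) is the N described
 * above, so the result is roughly x - N*y (C99 names this operation
 * remainder()).  The assembly below instead reduces x against y in
 * pieces small enough that every step is exact, rather than trusting a
 * single rounded division.  The drem_sketch() helper is hypothetical:
 *
 *	#include <math.h>
 *
 *	static double drem_sketch(double x, double y)
 *	{
 *		double n = rint(x / y);	// nearest integer, ties to even
 *		return x - n * y;	// illustration only; the exact drem
 *					// avoids the rounding error in x / y
 *	}
 */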
	.set	EDOM,33

ENTRY(drem, 0x0fc0)
	subl2	$12,%sp
	movq	4(%ap),%r0		#%r0=x
	movq	12(%ap),%r2		#%r2=y
	jeql	Rop			#if y=0 then generate reserved op fault
	bicw3	$0x007f,%r0,%r4		#check if x is Rop
	cmpw	%r4,$0x8000
	jeql	Ret			#if x is Rop then return Rop
	bicl3	$0x007f,%r2,%r4		#check if y is Rop
	cmpw	%r4,$0x8000
	jeql	Ret			#if y is Rop then return Rop
	bicw2	$0x8000,%r2		#y  := |y|
	movw	$0,-4(%fp)		#-4(%fp) = nx := 0
	cmpw	%r2,$0x1c80		#yexp ? 57
	bgtr	C1			#if yexp > 57 goto C1
	addw2	$0x1c80,%r2		#scale up y by 2**57
	movw	$0x1c80,-4(%fp)		#nx := 57 (exponent field)
C1:
	movw	-4(%fp),-8(%fp)		#-8(%fp) = nf := nx
	bicw3	$0x7fff,%r0,-12(%fp)	#-12(%fp) = sign of x
	bicw2	$0x8000,%r0		#x  := |x|
	movq	%r2,%r10		#y1 := y
	bicl2	$0xffff07ff,%r11	#clear the last 27 bits of y1
loop:
	cmpd	%r0,%r2			#x ? y
	bleq	E1			#if x <= y goto E1
 /* begin argument reduction */
	movq	%r2,%r4			#t =y
	movq	%r10,%r6		#t1=y1
	bicw3	$0x807f,%r0,%r8		#xexp= exponent of x
	bicw3	$0x807f,%r2,%r9		#yexp= exponent of y
	subw2	%r9,%r8			#xexp-yexp
	subw2	$0x0c80,%r8		#k=xexp-yexp-25(exponent bit field)
	blss	C2			#if k<0 goto C2
	addw2	%r8,%r4			#t +=k
	addw2	%r8,%r6			#t1+=k, scale up t and t1
C2:
	divd3	%r4,%r0,%r8		#x/t
	cvtdl	%r8,%r8			#n=[x/t] truncated
	cvtld	%r8,%r8			#float(n)
	subd2	%r6,%r4			#t:=t-t1
	muld2	%r8,%r4			#n*(t-t1)
	muld2	%r8,%r6			#n*t1
	subd2	%r6,%r0			#x-n*t1
	subd2	%r4,%r0			#(x-n*t1)-n*(t-t1)
	jbr	loop
E1:
	movw	-4(%fp),%r6		#%r6=nx
	beql	C3			#if nx=0 goto C3
	addw2	%r6,%r0			#x:=x*2**57 scale up x by nx
	movw	$0,-4(%fp)		#clear nx
	jbr	loop
C3:
	movq	%r2,%r4			#%r4 = y
	subw2	$0x80,%r4		#%r4 = y/2
	cmpd	%r0,%r4			#x:y/2
	blss	E2			#if x < y/2 goto E2
	bgtr	C4			#if x > y/2 goto C4
	cvtdl	%r8,%r8			#ifix(float(n))
	blbc	%r8,E2			#if the last bit is zero, goto E2
C4:
	subd2	%r2,%r0			#x-y
E2:
	xorw2	-12(%fp),%r0		#x^sign (exclusive or)
	movw	-8(%fp),%r6		#%r6=nf
	bicw3	$0x807f,%r0,%r8		#%r8=exponent of x
	bicw2	$0x7f80,%r0		#clear the exponent of x
	subw2	%r6,%r8			#%r8=xexp-nf
	bgtr	C5			#if xexp-nf is positive goto C5
	movw	$0,%r8			#clear %r8
	movq	$0,%r0			#x underflow to zero
C5:
	bisw2	%r8,%r0			/* put %r8 into x's exponent field */
	ret
Rop:					#Reserved operand
	pushl	$EDOM
	calls	$1,_C_LABEL(infnan)	#generate reserved op fault
	ret
Ret:
	movq	$0x8000,%r0		#propagate reserved op
	ret
END(drem)