/* libgcc routines for 68000 w/o floating-point hardware.
   Copyright (C) 1994-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* Use this one for any 680x0; assumes no floating point hardware.
   The trailing " '" appearing on some lines is for ANSI preprocessors.  Yuk.
   Some of this code comes from MINIX, via the folks at ericsson.
   D. V. Henkel-Wallace (gumby (at) cygnus.com) Fete Bastille, 1992
*/

/* These are predefined by new versions of GNU cpp.  */

/* Fallbacks for targets whose cpp does not predefine these.  The user-label
   prefix defaults to "_" (a.out-style symbol naming).  */
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__ _
#endif

#ifndef __REGISTER_PREFIX__
#define __REGISTER_PREFIX__
#endif

#ifndef __IMMEDIATE_PREFIX__
#define __IMMEDIATE_PREFIX__ #
#endif

/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)

/* Note that X is a function.  */

#ifdef __ELF__
#define FUNC(x) .type SYM(x),function
#else
/* The .proc pseudo-op is accepted, but ignored, by GAS.  We could just
   define this to the empty string for non-ELF systems, but defining it
   to .proc means that the information is available to the assembler if
   the need arises.  */
#define FUNC(x) .proc
#endif

/* Use the right prefix for registers.  */

#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)

/* Use the right prefix for immediate values.  */

#define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)

/* Register aliases: let the body of the file use bare register names while
   still honouring __REGISTER_PREFIX__ (e.g. "%" on some assemblers).  */
#define d0 REG (d0)
#define d1 REG (d1)
#define d2 REG (d2)
#define d3 REG (d3)
#define d4 REG (d4)
#define d5 REG (d5)
#define d6 REG (d6)
#define d7 REG (d7)
#define a0 REG (a0)
#define a1 REG (a1)
#define a2 REG (a2)
#define a3 REG (a3)
#define a4 REG (a4)
#define a5 REG (a5)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
#define pc REG (pc)

/* Provide a few macros to allow for PIC code support.
 * With PIC, data is stored A5 relative so we've got to take a bit of special
 * care to ensure that all loads of global data is via A5.  PIC also requires
 * jumps and subroutine calls to be PC relative rather than absolute.  We cheat
 * a little on this and in the PIC case, we use short offset branches and
 * hope that the final object code is within range (which it should be).
 */
#ifndef __PIC__

	/* Non PIC (absolute/relocatable) versions */

	.macro PICCALL addr
	jbsr	\addr
	.endm

	.macro PICJUMP addr
	jmp	\addr
	.endm

	.macro PICLEA sym, reg
	lea	\sym, \reg
	.endm

	.macro PICPEA sym, areg
	pea	\sym
	.endm

#else /* __PIC__ */

# if defined (__uClinux__)

	/* Versions for uClinux */

#  if defined(__ID_SHARED_LIBRARY__)

	/* -mid-shared-library versions: the library's GOT pointer lives at a
	   fixed offset from a5, so load it first, then go via the GOT.  */

	.macro PICLEA sym, reg
	movel	a5@(_current_shared_library_a5_offset_), \reg
	movel	\sym@GOT(\reg), \reg
	.endm

	.macro PICPEA sym, areg
	movel	a5@(_current_shared_library_a5_offset_), \areg
	movel	\sym@GOT(\areg), sp@-
	.endm

	.macro PICCALL addr
	PICLEA	\addr,a0
	jsr	a0@
	.endm

	.macro PICJUMP addr
	PICLEA	\addr,a0
	jmp	a0@
	.endm

#  else /* !__ID_SHARED_LIBRARY__ */

	/* Versions for -msep-data: a5 is itself the GOT pointer.  */

	.macro PICLEA sym, reg
	movel	\sym@GOT(a5), \reg
	.endm

	.macro PICPEA sym, areg
	movel	\sym@GOT(a5), sp@-
	.endm

	.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
	lea	\addr-.-8,a0
	jsr	pc@(a0)
#else
	jbsr	\addr
#endif
	.endm

	.macro PICJUMP addr
	/* ISA C has no bra.l instruction, and since this assembly file
	   gets assembled into multiple object files, we avoid the
	   bra instruction entirely.  */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
	lea	\addr-.-8,a0
	jmp	pc@(a0)
#else
	bra	\addr
#endif
	.endm

#  endif

# else /* !__uClinux__ */

	/* Versions for Linux: materialize the GOT base PC-relatively via
	   _GLOBAL_OFFSET_TABLE_@GOTPC (the -6 compensates for the length of
	   the movel/lea pair), then indirect through the GOT slot.  */

	.macro PICLEA sym, reg
	movel	#_GLOBAL_OFFSET_TABLE_@GOTPC, \reg
	lea	(-6, pc, \reg), \reg
	movel	\sym@GOT(\reg), \reg
	.endm

	.macro PICPEA sym, areg
	movel	#_GLOBAL_OFFSET_TABLE_@GOTPC, \areg
	lea	(-6, pc, \areg), \areg
	movel	\sym@GOT(\areg), sp@-
	.endm

	.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
	lea	\addr-.-8,a0
	jsr	pc@(a0)
#else
	jbsr	\addr@PLTPC
#endif
	.endm

	.macro PICJUMP addr
	/* ISA C has no bra.l instruction, and since this assembly file
	   gets assembled into multiple object files, we avoid the
	   bra instruction entirely.  */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
	lea	\addr-.-8,a0
	jmp	pc@(a0)
#else
	bra	\addr@PLTPC
#endif
	.endm

# endif
#endif /* __PIC__ */


#ifdef L_floatex

| This is an attempt at a decent floating point (single, double and
| extended double) code for the GNU C compiler. It should be easy to
| adapt to other compilers (but beware of the local labels!).

| Starting date: 21 October, 1990

| It is convenient to introduce the notation (s,e,f) for a floating point
| number, where s=sign, e=exponent, f=fraction. We will call a floating
| point number fpn to abbreviate, independently of the precision.
| Let MAX_EXP be in each case the maximum exponent (255 for floats, 1023
| for doubles and 16383 for long doubles). We then have the following
| different cases:
|  1. Normalized fpns have 0 < e < MAX_EXP. They correspond to
|     (-1)^s x 1.f x 2^(e-bias-1).
|  2. Denormalized fpns have e=0. They correspond to numbers of the form
|     (-1)^s x 0.f x 2^(-bias).
|  3. +/-INFINITY have e=MAX_EXP, f=0.
|  4. Quiet NaN (Not a Number) have all bits set.
|  5. Signaling NaN (Not a Number) have s=0, e=MAX_EXP, f=1.

|=============================================================================
|                                  exceptions
|=============================================================================

| This is the floating point condition code register (_fpCCR):
|
| struct {
|   short _exception_bits;
|   short _trap_enable_bits;
|   short _sticky_bits;
|   short _rounding_mode;
|   short _format;
|   short _last_operation;
|   union {
|     float sf;
|     double df;
|   } _operand1;
|   union {
|     float sf;
|     double df;
|   } _operand2;
| } _fpCCR;

	.data
	.even

	.globl	SYM (_fpCCR)

SYM (_fpCCR):
__exception_bits:
	.word	0
__trap_enable_bits:
	.word	0
__sticky_bits:
	.word	0
__rounding_mode:
	.word	ROUND_TO_NEAREST
__format:
	.word	NIL
__last_operation:
	.word	NOOP
__operand1:
	.long	0
	.long	0
__operand2:
	.long 	0
	.long	0

| Offsets of each field from the start of _fpCCR, for a0-relative access:
EBITS  = __exception_bits - SYM (_fpCCR)
TRAPE  = __trap_enable_bits - SYM (_fpCCR)
STICK  = __sticky_bits - SYM (_fpCCR)
ROUND  = __rounding_mode - SYM (_fpCCR)
FORMT  = __format - SYM (_fpCCR)
LASTO  = __last_operation - SYM (_fpCCR)
OPER1  = __operand1 - SYM (_fpCCR)
OPER2  = __operand2 - SYM (_fpCCR)

| The following exception types are supported (bit masks, OR-able):
INEXACT_RESULT 		= 0x0001
UNDERFLOW 		= 0x0002
OVERFLOW 		= 0x0004
DIVIDE_BY_ZERO 		= 0x0008
INVALID_OPERATION 	= 0x0010

| The allowed rounding modes are:
UNKNOWN           = -1
ROUND_TO_NEAREST  = 0 | round result to nearest representable value
ROUND_TO_ZERO     = 1 | round result towards zero
ROUND_TO_PLUS     = 2 | round result towards plus infinity
ROUND_TO_MINUS    = 3 | round result towards minus infinity

| The allowed values of format are:
NIL          = 0
SINGLE_FLOAT = 1
DOUBLE_FLOAT = 2
LONG_FLOAT   = 3

| The allowed values for the last operation are:
NOOP         = 0
ADD          = 1
MULTIPLY     = 2
DIVIDE       = 3
NEGATE       = 4
COMPARE      = 5
EXTENDSFDF   = 6
TRUNCDFSF    = 7

|=============================================================================
|                           __clear_sticky_bits
|=============================================================================

| The sticky bits are normally not cleared (thus the name), whereas the
| exception type and exception value reflect the last computation.
| This routine is provided to clear them (you can also write to _fpCCR,
| since it is globally visible).
| NOTE(review): the exported label is spelled __clear_sticky_bit (singular)
| while the prototype comment below says __clear_sticky_bits; the label is
| kept as-is since renaming it would break existing callers.

	.globl  SYM (__clear_sticky_bit)

	.text
	.even

| void __clear_sticky_bits(void);
SYM (__clear_sticky_bit):
	PICLEA	SYM (_fpCCR),a0
#ifndef __mcoldfire__
	movew	IMM (0),a0@(STICK)
#else
	clr.w	a0@(STICK)	| ColdFire has clr.w to memory; avoids the immediate
#endif
	rts

|=============================================================================
|                           $_exception_handler
|=============================================================================

	.globl  $_exception_handler

	.text
	.even

| This is the common exit point if an exception occurs.
| NOTE: it is NOT callable from C!
| It expects the exception type in d7, the format (SINGLE_FLOAT,
| DOUBLE_FLOAT or LONG_FLOAT) in d6, and the last operation code in d5.
| It sets the corresponding exception and sticky bits, and the format.
| Depending on the format it fills the corresponding slots for the
| operands which produced the exception (all this information is provided
| so if you write your own exception handlers you have enough information
| to deal with the problem).
| Then checks to see if the corresponding exception is trap-enabled,
| in which case it pushes the address of _fpCCR and traps through
| trap FPTRAP (15 for the moment).

FPTRAP = 15

$_exception_handler:
	PICLEA	SYM (_fpCCR),a0
	movew	d7,a0@(EBITS)	| set __exception_bits
#ifndef __mcoldfire__
	orw	d7,a0@(STICK)	| and __sticky_bits
#else
	movew	a0@(STICK),d4	| ColdFire lacks or-to-memory: read/modify/write
	orl	d7,d4
	movew	d4,a0@(STICK)
#endif
	movew	d6,a0@(FORMT)	| and __format
	movew	d5,a0@(LASTO)	| and __last_operation

| Now put the operands in place (one long for single, two for double):
#ifndef __mcoldfire__
	cmpw	IMM (SINGLE_FLOAT),d6
#else
	cmpl	IMM (SINGLE_FLOAT),d6
#endif
	beq	1f
	movel	a6@(8),a0@(OPER1)
	movel	a6@(12),a0@(OPER1+4)
	movel	a6@(16),a0@(OPER2)
	movel	a6@(20),a0@(OPER2+4)
	bra	2f
1:	movel	a6@(8),a0@(OPER1)
	movel	a6@(12),a0@(OPER2)
2:
| And check whether the exception is trap-enabled:
#ifndef __mcoldfire__
	andw	a0@(TRAPE),d7	| is exception trap-enabled?
#else
	clrl	d6
	movew	a0@(TRAPE),d6
	andl	d6,d7
#endif
	beq	1f		| no, exit
	PICPEA	SYM (_fpCCR),a1	| yes, push address of _fpCCR
	trap	IMM (FPTRAP)	| and trap
#ifndef __mcoldfire__
1:	moveml	sp@+,d2-d7	| restore data registers
#else
1:	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6		| and return
	rts
#endif /* L_floatex */

#ifdef  L_mulsi3
| long __mulsi3 (long a, long b);
| 32x32->32 multiply built from 16-bit muluw partial products:
|   a*b mod 2^32 = ((ahi*blo + alo*bhi) << 16) + alo*blo
| Arguments on the stack (big-endian): sp@(4)=ahi, sp@(6)=alo,
| sp@(8)=bhi, sp@(10)=blo.  Result in d0; clobbers d1 and flags.
	.text
	FUNC(__mulsi3)
	.globl	SYM (__mulsi3)
	.globl	SYM (__mulsi3_internal)
	.hidden	SYM (__mulsi3_internal)
SYM (__mulsi3):
SYM (__mulsi3_internal):
	movew	sp@(4), d0	/* x0 -> d0 */
	muluw	sp@(10), d0	/* x0*y1 */
	movew	sp@(6), d1	/* x1 -> d1 */
	muluw	sp@(8), d1	/* x1*y0 */
#ifndef __mcoldfire__
	addw	d1, d0		/* only the low 16 bits of the sum survive */
#else
	addl	d1, d0
#endif
	swap	d0		/* move cross terms to the high half... */
	clrw	d0		/* ...and zero the low half */
	movew	sp@(6), d1	/* x1 -> d1 */
	muluw	sp@(10), d1	/* x1*y1 */
	addl	d1, d0		/* add the low partial product */

	rts
#endif /* L_mulsi3 */

#ifdef  L_udivsi3
| unsigned long __udivsi3 (unsigned long dividend, unsigned long divisor);
| Quotient returned in d0.
| 68000 path: when the divisor fits in 16 bits, two 32/16 divu instructions
| produce the exact 32-bit quotient.  Otherwise both operands are shifted
| right until the divisor fits, a tentative quotient is computed, and a
| multiply-back check adjusts it down by one if it was too large.
| ColdFire path: bitwise non-restoring division (no 32/16 divu available).
	.text
	FUNC(__udivsi3)
	.globl	SYM (__udivsi3)
	.globl	SYM (__udivsi3_internal)
	.hidden	SYM (__udivsi3_internal)
SYM (__udivsi3):
SYM (__udivsi3_internal):
#ifndef __mcoldfire__
	movel	d2, sp@-
	movel	sp@(12), d1	/* d1 = divisor */
	movel	sp@(8), d0	/* d0 = dividend */

	cmpl	IMM (0x10000), d1 /* divisor >= 2 ^ 16 ?   */
	jcc	L3		/* then try next algorithm */
	movel	d0, d2
	clrw	d2
	swap	d2
	divu	d1, d2          /* high quotient in lower word */
	movew	d2, d0		/* save high quotient */
	swap	d0
	movew	sp@(10), d2	/* get low dividend + high rest */
	divu	d1, d2		/* low quotient */
	movew	d2, d0
	jra	L6

L3:	movel	d1, d2		/* use d2 as divisor backup */
L4:	lsrl	IMM (1), d1	/* shift divisor */
	lsrl	IMM (1), d0	/* shift dividend */
	cmpl	IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ?  */
	jcc	L4
	divu	d1, d0		/* now we have 16-bit divisor */
	andl	IMM (0xffff), d0 /* keep 16-bit quotient, discard remainder */

/* Multiply the 16-bit tentative quotient with the 32-bit divisor.  Because of
   the operand ranges, this might give a 33-bit product.  If this product is
   greater than the dividend, the tentative quotient was too large. */
	movel	d2, d1
	mulu	d0, d1		/* low part, 32 bits */
	swap	d2
	mulu	d0, d2		/* high part, at most 17 bits */
	swap	d2		/* align high part with low part */
	tstw	d2		/* high part 17 bits? */
	jne	L5		/* if 17 bits, quotient was too large */
	addl	d2, d1		/* add parts */
	jcs	L5		/* if sum is 33 bits, quotient was too large */
	cmpl	sp@(8), d1	/* compare the sum with the dividend */
	jls	L6		/* if sum <= dividend, quotient was not too large */
L5:	subql	IMM (1), d0	/* adjust quotient */

L6:	movel	sp@+, d2
	rts

#else /* __mcoldfire__ */

/* ColdFire implementation of non-restoring division algorithm from
   Hennessy & Patterson, Appendix A. */
	link	a6,IMM (-12)
	moveml	d2-d4,sp@
	movel	a6@(8),d0	| d0 = a (dividend, becomes quotient)
	movel	a6@(12),d1	| d1 = b (divisor)
	clrl	d2		| clear p
	moveq	IMM (31),d4	| 32 iterations
L1:	addl	d0,d0		| shift reg pair (p,a) one bit left
	addxl	d2,d2
	movl	d2,d3		| subtract b from p, store in tmp.
	subl	d1,d3
	jcs	L2		| if no carry,
	bset	IMM (0),d0	| set the low order bit of a to 1,
	movl	d3,d2		| and store tmp in p.
L2:	subql	IMM (1),d4
	jcc	L1
	moveml	sp@,d2-d4	| restore data registers
	unlk	a6		| and return
	rts
#endif /* __mcoldfire__ */

#endif /* L_udivsi3 */

#ifdef  L_divsi3
| long __divsi3 (long dividend, long divisor);
| Signed division: strip the signs, divide the magnitudes through
| __udivsi3_internal, then negate the quotient if exactly one operand
| was negative.  Result in d0.
	.text
	FUNC(__divsi3)
	.globl	SYM (__divsi3)
	.globl	SYM (__divsi3_internal)
	.hidden	SYM (__divsi3_internal)
SYM (__divsi3):
SYM (__divsi3_internal):
	movel	d2, sp@-

	moveq	IMM (1), d2	/* sign of result stored in d2 (=1 or =-1) */
	movel	sp@(12), d1	/* d1 = divisor */
	jpl	L1
	negl	d1
#ifndef __mcoldfire__
	negb	d2		/* change sign because divisor <0  */
#else
	negl	d2		/* change sign because divisor <0  */
#endif
L1:	movel	sp@(8), d0	/* d0 = dividend */
	jpl	L2
	negl	d0
#ifndef __mcoldfire__
	negb	d2
#else
	negl	d2
#endif

L2:	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__udivsi3_internal)	/* divide abs(dividend) by abs(divisor) */
	addql	IMM (8), sp

	tstb	d2		/* d2 negative iff signs differed */
	jpl	L3
	negl	d0

L3:	movel	sp@+, d2
	rts
#endif /* L_divsi3 */

#ifdef  L_umodsi3
| unsigned long __umodsi3 (unsigned long a, unsigned long b);
| Remainder computed as a - (a/b)*b using __udivsi3_internal and, on the
| 68000, __mulsi3_internal (ColdFire has a 32-bit mulsl).  Result in d0.
	.text
	FUNC(__umodsi3)
	.globl	SYM (__umodsi3)
SYM (__umodsi3):
	movel	sp@(8), d1	/* d1 = divisor */
	movel	sp@(4), d0	/* d0 = dividend */
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__udivsi3_internal)
	addql	IMM (8), sp
	movel	sp@(8), d1	/* d1 = divisor */
#ifndef __mcoldfire__
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__mulsi3_internal)	/* d0 = (a/b)*b */
	addql	IMM (8), sp
#else
	mulsl	d1,d0
#endif
	movel	sp@(4), d1	/* d1 = dividend */
	subl	d0, d1		/* d1 = a - (a/b)*b */
	movel	d1, d0
	rts
#endif /* L_umodsi3 */

#ifdef  L_modsi3
| long __modsi3 (long a, long b);
| Signed remainder computed as a - (a/b)*b using __divsi3_internal, so the
| result takes the sign of the dividend.  Result in d0.
	.text
	FUNC(__modsi3)
	.globl	SYM (__modsi3)
SYM (__modsi3):
	movel	sp@(8), d1	/* d1 = divisor */
	movel	sp@(4), d0	/* d0 = dividend */
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__divsi3_internal)
	addql	IMM (8), sp
	movel	sp@(8), d1	/* d1 = divisor */
#ifndef __mcoldfire__
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__mulsi3_internal)	/* d0 = (a/b)*b */
	addql	IMM (8), sp
#else
	mulsl	d1,d0
#endif
	movel	sp@(4), d1	/* d1 = dividend */
	subl	d0, d1		/* d1 = a - (a/b)*b */
	movel	d1, d0
	rts
#endif /* L_modsi3 */


#ifdef  L_double

	.globl	SYM (_fpCCR)
	.globl  $_exception_handler

QUIET_NaN      = 0xffffffff

| IEEE double-precision format parameters:
D_MAX_EXP      = 0x07ff
D_BIAS         = 1022
DBL_MAX_EXP    = D_MAX_EXP - D_BIAS
DBL_MIN_EXP    = 1 - D_BIAS
DBL_MANT_DIG   = 53

| Exception bit masks (must match the L_floatex definitions):
INEXACT_RESULT 		= 0x0001
UNDERFLOW 		= 0x0002
OVERFLOW 		= 0x0004
DIVIDE_BY_ZERO 		= 0x0008
INVALID_OPERATION 	= 0x0010

DOUBLE_FLOAT = 2

| Operation codes reported in _fpCCR.__last_operation:
NOOP         = 0
ADD          = 1
MULTIPLY     = 2
DIVIDE       = 3
NEGATE       = 4
COMPARE      = 5
EXTENDSFDF   = 6
TRUNCDFSF    = 7

UNKNOWN           = -1
ROUND_TO_NEAREST  = 0 | round result to nearest representable value
ROUND_TO_ZERO     = 1 | round result towards zero
ROUND_TO_PLUS     = 2 | round result towards plus infinity
ROUND_TO_MINUS    = 3 | round result towards minus infinity

| Entry points:

	.globl SYM (__adddf3)
	.globl SYM (__subdf3)
	.globl SYM (__muldf3)
	.globl SYM (__divdf3)
	.globl SYM (__negdf2)
	.globl SYM (__cmpdf2)
	.globl SYM (__cmpdf2_internal)
	.hidden SYM (__cmpdf2_internal)
    679   1.1       mrg 
    680   1.1       mrg 	.text
    681   1.1       mrg 	.even
    682   1.1       mrg 
    683   1.1       mrg | These are common routines to return and signal exceptions.
    684   1.1       mrg 
Ld$den:
| Return and signal a denormalized number.
| On entry d7 holds the result sign bit; d0/d1 hold the denormalized value.
	orl	d7,d0		| merge sign into the high word of the result
	movew	IMM (INEXACT_RESULT+UNDERFLOW),d7	| exception flags for the handler
	moveq	IMM (DOUBLE_FLOAT),d6			| tell the handler this is a double
	PICJUMP	$_exception_handler
    691   1.1       mrg 
Ld$infty:
Ld$overflow:
| Return a properly signed INFINITY and set the exception flags.
| On entry d7 holds the desired sign bit (bit 31).
	movel	IMM (0x7ff00000),d0	| +infinity: max exponent, zero fraction
	movel	IMM (0),d1
	orl	d7,d0			| apply the sign
	movew	IMM (INEXACT_RESULT+OVERFLOW),d7	| exception flags for the handler
	moveq	IMM (DOUBLE_FLOAT),d6			| tell the handler this is a double
	PICJUMP	$_exception_handler
    701   1.1       mrg 
Ld$underflow:
| Return 0 and set the exception flags.
	movel	IMM (0),d0
	movel	d0,d1		| whole 64-bit result is zero
	movew	IMM (INEXACT_RESULT+UNDERFLOW),d7	| exception flags for the handler
	moveq	IMM (DOUBLE_FLOAT),d6			| tell the handler this is a double
	PICJUMP	$_exception_handler
    709   1.1       mrg 
Ld$inop:
| Return a quiet NaN and set the exception flags (invalid operation).
	movel	IMM (QUIET_NaN),d0
	movel	d0,d1		| both halves of the NaN are all-ones
	movew	IMM (INEXACT_RESULT+INVALID_OPERATION),d7	| exception flags
	moveq	IMM (DOUBLE_FLOAT),d6				| format code
	PICJUMP	$_exception_handler
    717   1.1       mrg 
Ld$div$0:
| Return a properly signed INFINITY and set the exception flags
| (division by zero).  On entry d7 holds the desired sign bit.
	movel	IMM (0x7ff00000),d0	| +infinity: max exponent, zero fraction
	movel	IMM (0),d1
	orl	d7,d0			| apply the sign
	movew	IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7	| exception flags
	moveq	IMM (DOUBLE_FLOAT),d6			| format code
	PICJUMP	$_exception_handler
    726   1.1       mrg 
    727   1.1       mrg |=============================================================================
    728   1.1       mrg |=============================================================================
    729   1.1       mrg |                         double precision routines
    730   1.1       mrg |=============================================================================
    731   1.1       mrg |=============================================================================
    732   1.1       mrg 
    733   1.1       mrg | A double precision floating point number (double) has the format:
    734   1.1       mrg |
    735   1.1       mrg | struct _double {
    736   1.1       mrg |  unsigned int sign      : 1;  /* sign bit */
|  unsigned int exponent  : 11; /* biased exponent */
    738   1.1       mrg |  unsigned int fraction  : 52; /* fraction */
    739   1.1       mrg | } double;
    740   1.1       mrg |
    741   1.1       mrg | Thus sizeof(double) = 8 (64 bits).
    742   1.1       mrg |
    743   1.1       mrg | All the routines are callable from C programs, and return the result
    744   1.1       mrg | in the register pair d0-d1. They also preserve all registers except
    745   1.1       mrg | d0-d1 and a0-a1.
    746   1.1       mrg 
|=============================================================================
|                              __subdf3
|=============================================================================

| double __subdf3(double, double);
| Computed as a + (-b): flip b's sign bit in place on the stack, then
| fall straight into __adddf3.
	FUNC(__subdf3)
SYM (__subdf3):
	bchg	IMM (31),sp@(12) | change sign of second operand
				| and fall through, so we always add
|=============================================================================
|                              __adddf3
|=============================================================================

| double __adddf3(double, double);
| Frame layout on entry: a6@(8)..a6@(12) = a, a6@(16)..a6@(20) = b.
| Result returned in d0-d1.  This entry unpacks both operands, combines
| their sign bits into a0, and extracts a's exponent/fraction.
	FUNC(__adddf3)
SYM (__adddf3):
#ifndef __mcoldfire__
	link	a6,IMM (0)	| everything will be done in registers
	moveml	d2-d7,sp@-	| save all data registers and a2 (but d0-d1)
#else
	link	a6,IMM (-24)	| ColdFire: moveml cannot predecrement, use a fixed frame
	moveml	d2-d7,sp@
#endif
	movel	a6@(8),d0	| get first operand
	movel	a6@(12),d1	|
	movel	a6@(16),d2	| get second operand
	movel	a6@(20),d3	|

	movel	d0,d7		| get d0's sign bit in d7 '
	addl	d1,d1		| check and clear sign bit of a, and gain one
	addxl	d0,d0		| bit of extra precision
	beq	Ladddf$b	| if zero return second operand

	movel	d2,d6		| save sign in d6
	addl	d3,d3		| get rid of sign bit and gain one bit of
	addxl	d2,d2		| extra precision
	beq	Ladddf$a	| if zero return first operand

	andl	IMM (0x80000000),d7 | isolate a's sign bit '
        swap	d6		| and also b's sign bit '
#ifndef __mcoldfire__
	andw	IMM (0x8000),d6	|
	orw	d6,d7		| and combine them into d7, so that a's sign '
				| bit is in the high word and b's is in the '
				| low word, so d6 is free to be used
#else
	andl	IMM (0x8000),d6
	orl	d6,d7
#endif
	movel	d7,a0		| now save d7 into a0, so d7 is free to
                		| be used also

| Get the exponents and check for denormalized and/or infinity.

	movel	IMM (0x001fffff),d6 | mask for the fraction
	movel	IMM (0x00200000),d7 | mask to put hidden bit back
				| (one bit left of normal because of the shift above)
	movel	d0,d4		|
	andl	d6,d0		| get fraction in d0
	notl	d6		| make d6 into mask for the exponent
	andl	d6,d4		| get exponent in d4
	beq	Ladddf$a$den	| branch if a is denormalized
	cmpl	d6,d4		| check for INFINITY or NaN
	beq	Ladddf$nf       |
	orl	d7,d0		| and put hidden bit back
Ladddf$1:
| Move a's exponent from bit 20 of the high word down to bit 0 of d4,
| then extract b's exponent and fraction the same way a's were.
	swap	d4		| shift right exponent so that it starts
#ifndef __mcoldfire__
	lsrw	IMM (5),d4	| in bit 0 and not bit 20
#else
	lsrl	IMM (5),d4	| in bit 0 and not bit 20
#endif
| Now we have a's exponent in d4 and fraction in d0-d1 '
	movel	d2,d5		| save b to get exponent
	andl	d6,d5		| get exponent in d5
	beq	Ladddf$b$den	| branch if b is denormalized
	cmpl	d6,d5		| check for INFINITY or NaN
	beq	Ladddf$nf
	notl	d6		| make d6 into mask for the fraction again
	andl	d6,d2		| and get fraction in d2
	orl	d7,d2		| and put hidden bit back
Ladddf$2:
	swap	d5		| shift right exponent so that it starts
#ifndef __mcoldfire__
	lsrw	IMM (5),d5	| in bit 0 and not bit 20
#else
	lsrl	IMM (5),d5	| in bit 0 and not bit 20
#endif

| Now we have b's exponent in d5 and fraction in d2-d3. '
    837   1.1       mrg 
| The situation now is as follows: the signs are combined in a0, the
| numbers are in d0-d1 (a) and d2-d3 (b), and the exponents in d4 (a)
| and d5 (b). To do the rounding correctly we need to keep all the
| bits until the end, so we need to use d0-d1-d2-d3 for the first number
| and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
| exponents in a2-a3.

#ifndef __mcoldfire__
	moveml	a2-a3,sp@-	| save the address registers
#else
	movel	a2,sp@-
	movel	a3,sp@-
	movel	a4,sp@-	| a4 is extra scratch on ColdFire (no exg insn)
#endif

	movel	d4,a2		| save the exponents
	movel	d5,a3		|

	movel	IMM (0),d7	| and move the numbers around
	movel	d7,d6		| b becomes d4-d5 with guard bits d6-d7 = 0
	movel	d3,d5		|
	movel	d2,d4		|
	movel	d7,d3		| a keeps d0-d1 with guard bits d2-d3 = 0
	movel	d7,d2		|

| Here we shift the numbers until the exponents are the same, and put
| the largest exponent in a2.
#ifndef __mcoldfire__
	exg	d4,a2		| get exponents back
	exg	d5,a3		|
	cmpw	d4,d5		| compare the exponents
#else
	movel	d4,a4		| get exponents back (3-move swap: no exg on ColdFire)
	movel	a2,d4
	movel	a4,a2
	movel	d5,a4
	movel	a3,d5
	movel	a4,a3
	cmpl	d4,d5		| compare the exponents
#endif
	beq	Ladddf$3	| if equal don't shift '
	bhi	9f		| branch if second exponent is higher

| Here we have a's exponent larger than b's, so we have to shift b. We do
| this by using as counter d2:
1:	movew	d4,d2		| move largest exponent to d2
#ifndef __mcoldfire__
	subw	d5,d2		| and subtract second exponent
	exg	d4,a2		| get back the longs we saved
	exg	d5,a3		|
#else
	subl	d5,d2		| and subtract second exponent
	movel	d4,a4		| get back the longs we saved
	movel	a2,d4
	movel	a4,a2
	movel	d5,a4
	movel	a3,d5
	movel	a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
	cmpw	IMM (DBL_MANT_DIG+2),d2
#else
	cmpl	IMM (DBL_MANT_DIG+2),d2
#endif
	bge	Ladddf$b$small	| b is negligible: return a
#ifndef __mcoldfire__
	cmpw	IMM (32),d2	| if difference >= 32, shift by longs
#else
	cmpl	IMM (32),d2	| if difference >= 32, shift by longs
#endif
	bge	5f
2:
#ifndef __mcoldfire__
	cmpw	IMM (16),d2	| if difference >= 16, shift by words
#else
	cmpl	IMM (16),d2	| if difference >= 16, shift by words
#endif
	bge	6f
	bra	3f		| enter dbra loop

| Bit-at-a-time right shift of the 128-bit quantity d4:d5:d6:d7.
4:
#ifndef __mcoldfire__
	lsrl	IMM (1),d4
	roxrl	IMM (1),d5	| rotate the bit shifted out of d4 into d5, etc.
	roxrl	IMM (1),d6
	roxrl	IMM (1),d7
#else
	lsrl	IMM (1),d7	| ColdFire has no roxrl: propagate the carried
	btst	IMM (0),d6	| bit by hand from each long into the next
	beq	10f
	bset	IMM (31),d7
10:	lsrl	IMM (1),d6
	btst	IMM (0),d5
	beq	11f
	bset	IMM (31),d6
11:	lsrl	IMM (1),d5
	btst	IMM (0),d4
	beq	12f
	bset	IMM (31),d5
12:	lsrl	IMM (1),d4
#endif
3:
#ifndef __mcoldfire__
	dbra	d2,4b
#else
	subql	IMM (1),d2	| ColdFire has no dbra
	bpl	4b
#endif
	movel	IMM (0),d2	| a's guard bits are zero again
	movel	d2,d3
	bra	Ladddf$4
| Shift right by 32 bits at a time: move whole longs down.
5:
	movel	d6,d7
	movel	d5,d6
	movel	d4,d5
	movel	IMM (0),d4
#ifndef __mcoldfire__
	subw	IMM (32),d2
#else
	subl	IMM (32),d2
#endif
	bra	2b
| Shift right by 16 bits at a time via word moves and swaps.
6:
	movew	d6,d7
	swap	d7
	movew	d5,d6
	swap	d6
	movew	d4,d5
	swap	d5
	movew	IMM (0),d4
	swap	d4
#ifndef __mcoldfire__
	subw	IMM (16),d2
#else
	subl	IMM (16),d2
#endif
	bra	3b
    976   1.1       mrg 
| Here b's exponent is larger than a's, so we shift a (d0-d3) right by
| the difference, using d6 as the counter.  Mirror image of the code above.
9:
#ifndef __mcoldfire__
	exg	d4,d5
	movew	d4,d6
	subw	d5,d6		| keep d5 (largest exponent) in d4
	exg	d4,a2
	exg	d5,a3
#else
	movel	d5,d6		| swap d4/d5 and compute the difference
	movel	d4,d5
	movel	d6,d4
	subl	d5,d6
	movel	d4,a4		| restore the saved longs (3-move swap, no exg)
	movel	a2,d4
	movel	a4,a2
	movel	d5,a4
	movel	a3,d5
	movel	a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
#ifndef __mcoldfire__
	cmpw	IMM (DBL_MANT_DIG+2),d6
#else
	cmpl	IMM (DBL_MANT_DIG+2),d6
#endif
	bge	Ladddf$a$small	| a is negligible: return b
#ifndef __mcoldfire__
	cmpw	IMM (32),d6	| if difference >= 32, shift by longs
#else
	cmpl	IMM (32),d6	| if difference >= 32, shift by longs
#endif
	bge	5f
2:
#ifndef __mcoldfire__
	cmpw	IMM (16),d6	| if difference >= 16, shift by words
#else
	cmpl	IMM (16),d6	| if difference >= 16, shift by words
#endif
	bge	6f
	bra	3f		| enter dbra loop

| Bit-at-a-time right shift of the 128-bit quantity d0:d1:d2:d3.
4:
#ifndef __mcoldfire__
	lsrl	IMM (1),d0
	roxrl	IMM (1),d1	| rotate the bit shifted out of d0 into d1, etc.
	roxrl	IMM (1),d2
	roxrl	IMM (1),d3
#else
	lsrl	IMM (1),d3	| ColdFire has no roxrl: propagate the carried
	btst	IMM (0),d2	| bit by hand from each long into the next
	beq	10f
	bset	IMM (31),d3
10:	lsrl	IMM (1),d2
	btst	IMM (0),d1
	beq	11f
	bset	IMM (31),d2
11:	lsrl	IMM (1),d1
	btst	IMM (0),d0
	beq	12f
	bset	IMM (31),d1
12:	lsrl	IMM (1),d0
#endif
3:
#ifndef __mcoldfire__
	dbra	d6,4b
#else
	subql	IMM (1),d6	| ColdFire has no dbra
	bpl	4b
#endif
	movel	IMM (0),d7	| b's guard bits are zero again
	movel	d7,d6
	bra	Ladddf$4
| Shift right by 32 bits at a time: move whole longs down.
5:
	movel	d2,d3
	movel	d1,d2
	movel	d0,d1
	movel	IMM (0),d0
#ifndef __mcoldfire__
	subw	IMM (32),d6
#else
	subl	IMM (32),d6
#endif
	bra	2b
| Shift right by 16 bits at a time via word moves and swaps.
6:
	movew	d2,d3
	swap	d3
	movew	d1,d2
	swap	d2
	movew	d0,d1
	swap	d1
	movew	IMM (0),d0
	swap	d0
#ifndef __mcoldfire__
	subw	IMM (16),d6
#else
	subl	IMM (16),d6
#endif
	bra	3b
Ladddf$3:
| Exponents are equal: no shifting needed, just restore the saved longs
| (the common exponent stays parked in a2).
#ifndef __mcoldfire__
	exg	d4,a2
	exg	d5,a3
#else
	movel	d4,a4		| 3-move swap; ColdFire has no exg
	movel	a2,d4
	movel	a4,a2
	movel	d5,a4
	movel	a3,d5
	movel	a4,a3
#endif
Ladddf$4:
| Now we have the numbers in d0--d3 and d4--d7, the exponent in a2, and
| the signs in a0 (a's sign in the high word, b's in the low word).

| Here we have to decide whether to add or subtract the numbers:
#ifndef __mcoldfire__
	exg	d7,a0		| get the signs
	exg	d6,a3		| a3 is free to be used
#else
	movel	d7,a4
	movel	a0,d7
	movel	a4,a0
	movel	d6,a4
	movel	a3,d6
	movel	a4,a3
#endif
	movel	d7,d6		|
	movew	IMM (0),d7	| get a's sign in d7 '
	swap	d6              |
	movew	IMM (0),d6	| and b's sign in d6 '
	eorl	d7,d6		| compare the signs
	bmi	Lsubdf$0	| if the signs are different we have
				| to subtract
#ifndef __mcoldfire__
	exg	d7,a0		| else we add the numbers
	exg	d6,a3		|
#else
	movel	d7,a4
	movel	a0,d7
	movel	a4,a0
	movel	d6,a4
	movel	a3,d6
	movel	a4,a3
#endif
| 128-bit add with carry propagation, least significant long first.
	addl	d7,d3		|
	addxl	d6,d2		|
	addxl	d5,d1		|
	addxl	d4,d0           |

	movel	a2,d4		| return exponent to d4
	movel	a0,d7		|
	andl	IMM (0x80000000),d7 | d7 now has the sign

#ifndef __mcoldfire__
	moveml	sp@+,a2-a3
#else
	movel	sp@+,a4
	movel	sp@+,a3
	movel	sp@+,a2
#endif

| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
	btst	IMM (DBL_MANT_DIG+1),d0
	beq	1f
#ifndef __mcoldfire__
	lsrl	IMM (1),d0	| carry overflowed into the next bit:
	roxrl	IMM (1),d1	| shift everything right one place
	roxrl	IMM (1),d2
	roxrl	IMM (1),d3
	addw	IMM (1),d4	| and bump the exponent to compensate
#else
	lsrl	IMM (1),d3	| ColdFire: emulate roxrl by hand
	btst	IMM (0),d2
	beq	10f
	bset	IMM (31),d3
10:	lsrl	IMM (1),d2
	btst	IMM (0),d1
	beq	11f
	bset	IMM (31),d2
11:	lsrl	IMM (1),d1
	btst	IMM (0),d0
	beq	12f
	bset	IMM (31),d1
12:	lsrl	IMM (1),d0
	addl	IMM (1),d4	| and bump the exponent to compensate
#endif
1:
| Dispatch to the shared rounding routine for the current mode; it
| returns to Ladddf$5 via the address loaded into a0.
	lea	pc@(Ladddf$5),a0 | to return from rounding routine
	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
#ifdef __mcoldfire__
	clrl	d6
#endif
	movew	a1@(6),d6	| rounding mode in d6
	beq	Lround$to$nearest
#ifndef __mcoldfire__
	cmpw	IMM (ROUND_TO_PLUS),d6
#else
	cmpl	IMM (ROUND_TO_PLUS),d6
#endif
	bhi	Lround$to$minus
	blt	Lround$to$zero
	bra	Lround$to$plus
Ladddf$5:
| Put back the exponent and check for overflow
#ifndef __mcoldfire__
	cmpw	IMM (0x7ff),d4	| is the exponent big?
#else
	cmpl	IMM (0x7ff),d4	| is the exponent big?
#endif
	bge	1f
	bclr	IMM (DBL_MANT_DIG-1),d0	| clear the hidden bit
#ifndef __mcoldfire__
	lslw	IMM (4),d4	| put exponent back into position
#else
	lsll	IMM (4),d4	| put exponent back into position
#endif
	swap	d0		|
#ifndef __mcoldfire__
	orw	d4,d0		|
#else
	orl	d4,d0		|
#endif
	swap	d0		|
	bra	Ladddf$ret
1:
	moveq	IMM (ADD),d5	| operation code for the exception handler
	bra	Ld$overflow
   1207   1.1       mrg 
Lsubdf$0:
| Here we do the subtraction (operand signs differ).
#ifndef __mcoldfire__
	exg	d7,a0		| put sign back in a0
	exg	d6,a3		|
#else
	movel	d7,a4
	movel	a0,d7
	movel	a4,a0
	movel	d6,a4
	movel	a3,d6
	movel	a4,a3
#endif
| 128-bit subtract with borrow propagation, least significant long first.
	subl	d7,d3		|
	subxl	d6,d2		|
	subxl	d5,d1		|
	subxl	d4,d0		|
	beq	Ladddf$ret$1	| if zero just exit
	bpl	1f		| if positive skip the following
	movel	a0,d7		| result came out negative: flip the stored
	bchg	IMM (31),d7	| change sign bit in d7
	movel	d7,a0		|
	negl	d3		|
	negxl	d2		|
	negxl	d1              | and negate result
	negxl	d0              |
1:
	movel	a2,d4		| return exponent to d4
	movel	a0,d7
	andl	IMM (0x80000000),d7 | isolate sign bit
#ifndef __mcoldfire__
	moveml	sp@+,a2-a3	|
#else
	movel	sp@+,a4
	movel	sp@+,a3
	movel	sp@+,a2
#endif

| Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
	btst	IMM (DBL_MANT_DIG+1),d0
	beq	1f
#ifndef __mcoldfire__
	lsrl	IMM (1),d0	| shift everything right one place
	roxrl	IMM (1),d1
	roxrl	IMM (1),d2
	roxrl	IMM (1),d3
	addw	IMM (1),d4	| and bump the exponent to compensate
#else
	lsrl	IMM (1),d3	| ColdFire: emulate roxrl by hand
	btst	IMM (0),d2
	beq	10f
	bset	IMM (31),d3
10:	lsrl	IMM (1),d2
	btst	IMM (0),d1
	beq	11f
	bset	IMM (31),d2
11:	lsrl	IMM (1),d1
	btst	IMM (0),d0
	beq	12f
	bset	IMM (31),d1
12:	lsrl	IMM (1),d0
	addl	IMM (1),d4	| and bump the exponent to compensate
#endif
1:
| Dispatch to the shared rounding routine; it returns to Lsubdf$1 via a0.
	lea	pc@(Lsubdf$1),a0 | to return from rounding routine
	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
#ifdef __mcoldfire__
	clrl	d6
#endif
	movew	a1@(6),d6	| rounding mode in d6
	beq	Lround$to$nearest
#ifndef __mcoldfire__
	cmpw	IMM (ROUND_TO_PLUS),d6
#else
	cmpl	IMM (ROUND_TO_PLUS),d6
#endif
	bhi	Lround$to$minus
	blt	Lround$to$zero
	bra	Lround$to$plus
Lsubdf$1:
| Put back the exponent and sign (we don't have overflow). '
	bclr	IMM (DBL_MANT_DIG-1),d0	| clear the hidden bit
#ifndef __mcoldfire__
	lslw	IMM (4),d4	| put exponent back into position
#else
	lsll	IMM (4),d4	| put exponent back into position
#endif
	swap	d0		|
#ifndef __mcoldfire__
	orw	d4,d0		|
#else
	orl	d4,d0		|
#endif
	swap	d0		|
	bra	Ladddf$ret
   1306   1.1       mrg 
| If one of the numbers was too small (difference of exponents >=
| DBL_MANT_DIG+1) we return the other (and now we don't have to '
| check for finiteness or zero).
Ladddf$a$small:
| a is negligible: reload b from the stack frame and return it unchanged.
#ifndef __mcoldfire__
	moveml	sp@+,a2-a3
#else
	movel	sp@+,a4
	movel	sp@+,a3
	movel	sp@+,a2
#endif
	movel	a6@(16),d0	| second operand, as originally passed
	movel	a6@(20),d1
	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@	| clear the exception flags
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7	| restore data registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6		| and return
	rts
   1331   1.1       mrg 
Ladddf$b$small:
| b is negligible: reload a from the stack frame and return it unchanged.
#ifndef __mcoldfire__
	moveml	sp@+,a2-a3
#else
	movel	sp@+,a4
	movel	sp@+,a3
	movel	sp@+,a2
#endif
	movel	a6@(8),d0	| first operand, as originally passed
	movel	a6@(12),d1
	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@	| clear the exception flags
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7	| restore data registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6		| and return
	rts
   1353   1.1       mrg 
   1354   1.1       mrg Ladddf$a$den:
   1355   1.1       mrg 	movel	d7,d4		| d7 contains 0x00200000
   1356   1.1       mrg 	bra	Ladddf$1
   1357   1.1       mrg 
   1358   1.1       mrg Ladddf$b$den:
   1359   1.1       mrg 	movel	d7,d5           | d7 contains 0x00200000
   1360   1.1       mrg 	notl	d6
   1361   1.1       mrg 	bra	Ladddf$2
   1362   1.1       mrg 
   1363   1.1       mrg Ladddf$b:
   1364   1.1       mrg | Return b (if a is zero)
   1365   1.1       mrg 	movel	d2,d0
   1366   1.1       mrg 	movel	d3,d1
   1367   1.1       mrg 	bne	1f			| Check if b is -0
   1368   1.1       mrg 	cmpl	IMM (0x80000000),d0
   1369   1.1       mrg 	bne	1f
   1370   1.1       mrg 	andl	IMM (0x80000000),d7	| Use the sign of a
   1371   1.1       mrg 	clrl	d0
   1372   1.1       mrg 	bra	Ladddf$ret
   1373   1.1       mrg Ladddf$a:
   1374   1.1       mrg 	movel	a6@(8),d0
   1375   1.1       mrg 	movel	a6@(12),d1
   1376   1.1       mrg 1:
   1377   1.1       mrg 	moveq	IMM (ADD),d5
   1378   1.1       mrg | Check for NaN and +/-INFINITY.
   1379   1.1       mrg 	movel	d0,d7         		|
   1380   1.1       mrg 	andl	IMM (0x80000000),d7	|
   1381   1.1       mrg 	bclr	IMM (31),d0		|
   1382   1.1       mrg 	cmpl	IMM (0x7ff00000),d0	|
   1383   1.1       mrg 	bge	2f			|
   1384   1.1       mrg 	movel	d0,d0           	| check for zero, since we don't  '
   1385   1.1       mrg 	bne	Ladddf$ret		| want to return -0 by mistake
   1386  1.11       mrg 	movel	d1,d1			|
   1387  1.11       mrg 	bne	Ladddf$ret		|
   1388   1.1       mrg 	bclr	IMM (31),d7		|
   1389   1.1       mrg 	bra	Ladddf$ret		|
   1390   1.1       mrg 2:
   1391   1.1       mrg 	andl	IMM (0x000fffff),d0	| check for NaN (nonzero fraction)
   1392   1.1       mrg 	orl	d1,d0			|
   1393   1.1       mrg 	bne	Ld$inop         	|
   1394   1.1       mrg 	bra	Ld$infty		|
   1395   1.1       mrg 
   1396   1.1       mrg Ladddf$ret$1:
   1397   1.1       mrg #ifndef __mcoldfire__
   1398   1.1       mrg 	moveml	sp@+,a2-a3	| restore regs and exit
   1399   1.1       mrg #else
   1400   1.1       mrg 	movel	sp@+,a4
   1401   1.1       mrg 	movel	sp@+,a3
   1402   1.1       mrg 	movel	sp@+,a2
   1403   1.1       mrg #endif
   1404   1.1       mrg 
   1405   1.1       mrg Ladddf$ret:
   1406   1.1       mrg | Normal exit.
   1407   1.1       mrg 	PICLEA	SYM (_fpCCR),a0
   1408   1.1       mrg 	movew	IMM (0),a0@
   1409   1.1       mrg 	orl	d7,d0		| put sign bit back
   1410   1.1       mrg #ifndef __mcoldfire__
   1411   1.1       mrg 	moveml	sp@+,d2-d7
   1412   1.1       mrg #else
   1413   1.1       mrg 	moveml	sp@,d2-d7
   1414   1.1       mrg 	| XXX if frame pointer is ever removed, stack pointer must
   1415   1.1       mrg 	| be adjusted here.
   1416   1.1       mrg #endif
   1417   1.1       mrg 	unlk	a6
   1418   1.1       mrg 	rts
   1419   1.1       mrg 
   1420   1.1       mrg Ladddf$ret$den:
   1421   1.1       mrg | Return a denormalized number.
   1422   1.1       mrg #ifndef __mcoldfire__
   1423   1.1       mrg 	lsrl	IMM (1),d0	| shift right once more
   1424   1.1       mrg 	roxrl	IMM (1),d1	|
   1425   1.1       mrg #else
   1426   1.1       mrg 	lsrl	IMM (1),d1
   1427   1.1       mrg 	btst	IMM (0),d0
   1428   1.1       mrg 	beq	10f
   1429   1.1       mrg 	bset	IMM (31),d1
   1430   1.1       mrg 10:	lsrl	IMM (1),d0
   1431   1.1       mrg #endif
   1432   1.1       mrg 	bra	Ladddf$ret
   1433   1.1       mrg 
   1434   1.1       mrg Ladddf$nf:
   1435   1.1       mrg 	moveq	IMM (ADD),d5
   1436   1.1       mrg | This could be faster but it is not worth the effort, since it is not
   1437   1.1       mrg | executed very often. We sacrifice speed for clarity here.
   1438   1.1       mrg 	movel	a6@(8),d0	| get the numbers back (remember that we
   1439   1.1       mrg 	movel	a6@(12),d1	| did some processing already)
   1440   1.1       mrg 	movel	a6@(16),d2	|
   1441   1.1       mrg 	movel	a6@(20),d3	|
   1442   1.1       mrg 	movel	IMM (0x7ff00000),d4 | useful constant (INFINITY)
   1443   1.1       mrg 	movel	d0,d7		| save sign bits
   1444   1.1       mrg 	movel	d2,d6		|
   1445   1.1       mrg 	bclr	IMM (31),d0	| clear sign bits
   1446   1.1       mrg 	bclr	IMM (31),d2	|
   1447   1.1       mrg | We know that one of them is either NaN of +/-INFINITY
   1448   1.1       mrg | Check for NaN (if either one is NaN return NaN)
   1449   1.1       mrg 	cmpl	d4,d0		| check first a (d0)
   1450   1.1       mrg 	bhi	Ld$inop		| if d0 > 0x7ff00000 or equal and
   1451   1.1       mrg 	bne	2f
   1452   1.1       mrg 	tstl	d1		| d1 > 0, a is NaN
   1453   1.1       mrg 	bne	Ld$inop		|
   1454   1.1       mrg 2:	cmpl	d4,d2		| check now b (d1)
   1455   1.1       mrg 	bhi	Ld$inop		|
   1456   1.1       mrg 	bne	3f
   1457   1.1       mrg 	tstl	d3		|
   1458   1.1       mrg 	bne	Ld$inop		|
   1459   1.1       mrg 3:
   1460   1.1       mrg | Now comes the check for +/-INFINITY. We know that both are (maybe not
   1461   1.1       mrg | finite) numbers, but we have to check if both are infinite whether we
   1462   1.1       mrg | are adding or subtracting them.
   1463   1.1       mrg 	eorl	d7,d6		| to check sign bits
   1464   1.1       mrg 	bmi	1f
   1465   1.1       mrg 	andl	IMM (0x80000000),d7 | get (common) sign bit
   1466   1.1       mrg 	bra	Ld$infty
   1467   1.1       mrg 1:
   1468   1.1       mrg | We know one (or both) are infinite, so we test for equality between the
   1469   1.1       mrg | two numbers (if they are equal they have to be infinite both, so we
   1470   1.1       mrg | return NaN).
   1471   1.1       mrg 	cmpl	d2,d0		| are both infinite?
   1472   1.1       mrg 	bne	1f		| if d0 <> d2 they are not equal
   1473   1.1       mrg 	cmpl	d3,d1		| if d0 == d2 test d3 and d1
   1474   1.1       mrg 	beq	Ld$inop		| if equal return NaN
   1475   1.1       mrg 1:
   1476   1.1       mrg 	andl	IMM (0x80000000),d7 | get a's sign bit '
   1477   1.1       mrg 	cmpl	d4,d0		| test now for infinity
   1478   1.1       mrg 	beq	Ld$infty	| if a is INFINITY return with this sign
   1479   1.1       mrg 	bchg	IMM (31),d7	| else we know b is INFINITY and has
   1480   1.1       mrg 	bra	Ld$infty	| the opposite sign
   1481   1.1       mrg 
   1482   1.1       mrg |=============================================================================
   1483   1.1       mrg |                              __muldf3
   1484   1.1       mrg |=============================================================================
   1485   1.1       mrg 
   1486   1.1       mrg | double __muldf3(double, double);
   1487   1.1       mrg 	FUNC(__muldf3)
   1488   1.1       mrg SYM (__muldf3):
   1489   1.1       mrg #ifndef __mcoldfire__
   1490   1.1       mrg 	link	a6,IMM (0)
   1491   1.1       mrg 	moveml	d2-d7,sp@-
   1492   1.1       mrg #else
   1493   1.1       mrg 	link	a6,IMM (-24)
   1494   1.1       mrg 	moveml	d2-d7,sp@
   1495   1.1       mrg #endif
   1496   1.1       mrg 	movel	a6@(8),d0		| get a into d0-d1
   1497   1.1       mrg 	movel	a6@(12),d1		|
   1498   1.1       mrg 	movel	a6@(16),d2		| and b into d2-d3
   1499   1.1       mrg 	movel	a6@(20),d3		|
   1500   1.1       mrg 	movel	d0,d7			| d7 will hold the sign of the product
   1501   1.1       mrg 	eorl	d2,d7			|
   1502   1.1       mrg 	andl	IMM (0x80000000),d7	|
   1503   1.1       mrg 	movel	d7,a0			| save sign bit into a0
   1504   1.1       mrg 	movel	IMM (0x7ff00000),d7	| useful constant (+INFINITY)
   1505   1.1       mrg 	movel	d7,d6			| another (mask for fraction)
   1506   1.1       mrg 	notl	d6			|
   1507   1.1       mrg 	bclr	IMM (31),d0		| get rid of a's sign bit '
   1508   1.1       mrg 	movel	d0,d4			|
   1509   1.1       mrg 	orl	d1,d4			|
   1510   1.1       mrg 	beq	Lmuldf$a$0		| branch if a is zero
   1511   1.1       mrg 	movel	d0,d4			|
   1512   1.1       mrg 	bclr	IMM (31),d2		| get rid of b's sign bit '
   1513   1.1       mrg 	movel	d2,d5			|
   1514   1.1       mrg 	orl	d3,d5			|
   1515   1.1       mrg 	beq	Lmuldf$b$0		| branch if b is zero
   1516   1.1       mrg 	movel	d2,d5			|
   1517   1.1       mrg 	cmpl	d7,d0			| is a big?
   1518   1.1       mrg 	bhi	Lmuldf$inop		| if a is NaN return NaN
   1519   1.1       mrg 	beq	Lmuldf$a$nf		| we still have to check d1 and b ...
   1520   1.1       mrg 	cmpl	d7,d2			| now compare b with INFINITY
   1521   1.1       mrg 	bhi	Lmuldf$inop		| is b NaN?
   1522   1.1       mrg 	beq	Lmuldf$b$nf 		| we still have to check d3 ...
   1523   1.1       mrg | Here we have both numbers finite and nonzero (and with no sign bit).
   1524   1.1       mrg | Now we get the exponents into d4 and d5.
   1525   1.1       mrg 	andl	d7,d4			| isolate exponent in d4
   1526   1.1       mrg 	beq	Lmuldf$a$den		| if exponent zero, have denormalized
   1527   1.1       mrg 	andl	d6,d0			| isolate fraction
   1528   1.1       mrg 	orl	IMM (0x00100000),d0	| and put hidden bit back
   1529   1.1       mrg 	swap	d4			| I like exponents in the first byte
   1530   1.1       mrg #ifndef __mcoldfire__
   1531   1.1       mrg 	lsrw	IMM (4),d4		|
   1532   1.1       mrg #else
   1533   1.1       mrg 	lsrl	IMM (4),d4		|
   1534   1.1       mrg #endif
   1535   1.1       mrg Lmuldf$1:
   1536   1.1       mrg 	andl	d7,d5			|
   1537   1.1       mrg 	beq	Lmuldf$b$den		|
   1538   1.1       mrg 	andl	d6,d2			|
   1539   1.1       mrg 	orl	IMM (0x00100000),d2	| and put hidden bit back
   1540   1.1       mrg 	swap	d5			|
   1541   1.1       mrg #ifndef __mcoldfire__
   1542   1.1       mrg 	lsrw	IMM (4),d5		|
   1543   1.1       mrg #else
   1544   1.1       mrg 	lsrl	IMM (4),d5		|
   1545   1.1       mrg #endif
   1546   1.1       mrg Lmuldf$2:				|
   1547   1.1       mrg #ifndef __mcoldfire__
   1548   1.1       mrg 	addw	d5,d4			| add exponents
   1549   1.1       mrg 	subw	IMM (D_BIAS+1),d4	| and subtract bias (plus one)
   1550   1.1       mrg #else
   1551   1.1       mrg 	addl	d5,d4			| add exponents
   1552   1.1       mrg 	subl	IMM (D_BIAS+1),d4	| and subtract bias (plus one)
   1553   1.1       mrg #endif
   1554   1.1       mrg 
   1555   1.1       mrg | We are now ready to do the multiplication. The situation is as follows:
   1556   1.1       mrg | both a and b have bit 52 ( bit 20 of d0 and d2) set (even if they were
   1557   1.1       mrg | denormalized to start with!), which means that in the product bit 104
   1558   1.1       mrg | (which will correspond to bit 8 of the fourth long) is set.
   1559   1.1       mrg 
   1560   1.1       mrg | Here we have to do the product.
   1561   1.1       mrg | To do it we have to juggle the registers back and forth, as there are not
   1562   1.1       mrg | enough to keep everything in them. So we use the address registers to keep
   1563   1.1       mrg | some intermediate data.
   1564   1.1       mrg 
   1565   1.1       mrg #ifndef __mcoldfire__
   1566   1.1       mrg 	moveml	a2-a3,sp@-	| save a2 and a3 for temporary use
   1567   1.1       mrg #else
   1568   1.1       mrg 	movel	a2,sp@-
   1569   1.1       mrg 	movel	a3,sp@-
   1570   1.1       mrg 	movel	a4,sp@-
   1571   1.1       mrg #endif
   1572   1.1       mrg 	movel	IMM (0),a2	| a2 is a null register
   1573   1.1       mrg 	movel	d4,a3		| and a3 will preserve the exponent
   1574   1.1       mrg 
   1575   1.1       mrg | First, shift d2-d3 so bit 20 becomes bit 31:
   1576   1.1       mrg #ifndef __mcoldfire__
   1577   1.1       mrg 	rorl	IMM (5),d2	| rotate d2 5 places right
   1578   1.1       mrg 	swap	d2		| and swap it
   1579   1.1       mrg 	rorl	IMM (5),d3	| do the same thing with d3
   1580   1.1       mrg 	swap	d3		|
   1581   1.1       mrg 	movew	d3,d6		| get the rightmost 11 bits of d3
   1582   1.1       mrg 	andw	IMM (0x07ff),d6	|
   1583   1.1       mrg 	orw	d6,d2		| and put them into d2
   1584   1.1       mrg 	andw	IMM (0xf800),d3	| clear those bits in d3
   1585   1.1       mrg #else
   1586   1.1       mrg 	moveq	IMM (11),d7	| left shift d2 11 bits
   1587   1.1       mrg 	lsll	d7,d2
   1588   1.1       mrg 	movel	d3,d6		| get a copy of d3
   1589   1.1       mrg 	lsll	d7,d3		| left shift d3 11 bits
   1590   1.1       mrg 	andl	IMM (0xffe00000),d6 | get the top 11 bits of d3
   1591   1.1       mrg 	moveq	IMM (21),d7	| right shift them 21 bits
   1592   1.1       mrg 	lsrl	d7,d6
   1593   1.1       mrg 	orl	d6,d2		| stick them at the end of d2
   1594   1.1       mrg #endif
   1595   1.1       mrg 
   1596   1.1       mrg 	movel	d2,d6		| move b into d6-d7
   1597   1.1       mrg 	movel	d3,d7           | move a into d4-d5
   1598   1.1       mrg 	movel	d0,d4           | and clear d0-d1-d2-d3 (to put result)
   1599   1.1       mrg 	movel	d1,d5           |
   1600   1.1       mrg 	movel	IMM (0),d3	|
   1601   1.1       mrg 	movel	d3,d2           |
   1602   1.1       mrg 	movel	d3,d1           |
   1603   1.1       mrg 	movel	d3,d0	        |
   1604   1.1       mrg 
   1605   1.1       mrg | We use a1 as counter:
   1606   1.1       mrg 	movel	IMM (DBL_MANT_DIG-1),a1
   1607   1.1       mrg #ifndef __mcoldfire__
   1608   1.1       mrg 	exg	d7,a1
   1609   1.1       mrg #else
   1610   1.1       mrg 	movel	d7,a4
   1611   1.1       mrg 	movel	a1,d7
   1612   1.1       mrg 	movel	a4,a1
   1613   1.1       mrg #endif
   1614   1.1       mrg 
   1615   1.1       mrg 1:
   1616   1.1       mrg #ifndef __mcoldfire__
   1617   1.1       mrg 	exg	d7,a1		| put counter back in a1
   1618   1.1       mrg #else
   1619   1.1       mrg 	movel	d7,a4
   1620   1.1       mrg 	movel	a1,d7
   1621   1.1       mrg 	movel	a4,a1
   1622   1.1       mrg #endif
   1623   1.1       mrg 	addl	d3,d3		| shift sum once left
   1624   1.1       mrg 	addxl	d2,d2           |
   1625   1.1       mrg 	addxl	d1,d1           |
   1626   1.1       mrg 	addxl	d0,d0           |
   1627   1.1       mrg 	addl	d7,d7		|
   1628   1.1       mrg 	addxl	d6,d6		|
   1629   1.1       mrg 	bcc	2f		| if bit clear skip the following
   1630   1.1       mrg #ifndef __mcoldfire__
   1631   1.1       mrg 	exg	d7,a2		|
   1632   1.1       mrg #else
   1633   1.1       mrg 	movel	d7,a4
   1634   1.1       mrg 	movel	a2,d7
   1635   1.1       mrg 	movel	a4,a2
   1636   1.1       mrg #endif
   1637   1.1       mrg 	addl	d5,d3		| else add a to the sum
   1638   1.1       mrg 	addxl	d4,d2		|
   1639   1.1       mrg 	addxl	d7,d1		|
   1640   1.1       mrg 	addxl	d7,d0		|
   1641   1.1       mrg #ifndef __mcoldfire__
   1642   1.1       mrg 	exg	d7,a2		|
   1643   1.1       mrg #else
   1644   1.1       mrg 	movel	d7,a4
   1645   1.1       mrg 	movel	a2,d7
   1646   1.1       mrg 	movel	a4,a2
   1647   1.1       mrg #endif
   1648   1.1       mrg 2:
   1649   1.1       mrg #ifndef __mcoldfire__
   1650   1.1       mrg 	exg	d7,a1		| put counter in d7
   1651   1.1       mrg 	dbf	d7,1b		| decrement and branch
   1652   1.1       mrg #else
   1653   1.1       mrg 	movel	d7,a4
   1654   1.1       mrg 	movel	a1,d7
   1655   1.1       mrg 	movel	a4,a1
   1656   1.1       mrg 	subql	IMM (1),d7
   1657   1.1       mrg 	bpl	1b
   1658   1.1       mrg #endif
   1659   1.1       mrg 
   1660   1.1       mrg 	movel	a3,d4		| restore exponent
   1661   1.1       mrg #ifndef __mcoldfire__
   1662   1.1       mrg 	moveml	sp@+,a2-a3
   1663   1.1       mrg #else
   1664   1.1       mrg 	movel	sp@+,a4
   1665   1.1       mrg 	movel	sp@+,a3
   1666   1.1       mrg 	movel	sp@+,a2
   1667   1.1       mrg #endif
   1668   1.1       mrg 
   1669   1.1       mrg | Now we have the product in d0-d1-d2-d3, with bit 8 of d0 set. The
   1670   1.1       mrg | first thing to do now is to normalize it so bit 8 becomes bit
   1671   1.1       mrg | DBL_MANT_DIG-32 (to do the rounding); later we will shift right.
   1672   1.1       mrg 	swap	d0
   1673   1.1       mrg 	swap	d1
   1674   1.1       mrg 	movew	d1,d0
   1675   1.1       mrg 	swap	d2
   1676   1.1       mrg 	movew	d2,d1
   1677   1.1       mrg 	swap	d3
   1678   1.1       mrg 	movew	d3,d2
   1679   1.1       mrg 	movew	IMM (0),d3
   1680   1.1       mrg #ifndef __mcoldfire__
   1681   1.1       mrg 	lsrl	IMM (1),d0
   1682   1.1       mrg 	roxrl	IMM (1),d1
   1683   1.1       mrg 	roxrl	IMM (1),d2
   1684   1.1       mrg 	roxrl	IMM (1),d3
   1685   1.1       mrg 	lsrl	IMM (1),d0
   1686   1.1       mrg 	roxrl	IMM (1),d1
   1687   1.1       mrg 	roxrl	IMM (1),d2
   1688   1.1       mrg 	roxrl	IMM (1),d3
   1689   1.1       mrg 	lsrl	IMM (1),d0
   1690   1.1       mrg 	roxrl	IMM (1),d1
   1691   1.1       mrg 	roxrl	IMM (1),d2
   1692   1.1       mrg 	roxrl	IMM (1),d3
   1693   1.1       mrg #else
   1694   1.1       mrg 	moveq	IMM (29),d6
   1695   1.1       mrg 	lsrl	IMM (3),d3
   1696   1.1       mrg 	movel	d2,d7
   1697   1.1       mrg 	lsll	d6,d7
   1698   1.1       mrg 	orl	d7,d3
   1699   1.1       mrg 	lsrl	IMM (3),d2
   1700   1.1       mrg 	movel	d1,d7
   1701   1.1       mrg 	lsll	d6,d7
   1702   1.1       mrg 	orl	d7,d2
   1703   1.1       mrg 	lsrl	IMM (3),d1
   1704   1.1       mrg 	movel	d0,d7
   1705   1.1       mrg 	lsll	d6,d7
   1706   1.1       mrg 	orl	d7,d1
   1707   1.1       mrg 	lsrl	IMM (3),d0
   1708   1.1       mrg #endif
   1709   1.1       mrg 
   1710   1.1       mrg | Now round, check for over- and underflow, and exit.
   1711   1.1       mrg 	movel	a0,d7		| get sign bit back into d7
   1712   1.1       mrg 	moveq	IMM (MULTIPLY),d5
   1713   1.1       mrg 
   1714   1.1       mrg 	btst	IMM (DBL_MANT_DIG+1-32),d0
   1715   1.1       mrg 	beq	Lround$exit
   1716   1.1       mrg #ifndef __mcoldfire__
   1717   1.1       mrg 	lsrl	IMM (1),d0
   1718   1.1       mrg 	roxrl	IMM (1),d1
   1719   1.1       mrg 	addw	IMM (1),d4
   1720   1.1       mrg #else
   1721   1.1       mrg 	lsrl	IMM (1),d1
   1722   1.1       mrg 	btst	IMM (0),d0
   1723   1.1       mrg 	beq	10f
   1724   1.1       mrg 	bset	IMM (31),d1
   1725   1.1       mrg 10:	lsrl	IMM (1),d0
   1726   1.1       mrg 	addl	IMM (1),d4
   1727   1.1       mrg #endif
   1728   1.1       mrg 	bra	Lround$exit
   1729   1.1       mrg 
   1730   1.1       mrg Lmuldf$inop:
   1731   1.1       mrg 	moveq	IMM (MULTIPLY),d5
   1732   1.1       mrg 	bra	Ld$inop
   1733   1.1       mrg 
   1734   1.1       mrg Lmuldf$b$nf:
   1735   1.1       mrg 	moveq	IMM (MULTIPLY),d5
   1736   1.1       mrg 	movel	a0,d7		| get sign bit back into d7
   1737   1.1       mrg 	tstl	d3		| we know d2 == 0x7ff00000, so check d3
   1738   1.1       mrg 	bne	Ld$inop		| if d3 <> 0 b is NaN
   1739   1.1       mrg 	bra	Ld$overflow	| else we have overflow (since a is finite)
   1740   1.1       mrg 
   1741   1.1       mrg Lmuldf$a$nf:
   1742   1.1       mrg 	moveq	IMM (MULTIPLY),d5
   1743   1.1       mrg 	movel	a0,d7		| get sign bit back into d7
   1744   1.1       mrg 	tstl	d1		| we know d0 == 0x7ff00000, so check d1
   1745   1.1       mrg 	bne	Ld$inop		| if d1 <> 0 a is NaN
   1746   1.1       mrg 	bra	Ld$overflow	| else signal overflow
   1747   1.1       mrg 
   1748   1.1       mrg | If either number is zero return zero, unless the other is +/-INFINITY or
   1749   1.1       mrg | NaN, in which case we return NaN.
   1750   1.1       mrg Lmuldf$b$0:
   1751   1.1       mrg 	moveq	IMM (MULTIPLY),d5
   1752   1.1       mrg #ifndef __mcoldfire__
   1753   1.1       mrg 	exg	d2,d0		| put b (==0) into d0-d1
   1754   1.1       mrg 	exg	d3,d1		| and a (with sign bit cleared) into d2-d3
   1755   1.1       mrg 	movel	a0,d0		| set result sign
   1756   1.1       mrg #else
   1757   1.1       mrg 	movel	d0,d2		| put a into d2-d3
   1758   1.1       mrg 	movel	d1,d3
   1759   1.1       mrg 	movel	a0,d0		| put result zero into d0-d1
   1760   1.1       mrg 	movq	IMM(0),d1
   1761   1.1       mrg #endif
   1762   1.1       mrg 	bra	1f
   1763   1.1       mrg Lmuldf$a$0:
   1764   1.1       mrg 	movel	a0,d0		| set result sign
   1765   1.1       mrg 	movel	a6@(16),d2	| put b into d2-d3 again
   1766   1.1       mrg 	movel	a6@(20),d3	|
   1767   1.1       mrg 	bclr	IMM (31),d2	| clear sign bit
   1768   1.1       mrg 1:	cmpl	IMM (0x7ff00000),d2 | check for non-finiteness
   1769   1.1       mrg 	bge	Ld$inop		| in case NaN or +/-INFINITY return NaN
   1770   1.1       mrg 	PICLEA	SYM (_fpCCR),a0
   1771   1.1       mrg 	movew	IMM (0),a0@
   1772   1.1       mrg #ifndef __mcoldfire__
   1773   1.1       mrg 	moveml	sp@+,d2-d7
   1774   1.1       mrg #else
   1775   1.1       mrg 	moveml	sp@,d2-d7
   1776   1.1       mrg 	| XXX if frame pointer is ever removed, stack pointer must
   1777   1.1       mrg 	| be adjusted here.
   1778   1.1       mrg #endif
   1779   1.1       mrg 	unlk	a6
   1780   1.1       mrg 	rts
   1781   1.1       mrg 
   1782   1.1       mrg | If a number is denormalized we put an exponent of 1 but do not put the
   1783   1.1       mrg | hidden bit back into the fraction; instead we shift left until bit 21
   1784   1.1       mrg | (the hidden bit) is set, adjusting the exponent accordingly. We do this
   1785   1.1       mrg | to ensure that the product of the fractions is close to 1.
   1786   1.1       mrg Lmuldf$a$den:
   1787   1.1       mrg 	movel	IMM (1),d4
   1788   1.1       mrg 	andl	d6,d0
   1789   1.1       mrg 1:	addl	d1,d1           | shift a left until bit 20 is set
   1790   1.1       mrg 	addxl	d0,d0		|
   1791   1.1       mrg #ifndef __mcoldfire__
   1792   1.1       mrg 	subw	IMM (1),d4	| and adjust exponent
   1793   1.1       mrg #else
   1794   1.1       mrg 	subl	IMM (1),d4	| and adjust exponent
   1795   1.1       mrg #endif
   1796   1.1       mrg 	btst	IMM (20),d0	|
   1797   1.1       mrg 	bne	Lmuldf$1        |
   1798   1.1       mrg 	bra	1b
   1799   1.1       mrg 
   1800   1.1       mrg Lmuldf$b$den:
   1801   1.1       mrg 	movel	IMM (1),d5
   1802   1.1       mrg 	andl	d6,d2
   1803   1.1       mrg 1:	addl	d3,d3		| shift b left until bit 20 is set
   1804   1.1       mrg 	addxl	d2,d2		|
   1805   1.1       mrg #ifndef __mcoldfire__
   1806   1.1       mrg 	subw	IMM (1),d5	| and adjust exponent
   1807   1.1       mrg #else
   1808   1.1       mrg 	subql	IMM (1),d5	| and adjust exponent
   1809   1.1       mrg #endif
   1810   1.1       mrg 	btst	IMM (20),d2	|
   1811   1.1       mrg 	bne	Lmuldf$2	|
   1812   1.1       mrg 	bra	1b
   1813   1.1       mrg 
   1814   1.1       mrg 
   1815   1.1       mrg |=============================================================================
   1816   1.1       mrg |                              __divdf3
   1817   1.1       mrg |=============================================================================
   1818   1.1       mrg 
   1819   1.1       mrg | double __divdf3(double, double);
   1820   1.1       mrg 	FUNC(__divdf3)
   1821   1.1       mrg SYM (__divdf3):
   1822   1.1       mrg #ifndef __mcoldfire__
   1823   1.1       mrg 	link	a6,IMM (0)
   1824   1.1       mrg 	moveml	d2-d7,sp@-
   1825   1.1       mrg #else
   1826   1.1       mrg 	link	a6,IMM (-24)
   1827   1.1       mrg 	moveml	d2-d7,sp@
   1828   1.1       mrg #endif
   1829   1.1       mrg 	movel	a6@(8),d0	| get a into d0-d1
   1830   1.1       mrg 	movel	a6@(12),d1	|
   1831   1.1       mrg 	movel	a6@(16),d2	| and b into d2-d3
   1832   1.1       mrg 	movel	a6@(20),d3	|
   1833   1.1       mrg 	movel	d0,d7		| d7 will hold the sign of the result
   1834   1.1       mrg 	eorl	d2,d7		|
   1835   1.1       mrg 	andl	IMM (0x80000000),d7
   1836   1.1       mrg 	movel	d7,a0		| save sign into a0
   1837   1.1       mrg 	movel	IMM (0x7ff00000),d7 | useful constant (+INFINITY)
   1838   1.1       mrg 	movel	d7,d6		| another (mask for fraction)
   1839   1.1       mrg 	notl	d6		|
   1840   1.1       mrg 	bclr	IMM (31),d0	| get rid of a's sign bit '
   1841   1.1       mrg 	movel	d0,d4		|
   1842   1.1       mrg 	orl	d1,d4		|
   1843   1.1       mrg 	beq	Ldivdf$a$0	| branch if a is zero
   1844   1.1       mrg 	movel	d0,d4		|
   1845   1.1       mrg 	bclr	IMM (31),d2	| get rid of b's sign bit '
   1846   1.1       mrg 	movel	d2,d5		|
   1847   1.1       mrg 	orl	d3,d5		|
   1848   1.1       mrg 	beq	Ldivdf$b$0	| branch if b is zero
   1849   1.1       mrg 	movel	d2,d5
   1850   1.1       mrg 	cmpl	d7,d0		| is a big?
   1851   1.1       mrg 	bhi	Ldivdf$inop	| if a is NaN return NaN
   1852   1.1       mrg 	beq	Ldivdf$a$nf	| if d0 == 0x7ff00000 we check d1
   1853   1.1       mrg 	cmpl	d7,d2		| now compare b with INFINITY
   1854   1.1       mrg 	bhi	Ldivdf$inop	| if b is NaN return NaN
   1855   1.1       mrg 	beq	Ldivdf$b$nf	| if d2 == 0x7ff00000 we check d3
   1856   1.1       mrg | Here we have both numbers finite and nonzero (and with no sign bit).
   1857   1.1       mrg | Now we get the exponents into d4 and d5 and normalize the numbers to
   1858   1.1       mrg | ensure that the ratio of the fractions is around 1. We do this by
   1859   1.1       mrg | making sure that both numbers have bit #DBL_MANT_DIG-32-1 (hidden bit)
   1860   1.1       mrg | set, even if they were denormalized to start with.
   1861   1.1       mrg | Thus, the result will satisfy: 2 > result > 1/2.
   1862   1.1       mrg 	andl	d7,d4		| and isolate exponent in d4
   1863   1.1       mrg 	beq	Ldivdf$a$den	| if exponent is zero we have a denormalized
   1864   1.1       mrg 	andl	d6,d0		| and isolate fraction
   1865   1.1       mrg 	orl	IMM (0x00100000),d0 | and put hidden bit back
   1866   1.1       mrg 	swap	d4		| I like exponents in the first byte
   1867   1.1       mrg #ifndef __mcoldfire__
   1868   1.1       mrg 	lsrw	IMM (4),d4	|
   1869   1.1       mrg #else
   1870   1.1       mrg 	lsrl	IMM (4),d4	|
   1871   1.1       mrg #endif
   1872   1.1       mrg Ldivdf$1:			|
   1873   1.1       mrg 	andl	d7,d5		|
   1874   1.1       mrg 	beq	Ldivdf$b$den	|
   1875   1.1       mrg 	andl	d6,d2		|
   1876   1.1       mrg 	orl	IMM (0x00100000),d2
   1877   1.1       mrg 	swap	d5		|
   1878   1.1       mrg #ifndef __mcoldfire__
   1879   1.1       mrg 	lsrw	IMM (4),d5	|
   1880   1.1       mrg #else
   1881   1.1       mrg 	lsrl	IMM (4),d5	|
   1882   1.1       mrg #endif
   1883   1.1       mrg Ldivdf$2:			|
   1884   1.1       mrg #ifndef __mcoldfire__
   1885   1.1       mrg 	subw	d5,d4		| subtract exponents
   1886   1.1       mrg 	addw	IMM (D_BIAS),d4	| and add bias
   1887   1.1       mrg #else
   1888   1.1       mrg 	subl	d5,d4		| subtract exponents
   1889   1.1       mrg 	addl	IMM (D_BIAS),d4	| and add bias
   1890   1.1       mrg #endif
   1891   1.1       mrg 
   1892   1.1       mrg | We are now ready to do the division. We have prepared things in such a way
   1893   1.1       mrg | that the ratio of the fractions will be less than 2 but greater than 1/2.
   1894   1.1       mrg | At this point the registers in use are:
   1895   1.1       mrg | d0-d1	hold a (first operand, bit DBL_MANT_DIG-32=0, bit
   1896   1.1       mrg | DBL_MANT_DIG-1-32=1)
   1897   1.1       mrg | d2-d3	hold b (second operand, bit DBL_MANT_DIG-32=1)
   1898   1.1       mrg | d4	holds the difference of the exponents, corrected by the bias
   1899   1.1       mrg | a0	holds the sign of the ratio
   1900   1.1       mrg 
   1901   1.1       mrg | To do the rounding correctly we need to keep information about the
   1902   1.1       mrg | nonsignificant bits. One way to do this would be to do the division
   1903   1.1       mrg | using four registers; another is to use two registers (as originally
   1904   1.1       mrg | I did), but use a sticky bit to preserve information about the
   1905   1.1       mrg | fractional part. Note that we can keep that info in a1, which is not
   1906   1.1       mrg | used.
   1907   1.1       mrg 	movel	IMM (0),d6	| d6-d7 will hold the result
   1908   1.1       mrg 	movel	d6,d7		|
   1909   1.1       mrg 	movel	IMM (0),a1	| and a1 will hold the sticky bit
   1910   1.1       mrg 
   1911   1.1       mrg 	movel	IMM (DBL_MANT_DIG-32+1),d5
   1912   1.1       mrg 
   1913   1.1       mrg 1:	cmpl	d0,d2		| is a < b?
   1914   1.1       mrg 	bhi	3f		| if b > a skip the following
   1915   1.1       mrg 	beq	4f		| if d0==d2 check d1 and d3
   1916   1.1       mrg 2:	subl	d3,d1		|
   1917   1.1       mrg 	subxl	d2,d0		| a <-- a - b
   1918   1.1       mrg 	bset	d5,d6		| set the corresponding bit in d6
   1919   1.1       mrg 3:	addl	d1,d1		| shift a by 1
   1920   1.1       mrg 	addxl	d0,d0		|
   1921   1.1       mrg #ifndef __mcoldfire__
   1922   1.1       mrg 	dbra	d5,1b		| and branch back
   1923   1.1       mrg #else
   1924   1.1       mrg 	subql	IMM (1), d5
   1925   1.1       mrg 	bpl	1b
   1926   1.1       mrg #endif
   1927   1.1       mrg 	bra	5f
   1928   1.1       mrg 4:	cmpl	d1,d3		| here d0==d2, so check d1 and d3
   1929   1.1       mrg 	bhi	3b		| if d1 > d2 skip the subtraction
   1930   1.1       mrg 	bra	2b		| else go do it
   1931   1.1       mrg 5:
   1932   1.1       mrg | Here we have to start setting the bits in the second long.
   1933   1.1       mrg 	movel	IMM (31),d5	| again d5 is counter
   1934   1.1       mrg 
   1935   1.1       mrg 1:	cmpl	d0,d2		| is a < b?
   1936   1.1       mrg 	bhi	3f		| if b > a skip the following
   1937   1.1       mrg 	beq	4f		| if d0==d2 check d1 and d3
   1938   1.1       mrg 2:	subl	d3,d1		|
   1939   1.1       mrg 	subxl	d2,d0		| a <-- a - b
   1940   1.1       mrg 	bset	d5,d7		| set the corresponding bit in d7
   1941   1.1       mrg 3:	addl	d1,d1		| shift a by 1
   1942   1.1       mrg 	addxl	d0,d0		|
   1943   1.1       mrg #ifndef __mcoldfire__
   1944   1.1       mrg 	dbra	d5,1b		| and branch back
   1945   1.1       mrg #else
   1946   1.1       mrg 	subql	IMM (1), d5
   1947   1.1       mrg 	bpl	1b
   1948   1.1       mrg #endif
   1949   1.1       mrg 	bra	5f
   1950   1.1       mrg 4:	cmpl	d1,d3		| here d0==d2, so check d1 and d3
   1951   1.1       mrg 	bhi	3b		| if d1 > d2 skip the subtraction
   1952   1.1       mrg 	bra	2b		| else go do it
   1953   1.1       mrg 5:
   1954   1.1       mrg | Now go ahead checking until we hit a one, which we store in d2.
   1955   1.1       mrg 	movel	IMM (DBL_MANT_DIG),d5
   1956   1.1       mrg 1:	cmpl	d2,d0		| is a < b?
   1957   1.1       mrg 	bhi	4f		| if b < a, exit
   1958   1.1       mrg 	beq	3f		| if d0==d2 check d1 and d3
   1959   1.1       mrg 2:	addl	d1,d1		| shift a by 1
   1960   1.1       mrg 	addxl	d0,d0		|
   1961   1.1       mrg #ifndef __mcoldfire__
   1962   1.1       mrg 	dbra	d5,1b		| and branch back
   1963   1.1       mrg #else
   1964   1.1       mrg 	subql	IMM (1), d5
   1965   1.1       mrg 	bpl	1b
   1966   1.1       mrg #endif
   1967   1.1       mrg 	movel	IMM (0),d2	| here no sticky bit was found
   1968   1.1       mrg 	movel	d2,d3
   1969   1.1       mrg 	bra	5f
   1970   1.1       mrg 3:	cmpl	d1,d3		| here d0==d2, so check d1 and d3
   1971   1.1       mrg 	bhi	2b		| if d1 > d2 go back
   1972   1.1       mrg 4:
   1973   1.1       mrg | Here put the sticky bit in d2-d3 (in the position which actually corresponds
   1974   1.1       mrg | to it; if you don't do this the algorithm loses in some cases). '
   1975   1.1       mrg 	movel	IMM (0),d2
   1976   1.1       mrg 	movel	d2,d3
   1977   1.1       mrg #ifndef __mcoldfire__
   1978   1.1       mrg 	subw	IMM (DBL_MANT_DIG),d5
   1979   1.1       mrg 	addw	IMM (63),d5
   1980   1.1       mrg 	cmpw	IMM (31),d5
   1981   1.1       mrg #else
   1982   1.1       mrg 	subl	IMM (DBL_MANT_DIG),d5
   1983   1.1       mrg 	addl	IMM (63),d5
   1984   1.1       mrg 	cmpl	IMM (31),d5
   1985   1.1       mrg #endif
   1986   1.1       mrg 	bhi	2f
   1987   1.1       mrg 1:	bset	d5,d3
   1988   1.1       mrg 	bra	5f
   1989   1.1       mrg #ifndef __mcoldfire__
   1990   1.1       mrg 	subw	IMM (32),d5
   1991   1.1       mrg #else
   1992   1.1       mrg 	subl	IMM (32),d5
   1993   1.1       mrg #endif
   1994   1.1       mrg 2:	bset	d5,d2
   1995   1.1       mrg 5:
   1996   1.1       mrg | Finally we are finished! Move the longs in the address registers to
   1997   1.1       mrg | their final destination:
   1998   1.1       mrg 	movel	d6,d0
   1999   1.1       mrg 	movel	d7,d1
   2000   1.1       mrg 	movel	IMM (0),d3
   2001   1.1       mrg 
   2002   1.1       mrg | Here we have finished the division, with the result in d0-d1-d2-d3, with
   2003   1.1       mrg | 2^21 <= d6 < 2^23. Thus bit 23 is not set, but bit 22 could be set.
   2004   1.1       mrg | If it is not, then definitely bit 21 is set. Normalize so bit 22 is
   2005   1.1       mrg | not set:
   2006   1.1       mrg 	btst	IMM (DBL_MANT_DIG-32+1),d0
   2007   1.1       mrg 	beq	1f
   2008   1.1       mrg #ifndef __mcoldfire__
   2009   1.1       mrg 	lsrl	IMM (1),d0
   2010   1.1       mrg 	roxrl	IMM (1),d1
   2011   1.1       mrg 	roxrl	IMM (1),d2
   2012   1.1       mrg 	roxrl	IMM (1),d3
   2013   1.1       mrg 	addw	IMM (1),d4
   2014   1.1       mrg #else
   2015   1.1       mrg 	lsrl	IMM (1),d3
   2016   1.1       mrg 	btst	IMM (0),d2
   2017   1.1       mrg 	beq	10f
   2018   1.1       mrg 	bset	IMM (31),d3
   2019   1.1       mrg 10:	lsrl	IMM (1),d2
   2020   1.1       mrg 	btst	IMM (0),d1
   2021   1.1       mrg 	beq	11f
   2022   1.1       mrg 	bset	IMM (31),d2
   2023   1.1       mrg 11:	lsrl	IMM (1),d1
   2024   1.1       mrg 	btst	IMM (0),d0
   2025   1.1       mrg 	beq	12f
   2026   1.1       mrg 	bset	IMM (31),d1
   2027   1.1       mrg 12:	lsrl	IMM (1),d0
   2028   1.1       mrg 	addl	IMM (1),d4
   2029   1.1       mrg #endif
   2030   1.1       mrg 1:
   2031   1.1       mrg | Now round, check for over- and underflow, and exit.
   2032   1.1       mrg 	movel	a0,d7		| restore sign bit to d7
   2033   1.1       mrg 	moveq	IMM (DIVIDE),d5
   2034   1.1       mrg 	bra	Lround$exit
   2035   1.1       mrg 
   2036   1.1       mrg Ldivdf$inop:
   2037   1.1       mrg 	moveq	IMM (DIVIDE),d5
   2038   1.1       mrg 	bra	Ld$inop
   2039   1.1       mrg 
   2040   1.1       mrg Ldivdf$a$0:
   2041   1.1       mrg | If a is zero check to see whether b is zero also. In that case return
   2042   1.1       mrg | NaN; then check if b is NaN, and return NaN also in that case. Else
   2043   1.1       mrg | return a properly signed zero.
   2044   1.1       mrg 	moveq	IMM (DIVIDE),d5
   2045   1.1       mrg 	bclr	IMM (31),d2	|
   2046   1.1       mrg 	movel	d2,d4		|
   2047   1.1       mrg 	orl	d3,d4		|
   2048   1.1       mrg 	beq	Ld$inop		| if b is also zero return NaN
   2049   1.1       mrg 	cmpl	IMM (0x7ff00000),d2 | check for NaN
   2050   1.1       mrg 	bhi	Ld$inop		|
   2051   1.1       mrg 	blt	1f		|
   2052   1.1       mrg 	tstl	d3		|
   2053   1.1       mrg 	bne	Ld$inop		|
   2054   1.1       mrg 1:	movel	a0,d0		| else return signed zero
   2055   1.1       mrg 	moveq	IMM(0),d1	|
   2056   1.1       mrg 	PICLEA	SYM (_fpCCR),a0	| clear exception flags
   2057   1.1       mrg 	movew	IMM (0),a0@	|
   2058   1.1       mrg #ifndef __mcoldfire__
   2059   1.1       mrg 	moveml	sp@+,d2-d7	|
   2060   1.1       mrg #else
   2061   1.1       mrg 	moveml	sp@,d2-d7	|
   2062   1.1       mrg 	| XXX if frame pointer is ever removed, stack pointer must
   2063   1.1       mrg 	| be adjusted here.
   2064   1.1       mrg #endif
   2065   1.1       mrg 	unlk	a6		|
   2066   1.1       mrg 	rts			|
   2067   1.1       mrg 
   2068   1.1       mrg Ldivdf$b$0:
   2069   1.1       mrg 	moveq	IMM (DIVIDE),d5
   2070   1.1       mrg | If we got here a is not zero. Check if a is NaN; in that case return NaN,
   2071   1.1       mrg | else return +/-INFINITY. Remember that a is in d0 with the sign bit
   2072   1.1       mrg | cleared already.
   2073   1.1       mrg 	movel	a0,d7		| put a's sign bit back in d7 '
   2074   1.1       mrg 	cmpl	IMM (0x7ff00000),d0 | compare d0 with INFINITY
   2075   1.1       mrg 	bhi	Ld$inop		| if larger it is NaN
   2076   1.1       mrg 	tstl	d1		|
   2077   1.1       mrg 	bne	Ld$inop		|
   2078   1.1       mrg 	bra	Ld$div$0	| else signal DIVIDE_BY_ZERO
   2079   1.1       mrg 
   2080   1.1       mrg Ldivdf$b$nf:
   2081   1.1       mrg 	moveq	IMM (DIVIDE),d5
   2082   1.1       mrg | If d2 == 0x7ff00000 we have to check d3.
   2083   1.1       mrg 	tstl	d3		|
   2084   1.1       mrg 	bne	Ld$inop		| if d3 <> 0, b is NaN
   2085   1.1       mrg 	bra	Ld$underflow	| else b is +/-INFINITY, so signal underflow
   2086   1.1       mrg 
   2087   1.1       mrg Ldivdf$a$nf:
   2088   1.1       mrg 	moveq	IMM (DIVIDE),d5
   2089   1.1       mrg | If d0 == 0x7ff00000 we have to check d1.
   2090   1.1       mrg 	tstl	d1		|
   2091   1.1       mrg 	bne	Ld$inop		| if d1 <> 0, a is NaN
   2092   1.1       mrg | If a is INFINITY we have to check b
   2093   1.1       mrg 	cmpl	d7,d2		| compare b with INFINITY
   2094   1.1       mrg 	bge	Ld$inop		| if b is NaN or INFINITY return NaN
   2095  1.11       mrg 	movl	a0,d7		| restore sign bit to d7
   2096   1.1       mrg 	bra	Ld$overflow	| else return overflow
   2097   1.1       mrg 
   2098   1.1       mrg | If a number is denormalized we put an exponent of 1 but do not put the
   2099   1.1       mrg | bit back into the fraction.
   2100   1.1       mrg Ldivdf$a$den:
   2101   1.1       mrg 	movel	IMM (1),d4
   2102   1.1       mrg 	andl	d6,d0
   2103   1.1       mrg 1:	addl	d1,d1		| shift a left until bit 20 is set
   2104   1.1       mrg 	addxl	d0,d0
   2105   1.1       mrg #ifndef __mcoldfire__
   2106   1.1       mrg 	subw	IMM (1),d4	| and adjust exponent
   2107   1.1       mrg #else
   2108   1.1       mrg 	subl	IMM (1),d4	| and adjust exponent
   2109   1.1       mrg #endif
   2110   1.1       mrg 	btst	IMM (DBL_MANT_DIG-32-1),d0
   2111   1.1       mrg 	bne	Ldivdf$1
   2112   1.1       mrg 	bra	1b
   2113   1.1       mrg 
   2114   1.1       mrg Ldivdf$b$den:
   2115   1.1       mrg 	movel	IMM (1),d5
   2116   1.1       mrg 	andl	d6,d2
   2117   1.1       mrg 1:	addl	d3,d3		| shift b left until bit 20 is set
   2118   1.1       mrg 	addxl	d2,d2
   2119   1.1       mrg #ifndef __mcoldfire__
   2120   1.1       mrg 	subw	IMM (1),d5	| and adjust exponent
   2121   1.1       mrg #else
   2122   1.1       mrg 	subql	IMM (1),d5	| and adjust exponent
   2123   1.1       mrg #endif
   2124   1.1       mrg 	btst	IMM (DBL_MANT_DIG-32-1),d2
   2125   1.1       mrg 	bne	Ldivdf$2
   2126   1.1       mrg 	bra	1b
   2127   1.1       mrg 
   2128   1.1       mrg Lround$exit:
   2129   1.1       mrg | This is a common exit point for __muldf3 and __divdf3. When they enter
   2130   1.1       mrg | this point the sign of the result is in d7, the result in d0-d1, normalized
   2131   1.1       mrg | so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
   2132   1.1       mrg 
   2133   1.1       mrg | First check for underlow in the exponent:
   2134   1.1       mrg #ifndef __mcoldfire__
   2135   1.1       mrg 	cmpw	IMM (-DBL_MANT_DIG-1),d4
   2136   1.1       mrg #else
   2137   1.1       mrg 	cmpl	IMM (-DBL_MANT_DIG-1),d4
   2138   1.1       mrg #endif
   2139   1.1       mrg 	blt	Ld$underflow
   2140   1.1       mrg | It could happen that the exponent is less than 1, in which case the
   2141   1.1       mrg | number is denormalized. In this case we shift right and adjust the
   2142   1.1       mrg | exponent until it becomes 1 or the fraction is zero (in the latter case
   2143   1.1       mrg | we signal underflow and return zero).
   2144   1.1       mrg 	movel	d7,a0		|
   2145   1.1       mrg 	movel	IMM (0),d6	| use d6-d7 to collect bits flushed right
   2146   1.1       mrg 	movel	d6,d7		| use d6-d7 to collect bits flushed right
   2147   1.1       mrg #ifndef __mcoldfire__
   2148   1.1       mrg 	cmpw	IMM (1),d4	| if the exponent is less than 1 we
   2149   1.1       mrg #else
   2150   1.1       mrg 	cmpl	IMM (1),d4	| if the exponent is less than 1 we
   2151   1.1       mrg #endif
   2152   1.1       mrg 	bge	2f		| have to shift right (denormalize)
   2153   1.1       mrg 1:
   2154   1.1       mrg #ifndef __mcoldfire__
   2155   1.1       mrg 	addw	IMM (1),d4	| adjust the exponent
   2156   1.1       mrg 	lsrl	IMM (1),d0	| shift right once
   2157   1.1       mrg 	roxrl	IMM (1),d1	|
   2158   1.1       mrg 	roxrl	IMM (1),d2	|
   2159   1.1       mrg 	roxrl	IMM (1),d3	|
   2160   1.1       mrg 	roxrl	IMM (1),d6	|
   2161   1.1       mrg 	roxrl	IMM (1),d7	|
   2162   1.1       mrg 	cmpw	IMM (1),d4	| is the exponent 1 already?
   2163   1.1       mrg #else
   2164   1.1       mrg 	addl	IMM (1),d4	| adjust the exponent
   2165   1.1       mrg 	lsrl	IMM (1),d7
   2166   1.1       mrg 	btst	IMM (0),d6
   2167   1.1       mrg 	beq	13f
   2168   1.1       mrg 	bset	IMM (31),d7
   2169   1.1       mrg 13:	lsrl	IMM (1),d6
   2170   1.1       mrg 	btst	IMM (0),d3
   2171   1.1       mrg 	beq	14f
   2172   1.1       mrg 	bset	IMM (31),d6
   2173   1.1       mrg 14:	lsrl	IMM (1),d3
   2174   1.1       mrg 	btst	IMM (0),d2
   2175   1.1       mrg 	beq	10f
   2176   1.1       mrg 	bset	IMM (31),d3
   2177   1.1       mrg 10:	lsrl	IMM (1),d2
   2178   1.1       mrg 	btst	IMM (0),d1
   2179   1.1       mrg 	beq	11f
   2180   1.1       mrg 	bset	IMM (31),d2
   2181   1.1       mrg 11:	lsrl	IMM (1),d1
   2182   1.1       mrg 	btst	IMM (0),d0
   2183   1.1       mrg 	beq	12f
   2184   1.1       mrg 	bset	IMM (31),d1
   2185   1.1       mrg 12:	lsrl	IMM (1),d0
   2186   1.1       mrg 	cmpl	IMM (1),d4	| is the exponent 1 already?
   2187   1.1       mrg #endif
   2188   1.1       mrg 	beq	2f		| if not loop back
   2189   1.1       mrg 	bra	1b              |
   2190   1.1       mrg 	bra	Ld$underflow	| safety check, shouldn't execute '
   2191   1.1       mrg 2:	orl	d6,d2		| this is a trick so we don't lose  '
   2192   1.1       mrg 	orl	d7,d3		| the bits which were flushed right
   2193   1.1       mrg 	movel	a0,d7		| get back sign bit into d7
   2194   1.1       mrg | Now call the rounding routine (which takes care of denormalized numbers):
   2195   1.1       mrg 	lea	pc@(Lround$0),a0 | to return from rounding routine
   2196   1.1       mrg 	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
   2197   1.1       mrg #ifdef __mcoldfire__
   2198   1.1       mrg 	clrl	d6
   2199   1.1       mrg #endif
   2200   1.1       mrg 	movew	a1@(6),d6	| rounding mode in d6
   2201   1.1       mrg 	beq	Lround$to$nearest
   2202   1.1       mrg #ifndef __mcoldfire__
   2203   1.1       mrg 	cmpw	IMM (ROUND_TO_PLUS),d6
   2204   1.1       mrg #else
   2205   1.1       mrg 	cmpl	IMM (ROUND_TO_PLUS),d6
   2206   1.1       mrg #endif
   2207   1.1       mrg 	bhi	Lround$to$minus
   2208   1.1       mrg 	blt	Lround$to$zero
   2209   1.1       mrg 	bra	Lround$to$plus
   2210   1.1       mrg Lround$0:
   2211   1.1       mrg | Here we have a correctly rounded result (either normalized or denormalized).
   2212   1.1       mrg 
   2213   1.1       mrg | Here we should have either a normalized number or a denormalized one, and
   2214   1.1       mrg | the exponent is necessarily larger or equal to 1 (so we don't have to  '
   2215   1.1       mrg | check again for underflow!). We have to check for overflow or for a
   2216   1.1       mrg | denormalized number (which also signals underflow).
   2217   1.1       mrg | Check for overflow (i.e., exponent >= 0x7ff).
   2218   1.1       mrg #ifndef __mcoldfire__
   2219   1.1       mrg 	cmpw	IMM (0x07ff),d4
   2220   1.1       mrg #else
   2221   1.1       mrg 	cmpl	IMM (0x07ff),d4
   2222   1.1       mrg #endif
   2223   1.1       mrg 	bge	Ld$overflow
   2224   1.1       mrg | Now check for a denormalized number (exponent==0):
   2225   1.1       mrg 	movew	d4,d4
   2226   1.1       mrg 	beq	Ld$den
   2227   1.1       mrg 1:
   2228   1.1       mrg | Put back the exponents and sign and return.
   2229   1.1       mrg #ifndef __mcoldfire__
   2230   1.1       mrg 	lslw	IMM (4),d4	| exponent back to fourth byte
   2231   1.1       mrg #else
   2232   1.1       mrg 	lsll	IMM (4),d4	| exponent back to fourth byte
   2233   1.1       mrg #endif
   2234   1.1       mrg 	bclr	IMM (DBL_MANT_DIG-32-1),d0
   2235   1.1       mrg 	swap	d0		| and put back exponent
   2236   1.1       mrg #ifndef __mcoldfire__
   2237   1.1       mrg 	orw	d4,d0		|
   2238   1.1       mrg #else
   2239   1.1       mrg 	orl	d4,d0		|
   2240   1.1       mrg #endif
   2241   1.1       mrg 	swap	d0		|
   2242   1.1       mrg 	orl	d7,d0		| and sign also
   2243   1.1       mrg 
   2244   1.1       mrg 	PICLEA	SYM (_fpCCR),a0
   2245   1.1       mrg 	movew	IMM (0),a0@
   2246   1.1       mrg #ifndef __mcoldfire__
   2247   1.1       mrg 	moveml	sp@+,d2-d7
   2248   1.1       mrg #else
   2249   1.1       mrg 	moveml	sp@,d2-d7
   2250   1.1       mrg 	| XXX if frame pointer is ever removed, stack pointer must
   2251   1.1       mrg 	| be adjusted here.
   2252   1.1       mrg #endif
   2253   1.1       mrg 	unlk	a6
   2254   1.1       mrg 	rts
   2255   1.1       mrg 
   2256   1.1       mrg |=============================================================================
   2257   1.1       mrg |                              __negdf2
   2258   1.1       mrg |=============================================================================
   2259   1.1       mrg 
   2260   1.1       mrg | double __negdf2(double, double);
   2261   1.1       mrg 	FUNC(__negdf2)
   2262   1.1       mrg SYM (__negdf2):
   2263   1.1       mrg #ifndef __mcoldfire__
   2264   1.1       mrg 	link	a6,IMM (0)
   2265   1.1       mrg 	moveml	d2-d7,sp@-
   2266   1.1       mrg #else
   2267   1.1       mrg 	link	a6,IMM (-24)
   2268   1.1       mrg 	moveml	d2-d7,sp@
   2269   1.1       mrg #endif
   2270   1.1       mrg 	moveq	IMM (NEGATE),d5
   2271   1.1       mrg 	movel	a6@(8),d0	| get number to negate in d0-d1
   2272   1.1       mrg 	movel	a6@(12),d1	|
   2273   1.1       mrg 	bchg	IMM (31),d0	| negate
   2274   1.1       mrg 	movel	d0,d2		| make a positive copy (for the tests)
   2275   1.1       mrg 	bclr	IMM (31),d2	|
   2276   1.1       mrg 	movel	d2,d4		| check for zero
   2277   1.1       mrg 	orl	d1,d4		|
   2278   1.1       mrg 	beq	2f		| if zero (either sign) return +zero
   2279   1.1       mrg 	cmpl	IMM (0x7ff00000),d2 | compare to +INFINITY
   2280   1.1       mrg 	blt	1f		| if finite, return
   2281   1.1       mrg 	bhi	Ld$inop		| if larger (fraction not zero) is NaN
   2282   1.1       mrg 	tstl	d1		| if d2 == 0x7ff00000 check d1
   2283   1.1       mrg 	bne	Ld$inop		|
   2284   1.1       mrg 	movel	d0,d7		| else get sign and return INFINITY
   2285   1.1       mrg 	andl	IMM (0x80000000),d7
   2286   1.1       mrg 	bra	Ld$infty
   2287   1.1       mrg 1:	PICLEA	SYM (_fpCCR),a0
   2288   1.1       mrg 	movew	IMM (0),a0@
   2289   1.1       mrg #ifndef __mcoldfire__
   2290   1.1       mrg 	moveml	sp@+,d2-d7
   2291   1.1       mrg #else
   2292   1.1       mrg 	moveml	sp@,d2-d7
   2293   1.1       mrg 	| XXX if frame pointer is ever removed, stack pointer must
   2294   1.1       mrg 	| be adjusted here.
   2295   1.1       mrg #endif
   2296   1.1       mrg 	unlk	a6
   2297   1.1       mrg 	rts
   2298   1.1       mrg 2:	bclr	IMM (31),d0
   2299   1.1       mrg 	bra	1b
   2300   1.1       mrg 
   2301   1.1       mrg |=============================================================================
   2302   1.1       mrg |                              __cmpdf2
   2303   1.1       mrg |=============================================================================
   2304   1.1       mrg 
   2305   1.1       mrg GREATER =  1
   2306   1.1       mrg LESS    = -1
   2307   1.1       mrg EQUAL   =  0
   2308   1.1       mrg 
   2309   1.1       mrg | int __cmpdf2_internal(double, double, int);
   2310   1.1       mrg SYM (__cmpdf2_internal):
   2311   1.1       mrg #ifndef __mcoldfire__
   2312   1.1       mrg 	link	a6,IMM (0)
   2313   1.1       mrg 	moveml	d2-d7,sp@- 	| save registers
   2314   1.1       mrg #else
   2315   1.1       mrg 	link	a6,IMM (-24)
   2316   1.1       mrg 	moveml	d2-d7,sp@
   2317   1.1       mrg #endif
   2318   1.1       mrg 	moveq	IMM (COMPARE),d5
   2319   1.1       mrg 	movel	a6@(8),d0	| get first operand
   2320   1.1       mrg 	movel	a6@(12),d1	|
   2321   1.1       mrg 	movel	a6@(16),d2	| get second operand
   2322   1.1       mrg 	movel	a6@(20),d3	|
   2323   1.1       mrg | First check if a and/or b are (+/-) zero and in that case clear
   2324   1.1       mrg | the sign bit.
   2325   1.1       mrg 	movel	d0,d6		| copy signs into d6 (a) and d7(b)
   2326   1.1       mrg 	bclr	IMM (31),d0	| and clear signs in d0 and d2
   2327   1.1       mrg 	movel	d2,d7		|
   2328   1.1       mrg 	bclr	IMM (31),d2	|
   2329   1.1       mrg 	cmpl	IMM (0x7ff00000),d0 | check for a == NaN
   2330   1.1       mrg 	bhi	Lcmpd$inop		| if d0 > 0x7ff00000, a is NaN
   2331   1.1       mrg 	beq	Lcmpdf$a$nf	| if equal can be INFINITY, so check d1
   2332   1.1       mrg 	movel	d0,d4		| copy into d4 to test for zero
   2333   1.1       mrg 	orl	d1,d4		|
   2334   1.1       mrg 	beq	Lcmpdf$a$0	|
   2335   1.1       mrg Lcmpdf$0:
   2336   1.1       mrg 	cmpl	IMM (0x7ff00000),d2 | check for b == NaN
   2337   1.1       mrg 	bhi	Lcmpd$inop		| if d2 > 0x7ff00000, b is NaN
   2338   1.1       mrg 	beq	Lcmpdf$b$nf	| if equal can be INFINITY, so check d3
   2339   1.1       mrg 	movel	d2,d4		|
   2340   1.1       mrg 	orl	d3,d4		|
   2341   1.1       mrg 	beq	Lcmpdf$b$0	|
   2342   1.1       mrg Lcmpdf$1:
   2343   1.1       mrg | Check the signs
   2344   1.1       mrg 	eorl	d6,d7
   2345   1.1       mrg 	bpl	1f
   2346   1.1       mrg | If the signs are not equal check if a >= 0
   2347   1.1       mrg 	tstl	d6
   2348   1.1       mrg 	bpl	Lcmpdf$a$gt$b	| if (a >= 0 && b < 0) => a > b
   2349   1.1       mrg 	bmi	Lcmpdf$b$gt$a	| if (a < 0 && b >= 0) => a < b
   2350   1.1       mrg 1:
   2351   1.1       mrg | If the signs are equal check for < 0
   2352   1.1       mrg 	tstl	d6
   2353   1.1       mrg 	bpl	1f
   2354   1.1       mrg | If both are negative exchange them
   2355   1.1       mrg #ifndef __mcoldfire__
   2356   1.1       mrg 	exg	d0,d2
   2357   1.1       mrg 	exg	d1,d3
   2358   1.1       mrg #else
   2359   1.1       mrg 	movel	d0,d7
   2360   1.1       mrg 	movel	d2,d0
   2361   1.1       mrg 	movel	d7,d2
   2362   1.1       mrg 	movel	d1,d7
   2363   1.1       mrg 	movel	d3,d1
   2364   1.1       mrg 	movel	d7,d3
   2365   1.1       mrg #endif
   2366   1.1       mrg 1:
   2367   1.1       mrg | Now that they are positive we just compare them as longs (does this also
   2368   1.1       mrg | work for denormalized numbers?).
   2369   1.1       mrg 	cmpl	d0,d2
   2370   1.1       mrg 	bhi	Lcmpdf$b$gt$a	| |b| > |a|
   2371   1.1       mrg 	bne	Lcmpdf$a$gt$b	| |b| < |a|
   2372   1.1       mrg | If we got here d0 == d2, so we compare d1 and d3.
   2373   1.1       mrg 	cmpl	d1,d3
   2374   1.1       mrg 	bhi	Lcmpdf$b$gt$a	| |b| > |a|
   2375   1.1       mrg 	bne	Lcmpdf$a$gt$b	| |b| < |a|
   2376   1.1       mrg | If we got here a == b.
   2377   1.1       mrg 	movel	IMM (EQUAL),d0
   2378   1.1       mrg #ifndef __mcoldfire__
   2379   1.1       mrg 	moveml	sp@+,d2-d7 	| put back the registers
   2380   1.1       mrg #else
   2381   1.1       mrg 	moveml	sp@,d2-d7
   2382   1.1       mrg 	| XXX if frame pointer is ever removed, stack pointer must
   2383   1.1       mrg 	| be adjusted here.
   2384   1.1       mrg #endif
   2385   1.1       mrg 	unlk	a6
   2386   1.1       mrg 	rts
   2387   1.1       mrg Lcmpdf$a$gt$b:
   2388   1.1       mrg 	movel	IMM (GREATER),d0
   2389   1.1       mrg #ifndef __mcoldfire__
   2390   1.1       mrg 	moveml	sp@+,d2-d7 	| put back the registers
   2391   1.1       mrg #else
   2392   1.1       mrg 	moveml	sp@,d2-d7
   2393   1.1       mrg 	| XXX if frame pointer is ever removed, stack pointer must
   2394   1.1       mrg 	| be adjusted here.
   2395   1.1       mrg #endif
   2396   1.1       mrg 	unlk	a6
   2397   1.1       mrg 	rts
   2398   1.1       mrg Lcmpdf$b$gt$a:
   2399   1.1       mrg 	movel	IMM (LESS),d0
   2400   1.1       mrg #ifndef __mcoldfire__
   2401   1.1       mrg 	moveml	sp@+,d2-d7 	| put back the registers
   2402   1.1       mrg #else
   2403   1.1       mrg 	moveml	sp@,d2-d7
   2404   1.1       mrg 	| XXX if frame pointer is ever removed, stack pointer must
   2405   1.1       mrg 	| be adjusted here.
   2406   1.1       mrg #endif
   2407   1.1       mrg 	unlk	a6
   2408   1.1       mrg 	rts
   2409   1.1       mrg 
   2410   1.1       mrg Lcmpdf$a$0:
   2411   1.1       mrg 	bclr	IMM (31),d6
   2412   1.1       mrg 	bra	Lcmpdf$0
   2413   1.1       mrg Lcmpdf$b$0:
   2414   1.1       mrg 	bclr	IMM (31),d7
   2415   1.1       mrg 	bra	Lcmpdf$1
   2416   1.1       mrg 
   2417   1.1       mrg Lcmpdf$a$nf:
   2418   1.1       mrg 	tstl	d1
   2419   1.1       mrg 	bne	Ld$inop
   2420   1.1       mrg 	bra	Lcmpdf$0
   2421   1.1       mrg 
   2422   1.1       mrg Lcmpdf$b$nf:
   2423   1.1       mrg 	tstl	d3
   2424   1.1       mrg 	bne	Ld$inop
   2425   1.1       mrg 	bra	Lcmpdf$1
   2426   1.1       mrg 
   2427   1.1       mrg Lcmpd$inop:
   2428   1.1       mrg 	movl	a6@(24),d0
   2429   1.1       mrg 	moveq	IMM (INEXACT_RESULT+INVALID_OPERATION),d7
   2430   1.1       mrg 	moveq	IMM (DOUBLE_FLOAT),d6
   2431   1.1       mrg 	PICJUMP	$_exception_handler
   2432   1.1       mrg 
   2433   1.1       mrg | int __cmpdf2(double, double);
   2434   1.1       mrg 	FUNC(__cmpdf2)
   2435   1.1       mrg SYM (__cmpdf2):
   2436   1.1       mrg 	link	a6,IMM (0)
   2437   1.1       mrg 	pea	1
   2438   1.1       mrg 	movl	a6@(20),sp@-
   2439   1.1       mrg 	movl	a6@(16),sp@-
   2440   1.1       mrg 	movl	a6@(12),sp@-
   2441   1.1       mrg 	movl	a6@(8),sp@-
   2442   1.1       mrg 	PICCALL	SYM (__cmpdf2_internal)
   2443   1.1       mrg 	unlk	a6
   2444   1.1       mrg 	rts
   2445   1.1       mrg 
   2446   1.1       mrg |=============================================================================
   2447   1.1       mrg |                           rounding routines
   2448   1.1       mrg |=============================================================================
   2449   1.1       mrg 
   2450   1.1       mrg | The rounding routines expect the number to be normalized in registers
   2451   1.1       mrg | d0-d1-d2-d3, with the exponent in register d4. They assume that the
   2452   1.1       mrg | exponent is larger or equal to 1. They return a properly normalized number
   2453   1.1       mrg | if possible, and a denormalized number otherwise. The exponent is returned
   2454   1.1       mrg | in d4.
   2455   1.1       mrg 
   2456   1.1       mrg Lround$to$nearest:
   2457   1.1       mrg | We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
   2458   1.1       mrg | Here we assume that the exponent is not too small (this should be checked
   2459   1.1       mrg | before entering the rounding routine), but the number could be denormalized.
   2460   1.1       mrg 
   2461   1.1       mrg | Check for denormalized numbers:
   2462   1.1       mrg 1:	btst	IMM (DBL_MANT_DIG-32),d0
   2463   1.1       mrg 	bne	2f		| if set the number is normalized
   2464   1.1       mrg | Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
   2465   1.1       mrg | is one (remember that a denormalized number corresponds to an
   2466   1.1       mrg | exponent of -D_BIAS+1).
   2467   1.1       mrg #ifndef __mcoldfire__
   2468   1.1       mrg 	cmpw	IMM (1),d4	| remember that the exponent is at least one
   2469   1.1       mrg #else
   2470   1.1       mrg 	cmpl	IMM (1),d4	| remember that the exponent is at least one
   2471   1.1       mrg #endif
   2472   1.1       mrg  	beq	2f		| an exponent of one means denormalized
   2473   1.1       mrg 	addl	d3,d3		| else shift and adjust the exponent
   2474   1.1       mrg 	addxl	d2,d2		|
   2475   1.1       mrg 	addxl	d1,d1		|
   2476   1.1       mrg 	addxl	d0,d0		|
   2477   1.1       mrg #ifndef __mcoldfire__
   2478   1.1       mrg 	dbra	d4,1b		|
   2479   1.1       mrg #else
   2480   1.1       mrg 	subql	IMM (1), d4
   2481   1.1       mrg 	bpl	1b
   2482   1.1       mrg #endif
   2483   1.1       mrg 2:
   2484   1.1       mrg | Now round: we do it as follows: after the shifting we can write the
   2485   1.1       mrg | fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
   2486   1.1       mrg | If delta < 1, do nothing. If delta > 1, add 1 to f.
   2487   1.1       mrg | If delta == 1, we make sure the rounded number will be even (odd?)
   2488   1.1       mrg | (after shifting).
   2489   1.1       mrg 	btst	IMM (0),d1	| is delta < 1?
   2490   1.1       mrg 	beq	2f		| if so, do not do anything
   2491   1.1       mrg 	orl	d2,d3		| is delta == 1?
   2492   1.1       mrg 	bne	1f		| if so round to even
   2493   1.1       mrg 	movel	d1,d3		|
   2494   1.1       mrg 	andl	IMM (2),d3	| bit 1 is the last significant bit
   2495   1.1       mrg 	movel	IMM (0),d2	|
   2496   1.1       mrg 	addl	d3,d1		|
   2497   1.1       mrg 	addxl	d2,d0		|
   2498   1.1       mrg 	bra	2f		|
   2499   1.1       mrg 1:	movel	IMM (1),d3	| else add 1
   2500   1.1       mrg 	movel	IMM (0),d2	|
   2501   1.1       mrg 	addl	d3,d1		|
   2502   1.1       mrg 	addxl	d2,d0
   2503   1.1       mrg | Shift right once (because we used bit #DBL_MANT_DIG-32!).
   2504   1.1       mrg 2:
   2505   1.1       mrg #ifndef __mcoldfire__
   2506   1.1       mrg 	lsrl	IMM (1),d0
   2507   1.1       mrg 	roxrl	IMM (1),d1
   2508   1.1       mrg #else
   2509   1.1       mrg 	lsrl	IMM (1),d1
   2510   1.1       mrg 	btst	IMM (0),d0
   2511   1.1       mrg 	beq	10f
   2512   1.1       mrg 	bset	IMM (31),d1
   2513   1.1       mrg 10:	lsrl	IMM (1),d0
   2514   1.1       mrg #endif
   2515   1.1       mrg 
   2516   1.1       mrg | Now check again bit #DBL_MANT_DIG-32 (rounding could have produced a
   2517   1.1       mrg | 'fraction overflow' ...).
   2518   1.1       mrg 	btst	IMM (DBL_MANT_DIG-32),d0
   2519   1.1       mrg 	beq	1f
   2520   1.1       mrg #ifndef __mcoldfire__
   2521   1.1       mrg 	lsrl	IMM (1),d0
   2522   1.1       mrg 	roxrl	IMM (1),d1
   2523   1.1       mrg 	addw	IMM (1),d4
   2524   1.1       mrg #else
   2525   1.1       mrg 	lsrl	IMM (1),d1
   2526   1.1       mrg 	btst	IMM (0),d0
   2527   1.1       mrg 	beq	10f
   2528   1.1       mrg 	bset	IMM (31),d1
   2529   1.1       mrg 10:	lsrl	IMM (1),d0
   2530   1.1       mrg 	addl	IMM (1),d4
   2531   1.1       mrg #endif
   2532   1.1       mrg 1:
   2533   1.1       mrg | If bit #DBL_MANT_DIG-32-1 is clear we have a denormalized number, so we
   2534   1.1       mrg | have to put the exponent to zero and return a denormalized number.
   2535   1.1       mrg 	btst	IMM (DBL_MANT_DIG-32-1),d0
   2536   1.1       mrg 	beq	1f
   2537   1.1       mrg 	jmp	a0@
   2538   1.1       mrg 1:	movel	IMM (0),d4
   2539   1.1       mrg 	jmp	a0@
   2540   1.1       mrg 
   2541   1.1       mrg Lround$to$zero:
   2542   1.1       mrg Lround$to$plus:
   2543   1.1       mrg Lround$to$minus:
   2544   1.1       mrg 	jmp	a0@
   2545   1.1       mrg #endif /* L_double */
   2546   1.1       mrg 
   2547   1.1       mrg #ifdef  L_float
   2548   1.1       mrg 
   2549   1.1       mrg 	.globl	SYM (_fpCCR)
   2550   1.1       mrg 	.globl  $_exception_handler
   2551   1.1       mrg 
   2552   1.1       mrg QUIET_NaN    = 0xffffffff
   2553   1.1       mrg SIGNL_NaN    = 0x7f800001
   2554   1.1       mrg INFINITY     = 0x7f800000
   2555   1.1       mrg 
   2556   1.1       mrg F_MAX_EXP      = 0xff
   2557   1.1       mrg F_BIAS         = 126
   2558   1.1       mrg FLT_MAX_EXP    = F_MAX_EXP - F_BIAS
   2559   1.1       mrg FLT_MIN_EXP    = 1 - F_BIAS
   2560   1.1       mrg FLT_MANT_DIG   = 24
   2561   1.1       mrg 
   2562   1.1       mrg INEXACT_RESULT 		= 0x0001
   2563   1.1       mrg UNDERFLOW 		= 0x0002
   2564   1.1       mrg OVERFLOW 		= 0x0004
   2565   1.1       mrg DIVIDE_BY_ZERO 		= 0x0008
   2566   1.1       mrg INVALID_OPERATION 	= 0x0010
   2567   1.1       mrg 
   2568   1.1       mrg SINGLE_FLOAT = 1
   2569   1.1       mrg 
   2570   1.1       mrg NOOP         = 0
   2571   1.1       mrg ADD          = 1
   2572   1.1       mrg MULTIPLY     = 2
   2573   1.1       mrg DIVIDE       = 3
   2574   1.1       mrg NEGATE       = 4
   2575   1.1       mrg COMPARE      = 5
   2576   1.1       mrg EXTENDSFDF   = 6
   2577   1.1       mrg TRUNCDFSF    = 7
   2578   1.1       mrg 
   2579   1.1       mrg UNKNOWN           = -1
   2580   1.1       mrg ROUND_TO_NEAREST  = 0 | round result to nearest representable value
   2581   1.1       mrg ROUND_TO_ZERO     = 1 | round result towards zero
   2582   1.1       mrg ROUND_TO_PLUS     = 2 | round result towards plus infinity
   2583   1.1       mrg ROUND_TO_MINUS    = 3 | round result towards minus infinity
   2584   1.1       mrg 
   2585   1.1       mrg | Entry points:
   2586   1.1       mrg 
   2587   1.1       mrg 	.globl SYM (__addsf3)
   2588   1.1       mrg 	.globl SYM (__subsf3)
   2589   1.1       mrg 	.globl SYM (__mulsf3)
   2590   1.1       mrg 	.globl SYM (__divsf3)
   2591   1.1       mrg 	.globl SYM (__negsf2)
   2592   1.1       mrg 	.globl SYM (__cmpsf2)
   2593   1.1       mrg 	.globl SYM (__cmpsf2_internal)
   2594   1.1       mrg 	.hidden SYM (__cmpsf2_internal)
   2595   1.1       mrg 
   2596   1.1       mrg | These are common routines to return and signal exceptions.
   2597   1.1       mrg 
   2598   1.1       mrg 	.text
   2599   1.1       mrg 	.even
   2600   1.1       mrg 
   2601   1.1       mrg Lf$den:
   2602   1.1       mrg | Return and signal a denormalized number
   2603   1.1       mrg 	orl	d7,d0
   2604   1.1       mrg 	moveq	IMM (INEXACT_RESULT+UNDERFLOW),d7
   2605   1.1       mrg 	moveq	IMM (SINGLE_FLOAT),d6
   2606   1.1       mrg 	PICJUMP	$_exception_handler
   2607   1.1       mrg 
   2608   1.1       mrg Lf$infty:
   2609   1.1       mrg Lf$overflow:
   2610   1.1       mrg | Return a properly signed INFINITY and set the exception flags
   2611   1.1       mrg 	movel	IMM (INFINITY),d0
   2612   1.1       mrg 	orl	d7,d0
   2613   1.1       mrg 	moveq	IMM (INEXACT_RESULT+OVERFLOW),d7
   2614   1.1       mrg 	moveq	IMM (SINGLE_FLOAT),d6
   2615   1.1       mrg 	PICJUMP	$_exception_handler
   2616   1.1       mrg 
   2617   1.1       mrg Lf$underflow:
   2618   1.1       mrg | Return 0 and set the exception flags
   2619   1.1       mrg 	moveq	IMM (0),d0
   2620   1.1       mrg 	moveq	IMM (INEXACT_RESULT+UNDERFLOW),d7
   2621   1.1       mrg 	moveq	IMM (SINGLE_FLOAT),d6
   2622   1.1       mrg 	PICJUMP	$_exception_handler
   2623   1.1       mrg 
   2624   1.1       mrg Lf$inop:
   2625   1.1       mrg | Return a quiet NaN and set the exception flags
   2626   1.1       mrg 	movel	IMM (QUIET_NaN),d0
   2627   1.1       mrg 	moveq	IMM (INEXACT_RESULT+INVALID_OPERATION),d7
   2628   1.1       mrg 	moveq	IMM (SINGLE_FLOAT),d6
   2629   1.1       mrg 	PICJUMP	$_exception_handler
   2630   1.1       mrg 
   2631   1.1       mrg Lf$div$0:
   2632   1.1       mrg | Return a properly signed INFINITY and set the exception flags
   2633   1.1       mrg 	movel	IMM (INFINITY),d0
   2634   1.1       mrg 	orl	d7,d0
   2635   1.1       mrg 	moveq	IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
   2636   1.1       mrg 	moveq	IMM (SINGLE_FLOAT),d6
   2637   1.1       mrg 	PICJUMP	$_exception_handler
   2638   1.1       mrg 
   2639   1.1       mrg |=============================================================================
   2640   1.1       mrg |=============================================================================
   2641   1.1       mrg |                         single precision routines
   2642   1.1       mrg |=============================================================================
   2643   1.1       mrg |=============================================================================
   2644   1.1       mrg 
   2645   1.1       mrg | A single precision floating point number (float) has the format:
   2646   1.1       mrg |
   2647   1.1       mrg | struct _float {
   2648   1.1       mrg |  unsigned int sign      : 1;  /* sign bit */
   2649   1.1       mrg |  unsigned int exponent  : 8;  /* exponent, shifted by 126 */
   2650   1.1       mrg |  unsigned int fraction  : 23; /* fraction */
   2651   1.1       mrg | } float;
   2652   1.1       mrg |
   2653   1.1       mrg | Thus sizeof(float) = 4 (32 bits).
   2654   1.1       mrg |
   2655   1.1       mrg | All the routines are callable from C programs, and return the result
   2656   1.1       mrg | in the single register d0. They also preserve all registers except
   2657   1.1       mrg | d0-d1 and a0-a1.
   2658   1.1       mrg 
   2659   1.1       mrg |=============================================================================
   2660   1.1       mrg |                              __subsf3
   2661   1.1       mrg |=============================================================================
   2662   1.1       mrg 
| float __subsf3(float, float);
| Single-precision subtraction, computed as a + (-b): toggle the sign
| bit of the second operand in its stack slot and fall through into
| __addsf3 below.
	FUNC(__subsf3)
SYM (__subsf3):
	bchg	IMM (31),sp@(8)	| change sign of second operand
				| and fall through to __addsf3
   2668   1.1       mrg |=============================================================================
   2669   1.1       mrg |                              __addsf3
   2670   1.1       mrg |=============================================================================
   2671   1.1       mrg 
| float __addsf3(float, float);
| IEEE single-precision addition.  Arguments are on the stack (a at
| a6@(8), b at a6@(12)); the result is returned in d0.  Registers
| d2-d7 are saved in the frame and restored on every exit path.
	FUNC(__addsf3)
SYM (__addsf3):
#ifndef __mcoldfire__
	link	a6,IMM (0)	| everything will be done in registers
	moveml	d2-d7,sp@-	| save all data registers but d0-d1
#else
	link	a6,IMM (-24)	| ColdFire moveml cannot push to sp@-,
	moveml	d2-d7,sp@	| so allocate the frame explicitly
#endif
	movel	a6@(8),d0	| get first operand
	movel	a6@(12),d1	| get second operand
	movel	d0,a0		| save a, with its sign bit, in a0
	addl	d0,d0		| check and clear sign bit of a
	beq	Laddsf$b	| if zero return second operand
	movel	d1,a1		| save b, with its sign bit, in a1
	addl	d1,d1		| get rid of sign bit
	beq	Laddsf$a	| if zero return first operand

| Get the exponents and check for denormalized and/or infinity.
| After the shift left above the exponent sits in bits 24-31 and the
| fraction in bits 1-23 of each register.

	movel	IMM (0x00ffffff),d4	| mask to get fraction
	movel	IMM (0x01000000),d5	| mask to put hidden bit back

	movel	d0,d6		| save a to get exponent
	andl	d4,d0		| get fraction in d0
	notl 	d4		| make d4 into a mask for the exponent
	andl	d4,d6		| get exponent in d6
	beq	Laddsf$a$den	| branch if a is denormalized
	cmpl	d4,d6		| all exponent bits set: INFINITY or NaN
	beq	Laddsf$nf
	swap	d6		| put exponent into first word
	orl	d5,d0		| and put hidden bit back
Laddsf$1:
| Now we have the exponent of a in d6 (second byte) and the mantissa in d0.
	movel	d1,d7		| get exponent in d7
	andl	d4,d7		|
	beq	Laddsf$b$den	| branch if b is denormalized
	cmpl	d4,d7		| check for INFINITY or NaN
	beq	Laddsf$nf
	swap	d7		| put exponent into first word
	notl 	d4		| make d4 into a mask for the fraction
	andl	d4,d1		| get fraction in d1
	orl	d5,d1		| and put hidden bit back
Laddsf$2:
| Now we have the exponent of b in d7 (second byte) and the mantissa in d1.

| Note that the hidden bit corresponds to bit #FLT_MANT_DIG-1, and we
| shifted right once, so bit #FLT_MANT_DIG is set (so we have one extra
| bit).

	movel	d1,d2		| move b to d2, since we want to use
				| two registers to do the sum
	movel	IMM (0),d1	| and clear the new ones
	movel	d1,d3		|
   2727   1.1       mrg 
| Here we shift the numbers in registers d0 and d1 so the exponents are the
| same, and put the largest exponent in d6. Note that we are using two
| registers for each number (see the discussion by D. Knuth in "Seminumerical
| Algorithms").
| The exponent lives in the upper byte of the low word of d6/d7, hence
| the lsr by 8 below before the difference is used as a shift count.
#ifndef __mcoldfire__
	cmpw	d6,d7		| compare exponents
#else
	cmpl	d6,d7		| compare exponents
#endif
	beq	Laddsf$3	| if equal do not shift at all
	bhi	5f		| branch if second exponent largest
1:
| Exponent of a is the larger one: shift b (d2-d3) right to match.
	subl	d6,d7		| keep the largest exponent
	negl	d7
#ifndef __mcoldfire__
	lsrw	IMM (8),d7	| put difference in lower byte
#else
	lsrl	IMM (8),d7	| put difference in lower byte
#endif
| if the difference is too large we do not shift (actually, we can just exit)
#ifndef __mcoldfire__
	cmpw	IMM (FLT_MANT_DIG+2),d7
#else
	cmpl	IMM (FLT_MANT_DIG+2),d7
#endif
	bge	Laddsf$b$small	| b is negligible next to a
#ifndef __mcoldfire__
	cmpw	IMM (16),d7	| if difference >= 16 swap
#else
	cmpl	IMM (16),d7	| if difference >= 16 swap
#endif
	bge	4f
2:
#ifndef __mcoldfire__
	subw	IMM (1),d7	| bias the count for the dbra loop below
#else
	subql	IMM (1), d7
#endif
3:
| Shift the 64-bit quantity d2-d3 right one bit per iteration.
#ifndef __mcoldfire__
	lsrl	IMM (1),d2	| shift right second operand
	roxrl	IMM (1),d3	| bit shifted out of d2 enters d3
	dbra	d7,3b
#else
	lsrl	IMM (1),d3	| ColdFire has no roxrl: emulate it by
	btst	IMM (0),d2	| copying bit 0 of d2 into bit 31 of d3
	beq	10f
	bset	IMM (31),d3
10:	lsrl	IMM (1),d2
	subql	IMM (1), d7
	bpl	3b
#endif
	bra	Laddsf$3
4:
| Difference >= 16: exchanging the 16-bit halves of d2 and d3 performs
| a 16-bit right shift of the pair in one step.
	movew	d2,d3
	swap	d3
	movew	d3,d2
	swap	d2
#ifndef __mcoldfire__
	subw	IMM (16),d7
#else
	subl	IMM (16),d7
#endif
	bne	2b		| if still more bits, go back to normal case
	bra	Laddsf$3
5:
| Exponent of b is the larger one: same as case 1 with the roles
| swapped, shifting a (d0-d1) right instead.
#ifndef __mcoldfire__
	exg	d6,d7		| exchange the exponents
#else
	eorl	d6,d7		| three eor instructions exchange
	eorl	d7,d6		| d6 and d7 without a scratch register
	eorl	d6,d7
#endif
	subl	d6,d7		| keep the largest exponent
	negl	d7		|
#ifndef __mcoldfire__
	lsrw	IMM (8),d7	| put difference in lower byte
#else
	lsrl	IMM (8),d7	| put difference in lower byte
#endif
| if the difference is too large we do not shift (and exit!)
#ifndef __mcoldfire__
	cmpw	IMM (FLT_MANT_DIG+2),d7
#else
	cmpl	IMM (FLT_MANT_DIG+2),d7
#endif
	bge	Laddsf$a$small	| a is negligible next to b
#ifndef __mcoldfire__
	cmpw	IMM (16),d7	| if difference >= 16 swap
#else
	cmpl	IMM (16),d7	| if difference >= 16 swap
#endif
	bge	8f
6:
#ifndef __mcoldfire__
	subw	IMM (1),d7	| bias the count for the dbra loop below
#else
	subl	IMM (1),d7
#endif
7:
#ifndef __mcoldfire__
	lsrl	IMM (1),d0	| shift right first operand
	roxrl	IMM (1),d1	| bit shifted out of d0 enters d1
	dbra	d7,7b
#else
	lsrl	IMM (1),d1	| ColdFire roxrl emulation, as in 3: above
	btst	IMM (0),d0
	beq	10f
	bset	IMM (31),d1
10:	lsrl	IMM (1),d0
	subql	IMM (1),d7
	bpl	7b
#endif
	bra	Laddsf$3
8:
| 16-bit right shift of d0-d1 by half-word exchange, as in 4: above.
	movew	d0,d1
	swap	d1
	movew	d1,d0
	swap	d0
#ifndef __mcoldfire__
	subw	IMM (16),d7
#else
	subl	IMM (16),d7
#endif
	bne	6b		| if still more bits, go back to normal case
				| otherwise we fall through

| Now we have a in d0-d1, b in d2-d3, and the largest exponent in d6 (the
| signs are stored in a0 and a1).
   2857   1.1       mrg 
Laddsf$3:
| Here we have to decide whether to add or subtract the numbers: the
| aligned mantissas are in d0-d1 (a) and d2-d3 (b), the original signed
| operands are in a0/a1 and the common exponent is in d6.
#ifndef __mcoldfire__
	exg	d6,a0		| get signs back
	exg	d7,a1		| and save the exponents
#else
	movel	d6,d4		| ColdFire has no exg: swap through d4
	movel	a0,d6
	movel	d4,a0
	movel	d7,d4
	movel	a1,d7
	movel	d4,a1
#endif
	eorl	d6,d7		| combine sign bits
	bmi	Lsubsf$0	| if negative a and b have opposite
				| sign so we actually subtract the
				| numbers

| Here we have both positive or both negative
#ifndef __mcoldfire__
	exg	d6,a0		| now we have the exponent in d6
#else
	movel	d6,d4		| (again a three-move exg substitute)
	movel	a0,d6
	movel	d4,a0
#endif
	movel	a0,d7		| and sign in d7
	andl	IMM (0x80000000),d7
| Here we do the addition.
	addl	d3,d1		| 64-bit add: low longs first,
	addxl	d2,d0		| then high longs with the carry
| Note: now we have d2, d3, d4 and d5 to play with!

| Put the exponent, in the first byte, in d2, to use the "standard" rounding
| routines:
	movel	d6,d2
#ifndef __mcoldfire__
	lsrw	IMM (8),d2
#else
	lsrl	IMM (8),d2
#endif

| Before rounding normalize so bit #FLT_MANT_DIG is set (we will consider
| the case of denormalized numbers in the rounding routine itself).
| As in the addition (not in the subtraction!) we could have set
| one more bit we check this:
	btst	IMM (FLT_MANT_DIG+1),d0
	beq	1f
#ifndef __mcoldfire__
	lsrl	IMM (1),d0	| carry out of the sum: shift right once
	roxrl	IMM (1),d1	| and bump the exponent below
#else
	lsrl	IMM (1),d1	| ColdFire roxrl emulation
	btst	IMM (0),d0
	beq	10f
	bset	IMM (31),d1
10:	lsrl	IMM (1),d0
#endif
	addl	IMM (1),d2
1:
| Dispatch to the shared rounding routine selected by the mode stored
| in _fpCCR; it returns here through the address loaded into a0.
	lea	pc@(Laddsf$4),a0 | to return from rounding routine
	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
#ifdef __mcoldfire__
	clrl	d6		| movew below only writes the low word
#endif
	movew	a1@(6),d6	| rounding mode in d6
	beq	Lround$to$nearest
#ifndef __mcoldfire__
	cmpw	IMM (ROUND_TO_PLUS),d6
#else
	cmpl	IMM (ROUND_TO_PLUS),d6
#endif
	bhi	Lround$to$minus
	blt	Lround$to$zero
	bra	Lround$to$plus
Laddsf$4:
| Put back the exponent, but check for overflow.
#ifndef __mcoldfire__
	cmpw	IMM (0xff),d2
#else
	cmpl	IMM (0xff),d2
#endif
	bge	1f		| exponent does not fit: overflow
	bclr	IMM (FLT_MANT_DIG-1),d0	| clear the hidden bit
#ifndef __mcoldfire__
	lslw	IMM (7),d2	| move exponent up to bits 23-30
#else
	lsll	IMM (7),d2
#endif
	swap	d2
	orl	d2,d0		| combine exponent and fraction
	bra	Laddsf$ret
1:
	moveq	IMM (ADD),d5	| operation code for the handler
	bra	Lf$overflow
   2953   1.1       mrg 
Lsubsf$0:
| We are here if a > 0 and b < 0 (sign bits cleared).
| Here we do the subtraction: aligned mantissas in d0-d1 and d2-d3,
| the exponent saved in a0 and the signed value of a in d6.
	movel	d6,d7		| put sign in d7
	andl	IMM (0x80000000),d7

	subl	d3,d1		| result in d0-d1
	subxl	d2,d0		| 64-bit subtract with borrow
	beq	Laddsf$ret	| if zero just exit
	bpl	1f		| if positive skip the following
	bchg	IMM (31),d7	| change sign bit in d7
	negl	d1		| negate the 64-bit result:
	negxl	d0		| low long, then high with the borrow
1:
#ifndef __mcoldfire__
	exg	d2,a0		| now we have the exponent in d2
	lsrw	IMM (8),d2	| put it in the first byte
#else
	movel	d2,d4		| ColdFire has no exg: swap through d4
	movel	a0,d2
	movel	d4,a0
	lsrl	IMM (8),d2	| put it in the first byte
#endif

| Now d0-d1 is positive and the sign bit is in d7.

| Note that we do not have to normalize, since in the subtraction bit
| #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
| the rounding routines themselves.
	lea	pc@(Lsubsf$1),a0 | to return from rounding routine
	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
#ifdef __mcoldfire__
	clrl	d6		| movew below only writes the low word
#endif
	movew	a1@(6),d6	| rounding mode in d6
	beq	Lround$to$nearest
#ifndef __mcoldfire__
	cmpw	IMM (ROUND_TO_PLUS),d6
#else
	cmpl	IMM (ROUND_TO_PLUS),d6
#endif
	bhi	Lround$to$minus
	blt	Lround$to$zero
	bra	Lround$to$plus
Lsubsf$1:
| Put back the exponent (overflow cannot happen in the subtraction).
	bclr	IMM (FLT_MANT_DIG-1),d0	| clear the hidden bit
#ifndef __mcoldfire__
	lslw	IMM (7),d2	| move exponent up to bits 23-30
#else
	lsll	IMM (7),d2
#endif
	swap	d2
	orl	d2,d0		| combine exponent and fraction
	bra	Laddsf$ret
   3009   1.1       mrg 
| If one of the numbers was too small (difference of exponents >=
| FLT_MANT_DIG+2) we return the other (and now we do not have to
| check for finiteness or zero).
Laddsf$a$small:
| a is negligible: reload b from the stack and return it unchanged.
	movel	a6@(12),d0
	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@	| clear the exception flags
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7	| restore data registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6		| and return
	rts
   3026   1.1       mrg 
Laddsf$b$small:
| b is negligible: reload a from the stack and return it unchanged.
	movel	a6@(8),d0
	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@	| clear the exception flags
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7	| restore data registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6		| and return
	rts
   3040   1.1       mrg 
| If the numbers are denormalized remember to put exponent equal to 1.

Laddsf$a$den:
	movel	d5,d6		| d5 contains 0x01000000
	swap	d6		| so d6 gets exponent 1, already in the
	bra	Laddsf$1	| position Laddsf$1 expects
   3047   1.1       mrg 
Laddsf$b$den:
	movel	d5,d7		| exponent 1 for the denormalized b,
	swap	d7		| as done for a above
	notl 	d4		| make d4 into a mask for the fraction
				| (this was not executed after the jump)
	bra	Laddsf$2
   3054   1.1       mrg 
| The rest is mainly code for the different results which can be
| returned (checking always for +/-INFINITY and NaN).

Laddsf$b:
| Return b (if a is zero).
	movel	a6@(12),d0
	cmpl	IMM (0x80000000),d0	| Check if b is -0
	bne	1f
	movel	a0,d7		| a is +/-0 and b is -0: the result is
	andl	IMM (0x80000000),d7	| zero with the sign of a
	clrl	d0
	bra	Laddsf$ret
Laddsf$a:
| Return a (if b is zero).
	movel	a6@(8),d0
1:
	moveq	IMM (ADD),d5	| operation code, in case we reach Lf$inop
| We have to check for NaN and +/-infty.
	movel	d0,d7
	andl	IMM (0x80000000),d7	| put sign in d7
	bclr	IMM (31),d0		| clear sign
	cmpl	IMM (INFINITY),d0	| check for infty or NaN
	bge	2f
	movel	d0,d0		| test d0 for zero (we do this because
	bne	Laddsf$ret	| we do not want to return -0 by
	bclr	IMM (31),d7	| mistake); if zero clear the sign too
	bra	Laddsf$ret	| if everything OK just return
2:
| The value to be returned is either +/-infty or NaN
	andl	IMM (0x007fffff),d0	| check for NaN
	bne	Lf$inop			| if mantissa not zero is NaN
	bra	Lf$infty
   3087   1.1       mrg 
Laddsf$ret:
| Normal exit (a and b nonzero, result is not NaN nor +/-infty).
| We have to clear the exception flags (just the exception type).
| On entry d0 holds the unsigned result and d7 its sign bit.
	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@	| clear the exception flags
	orl	d7,d0		| put the sign bit back
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7	| restore data registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6		| and return
	rts
   3103   1.1       mrg 
Laddsf$ret$den:
| Return a denormalized number (for addition we do not signal underflow).
	lsrl	IMM (1),d0	| remember to shift right back once
	bra	Laddsf$ret	| and return
   3108   1.1       mrg 
| Note: when adding two floats of the same sign if either one is
| NaN we return NaN without regard to whether the other is finite or
| not. When subtracting them (i.e., when adding two numbers of
| opposite signs) things are more complicated: if both are INFINITY
| we return NaN, if only one is INFINITY and the other is NaN we return
| NaN, but if it is finite we return INFINITY with the corresponding sign.

Laddsf$nf:
	moveq	IMM (ADD),d5	| operation code for the handlers
| This could be faster but it is not worth the effort, since it is not
| executed very often. We sacrifice speed for clarity here.
	movel	a6@(8),d0	| get the numbers back (remember that we
	movel	a6@(12),d1	| did some processing already)
	movel	IMM (INFINITY),d4 | useful constant (INFINITY)
	movel	d0,d2		| save sign bits
	movel	d0,d7		| into d7 as well as we may need the sign
				| bit before jumping to Lf$infty
	movel	d1,d3
	bclr	IMM (31),d0	| clear sign bits
	bclr	IMM (31),d1
| We know that one of them is either NaN or +/-INFINITY
| Check for NaN (if either one is NaN return NaN)
	cmpl	d4,d0		| check first a (d0)
	bhi	Lf$inop		| magnitude above INFINITY means NaN
	cmpl	d4,d1		| check now b (d1)
	bhi	Lf$inop
| Now comes the check for +/-INFINITY. We know that both are (maybe not
| finite) numbers, but we have to check if both are infinite whether we
| are adding or subtracting them.
	eorl	d3,d2		| to check sign bits
	bmi	1f		| branch if the signs differ
	andl	IMM (0x80000000),d7	| get (common) sign bit
	bra	Lf$infty
1:
| We know one (or both) are infinite, so we test for equality between the
| two numbers (if they are equal they have to be infinite both, so we
| return NaN).
	cmpl	d1,d0		| are both infinite?
	beq	Lf$inop		| if so return NaN

	andl	IMM (0x80000000),d7 | get the sign bit of a
	cmpl	d4,d0		| test now for infinity
	beq	Lf$infty	| if a is INFINITY return with this sign
	bchg	IMM (31),d7	| else we know b is INFINITY and has
	bra	Lf$infty	| the opposite sign
   3154   1.1       mrg 
   3155   1.1       mrg |=============================================================================
   3156   1.1       mrg |                             __mulsf3
   3157   1.1       mrg |=============================================================================
   3158   1.1       mrg 
| float __mulsf3(float, float);
| IEEE single-precision multiplication.  Arguments on the stack (a at
| a6@(8), b at a6@(12)); result returned in d0.  Registers d2-d7 are
| preserved through the stack frame.
	FUNC(__mulsf3)
SYM (__mulsf3):
#ifndef __mcoldfire__
	link	a6,IMM (0)
	moveml	d2-d7,sp@-	| save all data registers but d0-d1
#else
	link	a6,IMM (-24)	| ColdFire moveml cannot push to sp@-,
	moveml	d2-d7,sp@	| so allocate the frame explicitly
#endif
	movel	a6@(8),d0	| get a into d0
	movel	a6@(12),d1	| and b into d1
	movel	d0,d7		| d7 will hold the sign of the product
	eorl	d1,d7		|
	andl	IMM (0x80000000),d7
	movel	IMM (INFINITY),d6	| useful constant (+INFINITY)
	movel	d6,d5			| another (mask for fraction)
	notl	d5			|
	movel	IMM (0x00800000),d4	| this is to put hidden bit back
	bclr	IMM (31),d0		| get rid of the sign bit of a
	movel	d0,d2			|
	beq	Lmulsf$a$0		| branch if a is zero
	bclr	IMM (31),d1		| get rid of the sign bit of b
	movel	d1,d3		|
	beq	Lmulsf$b$0	| branch if b is zero
	cmpl	d6,d0		| is a big?
	bhi	Lmulsf$inop	| if a is NaN return NaN
	beq	Lmulsf$inf	| if a is INFINITY we have to check b
	cmpl	d6,d1		| now compare b with INFINITY
	bhi	Lmulsf$inop	| is b NaN?
	beq	Lmulsf$overflow | is b INFINITY?
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d2 and d3.
	andl	d6,d2		| and isolate exponent in d2
	beq	Lmulsf$a$den	| if exponent is zero we have a denormalized
	andl	d5,d0		| and isolate fraction
	orl	d4,d0		| and put hidden bit back
	swap	d2		| I like exponents in the first byte
#ifndef __mcoldfire__
	lsrw	IMM (7),d2	|
#else
	lsrl	IMM (7),d2	|
#endif
Lmulsf$1:			| (a was a denormalized number)
| Now do exactly the same unpacking for b.
	andl	d6,d3		| isolate the exponent of b
	beq	Lmulsf$b$den	| denormalized, as above
	andl	d5,d1		| isolate fraction
	orl	d4,d1		| put the hidden bit back
	swap	d3		| exponent into the first byte
#ifndef __mcoldfire__
	lsrw	IMM (7),d3	|
#else
	lsrl	IMM (7),d3	|
#endif
Lmulsf$2:			| (b was a denormalized number)
#ifndef __mcoldfire__
	addw	d3,d2		| add exponents
	subw	IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#else
	addl	d3,d2		| add exponents
	subl	IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#endif

| We are now ready to do the multiplication. The situation is as follows:
| both a and b have bit FLT_MANT_DIG-1 set (even if they were
| denormalized to start with!), which means that in the product
| bit 2*(FLT_MANT_DIG-1) (that is, bit 2*FLT_MANT_DIG-2-32 of the
| high long) is set.

| To do the multiplication let us move the number a little bit around ...
	movel	d1,d6		| second operand in d6
	movel	d0,d5		| first operand in d4-d5
	movel	IMM (0),d4
	movel	d4,d1		| the sums will go in d0-d1
	movel	d4,d0

| now bit FLT_MANT_DIG-1 becomes bit 31:
	lsll	IMM (31-FLT_MANT_DIG+1),d6

| Start the loop (we loop #FLT_MANT_DIG times): classic shift-and-add
| multiply, doubling the partial product d0-d1 each iteration and
| adding a (d4-d5) when the next bit of b is set.
	moveq	IMM (FLT_MANT_DIG-1),d3
1:	addl	d1,d1		| shift sum
	addxl	d0,d0
	lsll	IMM (1),d6	| get bit bn
	bcc	2f		| if not set skip sum
	addl	d5,d1		| add a
	addxl	d4,d0
2:
#ifndef __mcoldfire__
	dbf	d3,1b		| loop back
#else
	subql	IMM (1),d3	| ColdFire has no dbf
	bpl	1b
#endif

| Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
| (mod 32) of d0 set. The first thing to do now is to normalize it so bit
| FLT_MANT_DIG is set (to do the rounding).
| The sequences below shift the 64-bit product d0-d1 left, moving the
| top bits of d1 (collected in d3) into the low bits of d0.
#ifndef __mcoldfire__
	rorl	IMM (6),d1
	swap	d1
	movew	d1,d3
	andw	IMM (0x03ff),d3
	andw	IMM (0xfd00),d1
#else
	movel	d1,d3
	lsll	IMM (8),d1
	addl	d1,d1
	addl	d1,d1
	moveq	IMM (22),d5
	lsrl	d5,d3
	orl	d3,d1
	andl	IMM (0xfffffd00),d1
#endif
	lsll	IMM (8),d0	| shift d0 left by 10: lsll #8
	addl	d0,d0		| plus two doublings
	addl	d0,d0
#ifndef __mcoldfire__
	orw	d3,d0		| merge the bits that came from d1
#else
	orl	d3,d0
#endif

	moveq	IMM (MULTIPLY),d5	| operation code for the handlers

	btst	IMM (FLT_MANT_DIG+1),d0	| one extra bit in the product?
	beq	Lround$exit
#ifndef __mcoldfire__
	lsrl	IMM (1),d0	| then shift right once more
	roxrl	IMM (1),d1	| and bump the exponent
	addw	IMM (1),d2
#else
	lsrl	IMM (1),d1	| ColdFire roxrl emulation
	btst	IMM (0),d0
	beq	10f
	bset	IMM (31),d1
10:	lsrl	IMM (1),d0
	addql	IMM (1),d2
#endif
	bra	Lround$exit
   3299   1.1       mrg 
   3300   1.1       mrg Lmulsf$inop:
   3301   1.1       mrg 	moveq	IMM (MULTIPLY),d5
   3302   1.1       mrg 	bra	Lf$inop
   3303   1.1       mrg 
   3304   1.1       mrg Lmulsf$overflow:
   3305   1.1       mrg 	moveq	IMM (MULTIPLY),d5
   3306   1.1       mrg 	bra	Lf$overflow
   3307   1.1       mrg 
   3308   1.1       mrg Lmulsf$inf:
   3309   1.1       mrg 	moveq	IMM (MULTIPLY),d5
   3310   1.1       mrg | If either is NaN return NaN; else both are (maybe infinite) numbers, so
   3311   1.1       mrg | return INFINITY with the correct sign (which is in d7).
   3312   1.1       mrg 	cmpl	d6,d1		| is b NaN?
   3313   1.1       mrg 	bhi	Lf$inop		| if so return NaN
   3314   1.1       mrg 	bra	Lf$overflow	| else return +/-INFINITY
   3315   1.1       mrg 
   3316   1.1       mrg | If either number is zero return zero, unless the other is +/-INFINITY,
   3317   1.1       mrg | or NaN, in which case we return NaN.
   3318   1.1       mrg Lmulsf$b$0:
   3319   1.1       mrg | Here d1 (==b) is zero.
   3320   1.1       mrg 	movel	a6@(8),d1	| get a again to check for non-finiteness
   3321   1.1       mrg 	bra	1f
   3322   1.1       mrg Lmulsf$a$0:
   3323   1.1       mrg 	movel	a6@(12),d1	| get b again to check for non-finiteness
   3324   1.1       mrg 1:	bclr	IMM (31),d1	| clear sign bit
   3325   1.1       mrg 	cmpl	IMM (INFINITY),d1 | and check for a large exponent
   3326   1.1       mrg 	bge	Lf$inop		| if b is +/-INFINITY or NaN return NaN
   3327   1.1       mrg 	movel	d7,d0		| else return signed zero
   3328   1.1       mrg 	PICLEA	SYM (_fpCCR),a0	|
   3329   1.1       mrg 	movew	IMM (0),a0@	|
   3330   1.1       mrg #ifndef __mcoldfire__
   3331   1.1       mrg 	moveml	sp@+,d2-d7	|
   3332   1.1       mrg #else
   3333   1.1       mrg 	moveml	sp@,d2-d7
   3334   1.1       mrg 	| XXX if frame pointer is ever removed, stack pointer must
   3335   1.1       mrg 	| be adjusted here.
   3336   1.1       mrg #endif
   3337   1.1       mrg 	unlk	a6		|
   3338   1.1       mrg 	rts			|
   3339   1.1       mrg 
   3340   1.1       mrg | If a number is denormalized we put an exponent of 1 but do not put the
   3341   1.1       mrg | hidden bit back into the fraction; instead we shift left until bit 23
   3342   1.1       mrg | (the hidden bit) is set, adjusting the exponent accordingly. We do this
   3343   1.1       mrg | to ensure that the product of the fractions is close to 1.
   3344   1.1       mrg Lmulsf$a$den:
   3345   1.1       mrg 	movel	IMM (1),d2
   3346   1.1       mrg 	andl	d5,d0
   3347   1.1       mrg 1:	addl	d0,d0		| shift a left (until bit 23 is set)
   3348   1.1       mrg #ifndef __mcoldfire__
   3349   1.1       mrg 	subw	IMM (1),d2	| and adjust exponent
   3350   1.1       mrg #else
   3351   1.1       mrg 	subql	IMM (1),d2	| and adjust exponent
   3352   1.1       mrg #endif
   3353   1.1       mrg 	btst	IMM (FLT_MANT_DIG-1),d0
   3354   1.1       mrg 	bne	Lmulsf$1	|
   3355   1.1       mrg 	bra	1b		| else loop back
   3356   1.1       mrg 
   3357   1.1       mrg Lmulsf$b$den:
   3358   1.1       mrg 	movel	IMM (1),d3
   3359   1.1       mrg 	andl	d5,d1
   3360   1.1       mrg 1:	addl	d1,d1		| shift b left until bit 23 is set
   3361   1.1       mrg #ifndef __mcoldfire__
   3362   1.1       mrg 	subw	IMM (1),d3	| and adjust exponent
   3363   1.1       mrg #else
   3364   1.1       mrg 	subql	IMM (1),d3	| and adjust exponent
   3365   1.1       mrg #endif
   3366   1.1       mrg 	btst	IMM (FLT_MANT_DIG-1),d1
   3367   1.1       mrg 	bne	Lmulsf$2	|
   3368   1.1       mrg 	bra	1b		| else loop back
   3369   1.1       mrg 
   3370   1.1       mrg |=============================================================================
   3371   1.1       mrg |                             __divsf3
   3372   1.1       mrg |=============================================================================
   3373   1.1       mrg 
| float __divsf3(float, float);
|
| Soft-float single-precision divide (no FPU assumed).
| Stack arguments: a6@(8) = a (dividend), a6@(12) = b (divisor).
| Result is returned in d0.  Special operands (zero, INFINITY, NaN,
| denormals) are dispatched to the Ldivsf$* entry points below; the
| common rounding/repacking path is shared with __mulsf3 (Lround$exit).
	FUNC(__divsf3)
SYM (__divsf3):
#ifndef __mcoldfire__
	link	a6,IMM (0)
	moveml	d2-d7,sp@-	| save callee-saved data registers
#else
	link	a6,IMM (-24)	| ColdFire moveml cannot predecrement,
	moveml	d2-d7,sp@	| so reserve the 24 bytes in the link
#endif
	movel	a6@(8),d0		| get a into d0
	movel	a6@(12),d1		| and b into d1
	movel	d0,d7			| d7 will hold the sign of the result
	eorl	d1,d7			| sign(a/b) = sign(a) XOR sign(b)
	andl	IMM (0x80000000),d7	|
	movel	IMM (INFINITY),d6	| useful constant (+INFINITY)
	movel	d6,d5			| another (mask for fraction)
	notl	d5			| d5 = ~INFINITY (sign + fraction bits)
	movel	IMM (0x00800000),d4	| this is to put hidden bit back
	bclr	IMM (31),d0		| get rid of a's sign bit '
	movel	d0,d2			|
	beq	Ldivsf$a$0		| branch if a is zero
	bclr	IMM (31),d1		| get rid of b's sign bit '
	movel	d1,d3			|
	beq	Ldivsf$b$0		| branch if b is zero
	cmpl	d6,d0			| is a big?
	bhi	Ldivsf$inop		| if a is NaN return NaN
	beq	Ldivsf$inf		| if a is INFINITY we have to check b
	cmpl	d6,d1			| now compare b with INFINITY
	bhi	Ldivsf$inop		| if b is NaN return NaN
	beq	Ldivsf$underflow	| finite/INFINITY -> signed zero (underflow path)
| Here we have both numbers finite and nonzero (and with no sign bit).
| Now we get the exponents into d2 and d3 and normalize the numbers to
| ensure that the ratio of the fractions is close to 1. We do this by
| making sure that bit #FLT_MANT_DIG-1 (hidden bit) is set.
	andl	d6,d2		| and isolate exponent in d2
	beq	Ldivsf$a$den	| if exponent is zero we have a denormalized
	andl	d5,d0		| and isolate fraction
	orl	d4,d0		| and put hidden bit back
	swap	d2		| I like exponents in the first byte
#ifndef __mcoldfire__
	lsrw	IMM (7),d2	|
#else
	lsrl	IMM (7),d2	|
#endif
Ldivsf$1:			| re-entry point after normalizing a denormal a
	andl	d6,d3		| same treatment for b: isolate exponent,
	beq	Ldivsf$b$den	| handle denormal,
	andl	d5,d1		| isolate fraction,
	orl	d4,d1		| restore hidden bit,
	swap	d3		| and move exponent to the low byte
#ifndef __mcoldfire__
	lsrw	IMM (7),d3	|
#else
	lsrl	IMM (7),d3	|
#endif
Ldivsf$2:			| re-entry point after normalizing a denormal b
#ifndef __mcoldfire__
	subw	d3,d2		| subtract exponents
 	addw	IMM (F_BIAS),d2	| and add bias
#else
	subl	d3,d2		| subtract exponents
 	addl	IMM (F_BIAS),d2	| and add bias
#endif

| We are now ready to do the division. We have prepared things in such a way
| that the ratio of the fractions will be less than 2 but greater than 1/2.
| At this point the registers in use are:
| d0	holds a (first operand, bit FLT_MANT_DIG=0, bit FLT_MANT_DIG-1=1)
| d1	holds b (second operand, bit FLT_MANT_DIG=1)
| d2	holds the difference of the exponents, corrected by the bias
| d7	holds the sign of the ratio
| d4, d5, d6 hold some constants
	movel	d7,a0		| d6-d7 will hold the ratio of the fractions
	movel	IMM (0),d6	| (so stash the sign in a0 for the duration)
	movel	d6,d7

| Classic restoring division: one quotient bit per iteration, built
| into d6 from bit FLT_MANT_DIG+1 downwards.
	moveq	IMM (FLT_MANT_DIG+1),d3
1:	cmpl	d0,d1		| is a < b?
	bhi	2f		|
	bset	d3,d6		| set a bit in d6
	subl	d1,d0		| if a >= b  a <-- a-b
	beq	3f		| if a is zero, exit
2:	addl	d0,d0		| multiply a by 2
#ifndef __mcoldfire__
	dbra	d3,1b
#else
	subql	IMM (1),d3
	bpl	1b
#endif

| Now we keep going to set the sticky bit ...
| (If a nonzero remainder survives, record one bit in d1 so rounding
| can tell the result is inexact.)
	moveq	IMM (FLT_MANT_DIG),d3
1:	cmpl	d0,d1
	ble	2f
	addl	d0,d0
#ifndef __mcoldfire__
	dbra	d3,1b
#else
	subql	IMM(1),d3
	bpl	1b
#endif
	movel	IMM (0),d1
	bra	3f
2:	movel	IMM (0),d1
#ifndef __mcoldfire__
	subw	IMM (FLT_MANT_DIG),d3
	addw	IMM (31),d3
#else
	subl	IMM (FLT_MANT_DIG),d3
	addl	IMM (31),d3
#endif
	bset	d3,d1
3:
	movel	d6,d0		| put the ratio in d0-d1
	movel	a0,d7		| get sign back

| Because of the normalization we did before we are guaranteed that
| d0 is smaller than 2^26 but larger than 2^24. Thus bit 26 is not set,
| bit 25 could be set, and if it is not set then bit 24 is necessarily set.
	btst	IMM (FLT_MANT_DIG+1),d0
	beq	1f              | if it is not set, then bit 24 is set
	lsrl	IMM (1),d0	| otherwise shift down once and bump exponent
#ifndef __mcoldfire__
	addw	IMM (1),d2	|
#else
	addl	IMM (1),d2	|
#endif
1:
| Now round, check for over- and underflow, and exit.
	moveq	IMM (DIVIDE),d5
	bra	Lround$exit
   3506   1.1       mrg 
| Error tails for __divsf3: each records DIVIDE as the failing operation
| in d5 and jumps to the shared single-float error handlers.
Ldivsf$inop:
	moveq	IMM (DIVIDE),d5
	bra	Lf$inop		| return NaN, signal INVALID_OPERATION

Ldivsf$overflow:
	moveq	IMM (DIVIDE),d5
	bra	Lf$overflow	| return +/-INFINITY, signal overflow

Ldivsf$underflow:
	moveq	IMM (DIVIDE),d5
	bra	Lf$underflow	| return zero, signal underflow
   3518   1.1       mrg 
Ldivsf$a$0:
	moveq	IMM (DIVIDE),d5
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
| return a properly signed zero (0/x == +/-0 for finite nonzero x).
	andl	IMM (0x7fffffff),d1	| clear sign bit and test b
	beq	Lf$inop			| if b is also zero return NaN (0/0)
	cmpl	IMM (INFINITY),d1	| check for NaN
	bhi	Lf$inop			|
	movel	d7,d0			| else return signed zero
	PICLEA	SYM (_fpCCR),a0		| clear the fp condition-code word
	movew	IMM (0),a0@		|
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7		| restore callee-saved registers
#else
	moveml	sp@,d2-d7		|
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6			|
	rts				|
   3540   1.1       mrg 
Ldivsf$b$0:
	moveq	IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
	cmpl	IMM (INFINITY),d0	| compare d0 with INFINITY
	bhi	Lf$inop			| if larger it is NaN
	bra	Lf$div$0		| else signal DIVIDE_BY_ZERO

Ldivsf$inf:
	moveq	IMM (DIVIDE),d5
| If a is INFINITY we have to check b (b's sign bit is already cleared,
| so a signed compare against +INFINITY is safe here).
	cmpl	IMM (INFINITY),d1	| compare b with INFINITY
	bge	Lf$inop			| if b is NaN or INFINITY return NaN
	bra	Lf$overflow		| else return overflow
   3556   1.1       mrg 
| If a number is denormalized we put an exponent of 1 but do not put the
| bit back into the fraction. Instead we shift the fraction left until the
| hidden-bit position is set, decrementing the exponent once per shift,
| then resume the main path.
Ldivsf$a$den:
	movel	IMM (1),d2	| start a's exponent at 1
	andl	d5,d0		| keep only the fraction bits
1:	addl	d0,d0		| shift a left until bit FLT_MANT_DIG-1 is set
#ifndef __mcoldfire__
	subw	IMM (1),d2	| and adjust exponent
#else
	subl	IMM (1),d2	| and adjust exponent
#endif
	btst	IMM (FLT_MANT_DIG-1),d0
	bne	Ldivsf$1	| normalized: rejoin main path (b's turn)
	bra	1b

Ldivsf$b$den:
	movel	IMM (1),d3	| start b's exponent at 1
	andl	d5,d1		| keep only the fraction bits
1:	addl	d1,d1		| shift b left until bit FLT_MANT_DIG is set
#ifndef __mcoldfire__
	subw	IMM (1),d3	| and adjust exponent
#else
	subl	IMM (1),d3	| and adjust exponent
#endif
	btst	IMM (FLT_MANT_DIG-1),d1
	bne	Ldivsf$2	| normalized: rejoin main path (exponent math)
	bra	1b
   3584   1.1       mrg 
Lround$exit:
| This is a common exit point for __mulsf3 and __divsf3.
| On entry: d0-d1 = unrounded result fraction, d2 = biased exponent,
| d5 = operation code, d7 = sign bit of the result.

| First check for underflow in the exponent:
#ifndef __mcoldfire__
	cmpw	IMM (-FLT_MANT_DIG-1),d2
#else
	cmpl	IMM (-FLT_MANT_DIG-1),d2
#endif
	blt	Lf$underflow
| It could happen that the exponent is less than 1, in which case the
| number is denormalized. In this case we shift right and adjust the
| exponent until it becomes 1 or the fraction is zero (in the latter case
| we signal underflow and return zero).
	movel	IMM (0),d6	| d6 is used temporarily
#ifndef __mcoldfire__
	cmpw	IMM (1),d2	| if the exponent is less than 1 we
#else
	cmpl	IMM (1),d2	| if the exponent is less than 1 we
#endif
	bge	2f		| have to shift right (denormalize)
1:
#ifndef __mcoldfire__
	addw	IMM (1),d2	| adjust the exponent
	lsrl	IMM (1),d0	| shift right once
	roxrl	IMM (1),d1	|
	roxrl	IMM (1),d6	| d6 collect bits we would lose otherwise
	cmpw	IMM (1),d2	| is the exponent 1 already?
#else
	addql	IMM (1),d2	| adjust the exponent
	lsrl	IMM (1),d6	| ColdFire lacks roxr: do the 96-bit right
	btst	IMM (0),d1	| shift d0:d1:d6 by hand, carrying each
	beq	11f		| displaced low bit into the next word
	bset	IMM (31),d6
11:	lsrl	IMM (1),d1
	btst	IMM (0),d0
	beq	10f
	bset	IMM (31),d1
10:	lsrl	IMM (1),d0
	cmpl	IMM (1),d2	| is the exponent 1 already?
#endif
	beq	2f		| exponent reached 1: stop denormalizing
	bra	1b              | otherwise keep shifting
	bra	Lf$underflow	| safety check, shouldn't execute '
2:	orl	d6,d1		| this is a trick so we don't lose  '
				| the extra bits which were flushed right
| Now call the rounding routine (which takes care of denormalized numbers):
	lea	pc@(Lround$0),a0 | to return from rounding routine
	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
#ifdef __mcoldfire__
	clrl	d6		| movew below only writes the low word
#endif
	movew	a1@(6),d6	| rounding mode in d6
	beq	Lround$to$nearest
#ifndef __mcoldfire__
	cmpw	IMM (ROUND_TO_PLUS),d6
#else
	cmpl	IMM (ROUND_TO_PLUS),d6
#endif
	bhi	Lround$to$minus
	blt	Lround$to$zero
	bra	Lround$to$plus
Lround$0:
| Here we have a correctly rounded result (either normalized or denormalized).

| Here we should have either a normalized number or a denormalized one, and
| the exponent is necessarily larger or equal to 1 (so we don't have to  '
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 255).
#ifndef __mcoldfire__
	cmpw	IMM (0x00ff),d2
#else
	cmpl	IMM (0x00ff),d2
#endif
	bge	Lf$overflow
| Now check for a denormalized number (exponent==0).
	movew	d2,d2		| only sets the flags (test low word of d2)
	beq	Lf$den
1:
| Put back the exponents and sign and return.
#ifndef __mcoldfire__
	lslw	IMM (7),d2	| exponent back to fourth byte
#else
	lsll	IMM (7),d2	| exponent back to fourth byte
#endif
	bclr	IMM (FLT_MANT_DIG-1),d0
	swap	d0		| and put back exponent
#ifndef __mcoldfire__
	orw	d2,d0		|
#else
	orl	d2,d0
#endif
	swap	d0		|
	orl	d7,d0		| and sign also

	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6
	rts
   3692   1.1       mrg 
   3693   1.1       mrg |=============================================================================
   3694   1.1       mrg |                             __negsf2
   3695   1.1       mrg |=============================================================================
   3696   1.1       mrg 
| This is trivial and could be shorter if we didn't bother checking for NaN '
| and +/-INFINITY.

| float __negsf2(float);
| Negate a single-precision float: flip the sign bit, but return NaN for
| NaN input, signed INFINITY for infinite input, and +0 for zero input.
	FUNC(__negsf2)
SYM (__negsf2):
#ifndef __mcoldfire__
	link	a6,IMM (0)
	moveml	d2-d7,sp@-
#else
	link	a6,IMM (-24)
	moveml	d2-d7,sp@
#endif
	moveq	IMM (NEGATE),d5
	movel	a6@(8),d0	| get number to negate in d0
	bchg	IMM (31),d0	| negate
	movel	d0,d1		| make a positive copy
	bclr	IMM (31),d1	|
	tstl	d1		| check for zero
	beq	2f		| if zero (either sign) return +zero
	cmpl	IMM (INFINITY),d1 | compare to +INFINITY
	blt	1f		| finite: just return the flipped value
	bhi	Lf$inop		| if larger (fraction not zero) is NaN
	movel	d0,d7		| else get sign and return INFINITY
	andl	IMM (0x80000000),d7
	bra	Lf$infty
1:	PICLEA	SYM (_fpCCR),a0	| clear fp condition codes and return d0
	movew	IMM (0),a0@
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6
	rts
2:	bclr	IMM (31),d0	| force +0 and take the normal exit
	bra	1b
   3736   1.1       mrg 
   3737   1.1       mrg |=============================================================================
   3738   1.1       mrg |                             __cmpsf2
   3739   1.1       mrg |=============================================================================
   3740   1.1       mrg 
   3741   1.1       mrg GREATER =  1
   3742   1.1       mrg LESS    = -1
   3743   1.1       mrg EQUAL   =  0
   3744   1.1       mrg 
| int __cmpsf2_internal(float, float, int);
| Compare two single-precision floats: returns GREATER (1), LESS (-1) or
| EQUAL (0).  The third stack argument (a6@(16)) is the value loaded into
| d0 before invoking the exception handler when either operand is NaN.
SYM (__cmpsf2_internal):
#ifndef __mcoldfire__
	link	a6,IMM (0)
	moveml	d2-d7,sp@- 	| save registers
#else
	link	a6,IMM (-24)
	moveml	d2-d7,sp@
#endif
	moveq	IMM (COMPARE),d5
	movel	a6@(8),d0	| get first operand
	movel	a6@(12),d1	| get second operand
| Check if either is NaN, and in that case return garbage and signal
| INVALID_OPERATION. Check also if either is zero, and clear the signs
| if necessary.
	movel	d0,d6		| d6 keeps a's original (signed) value
	andl	IMM (0x7fffffff),d0
	beq	Lcmpsf$a$0
	cmpl	IMM (0x7f800000),d0
	bhi	Lcmpf$inop	| |a| > INFINITY means a is NaN
Lcmpsf$1:
	movel	d1,d7		| d7 keeps b's original (signed) value
	andl	IMM (0x7fffffff),d1
	beq	Lcmpsf$b$0
	cmpl	IMM (0x7f800000),d1
	bhi	Lcmpf$inop	| |b| > INFINITY means b is NaN
Lcmpsf$2:
| Check the signs
	eorl	d6,d7		| sign(d7) set iff signs differ
	bpl	1f
| If the signs are not equal check if a >= 0
	tstl	d6
	bpl	Lcmpsf$a$gt$b	| if (a >= 0 && b < 0) => a > b
	bmi	Lcmpsf$b$gt$a	| if (a < 0 && b >= 0) => a < b
1:
| If the signs are equal check for < 0
	tstl	d6
	bpl	1f
| If both are negative exchange them
#ifndef __mcoldfire__
	exg	d0,d1
#else
	movel	d0,d7
	movel	d1,d0
	movel	d7,d1
#endif
1:
| Now that they are positive we just compare them as longs (does this also
| work for denormalized numbers?).
	cmpl	d0,d1
	bhi	Lcmpsf$b$gt$a	| |b| > |a|
	bne	Lcmpsf$a$gt$b	| |b| < |a|
| If we got here a == b.
	movel	IMM (EQUAL),d0
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7 	| put back the registers
#else
	moveml	sp@,d2-d7
#endif
	unlk	a6
	rts
Lcmpsf$a$gt$b:
	movel	IMM (GREATER),d0
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7 	| put back the registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6
	rts
Lcmpsf$b$gt$a:
	movel	IMM (LESS),d0
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7 	| put back the registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6
	rts

| Zero operands: clear the sign so -0 and +0 compare equal.
Lcmpsf$a$0:
	bclr	IMM (31),d6
	bra	Lcmpsf$1
Lcmpsf$b$0:
	bclr	IMM (31),d7
	bra	Lcmpsf$2

| NaN operand: return the caller-supplied third argument through the
| exception handler, flagging INVALID_OPERATION on a single float.
Lcmpf$inop:
	movl	a6@(16),d0
	moveq	IMM (INEXACT_RESULT+INVALID_OPERATION),d7
	moveq	IMM (SINGLE_FLOAT),d6
	PICJUMP	$_exception_handler
   3841   1.1       mrg 
| int __cmpsf2(float, float);
| Public entry point: forwards to __cmpsf2_internal with 1 as the third
| argument (the value returned when an operand is NaN).
	FUNC(__cmpsf2)
SYM (__cmpsf2):
	link	a6,IMM (0)
	pea	1		| NaN return value
	movl	a6@(12),sp@-	| push b
	movl	a6@(8),sp@-	| push a
	PICCALL SYM (__cmpsf2_internal)
	unlk	a6		| unlk reclaims the pushed arguments
	rts
   3852   1.1       mrg 
   3853   1.1       mrg |=============================================================================
   3854   1.1       mrg |                           rounding routines
   3855   1.1       mrg |=============================================================================
   3856   1.1       mrg 
   3857   1.1       mrg | The rounding routines expect the number to be normalized in registers
   3858   1.1       mrg | d0-d1, with the exponent in register d2. They assume that the
   3859   1.1       mrg | exponent is larger or equal to 1. They return a properly normalized number
   3860   1.1       mrg | if possible, and a denormalized number otherwise. The exponent is returned
   3861   1.1       mrg | in d2.
   3862   1.1       mrg 
   3863   1.1       mrg Lround$to$nearest:
   3864   1.1       mrg | We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
   3865   1.1       mrg | Here we assume that the exponent is not too small (this should be checked
   3866   1.1       mrg | before entering the rounding routine), but the number could be denormalized.
   3867   1.1       mrg 
   3868   1.1       mrg | Check for denormalized numbers:
   3869   1.1       mrg 1:	btst	IMM (FLT_MANT_DIG),d0
   3870   1.1       mrg 	bne	2f		| if set the number is normalized
   3871   1.1       mrg | Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
   3872   1.1       mrg | is one (remember that a denormalized number corresponds to an
   3873   1.1       mrg | exponent of -F_BIAS+1).
   3874   1.1       mrg #ifndef __mcoldfire__
   3875   1.1       mrg 	cmpw	IMM (1),d2	| remember that the exponent is at least one
   3876   1.1       mrg #else
   3877   1.1       mrg 	cmpl	IMM (1),d2	| remember that the exponent is at least one
   3878   1.1       mrg #endif
   3879   1.1       mrg  	beq	2f		| an exponent of one means denormalized
   3880   1.1       mrg 	addl	d1,d1		| else shift and adjust the exponent
   3881   1.1       mrg 	addxl	d0,d0		|
   3882   1.1       mrg #ifndef __mcoldfire__
   3883   1.1       mrg 	dbra	d2,1b		|
   3884   1.1       mrg #else
   3885   1.1       mrg 	subql	IMM (1),d2
   3886   1.1       mrg 	bpl	1b
   3887   1.1       mrg #endif
   3888   1.1       mrg 2:
   3889   1.1       mrg | Now round: we do it as follows: after the shifting we can write the
   3890   1.1       mrg | fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
   3891   1.1       mrg | If delta < 1, do nothing. If delta > 1, add 1 to f.
   3892   1.1       mrg | If delta == 1, we make sure the rounded number will be even (odd?)
   3893   1.1       mrg | (after shifting).
   3894   1.1       mrg 	btst	IMM (0),d0	| is delta < 1?
   3895   1.1       mrg 	beq	2f		| if so, do not do anything
   3896   1.1       mrg 	tstl	d1		| is delta == 1?
   3897   1.1       mrg 	bne	1f		| if so round to even
   3898   1.1       mrg 	movel	d0,d1		|
   3899   1.1       mrg 	andl	IMM (2),d1	| bit 1 is the last significant bit
   3900   1.1       mrg 	addl	d1,d0		|
   3901   1.1       mrg 	bra	2f		|
   3902   1.1       mrg 1:	movel	IMM (1),d1	| else add 1
   3903   1.1       mrg 	addl	d1,d0		|
   3904   1.1       mrg | Shift right once (because we used bit #FLT_MANT_DIG!).
   3905   1.1       mrg 2:	lsrl	IMM (1),d0
   3906   1.1       mrg | Now check again bit #FLT_MANT_DIG (rounding could have produced a
   3907   1.1       mrg | 'fraction overflow' ...).
   3908   1.1       mrg 	btst	IMM (FLT_MANT_DIG),d0
   3909   1.1       mrg 	beq	1f
   3910   1.1       mrg 	lsrl	IMM (1),d0
   3911   1.1       mrg #ifndef __mcoldfire__
   3912   1.1       mrg 	addw	IMM (1),d2
   3913   1.1       mrg #else
   3914   1.1       mrg 	addql	IMM (1),d2
   3915   1.1       mrg #endif
   3916   1.1       mrg 1:
   3917   1.1       mrg | If bit #FLT_MANT_DIG-1 is clear we have a denormalized number, so we
   3918   1.1       mrg | have to put the exponent to zero and return a denormalized number.
   3919   1.1       mrg 	btst	IMM (FLT_MANT_DIG-1),d0
   3920   1.1       mrg 	beq	1f
   3921   1.1       mrg 	jmp	a0@
   3922   1.1       mrg 1:	movel	IMM (0),d2
   3923   1.1       mrg 	jmp	a0@
   3924   1.1       mrg 
| The non-default rounding modes are not separately implemented: round
| toward zero, toward +infinity and toward -infinity all share one entry
| point that returns immediately through a0 (the continuation address
| used throughout this normalization code), leaving the already-shifted
| fraction untouched — i.e. they behave as simple truncation.
Lround$to$zero:
Lround$to$plus:
Lround$to$minus:
	jmp	a0@
#endif /* L_float */
   3930   1.1       mrg 
   3931   1.1       mrg | gcc expects the routines __eqdf2, __nedf2, __gtdf2, __gedf2,
   3932   1.1       mrg | __ledf2, __ltdf2 to all return the same value as a direct call to
   3933   1.1       mrg | __cmpdf2 would.  In this implementation, each of these routines
   3934   1.1       mrg | simply calls __cmpdf2.  It would be more efficient to give the
   3935   1.1       mrg | __cmpdf2 routine several names, but separating them out will make it
   3936   1.1       mrg | easier to write efficient versions of these routines someday.
| If the operands compare unordered, __gtdf2 and __gedf2 return -1.
   3938   1.1       mrg | The other routines return 1.
   3939   1.1       mrg 
#ifdef  L_eqdf2
	.text
	FUNC(__eqdf2)
	.globl	SYM (__eqdf2)

| int __eqdf2 (double a, double b);
| Returns the same value a direct call to __cmpdf2 would (see the
| comment block above).  Thin trampoline: re-pushes both doubles plus
| the value to be returned when the operands compare unordered (1 here;
| only __gtdf2/__gedf2 use -1), then calls __cmpdf2_internal, whose
| return value is passed through unchanged.
SYM (__eqdf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8)..a6@(20)
	pea	1		| unordered-result argument = 1
	movl	a6@(20),sp@-	| b, second longword
	movl	a6@(16),sp@-	| b, first longword
	movl	a6@(12),sp@-	| a, second longword
	movl	a6@(8),sp@-	| a, first longword
	PICCALL	SYM (__cmpdf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_eqdf2 */
   3955   1.1       mrg 
#ifdef  L_nedf2
	.text
	FUNC(__nedf2)
	.globl	SYM (__nedf2)

| int __nedf2 (double a, double b);
| Returns the same value a direct call to __cmpdf2 would.  Trampoline
| identical to __eqdf2: pushes the unordered-case result (1) and both
| doubles, then calls __cmpdf2_internal and passes its result through.
SYM (__nedf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8)..a6@(20)
	pea	1		| unordered-result argument = 1
	movl	a6@(20),sp@-	| b, second longword
	movl	a6@(16),sp@-	| b, first longword
	movl	a6@(12),sp@-	| a, second longword
	movl	a6@(8),sp@-	| a, first longword
	PICCALL	SYM (__cmpdf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_nedf2 */
   3971   1.1       mrg 
#ifdef  L_gtdf2
	.text
	FUNC(__gtdf2)
	.globl	SYM (__gtdf2)

| int __gtdf2 (double a, double b);
| Returns the same value a direct call to __cmpdf2 would.  Unlike the
| equality routines, the unordered-case result pushed here is -1 (per
| the comment block above, __gtdf2/__gedf2 return -1 on unordered
| operands so that "greater" tests fail on NaNs).
SYM (__gtdf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8)..a6@(20)
	pea	-1		| unordered-result argument = -1
	movl	a6@(20),sp@-	| b, second longword
	movl	a6@(16),sp@-	| b, first longword
	movl	a6@(12),sp@-	| a, second longword
	movl	a6@(8),sp@-	| a, first longword
	PICCALL	SYM (__cmpdf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_gtdf2 */
   3987   1.1       mrg 
#ifdef  L_gedf2
	.text
	FUNC(__gedf2)
	.globl	SYM (__gedf2)

| int __gedf2 (double a, double b);
| Returns the same value a direct call to __cmpdf2 would.  As with
| __gtdf2, the unordered-case result pushed here is -1 so that
| "greater-or-equal" tests fail on NaNs.
SYM (__gedf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8)..a6@(20)
	pea	-1		| unordered-result argument = -1
	movl	a6@(20),sp@-	| b, second longword
	movl	a6@(16),sp@-	| b, first longword
	movl	a6@(12),sp@-	| a, second longword
	movl	a6@(8),sp@-	| a, first longword
	PICCALL	SYM (__cmpdf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_gedf2 */
   4003   1.1       mrg 
#ifdef  L_ltdf2
	.text
	FUNC(__ltdf2)
	.globl	SYM (__ltdf2)

| int __ltdf2 (double a, double b);
| Returns the same value a direct call to __cmpdf2 would.  The
| unordered-case result pushed here is 1 so that "less-than" tests
| fail on NaNs.
SYM (__ltdf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8)..a6@(20)
	pea	1		| unordered-result argument = 1
	movl	a6@(20),sp@-	| b, second longword
	movl	a6@(16),sp@-	| b, first longword
	movl	a6@(12),sp@-	| a, second longword
	movl	a6@(8),sp@-	| a, first longword
	PICCALL	SYM (__cmpdf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_ltdf2 */
   4019   1.1       mrg 
#ifdef  L_ledf2
	.text
	FUNC(__ledf2)
	.globl	SYM (__ledf2)

| int __ledf2 (double a, double b);
| Returns the same value a direct call to __cmpdf2 would.  The
| unordered-case result pushed here is 1 so that "less-or-equal" tests
| fail on NaNs.
SYM (__ledf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8)..a6@(20)
	pea	1		| unordered-result argument = 1
	movl	a6@(20),sp@-	| b, second longword
	movl	a6@(16),sp@-	| b, first longword
	movl	a6@(12),sp@-	| a, second longword
	movl	a6@(8),sp@-	| a, first longword
	PICCALL	SYM (__cmpdf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_ledf2 */
   4035   1.1       mrg 
   4036   1.1       mrg | The comments above about __eqdf2, et. al., also apply to __eqsf2,
   4037   1.1       mrg | et. al., except that the latter call __cmpsf2 rather than __cmpdf2.
   4038   1.1       mrg 
#ifdef  L_eqsf2
	.text
	FUNC(__eqsf2)
	.globl	SYM (__eqsf2)

| int __eqsf2 (float a, float b);
| Single-float counterpart of __eqdf2: returns what __cmpsf2 would.
| Re-pushes both floats (one longword each) plus the unordered-case
| result (1), then calls __cmpsf2_internal and passes its result
| through unchanged.
SYM (__eqsf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8), a6@(12)
	pea	1		| unordered-result argument = 1
	movl	a6@(12),sp@-	| b
	movl	a6@(8),sp@-	| a
	PICCALL	SYM (__cmpsf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_eqsf2 */
   4052   1.1       mrg 
#ifdef  L_nesf2
	.text
	FUNC(__nesf2)
	.globl	SYM (__nesf2)

| int __nesf2 (float a, float b);
| Single-float counterpart of __nedf2: returns what __cmpsf2 would.
| Pushes the unordered-case result (1) and both floats, then calls
| __cmpsf2_internal.
SYM (__nesf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8), a6@(12)
	pea	1		| unordered-result argument = 1
	movl	a6@(12),sp@-	| b
	movl	a6@(8),sp@-	| a
	PICCALL	SYM (__cmpsf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_nesf2 */
   4066   1.1       mrg 
#ifdef  L_gtsf2
	.text
	FUNC(__gtsf2)
	.globl	SYM (__gtsf2)

| int __gtsf2 (float a, float b);
| Single-float counterpart of __gtdf2: returns what __cmpsf2 would.
| The unordered-case result pushed here is -1 so that "greater" tests
| fail on NaNs.
SYM (__gtsf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8), a6@(12)
	pea	-1		| unordered-result argument = -1
	movl	a6@(12),sp@-	| b
	movl	a6@(8),sp@-	| a
	PICCALL	SYM (__cmpsf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_gtsf2 */
   4080   1.1       mrg 
#ifdef  L_gesf2
	.text
	FUNC(__gesf2)
	.globl	SYM (__gesf2)

| int __gesf2 (float a, float b);
| Single-float counterpart of __gedf2: returns what __cmpsf2 would.
| The unordered-case result pushed here is -1 so that
| "greater-or-equal" tests fail on NaNs.
SYM (__gesf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8), a6@(12)
	pea	-1		| unordered-result argument = -1
	movl	a6@(12),sp@-	| b
	movl	a6@(8),sp@-	| a
	PICCALL	SYM (__cmpsf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_gesf2 */
   4094   1.1       mrg 
#ifdef  L_ltsf2
	.text
	FUNC(__ltsf2)
	.globl	SYM (__ltsf2)

| int __ltsf2 (float a, float b);
| Single-float counterpart of __ltdf2: returns what __cmpsf2 would.
| The unordered-case result pushed here is 1 so that "less-than" tests
| fail on NaNs.
SYM (__ltsf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8), a6@(12)
	pea	1		| unordered-result argument = 1
	movl	a6@(12),sp@-	| b
	movl	a6@(8),sp@-	| a
	PICCALL	SYM (__cmpsf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_ltsf2 */
   4108   1.1       mrg 
#ifdef  L_lesf2
	.text
	FUNC(__lesf2)
	.globl	SYM (__lesf2)

| int __lesf2 (float a, float b);
| Single-float counterpart of __ledf2: returns what __cmpsf2 would.
| The unordered-case result pushed here is 1 so that "less-or-equal"
| tests fail on NaNs.
SYM (__lesf2):
	link	a6,IMM (0)	| new frame; incoming args at a6@(8), a6@(12)
	pea	1		| unordered-result argument = 1
	movl	a6@(12),sp@-	| b
	movl	a6@(8),sp@-	| a
	PICCALL	SYM (__cmpsf2_internal)
	unlk	a6		| restore sp/a6 (discards the pushed args)
	rts
#endif /* L_lesf2 */
   4122   1.1       mrg 
#if defined (__ELF__) && defined (__linux__)
	/* Make stack non-executable for ELF linux targets.  */
	/* An empty .note.GNU-stack section tells the GNU linker that this
	   object file does not require an executable stack; without it the
	   linker conservatively assumes it does.  */
	.section	.note.GNU-stack,"",@progbits
#endif
   4127