| (code-viewer export residue, kept as assembler comments:)
| lb1sf68.S revision 1.10 — m68k libgcc soft-float/integer support
      1 /* libgcc routines for 68000 w/o floating-point hardware.
      2    Copyright (C) 1994-2022 Free Software Foundation, Inc.
      3 
      4 This file is part of GCC.
      5 
      6 GCC is free software; you can redistribute it and/or modify it
      7 under the terms of the GNU General Public License as published by the
      8 Free Software Foundation; either version 3, or (at your option) any
      9 later version.
     10 
     11 This file is distributed in the hope that it will be useful, but
     12 WITHOUT ANY WARRANTY; without even the implied warranty of
     13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14 General Public License for more details.
     15 
     16 Under Section 7 of GPL version 3, you are granted additional
     17 permissions described in the GCC Runtime Library Exception, version
     18 3.1, as published by the Free Software Foundation.
     19 
     20 You should have received a copy of the GNU General Public License and
     21 a copy of the GCC Runtime Library Exception along with this program;
     22 see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     23 <http://www.gnu.org/licenses/>.  */
     24 
     25 /* Use this one for any 680x0; assumes no floating point hardware.
     26    The trailing " '" appearing on some lines is for ANSI preprocessors.  Yuk.
     27    Some of this code comes from MINIX, via the folks at ericsson.
     28    D. V. Henkel-Wallace (gumby (at) cygnus.com) Fete Bastille, 1992
     29 */
     30 
     31 /* These are predefined by new versions of GNU cpp.  */
     32 
     33 #ifndef __USER_LABEL_PREFIX__
     34 #define __USER_LABEL_PREFIX__ _
     35 #endif
     36 
     37 #ifndef __REGISTER_PREFIX__
     38 #define __REGISTER_PREFIX__
     39 #endif
     40 
     41 #ifndef __IMMEDIATE_PREFIX__
     42 #define __IMMEDIATE_PREFIX__ #
     43 #endif
     44 
     45 /* ANSI concatenation macros.  */
     46 
     47 #define CONCAT1(a, b) CONCAT2(a, b)
     48 #define CONCAT2(a, b) a ## b
     49 
     50 /* Use the right prefix for global labels.  */
     51 
     52 #define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
     53 
     54 /* Note that X is a function.  */
     55 
     56 #ifdef __ELF__
     57 #define FUNC(x) .type SYM(x),function
     58 #else
     59 /* The .proc pseudo-op is accepted, but ignored, by GAS.  We could just
     60    define this to the empty string for non-ELF systems, but defining it
     61    to .proc means that the information is available to the assembler if
     62    the need arises.  */
     63 #define FUNC(x) .proc
     64 #endif
     65 
     66 /* Use the right prefix for registers.  */
     67 
     68 #define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
     69 
     70 /* Use the right prefix for immediate values.  */
     71 
     72 #define IMM(x) CONCAT1 (__IMMEDIATE_PREFIX__, x)
     73 
     74 #define d0 REG (d0)
     75 #define d1 REG (d1)
     76 #define d2 REG (d2)
     77 #define d3 REG (d3)
     78 #define d4 REG (d4)
     79 #define d5 REG (d5)
     80 #define d6 REG (d6)
     81 #define d7 REG (d7)
     82 #define a0 REG (a0)
     83 #define a1 REG (a1)
     84 #define a2 REG (a2)
     85 #define a3 REG (a3)
     86 #define a4 REG (a4)
     87 #define a5 REG (a5)
     88 #define a6 REG (a6)
     89 #define fp REG (fp)
     90 #define sp REG (sp)
     91 #define pc REG (pc)
     92 
     93 /* Provide a few macros to allow for PIC code support.
     94  * With PIC, data is stored A5 relative so we've got to take a bit of special
     95  * care to ensure that all loads of global data is via A5.  PIC also requires
     96  * jumps and subroutine calls to be PC relative rather than absolute.  We cheat
     97  * a little on this and in the PIC case, we use short offset branches and
     98  * hope that the final object code is within range (which it should be).
     99  */
    100 #ifndef __PIC__
    101 
    102 	/* Non PIC (absolute/relocatable) versions */
    103 
     104 	.macro PICCALL addr
	| non-PIC: plain subroutine call (jbsr lets gas pick bsr/jsr)
     105 	jbsr	\addr
     106 	.endm
     107 
     108 	.macro PICJUMP addr
	| non-PIC: plain jump
     109 	jmp	\addr
     110 	.endm
     111 
     112 	.macro PICLEA sym, reg
	| non-PIC: load the address of \sym directly into \reg
     113 	lea	\sym, \reg
     114 	.endm
     115 
     116 	.macro PICPEA sym, areg
	| non-PIC: push the address of \sym (\areg is unused in this variant,
	| kept only so all four PICPEA definitions share one signature)
     117 	pea	\sym
     118 	.endm
    119 
    120 #else /* __PIC__ */
    121 
    122 # if defined (__uClinux__)
    123 
    124 	/* Versions for uClinux */
    125 
    126 #  if defined(__ID_SHARED_LIBRARY__)
    127 
    128 	/* -mid-shared-library versions  */
    129 
     130 	.macro PICLEA sym, reg
	| -mid-shared-library: fetch this library's GOT pointer from the
	| a5-relative table, then the symbol's address from that GOT
     131 	movel	a5@(_current_shared_library_a5_offset_), \reg
     132 	movel	\sym@GOT(\reg), \reg
     133 	.endm
     134 
     135 	.macro PICPEA sym, areg
	| push the address of \sym; \areg is used as scratch for the GOT base
     136 	movel	a5@(_current_shared_library_a5_offset_), \areg
     137 	movel	\sym@GOT(\areg), sp@-
     138 	.endm
     139 
     140 	.macro PICCALL addr
	| indirect call through the GOT.  NOTE: clobbers a0.
     141 	PICLEA	\addr,a0
     142 	jsr	a0@
     143 	.endm
     144 
     145 	.macro PICJUMP addr
	| indirect jump through the GOT.  NOTE: clobbers a0.
     146 	PICLEA	\addr,a0
     147 	jmp	a0@
     148 	.endm
    149 
    150 #  else /* !__ID_SHARED_LIBRARY__ */
    151 
    152 	/* Versions for -msep-data */
    153 
     154 	.macro PICLEA sym, reg
	| -msep-data: the GOT pointer lives permanently in a5
     155 	movel	\sym@GOT(a5), \reg
     156 	.endm
     157 
     158 	.macro PICPEA sym, areg
	| push \sym's address straight from the GOT (\areg unused here)
     159 	movel	\sym@GOT(a5), sp@-
     160 	.endm
     161 
     162 	.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
	| ISA A ColdFire has no long pc-relative bsr: synthesize the
	| pc-relative call via lea/jsr.  NOTE: clobbers a0.
	lea	\addr-.-8,a0
	jsr	pc@(a0)
#else
	jbsr	\addr
#endif
     169 	.endm
     170 
     171 	.macro PICJUMP addr
     172 	/* ISA C has no bra.l instruction, and since this assembly file
     173 	   gets assembled into multiple object files, we avoid the
     174 	   bra instruction entirely.  */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
	lea	\addr-.-8,a0
	jmp	pc@(a0)
#else
	bra	\addr
#endif
     181 	.endm
    182 
    183 #  endif
    184 
    185 # else /* !__uClinux__ */
    186 
    187 	/* Versions for Linux */
    188 
     189 	.macro PICLEA sym, reg
	| Linux PIC: materialize the GOT base pc-relatively, then load
	| \sym's address from the GOT.  The -6 compensates for the distance
	| between the movel's immediate and the pc used by the lea
	| (standard m68k GOTPC sequence — see gas/bfd m68k relocations).
	movel	#_GLOBAL_OFFSET_TABLE_@GOTPC, \reg
	lea	(-6, pc, \reg), \reg
	movel	\sym@GOT(\reg), \reg
     193 	.endm
     194 
     195 	.macro PICPEA sym, areg
	| same GOT-base computation, but push the symbol's address
	movel	#_GLOBAL_OFFSET_TABLE_@GOTPC, \areg
	lea	(-6, pc, \areg), \areg
	movel	\sym@GOT(\areg), sp@-
     199 	.endm
     200 
     201 	.macro PICCALL addr
#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
	| ISA A ColdFire: no long pc-relative bsr; NOTE: clobbers a0
	lea	\addr-.-8,a0
	jsr	pc@(a0)
#else
	jbsr	\addr@PLTPC
#endif
     208 	.endm
     209 
     210 	.macro PICJUMP addr
     211 	/* ISA C has no bra.l instruction, and since this assembly file
     212 	   gets assembled into multiple object files, we avoid the
     213 	   bra instruction entirely.  */
#if defined (__mcoldfire__) && !defined (__mcfisab__)
	lea	\addr-.-8,a0
	jmp	pc@(a0)
#else
	bra	\addr@PLTPC
#endif
     220 	.endm
    221 
    222 # endif
    223 #endif /* __PIC__ */
    224 
    225 
    226 #ifdef L_floatex
    227 
    228 | This is an attempt at a decent floating point (single, double and
    229 | extended double) code for the GNU C compiler. It should be easy to
    230 | adapt to other compilers (but beware of the local labels!).
    231 
    232 | Starting date: 21 October, 1990
    233 
    234 | It is convenient to introduce the notation (s,e,f) for a floating point
    235 | number, where s=sign, e=exponent, f=fraction. We will call a floating
    236 | point number fpn to abbreviate, independently of the precision.
    237 | Let MAX_EXP be in each case the maximum exponent (255 for floats, 1023
    238 | for doubles and 16383 for long doubles). We then have the following
    239 | different cases:
    240 |  1. Normalized fpns have 0 < e < MAX_EXP. They correspond to
    241 |     (-1)^s x 1.f x 2^(e-bias-1).
    242 |  2. Denormalized fpns have e=0. They correspond to numbers of the form
    243 |     (-1)^s x 0.f x 2^(-bias).
    244 |  3. +/-INFINITY have e=MAX_EXP, f=0.
    245 |  4. Quiet NaN (Not a Number) have all bits set.
    246 |  5. Signaling NaN (Not a Number) have s=0, e=MAX_EXP, f=1.
    247 
    248 |=============================================================================
    249 |                                  exceptions
    250 |=============================================================================
    251 
    252 | This is the floating point condition code register (_fpCCR):
    253 |
    254 | struct {
    255 |   short _exception_bits;
    256 |   short _trap_enable_bits;
    257 |   short _sticky_bits;
    258 |   short _rounding_mode;
    259 |   short _format;
    260 |   short _last_operation;
    261 |   union {
    262 |     float sf;
    263 |     double df;
    264 |   } _operand1;
    265 |   union {
    266 |     float sf;
    267 |     double df;
    268 |   } _operand2;
    269 | } _fpCCR;
    270 
     271 	.data
     272 	.even
     273 
     274 	.globl	SYM (_fpCCR)
     275 
	| Storage for the struct _fpCCR described above; field order and
	| sizes must stay in sync with the offset symbols computed below.
     276 SYM (_fpCCR):
__exception_bits:
	.word	0
__trap_enable_bits:
	.word	0
__sticky_bits:
	.word	0
__rounding_mode:
	.word	ROUND_TO_NEAREST
__format:
	.word	NIL
__last_operation:
	.word	NOOP
__operand1:
	.long	0
	.long	0
__operand2:
	.long 	0
	.long	0
     295 
     296 | Offsets:
	| byte offsets of each field from _fpCCR, for a0@(...) addressing
	| in the routines below
EBITS  = __exception_bits - SYM (_fpCCR)
TRAPE  = __trap_enable_bits - SYM (_fpCCR)
STICK  = __sticky_bits - SYM (_fpCCR)
ROUND  = __rounding_mode - SYM (_fpCCR)
FORMT  = __format - SYM (_fpCCR)
LASTO  = __last_operation - SYM (_fpCCR)
OPER1  = __operand1 - SYM (_fpCCR)
OPER2  = __operand2 - SYM (_fpCCR)
    305 
    306 | The following exception types are supported:
    307 INEXACT_RESULT 		= 0x0001
    308 UNDERFLOW 		= 0x0002
    309 OVERFLOW 		= 0x0004
    310 DIVIDE_BY_ZERO 		= 0x0008
    311 INVALID_OPERATION 	= 0x0010
    312 
    313 | The allowed rounding modes are:
    314 UNKNOWN           = -1
    315 ROUND_TO_NEAREST  = 0 | round result to nearest representable value
    316 ROUND_TO_ZERO     = 1 | round result towards zero
    317 ROUND_TO_PLUS     = 2 | round result towards plus infinity
    318 ROUND_TO_MINUS    = 3 | round result towards minus infinity
    319 
    320 | The allowed values of format are:
    321 NIL          = 0
    322 SINGLE_FLOAT = 1
    323 DOUBLE_FLOAT = 2
    324 LONG_FLOAT   = 3
    325 
    326 | The allowed values for the last operation are:
    327 NOOP         = 0
    328 ADD          = 1
    329 MULTIPLY     = 2
    330 DIVIDE       = 3
    331 NEGATE       = 4
    332 COMPARE      = 5
    333 EXTENDSFDF   = 6
    334 TRUNCDFSF    = 7
    335 
    336 |=============================================================================
    337 |                           __clear_sticky_bits
    338 |=============================================================================
    339 
    340 | The sticky bits are normally not cleared (thus the name), whereas the
    341 | exception type and exception value reflect the last computation.
    342 | This routine is provided to clear them (you can also write to _fpCCR,
    343 | since it is globally visible).
    344 
     345 	.globl  SYM (__clear_sticky_bit)
     346 
     347 	.text
     348 	.even
     349 
     350 | void __clear_sticky_bits(void);
	| NOTE(review): the exported symbol below lacks the trailing 's'
	| of the prototype comment above.  This is long-standing; renaming
	| the global would break existing callers, so it is left as is.
	| Clears only the __sticky_bits field of _fpCCR; all other fields
	| are untouched.
SYM (__clear_sticky_bit):
	PICLEA	SYM (_fpCCR),a0
#ifndef __mcoldfire__
	movew	IMM (0),a0@(STICK)	| __sticky_bits = 0
#else
	clr.w	a0@(STICK)	| __sticky_bits = 0
#endif
     358 	rts
    360 |=============================================================================
    361 |                           $_exception_handler
    362 |=============================================================================
    363 
    364 	.globl  $_exception_handler
    365 
    366 	.text
    367 	.even
    368 
    369 | This is the common exit point if an exception occurs.
    370 | NOTE: it is NOT callable from C!
    371 | It expects the exception type in d7, the format (SINGLE_FLOAT,
    372 | DOUBLE_FLOAT or LONG_FLOAT) in d6, and the last operation code in d5.
    373 | It sets the corresponding exception and sticky bits, and the format.
    374 | Depending on the format if fills the corresponding slots for the
    375 | operands which produced the exception (all this information is provided
    376 | so if you write your own exception handlers you have enough information
    377 | to deal with the problem).
    378 | Then checks to see if the corresponding exception is trap-enabled,
    379 | in which case it pushes the address of _fpCCR and traps through
    380 | trap FPTRAP (15 for the moment).
    381 
     382 FPTRAP = 15
     383 
	| Inputs (see the comment above): d7 = exception bits, d6 = format,
	| d5 = last-operation code.  Runs inside the *calling routine's*
	| a6 frame: a6@(8)... are that routine's incoming operands, and the
	| final moveml/unlk/rts below unwind the frame the caller built.
$_exception_handler:
	PICLEA	SYM (_fpCCR),a0
	movew	d7,a0@(EBITS)	| set __exception_bits
#ifndef __mcoldfire__
	orw	d7,a0@(STICK)	| and __sticky_bits
#else
	movew	a0@(STICK),d4	| ColdFire has no or-to-memory: read,
	orl	d7,d4		| merge and write back (d4 is scratch,
	movew	d4,a0@(STICK)	| restored by the moveml below)
#endif
	movew	d6,a0@(FORMT)	| and __format
	movew	d5,a0@(LASTO)	| and __last_operation
     396 
     397 | Now put the operands in place:
#ifndef __mcoldfire__
	cmpw	IMM (SINGLE_FLOAT),d6
#else
	cmpl	IMM (SINGLE_FLOAT),d6
#endif
	beq	1f
	movel	a6@(8),a0@(OPER1)	| double/long: two longs per operand
	movel	a6@(12),a0@(OPER1+4)
	movel	a6@(16),a0@(OPER2)
	movel	a6@(20),a0@(OPER2+4)
	bra	2f
1:	movel	a6@(8),a0@(OPER1)	| single: one long per operand
	movel	a6@(12),a0@(OPER2)
2:
     412 | And check whether the exception is trap-enabled:
#ifndef __mcoldfire__
	andw	a0@(TRAPE),d7	| is exception trap-enabled?
#else
	clrl	d6
	movew	a0@(TRAPE),d6
	andl	d6,d7
#endif
	beq	1f		| no, exit
	PICPEA	SYM (_fpCCR),a1	| yes, push address of _fpCCR
	trap	IMM (FPTRAP)	| and trap
#ifndef __mcoldfire__
1:	moveml	sp@+,d2-d7	| restore data registers
#else
1:	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6		| and return
	rts
    432 #endif /* L_floatex */
    433 
     434 #ifdef  L_mulsi3
     435 	.text
     436 	FUNC(__mulsi3)
     437 	.globl	SYM (__mulsi3)
     438 	.globl	SYM (__mulsi3_internal)
     439 	.hidden	SYM (__mulsi3_internal)
SYM (__mulsi3):
SYM (__mulsi3_internal):
	| 32x32 -> 32 multiply built from 16x16 muluw:
	|   x*y mod 2^32 = ((xhi*ylo + xlo*yhi) << 16) + xlo*ylo
	| (the xhi*yhi term only affects bits >= 32 and is dropped).
	| Big-endian layout: x at sp@(4) = hi word, sp@(6) = lo word;
	| y at sp@(8) = hi word, sp@(10) = lo word.
	movew	sp@(4), d0	/* x0 -> d0 */
	muluw	sp@(10), d0	/* x0*y1 */
	movew	sp@(6), d1	/* x1 -> d1 */
	muluw	sp@(8), d1	/* x1*y0 */
#ifndef __mcoldfire__
	addw	d1, d0		| sum of cross products (low 16 bits)
#else
	addl	d1, d0
#endif
	swap	d0		| shift cross-product sum into the
	clrw	d0		| high word, zeroing the low word
	movew	sp@(6), d1	/* x1 -> d1 */
	muluw	sp@(10), d1	/* x1*y1 */
	addl	d1, d0		| add the low 32-bit partial product
     456 
     457 	rts
     458 #endif /* L_mulsi3 */
    459 
     460 #ifdef  L_udivsi3
     461 	.text
     462 	FUNC(__udivsi3)
     463 	.globl	SYM (__udivsi3)
     464 	.globl	SYM (__udivsi3_internal)
     465 	.hidden	SYM (__udivsi3_internal)
	| unsigned 32/32 division: returns dividend/divisor in d0.
	| 68000: use the 32/16 divu twice when the divisor fits in 16 bits,
	| otherwise shift both operands down and correct the estimate.
SYM (__udivsi3):
SYM (__udivsi3_internal):
#ifndef __mcoldfire__
	movel	d2, sp@-
	movel	sp@(12), d1	/* d1 = divisor */
	movel	sp@(8), d0	/* d0 = dividend */

	cmpl	IMM (0x10000), d1 /* divisor >= 2 ^ 16 ?   */
	jcc	L3		/* then try next algorithm */
	movel	d0, d2
	clrw	d2
	swap	d2
	divu	d1, d2          /* high quotient in lower word */
	movew	d2, d0		/* save high quotient */
	swap	d0
	movew	sp@(10), d2	/* get low dividend + high rest */
	divu	d1, d2		/* low quotient */
	movew	d2, d0
	jra	L6

L3:	movel	d1, d2		/* use d2 as divisor backup */
L4:	lsrl	IMM (1), d1	/* shift divisor */
	lsrl	IMM (1), d0	/* shift dividend */
	cmpl	IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ?  */
	jcc	L4
	divu	d1, d0		/* now we have 16-bit divisor */
	andl	IMM (0xffff), d0 /* keep 16-bit quotient, drop remainder word */

/* Multiply the 16-bit tentative quotient with the 32-bit divisor.  Because of
   the operand ranges, this might give a 33-bit product.  If this product is
   greater than the dividend, the tentative quotient was too large. */
	movel	d2, d1
	mulu	d0, d1		/* low part, 32 bits */
	swap	d2
	mulu	d0, d2		/* high part, at most 17 bits */
	swap	d2		/* align high part with low part */
	tstw	d2		/* high part 17 bits? */
	jne	L5		/* if 17 bits, quotient was too large */
	addl	d2, d1		/* add parts */
	jcs	L5		/* if sum is 33 bits, quotient was too large */
	cmpl	sp@(8), d1	/* compare the sum with the dividend */
	jls	L6		/* sum <= dividend: 16-bit quotient is exact */
L5:	subql	IMM (1), d0	/* adjust quotient */

L6:	movel	sp@+, d2
	rts

#else /* __mcoldfire__ */

/* ColdFire implementation of non-restoring division algorithm from
   Hennessy & Patterson, Appendix A. */
	link	a6,IMM (-12)
	moveml	d2-d4,sp@
	movel	a6@(8),d0
	movel	a6@(12),d1
	clrl	d2		| clear p
	moveq	IMM (31),d4
L1:	addl	d0,d0		| shift reg pair (p,a) one bit left
	addxl	d2,d2
	movl	d2,d3		| subtract b from p, store in tmp.
	subl	d1,d3
	jcs	L2		| borrow: p < b, leave p and quotient bit alone
	bset	IMM (0),d0	| else set the low order bit of a to 1,
	movl	d3,d2		| and store tmp in p.
L2:	subql	IMM (1),d4
	jcc	L1
	moveml	sp@,d2-d4	| restore data registers
	unlk	a6		| and return
	rts
#endif /* __mcoldfire__ */

     537 #endif /* L_udivsi3 */
    538 
     539 #ifdef  L_divsi3
     540 	.text
     541 	FUNC(__divsi3)
     542 	.globl	SYM (__divsi3)
     543 	.globl	SYM (__divsi3_internal)
     544 	.hidden	SYM (__divsi3_internal)
	| signed 32/32 division: record the result sign in d2, divide the
	| magnitudes with __udivsi3_internal, then negate if needed.
SYM (__divsi3):
SYM (__divsi3_internal):
	movel	d2, sp@-

	moveq	IMM (1), d2	/* sign of result stored in d2 (=1 or =-1) */
	movel	sp@(12), d1	/* d1 = divisor */
	jpl	L1
	negl	d1
#ifndef __mcoldfire__
	negb	d2		/* change sign because divisor <0  */
#else
	negl	d2		/* change sign because divisor <0  */
#endif
L1:	movel	sp@(8), d0	/* d0 = dividend */
	jpl	L2
	negl	d0
#ifndef __mcoldfire__
	negb	d2		/* second negation cancels the first if both <0 */
#else
	negl	d2
#endif

L2:	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__udivsi3_internal)	/* divide abs(dividend) by abs(divisor) */
	addql	IMM (8), sp

	tstb	d2		/* negative sign flag? */
	jpl	L3
	negl	d0

L3:	movel	sp@+, d2
	rts
     578 #endif /* L_divsi3 */
    579 
     580 #ifdef  L_umodsi3
     581 	.text
     582 	FUNC(__umodsi3)
     583 	.globl	SYM (__umodsi3)
	| unsigned remainder: a % b = a - (a/b)*b, reusing the hidden
	| division and multiplication entry points above.
SYM (__umodsi3):
	movel	sp@(8), d1	/* d1 = divisor */
	movel	sp@(4), d0	/* d0 = dividend */
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__udivsi3_internal)	/* d0 = a/b */
	addql	IMM (8), sp
	movel	sp@(8), d1	/* d1 = divisor */
#ifndef __mcoldfire__
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__mulsi3_internal)	/* d0 = (a/b)*b */
	addql	IMM (8), sp
#else
	mulsl	d1,d0		| ColdFire has a 32x32 multiply instruction
#endif
	movel	sp@(4), d1	/* d1 = dividend */
	subl	d0, d1		/* d1 = a - (a/b)*b */
	movel	d1, d0
	rts
     604 #endif /* L_umodsi3 */
    605 
     606 #ifdef  L_modsi3
     607 	.text
     608 	FUNC(__modsi3)
     609 	.globl	SYM (__modsi3)
	| signed remainder: a % b = a - (a/b)*b with truncating division,
	| so the result takes the sign of the dividend.
SYM (__modsi3):
	movel	sp@(8), d1	/* d1 = divisor */
	movel	sp@(4), d0	/* d0 = dividend */
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__divsi3_internal)	/* d0 = a/b (signed) */
	addql	IMM (8), sp
	movel	sp@(8), d1	/* d1 = divisor */
#ifndef __mcoldfire__
	movel	d1, sp@-
	movel	d0, sp@-
	PICCALL	SYM (__mulsi3_internal)	/* d0 = (a/b)*b */
	addql	IMM (8), sp
#else
	mulsl	d1,d0		| ColdFire has a 32x32 multiply instruction
#endif
	movel	sp@(4), d1	/* d1 = dividend */
	subl	d0, d1		/* d1 = a - (a/b)*b */
	movel	d1, d0
	rts
     630 #endif /* L_modsi3 */
    631 
    632 
    633 #ifdef  L_double
    634 
    635 	.globl	SYM (_fpCCR)
    636 	.globl  $_exception_handler
    637 
    638 QUIET_NaN      = 0xffffffff
    639 
    640 D_MAX_EXP      = 0x07ff
    641 D_BIAS         = 1022
    642 DBL_MAX_EXP    = D_MAX_EXP - D_BIAS
    643 DBL_MIN_EXP    = 1 - D_BIAS
    644 DBL_MANT_DIG   = 53
    645 
    646 INEXACT_RESULT 		= 0x0001
    647 UNDERFLOW 		= 0x0002
    648 OVERFLOW 		= 0x0004
    649 DIVIDE_BY_ZERO 		= 0x0008
    650 INVALID_OPERATION 	= 0x0010
    651 
    652 DOUBLE_FLOAT = 2
    653 
    654 NOOP         = 0
    655 ADD          = 1
    656 MULTIPLY     = 2
    657 DIVIDE       = 3
    658 NEGATE       = 4
    659 COMPARE      = 5
    660 EXTENDSFDF   = 6
    661 TRUNCDFSF    = 7
    662 
    663 UNKNOWN           = -1
    664 ROUND_TO_NEAREST  = 0 | round result to nearest representable value
    665 ROUND_TO_ZERO     = 1 | round result towards zero
    666 ROUND_TO_PLUS     = 2 | round result towards plus infinity
    667 ROUND_TO_MINUS    = 3 | round result towards minus infinity
    668 
    669 | Entry points:
    670 
    671 	.globl SYM (__adddf3)
    672 	.globl SYM (__subdf3)
    673 	.globl SYM (__muldf3)
    674 	.globl SYM (__divdf3)
    675 	.globl SYM (__negdf2)
    676 	.globl SYM (__cmpdf2)
    677 	.globl SYM (__cmpdf2_internal)
    678 	.hidden SYM (__cmpdf2_internal)
    679 
    680 	.text
    681 	.even
    682 
    683 | These are common routines to return and signal exceptions.
    684 
	| Common tails for the double-precision routines: each loads the
	| IEEE result into d0-d1, the exception bits into d7 and the format
	| into d6, then jumps to $_exception_handler (which unwinds the
	| calling routine's frame).  On entry d7 holds the result's sign
	| bit in bit 31 (set up by the caller).
Ld$den:
| Return and signal a denormalized number
	orl	d7,d0		| merge sign into high word of result
	movew	IMM (INEXACT_RESULT+UNDERFLOW),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$infty:
Ld$overflow:
| Return a properly signed INFINITY and set the exception flags
	movel	IMM (0x7ff00000),d0	| +inf: max exponent, zero fraction
	movel	IMM (0),d1
	orl	d7,d0		| merge sign
	movew	IMM (INEXACT_RESULT+OVERFLOW),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$underflow:
| Return 0 and set the exception flags
	movel	IMM (0),d0
	movel	d0,d1
	movew	IMM (INEXACT_RESULT+UNDERFLOW),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$inop:
| Return a quiet NaN and set the exception flags
	movel	IMM (QUIET_NaN),d0	| all bits set (see definition above)
	movel	d0,d1
	movew	IMM (INEXACT_RESULT+INVALID_OPERATION),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler

Ld$div$0:
| Return a properly signed INFINITY and set the exception flags
	movel	IMM (0x7ff00000),d0
	movel	IMM (0),d1
	orl	d7,d0		| merge sign
	movew	IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler
    726 
    727 |=============================================================================
    728 |=============================================================================
    729 |                         double precision routines
    730 |=============================================================================
    731 |=============================================================================
    732 
    733 | A double precision floating point number (double) has the format:
    734 |
    735 | struct _double {
    736 |  unsigned int sign      : 1;  /* sign bit */
    737 |  unsigned int exponent  : 11; /* exponent, shifted by 126 */
    738 |  unsigned int fraction  : 52; /* fraction */
    739 | } double;
    740 |
    741 | Thus sizeof(double) = 8 (64 bits).
    742 |
    743 | All the routines are callable from C programs, and return the result
    744 | in the register pair d0-d1. They also preserve all registers except
    745 | d0-d1 and a0-a1.
    746 
    747 |=============================================================================
    748 |                              __subdf3
    749 |=============================================================================
    750 
     751 | double __subdf3(double, double);
	| a - b == a + (-b): flip the sign bit of b in place on the stack.
	| sp@(12) is the big-endian high word of the second double, so
	| bit 31 there is its IEEE sign bit.  Relies on __adddf3 being
	| placed immediately below — do not separate the two.
	FUNC(__subdf3)
SYM (__subdf3):
	bchg	IMM (31),sp@(12) | change sign of second operand
				| and fall through, so we always add
    756 |=============================================================================
    757 |                              __adddf3
    758 |=============================================================================
    759 
    760 | double __adddf3(double, double);
    761 	FUNC(__adddf3)
    762 SYM (__adddf3):
    763 #ifndef __mcoldfire__
    764 	link	a6,IMM (0)	| everything will be done in registers
    765 	moveml	d2-d7,sp@-	| save all data registers and a2 (but d0-d1)
    766 #else
    767 	link	a6,IMM (-24)
    768 	moveml	d2-d7,sp@
    769 #endif
    770 	movel	a6@(8),d0	| get first operand
    771 	movel	a6@(12),d1	|
    772 	movel	a6@(16),d2	| get second operand
    773 	movel	a6@(20),d3	|
    774 
    775 	movel	d0,d7		| get d0's sign bit in d7 '
    776 	addl	d1,d1		| check and clear sign bit of a, and gain one
    777 	addxl	d0,d0		| bit of extra precision
    778 	beq	Ladddf$b	| if zero return second operand
    779 
    780 	movel	d2,d6		| save sign in d6
    781 	addl	d3,d3		| get rid of sign bit and gain one bit of
    782 	addxl	d2,d2		| extra precision
    783 	beq	Ladddf$a	| if zero return first operand
    784 
    785 	andl	IMM (0x80000000),d7 | isolate a's sign bit '
    786         swap	d6		| and also b's sign bit '
    787 #ifndef __mcoldfire__
    788 	andw	IMM (0x8000),d6	|
    789 	orw	d6,d7		| and combine them into d7, so that a's sign '
    790 				| bit is in the high word and b's is in the '
    791 				| low word, so d6 is free to be used
    792 #else
    793 	andl	IMM (0x8000),d6
    794 	orl	d6,d7
    795 #endif
    796 	movel	d7,a0		| now save d7 into a0, so d7 is free to
    797                 		| be used also
    798 
    799 | Get the exponents and check for denormalized and/or infinity.
    800 
    801 	movel	IMM (0x001fffff),d6 | mask for the fraction
    802 	movel	IMM (0x00200000),d7 | mask to put hidden bit back
    803 
    804 	movel	d0,d4		|
    805 	andl	d6,d0		| get fraction in d0
    806 	notl	d6		| make d6 into mask for the exponent
    807 	andl	d6,d4		| get exponent in d4
    808 	beq	Ladddf$a$den	| branch if a is denormalized
    809 	cmpl	d6,d4		| check for INFINITY or NaN
    810 	beq	Ladddf$nf       |
    811 	orl	d7,d0		| and put hidden bit back
    812 Ladddf$1:
    813 	swap	d4		| shift right exponent so that it starts
    814 #ifndef __mcoldfire__
    815 	lsrw	IMM (5),d4	| in bit 0 and not bit 20
    816 #else
    817 	lsrl	IMM (5),d4	| in bit 0 and not bit 20
    818 #endif
    819 | Now we have a's exponent in d4 and fraction in d0-d1 '
    820 	movel	d2,d5		| save b to get exponent
    821 	andl	d6,d5		| get exponent in d5
    822 	beq	Ladddf$b$den	| branch if b is denormalized
    823 	cmpl	d6,d5		| check for INFINITY or NaN
    824 	beq	Ladddf$nf
    825 	notl	d6		| make d6 into mask for the fraction again
    826 	andl	d6,d2		| and get fraction in d2
    827 	orl	d7,d2		| and put hidden bit back
    828 Ladddf$2:
    829 	swap	d5		| shift right exponent so that it starts
    830 #ifndef __mcoldfire__
    831 	lsrw	IMM (5),d5	| in bit 0 and not bit 20
    832 #else
    833 	lsrl	IMM (5),d5	| in bit 0 and not bit 20
    834 #endif
    835 
    836 | Now we have b's exponent in d5 and fraction in d2-d3. '
    837 
    838 | The situation now is as follows: the signs are combined in a0, the
    839 | numbers are in d0-d1 (a) and d2-d3 (b), and the exponents in d4 (a)
    840 | and d5 (b). To do the rounding correctly we need to keep all the
    841 | bits until the end, so we need to use d0-d1-d2-d3 for the first number
    842 | and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
    843 | exponents in a2-a3.
    844 
    845 #ifndef __mcoldfire__
    846 	moveml	a2-a3,sp@-	| save the address registers
    847 #else
    848 	movel	a2,sp@-
    849 	movel	a3,sp@-
    850 	movel	a4,sp@-
    851 #endif
    852 
    853 	movel	d4,a2		| save the exponents
    854 	movel	d5,a3		|
    855 
    856 	movel	IMM (0),d7	| and move the numbers around
    857 	movel	d7,d6		|
    858 	movel	d3,d5		|
    859 	movel	d2,d4		|
    860 	movel	d7,d3		|
    861 	movel	d7,d2		|
    862 
    863 | Here we shift the numbers until the exponents are the same, and put
    864 | the largest exponent in a2.
    865 #ifndef __mcoldfire__
    866 	exg	d4,a2		| get exponents back
    867 	exg	d5,a3		|
    868 	cmpw	d4,d5		| compare the exponents
    869 #else
    870 	movel	d4,a4		| get exponents back
    871 	movel	a2,d4
    872 	movel	a4,a2
    873 	movel	d5,a4
    874 	movel	a3,d5
    875 	movel	a4,a3
    876 	cmpl	d4,d5		| compare the exponents
    877 #endif
    878 	beq	Ladddf$3	| if equal don't shift '
    879 	bhi	9f		| branch if second exponent is higher
    880 
    881 | Here we have a's exponent larger than b's, so we have to shift b. We do
    882 | this by using as counter d2:
    883 1:	movew	d4,d2		| move largest exponent to d2
    884 #ifndef __mcoldfire__
     885 	subw	d5,d2		| and subtract second exponent
     886 	exg	d4,a2		| get back the longs we saved
     887 	exg	d5,a3		|
     888 #else
     889 	subl	d5,d2		| and subtract second exponent
     890 	movel	d4,a4		| get back the longs we saved
     891 	movel	a2,d4
     892 	movel	a4,a2
     893 	movel	d5,a4
     894 	movel	a3,d5
     895 	movel	a4,a3
     896 #endif
| Here d2 = exp(a) - exp(b) >= 0.  Align b's fraction (d4-d5-d6-d7) by
| shifting it right d2 bits.  If the difference is >= DBL_MANT_DIG+2 the
| smaller operand cannot affect the result, so just return a.
     897 | if difference is too large we don't shift (actually, we can just exit) '
     898 #ifndef __mcoldfire__
     899 	cmpw	IMM (DBL_MANT_DIG+2),d2
     900 #else
     901 	cmpl	IMM (DBL_MANT_DIG+2),d2
     902 #endif
     903 	bge	Ladddf$b$small
     904 #ifndef __mcoldfire__
     905 	cmpw	IMM (32),d2	| if difference >= 32, shift by longs
     906 #else
     907 	cmpl	IMM (32),d2	| if difference >= 32, shift by longs
     908 #endif
     909 	bge	5f
     910 2:
     911 #ifndef __mcoldfire__
     912 	cmpw	IMM (16),d2	| if difference >= 16, shift by words
     913 #else
     914 	cmpl	IMM (16),d2	| if difference >= 16, shift by words
     915 #endif
     916 	bge	6f
     917 	bra	3f		| enter dbra loop
     918 
| Single-bit step: shift the 128-bit value d4-d5-d6-d7 right one place.
| (ColdFire has no roxrl, so the carry between longs is propagated by
| hand with btst/bset.)
     919 4:
     920 #ifndef __mcoldfire__
     921 	lsrl	IMM (1),d4
     922 	roxrl	IMM (1),d5
     923 	roxrl	IMM (1),d6
     924 	roxrl	IMM (1),d7
     925 #else
     926 	lsrl	IMM (1),d7
     927 	btst	IMM (0),d6
     928 	beq	10f
     929 	bset	IMM (31),d7
     930 10:	lsrl	IMM (1),d6
     931 	btst	IMM (0),d5
     932 	beq	11f
     933 	bset	IMM (31),d6
     934 11:	lsrl	IMM (1),d5
     935 	btst	IMM (0),d4
     936 	beq	12f
     937 	bset	IMM (31),d5
     938 12:	lsrl	IMM (1),d4
     939 #endif
     940 3:
     941 #ifndef __mcoldfire__
     942 	dbra	d2,4b
     943 #else
     944 	subql	IMM (1),d2
     945 	bpl	4b
     946 #endif
| Shifting done; clear d2-d3 and join the common add/subtract path.
     947 	movel	IMM (0),d2
     948 	movel	d2,d3
     949 	bra	Ladddf$4
| Shift right by a whole long (32 bits): move each long down a register.
     950 5:
     951 	movel	d6,d7
     952 	movel	d5,d6
     953 	movel	d4,d5
     954 	movel	IMM (0),d4
     955 #ifndef __mcoldfire__
     956 	subw	IMM (32),d2
     957 #else
     958 	subl	IMM (32),d2
     959 #endif
     960 	bra	2b
| Shift right by 16 bits using swap/movew word moves.
     961 6:
     962 	movew	d6,d7
     963 	swap	d7
     964 	movew	d5,d6
     965 	swap	d6
     966 	movew	d4,d5
     967 	swap	d5
     968 	movew	IMM (0),d4
     969 	swap	d4
     970 #ifndef __mcoldfire__
     971 	subw	IMM (16),d2
     972 #else
     973 	subl	IMM (16),d2
     974 #endif
     975 	bra	3b
    976 
| Case exp(a) < exp(b): compute the (positive) difference in d6, keep the
| larger exponent, then align a's fraction (d0-d1-d2-d3) by shifting it
| right d6 bits -- this mirrors the alignment code for b above.
     977 9:
     978 #ifndef __mcoldfire__
     979 	exg	d4,d5
     980 	movew	d4,d6
     981 	subw	d5,d6		| keep d5 (largest exponent) in d4
     982 	exg	d4,a2
     983 	exg	d5,a3
     984 #else
     985 	movel	d5,d6
     986 	movel	d4,d5
     987 	movel	d6,d4
     988 	subl	d5,d6
     989 	movel	d4,a4
     990 	movel	a2,d4
     991 	movel	a4,a2
     992 	movel	d5,a4
     993 	movel	a3,d5
     994 	movel	a4,a3
     995 #endif
     996 | if difference is too large we don't shift (actually, we can just exit) '
     997 #ifndef __mcoldfire__
     998 	cmpw	IMM (DBL_MANT_DIG+2),d6
     999 #else
    1000 	cmpl	IMM (DBL_MANT_DIG+2),d6
    1001 #endif
    1002 	bge	Ladddf$a$small
    1003 #ifndef __mcoldfire__
    1004 	cmpw	IMM (32),d6	| if difference >= 32, shift by longs
    1005 #else
    1006 	cmpl	IMM (32),d6	| if difference >= 32, shift by longs
    1007 #endif
    1008 	bge	5f
    1009 2:
    1010 #ifndef __mcoldfire__
    1011 	cmpw	IMM (16),d6	| if difference >= 16, shift by words
    1012 #else
    1013 	cmpl	IMM (16),d6	| if difference >= 16, shift by words
    1014 #endif
    1015 	bge	6f
    1016 	bra	3f		| enter dbra loop
    1017 
| Single-bit step: shift d0-d1-d2-d3 right one place (ColdFire variant
| propagates the inter-long carry with btst/bset since roxrl is absent).
    1018 4:
    1019 #ifndef __mcoldfire__
    1020 	lsrl	IMM (1),d0
    1021 	roxrl	IMM (1),d1
    1022 	roxrl	IMM (1),d2
    1023 	roxrl	IMM (1),d3
    1024 #else
    1025 	lsrl	IMM (1),d3
    1026 	btst	IMM (0),d2
    1027 	beq	10f
    1028 	bset	IMM (31),d3
    1029 10:	lsrl	IMM (1),d2
    1030 	btst	IMM (0),d1
    1031 	beq	11f
    1032 	bset	IMM (31),d2
    1033 11:	lsrl	IMM (1),d1
    1034 	btst	IMM (0),d0
    1035 	beq	12f
    1036 	bset	IMM (31),d1
    1037 12:	lsrl	IMM (1),d0
    1038 #endif
    1039 3:
    1040 #ifndef __mcoldfire__
    1041 	dbra	d6,4b
    1042 #else
    1043 	subql	IMM (1),d6
    1044 	bpl	4b
    1045 #endif
| Done shifting; clear d6-d7 and join the common add/subtract path.
    1046 	movel	IMM (0),d7
    1047 	movel	d7,d6
    1048 	bra	Ladddf$4
| Shift right a whole long (32 bits).
    1049 5:
    1050 	movel	d2,d3
    1051 	movel	d1,d2
    1052 	movel	d0,d1
    1053 	movel	IMM (0),d0
    1054 #ifndef __mcoldfire__
    1055 	subw	IMM (32),d6
    1056 #else
    1057 	subl	IMM (32),d6
    1058 #endif
    1059 	bra	2b
| Shift right 16 bits with swap/movew.
    1060 6:
    1061 	movew	d2,d3
    1062 	swap	d3
    1063 	movew	d1,d2
    1064 	swap	d2
    1065 	movew	d0,d1
    1066 	swap	d1
    1067 	movew	IMM (0),d0
    1068 	swap	d0
    1069 #ifndef __mcoldfire__
    1070 	subw	IMM (16),d6
    1071 #else
    1072 	subl	IMM (16),d6
    1073 #endif
    1074 	bra	3b
    1075 Ladddf$3:
| Exponents are equal: no alignment needed.  Just swap the exponent pair
| d4/d5 with a2/a3 so registers match the layout Ladddf$4 expects
| (exponent parked in a2).
    1076 #ifndef __mcoldfire__
    1077 	exg	d4,a2
    1078 	exg	d5,a3
    1079 #else
    1080 	movel	d4,a4
    1081 	movel	a2,d4
    1082 	movel	a4,a2
    1083 	movel	d5,a4
    1084 	movel	a3,d5
    1085 	movel	a4,a3
    1086 #endif
    1087 Ladddf$4:
    1088 | Now we have the numbers in d0--d3 and d4--d7, the exponent in a2, and
    1089 | the signs in a4.
| NOTE(review): the code below actually fetches the signs from a0
| (exg d7,a0 / movel a0,d7), so the comment above presumably means a0
| -- confirm against the function prologue (outside this view).
    1090 
    1091 | Here we have to decide whether to add or subtract the numbers:
    1092 #ifndef __mcoldfire__
    1093 	exg	d7,a0		| get the signs
    1094 	exg	d6,a3		| a3 is free to be used
    1095 #else
    1096 	movel	d7,a4
    1097 	movel	a0,d7
    1098 	movel	a4,a0
    1099 	movel	d6,a4
    1100 	movel	a3,d6
    1101 	movel	a4,a3
    1102 #endif
    1103 	movel	d7,d6		|
    1104 	movew	IMM (0),d7	| get a's sign in d7 '
    1105 	swap	d6              |
    1106 	movew	IMM (0),d6	| and b's sign in d6 '
    1107 	eorl	d7,d6		| compare the signs
    1108 	bmi	Lsubdf$0	| if the signs are different we have
    1109 				| to subtract
| Signs agree: restore the swapped registers and add the two aligned
| 128-bit fractions (d4-d7 into d0-d3) with explicit carry propagation.
    1110 #ifndef __mcoldfire__
    1111 	exg	d7,a0		| else we add the numbers
    1112 	exg	d6,a3		|
    1113 #else
    1114 	movel	d7,a4
    1115 	movel	a0,d7
    1116 	movel	a4,a0
    1117 	movel	d6,a4
    1118 	movel	a3,d6
    1119 	movel	a4,a3
    1120 #endif
    1121 	addl	d7,d3		|
    1122 	addxl	d6,d2		|
    1123 	addxl	d5,d1		|
    1124 	addxl	d4,d0           |
    1125 
    1126 	movel	a2,d4		| return exponent to d4
    1127 	movel	a0,d7		|
    1128 	andl	IMM (0x80000000),d7 | d7 now has the sign
    1129 
    1130 #ifndef __mcoldfire__
    1131 	moveml	sp@+,a2-a3
    1132 #else
    1133 	movel	sp@+,a4
    1134 	movel	sp@+,a3
    1135 	movel	sp@+,a2
    1136 #endif
    1137 
    1138 | Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
    1139 | the case of denormalized numbers in the rounding routine itself).
    1140 | As in the addition (not in the subtraction!) we could have set
    1141 | one more bit we check this:
    1142 	btst	IMM (DBL_MANT_DIG+1),d0
    1143 	beq	1f
| The addition carried into one extra bit: shift the fraction right one
| place and bump the exponent to compensate.
    1144 #ifndef __mcoldfire__
    1145 	lsrl	IMM (1),d0
    1146 	roxrl	IMM (1),d1
    1147 	roxrl	IMM (1),d2
    1148 	roxrl	IMM (1),d3
    1149 	addw	IMM (1),d4
    1150 #else
    1151 	lsrl	IMM (1),d3
    1152 	btst	IMM (0),d2
    1153 	beq	10f
    1154 	bset	IMM (31),d3
    1155 10:	lsrl	IMM (1),d2
    1156 	btst	IMM (0),d1
    1157 	beq	11f
    1158 	bset	IMM (31),d2
    1159 11:	lsrl	IMM (1),d1
    1160 	btst	IMM (0),d0
    1161 	beq	12f
    1162 	bset	IMM (31),d1
    1163 12:	lsrl	IMM (1),d0
    1164 	addl	IMM (1),d4
    1165 #endif
    1166 1:
| Dispatch to the rounding routine selected by the mode word stored in
| _fpCCR; the routine returns to Ladddf$5 through a0.
    1167 	lea	pc@(Ladddf$5),a0 | to return from rounding routine
    1168 	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
    1169 #ifdef __mcoldfire__
    1170 	clrl	d6
    1171 #endif
    1172 	movew	a1@(6),d6	| rounding mode in d6
    1173 	beq	Lround$to$nearest
    1174 #ifndef __mcoldfire__
    1175 	cmpw	IMM (ROUND_TO_PLUS),d6
    1176 #else
    1177 	cmpl	IMM (ROUND_TO_PLUS),d6
    1178 #endif
    1179 	bhi	Lround$to$minus
    1180 	blt	Lround$to$zero
    1181 	bra	Lround$to$plus
    1182 Ladddf$5:
    1183 | Put back the exponent and check for overflow
    1184 #ifndef __mcoldfire__
    1185 	cmpw	IMM (0x7ff),d4	| is the exponent big?
    1186 #else
    1187 	cmpl	IMM (0x7ff),d4	| is the exponent big?
    1188 #endif
    1189 	bge	1f
    1190 	bclr	IMM (DBL_MANT_DIG-1),d0
    1191 #ifndef __mcoldfire__
    1192 	lslw	IMM (4),d4	| put exponent back into position
    1193 #else
    1194 	lsll	IMM (4),d4	| put exponent back into position
    1195 #endif
    1196 	swap	d0		|
    1197 #ifndef __mcoldfire__
    1198 	orw	d4,d0		|
    1199 #else
    1200 	orl	d4,d0		|
    1201 #endif
    1202 	swap	d0		|
    1203 	bra	Ladddf$ret
| Exponent >= 0x7ff: the sum overflowed the double range.
    1204 1:
    1205 	moveq	IMM (ADD),d5
    1206 	bra	Ld$overflow
   1207 
    1208 Lsubdf$0:
    1209 | Here we do the subtraction.
    1210 #ifndef __mcoldfire__
    1211 	exg	d7,a0		| put sign back in a0
    1212 	exg	d6,a3		|
    1213 #else
    1214 	movel	d7,a4
    1215 	movel	a0,d7
    1216 	movel	a4,a0
    1217 	movel	d6,a4
    1218 	movel	a3,d6
    1219 	movel	a4,a3
    1220 #endif
| Subtract the aligned fractions: d0-d3 <- d0-d3 - d4-d7, with borrow
| propagated through subxl.
    1221 	subl	d7,d3		|
    1222 	subxl	d6,d2		|
    1223 	subxl	d5,d1		|
    1224 	subxl	d4,d0		|
    1225 	beq	Ladddf$ret$1	| if zero just exit
    1226 	bpl	1f		| if positive skip the following
| The difference went negative: flip the result sign kept in a0 and
| negate the 128-bit fraction so it is positive again.
    1227 	movel	a0,d7		|
    1228 	bchg	IMM (31),d7	| change sign bit in d7
    1229 	movel	d7,a0		|
    1230 	negl	d3		|
    1231 	negxl	d2		|
    1232 	negxl	d1              | and negate result
    1233 	negxl	d0              |
    1234 1:
    1235 	movel	a2,d4		| return exponent to d4
    1236 	movel	a0,d7
    1237 	andl	IMM (0x80000000),d7 | isolate sign bit
    1238 #ifndef __mcoldfire__
    1239 	moveml	sp@+,a2-a3	|
    1240 #else
    1241 	movel	sp@+,a4
    1242 	movel	sp@+,a3
    1243 	movel	sp@+,a2
    1244 #endif
    1245 
    1246 | Before rounding normalize so bit #DBL_MANT_DIG is set (we will consider
    1247 | the case of denormalized numbers in the rounding routine itself).
    1248 | As in the addition (not in the subtraction!) we could have set
    1249 | one more bit we check this:
    1250 	btst	IMM (DBL_MANT_DIG+1),d0
    1251 	beq	1f
    1252 #ifndef __mcoldfire__
    1253 	lsrl	IMM (1),d0
    1254 	roxrl	IMM (1),d1
    1255 	roxrl	IMM (1),d2
    1256 	roxrl	IMM (1),d3
    1257 	addw	IMM (1),d4
    1258 #else
    1259 	lsrl	IMM (1),d3
    1260 	btst	IMM (0),d2
    1261 	beq	10f
    1262 	bset	IMM (31),d3
    1263 10:	lsrl	IMM (1),d2
    1264 	btst	IMM (0),d1
    1265 	beq	11f
    1266 	bset	IMM (31),d2
    1267 11:	lsrl	IMM (1),d1
    1268 	btst	IMM (0),d0
    1269 	beq	12f
    1270 	bset	IMM (31),d1
    1271 12:	lsrl	IMM (1),d0
    1272 	addl	IMM (1),d4
    1273 #endif
    1274 1:
| Round per the mode in _fpCCR; rounding routine returns to Lsubdf$1.
    1275 	lea	pc@(Lsubdf$1),a0 | to return from rounding routine
    1276 	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
    1277 #ifdef __mcoldfire__
    1278 	clrl	d6
    1279 #endif
    1280 	movew	a1@(6),d6	| rounding mode in d6
    1281 	beq	Lround$to$nearest
    1282 #ifndef __mcoldfire__
    1283 	cmpw	IMM (ROUND_TO_PLUS),d6
    1284 #else
    1285 	cmpl	IMM (ROUND_TO_PLUS),d6
    1286 #endif
    1287 	bhi	Lround$to$minus
    1288 	blt	Lround$to$zero
    1289 	bra	Lround$to$plus
    1290 Lsubdf$1:
    1291 | Put back the exponent and sign (we don't have overflow). '
    1292 	bclr	IMM (DBL_MANT_DIG-1),d0
    1293 #ifndef __mcoldfire__
    1294 	lslw	IMM (4),d4	| put exponent back into position
    1295 #else
    1296 	lsll	IMM (4),d4	| put exponent back into position
    1297 #endif
    1298 	swap	d0		|
    1299 #ifndef __mcoldfire__
    1300 	orw	d4,d0		|
    1301 #else
    1302 	orl	d4,d0		|
    1303 #endif
    1304 	swap	d0		|
    1305 	bra	Ladddf$ret
   1306 
    1307 | If one of the numbers was too small (difference of exponents >=
    1308 | DBL_MANT_DIG+1) we return the other (and now we don't have to '
    1309 | check for finiteness or zero).
    1310 Ladddf$a$small:
| a is negligibly small: restore the saved address/data registers,
| reload b (second argument, at a6@(16)/a6@(20)) and return it as is.
    1311 #ifndef __mcoldfire__
    1312 	moveml	sp@+,a2-a3
    1313 #else
    1314 	movel	sp@+,a4
    1315 	movel	sp@+,a3
    1316 	movel	sp@+,a2
    1317 #endif
    1318 	movel	a6@(16),d0
    1319 	movel	a6@(20),d1
    1320 	PICLEA	SYM (_fpCCR),a0
    1321 	movew	IMM (0),a0@
    1322 #ifndef __mcoldfire__
    1323 	moveml	sp@+,d2-d7	| restore data registers
    1324 #else
    1325 	moveml	sp@,d2-d7
    1326 	| XXX if frame pointer is ever removed, stack pointer must
    1327 	| be adjusted here.
    1328 #endif
    1329 	unlk	a6		| and return
    1330 	rts
    1331 
    1332 Ladddf$b$small:
| b is negligibly small: restore registers, reload a (first argument,
| at a6@(8)/a6@(12)) and return it as is.
    1333 #ifndef __mcoldfire__
    1334 	moveml	sp@+,a2-a3
    1335 #else
    1336 	movel	sp@+,a4
    1337 	movel	sp@+,a3
    1338 	movel	sp@+,a2
    1339 #endif
    1340 	movel	a6@(8),d0
    1341 	movel	a6@(12),d1
    1342 	PICLEA	SYM (_fpCCR),a0
    1343 	movew	IMM (0),a0@
    1344 #ifndef __mcoldfire__
    1345 	moveml	sp@+,d2-d7	| restore data registers
    1346 #else
    1347 	moveml	sp@,d2-d7
    1348 	| XXX if frame pointer is ever removed, stack pointer must
    1349 	| be adjusted here.
    1350 #endif
    1351 	unlk	a6		| and return
    1352 	rts
   1353 
    1354 Ladddf$a$den:
| a is denormalized: substitute the constant held in d7 (0x00200000)
| for its exponent field and rejoin the normal path at Ladddf$1.
    1355 	movel	d7,d4		| d7 contains 0x00200000
    1356 	bra	Ladddf$1
    1357 
    1358 Ladddf$b$den:
| Likewise for a denormalized b; d6 is also complemented before
| rejoining at Ladddf$2 (presumably to restore the mask value that path
| expects -- confirm against Ladddf$2, outside this view).
    1359 	movel	d7,d5           | d7 contains 0x00200000
    1360 	notl	d6
    1361 	bra	Ladddf$2
   1362 
    1363 Ladddf$b:
    1364 | Return b (if a is zero)
    1365 	movel	d2,d0
    1366 	movel	d3,d1
    1367 	bne	1f			| Check if b is -0
    1368 	cmpl	IMM (0x80000000),d0
    1369 	bne	1f
| b is exactly -0 (high long 0x80000000, low long 0): return zero with
| the sign taken from a instead of returning -0.
    1370 	andl	IMM (0x80000000),d7	| Use the sign of a
    1371 	clrl	d0
    1372 	bra	Ladddf$ret
    1373 Ladddf$a:
| Return a (if b is zero): reload it from the stack frame.
    1374 	movel	a6@(8),d0
    1375 	movel	a6@(12),d1
    1376 1:
    1377 	moveq	IMM (ADD),d5
    1378 | Check for NaN and +/-INFINITY.
    1379 	movel	d0,d7         		|
    1380 	andl	IMM (0x80000000),d7	|
    1381 	bclr	IMM (31),d0		|
    1382 	cmpl	IMM (0x7ff00000),d0	|
    1383 	bge	2f			|
    1384 	movel	d0,d0           	| check for zero, since we don't  '
    1385 	bne	Ladddf$ret		| want to return -0 by mistake
    1386 	bclr	IMM (31),d7		|
    1387 	bra	Ladddf$ret		|
    1388 2:
| Exponent field is all ones: NaN if the fraction is nonzero, else
| +/-infinity.
    1389 	andl	IMM (0x000fffff),d0	| check for NaN (nonzero fraction)
    1390 	orl	d1,d0			|
    1391 	bne	Ld$inop         	|
    1392 	bra	Ld$infty		|
   1393 
    1394 Ladddf$ret$1:
| Exit path that still needs a2-a3 (and ColdFire a4) popped first.
    1395 #ifndef __mcoldfire__
    1396 	moveml	sp@+,a2-a3	| restore regs and exit
    1397 #else
    1398 	movel	sp@+,a4
    1399 	movel	sp@+,a3
    1400 	movel	sp@+,a2
    1401 #endif
    1402 
    1403 Ladddf$ret:
    1404 | Normal exit.
| Clear the exception flags in _fpCCR, merge the sign bit kept in d7
| into the result's high long, restore d2-d7, and return in d0-d1.
    1405 	PICLEA	SYM (_fpCCR),a0
    1406 	movew	IMM (0),a0@
    1407 	orl	d7,d0		| put sign bit back
    1408 #ifndef __mcoldfire__
    1409 	moveml	sp@+,d2-d7
    1410 #else
    1411 	moveml	sp@,d2-d7
    1412 	| XXX if frame pointer is ever removed, stack pointer must
    1413 	| be adjusted here.
    1414 #endif
    1415 	unlk	a6
    1416 	rts
    1417 
    1418 Ladddf$ret$den:
    1419 | Return a denormalized number.
| One extra right shift of the 64-bit result d0-d1 (no hidden bit for
| denormals), then exit through the normal path.
    1420 #ifndef __mcoldfire__
    1421 	lsrl	IMM (1),d0	| shift right once more
    1422 	roxrl	IMM (1),d1	|
    1423 #else
    1424 	lsrl	IMM (1),d1
    1425 	btst	IMM (0),d0
    1426 	beq	10f
    1427 	bset	IMM (31),d1
    1428 10:	lsrl	IMM (1),d0
    1429 #endif
    1430 	bra	Ladddf$ret
   1431 
    1432 Ladddf$nf:
| At least one operand is NaN or +/-infinity: reload both operands and
| sort out NaN vs. infinity cases from scratch.
    1433 	moveq	IMM (ADD),d5
    1434 | This could be faster but it is not worth the effort, since it is not
    1435 | executed very often. We sacrifice speed for clarity here.
    1436 	movel	a6@(8),d0	| get the numbers back (remember that we
    1437 	movel	a6@(12),d1	| did some processing already)
    1438 	movel	a6@(16),d2	|
    1439 	movel	a6@(20),d3	|
    1440 	movel	IMM (0x7ff00000),d4 | useful constant (INFINITY)
    1441 	movel	d0,d7		| save sign bits
    1442 	movel	d2,d6		|
    1443 	bclr	IMM (31),d0	| clear sign bits
    1444 	bclr	IMM (31),d2	|
    1445 | We know that one of them is either NaN of +/-INFINITY
    1446 | Check for NaN (if either one is NaN return NaN)
    1447 	cmpl	d4,d0		| check first a (d0)
    1448 	bhi	Ld$inop		| if d0 > 0x7ff00000 or equal and
    1449 	bne	2f
    1450 	tstl	d1		| d1 > 0, a is NaN
    1451 	bne	Ld$inop		|
    1452 2:	cmpl	d4,d2		| check now b (d1)
    1453 	bhi	Ld$inop		|
    1454 	bne	3f
    1455 	tstl	d3		|
    1456 	bne	Ld$inop		|
    1457 3:
    1458 | Now comes the check for +/-INFINITY. We know that both are (maybe not
    1459 | finite) numbers, but we have to check if both are infinite whether we
    1460 | are adding or subtracting them.
    1461 	eorl	d7,d6		| to check sign bits
    1462 	bmi	1f
| Signs are equal: even if both are infinite their sum is a (signed)
| infinity, so return infinity with the common sign.
    1463 	andl	IMM (0x80000000),d7 | get (common) sign bit
    1464 	bra	Ld$infty
    1465 1:
    1466 | We know one (or both) are infinite, so we test for equality between the
    1467 | two numbers (if they are equal they have to be infinite both, so we
    1468 | return NaN).
    1469 	cmpl	d2,d0		| are both infinite?
    1470 	bne	1f		| if d0 <> d2 they are not equal
    1471 	cmpl	d3,d1		| if d0 == d2 test d3 and d1
    1472 	beq	Ld$inop		| if equal return NaN
    1473 1:
    1474 	andl	IMM (0x80000000),d7 | get a's sign bit '
    1475 	cmpl	d4,d0		| test now for infinity
    1476 	beq	Ld$infty	| if a is INFINITY return with this sign
    1477 	bchg	IMM (31),d7	| else we know b is INFINITY and has
    1478 	bra	Ld$infty	| the opposite sign
   1479 
    1480 |=============================================================================
    1481 |                              __muldf3
    1482 |=============================================================================
    1483 
    1484 | double __muldf3(double, double);
| Software double-precision multiply.  Arguments arrive on the stack
| (a at a6@(8..12), b at a6@(16..20)); the result is returned in d0-d1.
| d2-d7 are saved in the prologue and restored on every exit path; the
| result sign (XOR of the operand signs) is parked in a0 throughout.
    1485 	FUNC(__muldf3)
    1486 SYM (__muldf3):
    1487 #ifndef __mcoldfire__
    1488 	link	a6,IMM (0)
    1489 	moveml	d2-d7,sp@-
    1490 #else
    1491 	link	a6,IMM (-24)
    1492 	moveml	d2-d7,sp@
    1493 #endif
    1494 	movel	a6@(8),d0		| get a into d0-d1
    1495 	movel	a6@(12),d1		|
    1496 	movel	a6@(16),d2		| and b into d2-d3
    1497 	movel	a6@(20),d3		|
    1498 	movel	d0,d7			| d7 will hold the sign of the product
    1499 	eorl	d2,d7			|
    1500 	andl	IMM (0x80000000),d7	|
    1501 	movel	d7,a0			| save sign bit into a0
    1502 	movel	IMM (0x7ff00000),d7	| useful constant (+INFINITY)
    1503 	movel	d7,d6			| another (mask for fraction)
    1504 	notl	d6			|
    1505 	bclr	IMM (31),d0		| get rid of a's sign bit '
    1506 	movel	d0,d4			|
    1507 	orl	d1,d4			|
    1508 	beq	Lmuldf$a$0		| branch if a is zero
    1509 	movel	d0,d4			|
    1510 	bclr	IMM (31),d2		| get rid of b's sign bit '
    1511 	movel	d2,d5			|
    1512 	orl	d3,d5			|
    1513 	beq	Lmuldf$b$0		| branch if b is zero
    1514 	movel	d2,d5			|
    1515 	cmpl	d7,d0			| is a big?
    1516 	bhi	Lmuldf$inop		| if a is NaN return NaN
    1517 	beq	Lmuldf$a$nf		| we still have to check d1 and b ...
    1518 	cmpl	d7,d2			| now compare b with INFINITY
    1519 	bhi	Lmuldf$inop		| is b NaN?
    1520 	beq	Lmuldf$b$nf 		| we still have to check d3 ...
    1521 | Here we have both numbers finite and nonzero (and with no sign bit).
    1522 | Now we get the exponents into d4 and d5.
    1523 	andl	d7,d4			| isolate exponent in d4
    1524 	beq	Lmuldf$a$den		| if exponent zero, have denormalized
    1525 	andl	d6,d0			| isolate fraction
    1526 	orl	IMM (0x00100000),d0	| and put hidden bit back
    1527 	swap	d4			| I like exponents in the first byte
    1528 #ifndef __mcoldfire__
    1529 	lsrw	IMM (4),d4		|
    1530 #else
    1531 	lsrl	IMM (4),d4		|
    1532 #endif
    1533 Lmuldf$1:
| Same unpacking for b: exponent into d5, fraction (with hidden bit)
| left in d2-d3.
    1534 	andl	d7,d5			|
    1535 	beq	Lmuldf$b$den		|
    1536 	andl	d6,d2			|
    1537 	orl	IMM (0x00100000),d2	| and put hidden bit back
    1538 	swap	d5			|
    1539 #ifndef __mcoldfire__
    1540 	lsrw	IMM (4),d5		|
    1541 #else
    1542 	lsrl	IMM (4),d5		|
    1543 #endif
    1544 Lmuldf$2:				|
    1545 #ifndef __mcoldfire__
    1546 	addw	d5,d4			| add exponents
    1547 	subw	IMM (D_BIAS+1),d4	| and subtract bias (plus one)
    1548 #else
    1549 	addl	d5,d4			| add exponents
    1550 	subl	IMM (D_BIAS+1),d4	| and subtract bias (plus one)
    1551 #endif
   1552 
    1553 | We are now ready to do the multiplication. The situation is as follows:
    1554 | both a and b have bit 52 ( bit 20 of d0 and d2) set (even if they were
    1555 | denormalized to start with!), which means that in the product bit 104
    1556 | (which will correspond to bit 8 of the fourth long) is set.
    1557 
    1558 | Here we have to do the product.
    1559 | To do it we have to juggle the registers back and forth, as there are not
    1560 | enough to keep everything in them. So we use the address registers to keep
    1561 | some intermediate data.
    1562 
    1563 #ifndef __mcoldfire__
    1564 	moveml	a2-a3,sp@-	| save a2 and a3 for temporary use
    1565 #else
    1566 	movel	a2,sp@-
    1567 	movel	a3,sp@-
    1568 	movel	a4,sp@-
    1569 #endif
    1570 	movel	IMM (0),a2	| a2 is a null register
    1571 	movel	d4,a3		| and a3 will preserve the exponent
    1572 
    1573 | First, shift d2-d3 so bit 20 becomes bit 31:
    1574 #ifndef __mcoldfire__
    1575 	rorl	IMM (5),d2	| rotate d2 5 places right
    1576 	swap	d2		| and swap it
    1577 	rorl	IMM (5),d3	| do the same thing with d3
    1578 	swap	d3		|
    1579 	movew	d3,d6		| get the rightmost 11 bits of d3
    1580 	andw	IMM (0x07ff),d6	|
    1581 	orw	d6,d2		| and put them into d2
    1582 	andw	IMM (0xf800),d3	| clear those bits in d3
    1583 #else
    1584 	moveq	IMM (11),d7	| left shift d2 11 bits
    1585 	lsll	d7,d2
    1586 	movel	d3,d6		| get a copy of d3
    1587 	lsll	d7,d3		| left shift d3 11 bits
    1588 	andl	IMM (0xffe00000),d6 | get the top 11 bits of d3
    1589 	moveq	IMM (21),d7	| right shift them 21 bits
    1590 	lsrl	d7,d6
    1591 	orl	d6,d2		| stick them at the end of d2
    1592 #endif
    1593 
    1594 	movel	d2,d6		| move b into d6-d7
    1595 	movel	d3,d7           | move a into d4-d5
    1596 	movel	d0,d4           | and clear d0-d1-d2-d3 (to put result)
    1597 	movel	d1,d5           |
    1598 	movel	IMM (0),d3	|
    1599 	movel	d3,d2           |
    1600 	movel	d3,d1           |
    1601 	movel	d3,d0	        |
    1602 
    1603 | We use a1 as counter:
    1604 	movel	IMM (DBL_MANT_DIG-1),a1
    1605 #ifndef __mcoldfire__
    1606 	exg	d7,a1
    1607 #else
    1608 	movel	d7,a4
    1609 	movel	a1,d7
    1610 	movel	a4,a1
    1611 #endif
    1612 
| Shift-and-add multiply loop: each iteration shifts the 128-bit partial
| product d0-d1-d2-d3 left one bit, shifts the multiplier d6-d7 left one
| bit, and if the bit shifted out of d6 was set adds the multiplicand
| d4-d5 (extended with zeros via a2) into the product.  The loop counter
| lives in a1 and is swapped into d7 only long enough to test it.
    1613 1:
    1614 #ifndef __mcoldfire__
    1615 	exg	d7,a1		| put counter back in a1
    1616 #else
    1617 	movel	d7,a4
    1618 	movel	a1,d7
    1619 	movel	a4,a1
    1620 #endif
    1621 	addl	d3,d3		| shift sum once left
    1622 	addxl	d2,d2           |
    1623 	addxl	d1,d1           |
    1624 	addxl	d0,d0           |
    1625 	addl	d7,d7		|
    1626 	addxl	d6,d6		|
    1627 	bcc	2f		| if bit clear skip the following
    1628 #ifndef __mcoldfire__
    1629 	exg	d7,a2		|
    1630 #else
    1631 	movel	d7,a4
    1632 	movel	a2,d7
    1633 	movel	a4,a2
    1634 #endif
    1635 	addl	d5,d3		| else add a to the sum
    1636 	addxl	d4,d2		|
    1637 	addxl	d7,d1		|
    1638 	addxl	d7,d0		|
    1639 #ifndef __mcoldfire__
    1640 	exg	d7,a2		|
    1641 #else
    1642 	movel	d7,a4
    1643 	movel	a2,d7
    1644 	movel	a4,a2
    1645 #endif
    1646 2:
    1647 #ifndef __mcoldfire__
    1648 	exg	d7,a1		| put counter in d7
    1649 	dbf	d7,1b		| decrement and branch
    1650 #else
    1651 	movel	d7,a4
    1652 	movel	a1,d7
    1653 	movel	a4,a1
    1654 	subql	IMM (1),d7
    1655 	bpl	1b
    1656 #endif
   1657 
    1658 	movel	a3,d4		| restore exponent
    1659 #ifndef __mcoldfire__
    1660 	moveml	sp@+,a2-a3
    1661 #else
    1662 	movel	sp@+,a4
    1663 	movel	sp@+,a3
    1664 	movel	sp@+,a2
    1665 #endif
    1666 
    1667 | Now we have the product in d0-d1-d2-d3, with bit 8 of d0 set. The
    1668 | first thing to do now is to normalize it so bit 8 becomes bit
    1669 | DBL_MANT_DIG-32 (to do the rounding); later we will shift right.
| Shift the 128-bit product left 16 bits by moving words up ...
    1670 	swap	d0
    1671 	swap	d1
    1672 	movew	d1,d0
    1673 	swap	d2
    1674 	movew	d2,d1
    1675 	swap	d3
    1676 	movew	d3,d2
    1677 	movew	IMM (0),d3
| ... then shift the whole product right 3 bits (the ColdFire variant
| does the 3-bit shift per long, carrying bits across with lsll/orl).
    1678 #ifndef __mcoldfire__
    1679 	lsrl	IMM (1),d0
    1680 	roxrl	IMM (1),d1
    1681 	roxrl	IMM (1),d2
    1682 	roxrl	IMM (1),d3
    1683 	lsrl	IMM (1),d0
    1684 	roxrl	IMM (1),d1
    1685 	roxrl	IMM (1),d2
    1686 	roxrl	IMM (1),d3
    1687 	lsrl	IMM (1),d0
    1688 	roxrl	IMM (1),d1
    1689 	roxrl	IMM (1),d2
    1690 	roxrl	IMM (1),d3
    1691 #else
    1692 	moveq	IMM (29),d6
    1693 	lsrl	IMM (3),d3
    1694 	movel	d2,d7
    1695 	lsll	d6,d7
    1696 	orl	d7,d3
    1697 	lsrl	IMM (3),d2
    1698 	movel	d1,d7
    1699 	lsll	d6,d7
    1700 	orl	d7,d2
    1701 	lsrl	IMM (3),d1
    1702 	movel	d0,d7
    1703 	lsll	d6,d7
    1704 	orl	d7,d1
    1705 	lsrl	IMM (3),d0
    1706 #endif
    1707 
    1708 | Now round, check for over- and underflow, and exit.
    1709 	movel	a0,d7		| get sign bit back into d7
    1710 	moveq	IMM (MULTIPLY),d5
    1711 
| If the product carried into one extra bit, shift right once more and
| bump the exponent before entering the common rounding code.
    1712 	btst	IMM (DBL_MANT_DIG+1-32),d0
    1713 	beq	Lround$exit
    1714 #ifndef __mcoldfire__
    1715 	lsrl	IMM (1),d0
    1716 	roxrl	IMM (1),d1
    1717 	addw	IMM (1),d4
    1718 #else
    1719 	lsrl	IMM (1),d1
    1720 	btst	IMM (0),d0
    1721 	beq	10f
    1722 	bset	IMM (31),d1
    1723 10:	lsrl	IMM (1),d0
    1724 	addl	IMM (1),d4
    1725 #endif
    1726 	bra	Lround$exit
   1727 
    1728 Lmuldf$inop:
| A NaN operand was detected early: signal invalid operation.
    1729 	moveq	IMM (MULTIPLY),d5
    1730 	bra	Ld$inop
    1731 
    1732 Lmuldf$b$nf:
| b's exponent field is all ones: NaN if its fraction is nonzero,
| otherwise b is infinity and (a being finite) the product overflows.
    1733 	moveq	IMM (MULTIPLY),d5
    1734 	movel	a0,d7		| get sign bit back into d7
    1735 	tstl	d3		| we know d2 == 0x7ff00000, so check d3
    1736 	bne	Ld$inop		| if d3 <> 0 b is NaN
    1737 	bra	Ld$overflow	| else we have overflow (since a is finite)
    1738 
    1739 Lmuldf$a$nf:
| Same check for a.
    1740 	moveq	IMM (MULTIPLY),d5
    1741 	movel	a0,d7		| get sign bit back into d7
    1742 	tstl	d1		| we know d0 == 0x7ff00000, so check d1
    1743 	bne	Ld$inop		| if d1 <> 0 a is NaN
    1744 	bra	Ld$overflow	| else signal overflow
   1745 
    1746 | If either number is zero return zero, unless the other is +/-INFINITY or
    1747 | NaN, in which case we return NaN.
    1748 Lmuldf$b$0:
| b == 0: arrange the zero result (with the product sign from a0) in
| d0-d1 and the other, nonzero operand in d2-d3 for the finiteness test.
    1749 	moveq	IMM (MULTIPLY),d5
    1750 #ifndef __mcoldfire__
    1751 	exg	d2,d0		| put b (==0) into d0-d1
    1752 	exg	d3,d1		| and a (with sign bit cleared) into d2-d3
    1753 	movel	a0,d0		| set result sign
    1754 #else
    1755 	movel	d0,d2		| put a into d2-d3
    1756 	movel	d1,d3
    1757 	movel	a0,d0		| put result zero into d0-d1
    1758 	movq	IMM(0),d1
    1759 #endif
    1760 	bra	1f
    1761 Lmuldf$a$0:
| a == 0: same idea, reloading b from the stack frame.
    1762 	movel	a0,d0		| set result sign
    1763 	movel	a6@(16),d2	| put b into d2-d3 again
    1764 	movel	a6@(20),d3	|
    1765 	bclr	IMM (31),d2	| clear sign bit
    1766 1:	cmpl	IMM (0x7ff00000),d2 | check for non-finiteness
    1767 	bge	Ld$inop		| in case NaN or +/-INFINITY return NaN
| 0 * finite = signed zero: clear exception flags and return.
    1768 	PICLEA	SYM (_fpCCR),a0
    1769 	movew	IMM (0),a0@
    1770 #ifndef __mcoldfire__
    1771 	moveml	sp@+,d2-d7
    1772 #else
    1773 	moveml	sp@,d2-d7
    1774 	| XXX if frame pointer is ever removed, stack pointer must
    1775 	| be adjusted here.
    1776 #endif
    1777 	unlk	a6
    1778 	rts
   1779 
    1780 | If a number is denormalized we put an exponent of 1 but do not put the
    1781 | hidden bit back into the fraction; instead we shift left until bit 21
    1782 | (the hidden bit) is set, adjusting the exponent accordingly. We do this
    1783 | to ensure that the product of the fractions is close to 1.
    1784 Lmuldf$a$den:
| Normalize denormalized a: start from exponent 1 in d4, mask the
| fraction, then shift d0-d1 left (decrementing d4 each time) until the
| hidden-bit position (bit 20 of d0) is set, then rejoin at Lmuldf$1.
    1785 	movel	IMM (1),d4
    1786 	andl	d6,d0
    1787 1:	addl	d1,d1           | shift a left until bit 20 is set
    1788 	addxl	d0,d0		|
    1789 #ifndef __mcoldfire__
    1790 	subw	IMM (1),d4	| and adjust exponent
    1791 #else
    1792 	subl	IMM (1),d4	| and adjust exponent
    1793 #endif
    1794 	btst	IMM (20),d0	|
    1795 	bne	Lmuldf$1        |
    1796 	bra	1b
    1797 
    1798 Lmuldf$b$den:
| Same normalization for denormalized b (d2-d3, exponent d5), rejoining
| at Lmuldf$2.
    1799 	movel	IMM (1),d5
    1800 	andl	d6,d2
    1801 1:	addl	d3,d3		| shift b left until bit 20 is set
    1802 	addxl	d2,d2		|
    1803 #ifndef __mcoldfire__
    1804 	subw	IMM (1),d5	| and adjust exponent
    1805 #else
    1806 	subql	IMM (1),d5	| and adjust exponent
    1807 #endif
    1808 	btst	IMM (20),d2	|
    1809 	bne	Lmuldf$2	|
    1810 	bra	1b
   1811 
   1812 
    1813 |=============================================================================
    1814 |                              __divdf3
    1815 |=============================================================================
    1816 
    1817 | double __divdf3(double, double);
| Software double-precision divide: computes a/b with a at a6@(8..12)
| and b at a6@(16..20); the result is returned in d0-d1.  The result
| sign (XOR of operand signs) is parked in a0 throughout.
    1818 	FUNC(__divdf3)
    1819 SYM (__divdf3):
    1820 #ifndef __mcoldfire__
    1821 	link	a6,IMM (0)
    1822 	moveml	d2-d7,sp@-
    1823 #else
    1824 	link	a6,IMM (-24)
    1825 	moveml	d2-d7,sp@
    1826 #endif
    1827 	movel	a6@(8),d0	| get a into d0-d1
    1828 	movel	a6@(12),d1	|
    1829 	movel	a6@(16),d2	| and b into d2-d3
    1830 	movel	a6@(20),d3	|
    1831 	movel	d0,d7		| d7 will hold the sign of the result
    1832 	eorl	d2,d7		|
    1833 	andl	IMM (0x80000000),d7
    1834 	movel	d7,a0		| save sign into a0
    1835 	movel	IMM (0x7ff00000),d7 | useful constant (+INFINITY)
    1836 	movel	d7,d6		| another (mask for fraction)
    1837 	notl	d6		|
    1838 	bclr	IMM (31),d0	| get rid of a's sign bit '
    1839 	movel	d0,d4		|
    1840 	orl	d1,d4		|
    1841 	beq	Ldivdf$a$0	| branch if a is zero
    1842 	movel	d0,d4		|
    1843 	bclr	IMM (31),d2	| get rid of b's sign bit '
    1844 	movel	d2,d5		|
    1845 	orl	d3,d5		|
    1846 	beq	Ldivdf$b$0	| branch if b is zero
    1847 	movel	d2,d5
    1848 	cmpl	d7,d0		| is a big?
    1849 	bhi	Ldivdf$inop	| if a is NaN return NaN
    1850 	beq	Ldivdf$a$nf	| if d0 == 0x7ff00000 we check d1
    1851 	cmpl	d7,d2		| now compare b with INFINITY
    1852 	bhi	Ldivdf$inop	| if b is NaN return NaN
    1853 	beq	Ldivdf$b$nf	| if d2 == 0x7ff00000 we check d3
    1854 | Here we have both numbers finite and nonzero (and with no sign bit).
    1855 | Now we get the exponents into d4 and d5 and normalize the numbers to
    1856 | ensure that the ratio of the fractions is around 1. We do this by
    1857 | making sure that both numbers have bit #DBL_MANT_DIG-32-1 (hidden bit)
    1858 | set, even if they were denormalized to start with.
    1859 | Thus, the result will satisfy: 2 > result > 1/2.
    1860 	andl	d7,d4		| and isolate exponent in d4
    1861 	beq	Ldivdf$a$den	| if exponent is zero we have a denormalized
    1862 	andl	d6,d0		| and isolate fraction
    1863 	orl	IMM (0x00100000),d0 | and put hidden bit back
    1864 	swap	d4		| I like exponents in the first byte
    1865 #ifndef __mcoldfire__
    1866 	lsrw	IMM (4),d4	|
    1867 #else
    1868 	lsrl	IMM (4),d4	|
    1869 #endif
    1870 Ldivdf$1:			|
| Same unpacking for b: exponent into d5, fraction (with hidden bit)
| left in d2-d3.
    1871 	andl	d7,d5		|
    1872 	beq	Ldivdf$b$den	|
    1873 	andl	d6,d2		|
    1874 	orl	IMM (0x00100000),d2
    1875 	swap	d5		|
    1876 #ifndef __mcoldfire__
    1877 	lsrw	IMM (4),d5	|
    1878 #else
    1879 	lsrl	IMM (4),d5	|
    1880 #endif
    1881 Ldivdf$2:			|
    1882 #ifndef __mcoldfire__
    1883 	subw	d5,d4		| subtract exponents
    1884 	addw	IMM (D_BIAS),d4	| and add bias
    1885 #else
    1886 	subl	d5,d4		| subtract exponents
    1887 	addl	IMM (D_BIAS),d4	| and add bias
    1888 #endif
   1889 
    1890 | We are now ready to do the division. We have prepared things in such a way
    1891 | that the ratio of the fractions will be less than 2 but greater than 1/2.
    1892 | At this point the registers in use are:
    1893 | d0-d1	hold a (first operand, bit DBL_MANT_DIG-32=0, bit
    1894 | DBL_MANT_DIG-1-32=1)
    1895 | d2-d3	hold b (second operand, bit DBL_MANT_DIG-32=1)
    1896 | d4	holds the difference of the exponents, corrected by the bias
    1897 | a0	holds the sign of the ratio
    1898 
    1899 | To do the rounding correctly we need to keep information about the
    1900 | nonsignificant bits. One way to do this would be to do the division
    1901 | using four registers; another is to use two registers (as originally
    1902 | I did), but use a sticky bit to preserve information about the
    1903 | fractional part. Note that we can keep that info in a1, which is not
    1904 | used.
    1905 	movel	IMM (0),d6	| d6-d7 will hold the result
    1906 	movel	d6,d7		|
    1907 	movel	IMM (0),a1	| and a1 will hold the sticky bit
    1908 
    1909 	movel	IMM (DBL_MANT_DIG-32+1),d5
    1910 
| First quotient long: restoring shift-and-subtract division producing
| one quotient bit (into d6) per iteration.
    1911 1:	cmpl	d0,d2		| is a < b?
    1912 	bhi	3f		| if b > a skip the following
    1913 	beq	4f		| if d0==d2 check d1 and d3
    1914 2:	subl	d3,d1		|
    1915 	subxl	d2,d0		| a <-- a - b
    1916 	bset	d5,d6		| set the corresponding bit in d6
    1917 3:	addl	d1,d1		| shift a by 1
    1918 	addxl	d0,d0		|
    1919 #ifndef __mcoldfire__
    1920 	dbra	d5,1b		| and branch back
    1921 #else
    1922 	subql	IMM (1), d5
    1923 	bpl	1b
    1924 #endif
    1925 	bra	5f
    1926 4:	cmpl	d1,d3		| here d0==d2, so check d1 and d3
    1927 	bhi	3b		| if d1 > d2 skip the subtraction
    1928 	bra	2b		| else go do it
    1929 5:
    1930 | Here we have to start setting the bits in the second long.
    1931 	movel	IMM (31),d5	| again d5 is counter
    1932 
| Second quotient long: identical loop, bits go into d7.
    1933 1:	cmpl	d0,d2		| is a < b?
    1934 	bhi	3f		| if b > a skip the following
    1935 	beq	4f		| if d0==d2 check d1 and d3
    1936 2:	subl	d3,d1		|
    1937 	subxl	d2,d0		| a <-- a - b
    1938 	bset	d5,d7		| set the corresponding bit in d7
    1939 3:	addl	d1,d1		| shift a by 1
    1940 	addxl	d0,d0		|
    1941 #ifndef __mcoldfire__
    1942 	dbra	d5,1b		| and branch back
    1943 #else
    1944 	subql	IMM (1), d5
    1945 	bpl	1b
    1946 #endif
    1947 	bra	5f
    1948 4:	cmpl	d1,d3		| here d0==d2, so check d1 and d3
    1949 	bhi	3b		| if d1 > d2 skip the subtraction
    1950 	bra	2b		| else go do it
    1951 5:
    1952 | Now go ahead checking until we hit a one, which we store in d2.
    1953 	movel	IMM (DBL_MANT_DIG),d5
    1954 1:	cmpl	d2,d0		| is a < b?
    1955 	bhi	4f		| if b < a, exit
    1956 	beq	3f		| if d0==d2 check d1 and d3
    1957 2:	addl	d1,d1		| shift a by 1
    1958 	addxl	d0,d0		|
    1959 #ifndef __mcoldfire__
    1960 	dbra	d5,1b		| and branch back
    1961 #else
    1962 	subql	IMM (1), d5
    1963 	bpl	1b
    1964 #endif
    1965 	movel	IMM (0),d2	| here no sticky bit was found
    1966 	movel	d2,d3
    1967 	bra	5f
    1968 3:	cmpl	d1,d3		| here d0==d2, so check d1 and d3
    1969 	bhi	2b		| if d1 > d2 go back
    1970 4:
    1971 | Here put the sticky bit in d2-d3 (in the position which actually corresponds
    1972 | to it; if you don't do this the algorithm loses in some cases). '
    1973 	movel	IMM (0),d2
    1974 	movel	d2,d3
    1975 #ifndef __mcoldfire__
    1976 	subw	IMM (DBL_MANT_DIG),d5
    1977 	addw	IMM (63),d5
    1978 	cmpw	IMM (31),d5
    1979 #else
    1980 	subl	IMM (DBL_MANT_DIG),d5
    1981 	addl	IMM (63),d5
    1982 	cmpl	IMM (31),d5
    1983 #endif
    1984 	bhi	2f
    1985 1:	bset	d5,d3
    1986 	bra	5f
| NOTE(review): the subtract-32 below is unreachable -- the only way to
| reach label 2 is the bhi above, which jumps past it.  The code still
| works because bset with a data-register destination uses the bit
| number modulo 32 (M68000 PRM), so d5 >= 32 lands on the right bit of
| d2 anyway; presumably dead code from an earlier revision -- confirm.
    1987 #ifndef __mcoldfire__
    1988 	subw	IMM (32),d5
    1989 #else
    1990 	subl	IMM (32),d5
    1991 #endif
    1992 2:	bset	d5,d2
    1993 5:
    1994 | Finally we are finished! Move the longs in the address registers to
    1995 | their final destination:
    1996 	movel	d6,d0
    1997 	movel	d7,d1
    1998 	movel	IMM (0),d3
   1999 
   2000 | Here we have finished the division, with the result in d0-d1-d2-d3, with
   2001 | 2^21 <= d6 < 2^23. Thus bit 23 is not set, but bit 22 could be set.
   2002 | If it is not, then definitely bit 21 is set. Normalize so bit 22 is
   2003 | not set:
   2004 	btst	IMM (DBL_MANT_DIG-32+1),d0
   2005 	beq	1f
   2006 #ifndef __mcoldfire__
   2007 	lsrl	IMM (1),d0
   2008 	roxrl	IMM (1),d1
   2009 	roxrl	IMM (1),d2
   2010 	roxrl	IMM (1),d3
   2011 	addw	IMM (1),d4
   2012 #else
   2013 	lsrl	IMM (1),d3
   2014 	btst	IMM (0),d2
   2015 	beq	10f
   2016 	bset	IMM (31),d3
   2017 10:	lsrl	IMM (1),d2
   2018 	btst	IMM (0),d1
   2019 	beq	11f
   2020 	bset	IMM (31),d2
   2021 11:	lsrl	IMM (1),d1
   2022 	btst	IMM (0),d0
   2023 	beq	12f
   2024 	bset	IMM (31),d1
   2025 12:	lsrl	IMM (1),d0
   2026 	addl	IMM (1),d4
   2027 #endif
   2028 1:
   2029 | Now round, check for over- and underflow, and exit.
   2030 	movel	a0,d7		| restore sign bit to d7
   2031 	moveq	IMM (DIVIDE),d5
   2032 	bra	Lround$exit
   2033 
Ldivdf$inop:
| Invalid operation in the divide: tag the exception with DIVIDE and
| return a quiet NaN through the common double exit.
	moveq	IMM (DIVIDE),d5
	bra	Ld$inop
   2037 
Ldivdf$a$0:
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
| return a properly signed zero.
	moveq	IMM (DIVIDE),d5
	bclr	IMM (31),d2	| clear b's sign bit
	movel	d2,d4		|
	orl	d3,d4		| d4 == 0 iff b == 0
	beq	Ld$inop		| if b is also zero return NaN
	cmpl	IMM (0x7ff00000),d2 | check for NaN
	bhi	Ld$inop		| fraction bits set in d2 -> b is NaN
	blt	1f		| exponent below maximum -> b is finite
	tstl	d3		| d2 == 0x7ff00000: NaN iff d3 != 0
	bne	Ld$inop		|
1:	movel	a0,d0		| else return signed zero
	moveq	IMM(0),d1	|
	PICLEA	SYM (_fpCCR),a0	| clear exception flags
	movew	IMM (0),a0@	|
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7	| restore the saved data registers
#else
	moveml	sp@,d2-d7	|
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6		|
	rts			|
   2065 
Ldivdf$b$0:
	moveq	IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
	movel	a0,d7		| put a's sign bit back in d7 '
	cmpl	IMM (0x7ff00000),d0 | compare d0 with INFINITY
	bhi	Ld$inop		| if larger it is NaN
	tstl	d1		| d0 <= 0x7ff00000: NaN only if the
	bne	Ld$inop		| low fraction long is nonzero at INF
	bra	Ld$div$0	| else signal DIVIDE_BY_ZERO
   2077 
Ldivdf$b$nf:
	moveq	IMM (DIVIDE),d5
| If d2 == 0x7ff00000 we have to check d3 to tell NaN from infinity.
	tstl	d3		|
	bne	Ld$inop		| if d3 <> 0, b is NaN
	bra	Ld$underflow	| else b is +/-INFINITY, so signal underflow
   2084 
Ldivdf$a$nf:
	moveq	IMM (DIVIDE),d5
| If d0 == 0x7ff00000 we have to check d1.
	tstl	d1		|
	bne	Ld$inop		| if d1 <> 0, a is NaN
| If a is INFINITY we have to check b
| NOTE(review): d7 is expected to hold 0x7ff00000 here, set up by the
| caller before branching in -- confirm against the full __divdf3 body.
	cmpl	d7,d2		| compare b with INFINITY
	bge	Ld$inop		| if b is NaN or INFINITY return NaN
	tstl	d3		|
	bne	Ld$inop		|
	bra	Ld$overflow	| else return overflow
   2096 
| If a number is denormalized we put an exponent of 1 but do not put the
| bit back into the fraction.
Ldivdf$a$den:
	movel	IMM (1),d4	| denormal: a's exponent starts at 1
	andl	d6,d0		| mask a's fraction (d6 set up by the caller)
1:	addl	d1,d1		| shift a left until bit 20 is set
	addxl	d0,d0
#ifndef __mcoldfire__
	subw	IMM (1),d4	| and adjust exponent
#else
	subl	IMM (1),d4	| and adjust exponent
#endif
	btst	IMM (DBL_MANT_DIG-32-1),d0
	bne	Ldivdf$1	| normalized: resume the division
	bra	1b
   2112 
Ldivdf$b$den:
	movel	IMM (1),d5	| denormal: b's exponent starts at 1
	andl	d6,d2		| mask b's fraction (d6 set up by the caller)
1:	addl	d3,d3		| shift b left until bit 20 is set
	addxl	d2,d2
#ifndef __mcoldfire__
	subw	IMM (1),d5	| and adjust exponent
#else
	subql	IMM (1),d5	| and adjust exponent
#endif
	btst	IMM (DBL_MANT_DIG-32-1),d2
	bne	Ldivdf$2	| normalized: resume the division
	bra	1b
   2126 
Lround$exit:
| This is a common exit point for __muldf3 and __divdf3. When they enter
| this point the sign of the result is in d7, the result in d0-d1, normalized
| so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.

| First check for underflow in the exponent:
#ifndef __mcoldfire__
	cmpw	IMM (-DBL_MANT_DIG-1),d4
#else
	cmpl	IMM (-DBL_MANT_DIG-1),d4
#endif
	blt	Ld$underflow
| It could happen that the exponent is less than 1, in which case the
| number is denormalized. In this case we shift right and adjust the
| exponent until it becomes 1 or the fraction is zero (in the latter case
| we signal underflow and return zero).
	movel	d7,a0		| park the sign bit in a0 meanwhile
	movel	IMM (0),d6	| use d6-d7 to collect bits flushed right
	movel	d6,d7		| use d6-d7 to collect bits flushed right
#ifndef __mcoldfire__
	cmpw	IMM (1),d4	| if the exponent is less than 1 we
#else
	cmpl	IMM (1),d4	| if the exponent is less than 1 we
#endif
	bge	2f		| have to shift right (denormalize)
1:
#ifndef __mcoldfire__
	addw	IMM (1),d4	| adjust the exponent
	lsrl	IMM (1),d0	| shift right once
	roxrl	IMM (1),d1	|
	roxrl	IMM (1),d2	|
	roxrl	IMM (1),d3	|
	roxrl	IMM (1),d6	|
	roxrl	IMM (1),d7	|
	cmpw	IMM (1),d4	| is the exponent 1 already?
#else
	addl	IMM (1),d4	| adjust the exponent
| ColdFire: emulate the multiword right shift of d0-d1-d2-d3-d6-d7
| (no roxrl) by propagating bit 0 of each long into the next one.
	lsrl	IMM (1),d7
	btst	IMM (0),d6
	beq	13f
	bset	IMM (31),d7
13:	lsrl	IMM (1),d6
	btst	IMM (0),d3
	beq	14f
	bset	IMM (31),d6
14:	lsrl	IMM (1),d3
	btst	IMM (0),d2
	beq	10f
	bset	IMM (31),d3
10:	lsrl	IMM (1),d2
	btst	IMM (0),d1
	beq	11f
	bset	IMM (31),d2
11:	lsrl	IMM (1),d1
	btst	IMM (0),d0
	beq	12f
	bset	IMM (31),d1
12:	lsrl	IMM (1),d0
	cmpl	IMM (1),d4	| is the exponent 1 already?
#endif
	beq	2f		| if so, stop shifting
	bra	1b		| else loop back
	bra	Ld$underflow	| safety check, shouldn't execute '
2:	orl	d6,d2		| this is a trick so we don't lose  '
	orl	d7,d3		| the bits which were flushed right
	movel	a0,d7		| get back sign bit into d7
| Now call the rounding routine (which takes care of denormalized numbers):
	lea	pc@(Lround$0),a0 | to return from rounding routine
	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
#ifdef __mcoldfire__
	clrl	d6
#endif
	movew	a1@(6),d6	| rounding mode in d6
	beq	Lround$to$nearest
#ifndef __mcoldfire__
	cmpw	IMM (ROUND_TO_PLUS),d6
#else
	cmpl	IMM (ROUND_TO_PLUS),d6
#endif
	bhi	Lround$to$minus
	blt	Lround$to$zero
	bra	Lround$to$plus
Lround$0:
| Here we have a correctly rounded result (either normalized or denormalized).

| Here we should have either a normalized number or a denormalized one, and
| the exponent is necessarily larger or equal to 1 (so we don't have to  '
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 0x7ff).
#ifndef __mcoldfire__
	cmpw	IMM (0x07ff),d4
#else
	cmpl	IMM (0x07ff),d4
#endif
	bge	Ld$overflow
| Now check for a denormalized number (exponent==0):
	movew	d4,d4		| only to set the condition codes on d4
	beq	Ld$den
1:
| Put back the exponents and sign and return.
#ifndef __mcoldfire__
	lslw	IMM (4),d4	| exponent back to fourth byte
#else
	lsll	IMM (4),d4	| exponent back to fourth byte
#endif
	bclr	IMM (DBL_MANT_DIG-32-1),d0	| clear the hidden bit
	swap	d0		| and put back exponent
#ifndef __mcoldfire__
	orw	d4,d0		|
#else
	orl	d4,d0		|
#endif
	swap	d0		|
	orl	d7,d0		| and sign also

	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@	| clear exception flags
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6
	rts
   2254 
   2255 |=============================================================================
   2256 |                              __negdf2
   2257 |=============================================================================
   2258 
| double __negdf2(double);
| Flip the sign of the argument, folding -0.0 to +0.0, propagating the
| sign onto infinities and trapping on NaN input.
	FUNC(__negdf2)
SYM (__negdf2):
#ifndef __mcoldfire__
	link	a6,IMM (0)
	moveml	d2-d7,sp@-	| save the data registers we clobber
#else
	link	a6,IMM (-24)
	moveml	d2-d7,sp@
#endif
	moveq	IMM (NEGATE),d5
	movel	a6@(8),d0	| get number to negate in d0-d1
	movel	a6@(12),d1	|
	bchg	IMM (31),d0	| negate
	movel	d0,d2		| make a positive copy (for the tests)
	bclr	IMM (31),d2	|
	movel	d2,d4		| check for zero
	orl	d1,d4		|
	beq	2f		| if zero (either sign) return +zero
	cmpl	IMM (0x7ff00000),d2 | compare to +INFINITY
	blt	1f		| if finite, return
	bhi	Ld$inop		| if larger (fraction not zero) is NaN
	tstl	d1		| if d2 == 0x7ff00000 check d1
	bne	Ld$inop		|
	movel	d0,d7		| else get sign and return INFINITY
	andl	IMM (0x80000000),d7
	bra	Ld$infty
1:	PICLEA	SYM (_fpCCR),a0
	movew	IMM (0),a0@	| clear exception flags
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6
	rts
2:	bclr	IMM (31),d0	| make the zero positive
	bra	1b		| and return it
   2299 
   2300 |=============================================================================
   2301 |                              __cmpdf2
   2302 |=============================================================================
   2303 
   2304 GREATER =  1
   2305 LESS    = -1
   2306 EQUAL   =  0
   2307 
| int __cmpdf2_internal(double, double, int);
| Returns GREATER/LESS/EQUAL; the third argument (a6@(24)) is loaded as
| the result when an operand is a NaN (see Lcmpd$inop below).
SYM (__cmpdf2_internal):
#ifndef __mcoldfire__
	link	a6,IMM (0)
	moveml	d2-d7,sp@- 	| save registers
#else
	link	a6,IMM (-24)
	moveml	d2-d7,sp@
#endif
	moveq	IMM (COMPARE),d5
	movel	a6@(8),d0	| get first operand
	movel	a6@(12),d1	|
	movel	a6@(16),d2	| get second operand
	movel	a6@(20),d3	|
| First check if a and/or b are (+/-) zero and in that case clear
| the sign bit.
	movel	d0,d6		| copy signs into d6 (a) and d7(b)
	bclr	IMM (31),d0	| and clear signs in d0 and d2
	movel	d2,d7		|
	bclr	IMM (31),d2	|
	cmpl	IMM (0x7ff00000),d0 | check for a == NaN
	bhi	Lcmpd$inop		| if d0 > 0x7ff00000, a is NaN
	beq	Lcmpdf$a$nf	| if equal can be INFINITY, so check d1
	movel	d0,d4		| copy into d4 to test for zero
	orl	d1,d4		|
	beq	Lcmpdf$a$0	|
Lcmpdf$0:
	cmpl	IMM (0x7ff00000),d2 | check for b == NaN
	bhi	Lcmpd$inop		| if d2 > 0x7ff00000, b is NaN
	beq	Lcmpdf$b$nf	| if equal can be INFINITY, so check d3
	movel	d2,d4		|
	orl	d3,d4		|
	beq	Lcmpdf$b$0	|
Lcmpdf$1:
| Check the signs
	eorl	d6,d7
	bpl	1f
| If the signs are not equal check if a >= 0
	tstl	d6
	bpl	Lcmpdf$a$gt$b	| if (a >= 0 && b < 0) => a > b
	bmi	Lcmpdf$b$gt$a	| if (a < 0 && b >= 0) => a < b
1:
| If the signs are equal check for < 0
	tstl	d6
	bpl	1f
| If both are negative exchange them
#ifndef __mcoldfire__
	exg	d0,d2
	exg	d1,d3
#else
	movel	d0,d7		| three-move swap (no exg on ColdFire)
	movel	d2,d0
	movel	d7,d2
	movel	d1,d7
	movel	d3,d1
	movel	d7,d3
#endif
1:
| Now that they are positive we just compare them as longs (does this also
| work for denormalized numbers?).
	cmpl	d0,d2
	bhi	Lcmpdf$b$gt$a	| |b| > |a|
	bne	Lcmpdf$a$gt$b	| |b| < |a|
| If we got here d0 == d2, so we compare d1 and d3.
	cmpl	d1,d3
	bhi	Lcmpdf$b$gt$a	| |b| > |a|
	bne	Lcmpdf$a$gt$b	| |b| < |a|
| If we got here a == b.
	movel	IMM (EQUAL),d0
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7 	| put back the registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6
	rts
Lcmpdf$a$gt$b:
	movel	IMM (GREATER),d0
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7 	| put back the registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6
	rts
Lcmpdf$b$gt$a:
	movel	IMM (LESS),d0
#ifndef __mcoldfire__
	moveml	sp@+,d2-d7 	| put back the registers
#else
	moveml	sp@,d2-d7
	| XXX if frame pointer is ever removed, stack pointer must
	| be adjusted here.
#endif
	unlk	a6
	rts

| Operand was +/-0: clear its saved sign so +0 and -0 compare equal.
Lcmpdf$a$0:
	bclr	IMM (31),d6
	bra	Lcmpdf$0
Lcmpdf$b$0:
	bclr	IMM (31),d7
	bra	Lcmpdf$1

| High long equals 0x7ff00000: NaN iff the low fraction long is nonzero.
| NOTE(review): these branch to Ld$inop (returns a quiet NaN) rather than
| Lcmpd$inop (returns the third argument) -- confirm against upstream.
Lcmpdf$a$nf:
	tstl	d1
	bne	Ld$inop
	bra	Lcmpdf$0

Lcmpdf$b$nf:
	tstl	d3
	bne	Ld$inop
	bra	Lcmpdf$1

Lcmpd$inop:
| NaN operand: return the caller-supplied value and raise the exception.
	movl	a6@(24),d0
	moveq	IMM (INEXACT_RESULT+INVALID_OPERATION),d7
	moveq	IMM (DOUBLE_FLOAT),d6
	PICJUMP	$_exception_handler
   2431 
| int __cmpdf2(double, double);
| Thin wrapper: re-push both operands plus a NaN-result value of 1 and
| let __cmpdf2_internal do the work.
	FUNC(__cmpdf2)
SYM (__cmpdf2):
	link	a6,IMM (0)
	pea	1		| third argument: value returned on NaN
	movl	a6@(20),sp@-	| re-push b ...
	movl	a6@(16),sp@-
	movl	a6@(12),sp@-	| ... and a
	movl	a6@(8),sp@-
	PICCALL	SYM (__cmpdf2_internal)
	unlk	a6		| unlk restores sp, discarding the arguments
	rts
   2444 
   2445 |=============================================================================
   2446 |                           rounding routines
   2447 |=============================================================================
   2448 
| The rounding routines expect the number to be normalized in registers
| d0-d1-d2-d3, with the exponent in register d4. They assume that the
| exponent is larger or equal to 1. They return a properly normalized number
| if possible, and a denormalized number otherwise. The exponent is returned
| in d4.

Lround$to$nearest:
| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
| Here we assume that the exponent is not too small (this should be checked
| before entering the rounding routine), but the number could be denormalized.

| Check for denormalized numbers:
1:	btst	IMM (DBL_MANT_DIG-32),d0
	bne	2f		| if set the number is normalized
| Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -D_BIAS+1).
#ifndef __mcoldfire__
	cmpw	IMM (1),d4	| remember that the exponent is at least one
#else
	cmpl	IMM (1),d4	| remember that the exponent is at least one
#endif
 	beq	2f		| an exponent of one means denormalized
	addl	d3,d3		| else shift and adjust the exponent
	addxl	d2,d2		|
	addxl	d1,d1		|
	addxl	d0,d0		|
#ifndef __mcoldfire__
	dbra	d4,1b		|
#else
	subql	IMM (1), d4
	bpl	1b
#endif
2:
| Now round: we do it as follows: after the shifting we can write the
| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
| If delta < 1, do nothing. If delta > 1, add 1 to f.
| If delta == 1, we make sure the rounded number will be even (odd?)
| (after shifting).
	btst	IMM (0),d1	| is delta < 1?
	beq	2f		| if so, do not do anything
	orl	d2,d3		| is delta == 1?
	bne	1f		| delta > 1: round up (add 1)
| delta == 1 exactly: round to even using the last significant bit
	movel	d1,d3		|
	andl	IMM (2),d3	| bit 1 is the last significant bit
	movel	IMM (0),d2	|
	addl	d3,d1		|
	addxl	d2,d0		|
	bra	2f		|
1:	movel	IMM (1),d3	| else add 1
	movel	IMM (0),d2	|
	addl	d3,d1		|
	addxl	d2,d0
| Shift right once (because we used bit #DBL_MANT_DIG-32!).
2:
#ifndef __mcoldfire__
	lsrl	IMM (1),d0
	roxrl	IMM (1),d1
#else
	lsrl	IMM (1),d1	| ColdFire: no roxrl, carry bit by hand
	btst	IMM (0),d0
	beq	10f
	bset	IMM (31),d1
10:	lsrl	IMM (1),d0
#endif

| Now check again bit #DBL_MANT_DIG-32 (rounding could have produced a
| 'fraction overflow' ...).
	btst	IMM (DBL_MANT_DIG-32),d0
	beq	1f
#ifndef __mcoldfire__
	lsrl	IMM (1),d0
	roxrl	IMM (1),d1
	addw	IMM (1),d4	| compensate the extra shift in the exponent
#else
	lsrl	IMM (1),d1
	btst	IMM (0),d0
	beq	10f
	bset	IMM (31),d1
10:	lsrl	IMM (1),d0
	addl	IMM (1),d4	| compensate the extra shift in the exponent
#endif
1:
| If bit #DBL_MANT_DIG-32-1 is clear we have a denormalized number, so we
| have to put the exponent to zero and return a denormalized number.
	btst	IMM (DBL_MANT_DIG-32-1),d0
	beq	1f
	jmp	a0@
1:	movel	IMM (0),d4
	jmp	a0@

| Directed rounding modes for doubles: just return (truncate).
Lround$to$zero:
Lround$to$plus:
Lround$to$minus:
	jmp	a0@
   2544 #endif /* L_double */
   2545 
   2546 #ifdef  L_float
   2547 
   2548 	.globl	SYM (_fpCCR)
   2549 	.globl  $_exception_handler
   2550 
   2551 QUIET_NaN    = 0xffffffff
   2552 SIGNL_NaN    = 0x7f800001
   2553 INFINITY     = 0x7f800000
   2554 
   2555 F_MAX_EXP      = 0xff
   2556 F_BIAS         = 126
   2557 FLT_MAX_EXP    = F_MAX_EXP - F_BIAS
   2558 FLT_MIN_EXP    = 1 - F_BIAS
   2559 FLT_MANT_DIG   = 24
   2560 
   2561 INEXACT_RESULT 		= 0x0001
   2562 UNDERFLOW 		= 0x0002
   2563 OVERFLOW 		= 0x0004
   2564 DIVIDE_BY_ZERO 		= 0x0008
   2565 INVALID_OPERATION 	= 0x0010
   2566 
   2567 SINGLE_FLOAT = 1
   2568 
   2569 NOOP         = 0
   2570 ADD          = 1
   2571 MULTIPLY     = 2
   2572 DIVIDE       = 3
   2573 NEGATE       = 4
   2574 COMPARE      = 5
   2575 EXTENDSFDF   = 6
   2576 TRUNCDFSF    = 7
   2577 
   2578 UNKNOWN           = -1
   2579 ROUND_TO_NEAREST  = 0 | round result to nearest representable value
   2580 ROUND_TO_ZERO     = 1 | round result towards zero
   2581 ROUND_TO_PLUS     = 2 | round result towards plus infinity
   2582 ROUND_TO_MINUS    = 3 | round result towards minus infinity
   2583 
   2584 | Entry points:
   2585 
   2586 	.globl SYM (__addsf3)
   2587 	.globl SYM (__subsf3)
   2588 	.globl SYM (__mulsf3)
   2589 	.globl SYM (__divsf3)
   2590 	.globl SYM (__negsf2)
   2591 	.globl SYM (__cmpsf2)
   2592 	.globl SYM (__cmpsf2_internal)
   2593 	.hidden SYM (__cmpsf2_internal)
   2594 
   2595 | These are common routines to return and signal exceptions.
   2596 
   2597 	.text
   2598 	.even
   2599 
Lf$den:
| Return and signal a denormalized number
	orl	d7,d0		| put the sign bit back into the result
	moveq	IMM (INEXACT_RESULT+UNDERFLOW),d7
	moveq	IMM (SINGLE_FLOAT),d6
	PICJUMP	$_exception_handler
   2606 
Lf$infty:
Lf$overflow:
| Return a properly signed INFINITY and set the exception flags
	movel	IMM (INFINITY),d0
	orl	d7,d0		| merge the sign bit held in d7
	moveq	IMM (INEXACT_RESULT+OVERFLOW),d7
	moveq	IMM (SINGLE_FLOAT),d6
	PICJUMP	$_exception_handler
   2615 
Lf$underflow:
| Return 0 and set the exception flags
	moveq	IMM (0),d0
	moveq	IMM (INEXACT_RESULT+UNDERFLOW),d7
	moveq	IMM (SINGLE_FLOAT),d6
	PICJUMP	$_exception_handler
   2622 
Lf$inop:
| Return a quiet NaN and set the exception flags
	movel	IMM (QUIET_NaN),d0
	moveq	IMM (INEXACT_RESULT+INVALID_OPERATION),d7
	moveq	IMM (SINGLE_FLOAT),d6
	PICJUMP	$_exception_handler
   2629 
Lf$div$0:
| Return a properly signed INFINITY and set the exception flags
	movel	IMM (INFINITY),d0
	orl	d7,d0		| merge the sign bit held in d7
	moveq	IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
	moveq	IMM (SINGLE_FLOAT),d6
	PICJUMP	$_exception_handler
   2637 
   2638 |=============================================================================
   2639 |=============================================================================
   2640 |                         single precision routines
   2641 |=============================================================================
   2642 |=============================================================================
   2643 
   2644 | A single precision floating point number (float) has the format:
   2645 |
   2646 | struct _float {
   2647 |  unsigned int sign      : 1;  /* sign bit */
   2648 |  unsigned int exponent  : 8;  /* exponent, shifted by 126 */
   2649 |  unsigned int fraction  : 23; /* fraction */
   2650 | } float;
   2651 |
   2652 | Thus sizeof(float) = 4 (32 bits).
   2653 |
   2654 | All the routines are callable from C programs, and return the result
   2655 | in the single register d0. They also preserve all registers except
   2656 | d0-d1 and a0-a1.
   2657 
   2658 |=============================================================================
   2659 |                              __subsf3
   2660 |=============================================================================
   2661 
| float __subsf3(float, float);
| a - b == a + (-b): flip b's sign on the stack and reuse __addsf3.
	FUNC(__subsf3)
SYM (__subsf3):
	bchg	IMM (31),sp@(8)	| change sign of second operand
				| and fall through to __addsf3
   2667 |=============================================================================
   2668 |                              __addsf3
   2669 |=============================================================================
   2670 
   2671 | float __addsf3(float, float);
   2672 	FUNC(__addsf3)
   2673 SYM (__addsf3):
   2674 #ifndef __mcoldfire__
   2675 	link	a6,IMM (0)	| everything will be done in registers
   2676 	moveml	d2-d7,sp@-	| save all data registers but d0-d1
   2677 #else
   2678 	link	a6,IMM (-24)
   2679 	moveml	d2-d7,sp@
   2680 #endif
   2681 	movel	a6@(8),d0	| get first operand
   2682 	movel	a6@(12),d1	| get second operand
   2683 	movel	d0,a0		| get d0's sign bit '
   2684 	addl	d0,d0		| check and clear sign bit of a
   2685 	beq	Laddsf$b	| if zero return second operand
   2686 	movel	d1,a1		| save b's sign bit '
   2687 	addl	d1,d1		| get rid of sign bit
   2688 	beq	Laddsf$a	| if zero return first operand
   2689 
   2690 | Get the exponents and check for denormalized and/or infinity.
   2691 
   2692 	movel	IMM (0x00ffffff),d4	| mask to get fraction
   2693 	movel	IMM (0x01000000),d5	| mask to put hidden bit back
   2694 
   2695 	movel	d0,d6		| save a to get exponent
   2696 	andl	d4,d0		| get fraction in d0
   2697 	notl 	d4		| make d4 into a mask for the exponent
   2698 	andl	d4,d6		| get exponent in d6
   2699 	beq	Laddsf$a$den	| branch if a is denormalized
   2700 	cmpl	d4,d6		| check for INFINITY or NaN
   2701 	beq	Laddsf$nf
   2702 	swap	d6		| put exponent into first word
   2703 	orl	d5,d0		| and put hidden bit back
   2704 Laddsf$1:
   2705 | Now we have a's exponent in d6 (second byte) and the mantissa in d0. '
   2706 	movel	d1,d7		| get exponent in d7
   2707 	andl	d4,d7		|
   2708 	beq	Laddsf$b$den	| branch if b is denormalized
   2709 	cmpl	d4,d7		| check for INFINITY or NaN
   2710 	beq	Laddsf$nf
   2711 	swap	d7		| put exponent into first word
   2712 	notl 	d4		| make d4 into a mask for the fraction
   2713 	andl	d4,d1		| get fraction in d1
   2714 	orl	d5,d1		| and put hidden bit back
   2715 Laddsf$2:
   2716 | Now we have b's exponent in d7 (second byte) and the mantissa in d1. '
   2717 
   2718 | Note that the hidden bit corresponds to bit #FLT_MANT_DIG-1, and we
   2719 | shifted right once, so bit #FLT_MANT_DIG is set (so we have one extra
   2720 | bit).
   2721 
   2722 	movel	d1,d2		| move b to d2, since we want to use
   2723 				| two registers to do the sum
   2724 	movel	IMM (0),d1	| and clear the new ones
   2725 	movel	d1,d3		|
   2726 
   2727 | Here we shift the numbers in registers d0 and d1 so the exponents are the
   2728 | same, and put the largest exponent in d6. Note that we are using two
   2729 | registers for each number (see the discussion by D. Knuth in "Seminumerical
   2730 | Algorithms").
   2731 #ifndef __mcoldfire__
   2732 	cmpw	d6,d7		| compare exponents
   2733 #else
   2734 	cmpl	d6,d7		| compare exponents
   2735 #endif
   2736 	beq	Laddsf$3	| if equal don't shift '
   2737 	bhi	5f		| branch if second exponent largest
   2738 1:
   2739 	subl	d6,d7		| keep the largest exponent
   2740 	negl	d7
   2741 #ifndef __mcoldfire__
   2742 	lsrw	IMM (8),d7	| put difference in lower byte
   2743 #else
   2744 	lsrl	IMM (8),d7	| put difference in lower byte
   2745 #endif
   2746 | if difference is too large we don't shift (actually, we can just exit) '
   2747 #ifndef __mcoldfire__
   2748 	cmpw	IMM (FLT_MANT_DIG+2),d7
   2749 #else
   2750 	cmpl	IMM (FLT_MANT_DIG+2),d7
   2751 #endif
   2752 	bge	Laddsf$b$small
   2753 #ifndef __mcoldfire__
   2754 	cmpw	IMM (16),d7	| if difference >= 16 swap
   2755 #else
   2756 	cmpl	IMM (16),d7	| if difference >= 16 swap
   2757 #endif
   2758 	bge	4f
   2759 2:
   2760 #ifndef __mcoldfire__
   2761 	subw	IMM (1),d7
   2762 #else
   2763 	subql	IMM (1), d7
   2764 #endif
   2765 3:
   2766 #ifndef __mcoldfire__
   2767 	lsrl	IMM (1),d2	| shift right second operand
   2768 	roxrl	IMM (1),d3
   2769 	dbra	d7,3b
   2770 #else
   2771 	lsrl	IMM (1),d3
   2772 	btst	IMM (0),d2
   2773 	beq	10f
   2774 	bset	IMM (31),d3
   2775 10:	lsrl	IMM (1),d2
   2776 	subql	IMM (1), d7
   2777 	bpl	3b
   2778 #endif
   2779 	bra	Laddsf$3
   2780 4:
   2781 	movew	d2,d3
   2782 	swap	d3
   2783 	movew	d3,d2
   2784 	swap	d2
   2785 #ifndef __mcoldfire__
   2786 	subw	IMM (16),d7
   2787 #else
   2788 	subl	IMM (16),d7
   2789 #endif
   2790 	bne	2b		| if still more bits, go back to normal case
   2791 	bra	Laddsf$3
   2792 5:
   2793 #ifndef __mcoldfire__
   2794 	exg	d6,d7		| exchange the exponents
   2795 #else
   2796 	eorl	d6,d7
   2797 	eorl	d7,d6
   2798 	eorl	d6,d7
   2799 #endif
   2800 	subl	d6,d7		| keep the largest exponent
   2801 	negl	d7		|
   2802 #ifndef __mcoldfire__
   2803 	lsrw	IMM (8),d7	| put difference in lower byte
   2804 #else
   2805 	lsrl	IMM (8),d7	| put difference in lower byte
   2806 #endif
   2807 | if difference is too large we don't shift (and exit!) '
   2808 #ifndef __mcoldfire__
   2809 	cmpw	IMM (FLT_MANT_DIG+2),d7
   2810 #else
   2811 	cmpl	IMM (FLT_MANT_DIG+2),d7
   2812 #endif
   2813 	bge	Laddsf$a$small
   2814 #ifndef __mcoldfire__
   2815 	cmpw	IMM (16),d7	| if difference >= 16 swap
   2816 #else
   2817 	cmpl	IMM (16),d7	| if difference >= 16 swap
   2818 #endif
   2819 	bge	8f
   2820 6:
   2821 #ifndef __mcoldfire__
   2822 	subw	IMM (1),d7
   2823 #else
   2824 	subl	IMM (1),d7
   2825 #endif
   2826 7:
   2827 #ifndef __mcoldfire__
   2828 	lsrl	IMM (1),d0	| shift right first operand
   2829 	roxrl	IMM (1),d1
   2830 	dbra	d7,7b
   2831 #else
   2832 	lsrl	IMM (1),d1
   2833 	btst	IMM (0),d0
   2834 	beq	10f
   2835 	bset	IMM (31),d1
   2836 10:	lsrl	IMM (1),d0
   2837 	subql	IMM (1),d7
   2838 	bpl	7b
   2839 #endif
   2840 	bra	Laddsf$3
   2841 8:
   2842 	movew	d0,d1
   2843 	swap	d1
   2844 	movew	d1,d0
   2845 	swap	d0
   2846 #ifndef __mcoldfire__
   2847 	subw	IMM (16),d7
   2848 #else
   2849 	subl	IMM (16),d7
   2850 #endif
   2851 	bne	6b		| if still more bits, go back to normal case
   2852 				| otherwise we fall through
   2853 
   2854 | Now we have a in d0-d1, b in d2-d3, and the largest exponent in d6 (the
   2855 | signs are stored in a0 and a1).
   2856 
   2857 Laddsf$3:
   2858 | Here we have to decide whether to add or subtract the numbers
   2859 #ifndef __mcoldfire__
   2860 	exg	d6,a0		| get signs back
   2861 	exg	d7,a1		| and save the exponents
   2862 #else
   2863 	movel	d6,d4
   2864 	movel	a0,d6
   2865 	movel	d4,a0
   2866 	movel	d7,d4
   2867 	movel	a1,d7
   2868 	movel	d4,a1
   2869 #endif
   2870 	eorl	d6,d7		| combine sign bits
   2871 	bmi	Lsubsf$0	| if negative a and b have opposite
   2872 				| sign so we actually subtract the
   2873 				| numbers
   2874 
   2875 | Here we have both positive or both negative
   2876 #ifndef __mcoldfire__
   2877 	exg	d6,a0		| now we have the exponent in d6
   2878 #else
   2879 	movel	d6,d4
   2880 	movel	a0,d6
   2881 	movel	d4,a0
   2882 #endif
   2883 	movel	a0,d7		| and sign in d7
   2884 	andl	IMM (0x80000000),d7
   2885 | Here we do the addition.
   2886 	addl	d3,d1
   2887 	addxl	d2,d0
   2888 | Note: now we have d2, d3, d4 and d5 to play with!
   2889 
   2890 | Put the exponent, in the first byte, in d2, to use the "standard" rounding
   2891 | routines:
   2892 	movel	d6,d2
   2893 #ifndef __mcoldfire__
   2894 	lsrw	IMM (8),d2
   2895 #else
   2896 	lsrl	IMM (8),d2
   2897 #endif
   2898 
   2899 | Before rounding normalize so bit #FLT_MANT_DIG is set (we will consider
   2900 | the case of denormalized numbers in the rounding routine itself).
   2901 | As in the addition (not in the subtraction!) we could have set
   2902 | one more bit we check this:
   2903 	btst	IMM (FLT_MANT_DIG+1),d0
   2904 	beq	1f
   2905 #ifndef __mcoldfire__
   2906 	lsrl	IMM (1),d0
   2907 	roxrl	IMM (1),d1
   2908 #else
   2909 	lsrl	IMM (1),d1
   2910 	btst	IMM (0),d0
   2911 	beq	10f
   2912 	bset	IMM (31),d1
   2913 10:	lsrl	IMM (1),d0
   2914 #endif
   2915 	addl	IMM (1),d2
   2916 1:
   2917 	lea	pc@(Laddsf$4),a0 | to return from rounding routine
   2918 	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
   2919 #ifdef __mcoldfire__
   2920 	clrl	d6
   2921 #endif
   2922 	movew	a1@(6),d6	| rounding mode in d6
   2923 	beq	Lround$to$nearest
   2924 #ifndef __mcoldfire__
   2925 	cmpw	IMM (ROUND_TO_PLUS),d6
   2926 #else
   2927 	cmpl	IMM (ROUND_TO_PLUS),d6
   2928 #endif
   2929 	bhi	Lround$to$minus
   2930 	blt	Lround$to$zero
   2931 	bra	Lround$to$plus
   2932 Laddsf$4:
   2933 | Put back the exponent, but check for overflow.
   2934 #ifndef __mcoldfire__
   2935 	cmpw	IMM (0xff),d2
   2936 #else
   2937 	cmpl	IMM (0xff),d2
   2938 #endif
   2939 	bhi	1f
   2940 	bclr	IMM (FLT_MANT_DIG-1),d0
   2941 #ifndef __mcoldfire__
   2942 	lslw	IMM (7),d2
   2943 #else
   2944 	lsll	IMM (7),d2
   2945 #endif
   2946 	swap	d2
   2947 	orl	d2,d0
   2948 	bra	Laddsf$ret
   2949 1:
   2950 	moveq	IMM (ADD),d5
   2951 	bra	Lf$overflow
   2952 
    2953 Lsubsf$0:
    2954 | We are here if a > 0 and b < 0 (sign bits cleared).
    2955 | Here we do the subtraction.
         | On entry the aligned fractions are in d0-d1 (a) and d2-d3 (b),
         | the common exponent was stashed in a0, and d6 holds a's sign.
    2956 	movel	d6,d7		| put sign in d7
    2957 	andl	IMM (0x80000000),d7
    2958 
    2959 	subl	d3,d1		| result in d0-d1
    2960 	subxl	d2,d0		|
    2961 	beq	Laddsf$ret	| if zero just exit
    2962 	bpl	1f		| if positive skip the following
    2963 	bchg	IMM (31),d7	| change sign bit in d7
    2964 	negl	d1
    2965 	negxl	d0		| negate the 64-bit value d0-d1
    2966 1:
    2967 #ifndef __mcoldfire__
    2968 	exg	d2,a0		| now we have the exponent in d2
    2969 	lsrw	IMM (8),d2	| put it in the first byte
    2970 #else
         | ColdFire has no exg; swap d2 <-> a0 via d4
    2971 	movel	d2,d4
    2972 	movel	a0,d2
    2973 	movel	d4,a0
    2974 	lsrl	IMM (8),d2	| put it in the first byte
    2975 #endif
    2976 
    2977 | Now d0-d1 is positive and the sign bit is in d7.
    2978 
    2979 | Note that we do not have to normalize, since in the subtraction bit
    2980 | #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
    2981 | the rounding routines themselves.
    2982 	lea	pc@(Lsubsf$1),a0 | to return from rounding routine
    2983 	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
    2984 #ifdef __mcoldfire__
    2985 	clrl	d6		| mode is a word; clear high half for cmpl below
    2986 #endif
    2987 	movew	a1@(6),d6	| rounding mode in d6
    2988 	beq	Lround$to$nearest
    2989 #ifndef __mcoldfire__
    2990 	cmpw	IMM (ROUND_TO_PLUS),d6
    2991 #else
    2992 	cmpl	IMM (ROUND_TO_PLUS),d6
    2993 #endif
    2994 	bhi	Lround$to$minus
    2995 	blt	Lround$to$zero
    2996 	bra	Lround$to$plus
    2997 Lsubsf$1:
    2998 | Put back the exponent (we can't have overflow!). '
    2999 	bclr	IMM (FLT_MANT_DIG-1),d0	| clear the hidden bit
    3000 #ifndef __mcoldfire__
    3001 	lslw	IMM (7),d2
    3002 #else
    3003 	lsll	IMM (7),d2
    3004 #endif
    3005 	swap	d2		| exponent into bits 30-23
    3006 	orl	d2,d0		| merge with fraction
    3007 	bra	Laddsf$ret
    3008 
   3008 
    3009 | If one of the numbers was too small (difference of exponents >=
    3010 | FLT_MANT_DIG+2) we return the other (and now we don't have to '
    3011 | check for finiteness or zero).
    3012 Laddsf$a$small:
         | a is negligible next to b: return b (second argument) unchanged.
    3013 	movel	a6@(12),d0
    3014 	PICLEA	SYM (_fpCCR),a0
    3015 	movew	IMM (0),a0@	| clear the exception flags
    3016 #ifndef __mcoldfire__
    3017 	moveml	sp@+,d2-d7	| restore data registers
    3018 #else
    3019 	moveml	sp@,d2-d7
    3020 	| XXX if frame pointer is ever removed, stack pointer must
    3021 	| be adjusted here.
    3022 #endif
    3023 	unlk	a6		| and return
    3024 	rts
    3025 
   3025 
    3026 Laddsf$b$small:
         | b is negligible next to a: return a (first argument) unchanged.
    3027 	movel	a6@(8),d0
    3028 	PICLEA	SYM (_fpCCR),a0
    3029 	movew	IMM (0),a0@	| clear the exception flags
    3030 #ifndef __mcoldfire__
    3031 	moveml	sp@+,d2-d7	| restore data registers
    3032 #else
    3033 	moveml	sp@,d2-d7
    3034 	| XXX if frame pointer is ever removed, stack pointer must
    3035 	| be adjusted here.
    3036 #endif
    3037 	unlk	a6		| and return
    3038 	rts
    3039 
   3039 
    3040 | If the numbers are denormalized remember to put exponent equal to 1.
    3041 
    3042 Laddsf$a$den:
    3043 	movel	d5,d6		| d5 contains 0x01000000
    3044 	swap	d6		| so d6 = 0x00000100: exponent 1 in the first byte
    3045 	bra	Laddsf$1
    3046 
    3047 Laddsf$b$den:
    3048 	movel	d5,d7
    3049 	swap	d7		| likewise: exponent 1 for b
    3050 	notl 	d4		| make d4 into a mask for the fraction
    3051 				| (this was not executed after the jump)
    3052 	bra	Laddsf$2
    3053 
   3053 
    3054 | The rest is mainly code for the different results which can be
    3055 | returned (checking always for +/-INFINITY and NaN).
    3056 
    3057 Laddsf$b:
    3058 | Return b (if a is zero).
    3059 	movel	a6@(12),d0
    3060 	cmpl	IMM (0x80000000),d0	| Check if b is -0
    3061 	bne	1f
    3062 	movel	a0,d7
    3063 	andl	IMM (0x80000000),d7	| Use the sign of a
    3064 	clrl	d0		| (+/-)0 + (-0): return zero with a's sign
    3065 	bra	Laddsf$ret
    3066 Laddsf$a:
    3067 | Return a (if b is zero).
    3068 	movel	a6@(8),d0
    3069 1:
    3070 	moveq	IMM (ADD),d5
    3071 | We have to check for NaN and +/-infty.
    3072 	movel	d0,d7
    3073 	andl	IMM (0x80000000),d7	| put sign in d7
    3074 	bclr	IMM (31),d0		| clear sign
    3075 	cmpl	IMM (INFINITY),d0	| check for infty or NaN
    3076 	bge	2f
    3077 	movel	d0,d0		| check for zero (we do this because we don't '
    3078 	bne	Laddsf$ret	| want to return -0 by mistake
    3079 	bclr	IMM (31),d7	| if zero be sure to clear sign
    3080 	bra	Laddsf$ret	| if everything OK just return
    3081 2:
    3082 | The value to be returned is either +/-infty or NaN
    3083 	andl	IMM (0x007fffff),d0	| check for NaN
    3084 	bne	Lf$inop			| if mantissa not zero is NaN
    3085 	bra	Lf$infty
    3086 
   3086 
    3087 Laddsf$ret:
    3088 | Normal exit (a and b nonzero, result is not NaN nor +/-infty).
    3089 | We have to clear the exception flags (just the exception type).
         | On entry d0 = result without sign, d7 = sign bit.
    3090 	PICLEA	SYM (_fpCCR),a0
    3091 	movew	IMM (0),a0@
    3092 	orl	d7,d0		| put sign bit
    3093 #ifndef __mcoldfire__
    3094 	moveml	sp@+,d2-d7	| restore data registers
    3095 #else
    3096 	moveml	sp@,d2-d7
    3097 	| XXX if frame pointer is ever removed, stack pointer must
    3098 	| be adjusted here.
    3099 #endif
    3100 	unlk	a6		| and return
    3101 	rts
    3102 
    3103 Laddsf$ret$den:
    3104 | Return a denormalized number (for addition we don't signal underflow) '
    3105 	lsrl	IMM (1),d0	| remember to shift right back once
    3106 	bra	Laddsf$ret	| and return
    3107 
   3107 
    3108 | Note: when adding two floats of the same sign if either one is
    3109 | NaN we return NaN without regard to whether the other is finite or
    3110 | not. When subtracting them (i.e., when adding two numbers of
    3111 | opposite signs) things are more complicated: if both are INFINITY
    3112 | we return NaN, if only one is INFINITY and the other is NaN we return
    3113 | NaN, but if it is finite we return INFINITY with the corresponding sign.
    3114 
    3115 Laddsf$nf:
    3116 	moveq	IMM (ADD),d5
    3117 | This could be faster but it is not worth the effort, since it is not
    3118 | executed very often. We sacrifice speed for clarity here.
    3119 	movel	a6@(8),d0	| get the numbers back (remember that we
    3120 	movel	a6@(12),d1	| did some processing already)
    3121 	movel	IMM (INFINITY),d4 | useful constant (INFINITY)
    3122 	movel	d0,d2		| save sign bits
    3123 	movel	d0,d7		| into d7 as well as we may need the sign
    3124 				| bit before jumping to LfSinfty
    3125 	movel	d1,d3
    3126 	bclr	IMM (31),d0	| clear sign bits
    3127 	bclr	IMM (31),d1
    3128 | We know that one of them is either NaN or +/-INFINITY
    3129 | Check for NaN (if either one is NaN return NaN)
    3130 	cmpl	d4,d0		| check first a (d0)
    3131 	bhi	Lf$inop		| |a| > INFINITY means nonzero mantissa: NaN
    3132 	cmpl	d4,d1		| check now b (d1)
    3133 	bhi	Lf$inop
    3134 | Now comes the check for +/-INFINITY. We know that both are (maybe not
    3135 | finite) numbers, but we have to check if both are infinite whether we
    3136 | are adding or subtracting them.
    3137 	eorl	d3,d2		| to check sign bits
    3138 	bmi	1f		| signs differ: effective subtraction
    3139 	andl	IMM (0x80000000),d7	| get (common) sign bit
    3140 	bra	Lf$infty
    3141 1:
    3142 | We know one (or both) are infinite, so we test for equality between the
    3143 | two numbers (if they are equal they have to be infinite both, so we
    3144 | return NaN).
    3145 	cmpl	d1,d0		| are both infinite?
    3146 	beq	Lf$inop		| if so return NaN
    3147 
    3148 	andl	IMM (0x80000000),d7 | get a's sign bit '
    3149 	cmpl	d4,d0		| test now for infinity
    3150 	beq	Lf$infty	| if a is INFINITY return with this sign
    3151 	bchg	IMM (31),d7	| else we know b is INFINITY and has
    3152 	bra	Lf$infty	| the opposite sign
    3153 
   3153 
    3154 |=============================================================================
    3155 |                             __mulsf3
    3156 |=============================================================================
    3157 
    3158 | float __mulsf3(float, float);
         | Soft-float single-precision multiply.  Arguments on the stack
         | (a6@(8) = a, a6@(12) = b), result returned in d0.
    3159 	FUNC(__mulsf3)
    3160 SYM (__mulsf3):
    3161 #ifndef __mcoldfire__
    3162 	link	a6,IMM (0)
    3163 	moveml	d2-d7,sp@-
    3164 #else
    3165 	link	a6,IMM (-24)	| ColdFire: moveml cannot predecrement
    3166 	moveml	d2-d7,sp@
    3167 #endif
    3168 	movel	a6@(8),d0	| get a into d0
    3169 	movel	a6@(12),d1	| and b into d1
    3170 	movel	d0,d7		| d7 will hold the sign of the product
    3171 	eorl	d1,d7		| sign(a) XOR sign(b)
    3172 	andl	IMM (0x80000000),d7
    3173 	movel	IMM (INFINITY),d6	| useful constant (+INFINITY)
    3174 	movel	d6,d5			| another (mask for fraction)
    3175 	notl	d5			|
    3176 	movel	IMM (0x00800000),d4	| this is to put hidden bit back
    3177 	bclr	IMM (31),d0		| get rid of a's sign bit '
    3178 	movel	d0,d2			|
    3179 	beq	Lmulsf$a$0		| branch if a is zero
    3180 	bclr	IMM (31),d1		| get rid of b's sign bit '
    3181 	movel	d1,d3		|
    3182 	beq	Lmulsf$b$0	| branch if b is zero
    3183 	cmpl	d6,d0		| is a big?
    3184 	bhi	Lmulsf$inop	| if a is NaN return NaN
    3185 	beq	Lmulsf$inf	| if a is INFINITY we have to check b
    3186 	cmpl	d6,d1		| now compare b with INFINITY
    3187 	bhi	Lmulsf$inop	| is b NaN?
    3188 	beq	Lmulsf$overflow | is b INFINITY?
    3189 | Here we have both numbers finite and nonzero (and with no sign bit).
    3190 | Now we get the exponents into d2 and d3.
    3191 	andl	d6,d2		| and isolate exponent in d2
    3192 	beq	Lmulsf$a$den	| if exponent is zero we have a denormalized
    3193 	andl	d5,d0		| and isolate fraction
    3194 	orl	d4,d0		| and put hidden bit back
    3195 	swap	d2		| I like exponents in the first byte
    3196 #ifndef __mcoldfire__
    3197 	lsrw	IMM (7),d2	|
    3198 #else
    3199 	lsrl	IMM (7),d2	|
    3200 #endif
    3201 Lmulsf$1:			| number
    3202 	andl	d6,d3		| same unpacking for b
    3203 	beq	Lmulsf$b$den	|
    3204 	andl	d5,d1	 	|
    3205 	orl	d4,d1		|
    3206 	swap	d3		|
    3207 #ifndef __mcoldfire__
    3208 	lsrw	IMM (7),d3	|
    3209 #else
    3210 	lsrl	IMM (7),d3	|
    3211 #endif
    3212 Lmulsf$2:			|
    3213 #ifndef __mcoldfire__
    3214 	addw	d3,d2		| add exponents
    3215 	subw	IMM (F_BIAS+1),d2 | and subtract bias (plus one)
    3216 #else
    3217 	addl	d3,d2		| add exponents
    3218 	subl	IMM (F_BIAS+1),d2 | and subtract bias (plus one)
    3219 #endif
    3220 
    3221 | We are now ready to do the multiplication. The situation is as follows:
    3222 | both a and b have bit FLT_MANT_DIG-1 set (even if they were
    3223 | denormalized to start with!), which means that in the product
    3224 | bit 2*(FLT_MANT_DIG-1) (that is, bit 2*FLT_MANT_DIG-2-32 of the
    3225 | high long) is set.
    3226 
    3227 | To do the multiplication let us move the number a little bit around ...
    3228 	movel	d1,d6		| second operand in d6
    3229 	movel	d0,d5		| first operand in d4-d5
    3230 	movel	IMM (0),d4
    3231 	movel	d4,d1		| the sums will go in d0-d1
    3232 	movel	d4,d0
    3233 
    3234 | now bit FLT_MANT_DIG-1 becomes bit 31:
    3235 	lsll	IMM (31-FLT_MANT_DIG+1),d6
    3236 
    3237 | Start the loop (we loop #FLT_MANT_DIG times):
         | Classic shift-and-add: for each bit of b (from the top), double
         | the accumulator and add a when the bit is set.
    3238 	moveq	IMM (FLT_MANT_DIG-1),d3
    3239 1:	addl	d1,d1		| shift sum
    3240 	addxl	d0,d0
    3241 	lsll	IMM (1),d6	| get bit bn
    3242 	bcc	2f		| if not set skip sum
    3243 	addl	d5,d1		| add a
    3244 	addxl	d4,d0
    3245 2:
    3246 #ifndef __mcoldfire__
    3247 	dbf	d3,1b		| loop back
    3248 #else
    3249 	subql	IMM (1),d3
    3250 	bpl	1b
    3251 #endif
    3252 
    3253 | Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
    3254 | (mod 32) of d0 set. The first thing to do now is to normalize it so bit
    3255 | FLT_MANT_DIG is set (to do the rounding).
    3256 #ifndef __mcoldfire__
    3257 	rorl	IMM (6),d1
    3258 	swap	d1
    3259 	movew	d1,d3
    3260 	andw	IMM (0x03ff),d3
    3261 	andw	IMM (0xfd00),d1
    3262 #else
         | ColdFire lacks rorl/word ops: rotate d1 left by 10 by hand
    3263 	movel	d1,d3
    3264 	lsll	IMM (8),d1
    3265 	addl	d1,d1
    3266 	addl	d1,d1
    3267 	moveq	IMM (22),d5
    3268 	lsrl	d5,d3
    3269 	orl	d3,d1
    3270 	andl	IMM (0xfffffd00),d1
    3271 #endif
    3272 	lsll	IMM (8),d0	| shift high long left by 10 as well
    3273 	addl	d0,d0
    3274 	addl	d0,d0
    3275 #ifndef __mcoldfire__
    3276 	orw	d3,d0		| and bring in the bits shifted out of d1
    3277 #else
    3278 	orl	d3,d0
    3279 #endif
    3280 
    3281 	moveq	IMM (MULTIPLY),d5	| operation code for error reporting
    3282 
    3283 	btst	IMM (FLT_MANT_DIG+1),d0	| one extra bit set by the product?
    3284 	beq	Lround$exit
    3285 #ifndef __mcoldfire__
    3286 	lsrl	IMM (1),d0	| shift d0-d1 right once
    3287 	roxrl	IMM (1),d1
    3288 	addw	IMM (1),d2	| and bump the exponent
    3289 #else
    3290 	lsrl	IMM (1),d1	| no roxrl on ColdFire: carry bit by hand
    3291 	btst	IMM (0),d0
    3292 	beq	10f
    3293 	bset	IMM (31),d1
    3294 10:	lsrl	IMM (1),d0
    3295 	addql	IMM (1),d2
    3296 #endif
    3297 	bra	Lround$exit
    3298 
   3298 
    3299 Lmulsf$inop:
         | Invalid operation: tag as MULTIPLY and signal.
    3300 	moveq	IMM (MULTIPLY),d5
    3301 	bra	Lf$inop
    3302 
    3303 Lmulsf$overflow:
         | Overflow (b was INFINITY): tag as MULTIPLY and signal.
    3304 	moveq	IMM (MULTIPLY),d5
    3305 	bra	Lf$overflow
    3306 
    3307 Lmulsf$inf:
    3308 	moveq	IMM (MULTIPLY),d5
    3309 | If either is NaN return NaN; else both are (maybe infinite) numbers, so
    3310 | return INFINITY with the correct sign (which is in d7).
    3311 	cmpl	d6,d1		| is b NaN?
    3312 	bhi	Lf$inop		| if so return NaN
    3313 	bra	Lf$overflow	| else return +/-INFINITY
    3314 
   3314 
    3315 | If either number is zero return zero, unless the other is +/-INFINITY,
    3316 | or NaN, in which case we return NaN.
    3317 Lmulsf$b$0:
    3318 | Here d1 (==b) is zero.
    3319 	movel	a6@(8),d1	| get a again to check for non-finiteness
    3320 	bra	1f
    3321 Lmulsf$a$0:
    3322 	movel	a6@(12),d1	| get b again to check for non-finiteness
    3323 1:	bclr	IMM (31),d1	| clear sign bit
    3324 	cmpl	IMM (INFINITY),d1 | and check for a large exponent
    3325 	bge	Lf$inop		| if b is +/-INFINITY or NaN return NaN
    3326 	movel	d7,d0		| else return signed zero
    3327 	PICLEA	SYM (_fpCCR),a0	| clear the exception flags
    3328 	movew	IMM (0),a0@	|
    3329 #ifndef __mcoldfire__
    3330 	moveml	sp@+,d2-d7	|
    3331 #else
    3332 	moveml	sp@,d2-d7
    3333 	| XXX if frame pointer is ever removed, stack pointer must
    3334 	| be adjusted here.
    3335 #endif
    3336 	unlk	a6		|
    3337 	rts			|
    3338 
   3338 
    3339 | If a number is denormalized we put an exponent of 1 but do not put the
    3340 | hidden bit back into the fraction; instead we shift left until bit 23
    3341 | (the hidden bit) is set, adjusting the exponent accordingly. We do this
    3342 | to ensure that the product of the fractions is close to 1.
    3343 Lmulsf$a$den:
    3344 	movel	IMM (1),d2	| denormals have a biased exponent of 1
    3345 	andl	d5,d0
    3346 1:	addl	d0,d0		| shift a left (until bit 23 is set)
    3347 #ifndef __mcoldfire__
    3348 	subw	IMM (1),d2	| and adjust exponent
    3349 #else
    3350 	subql	IMM (1),d2	| and adjust exponent
    3351 #endif
    3352 	btst	IMM (FLT_MANT_DIG-1),d0
    3353 	bne	Lmulsf$1	| normalized: rejoin the common path
    3354 	bra	1b		| else loop back
    3355 
    3356 Lmulsf$b$den:
    3357 	movel	IMM (1),d3	| same treatment for b
    3358 	andl	d5,d1
    3359 1:	addl	d1,d1		| shift b left until bit 23 is set
    3360 #ifndef __mcoldfire__
    3361 	subw	IMM (1),d3	| and adjust exponent
    3362 #else
    3363 	subql	IMM (1),d3	| and adjust exponent
    3364 #endif
    3365 	btst	IMM (FLT_MANT_DIG-1),d1
    3366 	bne	Lmulsf$2	|
    3367 	bra	1b		| else loop back
    3368 
   3368 
    3369 |=============================================================================
    3370 |                             __divsf3
    3371 |=============================================================================
    3372 
    3373 | float __divsf3(float, float);
         | Soft-float single-precision divide, a / b.  Arguments on the stack
         | (a6@(8) = a, a6@(12) = b), result returned in d0.
    3374 	FUNC(__divsf3)
    3375 SYM (__divsf3):
    3376 #ifndef __mcoldfire__
    3377 	link	a6,IMM (0)
    3378 	moveml	d2-d7,sp@-
    3379 #else
    3380 	link	a6,IMM (-24)	| ColdFire: moveml cannot predecrement
    3381 	moveml	d2-d7,sp@
    3382 #endif
    3383 	movel	a6@(8),d0		| get a into d0
    3384 	movel	a6@(12),d1		| and b into d1
    3385 	movel	d0,d7			| d7 will hold the sign of the result
    3386 	eorl	d1,d7			| sign(a) XOR sign(b)
    3387 	andl	IMM (0x80000000),d7	|
    3388 	movel	IMM (INFINITY),d6	| useful constant (+INFINITY)
    3389 	movel	d6,d5			| another (mask for fraction)
    3390 	notl	d5			|
    3391 	movel	IMM (0x00800000),d4	| this is to put hidden bit back
    3392 	bclr	IMM (31),d0		| get rid of a's sign bit '
    3393 	movel	d0,d2			|
    3394 	beq	Ldivsf$a$0		| branch if a is zero
    3395 	bclr	IMM (31),d1		| get rid of b's sign bit '
    3396 	movel	d1,d3			|
    3397 	beq	Ldivsf$b$0		| branch if b is zero
    3398 	cmpl	d6,d0			| is a big?
    3399 	bhi	Ldivsf$inop		| if a is NaN return NaN
    3400 	beq	Ldivsf$inf		| if a is INFINITY we have to check b
    3401 	cmpl	d6,d1			| now compare b with INFINITY
    3402 	bhi	Ldivsf$inop		| if b is NaN return NaN
    3403 	beq	Ldivsf$underflow	| finite / INFINITY = signed zero
    3404 | Here we have both numbers finite and nonzero (and with no sign bit).
    3405 | Now we get the exponents into d2 and d3 and normalize the numbers to
    3406 | ensure that the ratio of the fractions is close to 1. We do this by
    3407 | making sure that bit #FLT_MANT_DIG-1 (hidden bit) is set.
    3408 	andl	d6,d2		| and isolate exponent in d2
    3409 	beq	Ldivsf$a$den	| if exponent is zero we have a denormalized
    3410 	andl	d5,d0		| and isolate fraction
    3411 	orl	d4,d0		| and put hidden bit back
    3412 	swap	d2		| I like exponents in the first byte
    3413 #ifndef __mcoldfire__
    3414 	lsrw	IMM (7),d2	|
    3415 #else
    3416 	lsrl	IMM (7),d2	|
    3417 #endif
    3418 Ldivsf$1:			|
    3419 	andl	d6,d3		| same unpacking for b
    3420 	beq	Ldivsf$b$den	|
    3421 	andl	d5,d1		|
    3422 	orl	d4,d1		|
    3423 	swap	d3		|
    3424 #ifndef __mcoldfire__
    3425 	lsrw	IMM (7),d3	|
    3426 #else
    3427 	lsrl	IMM (7),d3	|
    3428 #endif
    3429 Ldivsf$2:			|
    3430 #ifndef __mcoldfire__
    3431 	subw	d3,d2		| subtract exponents
    3432  	addw	IMM (F_BIAS),d2	| and add bias
    3433 #else
    3434 	subl	d3,d2		| subtract exponents
    3435  	addl	IMM (F_BIAS),d2	| and add bias
    3436 #endif
    3437 
    3438 | We are now ready to do the division. We have prepared things in such a way
    3439 | that the ratio of the fractions will be less than 2 but greater than 1/2.
    3440 | At this point the registers in use are:
    3441 | d0	holds a (first operand, bit FLT_MANT_DIG=0, bit FLT_MANT_DIG-1=1)
    3442 | d1	holds b (second operand, bit FLT_MANT_DIG=1)
    3443 | d2	holds the difference of the exponents, corrected by the bias
    3444 | d7	holds the sign of the ratio
    3445 | d4, d5, d6 hold some constants
    3446 	movel	d7,a0		| d6-d7 will hold the ratio of the fractions
    3447 	movel	IMM (0),d6	|
    3448 	movel	d6,d7
    3449 
         | Restoring division: one quotient bit per iteration.
    3450 	moveq	IMM (FLT_MANT_DIG+1),d3
    3451 1:	cmpl	d0,d1		| is a < b?
    3452 	bhi	2f		|
    3453 	bset	d3,d6		| set a bit in d6
    3454 	subl	d1,d0		| if a >= b  a <-- a-b
    3455 	beq	3f		| if a is zero, exit
    3456 2:	addl	d0,d0		| multiply a by 2
    3457 #ifndef __mcoldfire__
    3458 	dbra	d3,1b
    3459 #else
    3460 	subql	IMM (1),d3
    3461 	bpl	1b
    3462 #endif
    3463 
    3464 | Now we keep going to set the sticky bit ...
    3465 	moveq	IMM (FLT_MANT_DIG),d3
    3466 1:	cmpl	d0,d1
    3467 	ble	2f
    3468 	addl	d0,d0
    3469 #ifndef __mcoldfire__
    3470 	dbra	d3,1b
    3471 #else
    3472 	subql	IMM(1),d3
    3473 	bpl	1b
    3474 #endif
    3475 	movel	IMM (0),d1	| remainder never caught up: sticky bit clear
    3476 	bra	3f
    3477 2:	movel	IMM (0),d1
    3478 #ifndef __mcoldfire__
    3479 	subw	IMM (FLT_MANT_DIG),d3
    3480 	addw	IMM (31),d3
    3481 #else
    3482 	subl	IMM (FLT_MANT_DIG),d3
    3483 	addl	IMM (31),d3
    3484 #endif
    3485 	bset	d3,d1		| record the sticky bit position in d1
    3486 3:
    3487 	movel	d6,d0		| put the ratio in d0-d1
    3488 	movel	a0,d7		| get sign back
    3489 
    3490 | Because of the normalization we did before we are guaranteed that
    3491 | d0 is smaller than 2^26 but larger than 2^24. Thus bit 26 is not set,
    3492 | bit 25 could be set, and if it is not set then bit 24 is necessarily set.
    3493 	btst	IMM (FLT_MANT_DIG+1),d0
    3494 	beq	1f              | if it is not set, then bit 24 is set
    3495 	lsrl	IMM (1),d0	|
    3496 #ifndef __mcoldfire__
    3497 	addw	IMM (1),d2	|
    3498 #else
    3499 	addl	IMM (1),d2	|
    3500 #endif
    3501 1:
    3502 | Now round, check for over- and underflow, and exit.
    3503 	moveq	IMM (DIVIDE),d5
    3504 	bra	Lround$exit
    3505 
   3505 
    3506 Ldivsf$inop:
         | Invalid operation: tag as DIVIDE and signal.
    3507 	moveq	IMM (DIVIDE),d5
    3508 	bra	Lf$inop
    3509 
    3510 Ldivsf$overflow:
    3511 	moveq	IMM (DIVIDE),d5
    3512 	bra	Lf$overflow
    3513 
    3514 Ldivsf$underflow:
    3515 	moveq	IMM (DIVIDE),d5
    3516 	bra	Lf$underflow
    3517 
    3518 Ldivsf$a$0:
    3519 	moveq	IMM (DIVIDE),d5
    3520 | If a is zero check to see whether b is zero also. In that case return
    3521 | NaN; then check if b is NaN, and return NaN also in that case. Else
    3522 | return a properly signed zero.
    3523 	andl	IMM (0x7fffffff),d1	| clear sign bit and test b
    3524 	beq	Lf$inop			| if b is also zero return NaN
    3525 	cmpl	IMM (INFINITY),d1	| check for NaN
    3526 	bhi	Lf$inop			|
    3527 	movel	d7,d0			| else return signed zero
    3528 	PICLEA	SYM (_fpCCR),a0		| clear the exception flags
    3529 	movew	IMM (0),a0@		|
    3530 #ifndef __mcoldfire__
    3531 	moveml	sp@+,d2-d7		|
    3532 #else
    3533 	moveml	sp@,d2-d7		|
    3534 	| XXX if frame pointer is ever removed, stack pointer must
    3535 	| be adjusted here.
    3536 #endif
    3537 	unlk	a6			|
    3538 	rts				|
    3539 
    3540 Ldivsf$b$0:
    3541 	moveq	IMM (DIVIDE),d5
    3542 | If we got here a is not zero. Check if a is NaN; in that case return NaN,
    3543 | else return +/-INFINITY. Remember that a is in d0 with the sign bit
    3544 | cleared already.
    3545 	cmpl	IMM (INFINITY),d0	| compare d0 with INFINITY
    3546 	bhi	Lf$inop			| if larger it is NaN
    3547 	bra	Lf$div$0		| else signal DIVIDE_BY_ZERO
    3548 
    3549 Ldivsf$inf:
    3550 	moveq	IMM (DIVIDE),d5
    3551 | If a is INFINITY we have to check b
    3552 	cmpl	IMM (INFINITY),d1	| compare b with INFINITY
    3553 	bge	Lf$inop			| if b is NaN or INFINITY return NaN
    3554 	bra	Lf$overflow		| else return overflow
    3555 
   3555 
    3556 | If a number is denormalized we put an exponent of 1 but do not put the
    3557 | bit back into the fraction.
    3558 Ldivsf$a$den:
    3559 	movel	IMM (1),d2	| denormals have a biased exponent of 1
    3560 	andl	d5,d0
    3561 1:	addl	d0,d0		| shift a left until bit FLT_MANT_DIG-1 is set
    3562 #ifndef __mcoldfire__
    3563 	subw	IMM (1),d2	| and adjust exponent
    3564 #else
    3565 	subl	IMM (1),d2	| and adjust exponent
    3566 #endif
    3567 	btst	IMM (FLT_MANT_DIG-1),d0
    3568 	bne	Ldivsf$1	| normalized: rejoin the common path
    3569 	bra	1b
    3570 
    3571 Ldivsf$b$den:
    3572 	movel	IMM (1),d3	| same treatment for b
    3573 	andl	d5,d1
    3574 1:	addl	d1,d1		| shift b left until bit FLT_MANT_DIG is set
    3575 #ifndef __mcoldfire__
    3576 	subw	IMM (1),d3	| and adjust exponent
    3577 #else
    3578 	subl	IMM (1),d3	| and adjust exponent
    3579 #endif
    3580 	btst	IMM (FLT_MANT_DIG-1),d1
    3581 	bne	Ldivsf$2
    3582 	bra	1b
    3583 
   3583 
    3584 Lround$exit:
    3585 | This is a common exit point for __mulsf3 and __divsf3.
         | On entry: d0-d1 = fraction (bit #FLT_MANT_DIG or lower set),
         | d2 = exponent, d5 = operation code, d7 = sign bit.
    3586 
    3587 | First check for underflow in the exponent:
    3588 #ifndef __mcoldfire__
    3589 	cmpw	IMM (-FLT_MANT_DIG-1),d2
    3590 #else
    3591 	cmpl	IMM (-FLT_MANT_DIG-1),d2
    3592 #endif
    3593 	blt	Lf$underflow
    3594 | It could happen that the exponent is less than 1, in which case the
    3595 | number is denormalized. In this case we shift right and adjust the
    3596 | exponent until it becomes 1 or the fraction is zero (in the latter case
    3597 | we signal underflow and return zero).
    3598 	movel	IMM (0),d6	| d6 is used temporarily
    3599 #ifndef __mcoldfire__
    3600 	cmpw	IMM (1),d2	| if the exponent is less than 1 we
    3601 #else
    3602 	cmpl	IMM (1),d2	| if the exponent is less than 1 we
    3603 #endif
    3604 	bge	2f		| have to shift right (denormalize)
    3605 1:
    3606 #ifndef __mcoldfire__
    3607 	addw	IMM (1),d2	| adjust the exponent
    3608 	lsrl	IMM (1),d0	| shift right once
    3609 	roxrl	IMM (1),d1	|
    3610 	roxrl	IMM (1),d6	| d6 collect bits we would lose otherwise
    3611 	cmpw	IMM (1),d2	| is the exponent 1 already?
    3612 #else
         | ColdFire: no roxrl, so propagate the shifted-out bits by hand
    3613 	addql	IMM (1),d2	| adjust the exponent
    3614 	lsrl	IMM (1),d6
    3615 	btst	IMM (0),d1
    3616 	beq	11f
    3617 	bset	IMM (31),d6
    3618 11:	lsrl	IMM (1),d1
    3619 	btst	IMM (0),d0
    3620 	beq	10f
    3621 	bset	IMM (31),d1
    3622 10:	lsrl	IMM (1),d0
    3623 	cmpl	IMM (1),d2	| is the exponent 1 already?
    3624 #endif
    3625 	beq	2f		| if so, we are done denormalizing
    3626 	bra	1b              | else shift once more
    3627 	bra	Lf$underflow	| safety check, shouldn't execute '
    3628 2:	orl	d6,d1		| this is a trick so we don't lose  '
    3629 				| the extra bits which were flushed right
    3630 | Now call the rounding routine (which takes care of denormalized numbers):
    3631 	lea	pc@(Lround$0),a0 | to return from rounding routine
    3632 	PICLEA	SYM (_fpCCR),a1	| check the rounding mode
    3633 #ifdef __mcoldfire__
    3634 	clrl	d6		| mode is a word; clear high half for cmpl below
    3635 #endif
    3636 	movew	a1@(6),d6	| rounding mode in d6
    3637 	beq	Lround$to$nearest
    3638 #ifndef __mcoldfire__
    3639 	cmpw	IMM (ROUND_TO_PLUS),d6
    3640 #else
    3641 	cmpl	IMM (ROUND_TO_PLUS),d6
    3642 #endif
    3643 	bhi	Lround$to$minus
    3644 	blt	Lround$to$zero
    3645 	bra	Lround$to$plus
    3646 Lround$0:
    3647 | Here we have a correctly rounded result (either normalized or denormalized).
    3648 
    3649 | Here we should have either a normalized number or a denormalized one, and
    3650 | the exponent is necessarily larger or equal to 1 (so we don't have to  '
    3651 | check again for underflow!). We have to check for overflow or for a
    3652 | denormalized number (which also signals underflow).
    3653 | Check for overflow (i.e., exponent >= 255).
    3654 #ifndef __mcoldfire__
    3655 	cmpw	IMM (0x00ff),d2
    3656 #else
    3657 	cmpl	IMM (0x00ff),d2
    3658 #endif
    3659 	bge	Lf$overflow
    3660 | Now check for a denormalized number (exponent==0).
    3661 	movew	d2,d2		| just to set the condition codes on d2
    3662 	beq	Lf$den
    3663 1:
    3664 | Put back the exponents and sign and return.
    3665 #ifndef __mcoldfire__
    3666 	lslw	IMM (7),d2	| exponent back to fourth byte
    3667 #else
    3668 	lsll	IMM (7),d2	| exponent back to fourth byte
    3669 #endif
    3670 	bclr	IMM (FLT_MANT_DIG-1),d0	| clear the hidden bit
    3671 	swap	d0		| and put back exponent
    3672 #ifndef __mcoldfire__
    3673 	orw	d2,d0		|
    3674 #else
    3675 	orl	d2,d0
    3676 #endif
    3677 	swap	d0		|
    3678 	orl	d7,d0		| and sign also
    3679 
    3680 	PICLEA	SYM (_fpCCR),a0
    3681 	movew	IMM (0),a0@	| clear the exception flags
    3682 #ifndef __mcoldfire__
    3683 	moveml	sp@+,d2-d7
    3684 #else
    3685 	moveml	sp@,d2-d7
    3686 	| XXX if frame pointer is ever removed, stack pointer must
    3687 	| be adjusted here.
    3688 #endif
    3689 	unlk	a6
    3690 	rts
    3691 
   3691 
    3692 |=============================================================================
    3693 |                             __negsf2
    3694 |=============================================================================
    3695 
    3696 | This is trivial and could be shorter if we didn't bother checking for NaN '
    3697 | and +/-INFINITY.
    3698 
    3699 | float __negsf2(float);
         | Negate a single float: flip the sign bit, but route NaN through
         | Lf$inop and +/-INFINITY through Lf$infty, and return +0 for +/-0.
    3700 	FUNC(__negsf2)
    3701 SYM (__negsf2):
    3702 #ifndef __mcoldfire__
    3703 	link	a6,IMM (0)
    3704 	moveml	d2-d7,sp@-
    3705 #else
    3706 	link	a6,IMM (-24)	| ColdFire: moveml cannot predecrement
    3707 	moveml	d2-d7,sp@
    3708 #endif
    3709 	moveq	IMM (NEGATE),d5
    3710 	movel	a6@(8),d0	| get number to negate in d0
    3711 	bchg	IMM (31),d0	| negate
    3712 	movel	d0,d1		| make a positive copy
    3713 	bclr	IMM (31),d1	|
    3714 	tstl	d1		| check for zero
    3715 	beq	2f		| if zero (either sign) return +zero
    3716 	cmpl	IMM (INFINITY),d1 | compare to +INFINITY
    3717 	blt	1f		| finite: return the negated value as is
    3718 	bhi	Lf$inop		| if larger (fraction not zero) is NaN
    3719 	movel	d0,d7		| else get sign and return INFINITY
    3720 	andl	IMM (0x80000000),d7
    3721 	bra	Lf$infty
    3722 1:	PICLEA	SYM (_fpCCR),a0
    3723 	movew	IMM (0),a0@	| clear the exception flags
    3724 #ifndef __mcoldfire__
    3725 	moveml	sp@+,d2-d7
    3726 #else
    3727 	moveml	sp@,d2-d7
    3728 	| XXX if frame pointer is ever removed, stack pointer must
    3729 	| be adjusted here.
    3730 #endif
    3731 	unlk	a6
    3732 	rts
    3733 2:	bclr	IMM (31),d0	| zero input: force +0 and take normal exit
    3734 	bra	1b
    3735 
   3735 
    3736 |=============================================================================
    3737 |                             __cmpsf2
    3738 |=============================================================================
    3739 
    3740 GREATER =  1
    3741 LESS    = -1
    3742 EQUAL   =  0
    3743 
    3744 | int __cmpsf2_internal(float, float, int);
         | Compare two singles; returns GREATER/LESS/EQUAL in d0.  The third
         | argument (a6@(16)) is the value returned if either operand is NaN.
    3745 SYM (__cmpsf2_internal):
    3746 #ifndef __mcoldfire__
    3747 	link	a6,IMM (0)
    3748 	moveml	d2-d7,sp@- 	| save registers
    3749 #else
    3750 	link	a6,IMM (-24)
    3751 	moveml	d2-d7,sp@
    3752 #endif
    3753 	moveq	IMM (COMPARE),d5
    3754 	movel	a6@(8),d0	| get first operand
    3755 	movel	a6@(12),d1	| get second operand
    3756 | Check if either is NaN, and in that case return garbage and signal
    3757 | INVALID_OPERATION. Check also if either is zero, and clear the signs
    3758 | if necessary.
    3759 	movel	d0,d6		| d6 keeps a with its sign
    3760 	andl	IMM (0x7fffffff),d0
    3761 	beq	Lcmpsf$a$0
    3762 	cmpl	IMM (0x7f800000),d0
    3763 	bhi	Lcmpf$inop	| |a| > INFINITY: NaN
    3764 Lcmpsf$1:
    3765 	movel	d1,d7		| d7 keeps b with its sign
    3766 	andl	IMM (0x7fffffff),d1
    3767 	beq	Lcmpsf$b$0
    3768 	cmpl	IMM (0x7f800000),d1
    3769 	bhi	Lcmpf$inop	| |b| > INFINITY: NaN
    3770 Lcmpsf$2:
    3771 | Check the signs
    3772 	eorl	d6,d7
    3773 	bpl	1f
    3774 | If the signs are not equal check if a >= 0
    3775 	tstl	d6
    3776 	bpl	Lcmpsf$a$gt$b	| if (a >= 0 && b < 0) => a > b
    3777 	bmi	Lcmpsf$b$gt$a	| if (a < 0 && b >= 0) => a < b
    3778 1:
    3779 | If the signs are equal check for < 0
    3780 	tstl	d6
    3781 	bpl	1f
    3782 | If both are negative exchange them
    3783 #ifndef __mcoldfire__
    3784 	exg	d0,d1
    3785 #else
    3786 	movel	d0,d7		| ColdFire has no exg; swap via d7
    3787 	movel	d1,d0
    3788 	movel	d7,d1
    3789 #endif
    3790 1:
    3791 | Now that they are positive we just compare them as longs (does this also
    3792 | work for denormalized numbers?).
    3793 	cmpl	d0,d1
    3794 	bhi	Lcmpsf$b$gt$a	| |b| > |a|
    3795 	bne	Lcmpsf$a$gt$b	| |b| < |a|
    3796 | If we got here a == b.
    3797 	movel	IMM (EQUAL),d0
    3798 #ifndef __mcoldfire__
    3799 	moveml	sp@+,d2-d7 	| put back the registers
    3800 #else
    3801 	moveml	sp@,d2-d7
    3802 #endif
    3803 	unlk	a6
    3804 	rts
    3805 Lcmpsf$a$gt$b:
    3806 	movel	IMM (GREATER),d0
    3807 #ifndef __mcoldfire__
    3808 	moveml	sp@+,d2-d7 	| put back the registers
    3809 #else
    3810 	moveml	sp@,d2-d7
    3811 	| XXX if frame pointer is ever removed, stack pointer must
    3812 	| be adjusted here.
    3813 #endif
    3814 	unlk	a6
    3815 	rts
    3816 Lcmpsf$b$gt$a:
    3817 	movel	IMM (LESS),d0
    3818 #ifndef __mcoldfire__
    3819 	moveml	sp@+,d2-d7 	| put back the registers
    3820 #else
    3821 	moveml	sp@,d2-d7
    3822 	| XXX if frame pointer is ever removed, stack pointer must
    3823 	| be adjusted here.
    3824 #endif
    3825 	unlk	a6
    3826 	rts
    3827 
    3828 Lcmpsf$a$0:
         | a is +/-0: drop its sign so -0 compares equal to +0.
    3829 	bclr	IMM (31),d6
    3830 	bra	Lcmpsf$1
    3831 Lcmpsf$b$0:
         | b is +/-0: likewise clear its sign.
    3832 	bclr	IMM (31),d7
    3833 	bra	Lcmpsf$2
    3834 
    3835 Lcmpf$inop:
         | NaN operand: return the caller-supplied "unordered" value
         | (third argument) and raise INVALID_OPERATION.
    3836 	movl	a6@(16),d0
    3837 	moveq	IMM (INEXACT_RESULT+INVALID_OPERATION),d7
    3838 	moveq	IMM (SINGLE_FLOAT),d6
    3839 	PICJUMP	$_exception_handler
    3840 
   3840 
    3841 | int __cmpsf2(float, float);
         | Public entry point: calls __cmpsf2_internal with 1 as the value
         | to return when the operands are unordered (NaN).
    3842 	FUNC(__cmpsf2)
    3843 SYM (__cmpsf2):
    3844 	link	a6,IMM (0)
    3845 	pea	1		| NaN result value for the internal routine
    3846 	movl	a6@(12),sp@-
    3847 	movl	a6@(8),sp@-
    3848 	PICCALL SYM (__cmpsf2_internal)
    3849 	unlk	a6
    3850 	rts
    3851 
   3851 
   3852 |=============================================================================
   3853 |                           rounding routines
   3854 |=============================================================================
   3855 
   3856 | The rounding routines expect the number to be normalized in registers
   3857 | d0-d1, with the exponent in register d2. They assume that the
   3858 | exponent is larger or equal to 1. They return a properly normalized number
   3859 | if possible, and a denormalized number otherwise. The exponent is returned
   3860 | in d2.
   3861 
Lround$to$nearest:
| Round-to-nearest-even: normalize the fraction in d0-d1 (exponent in d2),
| then round using the guard bit (bit 0 of d0) and the sticky bits (d1).
| We now normalize as suggested by D. Knuth ("Seminumerical Algorithms"):
| Here we assume that the exponent is not too small (this should be checked
| before entering the rounding routine), but the number could be denormalized.

| Check for denormalized numbers:
1:	btst	IMM (FLT_MANT_DIG),d0
	bne	2f		| if set the number is normalized
| Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -F_BIAS+1).
#ifndef __mcoldfire__
	cmpw	IMM (1),d2	| remember that the exponent is at least one
#else
	cmpl	IMM (1),d2	| remember that the exponent is at least one
#endif
 	beq	2f		| an exponent of one means denormalized
	addl	d1,d1		| else shift the 64-bit pair d0:d1 left one bit
	addxl	d0,d0		| (addx propagates the carry into d0)
#ifndef __mcoldfire__
	dbra	d2,1b		| and adjust the exponent
#else
	subql	IMM (1),d2
	bpl	1b
#endif
2:
| Now round: we do it as follows: after the shifting we can write the
| fraction part as f + delta, where 1 < f < 2^25, and 0 <= delta <= 2.
| If delta < 1, do nothing. If delta > 1, add 1 to f.
| If delta == 1 (an exact tie), we round so that the result is even
| after the final shift (round-to-even).
	btst	IMM (0),d0	| guard bit clear, i.e. delta < 1?
	beq	2f		| if so, do not do anything
	tstl	d1		| sticky bits zero, i.e. delta == 1?
	bne	1f		| delta > 1: round up unconditionally
	movel	d0,d1		| exact tie: round to even --
	andl	IMM (2),d1	| bit 1 is the last significant bit
	addl	d1,d0		| adding it makes that bit even
	bra	2f		|
1:	movel	IMM (1),d1	| else add 1
	addl	d1,d0		|
| Shift right once (because we used bit #FLT_MANT_DIG!).
2:	lsrl	IMM (1),d0
| Now check again bit #FLT_MANT_DIG (rounding could have produced a
| 'fraction overflow' ...).
	btst	IMM (FLT_MANT_DIG),d0
	beq	1f
	lsrl	IMM (1),d0	| renormalize: shift right once more
#ifndef __mcoldfire__
	addw	IMM (1),d2	| and bump the exponent
#else
	addql	IMM (1),d2
#endif
1:
| If bit #FLT_MANT_DIG-1 is clear we have a denormalized number, so we
| have to put the exponent to zero and return a denormalized number.
	btst	IMM (FLT_MANT_DIG-1),d0
	beq	1f
	jmp	a0@		| normalized: return to caller via a0
1:	movel	IMM (0),d2	| denormalized: force the exponent to zero
	jmp	a0@		| and return to caller via a0
   3923 
Lround$to$zero:
Lround$to$plus:
Lround$to$minus:
| These rounding modes perform no adjustment here: the extra fraction
| bits are simply discarded (truncation), and control returns via a0.
	jmp	a0@
   3928 #endif /* L_float */
   3929 
   3930 | gcc expects the routines __eqdf2, __nedf2, __gtdf2, __gedf2,
   3931 | __ledf2, __ltdf2 to all return the same value as a direct call to
   3932 | __cmpdf2 would.  In this implementation, each of these routines
   3933 | simply calls __cmpdf2.  It would be more efficient to give the
   3934 | __cmpdf2 routine several names, but separating them out will make it
   3935 | easier to write efficient versions of these routines someday.
| If the operands compare unordered, __gtdf2 and __gedf2 return -1.
   3937 | The other routines return 1.
   3938 
#ifdef  L_eqdf2
| int __eqdf2(double a, double b);
| Returns the same value as __cmpdf2 (zero iff a == b).  Calls
| __cmpdf2_internal with a third argument, 1, the value to return when
| the operands are unordered (a NaN), so NaN compares as "not equal".
	.text
	FUNC(__eqdf2)
	.globl	SYM (__eqdf2)
SYM (__eqdf2):
	link	a6,IMM (0)
	pea	1			| unordered result
	movl	a6@(20),sp@-		| push b (low word)
	movl	a6@(16),sp@-		| push b (high word)
	movl	a6@(12),sp@-		| push a (low word)
	movl	a6@(8),sp@-		| push a (high word)
	PICCALL	SYM (__cmpdf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_eqdf2 */
   3954 
#ifdef  L_nedf2
| int __nedf2(double a, double b);
| Returns the same value as __cmpdf2 (nonzero iff a != b).  Calls
| __cmpdf2_internal with a third argument, 1, the value to return when
| the operands are unordered (a NaN), so NaN compares as "not equal".
	.text
	FUNC(__nedf2)
	.globl	SYM (__nedf2)
SYM (__nedf2):
	link	a6,IMM (0)
	pea	1			| unordered result
	movl	a6@(20),sp@-		| push b (low word)
	movl	a6@(16),sp@-		| push b (high word)
	movl	a6@(12),sp@-		| push a (low word)
	movl	a6@(8),sp@-		| push a (high word)
	PICCALL	SYM (__cmpdf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_nedf2 */
   3970 
#ifdef  L_gtdf2
| int __gtdf2(double a, double b);
| Returns the same value as __cmpdf2 (positive iff a > b).  Calls
| __cmpdf2_internal with a third argument, -1, the value to return when
| the operands are unordered (a NaN), so NaN makes "a > b" false.
	.text
	FUNC(__gtdf2)
	.globl	SYM (__gtdf2)
SYM (__gtdf2):
	link	a6,IMM (0)
	pea	-1			| unordered result
	movl	a6@(20),sp@-		| push b (low word)
	movl	a6@(16),sp@-		| push b (high word)
	movl	a6@(12),sp@-		| push a (low word)
	movl	a6@(8),sp@-		| push a (high word)
	PICCALL	SYM (__cmpdf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_gtdf2 */
   3986 
#ifdef  L_gedf2
| int __gedf2(double a, double b);
| Returns the same value as __cmpdf2 (non-negative iff a >= b).  Calls
| __cmpdf2_internal with a third argument, -1, the value to return when
| the operands are unordered (a NaN), so NaN makes "a >= b" false.
	.text
	FUNC(__gedf2)
	.globl	SYM (__gedf2)
SYM (__gedf2):
	link	a6,IMM (0)
	pea	-1			| unordered result
	movl	a6@(20),sp@-		| push b (low word)
	movl	a6@(16),sp@-		| push b (high word)
	movl	a6@(12),sp@-		| push a (low word)
	movl	a6@(8),sp@-		| push a (high word)
	PICCALL	SYM (__cmpdf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_gedf2 */
   4002 
#ifdef  L_ltdf2
| int __ltdf2(double a, double b);
| Returns the same value as __cmpdf2 (negative iff a < b).  Calls
| __cmpdf2_internal with a third argument, 1, the value to return when
| the operands are unordered (a NaN), so NaN makes "a < b" false.
	.text
	FUNC(__ltdf2)
	.globl	SYM (__ltdf2)
SYM (__ltdf2):
	link	a6,IMM (0)
	pea	1			| unordered result
	movl	a6@(20),sp@-		| push b (low word)
	movl	a6@(16),sp@-		| push b (high word)
	movl	a6@(12),sp@-		| push a (low word)
	movl	a6@(8),sp@-		| push a (high word)
	PICCALL	SYM (__cmpdf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_ltdf2 */
   4018 
#ifdef  L_ledf2
| int __ledf2(double a, double b);
| Returns the same value as __cmpdf2 (non-positive iff a <= b).  Calls
| __cmpdf2_internal with a third argument, 1, the value to return when
| the operands are unordered (a NaN), so NaN makes "a <= b" false.
	.text
	FUNC(__ledf2)
	.globl	SYM (__ledf2)
SYM (__ledf2):
	link	a6,IMM (0)
	pea	1			| unordered result
	movl	a6@(20),sp@-		| push b (low word)
	movl	a6@(16),sp@-		| push b (high word)
	movl	a6@(12),sp@-		| push a (low word)
	movl	a6@(8),sp@-		| push a (high word)
	PICCALL	SYM (__cmpdf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_ledf2 */
   4034 
   4035 | The comments above about __eqdf2, et. al., also apply to __eqsf2,
   4036 | et. al., except that the latter call __cmpsf2 rather than __cmpdf2.
   4037 
#ifdef  L_eqsf2
| int __eqsf2(float a, float b);
| Returns the same value as __cmpsf2 (zero iff a == b).  Calls
| __cmpsf2_internal with a third argument, 1, the value to return when
| the operands are unordered (a NaN), so NaN compares as "not equal".
	.text
	FUNC(__eqsf2)
	.globl	SYM (__eqsf2)
SYM (__eqsf2):
	link	a6,IMM (0)
	pea	1			| unordered result
	movl	a6@(12),sp@-		| push b
	movl	a6@(8),sp@-		| push a
	PICCALL	SYM (__cmpsf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_eqsf2 */
   4051 
#ifdef  L_nesf2
| int __nesf2(float a, float b);
| Returns the same value as __cmpsf2 (nonzero iff a != b).  Calls
| __cmpsf2_internal with a third argument, 1, the value to return when
| the operands are unordered (a NaN), so NaN compares as "not equal".
	.text
	FUNC(__nesf2)
	.globl	SYM (__nesf2)
SYM (__nesf2):
	link	a6,IMM (0)
	pea	1			| unordered result
	movl	a6@(12),sp@-		| push b
	movl	a6@(8),sp@-		| push a
	PICCALL	SYM (__cmpsf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_nesf2 */
   4065 
#ifdef  L_gtsf2
| int __gtsf2(float a, float b);
| Returns the same value as __cmpsf2 (positive iff a > b).  Calls
| __cmpsf2_internal with a third argument, -1, the value to return when
| the operands are unordered (a NaN), so NaN makes "a > b" false.
	.text
	FUNC(__gtsf2)
	.globl	SYM (__gtsf2)
SYM (__gtsf2):
	link	a6,IMM (0)
	pea	-1			| unordered result
	movl	a6@(12),sp@-		| push b
	movl	a6@(8),sp@-		| push a
	PICCALL	SYM (__cmpsf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_gtsf2 */
   4079 
#ifdef  L_gesf2
| int __gesf2(float a, float b);
| Returns the same value as __cmpsf2 (non-negative iff a >= b).  Calls
| __cmpsf2_internal with a third argument, -1, the value to return when
| the operands are unordered (a NaN), so NaN makes "a >= b" false.
	.text
	FUNC(__gesf2)
	.globl	SYM (__gesf2)
SYM (__gesf2):
	link	a6,IMM (0)
	pea	-1			| unordered result
	movl	a6@(12),sp@-		| push b
	movl	a6@(8),sp@-		| push a
	PICCALL	SYM (__cmpsf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_gesf2 */
   4093 
#ifdef  L_ltsf2
| int __ltsf2(float a, float b);
| Returns the same value as __cmpsf2 (negative iff a < b).  Calls
| __cmpsf2_internal with a third argument, 1, the value to return when
| the operands are unordered (a NaN), so NaN makes "a < b" false.
	.text
	FUNC(__ltsf2)
	.globl	SYM (__ltsf2)
SYM (__ltsf2):
	link	a6,IMM (0)
	pea	1			| unordered result
	movl	a6@(12),sp@-		| push b
	movl	a6@(8),sp@-		| push a
	PICCALL	SYM (__cmpsf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_ltsf2 */
   4107 
#ifdef  L_lesf2
| int __lesf2(float a, float b);
| Returns the same value as __cmpsf2 (non-positive iff a <= b).  Calls
| __cmpsf2_internal with a third argument, 1, the value to return when
| the operands are unordered (a NaN), so NaN makes "a <= b" false.
	.text
	FUNC(__lesf2)
	.globl	SYM (__lesf2)
SYM (__lesf2):
	link	a6,IMM (0)
	pea	1			| unordered result
	movl	a6@(12),sp@-		| push b
	movl	a6@(8),sp@-		| push a
	PICCALL	SYM (__cmpsf2_internal)	| result in d0
	unlk	a6
	rts
#endif /* L_lesf2 */
   4121 
   4122 #if defined (__ELF__) && defined (__linux__)
   4123 	/* Make stack non-executable for ELF linux targets.  */
   4124 	.section	.note.GNU-stack,"",@progbits
   4125 #endif
   4126