Home | History | Annotate | Line # | Download | only in arm
      1       1.1  mrg /* ieee754-df.S double-precision floating point support for ARM
      2       1.1  mrg 
      3  1.1.1.11  mrg    Copyright (C) 2003-2024 Free Software Foundation, Inc.
      4   1.1.1.8  mrg    Contributed by Nicolas Pitre (nico (at) fluxnic.net)
      5       1.1  mrg 
      6       1.1  mrg    This file is free software; you can redistribute it and/or modify it
      7       1.1  mrg    under the terms of the GNU General Public License as published by the
      8       1.1  mrg    Free Software Foundation; either version 3, or (at your option) any
      9       1.1  mrg    later version.
     10       1.1  mrg 
     11       1.1  mrg    This file is distributed in the hope that it will be useful, but
     12       1.1  mrg    WITHOUT ANY WARRANTY; without even the implied warranty of
     13       1.1  mrg    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14       1.1  mrg    General Public License for more details.
     15       1.1  mrg 
     16       1.1  mrg    Under Section 7 of GPL version 3, you are granted additional
     17       1.1  mrg    permissions described in the GCC Runtime Library Exception, version
     18       1.1  mrg    3.1, as published by the Free Software Foundation.
     19       1.1  mrg 
     20       1.1  mrg    You should have received a copy of the GNU General Public License and
     21       1.1  mrg    a copy of the GCC Runtime Library Exception along with this program;
     22       1.1  mrg    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     23       1.1  mrg    <http://www.gnu.org/licenses/>.  */
     24       1.1  mrg 
     25       1.1  mrg /*
     26       1.1  mrg  * Notes:
     27       1.1  mrg  *
     28       1.1  mrg  * The goal of this code is to be as fast as possible.  This is
     29       1.1  mrg  * not meant to be easy to understand for the casual reader.
     30       1.1  mrg  * For slightly simpler code please see the single precision version
     31       1.1  mrg  * of this file.
     32       1.1  mrg  *
     33       1.1  mrg  * Only the default rounding mode is intended for best performances.
     34       1.1  mrg  * Exceptions aren't supported yet, but that can be added quite easily
     35       1.1  mrg  * if necessary without impacting performances.
     36   1.1.1.3  mrg  *
     37   1.1.1.3  mrg  * In the CFI related comments, 'previousOffset' refers to the previous offset
     38   1.1.1.3  mrg  * from sp used to compute the CFA.
     39       1.1  mrg  */
     40       1.1  mrg 
     41   1.1.1.3  mrg 	.cfi_sections .debug_frame
     42       1.1  mrg 
     43       1.1  mrg #ifndef __ARMEB__
     44       1.1  mrg #define xl r0
     45       1.1  mrg #define xh r1
     46       1.1  mrg #define yl r2
     47       1.1  mrg #define yh r3
     48       1.1  mrg #else
     49       1.1  mrg #define xh r0
     50       1.1  mrg #define xl r1
     51       1.1  mrg #define yh r2
     52       1.1  mrg #define yl r3
     53       1.1  mrg #endif
     54       1.1  mrg 
     55       1.1  mrg 
     56       1.1  mrg #ifdef L_arm_negdf2
     57       1.1  mrg 
                        @ -----------------------------------------------------------------------
                        @ negdf2 / __aeabi_dneg: double-precision negation.
                        @ In:  xh-xl = IEEE-754 double (xh is the word with sign+exponent,
                        @      per the __ARMEB__ register mapping above).
                        @ Out: xh-xl = -input.
                        @ Toggling the sign bit alone is correct for every input class
                        @ (zero, denormal, normal, INF, NaN); no other registers or the
                        @ flags are modified.
                        @ -----------------------------------------------------------------------
      58       1.1  mrg ARM_FUNC_START negdf2
      59       1.1  mrg ARM_FUNC_ALIAS aeabi_dneg negdf2
      60   1.1.1.3  mrg 	CFI_START_FUNCTION
      61       1.1  mrg 
      62       1.1  mrg 	@ flip sign bit
      63       1.1  mrg 	eor	xh, xh, #0x80000000
      64       1.1  mrg 	RET
      65       1.1  mrg 
      66   1.1.1.3  mrg 	CFI_END_FUNCTION
      67       1.1  mrg 	FUNC_END aeabi_dneg
      68       1.1  mrg 	FUNC_END negdf2
     69       1.1  mrg 
     70       1.1  mrg #endif
     71       1.1  mrg 
     72       1.1  mrg #ifdef L_arm_addsubdf3
     73       1.1  mrg 
                        @ -----------------------------------------------------------------------
                        @ Double-precision add/subtract family:
                        @   __aeabi_drsub(x, y) = y - x   (flip sign of x, then add)
                        @   subdf3 / __aeabi_dsub(x, y) = x - y   (flip sign of y, then add)
                        @   adddf3 / __aeabi_dadd(x, y) = x + y
                        @ In:  x in xh-xl (r0-r1), y in yh-yl (r2-r3); word order per __ARMEB__.
                        @ Out: result in xh-xl.
                        @ All three entry points funnel into the common path at local label 1.
                        @ The Lad_* labels below are also branched to from the int->double and
                        @ float->double conversion routines later in this file, which set up the
                        @ same register/stack state before jumping in (see the CFI note below).
                        @ -----------------------------------------------------------------------
      74       1.1  mrg ARM_FUNC_START aeabi_drsub
      75   1.1.1.3  mrg 	CFI_START_FUNCTION
      76       1.1  mrg 
      77       1.1  mrg 	eor	xh, xh, #0x80000000	@ flip sign bit of first arg
      78       1.1  mrg 	b	1f
      79       1.1  mrg 
      80       1.1  mrg ARM_FUNC_START subdf3
      81       1.1  mrg ARM_FUNC_ALIAS aeabi_dsub subdf3
      82       1.1  mrg 
      83       1.1  mrg 	eor	yh, yh, #0x80000000	@ flip sign bit of second arg
      84       1.1  mrg #if defined(__INTERWORKING_STUBS__)
      85       1.1  mrg 	b	1f			@ Skip Thumb-code prologue
      86       1.1  mrg #endif
      87       1.1  mrg 
      88       1.1  mrg ARM_FUNC_START adddf3
      89       1.1  mrg ARM_FUNC_ALIAS aeabi_dadd adddf3
      90       1.1  mrg 
      91   1.1.1.3  mrg 1:  do_push {r4, r5, lr}        @ sp -= 12
      92   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 12   @ CFA is now sp + previousOffset + 12
      93   1.1.1.3  mrg 	.cfi_rel_offset r4, 0       @ Registers are saved from sp to sp + 8
      94   1.1.1.3  mrg 	.cfi_rel_offset r5, 4
      95   1.1.1.3  mrg 	.cfi_rel_offset lr, 8
      96       1.1  mrg 
      97       1.1  mrg 	@ Look for zeroes, equal values, INF, or NAN.
      98       1.1  mrg 	shift1	lsl, r4, xh, #1
      99       1.1  mrg 	shift1	lsl, r5, yh, #1
     100       1.1  mrg 	teq	r4, r5
     101       1.1  mrg 	do_it	eq
     102       1.1  mrg 	teqeq	xl, yl
     103       1.1  mrg 	do_it	ne, ttt
     104       1.1  mrg 	COND(orr,s,ne)	ip, r4, xl
     105       1.1  mrg 	COND(orr,s,ne)	ip, r5, yl
     106       1.1  mrg 	COND(mvn,s,ne)	ip, r4, asr #21
     107       1.1  mrg 	COND(mvn,s,ne)	ip, r5, asr #21
     108       1.1  mrg 	beq	LSYM(Lad_s)
     109       1.1  mrg 
     110       1.1  mrg 	@ Compute exponent difference.  Make largest exponent in r4,
     111       1.1  mrg 	@ corresponding arg in xh-xl, and positive exponent difference in r5.
     112       1.1  mrg 	shift1	lsr, r4, r4, #21
     113       1.1  mrg 	rsbs	r5, r4, r5, lsr #21
     114       1.1  mrg 	do_it	lt
     115       1.1  mrg 	rsblt	r5, r5, #0
     116       1.1  mrg 	ble	1f
                        @ y has the larger exponent: swap x and y with the
                        @ three-XOR trick (no scratch register needed).
     117       1.1  mrg 	add	r4, r4, r5
     118       1.1  mrg 	eor	yl, xl, yl
     119       1.1  mrg 	eor	yh, xh, yh
     120       1.1  mrg 	eor	xl, yl, xl
     121       1.1  mrg 	eor	xh, yh, xh
     122       1.1  mrg 	eor	yl, xl, yl
     123       1.1  mrg 	eor	yh, xh, yh
     124       1.1  mrg 1:
     125       1.1  mrg 	@ If exponent difference is too large, return largest argument
     126       1.1  mrg 	@ already in xh-xl.  We need up to 54 bit to handle proper rounding
     127       1.1  mrg 	@ of 0x1p54 - 1.1.
     128       1.1  mrg 	cmp	r5, #54
     129       1.1  mrg 	do_it	hi
     130       1.1  mrg 	RETLDM	"r4, r5" hi
     131       1.1  mrg 
     132       1.1  mrg 	@ Convert mantissa to signed integer.
     133       1.1  mrg 	tst	xh, #0x80000000
     134       1.1  mrg 	mov	xh, xh, lsl #12
     135       1.1  mrg 	mov	ip, #0x00100000
     136       1.1  mrg 	orr	xh, ip, xh, lsr #12
     137       1.1  mrg 	beq	1f
     138       1.1  mrg #if defined(__thumb2__)
     139       1.1  mrg 	negs	xl, xl
     140       1.1  mrg 	sbc	xh, xh, xh, lsl #1
     141       1.1  mrg #else
     142       1.1  mrg 	rsbs	xl, xl, #0
     143       1.1  mrg 	rsc	xh, xh, #0
     144       1.1  mrg #endif
     145       1.1  mrg 1:
     146       1.1  mrg 	tst	yh, #0x80000000
     147       1.1  mrg 	mov	yh, yh, lsl #12
     148       1.1  mrg 	orr	yh, ip, yh, lsr #12
     149       1.1  mrg 	beq	1f
     150       1.1  mrg #if defined(__thumb2__)
     151       1.1  mrg 	negs	yl, yl
     152       1.1  mrg 	sbc	yh, yh, yh, lsl #1
     153       1.1  mrg #else
     154       1.1  mrg 	rsbs	yl, yl, #0
     155       1.1  mrg 	rsc	yh, yh, #0
     156       1.1  mrg #endif
     157       1.1  mrg 1:
     158       1.1  mrg 	@ If exponent == difference, one or both args were denormalized.
     159       1.1  mrg 	@ Since this is not common case, rescale them off line.
     160       1.1  mrg 	teq	r4, r5
     161       1.1  mrg 	beq	LSYM(Lad_d)
     162   1.1.1.3  mrg 
     163   1.1.1.4  mrg @ CFI note: we're lucky that the branches to Lad_* that appear after this
     164   1.1.1.4  mrg @ function have a CFI state that's exactly the same as the one we're in at this
     165   1.1.1.3  mrg @ point. Otherwise the CFI would change to a different state after the branch,
     166   1.1.1.3  mrg @ which would be disastrous for backtracing.
     167       1.1  mrg LSYM(Lad_x):
     168       1.1  mrg 
     169       1.1  mrg 	@ Compensate for the exponent overlapping the mantissa MSB added later
     170       1.1  mrg 	sub	r4, r4, #1
     171       1.1  mrg 
     172       1.1  mrg 	@ Shift yh-yl right per r5, add to xh-xl, keep leftover bits into ip.
     173       1.1  mrg 	rsbs	lr, r5, #32
     174       1.1  mrg 	blt	1f
     175       1.1  mrg 	shift1	lsl, ip, yl, lr
     176       1.1  mrg 	shiftop adds xl xl yl lsr r5 yl
     177       1.1  mrg 	adc	xh, xh, #0
     178       1.1  mrg 	shiftop adds xl xl yh lsl lr yl
     179       1.1  mrg 	shiftop adcs xh xh yh asr r5 yh
     180       1.1  mrg 	b	2f
     181       1.1  mrg 1:	sub	r5, r5, #32
     182       1.1  mrg 	add	lr, lr, #32
     183       1.1  mrg 	cmp	yl, #1
     184       1.1  mrg 	shift1	lsl,ip, yh, lr
     185       1.1  mrg 	do_it	cs
     186       1.1  mrg 	orrcs	ip, ip, #2		@ 2 not 1, to allow lsr #1 later
     187       1.1  mrg 	shiftop adds xl xl yh asr r5 yh
     188       1.1  mrg 	adcs	xh, xh, yh, asr #31
     189       1.1  mrg 2:
     190       1.1  mrg 	@ We now have a result in xh-xl-ip.
     191       1.1  mrg 	@ Keep absolute value in xh-xl-ip, sign in r5 (the n bit was set above)
     192       1.1  mrg 	and	r5, xh, #0x80000000
     193       1.1  mrg 	bpl	LSYM(Lad_p)
                        @ Negative sum: negate the 96-bit value xh-xl-ip.
     194       1.1  mrg #if defined(__thumb2__)
     195       1.1  mrg 	mov	lr, #0
     196       1.1  mrg 	negs	ip, ip
     197       1.1  mrg 	sbcs	xl, lr, xl
     198       1.1  mrg 	sbc	xh, lr, xh
     199       1.1  mrg #else
     200       1.1  mrg 	rsbs	ip, ip, #0
     201       1.1  mrg 	rscs	xl, xl, #0
     202       1.1  mrg 	rsc	xh, xh, #0
     203       1.1  mrg #endif
     204       1.1  mrg 
     205       1.1  mrg 	@ Determine how to normalize the result.
                        @ (Also the entry point used by floatdidf for large values:
                        @ magnitude in xh-xl, exponent in r4, sign in r5.)
     206       1.1  mrg LSYM(Lad_p):
     207       1.1  mrg 	cmp	xh, #0x00100000
     208       1.1  mrg 	bcc	LSYM(Lad_a)
     209       1.1  mrg 	cmp	xh, #0x00200000
     210       1.1  mrg 	bcc	LSYM(Lad_e)
     211       1.1  mrg 
     212       1.1  mrg 	@ Result needs to be shifted right.
     213       1.1  mrg 	movs	xh, xh, lsr #1
     214       1.1  mrg 	movs	xl, xl, rrx
     215       1.1  mrg 	mov	ip, ip, rrx
     216       1.1  mrg 	add	r4, r4, #1
     217       1.1  mrg 
     218       1.1  mrg 	@ Make sure we did not bust our exponent.
     219       1.1  mrg 	mov	r2, r4, lsl #21
     220       1.1  mrg 	cmn	r2, #(2 << 21)
     221       1.1  mrg 	bcs	LSYM(Lad_o)
     222       1.1  mrg 
     223       1.1  mrg 	@ Our result is now properly aligned into xh-xl, remaining bits in ip.
     224       1.1  mrg 	@ Round with MSB of ip. If halfway between two numbers, round towards
     225       1.1  mrg 	@ LSB of xl = 0.
     226       1.1  mrg 	@ Pack final result together.
     227       1.1  mrg LSYM(Lad_e):
     228       1.1  mrg 	cmp	ip, #0x80000000
     229       1.1  mrg 	do_it	eq
     230       1.1  mrg 	COND(mov,s,eq)	ip, xl, lsr #1
     231       1.1  mrg 	adcs	xl, xl, #0
     232       1.1  mrg 	adc	xh, xh, r4, lsl #20
     233       1.1  mrg 	orr	xh, xh, r5
     234       1.1  mrg 	RETLDM	"r4, r5"
     235       1.1  mrg 
     236       1.1  mrg 	@ Result must be shifted left and exponent adjusted.
     237       1.1  mrg LSYM(Lad_a):
     238       1.1  mrg 	movs	ip, ip, lsl #1
     239       1.1  mrg 	adcs	xl, xl, xl
     240       1.1  mrg 	adc	xh, xh, xh
     241   1.1.1.8  mrg 	subs	r4, r4, #1
     242   1.1.1.8  mrg 	do_it	hs
     243   1.1.1.8  mrg 	cmphs	xh, #0x00100000
     244   1.1.1.8  mrg 	bhs	LSYM(Lad_e)
     245       1.1  mrg 
     246       1.1  mrg 	@ No rounding necessary since ip will always be 0 at this point.
     247       1.1  mrg LSYM(Lad_l):
                        @ Normalization entry also used by floatunsidf, floatsidf and
                        @ extendsfdf2: on entry xh-xl holds a non-zero unnormalized
                        @ magnitude, r4 the tentative exponent, r5 the sign bit, with
                        @ r4, r5, lr already pushed.  First count leading zeros into r3.
     248       1.1  mrg 
     249   1.1.1.8  mrg #if !defined (__ARM_FEATURE_CLZ)
     250       1.1  mrg 
                        @ No clz instruction: binary-search bit scan in r2/r3.
     251       1.1  mrg 	teq	xh, #0
     252       1.1  mrg 	movne	r3, #20
     253       1.1  mrg 	moveq	r3, #52
     254       1.1  mrg 	moveq	xh, xl
     255       1.1  mrg 	moveq	xl, #0
     256       1.1  mrg 	mov	r2, xh
     257       1.1  mrg 	cmp	r2, #(1 << 16)
     258       1.1  mrg 	movhs	r2, r2, lsr #16
     259       1.1  mrg 	subhs	r3, r3, #16
     260       1.1  mrg 	cmp	r2, #(1 << 8)
     261       1.1  mrg 	movhs	r2, r2, lsr #8
     262       1.1  mrg 	subhs	r3, r3, #8
     263       1.1  mrg 	cmp	r2, #(1 << 4)
     264       1.1  mrg 	movhs	r2, r2, lsr #4
     265       1.1  mrg 	subhs	r3, r3, #4
     266       1.1  mrg 	cmp	r2, #(1 << 2)
     267       1.1  mrg 	subhs	r3, r3, #2
     268       1.1  mrg 	sublo	r3, r3, r2, lsr #1
     269       1.1  mrg 	sub	r3, r3, r2, lsr #3
     270       1.1  mrg 
     271       1.1  mrg #else
     272       1.1  mrg 
     273       1.1  mrg 	teq	xh, #0
     274       1.1  mrg 	do_it	eq, t
     275       1.1  mrg 	moveq	xh, xl
     276       1.1  mrg 	moveq	xl, #0
     277       1.1  mrg 	clz	r3, xh
     278       1.1  mrg 	do_it	eq
     279       1.1  mrg 	addeq	r3, r3, #32
     280       1.1  mrg 	sub	r3, r3, #11
     281       1.1  mrg 
     282       1.1  mrg #endif
     283       1.1  mrg 
     284       1.1  mrg 	@ determine how to shift the value.
     285       1.1  mrg 	subs	r2, r3, #32
     286       1.1  mrg 	bge	2f
     287       1.1  mrg 	adds	r2, r2, #12
     288       1.1  mrg 	ble	1f
     289       1.1  mrg 
     290       1.1  mrg 	@ shift value left 21 to 31 bits, or actually right 11 to 1 bits
     291       1.1  mrg 	@ since a register switch happened above.
     292       1.1  mrg 	add	ip, r2, #20
     293       1.1  mrg 	rsb	r2, r2, #12
     294       1.1  mrg 	shift1	lsl, xl, xh, ip
     295       1.1  mrg 	shift1	lsr, xh, xh, r2
     296       1.1  mrg 	b	3f
     297       1.1  mrg 
     298       1.1  mrg 	@ actually shift value left 1 to 20 bits, which might also represent
     299       1.1  mrg 	@ 32 to 52 bits if counting the register switch that happened earlier.
     300       1.1  mrg 1:	add	r2, r2, #20
     301       1.1  mrg 2:	do_it	le
     302       1.1  mrg 	rsble	ip, r2, #32
     303       1.1  mrg 	shift1	lsl, xh, xh, r2
     304       1.1  mrg #if defined(__thumb2__)
     305       1.1  mrg 	lsr	ip, xl, ip
     306       1.1  mrg 	itt	le
     307       1.1  mrg 	orrle	xh, xh, ip
     308       1.1  mrg 	lslle	xl, xl, r2
     309       1.1  mrg #else
     310       1.1  mrg 	orrle	xh, xh, xl, lsr ip
     311       1.1  mrg 	movle	xl, xl, lsl r2
     312       1.1  mrg #endif
     313       1.1  mrg 
     314       1.1  mrg 	@ adjust exponent accordingly.
     315       1.1  mrg 3:	subs	r4, r4, r3
     316       1.1  mrg 	do_it	ge, tt
     317       1.1  mrg 	addge	xh, xh, r4, lsl #20
     318       1.1  mrg 	orrge	xh, xh, r5
     319       1.1  mrg 	RETLDM	"r4, r5" ge
     320       1.1  mrg 
     321       1.1  mrg 	@ Exponent too small, denormalize result.
     322       1.1  mrg 	@ Find out proper shift value.
     323       1.1  mrg 	mvn	r4, r4
     324       1.1  mrg 	subs	r4, r4, #31
     325       1.1  mrg 	bge	2f
     326       1.1  mrg 	adds	r4, r4, #12
     327       1.1  mrg 	bgt	1f
     328       1.1  mrg 
     329       1.1  mrg 	@ shift result right of 1 to 20 bits, sign is in r5.
     330       1.1  mrg 	add	r4, r4, #20
     331       1.1  mrg 	rsb	r2, r4, #32
     332       1.1  mrg 	shift1	lsr, xl, xl, r4
     333       1.1  mrg 	shiftop orr xl xl xh lsl r2 yh
     334       1.1  mrg 	shiftop orr xh r5 xh lsr r4 yh
     335       1.1  mrg 	RETLDM	"r4, r5"
     336       1.1  mrg 
     337       1.1  mrg 	@ shift result right of 21 to 31 bits, or left 11 to 1 bits after
     338       1.1  mrg 	@ a register switch from xh to xl.
     339       1.1  mrg 1:	rsb	r4, r4, #12
     340       1.1  mrg 	rsb	r2, r4, #32
     341       1.1  mrg 	shift1	lsr, xl, xl, r2
     342       1.1  mrg 	shiftop orr xl xl xh lsl r4 yh
     343       1.1  mrg 	mov	xh, r5
     344       1.1  mrg 	RETLDM	"r4, r5"
     345       1.1  mrg 
     346       1.1  mrg 	@ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
     347       1.1  mrg 	@ from xh to xl.
     348       1.1  mrg 2:	shift1	lsr, xl, xh, r4
     349       1.1  mrg 	mov	xh, r5
     350       1.1  mrg 	RETLDM	"r4, r5"
     351       1.1  mrg 
     352       1.1  mrg 	@ Adjust exponents for denormalized arguments.
     353       1.1  mrg 	@ Note that r4 must not remain equal to 0.
     354       1.1  mrg LSYM(Lad_d):
     355       1.1  mrg 	teq	r4, #0
     356       1.1  mrg 	eor	yh, yh, #0x00100000
     357       1.1  mrg 	do_it	eq, te
     358       1.1  mrg 	eoreq	xh, xh, #0x00100000
     359       1.1  mrg 	addeq	r4, r4, #1
     360       1.1  mrg 	subne	r5, r5, #1
     361       1.1  mrg 	b	LSYM(Lad_x)
     362       1.1  mrg 
     363       1.1  mrg 
                        @ Special-case dispatch: at least one of the tests at function
                        @ entry hit (zero operand, equal magnitudes, INF or NaN).
     364       1.1  mrg LSYM(Lad_s):
     365       1.1  mrg 	mvns	ip, r4, asr #21
     366       1.1  mrg 	do_it	ne
     367       1.1  mrg 	COND(mvn,s,ne)	ip, r5, asr #21
     368       1.1  mrg 	beq	LSYM(Lad_i)
     369       1.1  mrg 
     370       1.1  mrg 	teq	r4, r5
     371       1.1  mrg 	do_it	eq
     372       1.1  mrg 	teqeq	xl, yl
     373       1.1  mrg 	beq	1f
     374       1.1  mrg 
     375       1.1  mrg 	@ Result is x + 0.0 = x or 0.0 + y = y.
     376       1.1  mrg 	orrs	ip, r4, xl
     377       1.1  mrg 	do_it	eq, t
     378       1.1  mrg 	moveq	xh, yh
     379       1.1  mrg 	moveq	xl, yl
     380       1.1  mrg 	RETLDM	"r4, r5"
     381       1.1  mrg 
     382       1.1  mrg 1:	teq	xh, yh
     383       1.1  mrg 
     384       1.1  mrg 	@ Result is x - x = 0.
     385       1.1  mrg 	do_it	ne, tt
     386       1.1  mrg 	movne	xh, #0
     387       1.1  mrg 	movne	xl, #0
     388       1.1  mrg 	RETLDM	"r4, r5" ne
     389       1.1  mrg 
     390       1.1  mrg 	@ Result is x + x = 2x.
     391       1.1  mrg 	movs	ip, r4, lsr #21
     392       1.1  mrg 	bne	2f
     393       1.1  mrg 	movs	xl, xl, lsl #1
     394       1.1  mrg 	adcs	xh, xh, xh
     395       1.1  mrg 	do_it	cs
     396       1.1  mrg 	orrcs	xh, xh, #0x80000000
     397       1.1  mrg 	RETLDM	"r4, r5"
     398       1.1  mrg 2:	adds	r4, r4, #(2 << 21)
     399       1.1  mrg 	do_it	cc, t
     400       1.1  mrg 	addcc	xh, xh, #(1 << 20)
     401       1.1  mrg 	RETLDM	"r4, r5" cc
     402       1.1  mrg 	and	r5, xh, #0x80000000
     403       1.1  mrg 
     404       1.1  mrg 	@ Overflow: return INF.
     405       1.1  mrg LSYM(Lad_o):
     406       1.1  mrg 	orr	xh, r5, #0x7f000000
     407       1.1  mrg 	orr	xh, xh, #0x00f00000
     408       1.1  mrg 	mov	xl, #0
     409       1.1  mrg 	RETLDM	"r4, r5"
     410       1.1  mrg 
     411       1.1  mrg 	@ At least one of x or y is INF/NAN.
     412       1.1  mrg 	@   if xh-xl != INF/NAN: return yh-yl (which is INF/NAN)
     413       1.1  mrg 	@   if yh-yl != INF/NAN: return xh-xl (which is INF/NAN)
     414       1.1  mrg 	@   if either is NAN: return NAN
     415       1.1  mrg 	@   if opposite sign: return NAN
     416       1.1  mrg 	@   otherwise return xh-xl (which is INF or -INF)
     417       1.1  mrg LSYM(Lad_i):
     418       1.1  mrg 	mvns	ip, r4, asr #21
     419       1.1  mrg 	do_it	ne, te
     420       1.1  mrg 	movne	xh, yh
     421       1.1  mrg 	movne	xl, yl
     422       1.1  mrg 	COND(mvn,s,eq)	ip, r5, asr #21
     423       1.1  mrg 	do_it	ne, t
     424       1.1  mrg 	movne	yh, xh
     425       1.1  mrg 	movne	yl, xl
     426       1.1  mrg 	orrs	r4, xl, xh, lsl #12
     427       1.1  mrg 	do_it	eq, te
     428       1.1  mrg 	COND(orr,s,eq)	r5, yl, yh, lsl #12
     429       1.1  mrg 	teqeq	xh, yh
     430       1.1  mrg 	orrne	xh, xh, #0x00080000	@ quiet NAN
     431       1.1  mrg 	RETLDM	"r4, r5"
     432       1.1  mrg 
     433   1.1.1.3  mrg 	CFI_END_FUNCTION
     434       1.1  mrg 	FUNC_END aeabi_dsub
     435       1.1  mrg 	FUNC_END subdf3
     436       1.1  mrg 	FUNC_END aeabi_dadd
     437       1.1  mrg 	FUNC_END adddf3
    438       1.1  mrg 
                        @ -----------------------------------------------------------------------
                        @ floatunsidf / __aeabi_ui2d: convert unsigned 32-bit int to double.
                        @ In:  r0 = unsigned integer.
                        @ Out: xh-xl = (double) r0.
                        @ Zero returns early; otherwise sets up r4 = tentative exponent,
                        @ r5 = sign (0) and the raw magnitude in xh-xl, then reuses the
                        @ normalization code at Lad_l inside adddf3 above, which also
                        @ performs the RETLDM that pops r4, r5 and returns.
                        @ -----------------------------------------------------------------------
     439       1.1  mrg ARM_FUNC_START floatunsidf
     440       1.1  mrg ARM_FUNC_ALIAS aeabi_ui2d floatunsidf
     441   1.1.1.3  mrg 	CFI_START_FUNCTION
     442       1.1  mrg 
                        @ 0 converts to +0.0: clear the high word and return.
     443       1.1  mrg 	teq	r0, #0
     444       1.1  mrg 	do_it	eq, t
     445       1.1  mrg 	moveq	r1, #0
     446       1.1  mrg 	RETc(eq)
     447   1.1.1.3  mrg 
     448   1.1.1.3  mrg 	do_push {r4, r5, lr}        @ sp -= 12
     449   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 12   @ CFA is now sp + previousOffset + 12
     450   1.1.1.3  mrg 	.cfi_rel_offset r4, 0       @ Registers are saved from sp + 0 to sp + 8.
     451   1.1.1.3  mrg 	.cfi_rel_offset r5, 4
     452   1.1.1.3  mrg 	.cfi_rel_offset lr, 8
     453   1.1.1.3  mrg 
     454       1.1  mrg 	mov	r4, #0x400		@ initial exponent
     455       1.1  mrg 	add	r4, r4, #(52-1 - 1)
     456       1.1  mrg 	mov	r5, #0			@ sign bit is 0
     457       1.1  mrg 	.ifnc	xl, r0
     458       1.1  mrg 	mov	xl, r0
     459       1.1  mrg 	.endif
     460       1.1  mrg 	mov	xh, #0
     461       1.1  mrg 	b	LSYM(Lad_l)
     462       1.1  mrg 
     463   1.1.1.3  mrg 	CFI_END_FUNCTION
     464       1.1  mrg 	FUNC_END aeabi_ui2d
     465       1.1  mrg 	FUNC_END floatunsidf
    466       1.1  mrg 
                        @ -----------------------------------------------------------------------
                        @ floatsidf / __aeabi_i2d: convert signed 32-bit int to double.
                        @ In:  r0 = signed integer.
                        @ Out: xh-xl = (double) r0.
                        @ Same scheme as floatunsidf above, except the sign bit is
                        @ extracted into r5 and the absolute value is taken before
                        @ branching into the shared normalization code at Lad_l
                        @ (which also pops r4, r5 and returns).
                        @ -----------------------------------------------------------------------
     467       1.1  mrg ARM_FUNC_START floatsidf
     468       1.1  mrg ARM_FUNC_ALIAS aeabi_i2d floatsidf
     469   1.1.1.3  mrg 	CFI_START_FUNCTION
     470       1.1  mrg 
                        @ 0 converts to +0.0: clear the high word and return.
     471       1.1  mrg 	teq	r0, #0
     472       1.1  mrg 	do_it	eq, t
     473       1.1  mrg 	moveq	r1, #0
     474       1.1  mrg 	RETc(eq)
     475   1.1.1.3  mrg 
     476   1.1.1.3  mrg 	do_push {r4, r5, lr}        @ sp -= 12
     477   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 12   @ CFA is now sp + previousOffset + 12
     478   1.1.1.3  mrg 	.cfi_rel_offset r4, 0       @ Registers are saved from sp + 0 to sp + 8.
     479   1.1.1.3  mrg 	.cfi_rel_offset r5, 4
     480   1.1.1.3  mrg 	.cfi_rel_offset lr, 8
     481   1.1.1.3  mrg 
     482       1.1  mrg 	mov	r4, #0x400		@ initial exponent
     483       1.1  mrg 	add	r4, r4, #(52-1 - 1)
     484       1.1  mrg 	ands	r5, r0, #0x80000000	@ sign bit in r5
     485       1.1  mrg 	do_it	mi
     486       1.1  mrg 	rsbmi	r0, r0, #0		@ absolute value
     487       1.1  mrg 	.ifnc	xl, r0
     488       1.1  mrg 	mov	xl, r0
     489       1.1  mrg 	.endif
     490       1.1  mrg 	mov	xh, #0
     491       1.1  mrg 	b	LSYM(Lad_l)
     492       1.1  mrg 
     493   1.1.1.3  mrg 	CFI_END_FUNCTION
     494       1.1  mrg 	FUNC_END aeabi_i2d
     495       1.1  mrg 	FUNC_END floatsidf
    496       1.1  mrg 
                        @ -----------------------------------------------------------------------
                        @ extendsfdf2 / __aeabi_f2d: widen single precision to double.
                        @ In:  r0 = IEEE-754 single.
                        @ Out: xh-xl = same value as double.
                        @ Fast path: rebias the exponent in place (eor #0x38000000) and
                        @ redistribute the mantissa bits across xh-xl.  Zero, INF and NaN
                        @ are filtered out first; denormal singles fall through to the
                        @ shared normalization code at Lad_l in adddf3 above (which also
                        @ pops r4, r5 and returns).
                        @ -----------------------------------------------------------------------
     497       1.1  mrg ARM_FUNC_START extendsfdf2
     498       1.1  mrg ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
     499   1.1.1.3  mrg 	CFI_START_FUNCTION
     500       1.1  mrg 
     501       1.1  mrg 	movs	r2, r0, lsl #1		@ toss sign bit
     502       1.1  mrg 	mov	xh, r2, asr #3		@ stretch exponent
     503       1.1  mrg 	mov	xh, xh, rrx		@ retrieve sign bit
     504       1.1  mrg 	mov	xl, r2, lsl #28		@ retrieve remaining bits
     505       1.1  mrg 	do_it	ne, ttt
     506       1.1  mrg 	COND(and,s,ne)	r3, r2, #0xff000000	@ isolate exponent
     507       1.1  mrg 	teqne	r3, #0xff000000		@ if not 0, check if INF or NAN
     508       1.1  mrg 	eorne	xh, xh, #0x38000000	@ fixup exponent otherwise.
     509       1.1  mrg 	RETc(ne)			@ and return it.
     510       1.1  mrg 
     511   1.1.1.4  mrg 	bics	r2, r2, #0xff000000	@ isolate mantissa
     512   1.1.1.4  mrg 	do_it	eq			@ if 0, that is ZERO or INF,
     513       1.1  mrg 	RETc(eq)			@ we are done already.
     514       1.1  mrg 
     515   1.1.1.4  mrg 	teq	r3, #0xff000000		@ check for NAN
     516   1.1.1.4  mrg 	do_it	eq, t
     517   1.1.1.4  mrg 	orreq	xh, xh, #0x00080000	@ change to quiet NAN
     518   1.1.1.4  mrg 	RETc(eq)			@ and return it.
     519   1.1.1.4  mrg 
     520       1.1  mrg 	@ value was denormalized.  We can normalize it now.
     521       1.1  mrg 	do_push	{r4, r5, lr}
     522   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 12   @ CFA is now sp + previousOffset + 12
     523   1.1.1.3  mrg 	.cfi_rel_offset r4, 0       @ Registers are saved from sp + 0 to sp + 8.
     524   1.1.1.3  mrg 	.cfi_rel_offset r5, 4
     525   1.1.1.3  mrg 	.cfi_rel_offset lr, 8
     526   1.1.1.3  mrg 
     527       1.1  mrg 	mov	r4, #0x380		@ setup corresponding exponent
     528       1.1  mrg 	and	r5, xh, #0x80000000	@ move sign bit in r5
     529       1.1  mrg 	bic	xh, xh, #0x80000000
     530       1.1  mrg 	b	LSYM(Lad_l)
     531       1.1  mrg 
     532   1.1.1.3  mrg 	CFI_END_FUNCTION
     533       1.1  mrg 	FUNC_END aeabi_f2d
     534       1.1  mrg 	FUNC_END extendsfdf2
    535       1.1  mrg 
                        @ -----------------------------------------------------------------------
                        @ floatundidf / __aeabi_ul2d and floatdidf / __aeabi_l2d:
                        @ convert a 64-bit (unsigned resp. signed) integer to double.
                        @ In:  64-bit integer in ah-al (r0-r1; register naming macros are
                        @      defined elsewhere in this library -- assumed to mirror the
                        @      xh/xl word-order scheme above; TODO confirm against lib1funcs).
                        @ Out: xh-xl = converted double, rounded per the shared Lad_e code.
                        @ The unsigned entry forces the sign (r5) to 0 and skips the negate;
                        @ both variants then merge, and either finish via Lad_p in adddf3
                        @ (which also pops r4, r5 and returns) directly, or pre-scale
                        @ values wider than 53 bits first so rounding bits land in ip.
                        @ -----------------------------------------------------------------------
     536       1.1  mrg ARM_FUNC_START floatundidf
     537       1.1  mrg ARM_FUNC_ALIAS aeabi_ul2d floatundidf
     538   1.1.1.3  mrg 	CFI_START_FUNCTION
     539   1.1.1.3  mrg 	.cfi_remember_state        @ Save the current CFA state.
     540       1.1  mrg 
                        @ 0 converts to +0.0: both words already zero, return as-is.
     541       1.1  mrg 	orrs	r2, r0, r1
     542       1.1  mrg 	do_it	eq
     543       1.1  mrg 	RETc(eq)
     544       1.1  mrg 
     545   1.1.1.3  mrg 	do_push {r4, r5, lr}       @ sp -= 12
     546   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 12  @ CFA is now sp + previousOffset + 12
     547   1.1.1.3  mrg 	.cfi_rel_offset r4, 0      @ Registers are saved from sp + 0 to sp + 8
     548   1.1.1.3  mrg 	.cfi_rel_offset r5, 4
     549   1.1.1.3  mrg 	.cfi_rel_offset lr, 8
     550       1.1  mrg 
     551       1.1  mrg 	mov	r5, #0
     552       1.1  mrg 	b	2f
     553       1.1  mrg 
     554       1.1  mrg ARM_FUNC_START floatdidf
     555       1.1  mrg ARM_FUNC_ALIAS aeabi_l2d floatdidf
     556   1.1.1.3  mrg 	.cfi_restore_state
     557   1.1.1.3  mrg 	@ Restore the CFI state we saved above. If we didn't do this then the
     558   1.1.1.3  mrg 	@ following instructions would have the CFI state that was set by the
     559   1.1.1.3  mrg 	@ offset adjustments made in floatundidf.
     560       1.1  mrg 
     561       1.1  mrg 	orrs	r2, r0, r1
     562       1.1  mrg 	do_it	eq
     563       1.1  mrg 	RETc(eq)
     564       1.1  mrg 
     565   1.1.1.3  mrg 	do_push {r4, r5, lr}       @ sp -= 12
     566   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 12  @ CFA is now sp + previousOffset + 12
     567   1.1.1.3  mrg 	.cfi_rel_offset r4, 0      @ Registers are saved from sp to sp + 8
     568   1.1.1.3  mrg 	.cfi_rel_offset r5, 4
     569   1.1.1.3  mrg 	.cfi_rel_offset lr, 8
     570       1.1  mrg 
                        @ Signed variant: keep the sign in r5, take the absolute value.
     571       1.1  mrg 	ands	r5, ah, #0x80000000	@ sign bit in r5
     572       1.1  mrg 	bpl	2f
     573       1.1  mrg #if defined(__thumb2__)
     574       1.1  mrg 	negs	al, al
     575       1.1  mrg 	sbc	ah, ah, ah, lsl #1
     576       1.1  mrg #else
     577       1.1  mrg 	rsbs	al, al, #0
     578       1.1  mrg 	rsc	ah, ah, #0
     579       1.1  mrg #endif
     580       1.1  mrg 2:
     581       1.1  mrg 	mov	r4, #0x400		@ initial exponent
     582       1.1  mrg 	add	r4, r4, #(52-1 - 1)
     583       1.1  mrg 
     584       1.1  mrg 	@ If FP word order does not match integer word order, swap the words.
     585       1.1  mrg 	.ifnc	xh, ah
     586       1.1  mrg 	mov	ip, al
     587       1.1  mrg 	mov	xh, ah
     588       1.1  mrg 	mov	xl, ip
     589       1.1  mrg 	.endif
     590       1.1  mrg 
                        @ If the magnitude fits in 53 bits (no bits at or above bit 54),
                        @ hand it straight to the shared normalization path.
     591       1.1  mrg 	movs	ip, xh, lsr #22
     592       1.1  mrg 	beq	LSYM(Lad_p)
     593       1.1  mrg 
     594       1.1  mrg 	@ The value is too big.  Scale it down a bit...
                        @ Compute in r2 the shift count (a multiple-of-3 estimate driven
                        @ by how many 3-bit groups of ip are populated).
     595       1.1  mrg 	mov	r2, #3
     596       1.1  mrg 	movs	ip, ip, lsr #3
     597       1.1  mrg 	do_it	ne
     598       1.1  mrg 	addne	r2, r2, #3
     599       1.1  mrg 	movs	ip, ip, lsr #3
     600       1.1  mrg 	do_it	ne
     601       1.1  mrg 	addne	r2, r2, #3
     602       1.1  mrg 	add	r2, r2, ip, lsr #3
     603       1.1  mrg 
                        @ Shift xh-xl right by r2, catching the bits shifted out in ip
                        @ (they become the rounding bits), and bump the exponent to match.
     604       1.1  mrg 	rsb	r3, r2, #32
     605       1.1  mrg 	shift1	lsl, ip, xl, r3
     606       1.1  mrg 	shift1	lsr, xl, xl, r2
     607       1.1  mrg 	shiftop orr xl xl xh lsl r3 lr
     608       1.1  mrg 	shift1	lsr, xh, xh, r2
     609       1.1  mrg 	add	r4, r4, r2
     610       1.1  mrg 	b	LSYM(Lad_p)
     611       1.1  mrg 
     612   1.1.1.3  mrg 	CFI_END_FUNCTION
     613       1.1  mrg 	FUNC_END floatdidf
     614       1.1  mrg 	FUNC_END aeabi_l2d
     615       1.1  mrg 	FUNC_END floatundidf
     616       1.1  mrg 	FUNC_END aeabi_ul2d
    617       1.1  mrg 
    618       1.1  mrg #endif /* L_addsubdf3 */
    619       1.1  mrg 
    620   1.1.1.8  mrg #if defined(L_arm_muldf3) || defined(L_arm_muldivdf3)
    621   1.1.1.8  mrg 
    622   1.1.1.8  mrg @ Define multiplication as weak in _arm_muldf3.o so that it can be overriden
    623   1.1.1.8  mrg @ by the global definition in _arm_muldivdf3.o.  This allows a program only
    624   1.1.1.8  mrg @ using multiplication to take the weak definition which does not contain the
    625   1.1.1.8  mrg @ division code. Programs using only division or both division and
    626   1.1.1.8  mrg @ multiplication will pull _arm_muldivdf3.o from which both the multiplication
    627   1.1.1.8  mrg @ and division are taken thanks to the override.
    628   1.1.1.8  mrg #ifdef L_arm_muldf3
    629   1.1.1.8  mrg WEAK muldf3
    630   1.1.1.8  mrg WEAK aeabi_dmul
    631   1.1.1.8  mrg #endif
    632       1.1  mrg 
    633       1.1  mrg ARM_FUNC_START muldf3
    634       1.1  mrg ARM_FUNC_ALIAS aeabi_dmul muldf3
    635   1.1.1.3  mrg 	CFI_START_FUNCTION
    636   1.1.1.3  mrg 
    637   1.1.1.3  mrg 	do_push {r4, r5, r6, lr}    @ sp -= 16
    638   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 16   @ CFA is now sp + previousOffset + 16
    639   1.1.1.3  mrg 	.cfi_rel_offset r4, 0       @ Registers are saved from sp to sp + 12.
    640   1.1.1.3  mrg 	.cfi_rel_offset r5, 4
    641   1.1.1.3  mrg 	.cfi_rel_offset r6, 8
    642   1.1.1.3  mrg 	.cfi_rel_offset lr, 12
    643       1.1  mrg 
    644       1.1  mrg 	@ Mask out exponents, trap any zero/denormal/INF/NAN.
    645       1.1  mrg 	mov	ip, #0xff
    646       1.1  mrg 	orr	ip, ip, #0x700
    647       1.1  mrg 	ands	r4, ip, xh, lsr #20
    648       1.1  mrg 	do_it	ne, tte
    649       1.1  mrg 	COND(and,s,ne)	r5, ip, yh, lsr #20
    650       1.1  mrg 	teqne	r4, ip
    651       1.1  mrg 	teqne	r5, ip
    652       1.1  mrg 	bleq	LSYM(Lml_s)
    653       1.1  mrg 
    654       1.1  mrg 	@ Add exponents together
    655       1.1  mrg 	add	r4, r4, r5
    656       1.1  mrg 
    657       1.1  mrg 	@ Determine final sign.
    658       1.1  mrg 	eor	r6, xh, yh
    659       1.1  mrg 
    660       1.1  mrg 	@ Convert mantissa to unsigned integer.
    661       1.1  mrg 	@ If power of two, branch to a separate path.
    662       1.1  mrg 	bic	xh, xh, ip, lsl #21
    663       1.1  mrg 	bic	yh, yh, ip, lsl #21
    664       1.1  mrg 	orrs	r5, xl, xh, lsl #12
    665       1.1  mrg 	do_it	ne
    666       1.1  mrg 	COND(orr,s,ne)	r5, yl, yh, lsl #12
    667       1.1  mrg 	orr	xh, xh, #0x00100000
    668       1.1  mrg 	orr	yh, yh, #0x00100000
    669       1.1  mrg 	beq	LSYM(Lml_1)
    670       1.1  mrg 
    671       1.1  mrg 	@ Here is the actual multiplication.
    672   1.1.1.8  mrg 	@ This code works on architecture versions >= 4
    673       1.1  mrg 	umull	ip, lr, xl, yl
    674       1.1  mrg 	mov	r5, #0
    675       1.1  mrg 	umlal	lr, r5, xh, yl
    676       1.1  mrg 	and	yl, r6, #0x80000000
    677       1.1  mrg 	umlal	lr, r5, xl, yh
    678       1.1  mrg 	mov	r6, #0
    679       1.1  mrg 	umlal	r5, r6, xh, yh
    680       1.1  mrg 
    681       1.1  mrg 	@ The LSBs in ip are only significant for the final rounding.
    682       1.1  mrg 	@ Fold them into lr.
    683       1.1  mrg 	teq	ip, #0
    684       1.1  mrg 	do_it	ne
    685       1.1  mrg 	orrne	lr, lr, #1
    686       1.1  mrg 
    687       1.1  mrg 	@ Adjust result upon the MSB position.
    688       1.1  mrg 	sub	r4, r4, #0xff
    689       1.1  mrg 	cmp	r6, #(1 << (20-11))
    690       1.1  mrg 	sbc	r4, r4, #0x300
    691       1.1  mrg 	bcs	1f
    692       1.1  mrg 	movs	lr, lr, lsl #1
    693       1.1  mrg 	adcs	r5, r5, r5
    694       1.1  mrg 	adc	r6, r6, r6
    695       1.1  mrg 1:
    696       1.1  mrg 	@ Shift to final position, add sign to result.
    697       1.1  mrg 	orr	xh, yl, r6, lsl #11
    698       1.1  mrg 	orr	xh, xh, r5, lsr #21
    699       1.1  mrg 	mov	xl, r5, lsl #11
    700       1.1  mrg 	orr	xl, xl, lr, lsr #21
    701       1.1  mrg 	mov	lr, lr, lsl #11
    702       1.1  mrg 
    703       1.1  mrg 	@ Check exponent range for under/overflow.
    704       1.1  mrg 	subs	ip, r4, #(254 - 1)
    705       1.1  mrg 	do_it	hi
    706       1.1  mrg 	cmphi	ip, #0x700
    707       1.1  mrg 	bhi	LSYM(Lml_u)
    708       1.1  mrg 
    709       1.1  mrg 	@ Round the result, merge final exponent.
    710       1.1  mrg 	cmp	lr, #0x80000000
    711       1.1  mrg 	do_it	eq
    712       1.1  mrg 	COND(mov,s,eq)	lr, xl, lsr #1
    713       1.1  mrg 	adcs	xl, xl, #0
    714       1.1  mrg 	adc	xh, xh, r4, lsl #20
    715       1.1  mrg 	RETLDM	"r4, r5, r6"
    716       1.1  mrg 
    717       1.1  mrg 	@ Multiplication by 0x1p*: let''s shortcut a lot of code.
    718       1.1  mrg LSYM(Lml_1):
    719       1.1  mrg 	and	r6, r6, #0x80000000
    720       1.1  mrg 	orr	xh, r6, xh
    721       1.1  mrg 	orr	xl, xl, yl
    722       1.1  mrg 	eor	xh, xh, yh
    723       1.1  mrg 	subs	r4, r4, ip, lsr #1
    724       1.1  mrg 	do_it	gt, tt
    725       1.1  mrg 	COND(rsb,s,gt)	r5, r4, ip
    726       1.1  mrg 	orrgt	xh, xh, r4, lsl #20
    727       1.1  mrg 	RETLDM	"r4, r5, r6" gt
    728       1.1  mrg 
    729       1.1  mrg 	@ Under/overflow: fix things up for the code below.
    730       1.1  mrg 	orr	xh, xh, #0x00100000
    731       1.1  mrg 	mov	lr, #0
    732       1.1  mrg 	subs	r4, r4, #1
    733       1.1  mrg LSYM(Lml_u):
    734       1.1  mrg 	@ Overflow?
    735       1.1  mrg 	bgt	LSYM(Lml_o)
    736       1.1  mrg 
    737       1.1  mrg 	@ Check if denormalized result is possible, otherwise return signed 0.
    738       1.1  mrg 	cmn	r4, #(53 + 1)
    739       1.1  mrg 	do_it	le, tt
    740       1.1  mrg 	movle	xl, #0
    741       1.1  mrg 	bicle	xh, xh, #0x7fffffff
    742       1.1  mrg 	RETLDM	"r4, r5, r6" le
    743       1.1  mrg 
    744       1.1  mrg 	@ Find out proper shift value.
    745       1.1  mrg 	rsb	r4, r4, #0
    746       1.1  mrg 	subs	r4, r4, #32
    747       1.1  mrg 	bge	2f
    748       1.1  mrg 	adds	r4, r4, #12
    749       1.1  mrg 	bgt	1f
    750       1.1  mrg 
    751       1.1  mrg 	@ shift result right of 1 to 20 bits, preserve sign bit, round, etc.
    752       1.1  mrg 	add	r4, r4, #20
    753       1.1  mrg 	rsb	r5, r4, #32
    754       1.1  mrg 	shift1	lsl, r3, xl, r5
    755       1.1  mrg 	shift1	lsr, xl, xl, r4
    756       1.1  mrg 	shiftop orr xl xl xh lsl r5 r2
    757       1.1  mrg 	and	r2, xh, #0x80000000
    758       1.1  mrg 	bic	xh, xh, #0x80000000
    759       1.1  mrg 	adds	xl, xl, r3, lsr #31
    760       1.1  mrg 	shiftop adc xh r2 xh lsr r4 r6
    761       1.1  mrg 	orrs	lr, lr, r3, lsl #1
    762       1.1  mrg 	do_it	eq
    763       1.1  mrg 	biceq	xl, xl, r3, lsr #31
    764       1.1  mrg 	RETLDM	"r4, r5, r6"
    765       1.1  mrg 
    766       1.1  mrg 	@ shift result right of 21 to 31 bits, or left 11 to 1 bits after
    767       1.1  mrg 	@ a register switch from xh to xl. Then round.
    768       1.1  mrg 1:	rsb	r4, r4, #12
    769       1.1  mrg 	rsb	r5, r4, #32
    770       1.1  mrg 	shift1	lsl, r3, xl, r4
    771       1.1  mrg 	shift1	lsr, xl, xl, r5
    772       1.1  mrg 	shiftop orr xl xl xh lsl r4 r2
    773       1.1  mrg 	bic	xh, xh, #0x7fffffff
    774       1.1  mrg 	adds	xl, xl, r3, lsr #31
    775       1.1  mrg 	adc	xh, xh, #0
    776       1.1  mrg 	orrs	lr, lr, r3, lsl #1
    777       1.1  mrg 	do_it	eq
    778       1.1  mrg 	biceq	xl, xl, r3, lsr #31
    779       1.1  mrg 	RETLDM	"r4, r5, r6"
    780       1.1  mrg 
    781       1.1  mrg 	@ Shift value right of 32 to 64 bits, or 0 to 32 bits after a switch
    782       1.1  mrg 	@ from xh to xl.  Leftover bits are in r3-r6-lr for rounding.
    783       1.1  mrg 2:	rsb	r5, r4, #32
    784       1.1  mrg 	shiftop orr lr lr xl lsl r5 r2
    785       1.1  mrg 	shift1	lsr, r3, xl, r4
    786       1.1  mrg 	shiftop orr r3 r3 xh lsl r5 r2
    787       1.1  mrg 	shift1	lsr, xl, xh, r4
    788       1.1  mrg 	bic	xh, xh, #0x7fffffff
    789       1.1  mrg 	shiftop bic xl xl xh lsr r4 r2
    790       1.1  mrg 	add	xl, xl, r3, lsr #31
    791       1.1  mrg 	orrs	lr, lr, r3, lsl #1
    792       1.1  mrg 	do_it	eq
    793       1.1  mrg 	biceq	xl, xl, r3, lsr #31
    794       1.1  mrg 	RETLDM	"r4, r5, r6"
    795       1.1  mrg 
    796       1.1  mrg 	@ One or both arguments are denormalized.
    797       1.1  mrg 	@ Scale them leftwards and preserve sign bit.
    798       1.1  mrg LSYM(Lml_d):
    799       1.1  mrg 	teq	r4, #0
    800       1.1  mrg 	bne	2f
    801       1.1  mrg 	and	r6, xh, #0x80000000
    802       1.1  mrg 1:	movs	xl, xl, lsl #1
    803       1.1  mrg 	adc	xh, xh, xh
    804       1.1  mrg 	tst	xh, #0x00100000
    805       1.1  mrg 	do_it	eq
    806       1.1  mrg 	subeq	r4, r4, #1
    807       1.1  mrg 	beq	1b
    808       1.1  mrg 	orr	xh, xh, r6
    809       1.1  mrg 	teq	r5, #0
    810       1.1  mrg 	do_it	ne
    811       1.1  mrg 	RETc(ne)
    812       1.1  mrg 2:	and	r6, yh, #0x80000000
    813       1.1  mrg 3:	movs	yl, yl, lsl #1
    814       1.1  mrg 	adc	yh, yh, yh
    815       1.1  mrg 	tst	yh, #0x00100000
    816       1.1  mrg 	do_it	eq
    817       1.1  mrg 	subeq	r5, r5, #1
    818       1.1  mrg 	beq	3b
    819       1.1  mrg 	orr	yh, yh, r6
    820       1.1  mrg 	RET
    821       1.1  mrg 
    822       1.1  mrg LSYM(Lml_s):
    823       1.1  mrg 	@ Isolate the INF and NAN cases away
    824       1.1  mrg 	teq	r4, ip
    825       1.1  mrg 	and	r5, ip, yh, lsr #20
    826       1.1  mrg 	do_it	ne
    827       1.1  mrg 	teqne	r5, ip
    828       1.1  mrg 	beq	1f
    829       1.1  mrg 
    830       1.1  mrg 	@ Here, one or more arguments are either denormalized or zero.
    831       1.1  mrg 	orrs	r6, xl, xh, lsl #1
    832       1.1  mrg 	do_it	ne
    833       1.1  mrg 	COND(orr,s,ne)	r6, yl, yh, lsl #1
    834       1.1  mrg 	bne	LSYM(Lml_d)
    835       1.1  mrg 
    836       1.1  mrg 	@ Result is 0, but determine sign anyway.
    837       1.1  mrg LSYM(Lml_z):
    838       1.1  mrg 	eor	xh, xh, yh
    839       1.1  mrg 	and	xh, xh, #0x80000000
    840       1.1  mrg 	mov	xl, #0
    841       1.1  mrg 	RETLDM	"r4, r5, r6"
    842       1.1  mrg 
    843       1.1  mrg 1:	@ One or both args are INF or NAN.
    844       1.1  mrg 	orrs	r6, xl, xh, lsl #1
    845       1.1  mrg 	do_it	eq, te
    846       1.1  mrg 	moveq	xl, yl
    847       1.1  mrg 	moveq	xh, yh
    848       1.1  mrg 	COND(orr,s,ne)	r6, yl, yh, lsl #1
    849       1.1  mrg 	beq	LSYM(Lml_n)		@ 0 * INF or INF * 0 -> NAN
    850       1.1  mrg 	teq	r4, ip
    851       1.1  mrg 	bne	1f
    852       1.1  mrg 	orrs	r6, xl, xh, lsl #12
    853       1.1  mrg 	bne	LSYM(Lml_n)		@ NAN * <anything> -> NAN
    854       1.1  mrg 1:	teq	r5, ip
    855       1.1  mrg 	bne	LSYM(Lml_i)
    856       1.1  mrg 	orrs	r6, yl, yh, lsl #12
    857       1.1  mrg 	do_it	ne, t
    858       1.1  mrg 	movne	xl, yl
    859       1.1  mrg 	movne	xh, yh
    860       1.1  mrg 	bne	LSYM(Lml_n)		@ <anything> * NAN -> NAN
    861       1.1  mrg 
    862       1.1  mrg 	@ Result is INF, but we need to determine its sign.
    863       1.1  mrg LSYM(Lml_i):
    864       1.1  mrg 	eor	xh, xh, yh
    865       1.1  mrg 
    866       1.1  mrg 	@ Overflow: return INF (sign already in xh).
    867       1.1  mrg LSYM(Lml_o):
    868       1.1  mrg 	and	xh, xh, #0x80000000
    869       1.1  mrg 	orr	xh, xh, #0x7f000000
    870       1.1  mrg 	orr	xh, xh, #0x00f00000
    871       1.1  mrg 	mov	xl, #0
    872       1.1  mrg 	RETLDM	"r4, r5, r6"
    873       1.1  mrg 
    874       1.1  mrg 	@ Return a quiet NAN.
    875       1.1  mrg LSYM(Lml_n):
    876       1.1  mrg 	orr	xh, xh, #0x7f000000
    877       1.1  mrg 	orr	xh, xh, #0x00f80000
    878       1.1  mrg 	RETLDM	"r4, r5, r6"
    879       1.1  mrg 
    880   1.1.1.3  mrg 	CFI_END_FUNCTION
    881       1.1  mrg 	FUNC_END aeabi_dmul
    882       1.1  mrg 	FUNC_END muldf3
    883       1.1  mrg 
    884   1.1.1.8  mrg #ifdef L_arm_muldivdf3
    885   1.1.1.8  mrg 
    886       1.1  mrg ARM_FUNC_START divdf3
    887       1.1  mrg ARM_FUNC_ALIAS aeabi_ddiv divdf3
    888   1.1.1.3  mrg 	CFI_START_FUNCTION
    889       1.1  mrg 
    890       1.1  mrg 	do_push	{r4, r5, r6, lr}
    891   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 16
    892   1.1.1.3  mrg 	.cfi_rel_offset r4, 0
    893   1.1.1.3  mrg 	.cfi_rel_offset r5, 4
    894   1.1.1.3  mrg 	.cfi_rel_offset r6, 8
    895   1.1.1.3  mrg 	.cfi_rel_offset lr, 12
    896       1.1  mrg 
    897       1.1  mrg 	@ Mask out exponents, trap any zero/denormal/INF/NAN.
    898       1.1  mrg 	mov	ip, #0xff
    899       1.1  mrg 	orr	ip, ip, #0x700
    900       1.1  mrg 	ands	r4, ip, xh, lsr #20
    901       1.1  mrg 	do_it	ne, tte
    902       1.1  mrg 	COND(and,s,ne)	r5, ip, yh, lsr #20
    903       1.1  mrg 	teqne	r4, ip
    904       1.1  mrg 	teqne	r5, ip
    905       1.1  mrg 	bleq	LSYM(Ldv_s)
    906       1.1  mrg 
    907       1.1  mrg 	@ Subtract divisor exponent from dividend''s.
    908       1.1  mrg 	sub	r4, r4, r5
    909       1.1  mrg 
    910       1.1  mrg 	@ Preserve final sign into lr.
    911       1.1  mrg 	eor	lr, xh, yh
    912       1.1  mrg 
    913       1.1  mrg 	@ Convert mantissa to unsigned integer.
    914       1.1  mrg 	@ Dividend -> r5-r6, divisor -> yh-yl.
    915       1.1  mrg 	orrs	r5, yl, yh, lsl #12
    916       1.1  mrg 	mov	xh, xh, lsl #12
    917       1.1  mrg 	beq	LSYM(Ldv_1)
    918       1.1  mrg 	mov	yh, yh, lsl #12
    919       1.1  mrg 	mov	r5, #0x10000000
    920       1.1  mrg 	orr	yh, r5, yh, lsr #4
    921       1.1  mrg 	orr	yh, yh, yl, lsr #24
    922       1.1  mrg 	mov	yl, yl, lsl #8
    923       1.1  mrg 	orr	r5, r5, xh, lsr #4
    924       1.1  mrg 	orr	r5, r5, xl, lsr #24
    925       1.1  mrg 	mov	r6, xl, lsl #8
    926       1.1  mrg 
    927       1.1  mrg 	@ Initialize xh with final sign bit.
    928       1.1  mrg 	and	xh, lr, #0x80000000
    929       1.1  mrg 
    930       1.1  mrg 	@ Ensure result will land to known bit position.
    931       1.1  mrg 	@ Apply exponent bias accordingly.
    932       1.1  mrg 	cmp	r5, yh
    933       1.1  mrg 	do_it	eq
    934       1.1  mrg 	cmpeq	r6, yl
    935       1.1  mrg 	adc	r4, r4, #(255 - 2)
    936       1.1  mrg 	add	r4, r4, #0x300
    937       1.1  mrg 	bcs	1f
    938       1.1  mrg 	movs	yh, yh, lsr #1
    939       1.1  mrg 	mov	yl, yl, rrx
    940       1.1  mrg 1:
    941       1.1  mrg 	@ Perform first subtraction to align result to a nibble.
    942       1.1  mrg 	subs	r6, r6, yl
    943       1.1  mrg 	sbc	r5, r5, yh
    944       1.1  mrg 	movs	yh, yh, lsr #1
    945       1.1  mrg 	mov	yl, yl, rrx
    946       1.1  mrg 	mov	xl, #0x00100000
    947       1.1  mrg 	mov	ip, #0x00080000
    948       1.1  mrg 
    949       1.1  mrg 	@ The actual division loop.
    950       1.1  mrg 1:	subs	lr, r6, yl
    951       1.1  mrg 	sbcs	lr, r5, yh
    952       1.1  mrg 	do_it	cs, tt
    953       1.1  mrg 	subcs	r6, r6, yl
    954       1.1  mrg 	movcs	r5, lr
    955       1.1  mrg 	orrcs	xl, xl, ip
    956       1.1  mrg 	movs	yh, yh, lsr #1
    957       1.1  mrg 	mov	yl, yl, rrx
    958       1.1  mrg 	subs	lr, r6, yl
    959       1.1  mrg 	sbcs	lr, r5, yh
    960       1.1  mrg 	do_it	cs, tt
    961       1.1  mrg 	subcs	r6, r6, yl
    962       1.1  mrg 	movcs	r5, lr
    963       1.1  mrg 	orrcs	xl, xl, ip, lsr #1
    964       1.1  mrg 	movs	yh, yh, lsr #1
    965       1.1  mrg 	mov	yl, yl, rrx
    966       1.1  mrg 	subs	lr, r6, yl
    967       1.1  mrg 	sbcs	lr, r5, yh
    968       1.1  mrg 	do_it	cs, tt
    969       1.1  mrg 	subcs	r6, r6, yl
    970       1.1  mrg 	movcs	r5, lr
    971       1.1  mrg 	orrcs	xl, xl, ip, lsr #2
    972       1.1  mrg 	movs	yh, yh, lsr #1
    973       1.1  mrg 	mov	yl, yl, rrx
    974       1.1  mrg 	subs	lr, r6, yl
    975       1.1  mrg 	sbcs	lr, r5, yh
    976       1.1  mrg 	do_it	cs, tt
    977       1.1  mrg 	subcs	r6, r6, yl
    978       1.1  mrg 	movcs	r5, lr
    979       1.1  mrg 	orrcs	xl, xl, ip, lsr #3
    980       1.1  mrg 
    981       1.1  mrg 	orrs	lr, r5, r6
    982       1.1  mrg 	beq	2f
    983       1.1  mrg 	mov	r5, r5, lsl #4
    984       1.1  mrg 	orr	r5, r5, r6, lsr #28
    985       1.1  mrg 	mov	r6, r6, lsl #4
    986       1.1  mrg 	mov	yh, yh, lsl #3
    987       1.1  mrg 	orr	yh, yh, yl, lsr #29
    988       1.1  mrg 	mov	yl, yl, lsl #3
    989       1.1  mrg 	movs	ip, ip, lsr #4
    990       1.1  mrg 	bne	1b
    991       1.1  mrg 
    992       1.1  mrg 	@ We are done with a word of the result.
    993       1.1  mrg 	@ Loop again for the low word if this pass was for the high word.
    994       1.1  mrg 	tst	xh, #0x00100000
    995       1.1  mrg 	bne	3f
    996       1.1  mrg 	orr	xh, xh, xl
    997       1.1  mrg 	mov	xl, #0
    998       1.1  mrg 	mov	ip, #0x80000000
    999       1.1  mrg 	b	1b
   1000       1.1  mrg 2:
   1001       1.1  mrg 	@ Be sure result starts in the high word.
   1002       1.1  mrg 	tst	xh, #0x00100000
   1003       1.1  mrg 	do_it	eq, t
   1004       1.1  mrg 	orreq	xh, xh, xl
   1005       1.1  mrg 	moveq	xl, #0
   1006       1.1  mrg 3:
   1007       1.1  mrg 	@ Check exponent range for under/overflow.
   1008       1.1  mrg 	subs	ip, r4, #(254 - 1)
   1009       1.1  mrg 	do_it	hi
   1010       1.1  mrg 	cmphi	ip, #0x700
   1011       1.1  mrg 	bhi	LSYM(Lml_u)
   1012       1.1  mrg 
   1013       1.1  mrg 	@ Round the result, merge final exponent.
   1014       1.1  mrg 	subs	ip, r5, yh
   1015       1.1  mrg 	do_it	eq, t
   1016       1.1  mrg 	COND(sub,s,eq)	ip, r6, yl
   1017       1.1  mrg 	COND(mov,s,eq)	ip, xl, lsr #1
   1018       1.1  mrg 	adcs	xl, xl, #0
   1019       1.1  mrg 	adc	xh, xh, r4, lsl #20
   1020       1.1  mrg 	RETLDM	"r4, r5, r6"
   1021       1.1  mrg 
   1022       1.1  mrg 	@ Division by 0x1p*: shortcut a lot of code.
   1023       1.1  mrg LSYM(Ldv_1):
   1024       1.1  mrg 	and	lr, lr, #0x80000000
   1025       1.1  mrg 	orr	xh, lr, xh, lsr #12
   1026       1.1  mrg 	adds	r4, r4, ip, lsr #1
   1027       1.1  mrg 	do_it	gt, tt
   1028       1.1  mrg 	COND(rsb,s,gt)	r5, r4, ip
   1029       1.1  mrg 	orrgt	xh, xh, r4, lsl #20
   1030       1.1  mrg 	RETLDM	"r4, r5, r6" gt
   1031       1.1  mrg 
   1032       1.1  mrg 	orr	xh, xh, #0x00100000
   1033       1.1  mrg 	mov	lr, #0
   1034       1.1  mrg 	subs	r4, r4, #1
   1035       1.1  mrg 	b	LSYM(Lml_u)
   1036       1.1  mrg 
   1037       1.1  mrg 	@ Result mightt need to be denormalized: put remainder bits
   1038       1.1  mrg 	@ in lr for rounding considerations.
   1039       1.1  mrg LSYM(Ldv_u):
   1040       1.1  mrg 	orr	lr, r5, r6
   1041       1.1  mrg 	b	LSYM(Lml_u)
   1042       1.1  mrg 
   1043       1.1  mrg 	@ One or both arguments is either INF, NAN or zero.
   1044       1.1  mrg LSYM(Ldv_s):
   1045       1.1  mrg 	and	r5, ip, yh, lsr #20
   1046       1.1  mrg 	teq	r4, ip
   1047       1.1  mrg 	do_it	eq
   1048       1.1  mrg 	teqeq	r5, ip
   1049       1.1  mrg 	beq	LSYM(Lml_n)		@ INF/NAN / INF/NAN -> NAN
   1050       1.1  mrg 	teq	r4, ip
   1051       1.1  mrg 	bne	1f
   1052       1.1  mrg 	orrs	r4, xl, xh, lsl #12
   1053       1.1  mrg 	bne	LSYM(Lml_n)		@ NAN / <anything> -> NAN
   1054       1.1  mrg 	teq	r5, ip
   1055       1.1  mrg 	bne	LSYM(Lml_i)		@ INF / <anything> -> INF
   1056       1.1  mrg 	mov	xl, yl
   1057       1.1  mrg 	mov	xh, yh
   1058       1.1  mrg 	b	LSYM(Lml_n)		@ INF / (INF or NAN) -> NAN
   1059       1.1  mrg 1:	teq	r5, ip
   1060       1.1  mrg 	bne	2f
   1061       1.1  mrg 	orrs	r5, yl, yh, lsl #12
   1062       1.1  mrg 	beq	LSYM(Lml_z)		@ <anything> / INF -> 0
   1063       1.1  mrg 	mov	xl, yl
   1064       1.1  mrg 	mov	xh, yh
   1065       1.1  mrg 	b	LSYM(Lml_n)		@ <anything> / NAN -> NAN
   1066       1.1  mrg 2:	@ If both are nonzero, we need to normalize and resume above.
   1067       1.1  mrg 	orrs	r6, xl, xh, lsl #1
   1068       1.1  mrg 	do_it	ne
   1069       1.1  mrg 	COND(orr,s,ne)	r6, yl, yh, lsl #1
   1070       1.1  mrg 	bne	LSYM(Lml_d)
   1071       1.1  mrg 	@ One or both arguments are 0.
   1072       1.1  mrg 	orrs	r4, xl, xh, lsl #1
   1073       1.1  mrg 	bne	LSYM(Lml_i)		@ <non_zero> / 0 -> INF
   1074       1.1  mrg 	orrs	r5, yl, yh, lsl #1
   1075       1.1  mrg 	bne	LSYM(Lml_z)		@ 0 / <non_zero> -> 0
   1076       1.1  mrg 	b	LSYM(Lml_n)		@ 0 / 0 -> NAN
   1077       1.1  mrg 
   1078   1.1.1.3  mrg 	CFI_END_FUNCTION
   1079       1.1  mrg 	FUNC_END aeabi_ddiv
   1080       1.1  mrg 	FUNC_END divdf3
   1081       1.1  mrg 
   1082       1.1  mrg #endif /* L_muldivdf3 */
   1083   1.1.1.8  mrg #endif /* L_arm_muldf3 || L_arm_muldivdf3 */
   1084       1.1  mrg 
   1085       1.1  mrg #ifdef L_arm_cmpdf2
   1086       1.1  mrg 
   1087       1.1  mrg @ Note: only r0 (return value) and ip are clobbered here.
   1088       1.1  mrg 
   1089       1.1  mrg ARM_FUNC_START gtdf2
   1090       1.1  mrg ARM_FUNC_ALIAS gedf2 gtdf2
   1091   1.1.1.3  mrg 	CFI_START_FUNCTION
   1092       1.1  mrg 	mov	ip, #-1
   1093       1.1  mrg 	b	1f
   1094       1.1  mrg 
   1095       1.1  mrg ARM_FUNC_START ltdf2
   1096       1.1  mrg ARM_FUNC_ALIAS ledf2 ltdf2
   1097       1.1  mrg 	mov	ip, #1
   1098       1.1  mrg 	b	1f
   1099       1.1  mrg 
   1100       1.1  mrg ARM_FUNC_START cmpdf2
   1101       1.1  mrg ARM_FUNC_ALIAS nedf2 cmpdf2
   1102       1.1  mrg ARM_FUNC_ALIAS eqdf2 cmpdf2
   1103       1.1  mrg 	mov	ip, #1			@ how should we specify unordered here?
   1104       1.1  mrg 
   1105       1.1  mrg 1:	str	ip, [sp, #-4]!
   1106   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 4        @ CFA is now sp + previousOffset + 4.
   1107   1.1.1.3  mrg 	@ We're not adding CFI for ip as it's pushed into the stack
   1108   1.1.1.4  mrg 	@ only because it may be popped off later as a return value
   1109   1.1.1.4  mrg 	@ (i.e. we're not preserving it anyways).
   1110       1.1  mrg 
   1111       1.1  mrg 	@ Trap any INF/NAN first.
   1112       1.1  mrg 	mov	ip, xh, lsl #1
   1113       1.1  mrg 	mvns	ip, ip, asr #21
   1114       1.1  mrg 	mov	ip, yh, lsl #1
   1115       1.1  mrg 	do_it	ne
   1116       1.1  mrg 	COND(mvn,s,ne)	ip, ip, asr #21
   1117       1.1  mrg 	beq	3f
   1118   1.1.1.3  mrg 	.cfi_remember_state
   1119   1.1.1.4  mrg 	@ Save the current CFI state.  This is done because the branch
   1120   1.1.1.4  mrg 	@ is conditional, and if we don't take it we'll issue a
   1121   1.1.1.4  mrg 	@ .cfi_adjust_cfa_offset and return.  If we do take it,
   1122   1.1.1.4  mrg 	@ however, the .cfi_adjust_cfa_offset from the non-branch code
   1123   1.1.1.4  mrg 	@ will affect the branch code as well.  To avoid this we'll
   1124   1.1.1.4  mrg 	@ restore the current state before executing the branch code.
   1125       1.1  mrg 
   1126   1.1.1.4  mrg 	@ Test for equality.  Note that 0.0 is equal to -0.0.
   1127       1.1  mrg 2:	add	sp, sp, #4
   1128   1.1.1.3  mrg 	.cfi_adjust_cfa_offset -4       @ CFA is now sp + previousOffset.
   1129   1.1.1.3  mrg 
   1130       1.1  mrg 	orrs	ip, xl, xh, lsl #1	@ if x == 0.0 or -0.0
   1131       1.1  mrg 	do_it	eq, e
   1132       1.1  mrg 	COND(orr,s,eq)	ip, yl, yh, lsl #1	@ and y == 0.0 or -0.0
   1133       1.1  mrg 	teqne	xh, yh			@ or xh == yh
   1134       1.1  mrg 	do_it	eq, tt
   1135       1.1  mrg 	teqeq	xl, yl			@ and xl == yl
   1136       1.1  mrg 	moveq	r0, #0			@ then equal.
   1137       1.1  mrg 	RETc(eq)
   1138       1.1  mrg 
   1139       1.1  mrg 	@ Clear C flag
   1140       1.1  mrg 	cmn	r0, #0
   1141       1.1  mrg 
   1142       1.1  mrg 	@ Compare sign,
   1143       1.1  mrg 	teq	xh, yh
   1144       1.1  mrg 
   1145       1.1  mrg 	@ Compare values if same sign
   1146       1.1  mrg 	do_it	pl
   1147       1.1  mrg 	cmppl	xh, yh
   1148       1.1  mrg 	do_it	eq
   1149       1.1  mrg 	cmpeq	xl, yl
   1150       1.1  mrg 
   1151       1.1  mrg 	@ Result:
   1152       1.1  mrg 	do_it	cs, e
   1153       1.1  mrg 	movcs	r0, yh, asr #31
   1154       1.1  mrg 	mvncc	r0, yh, asr #31
   1155       1.1  mrg 	orr	r0, r0, #1
   1156       1.1  mrg 	RET
   1157       1.1  mrg 
   1158   1.1.1.3  mrg 3:  @ Look for a NAN.
   1159   1.1.1.3  mrg 
   1160   1.1.1.3  mrg 	@ Restore the previous CFI state (i.e. keep the CFI state as it was
   1161   1.1.1.3  mrg 	@ before the branch).
   1162   1.1.1.3  mrg 	.cfi_restore_state
   1163   1.1.1.3  mrg 
   1164   1.1.1.3  mrg 	mov ip, xh, lsl #1
   1165       1.1  mrg 	mvns	ip, ip, asr #21
   1166       1.1  mrg 	bne	4f
   1167       1.1  mrg 	orrs	ip, xl, xh, lsl #12
   1168       1.1  mrg 	bne	5f			@ x is NAN
   1169       1.1  mrg 4:	mov	ip, yh, lsl #1
   1170       1.1  mrg 	mvns	ip, ip, asr #21
   1171       1.1  mrg 	bne	2b
   1172       1.1  mrg 	orrs	ip, yl, yh, lsl #12
   1173       1.1  mrg 	beq	2b			@ y is not NAN
   1174   1.1.1.3  mrg 
   1175       1.1  mrg 5:	ldr	r0, [sp], #4		@ unordered return code
   1176   1.1.1.3  mrg 	.cfi_adjust_cfa_offset -4       @ CFA is now sp + previousOffset.
   1177   1.1.1.3  mrg 
   1178       1.1  mrg 	RET
   1179       1.1  mrg 
   1180   1.1.1.3  mrg 	CFI_END_FUNCTION
   1181       1.1  mrg 	FUNC_END gedf2
   1182       1.1  mrg 	FUNC_END gtdf2
   1183       1.1  mrg 	FUNC_END ledf2
   1184       1.1  mrg 	FUNC_END ltdf2
   1185       1.1  mrg 	FUNC_END nedf2
   1186       1.1  mrg 	FUNC_END eqdf2
   1187       1.1  mrg 	FUNC_END cmpdf2
   1188       1.1  mrg 
   1189       1.1  mrg ARM_FUNC_START aeabi_cdrcmple
   1190   1.1.1.3  mrg 	CFI_START_FUNCTION
   1191       1.1  mrg 
   1192       1.1  mrg 	mov	ip, r0
   1193       1.1  mrg 	mov	r0, r2
   1194       1.1  mrg 	mov	r2, ip
   1195       1.1  mrg 	mov	ip, r1
   1196       1.1  mrg 	mov	r1, r3
   1197       1.1  mrg 	mov	r3, ip
   1198       1.1  mrg 	b	6f
   1199   1.1.1.3  mrg 
   1200       1.1  mrg ARM_FUNC_START aeabi_cdcmpeq
   1201       1.1  mrg ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
   1202       1.1  mrg 
   1203       1.1  mrg 	@ The status-returning routines are required to preserve all
   1204       1.1  mrg 	@ registers except ip, lr, and cpsr.
   1205       1.1  mrg 6:	do_push	{r0, lr}
   1206   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8.
   1207   1.1.1.3  mrg 	.cfi_rel_offset r0, 0     @ Previous r0 is saved at sp.
   1208   1.1.1.3  mrg 	.cfi_rel_offset lr, 4     @ Previous lr is saved at sp + 4.
   1209   1.1.1.3  mrg 
   1210       1.1  mrg 	ARM_CALL cmpdf2
   1211       1.1  mrg 	@ Set the Z flag correctly, and the C flag unconditionally.
   1212       1.1  mrg 	cmp	r0, #0
   1213       1.1  mrg 	@ Clear the C flag if the return value was -1, indicating
   1214       1.1  mrg 	@ that the first operand was smaller than the second.
   1215       1.1  mrg 	do_it	mi
   1216       1.1  mrg 	cmnmi	r0, #0
   1217   1.1.1.3  mrg 
   1218       1.1  mrg 	RETLDM	"r0"
   1219       1.1  mrg 
   1220   1.1.1.3  mrg 	CFI_END_FUNCTION
   1221       1.1  mrg 	FUNC_END aeabi_cdcmple
   1222       1.1  mrg 	FUNC_END aeabi_cdcmpeq
   1223       1.1  mrg 	FUNC_END aeabi_cdrcmple
   1224       1.1  mrg 
   1225       1.1  mrg ARM_FUNC_START	aeabi_dcmpeq
   1226   1.1.1.3  mrg 	CFI_START_FUNCTION
   1227   1.1.1.3  mrg 
   1228   1.1.1.3  mrg 	str lr, [sp, #-8]!        @ sp -= 8
   1229   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
   1230   1.1.1.3  mrg 	.cfi_rel_offset lr, 0     @ lr is at sp
   1231       1.1  mrg 
   1232       1.1  mrg 	ARM_CALL aeabi_cdcmple
   1233       1.1  mrg 	do_it	eq, e
   1234       1.1  mrg 	moveq	r0, #1	@ Equal to.
   1235       1.1  mrg 	movne	r0, #0	@ Less than, greater than, or unordered.
   1236   1.1.1.3  mrg 
   1237       1.1  mrg 	RETLDM
   1238       1.1  mrg 
   1239   1.1.1.3  mrg 	CFI_END_FUNCTION
   1240       1.1  mrg 	FUNC_END aeabi_dcmpeq
   1241       1.1  mrg 
   1242       1.1  mrg ARM_FUNC_START	aeabi_dcmplt
   1243   1.1.1.3  mrg 	CFI_START_FUNCTION
   1244   1.1.1.3  mrg 
   1245   1.1.1.3  mrg 	str lr, [sp, #-8]!        @ sp -= 8
   1246   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
   1247   1.1.1.3  mrg 	.cfi_rel_offset lr, 0     @ lr is at sp
   1248       1.1  mrg 
   1249       1.1  mrg 	ARM_CALL aeabi_cdcmple
   1250       1.1  mrg 	do_it	cc, e
   1251       1.1  mrg 	movcc	r0, #1	@ Less than.
   1252       1.1  mrg 	movcs	r0, #0	@ Equal to, greater than, or unordered.
   1253       1.1  mrg 	RETLDM
   1254       1.1  mrg 
   1255   1.1.1.3  mrg 	CFI_END_FUNCTION
   1256       1.1  mrg 	FUNC_END aeabi_dcmplt
   1257       1.1  mrg 
   1258       1.1  mrg ARM_FUNC_START	aeabi_dcmple
   1259   1.1.1.3  mrg 	CFI_START_FUNCTION
   1260   1.1.1.3  mrg 
   1261   1.1.1.3  mrg 	str lr, [sp, #-8]!        @ sp -= 8
   1262   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
   1263   1.1.1.3  mrg 	.cfi_rel_offset lr, 0     @ lr is at sp
   1264       1.1  mrg 
   1265       1.1  mrg 	ARM_CALL aeabi_cdcmple
   1266       1.1  mrg 	do_it	ls, e
   1267       1.1  mrg 	movls	r0, #1  @ Less than or equal to.
   1268       1.1  mrg 	movhi	r0, #0	@ Greater than or unordered.
   1269       1.1  mrg 	RETLDM
   1270       1.1  mrg 
   1271   1.1.1.3  mrg 	CFI_END_FUNCTION
   1272       1.1  mrg 	FUNC_END aeabi_dcmple
   1273       1.1  mrg 
   1274       1.1  mrg ARM_FUNC_START	aeabi_dcmpge
   1275   1.1.1.3  mrg 	CFI_START_FUNCTION
   1276   1.1.1.3  mrg 
   1277   1.1.1.3  mrg 	str lr, [sp, #-8]!        @ sp -= 8
   1278   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
   1279   1.1.1.3  mrg 	.cfi_rel_offset lr, 0     @ lr is at sp
   1280       1.1  mrg 
   1281       1.1  mrg 	ARM_CALL aeabi_cdrcmple
   1282       1.1  mrg 	do_it	ls, e
   1283       1.1  mrg 	movls	r0, #1	@ Operand 2 is less than or equal to operand 1.
   1284       1.1  mrg 	movhi	r0, #0	@ Operand 2 greater than operand 1, or unordered.
   1285       1.1  mrg 	RETLDM
   1286       1.1  mrg 
   1287   1.1.1.3  mrg 	CFI_END_FUNCTION
   1288       1.1  mrg 	FUNC_END aeabi_dcmpge
   1289       1.1  mrg 
   1290       1.1  mrg ARM_FUNC_START	aeabi_dcmpgt
   1291   1.1.1.3  mrg 	CFI_START_FUNCTION
   1292   1.1.1.3  mrg 
   1293   1.1.1.3  mrg 	str lr, [sp, #-8]!        @ sp -= 8
   1294   1.1.1.3  mrg 	.cfi_adjust_cfa_offset 8  @ CFA is now sp + previousOffset + 8
   1295   1.1.1.3  mrg 	.cfi_rel_offset lr, 0     @ lr is at sp
   1296       1.1  mrg 
   1297       1.1  mrg 	ARM_CALL aeabi_cdrcmple
   1298       1.1  mrg 	do_it	cc, e
   1299       1.1  mrg 	movcc	r0, #1	@ Operand 2 is less than operand 1.
   1300       1.1  mrg 	movcs	r0, #0  @ Operand 2 is greater than or equal to operand 1,
   1301       1.1  mrg 			@ or they are unordered.
   1302       1.1  mrg 	RETLDM
   1303       1.1  mrg 
   1304   1.1.1.3  mrg 	CFI_END_FUNCTION
   1305       1.1  mrg 	FUNC_END aeabi_dcmpgt
   1306       1.1  mrg 
   1307       1.1  mrg #endif /* L_cmpdf2 */
   1308       1.1  mrg 
   1309       1.1  mrg #ifdef L_arm_unorddf2
   1310       1.1  mrg 
   1311       1.1  mrg ARM_FUNC_START unorddf2
   1312       1.1  mrg ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
   1313   1.1.1.3  mrg 	.cfi_startproc
   1314       1.1  mrg 
   1315       1.1  mrg 	mov	ip, xh, lsl #1
   1316       1.1  mrg 	mvns	ip, ip, asr #21
   1317       1.1  mrg 	bne	1f
   1318       1.1  mrg 	orrs	ip, xl, xh, lsl #12
   1319       1.1  mrg 	bne	3f			@ x is NAN
   1320       1.1  mrg 1:	mov	ip, yh, lsl #1
   1321       1.1  mrg 	mvns	ip, ip, asr #21
   1322       1.1  mrg 	bne	2f
   1323       1.1  mrg 	orrs	ip, yl, yh, lsl #12
   1324       1.1  mrg 	bne	3f			@ y is NAN
   1325       1.1  mrg 2:	mov	r0, #0			@ arguments are ordered.
   1326       1.1  mrg 	RET
   1327       1.1  mrg 
   1328       1.1  mrg 3:	mov	r0, #1			@ arguments are unordered.
   1329       1.1  mrg 	RET
   1330       1.1  mrg 
   1331   1.1.1.3  mrg 	.cfi_endproc
   1332       1.1  mrg 	FUNC_END aeabi_dcmpun
   1333       1.1  mrg 	FUNC_END unorddf2
   1334       1.1  mrg 
   1335       1.1  mrg #endif /* L_unorddf2 */
   1336       1.1  mrg 
   1337       1.1  mrg #ifdef L_arm_fixdfsi
   1338       1.1  mrg 
   1339       1.1  mrg ARM_FUNC_START fixdfsi
   1340       1.1  mrg ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
   1341   1.1.1.3  mrg 	CFI_START_FUNCTION
   1342       1.1  mrg 
   1343       1.1  mrg 	@ check exponent range.
   1344       1.1  mrg 	mov	r2, xh, lsl #1
   1345       1.1  mrg 	adds	r2, r2, #(1 << 21)
   1346       1.1  mrg 	bcs	2f			@ value is INF or NAN
   1347       1.1  mrg 	bpl	1f			@ value is too small
   1348       1.1  mrg 	mov	r3, #(0xfffffc00 + 31)
   1349       1.1  mrg 	subs	r2, r3, r2, asr #21
   1350       1.1  mrg 	bls	3f			@ value is too large
   1351       1.1  mrg 
   1352       1.1  mrg 	@ scale value
   1353       1.1  mrg 	mov	r3, xh, lsl #11
   1354       1.1  mrg 	orr	r3, r3, #0x80000000
   1355       1.1  mrg 	orr	r3, r3, xl, lsr #21
   1356       1.1  mrg 	tst	xh, #0x80000000		@ the sign bit
   1357       1.1  mrg 	shift1	lsr, r0, r3, r2
   1358       1.1  mrg 	do_it	ne
   1359       1.1  mrg 	rsbne	r0, r0, #0
   1360       1.1  mrg 	RET
   1361       1.1  mrg 
   1362       1.1  mrg 1:	mov	r0, #0
   1363       1.1  mrg 	RET
   1364       1.1  mrg 
   1365       1.1  mrg 2:	orrs	xl, xl, xh, lsl #12
   1366       1.1  mrg 	bne	4f			@ x is NAN.
   1367       1.1  mrg 3:	ands	r0, xh, #0x80000000	@ the sign bit
   1368       1.1  mrg 	do_it	eq
   1369       1.1  mrg 	moveq	r0, #0x7fffffff		@ maximum signed positive si
   1370       1.1  mrg 	RET
   1371       1.1  mrg 
   1372       1.1  mrg 4:	mov	r0, #0			@ How should we convert NAN?
   1373       1.1  mrg 	RET
   1374       1.1  mrg 
   1375   1.1.1.3  mrg 	CFI_END_FUNCTION
   1376       1.1  mrg 	FUNC_END aeabi_d2iz
   1377       1.1  mrg 	FUNC_END fixdfsi
   1378       1.1  mrg 
   1379       1.1  mrg #endif /* L_fixdfsi */
   1380       1.1  mrg 
   1381       1.1  mrg #ifdef L_arm_fixunsdfsi
   1382       1.1  mrg 
   1383       1.1  mrg ARM_FUNC_START fixunsdfsi
   1384       1.1  mrg ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
   1385   1.1.1.3  mrg 	CFI_START_FUNCTION
   1386       1.1  mrg 
   1387       1.1  mrg 	@ check exponent range.
   1388       1.1  mrg 	movs	r2, xh, lsl #1
   1389       1.1  mrg 	bcs	1f			@ value is negative
   1390       1.1  mrg 	adds	r2, r2, #(1 << 21)
   1391       1.1  mrg 	bcs	2f			@ value is INF or NAN
   1392       1.1  mrg 	bpl	1f			@ value is too small
   1393       1.1  mrg 	mov	r3, #(0xfffffc00 + 31)
   1394       1.1  mrg 	subs	r2, r3, r2, asr #21
   1395       1.1  mrg 	bmi	3f			@ value is too large
   1396       1.1  mrg 
   1397       1.1  mrg 	@ scale value
   1398       1.1  mrg 	mov	r3, xh, lsl #11
   1399       1.1  mrg 	orr	r3, r3, #0x80000000
   1400       1.1  mrg 	orr	r3, r3, xl, lsr #21
   1401       1.1  mrg 	shift1	lsr, r0, r3, r2
   1402       1.1  mrg 	RET
   1403       1.1  mrg 
   1404       1.1  mrg 1:	mov	r0, #0
   1405       1.1  mrg 	RET
   1406       1.1  mrg 
   1407       1.1  mrg 2:	orrs	xl, xl, xh, lsl #12
   1408       1.1  mrg 	bne	4f			@ value is NAN.
   1409       1.1  mrg 3:	mov	r0, #0xffffffff		@ maximum unsigned si
   1410       1.1  mrg 	RET
   1411       1.1  mrg 
   1412       1.1  mrg 4:	mov	r0, #0			@ How should we convert NAN?
   1413       1.1  mrg 	RET
   1414       1.1  mrg 
   1415   1.1.1.3  mrg 	CFI_END_FUNCTION
   1416       1.1  mrg 	FUNC_END aeabi_d2uiz
   1417       1.1  mrg 	FUNC_END fixunsdfsi
   1418       1.1  mrg 
   1419       1.1  mrg #endif /* L_fixunsdfsi */
   1420       1.1  mrg 
    1421       1.1  mrg #ifdef L_arm_truncdfsf2
    1422       1.1  mrg 
                        	@ float __truncdfsf2(double x)  (alias: __aeabi_d2f)
                        	@ Convert an IEEE-754 binary64 value to binary32 with
                        	@ round-to-nearest, ties-to-even.  The operand arrives in the
                        	@ register pair xh:xl; the single-precision result is returned
                        	@ in r0.  Overflow yields a signed INF, underflow a denormal or
                        	@ signed zero, and NaN the default quiet NaN 0x7fc00000.
    1423       1.1  mrg ARM_FUNC_START truncdfsf2
    1424       1.1  mrg ARM_FUNC_ALIAS aeabi_d2f truncdfsf2
    1425   1.1.1.3  mrg 	CFI_START_FUNCTION
    1426       1.1  mrg 
    1427       1.1  mrg 	@ check exponent range.
                        	@ r2 = xh << 1 puts the biased exponent in bits [31:21]; subtract
                        	@ (1023 - 127) << 21 to rebias from double to single.  The two
                        	@ conditional subs/rsbs then accept only rebiased exponents in
                        	@ [1, 254]; everything else (underflow, overflow, INF, NaN) takes
                        	@ the ls branch to 2f.
    1428       1.1  mrg 	mov	r2, xh, lsl #1
    1429       1.1  mrg 	subs	r3, r2, #((1023 - 127) << 21)
    1430       1.1  mrg 	do_it	cs, t
    1431       1.1  mrg 	COND(sub,s,cs)	ip, r3, #(1 << 21)
    1432       1.1  mrg 	COND(rsb,s,cs)	ip, ip, #(254 << 21)
    1433       1.1  mrg 	bls	2f			@ value is out of range
    1434       1.1  mrg 
    1435       1.1  mrg 1:	@ shift and round mantissa
                        	@ ip = sign bit; r2 keeps the 29 discarded low bits of xl at the
                        	@ top of the register; xl lsr #29 are the 3 bits of xl that join
                        	@ the single-precision mantissa.  r3 lsl #2 repositions the
                        	@ rebiased exponent into bits [30:23] and the upper mantissa
                        	@ into [22:3].  The cmp sets C when the discarded bits are >= half
                        	@ an ulp, so the adc rounds up (a carry out of the mantissa
                        	@ correctly bumps the exponent); on an exact tie (eq) the low bit
                        	@ is cleared to round to even.
    1436       1.1  mrg 	and	ip, xh, #0x80000000
    1437       1.1  mrg 	mov	r2, xl, lsl #3
    1438       1.1  mrg 	orr	xl, ip, xl, lsr #29
    1439       1.1  mrg 	cmp	r2, #0x80000000
    1440       1.1  mrg 	adc	r0, xl, r3, lsl #2
    1441       1.1  mrg 	do_it	eq
    1442       1.1  mrg 	biceq	r0, r0, #1
    1443       1.1  mrg 	RET
    1444       1.1  mrg 
    1445       1.1  mrg 2:	@ either overflow or underflow
                        	@ Bit 30 of xh distinguishes the two: set means the exponent was
                        	@ large (overflow / INF / NaN), clear means too small.
    1446       1.1  mrg 	tst	xh, #0x40000000
    1447       1.1  mrg 	bne	3f			@ overflow
    1448       1.1  mrg 
    1449       1.1  mrg 	@ check if denormalized value is possible
                        	@ A single denormal can absorb up to 23 extra exponent steps;
                        	@ below that (lt) the result is a signed zero.
    1450       1.1  mrg 	adds	r2, r3, #(23 << 21)
    1451       1.1  mrg 	do_it	lt, t
    1452       1.1  mrg 	andlt	r0, xh, #0x80000000	@ too small, return signed 0.
    1453       1.1  mrg 	RETc(lt)
    1454       1.1  mrg 
    1455       1.1  mrg 	@ denormalize value so we can resume with the code above afterwards.
                        	@ Make the implicit leading 1 explicit, then shift the 53-bit
                        	@ mantissa right by r2 (ip = 32 - r2 is the complementary shift
                        	@ for moving bits across the xh/xl word boundary).  Any bits
                        	@ shifted out of xl entirely are folded into xl's low bit as a
                        	@ sticky bit so the rounding at 1b still sees them.
    1456       1.1  mrg 	orr	xh, xh, #0x00100000
    1457       1.1  mrg 	mov	r2, r2, lsr #21
    1458       1.1  mrg 	rsb	r2, r2, #24
    1459       1.1  mrg 	rsb	ip, r2, #32
    1460       1.1  mrg #if defined(__thumb2__)
    1461       1.1  mrg 	lsls	r3, xl, ip
    1462       1.1  mrg #else
    1463       1.1  mrg 	movs	r3, xl, lsl ip
    1464       1.1  mrg #endif
    1465       1.1  mrg 	shift1	lsr, xl, xl, r2
    1466       1.1  mrg 	do_it	ne
    1467       1.1  mrg 	orrne	xl, xl, #1		@ fold r3 for rounding considerations.
                        	@ r3 = low 21 bits of xh (upper mantissa); merge its shifted-out
                        	@ bits into xl, then leave the remainder in r3 pre-shifted left
                        	@ by 1 so that the "r3 lsl #2" at 1b produces a zero exponent
                        	@ field plus the denormal mantissa.
    1468       1.1  mrg 	mov	r3, xh, lsl #11
    1469       1.1  mrg 	mov	r3, r3, lsr #11
    1470       1.1  mrg 	shiftop orr xl xl r3 lsl ip ip
    1471       1.1  mrg 	shift1	lsr, r3, r3, r2
    1472       1.1  mrg 	mov	r3, r3, lsl #1
    1473       1.1  mrg 	b	1b
    1474       1.1  mrg 
    1475       1.1  mrg 3:	@ check for NAN
                        	@ Z is set iff the exponent field of xh is all ones (r2 asr #21
                        	@ sign-extends to all ones only then); ne means a finite value
                        	@ that merely overflows -> INF at 5f.
    1476       1.1  mrg 	mvns	r3, r2, asr #21
    1477       1.1  mrg 	bne	5f			@ simple overflow
                        	@ Nonzero mantissa -> NaN: return the default quiet NaN,
                        	@ 0x7f000000 | 0x00c00000 = 0x7fc00000 (built in two orrs since
                        	@ the constant is not a valid single immediate).
    1478       1.1  mrg 	orrs	r3, xl, xh, lsl #12
    1479       1.1  mrg 	do_it	ne, tt
    1480       1.1  mrg 	movne	r0, #0x7f000000
    1481       1.1  mrg 	orrne	r0, r0, #0x00c00000
    1482       1.1  mrg 	RETc(ne)			@ return NAN
    1483       1.1  mrg 
    1484       1.1  mrg 5:	@ return INF with sign
                        	@ sign | 0x7f800000 (exponent all ones, mantissa zero).
    1485       1.1  mrg 	and	r0, xh, #0x80000000
    1486       1.1  mrg 	orr	r0, r0, #0x7f000000
    1487       1.1  mrg 	orr	r0, r0, #0x00800000
    1488       1.1  mrg 	RET
    1489       1.1  mrg 
    1490   1.1.1.3  mrg 	CFI_END_FUNCTION
    1491       1.1  mrg 	FUNC_END aeabi_d2f
    1492       1.1  mrg 	FUNC_END truncdfsf2
    1493       1.1  mrg 
    1494       1.1  mrg #endif /* L_arm_truncdfsf2 */
   1495