Home | History | Annotate | Line # | Download | only in arm
lib1funcs.S revision 1.1.1.11
      1       1.1  mrg @ libgcc routines for ARM cpu.
      2       1.1  mrg @ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
      3       1.1  mrg 
      4  1.1.1.11  mrg /* Copyright (C) 1995-2024 Free Software Foundation, Inc.
      5       1.1  mrg 
      6       1.1  mrg This file is free software; you can redistribute it and/or modify it
      7       1.1  mrg under the terms of the GNU General Public License as published by the
      8       1.1  mrg Free Software Foundation; either version 3, or (at your option) any
      9       1.1  mrg later version.
     10       1.1  mrg 
     11       1.1  mrg This file is distributed in the hope that it will be useful, but
     12       1.1  mrg WITHOUT ANY WARRANTY; without even the implied warranty of
     13       1.1  mrg MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14       1.1  mrg General Public License for more details.
     15       1.1  mrg 
     16       1.1  mrg Under Section 7 of GPL version 3, you are granted additional
     17       1.1  mrg permissions described in the GCC Runtime Library Exception, version
     18       1.1  mrg 3.1, as published by the Free Software Foundation.
     19       1.1  mrg 
     20       1.1  mrg You should have received a copy of the GNU General Public License and
     21       1.1  mrg a copy of the GCC Runtime Library Exception along with this program;
     22       1.1  mrg see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     23       1.1  mrg <http://www.gnu.org/licenses/>.  */
     24       1.1  mrg 
     25   1.1.1.9  mrg /* Everything in this file should now use unified syntax.  */
     26   1.1.1.9  mrg 
     27   1.1.1.9  mrg 	.syntax unified
     28   1.1.1.9  mrg 
     29       1.1  mrg /* An executable stack is *not* required for these functions.  */
     30       1.1  mrg #if defined(__ELF__) && defined(__linux__)
     31       1.1  mrg .section .note.GNU-stack,"",%progbits
     32       1.1  mrg .previous
     33       1.1  mrg #endif  /* __ELF__ and __linux__ */
     34       1.1  mrg 
     35       1.1  mrg #ifdef __ARM_EABI__
     36       1.1  mrg /* Some attributes that are common to all routines in this file.  */
     37       1.1  mrg 	/* Tag_ABI_align_needed: This code does not require 8-byte
     38       1.1  mrg 	   alignment from the caller.  */
     39       1.1  mrg 	/* .eabi_attribute 24, 0  -- default setting.  */
     40       1.1  mrg 	/* Tag_ABI_align_preserved: This code preserves 8-byte
     41       1.1  mrg 	   alignment in any callee.  */
     42       1.1  mrg 	.eabi_attribute 25, 1
     43       1.1  mrg #endif /* __ARM_EABI__ */
     44       1.1  mrg /* ------------------------------------------------------------------------ */
     45       1.1  mrg 
     46       1.1  mrg /* We need to know what prefix to add to function names.  */
     47       1.1  mrg 
     48       1.1  mrg #ifndef __USER_LABEL_PREFIX__
     49       1.1  mrg #error  __USER_LABEL_PREFIX__ not defined
     50       1.1  mrg #endif
     51       1.1  mrg 
     52       1.1  mrg /* ANSI concatenation macros.  */
     53       1.1  mrg 
     54       1.1  mrg #define CONCAT1(a, b) CONCAT2(a, b)
     55       1.1  mrg #define CONCAT2(a, b) a ## b
     56       1.1  mrg 
     57       1.1  mrg /* Use the right prefix for global labels.  */
     58       1.1  mrg 
     59       1.1  mrg #define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
     60       1.1  mrg 
     61       1.1  mrg #ifdef __ELF__
     62       1.1  mrg #ifdef __thumb__
     63       1.1  mrg #define __PLT__  /* Not supported in Thumb assembler (for now).  */
     64       1.1  mrg #elif defined __vxworks && !defined __PIC__
     65       1.1  mrg #define __PLT__ /* Not supported by the kernel loader.  */
     66       1.1  mrg #else
     67       1.1  mrg #define __PLT__ (PLT)
     68       1.1  mrg #endif
     69       1.1  mrg #define TYPE(x) .type SYM(x),function
     70       1.1  mrg #define SIZE(x) .size SYM(x), . - SYM(x)
     71       1.1  mrg #define LSYM(x) .x
     72       1.1  mrg #else
     73       1.1  mrg #define __PLT__
     74       1.1  mrg #define TYPE(x)
     75       1.1  mrg #define SIZE(x)
     76       1.1  mrg #define LSYM(x) x
     77       1.1  mrg #endif
     78       1.1  mrg 
     79       1.1  mrg /* Function end macros.  Variants for interworking.  */
     80       1.1  mrg 
     81       1.1  mrg /* There are times when we might prefer Thumb1 code even if ARM code is
     82       1.1  mrg    permitted, for example, the code might be smaller, or there might be
     83       1.1  mrg    interworking problems with switching to ARM state if interworking is
     84       1.1  mrg    disabled.  */
     85       1.1  mrg #if (defined(__thumb__)			\
     86       1.1  mrg      && !defined(__thumb2__)		\
     87       1.1  mrg      && (!defined(__THUMB_INTERWORK__)	\
     88       1.1  mrg 	 || defined (__OPTIMIZE_SIZE__)	\
     89   1.1.1.4  mrg 	 || !__ARM_ARCH_ISA_ARM))
     90       1.1  mrg # define __prefer_thumb__
     91       1.1  mrg #endif
     92       1.1  mrg 
     93   1.1.1.4  mrg #if !__ARM_ARCH_ISA_ARM && __ARM_ARCH_ISA_THUMB == 1
     94   1.1.1.4  mrg #define NOT_ISA_TARGET_32BIT 1
     95   1.1.1.4  mrg #endif
     96   1.1.1.4  mrg 
     97       1.1  mrg /* How to return from a function call depends on the architecture variant.  */
     98       1.1  mrg 
     99   1.1.1.8  mrg #if (__ARM_ARCH > 4) || defined(__ARM_ARCH_4T__)
    100       1.1  mrg 
    101       1.1  mrg # define RET		bx	lr
    102       1.1  mrg # define RETc(x)	bx##x	lr
    103       1.1  mrg 
    104       1.1  mrg /* Special precautions for interworking on armv4t.  */
    105   1.1.1.8  mrg # if (__ARM_ARCH == 4)
    106       1.1  mrg 
    107       1.1  mrg /* Always use bx, not ldr pc.  */
    108       1.1  mrg #  if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
    109       1.1  mrg #    define __INTERWORKING__
    110       1.1  mrg #   endif /* __THUMB__ || __THUMB_INTERWORK__ */
    111       1.1  mrg 
    112       1.1  mrg /* Include thumb stub before arm mode code.  */
    113       1.1  mrg #  if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
    114       1.1  mrg #   define __INTERWORKING_STUBS__
    115       1.1  mrg #  endif /* __thumb__ && !__THUMB_INTERWORK__ */
    116       1.1  mrg 
    117       1.1  mrg #endif /* __ARM_ARCH == 4 */
    118       1.1  mrg 
    119       1.1  mrg #else
    120       1.1  mrg 
    121       1.1  mrg # define RET		mov	pc, lr
    122       1.1  mrg # define RETc(x)	mov##x	pc, lr
    123       1.1  mrg 
    124       1.1  mrg #endif
    125       1.1  mrg 
     126       1.1  mrg .macro	cfi_pop		advance, reg, cfa_offset
/* Emit raw .debug_frame CFI: at location \advance, register \reg has been
   restored to its call-time value and the CFA offset is now \cfa_offset.
   Expands to nothing on non-ELF targets.  */
     127       1.1  mrg #ifdef __ELF__
     128       1.1  mrg 	.pushsection	.debug_frame
     129       1.1  mrg 	.byte	0x4		/* DW_CFA_advance_loc4 */
     130       1.1  mrg 	.4byte	\advance
     131       1.1  mrg 	.byte	(0xc0 | \reg)	/* DW_CFA_restore */
     132       1.1  mrg 	.byte	0xe		/* DW_CFA_def_cfa_offset */
     133       1.1  mrg 	.uleb128 \cfa_offset
     134       1.1  mrg 	.popsection
     135       1.1  mrg #endif
     136       1.1  mrg .endm
     137       1.1  mrg .macro	cfi_push	advance, reg, offset, cfa_offset
/* Emit raw .debug_frame CFI: at location \advance, register \reg was saved
   at CFA-relative byte offset \offset (negative, divided by the CIE data
   alignment factor of -4 below) and the CFA offset is now \cfa_offset.
   Expands to nothing on non-ELF targets.  */
     138       1.1  mrg #ifdef __ELF__
     139       1.1  mrg 	.pushsection	.debug_frame
     140       1.1  mrg 	.byte	0x4		/* DW_CFA_advance_loc4 */
     141       1.1  mrg 	.4byte	\advance
     142       1.1  mrg 	.byte	(0x80 | \reg)	/* DW_CFA_offset */
     143       1.1  mrg 	.uleb128 (\offset / -4)
     144       1.1  mrg 	.byte	0xe		/* DW_CFA_def_cfa_offset */
     145       1.1  mrg 	.uleb128 \cfa_offset
     146       1.1  mrg 	.popsection
     147       1.1  mrg #endif
     148       1.1  mrg .endm
     149       1.1  mrg .macro cfi_start	start_label, end_label
/* Open hand-written .debug_frame info: emit a minimal CIE (code alignment
   factor 1, data alignment factor -4, return address in r14, initial rule
   CFA = r13 + 0) followed by the header of an FDE covering
   [\start_label, \end_label).  cfi_push/cfi_pop append instructions to
   this FDE; cfi_end closes it.  Expands to nothing on non-ELF targets.  */
     150       1.1  mrg #ifdef __ELF__
     151       1.1  mrg 	.pushsection	.debug_frame
     152       1.1  mrg LSYM(Lstart_frame):
     153       1.1  mrg 	.4byte	LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
     154       1.1  mrg LSYM(Lstart_cie):
     155       1.1  mrg         .4byte	0xffffffff	@ CIE Identifier Tag
     156       1.1  mrg         .byte	0x1	@ CIE Version
     157       1.1  mrg         .ascii	"\0"	@ CIE Augmentation
     158       1.1  mrg         .uleb128 0x1	@ CIE Code Alignment Factor
     159       1.1  mrg         .sleb128 -4	@ CIE Data Alignment Factor
     160       1.1  mrg         .byte	0xe	@ CIE RA Column
     161       1.1  mrg         .byte	0xc	@ DW_CFA_def_cfa
     162       1.1  mrg         .uleb128 0xd
     163       1.1  mrg         .uleb128 0x0
     164       1.1  mrg 
     165       1.1  mrg 	.align 2
     166       1.1  mrg LSYM(Lend_cie):
     167       1.1  mrg 	.4byte	LSYM(Lend_fde)-LSYM(Lstart_fde)	@ FDE Length
     168       1.1  mrg LSYM(Lstart_fde):
     169       1.1  mrg 	.4byte	LSYM(Lstart_frame)	@ FDE CIE offset
     170       1.1  mrg 	.4byte	\start_label	@ FDE initial location
     171       1.1  mrg 	.4byte	\end_label-\start_label	@ FDE address range
     172       1.1  mrg 	.popsection
     173       1.1  mrg #endif
     174       1.1  mrg .endm
     175       1.1  mrg .macro cfi_end	end_label
/* Close the FDE opened by cfi_start, and define \end_label after the
   function body so the FDE's address range is correct.  Expands to
   nothing on non-ELF targets.  */
     176       1.1  mrg #ifdef __ELF__
     177       1.1  mrg 	.pushsection	.debug_frame
     178       1.1  mrg 	.align	2
     179       1.1  mrg LSYM(Lend_fde):
     180       1.1  mrg 	.popsection
     181       1.1  mrg \end_label:
     182       1.1  mrg #endif
     183       1.1  mrg .endm
    184       1.1  mrg 
    185       1.1  mrg /* Don't pass dirn, it's there just to get token pasting right.  */
    186       1.1  mrg 
     187       1.1  mrg .macro	RETLDM	regs=, cond=, unwind=, dirn=ia
/* Return from a function, restoring \regs and the return address from the
   stack.  With an empty \regs the return address is reloaded from an
   8-byte stack slot (matching the "str lr, [sp, #-8]!" in ARM_LDIV0).
   Under __INTERWORKING__ the return is always made via bx so Thumb
   callers work on armv4t; otherwise pc is loaded directly.  \unwind, if
   given, is the function start label, used to describe the lr restore to
   the unwinder via cfi_pop.  \cond predicates every instruction.  */
     188       1.1  mrg #if defined (__INTERWORKING__)
     189       1.1  mrg 	.ifc "\regs",""
     190       1.1  mrg 	ldr\cond	lr, [sp], #8
     191       1.1  mrg 	.else
     192       1.1  mrg # if defined(__thumb2__)
     193       1.1  mrg 	pop\cond	{\regs, lr}
     194       1.1  mrg # else
     195       1.1  mrg 	ldm\cond\dirn	sp!, {\regs, lr}
     196       1.1  mrg # endif
     197       1.1  mrg 	.endif
     198       1.1  mrg 	.ifnc "\unwind", ""
     199       1.1  mrg 	/* Mark LR as restored.  */
     200       1.1  mrg 97:	cfi_pop 97b - \unwind, 0xe, 0x0
     201       1.1  mrg 	.endif
     202       1.1  mrg 	bx\cond	lr
     203       1.1  mrg #else
     204       1.1  mrg 	/* Caller is responsible for providing IT instruction.  */
     205       1.1  mrg 	.ifc "\regs",""
     206       1.1  mrg 	ldr\cond	pc, [sp], #8
     207       1.1  mrg 	.else
     208       1.1  mrg # if defined(__thumb2__)
     209       1.1  mrg 	pop\cond	{\regs, pc}
     210       1.1  mrg # else
     211       1.1  mrg 	ldm\cond\dirn	sp!, {\regs, pc}
     212       1.1  mrg # endif
     213       1.1  mrg 	.endif
     214       1.1  mrg #endif
     215       1.1  mrg .endm
    216       1.1  mrg 
    217       1.1  mrg /* The Unified assembly syntax allows the same code to be assembled for both
    218       1.1  mrg    ARM and Thumb-2.  However this is only supported by recent gas, so define
    219       1.1  mrg    a set of macros to allow ARM code on older assemblers.  */
     220       1.1  mrg #if defined(__thumb2__)
/* Thumb-2: predication needs an explicit IT instruction before each
   conditional instruction, and push/pop are the native mnemonics.  */
     221       1.1  mrg .macro do_it cond, suffix=""
     222       1.1  mrg 	it\suffix	\cond
     223       1.1  mrg .endm
     224       1.1  mrg .macro shift1 op, arg0, arg1, arg2
     225       1.1  mrg 	\op	\arg0, \arg1, \arg2
     226       1.1  mrg .endm
     227       1.1  mrg #define do_push	push
     228       1.1  mrg #define do_pop	pop
     229       1.1  mrg /* Perform an arithmetic operation with a variable shift operand.  This
     230       1.1  mrg    requires two instructions and a scratch register on Thumb-2.  */
     231       1.1  mrg .macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
     232       1.1  mrg 	\shiftop \tmp, \src2, \shiftreg
     233       1.1  mrg 	\name \dest, \src1, \tmp
     234       1.1  mrg .endm
     235       1.1  mrg #else
/* Not Thumb-2: the condition is encoded in the opcode itself, so do_it
   expands to nothing, and a (register-)shifted operand folds into a
   single instruction.  */
     236       1.1  mrg .macro do_it cond, suffix=""
     237       1.1  mrg .endm
     238       1.1  mrg .macro shift1 op, arg0, arg1, arg2
     239       1.1  mrg 	mov	\arg0, \arg1, \op \arg2
     240       1.1  mrg .endm
     241       1.1  mrg #define do_push	stmfd sp!,
     242       1.1  mrg #define do_pop	ldmfd sp!,
     243       1.1  mrg .macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
     244       1.1  mrg 	\name \dest, \src1, \src2, \shiftop \shiftreg
     245       1.1  mrg .endm
     246       1.1  mrg #endif
    247       1.1  mrg 
    248   1.1.1.9  mrg #define COND(op1, op2, cond) op1 ## op2 ## cond
    249   1.1.1.9  mrg 
     250       1.1  mrg #ifdef __ARM_EABI__
     251       1.1  mrg .macro ARM_LDIV0 name signed
/* ARM-mode divide-by-zero tail (EABI): saturate r0 -- unsigned: all-ones
   when r0 is nonzero; signed: INT_MAX/INT_MIN by its sign (r0 is left 0
   when it was 0) -- then tail-call __aeabi_idiv0.  (r0 is presumed to
   hold the dividend here; the division entry points are not visible in
   this chunk.)  */
     252       1.1  mrg 	cmp	r0, #0
     253       1.1  mrg 	.ifc	\signed, unsigned
     254       1.1  mrg 	movne	r0, #0xffffffff
     255       1.1  mrg 	.else
     256       1.1  mrg 	movgt	r0, #0x7fffffff
     257       1.1  mrg 	movlt	r0, #0x80000000
     258       1.1  mrg 	.endif
     259       1.1  mrg 	b	SYM (__aeabi_idiv0) __PLT__
     260       1.1  mrg .endm
     261       1.1  mrg #else
     262       1.1  mrg .macro ARM_LDIV0 name signed
/* ARM-mode divide-by-zero tail (non-EABI): save lr in an 8-byte slot,
   call __div0, then return 0 via RETLDM.  */
     263       1.1  mrg 	str	lr, [sp, #-8]!
     264       1.1  mrg 98:	cfi_push 98b - __\name, 0xe, -0x8, 0x8
     265       1.1  mrg 	bl	SYM (__div0) __PLT__
     266       1.1  mrg 	mov	r0, #0			@ About as wrong as it could be.
     267       1.1  mrg 	RETLDM	unwind=98b
     268       1.1  mrg .endm
     269       1.1  mrg #endif
    270       1.1  mrg 
    271       1.1  mrg 
     272       1.1  mrg #ifdef __ARM_EABI__
     273       1.1  mrg .macro THUMB_LDIV0 name signed
/* Thumb divide-by-zero tail (EABI).  On 32-bit-capable targets, saturate
   r0 like ARM_LDIV0 and transfer to __aeabi_idiv0; on Thumb-1-only cores
   the saturation is skipped and r0 is zeroed around the call instead.  */
     274   1.1.1.4  mrg #ifdef NOT_ISA_TARGET_32BIT
     275   1.1.1.4  mrg 
     276   1.1.1.4  mrg 	push	{r0, lr}
     277   1.1.1.9  mrg 	movs	r0, #0
     278   1.1.1.4  mrg 	bl	SYM(__aeabi_idiv0)
     279       1.1  mrg 	@ We know we are not on armv4t, so pop pc is safe.
     280   1.1.1.4  mrg 	pop	{r1, pc}
     281   1.1.1.4  mrg 
     282       1.1  mrg #elif defined(__thumb2__)
     283       1.1  mrg 	.syntax unified
     284       1.1  mrg 	.ifc \signed, unsigned
     285       1.1  mrg 	cbz	r0, 1f
     286       1.1  mrg 	mov	r0, #0xffffffff
     287       1.1  mrg 1:
     288       1.1  mrg 	.else
     289       1.1  mrg 	cmp	r0, #0
     290       1.1  mrg 	do_it	gt
     291       1.1  mrg 	movgt	r0, #0x7fffffff
     292       1.1  mrg 	do_it	lt
     293       1.1  mrg 	movlt	r0, #0x80000000
     294       1.1  mrg 	.endif
     295       1.1  mrg 	b.w	SYM(__aeabi_idiv0) __PLT__
     296       1.1  mrg #else
/* Thumb-1 on an ARM-capable core: bx pc + nop switches to ARM state so
   the conditional moves below can be used, then branch in ARM state.  */
     297       1.1  mrg 	.align	2
     298       1.1  mrg 	bx	pc
     299       1.1  mrg 	nop
     300       1.1  mrg 	.arm
     301       1.1  mrg 	cmp	r0, #0
     302       1.1  mrg 	.ifc	\signed, unsigned
     303       1.1  mrg 	movne	r0, #0xffffffff
     304       1.1  mrg 	.else
     305       1.1  mrg 	movgt	r0, #0x7fffffff
     306       1.1  mrg 	movlt	r0, #0x80000000
     307       1.1  mrg 	.endif
     308       1.1  mrg 	b	SYM(__aeabi_idiv0) __PLT__
     309       1.1  mrg 	.thumb
     310       1.1  mrg #endif
     311       1.1  mrg .endm
     312       1.1  mrg #else
     313       1.1  mrg .macro THUMB_LDIV0 name signed
/* Thumb divide-by-zero tail (non-EABI): call __div0, return 0; return via
   bx when interworking so armv4t callers work.  */
     314       1.1  mrg 	push	{ r1, lr }
     315       1.1  mrg 98:	cfi_push 98b - __\name, 0xe, -0x4, 0x8
     316       1.1  mrg 	bl	SYM (__div0)
     317   1.1.1.9  mrg 	movs	r0, #0			@ About as wrong as it could be.
     318       1.1  mrg #if defined (__INTERWORKING__)
     319       1.1  mrg 	pop	{ r1, r2 }
     320       1.1  mrg 	bx	r2
     321       1.1  mrg #else
     322       1.1  mrg 	pop	{ r1, pc }
     323       1.1  mrg #endif
     324       1.1  mrg .endm
     325       1.1  mrg #endif
    326       1.1  mrg 
     327       1.1  mrg .macro FUNC_END name
/* Emit the .size directive for __\name, ending a FUNC_START function.  */
     328       1.1  mrg 	SIZE (__\name)
     329       1.1  mrg .endm
    330       1.1  mrg 
     331       1.1  mrg .macro DIV_FUNC_END name signed
/* Shared tail for the division routines: emit the fall-through
   divide-by-zero handler at Ldiv0 (Thumb or ARM flavour), wrap the whole
   function in .debug_frame info, and close __\name.  */
     332       1.1  mrg 	cfi_start	__\name, LSYM(Lend_div0)
     333       1.1  mrg LSYM(Ldiv0):
     334       1.1  mrg #ifdef __thumb__
     335       1.1  mrg 	THUMB_LDIV0 \name \signed
     336       1.1  mrg #else
     337       1.1  mrg 	ARM_LDIV0 \name \signed
     338       1.1  mrg #endif
     339       1.1  mrg 	cfi_end	LSYM(Lend_div0)
     340       1.1  mrg 	FUNC_END \name
     341       1.1  mrg .endm
    342       1.1  mrg 
     343       1.1  mrg .macro THUMB_FUNC_START name
/* Define the global Thumb function \name.  Note: unlike FUNC_START, no
   "__" is prepended to the symbol name.  */
     344       1.1  mrg 	.globl	SYM (\name)
     345       1.1  mrg 	TYPE	(\name)
     346       1.1  mrg 	.thumb_func
     347       1.1  mrg SYM (\name):
     348       1.1  mrg .endm
    349       1.1  mrg 
    350       1.1  mrg /* Function start macros.  Variants for ARM and Thumb.  */
    351       1.1  mrg 
    352       1.1  mrg #ifdef __thumb__
    353       1.1  mrg #define THUMB_FUNC .thumb_func
    354       1.1  mrg #define THUMB_CODE .force_thumb
    355       1.1  mrg # if defined(__thumb2__)
    356   1.1.1.9  mrg #define THUMB_SYNTAX
    357       1.1  mrg # else
    358       1.1  mrg #define THUMB_SYNTAX
    359       1.1  mrg # endif
    360       1.1  mrg #else
    361       1.1  mrg #define THUMB_FUNC
    362       1.1  mrg #define THUMB_CODE
    363       1.1  mrg #define THUMB_SYNTAX
    364       1.1  mrg #endif
    365       1.1  mrg 
     366   1.1.1.8  mrg .macro FUNC_START name
/* Define the global entry point __\name in .text, as Thumb or ARM code
   according to the compilation mode (the THUMB_* macros expand to nothing
   in ARM builds).  */
     367       1.1  mrg 	.text
     368       1.1  mrg 	.globl SYM (__\name)
     369       1.1  mrg 	TYPE (__\name)
     370       1.1  mrg 	.align 0
     371       1.1  mrg 	THUMB_CODE
     372       1.1  mrg 	THUMB_FUNC
     373       1.1  mrg 	THUMB_SYNTAX
     374       1.1  mrg SYM (__\name):
     375       1.1  mrg .endm
    376       1.1  mrg 
     377   1.1.1.2  mrg .macro ARM_SYM_START name
/* Define symbol \name with function type; no .globl is emitted, so
   visibility is whatever the caller arranged.  */
     378   1.1.1.2  mrg        TYPE (\name)
     379   1.1.1.2  mrg        .align 0
     380   1.1.1.2  mrg SYM (\name):
     381   1.1.1.2  mrg .endm
    382   1.1.1.2  mrg 
     383   1.1.1.2  mrg .macro SYM_END name
/* Emit the .size directive for \name (pairs with ARM_SYM_START).  */
     384   1.1.1.2  mrg        SIZE (\name)
     385   1.1.1.2  mrg .endm
    386   1.1.1.2  mrg 
    387       1.1  mrg /* Special function that will always be coded in ARM assembly, even if
    388       1.1  mrg    in Thumb-only compilation.  */
    389       1.1  mrg 
     390       1.1  mrg #if defined(__thumb2__)
     391       1.1  mrg 
     392       1.1  mrg /* For Thumb-2 we build everything in thumb mode.  */
     393   1.1.1.8  mrg .macro ARM_FUNC_START name
     394   1.1.1.8  mrg        FUNC_START \name
     395       1.1  mrg        .syntax unified
     396       1.1  mrg .endm
     397       1.1  mrg #define EQUIV .thumb_set
     398       1.1  mrg .macro  ARM_CALL name
     399       1.1  mrg 	bl	__\name
     400       1.1  mrg .endm
     401       1.1  mrg 
     402       1.1  mrg #elif defined(__INTERWORKING_STUBS__)
     403       1.1  mrg 
/* Thumb-1 build without interworking: give each ARM routine a Thumb entry
   (__\name) that switches to ARM state via bx pc, plus an ARM-mode label
   _L__\name at the real body.  */
     404       1.1  mrg .macro	ARM_FUNC_START name
     405       1.1  mrg 	FUNC_START \name
     406       1.1  mrg 	bx	pc
     407       1.1  mrg 	nop
     408       1.1  mrg 	.arm
     409       1.1  mrg /* A hook to tell gdb that we've switched to ARM mode.  Also used to call
     410       1.1  mrg    directly from other local arm routines.  */
     411       1.1  mrg _L__\name:
     412       1.1  mrg .endm
     413       1.1  mrg #define EQUIV .thumb_set
     414       1.1  mrg /* Branch directly to a function declared with ARM_FUNC_START.
     415       1.1  mrg    Must be called in arm mode.  */
     416       1.1  mrg .macro  ARM_CALL name
     417       1.1  mrg 	bl	_L__\name
     418       1.1  mrg .endm
     419       1.1  mrg 
     420       1.1  mrg #else /* !(__INTERWORKING_STUBS__ || __thumb2__) */
     421       1.1  mrg 
     422   1.1.1.4  mrg #ifdef NOT_ISA_TARGET_32BIT
     423       1.1  mrg #define EQUIV .thumb_set
     424       1.1  mrg #else
/* Plain ARM build: define __\name directly as ARM code in .text.  */
     425   1.1.1.8  mrg .macro	ARM_FUNC_START name
     426       1.1  mrg 	.text
     427       1.1  mrg 	.globl SYM (__\name)
     428       1.1  mrg 	TYPE (__\name)
     429       1.1  mrg 	.align 0
     430       1.1  mrg 	.arm
     431       1.1  mrg SYM (__\name):
     432       1.1  mrg .endm
     433       1.1  mrg #define EQUIV .set
     434       1.1  mrg .macro  ARM_CALL name
     435       1.1  mrg 	bl	__\name
     436       1.1  mrg .endm
     437       1.1  mrg #endif
     438       1.1  mrg 
     439       1.1  mrg #endif
    440       1.1  mrg 
     441       1.1  mrg .macro	FUNC_ALIAS new old
/* Make __\new a global alias for __\old; .thumb_set is used when
   assembling Thumb so the alias keeps the Thumb bit.  */
     442       1.1  mrg 	.globl	SYM (__\new)
     443       1.1  mrg #if defined (__thumb__)
     444       1.1  mrg 	.thumb_set	SYM (__\new), SYM (__\old)
     445       1.1  mrg #else
     446       1.1  mrg 	.set	SYM (__\new), SYM (__\old)
     447       1.1  mrg #endif
     448       1.1  mrg .endm
    449       1.1  mrg 
     450   1.1.1.4  mrg #ifndef NOT_ISA_TARGET_32BIT
     451       1.1  mrg .macro	ARM_FUNC_ALIAS new old
/* Like FUNC_ALIAS, but for functions declared with ARM_FUNC_START (EQUIV
   picks .set or .thumb_set to match that macro's mode); also aliases the
   _L__ ARM-entry label when interworking stubs are in use.  */
     452       1.1  mrg 	.globl	SYM (__\new)
     453       1.1  mrg 	EQUIV	SYM (__\new), SYM (__\old)
     454       1.1  mrg #if defined(__INTERWORKING_STUBS__)
     455       1.1  mrg 	.set	SYM (_L__\new), SYM (_L__\old)
     456       1.1  mrg #endif
     457       1.1  mrg .endm
     458       1.1  mrg #endif
    459       1.1  mrg 
    460       1.1  mrg #ifdef __ARMEB__
    461       1.1  mrg #define xxh r0
    462       1.1  mrg #define xxl r1
    463       1.1  mrg #define yyh r2
    464       1.1  mrg #define yyl r3
    465       1.1  mrg #else
    466       1.1  mrg #define xxh r1
    467       1.1  mrg #define xxl r0
    468       1.1  mrg #define yyh r3
    469       1.1  mrg #define yyl r2
    470       1.1  mrg #endif
    471       1.1  mrg 
     472       1.1  mrg #ifdef __ARM_EABI__
     473       1.1  mrg .macro	WEAK name
/* Mark __\name weak so another definition may override it.  */
     474       1.1  mrg 	.weak SYM (__\name)
     475       1.1  mrg .endm
     476       1.1  mrg #endif
    477       1.1  mrg 
    478       1.1  mrg #ifdef __thumb__
    479       1.1  mrg /* Register aliases.  */
    480       1.1  mrg 
    481       1.1  mrg work		.req	r4	@ XXXX is this safe ?
    482       1.1  mrg dividend	.req	r0
    483       1.1  mrg divisor		.req	r1
    484       1.1  mrg overdone	.req	r2
    485       1.1  mrg result		.req	r2
    486       1.1  mrg curbit		.req	r3
    487       1.1  mrg #endif
    488       1.1  mrg #if 0
    489       1.1  mrg ip		.req	r12
    490       1.1  mrg sp		.req	r13
    491       1.1  mrg lr		.req	r14
    492       1.1  mrg pc		.req	r15
    493       1.1  mrg #endif
    494       1.1  mrg 
    495       1.1  mrg /* ------------------------------------------------------------------------ */
    496       1.1  mrg /*		Bodies of the division and modulo routines.		    */
    497   1.1.1.9  mrg /* ------------------------------------------------------------------------ */
    498   1.1.1.9  mrg 
     499       1.1  mrg .macro ARM_DIV_BODY dividend, divisor, result, curbit
/* Core of the unsigned 32-bit division: divides \dividend by \divisor
   (which must be nonzero).  On exit \result holds the quotient and
   \dividend the remainder; \curbit is clobbered.  Three variants: an
   unrolled shift-and-subtract entered via a computed jump (CLZ available,
   optimizing for speed), a CLZ-normalized loop, and a pure
   shift-compare loop for cores without CLZ.  */
     500       1.1  mrg 
     501   1.1.1.8  mrg #if defined (__ARM_FEATURE_CLZ) && ! defined (__OPTIMIZE_SIZE__)
     502       1.1  mrg 
     503       1.1  mrg #if defined (__thumb2__)
     504       1.1  mrg 	clz	\curbit, \dividend
     505       1.1  mrg 	clz	\result, \divisor
     506       1.1  mrg 	sub	\curbit, \result, \curbit
     507       1.1  mrg 	rsb	\curbit, \curbit, #31
     508       1.1  mrg 	adr	\result, 1f
	@ Each unrolled step below is 16 bytes long, hence the "lsl #4".
     509       1.1  mrg 	add	\curbit, \result, \curbit, lsl #4
     510       1.1  mrg 	mov	\result, #0
     511       1.1  mrg 	mov	pc, \curbit
     512       1.1  mrg .p2align 3
     513       1.1  mrg 1:
     514       1.1  mrg 	.set	shift, 32
     515       1.1  mrg 	.rept	32
     516       1.1  mrg 	.set	shift, shift - 1
     517       1.1  mrg 	cmp.w	\dividend, \divisor, lsl #shift
     518       1.1  mrg 	nop.n
     519       1.1  mrg 	adc.w	\result, \result, \result
     520       1.1  mrg 	it	cs
     521       1.1  mrg 	subcs.w	\dividend, \dividend, \divisor, lsl #shift
     522       1.1  mrg 	.endr
     523       1.1  mrg #else
     524       1.1  mrg 	clz	\curbit, \dividend
     525       1.1  mrg 	clz	\result, \divisor
     526       1.1  mrg 	sub	\curbit, \result, \curbit
     527       1.1  mrg 	rsbs	\curbit, \curbit, #31
	@ \curbit = 3 * (31 - clz difference); each unrolled step below is
	@ three 4-byte ARM instructions, scaled by the "lsl #2".
     528       1.1  mrg 	addne	\curbit, \curbit, \curbit, lsl #1
     529       1.1  mrg 	mov	\result, #0
     530       1.1  mrg 	addne	pc, pc, \curbit, lsl #2
     531       1.1  mrg 	nop
     532       1.1  mrg 	.set	shift, 32
     533       1.1  mrg 	.rept	32
     534       1.1  mrg 	.set	shift, shift - 1
     535       1.1  mrg 	cmp	\dividend, \divisor, lsl #shift
     536       1.1  mrg 	adc	\result, \result, \result
     537       1.1  mrg 	subcs	\dividend, \dividend, \divisor, lsl #shift
     538       1.1  mrg 	.endr
     539       1.1  mrg #endif
     540       1.1  mrg 
     541   1.1.1.8  mrg #else /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */
     542   1.1.1.8  mrg #if defined (__ARM_FEATURE_CLZ)
     543       1.1  mrg 
     544       1.1  mrg 	clz	\curbit, \divisor
     545       1.1  mrg 	clz	\result, \dividend
     546       1.1  mrg 	sub	\result, \curbit, \result
     547       1.1  mrg 	mov	\curbit, #1
     548       1.1  mrg 	mov	\divisor, \divisor, lsl \result
     549       1.1  mrg 	mov	\curbit, \curbit, lsl \result
     550       1.1  mrg 	mov	\result, #0
     551       1.1  mrg 
     552   1.1.1.8  mrg #else /* !defined (__ARM_FEATURE_CLZ) */
     553       1.1  mrg 
     554       1.1  mrg 	@ Initially shift the divisor left 3 bits if possible,
     555       1.1  mrg 	@ set curbit accordingly.  This allows for curbit to be located
     556       1.1  mrg 	@ at the left end of each 4-bit nibbles in the division loop
     557       1.1  mrg 	@ to save one loop in most cases.
     558       1.1  mrg 	tst	\divisor, #0xe0000000
     559       1.1  mrg 	moveq	\divisor, \divisor, lsl #3
     560       1.1  mrg 	moveq	\curbit, #8
     561       1.1  mrg 	movne	\curbit, #1
     562       1.1  mrg 
     563       1.1  mrg 	@ Unless the divisor is very big, shift it up in multiples of
     564       1.1  mrg 	@ four bits, since this is the amount of unwinding in the main
     565       1.1  mrg 	@ division loop.  Continue shifting until the divisor is
     566       1.1  mrg 	@ larger than the dividend.
     567       1.1  mrg 1:	cmp	\divisor, #0x10000000
     568       1.1  mrg 	cmplo	\divisor, \dividend
     569       1.1  mrg 	movlo	\divisor, \divisor, lsl #4
     570       1.1  mrg 	movlo	\curbit, \curbit, lsl #4
     571       1.1  mrg 	blo	1b
     572       1.1  mrg 
     573       1.1  mrg 	@ For very big divisors, we must shift it a bit at a time, or
     574       1.1  mrg 	@ we will be in danger of overflowing.
     575       1.1  mrg 1:	cmp	\divisor, #0x80000000
     576       1.1  mrg 	cmplo	\divisor, \dividend
     577       1.1  mrg 	movlo	\divisor, \divisor, lsl #1
     578       1.1  mrg 	movlo	\curbit, \curbit, lsl #1
     579       1.1  mrg 	blo	1b
     580       1.1  mrg 
     581       1.1  mrg 	mov	\result, #0
     582       1.1  mrg 
     583   1.1.1.8  mrg #endif /* !defined (__ARM_FEATURE_CLZ) */
     584       1.1  mrg 
     585       1.1  mrg 	@ Division loop
     586       1.1  mrg 1:	cmp	\dividend, \divisor
     587       1.1  mrg 	do_it	hs, t
     588       1.1  mrg 	subhs	\dividend, \dividend, \divisor
     589       1.1  mrg 	orrhs	\result,   \result,   \curbit
     590       1.1  mrg 	cmp	\dividend, \divisor,  lsr #1
     591       1.1  mrg 	do_it	hs, t
     592       1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #1
     593       1.1  mrg 	orrhs	\result,   \result,   \curbit,  lsr #1
     594       1.1  mrg 	cmp	\dividend, \divisor,  lsr #2
     595       1.1  mrg 	do_it	hs, t
     596       1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #2
     597       1.1  mrg 	orrhs	\result,   \result,   \curbit,  lsr #2
     598       1.1  mrg 	cmp	\dividend, \divisor,  lsr #3
     599       1.1  mrg 	do_it	hs, t
     600       1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #3
     601       1.1  mrg 	orrhs	\result,   \result,   \curbit,  lsr #3
     602       1.1  mrg 	cmp	\dividend, #0			@ Early termination?
     603       1.1  mrg 	do_it	ne, t
	@ NOTE(review): "movnes" is the pre-UAL spelling (UAL: "movsne");
	@ it has shipped this way under .syntax unified -- confirm the
	@ target assembler accepts it before touching it.
     604       1.1  mrg 	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
     605       1.1  mrg 	movne	\divisor,  \divisor, lsr #4
     606       1.1  mrg 	bne	1b
     607       1.1  mrg 
     608   1.1.1.8  mrg #endif /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */
     609       1.1  mrg 
     610       1.1  mrg .endm
    611       1.1  mrg /* ------------------------------------------------------------------------ */
     612       1.1  mrg .macro ARM_DIV2_ORDER divisor, order
/* Set \order to the bit position of the highest set bit of \divisor,
   i.e. floor(log2(\divisor)) = 31 - clz(\divisor).  \divisor must be
   nonzero, and is clobbered (shifted down) in the non-CLZ variant.  */
     613       1.1  mrg 
     614   1.1.1.8  mrg #if defined (__ARM_FEATURE_CLZ)
     615       1.1  mrg 
     616       1.1  mrg 	clz	\order, \divisor
     617       1.1  mrg 	rsb	\order, \order, #31
     618       1.1  mrg 
     619       1.1  mrg #else
     620       1.1  mrg 
	@ Binary search: test 16-, 8- and 4-bit halves, accumulating the
	@ shift count in \order while narrowing \divisor down to 1..15.
     621       1.1  mrg 	cmp	\divisor, #(1 << 16)
     622       1.1  mrg 	movhs	\divisor, \divisor, lsr #16
     623       1.1  mrg 	movhs	\order, #16
     624       1.1  mrg 	movlo	\order, #0
     625       1.1  mrg 
     626       1.1  mrg 	cmp	\divisor, #(1 << 8)
     627       1.1  mrg 	movhs	\divisor, \divisor, lsr #8
     628       1.1  mrg 	addhs	\order, \order, #8
     629       1.1  mrg 
     630       1.1  mrg 	cmp	\divisor, #(1 << 4)
     631       1.1  mrg 	movhs	\divisor, \divisor, lsr #4
     632       1.1  mrg 	addhs	\order, \order, #4
     633       1.1  mrg 
	@ \divisor is now 1..15: add 3 for 5..15, else \divisor >> 1
	@ (0 for 1, 1 for 2..3, 2 for 4).
     634       1.1  mrg 	cmp	\divisor, #(1 << 2)
     635       1.1  mrg 	addhi	\order, \order, #3
     636       1.1  mrg 	addls	\order, \order, \divisor, lsr #1
     637       1.1  mrg 
     638       1.1  mrg #endif
     639       1.1  mrg 
     640       1.1  mrg .endm
    641       1.1  mrg /* ------------------------------------------------------------------------ */
    642       1.1  mrg .macro ARM_MOD_BODY dividend, divisor, order, spare
    643       1.1  mrg 
    644   1.1.1.8  mrg #if defined(__ARM_FEATURE_CLZ) && ! defined (__OPTIMIZE_SIZE__)
    645       1.1  mrg 
    646       1.1  mrg 	clz	\order, \divisor
    647       1.1  mrg 	clz	\spare, \dividend
    648       1.1  mrg 	sub	\order, \order, \spare
    649       1.1  mrg 	rsbs	\order, \order, #31
    650       1.1  mrg 	addne	pc, pc, \order, lsl #3
    651       1.1  mrg 	nop
    652       1.1  mrg 	.set	shift, 32
    653       1.1  mrg 	.rept	32
    654       1.1  mrg 	.set	shift, shift - 1
    655       1.1  mrg 	cmp	\dividend, \divisor, lsl #shift
    656       1.1  mrg 	subcs	\dividend, \dividend, \divisor, lsl #shift
    657       1.1  mrg 	.endr
    658       1.1  mrg 
    659   1.1.1.8  mrg #else /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */
    660   1.1.1.8  mrg #if defined (__ARM_FEATURE_CLZ)
    661       1.1  mrg 
    662       1.1  mrg 	clz	\order, \divisor
    663       1.1  mrg 	clz	\spare, \dividend
    664       1.1  mrg 	sub	\order, \order, \spare
    665       1.1  mrg 	mov	\divisor, \divisor, lsl \order
    666       1.1  mrg 
    667   1.1.1.8  mrg #else /* !defined (__ARM_FEATURE_CLZ) */
    668       1.1  mrg 
    669       1.1  mrg 	mov	\order, #0
    670       1.1  mrg 
    671       1.1  mrg 	@ Unless the divisor is very big, shift it up in multiples of
    672       1.1  mrg 	@ four bits, since this is the amount of unwinding in the main
    673       1.1  mrg 	@ division loop.  Continue shifting until the divisor is
    674       1.1  mrg 	@ larger than the dividend.
    675       1.1  mrg 1:	cmp	\divisor, #0x10000000
    676       1.1  mrg 	cmplo	\divisor, \dividend
    677       1.1  mrg 	movlo	\divisor, \divisor, lsl #4
    678       1.1  mrg 	addlo	\order, \order, #4
    679       1.1  mrg 	blo	1b
    680       1.1  mrg 
    681       1.1  mrg 	@ For very big divisors, we must shift it a bit at a time, or
    682       1.1  mrg 	@ we will be in danger of overflowing.
    683       1.1  mrg 1:	cmp	\divisor, #0x80000000
    684       1.1  mrg 	cmplo	\divisor, \dividend
    685       1.1  mrg 	movlo	\divisor, \divisor, lsl #1
    686       1.1  mrg 	addlo	\order, \order, #1
    687       1.1  mrg 	blo	1b
    688       1.1  mrg 
    689   1.1.1.8  mrg #endif /* !defined (__ARM_FEATURE_CLZ) */
    690       1.1  mrg 
    691       1.1  mrg 	@ Perform all needed subtractions to keep only the remainder.
    692       1.1  mrg 	@ Do comparisons in batches of 4 first.
    693       1.1  mrg 	subs	\order, \order, #3		@ yes, 3 is intended here
    694       1.1  mrg 	blt	2f
    695       1.1  mrg 
    696       1.1  mrg 1:	cmp	\dividend, \divisor
    697       1.1  mrg 	subhs	\dividend, \dividend, \divisor
    698       1.1  mrg 	cmp	\dividend, \divisor,  lsr #1
    699       1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #1
    700       1.1  mrg 	cmp	\dividend, \divisor,  lsr #2
    701       1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #2
    702       1.1  mrg 	cmp	\dividend, \divisor,  lsr #3
    703       1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #3
    704       1.1  mrg 	cmp	\dividend, #1
    705       1.1  mrg 	mov	\divisor, \divisor, lsr #4
    706       1.1  mrg 	subges	\order, \order, #4
    707       1.1  mrg 	bge	1b
    708       1.1  mrg 
    709       1.1  mrg 	tst	\order, #3
    710       1.1  mrg 	teqne	\dividend, #0
    711       1.1  mrg 	beq	5f
    712       1.1  mrg 
    713       1.1  mrg 	@ Either 1, 2 or 3 comparisons/subtractions are left.
    714       1.1  mrg 2:	cmn	\order, #2
    715       1.1  mrg 	blt	4f
    716       1.1  mrg 	beq	3f
    717       1.1  mrg 	cmp	\dividend, \divisor
    718       1.1  mrg 	subhs	\dividend, \dividend, \divisor
    719       1.1  mrg 	mov	\divisor,  \divisor,  lsr #1
    720       1.1  mrg 3:	cmp	\dividend, \divisor
    721       1.1  mrg 	subhs	\dividend, \dividend, \divisor
    722       1.1  mrg 	mov	\divisor,  \divisor,  lsr #1
    723       1.1  mrg 4:	cmp	\dividend, \divisor
    724       1.1  mrg 	subhs	\dividend, \dividend, \divisor
    725       1.1  mrg 5:
    726       1.1  mrg 
    727   1.1.1.8  mrg #endif /* !defined (__ARM_FEATURE_CLZ) || defined (__OPTIMIZE_SIZE__) */
    728       1.1  mrg 
    729       1.1  mrg .endm
    730       1.1  mrg /* ------------------------------------------------------------------------ */
    731       1.1  mrg .macro THUMB_DIV_MOD_BODY modulo
                       	@ Shift-and-subtract division core for Thumb-1.
                       	@ Entry: dividend and divisor hold the unsigned operands,
                       	@ curbit = 1 and, when \modulo == 0, result = 0 (see callers).
                       	@ Exit:  \modulo == 0: result   = dividend / divisor
                       	@        \modulo == 1: dividend = dividend % divisor
                       	@ Clobbers work (callers save/restore it) and, when
                       	@ \modulo == 1, ip.
    732       1.1  mrg 	@ Load the constant 0x10000000 into our work register.
    733   1.1.1.9  mrg 	movs	work, #1
    734   1.1.1.9  mrg 	lsls	work, #28
    735       1.1  mrg LSYM(Loop1):
    736       1.1  mrg 	@ Unless the divisor is very big, shift it up in multiples of
    737       1.1  mrg 	@ four bits, since this is the amount of unwinding in the main
    738       1.1  mrg 	@ division loop.  Continue shifting until the divisor is
    739       1.1  mrg 	@ larger than the dividend.
    740       1.1  mrg 	cmp	divisor, work
    741       1.1  mrg 	bhs	LSYM(Lbignum)
    742       1.1  mrg 	cmp	divisor, dividend
    743       1.1  mrg 	bhs	LSYM(Lbignum)
    744   1.1.1.9  mrg 	lsls	divisor, #4
    745   1.1.1.9  mrg 	lsls	curbit,  #4		@ keep curbit in step with the divisor shift
    746       1.1  mrg 	b	LSYM(Loop1)
    747       1.1  mrg LSYM(Lbignum):
    748       1.1  mrg 	@ Set work to 0x80000000
    749   1.1.1.9  mrg 	lsls	work, #3
    750       1.1  mrg LSYM(Loop2):
    751       1.1  mrg 	@ For very big divisors, we must shift it a bit at a time, or
    752       1.1  mrg 	@ we will be in danger of overflowing.
    753       1.1  mrg 	cmp	divisor, work
    754       1.1  mrg 	bhs	LSYM(Loop3)
    755       1.1  mrg 	cmp	divisor, dividend
    756       1.1  mrg 	bhs	LSYM(Loop3)
    757   1.1.1.9  mrg 	lsls	divisor, #1
    758   1.1.1.9  mrg 	lsls	curbit,  #1
    759       1.1  mrg 	b	LSYM(Loop2)
    760       1.1  mrg LSYM(Loop3):
    761       1.1  mrg 	@ Test for possible subtractions ...
    762       1.1  mrg   .if \modulo
    763       1.1  mrg 	@ ... On the final pass, this may subtract too much from the dividend,
    764       1.1  mrg 	@ so keep track of which subtractions are done, we can fix them up
    765       1.1  mrg 	@ afterwards.
    766   1.1.1.9  mrg 	movs	overdone, #0
    767       1.1  mrg 	cmp	dividend, divisor
    768       1.1  mrg 	blo	LSYM(Lover1)
    769   1.1.1.9  mrg 	subs	dividend, dividend, divisor
    770       1.1  mrg LSYM(Lover1):
    771   1.1.1.9  mrg 	lsrs	work, divisor, #1
    772       1.1  mrg 	cmp	dividend, work
    773       1.1  mrg 	blo	LSYM(Lover2)
    774   1.1.1.9  mrg 	subs	dividend, dividend, work
    775       1.1  mrg 	mov	ip, curbit
    776   1.1.1.9  mrg 	movs	work, #1
    777   1.1.1.9  mrg 	rors	curbit, work
    778   1.1.1.9  mrg 	orrs	overdone, curbit	@ record that divisor>>1 was subtracted (bit = curbit ror #1)
    779       1.1  mrg 	mov	curbit, ip
    780       1.1  mrg LSYM(Lover2):
    781   1.1.1.9  mrg 	lsrs	work, divisor, #2
    782       1.1  mrg 	cmp	dividend, work
    783       1.1  mrg 	blo	LSYM(Lover3)
    784   1.1.1.9  mrg 	subs	dividend, dividend, work
    785       1.1  mrg 	mov	ip, curbit
    786   1.1.1.9  mrg 	movs	work, #2
    787   1.1.1.9  mrg 	rors	curbit, work
    788   1.1.1.9  mrg 	orrs	overdone, curbit	@ record that divisor>>2 was subtracted
    789       1.1  mrg 	mov	curbit, ip
    790       1.1  mrg LSYM(Lover3):
    791   1.1.1.9  mrg 	lsrs	work, divisor, #3
    792       1.1  mrg 	cmp	dividend, work
    793       1.1  mrg 	blo	LSYM(Lover4)
    794   1.1.1.9  mrg 	subs	dividend, dividend, work
    795       1.1  mrg 	mov	ip, curbit
    796   1.1.1.9  mrg 	movs	work, #3
    797   1.1.1.9  mrg 	rors	curbit, work
    798   1.1.1.9  mrg 	orrs	overdone, curbit	@ record that divisor>>3 was subtracted
    799       1.1  mrg 	mov	curbit, ip
    800       1.1  mrg LSYM(Lover4):
    801       1.1  mrg 	mov	ip, curbit
    802       1.1  mrg   .else
    803       1.1  mrg 	@ ... and note which bits are done in the result.  On the final pass,
    804       1.1  mrg 	@ this may subtract too much from the dividend, but the result will be ok,
    805       1.1  mrg 	@ since the "bit" will have been shifted out at the bottom.
    806       1.1  mrg 	cmp	dividend, divisor
    807       1.1  mrg 	blo	LSYM(Lover1)
    808   1.1.1.9  mrg 	subs	dividend, dividend, divisor
    809   1.1.1.9  mrg 	orrs	result, result, curbit
    810       1.1  mrg LSYM(Lover1):
    811   1.1.1.9  mrg 	lsrs	work, divisor, #1
    812       1.1  mrg 	cmp	dividend, work
    813       1.1  mrg 	blo	LSYM(Lover2)
    814   1.1.1.9  mrg 	subs	dividend, dividend, work
    815   1.1.1.9  mrg 	lsrs	work, curbit, #1
    816   1.1.1.9  mrg 	orrs	result, work
    817       1.1  mrg LSYM(Lover2):
    818   1.1.1.9  mrg 	lsrs	work, divisor, #2
    819       1.1  mrg 	cmp	dividend, work
    820       1.1  mrg 	blo	LSYM(Lover3)
    821   1.1.1.9  mrg 	subs	dividend, dividend, work
    822   1.1.1.9  mrg 	lsrs	work, curbit, #2
    823   1.1.1.9  mrg 	orrs	result, work
    824       1.1  mrg LSYM(Lover3):
    825   1.1.1.9  mrg 	lsrs	work, divisor, #3
    826       1.1  mrg 	cmp	dividend, work
    827       1.1  mrg 	blo	LSYM(Lover4)
    828   1.1.1.9  mrg 	subs	dividend, dividend, work
    829   1.1.1.9  mrg 	lsrs	work, curbit, #3
    830   1.1.1.9  mrg 	orrs	result, work
    831       1.1  mrg LSYM(Lover4):
    832       1.1  mrg   .endif
    833       1.1  mrg 
    834       1.1  mrg 	cmp	dividend, #0			@ Early termination?
    835       1.1  mrg 	beq	LSYM(Lover5)
    836   1.1.1.9  mrg 	lsrs	curbit,  #4			@ No, any more bits to do?
    837       1.1  mrg 	beq	LSYM(Lover5)
    838   1.1.1.9  mrg 	lsrs	divisor, #4
    839       1.1  mrg 	b	LSYM(Loop3)
    840       1.1  mrg LSYM(Lover5):
    841       1.1  mrg   .if \modulo
    842       1.1  mrg 	@ Any subtractions that we should not have done will be recorded in
    843       1.1  mrg 	@ the top three bits of "overdone".  Exactly which were not needed
    844       1.1  mrg 	@ are governed by the position of the bit, stored in ip.
    845   1.1.1.9  mrg 	movs	work, #0xe
    846   1.1.1.9  mrg 	lsls	work, #28			@ work = 0xe0000000
    847   1.1.1.9  mrg 	ands	overdone, work
    848       1.1  mrg 	beq	LSYM(Lgot_result)
    849       1.1  mrg 
    850       1.1  mrg 	@ If we terminated early, because dividend became zero, then the
    851       1.1  mrg 	@ bit in ip will not be in the bottom nibble, and we should not
    852       1.1  mrg 	@ perform the additions below.  We must test for this though
    853       1.1  mrg 	@ (rather relying upon the TSTs to prevent the additions) since
    854       1.1  mrg 	@ the bit in ip could be in the top two bits which might then match
    855       1.1  mrg 	@ with one of the smaller RORs.
    856       1.1  mrg 	mov	curbit, ip
    857   1.1.1.9  mrg 	movs	work, #0x7
    858       1.1  mrg 	tst	curbit, work
    859       1.1  mrg 	beq	LSYM(Lgot_result)
    860       1.1  mrg 
    861       1.1  mrg 	mov	curbit, ip
    862   1.1.1.9  mrg 	movs	work, #3
    863   1.1.1.9  mrg 	rors	curbit, work
    864       1.1  mrg 	tst	overdone, curbit
    865       1.1  mrg 	beq	LSYM(Lover6)
    866   1.1.1.9  mrg 	lsrs	work, divisor, #3
    867   1.1.1.9  mrg 	adds	dividend, work			@ undo the divisor>>3 subtraction
    868       1.1  mrg LSYM(Lover6):
    869       1.1  mrg 	mov	curbit, ip
    870   1.1.1.9  mrg 	movs	work, #2
    871   1.1.1.9  mrg 	rors	curbit, work
    872       1.1  mrg 	tst	overdone, curbit
    873       1.1  mrg 	beq	LSYM(Lover7)
    874   1.1.1.9  mrg 	lsrs	work, divisor, #2
    875   1.1.1.9  mrg 	adds	dividend, work			@ undo the divisor>>2 subtraction
    876       1.1  mrg LSYM(Lover7):
    877       1.1  mrg 	mov	curbit, ip
    878   1.1.1.9  mrg 	movs	work, #1
    879   1.1.1.9  mrg 	rors	curbit, work
    880       1.1  mrg 	tst	overdone, curbit
    881       1.1  mrg 	beq	LSYM(Lgot_result)
    882   1.1.1.9  mrg 	lsrs	work, divisor, #1
    883   1.1.1.9  mrg 	adds	dividend, work			@ undo the divisor>>1 subtraction
    884       1.1  mrg   .endif
    885       1.1  mrg LSYM(Lgot_result):
    886   1.1.1.4  mrg .endm
    887   1.1.1.4  mrg 
    888   1.1.1.4  mrg /* If performance is preferred, the following functions are provided.  */
    889   1.1.1.4  mrg #if defined(__prefer_thumb__) && !defined(__OPTIMIZE_SIZE__)
    890   1.1.1.4  mrg 
    891   1.1.1.4  mrg /* Branch to div(\n), and jump to \label if curbit is lower than the divisor.  */
    892   1.1.1.4  mrg .macro BranchToDiv n, label
    893   1.1.1.9  mrg 	lsrs	curbit, dividend, \n	@ curbit = dividend >> \n
    894   1.1.1.4  mrg 	cmp	curbit, divisor
    895   1.1.1.4  mrg 	blo	\label			@ quotient fits below bit \n: start there
    896   1.1.1.4  mrg .endm
    897   1.1.1.4  mrg 
    898   1.1.1.4  mrg /* Body of div(n).  Compare the dividend shifted right by \n with the
    899   1.1.1.4  mrg    divisor; if not smaller, subtract divisor << \n from the dividend.
                       	@ The carry flag (set by the cmp, or by the subs when the
                       	@ subtraction happens) is the quotient bit for position \n and is
                       	@ shifted into result by the adcs.  */
    900   1.1.1.4  mrg .macro DoDiv n
    901   1.1.1.9  mrg 	lsrs	curbit, dividend, \n
    902   1.1.1.4  mrg 	cmp	curbit, divisor
    903   1.1.1.4  mrg 	bcc	1f			@ C clear: quotient bit \n is 0
    904   1.1.1.9  mrg 	lsls	curbit, divisor, \n
    905   1.1.1.9  mrg 	subs	dividend, dividend, curbit	@ leaves C set: bit \n is 1
    906   1.1.1.4  mrg 
    907   1.1.1.9  mrg 1:	adcs	result, result		@ result = (result << 1) | C; C = old bit 31
    908   1.1.1.4  mrg .endm
    909   1.1.1.4  mrg 
    910   1.1.1.4  mrg /* The body of division with positive divisor.  Unless the divisor is very
    911   1.1.1.4  mrg    big, shift it up in multiples of four bits, since this is the amount of
    912   1.1.1.4  mrg    unwinding in the main division loop.  Continue shifting until the divisor
    913   1.1.1.4  mrg    is larger than the dividend.  */
    914   1.1.1.4  mrg .macro THUMB1_Div_Positive
                       	@ Out: dividend = quotient, divisor = remainder (computed together).
    915   1.1.1.9  mrg 	movs	result, #0
    916   1.1.1.4  mrg 	BranchToDiv #1, LSYM(Lthumb1_div1)
    917   1.1.1.4  mrg 	BranchToDiv #4, LSYM(Lthumb1_div4)
    918   1.1.1.4  mrg 	BranchToDiv #8, LSYM(Lthumb1_div8)
    919   1.1.1.4  mrg 	BranchToDiv #12, LSYM(Lthumb1_div12)
    920   1.1.1.4  mrg 	BranchToDiv #16, LSYM(Lthumb1_div16)
    921   1.1.1.4  mrg LSYM(Lthumb1_div_large_positive):
    922   1.1.1.9  mrg 	movs	result, #0xff		@ becomes 0xff000000 after the rev below
    923   1.1.1.9  mrg 	lsls	divisor, divisor, #8
    924   1.1.1.4  mrg 	rev	result, result
    925   1.1.1.9  mrg 	lsrs	curbit, dividend, #16
    926   1.1.1.4  mrg 	cmp	curbit, divisor
    927   1.1.1.4  mrg 	blo	1f
    928   1.1.1.9  mrg 	asrs	result, #8		@ extend the leading ones: result = 0xffff0000
    929   1.1.1.9  mrg 	lsls	divisor, divisor, #8
    930   1.1.1.4  mrg 	beq	LSYM(Ldivbyzero_waypoint)
    931   1.1.1.4  mrg 
    932   1.1.1.9  mrg 1:	lsrs	curbit, dividend, #12
    933   1.1.1.4  mrg 	cmp	curbit, divisor
    934   1.1.1.4  mrg 	blo	LSYM(Lthumb1_div12)
    935   1.1.1.4  mrg 	b	LSYM(Lthumb1_div16)
    936   1.1.1.4  mrg LSYM(Lthumb1_div_loop):
    937   1.1.1.9  mrg 	lsrs	divisor, divisor, #8
    938   1.1.1.4  mrg LSYM(Lthumb1_div16):
                       	@ NOTE(review): "Dodiv" is the DoDiv macro above -- gas matches
                       	@ macro names case-insensitively.
                       	@ NOTE(review): the ones preloaded at the top of result appear to
                       	@ act as a loop counter: each adcs in DoDiv shifts one out into C,
                       	@ and the bcs below repeats while a one was shifted out -- confirm.
    939   1.1.1.4  mrg 	Dodiv	#15
    940   1.1.1.4  mrg 	Dodiv	#14
    941   1.1.1.4  mrg 	Dodiv	#13
    942   1.1.1.4  mrg 	Dodiv	#12
    943   1.1.1.4  mrg LSYM(Lthumb1_div12):
    944   1.1.1.4  mrg 	Dodiv	#11
    945   1.1.1.4  mrg 	Dodiv	#10
    946   1.1.1.4  mrg 	Dodiv	#9
    947   1.1.1.4  mrg 	Dodiv	#8
    948   1.1.1.4  mrg 	bcs	LSYM(Lthumb1_div_loop)
    949   1.1.1.4  mrg LSYM(Lthumb1_div8):
    950   1.1.1.4  mrg 	Dodiv	#7
    951   1.1.1.4  mrg 	Dodiv	#6
    952   1.1.1.4  mrg 	Dodiv	#5
    953   1.1.1.4  mrg LSYM(Lthumb1_div5):
    954   1.1.1.4  mrg 	Dodiv	#4
    955   1.1.1.4  mrg LSYM(Lthumb1_div4):
    956   1.1.1.4  mrg 	Dodiv	#3
    957   1.1.1.4  mrg LSYM(Lthumb1_div3):
    958   1.1.1.4  mrg 	Dodiv	#2
    959   1.1.1.4  mrg LSYM(Lthumb1_div2):
    960   1.1.1.4  mrg 	Dodiv	#1
    961   1.1.1.4  mrg LSYM(Lthumb1_div1):
    962   1.1.1.9  mrg 	subs	divisor, dividend, divisor	@ final bit; divisor becomes the remainder
    963   1.1.1.4  mrg 	bcs	1f
    964   1.1.1.4  mrg 	cpy	divisor, dividend
    965   1.1.1.4  mrg 
    966   1.1.1.9  mrg 1:	adcs	result, result		@ shift in the last quotient bit
    967   1.1.1.4  mrg 	cpy	dividend, result	@ return the quotient in dividend
    968   1.1.1.4  mrg 	RET
    969   1.1.1.4  mrg 
    970   1.1.1.4  mrg LSYM(Ldivbyzero_waypoint):
    971   1.1.1.4  mrg 	b	LSYM(Ldiv0)
    972   1.1.1.4  mrg .endm
    973   1.1.1.4  mrg 
    974   1.1.1.4  mrg /* The body of division with negative divisor.  Similar to
    975   1.1.1.4  mrg    THUMB1_Div_Positive except that the shift steps are in multiples
    976   1.1.1.4  mrg    of six bits.  */
    977   1.1.1.4  mrg .macro THUMB1_Div_Negative
    978   1.1.1.9  mrg 	lsrs	result, divisor, #31	@ result = sign bit of divisor; Z set if >= 0
    979   1.1.1.4  mrg 	beq	1f
    980   1.1.1.9  mrg 	negs	divisor, divisor	@ work on |divisor|
    981   1.1.1.4  mrg 
    982   1.1.1.9  mrg 1:	asrs	curbit, dividend, #32	@ shift by 32: C = sign bit, curbit = 0 or -1
    983   1.1.1.4  mrg 	bcc	2f
    984   1.1.1.9  mrg 	negs	dividend, dividend	@ work on |dividend|
    985   1.1.1.4  mrg 
    986   1.1.1.9  mrg 2:	eors	curbit, result		@ combine both signs: bit 0 = quotient sign,
    987   1.1.1.9  mrg 	movs	result, #0		@ bit 31 = dividend (remainder) sign
    988   1.1.1.4  mrg 	cpy	ip, curbit		@ save the sign word for the epilogue
    989   1.1.1.4  mrg 	BranchToDiv #4, LSYM(Lthumb1_div_negative4)
    990   1.1.1.4  mrg 	BranchToDiv #8, LSYM(Lthumb1_div_negative8)
    991   1.1.1.4  mrg LSYM(Lthumb1_div_large):
    992   1.1.1.9  mrg 	movs	result, #0xfc		@ becomes 0xfc000000 after the rev below
    993   1.1.1.9  mrg 	lsls	divisor, divisor, #6
    994   1.1.1.4  mrg 	rev	result, result
    995   1.1.1.9  mrg 	lsrs	curbit, dividend, #8
    996   1.1.1.4  mrg 	cmp	curbit, divisor
    997   1.1.1.4  mrg 	blo	LSYM(Lthumb1_div_negative8)
    998   1.1.1.4  mrg 
    999   1.1.1.9  mrg 	lsls	divisor, divisor, #6
   1000   1.1.1.9  mrg 	asrs	result, result, #6	@ extend the leading ones by 6 bits
   1001   1.1.1.4  mrg 	cmp	curbit, divisor
   1002   1.1.1.4  mrg 	blo	LSYM(Lthumb1_div_negative8)
   1003   1.1.1.4  mrg 
   1004   1.1.1.9  mrg 	lsls	divisor, divisor, #6
   1005   1.1.1.9  mrg 	asrs	result, result, #6
   1006   1.1.1.4  mrg 	cmp	curbit, divisor
   1007   1.1.1.4  mrg 	blo	LSYM(Lthumb1_div_negative8)
   1008   1.1.1.4  mrg 
   1009   1.1.1.9  mrg 	lsls	divisor, divisor, #6
   1010   1.1.1.4  mrg 	beq	LSYM(Ldivbyzero_negative)
   1011   1.1.1.9  mrg 	asrs	result, result, #6
   1012   1.1.1.4  mrg 	b	LSYM(Lthumb1_div_negative8)
   1013   1.1.1.4  mrg LSYM(Lthumb1_div_negative_loop):
   1014   1.1.1.9  mrg 	lsrs	divisor, divisor, #6
   1015   1.1.1.4  mrg LSYM(Lthumb1_div_negative8):
   1016   1.1.1.4  mrg 	DoDiv	#7
   1017   1.1.1.4  mrg 	DoDiv	#6
   1018   1.1.1.4  mrg 	DoDiv	#5
   1019   1.1.1.4  mrg 	DoDiv	#4
   1020   1.1.1.4  mrg LSYM(Lthumb1_div_negative4):
   1021   1.1.1.4  mrg 	DoDiv	#3
   1022   1.1.1.4  mrg 	DoDiv	#2
   1023   1.1.1.4  mrg 	bcs	LSYM(Lthumb1_div_negative_loop)
   1024   1.1.1.4  mrg 	DoDiv	#1
   1025   1.1.1.9  mrg 	subs	divisor, dividend, divisor	@ divisor becomes the remainder
   1026   1.1.1.4  mrg 	bcs	1f
   1027   1.1.1.4  mrg 	cpy	divisor, dividend
   1028   1.1.1.4  mrg 
   1029   1.1.1.4  mrg 1:	cpy	curbit, ip		@ recover the saved sign word
   1030   1.1.1.9  mrg 	adcs	result, result		@ shift in the last quotient bit
   1031   1.1.1.9  mrg 	asrs	curbit, curbit, #1	@ C = bit 0: negate quotient?
   1032   1.1.1.4  mrg 	cpy	dividend, result	@ quotient returned in dividend
   1033   1.1.1.4  mrg 	bcc	2f
   1034   1.1.1.9  mrg 	negs	dividend, dividend
   1035   1.1.1.4  mrg 	cmp	curbit, #0		@ N = sign: negate remainder?
   1036   1.1.1.4  mrg 
   1037   1.1.1.4  mrg 2:	bpl	3f
   1038   1.1.1.9  mrg 	negs	divisor, divisor	@ remainder follows the dividend's sign
   1039   1.1.1.4  mrg 
   1040   1.1.1.4  mrg 3:	RET
   1041   1.1.1.4  mrg 
   1042   1.1.1.4  mrg LSYM(Ldivbyzero_negative):
   1043   1.1.1.4  mrg 	cpy	curbit, ip
   1044   1.1.1.9  mrg 	asrs	curbit, curbit, #1	@ C = bit 0 of the sign word
   1045   1.1.1.4  mrg 	bcc	LSYM(Ldiv0)
   1046   1.1.1.9  mrg 	negs	dividend, dividend	@ restore dividend's sign before Ldiv0
   1047   1.1.1.4  mrg .endm
   1048   1.1.1.4  mrg #endif /* ARM Thumb version.  */
   1049   1.1.1.4  mrg 
   1050       1.1  mrg /* ------------------------------------------------------------------------ */
   1051       1.1  mrg /*		Start of the Real Functions				    */
   1052       1.1  mrg /* ------------------------------------------------------------------------ */
   1053       1.1  mrg #ifdef L_udivsi3
                       	@ __udivsi3 / __aeabi_uidiv: unsigned 32-bit division, plus
                       	@ __aeabi_uidivmod: quotient in r0, remainder in r1.
   1054       1.1  mrg 
   1055       1.1  mrg #if defined(__prefer_thumb__)
   1056       1.1  mrg 
   1057       1.1  mrg 	FUNC_START udivsi3
   1058       1.1  mrg 	FUNC_ALIAS aeabi_uidiv udivsi3
   1059   1.1.1.4  mrg #if defined(__OPTIMIZE_SIZE__)
   1060       1.1  mrg 
   1061       1.1  mrg 	cmp	divisor, #0
   1062       1.1  mrg 	beq	LSYM(Ldiv0)
   1063       1.1  mrg LSYM(udivsi3_skip_div0_test):
   1064   1.1.1.9  mrg 	movs	curbit, #1		@ entry conditions for THUMB_DIV_MOD_BODY
   1065   1.1.1.9  mrg 	movs	result, #0
   1066       1.1  mrg 
   1067       1.1  mrg 	push	{ work }
   1068       1.1  mrg 	cmp	dividend, divisor
   1069       1.1  mrg 	blo	LSYM(Lgot_result)	@ dividend < divisor: quotient is 0
   1070       1.1  mrg 
   1071       1.1  mrg 	THUMB_DIV_MOD_BODY 0
   1072       1.1  mrg 
   1073   1.1.1.9  mrg 	movs	r0, result
   1074       1.1  mrg 	pop	{ work }
   1075       1.1  mrg 	RET
   1076       1.1  mrg 
   1077   1.1.1.4  mrg /* Implementation of aeabi_uidiv for ARMv6m.  This version is only
   1078   1.1.1.4  mrg    used in ARMv6-M when we need an efficient implementation.  */
   1079   1.1.1.4  mrg #else
   1080   1.1.1.4  mrg LSYM(udivsi3_skip_div0_test):
   1081   1.1.1.4  mrg 	THUMB1_Div_Positive
   1082   1.1.1.4  mrg 
   1083   1.1.1.4  mrg #endif /* __OPTIMIZE_SIZE__ */
   1084   1.1.1.4  mrg 
   1085       1.1  mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
   1086       1.1  mrg 
   1087       1.1  mrg 	ARM_FUNC_START udivsi3
   1088       1.1  mrg 	ARM_FUNC_ALIAS aeabi_uidiv udivsi3
   1089       1.1  mrg 
   1090       1.1  mrg 	cmp	r1, #0
   1091       1.1  mrg 	beq	LSYM(Ldiv0)
   1092       1.1  mrg 
   1093       1.1  mrg 	udiv	r0, r0, r1		@ hardware divide
   1094       1.1  mrg 	RET
   1095       1.1  mrg 
   1096       1.1  mrg #else /* ARM version/Thumb-2.  */
   1097       1.1  mrg 
   1098       1.1  mrg 	ARM_FUNC_START udivsi3
   1099       1.1  mrg 	ARM_FUNC_ALIAS aeabi_uidiv udivsi3
   1100       1.1  mrg 
   1101       1.1  mrg 	/* Note: if called via udivsi3_skip_div0_test, this will unnecessarily
   1102       1.1  mrg 	   check for division-by-zero a second time.  */
   1103       1.1  mrg LSYM(udivsi3_skip_div0_test):
   1104       1.1  mrg 	subs	r2, r1, #1		@ r2 = divisor - 1; eq: divisor == 1, cc: divisor == 0
   1105       1.1  mrg 	do_it	eq
   1106       1.1  mrg 	RETc(eq)			@ dividing by 1: return dividend unchanged
   1107       1.1  mrg 	bcc	LSYM(Ldiv0)		@ divisor was zero
   1108       1.1  mrg 	cmp	r0, r1
   1109       1.1  mrg 	bls	11f			@ dividend <= divisor: quotient is 0 or 1
   1110       1.1  mrg 	tst	r1, r2
   1111       1.1  mrg 	beq	12f			@ divisor is a power of 2: just shift
   1112       1.1  mrg 
   1113       1.1  mrg 	ARM_DIV_BODY r0, r1, r2, r3
   1114       1.1  mrg 
   1115       1.1  mrg 	mov	r0, r2
   1116       1.1  mrg 	RET
   1117       1.1  mrg 
   1118       1.1  mrg 11:	do_it	eq, e
   1119       1.1  mrg 	moveq	r0, #1			@ dividend == divisor
   1120       1.1  mrg 	movne	r0, #0			@ dividend < divisor
   1121       1.1  mrg 	RET
   1122       1.1  mrg 
   1123       1.1  mrg 12:	ARM_DIV2_ORDER r1, r2		@ r2 = log2 (divisor)
   1124       1.1  mrg 
   1125       1.1  mrg 	mov	r0, r0, lsr r2
   1126       1.1  mrg 	RET
   1127       1.1  mrg 
   1128       1.1  mrg #endif /* ARM version */
   1129       1.1  mrg 
   1130       1.1  mrg 	DIV_FUNC_END udivsi3 unsigned
   1131       1.1  mrg 
   1132       1.1  mrg #if defined(__prefer_thumb__)
   1133       1.1  mrg FUNC_START aeabi_uidivmod
   1134       1.1  mrg 	cmp	r1, #0
   1135       1.1  mrg 	beq	LSYM(Ldiv0)
   1136   1.1.1.4  mrg # if defined(__OPTIMIZE_SIZE__)
   1137       1.1  mrg 	push	{r0, r1, lr}
   1138       1.1  mrg 	bl	LSYM(udivsi3_skip_div0_test)
   1139       1.1  mrg 	POP	{r1, r2, r3}		@ r1 = dividend, r2 = divisor, r3 = return address
   1140   1.1.1.9  mrg 	muls	r2, r0
   1141   1.1.1.9  mrg 	subs	r1, r1, r2		@ r1 = dividend - quotient * divisor
   1142       1.1  mrg 	bx	r3
   1143   1.1.1.4  mrg # else
   1144   1.1.1.4  mrg 	/* Both the quotient and remainder are calculated simultaneously
   1145   1.1.1.4  mrg 	   in THUMB1_Div_Positive.  There is no need to calculate the
   1146   1.1.1.4  mrg 	   remainder again here.  */
   1147   1.1.1.4  mrg 	b	LSYM(udivsi3_skip_div0_test)
   1148   1.1.1.4  mrg 	RET
   1149   1.1.1.4  mrg # endif /* __OPTIMIZE_SIZE__ */
   1150   1.1.1.4  mrg 
   1151       1.1  mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
   1152       1.1  mrg ARM_FUNC_START aeabi_uidivmod
   1153       1.1  mrg 	cmp	r1, #0
   1154       1.1  mrg 	beq	LSYM(Ldiv0)
   1155   1.1.1.9  mrg 	mov     r2, r0			@ save the dividend
   1156       1.1  mrg 	udiv	r0, r0, r1
   1157       1.1  mrg 	mls     r1, r0, r1, r2		@ r1 = dividend - quotient * divisor
   1158       1.1  mrg 	RET
   1159       1.1  mrg #else
   1160       1.1  mrg ARM_FUNC_START aeabi_uidivmod
   1161       1.1  mrg 	cmp	r1, #0
   1162       1.1  mrg 	beq	LSYM(Ldiv0)
   1163       1.1  mrg 	stmfd	sp!, { r0, r1, lr }
   1164       1.1  mrg 	bl	LSYM(udivsi3_skip_div0_test)
   1165       1.1  mrg 	ldmfd	sp!, { r1, r2, lr }	@ r1 = dividend, r2 = divisor
   1166       1.1  mrg 	mul	r3, r2, r0
   1167       1.1  mrg 	sub	r1, r1, r3		@ r1 = dividend - quotient * divisor
   1168       1.1  mrg 	RET
   1169       1.1  mrg #endif
   1170       1.1  mrg 	FUNC_END aeabi_uidivmod
   1171       1.1  mrg 
   1172       1.1  mrg #endif /* L_udivsi3 */
   1173       1.1  mrg /* ------------------------------------------------------------------------ */
   1174       1.1  mrg #ifdef L_umodsi3
                       	@ __umodsi3: unsigned 32-bit modulo, remainder returned in r0.
   1175       1.1  mrg 
   1176   1.1.1.4  mrg #if defined(__ARM_ARCH_EXT_IDIV__) && __ARM_ARCH_ISA_THUMB != 1
   1177       1.1  mrg 
   1178       1.1  mrg 	ARM_FUNC_START umodsi3
   1179       1.1  mrg 
   1180       1.1  mrg 	cmp	r1, #0
   1181       1.1  mrg 	beq	LSYM(Ldiv0)
   1182       1.1  mrg 	udiv	r2, r0, r1		@ r2 = quotient
   1183       1.1  mrg 	mls     r0, r1, r2, r0		@ r0 = dividend - quotient * divisor
   1184       1.1  mrg 	RET
   1185       1.1  mrg 
   1186       1.1  mrg #elif defined(__thumb__)
   1187       1.1  mrg 
   1188       1.1  mrg 	FUNC_START umodsi3
   1189       1.1  mrg 
   1190       1.1  mrg 	cmp	divisor, #0
   1191       1.1  mrg 	beq	LSYM(Ldiv0)
   1192   1.1.1.9  mrg 	movs	curbit, #1		@ entry condition for THUMB_DIV_MOD_BODY
   1193       1.1  mrg 	cmp	dividend, divisor
   1194       1.1  mrg 	bhs	LSYM(Lover10)
   1195       1.1  mrg 	RET				@ dividend < divisor: it is the remainder
   1196       1.1  mrg 
   1197       1.1  mrg LSYM(Lover10):
   1198       1.1  mrg 	push	{ work }
   1199       1.1  mrg 
   1200       1.1  mrg 	THUMB_DIV_MOD_BODY 1
   1201       1.1  mrg 
   1202       1.1  mrg 	pop	{ work }
   1203       1.1  mrg 	RET
   1204       1.1  mrg 
   1205       1.1  mrg #else  /* ARM version.  */
   1206   1.1.1.4  mrg 
   1207       1.1  mrg 	FUNC_START umodsi3
   1208       1.1  mrg 
   1209       1.1  mrg 	subs	r2, r1, #1			@ compare divisor with 1
   1210       1.1  mrg 	bcc	LSYM(Ldiv0)
   1211       1.1  mrg 	cmpne	r0, r1				@ compare dividend with divisor
   1212       1.1  mrg 	moveq   r0, #0				@ remainder 0 (divisor 1, or equal operands)
   1213       1.1  mrg 	tsthi	r1, r2				@ see if divisor is power of 2
   1214       1.1  mrg 	andeq	r0, r0, r2			@ power of 2: remainder = dividend & (divisor - 1)
   1215       1.1  mrg 	RETc(ls)				@ return if the remainder is already in r0
   1216       1.1  mrg 
   1217       1.1  mrg 	ARM_MOD_BODY r0, r1, r2, r3
   1218       1.1  mrg 
   1219       1.1  mrg 	RET
   1220       1.1  mrg 
   1221       1.1  mrg #endif /* ARM version.  */
   1222       1.1  mrg 
   1223       1.1  mrg 	DIV_FUNC_END umodsi3 unsigned
   1224       1.1  mrg 
   1225       1.1  mrg #endif /* L_umodsi3 */
   1226       1.1  mrg /* ------------------------------------------------------------------------ */
   1227       1.1  mrg #ifdef L_divsi3
                       	@ __divsi3 / __aeabi_idiv: signed 32-bit division, plus
                       	@ __aeabi_idivmod: quotient in r0, remainder in r1.
   1228       1.1  mrg 
   1229       1.1  mrg #if defined(__prefer_thumb__)
   1230       1.1  mrg 
   1231   1.1.1.4  mrg 	FUNC_START divsi3
   1232       1.1  mrg 	FUNC_ALIAS aeabi_idiv divsi3
   1233   1.1.1.4  mrg #if defined(__OPTIMIZE_SIZE__)
   1234       1.1  mrg 
   1235       1.1  mrg 	cmp	divisor, #0
   1236       1.1  mrg 	beq	LSYM(Ldiv0)
   1237       1.1  mrg LSYM(divsi3_skip_div0_test):
   1238       1.1  mrg 	push	{ work }
   1239   1.1.1.9  mrg 	movs	work, dividend
   1240   1.1.1.9  mrg 	eors	work, divisor		@ Save the sign of the result.
   1241       1.1  mrg 	mov	ip, work
   1242   1.1.1.9  mrg 	movs	curbit, #1		@ entry conditions for THUMB_DIV_MOD_BODY
   1243   1.1.1.9  mrg 	movs	result, #0
   1244       1.1  mrg 	cmp	divisor, #0
   1245       1.1  mrg 	bpl	LSYM(Lover10)
   1246   1.1.1.9  mrg 	negs	divisor, divisor	@ Loops below use unsigned.
   1247       1.1  mrg LSYM(Lover10):
   1248       1.1  mrg 	cmp	dividend, #0
   1249       1.1  mrg 	bpl	LSYM(Lover11)
   1250   1.1.1.9  mrg 	negs	dividend, dividend
   1251       1.1  mrg LSYM(Lover11):
   1252       1.1  mrg 	cmp	dividend, divisor
   1253       1.1  mrg 	blo	LSYM(Lgot_result)	@ |dividend| < |divisor|: quotient is 0
   1254       1.1  mrg 
   1255       1.1  mrg 	THUMB_DIV_MOD_BODY 0
   1256   1.1.1.4  mrg 
   1257   1.1.1.9  mrg 	movs	r0, result
   1258       1.1  mrg 	mov	work, ip		@ recover the saved result sign
   1259       1.1  mrg 	cmp	work, #0
   1260       1.1  mrg 	bpl	LSYM(Lover12)
   1261   1.1.1.9  mrg 	negs	r0, r0
   1262       1.1  mrg LSYM(Lover12):
   1263       1.1  mrg 	pop	{ work }
   1264       1.1  mrg 	RET
   1265       1.1  mrg 
   1266   1.1.1.4  mrg /* Implementation of aeabi_idiv for ARMv6m.  This version is only
   1267   1.1.1.4  mrg    used in ARMv6-M when we need an efficient implementation.  */
   1268   1.1.1.4  mrg #else
   1269   1.1.1.4  mrg LSYM(divsi3_skip_div0_test):
   1270   1.1.1.4  mrg 	cpy	curbit, dividend
   1271   1.1.1.9  mrg 	orrs	curbit, divisor		@ N set if either operand is negative
   1272   1.1.1.4  mrg 	bmi	LSYM(Lthumb1_div_negative)
   1273   1.1.1.4  mrg 
   1274   1.1.1.4  mrg LSYM(Lthumb1_div_positive):
   1275   1.1.1.4  mrg 	THUMB1_Div_Positive
   1276   1.1.1.4  mrg 
   1277   1.1.1.4  mrg LSYM(Lthumb1_div_negative):
   1278   1.1.1.4  mrg 	THUMB1_Div_Negative
   1279   1.1.1.4  mrg 
   1280   1.1.1.4  mrg #endif /* __OPTIMIZE_SIZE__ */
   1281   1.1.1.4  mrg 
   1282       1.1  mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
   1283       1.1  mrg 
   1284       1.1  mrg 	ARM_FUNC_START divsi3
   1285       1.1  mrg 	ARM_FUNC_ALIAS aeabi_idiv divsi3
   1286       1.1  mrg 
   1287       1.1  mrg 	cmp 	r1, #0
   1288       1.1  mrg 	beq	LSYM(Ldiv0)
   1289       1.1  mrg 	sdiv	r0, r0, r1		@ hardware divide
   1290       1.1  mrg 	RET
   1291       1.1  mrg 
   1292       1.1  mrg #else /* ARM/Thumb-2 version.  */
   1293   1.1.1.4  mrg 
   1294   1.1.1.4  mrg 	ARM_FUNC_START divsi3
   1295       1.1  mrg 	ARM_FUNC_ALIAS aeabi_idiv divsi3
   1296       1.1  mrg 
   1297       1.1  mrg 	cmp	r1, #0
   1298       1.1  mrg 	beq	LSYM(Ldiv0)
   1299       1.1  mrg LSYM(divsi3_skip_div0_test):
   1300       1.1  mrg 	eor	ip, r0, r1			@ save the sign of the result.
   1301       1.1  mrg 	do_it	mi
   1302       1.1  mrg 	rsbmi	r1, r1, #0			@ loops below use unsigned.
   1303       1.1  mrg 	subs	r2, r1, #1			@ division by 1 or -1 ?
   1304       1.1  mrg 	beq	10f
   1305       1.1  mrg 	movs	r3, r0
   1306       1.1  mrg 	do_it	mi
   1307       1.1  mrg 	rsbmi	r3, r0, #0			@ positive dividend value
   1308       1.1  mrg 	cmp	r3, r1
   1309       1.1  mrg 	bls	11f				@ |dividend| <= |divisor|
   1310       1.1  mrg 	tst	r1, r2				@ divisor is power of 2 ?
   1311       1.1  mrg 	beq	12f
   1312       1.1  mrg 
   1313       1.1  mrg 	ARM_DIV_BODY r3, r1, r0, r2
   1314       1.1  mrg 
   1315       1.1  mrg 	cmp	ip, #0				@ apply the saved result sign
   1316       1.1  mrg 	do_it	mi
   1317       1.1  mrg 	rsbmi	r0, r0, #0
   1318       1.1  mrg 	RET
   1319       1.1  mrg 
   1320       1.1  mrg 10:	teq	ip, r0				@ same sign ?
   1321       1.1  mrg 	do_it	mi
   1322       1.1  mrg 	rsbmi	r0, r0, #0
   1323       1.1  mrg 	RET
   1324       1.1  mrg 
   1325       1.1  mrg 11:	do_it	lo
   1326       1.1  mrg 	movlo	r0, #0				@ |dividend| < |divisor|: quotient is 0
   1327       1.1  mrg 	do_it	eq,t
   1328       1.1  mrg 	moveq	r0, ip, asr #31			@ |dividend| == |divisor|:
   1329       1.1  mrg 	orreq	r0, r0, #1			@ result is +1 or -1 with the correct sign
   1330       1.1  mrg 	RET
   1331       1.1  mrg 
   1332       1.1  mrg 12:	ARM_DIV2_ORDER r1, r2			@ r2 = log2 (divisor)
   1333       1.1  mrg 
   1334       1.1  mrg 	cmp	ip, #0
   1335       1.1  mrg 	mov	r0, r3, lsr r2
   1336       1.1  mrg 	do_it	mi
   1337       1.1  mrg 	rsbmi	r0, r0, #0
   1338       1.1  mrg 	RET
   1339       1.1  mrg 
   1340       1.1  mrg #endif /* ARM version */
   1341       1.1  mrg 
   1342       1.1  mrg 	DIV_FUNC_END divsi3 signed
   1343       1.1  mrg 
   1344       1.1  mrg #if defined(__prefer_thumb__)
   1345       1.1  mrg FUNC_START aeabi_idivmod
   1346       1.1  mrg 	cmp	r1, #0
   1347       1.1  mrg 	beq	LSYM(Ldiv0)
   1348   1.1.1.4  mrg # if defined(__OPTIMIZE_SIZE__)
   1349       1.1  mrg 	push	{r0, r1, lr}
   1350       1.1  mrg 	bl	LSYM(divsi3_skip_div0_test)
   1351       1.1  mrg 	POP	{r1, r2, r3}		@ r1 = dividend, r2 = divisor, r3 = return address
   1352   1.1.1.9  mrg 	muls	r2, r0
   1353   1.1.1.9  mrg 	subs	r1, r1, r2		@ r1 = dividend - quotient * divisor
   1354       1.1  mrg 	bx	r3
   1355   1.1.1.4  mrg # else
   1356   1.1.1.4  mrg 	/* Both the quotient and remainder are calculated simultaneously
   1357   1.1.1.4  mrg 	   in THUMB1_Div_Positive and THUMB1_Div_Negative.  There is no
   1358   1.1.1.4  mrg 	   need to calculate the remainder again here.  */
   1359   1.1.1.4  mrg 	b	LSYM(divsi3_skip_div0_test)
   1360   1.1.1.4  mrg 	RET
   1361   1.1.1.4  mrg # endif /* __OPTIMIZE_SIZE__ */
   1362   1.1.1.4  mrg 
   1363       1.1  mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
   1364       1.1  mrg ARM_FUNC_START aeabi_idivmod
   1365       1.1  mrg 	cmp 	r1, #0
   1366       1.1  mrg 	beq	LSYM(Ldiv0)
   1367       1.1  mrg 	mov     r2, r0			@ save the dividend
   1368       1.1  mrg 	sdiv	r0, r0, r1
   1369       1.1  mrg 	mls     r1, r0, r1, r2		@ r1 = dividend - quotient * divisor
   1370       1.1  mrg 	RET
   1371       1.1  mrg #else
   1372       1.1  mrg ARM_FUNC_START aeabi_idivmod
   1373       1.1  mrg 	cmp	r1, #0
   1374       1.1  mrg 	beq	LSYM(Ldiv0)
   1375       1.1  mrg 	stmfd	sp!, { r0, r1, lr }
   1376       1.1  mrg 	bl	LSYM(divsi3_skip_div0_test)
   1377       1.1  mrg 	ldmfd	sp!, { r1, r2, lr }	@ r1 = dividend, r2 = divisor
   1378       1.1  mrg 	mul	r3, r2, r0
   1379       1.1  mrg 	sub	r1, r1, r3		@ r1 = dividend - quotient * divisor
   1380       1.1  mrg 	RET
   1381       1.1  mrg #endif
   1382       1.1  mrg 	FUNC_END aeabi_idivmod
   1383       1.1  mrg 
   1384       1.1  mrg #endif /* L_divsi3 */
   1385       1.1  mrg /* ------------------------------------------------------------------------ */
   1386       1.1  mrg #ifdef L_modsi3
                       	@ __modsi3: signed 32-bit modulo, remainder returned in r0
                       	@ (the remainder takes the sign of the dividend).
   1387       1.1  mrg 
   1388   1.1.1.4  mrg #if defined(__ARM_ARCH_EXT_IDIV__) && __ARM_ARCH_ISA_THUMB != 1
   1389       1.1  mrg 
   1390       1.1  mrg 	ARM_FUNC_START modsi3
   1391       1.1  mrg 
   1392       1.1  mrg 	cmp	r1, #0
   1393       1.1  mrg 	beq	LSYM(Ldiv0)
   1394       1.1  mrg 
   1395       1.1  mrg 	sdiv	r2, r0, r1		@ r2 = quotient
   1396       1.1  mrg 	mls     r0, r1, r2, r0		@ r0 = dividend - quotient * divisor
   1397       1.1  mrg 	RET
   1398       1.1  mrg 
   1399       1.1  mrg #elif defined(__thumb__)
   1400       1.1  mrg 
   1401       1.1  mrg 	FUNC_START modsi3
   1402       1.1  mrg 
   1403   1.1.1.9  mrg 	movs	curbit, #1		@ entry condition for THUMB_DIV_MOD_BODY
   1404       1.1  mrg 	cmp	divisor, #0
   1405       1.1  mrg 	beq	LSYM(Ldiv0)
   1406       1.1  mrg 	bpl	LSYM(Lover10)
   1407   1.1.1.9  mrg 	negs	divisor, divisor		@ Loops below use unsigned.
   1408       1.1  mrg LSYM(Lover10):
   1409       1.1  mrg 	push	{ work }
   1410       1.1  mrg 	@ Need to save the sign of the dividend, unfortunately, we need
   1411       1.1  mrg 	@ work later on.  Must do this after saving the original value of
   1412       1.1  mrg 	@ the work register, because we will pop this value off first.
   1413       1.1  mrg 	push	{ dividend }
   1414       1.1  mrg 	cmp	dividend, #0
   1415       1.1  mrg 	bpl	LSYM(Lover11)
   1416   1.1.1.9  mrg 	negs	dividend, dividend
   1417       1.1  mrg LSYM(Lover11):
   1418       1.1  mrg 	cmp	dividend, divisor
   1419       1.1  mrg 	blo	LSYM(Lgot_result)	@ |dividend| < |divisor|: it is the remainder
   1420       1.1  mrg 
   1421       1.1  mrg 	THUMB_DIV_MOD_BODY 1
   1422       1.1  mrg 
   1423       1.1  mrg 	pop	{ work }		@ original dividend: recover its sign
   1424       1.1  mrg 	cmp	work, #0
   1425       1.1  mrg 	bpl	LSYM(Lover12)
   1426   1.1.1.9  mrg 	negs	dividend, dividend	@ remainder follows the dividend's sign
   1427       1.1  mrg LSYM(Lover12):
   1428       1.1  mrg 	pop	{ work }
   1429       1.1  mrg 	RET
   1430       1.1  mrg 
   1431       1.1  mrg #else /* ARM version.  */
   1432       1.1  mrg 
   1433       1.1  mrg 	FUNC_START modsi3
   1434       1.1  mrg 
   1435       1.1  mrg 	cmp	r1, #0
   1436       1.1  mrg 	beq	LSYM(Ldiv0)
   1437       1.1  mrg 	rsbmi	r1, r1, #0			@ loops below use unsigned.
   1438       1.1  mrg 	movs	ip, r0				@ preserve sign of dividend
   1439       1.1  mrg 	rsbmi	r0, r0, #0			@ if negative make positive
   1440       1.1  mrg 	subs	r2, r1, #1			@ compare divisor with 1
   1441       1.1  mrg 	cmpne	r0, r1				@ compare dividend with divisor
   1442       1.1  mrg 	moveq	r0, #0
   1443       1.1  mrg 	tsthi	r1, r2				@ see if divisor is power of 2
   1444       1.1  mrg 	andeq	r0, r0, r2			@ power of 2: remainder = dividend & (divisor - 1)
   1445       1.1  mrg 	bls	10f
   1446       1.1  mrg 
   1447       1.1  mrg 	ARM_MOD_BODY r0, r1, r2, r3
   1448       1.1  mrg 
   1449       1.1  mrg 10:	cmp	ip, #0
   1450       1.1  mrg 	rsbmi	r0, r0, #0			@ dividend was negative: negate remainder
   1451       1.1  mrg 	RET
   1452       1.1  mrg 
   1453       1.1  mrg #endif /* ARM version */
   1454       1.1  mrg 
   1455       1.1  mrg 	DIV_FUNC_END modsi3 signed
   1456       1.1  mrg 
   1457       1.1  mrg #endif /* L_modsi3 */
   1458       1.1  mrg /* ------------------------------------------------------------------------ */
    1459       1.1  mrg #ifdef L_dvmd_tls
    1460       1.1  mrg 
                        @ Trivial ("tls" = trivial) division-by-zero handlers: simply
                        @ return to the caller.  The EABI variants are WEAK so an
                        @ application can override them with its own handlers.
    1461       1.1  mrg #ifdef __ARM_EABI__
    1462       1.1  mrg 	WEAK aeabi_idiv0
    1463       1.1  mrg 	WEAK aeabi_ldiv0
    1464       1.1  mrg 	FUNC_START aeabi_idiv0
    1465       1.1  mrg 	FUNC_START aeabi_ldiv0
    1466       1.1  mrg 	RET
    1467       1.1  mrg 	FUNC_END aeabi_ldiv0
    1468       1.1  mrg 	FUNC_END aeabi_idiv0
    1469       1.1  mrg #else
    1470       1.1  mrg 	FUNC_START div0
    1471       1.1  mrg 	RET
    1472       1.1  mrg 	FUNC_END div0
    1473       1.1  mrg #endif
    1474       1.1  mrg 
    1475       1.1  mrg #endif /* L_dvmd_tls */
   1476       1.1  mrg /* ------------------------------------------------------------------------ */
    1477       1.1  mrg #ifdef L_dvmd_lnx
    1478       1.1  mrg @ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls
    1479       1.1  mrg 
    1480       1.1  mrg /* Constant taken from <asm/signal.h>.  */
    1481       1.1  mrg #define SIGFPE	8
    1482       1.1  mrg 
                        @ Saves r1/lr, calls raise(SIGFPE) (via the PLT when building
                        @ PIC), then returns to the caller.  The cfi_* annotations
                        @ provide unwind information so exceptions can propagate
                        @ through the handler.
    1483       1.1  mrg #ifdef __ARM_EABI__
    1484   1.1.1.3  mrg 	cfi_start	__aeabi_ldiv0, LSYM(Lend_aeabi_ldiv0)
    1485       1.1  mrg 	WEAK aeabi_idiv0
    1486       1.1  mrg 	WEAK aeabi_ldiv0
    1487       1.1  mrg 	ARM_FUNC_START aeabi_idiv0
    1488       1.1  mrg 	ARM_FUNC_START aeabi_ldiv0
    1489   1.1.1.3  mrg 	do_push	{r1, lr}
    1490   1.1.1.3  mrg 98:	cfi_push 98b - __aeabi_ldiv0, 0xe, -0x4, 0x8
    1491       1.1  mrg #else
    1492   1.1.1.3  mrg 	cfi_start	__div0, LSYM(Lend_div0)
    1493       1.1  mrg 	ARM_FUNC_START div0
    1494   1.1.1.3  mrg 	do_push	{r1, lr}
    1495   1.1.1.3  mrg 98:	cfi_push 98b - __div0, 0xe, -0x4, 0x8
    1496       1.1  mrg #endif
    1497       1.1  mrg 
    1498       1.1  mrg 	mov	r0, #SIGFPE
    1499       1.1  mrg 	bl	SYM(raise) __PLT__
    1500   1.1.1.3  mrg 	RETLDM	r1 unwind=98b
    1501       1.1  mrg 
    1502       1.1  mrg #ifdef __ARM_EABI__
    1503   1.1.1.3  mrg 	cfi_end	LSYM(Lend_aeabi_ldiv0)
    1504       1.1  mrg 	FUNC_END aeabi_ldiv0
    1505       1.1  mrg 	FUNC_END aeabi_idiv0
    1506       1.1  mrg #else
    1507   1.1.1.3  mrg 	cfi_end	LSYM(Lend_div0)
    1508       1.1  mrg 	FUNC_END div0
    1509       1.1  mrg #endif
    1510       1.1  mrg 
    1511       1.1  mrg #endif /* L_dvmd_lnx */
    1512       1.1  mrg #ifdef L_clear_cache
    1513       1.1  mrg #if defined __ARM_EABI__ && defined __linux__
    1514       1.1  mrg @ EABI GNU/Linux call to cacheflush syscall.
                        @ void __clear_cache (char *beg, char *end)
                        @ In: r0 = start, r1 = end (as passed by the caller).
                        @ Loads r7 with 0x0f0002, the EABI cacheflush syscall number
                        @ (built with MOVW/MOVT where the architecture has them),
                        @ clears the flags argument in r2, and traps with SVC.
    1515       1.1  mrg 	ARM_FUNC_START clear_cache
    1516       1.1  mrg 	do_push	{r7}
    1517   1.1.1.8  mrg #if __ARM_ARCH >= 7 || defined(__ARM_ARCH_6T2__)
    1518       1.1  mrg 	movw	r7, #2
    1519       1.1  mrg 	movt	r7, #0xf
    1520       1.1  mrg #else
    1521       1.1  mrg 	mov	r7, #0xf0000
    1522       1.1  mrg 	add	r7, r7, #2
    1523       1.1  mrg #endif
    1524       1.1  mrg 	mov	r2, #0
    1525  1.1.1.11  mrg 	svc	0
    1526       1.1  mrg 	do_pop	{r7}
    1527       1.1  mrg 	RET
    1528       1.1  mrg 	FUNC_END clear_cache
    1529       1.1  mrg #else
    1530       1.1  mrg #error "This is only for ARM EABI GNU/Linux"
    1531       1.1  mrg #endif
    1532       1.1  mrg #endif /* L_clear_cache */
   1533   1.1.1.8  mrg 
    1534   1.1.1.8  mrg #ifdef L_speculation_barrier
                        @ void __speculation_barrier (void)
                        @ Stops speculative execution past this point: ISB+DSB on
                        @ Armv7+, otherwise (EABI Linux) falls back to the kernel
                        @ cacheflush syscall, which executes the equivalent barriers
                        @ on newer hardware.  Preserves all argument registers except
                        @ r0-r2, which are used for the syscall fallback.
    1535   1.1.1.8  mrg 	FUNC_START speculation_barrier
    1536   1.1.1.8  mrg #if __ARM_ARCH >= 7
    1537   1.1.1.8  mrg 	isb
    1538   1.1.1.8  mrg 	dsb sy
    1539   1.1.1.8  mrg #elif defined __ARM_EABI__ && defined __linux__
    1540   1.1.1.8  mrg 	/* We don't have a speculation barrier directly for this
    1541   1.1.1.8  mrg 	   platform/architecture variant.  But we can use a kernel
    1542   1.1.1.8  mrg 	   clear_cache service routine which will emit such instructions
    1543   1.1.1.8  mrg 	   if run on a later version of the architecture.  We don't
    1544   1.1.1.8  mrg 	   really want to flush the cache, but we must give it a valid
    1545   1.1.1.8  mrg 	   address, so just clear pc..pc+1.  */
    1546   1.1.1.8  mrg #if defined __thumb__ && !defined __thumb2__
                        @ Thumb-1: build the 0x0f0002 syscall number with shifts since
                        @ MOVW/MOVT are unavailable.
    1547   1.1.1.8  mrg 	push	{r7}
    1548   1.1.1.9  mrg 	movs	r7, #0xf
    1549   1.1.1.9  mrg 	lsls	r7, #16
    1550   1.1.1.9  mrg 	adds	r7, #2
    1551   1.1.1.8  mrg 	adr	r0, . + 4
    1552   1.1.1.9  mrg 	adds	r1, r0, #1
    1553   1.1.1.9  mrg 	movs	r2, #0
    1554   1.1.1.8  mrg 	svc	0
    1555   1.1.1.8  mrg 	pop	{r7}
    1556   1.1.1.8  mrg #else
    1557   1.1.1.8  mrg 	do_push	{r7}
    1558   1.1.1.8  mrg #ifdef __ARM_ARCH_6T2__
    1559   1.1.1.8  mrg 	movw	r7, #2
    1560   1.1.1.8  mrg 	movt	r7, #0xf
    1561   1.1.1.8  mrg #else
    1562   1.1.1.8  mrg 	mov	r7, #0xf0000
    1563   1.1.1.8  mrg 	add	r7, r7, #2
    1564   1.1.1.8  mrg #endif
    1565   1.1.1.8  mrg 	add	r0, pc, #0	/* ADR.  */
    1566   1.1.1.8  mrg 	add	r1, r0, #1
    1567   1.1.1.8  mrg 	mov	r2, #0
    1568   1.1.1.8  mrg 	svc	0
    1569   1.1.1.8  mrg 	do_pop	{r7}
    1570   1.1.1.8  mrg #endif /* Thumb1 only */
    1571   1.1.1.8  mrg #else
    1572   1.1.1.8  mrg #warning "No speculation barrier defined for this platform"
    1573   1.1.1.8  mrg #endif
    1574   1.1.1.8  mrg 	RET
    1575   1.1.1.8  mrg 	FUNC_END speculation_barrier
    1576   1.1.1.8  mrg #endif
   1577       1.1  mrg /* ------------------------------------------------------------------------ */
   1578       1.1  mrg /* Dword shift operations.  */
   1579       1.1  mrg /* All the following Dword shift variants rely on the fact that
   1580       1.1  mrg 	shft xxx, Reg
   1581       1.1  mrg    is in fact done as
   1582       1.1  mrg 	shft xxx, (Reg & 255)
   1583       1.1  mrg    so for Reg value in (32...63) and (-1...-31) we will get zero (in the
   1584       1.1  mrg    case of logical shifts) or the sign (for asr).  */
   1585       1.1  mrg 
   1586       1.1  mrg #ifdef __ARMEB__
   1587       1.1  mrg #define al	r1
   1588       1.1  mrg #define ah	r0
   1589       1.1  mrg #else
   1590       1.1  mrg #define al	r0
   1591       1.1  mrg #define ah	r1
   1592       1.1  mrg #endif
   1593       1.1  mrg 
   1594       1.1  mrg /* Prevent __aeabi double-word shifts from being produced on SymbianOS.  */
   1595       1.1  mrg #ifndef __symbian__
   1596       1.1  mrg 
    1597       1.1  mrg #ifdef L_lshrdi3
    1598       1.1  mrg 
                        @ unsigned long long __lshrdi3 (unsigned long long x, int n)
                        @ (also __aeabi_llsr): 64-bit logical shift right.
                        @ al/ah are the endian-dependent low/high word aliases defined
                        @ above.  Both variants rely on shifts-by-register using only
                        @ the bottom byte of the count (see comment at top of this
                        @ section), so out-of-range partial counts contribute zero.
    1599       1.1  mrg 	FUNC_START lshrdi3
    1600       1.1  mrg 	FUNC_ALIAS aeabi_llsr lshrdi3
    1601       1.1  mrg 
    1602       1.1  mrg #ifdef __thumb__
                        @ al = (al >> n) | (ah >> (n - 32)) | (ah << (32 - n));
                        @ ah >>= n.  ip holds the original ah across the clobbers.
    1603   1.1.1.9  mrg 	lsrs	al, r2
    1604   1.1.1.9  mrg 	movs	r3, ah
    1605   1.1.1.9  mrg 	lsrs	ah, r2
    1606       1.1  mrg 	mov	ip, r3
    1607   1.1.1.9  mrg 	subs	r2, #32
    1608   1.1.1.9  mrg 	lsrs	r3, r2
    1609   1.1.1.9  mrg 	orrs	al, r3
    1610   1.1.1.9  mrg 	negs	r2, r2
    1611       1.1  mrg 	mov	r3, ip
    1612   1.1.1.9  mrg 	lsls	r3, r2
    1613   1.1.1.9  mrg 	orrs	al, r3
    1614       1.1  mrg 	RET
    1615       1.1  mrg #else
                        @ ARM: pick the n < 32 (mi) or n >= 32 (pl) combination with
                        @ conditional moves; r3 = n - 32, ip = 32 - n.
    1616       1.1  mrg 	subs	r3, r2, #32
    1617       1.1  mrg 	rsb	ip, r2, #32
    1618       1.1  mrg 	movmi	al, al, lsr r2
    1619       1.1  mrg 	movpl	al, ah, lsr r3
    1620       1.1  mrg 	orrmi	al, al, ah, lsl ip
    1621       1.1  mrg 	mov	ah, ah, lsr r2
    1622       1.1  mrg 	RET
    1623       1.1  mrg #endif
    1624       1.1  mrg 	FUNC_END aeabi_llsr
    1625       1.1  mrg 	FUNC_END lshrdi3
    1626       1.1  mrg 
    1627       1.1  mrg #endif
   1628       1.1  mrg 
    1629       1.1  mrg #ifdef L_ashrdi3
    1630       1.1  mrg 
                        @ long long __ashrdi3 (long long x, int n) (also __aeabi_lasr):
                        @ 64-bit arithmetic shift right.  Same structure as __lshrdi3
                        @ above, but the high word uses ASR so the sign propagates;
                        @ the n >= 32 case must skip the (ah >> (n - 32)) merge to
                        @ avoid smearing the sign bit into the low word (see inline
                        @ comment below).
    1631       1.1  mrg 	FUNC_START ashrdi3
    1632       1.1  mrg 	FUNC_ALIAS aeabi_lasr ashrdi3
    1633       1.1  mrg 
    1634       1.1  mrg #ifdef __thumb__
    1635   1.1.1.9  mrg 	lsrs	al, r2
    1636   1.1.1.9  mrg 	movs	r3, ah
    1637   1.1.1.9  mrg 	asrs	ah, r2
    1638   1.1.1.9  mrg 	subs	r2, #32
    1639       1.1  mrg 	@ If r2 is negative at this point the following step would OR
    1640       1.1  mrg 	@ the sign bit into all of AL.  That's not what we want...
    1641       1.1  mrg 	bmi	1f
    1642       1.1  mrg 	mov	ip, r3
    1643   1.1.1.9  mrg 	asrs	r3, r2
    1644   1.1.1.9  mrg 	orrs	al, r3
    1645       1.1  mrg 	mov	r3, ip
    1646       1.1  mrg 1:
    1647   1.1.1.9  mrg 	negs	r2, r2
    1648   1.1.1.9  mrg 	lsls	r3, r2
    1649   1.1.1.9  mrg 	orrs	al, r3
    1650       1.1  mrg 	RET
    1651       1.1  mrg #else
                        @ ARM: conditional-execution equivalent; r3 = n - 32,
                        @ ip = 32 - n select between the small- and large-shift paths.
    1652       1.1  mrg 	subs	r3, r2, #32
    1653       1.1  mrg 	rsb	ip, r2, #32
    1654       1.1  mrg 	movmi	al, al, lsr r2
    1655       1.1  mrg 	movpl	al, ah, asr r3
    1656       1.1  mrg 	orrmi	al, al, ah, lsl ip
    1657       1.1  mrg 	mov	ah, ah, asr r2
    1658       1.1  mrg 	RET
    1659       1.1  mrg #endif
    1660       1.1  mrg 
    1661       1.1  mrg 	FUNC_END aeabi_lasr
    1662       1.1  mrg 	FUNC_END ashrdi3
    1663       1.1  mrg 
    1664       1.1  mrg #endif
   1665       1.1  mrg 
    1666       1.1  mrg #ifdef L_ashldi3
    1667       1.1  mrg 
                        @ long long __ashldi3 (long long x, int n) (also __aeabi_llsl):
                        @ 64-bit shift left.  Mirror image of __lshrdi3: the high word
                        @ collects bits shifted out of the low word.
    1668       1.1  mrg 	FUNC_START ashldi3
    1669       1.1  mrg 	FUNC_ALIAS aeabi_llsl ashldi3
    1670       1.1  mrg 
    1671       1.1  mrg #ifdef __thumb__
                        @ ah = (ah << n) | (al << (n - 32)) | (al >> (32 - n));
                        @ al <<= n.  ip holds the original al across the clobbers.
    1672   1.1.1.9  mrg 	lsls	ah, r2
    1673   1.1.1.9  mrg 	movs	r3, al
    1674   1.1.1.9  mrg 	lsls	al, r2
    1675       1.1  mrg 	mov	ip, r3
    1676   1.1.1.9  mrg 	subs	r2, #32
    1677   1.1.1.9  mrg 	lsls	r3, r2
    1678   1.1.1.9  mrg 	orrs	ah, r3
    1679   1.1.1.9  mrg 	negs	r2, r2
    1680       1.1  mrg 	mov	r3, ip
    1681   1.1.1.9  mrg 	lsrs	r3, r2
    1682   1.1.1.9  mrg 	orrs	ah, r3
    1683       1.1  mrg 	RET
    1684       1.1  mrg #else
                        @ ARM: conditional moves select the n < 32 / n >= 32 paths;
                        @ r3 = n - 32, ip = 32 - n.
    1685       1.1  mrg 	subs	r3, r2, #32
    1686       1.1  mrg 	rsb	ip, r2, #32
    1687       1.1  mrg 	movmi	ah, ah, lsl r2
    1688       1.1  mrg 	movpl	ah, al, lsl r3
    1689       1.1  mrg 	orrmi	ah, ah, al, lsr ip
    1690       1.1  mrg 	mov	al, al, lsl r2
    1691       1.1  mrg 	RET
    1692       1.1  mrg #endif
    1693       1.1  mrg 	FUNC_END aeabi_llsl
    1694       1.1  mrg 	FUNC_END ashldi3
    1695       1.1  mrg 
    1696       1.1  mrg #endif
   1697       1.1  mrg 
   1698       1.1  mrg #endif /* __symbian__ */
   1699       1.1  mrg 
    1700       1.1  mrg #ifdef L_clzsi2
                        @ int __clzsi2 (unsigned x): count leading zero bits.
                        @ Returns 32 for x == 0 (r1 = 28 plus table entry 4).
                        @ Software paths binary-search the value down to its top
                        @ nibble, accumulating 16/8/4 into r1, then finish with a
                        @ 16-entry lookup table for the nibble.
    1701   1.1.1.4  mrg #ifdef NOT_ISA_TARGET_32BIT
    1702       1.1  mrg FUNC_START clzsi2
    1703   1.1.1.9  mrg 	movs	r1, #28
    1704   1.1.1.9  mrg 	movs	r3, #1
    1705   1.1.1.9  mrg 	lsls	r3, r3, #16
    1706       1.1  mrg 	cmp	r0, r3 /* 0x10000 */
    1707       1.1  mrg 	bcc	2f
    1708   1.1.1.9  mrg 	lsrs	r0, r0, #16
    1709   1.1.1.9  mrg 	subs	r1, r1, #16
    1710   1.1.1.9  mrg 2:	lsrs	r3, r3, #8
    1711       1.1  mrg 	cmp	r0, r3 /* #0x100 */
    1712       1.1  mrg 	bcc	2f
    1713   1.1.1.9  mrg 	lsrs	r0, r0, #8
    1714   1.1.1.9  mrg 	subs	r1, r1, #8
    1715   1.1.1.9  mrg 2:	lsrs	r3, r3, #4
    1716       1.1  mrg 	cmp	r0, r3 /* #0x10 */
    1717       1.1  mrg 	bcc	2f
    1718   1.1.1.9  mrg 	lsrs	r0, r0, #4
    1719   1.1.1.9  mrg 	subs	r1, r1, #4
    1720       1.1  mrg 2:	adr	r2, 1f
    1721       1.1  mrg 	ldrb	r0, [r2, r0]
    1722   1.1.1.9  mrg 	adds	r0, r0, r1
    1723       1.1  mrg 	bx lr
    1724       1.1  mrg .align 2
    1725       1.1  mrg 1:
    1726       1.1  mrg .byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
    1727       1.1  mrg 	FUNC_END clzsi2
    1728       1.1  mrg #else
    1729       1.1  mrg ARM_FUNC_START clzsi2
    1730   1.1.1.8  mrg # if defined (__ARM_FEATURE_CLZ)
                        @ Hardware CLZ: single instruction.
    1731       1.1  mrg 	clz	r0, r0
    1732       1.1  mrg 	RET
    1733       1.1  mrg # else
    1734       1.1  mrg 	mov	r1, #28
    1735       1.1  mrg 	cmp	r0, #0x10000
    1736       1.1  mrg 	do_it	cs, t
    1737       1.1  mrg 	movcs	r0, r0, lsr #16
    1738       1.1  mrg 	subcs	r1, r1, #16
    1739       1.1  mrg 	cmp	r0, #0x100
    1740       1.1  mrg 	do_it	cs, t
    1741       1.1  mrg 	movcs	r0, r0, lsr #8
    1742       1.1  mrg 	subcs	r1, r1, #8
    1743       1.1  mrg 	cmp	r0, #0x10
    1744       1.1  mrg 	do_it	cs, t
    1745       1.1  mrg 	movcs	r0, r0, lsr #4
    1746       1.1  mrg 	subcs	r1, r1, #4
    1747       1.1  mrg 	adr	r2, 1f
    1748       1.1  mrg 	ldrb	r0, [r2, r0]
    1749       1.1  mrg 	add	r0, r0, r1
    1750       1.1  mrg 	RET
    1751       1.1  mrg .align 2
    1752       1.1  mrg 1:
    1753       1.1  mrg .byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
    1754   1.1.1.8  mrg # endif /* !defined (__ARM_FEATURE_CLZ) */
    1755       1.1  mrg 	FUNC_END clzsi2
    1756       1.1  mrg #endif
    1757       1.1  mrg #endif /* L_clzsi2 */
   1758       1.1  mrg 
    1759       1.1  mrg #ifdef L_clzdi2
                        @ int __clzdi2 (unsigned long long x): 64-bit count leading
                        @ zeros.  xxh/xxl are the endian-dependent high/low word
                        @ aliases.  Without hardware CLZ this delegates to __clzsi2
                        @ on the appropriate word, adding 32 when the high word is 0.
                        @ Note the __ARMEB__ split only affects which argument
                        @ register needs moving into r0 before the call.
    1760   1.1.1.8  mrg #if !defined (__ARM_FEATURE_CLZ)
    1761       1.1  mrg 
    1762   1.1.1.4  mrg # ifdef NOT_ISA_TARGET_32BIT
    1763       1.1  mrg FUNC_START clzdi2
    1764       1.1  mrg 	push	{r4, lr}
    1765   1.1.1.9  mrg 	cmp	xxh, #0
    1766   1.1.1.9  mrg 	bne	1f
    1767   1.1.1.9  mrg #  ifdef __ARMEB__
    1768   1.1.1.9  mrg 	movs	r0, xxl
    1769   1.1.1.9  mrg 	bl	__clzsi2
    1770   1.1.1.9  mrg 	adds	r0, r0, #32
    1771   1.1.1.9  mrg 	b 2f
    1772   1.1.1.9  mrg 1:
    1773   1.1.1.9  mrg 	bl	__clzsi2
    1774   1.1.1.9  mrg #  else
    1775   1.1.1.9  mrg 	bl	__clzsi2
    1776   1.1.1.9  mrg 	adds	r0, r0, #32
    1777   1.1.1.9  mrg 	b 2f
    1778   1.1.1.9  mrg 1:
    1779   1.1.1.9  mrg 	movs	r0, xxh
    1780   1.1.1.9  mrg 	bl	__clzsi2
    1781   1.1.1.9  mrg #  endif
    1782   1.1.1.9  mrg 2:
    1783   1.1.1.9  mrg 	pop	{r4, pc}
    1784   1.1.1.9  mrg # else /* NOT_ISA_TARGET_32BIT */
    1785       1.1  mrg ARM_FUNC_START clzdi2
    1786       1.1  mrg 	do_push	{r4, lr}
    1787       1.1  mrg 	cmp	xxh, #0
    1788       1.1  mrg 	bne	1f
    1789   1.1.1.9  mrg #  ifdef __ARMEB__
    1790       1.1  mrg 	mov	r0, xxl
    1791       1.1  mrg 	bl	__clzsi2
    1792       1.1  mrg 	add	r0, r0, #32
    1793       1.1  mrg 	b 2f
    1794       1.1  mrg 1:
    1795       1.1  mrg 	bl	__clzsi2
    1796   1.1.1.9  mrg #  else
    1797       1.1  mrg 	bl	__clzsi2
    1798       1.1  mrg 	add	r0, r0, #32
    1799       1.1  mrg 	b 2f
    1800       1.1  mrg 1:
    1801       1.1  mrg 	mov	r0, xxh
    1802       1.1  mrg 	bl	__clzsi2
    1803   1.1.1.9  mrg #  endif
    1804       1.1  mrg 2:
    1805       1.1  mrg 	RETLDM	r4
    1806       1.1  mrg 	FUNC_END clzdi2
    1807   1.1.1.9  mrg # endif /* NOT_ISA_TARGET_32BIT */
    1808       1.1  mrg 
    1809   1.1.1.8  mrg #else /* defined (__ARM_FEATURE_CLZ) */
    1810       1.1  mrg 
                        @ Hardware CLZ: conditionally count whichever word is
                        @ significant; add 32 when only the low word is non-zero.
    1811       1.1  mrg ARM_FUNC_START clzdi2
    1812       1.1  mrg 	cmp	xxh, #0
    1813       1.1  mrg 	do_it	eq, et
    1814       1.1  mrg 	clzeq	r0, xxl
    1815       1.1  mrg 	clzne	r0, xxh
    1816       1.1  mrg 	addeq	r0, r0, #32
    1817       1.1  mrg 	RET
    1818       1.1  mrg 	FUNC_END clzdi2
    1819       1.1  mrg 
    1820       1.1  mrg #endif
    1821       1.1  mrg #endif /* L_clzdi2 */
   1822       1.1  mrg 
    1823       1.1  mrg #ifdef L_ctzsi2
                        @ int __ctzsi2 (unsigned x): count trailing zero bits.
                        @ Isolates the lowest set bit with x & -x, then reuses the
                        @ __clzsi2-style nibble search with an adjusted table so the
                        @ result is the bit index.  x == 0 is undefined for ctz
                        @ (these paths yield -1).
    1824   1.1.1.4  mrg #ifdef NOT_ISA_TARGET_32BIT
    1825       1.1  mrg FUNC_START ctzsi2
    1826   1.1.1.9  mrg 	negs	r1, r0
    1827   1.1.1.9  mrg 	ands	r0, r0, r1
    1828   1.1.1.9  mrg 	movs	r1, #28
    1829   1.1.1.9  mrg 	movs	r3, #1
    1830   1.1.1.9  mrg 	lsls	r3, r3, #16
    1831       1.1  mrg 	cmp	r0, r3 /* 0x10000 */
    1832       1.1  mrg 	bcc	2f
    1833   1.1.1.9  mrg 	lsrs	r0, r0, #16
    1834   1.1.1.9  mrg 	subs	r1, r1, #16
    1835   1.1.1.9  mrg 2:	lsrs	r3, r3, #8
    1836       1.1  mrg 	cmp	r0, r3 /* #0x100 */
    1837       1.1  mrg 	bcc	2f
    1838   1.1.1.9  mrg 	lsrs	r0, r0, #8
    1839   1.1.1.9  mrg 	subs	r1, r1, #8
    1840   1.1.1.9  mrg 2:	lsrs	r3, r3, #4
    1841       1.1  mrg 	cmp	r0, r3 /* #0x10 */
    1842       1.1  mrg 	bcc	2f
    1843   1.1.1.9  mrg 	lsrs	r0, r0, #4
    1844   1.1.1.9  mrg 	subs	r1, r1, #4
    1845       1.1  mrg 2:	adr	r2, 1f
    1846       1.1  mrg 	ldrb	r0, [r2, r0]
    1847   1.1.1.9  mrg 	subs	r0, r0, r1
    1848       1.1  mrg 	bx lr
    1849       1.1  mrg .align 2
    1850       1.1  mrg 1:
    1851       1.1  mrg .byte	27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31
    1852       1.1  mrg 	FUNC_END ctzsi2
    1853       1.1  mrg #else
    1854       1.1  mrg ARM_FUNC_START ctzsi2
    1855       1.1  mrg 	rsb	r1, r0, #0
    1856       1.1  mrg 	and	r0, r0, r1
    1857   1.1.1.8  mrg # if defined (__ARM_FEATURE_CLZ)
                        @ With a single bit set, ctz(x) == 31 - clz(x).
    1858       1.1  mrg 	clz	r0, r0
    1859       1.1  mrg 	rsb	r0, r0, #31
    1860       1.1  mrg 	RET
    1861       1.1  mrg # else
    1862       1.1  mrg 	mov	r1, #28
    1863       1.1  mrg 	cmp	r0, #0x10000
    1864       1.1  mrg 	do_it	cs, t
    1865       1.1  mrg 	movcs	r0, r0, lsr #16
    1866       1.1  mrg 	subcs	r1, r1, #16
    1867       1.1  mrg 	cmp	r0, #0x100
    1868       1.1  mrg 	do_it	cs, t
    1869       1.1  mrg 	movcs	r0, r0, lsr #8
    1870       1.1  mrg 	subcs	r1, r1, #8
    1871       1.1  mrg 	cmp	r0, #0x10
    1872       1.1  mrg 	do_it	cs, t
    1873       1.1  mrg 	movcs	r0, r0, lsr #4
    1874       1.1  mrg 	subcs	r1, r1, #4
    1875       1.1  mrg 	adr	r2, 1f
    1876       1.1  mrg 	ldrb	r0, [r2, r0]
    1877       1.1  mrg 	sub	r0, r0, r1
    1878       1.1  mrg 	RET
    1879       1.1  mrg .align 2
    1880       1.1  mrg 1:
    1881       1.1  mrg .byte	27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31
    1882   1.1.1.8  mrg # endif /* !defined (__ARM_FEATURE_CLZ) */
    1883       1.1  mrg 	FUNC_END ctzsi2
    1884       1.1  mrg #endif
    1885       1.1  mrg #endif /* L_ctzsi2 */
   1886       1.1  mrg 
   1887       1.1  mrg /* ------------------------------------------------------------------------ */
   1888       1.1  mrg /* These next two sections are here despite the fact that they contain Thumb
   1889       1.1  mrg    assembler because their presence allows interworked code to be linked even
   1890       1.1  mrg    when the GCC library is this one.  */
   1891       1.1  mrg 
   1892       1.1  mrg /* Do not build the interworking functions when the target architecture does
   1893       1.1  mrg    not support Thumb instructions.  (This can be a multilib option).  */
   1894       1.1  mrg #if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__\
   1895       1.1  mrg       || defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
   1896   1.1.1.8  mrg       || __ARM_ARCH >= 6
   1897       1.1  mrg 
    1898       1.1  mrg #if defined L_call_via_rX
    1899       1.1  mrg 
    1900       1.1  mrg /* These labels & instructions are used by the Arm/Thumb interworking code.
    1901       1.1  mrg    The address of function to be called is loaded into a register and then
    1902       1.1  mrg    one of these labels is called via a BL instruction.  This puts the
    1903       1.1  mrg    return address into the link register with the bottom bit set, and the
    1904       1.1  mrg    code here switches to the correct mode before executing the function.  */
    1905       1.1  mrg 
    1906       1.1  mrg 	.text
    1907       1.1  mrg 	.align 0
    1908       1.1  mrg         .force_thumb
    1909       1.1  mrg 
                        @ Emits _call_via_<reg>: a Thumb stub that is just "BX reg";
                        @ BX honours the target address's bottom bit, so it enters
                        @ the callee in the right instruction set.  The NOP keeps the
                        @ stub size/alignment consistent.
    1910       1.1  mrg .macro call_via register
    1911       1.1  mrg 	THUMB_FUNC_START _call_via_\register
    1912       1.1  mrg 
    1913       1.1  mrg 	bx	\register
    1914       1.1  mrg 	nop
    1915       1.1  mrg 
    1916       1.1  mrg 	SIZE	(_call_via_\register)
    1917       1.1  mrg .endm
    1918       1.1  mrg 
                        @ One stub per register the compiler may hold a function
                        @ pointer in.
    1919       1.1  mrg 	call_via r0
    1920       1.1  mrg 	call_via r1
    1921       1.1  mrg 	call_via r2
    1922       1.1  mrg 	call_via r3
    1923       1.1  mrg 	call_via r4
    1924       1.1  mrg 	call_via r5
    1925       1.1  mrg 	call_via r6
    1926       1.1  mrg 	call_via r7
    1927       1.1  mrg 	call_via r8
    1928       1.1  mrg 	call_via r9
    1929       1.1  mrg 	call_via sl
    1930       1.1  mrg 	call_via fp
    1931       1.1  mrg 	call_via ip
    1932       1.1  mrg 	call_via sp
    1933       1.1  mrg 	call_via lr
    1934       1.1  mrg 
    1935       1.1  mrg #endif /* L_call_via_rX */
   1936       1.1  mrg 
   1937       1.1  mrg /* Don't bother with the old interworking routines for Thumb-2.  */
   1938       1.1  mrg /* ??? Maybe only omit these on "m" variants.  */
   1939   1.1.1.4  mrg #if !defined(__thumb2__) && __ARM_ARCH_ISA_ARM
   1940       1.1  mrg 
    1941       1.1  mrg #if defined L_interwork_call_via_rX
    1942       1.1  mrg 
    1943       1.1  mrg /* These labels & instructions are used by the Arm/Thumb interworking code,
    1944       1.1  mrg    when the target address is in an unknown instruction set.  The address
    1945       1.1  mrg    of function to be called is loaded into a register and then one of these
    1946       1.1  mrg    labels is called via a BL instruction.  This puts the return address
    1947       1.1  mrg    into the link register with the bottom bit set, and the code here
    1948       1.1  mrg    switches to the correct mode before executing the function.  Unfortunately
    1949       1.1  mrg    the target code cannot be relied upon to return via a BX instruction, so
    1950       1.1  mrg    instead we have to store the return address on the stack and allow the
    1951       1.1  mrg    called function to return here instead.  Upon return we recover the real
    1952       1.1  mrg    return address and use a BX to get back to Thumb mode.
    1953       1.1  mrg 
    1954       1.1  mrg    There are three variations of this code.  The first,
    1955       1.1  mrg    _interwork_call_via_rN(), will push the return address onto the
    1956       1.1  mrg    stack and pop it in _arm_return().  It should only be used if all
    1957       1.1  mrg    arguments are passed in registers.
    1958       1.1  mrg 
    1959       1.1  mrg    The second, _interwork_r7_call_via_rN(), instead stores the return
    1960       1.1  mrg    address at [r7, #-4].  It is the caller's responsibility to ensure
    1961       1.1  mrg    that this address is valid and contains no useful data.
    1962       1.1  mrg 
    1963       1.1  mrg    The third, _interwork_r11_call_via_rN(), works in the same way but
    1964       1.1  mrg    uses r11 instead of r7.  It is useful if the caller does not really
    1965       1.1  mrg    need a frame pointer.  */
    1966       1.1  mrg 
    1967       1.1  mrg 	.text
    1968       1.1  mrg 	.align 0
    1969       1.1  mrg 
                        @ _arm_return: common return trampoline for the push-to-stack
                        @ variant; pops the saved Thumb return address and BXes back.
    1970       1.1  mrg 	.code   32
    1971       1.1  mrg 	.globl _arm_return
    1972       1.1  mrg LSYM(Lstart_arm_return):
    1973       1.1  mrg 	cfi_start	LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
    1974       1.1  mrg 	cfi_push	0, 0xe, -0x8, 0x8
    1975       1.1  mrg 	nop	@ This nop is for the benefit of debuggers, so that
    1976       1.1  mrg 		@ backtraces will use the correct unwind information.
    1977       1.1  mrg _arm_return:
    1978       1.1  mrg 	RETLDM	unwind=LSYM(Lstart_arm_return)
    1979       1.1  mrg 	cfi_end	LSYM(Lend_arm_return)
    1980       1.1  mrg 
                        @ Frame-based return trampolines: reload the Thumb return
                        @ address from the caller-provided slot below r7/r11.
    1981       1.1  mrg 	.globl _arm_return_r7
    1982       1.1  mrg _arm_return_r7:
    1983       1.1  mrg 	ldr	lr, [r7, #-4]
    1984       1.1  mrg 	bx	lr
    1985       1.1  mrg 
    1986       1.1  mrg 	.globl _arm_return_r11
    1987       1.1  mrg _arm_return_r11:
    1988       1.1  mrg 	ldr	lr, [r11, #-4]
    1989       1.1  mrg 	bx	lr
    1990       1.1  mrg 
                        @ NOTE(review): the trailing "return" macro argument is never
                        @ referenced in the macro body.
    1991       1.1  mrg .macro interwork_with_frame frame, register, name, return
    1992       1.1  mrg 	.code	16
    1993       1.1  mrg 
    1994       1.1  mrg 	THUMB_FUNC_START \name
    1995       1.1  mrg 
    1996       1.1  mrg 	bx	pc
    1997       1.1  mrg 	nop
    1998       1.1  mrg 
                        @ Only divert the return path when the target is ARM code
                        @ (bottom bit of the address clear).
    1999       1.1  mrg 	.code	32
    2000       1.1  mrg 	tst	\register, #1
    2001       1.1  mrg 	streq	lr, [\frame, #-4]
    2002       1.1  mrg 	adreq	lr, _arm_return_\frame
    2003       1.1  mrg 	bx	\register
    2004       1.1  mrg 
    2005       1.1  mrg 	SIZE	(\name)
    2006       1.1  mrg .endm
    2007       1.1  mrg 
    2008       1.1  mrg .macro interwork register
    2009       1.1  mrg 	.code	16
    2010       1.1  mrg 
    2011       1.1  mrg 	THUMB_FUNC_START _interwork_call_via_\register
    2012       1.1  mrg 
    2013       1.1  mrg 	bx	pc
    2014       1.1  mrg 	nop
    2015       1.1  mrg 
    2016       1.1  mrg 	.code	32
    2017       1.1  mrg 	.globl LSYM(Lchange_\register)
    2018       1.1  mrg LSYM(Lchange_\register):
    2019       1.1  mrg 	tst	\register, #1
    2020       1.1  mrg 	streq	lr, [sp, #-8]!
    2021       1.1  mrg 	adreq	lr, _arm_return
    2022       1.1  mrg 	bx	\register
    2023       1.1  mrg 
    2024       1.1  mrg 	SIZE	(_interwork_call_via_\register)
    2025       1.1  mrg 
    2026       1.1  mrg 	interwork_with_frame r7,\register,_interwork_r7_call_via_\register
    2027       1.1  mrg 	interwork_with_frame r11,\register,_interwork_r11_call_via_\register
    2028       1.1  mrg .endm
    2029       1.1  mrg 
    2030       1.1  mrg 	interwork r0
    2031       1.1  mrg 	interwork r1
    2032       1.1  mrg 	interwork r2
    2033       1.1  mrg 	interwork r3
    2034       1.1  mrg 	interwork r4
    2035       1.1  mrg 	interwork r5
    2036       1.1  mrg 	interwork r6
    2037       1.1  mrg 	interwork r7
    2038       1.1  mrg 	interwork r8
    2039       1.1  mrg 	interwork r9
    2040       1.1  mrg 	interwork sl
    2041       1.1  mrg 	interwork fp
    2042       1.1  mrg 	interwork ip
    2043       1.1  mrg 	interwork sp
    2044       1.1  mrg 
    2045       1.1  mrg 	/* The LR case has to be handled a little differently...  */
    2046       1.1  mrg 	.code 16
    2047       1.1  mrg 
    2048       1.1  mrg 	THUMB_FUNC_START _interwork_call_via_lr
    2049       1.1  mrg 
    2050       1.1  mrg 	bx 	pc
    2051       1.1  mrg 	nop
    2052       1.1  mrg 
                        @ lr is both the call target and about to be overwritten with
                        @ the trampoline address, so stash the target in ip first.
    2053       1.1  mrg 	.code 32
    2054       1.1  mrg 	.globl .Lchange_lr
    2055       1.1  mrg .Lchange_lr:
    2056       1.1  mrg 	tst	lr, #1
    2057       1.1  mrg 	stmeqdb	r13!, {lr, pc}
    2058       1.1  mrg 	mov	ip, lr
    2059       1.1  mrg 	adreq	lr, _arm_return
    2060       1.1  mrg 	bx	ip
    2061       1.1  mrg 
    2062       1.1  mrg 	SIZE	(_interwork_call_via_lr)
    2063       1.1  mrg 
    2064       1.1  mrg #endif /* L_interwork_call_via_rX */
   2065       1.1  mrg #endif /* !__thumb2__ */
   2066       1.1  mrg 
   2067       1.1  mrg /* Functions to support compact pic switch tables in thumb1 state.
   2068       1.1  mrg    All these routines take an index into the table in r0.  The
   2069       1.1  mrg    table is at LR & ~1 (but this must be rounded up in the case
    2070       1.1  mrg    of 32-bit entries).  They are only permitted to clobber r12
   2071       1.1  mrg    and r14 and r0 must be preserved on exit.  */
    2072       1.1  mrg #ifdef L_thumb1_case_sqi
    2073       1.1  mrg 
                        @ __gnu_thumb1_case_sqi: Thumb-1 switch-table dispatch,
                        @ signed-byte entries.  In: r0 = case index, lr = address of
                        @ the table (bottom bit set).  Clears lr's bottom bit to get
                        @ the table base, loads the signed byte at [base + index],
                        @ and adds twice its value to lr.  r0 is preserved; r1 is
                        @ saved and restored.
    2074       1.1  mrg 	.text
    2075       1.1  mrg 	.align 0
    2076       1.1  mrg         .force_thumb
    2077       1.1  mrg 	.syntax unified
    2078       1.1  mrg 	THUMB_FUNC_START __gnu_thumb1_case_sqi
    2079       1.1  mrg 	push	{r1}
    2080       1.1  mrg 	mov	r1, lr
    2081       1.1  mrg 	lsrs	r1, r1, #1
    2082       1.1  mrg 	lsls	r1, r1, #1
    2083       1.1  mrg 	ldrsb	r1, [r1, r0]
    2084       1.1  mrg 	lsls	r1, r1, #1
    2085       1.1  mrg 	add	lr, lr, r1
    2086       1.1  mrg 	pop	{r1}
    2087       1.1  mrg 	bx	lr
    2088       1.1  mrg 	SIZE (__gnu_thumb1_case_sqi)
    2089       1.1  mrg #endif
   2090       1.1  mrg 
    2091       1.1  mrg #ifdef L_thumb1_case_uqi
    2092       1.1  mrg 
                        @ __gnu_thumb1_case_uqi: as __gnu_thumb1_case_sqi but with
                        @ unsigned-byte table entries (LDRB instead of LDRSB), so all
                        @ case offsets are forward.  r0 preserved; r1 saved/restored.
    2093       1.1  mrg 	.text
    2094       1.1  mrg 	.align 0
    2095       1.1  mrg         .force_thumb
    2096       1.1  mrg 	.syntax unified
    2097       1.1  mrg 	THUMB_FUNC_START __gnu_thumb1_case_uqi
    2098       1.1  mrg 	push	{r1}
    2099       1.1  mrg 	mov	r1, lr
    2100       1.1  mrg 	lsrs	r1, r1, #1
    2101       1.1  mrg 	lsls	r1, r1, #1
    2102       1.1  mrg 	ldrb	r1, [r1, r0]
    2103       1.1  mrg 	lsls	r1, r1, #1
    2104       1.1  mrg 	add	lr, lr, r1
    2105       1.1  mrg 	pop	{r1}
    2106       1.1  mrg 	bx	lr
    2107       1.1  mrg 	SIZE (__gnu_thumb1_case_uqi)
    2108       1.1  mrg #endif
   2109       1.1  mrg 
    2110       1.1  mrg #ifdef L_thumb1_case_shi
    2111       1.1  mrg 
                        @ __gnu_thumb1_case_shi: switch-table dispatch with signed
                        @ halfword entries.  The index in r0 is doubled to scale it
                        @ to 2-byte entries (r0 is saved and restored so the caller's
                        @ value is preserved).
    2112       1.1  mrg 	.text
    2113       1.1  mrg 	.align 0
    2114       1.1  mrg         .force_thumb
    2115       1.1  mrg 	.syntax unified
    2116       1.1  mrg 	THUMB_FUNC_START __gnu_thumb1_case_shi
    2117       1.1  mrg 	push	{r0, r1}
    2118       1.1  mrg 	mov	r1, lr
    2119       1.1  mrg 	lsrs	r1, r1, #1
    2120       1.1  mrg 	lsls	r0, r0, #1
    2121       1.1  mrg 	lsls	r1, r1, #1
    2122       1.1  mrg 	ldrsh	r1, [r1, r0]
    2123       1.1  mrg 	lsls	r1, r1, #1
    2124       1.1  mrg 	add	lr, lr, r1
    2125       1.1  mrg 	pop	{r0, r1}
    2126       1.1  mrg 	bx	lr
    2127       1.1  mrg 	SIZE (__gnu_thumb1_case_shi)
    2128       1.1  mrg #endif
   2129       1.1  mrg 
    2130       1.1  mrg #ifdef L_thumb1_case_uhi
    2131       1.1  mrg 
                        @ __gnu_thumb1_case_uhi: as __gnu_thumb1_case_shi but with
                        @ unsigned halfword entries (LDRH), so all case offsets are
                        @ forward.  r0 and r1 are saved and restored.
    2132       1.1  mrg 	.text
    2133       1.1  mrg 	.align 0
    2134       1.1  mrg         .force_thumb
    2135       1.1  mrg 	.syntax unified
    2136       1.1  mrg 	THUMB_FUNC_START __gnu_thumb1_case_uhi
    2137       1.1  mrg 	push	{r0, r1}
    2138       1.1  mrg 	mov	r1, lr
    2139       1.1  mrg 	lsrs	r1, r1, #1
    2140       1.1  mrg 	lsls	r0, r0, #1
    2141       1.1  mrg 	lsls	r1, r1, #1
    2142       1.1  mrg 	ldrh	r1, [r1, r0]
    2143       1.1  mrg 	lsls	r1, r1, #1
    2144       1.1  mrg 	add	lr, lr, r1
    2145       1.1  mrg 	pop	{r0, r1}
    2146       1.1  mrg 	bx	lr
    2147       1.1  mrg 	SIZE (__gnu_thumb1_case_uhi)
    2148       1.1  mrg #endif
   2149       1.1  mrg 
    2150  1.1.1.11  mrg #ifdef L_sync_none
    2151  1.1.1.11  mrg 	/* Null implementation of __sync_synchronize, for use when
    2152  1.1.1.11  mrg 	   it is known that the system is single threaded.  */
    2153  1.1.1.11  mrg 	.text
    2154  1.1.1.11  mrg 	.align 0
                        @ Intentionally a bare return: no barrier is emitted.
    2155  1.1.1.11  mrg 	FUNC_START sync_synchronize_none
    2156  1.1.1.11  mrg 	bx	lr
    2157  1.1.1.11  mrg 	FUNC_END sync_synchronize_none
    2158  1.1.1.11  mrg #endif
   2159  1.1.1.11  mrg 
    2160  1.1.1.11  mrg #ifdef L_sync_dmb
    2161  1.1.1.11  mrg 	/* Full memory barrier using DMB.  Requires Armv7 (all profiles)
    2162  1.1.1.11  mrg 	or armv6-m, or later.  */
    2163  1.1.1.11  mrg 	.text
    2164  1.1.1.11  mrg 	.align 0
                        @ Bump the assembler's target architecture so the DMB
                        @ encoding is accepted regardless of the multilib default.
    2165  1.1.1.11  mrg #if __ARM_ARCH_PROFILE == 'M'
    2166  1.1.1.11  mrg 	.arch armv6-m
    2167  1.1.1.11  mrg #else
    2168  1.1.1.11  mrg 	.arch armv7-a
    2169  1.1.1.11  mrg #endif
    2170  1.1.1.11  mrg 	FUNC_START sync_synchronize_dmb
    2171  1.1.1.11  mrg 	/* M-profile devices only support SY as the synchronization level,
    2172  1.1.1.11  mrg 	   but that's probably what we want here anyway.  */
    2173  1.1.1.11  mrg 	dmb
    2174  1.1.1.11  mrg 	RET
    2175  1.1.1.11  mrg 	FUNC_END sync_synchronize_dmb
    2176  1.1.1.11  mrg #endif
   2177  1.1.1.11  mrg 
   2178  1.1.1.11  mrg #ifdef L_sync_cp15dmb
   2179  1.1.1.11  mrg #ifndef NOT_ISA_TARGET_32BIT
   2180  1.1.1.11  mrg 	/* Implementation of DMB using CP15 operations.  This was first
   2181  1.1.1.11  mrg 	   defined in Armv6, but deprecated in Armv7 and can give
   2182  1.1.1.11  mrg 	   sub-optimal performance.  */
   2183  1.1.1.11  mrg 	.text
   2184  1.1.1.11  mrg 	.align 0
   2185  1.1.1.11  mrg 	ARM_FUNC_START sync_synchronize_cp15dmb
   2186  1.1.1.11  mrg 	mcr	p15, 0, r0, c7, c10, 5
   2187  1.1.1.11  mrg 	RET
   2188  1.1.1.11  mrg 	FUNC_END sync_synchronize_cp15dmb
   2189  1.1.1.11  mrg #endif
   2190  1.1.1.11  mrg #endif
   2191  1.1.1.11  mrg 
   2192  1.1.1.11  mrg #ifdef L_sync_synchronize
   2193  1.1.1.11  mrg 	/* Generic version of the synchronization primitive.  If we know
   2194  1.1.1.11  mrg 	   that DMB exists, then use it.  Otherwise, arrange for a link
   2195  1.1.1.11  mrg 	   time warning explaining how to pick a suitable alternative.
   2196  1.1.1.11  mrg 	   We choose not to use CP15DMB because it is performance
   2197  1.1.1.11  mrg 	   deprecated.  We only define this function if generating
   2198  1.1.1.11  mrg 	   ELF binaries as otherwise we can't rely on the warning being
   2199  1.1.1.11  mrg 	   generated.  */
   2200  1.1.1.11  mrg 
   2201  1.1.1.11  mrg #ifdef __ELF__
   2202  1.1.1.11  mrg 	.text
   2203  1.1.1.11  mrg 	.align 0
   2204  1.1.1.11  mrg 	FUNC_START sync_synchronize
   2205  1.1.1.11  mrg #if __ARM_ARCH >= 7 || __ARM_ARCH_PROFILE == 'M'
   2206  1.1.1.11  mrg 	dmb
   2207  1.1.1.11  mrg #endif
   2208  1.1.1.11  mrg 	RET
   2209  1.1.1.11  mrg 	FUNC_END sync_synchronize
   2210  1.1.1.11  mrg #if !(__ARM_ARCH >= 7 || __ARM_ARCH_PROFILE == 'M')
   2211  1.1.1.11  mrg 	.section .gnu.warning.__sync_synchronize
   2212  1.1.1.11  mrg 	.align 0
   2213  1.1.1.11  mrg 	.ascii "This implementation of __sync_synchronize is a stub with "
   2214  1.1.1.11  mrg 	.ascii "no effect.  Relink with\n"
   2215  1.1.1.11  mrg 	.ascii "  -specs=sync-{none,dmb,cp15dmb}.specs\n"
   2216  1.1.1.11  mrg 	.ascii "to specify exactly which barrier format to use and avoid "
   2217  1.1.1.11  mrg 	.ascii "this warning\0"
   2218  1.1.1.11  mrg #endif
   2219  1.1.1.11  mrg #endif
   2220  1.1.1.11  mrg #endif
   2221  1.1.1.11  mrg 
   2222       1.1  mrg #ifdef L_thumb1_case_si
   2223       1.1  mrg 
   2224       1.1  mrg 	.text
   2225       1.1  mrg 	.align 0
   2226       1.1  mrg         .force_thumb
   2227       1.1  mrg 	.syntax unified
   2228       1.1  mrg 	THUMB_FUNC_START __gnu_thumb1_case_si
   2229       1.1  mrg 	push	{r0, r1}
   2230       1.1  mrg 	mov	r1, lr
   2231       1.1  mrg 	adds.n	r1, r1, #2	/* Align to word.  */
   2232       1.1  mrg 	lsrs	r1, r1, #2
   2233       1.1  mrg 	lsls	r0, r0, #2
   2234       1.1  mrg 	lsls	r1, r1, #2
   2235       1.1  mrg 	ldr	r0, [r1, r0]
   2236       1.1  mrg 	adds	r0, r0, r1
   2237       1.1  mrg 	mov	lr, r0
   2238       1.1  mrg 	pop	{r0, r1}
   2239       1.1  mrg 	mov	pc, lr		/* We know we were called from thumb code.  */
   2240       1.1  mrg 	SIZE (__gnu_thumb1_case_si)
   2241       1.1  mrg #endif
   2242       1.1  mrg 
   2243       1.1  mrg #endif /* Arch supports thumb.  */
   2244       1.1  mrg 
   2245   1.1.1.3  mrg .macro CFI_START_FUNCTION
   2246   1.1.1.3  mrg 	.cfi_startproc
   2247   1.1.1.3  mrg 	.cfi_remember_state
   2248   1.1.1.3  mrg .endm
   2249   1.1.1.3  mrg 
   2250   1.1.1.3  mrg .macro CFI_END_FUNCTION
   2251   1.1.1.3  mrg 	.cfi_restore_state
   2252   1.1.1.3  mrg 	.cfi_endproc
   2253   1.1.1.3  mrg .endm
   2254   1.1.1.3  mrg 
   2255       1.1  mrg #ifndef __symbian__
   2256   1.1.1.8  mrg /* The condition here must match the one in gcc/config/arm/elf.h and
   2257   1.1.1.8  mrg    libgcc/config/arm/t-elf.  */
   2258   1.1.1.4  mrg #ifndef NOT_ISA_TARGET_32BIT
   2259       1.1  mrg #include "ieee754-df.S"
   2260       1.1  mrg #include "ieee754-sf.S"
   2261       1.1  mrg #include "bpabi.S"
   2262   1.1.1.4  mrg #else /* NOT_ISA_TARGET_32BIT */
   2263       1.1  mrg #include "bpabi-v6m.S"
   2264   1.1.1.4  mrg #endif /* NOT_ISA_TARGET_32BIT */
   2265       1.1  mrg #endif /* !__symbian__ */
   2266