Home | History | Annotate | Line # | Download | only in arm
lib1funcs.S, revision 1.3.4.2 — CVS-annotated view of GCC's ARM libgcc assembly support routines
      1      1.1       mrg @ libgcc routines for ARM cpu.
      2      1.1       mrg @ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
      3      1.1       mrg 
      4  1.3.4.2    martin /* Copyright (C) 1995-2017 Free Software Foundation, Inc.
      5      1.1       mrg 
      6      1.1       mrg This file is free software; you can redistribute it and/or modify it
      7      1.1       mrg under the terms of the GNU General Public License as published by the
      8      1.1       mrg Free Software Foundation; either version 3, or (at your option) any
      9      1.1       mrg later version.
     10      1.1       mrg 
     11      1.1       mrg This file is distributed in the hope that it will be useful, but
     12      1.1       mrg WITHOUT ANY WARRANTY; without even the implied warranty of
     13      1.1       mrg MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14      1.1       mrg General Public License for more details.
     15      1.1       mrg 
     16      1.1       mrg Under Section 7 of GPL version 3, you are granted additional
     17      1.1       mrg permissions described in the GCC Runtime Library Exception, version
     18      1.1       mrg 3.1, as published by the Free Software Foundation.
     19      1.1       mrg 
     20      1.1       mrg You should have received a copy of the GNU General Public License and
     21      1.1       mrg a copy of the GCC Runtime Library Exception along with this program;
     22      1.1       mrg see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     23      1.1       mrg <http://www.gnu.org/licenses/>.  */
     24      1.1       mrg 
/* An executable stack is *not* required for these functions.  */
/* Without this note, the GNU linker conservatively assumes the object
   may need an executable stack, so emit an explicit empty note.  */
#if defined(__ELF__) && defined(__linux__)
.section .note.GNU-stack,"",%progbits
.previous
#endif  /* __ELF__ and __linux__ */

#ifdef __ARM_EABI__
/* Some attributes that are common to all routines in this file.  */
	/* Tag_ABI_align_needed: This code does not require 8-byte
	   alignment from the caller.  */
	/* .eabi_attribute 24, 0  -- default setting.  */
	/* Tag_ABI_align_preserved: This code preserves 8-byte
	   alignment in any callee.  */
	.eabi_attribute 25, 1
#endif /* __ARM_EABI__ */
/* ------------------------------------------------------------------------ */
     41      1.1       mrg 
/* We need to know what prefix to add to function names.  */

#ifndef __USER_LABEL_PREFIX__
#error  __USER_LABEL_PREFIX__ not defined
#endif

/* ANSI concatenation macros.  */

#define CONCAT1(a, b) CONCAT2(a, b)
#define CONCAT2(a, b) a ## b

/* Use the right prefix for global labels.  */

#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)

#ifdef __ELF__
#ifdef __thumb__
#define __PLT__  /* Not supported in Thumb assembler (for now).  */
#elif defined __vxworks && !defined __PIC__
#define __PLT__ /* Not supported by the kernel loader.  */
#else
#define __PLT__ (PLT)
#endif
/* TYPE/SIZE emit ELF symbol metadata; LSYM prefixes a '.' so the label
   stays local (not emitted into the symbol table).  On non-ELF targets
   all three collapse to nothing / the bare name.  */
#define TYPE(x) .type SYM(x),function
#define SIZE(x) .size SYM(x), . - SYM(x)
#define LSYM(x) .x
#else
#define __PLT__
#define TYPE(x)
#define SIZE(x)
#define LSYM(x) x
#endif

/* Function end macros.  Variants for interworking.  */

/* Collapse the many per-variant __ARM_ARCH_N*__ predefines into a single
   numeric __ARM_ARCH__ used for feature tests below.  */
#if defined(__ARM_ARCH_2__)
# define __ARM_ARCH__ 2
#endif

#if defined(__ARM_ARCH_3__)
# define __ARM_ARCH__ 3
#endif

#if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
	|| defined(__ARM_ARCH_4T__)
/* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
   long multiply instructions.  That includes v3M.  */
# define __ARM_ARCH__ 4
#endif

#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
	|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
	|| defined(__ARM_ARCH_5TEJ__)
# define __ARM_ARCH__ 5
#endif

#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
	|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
	|| defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
	|| defined(__ARM_ARCH_6M__)
# define __ARM_ARCH__ 6
#endif

#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
	|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
	|| defined(__ARM_ARCH_7EM__)
# define __ARM_ARCH__ 7
#endif

#if defined(__ARM_ARCH_8A__) || defined(__ARM_ARCH_8M_BASE__) \
	|| defined(__ARM_ARCH_8M_MAIN__)
# define __ARM_ARCH__ 8
#endif

#ifndef __ARM_ARCH__
#error Unable to determine architecture.
#endif

/* There are times when we might prefer Thumb1 code even if ARM code is
   permitted, for example, the code might be smaller, or there might be
   interworking problems with switching to ARM state if interworking is
   disabled.  */
#if (defined(__thumb__)			\
     && !defined(__thumb2__)		\
     && (!defined(__THUMB_INTERWORK__)	\
	 || defined (__OPTIMIZE_SIZE__)	\
	 || !__ARM_ARCH_ISA_ARM))
# define __prefer_thumb__
#endif

/* Thumb1-only target (e.g. v6-M/v8-M Baseline): no 32-bit ISA available.  */
#if !__ARM_ARCH_ISA_ARM && __ARM_ARCH_ISA_THUMB == 1
#define NOT_ISA_TARGET_32BIT 1
#endif
    135  1.3.4.2    martin 
/* How to return from a function call depends on the architecture variant.  */

#if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)

/* bx honours the Thumb bit in lr, so it is safe for interworking.  */
# define RET		bx	lr
# define RETc(x)	bx##x	lr

/* Special precautions for interworking on armv4t.  */
# if (__ARM_ARCH__ == 4)

/* Always use bx, not ldr pc.  */
#  if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
#    define __INTERWORKING__
#   endif /* __THUMB__ || __THUMB_INTERWORK__ */

/* Include thumb stub before arm mode code.  */
#  if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
#   define __INTERWORKING_STUBS__
#  endif /* __thumb__ && !__THUMB_INTERWORK__ */

#endif /* __ARM_ARCH == 4 */

#else

/* Pre-v4T: no bx instruction; a plain pc move is the only return form.  */
# define RET		mov	pc, lr
# define RETc(x)	mov##x	pc, lr

#endif
    164      1.1       mrg 
/* Append a hand-built DWARF CFI record to .debug_frame marking register
   \reg as restored at \advance bytes past the FDE start, and resetting
   the CFA offset to \cfa_offset.  Used because these routines emit their
   .debug_frame by hand (see cfi_start) rather than via .cfi_* directives.  */
.macro	cfi_pop		advance, reg, cfa_offset
#ifdef __ELF__
	.pushsection	.debug_frame
	.byte	0x4		/* DW_CFA_advance_loc4 */
	.4byte	\advance
	.byte	(0xc0 | \reg)	/* DW_CFA_restore */
	.byte	0xe		/* DW_CFA_def_cfa_offset */
	.uleb128 \cfa_offset
	.popsection
#endif
.endm
/* Append a DWARF CFI record to .debug_frame noting that register \reg was
   saved at stack offset \offset (negative, in bytes; encoded in units of
   the -4 data alignment factor) at \advance bytes past the FDE start, and
   that the CFA offset is now \cfa_offset.  Counterpart of cfi_pop.  */
.macro	cfi_push	advance, reg, offset, cfa_offset
#ifdef __ELF__
	.pushsection	.debug_frame
	.byte	0x4		/* DW_CFA_advance_loc4 */
	.4byte	\advance
	.byte	(0x80 | \reg)	/* DW_CFA_offset */
	.uleb128 (\offset / -4)
	.byte	0xe		/* DW_CFA_def_cfa_offset */
	.uleb128 \cfa_offset
	.popsection
#endif
.endm
/* Open a hand-written .debug_frame CIE + FDE covering the code from
   \start_label to \end_label.  Must be paired with cfi_end, which closes
   the FDE and defines \end_label.  cfi_push/cfi_pop append records in
   between.  */
.macro cfi_start	start_label, end_label
#ifdef __ELF__
	.pushsection	.debug_frame
LSYM(Lstart_frame):
	.4byte	LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
LSYM(Lstart_cie):
        .4byte	0xffffffff	@ CIE Identifier Tag
        .byte	0x1	@ CIE Version
        .ascii	"\0"	@ CIE Augmentation
        .uleb128 0x1	@ CIE Code Alignment Factor
        .sleb128 -4	@ CIE Data Alignment Factor
        .byte	0xe	@ CIE RA Column
        .byte	0xc	@ DW_CFA_def_cfa
        .uleb128 0xd	@ CFA register: r13 (sp)
        .uleb128 0x0	@ CFA offset: 0

	.align 2
LSYM(Lend_cie):
	.4byte	LSYM(Lend_fde)-LSYM(Lstart_fde)	@ FDE Length
LSYM(Lstart_fde):
	.4byte	LSYM(Lstart_frame)	@ FDE CIE offset
	.4byte	\start_label	@ FDE initial location
	.4byte	\end_label-\start_label	@ FDE address range
	.popsection
#endif
.endm
/* Close the FDE opened by cfi_start and define \end_label at the current
   code address, fixing the FDE's address range.  */
.macro cfi_end	end_label
#ifdef __ELF__
	.pushsection	.debug_frame
	.align	2
LSYM(Lend_fde):
	.popsection
\end_label:
#endif
.endm
    223      1.1       mrg 
/* Don't pass dirn, it's there just to get token pasting right.  */

/* Return from a function, optionally restoring \regs from the stack.
   With __INTERWORKING__ the return address is reloaded into lr and the
   return is a bx (safe for ARM<->Thumb transitions); otherwise the pc is
   popped directly.  \unwind, when given, is the function's CFI start
   label so the lr restore can be recorded via cfi_pop.  */
.macro	RETLDM	regs=, cond=, unwind=, dirn=ia
#if defined (__INTERWORKING__)
	.ifc "\regs",""
	ldr\cond	lr, [sp], #8
	.else
# if defined(__thumb2__)
	pop\cond	{\regs, lr}
# else
	ldm\cond\dirn	sp!, {\regs, lr}
# endif
	.endif
	.ifnc "\unwind", ""
	/* Mark LR as restored.  */
97:	cfi_pop 97b - \unwind, 0xe, 0x0
	.endif
	bx\cond	lr
#else
	/* Caller is responsible for providing IT instruction.  */
	.ifc "\regs",""
	ldr\cond	pc, [sp], #8
	.else
# if defined(__thumb2__)
	pop\cond	{\regs, pc}
# else
	ldm\cond\dirn	sp!, {\regs, pc}
# endif
	.endif
#endif
.endm
    255      1.1       mrg 
/* The Unified assembly syntax allows the same code to be assembled for both
   ARM and Thumb-2.  However this is only supported by recent gas, so define
   a set of macros to allow ARM code on older assemblers.
   do_it   - emit an IT instruction on Thumb-2, nothing on ARM.
   shift1  - shift by a register amount (separate insn class on Thumb-2).
   do_push/do_pop - push/pop register lists.
   COND    - paste a condition suffix in the position each ISA expects.  */
#if defined(__thumb2__)
.macro do_it cond, suffix=""
	it\suffix	\cond
.endm
.macro shift1 op, arg0, arg1, arg2
	\op	\arg0, \arg1, \arg2
.endm
#define do_push	push
#define do_pop	pop
#define COND(op1, op2, cond) op1 ## op2 ## cond
/* Perform an arithmetic operation with a variable shift operand.  This
   requires two instructions and a scratch register on Thumb-2.  */
.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
	\shiftop \tmp, \src2, \shiftreg
	\name \dest, \src1, \tmp
.endm
#else
.macro do_it cond, suffix=""
.endm
.macro shift1 op, arg0, arg1, arg2
	mov	\arg0, \arg1, \op \arg2
.endm
#define do_push	stmfd sp!,
#define do_pop	ldmfd sp!,
#define COND(op1, op2, cond) op1 ## cond ## op2
/* ARM mode has a free shifted-operand form, so this is one instruction.  */
.macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
	\name \dest, \src1, \src2, \shiftop \shiftreg
.endm
#endif
    288      1.1       mrg 
/* Division-by-zero handler, ARM-mode flavour.  EABI: saturate r0 (all-ones
   for unsigned; INT_MAX/INT_MIN by sign of the dividend for signed) and
   tail-call __aeabi_idiv0.  Non-EABI: call __div0 and return 0.  */
#ifdef __ARM_EABI__
.macro ARM_LDIV0 name signed
	cmp	r0, #0
	.ifc	\signed, unsigned
	movne	r0, #0xffffffff
	.else
	movgt	r0, #0x7fffffff
	movlt	r0, #0x80000000
	.endif
	b	SYM (__aeabi_idiv0) __PLT__
.endm
#else
.macro ARM_LDIV0 name signed
	str	lr, [sp, #-8]!
98:	cfi_push 98b - __\name, 0xe, -0x8, 0x8
	bl	SYM (__div0) __PLT__
	mov	r0, #0			@ About as wrong as it could be.
	RETLDM	unwind=98b
.endm
#endif
    309      1.1       mrg 
    310      1.1       mrg 
/* Division-by-zero handler, Thumb flavour.  Same contract as ARM_LDIV0,
   with three EABI sub-cases: Thumb1-only targets call __aeabi_idiv0 with
   r0 = 0; Thumb-2 saturates r0 inline; Thumb1-with-ARM targets switch to
   ARM mode via "bx pc" to use conditional moves.  */
#ifdef __ARM_EABI__
.macro THUMB_LDIV0 name signed
#ifdef NOT_ISA_TARGET_32BIT

	push	{r0, lr}
	mov	r0, #0
	bl	SYM(__aeabi_idiv0)
	@ We know we are not on armv4t, so pop pc is safe.
	pop	{r1, pc}

#elif defined(__thumb2__)
	.syntax unified
	.ifc \signed, unsigned
	cbz	r0, 1f
	mov	r0, #0xffffffff
1:
	.else
	cmp	r0, #0
	do_it	gt
	movgt	r0, #0x7fffffff
	do_it	lt
	movlt	r0, #0x80000000
	.endif
	b.w	SYM(__aeabi_idiv0) __PLT__
#else
	.align	2
	bx	pc
	nop
	.arm
	cmp	r0, #0
	.ifc	\signed, unsigned
	movne	r0, #0xffffffff
	.else
	movgt	r0, #0x7fffffff
	movlt	r0, #0x80000000
	.endif
	b	SYM(__aeabi_idiv0) __PLT__
	.thumb
#endif
.endm
#else
.macro THUMB_LDIV0 name signed
	push	{ r1, lr }
98:	cfi_push 98b - __\name, 0xe, -0x4, 0x8
	bl	SYM (__div0)
	mov	r0, #0			@ About as wrong as it could be.
#if defined (__INTERWORKING__)
	pop	{ r1, r2 }
	bx	r2
#else
	pop	{ r1, pc }
#endif
.endm
#endif
    365      1.1       mrg 
/* Emit the ELF size directive closing function __\name.  */
.macro FUNC_END name
	SIZE (__\name)
.endm

/* Close a division routine: emit the shared Ldiv0 divide-by-zero tail
   (Thumb or ARM flavour as appropriate), wrapped in hand-written CFI,
   then end the function.  */
.macro DIV_FUNC_END name signed
	cfi_start	__\name, LSYM(Lend_div0)
LSYM(Ldiv0):
#ifdef __thumb__
	THUMB_LDIV0 \name \signed
#else
	ARM_LDIV0 \name \signed
#endif
	cfi_end	LSYM(Lend_div0)
	FUNC_END \name
.endm

/* Declare and open a global Thumb function \name (no __ prefix added).  */
.macro THUMB_FUNC_START name
	.globl	SYM (\name)
	TYPE	(\name)
	.thumb_func
SYM (\name):
.endm
    388      1.1       mrg 
/* Function start macros.  Variants for ARM and Thumb.  */

#ifdef __thumb__
#define THUMB_FUNC .thumb_func
#define THUMB_CODE .force_thumb
# if defined(__thumb2__)
#define THUMB_SYNTAX .syntax divided
# else
#define THUMB_SYNTAX
# endif
#else
#define THUMB_FUNC
#define THUMB_CODE
#define THUMB_SYNTAX
#endif

/* Open global function __\name in the mode this file is being built for.
   sp_section=function_section places it in its own .text.__\name section
   (for --gc-sections style linking); otherwise it goes in plain .text.  */
.macro FUNC_START name sp_section=
  .ifc \sp_section, function_section
	.section	.text.__\name,"ax",%progbits
  .else
	.text
  .endif
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	THUMB_CODE
	THUMB_FUNC
	THUMB_SYNTAX
SYM (__\name):
.endm

/* Open a typed symbol \name (no globl, no __ prefix) in the current section.  */
.macro ARM_SYM_START name
       TYPE (\name)
       .align 0
SYM (\name):
.endm

/* Emit the size directive closing symbol \name.  */
.macro SYM_END name
       SIZE (\name)
.endm
    429      1.3       mrg 
/* Special function that will always be coded in ARM assembly, even if
   in Thumb-only compilation.  */

#if defined(__thumb2__)

/* For Thumb-2 we build everything in thumb mode.  */
.macro ARM_FUNC_START name sp_section=
       FUNC_START \name \sp_section
       .syntax unified
.endm
#define EQUIV .thumb_set
.macro  ARM_CALL name
	bl	__\name
.endm

#elif defined(__INTERWORKING_STUBS__)

/* Thumb entry point followed by a mode switch to ARM ("bx pc" branches
   to the aligned ARM code that follows).  */
.macro	ARM_FUNC_START name
	FUNC_START \name
	bx	pc
	nop
	.arm
/* A hook to tell gdb that we've switched to ARM mode.  Also used to call
   directly from other local arm routines.  */
_L__\name:
.endm
#define EQUIV .thumb_set
/* Branch directly to a function declared with ARM_FUNC_START.
   Must be called in arm mode.  */
.macro  ARM_CALL name
	bl	_L__\name
.endm

#else /* !(__INTERWORKING_STUBS__ || __thumb2__) */

#ifdef NOT_ISA_TARGET_32BIT
/* No ARM ISA at all: ARM_FUNC_START is deliberately left undefined.  */
#define EQUIV .thumb_set
#else
.macro	ARM_FUNC_START name sp_section=
  .ifc \sp_section, function_section
	.section	.text.__\name,"ax",%progbits
  .else
	.text
  .endif
	.globl SYM (__\name)
	TYPE (__\name)
	.align 0
	.arm
SYM (__\name):
.endm
#define EQUIV .set
.macro  ARM_CALL name
	bl	__\name
.endm
#endif

#endif

/* Make __\new a global alias for __\old; .thumb_set also marks the alias
   as a Thumb entry point.  */
.macro	FUNC_ALIAS new old
	.globl	SYM (__\new)
#if defined (__thumb__)
	.thumb_set	SYM (__\new), SYM (__\old)
#else
	.set	SYM (__\new), SYM (__\old)
#endif
.endm

#ifndef NOT_ISA_TARGET_32BIT
/* Alias for functions opened with ARM_FUNC_START; also aliases the
   internal ARM-mode _L__ label when interworking stubs are in use.  */
.macro	ARM_FUNC_ALIAS new old
	.globl	SYM (__\new)
	EQUIV	SYM (__\new), SYM (__\old)
#if defined(__INTERWORKING_STUBS__)
	.set	SYM (_L__\new), SYM (_L__\old)
#endif
.endm
#endif
    506      1.1       mrg 
/* Names for the halves of the two 64-bit register-pair arguments
   (xx in r0/r1, yy in r2/r3); which register is the high word depends
   on endianness.  */
#ifdef __ARMEB__
#define xxh r0
#define xxl r1
#define yyh r2
#define yyl r3
#else
#define xxh r1
#define xxl r0
#define yyh r3
#define yyl r2
#endif

#ifdef __ARM_EABI__
/* Mark __\name as a weak symbol so it can be overridden.  */
.macro	WEAK name
	.weak SYM (__\name)
.endm
#endif

#ifdef __thumb__
/* Register aliases.  */

work		.req	r4	@ XXXX is this safe ?
dividend	.req	r0
divisor		.req	r1
overdone	.req	r2
result		.req	r2
curbit		.req	r3
#endif
#if 0
ip		.req	r12
sp		.req	r13
lr		.req	r14
pc		.req	r15
#endif
    541      1.1       mrg 
/* ------------------------------------------------------------------------ */
/*		Bodies of the division and modulo routines.		    */
/* ------------------------------------------------------------------------ */
/* Unsigned shift-and-subtract division core.  On exit \result holds the
   quotient and \dividend the remainder; \divisor and \curbit are clobbered.
   Three strategies: v5+ speed (clz + computed jump into an unrolled 32-step
   table), v5+ size (clz to pre-align, then the generic loop), and pre-v5
   (shift-up search for alignment, then the generic 4-bits-per-iteration
   loop).  */
.macro ARM_DIV_BODY dividend, divisor, result, curbit

#if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)

#if defined (__thumb2__)
	@ curbit = 31 - (leading zeros of divisor - leading zeros of
	@ dividend), i.e. how many of the 32 unrolled steps to skip.
	clz	\curbit, \dividend
	clz	\result, \divisor
	sub	\curbit, \result, \curbit
	rsb	\curbit, \curbit, #31
	adr	\result, 1f
	@ Each unrolled step below is padded to 16 bytes (lsl #4).
	add	\curbit, \result, \curbit, lsl #4
	mov	\result, #0
	mov	pc, \curbit
.p2align 3
1:
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	@ Carry from the cmp is the next quotient bit (adc shifts it in).
	cmp.w	\dividend, \divisor, lsl #shift
	nop.n
	adc.w	\result, \result, \result
	it	cs
	subcs.w	\dividend, \dividend, \divisor, lsl #shift
	.endr
#else
	clz	\curbit, \dividend
	clz	\result, \divisor
	sub	\curbit, \result, \curbit
	rsbs	\curbit, \curbit, #31
	@ ARM steps are 3 insns (12 bytes): offset = curbit*3*4.
	addne	\curbit, \curbit, \curbit, lsl #1
	mov	\result, #0
	addne	pc, pc, \curbit, lsl #2
	nop
	.set	shift, 32
	.rept	32
	.set	shift, shift - 1
	cmp	\dividend, \divisor, lsl #shift
	adc	\result, \result, \result
	subcs	\dividend, \dividend, \divisor, lsl #shift
	.endr
#endif

#else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
#if __ARM_ARCH__ >= 5

	@ Align divisor's top bit with dividend's using clz, with curbit
	@ tracking the quotient bit value of the aligned divisor.
	clz	\curbit, \divisor
	clz	\result, \dividend
	sub	\result, \curbit, \result
	mov	\curbit, #1
	mov	\divisor, \divisor, lsl \result
	mov	\curbit, \curbit, lsl \result
	mov	\result, #0

#else /* __ARM_ARCH__ < 5 */

	@ Initially shift the divisor left 3 bits if possible,
	@ set curbit accordingly.  This allows for curbit to be located
	@ at the left end of each 4-bit nibbles in the division loop
	@ to save one loop in most cases.
	tst	\divisor, #0xe0000000
	moveq	\divisor, \divisor, lsl #3
	moveq	\curbit, #8
	movne	\curbit, #1

	@ Unless the divisor is very big, shift it up in multiples of
	@ four bits, since this is the amount of unwinding in the main
	@ division loop.  Continue shifting until the divisor is
	@ larger than the dividend.
1:	cmp	\divisor, #0x10000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #4
	movlo	\curbit, \curbit, lsl #4
	blo	1b

	@ For very big divisors, we must shift it a bit at a time, or
	@ we will be in danger of overflowing.
1:	cmp	\divisor, #0x80000000
	cmplo	\divisor, \dividend
	movlo	\divisor, \divisor, lsl #1
	movlo	\curbit, \curbit, lsl #1
	blo	1b

	mov	\result, #0

#endif /* __ARM_ARCH__ < 5 */

	@ Division loop: handles 4 quotient bits per iteration, shifting
	@ divisor/curbit right between the unrolled sub-steps.
1:	cmp	\dividend, \divisor
	do_it	hs, t
	subhs	\dividend, \dividend, \divisor
	orrhs	\result,   \result,   \curbit
	cmp	\dividend, \divisor,  lsr #1
	do_it	hs, t
	subhs	\dividend, \dividend, \divisor, lsr #1
	orrhs	\result,   \result,   \curbit,  lsr #1
	cmp	\dividend, \divisor,  lsr #2
	do_it	hs, t
	subhs	\dividend, \dividend, \divisor, lsr #2
	orrhs	\result,   \result,   \curbit,  lsr #2
	cmp	\dividend, \divisor,  lsr #3
	do_it	hs, t
	subhs	\dividend, \dividend, \divisor, lsr #3
	orrhs	\result,   \result,   \curbit,  lsr #3
	cmp	\dividend, #0			@ Early termination?
	do_it	ne, t
	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
	movne	\divisor,  \divisor, lsr #4
	bne	1b

#endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */

.endm
    657      1.1       mrg /* ------------------------------------------------------------------------ */
     658      1.1       mrg .macro ARM_DIV2_ORDER divisor, order
                            	@ Compute \order = bit index of the set bit of \divisor, so that
                            	@ a division by \divisor reduces to a right shift by \order.
                            	@ NOTE(review): callers in this file only use this after testing
                            	@ "tst divisor, divisor-1", i.e. \divisor is a power of two.
     659      1.1       mrg 
     660      1.1       mrg #if __ARM_ARCH__ >= 5
     661      1.1       mrg 
     662      1.1       mrg 	clz	\order, \divisor
     663      1.1       mrg 	rsb	\order, \order, #31
     664      1.1       mrg 
     665      1.1       mrg #else
     666      1.1       mrg 
                            	@ No clz before ARMv5: binary-search the set bit, halving the
                            	@ range while accumulating the shift count in \order.
     667      1.1       mrg 	cmp	\divisor, #(1 << 16)
     668      1.1       mrg 	movhs	\divisor, \divisor, lsr #16
     669      1.1       mrg 	movhs	\order, #16
     670      1.1       mrg 	movlo	\order, #0
     671      1.1       mrg 
     672      1.1       mrg 	cmp	\divisor, #(1 << 8)
     673      1.1       mrg 	movhs	\divisor, \divisor, lsr #8
     674      1.1       mrg 	addhs	\order, \order, #8
     675      1.1       mrg 
     676      1.1       mrg 	cmp	\divisor, #(1 << 4)
     677      1.1       mrg 	movhs	\divisor, \divisor, lsr #4
     678      1.1       mrg 	addhs	\order, \order, #4
     679      1.1       mrg 
                            	@ For a power-of-two \divisor the reductions above leave 1, 2, 4
                            	@ or 8:  8 -> +3 (hi), else add \divisor >> 1 (4->2, 2->1, 1->0).
     680      1.1       mrg 	cmp	\divisor, #(1 << 2)
     681      1.1       mrg 	addhi	\order, \order, #3
     682      1.1       mrg 	addls	\order, \order, \divisor, lsr #1
     683      1.1       mrg 
     684      1.1       mrg #endif
     685      1.1       mrg 
     686      1.1       mrg .endm
    687      1.1       mrg /* ------------------------------------------------------------------------ */
     688      1.1       mrg .macro ARM_MOD_BODY dividend, divisor, order, spare
                            	@ Unsigned modulus core: leaves \dividend mod \divisor in
                            	@ \dividend.  \order and \spare are scratch.  Callers have
                            	@ already rejected a zero divisor.
     689      1.1       mrg 
     690      1.1       mrg #if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
     691      1.1       mrg 
                            	@ max_shift = clz(divisor) - clz(dividend); jump into the
                            	@ unrolled loop below so the first step uses that shift.
     692      1.1       mrg 	clz	\order, \divisor
     693      1.1       mrg 	clz	\spare, \dividend
     694      1.1       mrg 	sub	\order, \order, \spare
     695      1.1       mrg 	rsbs	\order, \order, #31
                            	@ Skip (31 - max_shift) steps: each step is two 4-byte insns
                            	@ (hence lsl #3), and reading pc here yields . + 8.
     696      1.1       mrg 	addne	pc, pc, \order, lsl #3
     697      1.1       mrg 	nop
     698      1.1       mrg 	.set	shift, 32
     699      1.1       mrg 	.rept	32
     700      1.1       mrg 	.set	shift, shift - 1
     701      1.1       mrg 	cmp	\dividend, \divisor, lsl #shift
     702      1.1       mrg 	subcs	\dividend, \dividend, \divisor, lsl #shift
     703      1.1       mrg 	.endr
     704      1.1       mrg 
     705      1.1       mrg #else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
     706      1.1       mrg #if __ARM_ARCH__ >= 5
     707      1.1       mrg 
     708      1.1       mrg 	clz	\order, \divisor
     709      1.1       mrg 	clz	\spare, \dividend
     710      1.1       mrg 	sub	\order, \order, \spare
     711      1.1       mrg 	mov	\divisor, \divisor, lsl \order
     712      1.1       mrg 
     713      1.1       mrg #else /* __ARM_ARCH__ < 5 */
     714      1.1       mrg 
     715      1.1       mrg 	mov	\order, #0
     716      1.1       mrg 
     717      1.1       mrg 	@ Unless the divisor is very big, shift it up in multiples of
     718      1.1       mrg 	@ four bits, since this is the amount of unwinding in the main
     719      1.1       mrg 	@ division loop.  Continue shifting until the divisor is
     720      1.1       mrg 	@ larger than the dividend.
     721      1.1       mrg 1:	cmp	\divisor, #0x10000000
     722      1.1       mrg 	cmplo	\divisor, \dividend
     723      1.1       mrg 	movlo	\divisor, \divisor, lsl #4
     724      1.1       mrg 	addlo	\order, \order, #4
     725      1.1       mrg 	blo	1b
     726      1.1       mrg 
     727      1.1       mrg 	@ For very big divisors, we must shift it a bit at a time, or
     728      1.1       mrg 	@ we will be in danger of overflowing.
     729      1.1       mrg 1:	cmp	\divisor, #0x80000000
     730      1.1       mrg 	cmplo	\divisor, \dividend
     731      1.1       mrg 	movlo	\divisor, \divisor, lsl #1
     732      1.1       mrg 	addlo	\order, \order, #1
     733      1.1       mrg 	blo	1b
     734      1.1       mrg 
     735      1.1       mrg #endif /* __ARM_ARCH__ < 5 */
     736      1.1       mrg 
     737      1.1       mrg 	@ Perform all needed subtractions to keep only the remainder.
     738      1.1       mrg 	@ Do comparisons in batches of 4 first.
     739      1.1       mrg 	subs	\order, \order, #3		@ yes, 3 is intended here
     740      1.1       mrg 	blt	2f
     741      1.1       mrg 
     742      1.1       mrg 1:	cmp	\dividend, \divisor
     743      1.1       mrg 	subhs	\dividend, \dividend, \divisor
     744      1.1       mrg 	cmp	\dividend, \divisor,  lsr #1
     745      1.1       mrg 	subhs	\dividend, \dividend, \divisor, lsr #1
     746      1.1       mrg 	cmp	\dividend, \divisor,  lsr #2
     747      1.1       mrg 	subhs	\dividend, \dividend, \divisor, lsr #2
     748      1.1       mrg 	cmp	\dividend, \divisor,  lsr #3
     749      1.1       mrg 	subhs	\dividend, \dividend, \divisor, lsr #3
     750      1.1       mrg 	cmp	\dividend, #1
     751      1.1       mrg 	mov	\divisor, \divisor, lsr #4
     752      1.1       mrg 	subges	\order, \order, #4
     753      1.1       mrg 	bge	1b
     754      1.1       mrg 
     755      1.1       mrg 	tst	\order, #3
     756      1.1       mrg 	teqne	\dividend, #0
     757      1.1       mrg 	beq	5f
     758      1.1       mrg 
     759      1.1       mrg 	@ Either 1, 2 or 3 comparison/subtractions are left.
     760      1.1       mrg 2:	cmn	\order, #2
     761      1.1       mrg 	blt	4f
     762      1.1       mrg 	beq	3f
     763      1.1       mrg 	cmp	\dividend, \divisor
     764      1.1       mrg 	subhs	\dividend, \dividend, \divisor
     765      1.1       mrg 	mov	\divisor,  \divisor,  lsr #1
     766      1.1       mrg 3:	cmp	\dividend, \divisor
     767      1.1       mrg 	subhs	\dividend, \dividend, \divisor
     768      1.1       mrg 	mov	\divisor,  \divisor,  lsr #1
     769      1.1       mrg 4:	cmp	\dividend, \divisor
     770      1.1       mrg 	subhs	\dividend, \dividend, \divisor
     771      1.1       mrg 5:
     772      1.1       mrg 
     773      1.1       mrg #endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
     774      1.1       mrg 
     775      1.1       mrg .endm
    776      1.1       mrg /* ------------------------------------------------------------------------ */
     777      1.1       mrg .macro THUMB_DIV_MOD_BODY modulo
                            	@ Unsigned divide/modulus core for Thumb-1.  Uses the register
                            	@ aliases dividend, divisor, result, curbit, work and overdone
                            	@ (defined elsewhere in this file).  If \modulo is non-zero the
                            	@ remainder is produced in dividend, with ip/overdone used to
                            	@ undo over-subtractions from the final pass; otherwise quotient
                            	@ bits are accumulated in result.
     778      1.1       mrg 	@ Load the constant 0x10000000 into our work register.
     779      1.1       mrg 	mov	work, #1
     780      1.1       mrg 	lsl	work, #28
     781      1.1       mrg LSYM(Loop1):
     782      1.1       mrg 	@ Unless the divisor is very big, shift it up in multiples of
     783      1.1       mrg 	@ four bits, since this is the amount of unwinding in the main
     784      1.1       mrg 	@ division loop.  Continue shifting until the divisor is
     785      1.1       mrg 	@ larger than the dividend.
     786      1.1       mrg 	cmp	divisor, work
     787      1.1       mrg 	bhs	LSYM(Lbignum)
     788      1.1       mrg 	cmp	divisor, dividend
     789      1.1       mrg 	bhs	LSYM(Lbignum)
     790      1.1       mrg 	lsl	divisor, #4
     791      1.1       mrg 	lsl	curbit,  #4
     792      1.1       mrg 	b	LSYM(Loop1)
     793      1.1       mrg LSYM(Lbignum):
     794      1.1       mrg 	@ Set work to 0x80000000
     795      1.1       mrg 	lsl	work, #3
     796      1.1       mrg LSYM(Loop2):
     797      1.1       mrg 	@ For very big divisors, we must shift it a bit at a time, or
     798      1.1       mrg 	@ we will be in danger of overflowing.
     799      1.1       mrg 	cmp	divisor, work
     800      1.1       mrg 	bhs	LSYM(Loop3)
     801      1.1       mrg 	cmp	divisor, dividend
     802      1.1       mrg 	bhs	LSYM(Loop3)
     803      1.1       mrg 	lsl	divisor, #1
     804      1.1       mrg 	lsl	curbit,  #1
     805      1.1       mrg 	b	LSYM(Loop2)
     806      1.1       mrg LSYM(Loop3):
     807      1.1       mrg 	@ Test for possible subtractions ...
     808      1.1       mrg   .if \modulo
     809      1.1       mrg 	@ ... On the final pass, this may subtract too much from the dividend,
     810      1.1       mrg 	@ so keep track of which subtractions are done so we can fix them up
     811      1.1       mrg 	@ afterwards.
     812      1.1       mrg 	mov	overdone, #0
     813      1.1       mrg 	cmp	dividend, divisor
     814      1.1       mrg 	blo	LSYM(Lover1)
     815      1.1       mrg 	sub	dividend, dividend, divisor
     816      1.1       mrg LSYM(Lover1):
     817      1.1       mrg 	lsr	work, divisor, #1
     818      1.1       mrg 	cmp	dividend, work
     819      1.1       mrg 	blo	LSYM(Lover2)
     820      1.1       mrg 	sub	dividend, dividend, work
                            	@ Record "curbit ror #1" in overdone (ip preserves curbit).
     821      1.1       mrg 	mov	ip, curbit
     822      1.1       mrg 	mov	work, #1
     823      1.1       mrg 	ror	curbit, work
     824      1.1       mrg 	orr	overdone, curbit
     825      1.1       mrg 	mov	curbit, ip
     826      1.1       mrg LSYM(Lover2):
     827      1.1       mrg 	lsr	work, divisor, #2
     828      1.1       mrg 	cmp	dividend, work
     829      1.1       mrg 	blo	LSYM(Lover3)
     830      1.1       mrg 	sub	dividend, dividend, work
     831      1.1       mrg 	mov	ip, curbit
     832      1.1       mrg 	mov	work, #2
     833      1.1       mrg 	ror	curbit, work
     834      1.1       mrg 	orr	overdone, curbit
     835      1.1       mrg 	mov	curbit, ip
     836      1.1       mrg LSYM(Lover3):
     837      1.1       mrg 	lsr	work, divisor, #3
     838      1.1       mrg 	cmp	dividend, work
     839      1.1       mrg 	blo	LSYM(Lover4)
     840      1.1       mrg 	sub	dividend, dividend, work
     841      1.1       mrg 	mov	ip, curbit
     842      1.1       mrg 	mov	work, #3
     843      1.1       mrg 	ror	curbit, work
     844      1.1       mrg 	orr	overdone, curbit
     845      1.1       mrg 	mov	curbit, ip
     846      1.1       mrg LSYM(Lover4):
     847      1.1       mrg 	mov	ip, curbit
     848      1.1       mrg   .else
     849      1.1       mrg 	@ ... and note which bits are done in the result.  On the final pass,
     850      1.1       mrg 	@ this may subtract too much from the dividend, but the result will be ok,
     851      1.1       mrg 	@ since the "bit" will have been shifted out at the bottom.
     852      1.1       mrg 	cmp	dividend, divisor
     853      1.1       mrg 	blo	LSYM(Lover1)
     854      1.1       mrg 	sub	dividend, dividend, divisor
     855      1.1       mrg 	orr	result, result, curbit
     856      1.1       mrg LSYM(Lover1):
     857      1.1       mrg 	lsr	work, divisor, #1
     858      1.1       mrg 	cmp	dividend, work
     859      1.1       mrg 	blo	LSYM(Lover2)
     860      1.1       mrg 	sub	dividend, dividend, work
     861      1.1       mrg 	lsr	work, curbit, #1
     862      1.1       mrg 	orr	result, work
     863      1.1       mrg LSYM(Lover2):
     864      1.1       mrg 	lsr	work, divisor, #2
     865      1.1       mrg 	cmp	dividend, work
     866      1.1       mrg 	blo	LSYM(Lover3)
     867      1.1       mrg 	sub	dividend, dividend, work
     868      1.1       mrg 	lsr	work, curbit, #2
     869      1.1       mrg 	orr	result, work
     870      1.1       mrg LSYM(Lover3):
     871      1.1       mrg 	lsr	work, divisor, #3
     872      1.1       mrg 	cmp	dividend, work
     873      1.1       mrg 	blo	LSYM(Lover4)
     874      1.1       mrg 	sub	dividend, dividend, work
     875      1.1       mrg 	lsr	work, curbit, #3
     876      1.1       mrg 	orr	result, work
     877      1.1       mrg LSYM(Lover4):
     878      1.1       mrg   .endif
     879      1.1       mrg 
     880      1.1       mrg 	cmp	dividend, #0			@ Early termination?
     881      1.1       mrg 	beq	LSYM(Lover5)
     882      1.1       mrg 	lsr	curbit,  #4			@ No, any more bits to do?
     883      1.1       mrg 	beq	LSYM(Lover5)
     884      1.1       mrg 	lsr	divisor, #4
     885      1.1       mrg 	b	LSYM(Loop3)
     886      1.1       mrg LSYM(Lover5):
     887      1.1       mrg   .if \modulo
     888      1.1       mrg 	@ Any subtractions that we should not have done will be recorded in
     889      1.1       mrg 	@ the top three bits of "overdone".  Exactly which were not needed
     890      1.1       mrg 	@ are governed by the position of the bit, stored in ip.
     891      1.1       mrg 	mov	work, #0xe
     892      1.1       mrg 	lsl	work, #28
     893      1.1       mrg 	and	overdone, work
     894      1.1       mrg 	beq	LSYM(Lgot_result)
     895      1.1       mrg 
     896      1.1       mrg 	@ If we terminated early, because dividend became zero, then the
     897      1.1       mrg 	@ bit in ip will not be in the bottom nibble, and we should not
     898      1.1       mrg 	@ perform the additions below.  We must test for this though
     899      1.1       mrg 	@ (rather relying upon the TSTs to prevent the additions) since
     900      1.1       mrg 	@ the bit in ip could be in the top two bits which might then match
     901      1.1       mrg 	@ with one of the smaller RORs.
     902      1.1       mrg 	mov	curbit, ip
     903      1.1       mrg 	mov	work, #0x7
     904      1.1       mrg 	tst	curbit, work
     905      1.1       mrg 	beq	LSYM(Lgot_result)
     906      1.1       mrg 
                            	@ Undo each recorded over-subtraction by adding back the
                            	@ matching divisor fraction.
     907      1.1       mrg 	mov	curbit, ip
     908      1.1       mrg 	mov	work, #3
     909      1.1       mrg 	ror	curbit, work
     910      1.1       mrg 	tst	overdone, curbit
     911      1.1       mrg 	beq	LSYM(Lover6)
     912      1.1       mrg 	lsr	work, divisor, #3
     913      1.1       mrg 	add	dividend, work
     914      1.1       mrg LSYM(Lover6):
     915      1.1       mrg 	mov	curbit, ip
     916      1.1       mrg 	mov	work, #2
     917      1.1       mrg 	ror	curbit, work
     918      1.1       mrg 	tst	overdone, curbit
     919      1.1       mrg 	beq	LSYM(Lover7)
     920      1.1       mrg 	lsr	work, divisor, #2
     921      1.1       mrg 	add	dividend, work
     922      1.1       mrg LSYM(Lover7):
     923      1.1       mrg 	mov	curbit, ip
     924      1.1       mrg 	mov	work, #1
     925      1.1       mrg 	ror	curbit, work
     926      1.1       mrg 	tst	overdone, curbit
     927      1.1       mrg 	beq	LSYM(Lgot_result)
     928      1.1       mrg 	lsr	work, divisor, #1
     929      1.1       mrg 	add	dividend, work
     930      1.1       mrg   .endif
     931      1.1       mrg LSYM(Lgot_result):
     932  1.3.4.2    martin .endm
    933  1.3.4.2    martin 
    934  1.3.4.2    martin /* If performance is preferred, the following functions are provided.  */
    935  1.3.4.2    martin #if defined(__prefer_thumb__) && !defined(__OPTIMIZE_SIZE__)
    936  1.3.4.2    martin 
     937  1.3.4.2    martin /* Branch to div(n): jump to label if curbit is lower than the divisor.  */
     938  1.3.4.2    martin .macro BranchToDiv n, label
                            	@ Dispatch helper: branch to \label when (dividend >> \n) is
                            	@ below the divisor, i.e. no quotient bit at position \n or
                            	@ above.  Clobbers curbit and the flags.
     939  1.3.4.2    martin 	lsr	curbit, dividend, \n
     940  1.3.4.2    martin 	cmp	curbit, divisor
     941  1.3.4.2    martin 	blo	\label
     942  1.3.4.2    martin .endm
    943  1.3.4.2    martin 
     944  1.3.4.2    martin /* Body of div(n).  Compare the dividend shifted right by n bits with
     945  1.3.4.2    martin    the divisor, and update the dividend with the subtraction result.  */
     946  1.3.4.2    martin .macro DoDiv n
                            	@ One restoring-division step for quotient bit \n.  Clobbers
                            	@ curbit and the flags.
     947  1.3.4.2    martin 	lsr	curbit, dividend, \n
     948  1.3.4.2    martin 	cmp	curbit, divisor
     949  1.3.4.2    martin 	bcc	1f
     950  1.3.4.2    martin 	lsl	curbit, divisor, \n
     951  1.3.4.2    martin 	sub	dividend, dividend, curbit
     952  1.3.4.2    martin 
                            	@ C holds the quotient bit: 0 from the cmp on the skip path, 1
                            	@ from the sub (no borrow) on the subtract path; adc shifts it
                            	@ into the bottom of result.
     953  1.3.4.2    martin 1:	adc	result, result
     954  1.3.4.2    martin .endm
    955  1.3.4.2    martin 
    956  1.3.4.2    martin /* The body of division with positive divisor.  Unless the divisor is very
    957  1.3.4.2    martin    big, shift it up in multiples of four bits, since this is the amount of
    958  1.3.4.2    martin    unwinding in the main division loop.  Continue shifting until the divisor
    959  1.3.4.2    martin    is larger than the dividend.  */
     960  1.3.4.2    martin .macro THUMB1_Div_Positive
                            	@ Unsigned Thumb-1 division for non-negative operands.  On
                            	@ return the dividend register holds the quotient and the
                            	@ divisor register the remainder; a zero divisor escapes to
                            	@ LSYM(Ldiv0) through Ldivbyzero_waypoint.
     961  1.3.4.2    martin 	mov	result, #0
     962  1.3.4.2    martin 	BranchToDiv #1, LSYM(Lthumb1_div1)
     963  1.3.4.2    martin 	BranchToDiv #4, LSYM(Lthumb1_div4)
     964  1.3.4.2    martin 	BranchToDiv #8, LSYM(Lthumb1_div8)
     965  1.3.4.2    martin 	BranchToDiv #12, LSYM(Lthumb1_div12)
     966  1.3.4.2    martin 	BranchToDiv #16, LSYM(Lthumb1_div16)
     967  1.3.4.2    martin LSYM(Lthumb1_div_large_positive):
                            	@ result = 0xff000000: pre-bias the quotient accumulator to
                            	@ compensate for the extra divisor shifts below.
     968  1.3.4.2    martin 	mov	result, #0xff
     969  1.3.4.2    martin 	lsl	divisor, divisor, #8
     970  1.3.4.2    martin 	rev	result, result
     971  1.3.4.2    martin 	lsr	curbit, dividend, #16
     972  1.3.4.2    martin 	cmp	curbit, divisor
     973  1.3.4.2    martin 	blo	1f
     974  1.3.4.2    martin 	asr	result, #8
     975  1.3.4.2    martin 	lsl	divisor, divisor, #8
     976  1.3.4.2    martin 	beq	LSYM(Ldivbyzero_waypoint)
     977  1.3.4.2    martin 
     978  1.3.4.2    martin 1:	lsr	curbit, dividend, #12
     979  1.3.4.2    martin 	cmp	curbit, divisor
     980  1.3.4.2    martin 	blo	LSYM(Lthumb1_div12)
     981  1.3.4.2    martin 	b	LSYM(Lthumb1_div16)
     982  1.3.4.2    martin LSYM(Lthumb1_div_loop):
     983  1.3.4.2    martin 	lsr	divisor, divisor, #8
     984  1.3.4.2    martin LSYM(Lthumb1_div16):
                            	@ "Dodiv" resolves to the DoDiv macro above (gas matches macro
                            	@ names case-insensitively).
     985  1.3.4.2    martin 	Dodiv	#15
     986  1.3.4.2    martin 	Dodiv	#14
     987  1.3.4.2    martin 	Dodiv	#13
     988  1.3.4.2    martin 	Dodiv	#12
     989  1.3.4.2    martin LSYM(Lthumb1_div12):
     990  1.3.4.2    martin 	Dodiv	#11
     991  1.3.4.2    martin 	Dodiv	#10
     992  1.3.4.2    martin 	Dodiv	#9
     993  1.3.4.2    martin 	Dodiv	#8
     994  1.3.4.2    martin 	bcs	LSYM(Lthumb1_div_loop)
     995  1.3.4.2    martin LSYM(Lthumb1_div8):
     996  1.3.4.2    martin 	Dodiv	#7
     997  1.3.4.2    martin 	Dodiv	#6
     998  1.3.4.2    martin 	Dodiv	#5
     999  1.3.4.2    martin LSYM(Lthumb1_div5):
    1000  1.3.4.2    martin 	Dodiv	#4
    1001  1.3.4.2    martin LSYM(Lthumb1_div4):
    1002  1.3.4.2    martin 	Dodiv	#3
    1003  1.3.4.2    martin LSYM(Lthumb1_div3):
    1004  1.3.4.2    martin 	Dodiv	#2
    1005  1.3.4.2    martin LSYM(Lthumb1_div2):
    1006  1.3.4.2    martin 	Dodiv	#1
    1007  1.3.4.2    martin LSYM(Lthumb1_div1):
                            	@ Final step: divisor becomes the remainder; adc folds the last
                            	@ quotient bit into result, which then replaces dividend.
    1008  1.3.4.2    martin 	sub	divisor, dividend, divisor
    1009  1.3.4.2    martin 	bcs	1f
    1010  1.3.4.2    martin 	cpy	divisor, dividend
    1011  1.3.4.2    martin 
    1012  1.3.4.2    martin 1:	adc	result, result
    1013  1.3.4.2    martin 	cpy	dividend, result
    1014  1.3.4.2    martin 	RET
    1015  1.3.4.2    martin 
    1016  1.3.4.2    martin LSYM(Ldivbyzero_waypoint):
    1017  1.3.4.2    martin 	b	LSYM(Ldiv0)
    1018  1.3.4.2    martin .endm
   1019  1.3.4.2    martin 
    1020  1.3.4.2    martin /* The body of division with a negative divisor.  Similar to
    1021  1.3.4.2    martin    THUMB1_Div_Positive except that the shift steps are in multiples
    1022  1.3.4.2    martin    of six bits.  */
    1023  1.3.4.2    martin .macro THUMB1_Div_Negative
                            	@ Signed Thumb-1 division when an operand may be negative:
                            	@ record the operand signs (combined in ip), divide the absolute
                            	@ values in 6-bit steps, then restore the signs of quotient and
                            	@ remainder at the end.
    1024  1.3.4.2    martin 	lsr	result, divisor, #31
    1025  1.3.4.2    martin 	beq	1f
    1026  1.3.4.2    martin 	neg	divisor, divisor
    1027  1.3.4.2    martin 
                            	@ asr #32 fills curbit with the sign of dividend and leaves its
                            	@ sign bit (bit 31) in C for the bcc below.
    1028  1.3.4.2    martin 1:	asr	curbit, dividend, #32
    1029  1.3.4.2    martin 	bcc	2f
    1030  1.3.4.2    martin 	neg	dividend, dividend
    1031  1.3.4.2    martin 
    1032  1.3.4.2    martin 2:	eor	curbit, result
    1033  1.3.4.2    martin 	mov	result, #0
    1034  1.3.4.2    martin 	cpy	ip, curbit
    1035  1.3.4.2    martin 	BranchToDiv #4, LSYM(Lthumb1_div_negative4)
    1036  1.3.4.2    martin 	BranchToDiv #8, LSYM(Lthumb1_div_negative8)
    1037  1.3.4.2    martin LSYM(Lthumb1_div_large):
                            	@ result = 0xfc000000: pre-bias the quotient accumulator to
                            	@ compensate for the extra 6-bit divisor shifts below.
    1038  1.3.4.2    martin 	mov	result, #0xfc
    1039  1.3.4.2    martin 	lsl	divisor, divisor, #6
    1040  1.3.4.2    martin 	rev	result, result
    1041  1.3.4.2    martin 	lsr	curbit, dividend, #8
    1042  1.3.4.2    martin 	cmp	curbit, divisor
    1043  1.3.4.2    martin 	blo	LSYM(Lthumb1_div_negative8)
    1044  1.3.4.2    martin 
    1045  1.3.4.2    martin 	lsl	divisor, divisor, #6
    1046  1.3.4.2    martin 	asr	result, result, #6
    1047  1.3.4.2    martin 	cmp	curbit, divisor
    1048  1.3.4.2    martin 	blo	LSYM(Lthumb1_div_negative8)
    1049  1.3.4.2    martin 
    1050  1.3.4.2    martin 	lsl	divisor, divisor, #6
    1051  1.3.4.2    martin 	asr	result, result, #6
    1052  1.3.4.2    martin 	cmp	curbit, divisor
    1053  1.3.4.2    martin 	blo	LSYM(Lthumb1_div_negative8)
    1054  1.3.4.2    martin 
    1055  1.3.4.2    martin 	lsl	divisor, divisor, #6
    1056  1.3.4.2    martin 	beq	LSYM(Ldivbyzero_negative)
    1057  1.3.4.2    martin 	asr	result, result, #6
    1058  1.3.4.2    martin 	b	LSYM(Lthumb1_div_negative8)
    1059  1.3.4.2    martin LSYM(Lthumb1_div_negative_loop):
    1060  1.3.4.2    martin 	lsr	divisor, divisor, #6
    1061  1.3.4.2    martin LSYM(Lthumb1_div_negative8):
    1062  1.3.4.2    martin 	DoDiv	#7
    1063  1.3.4.2    martin 	DoDiv	#6
    1064  1.3.4.2    martin 	DoDiv	#5
    1065  1.3.4.2    martin 	DoDiv	#4
    1066  1.3.4.2    martin LSYM(Lthumb1_div_negative4):
    1067  1.3.4.2    martin 	DoDiv	#3
    1068  1.3.4.2    martin 	DoDiv	#2
    1069  1.3.4.2    martin 	bcs	LSYM(Lthumb1_div_negative_loop)
    1070  1.3.4.2    martin 	DoDiv	#1
    1071  1.3.4.2    martin 	sub	divisor, dividend, divisor
    1072  1.3.4.2    martin 	bcs	1f
    1073  1.3.4.2    martin 	cpy	divisor, dividend
    1074  1.3.4.2    martin 
                            	@ Sign fix-up: the asr of the saved sign word drops the
                            	@ quotient-sign bit into C (negate dividend if set) and leaves
                            	@ the remainder sign in the sign bit (bpl test below).
    1075  1.3.4.2    martin 1:	cpy	curbit, ip
    1076  1.3.4.2    martin 	adc	result, result
    1077  1.3.4.2    martin 	asr	curbit, curbit, #1
    1078  1.3.4.2    martin 	cpy	dividend, result
    1079  1.3.4.2    martin 	bcc	2f
    1080  1.3.4.2    martin 	neg	dividend, dividend
    1081  1.3.4.2    martin 	cmp	curbit, #0
    1082  1.3.4.2    martin 
    1083  1.3.4.2    martin 2:	bpl	3f
    1084  1.3.4.2    martin 	neg	divisor, divisor
    1085  1.3.4.2    martin 
    1086  1.3.4.2    martin 3:	RET
    1087  1.3.4.2    martin 
    1088  1.3.4.2    martin LSYM(Ldivbyzero_negative):
    1089  1.3.4.2    martin 	cpy	curbit, ip
    1090  1.3.4.2    martin 	asr	curbit, curbit, #1
    1091  1.3.4.2    martin 	bcc	LSYM(Ldiv0)
    1092  1.3.4.2    martin 	neg	dividend, dividend
    1093  1.3.4.2    martin .endm
   1094  1.3.4.2    martin #endif /* ARM Thumb version.  */
   1095  1.3.4.2    martin 
   1096      1.1       mrg /* ------------------------------------------------------------------------ */
   1097      1.1       mrg /*		Start of the Real Functions				    */
   1098      1.1       mrg /* ------------------------------------------------------------------------ */
    1099      1.1       mrg #ifdef L_udivsi3
                            	@ __udivsi3 / __aeabi_uidiv and __aeabi_uidivmod.  Three
                            	@ build-time variants: Thumb-1 (size- or speed-optimised),
                            	@ hardware udiv (__ARM_ARCH_EXT_IDIV__), and ARM/Thumb-2
                            	@ software division via the macros above.
    1100      1.1       mrg 
    1101      1.1       mrg #if defined(__prefer_thumb__)
    1102      1.1       mrg 
    1103      1.1       mrg 	FUNC_START udivsi3
    1104      1.1       mrg 	FUNC_ALIAS aeabi_uidiv udivsi3
    1105  1.3.4.2    martin #if defined(__OPTIMIZE_SIZE__)
    1106      1.1       mrg 
    1107      1.1       mrg 	cmp	divisor, #0
    1108      1.1       mrg 	beq	LSYM(Ldiv0)
    1109      1.1       mrg LSYM(udivsi3_skip_div0_test):
    1110      1.1       mrg 	mov	curbit, #1
    1111      1.1       mrg 	mov	result, #0
    1112      1.1       mrg 
    1113      1.1       mrg 	push	{ work }
    1114      1.1       mrg 	cmp	dividend, divisor
    1115      1.1       mrg 	blo	LSYM(Lgot_result)
    1116      1.1       mrg 
    1117      1.1       mrg 	THUMB_DIV_MOD_BODY 0
    1118      1.1       mrg 
    1119      1.1       mrg 	mov	r0, result
    1120      1.1       mrg 	pop	{ work }
    1121      1.1       mrg 	RET
    1122      1.1       mrg 
    1123  1.3.4.2    martin /* Implementation of aeabi_uidiv for ARMv6m.  This version is only
    1124  1.3.4.2    martin    used in ARMv6-M when we need an efficient implementation.  */
    1125  1.3.4.2    martin #else
    1126  1.3.4.2    martin LSYM(udivsi3_skip_div0_test):
    1127  1.3.4.2    martin 	THUMB1_Div_Positive
    1128  1.3.4.2    martin 
    1129  1.3.4.2    martin #endif /* __OPTIMIZE_SIZE__ */
    1130  1.3.4.2    martin 
    1131      1.1       mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
    1132      1.1       mrg 
    1133      1.1       mrg 	ARM_FUNC_START udivsi3
    1134      1.1       mrg 	ARM_FUNC_ALIAS aeabi_uidiv udivsi3
    1135      1.1       mrg 
    1136      1.1       mrg 	cmp	r1, #0
    1137      1.1       mrg 	beq	LSYM(Ldiv0)
    1138      1.1       mrg 
    1139      1.1       mrg 	udiv	r0, r0, r1
    1140      1.1       mrg 	RET
    1141      1.1       mrg 
    1142      1.1       mrg #else /* ARM version/Thumb-2.  */
    1143      1.1       mrg 
    1144      1.1       mrg 	ARM_FUNC_START udivsi3
    1145      1.1       mrg 	ARM_FUNC_ALIAS aeabi_uidiv udivsi3
    1146      1.1       mrg 
    1147      1.1       mrg 	/* Note: if called via udivsi3_skip_div0_test, this will unnecessarily
    1148      1.1       mrg 	   check for division-by-zero a second time.  */
    1149      1.1       mrg LSYM(udivsi3_skip_div0_test):
                            	@ r2 = divisor - 1.  EQ: divisor == 1, return dividend as-is;
                            	@ CC (borrow): divisor == 0, raise division by zero.
    1150      1.1       mrg 	subs	r2, r1, #1
    1151      1.1       mrg 	do_it	eq
    1152      1.1       mrg 	RETc(eq)
    1153      1.1       mrg 	bcc	LSYM(Ldiv0)
    1154      1.1       mrg 	cmp	r0, r1
    1155      1.1       mrg 	bls	11f
                            	@ divisor & (divisor - 1) == 0: power of two, divide by shifting.
    1156      1.1       mrg 	tst	r1, r2
    1157      1.1       mrg 	beq	12f
    1158      1.1       mrg 
    1159      1.1       mrg 	ARM_DIV_BODY r0, r1, r2, r3
    1160      1.1       mrg 
    1161      1.1       mrg 	mov	r0, r2
    1162      1.1       mrg 	RET
    1163      1.1       mrg 
                            	@ dividend <= divisor: quotient is 1 (equal) or 0 (below).
    1164      1.1       mrg 11:	do_it	eq, e
    1165      1.1       mrg 	moveq	r0, #1
    1166      1.1       mrg 	movne	r0, #0
    1167      1.1       mrg 	RET
    1168      1.1       mrg 
    1169      1.1       mrg 12:	ARM_DIV2_ORDER r1, r2
    1170      1.1       mrg 
    1171      1.1       mrg 	mov	r0, r0, lsr r2
    1172      1.1       mrg 	RET
    1173      1.1       mrg 
    1174      1.1       mrg #endif /* ARM version */
    1175      1.1       mrg 
    1176      1.1       mrg 	DIV_FUNC_END udivsi3 unsigned
    1177      1.1       mrg 
    1178      1.1       mrg #if defined(__prefer_thumb__)
    1179      1.1       mrg FUNC_START aeabi_uidivmod
    1180      1.1       mrg 	cmp	r1, #0
    1181      1.1       mrg 	beq	LSYM(Ldiv0)
    1182  1.3.4.2    martin # if defined(__OPTIMIZE_SIZE__)
    1183      1.1       mrg 	push	{r0, r1, lr}
    1184      1.1       mrg 	bl	LSYM(udivsi3_skip_div0_test)
    1185      1.1       mrg 	POP	{r1, r2, r3}
    1186      1.1       mrg 	mul	r2, r0
    1187      1.1       mrg 	sub	r1, r1, r2
    1188      1.1       mrg 	bx	r3
    1189  1.3.4.2    martin # else
    1190  1.3.4.2    martin 	/* Both the quotient and remainder are calculated simultaneously
    1191  1.3.4.2    martin 	   in THUMB1_Div_Positive.  There is no need to calculate the
    1192  1.3.4.2    martin 	   remainder again here.  */
    1193  1.3.4.2    martin 	b	LSYM(udivsi3_skip_div0_test)
    1194  1.3.4.2    martin 	RET				@ not reached: the branch above is a tail call
    1195  1.3.4.2    martin # endif /* __OPTIMIZE_SIZE__ */
    1196  1.3.4.2    martin 
    1197      1.1       mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
    1198      1.1       mrg ARM_FUNC_START aeabi_uidivmod
    1199      1.1       mrg 	cmp	r1, #0
    1200      1.1       mrg 	beq	LSYM(Ldiv0)
    1201      1.1       mrg 	mov     r2, r0
    1202      1.1       mrg 	udiv	r0, r0, r1
                            	@ r1 = r2 - r0 * r1, i.e. the remainder.
    1203      1.1       mrg 	mls     r1, r0, r1, r2
    1204      1.1       mrg 	RET
    1205      1.1       mrg #else
    1206      1.1       mrg ARM_FUNC_START aeabi_uidivmod
    1207      1.1       mrg 	cmp	r1, #0
    1208      1.1       mrg 	beq	LSYM(Ldiv0)
    1209      1.1       mrg 	stmfd	sp!, { r0, r1, lr }
    1210      1.1       mrg 	bl	LSYM(udivsi3_skip_div0_test)
    1211      1.1       mrg 	ldmfd	sp!, { r1, r2, lr }
                            	@ remainder r1 = dividend - quotient * divisor.
    1212      1.1       mrg 	mul	r3, r2, r0
    1213      1.1       mrg 	sub	r1, r1, r3
    1214      1.1       mrg 	RET
    1215      1.1       mrg #endif
    1216      1.1       mrg 	FUNC_END aeabi_uidivmod
    1217      1.1       mrg 
    1218      1.1       mrg #endif /* L_udivsi3 */
   1219      1.1       mrg /* ------------------------------------------------------------------------ */
    1220      1.1       mrg #ifdef L_umodsi3
                            	@ __umodsi3: unsigned 32-bit modulus, r0 = r0 % r1.  Variants:
                            	@ hardware udiv+mls, Thumb-1 via THUMB_DIV_MOD_BODY, or ARM via
                            	@ ARM_MOD_BODY.
    1221      1.1       mrg 
    1222  1.3.4.2    martin #if defined(__ARM_ARCH_EXT_IDIV__) && __ARM_ARCH_ISA_THUMB != 1
    1223      1.1       mrg 
    1224      1.1       mrg 	ARM_FUNC_START umodsi3
    1225      1.1       mrg 
    1226      1.1       mrg 	cmp	r1, #0
    1227      1.1       mrg 	beq	LSYM(Ldiv0)
    1228      1.1       mrg 	udiv	r2, r0, r1
                            	@ r0 = r0 - r2 * r1, i.e. the remainder.
    1229      1.1       mrg 	mls     r0, r1, r2, r0
    1230      1.1       mrg 	RET
    1231      1.1       mrg 
    1232      1.1       mrg #elif defined(__thumb__)
    1233      1.1       mrg 
    1234      1.1       mrg 	FUNC_START umodsi3
    1235      1.1       mrg 
    1236      1.1       mrg 	cmp	divisor, #0
    1237      1.1       mrg 	beq	LSYM(Ldiv0)
    1238      1.1       mrg 	mov	curbit, #1
                            	@ dividend < divisor: it already is the remainder.
    1239      1.1       mrg 	cmp	dividend, divisor
    1240      1.1       mrg 	bhs	LSYM(Lover10)
    1241      1.1       mrg 	RET
    1242      1.1       mrg 
    1243      1.1       mrg LSYM(Lover10):
    1244      1.1       mrg 	push	{ work }
    1245      1.1       mrg 
    1246      1.1       mrg 	THUMB_DIV_MOD_BODY 1
    1247      1.1       mrg 
    1248      1.1       mrg 	pop	{ work }
    1249      1.1       mrg 	RET
    1250      1.1       mrg 
    1251      1.1       mrg #else  /* ARM version.  */
    1252  1.3.4.2    martin 
    1253      1.1       mrg 	FUNC_START umodsi3
    1254      1.1       mrg 
                            	@ Fast paths: divisor == 0 -> Ldiv0; dividend < divisor ->
                            	@ return dividend; divisor power of two -> mask with divisor-1.
    1255      1.1       mrg 	subs	r2, r1, #1			@ compare divisor with 1
    1256      1.1       mrg 	bcc	LSYM(Ldiv0)
    1257      1.1       mrg 	cmpne	r0, r1				@ compare dividend with divisor
    1258      1.1       mrg 	moveq   r0, #0
    1259      1.1       mrg 	tsthi	r1, r2				@ see if divisor is power of 2
    1260      1.1       mrg 	andeq	r0, r0, r2
    1261      1.1       mrg 	RETc(ls)
    1262      1.1       mrg 
    1263      1.1       mrg 	ARM_MOD_BODY r0, r1, r2, r3
    1264      1.1       mrg 
    1265      1.1       mrg 	RET
    1266      1.1       mrg 
    1267      1.1       mrg #endif /* ARM version.  */
    1268      1.1       mrg 
    1269      1.1       mrg 	DIV_FUNC_END umodsi3 unsigned
    1270      1.1       mrg 
    1271      1.1       mrg #endif /* L_umodsi3 */
   1272      1.1       mrg /* ------------------------------------------------------------------------ */
   1273      1.1       mrg #ifdef L_divsi3
   1274      1.1       mrg 
   1275      1.1       mrg #if defined(__prefer_thumb__)
   1276      1.1       mrg 
    1277  1.3.4.2    martin 	FUNC_START divsi3
    1278      1.1       mrg 	FUNC_ALIAS aeabi_idiv divsi3
	@ signed __divsi3 / __aeabi_idiv for Thumb-1: r0 = r0 / r1.
	@ Two variants: a compact one for -Os that reduces to unsigned
	@ division and fixes the sign afterwards, and (otherwise) the
	@ faster THUMB1_Div_* macro bodies.
    1279  1.3.4.2    martin #if defined(__OPTIMIZE_SIZE__)
    1280      1.1       mrg 
    1281      1.1       mrg 	cmp	divisor, #0
    1282      1.1       mrg 	beq	LSYM(Ldiv0)
    1283      1.1       mrg LSYM(divsi3_skip_div0_test):
    1284      1.1       mrg 	push	{ work }
    1285      1.1       mrg 	mov	work, dividend
    1286      1.1       mrg 	eor	work, divisor		@ Save the sign of the result.
    1287      1.1       mrg 	mov	ip, work
    1288      1.1       mrg 	mov	curbit, #1
    1289      1.1       mrg 	mov	result, #0
    1290      1.1       mrg 	cmp	divisor, #0
    1291      1.1       mrg 	bpl	LSYM(Lover10)
    1292      1.1       mrg 	neg	divisor, divisor	@ Loops below use unsigned.
    1293      1.1       mrg LSYM(Lover10):
    1294      1.1       mrg 	cmp	dividend, #0
    1295      1.1       mrg 	bpl	LSYM(Lover11)
    1296      1.1       mrg 	neg	dividend, dividend
    1297      1.1       mrg LSYM(Lover11):
    1298      1.1       mrg 	cmp	dividend, divisor
    1299      1.1       mrg 	blo	LSYM(Lgot_result)
    1300      1.1       mrg 
	@ Unsigned divide loop; mode 0 = quotient in `result'.
    1301      1.1       mrg 	THUMB_DIV_MOD_BODY 0
    1302  1.3.4.2    martin 
    1303      1.1       mrg 	mov	r0, result
    1304      1.1       mrg 	mov	work, ip
    1305      1.1       mrg 	cmp	work, #0		@ signs differed: negate quotient
    1306      1.1       mrg 	bpl	LSYM(Lover12)
    1307      1.1       mrg 	neg	r0, r0
    1308      1.1       mrg LSYM(Lover12):
    1309      1.1       mrg 	pop	{ work }
    1310      1.1       mrg 	RET
    1311      1.1       mrg 
    1312  1.3.4.2    martin /* Implementation of aeabi_idiv for ARMv6m.  This version is only
    1313  1.3.4.2    martin    used in ARMv6-M when we need an efficient implementation.  */
    1314  1.3.4.2    martin #else
    1315  1.3.4.2    martin LSYM(divsi3_skip_div0_test):
	@ orr merges the sign bits; bmi takes the slow path if either
	@ operand is negative, otherwise fall through to the positive case.
    1316  1.3.4.2    martin 	cpy	curbit, dividend
    1317  1.3.4.2    martin 	orr	curbit, divisor
    1318  1.3.4.2    martin 	bmi	LSYM(Lthumb1_div_negative)
    1319  1.3.4.2    martin 
    1320  1.3.4.2    martin LSYM(Lthumb1_div_positive):
    1321  1.3.4.2    martin 	THUMB1_Div_Positive
    1322  1.3.4.2    martin 
    1323  1.3.4.2    martin LSYM(Lthumb1_div_negative):
    1324  1.3.4.2    martin 	THUMB1_Div_Negative
    1325  1.3.4.2    martin 
    1326  1.3.4.2    martin #endif /* __OPTIMIZE_SIZE__ */
   1327  1.3.4.2    martin 
   1328      1.1       mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
   1329      1.1       mrg 
    1330      1.1       mrg 	ARM_FUNC_START divsi3
    1331      1.1       mrg 	ARM_FUNC_ALIAS aeabi_idiv divsi3
	@ Hardware-divide variant (__ARM_ARCH_EXT_IDIV__):
	@ r0 = r0 / r1 via sdiv; zero divisor goes to Ldiv0.
    1332      1.1       mrg 
    1333      1.1       mrg 	cmp 	r1, #0
    1334      1.1       mrg 	beq	LSYM(Ldiv0)
    1335      1.1       mrg 	sdiv	r0, r0, r1
    1336      1.1       mrg 	RET
   1337      1.1       mrg 
   1338      1.1       mrg #else /* ARM/Thumb-2 version.  */
   1339  1.3.4.2    martin 
    1340  1.3.4.2    martin 	ARM_FUNC_START divsi3
    1341      1.1       mrg 	ARM_FUNC_ALIAS aeabi_idiv divsi3
	@ signed __divsi3 / __aeabi_idiv, ARM / Thumb-2 software version:
	@ r0 = r0 / r1.  Sign of the result is kept in ip (eor of the
	@ operands); the division itself is done on magnitudes.
    1342      1.1       mrg 
    1343      1.1       mrg 	cmp	r1, #0
    1344      1.1       mrg 	beq	LSYM(Ldiv0)
    1345      1.1       mrg LSYM(divsi3_skip_div0_test):
    1346      1.1       mrg 	eor	ip, r0, r1			@ save the sign of the result.
    1347      1.1       mrg 	do_it	mi
    1348      1.1       mrg 	rsbmi	r1, r1, #0			@ loops below use unsigned.
    1349      1.1       mrg 	subs	r2, r1, #1			@ division by 1 or -1 ?
    1350      1.1       mrg 	beq	10f
    1351      1.1       mrg 	movs	r3, r0
    1352      1.1       mrg 	do_it	mi
    1353      1.1       mrg 	rsbmi	r3, r0, #0			@ positive dividend value
    1354      1.1       mrg 	cmp	r3, r1
    1355      1.1       mrg 	bls	11f				@ |dividend| <= |divisor|
    1356      1.1       mrg 	tst	r1, r2				@ divisor is power of 2 ?
    1357      1.1       mrg 	beq	12f
    1358      1.1       mrg 
	@ General case: unsigned divide of r3 by r1, quotient in r0.
    1359      1.1       mrg 	ARM_DIV_BODY r3, r1, r0, r2
    1360      1.1       mrg 
    1361      1.1       mrg 	cmp	ip, #0
    1362      1.1       mrg 	do_it	mi
    1363      1.1       mrg 	rsbmi	r0, r0, #0			@ signs differed: negate
    1364      1.1       mrg 	RET
    1365      1.1       mrg 
	@ 10: |divisor| == 1; result is dividend or -dividend.
    1366      1.1       mrg 10:	teq	ip, r0				@ same sign ?
    1367      1.1       mrg 	do_it	mi
    1368      1.1       mrg 	rsbmi	r0, r0, #0
    1369      1.1       mrg 	RET
    1370      1.1       mrg 
	@ 11: |dividend| <= |divisor|; quotient is 0, or +/-1 when equal.
    1371      1.1       mrg 11:	do_it	lo
    1372      1.1       mrg 	movlo	r0, #0
    1373      1.1       mrg 	do_it	eq,t
    1374      1.1       mrg 	moveq	r0, ip, asr #31			@ -1 or 0 from the sign word...
    1375      1.1       mrg 	orreq	r0, r0, #1			@ ...making +1 or -1
    1376      1.1       mrg 	RET
    1377      1.1       mrg 
	@ 12: power-of-two divisor; reduce to a right shift.
    1378      1.1       mrg 12:	ARM_DIV2_ORDER r1, r2
    1379      1.1       mrg 
    1380      1.1       mrg 	cmp	ip, #0
    1381      1.1       mrg 	mov	r0, r3, lsr r2
    1382      1.1       mrg 	do_it	mi
    1383      1.1       mrg 	rsbmi	r0, r0, #0
    1384      1.1       mrg 	RET
   1385      1.1       mrg 
   1386      1.1       mrg #endif /* ARM version */
   1387      1.1       mrg 
   1388      1.1       mrg 	DIV_FUNC_END divsi3 signed
   1389      1.1       mrg 
   1390      1.1       mrg #if defined(__prefer_thumb__)
    1391      1.1       mrg FUNC_START aeabi_idivmod
	@ __aeabi_idivmod (Thumb): on return r0 = r0 / r1, r1 = r0 % r1.
    1392      1.1       mrg 	cmp	r1, #0
    1393      1.1       mrg 	beq	LSYM(Ldiv0)
    1394  1.3.4.2    martin # if defined(__OPTIMIZE_SIZE__)
	@ Call the quotient-only divide, then remainder = dividend - q*divisor.
    1395      1.1       mrg 	push	{r0, r1, lr}
    1396      1.1       mrg 	bl	LSYM(divsi3_skip_div0_test)
    1397      1.1       mrg 	POP	{r1, r2, r3}		@ r1=dividend, r2=divisor, r3=ret addr
    1398      1.1       mrg 	mul	r2, r0
    1399      1.1       mrg 	sub	r1, r1, r2
    1400      1.1       mrg 	bx	r3
    1401  1.3.4.2    martin # else
    1402  1.3.4.2    martin 	/* Both the quotient and remainder are calculated simultaneously
    1403  1.3.4.2    martin 	   in THUMB1_Div_Positive and THUMB1_Div_Negative.  There is no
    1404  1.3.4.2    martin 	   need to calculate the remainder again here.  */
    1405  1.3.4.2    martin 	b	LSYM(divsi3_skip_div0_test)
    1406  1.3.4.2    martin 	RET			@ not reached: the branch above does not return here
    1407  1.3.4.2    martin # endif /* __OPTIMIZE_SIZE__ */
    1408  1.3.4.2    martin 
   1408  1.3.4.2    martin 
   1409      1.1       mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
    1410      1.1       mrg ARM_FUNC_START aeabi_idivmod
	@ Hardware-divide variant: r0 = quotient, r1 = remainder
	@ (mls computes dividend - quotient * divisor).
    1411      1.1       mrg 	cmp 	r1, #0
    1412      1.1       mrg 	beq	LSYM(Ldiv0)
    1413      1.1       mrg 	mov     r2, r0			@ keep the dividend
    1414      1.1       mrg 	sdiv	r0, r0, r1
    1415      1.1       mrg 	mls     r1, r0, r1, r2
    1416      1.1       mrg 	RET
   1417      1.1       mrg #else
    1418      1.1       mrg ARM_FUNC_START aeabi_idivmod
	@ __aeabi_idivmod (ARM): r0 = quotient, r1 = remainder.
	@ Saves dividend/divisor, calls the divide, then
	@ remainder = dividend - quotient * divisor.
    1419      1.1       mrg 	cmp	r1, #0
    1420      1.1       mrg 	beq	LSYM(Ldiv0)
    1421      1.1       mrg 	stmfd	sp!, { r0, r1, lr }
    1422      1.1       mrg 	bl	LSYM(divsi3_skip_div0_test)
    1423      1.1       mrg 	ldmfd	sp!, { r1, r2, lr }	@ r1=dividend, r2=divisor
    1424      1.1       mrg 	mul	r3, r2, r0
    1425      1.1       mrg 	sub	r1, r1, r3
    1426      1.1       mrg 	RET
   1427      1.1       mrg #endif
   1428      1.1       mrg 	FUNC_END aeabi_idivmod
   1429      1.1       mrg 
   1430      1.1       mrg #endif /* L_divsi3 */
   1431      1.1       mrg /* ------------------------------------------------------------------------ */
   1432      1.1       mrg #ifdef L_modsi3
   1433      1.1       mrg 
   1434  1.3.4.2    martin #if defined(__ARM_ARCH_EXT_IDIV__) && __ARM_ARCH_ISA_THUMB != 1
   1435      1.1       mrg 
    1436      1.1       mrg 	ARM_FUNC_START modsi3
	@ signed __modsi3, hardware-divide variant:
	@ r0 = r0 % r1 = r0 - (r0 / r1) * r1.
    1437      1.1       mrg 
    1438      1.1       mrg 	cmp	r1, #0
    1439      1.1       mrg 	beq	LSYM(Ldiv0)
    1440      1.1       mrg 
    1441      1.1       mrg 	sdiv	r2, r0, r1
    1442      1.1       mrg 	mls     r0, r1, r2, r0
    1443      1.1       mrg 	RET
   1444      1.1       mrg 
   1445      1.1       mrg #elif defined(__thumb__)
   1446      1.1       mrg 
    1447      1.1       mrg 	FUNC_START modsi3
	@ signed __modsi3 (Thumb): r0 = r0 % r1.  The result takes the
	@ sign of the dividend, so the original dividend is pushed and
	@ re-examined after the unsigned loop.
    1448      1.1       mrg 
    1449      1.1       mrg 	mov	curbit, #1
    1450      1.1       mrg 	cmp	divisor, #0
    1451      1.1       mrg 	beq	LSYM(Ldiv0)
    1452      1.1       mrg 	bpl	LSYM(Lover10)
    1453      1.1       mrg 	neg	divisor, divisor		@ Loops below use unsigned.
    1454      1.1       mrg LSYM(Lover10):
    1455      1.1       mrg 	push	{ work }
    1456      1.1       mrg 	@ Need to save the sign of the dividend, unfortunately, we need
    1457      1.1       mrg 	@ work later on.  Must do this after saving the original value of
    1458      1.1       mrg 	@ the work register, because we will pop this value off first.
    1459      1.1       mrg 	push	{ dividend }
    1460      1.1       mrg 	cmp	dividend, #0
    1461      1.1       mrg 	bpl	LSYM(Lover11)
    1462      1.1       mrg 	neg	dividend, dividend
    1463      1.1       mrg LSYM(Lover11):
    1464      1.1       mrg 	cmp	dividend, divisor
    1465      1.1       mrg 	blo	LSYM(Lgot_result)
    1466      1.1       mrg 
	@ Unsigned divide loop; mode 1 = remainder left in `dividend'.
    1467      1.1       mrg 	THUMB_DIV_MOD_BODY 1
    1468      1.1       mrg 
    1469      1.1       mrg 	pop	{ work }		@ original dividend: its sign...
    1470      1.1       mrg 	cmp	work, #0
    1471      1.1       mrg 	bpl	LSYM(Lover12)
    1472      1.1       mrg 	neg	dividend, dividend	@ ...determines the result sign
    1473      1.1       mrg LSYM(Lover12):
    1474      1.1       mrg 	pop	{ work }
    1475      1.1       mrg 	RET
   1476      1.1       mrg 
   1477      1.1       mrg #else /* ARM version.  */
   1478      1.1       mrg 
    1479      1.1       mrg 	FUNC_START modsi3
	@ signed __modsi3 (ARM): r0 = r0 % r1, result sign follows the
	@ dividend (saved in ip).  Fast paths mirror __umodsi3.
    1480      1.1       mrg 
    1481      1.1       mrg 	cmp	r1, #0
    1482      1.1       mrg 	beq	LSYM(Ldiv0)
    1483      1.1       mrg 	rsbmi	r1, r1, #0			@ loops below use unsigned.
    1484      1.1       mrg 	movs	ip, r0				@ preserve sign of dividend
    1485      1.1       mrg 	rsbmi	r0, r0, #0			@ if negative make positive
    1486      1.1       mrg 	subs	r2, r1, #1			@ compare divisor with 1
    1487      1.1       mrg 	cmpne	r0, r1				@ compare dividend with divisor
    1488      1.1       mrg 	moveq	r0, #0
    1489      1.1       mrg 	tsthi	r1, r2				@ see if divisor is power of 2
    1490      1.1       mrg 	andeq	r0, r0, r2
    1491      1.1       mrg 	bls	10f				@ fast cases: just fix the sign
    1492      1.1       mrg 
    1493      1.1       mrg 	ARM_MOD_BODY r0, r1, r2, r3
    1494      1.1       mrg 
    1495      1.1       mrg 10:	cmp	ip, #0
    1496      1.1       mrg 	rsbmi	r0, r0, #0			@ negative dividend: negate
    1497      1.1       mrg 	RET
   1498      1.1       mrg 
   1499      1.1       mrg #endif /* ARM version */
   1500      1.1       mrg 
   1501      1.1       mrg 	DIV_FUNC_END modsi3 signed
   1502      1.1       mrg 
   1503      1.1       mrg #endif /* L_modsi3 */
   1504      1.1       mrg /* ------------------------------------------------------------------------ */
   1505      1.1       mrg #ifdef L_dvmd_tls
   1506      1.1       mrg 
   1507      1.1       mrg #ifdef __ARM_EABI__
    1508      1.1       mrg 	WEAK aeabi_idiv0
    1509      1.1       mrg 	WEAK aeabi_ldiv0
	@ Default (trivial) divide-by-zero handlers: simply return,
	@ leaving whatever is in the result registers.  Declared weak so
	@ an application or OS can override them.
    1510      1.1       mrg 	FUNC_START aeabi_idiv0
    1511      1.1       mrg 	FUNC_START aeabi_ldiv0
    1512      1.1       mrg 	RET
    1513      1.1       mrg 	FUNC_END aeabi_ldiv0
    1514      1.1       mrg 	FUNC_END aeabi_idiv0
    1515      1.1       mrg #else
	@ Non-EABI equivalent.
    1516      1.1       mrg 	FUNC_START div0
    1517      1.1       mrg 	RET
    1518      1.1       mrg 	FUNC_END div0
   1519      1.1       mrg #endif
   1520      1.1       mrg 
    1521      1.1       mrg #endif /* L_dvmd_tls */
   1522      1.1       mrg /* ------------------------------------------------------------------------ */
   1523      1.1       mrg #ifdef L_dvmd_lnx
    1524      1.1       mrg @ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls
   1525      1.1       mrg 
   1526      1.1       mrg /* Constant taken from <asm/signal.h>.  */
   1527      1.1       mrg #define SIGFPE	8
   1528      1.1       mrg 
   1529      1.1       mrg #ifdef __ARM_EABI__
    1530  1.3.4.1  christos 	cfi_start	__aeabi_ldiv0, LSYM(Lend_aeabi_ldiv0)
    1531      1.1       mrg 	WEAK aeabi_idiv0
    1532      1.1       mrg 	WEAK aeabi_ldiv0
	@ GNU/Linux divide-by-zero handler: raise(SIGFPE) and return.
	@ The cfi_* macros emit unwind info so a backtrace through the
	@ handler is possible; r1 is pushed only to keep the stack
	@ 8-byte aligned across the call.
    1533      1.1       mrg 	ARM_FUNC_START aeabi_idiv0
    1534      1.1       mrg 	ARM_FUNC_START aeabi_ldiv0
    1535  1.3.4.1  christos 	do_push	{r1, lr}
    1536  1.3.4.1  christos 98:	cfi_push 98b - __aeabi_ldiv0, 0xe, -0x4, 0x8
    1537      1.1       mrg #else
    1538  1.3.4.1  christos 	cfi_start	__div0, LSYM(Lend_div0)
    1539      1.1       mrg 	ARM_FUNC_START div0
    1540  1.3.4.1  christos 	do_push	{r1, lr}
    1541  1.3.4.1  christos 98:	cfi_push 98b - __div0, 0xe, -0x4, 0x8
    1542      1.1       mrg #endif
    1543      1.1       mrg 
    1544      1.1       mrg 	mov	r0, #SIGFPE
    1545      1.1       mrg 	bl	SYM(raise) __PLT__		@ signal the process
    1546  1.3.4.1  christos 	RETLDM	r1 unwind=98b
    1547      1.1       mrg 
    1548      1.1       mrg #ifdef __ARM_EABI__
    1549  1.3.4.1  christos 	cfi_end	LSYM(Lend_aeabi_ldiv0)
    1550      1.1       mrg 	FUNC_END aeabi_ldiv0
    1551      1.1       mrg 	FUNC_END aeabi_idiv0
    1552      1.1       mrg #else
    1553  1.3.4.1  christos 	cfi_end	LSYM(Lend_div0)
    1554      1.1       mrg 	FUNC_END div0
   1555      1.1       mrg #endif
   1556      1.1       mrg 
   1557      1.1       mrg #endif /* L_dvmd_lnx */
   1558      1.1       mrg #ifdef L_clear_cache
   1559      1.1       mrg #if defined __ARM_EABI__ && defined __linux__
   1560      1.1       mrg @ EABI GNU/Linux call to cacheflush syscall.
    1561      1.1       mrg 	ARM_FUNC_START clear_cache
	@ __clear_cache(start r0, end r1): flush the icache range via the
	@ ARM Linux cacheflush syscall, number 0xf0002 in r7 (r7 is
	@ callee-saved, hence the push/pop).  r2 = 0 is the flags argument.
    1562      1.1       mrg 	do_push	{r7}
    1563      1.1       mrg #if __ARM_ARCH__ >= 7 || defined(__ARM_ARCH_6T2__)
    1564      1.1       mrg 	movw	r7, #2
    1565      1.1       mrg 	movt	r7, #0xf			@ r7 = 0xf0002
    1566      1.1       mrg #else
    1567      1.1       mrg 	mov	r7, #0xf0000
    1568      1.1       mrg 	add	r7, r7, #2
    1569      1.1       mrg #endif
    1570      1.1       mrg 	mov	r2, #0
    1571      1.1       mrg 	swi	0
    1572      1.1       mrg 	do_pop	{r7}
    1573      1.1       mrg 	RET
    1574      1.1       mrg 	FUNC_END clear_cache
   1575      1.1       mrg #else
   1576      1.1       mrg #error "This is only for ARM EABI GNU/Linux"
   1577      1.1       mrg #endif
   1578      1.1       mrg #endif /* L_clear_cache */
   1579      1.1       mrg /* ------------------------------------------------------------------------ */
   1580      1.1       mrg /* Dword shift operations.  */
   1581      1.1       mrg /* All the following Dword shift variants rely on the fact that
   1582      1.1       mrg 	shft xxx, Reg
   1583      1.1       mrg    is in fact done as
   1584      1.1       mrg 	shft xxx, (Reg & 255)
   1585      1.1       mrg    so for Reg value in (32...63) and (-1...-31) we will get zero (in the
   1586      1.1       mrg    case of logical shifts) or the sign (for asr).  */
   1587      1.1       mrg 
   1588      1.1       mrg #ifdef __ARMEB__
   1589      1.1       mrg #define al	r1
   1590      1.1       mrg #define ah	r0
   1591      1.1       mrg #else
   1592      1.1       mrg #define al	r0
   1593      1.1       mrg #define ah	r1
   1594      1.1       mrg #endif
   1595      1.1       mrg 
   1596      1.1       mrg /* Prevent __aeabi double-word shifts from being produced on SymbianOS.  */
   1597      1.1       mrg #ifndef __symbian__
   1598      1.1       mrg 
   1599      1.1       mrg #ifdef L_lshrdi3
   1600      1.1       mrg 
    1601      1.1       mrg 	FUNC_START lshrdi3
    1602      1.1       mrg 	FUNC_ALIAS aeabi_llsr lshrdi3
	@ 64-bit logical shift right: [ah:al] >>= r2 (0..63 meaningful).
	@ Relies on the shift-by-(reg & 255) behaviour documented at the
	@ top of this section, so out-of-range partial shifts contribute 0.
    1603      1.1       mrg 
    1604      1.1       mrg #ifdef __thumb__
    1605      1.1       mrg 	lsr	al, r2
    1606      1.1       mrg 	mov	r3, ah
    1607      1.1       mrg 	lsr	ah, r2
    1608      1.1       mrg 	mov	ip, r3
    1609      1.1       mrg 	sub	r2, #32			@ high bits when shift >= 32...
    1610      1.1       mrg 	lsr	r3, r2
    1611      1.1       mrg 	orr	al, r3
    1612      1.1       mrg 	neg	r2, r2			@ ...or carried down when < 32
    1613      1.1       mrg 	mov	r3, ip
    1614      1.1       mrg 	lsl	r3, r2
    1615      1.1       mrg 	orr	al, r3
    1616      1.1       mrg 	RET
    1617      1.1       mrg #else
	@ ARM: select between the two regimes with the sign of r2-32.
    1618      1.1       mrg 	subs	r3, r2, #32
    1619      1.1       mrg 	rsb	ip, r2, #32
    1620      1.1       mrg 	movmi	al, al, lsr r2
    1621      1.1       mrg 	movpl	al, ah, lsr r3
    1622      1.1       mrg 	orrmi	al, al, ah, lsl ip
    1623      1.1       mrg 	mov	ah, ah, lsr r2
    1624      1.1       mrg 	RET
    1625      1.1       mrg #endif
    1626      1.1       mrg 	FUNC_END aeabi_llsr
    1627      1.1       mrg 	FUNC_END lshrdi3
   1628      1.1       mrg 
   1629      1.1       mrg #endif
   1630      1.1       mrg 
   1631      1.1       mrg #ifdef L_ashrdi3
   1632      1.1       mrg 
    1633      1.1       mrg 	FUNC_START ashrdi3
    1634      1.1       mrg 	FUNC_ALIAS aeabi_lasr ashrdi3
	@ 64-bit arithmetic shift right: [ah:al] >>= r2 (sign-filling).
	@ Same structure as __lshrdi3 but asr on the high word, with one
	@ extra branch in the Thumb version (see comment below).
    1635      1.1       mrg 
    1636      1.1       mrg #ifdef __thumb__
    1637      1.1       mrg 	lsr	al, r2
    1638      1.1       mrg 	mov	r3, ah
    1639      1.1       mrg 	asr	ah, r2
    1640      1.1       mrg 	sub	r2, #32
    1641      1.1       mrg 	@ If r2 is negative at this point the following step would OR
    1642      1.1       mrg 	@ the sign bit into all of AL.  That's not what we want...
    1643      1.1       mrg 	bmi	1f
    1644      1.1       mrg 	mov	ip, r3
    1645      1.1       mrg 	asr	r3, r2
    1646      1.1       mrg 	orr	al, r3
    1647      1.1       mrg 	mov	r3, ip
    1648      1.1       mrg 1:
    1649      1.1       mrg 	neg	r2, r2
    1650      1.1       mrg 	lsl	r3, r2
    1651      1.1       mrg 	orr	al, r3
    1652      1.1       mrg 	RET
    1653      1.1       mrg #else
	@ ARM: conditional-execution version, no branches.
    1654      1.1       mrg 	subs	r3, r2, #32
    1655      1.1       mrg 	rsb	ip, r2, #32
    1656      1.1       mrg 	movmi	al, al, lsr r2
    1657      1.1       mrg 	movpl	al, ah, asr r3
    1658      1.1       mrg 	orrmi	al, al, ah, lsl ip
    1659      1.1       mrg 	mov	ah, ah, asr r2
    1660      1.1       mrg 	RET
    1661      1.1       mrg #endif
    1662      1.1       mrg 
    1663      1.1       mrg 	FUNC_END aeabi_lasr
    1664      1.1       mrg 	FUNC_END ashrdi3
   1665      1.1       mrg 
   1666      1.1       mrg #endif
   1667      1.1       mrg 
   1668      1.1       mrg #ifdef L_ashldi3
   1669      1.1       mrg 
    1670      1.1       mrg 	FUNC_START ashldi3
    1671      1.1       mrg 	FUNC_ALIAS aeabi_llsl ashldi3
	@ 64-bit shift left: [ah:al] <<= r2.  Mirror image of __lshrdi3:
	@ low bits move up into ah, al is shifted in place.
    1672      1.1       mrg 
    1673      1.1       mrg #ifdef __thumb__
    1674      1.1       mrg 	lsl	ah, r2
    1675      1.1       mrg 	mov	r3, al
    1676      1.1       mrg 	lsl	al, r2
    1677      1.1       mrg 	mov	ip, r3
    1678      1.1       mrg 	sub	r2, #32			@ low bits when shift >= 32...
    1679      1.1       mrg 	lsl	r3, r2
    1680      1.1       mrg 	orr	ah, r3
    1681      1.1       mrg 	neg	r2, r2			@ ...or carried up when < 32
    1682      1.1       mrg 	mov	r3, ip
    1683      1.1       mrg 	lsr	r3, r2
    1684      1.1       mrg 	orr	ah, r3
    1685      1.1       mrg 	RET
    1686      1.1       mrg #else
	@ ARM: conditional-execution version, no branches.
    1687      1.1       mrg 	subs	r3, r2, #32
    1688      1.1       mrg 	rsb	ip, r2, #32
    1689      1.1       mrg 	movmi	ah, ah, lsl r2
    1690      1.1       mrg 	movpl	ah, al, lsl r3
    1691      1.1       mrg 	orrmi	ah, ah, al, lsr ip
    1692      1.1       mrg 	mov	al, al, lsl r2
    1693      1.1       mrg 	RET
    1694      1.1       mrg #endif
    1695      1.1       mrg 	FUNC_END aeabi_llsl
    1696      1.1       mrg 	FUNC_END ashldi3
   1697      1.1       mrg 
   1698      1.1       mrg #endif
   1699      1.1       mrg 
   1700      1.1       mrg #endif /* __symbian__ */
   1701      1.1       mrg 
   1702  1.3.4.2    martin #if (__ARM_ARCH_ISA_THUMB == 2	\
   1703  1.3.4.2    martin      || (__ARM_ARCH_ISA_ARM	\
   1704  1.3.4.2    martin 	 && (__ARM_ARCH__ > 5	\
   1705  1.3.4.2    martin 	     || (__ARM_ARCH__ == 5 && __ARM_ARCH_ISA_THUMB))))
   1706      1.1       mrg #define HAVE_ARM_CLZ 1
   1707      1.1       mrg #endif
   1708      1.1       mrg 
   1709      1.1       mrg #ifdef L_clzsi2
   1710  1.3.4.2    martin #ifdef NOT_ISA_TARGET_32BIT
    1711      1.1       mrg FUNC_START clzsi2
	@ __clzsi2 (Thumb-1): count leading zeros of r0.
	@ Binary search: shift r0 down by 16/8/4 while it exceeds the
	@ threshold in r3, tracking 28 - (shifts) in r1, then finish with
	@ a 16-entry table lookup on the remaining nibble.
	@ Input 0 yields 32 (28 + table[0] = 4).
    1712      1.1       mrg 	mov	r1, #28
    1713      1.1       mrg 	mov	r3, #1
    1714      1.1       mrg 	lsl	r3, r3, #16
    1715      1.1       mrg 	cmp	r0, r3 /* 0x10000 */
    1716      1.1       mrg 	bcc	2f
    1717      1.1       mrg 	lsr	r0, r0, #16
    1718      1.1       mrg 	sub	r1, r1, #16
    1719      1.1       mrg 2:	lsr	r3, r3, #8
    1720      1.1       mrg 	cmp	r0, r3 /* #0x100 */
    1721      1.1       mrg 	bcc	2f
    1722      1.1       mrg 	lsr	r0, r0, #8
    1723      1.1       mrg 	sub	r1, r1, #8
    1724      1.1       mrg 2:	lsr	r3, r3, #4
    1725      1.1       mrg 	cmp	r0, r3 /* #0x10 */
    1726      1.1       mrg 	bcc	2f
    1727      1.1       mrg 	lsr	r0, r0, #4
    1728      1.1       mrg 	sub	r1, r1, #4
    1729      1.1       mrg 2:	adr	r2, 1f
    1730      1.1       mrg 	ldrb	r0, [r2, r0]		@ clz of the final nibble
    1731      1.1       mrg 	add	r0, r0, r1
    1732      1.1       mrg 	bx lr
    1733      1.1       mrg .align 2
    1734      1.1       mrg 1:
    1735      1.1       mrg .byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
    1736      1.1       mrg 	FUNC_END clzsi2
   1737      1.1       mrg #else
    1738      1.1       mrg ARM_FUNC_START clzsi2
	@ __clzsi2 (ARM/Thumb-2): single clz instruction when available,
	@ otherwise the same branch-free binary search + table lookup as
	@ the Thumb-1 version, using conditional execution.
    1739      1.1       mrg # if defined(HAVE_ARM_CLZ)
    1740      1.1       mrg 	clz	r0, r0
    1741      1.1       mrg 	RET
    1742      1.1       mrg # else
    1743      1.1       mrg 	mov	r1, #28
    1744      1.1       mrg 	cmp	r0, #0x10000
    1745      1.1       mrg 	do_it	cs, t
    1746      1.1       mrg 	movcs	r0, r0, lsr #16
    1747      1.1       mrg 	subcs	r1, r1, #16
    1748      1.1       mrg 	cmp	r0, #0x100
    1749      1.1       mrg 	do_it	cs, t
    1750      1.1       mrg 	movcs	r0, r0, lsr #8
    1751      1.1       mrg 	subcs	r1, r1, #8
    1752      1.1       mrg 	cmp	r0, #0x10
    1753      1.1       mrg 	do_it	cs, t
    1754      1.1       mrg 	movcs	r0, r0, lsr #4
    1755      1.1       mrg 	subcs	r1, r1, #4
    1756      1.1       mrg 	adr	r2, 1f
    1757      1.1       mrg 	ldrb	r0, [r2, r0]		@ clz of the final nibble
    1758      1.1       mrg 	add	r0, r0, r1
    1759      1.1       mrg 	RET
    1760      1.1       mrg .align 2
    1761      1.1       mrg 1:
    1762      1.1       mrg .byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
    1763      1.1       mrg # endif /* !HAVE_ARM_CLZ */
    1764      1.1       mrg 	FUNC_END clzsi2
   1765      1.1       mrg #endif
   1766      1.1       mrg #endif /* L_clzsi2 */
   1767      1.1       mrg 
   1768      1.1       mrg #ifdef L_clzdi2
   1769      1.1       mrg #if !defined(HAVE_ARM_CLZ)
   1770      1.1       mrg 
   1771  1.3.4.2    martin # ifdef NOT_ISA_TARGET_32BIT
   1772      1.1       mrg FUNC_START clzdi2
   1773      1.1       mrg 	push	{r4, lr}
   1774      1.1       mrg # else
   1775      1.1       mrg ARM_FUNC_START clzdi2
   1776      1.1       mrg 	do_push	{r4, lr}
   1777      1.1       mrg # endif
   1778      1.1       mrg 	cmp	xxh, #0
   1779      1.1       mrg 	bne	1f
   1780      1.1       mrg # ifdef __ARMEB__
   1781      1.1       mrg 	mov	r0, xxl
   1782      1.1       mrg 	bl	__clzsi2
   1783      1.1       mrg 	add	r0, r0, #32
   1784      1.1       mrg 	b 2f
   1785      1.1       mrg 1:
   1786      1.1       mrg 	bl	__clzsi2
   1787      1.1       mrg # else
   1788      1.1       mrg 	bl	__clzsi2
   1789      1.1       mrg 	add	r0, r0, #32
   1790      1.1       mrg 	b 2f
   1791      1.1       mrg 1:
   1792      1.1       mrg 	mov	r0, xxh
   1793      1.1       mrg 	bl	__clzsi2
   1794      1.1       mrg # endif
   1795      1.1       mrg 2:
   1796  1.3.4.2    martin # ifdef NOT_ISA_TARGET_32BIT
   1797      1.1       mrg 	pop	{r4, pc}
   1798      1.1       mrg # else
   1799      1.1       mrg 	RETLDM	r4
   1800      1.1       mrg # endif
   1801      1.1       mrg 	FUNC_END clzdi2
   1802      1.1       mrg 
   1803      1.1       mrg #else /* HAVE_ARM_CLZ */
   1804      1.1       mrg 
    1805      1.1       mrg ARM_FUNC_START clzdi2
	@ __clzdi2 with hardware clz: count in the high word, falling
	@ back to 32 + clz(low word) when the high word is zero.
    1806      1.1       mrg 	cmp	xxh, #0
    1807      1.1       mrg 	do_it	eq, et
    1808      1.1       mrg 	clzeq	r0, xxl
    1809      1.1       mrg 	clzne	r0, xxh
    1810      1.1       mrg 	addeq	r0, r0, #32
    1811      1.1       mrg 	RET
    1812      1.1       mrg 	FUNC_END clzdi2
   1813      1.1       mrg 
   1814      1.1       mrg #endif
   1815      1.1       mrg #endif /* L_clzdi2 */
   1816      1.1       mrg 
   1817      1.1       mrg #ifdef L_ctzsi2
   1818  1.3.4.2    martin #ifdef NOT_ISA_TARGET_32BIT
    1819      1.1       mrg FUNC_START ctzsi2
	@ __ctzsi2 (Thumb-1): count trailing zeros of r0.
	@ First isolate the lowest set bit (x & -x), then locate it with
	@ the same binary search as __clzsi2 but a table biased so that
	@ result = table[nibble] - r1.  ctz(0) is undefined (yields -1 here).
    1820      1.1       mrg 	neg	r1, r0
    1821      1.1       mrg 	and	r0, r0, r1		@ keep only the lowest set bit
    1822      1.1       mrg 	mov	r1, #28
    1823      1.1       mrg 	mov	r3, #1
    1824      1.1       mrg 	lsl	r3, r3, #16
    1825      1.1       mrg 	cmp	r0, r3 /* 0x10000 */
    1826      1.1       mrg 	bcc	2f
    1827      1.1       mrg 	lsr	r0, r0, #16
    1828      1.1       mrg 	sub	r1, r1, #16
    1829      1.1       mrg 2:	lsr	r3, r3, #8
    1830      1.1       mrg 	cmp	r0, r3 /* #0x100 */
    1831      1.1       mrg 	bcc	2f
    1832      1.1       mrg 	lsr	r0, r0, #8
    1833      1.1       mrg 	sub	r1, r1, #8
    1834      1.1       mrg 2:	lsr	r3, r3, #4
    1835      1.1       mrg 	cmp	r0, r3 /* #0x10 */
    1836      1.1       mrg 	bcc	2f
    1837      1.1       mrg 	lsr	r0, r0, #4
    1838      1.1       mrg 	sub	r1, r1, #4
    1839      1.1       mrg 2:	adr	r2, 1f
    1840      1.1       mrg 	ldrb	r0, [r2, r0]
    1841      1.1       mrg 	sub	r0, r0, r1
    1842      1.1       mrg 	bx lr
    1843      1.1       mrg .align 2
    1844      1.1       mrg 1:
    1845      1.1       mrg .byte	27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31
    1846      1.1       mrg 	FUNC_END ctzsi2
   1847      1.1       mrg #else
    1848      1.1       mrg ARM_FUNC_START ctzsi2
	@ __ctzsi2 (ARM/Thumb-2): isolate the lowest set bit, then either
	@ 31 - clz (hardware clz) or the table-based search used by the
	@ Thumb-1 version.  ctz(0) is undefined.
    1849      1.1       mrg 	rsb	r1, r0, #0
    1850      1.1       mrg 	and	r0, r0, r1		@ keep only the lowest set bit
    1851      1.1       mrg # if defined(HAVE_ARM_CLZ)
    1852      1.1       mrg 	clz	r0, r0
    1853      1.1       mrg 	rsb	r0, r0, #31		@ ctz(x) = 31 - clz(x & -x)
    1854      1.1       mrg 	RET
    1855      1.1       mrg # else
    1856      1.1       mrg 	mov	r1, #28
    1857      1.1       mrg 	cmp	r0, #0x10000
    1858      1.1       mrg 	do_it	cs, t
    1859      1.1       mrg 	movcs	r0, r0, lsr #16
    1860      1.1       mrg 	subcs	r1, r1, #16
    1861      1.1       mrg 	cmp	r0, #0x100
    1862      1.1       mrg 	do_it	cs, t
    1863      1.1       mrg 	movcs	r0, r0, lsr #8
    1864      1.1       mrg 	subcs	r1, r1, #8
    1865      1.1       mrg 	cmp	r0, #0x10
    1866      1.1       mrg 	do_it	cs, t
    1867      1.1       mrg 	movcs	r0, r0, lsr #4
    1868      1.1       mrg 	subcs	r1, r1, #4
    1869      1.1       mrg 	adr	r2, 1f
    1870      1.1       mrg 	ldrb	r0, [r2, r0]
    1871      1.1       mrg 	sub	r0, r0, r1
    1872      1.1       mrg 	RET
    1873      1.1       mrg .align 2
    1874      1.1       mrg 1:
    1875      1.1       mrg .byte	27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31
    1876      1.1       mrg # endif /* !HAVE_ARM_CLZ */
    1877      1.1       mrg 	FUNC_END ctzsi2
   1878      1.1       mrg #endif
    1879      1.1       mrg #endif /* L_ctzsi2 */
   1880      1.1       mrg 
   1881      1.1       mrg /* ------------------------------------------------------------------------ */
   1882      1.1       mrg /* These next two sections are here despite the fact that they contain Thumb
   1883      1.1       mrg    assembler because their presence allows interworked code to be linked even
   1884      1.1       mrg    when the GCC library is this one.  */
   1885      1.1       mrg 
   1886      1.1       mrg /* Do not build the interworking functions when the target architecture does
   1887      1.1       mrg    not support Thumb instructions.  (This can be a multilib option).  */
   1888      1.1       mrg #if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__\
   1889      1.1       mrg       || defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
   1890      1.1       mrg       || __ARM_ARCH__ >= 6
   1891      1.1       mrg 
   1892      1.1       mrg #if defined L_call_via_rX
   1893      1.1       mrg 
   1894      1.1       mrg /* These labels & instructions are used by the Arm/Thumb interworking code.
   1895      1.1       mrg    The address of function to be called is loaded into a register and then
   1896      1.1       mrg    one of these labels is called via a BL instruction.  This puts the
   1897      1.1       mrg    return address into the link register with the bottom bit set, and the
   1898      1.1       mrg    code here switches to the correct mode before executing the function.  */
   1899      1.1       mrg 
   1900      1.1       mrg 	.text
   1901      1.1       mrg 	.align 0
   1902      1.1       mrg         .force_thumb
   1903      1.1       mrg 
    1904      1.1       mrg .macro call_via register
	@ Emit _call_via_<reg>: a Thumb stub that transfers control to
	@ the address in \register, switching instruction set according
	@ to bit 0 of that address.  The caller's BL to this stub has
	@ already set lr, so the target returns straight to the caller.
	@ The trailing nop pads the stub — presumably for alignment of
	@ the next one; TODO confirm.
    1905      1.1       mrg 	THUMB_FUNC_START _call_via_\register
    1906      1.1       mrg 
    1907      1.1       mrg 	bx	\register
    1908      1.1       mrg 	nop
    1909      1.1       mrg 
    1910      1.1       mrg 	SIZE	(_call_via_\register)
    1911      1.1       mrg .endm
    1912      1.1       mrg 
	@ One stub per register a compiler-generated indirect call may use.
    1913      1.1       mrg 	call_via r0
    1914      1.1       mrg 	call_via r1
    1915      1.1       mrg 	call_via r2
    1916      1.1       mrg 	call_via r3
    1917      1.1       mrg 	call_via r4
    1918      1.1       mrg 	call_via r5
    1919      1.1       mrg 	call_via r6
    1920      1.1       mrg 	call_via r7
    1921      1.1       mrg 	call_via r8
    1922      1.1       mrg 	call_via r9
    1923      1.1       mrg 	call_via sl
    1924      1.1       mrg 	call_via fp
    1925      1.1       mrg 	call_via ip
    1926      1.1       mrg 	call_via sp
    1927      1.1       mrg 	call_via lr
   1928      1.1       mrg 
   1929      1.1       mrg #endif /* L_call_via_rX */
   1930      1.1       mrg 
   1931      1.1       mrg /* Don't bother with the old interworking routines for Thumb-2.  */
   1932      1.1       mrg /* ??? Maybe only omit these on "m" variants.  */
   1933  1.3.4.2    martin #if !defined(__thumb2__) && __ARM_ARCH_ISA_ARM
   1934      1.1       mrg 
   1935      1.1       mrg #if defined L_interwork_call_via_rX
   1936      1.1       mrg 
   1937      1.1       mrg /* These labels & instructions are used by the Arm/Thumb interworking code,
   1938      1.1       mrg    when the target address is in an unknown instruction set.  The address
   1939      1.1       mrg    of function to be called is loaded into a register and then one of these
   1940      1.1       mrg    labels is called via a BL instruction.  This puts the return address
   1941      1.1       mrg    into the link register with the bottom bit set, and the code here
   1942      1.1       mrg    switches to the correct mode before executing the function.  Unfortunately
   1943      1.1       mrg    the target code cannot be relied upon to return via a BX instruction, so
    1944      1.1       mrg    instead we have to store the return address on the stack and allow the
   1945      1.1       mrg    called function to return here instead.  Upon return we recover the real
   1946      1.1       mrg    return address and use a BX to get back to Thumb mode.
   1947      1.1       mrg 
   1948      1.1       mrg    There are three variations of this code.  The first,
   1949      1.1       mrg    _interwork_call_via_rN(), will push the return address onto the
   1950      1.1       mrg    stack and pop it in _arm_return().  It should only be used if all
   1951      1.1       mrg    arguments are passed in registers.
   1952      1.1       mrg 
   1953      1.1       mrg    The second, _interwork_r7_call_via_rN(), instead stores the return
   1954      1.1       mrg    address at [r7, #-4].  It is the caller's responsibility to ensure
   1955      1.1       mrg    that this address is valid and contains no useful data.
   1956      1.1       mrg 
   1957      1.1       mrg    The third, _interwork_r11_call_via_rN(), works in the same way but
   1958      1.1       mrg    uses r11 instead of r7.  It is useful if the caller does not really
   1959      1.1       mrg    need a frame pointer.  */
   1960      1.1       mrg 
   1961      1.1       mrg 	.text
   1962      1.1       mrg 	.align 0
   1963      1.1       mrg 
   1964      1.1       mrg 	.code   32
    1965      1.1       mrg 	.globl _arm_return
	@ Return trampolines used by the interworking stubs below: the
	@ callee comes back here in ARM state, and we recover the real
	@ (Thumb) return address — from the stack for _arm_return, or
	@ from [r7/-4] / [r11/-4] for the frame-pointer variants — and
	@ bx back to the correct instruction set.
    1966      1.1       mrg LSYM(Lstart_arm_return):
    1967      1.1       mrg 	cfi_start	LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
    1968      1.1       mrg 	cfi_push	0, 0xe, -0x8, 0x8
    1969      1.1       mrg 	nop	@ This nop is for the benefit of debuggers, so that
    1970      1.1       mrg 		@ backtraces will use the correct unwind information.
    1971      1.1       mrg _arm_return:
    1972      1.1       mrg 	RETLDM	unwind=LSYM(Lstart_arm_return)
    1973      1.1       mrg 	cfi_end	LSYM(Lend_arm_return)
    1974      1.1       mrg 
    1975      1.1       mrg 	.globl _arm_return_r7
    1976      1.1       mrg _arm_return_r7:
    1977      1.1       mrg 	ldr	lr, [r7, #-4]		@ stored by interwork_with_frame
    1978      1.1       mrg 	bx	lr
    1979      1.1       mrg 
    1980      1.1       mrg 	.globl _arm_return_r11
    1981      1.1       mrg _arm_return_r11:
    1982      1.1       mrg 	ldr	lr, [r11, #-4]		@ stored by interwork_with_frame
    1983      1.1       mrg 	bx	lr
   1984      1.1       mrg 
   1985      1.1       mrg .macro interwork_with_frame frame, register, name, return	@ Thumb->ARM call stub; real return addr is kept at [\frame, #-4].
   1986      1.1       mrg 	.code	16
   1987      1.1       mrg 
   1988      1.1       mrg 	THUMB_FUNC_START \name
   1989      1.1       mrg 
   1990      1.1       mrg 	bx	pc	@ In Thumb, pc reads 4 ahead (word aligned): switch to ARM state.
   1991      1.1       mrg 	nop	@ Padding so the ARM code below starts on the word boundary.
   1992      1.1       mrg 
   1993      1.1       mrg 	.code	32
   1994      1.1       mrg 	tst	\register, #1	@ Bit 0 clear => target is ARM code.
   1995      1.1       mrg 	streq	lr, [\frame, #-4]	@ ARM target: stash the real return address,
   1996      1.1       mrg 	adreq	lr, _arm_return_\frame	@ and make the callee return via _arm_return_\frame.
   1997      1.1       mrg 	bx	\register	@ Thumb target: plain tail-call, lr untouched.
   1998      1.1       mrg 
   1999      1.1       mrg 	SIZE	(\name)
   2000      1.1       mrg .endm
   2001      1.1       mrg 
   2002      1.1       mrg .macro interwork register	@ Thumb->ARM call stub; real return addr is pushed on the stack.
   2003      1.1       mrg 	.code	16
   2004      1.1       mrg 
   2005      1.1       mrg 	THUMB_FUNC_START _interwork_call_via_\register
   2006      1.1       mrg 
   2007      1.1       mrg 	bx	pc	@ In Thumb, pc reads 4 ahead (word aligned): switch to ARM state.
   2008      1.1       mrg 	nop	@ Padding so the ARM code below starts on the word boundary.
   2009      1.1       mrg 
   2010      1.1       mrg 	.code	32
   2011      1.1       mrg 	.globl LSYM(Lchange_\register)
   2012      1.1       mrg LSYM(Lchange_\register):
   2013      1.1       mrg 	tst	\register, #1	@ Bit 0 clear => target is ARM code.
   2014      1.1       mrg 	streq	lr, [sp, #-8]!	@ ARM target: push the real return addr (8-byte slot keeps sp aligned),
   2015      1.1       mrg 	adreq	lr, _arm_return	@ and route the callee's return through _arm_return.
   2016      1.1       mrg 	bx	\register	@ Thumb target: plain tail-call, lr untouched.
   2017      1.1       mrg 
   2018      1.1       mrg 	SIZE	(_interwork_call_via_\register)
   2019      1.1       mrg 
   2020      1.1       mrg 	interwork_with_frame r7,\register,_interwork_r7_call_via_\register
   2021      1.1       mrg 	interwork_with_frame r11,\register,_interwork_r11_call_via_\register
   2022      1.1       mrg .endm
   2023      1.1       mrg 
   2024      1.1       mrg 	interwork r0	@ Emit the stub family for every register a call can be made through.
   2025      1.1       mrg 	interwork r1
   2026      1.1       mrg 	interwork r2
   2027      1.1       mrg 	interwork r3
   2028      1.1       mrg 	interwork r4
   2029      1.1       mrg 	interwork r5
   2030      1.1       mrg 	interwork r6
   2031      1.1       mrg 	interwork r7
   2032      1.1       mrg 	interwork r8
   2033      1.1       mrg 	interwork r9
   2034      1.1       mrg 	interwork sl
   2035      1.1       mrg 	interwork fp
   2036      1.1       mrg 	interwork ip
   2037      1.1       mrg 	interwork sp
   2038      1.1       mrg 
   2039      1.1       mrg 	/* The LR case has to be handled a little differently...  */
   2040      1.1       mrg 	.code 16
   2041      1.1       mrg 
   2042      1.1       mrg 	THUMB_FUNC_START _interwork_call_via_lr
   2043      1.1       mrg 
   2044      1.1       mrg 	bx 	pc	@ Switch to ARM state (pc reads 4 ahead, word aligned).
   2045      1.1       mrg 	nop	@ Padding so the ARM code below starts on the word boundary.
   2046      1.1       mrg 
   2047      1.1       mrg 	.code 32
   2048      1.1       mrg 	.globl .Lchange_lr
   2049      1.1       mrg .Lchange_lr:
   2050      1.1       mrg 	tst	lr, #1	@ Bit 0 clear => target is ARM code.
   2051      1.1       mrg 	stmeqdb	r13!, {lr, pc}	@ ARM target: push return addr (pc fills the second word of the 8-byte slot, mirroring the streq [sp, #-8]! in the other stubs).
   2052      1.1       mrg 	mov	ip, lr	@ Copy the target before lr is rewritten below.
   2053      1.1       mrg 	adreq	lr, _arm_return	@ ARM target: route the callee's return through _arm_return.
   2054      1.1       mrg 	bx	ip	@ Jump to the target held in ip.
   2055      1.1       mrg 
   2056      1.1       mrg 	SIZE	(_interwork_call_via_lr)
   2057      1.1       mrg 
   2058      1.1       mrg #endif /* L_interwork_call_via_rX */
   2059      1.1       mrg #endif /* !__thumb2__ */
   2060      1.1       mrg 
   2061      1.1       mrg /* Functions to support compact pic switch tables in thumb1 state.
   2062      1.1       mrg    All these routines take an index into the table in r0.  The
   2063      1.1       mrg    table is at LR & ~1 (but this must be rounded up in the case
   2064      1.1       mrg    of 32-bit entries).  They are only permitted to clobber r12
   2065      1.1       mrg    and r14 and r0 must be preserved on exit.  */
   2066      1.1       mrg 
   2067      1.1       mrg 
   2068      1.1       mrg 	.text
   2069      1.1       mrg 	.align 0
   2070      1.1       mrg         .force_thumb
   2071      1.1       mrg 	.syntax unified
   2072      1.1       mrg 	THUMB_FUNC_START __gnu_thumb1_case_sqi	@ Switch dispatch via signed-byte offset table at lr &amp; ~1; index in r0.
   2073      1.1       mrg 	push	{r1}	@ r0 must be preserved, so use r1 as scratch.
   2074      1.1       mrg 	mov	r1, lr	@ lr = table address with the Thumb bit set.
   2075      1.1       mrg 	lsrs	r1, r1, #1
   2076      1.1       mrg 	lsls	r1, r1, #1	@ Shift down/up clears bit 0: r1 = table base.
   2077      1.1       mrg 	ldrsb	r1, [r1, r0]	@ Signed byte offset for case r0.
   2078      1.1       mrg 	lsls	r1, r1, #1	@ Offsets count halfwords: convert to bytes.
   2079      1.1       mrg 	add	lr, lr, r1	@ Retarget the return address at the case body.
   2080      1.1       mrg 	pop	{r1}
   2081      1.1       mrg 	bx	lr	@ Bit 0 of lr still set: continue in Thumb.
   2082      1.1       mrg 	SIZE (__gnu_thumb1_case_sqi)
   2084      1.1       mrg 
   2085      1.1       mrg 
   2086      1.1       mrg 
   2087      1.1       mrg 	.text
   2088      1.1       mrg 	.align 0
   2089      1.1       mrg         .force_thumb
   2090      1.1       mrg 	.syntax unified
   2091      1.1       mrg 	THUMB_FUNC_START __gnu_thumb1_case_uqi	@ Switch dispatch via unsigned-byte offset table at lr &amp; ~1; index in r0.
   2092      1.1       mrg 	push	{r1}	@ r0 must be preserved, so use r1 as scratch.
   2093      1.1       mrg 	mov	r1, lr	@ lr = table address with the Thumb bit set.
   2094      1.1       mrg 	lsrs	r1, r1, #1
   2095      1.1       mrg 	lsls	r1, r1, #1	@ Shift down/up clears bit 0: r1 = table base.
   2096      1.1       mrg 	ldrb	r1, [r1, r0]	@ Unsigned byte offset for case r0.
   2097      1.1       mrg 	lsls	r1, r1, #1	@ Offsets count halfwords: convert to bytes.
   2098      1.1       mrg 	add	lr, lr, r1	@ Retarget the return address at the case body.
   2099      1.1       mrg 	pop	{r1}
   2100      1.1       mrg 	bx	lr	@ Bit 0 of lr still set: continue in Thumb.
   2101      1.1       mrg 	SIZE (__gnu_thumb1_case_uqi)
   2103      1.1       mrg 
   2104      1.1       mrg 
   2105      1.1       mrg 
   2106      1.1       mrg 	.text
   2107      1.1       mrg 	.align 0
   2108      1.1       mrg         .force_thumb
   2109      1.1       mrg 	.syntax unified
   2110      1.1       mrg 	THUMB_FUNC_START __gnu_thumb1_case_shi	@ Switch dispatch via signed-halfword offset table at lr &amp; ~1; index in r0.
   2111      1.1       mrg 	push	{r0, r1}	@ Both scratch regs restored before dispatch; r0 preserved for caller.
   2112      1.1       mrg 	mov	r1, lr	@ lr = table address with the Thumb bit set.
   2113      1.1       mrg 	lsrs	r1, r1, #1
   2114      1.1       mrg 	lsls	r0, r0, #1	@ Scale the index to halfword entries.
   2115      1.1       mrg 	lsls	r1, r1, #1	@ Shift down/up clears bit 0: r1 = table base.
   2116      1.1       mrg 	ldrsh	r1, [r1, r0]	@ Signed halfword offset for the case.
   2117      1.1       mrg 	lsls	r1, r1, #1	@ Offsets count halfwords: convert to bytes.
   2118      1.1       mrg 	add	lr, lr, r1	@ Retarget the return address at the case body.
   2119      1.1       mrg 	pop	{r0, r1}
   2120      1.1       mrg 	bx	lr	@ Bit 0 of lr still set: continue in Thumb.
   2121      1.1       mrg 	SIZE (__gnu_thumb1_case_shi)
   2123      1.1       mrg 
   2124      1.1       mrg 
   2125      1.1       mrg 
   2126      1.1       mrg 	.text
   2127      1.1       mrg 	.align 0
   2128      1.1       mrg         .force_thumb
   2129      1.1       mrg 	.syntax unified
   2130      1.1       mrg 	THUMB_FUNC_START __gnu_thumb1_case_uhi	@ Switch dispatch via unsigned-halfword offset table at lr &amp; ~1; index in r0.
   2131      1.1       mrg 	push	{r0, r1}	@ Both scratch regs restored before dispatch; r0 preserved for caller.
   2132      1.1       mrg 	mov	r1, lr	@ lr = table address with the Thumb bit set.
   2133      1.1       mrg 	lsrs	r1, r1, #1
   2134      1.1       mrg 	lsls	r0, r0, #1	@ Scale the index to halfword entries.
   2135      1.1       mrg 	lsls	r1, r1, #1	@ Shift down/up clears bit 0: r1 = table base.
   2136      1.1       mrg 	ldrh	r1, [r1, r0]	@ Unsigned halfword offset for the case.
   2137      1.1       mrg 	lsls	r1, r1, #1	@ Offsets count halfwords: convert to bytes.
   2138      1.1       mrg 	add	lr, lr, r1	@ Retarget the return address at the case body.
   2139      1.1       mrg 	pop	{r0, r1}
   2140      1.1       mrg 	bx	lr	@ Bit 0 of lr still set: continue in Thumb.
   2141      1.1       mrg 	SIZE (__gnu_thumb1_case_uhi)
   2143      1.1       mrg 
   2144      1.1       mrg 
   2145      1.1       mrg 
   2146      1.1       mrg 	.text
   2147      1.1       mrg 	.align 0
   2148      1.1       mrg         .force_thumb
   2149      1.1       mrg 	.syntax unified
   2150      1.1       mrg 	THUMB_FUNC_START __gnu_thumb1_case_si	@ Switch dispatch via 32-bit offset table (word-aligned after lr); index in r0.
   2151      1.1       mrg 	push	{r0, r1}	@ Both scratch regs restored before dispatch; r0 preserved for caller.
   2152      1.1       mrg 	mov	r1, lr	@ lr = table address with the Thumb bit set.
   2153      1.1       mrg 	adds.n	r1, r1, #2	/* Align to word.  */
   2154      1.1       mrg 	lsrs	r1, r1, #2
   2155      1.1       mrg 	lsls	r0, r0, #2	@ Scale the index to 32-bit entries.
   2156      1.1       mrg 	lsls	r1, r1, #2	@ r1 = (lr + 2) &amp; ~3: word-aligned table base, Thumb bit cleared.
   2157      1.1       mrg 	ldr	r0, [r1, r0]	@ Entry = offset of case body relative to the table base.
   2158      1.1       mrg 	adds	r0, r0, r1	@ Absolute address of the case body.
   2159      1.1       mrg 	mov	lr, r0
   2160      1.1       mrg 	pop	{r0, r1}
   2161      1.1       mrg 	mov	pc, lr		/* We know we were called from thumb code.  */
   2162      1.1       mrg 	SIZE (__gnu_thumb1_case_si)
   2164      1.1       mrg 
   2165      1.1       mrg #endif /* Arch supports thumb.  */
   2166      1.1       mrg 
   2167  1.3.4.1  christos .macro CFI_START_FUNCTION	@ Begin CFI for a function, snapshotting the entry state.
   2168  1.3.4.1  christos 	.cfi_startproc
   2169  1.3.4.1  christos 	.cfi_remember_state	@ Saved so CFI_END_FUNCTION can restore it.
   2170  1.3.4.1  christos .endm
   2171  1.3.4.1  christos 
   2172  1.3.4.1  christos .macro CFI_END_FUNCTION	@ Restore the remembered entry state and close the CFI region.
   2173  1.3.4.1  christos 	.cfi_restore_state
   2174  1.3.4.1  christos 	.cfi_endproc
   2175  1.3.4.1  christos .endm
   2176  1.3.4.1  christos 
   2177      1.1       mrg #ifndef __symbian__
   2178  1.3.4.2    martin /* The condition here must match the one in gcc/config/arm/elf.h.  */
   2179  1.3.4.2    martin #ifndef NOT_ISA_TARGET_32BIT
   2180      1.1       mrg #include "ieee754-df.S"
   2181      1.1       mrg #include "ieee754-sf.S"
   2182      1.1       mrg #include "bpabi.S"
   2183  1.3.4.2    martin #else /* NOT_ISA_TARGET_32BIT */
   2184      1.1       mrg #include "bpabi-v6m.S"
   2185  1.3.4.2    martin #endif /* NOT_ISA_TARGET_32BIT */
   2186      1.1       mrg #endif /* !__symbian__ */
   2187