Home | History | Annotate | Line # | Download | only in arm
lib1funcs.S revision 1.1.1.3
      1      1.1  mrg @ libgcc routines for ARM cpu.
      2      1.1  mrg @ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk)
      3      1.1  mrg 
      4  1.1.1.3  mrg /* Copyright (C) 1995-2016 Free Software Foundation, Inc.
      5      1.1  mrg 
      6      1.1  mrg This file is free software; you can redistribute it and/or modify it
      7      1.1  mrg under the terms of the GNU General Public License as published by the
      8      1.1  mrg Free Software Foundation; either version 3, or (at your option) any
      9      1.1  mrg later version.
     10      1.1  mrg 
     11      1.1  mrg This file is distributed in the hope that it will be useful, but
     12      1.1  mrg WITHOUT ANY WARRANTY; without even the implied warranty of
     13      1.1  mrg MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14      1.1  mrg General Public License for more details.
     15      1.1  mrg 
     16      1.1  mrg Under Section 7 of GPL version 3, you are granted additional
     17      1.1  mrg permissions described in the GCC Runtime Library Exception, version
     18      1.1  mrg 3.1, as published by the Free Software Foundation.
     19      1.1  mrg 
     20      1.1  mrg You should have received a copy of the GNU General Public License and
     21      1.1  mrg a copy of the GCC Runtime Library Exception along with this program;
     22      1.1  mrg see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     23      1.1  mrg <http://www.gnu.org/licenses/>.  */
     24      1.1  mrg 
     25      1.1  mrg /* An executable stack is *not* required for these functions.  */
     26      1.1  mrg #if defined(__ELF__) && defined(__linux__)
     27      1.1  mrg .section .note.GNU-stack,"",%progbits
     28      1.1  mrg .previous
     29      1.1  mrg #endif  /* __ELF__ and __linux__ */
     30      1.1  mrg 
     31      1.1  mrg #ifdef __ARM_EABI__
     32      1.1  mrg /* Some attributes that are common to all routines in this file.  */
     33      1.1  mrg 	/* Tag_ABI_align_needed: This code does not require 8-byte
     34      1.1  mrg 	   alignment from the caller.  */
     35      1.1  mrg 	/* .eabi_attribute 24, 0  -- default setting.  */
     36      1.1  mrg 	/* Tag_ABI_align_preserved: This code preserves 8-byte
     37      1.1  mrg 	   alignment in any callee.  */
     38      1.1  mrg 	.eabi_attribute 25, 1
     39      1.1  mrg #endif /* __ARM_EABI__ */
     40      1.1  mrg /* ------------------------------------------------------------------------ */
     41      1.1  mrg 
     42      1.1  mrg /* We need to know what prefix to add to function names.  */
     43      1.1  mrg 
     44      1.1  mrg #ifndef __USER_LABEL_PREFIX__
     45      1.1  mrg #error  __USER_LABEL_PREFIX__ not defined
     46      1.1  mrg #endif
     47      1.1  mrg 
     48      1.1  mrg /* ANSI concatenation macros.  */
     49      1.1  mrg 
     50      1.1  mrg #define CONCAT1(a, b) CONCAT2(a, b)
     51      1.1  mrg #define CONCAT2(a, b) a ## b
     52      1.1  mrg 
     53      1.1  mrg /* Use the right prefix for global labels.  */
     54      1.1  mrg 
     55      1.1  mrg #define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
     56      1.1  mrg 
     57      1.1  mrg #ifdef __ELF__
     58      1.1  mrg #ifdef __thumb__
     59      1.1  mrg #define __PLT__  /* Not supported in Thumb assembler (for now).  */
     60      1.1  mrg #elif defined __vxworks && !defined __PIC__
     61      1.1  mrg #define __PLT__ /* Not supported by the kernel loader.  */
     62      1.1  mrg #else
     63      1.1  mrg #define __PLT__ (PLT)
     64      1.1  mrg #endif
     65      1.1  mrg #define TYPE(x) .type SYM(x),function
     66      1.1  mrg #define SIZE(x) .size SYM(x), . - SYM(x)
     67      1.1  mrg #define LSYM(x) .x
     68      1.1  mrg #else
     69      1.1  mrg #define __PLT__
     70      1.1  mrg #define TYPE(x)
     71      1.1  mrg #define SIZE(x)
     72      1.1  mrg #define LSYM(x) x
     73      1.1  mrg #endif
     74      1.1  mrg 
     75      1.1  mrg /* Function end macros.  Variants for interworking.  */
     76      1.1  mrg 
     77      1.1  mrg #if defined(__ARM_ARCH_2__)
     78      1.1  mrg # define __ARM_ARCH__ 2
     79      1.1  mrg #endif
     80      1.1  mrg 
     81      1.1  mrg #if defined(__ARM_ARCH_3__)
     82      1.1  mrg # define __ARM_ARCH__ 3
     83      1.1  mrg #endif
     84      1.1  mrg 
     85      1.1  mrg #if defined(__ARM_ARCH_3M__) || defined(__ARM_ARCH_4__) \
     86      1.1  mrg 	|| defined(__ARM_ARCH_4T__)
     87      1.1  mrg /* We use __ARM_ARCH__ set to 4 here, but in reality it's any processor with
     88      1.1  mrg    long multiply instructions.  That includes v3M.  */
     89      1.1  mrg # define __ARM_ARCH__ 4
     90      1.1  mrg #endif
     91      1.1  mrg 
     92      1.1  mrg #if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
     93      1.1  mrg 	|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
     94      1.1  mrg 	|| defined(__ARM_ARCH_5TEJ__)
     95      1.1  mrg # define __ARM_ARCH__ 5
     96      1.1  mrg #endif
     97      1.1  mrg 
     98      1.1  mrg #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
     99      1.1  mrg 	|| defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) \
    100      1.1  mrg 	|| defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) \
    101      1.1  mrg 	|| defined(__ARM_ARCH_6M__)
    102      1.1  mrg # define __ARM_ARCH__ 6
    103      1.1  mrg #endif
    104      1.1  mrg 
    105      1.1  mrg #if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
    106      1.1  mrg 	|| defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
    107      1.1  mrg 	|| defined(__ARM_ARCH_7EM__)
    108      1.1  mrg # define __ARM_ARCH__ 7
    109      1.1  mrg #endif
    110      1.1  mrg 
    111      1.1  mrg #if defined(__ARM_ARCH_8A__)
    112      1.1  mrg # define __ARM_ARCH__ 8
    113      1.1  mrg #endif
    114      1.1  mrg 
    115      1.1  mrg #ifndef __ARM_ARCH__
    116      1.1  mrg #error Unable to determine architecture.
    117      1.1  mrg #endif
    118      1.1  mrg 
    119      1.1  mrg /* There are times when we might prefer Thumb1 code even if ARM code is
    120      1.1  mrg    permitted, for example, the code might be smaller, or there might be
    121      1.1  mrg    interworking problems with switching to ARM state if interworking is
    122      1.1  mrg    disabled.  */
    123      1.1  mrg #if (defined(__thumb__)			\
    124      1.1  mrg      && !defined(__thumb2__)		\
    125      1.1  mrg      && (!defined(__THUMB_INTERWORK__)	\
    126      1.1  mrg 	 || defined (__OPTIMIZE_SIZE__)	\
    127      1.1  mrg 	 || defined(__ARM_ARCH_6M__)))
    128      1.1  mrg # define __prefer_thumb__
    129      1.1  mrg #endif
    130      1.1  mrg 
    131      1.1  mrg /* How to return from a function call depends on the architecture variant.  */
    132      1.1  mrg 
    133      1.1  mrg #if (__ARM_ARCH__ > 4) || defined(__ARM_ARCH_4T__)
    134      1.1  mrg 
    135      1.1  mrg # define RET		bx	lr
    136      1.1  mrg # define RETc(x)	bx##x	lr
    137      1.1  mrg 
    138      1.1  mrg /* Special precautions for interworking on armv4t.  */
    139      1.1  mrg # if (__ARM_ARCH__ == 4)
    140      1.1  mrg 
    141      1.1  mrg /* Always use bx, not ldr pc.  */
    142      1.1  mrg #  if (defined(__thumb__) || defined(__THUMB_INTERWORK__))
    143      1.1  mrg #    define __INTERWORKING__
    144      1.1  mrg #   endif /* __THUMB__ || __THUMB_INTERWORK__ */
    145      1.1  mrg 
    146      1.1  mrg /* Include thumb stub before arm mode code.  */
    147      1.1  mrg #  if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
    148      1.1  mrg #   define __INTERWORKING_STUBS__
    149      1.1  mrg #  endif /* __thumb__ && !__THUMB_INTERWORK__ */
    150      1.1  mrg 
    151      1.1  mrg #endif /* __ARM_ARCH == 4 */
    152      1.1  mrg 
    153      1.1  mrg #else
    154      1.1  mrg 
    155      1.1  mrg # define RET		mov	pc, lr
    156      1.1  mrg # define RETc(x)	mov##x	pc, lr
    157      1.1  mrg 
    158      1.1  mrg #endif
    159      1.1  mrg 
/* cfi_pop advance, reg, cfa_offset
   Emit a raw .debug_frame fragment: advance the location to \advance
   (DW_CFA_advance_loc4), mark register \reg as restored to its
   caller value (DW_CFA_restore), and set the CFA offset to
   \cfa_offset (DW_CFA_def_cfa_offset).  No-op on non-ELF targets.  */
     160      1.1  mrg .macro	cfi_pop		advance, reg, cfa_offset
     161      1.1  mrg #ifdef __ELF__
     162      1.1  mrg 	.pushsection	.debug_frame
     163      1.1  mrg 	.byte	0x4		/* DW_CFA_advance_loc4 */
     164      1.1  mrg 	.4byte	\advance
     165      1.1  mrg 	.byte	(0xc0 | \reg)	/* DW_CFA_restore */
     166      1.1  mrg 	.byte	0xe		/* DW_CFA_def_cfa_offset */
     167      1.1  mrg 	.uleb128 \cfa_offset
     168      1.1  mrg 	.popsection
     169      1.1  mrg #endif
     170      1.1  mrg .endm
/* cfi_push advance, reg, offset, cfa_offset
   Emit a raw .debug_frame fragment: advance to \advance, record that
   register \reg is saved at \offset from the CFA (DW_CFA_offset; the
   factored offset uses the CIE data-alignment factor of -4, hence
   the "/ -4"), and set the CFA offset to \cfa_offset.  No-op on
   non-ELF targets.  */
     171      1.1  mrg .macro	cfi_push	advance, reg, offset, cfa_offset
     172      1.1  mrg #ifdef __ELF__
     173      1.1  mrg 	.pushsection	.debug_frame
     174      1.1  mrg 	.byte	0x4		/* DW_CFA_advance_loc4 */
     175      1.1  mrg 	.4byte	\advance
     176      1.1  mrg 	.byte	(0x80 | \reg)	/* DW_CFA_offset */
     177      1.1  mrg 	.uleb128 (\offset / -4)
     178      1.1  mrg 	.byte	0xe		/* DW_CFA_def_cfa_offset */
     179      1.1  mrg 	.uleb128 \cfa_offset
     180      1.1  mrg 	.popsection
     181      1.1  mrg #endif
     182      1.1  mrg .endm
/* cfi_start start_label, end_label
   Open a hand-written .debug_frame CIE + FDE pair covering the code
   from \start_label to \end_label.  The CIE declares r14 (lr) as the
   return-address column and CFA = r13 (sp) + 0.  The FDE is left
   open; cfi_pop/cfi_push append CFA opcodes to it and cfi_end closes
   it at LSYM(Lend_fde).  No-op on non-ELF targets.  */
     183      1.1  mrg .macro cfi_start	start_label, end_label
     184      1.1  mrg #ifdef __ELF__
     185      1.1  mrg 	.pushsection	.debug_frame
     186      1.1  mrg LSYM(Lstart_frame):
     187      1.1  mrg 	.4byte	LSYM(Lend_cie) - LSYM(Lstart_cie) @ Length of CIE
     188      1.1  mrg LSYM(Lstart_cie):
     189      1.1  mrg         .4byte	0xffffffff	@ CIE Identifier Tag
     190      1.1  mrg         .byte	0x1	@ CIE Version
     191      1.1  mrg         .ascii	"\0"	@ CIE Augmentation
     192      1.1  mrg         .uleb128 0x1	@ CIE Code Alignment Factor
     193      1.1  mrg         .sleb128 -4	@ CIE Data Alignment Factor
     194      1.1  mrg         .byte	0xe	@ CIE RA Column
     195      1.1  mrg         .byte	0xc	@ DW_CFA_def_cfa
     196      1.1  mrg         .uleb128 0xd
     197      1.1  mrg         .uleb128 0x0
     198      1.1  mrg 
     199      1.1  mrg 	.align 2
     200      1.1  mrg LSYM(Lend_cie):
     201      1.1  mrg 	.4byte	LSYM(Lend_fde)-LSYM(Lstart_fde)	@ FDE Length
     202      1.1  mrg LSYM(Lstart_fde):
     203      1.1  mrg 	.4byte	LSYM(Lstart_frame)	@ FDE CIE offset
     204      1.1  mrg 	.4byte	\start_label	@ FDE initial location
     205      1.1  mrg 	.4byte	\end_label-\start_label	@ FDE address range
     206      1.1  mrg 	.popsection
     207      1.1  mrg #endif
     208      1.1  mrg .endm
/* cfi_end end_label
   Close the FDE opened by cfi_start by defining LSYM(Lend_fde) in
   .debug_frame, and define \end_label at the current text location
   so the FDE address range is complete.  No-op on non-ELF targets.  */
     209      1.1  mrg .macro cfi_end	end_label
     210      1.1  mrg #ifdef __ELF__
     211      1.1  mrg 	.pushsection	.debug_frame
     212      1.1  mrg 	.align	2
     213      1.1  mrg LSYM(Lend_fde):
     214      1.1  mrg 	.popsection
     215      1.1  mrg \end_label:
     216      1.1  mrg #endif
     217      1.1  mrg .endm
    218      1.1  mrg 
    219      1.1  mrg /* Don't pass dirn, it's there just to get token pasting right.  */
    220      1.1  mrg 
/* RETLDM [regs], [cond], [unwind], [dirn]
   Pop \regs plus the return address from the stack and return,
   optionally under condition \cond.  With __INTERWORKING__ the
   return address is loaded into lr and returned via "bx lr" (safe
   mode switch on armv4t); otherwise it is popped straight into pc.
   With an empty \regs list only the return address is loaded (the
   stack slot is 8 bytes to keep 8-byte stack alignment).  If
   \unwind is given, a cfi_pop record marking lr as restored is
   emitted at local label 97.  \dirn exists only so token pasting
   forms a valid ldm suffix (default "ia"); do not pass it.  */
     221      1.1  mrg .macro	RETLDM	regs=, cond=, unwind=, dirn=ia
     222      1.1  mrg #if defined (__INTERWORKING__)
     223      1.1  mrg 	.ifc "\regs",""
     224      1.1  mrg 	ldr\cond	lr, [sp], #8
     225      1.1  mrg 	.else
     226      1.1  mrg # if defined(__thumb2__)
     227      1.1  mrg 	pop\cond	{\regs, lr}
     228      1.1  mrg # else
     229      1.1  mrg 	ldm\cond\dirn	sp!, {\regs, lr}
     230      1.1  mrg # endif
     231      1.1  mrg 	.endif
     232      1.1  mrg 	.ifnc "\unwind", ""
     233      1.1  mrg 	/* Mark LR as restored.  */
     234      1.1  mrg 97:	cfi_pop 97b - \unwind, 0xe, 0x0
     235      1.1  mrg 	.endif
     236      1.1  mrg 	bx\cond	lr
     237      1.1  mrg #else
     238      1.1  mrg 	/* Caller is responsible for providing IT instruction.  */
     239      1.1  mrg 	.ifc "\regs",""
     240      1.1  mrg 	ldr\cond	pc, [sp], #8
     241      1.1  mrg 	.else
     242      1.1  mrg # if defined(__thumb2__)
     243      1.1  mrg 	pop\cond	{\regs, pc}
     244      1.1  mrg # else
     245      1.1  mrg 	ldm\cond\dirn	sp!, {\regs, pc}
     246      1.1  mrg # endif
     247      1.1  mrg 	.endif
     248      1.1  mrg #endif
     249      1.1  mrg .endm
    250      1.1  mrg 
    251      1.1  mrg /* The Unified assembly syntax allows the same code to be assembled for both
    252      1.1  mrg    ARM and Thumb-2.  However this is only supported by recent gas, so define
    253      1.1  mrg    a set of macros to allow ARM code on older assemblers.  */
/* Portability shims so the same macro bodies assemble as ARM or
   Thumb-2 code:
     do_it cond[,suffix] - emit an IT block on Thumb-2, nothing on ARM
     shift1 op,d,s,amt   - "op d, s, amt" on Thumb-2 vs the ARM
                           "mov d, s, op amt" flexible-operand form
     do_push / do_pop    - push/pop vs stmfd/ldmfd sp!
     COND(op1,op2,cond)  - pastes the condition after (Thumb-2) or
                           inside (ARM divided syntax) the mnemonic
     shiftop             - op with a register-shifted operand; needs a
                           scratch reg \tmp on Thumb-2 (two insns),
                           single flexible-operand insn on ARM.  */
     254      1.1  mrg #if defined(__thumb2__)
     255      1.1  mrg .macro do_it cond, suffix=""
     256      1.1  mrg 	it\suffix	\cond
     257      1.1  mrg .endm
     258      1.1  mrg .macro shift1 op, arg0, arg1, arg2
     259      1.1  mrg 	\op	\arg0, \arg1, \arg2
     260      1.1  mrg .endm
     261      1.1  mrg #define do_push	push
     262      1.1  mrg #define do_pop	pop
     263      1.1  mrg #define COND(op1, op2, cond) op1 ## op2 ## cond
     264      1.1  mrg /* Perform an arithmetic operation with a variable shift operand.  This
     265      1.1  mrg    requires two instructions and a scratch register on Thumb-2.  */
     266      1.1  mrg .macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
     267      1.1  mrg 	\shiftop \tmp, \src2, \shiftreg
     268      1.1  mrg 	\name \dest, \src1, \tmp
     269      1.1  mrg .endm
     270      1.1  mrg #else
     271      1.1  mrg .macro do_it cond, suffix=""
     272      1.1  mrg .endm
     273      1.1  mrg .macro shift1 op, arg0, arg1, arg2
     274      1.1  mrg 	mov	\arg0, \arg1, \op \arg2
     275      1.1  mrg .endm
     276      1.1  mrg #define do_push	stmfd sp!,
     277      1.1  mrg #define do_pop	ldmfd sp!,
     278      1.1  mrg #define COND(op1, op2, cond) op1 ## cond ## op2
     279      1.1  mrg .macro shiftop name, dest, src1, src2, shiftop, shiftreg, tmp
     280      1.1  mrg 	\name \dest, \src1, \src2, \shiftop \shiftreg
     281      1.1  mrg .endm
     282      1.1  mrg #endif
    283      1.1  mrg 
/* ARM_LDIV0 name, signed - divide-by-zero tail for ARM-mode division
   routines.  EABI variant: saturate r0 (0xffffffff for unsigned;
   INT_MAX/INT_MIN by sign of the dividend for signed, r0 left 0 when
   the dividend was 0) and tail-call __aeabi_idiv0.  Non-EABI
   variant: save lr (8-byte slot keeps stack alignment), call
   __div0, force a 0 result, and return via RETLDM with unwind info
   anchored at local label 98.  \name is only used to compute the
   cfi_push advance.  */
     284      1.1  mrg #ifdef __ARM_EABI__
     285      1.1  mrg .macro ARM_LDIV0 name signed
     286      1.1  mrg 	cmp	r0, #0
     287      1.1  mrg 	.ifc	\signed, unsigned
     288      1.1  mrg 	movne	r0, #0xffffffff
     289      1.1  mrg 	.else
     290      1.1  mrg 	movgt	r0, #0x7fffffff
     291      1.1  mrg 	movlt	r0, #0x80000000
     292      1.1  mrg 	.endif
     293      1.1  mrg 	b	SYM (__aeabi_idiv0) __PLT__
     294      1.1  mrg .endm
     295      1.1  mrg #else
     296      1.1  mrg .macro ARM_LDIV0 name signed
     297      1.1  mrg 	str	lr, [sp, #-8]!
     298      1.1  mrg 98:	cfi_push 98b - __\name, 0xe, -0x8, 0x8
     299      1.1  mrg 	bl	SYM (__div0) __PLT__
     300      1.1  mrg 	mov	r0, #0			@ About as wrong as it could be.
     301      1.1  mrg 	RETLDM	unwind=98b
     302      1.1  mrg .endm
     303      1.1  mrg #endif
    304      1.1  mrg 
    305      1.1  mrg 
/* THUMB_LDIV0 name, signed - divide-by-zero tail for Thumb-mode
   division routines; the Thumb counterpart of ARM_LDIV0.  EABI:
   saturate r0 as in ARM_LDIV0, then reach __aeabi_idiv0.  Three
   code paths:
     - v6-M: no wide branches/conditional execution, so the saturation
       is done with plain Thumb-1 insns and the __aeabi_idiv0 address
       is built PC-relatively (word at 4: holds the offset) and popped
       into pc;
     - Thumb-2: saturate with cbz/IT and tail-call with b.w;
     - other Thumb (e.g. armv4t): "bx pc" switches to ARM mode for
       the saturation and branch, since the Thumb-1 encodings above
       would not reach.
   Non-EABI: push {r1, lr} (8-byte aligned frame), call __div0,
   force r0 = 0, and return - via popped r2 + bx when interworking
   (pop into pc is unsafe on armv4t), else pop straight into pc.  */
     306      1.1  mrg #ifdef __ARM_EABI__
     307      1.1  mrg .macro THUMB_LDIV0 name signed
     308      1.1  mrg #if defined(__ARM_ARCH_6M__)
     309      1.1  mrg 	.ifc \signed, unsigned
     310      1.1  mrg 	cmp	r0, #0
     311      1.1  mrg 	beq	1f
     312      1.1  mrg 	mov	r0, #0
     313      1.1  mrg 	mvn	r0, r0		@ 0xffffffff
     314      1.1  mrg 1:
     315      1.1  mrg 	.else
     316      1.1  mrg 	cmp	r0, #0
     317      1.1  mrg 	beq	2f
     318      1.1  mrg 	blt	3f
     319      1.1  mrg 	mov	r0, #0
     320      1.1  mrg 	mvn	r0, r0
     321      1.1  mrg 	lsr	r0, r0, #1	@ 0x7fffffff
     322      1.1  mrg 	b	2f
     323      1.1  mrg 3:	mov	r0, #0x80
     324      1.1  mrg 	lsl	r0, r0, #24	@ 0x80000000
     325      1.1  mrg 2:
     326      1.1  mrg 	.endif
     327      1.1  mrg 	push	{r0, r1, r2}
     328      1.1  mrg 	ldr	r0, 4f
     329      1.1  mrg 	adr	r1, 4f
     330      1.1  mrg 	add	r0, r1
     331      1.1  mrg 	str	r0, [sp, #8]
     332      1.1  mrg 	@ We know we are not on armv4t, so pop pc is safe.
     333      1.1  mrg 	pop	{r0, r1, pc}
     334      1.1  mrg 	.align	2
     335      1.1  mrg 4:
     336      1.1  mrg 	.word	__aeabi_idiv0 - 4b
     337      1.1  mrg #elif defined(__thumb2__)
     338      1.1  mrg 	.syntax unified
     339      1.1  mrg 	.ifc \signed, unsigned
     340      1.1  mrg 	cbz	r0, 1f
     341      1.1  mrg 	mov	r0, #0xffffffff
     342      1.1  mrg 1:
     343      1.1  mrg 	.else
     344      1.1  mrg 	cmp	r0, #0
     345      1.1  mrg 	do_it	gt
     346      1.1  mrg 	movgt	r0, #0x7fffffff
     347      1.1  mrg 	do_it	lt
     348      1.1  mrg 	movlt	r0, #0x80000000
     349      1.1  mrg 	.endif
     350      1.1  mrg 	b.w	SYM(__aeabi_idiv0) __PLT__
     351      1.1  mrg #else
     352      1.1  mrg 	.align	2
     353      1.1  mrg 	bx	pc
     354      1.1  mrg 	nop
     355      1.1  mrg 	.arm
     356      1.1  mrg 	cmp	r0, #0
     357      1.1  mrg 	.ifc	\signed, unsigned
     358      1.1  mrg 	movne	r0, #0xffffffff
     359      1.1  mrg 	.else
     360      1.1  mrg 	movgt	r0, #0x7fffffff
     361      1.1  mrg 	movlt	r0, #0x80000000
     362      1.1  mrg 	.endif
     363      1.1  mrg 	b	SYM(__aeabi_idiv0) __PLT__
     364      1.1  mrg 	.thumb
     365      1.1  mrg #endif
     366      1.1  mrg .endm
     367      1.1  mrg #else
     368      1.1  mrg .macro THUMB_LDIV0 name signed
     369      1.1  mrg 	push	{ r1, lr }
     370      1.1  mrg 98:	cfi_push 98b - __\name, 0xe, -0x4, 0x8
     371      1.1  mrg 	bl	SYM (__div0)
     372      1.1  mrg 	mov	r0, #0			@ About as wrong as it could be.
     373      1.1  mrg #if defined (__INTERWORKING__)
     374      1.1  mrg 	pop	{ r1, r2 }
     375      1.1  mrg 	bx	r2
     376      1.1  mrg #else
     377      1.1  mrg 	pop	{ r1, pc }
     378      1.1  mrg #endif
     379      1.1  mrg .endm
     380      1.1  mrg #endif
    381      1.1  mrg 
/* FUNC_END name - emit the ELF .size directive for __\name
   (no-op on non-ELF, via the SIZE macro).  */
     382      1.1  mrg .macro FUNC_END name
     383      1.1  mrg 	SIZE (__\name)
     384      1.1  mrg .endm
    385      1.1  mrg 
/* DIV_FUNC_END name, signed - close a division routine: open the
   debug-frame FDE for __\name, define LSYM(Ldiv0) (the target the
   routine branches to on a zero divisor) expanding to the Thumb or
   ARM divide-by-zero handler as appropriate, close the FDE, and
   emit the symbol size.  */
     386      1.1  mrg .macro DIV_FUNC_END name signed
     387      1.1  mrg 	cfi_start	__\name, LSYM(Lend_div0)
     388      1.1  mrg LSYM(Ldiv0):
     389      1.1  mrg #ifdef __thumb__
     390      1.1  mrg 	THUMB_LDIV0 \name \signed
     391      1.1  mrg #else
     392      1.1  mrg 	ARM_LDIV0 \name \signed
     393      1.1  mrg #endif
     394      1.1  mrg 	cfi_end	LSYM(Lend_div0)
     395      1.1  mrg 	FUNC_END \name
     396      1.1  mrg .endm
    397      1.1  mrg 
/* THUMB_FUNC_START name - declare and define a global Thumb entry
   point \name (no "__" prefix added, unlike FUNC_START): global,
   function-typed, marked .thumb_func so its address has bit 0 set
   for interworking.  */
     398      1.1  mrg .macro THUMB_FUNC_START name
     399      1.1  mrg 	.globl	SYM (\name)
     400      1.1  mrg 	TYPE	(\name)
     401      1.1  mrg 	.thumb_func
     402      1.1  mrg SYM (\name):
     403      1.1  mrg .endm
    404      1.1  mrg 
    405      1.1  mrg /* Function start macros.  Variants for ARM and Thumb.  */
    406      1.1  mrg 
    407      1.1  mrg #ifdef __thumb__
    408      1.1  mrg #define THUMB_FUNC .thumb_func
    409      1.1  mrg #define THUMB_CODE .force_thumb
    410      1.1  mrg # if defined(__thumb2__)
    411      1.1  mrg #define THUMB_SYNTAX .syntax divided
    412      1.1  mrg # else
    413      1.1  mrg #define THUMB_SYNTAX
    414      1.1  mrg # endif
    415      1.1  mrg #else
    416      1.1  mrg #define THUMB_FUNC
    417      1.1  mrg #define THUMB_CODE
    418      1.1  mrg #define THUMB_SYNTAX
    419      1.1  mrg #endif
    420      1.1  mrg 
/* FUNC_START name [, sp_section] - open the definition of the global
   function __\name.  When sp_section is the literal word
   "function_section", the routine is placed in its own
   .text.__\name section (enables --gc-sections/function-level
   linking); otherwise it goes in plain .text.  THUMB_CODE /
   THUMB_FUNC / THUMB_SYNTAX expand appropriately for Thumb vs ARM
   builds (empty for ARM).  */
     421  1.1.1.2  mrg .macro FUNC_START name sp_section=
     422  1.1.1.2  mrg   .ifc \sp_section, function_section
     423  1.1.1.2  mrg 	.section	.text.__\name,"ax",%progbits
     424  1.1.1.2  mrg   .else
     425      1.1  mrg 	.text
     426  1.1.1.2  mrg   .endif
     427      1.1  mrg 	.globl SYM (__\name)
     428      1.1  mrg 	TYPE (__\name)
     429      1.1  mrg 	.align 0
     430      1.1  mrg 	THUMB_CODE
     431      1.1  mrg 	THUMB_FUNC
     432      1.1  mrg 	THUMB_SYNTAX
     433      1.1  mrg SYM (__\name):
     434      1.1  mrg .endm
    435      1.1  mrg 
/* ARM_SYM_START name - define a local (non-global) function-typed
   symbol \name; no "__" prefix, no section or mode directives.
   Pair with SYM_END.  */
     436  1.1.1.2  mrg .macro ARM_SYM_START name
     437  1.1.1.2  mrg        TYPE (\name)
     438  1.1.1.2  mrg        .align 0
     439  1.1.1.2  mrg SYM (\name):
     440  1.1.1.2  mrg .endm
    441  1.1.1.2  mrg 
/* SYM_END name - emit the ELF .size directive for a symbol opened
   with ARM_SYM_START (no "__" prefix, unlike FUNC_END).  */
     442  1.1.1.2  mrg .macro SYM_END name
     443  1.1.1.2  mrg        SIZE (\name)
     444  1.1.1.2  mrg .endm
    445  1.1.1.2  mrg 
    446      1.1  mrg /* Special function that will always be coded in ARM assembly, even if
    447      1.1  mrg    in Thumb-only compilation.  */
    448      1.1  mrg 
/* ARM_FUNC_START name [, sp_section] / ARM_CALL name / EQUIV:
   open (and call) routines that must be ARM-mode even in Thumb
   builds.  Three configurations:
     - Thumb-2: everything is really built as (unified-syntax) Thumb;
       ARM_FUNC_START is FUNC_START plus ".syntax unified", ARM_CALL
       is a plain bl, aliases use .thumb_set.
     - Interworking stubs (armv4t Thumb without -mthumb-interwork):
       FUNC_START emits a Thumb entry, then "bx pc" drops into ARM
       mode at the local label _L__\name, which ARM_CALL targets
       directly (must itself be called from ARM mode).
     - Plain ARM (non-v6M): emit an ARM-mode function, honouring the
       optional per-function section as in FUNC_START; aliases use
       .set.  On v6-M only EQUIV is defined (no ARM mode exists).
   EQUIV is the directive ARM_FUNC_ALIAS uses to alias entry points.  */
     449      1.1  mrg #if defined(__thumb2__)
     450      1.1  mrg 
     451      1.1  mrg /* For Thumb-2 we build everything in thumb mode.  */
     452  1.1.1.2  mrg .macro ARM_FUNC_START name sp_section=
     453  1.1.1.2  mrg        FUNC_START \name \sp_section
     454      1.1  mrg        .syntax unified
     455      1.1  mrg .endm
     456      1.1  mrg #define EQUIV .thumb_set
     457      1.1  mrg .macro  ARM_CALL name
     458      1.1  mrg 	bl	__\name
     459      1.1  mrg .endm
     460      1.1  mrg 
     461      1.1  mrg #elif defined(__INTERWORKING_STUBS__)
     462      1.1  mrg 
     463      1.1  mrg .macro	ARM_FUNC_START name
     464      1.1  mrg 	FUNC_START \name
     465      1.1  mrg 	bx	pc
     466      1.1  mrg 	nop
     467      1.1  mrg 	.arm
     468      1.1  mrg /* A hook to tell gdb that we've switched to ARM mode.  Also used to call
     469      1.1  mrg    directly from other local arm routines.  */
     470      1.1  mrg _L__\name:
     471      1.1  mrg .endm
     472      1.1  mrg #define EQUIV .thumb_set
     473      1.1  mrg /* Branch directly to a function declared with ARM_FUNC_START.
     474      1.1  mrg    Must be called in arm mode.  */
     475      1.1  mrg .macro  ARM_CALL name
     476      1.1  mrg 	bl	_L__\name
     477      1.1  mrg .endm
     478      1.1  mrg 
     479      1.1  mrg #else /* !(__INTERWORKING_STUBS__ || __thumb2__) */
     480      1.1  mrg 
     481      1.1  mrg #ifdef __ARM_ARCH_6M__
     482      1.1  mrg #define EQUIV .thumb_set
     483      1.1  mrg #else
     484  1.1.1.2  mrg .macro	ARM_FUNC_START name sp_section=
     485  1.1.1.2  mrg   .ifc \sp_section, function_section
     486  1.1.1.2  mrg 	.section	.text.__\name,"ax",%progbits
     487  1.1.1.2  mrg   .else
     488      1.1  mrg 	.text
     489  1.1.1.2  mrg   .endif
     490      1.1  mrg 	.globl SYM (__\name)
     491      1.1  mrg 	TYPE (__\name)
     492      1.1  mrg 	.align 0
     493      1.1  mrg 	.arm
     494      1.1  mrg SYM (__\name):
     495      1.1  mrg .endm
     496      1.1  mrg #define EQUIV .set
     497      1.1  mrg .macro  ARM_CALL name
     498      1.1  mrg 	bl	__\name
     499      1.1  mrg .endm
     500      1.1  mrg #endif
     501      1.1  mrg 
     502      1.1  mrg #endif
    503      1.1  mrg 
/* FUNC_ALIAS new, old - make __\new a global alias for __\old.
   Uses .thumb_set in Thumb builds so the alias keeps the Thumb
   (bit-0-set) address, plain .set otherwise.  */
     504      1.1  mrg .macro	FUNC_ALIAS new old
     505      1.1  mrg 	.globl	SYM (__\new)
     506      1.1  mrg #if defined (__thumb__)
     507      1.1  mrg 	.thumb_set	SYM (__\new), SYM (__\old)
     508      1.1  mrg #else
     509      1.1  mrg 	.set	SYM (__\new), SYM (__\old)
     510      1.1  mrg #endif
     511      1.1  mrg .endm
    512      1.1  mrg 
/* ARM_FUNC_ALIAS new, old - alias for functions declared with
   ARM_FUNC_START: __\new aliases __\old via EQUIV (.set or
   .thumb_set, chosen above per configuration).  With interworking
   stubs the hidden ARM-mode entry _L__\old is aliased as well so
   ARM_CALL works on the alias.  Not provided on v6-M (no
   ARM_FUNC_START there).  */
     513      1.1  mrg #ifndef __ARM_ARCH_6M__
     514      1.1  mrg .macro	ARM_FUNC_ALIAS new old
     515      1.1  mrg 	.globl	SYM (__\new)
     516      1.1  mrg 	EQUIV	SYM (__\new), SYM (__\old)
     517      1.1  mrg #if defined(__INTERWORKING_STUBS__)
     518      1.1  mrg 	.set	SYM (_L__\new), SYM (_L__\old)
     519      1.1  mrg #endif
     520      1.1  mrg .endm
     521      1.1  mrg #endif
    522      1.1  mrg 
    523      1.1  mrg #ifdef __ARMEB__
    524      1.1  mrg #define xxh r0
    525      1.1  mrg #define xxl r1
    526      1.1  mrg #define yyh r2
    527      1.1  mrg #define yyl r3
    528      1.1  mrg #else
    529      1.1  mrg #define xxh r1
    530      1.1  mrg #define xxl r0
    531      1.1  mrg #define yyh r3
    532      1.1  mrg #define yyl r2
    533      1.1  mrg #endif
    534      1.1  mrg 
/* WEAK name - mark __\name as a weak symbol (EABI builds only),
   allowing the program or another library to override it.  */
     535      1.1  mrg #ifdef __ARM_EABI__
     536      1.1  mrg .macro	WEAK name
     537      1.1  mrg 	.weak SYM (__\name)
     538      1.1  mrg .endm
     539      1.1  mrg #endif
    540      1.1  mrg 
    541      1.1  mrg #ifdef __thumb__
    542      1.1  mrg /* Register aliases.  */
    543      1.1  mrg 
    544      1.1  mrg work		.req	r4	@ XXXX is this safe ?
    545      1.1  mrg dividend	.req	r0
    546      1.1  mrg divisor		.req	r1
    547      1.1  mrg overdone	.req	r2
    548      1.1  mrg result		.req	r2
    549      1.1  mrg curbit		.req	r3
    550      1.1  mrg #endif
    551      1.1  mrg #if 0
    552      1.1  mrg ip		.req	r12
    553      1.1  mrg sp		.req	r13
    554      1.1  mrg lr		.req	r14
    555      1.1  mrg pc		.req	r15
    556      1.1  mrg #endif
    557      1.1  mrg 
    558      1.1  mrg /* ------------------------------------------------------------------------ */
    559      1.1  mrg /*		Bodies of the division and modulo routines.		    */
    560      1.1  mrg /* ------------------------------------------------------------------------ */
/* ARM_DIV_BODY dividend, divisor, result, curbit
   Core unsigned 32-bit shift-and-subtract division.  On exit
   \result holds the quotient and \dividend holds the remainder;
   \divisor and \curbit are clobbered.  Assumes \divisor != 0 (the
   callers branch to LSYM(Ldiv0) first).  Three strategies:
     - v5+ speed-optimized: clz computes the bit-length difference,
       then execution jumps into the middle of a fully unrolled
       32-step compare/adc/subcs ladder (one step per quotient bit;
       adc shifts the carry - the per-bit quotient - into \result).
       The Thumb-2 variant pads each step to 16 bytes (nop.n,
       .p2align 3) because "add pc" tricks are not available, so it
       computes the entry address with adr instead.
     - v5+ size-optimized: clz aligns \divisor with \dividend and
       sets \curbit to the matching quotient bit, then falls into
       the common loop below.
     - pre-v5: no clz, so \divisor/\curbit are aligned by trial
       shifting (by 3, then 4 bits at a time, then 1 bit near
       overflow), followed by the common 4-bits-per-iteration
       division loop with early termination when \dividend hits 0.
       ("movnes" is ARM divided syntax: conditional mov that also
       sets flags.)  */
     561      1.1  mrg .macro ARM_DIV_BODY dividend, divisor, result, curbit
     562      1.1  mrg 
     563      1.1  mrg #if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
     564      1.1  mrg 
     565      1.1  mrg #if defined (__thumb2__)
     566      1.1  mrg 	clz	\curbit, \dividend
     567      1.1  mrg 	clz	\result, \divisor
     568      1.1  mrg 	sub	\curbit, \result, \curbit
     569      1.1  mrg 	rsb	\curbit, \curbit, #31
     570      1.1  mrg 	adr	\result, 1f
     571      1.1  mrg 	add	\curbit, \result, \curbit, lsl #4
     572      1.1  mrg 	mov	\result, #0
     573      1.1  mrg 	mov	pc, \curbit
     574      1.1  mrg .p2align 3
     575      1.1  mrg 1:
     576      1.1  mrg 	.set	shift, 32
     577      1.1  mrg 	.rept	32
     578      1.1  mrg 	.set	shift, shift - 1
     579      1.1  mrg 	cmp.w	\dividend, \divisor, lsl #shift
     580      1.1  mrg 	nop.n
     581      1.1  mrg 	adc.w	\result, \result, \result
     582      1.1  mrg 	it	cs
     583      1.1  mrg 	subcs.w	\dividend, \dividend, \divisor, lsl #shift
     584      1.1  mrg 	.endr
     585      1.1  mrg #else
     586      1.1  mrg 	clz	\curbit, \dividend
     587      1.1  mrg 	clz	\result, \divisor
     588      1.1  mrg 	sub	\curbit, \result, \curbit
     589      1.1  mrg 	rsbs	\curbit, \curbit, #31
     590      1.1  mrg 	addne	\curbit, \curbit, \curbit, lsl #1
     591      1.1  mrg 	mov	\result, #0
     592      1.1  mrg 	addne	pc, pc, \curbit, lsl #2
     593      1.1  mrg 	nop
     594      1.1  mrg 	.set	shift, 32
     595      1.1  mrg 	.rept	32
     596      1.1  mrg 	.set	shift, shift - 1
     597      1.1  mrg 	cmp	\dividend, \divisor, lsl #shift
     598      1.1  mrg 	adc	\result, \result, \result
     599      1.1  mrg 	subcs	\dividend, \dividend, \divisor, lsl #shift
     600      1.1  mrg 	.endr
     601      1.1  mrg #endif
     602      1.1  mrg 
     603      1.1  mrg #else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
     604      1.1  mrg #if __ARM_ARCH__ >= 5
     605      1.1  mrg 
     606      1.1  mrg 	clz	\curbit, \divisor
     607      1.1  mrg 	clz	\result, \dividend
     608      1.1  mrg 	sub	\result, \curbit, \result
     609      1.1  mrg 	mov	\curbit, #1
     610      1.1  mrg 	mov	\divisor, \divisor, lsl \result
     611      1.1  mrg 	mov	\curbit, \curbit, lsl \result
     612      1.1  mrg 	mov	\result, #0
     613      1.1  mrg 
     614      1.1  mrg #else /* __ARM_ARCH__ < 5 */
     615      1.1  mrg 
     616      1.1  mrg 	@ Initially shift the divisor left 3 bits if possible,
     617      1.1  mrg 	@ set curbit accordingly.  This allows for curbit to be located
     618      1.1  mrg 	@ at the left end of each 4-bit nibbles in the division loop
     619      1.1  mrg 	@ to save one loop in most cases.
     620      1.1  mrg 	tst	\divisor, #0xe0000000
     621      1.1  mrg 	moveq	\divisor, \divisor, lsl #3
     622      1.1  mrg 	moveq	\curbit, #8
     623      1.1  mrg 	movne	\curbit, #1
     624      1.1  mrg 
     625      1.1  mrg 	@ Unless the divisor is very big, shift it up in multiples of
     626      1.1  mrg 	@ four bits, since this is the amount of unwinding in the main
     627      1.1  mrg 	@ division loop.  Continue shifting until the divisor is
     628      1.1  mrg 	@ larger than the dividend.
     629      1.1  mrg 1:	cmp	\divisor, #0x10000000
     630      1.1  mrg 	cmplo	\divisor, \dividend
     631      1.1  mrg 	movlo	\divisor, \divisor, lsl #4
     632      1.1  mrg 	movlo	\curbit, \curbit, lsl #4
     633      1.1  mrg 	blo	1b
     634      1.1  mrg 
     635      1.1  mrg 	@ For very big divisors, we must shift it a bit at a time, or
     636      1.1  mrg 	@ we will be in danger of overflowing.
     637      1.1  mrg 1:	cmp	\divisor, #0x80000000
     638      1.1  mrg 	cmplo	\divisor, \dividend
     639      1.1  mrg 	movlo	\divisor, \divisor, lsl #1
     640      1.1  mrg 	movlo	\curbit, \curbit, lsl #1
     641      1.1  mrg 	blo	1b
     642      1.1  mrg 
     643      1.1  mrg 	mov	\result, #0
     644      1.1  mrg 
     645      1.1  mrg #endif /* __ARM_ARCH__ < 5 */
     646      1.1  mrg 
     647      1.1  mrg 	@ Division loop
     648      1.1  mrg 1:	cmp	\dividend, \divisor
     649      1.1  mrg 	do_it	hs, t
     650      1.1  mrg 	subhs	\dividend, \dividend, \divisor
     651      1.1  mrg 	orrhs	\result,   \result,   \curbit
     652      1.1  mrg 	cmp	\dividend, \divisor,  lsr #1
     653      1.1  mrg 	do_it	hs, t
     654      1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #1
     655      1.1  mrg 	orrhs	\result,   \result,   \curbit,  lsr #1
     656      1.1  mrg 	cmp	\dividend, \divisor,  lsr #2
     657      1.1  mrg 	do_it	hs, t
     658      1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #2
     659      1.1  mrg 	orrhs	\result,   \result,   \curbit,  lsr #2
     660      1.1  mrg 	cmp	\dividend, \divisor,  lsr #3
     661      1.1  mrg 	do_it	hs, t
     662      1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #3
     663      1.1  mrg 	orrhs	\result,   \result,   \curbit,  lsr #3
     664      1.1  mrg 	cmp	\dividend, #0			@ Early termination?
     665      1.1  mrg 	do_it	ne, t
     666      1.1  mrg 	movnes	\curbit,   \curbit,  lsr #4	@ No, any more bits to do?
     667      1.1  mrg 	movne	\divisor,  \divisor, lsr #4
     668      1.1  mrg 	bne	1b
     669      1.1  mrg 
     670      1.1  mrg #endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
     671      1.1  mrg 
     672      1.1  mrg .endm
    673      1.1  mrg /* ------------------------------------------------------------------------ */
/* ARM_DIV2_ORDER divisor, order
   Compute \order = position of the highest set bit of \divisor,
   i.e. log2 of a power-of-two divisor (used by callers to reduce a
   power-of-two division to a shift).  On v5+ this is a clz and an
   rsb (31 - clz).  On pre-v5 it is a branch-free binary search:
   each cmp/movhs step halves the remaining width while
   accumulating into \order; the final step maps the leftover 2-bit
   value via addhi/addls.  Pre-v5, \divisor is destroyed.  */
     674      1.1  mrg .macro ARM_DIV2_ORDER divisor, order
     675      1.1  mrg 
     676      1.1  mrg #if __ARM_ARCH__ >= 5
     677      1.1  mrg 
     678      1.1  mrg 	clz	\order, \divisor
     679      1.1  mrg 	rsb	\order, \order, #31
     680      1.1  mrg 
     681      1.1  mrg #else
     682      1.1  mrg 
     683      1.1  mrg 	cmp	\divisor, #(1 << 16)
     684      1.1  mrg 	movhs	\divisor, \divisor, lsr #16
     685      1.1  mrg 	movhs	\order, #16
     686      1.1  mrg 	movlo	\order, #0
     687      1.1  mrg 
     688      1.1  mrg 	cmp	\divisor, #(1 << 8)
     689      1.1  mrg 	movhs	\divisor, \divisor, lsr #8
     690      1.1  mrg 	addhs	\order, \order, #8
     691      1.1  mrg 
     692      1.1  mrg 	cmp	\divisor, #(1 << 4)
     693      1.1  mrg 	movhs	\divisor, \divisor, lsr #4
     694      1.1  mrg 	addhs	\order, \order, #4
     695      1.1  mrg 
     696      1.1  mrg 	cmp	\divisor, #(1 << 2)
     697      1.1  mrg 	addhi	\order, \order, #3
     698      1.1  mrg 	addls	\order, \order, \divisor, lsr #1
     699      1.1  mrg 
     700      1.1  mrg #endif
     701      1.1  mrg 
     702      1.1  mrg .endm
    703      1.1  mrg /* ------------------------------------------------------------------------ */
    704      1.1  mrg .macro ARM_MOD_BODY dividend, divisor, order, spare
                      @ Unsigned modulus: reduce \dividend in place so that on exit it
                      @ holds \dividend mod \divisor.  \order and \spare are scratch.
                      @ \divisor is clobbered on the size-optimized / pre-ARMv5 paths.
    705      1.1  mrg 
    706      1.1  mrg #if __ARM_ARCH__ >= 5 && ! defined (__OPTIMIZE_SIZE__)
    707      1.1  mrg 
                      	@ \order = difference in magnitude of the operands; then jump
                      	@ into the unrolled compare/subtract ladder below, skipping the
                      	@ shifts that are too large.  Each .rept step is two 4-byte
                      	@ instructions, hence the "lsl #3" scaling of the pc adjustment
                      	@ (pc reads as current insn + 8 in ARM state).
    708      1.1  mrg 	clz	\order, \divisor
    709      1.1  mrg 	clz	\spare, \dividend
    710      1.1  mrg 	sub	\order, \order, \spare
    711      1.1  mrg 	rsbs	\order, \order, #31
    712      1.1  mrg 	addne	pc, pc, \order, lsl #3
    713      1.1  mrg 	nop
    714      1.1  mrg 	.set	shift, 32
    715      1.1  mrg 	.rept	32
    716      1.1  mrg 	.set	shift, shift - 1
    717      1.1  mrg 	cmp	\dividend, \divisor, lsl #shift
    718      1.1  mrg 	subcs	\dividend, \dividend, \divisor, lsl #shift
    719      1.1  mrg 	.endr
    720      1.1  mrg 
    721      1.1  mrg #else /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
    722      1.1  mrg #if __ARM_ARCH__ >= 5
    723      1.1  mrg 
                      	@ Align the divisor's top bit with the dividend's top bit.
    724      1.1  mrg 	clz	\order, \divisor
    725      1.1  mrg 	clz	\spare, \dividend
    726      1.1  mrg 	sub	\order, \order, \spare
    727      1.1  mrg 	mov	\divisor, \divisor, lsl \order
    728      1.1  mrg 
    729      1.1  mrg #else /* __ARM_ARCH__ < 5 */
    730      1.1  mrg 
    731      1.1  mrg 	mov	\order, #0
    732      1.1  mrg 
    733      1.1  mrg 	@ Unless the divisor is very big, shift it up in multiples of
    734      1.1  mrg 	@ four bits, since this is the amount of unwinding in the main
    735      1.1  mrg 	@ division loop.  Continue shifting until the divisor is
    736      1.1  mrg 	@ larger than the dividend.
    737      1.1  mrg 1:	cmp	\divisor, #0x10000000
    738      1.1  mrg 	cmplo	\divisor, \dividend
    739      1.1  mrg 	movlo	\divisor, \divisor, lsl #4
    740      1.1  mrg 	addlo	\order, \order, #4
    741      1.1  mrg 	blo	1b
    742      1.1  mrg 
    743      1.1  mrg 	@ For very big divisors, we must shift it a bit at a time, or
    744      1.1  mrg 	@ we will be in danger of overflowing.
    745      1.1  mrg 1:	cmp	\divisor, #0x80000000
    746      1.1  mrg 	cmplo	\divisor, \dividend
    747      1.1  mrg 	movlo	\divisor, \divisor, lsl #1
    748      1.1  mrg 	addlo	\order, \order, #1
    749      1.1  mrg 	blo	1b
    750      1.1  mrg 
    751      1.1  mrg #endif /* __ARM_ARCH__ < 5 */
    752      1.1  mrg 
    753      1.1  mrg 	@ Perform all needed subtractions to keep only the remainder.
    754      1.1  mrg 	@ Do comparisons in batch of 4 first.
    755      1.1  mrg 	subs	\order, \order, #3		@ yes, 3 is intended here
    756      1.1  mrg 	blt	2f
    757      1.1  mrg 
    758      1.1  mrg 1:	cmp	\dividend, \divisor
    759      1.1  mrg 	subhs	\dividend, \dividend, \divisor
    760      1.1  mrg 	cmp	\dividend, \divisor,  lsr #1
    761      1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #1
    762      1.1  mrg 	cmp	\dividend, \divisor,  lsr #2
    763      1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #2
    764      1.1  mrg 	cmp	\dividend, \divisor,  lsr #3
    765      1.1  mrg 	subhs	\dividend, \dividend, \divisor, lsr #3
    766      1.1  mrg 	cmp	\dividend, #1
    767      1.1  mrg 	mov	\divisor, \divisor, lsr #4
    768      1.1  mrg 	subges	\order, \order, #4
    769      1.1  mrg 	bge	1b
    770      1.1  mrg 
                      	@ Loop fell through: stop early if the dividend is already zero,
                      	@ unless fewer than 4 single-bit steps remain to be done.
    771      1.1  mrg 	tst	\order, #3
    772      1.1  mrg 	teqne	\dividend, #0
    773      1.1  mrg 	beq	5f
    774      1.1  mrg 
    775      1.1  mrg 	@ Either 1, 2 or 3 comparison/subtractions are left.
    776      1.1  mrg 2:	cmn	\order, #2
    777      1.1  mrg 	blt	4f
    778      1.1  mrg 	beq	3f
    779      1.1  mrg 	cmp	\dividend, \divisor
    780      1.1  mrg 	subhs	\dividend, \dividend, \divisor
    781      1.1  mrg 	mov	\divisor,  \divisor,  lsr #1
    782      1.1  mrg 3:	cmp	\dividend, \divisor
    783      1.1  mrg 	subhs	\dividend, \dividend, \divisor
    784      1.1  mrg 	mov	\divisor,  \divisor,  lsr #1
    785      1.1  mrg 4:	cmp	\dividend, \divisor
    786      1.1  mrg 	subhs	\dividend, \dividend, \divisor
    787      1.1  mrg 5:
    788      1.1  mrg 
    789      1.1  mrg #endif /* __ARM_ARCH__ < 5 || defined (__OPTIMIZE_SIZE__) */
    790      1.1  mrg 
    791      1.1  mrg .endm
    792      1.1  mrg /* ------------------------------------------------------------------------ */
    793      1.1  mrg .macro THUMB_DIV_MOD_BODY modulo
                      @ Thumb-1 shift-and-subtract division core, 4-bit unrolled.
                      @ \modulo = 1: leaves dividend mod divisor in "dividend"
                      @   (over-subtractions are recorded in "overdone" and undone at
                      @   the end).  \modulo = 0: accumulates the quotient in "result".
                      @ Callers pre-set curbit = 1 (and result = 0 for division).
                      @ NOTE(review): dividend/divisor/curbit/result/work/overdone are
                      @ presumably .req-style register aliases defined earlier in this
                      @ file (not visible in this chunk) — confirm against the header.
    794      1.1  mrg 	@ Load the constant 0x10000000 into our work register.
    795      1.1  mrg 	mov	work, #1
    796      1.1  mrg 	lsl	work, #28
    797      1.1  mrg LSYM(Loop1):
    798      1.1  mrg 	@ Unless the divisor is very big, shift it up in multiples of
    799      1.1  mrg 	@ four bits, since this is the amount of unwinding in the main
    800      1.1  mrg 	@ division loop.  Continue shifting until the divisor is
    801      1.1  mrg 	@ larger than the dividend.
    802      1.1  mrg 	cmp	divisor, work
    803      1.1  mrg 	bhs	LSYM(Lbignum)
    804      1.1  mrg 	cmp	divisor, dividend
    805      1.1  mrg 	bhs	LSYM(Lbignum)
    806      1.1  mrg 	lsl	divisor, #4
    807      1.1  mrg 	lsl	curbit,  #4
    808      1.1  mrg 	b	LSYM(Loop1)
    809      1.1  mrg LSYM(Lbignum):
    810      1.1  mrg 	@ Set work to 0x80000000
    811      1.1  mrg 	lsl	work, #3
    812      1.1  mrg LSYM(Loop2):
    813      1.1  mrg 	@ For very big divisors, we must shift it a bit at a time, or
    814      1.1  mrg 	@ we will be in danger of overflowing.
    815      1.1  mrg 	cmp	divisor, work
    816      1.1  mrg 	bhs	LSYM(Loop3)
    817      1.1  mrg 	cmp	divisor, dividend
    818      1.1  mrg 	bhs	LSYM(Loop3)
    819      1.1  mrg 	lsl	divisor, #1
    820      1.1  mrg 	lsl	curbit,  #1
    821      1.1  mrg 	b	LSYM(Loop2)
    822      1.1  mrg LSYM(Loop3):
    823      1.1  mrg 	@ Test for possible subtractions ...
    824      1.1  mrg   .if \modulo
    825      1.1  mrg 	@ ... On the final pass, this may subtract too much from the dividend,
    826      1.1  mrg 	@ so keep track of which subtractions are done, we can fix them up
    827      1.1  mrg 	@ afterwards.
    828      1.1  mrg 	mov	overdone, #0
    829      1.1  mrg 	cmp	dividend, divisor
    830      1.1  mrg 	blo	LSYM(Lover1)
    831      1.1  mrg 	sub	dividend, dividend, divisor
    832      1.1  mrg LSYM(Lover1):
    833      1.1  mrg 	lsr	work, divisor, #1
    834      1.1  mrg 	cmp	dividend, work
    835      1.1  mrg 	blo	LSYM(Lover2)
    836      1.1  mrg 	sub	dividend, dividend, work
                      	@ Record this subtraction as bit (curbit ror 1) in overdone.
    837      1.1  mrg 	mov	ip, curbit
    838      1.1  mrg 	mov	work, #1
    839      1.1  mrg 	ror	curbit, work
    840      1.1  mrg 	orr	overdone, curbit
    841      1.1  mrg 	mov	curbit, ip
    842      1.1  mrg LSYM(Lover2):
    843      1.1  mrg 	lsr	work, divisor, #2
    844      1.1  mrg 	cmp	dividend, work
    845      1.1  mrg 	blo	LSYM(Lover3)
    846      1.1  mrg 	sub	dividend, dividend, work
    847      1.1  mrg 	mov	ip, curbit
    848      1.1  mrg 	mov	work, #2
    849      1.1  mrg 	ror	curbit, work
    850      1.1  mrg 	orr	overdone, curbit
    851      1.1  mrg 	mov	curbit, ip
    852      1.1  mrg LSYM(Lover3):
    853      1.1  mrg 	lsr	work, divisor, #3
    854      1.1  mrg 	cmp	dividend, work
    855      1.1  mrg 	blo	LSYM(Lover4)
    856      1.1  mrg 	sub	dividend, dividend, work
    857      1.1  mrg 	mov	ip, curbit
    858      1.1  mrg 	mov	work, #3
    859      1.1  mrg 	ror	curbit, work
    860      1.1  mrg 	orr	overdone, curbit
    861      1.1  mrg 	mov	curbit, ip
    862      1.1  mrg LSYM(Lover4):
                      	@ Keep the final curbit position in ip for the fix-up phase.
    863      1.1  mrg 	mov	ip, curbit
    864      1.1  mrg   .else
    865      1.1  mrg 	@ ... and note which bits are done in the result.  On the final pass,
    866      1.1  mrg 	@ this may subtract too much from the dividend, but the result will be ok,
    867      1.1  mrg 	@ since the "bit" will have been shifted out at the bottom.
    868      1.1  mrg 	cmp	dividend, divisor
    869      1.1  mrg 	blo	LSYM(Lover1)
    870      1.1  mrg 	sub	dividend, dividend, divisor
    871      1.1  mrg 	orr	result, result, curbit
    872      1.1  mrg LSYM(Lover1):
    873      1.1  mrg 	lsr	work, divisor, #1
    874      1.1  mrg 	cmp	dividend, work
    875      1.1  mrg 	blo	LSYM(Lover2)
    876      1.1  mrg 	sub	dividend, dividend, work
    877      1.1  mrg 	lsr	work, curbit, #1
    878      1.1  mrg 	orr	result, work
    879      1.1  mrg LSYM(Lover2):
    880      1.1  mrg 	lsr	work, divisor, #2
    881      1.1  mrg 	cmp	dividend, work
    882      1.1  mrg 	blo	LSYM(Lover3)
    883      1.1  mrg 	sub	dividend, dividend, work
    884      1.1  mrg 	lsr	work, curbit, #2
    885      1.1  mrg 	orr	result, work
    886      1.1  mrg LSYM(Lover3):
    887      1.1  mrg 	lsr	work, divisor, #3
    888      1.1  mrg 	cmp	dividend, work
    889      1.1  mrg 	blo	LSYM(Lover4)
    890      1.1  mrg 	sub	dividend, dividend, work
    891      1.1  mrg 	lsr	work, curbit, #3
    892      1.1  mrg 	orr	result, work
    893      1.1  mrg LSYM(Lover4):
    894      1.1  mrg   .endif
    895      1.1  mrg 
    896      1.1  mrg 	cmp	dividend, #0			@ Early termination?
    897      1.1  mrg 	beq	LSYM(Lover5)
    898      1.1  mrg 	lsr	curbit,  #4			@ No, any more bits to do?
    899      1.1  mrg 	beq	LSYM(Lover5)
    900      1.1  mrg 	lsr	divisor, #4
    901      1.1  mrg 	b	LSYM(Loop3)
    902      1.1  mrg LSYM(Lover5):
    903      1.1  mrg   .if \modulo
    904      1.1  mrg 	@ Any subtractions that we should not have done will be recorded in
    905      1.1  mrg 	@ the top three bits of "overdone".  Exactly which were not needed
    906      1.1  mrg 	@ are governed by the position of the bit, stored in ip.
    907      1.1  mrg 	mov	work, #0xe
    908      1.1  mrg 	lsl	work, #28
    909      1.1  mrg 	and	overdone, work
    910      1.1  mrg 	beq	LSYM(Lgot_result)
    911      1.1  mrg 
    912      1.1  mrg 	@ If we terminated early, because dividend became zero, then the
    913      1.1  mrg 	@ bit in ip will not be in the bottom nibble, and we should not
    914      1.1  mrg 	@ perform the additions below.  We must test for this though
    915      1.1  mrg 	@ (rather relying upon the TSTs to prevent the additions) since
    916      1.1  mrg 	@ the bit in ip could be in the top two bits which might then match
    917      1.1  mrg 	@ with one of the smaller RORs.
    918      1.1  mrg 	mov	curbit, ip
    919      1.1  mrg 	mov	work, #0x7
    920      1.1  mrg 	tst	curbit, work
    921      1.1  mrg 	beq	LSYM(Lgot_result)
    922      1.1  mrg 
                      	@ Undo each over-subtraction by adding the shifted divisor back.
    923      1.1  mrg 	mov	curbit, ip
    924      1.1  mrg 	mov	work, #3
    925      1.1  mrg 	ror	curbit, work
    926      1.1  mrg 	tst	overdone, curbit
    927      1.1  mrg 	beq	LSYM(Lover6)
    928      1.1  mrg 	lsr	work, divisor, #3
    929      1.1  mrg 	add	dividend, work
    930      1.1  mrg LSYM(Lover6):
    931      1.1  mrg 	mov	curbit, ip
    932      1.1  mrg 	mov	work, #2
    933      1.1  mrg 	ror	curbit, work
    934      1.1  mrg 	tst	overdone, curbit
    935      1.1  mrg 	beq	LSYM(Lover7)
    936      1.1  mrg 	lsr	work, divisor, #2
    937      1.1  mrg 	add	dividend, work
    938      1.1  mrg LSYM(Lover7):
    939      1.1  mrg 	mov	curbit, ip
    940      1.1  mrg 	mov	work, #1
    941      1.1  mrg 	ror	curbit, work
    942      1.1  mrg 	tst	overdone, curbit
    943      1.1  mrg 	beq	LSYM(Lgot_result)
    944      1.1  mrg 	lsr	work, divisor, #1
    945      1.1  mrg 	add	dividend, work
    946      1.1  mrg   .endif
    947      1.1  mrg LSYM(Lgot_result):
    948      1.1  mrg .endm
    949      1.1  mrg /* ------------------------------------------------------------------------ */
    950      1.1  mrg /*		Start of the Real Functions				    */
    951      1.1  mrg /* ------------------------------------------------------------------------ */
    952      1.1  mrg #ifdef L_udivsi3
                      @ __udivsi3 / __aeabi_uidiv: unsigned 32-bit division, r0 = r0 / r1.
                      @ Branches to Ldiv0 (defined elsewhere in this file) when r1 == 0.
    953      1.1  mrg 
    954      1.1  mrg #if defined(__prefer_thumb__)
    955      1.1  mrg 
    956      1.1  mrg 	FUNC_START udivsi3
    957      1.1  mrg 	FUNC_ALIAS aeabi_uidiv udivsi3
    958      1.1  mrg 
    959      1.1  mrg 	cmp	divisor, #0
    960      1.1  mrg 	beq	LSYM(Ldiv0)
    961      1.1  mrg LSYM(udivsi3_skip_div0_test):
    962      1.1  mrg 	mov	curbit, #1
    963      1.1  mrg 	mov	result, #0
    964      1.1  mrg 
    965      1.1  mrg 	push	{ work }
                      	@ dividend < divisor: quotient is 0 (already in result).
    966      1.1  mrg 	cmp	dividend, divisor
    967      1.1  mrg 	blo	LSYM(Lgot_result)
    968      1.1  mrg 
    969      1.1  mrg 	THUMB_DIV_MOD_BODY 0
    970      1.1  mrg 
    971      1.1  mrg 	mov	r0, result
    972      1.1  mrg 	pop	{ work }
    973      1.1  mrg 	RET
    974      1.1  mrg 
    975      1.1  mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
    976      1.1  mrg 
                      	@ Hardware integer divide available: single udiv instruction.
    977      1.1  mrg 	ARM_FUNC_START udivsi3
    978      1.1  mrg 	ARM_FUNC_ALIAS aeabi_uidiv udivsi3
    979      1.1  mrg 
    980      1.1  mrg 	cmp	r1, #0
    981      1.1  mrg 	beq	LSYM(Ldiv0)
    982      1.1  mrg 
    983      1.1  mrg 	udiv	r0, r0, r1
    984      1.1  mrg 	RET
    985      1.1  mrg 
    986      1.1  mrg #else /* ARM version/Thumb-2.  */
    987      1.1  mrg 
    988      1.1  mrg 	ARM_FUNC_START udivsi3
    989      1.1  mrg 	ARM_FUNC_ALIAS aeabi_uidiv udivsi3
    990      1.1  mrg 
    991      1.1  mrg 	/* Note: if called via udivsi3_skip_div0_test, this will unnecessarily
    992      1.1  mrg 	   check for division-by-zero a second time.  */
    993      1.1  mrg LSYM(udivsi3_skip_div0_test):
                      	@ r2 = r1 - 1; EQ => divisor 1 (result already in r0),
                      	@ CC => divisor was 0.
    994      1.1  mrg 	subs	r2, r1, #1
    995      1.1  mrg 	do_it	eq
    996      1.1  mrg 	RETc(eq)
    997      1.1  mrg 	bcc	LSYM(Ldiv0)
    998      1.1  mrg 	cmp	r0, r1
    999      1.1  mrg 	bls	11f
   1000      1.1  mrg 	tst	r1, r2
   1001      1.1  mrg 	beq	12f
   1002      1.1  mrg 
   1003      1.1  mrg 	ARM_DIV_BODY r0, r1, r2, r3
   1004      1.1  mrg 
   1005      1.1  mrg 	mov	r0, r2
   1006      1.1  mrg 	RET
   1007      1.1  mrg 
                      	@ dividend <= divisor: quotient is 1 (equal) or 0 (smaller).
   1008      1.1  mrg 11:	do_it	eq, e
   1009      1.1  mrg 	moveq	r0, #1
   1010      1.1  mrg 	movne	r0, #0
   1011      1.1  mrg 	RET
   1012      1.1  mrg 
                      	@ Power-of-two divisor: divide by shifting right.
   1013      1.1  mrg 12:	ARM_DIV2_ORDER r1, r2
   1014      1.1  mrg 
   1015      1.1  mrg 	mov	r0, r0, lsr r2
   1016      1.1  mrg 	RET
   1017      1.1  mrg 
   1018      1.1  mrg #endif /* ARM version */
   1019      1.1  mrg 
   1020      1.1  mrg 	DIV_FUNC_END udivsi3 unsigned
   1021      1.1  mrg 
                      @ __aeabi_uidivmod: quotient in r0, remainder in r1
                      @ (remainder computed as dividend - quotient * divisor).
   1022      1.1  mrg #if defined(__prefer_thumb__)
   1023      1.1  mrg FUNC_START aeabi_uidivmod
   1024      1.1  mrg 	cmp	r1, #0
   1025      1.1  mrg 	beq	LSYM(Ldiv0)
   1026      1.1  mrg 	push	{r0, r1, lr}
   1027      1.1  mrg 	bl	LSYM(udivsi3_skip_div0_test)
   1028      1.1  mrg 	POP	{r1, r2, r3}
   1029      1.1  mrg 	mul	r2, r0
   1030      1.1  mrg 	sub	r1, r1, r2
   1031      1.1  mrg 	bx	r3
   1032      1.1  mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
   1033      1.1  mrg ARM_FUNC_START aeabi_uidivmod
   1034      1.1  mrg 	cmp	r1, #0
   1035      1.1  mrg 	beq	LSYM(Ldiv0)
   1036      1.1  mrg 	mov     r2, r0
   1037      1.1  mrg 	udiv	r0, r0, r1
   1038      1.1  mrg 	mls     r1, r0, r1, r2
   1039      1.1  mrg 	RET
   1040      1.1  mrg #else
   1041      1.1  mrg ARM_FUNC_START aeabi_uidivmod
   1042      1.1  mrg 	cmp	r1, #0
   1043      1.1  mrg 	beq	LSYM(Ldiv0)
   1044      1.1  mrg 	stmfd	sp!, { r0, r1, lr }
   1045      1.1  mrg 	bl	LSYM(udivsi3_skip_div0_test)
   1046      1.1  mrg 	ldmfd	sp!, { r1, r2, lr }
   1047      1.1  mrg 	mul	r3, r2, r0
   1048      1.1  mrg 	sub	r1, r1, r3
   1049      1.1  mrg 	RET
   1050      1.1  mrg #endif
   1051      1.1  mrg 	FUNC_END aeabi_uidivmod
   1052      1.1  mrg 
   1053      1.1  mrg #endif /* L_udivsi3 */
   1054      1.1  mrg /* ------------------------------------------------------------------------ */
   1055      1.1  mrg #ifdef L_umodsi3
                      @ __umodsi3: unsigned 32-bit modulus, r0 = r0 % r1.
                      @ Branches to Ldiv0 when r1 == 0.
   1056      1.1  mrg 
   1057      1.1  mrg #ifdef __ARM_ARCH_EXT_IDIV__
   1058      1.1  mrg 
                      	@ Hardware divide: r0 - (r0 / r1) * r1 via udiv + mls.
   1059      1.1  mrg 	ARM_FUNC_START umodsi3
   1060      1.1  mrg 
   1061      1.1  mrg 	cmp	r1, #0
   1062      1.1  mrg 	beq	LSYM(Ldiv0)
   1063      1.1  mrg 	udiv	r2, r0, r1
   1064      1.1  mrg 	mls     r0, r1, r2, r0
   1065      1.1  mrg 	RET
   1066      1.1  mrg 
   1067      1.1  mrg #elif defined(__thumb__)
   1068      1.1  mrg 
   1069      1.1  mrg 	FUNC_START umodsi3
   1070      1.1  mrg 
   1071      1.1  mrg 	cmp	divisor, #0
   1072      1.1  mrg 	beq	LSYM(Ldiv0)
   1073      1.1  mrg 	mov	curbit, #1
                      	@ dividend < divisor: dividend is already the remainder.
   1074      1.1  mrg 	cmp	dividend, divisor
   1075      1.1  mrg 	bhs	LSYM(Lover10)
   1076      1.1  mrg 	RET
   1077      1.1  mrg 
   1078      1.1  mrg LSYM(Lover10):
   1079      1.1  mrg 	push	{ work }
   1080      1.1  mrg 
   1081      1.1  mrg 	THUMB_DIV_MOD_BODY 1
   1082      1.1  mrg 
   1083      1.1  mrg 	pop	{ work }
   1084      1.1  mrg 	RET
   1085      1.1  mrg 
   1086      1.1  mrg #else  /* ARM version.  */
   1087      1.1  mrg 
   1088      1.1  mrg 	FUNC_START umodsi3
   1089      1.1  mrg 
                      	@ Fast paths: divisor 0 -> Ldiv0; divisor 1 or dividend ==
                      	@ divisor -> 0; dividend < divisor -> dividend unchanged;
                      	@ power-of-two divisor -> mask with (divisor - 1).
   1090      1.1  mrg 	subs	r2, r1, #1			@ compare divisor with 1
   1091      1.1  mrg 	bcc	LSYM(Ldiv0)
   1092      1.1  mrg 	cmpne	r0, r1				@ compare dividend with divisor
   1093      1.1  mrg 	moveq   r0, #0
   1094      1.1  mrg 	tsthi	r1, r2				@ see if divisor is power of 2
   1095      1.1  mrg 	andeq	r0, r0, r2
   1096      1.1  mrg 	RETc(ls)
   1097      1.1  mrg 
   1098      1.1  mrg 	ARM_MOD_BODY r0, r1, r2, r3
   1099      1.1  mrg 
   1100      1.1  mrg 	RET
   1101      1.1  mrg 
   1102      1.1  mrg #endif /* ARM version.  */
   1103      1.1  mrg 
   1104      1.1  mrg 	DIV_FUNC_END umodsi3 unsigned
   1105      1.1  mrg 
   1106      1.1  mrg #endif /* L_umodsi3 */
   1107      1.1  mrg /* ------------------------------------------------------------------------ */
   1108      1.1  mrg #ifdef L_divsi3
                      @ __divsi3 / __aeabi_idiv: signed 32-bit division, r0 = r0 / r1.
                      @ Sign of the result is the XOR of the operand signs; the core
                      @ loops operate on absolute values.  Branches to Ldiv0 on r1 == 0.
   1109      1.1  mrg 
   1110      1.1  mrg #if defined(__prefer_thumb__)
   1111      1.1  mrg 
   1112      1.1  mrg 	FUNC_START divsi3
   1113      1.1  mrg 	FUNC_ALIAS aeabi_idiv divsi3
   1114      1.1  mrg 
   1115      1.1  mrg 	cmp	divisor, #0
   1116      1.1  mrg 	beq	LSYM(Ldiv0)
   1117      1.1  mrg LSYM(divsi3_skip_div0_test):
   1118      1.1  mrg 	push	{ work }
   1119      1.1  mrg 	mov	work, dividend
   1120      1.1  mrg 	eor	work, divisor		@ Save the sign of the result.
   1121      1.1  mrg 	mov	ip, work
   1122      1.1  mrg 	mov	curbit, #1
   1123      1.1  mrg 	mov	result, #0
   1124      1.1  mrg 	cmp	divisor, #0
   1125      1.1  mrg 	bpl	LSYM(Lover10)
   1126      1.1  mrg 	neg	divisor, divisor	@ Loops below use unsigned.
   1127      1.1  mrg LSYM(Lover10):
   1128      1.1  mrg 	cmp	dividend, #0
   1129      1.1  mrg 	bpl	LSYM(Lover11)
   1130      1.1  mrg 	neg	dividend, dividend
   1131      1.1  mrg LSYM(Lover11):
   1132      1.1  mrg 	cmp	dividend, divisor
   1133      1.1  mrg 	blo	LSYM(Lgot_result)
   1134      1.1  mrg 
   1135      1.1  mrg 	THUMB_DIV_MOD_BODY 0
   1136      1.1  mrg 
                      	@ Apply the saved result sign (kept in ip) to the quotient.
   1137      1.1  mrg 	mov	r0, result
   1138      1.1  mrg 	mov	work, ip
   1139      1.1  mrg 	cmp	work, #0
   1140      1.1  mrg 	bpl	LSYM(Lover12)
   1141      1.1  mrg 	neg	r0, r0
   1142      1.1  mrg LSYM(Lover12):
   1143      1.1  mrg 	pop	{ work }
   1144      1.1  mrg 	RET
   1145      1.1  mrg 
   1146      1.1  mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
   1147      1.1  mrg 
                      	@ Hardware integer divide available: single sdiv instruction.
   1148      1.1  mrg 	ARM_FUNC_START divsi3
   1149      1.1  mrg 	ARM_FUNC_ALIAS aeabi_idiv divsi3
   1150      1.1  mrg 
   1151      1.1  mrg 	cmp 	r1, #0
   1152      1.1  mrg 	beq	LSYM(Ldiv0)
   1153      1.1  mrg 	sdiv	r0, r0, r1
   1154      1.1  mrg 	RET
   1155      1.1  mrg 
   1156      1.1  mrg #else /* ARM/Thumb-2 version.  */
   1157      1.1  mrg 
   1158      1.1  mrg 	ARM_FUNC_START divsi3
   1159      1.1  mrg 	ARM_FUNC_ALIAS aeabi_idiv divsi3
   1160      1.1  mrg 
   1161      1.1  mrg 	cmp	r1, #0
   1162      1.1  mrg 	beq	LSYM(Ldiv0)
   1163      1.1  mrg LSYM(divsi3_skip_div0_test):
   1164      1.1  mrg 	eor	ip, r0, r1			@ save the sign of the result.
   1165      1.1  mrg 	do_it	mi
   1166      1.1  mrg 	rsbmi	r1, r1, #0			@ loops below use unsigned.
   1167      1.1  mrg 	subs	r2, r1, #1			@ division by 1 or -1 ?
   1168      1.1  mrg 	beq	10f
   1169      1.1  mrg 	movs	r3, r0
   1170      1.1  mrg 	do_it	mi
   1171      1.1  mrg 	rsbmi	r3, r0, #0			@ positive dividend value
   1172      1.1  mrg 	cmp	r3, r1
   1173      1.1  mrg 	bls	11f
   1174      1.1  mrg 	tst	r1, r2				@ divisor is power of 2 ?
   1175      1.1  mrg 	beq	12f
   1176      1.1  mrg 
   1177      1.1  mrg 	ARM_DIV_BODY r3, r1, r0, r2
   1178      1.1  mrg 
   1179      1.1  mrg 	cmp	ip, #0
   1180      1.1  mrg 	do_it	mi
   1181      1.1  mrg 	rsbmi	r0, r0, #0
   1182      1.1  mrg 	RET
   1183      1.1  mrg 
                      	@ Divisor was +/-1: result is +/-dividend depending on sign mix.
   1184      1.1  mrg 10:	teq	ip, r0				@ same sign ?
   1185      1.1  mrg 	do_it	mi
   1186      1.1  mrg 	rsbmi	r0, r0, #0
   1187      1.1  mrg 	RET
   1188      1.1  mrg 
                      	@ |dividend| <= |divisor|: quotient is 0, +1 or -1.
   1189      1.1  mrg 11:	do_it	lo
   1190      1.1  mrg 	movlo	r0, #0
   1191      1.1  mrg 	do_it	eq,t
   1192      1.1  mrg 	moveq	r0, ip, asr #31
   1193      1.1  mrg 	orreq	r0, r0, #1
   1194      1.1  mrg 	RET
   1195      1.1  mrg 
                      	@ Power-of-two divisor: shift, then apply the result sign.
   1196      1.1  mrg 12:	ARM_DIV2_ORDER r1, r2
   1197      1.1  mrg 
   1198      1.1  mrg 	cmp	ip, #0
   1199      1.1  mrg 	mov	r0, r3, lsr r2
   1200      1.1  mrg 	do_it	mi
   1201      1.1  mrg 	rsbmi	r0, r0, #0
   1202      1.1  mrg 	RET
   1203      1.1  mrg 
   1204      1.1  mrg #endif /* ARM version */
   1205      1.1  mrg 
   1206      1.1  mrg 	DIV_FUNC_END divsi3 signed
   1207      1.1  mrg 
                      @ __aeabi_idivmod: quotient in r0, remainder in r1
                      @ (remainder computed as dividend - quotient * divisor).
   1208      1.1  mrg #if defined(__prefer_thumb__)
   1209      1.1  mrg FUNC_START aeabi_idivmod
   1210      1.1  mrg 	cmp	r1, #0
   1211      1.1  mrg 	beq	LSYM(Ldiv0)
   1212      1.1  mrg 	push	{r0, r1, lr}
   1213      1.1  mrg 	bl	LSYM(divsi3_skip_div0_test)
   1214      1.1  mrg 	POP	{r1, r2, r3}
   1215      1.1  mrg 	mul	r2, r0
   1216      1.1  mrg 	sub	r1, r1, r2
   1217      1.1  mrg 	bx	r3
   1218      1.1  mrg #elif defined(__ARM_ARCH_EXT_IDIV__)
   1219      1.1  mrg ARM_FUNC_START aeabi_idivmod
   1220      1.1  mrg 	cmp 	r1, #0
   1221      1.1  mrg 	beq	LSYM(Ldiv0)
   1222      1.1  mrg 	mov     r2, r0
   1223      1.1  mrg 	sdiv	r0, r0, r1
   1224      1.1  mrg 	mls     r1, r0, r1, r2
   1225      1.1  mrg 	RET
   1226      1.1  mrg #else
   1227      1.1  mrg ARM_FUNC_START aeabi_idivmod
   1228      1.1  mrg 	cmp	r1, #0
   1229      1.1  mrg 	beq	LSYM(Ldiv0)
   1230      1.1  mrg 	stmfd	sp!, { r0, r1, lr }
   1231      1.1  mrg 	bl	LSYM(divsi3_skip_div0_test)
   1232      1.1  mrg 	ldmfd	sp!, { r1, r2, lr }
   1233      1.1  mrg 	mul	r3, r2, r0
   1234      1.1  mrg 	sub	r1, r1, r3
   1235      1.1  mrg 	RET
   1236      1.1  mrg #endif
   1237      1.1  mrg 	FUNC_END aeabi_idivmod
   1238      1.1  mrg 
   1239      1.1  mrg #endif /* L_divsi3 */
   1240      1.1  mrg /* ------------------------------------------------------------------------ */
   1241      1.1  mrg #ifdef L_modsi3
                      @ __modsi3: signed 32-bit modulus, r0 = r0 % r1.  The result takes
                      @ the sign of the dividend; the core loops work on absolute values.
                      @ Branches to Ldiv0 when r1 == 0.
   1242      1.1  mrg 
   1243      1.1  mrg #if defined(__ARM_ARCH_EXT_IDIV__)
   1244      1.1  mrg 
                      	@ Hardware divide: r0 - (r0 / r1) * r1 via sdiv + mls.
   1245      1.1  mrg 	ARM_FUNC_START modsi3
   1246      1.1  mrg 
   1247      1.1  mrg 	cmp	r1, #0
   1248      1.1  mrg 	beq	LSYM(Ldiv0)
   1249      1.1  mrg 
   1250      1.1  mrg 	sdiv	r2, r0, r1
   1251      1.1  mrg 	mls     r0, r1, r2, r0
   1252      1.1  mrg 	RET
   1253      1.1  mrg 
   1254      1.1  mrg #elif defined(__thumb__)
   1255      1.1  mrg 
   1256      1.1  mrg 	FUNC_START modsi3
   1257      1.1  mrg 
   1258      1.1  mrg 	mov	curbit, #1
   1259      1.1  mrg 	cmp	divisor, #0
   1260      1.1  mrg 	beq	LSYM(Ldiv0)
   1261      1.1  mrg 	bpl	LSYM(Lover10)
   1262      1.1  mrg 	neg	divisor, divisor		@ Loops below use unsigned.
   1263      1.1  mrg LSYM(Lover10):
   1264      1.1  mrg 	push	{ work }
   1265      1.1  mrg 	@ Need to save the sign of the dividend, unfortunately, we need
   1266      1.1  mrg 	@ work later on.  Must do this after saving the original value of
   1267      1.1  mrg 	@ the work register, because we will pop this value off first.
   1268      1.1  mrg 	push	{ dividend }
   1269      1.1  mrg 	cmp	dividend, #0
   1270      1.1  mrg 	bpl	LSYM(Lover11)
   1271      1.1  mrg 	neg	dividend, dividend
   1272      1.1  mrg LSYM(Lover11):
   1273      1.1  mrg 	cmp	dividend, divisor
   1274      1.1  mrg 	blo	LSYM(Lgot_result)
   1275      1.1  mrg 
   1276      1.1  mrg 	THUMB_DIV_MOD_BODY 1
   1277      1.1  mrg 
                      	@ Pop the saved original dividend and restore its sign.
   1278      1.1  mrg 	pop	{ work }
   1279      1.1  mrg 	cmp	work, #0
   1280      1.1  mrg 	bpl	LSYM(Lover12)
   1281      1.1  mrg 	neg	dividend, dividend
   1282      1.1  mrg LSYM(Lover12):
   1283      1.1  mrg 	pop	{ work }
   1284      1.1  mrg 	RET
   1285      1.1  mrg 
   1286      1.1  mrg #else /* ARM version.  */
   1287      1.1  mrg 
   1288      1.1  mrg 	FUNC_START modsi3
   1289      1.1  mrg 
   1290      1.1  mrg 	cmp	r1, #0
   1291      1.1  mrg 	beq	LSYM(Ldiv0)
   1292      1.1  mrg 	rsbmi	r1, r1, #0			@ loops below use unsigned.
   1293      1.1  mrg 	movs	ip, r0				@ preserve sign of dividend
   1294      1.1  mrg 	rsbmi	r0, r0, #0			@ if negative make positive
   1295      1.1  mrg 	subs	r2, r1, #1			@ compare divisor with 1
   1296      1.1  mrg 	cmpne	r0, r1				@ compare dividend with divisor
   1297      1.1  mrg 	moveq	r0, #0
   1298      1.1  mrg 	tsthi	r1, r2				@ see if divisor is power of 2
   1299      1.1  mrg 	andeq	r0, r0, r2
   1300      1.1  mrg 	bls	10f
   1301      1.1  mrg 
   1302      1.1  mrg 	ARM_MOD_BODY r0, r1, r2, r3
   1303      1.1  mrg 
                      	@ Reapply the dividend's sign (saved in ip) to the remainder.
   1304      1.1  mrg 10:	cmp	ip, #0
   1305      1.1  mrg 	rsbmi	r0, r0, #0
   1306      1.1  mrg 	RET
   1307      1.1  mrg 
   1308      1.1  mrg #endif /* ARM version */
   1309      1.1  mrg 
   1310      1.1  mrg 	DIV_FUNC_END modsi3 signed
   1311      1.1  mrg 
   1312      1.1  mrg #endif /* L_modsi3 */
   1313      1.1  mrg /* ------------------------------------------------------------------------ */
   1314      1.1  mrg #ifdef L_dvmd_tls
                      @ Trap-less division-by-zero handlers: simply return.  Declared
                      @ WEAK so an application can override them with its own handlers.
   1315      1.1  mrg 
   1316      1.1  mrg #ifdef __ARM_EABI__
   1317      1.1  mrg 	WEAK aeabi_idiv0
   1318      1.1  mrg 	WEAK aeabi_ldiv0
   1319      1.1  mrg 	FUNC_START aeabi_idiv0
   1320      1.1  mrg 	FUNC_START aeabi_ldiv0
   1321      1.1  mrg 	RET
   1322      1.1  mrg 	FUNC_END aeabi_ldiv0
   1323      1.1  mrg 	FUNC_END aeabi_idiv0
   1324      1.1  mrg #else
   1325      1.1  mrg 	FUNC_START div0
   1326      1.1  mrg 	RET
   1327      1.1  mrg 	FUNC_END div0
   1328      1.1  mrg #endif
   1329      1.1  mrg 
   1330      1.1  mrg #endif /* L_dvmd_tls */
   1331      1.1  mrg /* ------------------------------------------------------------------------ */
   1332      1.1  mrg #ifdef L_dvmd_lnx
   1333      1.1  mrg @ GNU/Linux division-by-zero handler.  Used in place of L_dvmd_tls
                      @ Raises SIGFPE via raise(3), with unwind info (cfi_*) so the
                      @ signal frame can be unwound through this handler.
   1334      1.1  mrg 
   1335      1.1  mrg /* Constant taken from <asm/signal.h>.  */
   1336      1.1  mrg #define SIGFPE	8
   1337      1.1  mrg 
   1338      1.1  mrg #ifdef __ARM_EABI__
   1339  1.1.1.3  mrg 	cfi_start	__aeabi_ldiv0, LSYM(Lend_aeabi_ldiv0)
   1340      1.1  mrg 	WEAK aeabi_idiv0
   1341      1.1  mrg 	WEAK aeabi_ldiv0
   1342      1.1  mrg 	ARM_FUNC_START aeabi_idiv0
   1343      1.1  mrg 	ARM_FUNC_START aeabi_ldiv0
   1344  1.1.1.3  mrg 	do_push	{r1, lr}
   1345  1.1.1.3  mrg 98:	cfi_push 98b - __aeabi_ldiv0, 0xe, -0x4, 0x8
   1346      1.1  mrg #else
   1347  1.1.1.3  mrg 	cfi_start	__div0, LSYM(Lend_div0)
   1348      1.1  mrg 	ARM_FUNC_START div0
   1349  1.1.1.3  mrg 	do_push	{r1, lr}
   1350  1.1.1.3  mrg 98:	cfi_push 98b - __div0, 0xe, -0x4, 0x8
   1351      1.1  mrg #endif
   1352      1.1  mrg 
   1353      1.1  mrg 	mov	r0, #SIGFPE
   1354      1.1  mrg 	bl	SYM(raise) __PLT__
   1355  1.1.1.3  mrg 	RETLDM	r1 unwind=98b
   1356      1.1  mrg 
   1357      1.1  mrg #ifdef __ARM_EABI__
   1358  1.1.1.3  mrg 	cfi_end	LSYM(Lend_aeabi_ldiv0)
   1359      1.1  mrg 	FUNC_END aeabi_ldiv0
   1360      1.1  mrg 	FUNC_END aeabi_idiv0
   1361      1.1  mrg #else
   1362  1.1.1.3  mrg 	cfi_end	LSYM(Lend_div0)
   1363      1.1  mrg 	FUNC_END div0
   1364      1.1  mrg #endif
   1365      1.1  mrg 
   1366      1.1  mrg #endif /* L_dvmd_lnx */
   1367      1.1  mrg #ifdef L_clear_cache
   1368      1.1  mrg #if defined __ARM_EABI__ && defined __linux__
   1369      1.1  mrg @ EABI GNU/Linux call to cacheflush syscall.
                      @ __clear_cache(beg, end): r0/r1 already hold the range.
   1370      1.1  mrg 	ARM_FUNC_START clear_cache
                      	@ r7 carries the EABI syscall number; preserve the caller's r7.
   1371      1.1  mrg 	do_push	{r7}
   1372      1.1  mrg #if __ARM_ARCH__ >= 7 || defined(__ARM_ARCH_6T2__)
                      	@ Build the constant 0xf0002 in r7 (the cacheflush call number).
   1373      1.1  mrg 	movw	r7, #2
   1374      1.1  mrg 	movt	r7, #0xf
   1375      1.1  mrg #else
   1376      1.1  mrg 	mov	r7, #0xf0000
   1377      1.1  mrg 	add	r7, r7, #2
   1378      1.1  mrg #endif
                      	@ r2 = 0: flags argument to the syscall.
   1379      1.1  mrg 	mov	r2, #0
   1380      1.1  mrg 	swi	0
   1381      1.1  mrg 	do_pop	{r7}
   1382      1.1  mrg 	RET
   1383      1.1  mrg 	FUNC_END clear_cache
   1384      1.1  mrg #else
   1385      1.1  mrg #error "This is only for ARM EABI GNU/Linux"
   1386      1.1  mrg #endif
   1387      1.1  mrg #endif /* L_clear_cache */
   1388      1.1  mrg /* ------------------------------------------------------------------------ */
   1389      1.1  mrg /* Dword shift operations.  */
   1390      1.1  mrg /* All the following Dword shift variants rely on the fact that
   1391      1.1  mrg 	shft xxx, Reg
   1392      1.1  mrg    is in fact done as
   1393      1.1  mrg 	shft xxx, (Reg & 255)
   1394      1.1  mrg    so for Reg value in (32...63) and (-1...-31) we will get zero (in the
   1395      1.1  mrg    case of logical shifts) or the sign (for asr).  */
   1396      1.1  mrg 
   1397      1.1  mrg #ifdef __ARMEB__
   1398      1.1  mrg #define al	r1
   1399      1.1  mrg #define ah	r0
   1400      1.1  mrg #else
   1401      1.1  mrg #define al	r0
   1402      1.1  mrg #define ah	r1
   1403      1.1  mrg #endif
   1404      1.1  mrg 
   1405      1.1  mrg /* Prevent __aeabi double-word shifts from being produced on SymbianOS.  */
   1406      1.1  mrg #ifndef __symbian__
   1407      1.1  mrg 
   1408      1.1  mrg #ifdef L_lshrdi3
                      @ __lshrdi3 / __aeabi_llsr: 64-bit logical shift right of ah:al
                      @ by r2 (0..63).  Relies on register shifts masking the count to
                      @ 255, as described in the comment block above, so the out-of-
                      @ range partial shifts contribute zero.
   1409      1.1  mrg 
   1410      1.1  mrg 	FUNC_START lshrdi3
   1411      1.1  mrg 	FUNC_ALIAS aeabi_llsr lshrdi3
   1412      1.1  mrg 
   1413      1.1  mrg #ifdef __thumb__
   1414      1.1  mrg 	lsr	al, r2
   1415      1.1  mrg 	mov	r3, ah
   1416      1.1  mrg 	lsr	ah, r2
   1417      1.1  mrg 	mov	ip, r3
                      	@ Fold the bits that cross the 32-bit boundary into al:
                      	@ ah >> (r2-32) covers shifts >= 32, ah << (32-r2) shifts < 32.
   1418      1.1  mrg 	sub	r2, #32
   1419      1.1  mrg 	lsr	r3, r2
   1420      1.1  mrg 	orr	al, r3
   1421      1.1  mrg 	neg	r2, r2
   1422      1.1  mrg 	mov	r3, ip
   1423      1.1  mrg 	lsl	r3, r2
   1424      1.1  mrg 	orr	al, r3
   1425      1.1  mrg 	RET
   1426      1.1  mrg #else
                      	@ ARM: select the >=32 or <32 case with conditional execution.
   1427      1.1  mrg 	subs	r3, r2, #32
   1428      1.1  mrg 	rsb	ip, r2, #32
   1429      1.1  mrg 	movmi	al, al, lsr r2
   1430      1.1  mrg 	movpl	al, ah, lsr r3
   1431      1.1  mrg 	orrmi	al, al, ah, lsl ip
   1432      1.1  mrg 	mov	ah, ah, lsr r2
   1433      1.1  mrg 	RET
   1434      1.1  mrg #endif
   1435      1.1  mrg 	FUNC_END aeabi_llsr
   1436      1.1  mrg 	FUNC_END lshrdi3
   1437      1.1  mrg 
   1438      1.1  mrg #endif
   1439      1.1  mrg 
   1440      1.1  mrg #ifdef L_ashrdi3
                      @ __ashrdi3 / __aeabi_lasr: 64-bit arithmetic shift right of
                      @ ah:al by r2 (0..63).  Same cross-boundary technique as llsr,
                      @ but the >=32 contribution must be skipped when it would smear
                      @ the sign bit (see comment below).
   1441      1.1  mrg 
   1442      1.1  mrg 	FUNC_START ashrdi3
   1443      1.1  mrg 	FUNC_ALIAS aeabi_lasr ashrdi3
   1444      1.1  mrg 
   1445      1.1  mrg #ifdef __thumb__
   1446      1.1  mrg 	lsr	al, r2
   1447      1.1  mrg 	mov	r3, ah
   1448      1.1  mrg 	asr	ah, r2
   1449      1.1  mrg 	sub	r2, #32
   1450      1.1  mrg 	@ If r2 is negative at this point the following step would OR
   1451      1.1  mrg 	@ the sign bit into all of AL.  That's not what we want...
   1452      1.1  mrg 	bmi	1f
   1453      1.1  mrg 	mov	ip, r3
   1454      1.1  mrg 	asr	r3, r2
   1455      1.1  mrg 	orr	al, r3
   1456      1.1  mrg 	mov	r3, ip
   1457      1.1  mrg 1:
   1458      1.1  mrg 	neg	r2, r2
   1459      1.1  mrg 	lsl	r3, r2
   1460      1.1  mrg 	orr	al, r3
   1461      1.1  mrg 	RET
   1462      1.1  mrg #else
                      	@ ARM: select the >=32 or <32 case with conditional execution.
   1463      1.1  mrg 	subs	r3, r2, #32
   1464      1.1  mrg 	rsb	ip, r2, #32
   1465      1.1  mrg 	movmi	al, al, lsr r2
   1466      1.1  mrg 	movpl	al, ah, asr r3
   1467      1.1  mrg 	orrmi	al, al, ah, lsl ip
   1468      1.1  mrg 	mov	ah, ah, asr r2
   1469      1.1  mrg 	RET
   1470      1.1  mrg #endif
   1471      1.1  mrg 
   1472      1.1  mrg 	FUNC_END aeabi_lasr
   1473      1.1  mrg 	FUNC_END ashrdi3
   1474      1.1  mrg 
   1475      1.1  mrg #endif
   1476      1.1  mrg 
   1477      1.1  mrg #ifdef L_ashldi3
   1478      1.1  mrg 
                      @ __ashldi3 / __aeabi_llsl -- 64-bit left shift.
                      @ In: ah:al = operand, r2 = shift count.
                      @ Out: ah:al = operand << r2.  Clobbers r3 and ip.
   1479      1.1  mrg 	FUNC_START ashldi3
   1480      1.1  mrg 	FUNC_ALIAS aeabi_llsl ashldi3
   1481      1.1  mrg 
   1482      1.1  mrg #ifdef __thumb__
                      	@ Mirror image of __lshrdi3: both the "count < 32" and
                      	@ "count >= 32" contributions are computed; out-of-range
                      	@ register shifts yield 0 and drop out of the ORs.
   1483      1.1  mrg 	lsl	ah, r2		@ ah = high << count
   1484      1.1  mrg 	mov	r3, al
   1485      1.1  mrg 	lsl	al, r2		@ al = low << count
   1486      1.1  mrg 	mov	ip, r3		@ preserve original low word
   1487      1.1  mrg 	sub	r2, #32
   1488      1.1  mrg 	lsl	r3, r2		@ r3 = low << (count - 32)
   1489      1.1  mrg 	orr	ah, r3
   1490      1.1  mrg 	neg	r2, r2		@ r2 = 32 - count
   1491      1.1  mrg 	mov	r3, ip
   1492      1.1  mrg 	lsr	r3, r2		@ r3 = low >> (32 - count)
   1493      1.1  mrg 	orr	ah, r3
   1494      1.1  mrg 	RET
   1495      1.1  mrg #else
   1496      1.1  mrg 	subs	r3, r2, #32	@ r3 = count - 32; MI <=> count < 32
   1497      1.1  mrg 	rsb	ip, r2, #32	@ ip = 32 - count
   1498      1.1  mrg 	movmi	ah, ah, lsl r2	@ count < 32:  ah = high << count
   1499      1.1  mrg 	movpl	ah, al, lsl r3	@ count >= 32: ah = low << (count - 32)
   1500      1.1  mrg 	orrmi	ah, ah, al, lsr ip	@ count < 32: merge bits from low
   1501      1.1  mrg 	mov	al, al, lsl r2	@ low <<= count (0 when count >= 32)
   1502      1.1  mrg 	RET
   1503      1.1  mrg #endif
   1504      1.1  mrg 	FUNC_END aeabi_llsl
   1505      1.1  mrg 	FUNC_END ashldi3
   1506      1.1  mrg 
   1507      1.1  mrg #endif
   1508      1.1  mrg 
   1509      1.1  mrg #endif /* __symbian__ */
   1510      1.1  mrg 
   1511      1.1  mrg #if ((__ARM_ARCH__ > 5) && !defined(__ARM_ARCH_6M__)) \
   1512      1.1  mrg     || defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
   1513      1.1  mrg     || defined(__ARM_ARCH_5TEJ__)
   1514      1.1  mrg #define HAVE_ARM_CLZ 1
   1515      1.1  mrg #endif
   1516      1.1  mrg 
   1517      1.1  mrg #ifdef L_clzsi2
   1518      1.1  mrg #if defined(__ARM_ARCH_6M__)
                      @ __clzsi2 for ARMv6-M (Thumb-1 only, no CLZ instruction):
                      @ count leading zeros of r0.  Binary-search the value down to a
                      @ nibble, tracking in r1 how many leading-zero positions remain
                      @ unaccounted for, then finish with a 16-byte nibble table.
                      @ Returns clz(r0) in r0; yields 32 for r0 == 0.  Clobbers r1-r3.
   1519      1.1  mrg FUNC_START clzsi2
   1520      1.1  mrg 	mov	r1, #28		@ zeros implied if value ends up < 0x10
   1521      1.1  mrg 	mov	r3, #1
   1522      1.1  mrg 	lsl	r3, r3, #16	@ r3 = 0x10000 (Thumb-1 has no wide imm)
   1523      1.1  mrg 	cmp	r0, r3 /* 0x10000 */
   1524      1.1  mrg 	bcc	2f
   1525      1.1  mrg 	lsr	r0, r0, #16	@ value >= 2^16: examine the top half
   1526      1.1  mrg 	sub	r1, r1, #16
   1527      1.1  mrg 2:	lsr	r3, r3, #8
   1528      1.1  mrg 	cmp	r0, r3 /* #0x100 */
   1529      1.1  mrg 	bcc	2f
   1530      1.1  mrg 	lsr	r0, r0, #8
   1531      1.1  mrg 	sub	r1, r1, #8
   1532      1.1  mrg 2:	lsr	r3, r3, #4
   1533      1.1  mrg 	cmp	r0, r3 /* #0x10 */
   1534      1.1  mrg 	bcc	2f
   1535      1.1  mrg 	lsr	r0, r0, #4
   1536      1.1  mrg 	sub	r1, r1, #4
   1537      1.1  mrg 2:	adr	r2, 1f		@ r2 = address of the nibble clz table
   1538      1.1  mrg 	ldrb	r0, [r2, r0]	@ r0 = clz of the remaining nibble
   1539      1.1  mrg 	add	r0, r0, r1	@ total = nibble clz + zeros accounted
   1540      1.1  mrg 	bx lr
   1541      1.1  mrg .align 2
   1542      1.1  mrg 1:
                      @ clz (within 4 bits) of each nibble value 0..15
   1543      1.1  mrg .byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
   1544      1.1  mrg 	FUNC_END clzsi2
   1545      1.1  mrg #else
                      @ __clzsi2, ARM/Thumb-2 version: count leading zeros of r0.
   1546      1.1  mrg ARM_FUNC_START clzsi2
   1547      1.1  mrg # if defined(HAVE_ARM_CLZ)
   1548      1.1  mrg 	clz	r0, r0		@ hardware CLZ is available on this arch
   1549      1.1  mrg 	RET
   1550      1.1  mrg # else
                      	@ No CLZ instruction: same binary search as the v6-M variant
                      	@ above, but using conditional execution instead of branches
                      	@ (do_it emits an IT instruction when assembling for Thumb-2).
   1551      1.1  mrg 	mov	r1, #28
   1552      1.1  mrg 	cmp	r0, #0x10000
   1553      1.1  mrg 	do_it	cs, t
   1554      1.1  mrg 	movcs	r0, r0, lsr #16
   1555      1.1  mrg 	subcs	r1, r1, #16
   1556      1.1  mrg 	cmp	r0, #0x100
   1557      1.1  mrg 	do_it	cs, t
   1558      1.1  mrg 	movcs	r0, r0, lsr #8
   1559      1.1  mrg 	subcs	r1, r1, #8
   1560      1.1  mrg 	cmp	r0, #0x10
   1561      1.1  mrg 	do_it	cs, t
   1562      1.1  mrg 	movcs	r0, r0, lsr #4
   1563      1.1  mrg 	subcs	r1, r1, #4
   1564      1.1  mrg 	adr	r2, 1f		@ r2 = address of the nibble clz table
   1565      1.1  mrg 	ldrb	r0, [r2, r0]
   1566      1.1  mrg 	add	r0, r0, r1
   1567      1.1  mrg 	RET
   1568      1.1  mrg .align 2
   1569      1.1  mrg 1:
                      @ clz (within 4 bits) of each nibble value 0..15
   1570      1.1  mrg .byte 4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0
   1571      1.1  mrg # endif /* !HAVE_ARM_CLZ */
   1572      1.1  mrg 	FUNC_END clzsi2
   1573      1.1  mrg #endif
   1574      1.1  mrg #endif /* L_clzsi2 */
   1575      1.1  mrg 
   1576      1.1  mrg #ifdef L_clzdi2
   1577      1.1  mrg #if !defined(HAVE_ARM_CLZ)
   1578      1.1  mrg 
   1579      1.1  mrg # if defined(__ARM_ARCH_6M__)
   1580      1.1  mrg FUNC_START clzdi2
   1581      1.1  mrg 	push	{r4, lr}
   1582      1.1  mrg # else
   1583      1.1  mrg ARM_FUNC_START clzdi2
   1584      1.1  mrg 	do_push	{r4, lr}
   1585      1.1  mrg # endif
   1586      1.1  mrg 	cmp	xxh, #0
   1587      1.1  mrg 	bne	1f
   1588      1.1  mrg # ifdef __ARMEB__
   1589      1.1  mrg 	mov	r0, xxl
   1590      1.1  mrg 	bl	__clzsi2
   1591      1.1  mrg 	add	r0, r0, #32
   1592      1.1  mrg 	b 2f
   1593      1.1  mrg 1:
   1594      1.1  mrg 	bl	__clzsi2
   1595      1.1  mrg # else
   1596      1.1  mrg 	bl	__clzsi2
   1597      1.1  mrg 	add	r0, r0, #32
   1598      1.1  mrg 	b 2f
   1599      1.1  mrg 1:
   1600      1.1  mrg 	mov	r0, xxh
   1601      1.1  mrg 	bl	__clzsi2
   1602      1.1  mrg # endif
   1603      1.1  mrg 2:
   1604      1.1  mrg # if defined(__ARM_ARCH_6M__)
   1605      1.1  mrg 	pop	{r4, pc}
   1606      1.1  mrg # else
   1607      1.1  mrg 	RETLDM	r4
   1608      1.1  mrg # endif
   1609      1.1  mrg 	FUNC_END clzdi2
   1610      1.1  mrg 
   1611      1.1  mrg #else /* HAVE_ARM_CLZ */
   1612      1.1  mrg 
                      @ __clzdi2 with hardware CLZ: count the appropriate word and
                      @ add 32 when the high word is all zeros.
   1613      1.1  mrg ARM_FUNC_START clzdi2
   1614      1.1  mrg 	cmp	xxh, #0
   1615      1.1  mrg 	do_it	eq, et
   1616      1.1  mrg 	clzeq	r0, xxl		@ high word zero: count the low word...
   1617      1.1  mrg 	clzne	r0, xxh		@ ...otherwise count the high word
   1618      1.1  mrg 	addeq	r0, r0, #32	@ ...plus 32 for the all-zero high word
   1619      1.1  mrg 	RET
   1620      1.1  mrg 	FUNC_END clzdi2
   1621      1.1  mrg 
   1622      1.1  mrg #endif
   1623      1.1  mrg #endif /* L_clzdi2 */
   1624      1.1  mrg 
   1625      1.1  mrg #ifdef L_ctzsi2
   1626      1.1  mrg #if defined(__ARM_ARCH_6M__)
                      @ __ctzsi2 for ARMv6-M: count trailing zeros of r0.
                      @ r0 & -r0 isolates the lowest set bit; the same binary-search
                      @ reduction as __clzsi2 then locates its position, with the
                      @ table biased so that result = table[nibble] - r1.
                      @ Returns ctz(r0) in r0 (-1 for r0 == 0).  Clobbers r1-r3.
   1627      1.1  mrg FUNC_START ctzsi2
   1628      1.1  mrg 	neg	r1, r0
   1629      1.1  mrg 	and	r0, r0, r1	@ r0 &= -r0: isolate the lowest set bit
   1630      1.1  mrg 	mov	r1, #28
   1631      1.1  mrg 	mov	r3, #1
   1632      1.1  mrg 	lsl	r3, r3, #16	@ r3 = 0x10000
   1633      1.1  mrg 	cmp	r0, r3 /* 0x10000 */
   1634      1.1  mrg 	bcc	2f
   1635      1.1  mrg 	lsr	r0, r0, #16
   1636      1.1  mrg 	sub	r1, r1, #16
   1637      1.1  mrg 2:	lsr	r3, r3, #8
   1638      1.1  mrg 	cmp	r0, r3 /* #0x100 */
   1639      1.1  mrg 	bcc	2f
   1640      1.1  mrg 	lsr	r0, r0, #8
   1641      1.1  mrg 	sub	r1, r1, #8
   1642      1.1  mrg 2:	lsr	r3, r3, #4
   1643      1.1  mrg 	cmp	r0, r3 /* #0x10 */
   1644      1.1  mrg 	bcc	2f
   1645      1.1  mrg 	lsr	r0, r0, #4
   1646      1.1  mrg 	sub	r1, r1, #4
   1647      1.1  mrg 2:	adr	r2, 1f
   1648      1.1  mrg 	ldrb	r0, [r2, r0]
   1649      1.1  mrg 	sub	r0, r0, r1	@ bit position = table entry - r1
   1650      1.1  mrg 	bx lr
   1651      1.1  mrg .align 2
   1652      1.1  mrg 1:
                      @ 28 + floor(log2(n)) for nibble n >= 1; entry 0 yields the
                      @ -1 result for a zero input
   1653      1.1  mrg .byte	27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31
   1654      1.1  mrg 	FUNC_END ctzsi2
   1655      1.1  mrg #else
                      @ __ctzsi2, ARM/Thumb-2 version: count trailing zeros of r0.
                      @ Returns ctz(r0) in r0 (-1 for r0 == 0).
   1656      1.1  mrg ARM_FUNC_START ctzsi2
   1657      1.1  mrg 	rsb	r1, r0, #0
   1658      1.1  mrg 	and	r0, r0, r1	@ r0 &= -r0: isolate the lowest set bit
   1659      1.1  mrg # if defined(HAVE_ARM_CLZ)
   1660      1.1  mrg 	clz	r0, r0
   1661      1.1  mrg 	rsb	r0, r0, #31	@ ctz = 31 - clz(lowest set bit)
   1662      1.1  mrg 	RET
   1663      1.1  mrg # else
                      	@ No CLZ: binary-search reduction as in __clzsi2, with the
                      	@ table biased so that result = table[nibble] - r1.
   1664      1.1  mrg 	mov	r1, #28
   1665      1.1  mrg 	cmp	r0, #0x10000
   1666      1.1  mrg 	do_it	cs, t
   1667      1.1  mrg 	movcs	r0, r0, lsr #16
   1668      1.1  mrg 	subcs	r1, r1, #16
   1669      1.1  mrg 	cmp	r0, #0x100
   1670      1.1  mrg 	do_it	cs, t
   1671      1.1  mrg 	movcs	r0, r0, lsr #8
   1672      1.1  mrg 	subcs	r1, r1, #8
   1673      1.1  mrg 	cmp	r0, #0x10
   1674      1.1  mrg 	do_it	cs, t
   1675      1.1  mrg 	movcs	r0, r0, lsr #4
   1676      1.1  mrg 	subcs	r1, r1, #4
   1677      1.1  mrg 	adr	r2, 1f
   1678      1.1  mrg 	ldrb	r0, [r2, r0]
   1679      1.1  mrg 	sub	r0, r0, r1	@ bit position = table entry - r1
   1680      1.1  mrg 	RET
   1681      1.1  mrg .align 2
   1682      1.1  mrg 1:
                      @ 28 + floor(log2(n)) for nibble n >= 1; entry 0 yields -1
   1683      1.1  mrg .byte	27, 28, 29, 29, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31
   1684      1.1  mrg # endif /* !HAVE_ARM_CLZ */
   1685      1.1  mrg 	FUNC_END ctzsi2
   1686      1.1  mrg #endif
   1687      1.1  mrg #endif /* L_ctzsi2 */
   1688      1.1  mrg 
   1689      1.1  mrg /* ------------------------------------------------------------------------ */
   1690      1.1  mrg /* These next two sections are here despite the fact that they contain Thumb
   1691      1.1  mrg    assembler because their presence allows interworked code to be linked even
   1692      1.1  mrg    when the GCC library is this one.  */
   1693      1.1  mrg 
   1694      1.1  mrg /* Do not build the interworking functions when the target architecture does
   1695      1.1  mrg    not support Thumb instructions.  (This can be a multilib option).  */
   1696      1.1  mrg #if defined __ARM_ARCH_4T__ || defined __ARM_ARCH_5T__\
   1697      1.1  mrg       || defined __ARM_ARCH_5TE__ || defined __ARM_ARCH_5TEJ__ \
   1698      1.1  mrg       || __ARM_ARCH__ >= 6
   1699      1.1  mrg 
   1700      1.1  mrg #if defined L_call_via_rX
   1701      1.1  mrg 
   1702      1.1  mrg /* These labels & instructions are used by the Arm/Thumb interworking code.
   1703      1.1  mrg    The address of function to be called is loaded into a register and then
   1704      1.1  mrg    one of these labels is called via a BL instruction.  This puts the
   1705      1.1  mrg    return address into the link register with the bottom bit set, and the
   1706      1.1  mrg    code here switches to the correct mode before executing the function.  */
   1707      1.1  mrg 
   1708      1.1  mrg 	.text
   1709      1.1  mrg 	.align 0
   1710      1.1  mrg         .force_thumb
   1711      1.1  mrg 
                      @ Emit one Thumb interworking stub, _call_via_<reg>: reached via
                      @ BL, it simply branches to the address in <reg> (BX switches
                      @ instruction set per the address's bottom bit); the BL's return
                      @ address, with its Thumb bit set, brings the caller back in the
                      @ correct mode.
   1712      1.1  mrg .macro call_via register
   1713      1.1  mrg 	THUMB_FUNC_START _call_via_\register
   1714      1.1  mrg 
   1715      1.1  mrg 	bx	\register
   1716      1.1  mrg 	nop		@ padding
   1717      1.1  mrg 
   1718      1.1  mrg 	SIZE	(_call_via_\register)
   1719      1.1  mrg .endm
   1720      1.1  mrg 
                      @ One stub per register that can hold a call target.
   1721      1.1  mrg 	call_via r0
   1722      1.1  mrg 	call_via r1
   1723      1.1  mrg 	call_via r2
   1724      1.1  mrg 	call_via r3
   1725      1.1  mrg 	call_via r4
   1726      1.1  mrg 	call_via r5
   1727      1.1  mrg 	call_via r6
   1728      1.1  mrg 	call_via r7
   1729      1.1  mrg 	call_via r8
   1730      1.1  mrg 	call_via r9
   1731      1.1  mrg 	call_via sl
   1732      1.1  mrg 	call_via fp
   1733      1.1  mrg 	call_via ip
   1734      1.1  mrg 	call_via sp
   1735      1.1  mrg 	call_via lr
   1736      1.1  mrg 
   1737      1.1  mrg #endif /* L_call_via_rX */
   1738      1.1  mrg 
   1739      1.1  mrg /* Don't bother with the old interworking routines for Thumb-2.  */
   1740      1.1  mrg /* ??? Maybe only omit these on "m" variants.  */
   1741      1.1  mrg #if !defined(__thumb2__) && !defined(__ARM_ARCH_6M__)
   1742      1.1  mrg 
   1743      1.1  mrg #if defined L_interwork_call_via_rX
   1744      1.1  mrg 
   1745      1.1  mrg /* These labels & instructions are used by the Arm/Thumb interworking code,
   1746      1.1  mrg    when the target address is in an unknown instruction set.  The address
   1747      1.1  mrg    of function to be called is loaded into a register and then one of these
   1748      1.1  mrg    labels is called via a BL instruction.  This puts the return address
   1749      1.1  mrg    into the link register with the bottom bit set, and the code here
   1750      1.1  mrg    switches to the correct mode before executing the function.  Unfortunately
   1751      1.1  mrg    the target code cannot be relied upon to return via a BX instruction, so
   1752      1.1  mrg    instead we have to store the return address on the stack and allow the
   1753      1.1  mrg    called function to return here instead.  Upon return we recover the real
   1754      1.1  mrg    return address and use a BX to get back to Thumb mode.
   1755      1.1  mrg 
   1756      1.1  mrg    There are three variations of this code.  The first,
   1757      1.1  mrg    _interwork_call_via_rN(), will push the return address onto the
   1758      1.1  mrg    stack and pop it in _arm_return().  It should only be used if all
   1759      1.1  mrg    arguments are passed in registers.
   1760      1.1  mrg 
   1761      1.1  mrg    The second, _interwork_r7_call_via_rN(), instead stores the return
   1762      1.1  mrg    address at [r7, #-4].  It is the caller's responsibility to ensure
   1763      1.1  mrg    that this address is valid and contains no useful data.
   1764      1.1  mrg 
   1765      1.1  mrg    The third, _interwork_r11_call_via_rN(), works in the same way but
   1766      1.1  mrg    uses r11 instead of r7.  It is useful if the caller does not really
   1767      1.1  mrg    need a frame pointer.  */
   1768      1.1  mrg 
   1769      1.1  mrg 	.text
   1770      1.1  mrg 	.align 0
   1771      1.1  mrg 
   1772      1.1  mrg 	.code   32
                      @ _arm_return: common return trampoline for the
                      @ _interwork_call_via_rN stubs below.  An ARM-mode callee
                      @ returns here; RETLDM pops the real return address that the
                      @ stub pushed and BXes back, restoring the caller's instruction
                      @ set.  The cfi_* macros describe the saved return address so
                      @ unwinders can step through this frame.
   1773      1.1  mrg 	.globl _arm_return
   1774      1.1  mrg LSYM(Lstart_arm_return):
   1775      1.1  mrg 	cfi_start	LSYM(Lstart_arm_return) LSYM(Lend_arm_return)
   1776      1.1  mrg 	cfi_push	0, 0xe, -0x8, 0x8
   1777      1.1  mrg 	nop	@ This nop is for the benefit of debuggers, so that
   1778      1.1  mrg 		@ backtraces will use the correct unwind information.
   1779      1.1  mrg _arm_return:
   1780      1.1  mrg 	RETLDM	unwind=LSYM(Lstart_arm_return)
   1781      1.1  mrg 	cfi_end	LSYM(Lend_arm_return)
   1782      1.1  mrg 
                      @ Variants for the _interwork_r7_/_interwork_r11_ entry points:
                      @ the return address was stored at [frame_reg, #-4] instead of
                      @ being pushed on the stack.
   1783      1.1  mrg 	.globl _arm_return_r7
   1784      1.1  mrg _arm_return_r7:
   1785      1.1  mrg 	ldr	lr, [r7, #-4]
   1786      1.1  mrg 	bx	lr
   1787      1.1  mrg 
   1788      1.1  mrg 	.globl _arm_return_r11
   1789      1.1  mrg _arm_return_r11:
   1790      1.1  mrg 	ldr	lr, [r11, #-4]
   1791      1.1  mrg 	bx	lr
   1792      1.1  mrg 
                      @ Emit _interwork_r7_/_interwork_r11_call_via_<reg>: like
                      @ "interwork" below, but the ARM-mode return address is stashed
                      @ at [\frame, #-4] instead of being pushed on the stack.  The
                      @ trailing \return parameter is accepted but unused.
   1793      1.1  mrg .macro interwork_with_frame frame, register, name, return
   1794      1.1  mrg 	.code	16
   1795      1.1  mrg 
   1796      1.1  mrg 	THUMB_FUNC_START \name
   1797      1.1  mrg 
   1798      1.1  mrg 	bx	pc	@ switch from Thumb to ARM state
   1799      1.1  mrg 	nop
   1800      1.1  mrg 
   1801      1.1  mrg 	.code	32
   1802      1.1  mrg 	tst	\register, #1	@ Thumb target (bottom bit set)?
   1803      1.1  mrg 	streq	lr, [\frame, #-4]	@ ARM target: save the real return addr
   1804      1.1  mrg 	adreq	lr, _arm_return_\frame	@ ...and return via the trampoline
   1805      1.1  mrg 	bx	\register
   1806      1.1  mrg 
   1807      1.1  mrg 	SIZE	(\name)
   1808      1.1  mrg .endm
   1809      1.1  mrg 
                      @ Emit _interwork_call_via_<reg>: call the address in <reg>,
                      @ which may be in either instruction set.  A Thumb target (bit 0
                      @ set) is trusted to return with BX directly; an ARM target has
                      @ its return address pushed on the stack (8-byte slot) and is
                      @ made to return through _arm_return instead.
   1810      1.1  mrg .macro interwork register
   1811      1.1  mrg 	.code	16
   1812      1.1  mrg 
   1813      1.1  mrg 	THUMB_FUNC_START _interwork_call_via_\register
   1814      1.1  mrg 
   1815      1.1  mrg 	bx	pc	@ switch from Thumb to ARM state
   1816      1.1  mrg 	nop
   1817      1.1  mrg 
   1818      1.1  mrg 	.code	32
   1819      1.1  mrg 	.globl LSYM(Lchange_\register)
   1820      1.1  mrg LSYM(Lchange_\register):
   1821      1.1  mrg 	tst	\register, #1	@ Thumb target (bottom bit set)?
   1822      1.1  mrg 	streq	lr, [sp, #-8]!	@ ARM target: save the real return addr
   1823      1.1  mrg 	adreq	lr, _arm_return	@ ...and return via the trampoline
   1824      1.1  mrg 	bx	\register
   1825      1.1  mrg 
   1826      1.1  mrg 	SIZE	(_interwork_call_via_\register)
   1827      1.1  mrg 
                      @ Also emit the r7- and r11-frame variants for this register.
   1828      1.1  mrg 	interwork_with_frame r7,\register,_interwork_r7_call_via_\register
   1829      1.1  mrg 	interwork_with_frame r11,\register,_interwork_r11_call_via_\register
   1830      1.1  mrg .endm
   1831      1.1  mrg 
                      @ Instantiate the interworking stubs for every register that can
                      @ hold a call target; lr needs special handling below because
                      @ the stub itself clobbers lr.
   1832      1.1  mrg 	interwork r0
   1833      1.1  mrg 	interwork r1
   1834      1.1  mrg 	interwork r2
   1835      1.1  mrg 	interwork r3
   1836      1.1  mrg 	interwork r4
   1837      1.1  mrg 	interwork r5
   1838      1.1  mrg 	interwork r6
   1839      1.1  mrg 	interwork r7
   1840      1.1  mrg 	interwork r8
   1841      1.1  mrg 	interwork r9
   1842      1.1  mrg 	interwork sl
   1843      1.1  mrg 	interwork fp
   1844      1.1  mrg 	interwork ip
   1845      1.1  mrg 	interwork sp
   1846      1.1  mrg 
   1847      1.1  mrg 	/* The LR case has to be handled a little differently...  */
   1848      1.1  mrg 	.code 16
   1849      1.1  mrg 
   1850      1.1  mrg 	THUMB_FUNC_START _interwork_call_via_lr
   1851      1.1  mrg 
   1852      1.1  mrg 	bx 	pc	@ switch from Thumb to ARM state
   1853      1.1  mrg 	nop
   1854      1.1  mrg 
   1855      1.1  mrg 	.code 32
   1856      1.1  mrg 	.globl .Lchange_lr
   1857      1.1  mrg .Lchange_lr:
   1858      1.1  mrg 	tst	lr, #1	@ Thumb target (bottom bit set)?
   1859      1.1  mrg 	stmeqdb	r13!, {lr, pc}	@ ARM target: push return addr (pc word
                      			@ fills out the 8-byte slot _arm_return pops)
   1860      1.1  mrg 	mov	ip, lr	@ keep the target; lr is repurposed below
   1861      1.1  mrg 	adreq	lr, _arm_return	@ ARM target returns via the trampoline
   1862      1.1  mrg 	bx	ip	@ branch to the original lr value
   1863      1.1  mrg 
   1864      1.1  mrg 	SIZE	(_interwork_call_via_lr)
   1865      1.1  mrg 
   1866      1.1  mrg #endif /* L_interwork_call_via_rX */
   1867      1.1  mrg #endif /* !__thumb2__ */
   1868      1.1  mrg 
   1869      1.1  mrg /* Functions to support compact pic switch tables in thumb1 state.
   1870      1.1  mrg    All these routines take an index into the table in r0.  The
   1871      1.1  mrg    table is at LR & ~1 (but this must be rounded up in the case
   1872      1.1  mrg    of 32-bit entries).  They are only permitted to clobber r12
   1873      1.1  mrg    and r14 and r0 must be preserved on exit.  */
   1874      1.1  mrg #ifdef L_thumb1_case_sqi
   1875      1.1  mrg 
   1876      1.1  mrg 	.text
   1877      1.1  mrg 	.align 0
   1878      1.1  mrg         .force_thumb
   1879      1.1  mrg 	.syntax unified
                      @ __gnu_thumb1_case_sqi: dispatch helper for compact Thumb-1
                      @ switch tables of signed bytes.  The table immediately follows
                      @ the BL that got us here, so lr (with its Thumb bit set) points
                      @ just past it.  In: r0 = case index (preserved).  Each entry is
                      @ a branch offset divided by 2.
   1880      1.1  mrg 	THUMB_FUNC_START __gnu_thumb1_case_sqi
   1881      1.1  mrg 	push	{r1}
   1882      1.1  mrg 	mov	r1, lr
   1883      1.1  mrg 	lsrs	r1, r1, #1
   1884      1.1  mrg 	lsls	r1, r1, #1	@ clear the Thumb bit: r1 = table address
   1885      1.1  mrg 	ldrsb	r1, [r1, r0]	@ sign-extended entry for this case
   1886      1.1  mrg 	lsls	r1, r1, #1	@ scale the half-word offset to bytes
   1887      1.1  mrg 	add	lr, lr, r1	@ lr = dispatch target (Thumb bit kept)
   1888      1.1  mrg 	pop	{r1}
   1889      1.1  mrg 	bx	lr
   1890      1.1  mrg 	SIZE (__gnu_thumb1_case_sqi)
   1891      1.1  mrg #endif
   1892      1.1  mrg 
   1893      1.1  mrg #ifdef L_thumb1_case_uqi
   1894      1.1  mrg 
   1895      1.1  mrg 	.text
   1896      1.1  mrg 	.align 0
   1897      1.1  mrg         .force_thumb
   1898      1.1  mrg 	.syntax unified
                      @ __gnu_thumb1_case_uqi: as _sqi above, but the table holds
                      @ unsigned bytes (forward-only branch offsets divided by 2).
                      @ In: r0 = case index (preserved).
   1899      1.1  mrg 	THUMB_FUNC_START __gnu_thumb1_case_uqi
   1900      1.1  mrg 	push	{r1}
   1901      1.1  mrg 	mov	r1, lr
   1902      1.1  mrg 	lsrs	r1, r1, #1
   1903      1.1  mrg 	lsls	r1, r1, #1	@ clear the Thumb bit: r1 = table address
   1904      1.1  mrg 	ldrb	r1, [r1, r0]	@ zero-extended entry for this case
   1905      1.1  mrg 	lsls	r1, r1, #1	@ scale the half-word offset to bytes
   1906      1.1  mrg 	add	lr, lr, r1	@ lr = dispatch target (Thumb bit kept)
   1907      1.1  mrg 	pop	{r1}
   1908      1.1  mrg 	bx	lr
   1909      1.1  mrg 	SIZE (__gnu_thumb1_case_uqi)
   1910      1.1  mrg #endif
   1911      1.1  mrg 
   1912      1.1  mrg #ifdef L_thumb1_case_shi
   1913      1.1  mrg 
   1914      1.1  mrg 	.text
   1915      1.1  mrg 	.align 0
   1916      1.1  mrg         .force_thumb
   1917      1.1  mrg 	.syntax unified
                      @ __gnu_thumb1_case_shi: as _sqi, but the table holds signed
                      @ half-words, so the case index is scaled by 2 as well.
                      @ In: r0 = case index (preserved).
   1918      1.1  mrg 	THUMB_FUNC_START __gnu_thumb1_case_shi
   1919      1.1  mrg 	push	{r0, r1}
   1920      1.1  mrg 	mov	r1, lr
   1921      1.1  mrg 	lsrs	r1, r1, #1
   1922      1.1  mrg 	lsls	r0, r0, #1	@ byte offset of entry = index * 2
   1923      1.1  mrg 	lsls	r1, r1, #1	@ clear the Thumb bit: r1 = table address
   1924      1.1  mrg 	ldrsh	r1, [r1, r0]	@ sign-extended half-word entry
   1925      1.1  mrg 	lsls	r1, r1, #1	@ scale the half-word offset to bytes
   1926      1.1  mrg 	add	lr, lr, r1	@ lr = dispatch target (Thumb bit kept)
   1927      1.1  mrg 	pop	{r0, r1}	@ r0 must be preserved for the caller
   1928      1.1  mrg 	bx	lr
   1929      1.1  mrg 	SIZE (__gnu_thumb1_case_shi)
   1930      1.1  mrg #endif
   1931      1.1  mrg 
   1932      1.1  mrg #ifdef L_thumb1_case_uhi
   1933      1.1  mrg 
   1934      1.1  mrg 	.text
   1935      1.1  mrg 	.align 0
   1936      1.1  mrg         .force_thumb
   1937      1.1  mrg 	.syntax unified
                      @ __gnu_thumb1_case_uhi: as _shi, but the table holds unsigned
                      @ half-words.  In: r0 = case index (preserved).
   1938      1.1  mrg 	THUMB_FUNC_START __gnu_thumb1_case_uhi
   1939      1.1  mrg 	push	{r0, r1}
   1940      1.1  mrg 	mov	r1, lr
   1941      1.1  mrg 	lsrs	r1, r1, #1
   1942      1.1  mrg 	lsls	r0, r0, #1	@ byte offset of entry = index * 2
   1943      1.1  mrg 	lsls	r1, r1, #1	@ clear the Thumb bit: r1 = table address
   1944      1.1  mrg 	ldrh	r1, [r1, r0]	@ zero-extended half-word entry
   1945      1.1  mrg 	lsls	r1, r1, #1	@ scale the half-word offset to bytes
   1946      1.1  mrg 	add	lr, lr, r1	@ lr = dispatch target (Thumb bit kept)
   1947      1.1  mrg 	pop	{r0, r1}	@ r0 must be preserved for the caller
   1948      1.1  mrg 	bx	lr
   1949      1.1  mrg 	SIZE (__gnu_thumb1_case_uhi)
   1950      1.1  mrg #endif
   1951      1.1  mrg 
   1952      1.1  mrg #ifdef L_thumb1_case_si
   1953      1.1  mrg 
   1954      1.1  mrg 	.text
   1955      1.1  mrg 	.align 0
   1956      1.1  mrg         .force_thumb
   1957      1.1  mrg 	.syntax unified
                      @ __gnu_thumb1_case_si: as above, but with 32-bit table entries.
                      @ The table is word aligned, so lr is rounded up to the next
                      @ word; entries are byte offsets from the table base (not
                      @ divided by 2).  In: r0 = case index (preserved).
   1958      1.1  mrg 	THUMB_FUNC_START __gnu_thumb1_case_si
   1959      1.1  mrg 	push	{r0, r1}
   1960      1.1  mrg 	mov	r1, lr
   1961      1.1  mrg 	adds.n	r1, r1, #2	/* Align to word.  */
   1962      1.1  mrg 	lsrs	r1, r1, #2
   1963      1.1  mrg 	lsls	r0, r0, #2	@ byte offset of entry = index * 4
   1964      1.1  mrg 	lsls	r1, r1, #2	@ r1 = word-aligned table address
   1965      1.1  mrg 	ldr	r0, [r1, r0]
   1966      1.1  mrg 	adds	r0, r0, r1	@ target = table base + entry
   1967      1.1  mrg 	mov	lr, r0
   1968      1.1  mrg 	pop	{r0, r1}	@ r0 must be preserved for the caller
   1969      1.1  mrg 	mov	pc, lr		/* We know we were called from thumb code.  */
   1970      1.1  mrg 	SIZE (__gnu_thumb1_case_si)
   1971      1.1  mrg #endif
   1972      1.1  mrg 
   1973      1.1  mrg #endif /* Arch supports thumb.  */
   1974      1.1  mrg 
                      @ Open a CFI region for a hand-written function and remember the
                      @ entry-state frame description so CFI_END_FUNCTION can restore
                      @ it for any code that follows.
   1975  1.1.1.3  mrg .macro CFI_START_FUNCTION
   1976  1.1.1.3  mrg 	.cfi_startproc
   1977  1.1.1.3  mrg 	.cfi_remember_state
   1978  1.1.1.3  mrg .endm
   1979  1.1.1.3  mrg 
                      @ Close the CFI region opened by CFI_START_FUNCTION, restoring
                      @ the remembered entry state first.
   1980  1.1.1.3  mrg .macro CFI_END_FUNCTION
   1981  1.1.1.3  mrg 	.cfi_restore_state
   1982  1.1.1.3  mrg 	.cfi_endproc
   1983  1.1.1.3  mrg .endm
   1984  1.1.1.3  mrg 
   1985      1.1  mrg #ifndef __symbian__
   1986      1.1  mrg #ifndef __ARM_ARCH_6M__
   1987      1.1  mrg #include "ieee754-df.S"
   1988      1.1  mrg #include "ieee754-sf.S"
   1989      1.1  mrg #include "bpabi.S"
   1990      1.1  mrg #else /* __ARM_ARCH_6M__ */
   1991      1.1  mrg #include "bpabi-v6m.S"
   1992      1.1  mrg #endif /* __ARM_ARCH_6M__ */
   1993      1.1  mrg #endif /* !__symbian__ */
   1994