/* udivmodsi4.S, revision 1.1.1.2 (imported from LLVM compiler-rt; lib/builtins/arm). */
/*===-- udivmodsi4.S - 32-bit unsigned integer divide and modulus ---------===//
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===//
 *
 * This file implements the __udivmodsi4 (32-bit unsigned integer divide and
 * modulus) function for the ARM 32-bit architecture.
 *
 *===----------------------------------------------------------------------===*/

#include "../assembly.h"

	.syntax unified
	.text

#if __ARM_ARCH_ISA_THUMB == 2
	.thumb
#endif

@ unsigned int __udivmodsi4(unsigned int dividend, unsigned int divisor,
@                           unsigned int *remainder)
@   Calculate the quotient and remainder of the (unsigned) division.  The return
@   value is the quotient, the remainder is placed in the variable.

     29 	.p2align 2
     30 DEFINE_COMPILERRT_FUNCTION(__udivmodsi4)
     31 #if __ARM_ARCH_EXT_IDIV__
     32 	tst     r1, r1
     33 	beq     LOCAL_LABEL(divby0)
     34 	mov 	r3, r0
     35 	udiv	r0, r3, r1
     36 	mls 	r1, r0, r1, r3
     37 	str 	r1, [r2]
     38 	bx  	lr
     39 #else
     40 	cmp	r1, #1
     41 	bcc	LOCAL_LABEL(divby0)
     42 	beq	LOCAL_LABEL(divby1)
     43 	cmp	r0, r1
     44 	bcc	LOCAL_LABEL(quotient0)
     45 	/*
     46 	 * Implement division using binary long division algorithm.
     47 	 *
     48 	 * r0 is the numerator, r1 the denominator.
     49 	 *
     50 	 * The code before JMP computes the correct shift I, so that
     51 	 * r0 and (r1 << I) have the highest bit set in the same position.
     52 	 * At the time of JMP, ip := .Ldiv0block - 12 * I.
     53 	 * This depends on the fixed instruction size of block.
     54 	 * For ARM mode, this is 12 Bytes, for THUMB mode 14 Bytes.
     55 	 *
     56 	 * block(shift) implements the test-and-update-quotient core.
     57 	 * It assumes (r0 << shift) can be computed without overflow and
     58 	 * that (r0 << shift) < 2 * r1. The quotient is stored in r3.
     59 	 */
     60 
     61 #  ifdef __ARM_FEATURE_CLZ
     62 	clz	ip, r0
     63 	clz	r3, r1
     64 	/* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
     65 	sub	r3, r3, ip
     66 #    if __ARM_ARCH_ISA_THUMB == 2
     67 	adr	ip, LOCAL_LABEL(div0block) + 1
     68 	sub	ip, ip, r3, lsl #1
     69 #    else
     70 	adr	ip, LOCAL_LABEL(div0block)
     71 #    endif
     72 	sub	ip, ip, r3, lsl #2
     73 	sub	ip, ip, r3, lsl #3
     74 	mov	r3, #0
     75 	bx	ip
     76 #  else
     77 #    if __ARM_ARCH_ISA_THUMB == 2
     78 #    error THUMB mode requires CLZ or UDIV
     79 #    endif
     80 	str	r4, [sp, #-8]!
     81 
     82 	mov	r4, r0
     83 	adr	ip, LOCAL_LABEL(div0block)
     84 
     85 	lsr	r3, r4, #16
     86 	cmp	r3, r1
     87 	movhs	r4, r3
     88 	subhs	ip, ip, #(16 * 12)
     89 
     90 	lsr	r3, r4, #8
     91 	cmp	r3, r1
     92 	movhs	r4, r3
     93 	subhs	ip, ip, #(8 * 12)
     94 
     95 	lsr	r3, r4, #4
     96 	cmp	r3, r1
     97 	movhs	r4, r3
     98 	subhs	ip, #(4 * 12)
     99 
    100 	lsr	r3, r4, #2
    101 	cmp	r3, r1
    102 	movhs	r4, r3
    103 	subhs	ip, ip, #(2 * 12)
    104 
    105 	/* Last block, no need to update r3 or r4. */
    106 	cmp	r1, r4, lsr #1
    107 	subls	ip, ip, #(1 * 12)
    108 
    109 	ldr	r4, [sp], #8	/* restore r4, we are done with it. */
    110 	mov	r3, #0
    111 
    112 	JMP(ip)
    113 #  endif
    114 
    115 #define	IMM	#
    116 
    117 #define block(shift)                                                           \
    118 	cmp	r0, r1, lsl IMM shift;                                         \
    119 	ITT(hs);                                                               \
    120 	WIDE(addhs)	r3, r3, IMM (1 << shift);                              \
    121 	WIDE(subhs)	r0, r0, r1, lsl IMM shift
    122 
    123 	block(31)
    124 	block(30)
    125 	block(29)
    126 	block(28)
    127 	block(27)
    128 	block(26)
    129 	block(25)
    130 	block(24)
    131 	block(23)
    132 	block(22)
    133 	block(21)
    134 	block(20)
    135 	block(19)
    136 	block(18)
    137 	block(17)
    138 	block(16)
    139 	block(15)
    140 	block(14)
    141 	block(13)
    142 	block(12)
    143 	block(11)
    144 	block(10)
    145 	block(9)
    146 	block(8)
    147 	block(7)
    148 	block(6)
    149 	block(5)
    150 	block(4)
    151 	block(3)
    152 	block(2)
    153 	block(1)
    154 LOCAL_LABEL(div0block):
    155 	block(0)
    156 
    157 	str	r0, [r2]
    158 	mov	r0, r3
    159 	JMP(lr)
    160 
    161 LOCAL_LABEL(quotient0):
    162 	str	r0, [r2]
    163 	mov	r0, #0
    164 	JMP(lr)
    165 
    166 LOCAL_LABEL(divby1):
    167 	mov	r3, #0
    168 	str	r3, [r2]
    169 	JMP(lr)
    170 #endif /* __ARM_ARCH_EXT_IDIV__ */
    171 
    172 LOCAL_LABEL(divby0):
    173 	mov	r0, #0
    174 #ifdef __ARM_EABI__
    175 	b	__aeabi_idiv0
    176 #else
    177 	JMP(lr)
    178 #endif
    179 
    180 END_COMPILERRT_FUNCTION(__udivmodsi4)
    181