/*===-- udivsi3.S - 32-bit unsigned integer divide ------------------------===//
 *
 * The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===//
 *
 * This file implements the __udivsi3 (32-bit unsigned integer divide)
 * function for the ARM 32-bit architecture.
 *
 *===----------------------------------------------------------------------===*/

#include "../assembly.h"

    .syntax unified
    .text

#if __ARM_ARCH_ISA_THUMB == 2
    .thumb
#endif

    .p2align 2
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_uidiv, __udivsi3)

@ unsigned int __udivsi3(unsigned int dividend, unsigned int divisor)
@ Calculate and return the quotient of the (unsigned) division.

#if __ARM_ARCH_ISA_THUMB == 2
DEFINE_COMPILERRT_THUMB_FUNCTION(__udivsi3)
#else
DEFINE_COMPILERRT_FUNCTION(__udivsi3)
#endif
#if __ARM_ARCH_EXT_IDIV__
    tst     r1, r1
    beq     LOCAL_LABEL(divby0)     @ divisor == 0: handle separately
    udiv    r0, r0, r1              @ hardware divide (integer divide extension)
    bx      lr
#else
    cmp     r1, #1
    bcc     LOCAL_LABEL(divby0)     @ divisor == 0: handle separately
    IT(eq)
    JMPc(lr, eq)                    @ divisor == 1: quotient is the dividend
    cmp     r0, r1
    ITT(cc)
    movcc   r0, #0
    JMPc(lr, cc)                    @ dividend < divisor: quotient is 0
    /*
     * Implement division using the binary long division algorithm.
     *
     * r0 is the numerator, r1 the denominator.
     *
     * The code before JMP computes the correct shift I, so that
     * r0 and (r1 << I) have the highest bit set in the same position.
     * At the time of JMP, ip := address of div0block minus I times the
     * size of one block() expansion, which is fixed: 12 bytes in ARM
     * mode, 14 bytes in Thumb mode.
     *
     * block(shift) implements the test-and-update-quotient core.
     * It assumes (r1 << shift) can be computed without overflow and
     * that (r0 >> shift) < 2 * r1. The quotient is stored in r3.
     */
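
    /*
     * For orientation, the unrolled blocks below correspond roughly to the
     * following C sketch (illustrative only, not part of the build;
     * udiv32 and highest_set_bit are hypothetical names standing in for
     * this routine and for the shift search performed below):
     *
     *     static int highest_set_bit(unsigned int x) {
     *         int n = 0;
     *         while (x >>= 1)
     *             ++n;
     *         return n;              // bit position of the most significant 1
     *     }
     *
     *     unsigned int udiv32(unsigned int n, unsigned int d) {
     *         unsigned int q = 0;    // quotient, kept in r3 below
     *         int shift = highest_set_bit(n) - highest_set_bit(d);
     *         for (; shift >= 0; --shift) {
     *             if (n >= (d << shift)) {   // one block(shift) expansion
     *                 q += 1u << shift;
     *                 n -= d << shift;
     *             }
     *         }
     *         return q;
     *     }
     */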

# ifdef __ARM_FEATURE_CLZ
    clz     ip, r0
    clz     r3, r1
    /* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
    sub     r3, r3, ip
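    /*
     * r3 now holds the shift I.  The entry point is found by stepping back
     * from div0block by I blocks:
     *   ARM:     12 * I = (I << 2) + (I << 3)
     *   Thumb-2: 14 * I = (I << 1) + (I << 2) + (I << 3), with the "+ 1" on
     *            the adr below keeping the Thumb bit set for the final bx.
     */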
#  if __ARM_ARCH_ISA_THUMB == 2
    adr     ip, LOCAL_LABEL(div0block) + 1
    sub     ip, ip, r3, lsl #1
#  else
    adr     ip, LOCAL_LABEL(div0block)
#  endif
    sub     ip, ip, r3, lsl #2
    sub     ip, ip, r3, lsl #3
    mov     r3, #0
    bx      ip
# else
#  if __ARM_ARCH_ISA_THUMB == 2
#   error THUMB mode requires CLZ or UDIV
#  endif
    mov     r2, r0
    adr     ip, LOCAL_LABEL(div0block)

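    /*
     * Without CLZ, find the shift I by a branch-free binary search: test
     * whether the numerator, shifted right by 16, 8, 4, 2 and finally 1,
     * is still at least the denominator, and for each successful test move
     * the jump target back by the matching number of 12-byte blocks.
     */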
    lsr     r3, r2, #16
    cmp     r3, r1
    movhs   r2, r3
    subhs   ip, ip, #(16 * 12)

    lsr     r3, r2, #8
    cmp     r3, r1
    movhs   r2, r3
    subhs   ip, ip, #(8 * 12)

    lsr     r3, r2, #4
    cmp     r3, r1
    movhs   r2, r3
    subhs   ip, ip, #(4 * 12)

    lsr     r3, r2, #2
    cmp     r3, r1
    movhs   r2, r3
    subhs   ip, ip, #(2 * 12)

    /* Last block, no need to update r2 or r3. */
    cmp     r1, r2, lsr #1
    subls   ip, ip, #(1 * 12)

    mov     r3, #0

    JMP(ip)
# endif

#define IMM #

#define block(shift)                                        \
    cmp     r0, r1, lsl IMM shift;                          \
    ITT(hs);                                                \
    WIDE(addhs) r3, r3, IMM (1 << shift);                   \
    WIDE(subhs) r0, r0, r1, lsl IMM shift
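
/*
 * Each block(shift) expansion is one unrolled step of the loop in the C
 * sketch above, roughly:
 *
 *     if (r0 >= (r1 << shift)) { r3 += 1u << shift; r0 -= r1 << shift; }
 *
 * IMM re-introduces the '#' immediate prefix: written directly inside the
 * macro body it would be taken as the C preprocessor's stringizing operator.
 */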

    block(31)
    block(30)
    block(29)
    block(28)
    block(27)
    block(26)
    block(25)
    block(24)
    block(23)
    block(22)
    block(21)
    block(20)
    block(19)
    block(18)
    block(17)
    block(16)
    block(15)
    block(14)
    block(13)
    block(12)
    block(11)
    block(10)
    block(9)
    block(8)
    block(7)
    block(6)
    block(5)
    block(4)
    block(3)
    block(2)
    block(1)
LOCAL_LABEL(div0block):
    block(0)

    mov     r0, r3
    JMP(lr)
#endif /* __ARM_ARCH_EXT_IDIV__ */

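    /*
     * Division by zero: the quotient handed back is 0.  On EABI targets,
     * control is passed to __aeabi_idiv0 so the platform can provide its
     * own divide-by-zero handling.
     */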
LOCAL_LABEL(divby0):
    mov     r0, #0
#ifdef __ARM_EABI__
    b       __aeabi_idiv0
#else
    JMP(lr)
#endif

END_COMPILERRT_FUNCTION(__udivsi3)
