/*===-- udivmodsi4.S - 32-bit unsigned integer divide and modulus ---------===//
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===//
 *
 * This file implements the __udivmodsi4 (32-bit unsigned integer divide and
 * modulus) function for the ARM 32-bit architecture.
 *
 *===----------------------------------------------------------------------===*/

#include "../assembly.h"

        .syntax unified

        .text
        .p2align 2
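/*
 * __udivmodsi4(numerator, denominator, remainder pointer)
 *   numerator   -- passed in r0
 *   denominator -- passed in r1
 *   remainder   -- stored through the pointer passed in r2
 *   quotient    -- returned in r0
 */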
DEFINE_COMPILERRT_FUNCTION(__udivmodsi4)
#if __ARM_ARCH_EXT_IDIV__
        tst     r1, r1                  /* denominator == 0? */
        beq     LOCAL_LABEL(divby0)
        mov     r3, r0
        udiv    r0, r3, r1              /* quotient  = numerator / denominator */
        mls     r1, r0, r1, r3          /* remainder = numerator - quotient * denominator */
        str     r1, [r2]
        bx      lr
#else
        cmp     r1, #1
        bcc     LOCAL_LABEL(divby0)     /* denominator == 0 */
        beq     LOCAL_LABEL(divby1)     /* denominator == 1 */
        cmp     r0, r1
        bcc     LOCAL_LABEL(quotient0)  /* numerator < denominator */
        /*
         * Implement division using the binary long division algorithm.
         *
         * r0 is the numerator, r1 the denominator.
         *
         * The code before JMP computes the correct shift I, so that
         * r0 and (r1 << I) have the highest bit set in the same position.
         * At the time of JMP, ip := .Ldiv0block - 12 * I.
         * This relies on each expansion of block() below occupying exactly
         * 12 bytes (three 4-byte instructions in ARM mode).
         *
         * block(shift) implements the test-and-update-quotient core.
         * It assumes (r1 << shift) can be computed without overflow and
         * that r0 < 2 * (r1 << shift). The quotient is accumulated in r3.
         */
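        /*
         * For reference, a roughly equivalent C sketch of this loop
         * (illustrative only, not assembled; 'n', 'd', 'q', 'rem' and 'I' are
         * names introduced here, and the special cases handled above are
         * assumed to be out of the way):
         *
         *   unsigned q = 0;
         *   for (int shift = I; shift >= 0; --shift) {
         *       if (n >= (d << shift)) {   // block(shift): test,
         *           n -= d << shift;       // subtract,
         *           q += 1u << shift;      // and update the quotient
         *       }
         *   }
         *   *rem = n;                      // what is left of n is the remainder
         *   return q;
         */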

# ifdef __ARM_FEATURE_CLZ
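        /* With CLZ, I = clz(denominator) - clz(numerator) is computed
           directly and ip is pointed at div0block - 12 * I. */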
        clz     ip, r0
        clz     r3, r1
        /* r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3. */
        sub     r3, r3, ip              /* r3 := I */
        adr     ip, LOCAL_LABEL(div0block)
        sub     ip, ip, r3, lsl #2
        sub     ip, ip, r3, lsl #3      /* ip := div0block - 12 * I */
        mov     r3, #0                  /* quotient accumulator */
        bx      ip
# else
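        /* Without CLZ, find I by successive halving: r4 holds the numerator
           shifted right by the shift accepted so far, and each step tests
           whether a further shift of 16/8/4/2/1 bits still leaves a value no
           smaller than the denominator, folding every accepted shift into the
           ip offset (12 bytes of block() per bit of shift). */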
        str     r4, [sp, #-8]!          /* spill r4, keeping the stack 8-byte aligned */

        mov     r4, r0
        adr     ip, LOCAL_LABEL(div0block)

        lsr     r3, r4, #16
        cmp     r3, r1
        movhs   r4, r3
        subhs   ip, ip, #(16 * 12)

        lsr     r3, r4, #8
        cmp     r3, r1
        movhs   r4, r3
        subhs   ip, ip, #(8 * 12)

        lsr     r3, r4, #4
        cmp     r3, r1
        movhs   r4, r3
        subhs   ip, ip, #(4 * 12)

        lsr     r3, r4, #2
        cmp     r3, r1
        movhs   r4, r3
        subhs   ip, ip, #(2 * 12)

        /* Last block, no need to update r3 or r4. */
        cmp     r1, r4, lsr #1
        subls   ip, ip, #(1 * 12)

        ldr     r4, [sp], #8            /* restore r4, we are done with it. */
        mov     r3, #0                  /* quotient accumulator */

        JMP(ip)
# endif

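/* IMM expands to a bare '#' inside block() below; writing '#' directly in
   front of the macro parameter would be taken as the C preprocessor's
   stringification operator. */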
#define IMM #

#define block(shift)                                                          \
        cmp     r0, r1, lsl IMM shift;                                        \
        addhs   r3, r3, IMM (1 << shift);                                     \
        subhs   r0, r0, r1, lsl IMM shift

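/* The branch above lands at div0block - 12 * I, so exactly blocks (I)
   down to (0) are executed. */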
block(31)
block(30)
block(29)
block(28)
block(27)
block(26)
block(25)
block(24)
block(23)
block(22)
block(21)
block(20)
block(19)
block(18)
block(17)
block(16)
block(15)
block(14)
block(13)
block(12)
block(11)
block(10)
block(9)
block(8)
block(7)
block(6)
block(5)
block(4)
block(3)
block(2)
block(1)
LOCAL_LABEL(div0block):
block(0)

        str     r0, [r2]                /* store the remainder */
        mov     r0, r3                  /* return the quotient */
        JMP(lr)

LOCAL_LABEL(quotient0):
        str     r0, [r2]                /* numerator < denominator: the remainder is the numerator... */
        mov     r0, #0                  /* ...and the quotient is 0 */
        JMP(lr)

LOCAL_LABEL(divby1):
        mov     r3, #0
        str     r3, [r2]                /* denominator == 1: the remainder is 0... */
        JMP(lr)                         /* ...and the quotient, the unchanged numerator, is already in r0 */
#endif /* __ARM_ARCH_EXT_IDIV__ */

LOCAL_LABEL(divby0):
        /* Division by zero: return a 0 quotient without writing the remainder;
           on EABI targets, branch to the __aeabi_idiv0 handler instead of
           returning directly. */
        mov     r0, #0
#ifdef __ARM_EABI__
        b       __aeabi_idiv0
#else
        JMP(lr)
#endif

END_COMPILERRT_FUNCTION(__udivmodsi4)