/* HImode div/mod functions for the GCC support library for the Renesas RL78 processors.
   Copyright (C) 2012-2015 Free Software Foundation, Inc.
   Contributed by Red Hat.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "vregs.h"

28 .macro MAKE_GENERIC which,need_result
29
30 .if \need_result
31 quot = r8
32 num = r10
33 den = r12
34 bit = r14
35 .else
36 num = r8
37 quot = r10
38 den = r12
39 bit = r14
40 .endif
41
42 quotB0 = quot
43 quotB1 = quot+1
44
45 numB0 = num
46 numB1 = num+1
47
48 denB0 = den
49 denB1 = den+1
50
51 bitB0 = bit
52 bitB1 = bit+1
53
54 #define bit bc
55 #define bitB0 c
56 #define bitB1 b
57
58 START_FUNC __generic_hidivmod\which
59
60 num_lt_den\which:
61 .if \need_result
62 movw r8, #0
63 .else
64 movw ax, [sp+8]
65 movw r8, ax
66 .endif
67 ret
68
69 ;; These routines leave DE alone - the signed functions use DE
70 ;; to store sign information that must remain intact
71
72 .if \need_result
73 .global __generic_hidiv
74 __generic_hidiv:
75
76 .else
77
78 .global __generic_himod
79 __generic_himod:
80
81 .endif
82
83 ;; (quot,rem) = 8[sp] /% 10[sp]
84
85 movw hl, sp
86 movw ax, [hl+10] ; denH
87 cmpw ax, [hl+8] ; numH
88 bh $num_lt_den\which
89
90 ;; (quot,rem) = 16[sp] /% 20[sp]
91
92 ;; copy numerator
93 movw ax, [hl+8]
94 movw num, ax
95
96 ;; copy denomonator
97 movw ax, [hl+10]
98 movw den, ax
99
100 movw ax, den
101 cmpw ax, #0
102 bnz $den_not_zero\which
103 movw num, #0
104 ret
105
106 den_not_zero\which:
107 .if \need_result
108 ;; zero out quot
109 movw quot, #0
110 .endif
111
112 ;; initialize bit to 1
113 movw bit, #1
114
115 ; while (den < num && !(den & (1L << BITS_MINUS_1)))
116
117 shift_den_bit\which:
118 movw ax, den
119 mov1 cy,a.7
120 bc $enter_main_loop\which
121 cmpw ax, num
122 bh $enter_main_loop\which
123
124 ;; den <<= 1
125 ; movw ax, den ; already has it from the cmpw above
126 shlw ax, 1
127 movw den, ax
128
129 ;; bit <<= 1
130 .if \need_result
131 #ifdef bit
132 shlw bit, 1
133 #else
134 movw ax, bit
135 shlw ax, 1
136 movw bit, ax
137 #endif
138 .else
139 ;; if we don't need to compute the quotent, we don't need an
140 ;; actual bit *mask*, we just need to keep track of which bit
141 inc bitB0
142 .endif
143
144 br $shift_den_bit\which
145
146 main_loop\which:
147
148 ;; if (num >= den) (cmp den > num)
149 movw ax, den
150 cmpw ax, num
151 bh $next_loop\which
152
153 ;; num -= den
154 movw ax, num
155 subw ax, den
156 movw num, ax
157
158 .if \need_result
159 ;; res |= bit
160 mov a, quotB0
161 or a, bitB0
162 mov quotB0, a
163 mov a, quotB1
164 or a, bitB1
165 mov quotB1, a
166 .endif
167
168 next_loop\which:
169
170 ;; den >>= 1
171 movw ax, den
172 shrw ax, 1
173 movw den, ax
174
175 .if \need_result
176 ;; bit >>= 1
177 movw ax, bit
178 shrw ax, 1
179 movw bit, ax
180 .else
181 dec bitB0
182 .endif
183
184 enter_main_loop\which:
185 .if \need_result
186 movw ax, bit
187 cmpw ax, #0
188 .else
189 cmp0 bitB0
190 .endif
191 bnz $main_loop\which
192
193 main_loop_done\which:
194 ret
195 END_FUNC __generic_hidivmod\which
196 .endm
;----------------------------------------------------------------------

199 MAKE_GENERIC _d 1
200 MAKE_GENERIC _m 0

;----------------------------------------------------------------------

204 START_FUNC ___udivhi3
205 ;; r8 = 4[sp] / 6[sp]
206 call $!__generic_hidiv
207 ret
208 END_FUNC ___udivhi3


211 START_FUNC ___umodhi3
212 ;; r8 = 4[sp] % 6[sp]
213 call $!__generic_himod
214 ret
215 END_FUNC ___umodhi3

;----------------------------------------------------------------------

219 .macro NEG_AX
220 movw hl, ax
221 movw ax, #0
222 subw ax, [hl]
223 movw [hl], ax
224 .endm

;----------------------------------------------------------------------

228 START_FUNC ___divhi3
229 ;; r8 = 4[sp] / 6[sp]
230 movw de, #0
231 mov a, [sp+5]
232 mov1 cy, a.7
233 bc $div_signed_num
234 mov a, [sp+7]
235 mov1 cy, a.7
236 bc $div_signed_den
237 call $!__generic_hidiv
238 ret
239
240 div_signed_num:
241 ;; neg [sp+4]
242 movw ax, sp
243 addw ax, #4
244 NEG_AX
245 mov d, #1
246 mov a, [sp+7]
247 mov1 cy, a.7
248 bnc $div_unsigned_den
249 div_signed_den:
250 ;; neg [sp+6]
251 movw ax, sp
252 addw ax, #6
253 NEG_AX
254 mov e, #1
255 div_unsigned_den:
256 call $!__generic_hidiv
257
258 mov a, d
259 cmp0 a
260 bz $div_skip_restore_num
261 ;; We have to restore the numerator [sp+4]
262 movw ax, sp
263 addw ax, #4
264 NEG_AX
265 mov a, d
266 div_skip_restore_num:
267 xor a, e
268 bz $div_no_neg
269 movw ax, #r8
270 NEG_AX
271 div_no_neg:
272 mov a, e
273 cmp0 a
274 bz $div_skip_restore_den
275 movw ax, sp
276 addw ax, #6
277 NEG_AX
278 div_skip_restore_den:
279 ret
280 END_FUNC ___divhi3


283 START_FUNC ___modhi3
284 ;; r8 = 4[sp] % 6[sp]
285 movw de, #0
286 mov a, [sp+5]
287 mov1 cy, a.7
288 bc $mod_signed_num
289 mov a, [sp+7]
290 mov1 cy, a.7
291 bc $mod_signed_den
292 call $!__generic_himod
293 ret
294
295 mod_signed_num:
296 ;; neg [sp+4]
297 movw ax, sp
298 addw ax, #4
299 NEG_AX
300 mov d, #1
301 mov a, [sp+7]
302 mov1 cy, a.7
303 bnc $mod_unsigned_den
304 mod_signed_den:
305 ;; neg [sp+6]
306 movw ax, sp
307 addw ax, #6
308 NEG_AX
309 mod_unsigned_den:
310 call $!__generic_himod
311
312 mov a, d
313 cmp0 a
314 bz $mod_no_neg
315 movw ax, #r8
316 NEG_AX
317 ;; Also restore numerator
318 movw ax, sp
319 addw ax, #4
320 NEG_AX
321 mod_no_neg:
322 mov a, e
323 cmp0 a
324 bz $mod_skip_restore_den
325 movw ax, sp
326 addw ax, #6
327 NEG_AX
328 mod_skip_restore_den:
329 ret
330 END_FUNC ___modhi3