/*	$NetBSD: bcopy.S,v 1.6.40.1 2008/05/16 02:25:36 yamt Exp $	*/

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copy routines for NetBSD/hppa.
 */

#undef _LOCORE
#define _LOCORE	/* XXX fredette - unfortunate */
#include <machine/asm.h>
#include <machine/frame.h>

#if defined(LIBC_SCCS) && !defined(lint)
RCSID("$NetBSD: bcopy.S,v 1.6.40.1 2008/05/16 02:25:36 yamt Exp $")
#endif /* LIBC_SCCS and not lint */

/*
 * The stbys instruction is a little asymmetric.  When (%r2 & 3)
 * is zero, stbys,b,m %r1, 4(%r2) works like stws,ma.  You
 * might then wish that when (%r2 & 3) == 0, stbys,e,m %r1, -4(%r2)
 * worked like stws,mb.  But it doesn't.
 *
 * This macro works around this problem.  It requires that %t2
 * hold the number of bytes that will be written by this store
 * (meaning that it ranges from one to four).
 *
 * Watch the delay-slot trickery here.  The comib is used to set
 * up which instruction, either the stws or the stbys, is run
 * in the delay slot of the b instruction.
 */
#define	_STBYS_E_M(r, dst_spc, dst_off)	\
	comib,<>	4, %t2, 4	! \
	b		4	! \
	stws,mb		r, -4(dst_spc, dst_off)	! \
	stbys,e,m	r, 0(dst_spc, dst_off)
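/*
 * Illustrative walk-through of _STBYS_E_M (a reading of the macro
 * above, not part of the original commentary; branch targets are
 * taken to be raw displacements, target = branch address + 8 + d):
 * when %t2 == 4 the comib falls through, the b branches past the
 * macro, and the stws,mb in its delay slot does a full-word
 * pre-decrementing store.  When %t2 < 4 the comib branches to the
 * stbys; since the b sits in the comib's delay slot, the stbys
 * becomes the b's delay-slot instruction instead, so only the
 * partial-word store runs.  Either way control resumes at the
 * instruction following the macro.
 */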

/*
 * This macro does a bulk copy with no shifting.  cmplt and m are
 * the completer and displacement multiplier, respectively, for
 * the load and store instructions.
 */
#define _COPY(src_spc, src_off, dst_spc, dst_off, count, cmplt, m) \
		! \
	/*	! \
	 * Loop storing 16 bytes at a time.  Since count	! \
	 * may be > INT_MAX, we have to be careful and	! \
	 * avoid comparisons that treat it as a signed	! \
	 * quantity, until after this loop, when count	! \
	 * is guaranteed to be less than 16.	! \
	 */	! \
	comib,>>=,n	15, count, _LABEL(_skip16)	! \
.label _LABEL(_loop16)	! \
	addi		-16, count, count	! \
	ldws,cmplt	m*4(src_spc, src_off), %t1	! \
	ldws,cmplt	m*4(src_spc, src_off), %t2	! \
	ldws,cmplt	m*4(src_spc, src_off), %t3	! \
	ldws,cmplt	m*4(src_spc, src_off), %t4	! \
	stws,cmplt	%t1, m*4(dst_spc, dst_off)	! \
	stws,cmplt	%t2, m*4(dst_spc, dst_off)	! \
	stws,cmplt	%t3, m*4(dst_spc, dst_off)	! \
	comib,<<	15, count, _LABEL(_loop16)	! \
	stws,cmplt	%t4, m*4(dst_spc, dst_off)	! \
.label _LABEL(_skip16)	! \
		! \
	/* Loop storing 4 bytes at a time. */	! \
	addib,<,n	-4, count, _LABEL(_skip4)	! \
.label _LABEL(_loop4)	! \
	ldws,cmplt	m*4(src_spc, src_off), %t1	! \
	addib,>=	-4, count, _LABEL(_loop4)	! \
	stws,cmplt	%t1, m*4(dst_spc, dst_off)	! \
.label _LABEL(_skip4)	! \
	/* Restore the correct count. */	! \
	addi		4, count, count	! \
		! \
.label _LABEL(_do1)	! \
		! \
	/* Loop storing 1 byte at a time. */	! \
	addib,<,n	-1, count, _LABEL(_skip1)	! \
.label _LABEL(_loop1)	! \
	ldbs,cmplt	m*1(src_spc, src_off), %t1	! \
	addib,>=	-1, count, _LABEL(_loop1)	! \
	stbs,cmplt	%t1, m*1(dst_spc, dst_off)	! \
.label _LABEL(_skip1)	! \
	/* Restore the correct count. */	! \
	b		_LABEL(_done)	! \
	addi		1, count, count
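/*
 * Worked example of the count handling above (example values only):
 * with count = 7, the 16-byte loop is skipped; "addib,<,n -4" leaves
 * count = 3, which is not negative, so the 4-byte loop runs once; its
 * next addib makes count = -1 and falls through, and the "addi 4"
 * restores count = 3, leaving the byte loop to store the remaining
 * three bytes.
 */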

/*
 * This macro is definitely strange.  It exists purely to
 * allow the _COPYS macro to be reused, but because it
 * requires this long attempt to explain it, I'm starting
 * to doubt the value of that.
 *
 * Part of the expansion of the _COPYS macro below are loops
 * that copy four words or one word at a time, performing shifts
 * to get data to line up correctly in the destination buffer.
 *
 * The _COPYS macro is used when copying backwards, as well
 * as forwards.  The 4-word loop always loads into %t1, %t2, %t3,
 * and %t4 in that order.  This means that when copying forward,
 * %t1 will have the word from the lowest address, and %t4 will
 * have the word from the highest address.  When copying
 * backwards, the opposite is true.
 *
 * The shift instructions need pairs of registers with adjacent
 * words, with the register containing the word from the lowest
 * address *always* coming first.  It is this asymmetry that
 * gives rise to this macro - depending on which direction
 * we're copying in, these ordered pairs are different.
 *
 * Fortunately, we can compute those register numbers at compile
 * time, and assemble them manually into a shift instruction.
 * That's what this macro does.
 *
 * This macro takes three arguments.  n ranges from 0 to 3 and
 * is the "shift number", i.e., n = 0 means we're doing the
 * shift for what will be the first store.  t is simply the
 * number of the register that receives the shifted result
 * (1 for %r1, 22 for %t1, and so on).
 *
 * m is the displacement multiplier from the _COPYS macro call.
 * This is 1 for a forward copy and -1 for a backwards copy.
 * So, the ((m + 1) / 2) term yields 0 for a backwards copy and
 * 1 for a forward copy, and the ((m - 1) / 2) term yields
 * 0 for a forward copy, and -1 for a backwards copy.
 * These terms are used to discriminate the register computations
 * below.
 *
 * When copying forward, then, the first register used with
 * the first vshd will be 19 + (3 - ((0 - 1) & 3)), or %t4,
 * which matches _COPYS' requirement that the word last loaded
 * be in %t4.  The first register used for the second vshd
 * will then "wrap" around to 19 + (3 - ((1 - 1) & 3)), or %t1.
 * And so on to %t2 and %t3.
 *
 * When copying forward, the second register used with the first
 * vshd will be 19 + (3 - ((0 + 0) & 3)), or %t1.  It will
 * continue to be %t2, then %t3, and finally %t4.
 *
 * When copying backwards, the values for the first and second
 * register for each vshd are reversed from the forwards case.
 * (Symmetry reclaimed!)  Proving this is "left as an exercise
 * for the reader" (remember the different discriminating values!)
 */
#define _VSHD(n, m, t) \
	.word	(0xd0000000 | \
		 ((19 + (3 - ((n - 1 * ((m + 1) / 2)) & 3))) << 16) | \
		 ((19 + (3 - ((n + 1 * ((m - 1) / 2)) & 3))) << 21) | \
		 (t))
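/*
 * Worked example of the register computation (derived from the macro
 * above, example values only): for a forward copy (m = 1) the two
 * operand fields come out as
 *	n = 0:	19 (%t4) and 22 (%t1)
 *	n = 1:	22 (%t1) and 21 (%t2)
 *	n = 2:	21 (%t2) and 20 (%t3)
 *	n = 3:	20 (%t3) and 19 (%t4)
 * For a backwards copy (m = -1) each pair is simply swapped, e.g.
 * n = 0 gives 22 (%t1) and 19 (%t4).
 */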

/*
 * This macro does a bulk copy with shifting.  cmplt and m are
 * the completer and displacement multiplier, respectively, for
 * the load and store instructions.  It is assumed that the
 * word last loaded is already in %t4.
 */
#define _COPYS(src_spc, src_off, dst_spc, dst_off, count, cmplt, m) \
		! \
	/*	! \
	 * Loop storing 16 bytes at a time.  Since count	! \
	 * may be > INT_MAX, we have to be careful and	! \
	 * avoid comparisons that treat it as a signed	! \
	 * quantity, until after this loop, when count	! \
	 * is guaranteed to be less than 16.	! \
	 */	! \
	comib,>>=,n	15, count, _LABEL(S_skip16)	! \
.label _LABEL(S_loop16)	! \
	addi		-16, count, count	! \
	ldws,cmplt	m*4(src_spc, src_off), %t1	! \
	ldws,cmplt	m*4(src_spc, src_off), %t2	! \
	ldws,cmplt	m*4(src_spc, src_off), %t3	! \
	_VSHD(0, m, 1)	/* vshd %t4, %t1, %r1 */	! \
	ldws,cmplt	m*4(src_spc, src_off), %t4	! \
	_VSHD(1, m, 22)	/* vshd %t1, %t2, %t1 */	! \
	_VSHD(2, m, 21)	/* vshd %t2, %t3, %t2 */	! \
	_VSHD(3, m, 20)	/* vshd %t3, %t4, %t3 */	! \
	stws,cmplt	%r1, m*4(dst_spc, dst_off)	! \
	stws,cmplt	%t1, m*4(dst_spc, dst_off)	! \
	stws,cmplt	%t2, m*4(dst_spc, dst_off)	! \
	comib,<<	15, count, _LABEL(S_loop16)	! \
	stws,cmplt	%t3, m*4(dst_spc, dst_off)	! \
.label _LABEL(S_skip16)	! \
		! \
	/* Loop storing 4 bytes at a time. */	! \
	addib,<,n	-4, count, _LABEL(S_skip4)	! \
.label _LABEL(S_loop4)	! \
	ldws,cmplt	m*4(src_spc, src_off), %t1	! \
	_VSHD(0, m, 1)	/* into %r1 (1) */	! \
	copy		%t1, %t4	! \
	addib,>=	-4, count, _LABEL(S_loop4)	! \
	stws,cmplt	%r1, m*4(dst_spc, dst_off)	! \
.label _LABEL(S_skip4)	! \
		! \
	/*	! \
	 * We now need to "back up" src_off by the	! \
	 * number of bytes remaining in the FIFO	! \
	 * (i.e., the number of bytes remaining in %t4),	! \
	 * because (the correct) count still includes	! \
	 * these bytes, and we intend to keep it that	! \
	 * way, and finish with the single-byte copier.	! \
	 *	! \
	 * The number of bytes remaining in the FIFO is	! \
	 * related to the shift count, so recover it,	! \
	 * restoring the correct count at the same time.	! \
	 */	! \
	mfctl		%cr11, %t1	! \
	addi		4, count, count	! \
	shd		%r0, %t1, 3, %t1	! \
		! \
	/*	! \
	 * If we're copying forward, the shift count	! \
	 * is the number of bytes remaining in the	! \
	 * FIFO, and we want to subtract it from src_off.	! \
	 * If we're copying backwards, (4 - shift count)	! \
	 * is the number of bytes remaining in the FIFO,	! \
	 * and we want to add it to src_off.	! \
	 *	! \
	 * We observe that x + (4 - y) = x - (y - 4),	! \
	 * and introduce this instruction to add -4 when	! \
	 * m is -1, although this does mean one extra	! \
	 * instruction in the forward case.	! \
	 */	! \
	addi		4*((m - 1) / 2), %t1, %t1	! \
		! \
	/* Now branch to the byte-at-a-time loop. */	! \
	b		_LABEL(_do1)	! \
	sub		src_off, %t1, %t1

/*
 * This macro copies a region in the forward direction.
 */
#define _COPY_FORWARD(src_spc, src_off, dst_spc, dst_off, count) \
		! \
	/*	! \
	 * Since in the shifting-left case we will	! \
	 * load 8 bytes before checking count, to	! \
	 * keep things simple, branch to the byte	! \
	 * copier unless we're copying at least 8.	! \
	 */	! \
	comib,>>,n	8, count, _LABEL(_do1)	! \
		! \
	/*	! \
	 * Once we 4-byte align the source offset,	! \
	 * figure out how many bytes from the region	! \
	 * will be in the first 4-byte word we read.	! \
	 * Ditto for writing the destination offset.	! \
	 */	! \
	extru		src_off, 31, 2, %t1	! \
	extru		dst_off, 31, 2, %t2	! \
	subi		4, %t1, %t1	! \
	subi		4, %t2, %t2	! \
		! \
	/*	! \
	 * Calculate the byte shift required.  A	! \
	 * positive value means a source 4-byte word	! \
	 * has to be shifted to the right to line up	! \
	 * as a destination 4-byte word.	! \
	 */	! \
	sub		%t1, %t2, %t1	! \
		! \
	/* 4-byte align src_off. */	! \
	depi		0, 31, 2, src_off	! \
		! \
	/*	! \
	 * It's somewhat important to note that this	! \
	 * code thinks of count as "the number of bytes	! \
	 * that haven't been stored yet", as opposed to	! \
	 * "the number of bytes that haven't been copied	! \
	 * yet".  The distinction is subtle, but becomes	! \
	 * apparent at the end of the shifting code, where	! \
	 * we "back up" src_off to correspond to count,	! \
	 * as opposed to flushing the FIFO.	! \
	 *	! \
	 * We calculated above how many bytes our first	! \
	 * store will store, so update count now.	! \
	 *	! \
	 * If the shift is zero, strictly as an optimization	! \
	 * we use a copy loop that does no shifting.	! \
	 */	! \
	comb,<>		%r0, %t1, _LABEL(_shifting)	! \
	sub		count, %t2, count	! \
		! \
	/* Load and store the first word. */	! \
	ldws,ma		4(src_spc, src_off), %t4	! \
	stbys,b,m	%t4, 4(dst_spc, dst_off)	! \
		! \
	/* Do the rest of the copy. */	! \
	_COPY(src_spc,src_off,dst_spc,dst_off,count,ma,1)	! \
		! \
.label _LABEL(_shifting)	! \
		! \
	/*	! \
	 * If shift < 0, we need to shift words to the	! \
	 * left.  Since we can't do this directly, we	! \
	 * adjust the shift so it's a shift to the right	! \
	 * and load the first word into the high word of	! \
	 * the FIFO.  Otherwise, we load a zero into the	! \
	 * high word of the FIFO.	! \
	 */	! \
	comb,<=		%r0, %t1, _LABEL(_shiftingrt)	! \
	copy		%r0, %t3	! \
	addi		4, %t1, %t1	! \
	ldws,ma		4(src_spc, src_off), %t3	! \
.label _LABEL(_shiftingrt)	! \
		! \
	/*	! \
	 * Turn the shift byte count into a bit count,	! \
	 * load the next word, set the Shift Amount	! \
	 * Register, and form and store the first word.	! \
	 */	! \
	sh3add		%t1, %r0, %t1	! \
	ldws,ma		4(src_spc, src_off), %t4	! \
	mtctl		%t1, %cr11	! \
	vshd		%t3, %t4, %r1	! \
	stbys,b,m	%r1, 4(dst_spc, dst_off)	! \
		! \
	/* Do the rest of the copy. */	! \
	_COPYS(src_spc,src_off,dst_spc,dst_off,count,ma,1)
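/*
 * Worked example of the shift calculation above (example values only):
 * with src_off & 3 == 1 and dst_off & 3 == 3, the first source word
 * read holds 3 bytes of the region and the first destination word
 * written holds 1, so the shift is 3 - 1 = 2: each source word must
 * move right by two byte positions to line up in the destination.
 */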

/* This macro copies a region in the reverse direction. */
#define _COPY_REVERSE(src_spc, src_off, dst_spc, dst_off, count) \
		! \
	/* Immediately add count to both offsets. */	! \
	add		src_off, count, src_off	! \
	add		dst_off, count, dst_off	! \
		! \
	/*	! \
	 * Since in the shifting-right case we	! \
	 * will load 8 bytes before checking	! \
	 * count, to keep things simple, branch	! \
	 * to the byte copier unless we're	! \
	 * copying at least 8 bytes.	! \
	 */	! \
	comib,>>,n	8, count, _LABEL(_do1)	! \
		! \
	/*	! \
	 * Once we 4-byte align the source offset,	! \
	 * figure out how many bytes from the region	! \
	 * will be in the first 4-byte word we read.	! \
	 * Ditto for writing the destination offset.	! \
	 */	! \
	extru,<>	src_off, 31, 2, %t1	! \
	ldi		4, %t1	! \
	extru,<>	dst_off, 31, 2, %t2	! \
	ldi		4, %t2	! \
		! \
	/*	! \
	 * Calculate the byte shift required.  A	! \
	 * positive value means a source 4-byte	! \
	 * word has to be shifted to the right to	! \
	 * line up as a destination 4-byte word.	! \
	 */	! \
	sub		%t2, %t1, %t1	! \
		! \
	/*	! \
	 * 4-byte align src_off, leaving it pointing	! \
	 * to the 4-byte word *after* the next word	! \
	 * we intend to load.	! \
	 *	! \
	 * It's somewhat important to note that this	! \
	 * code thinks of count as "the number of bytes	! \
	 * that haven't been stored yet", as opposed to	! \
	 * "the number of bytes that haven't been copied	! \
	 * yet".  The distinction is subtle, but becomes	! \
	 * apparent at the end of the shifting code, where	! \
	 * we "back up" src_off to correspond to count,	! \
	 * as opposed to flushing the FIFO.	! \
	 *	! \
	 * We calculated above how many bytes our first	! \
	 * store will store, so update count now.	! \
	 *	! \
	 * If the shift is zero, we use a copy loop that	! \
	 * does no shifting.  NB: unlike the forward case,	! \
	 * this is NOT strictly an optimization.  If the	! \
	 * SAR is zero the vshds do NOT do the right thing.	! \
	 * This is another asymmetry more or less the	! \
	 * "fault" of vshd.	! \
	 */	! \
	addi		3, src_off, src_off	! \
	sub		count, %t2, count	! \
	comb,<>		%r0, %t1, _LABEL(_shifting)	! \
	depi		0, 31, 2, src_off	! \
		! \
	/* Load and store the first word. */	! \
	ldws,mb		-4(src_spc, src_off), %t4	! \
	_STBYS_E_M(%t4, dst_spc, dst_off)	! \
		! \
	/* Do the rest of the copy. */	! \
	_COPY(src_spc,src_off,dst_spc,dst_off,count,mb,-1)	! \
		! \
.label _LABEL(_shifting)	! \
		! \
	/*	! \
	 * If shift < 0, we need to shift words to the	! \
	 * left.  Since we can't do this directly, we	! \
	 * adjust the shift so it's a shift to the right	! \
	 * and load a zero into the low word of the FIFO.	! \
	 * Otherwise, we load the first word into the	! \
	 * low word of the FIFO.	! \
	 *	! \
	 * Note the nullification trickery here.  We	! \
	 * assume that we're shifting to the left, and	! \
	 * load zero into the low word of the FIFO.  Then	! \
	 * we nullify the addi if we're shifting to the	! \
	 * right.  If the addi is not nullified, we are	! \
	 * shifting to the left, so we nullify the load.	! \
	 */	! \
	copy		%r0, %t3	! \
	comb,<=,n	%r0, %t1, 0	! \
	addi,tr		4, %t1, %t1	! \
	ldws,mb		-4(src_spc, src_off), %t3	! \
		! \
	/*	! \
	 * Turn the shift byte count into a bit count,	! \
	 * load the next word, set the Shift Amount	! \
	 * Register, and form and store the first word.	! \
	 */	! \
	sh3add		%t1, %r0, %t1	! \
	ldws,mb		-4(src_spc, src_off), %t4	! \
	mtctl		%t1, %cr11	! \
	vshd		%t4, %t3, %r1	! \
	_STBYS_E_M(%r1, dst_spc, dst_off)	! \
		! \
	/* Do the rest of the copy. */	! \
	_COPYS(src_spc,src_off,dst_spc,dst_off,count,mb,-1)
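/*
 * Worked example of the alignment handling above (example values
 * only): after count is added, suppose src_off & 3 == 3; the extru,<>
 * extracts 3, which is nonzero, so the ldi is nullified and %t1 = 3,
 * the number of region bytes in the first (highest-addressed) word
 * read.  If src_off ends up 4-byte aligned, the extracted field is
 * zero, the ldi runs, and %t1 = 4, since that first word then lies
 * entirely within the region.
 */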

/*
 * For paranoia, when things aren't going well, enable this
 * code to assemble byte-at-a-time-only copying.
 */
#if 1
#undef _COPY_FORWARD
#define _COPY_FORWARD(src_spc, src_off, dst_spc, dst_off, count) \
	comb,=,n	%r0, count, _LABEL(_done)	! \
	ldbs,ma		1(src_spc, src_off), %r1	! \
	addib,<>	-1, count, -12	! \
	stbs,ma		%r1, 1(dst_spc, dst_off)	! \
	b,n		_LABEL(_done)
#undef _COPY_REVERSE
#define _COPY_REVERSE(src_spc, src_off, dst_spc, dst_off, count) \
	comb,=		%r0, count, _LABEL(_done)	! \
	add		src_off, count, src_off	! \
	add		dst_off, count, dst_off	! \
	ldbs,mb		-1(src_spc, src_off), %r1	! \
	addib,<>	-1, count, -12	! \
	stbs,mb		%r1, -1(dst_spc, dst_off)	! \
	b,n		_LABEL(_done)
#endif
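/*
 * In the byte-at-a-time loops above, the "-12" branch target reads as
 * a raw displacement: the branch resolves to the addib's own address
 * plus 8 minus 12, i.e. back one instruction to the ldbs, with the
 * stbs running in the delay slot, so each iteration copies one byte.
 */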

/*
 * If none of the following are defined, define BCOPY.
 */
#if !(defined(SPCOPY) || defined(MEMCPY) || defined(MEMMOVE))
#define	BCOPY
#endif

#if defined(SPCOPY) && !defined(_STANDALONE)
#include <sys/errno.h>
#include "assym.h"

/*
 * int spcopy(pa_space_t ssp, const void *src, pa_space_t dsp, void *dst,
 *	       size_t len)
 *
 * We assume that the regions do not overlap.
 */
LEAF_ENTRY(spcopy)

	/*
	 * Set up the fault handler, and load %ret0
	 * with EFAULT, assuming the copy will fail.
	 */
	.import	curlwp, data
	ldil	L%curlwp, %r31
	ldw	R%curlwp(%r31), %r31
#ifdef DIAGNOSTIC
	comb,<>,n %r0, %r31, Lspcopy_curlwp_ok
	ldil	L%panic, %r1
	ldil	L%Lspcopy_curlwp_bad, %arg0
	ldo	R%panic(%r1), %r1
	ldo	R%Lspcopy_curlwp_bad(%arg0), %arg0
	.call
	bv,n	%r0(%r1)
	nop
Lspcopy_curlwp_bad:
	.asciz	"spcopy: curlwp == NULL\n"
	.align	8
Lspcopy_curlwp_ok:
#endif /* DIAGNOSTIC */
	ldil	L%spcopy_fault, %r1
	ldw	L_ADDR(%r31), %r31
	ldo	R%spcopy_fault(%r1), %r1
	ldi	EFAULT, %ret0
	stw	%r1, U_PCB+PCB_ONFAULT(%r31)

	/* Set up the space registers. */
	mfsp	%sr2, %ret1
	mtsp	%arg0, %sr1
	mtsp	%arg2, %sr2

	/* Get the len argument and do the copy. */
	ldw	HPPA_FRAME_ARG(4)(%sp), %arg0
#define _LABEL(l) __CONCAT(spcopy,l)
	_COPY_FORWARD(%sr1,%arg1,%sr2,%arg3,%arg0)
_LABEL(_done):

	/* Return. */
	copy	%r0, %ret0
ALTENTRY(spcopy_fault)
	stw	%r0, U_PCB+PCB_ONFAULT(%r31)
	bv	%r0(%rp)
	mtsp	%ret1, %sr2
EXIT(spcopy)
#endif /* SPCOPY && !_STANDALONE */

#ifdef MEMCPY
/*
 * void *memcpy(void *restrict dst, const void *restrict src, size_t len);
 *
 * memcpy is specifically restricted to working on
 * non-overlapping regions, so we can just copy forward.
 */
LEAF_ENTRY(memcpy)
	copy	%arg0, %ret0
#define _LABEL(l) __CONCAT(memcpy,l)
	_COPY_FORWARD(%sr0,%arg1,%sr0,%arg0,%arg2)
_LABEL(_done):
	bv,n	%r0(%rp)
	nop
EXIT(memcpy)
#endif /* MEMCPY */

#ifdef BCOPY
/*
 * void bcopy(const void *src, void *dst, size_t len);
 */
LEAF_ENTRY(bcopy)
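	/*
	 * bcopy takes (src, dst, len), but the shared copy code below
	 * expects memmove-style (dst, src, len) arguments, so swap
	 * %arg0 and %arg1 through %r1 and fall through.
	 */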
	copy	%arg0, %r1
	copy	%arg1, %arg0
	copy	%r1, %arg1
	/* FALLTHROUGH */
#define _LABEL_F(l) __CONCAT(bcopy_F,l)
#define _LABEL_R(l) __CONCAT(bcopy_R,l)
#endif

#ifdef MEMMOVE
/*
 * void *memmove(void *dst, const void *src, size_t len);
 */
LEAF_ENTRY(memmove)
#define _LABEL_F(l) __CONCAT(memmove_F,l)
#define _LABEL_R(l) __CONCAT(memmove_R,l)
	copy	%arg0, %ret0
#endif /* MEMMOVE */

#if defined(BCOPY) || defined(MEMMOVE)

	/*
	 * If src >= dst or src + len <= dst, we copy
	 * forward, else we copy in reverse.
	 */
	add	%arg1, %arg2, %r1
	comb,>>=,n	%arg1, %arg0, 0
	comb,>>,n	%r1, %arg0, _LABEL_R(_go)
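
	/*
	 * Worked reading of the overlap test above (an interpretation
	 * of the two comb instructions, not from the original
	 * comments): %r1 is src + len.  If src >= dst (unsigned), the
	 * first comb skips the second, so we fall into the forward
	 * copier.  Otherwise the second comb branches to the reverse
	 * copier only when src + len > dst, i.e. only when the
	 * regions actually overlap with src below dst; non-overlapping
	 * regions are still copied forward.
	 */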

#define _LABEL _LABEL_F
	_COPY_FORWARD(%sr0,%arg1,%sr0,%arg0,%arg2)
#undef _LABEL

_LABEL_R(_go):
#define _LABEL _LABEL_R
	_COPY_REVERSE(%sr0,%arg1,%sr0,%arg0,%arg2)
#undef _LABEL

_LABEL_F(_done):
_LABEL_R(_done):
	bv,n	%r0(%rp)
	nop
#ifdef BCOPY
EXIT(bcopy)
#else
EXIT(memmove)
#endif
#endif /* BCOPY || MEMMOVE */