/*	$NetBSD: bzero.S,v 1.14 2013/09/12 15:36:15 joerg Exp $	*/

/*-
 * Copyright (C) 2001	Martin J. Laubach <mjl@NetBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*----------------------------------------------------------------------*/

#include <machine/asm.h>


#if defined(LIBC_SCCS) && !defined(lint)
__RCSID("$NetBSD: bzero.S,v 1.14 2013/09/12 15:36:15 joerg Exp $")
#endif /* LIBC_SCCS && !lint */

#include "assym.h"

#define USE_STSWX 0	/* don't. slower than trivial copy loop */

/*----------------------------------------------------------------------*/
/*
     void bzero(void *b %r3, size_t len %r4);
     void * memset(void *b %r3, int c %r4, size_t len %r5);
*/
/*----------------------------------------------------------------------*/
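/*
 * Overview: bzero() is memset() with a zero fill value.  For zero fills
 * of at least two cache lines, the destination is aligned bytewise to a
 * word boundary, then wordwise to a cache-line boundary, whole cache
 * lines are cleared with dcbz, and any tail falls through to the simple
 * fill.  Non-zero fills and short fills take the simple_fill path.
 */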

#define r_dst	%r3
#define r_len	%r4
#define r_val	%r0

	.text
	.align 4
ENTRY(bzero)
	li	r_val, 0		/* Value to stuff in */
	b	cb_memset
END(bzero)

ENTRY(memset)
	cmplwi	%cr1, %r5, 0
	mr.	%r0, %r4
	mr	%r8, %r3
	beqlr-	%cr1			/* Nothing to do */

	rlwimi	%r0, %r4, 8, 16, 23	/* word extend fill value */
	rlwimi	%r0, %r0, 16, 0, 15
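	/*
	 * %r0 now holds the low byte of the fill value replicated into all
	 * four bytes; roughly v = c & 0xff; v |= v << 8; v |= v << 16;
	 */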
	mr	%r4, %r5
	bne-	simple_fill		/* != 0, use trivial fill */
cb_memset:

/*----------------------------------------------------------------------*/
#ifndef _KERNEL
#ifdef __PIC__
	/* First get cache line size */
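	/*
	 * Under PIC there is no absolute address to load from, so the
	 * bcl/mflr idiom below is used: bcl 20,31,1f sets LR to the
	 * address of label 1, which anchors the PC-relative reference
	 * to _libc_powerpc_cache_info.
	 */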
	mflr	%r9
	bcl	20,31,1f
1:	mflr	%r10
	mtlr	%r9
	addis	%r10,%r10,_libc_powerpc_cache_info+CACHE_INFO_DCACHE_LINE_SIZE-1b@ha
	lwz	%r9,_libc_powerpc_cache_info+CACHE_INFO_DCACHE_LINE_SIZE-1b@l(%r10)
#else
	lis	%r10,_libc_powerpc_cache_info+CACHE_INFO_DCACHE_LINE_SIZE@ha
	lwz	%r9,_libc_powerpc_cache_info+CACHE_INFO_DCACHE_LINE_SIZE@l(%r10)
#endif
	cmplwi	%cr1, %r9, 0		/* Unknown? */
	beq-	%cr1, simple_fill	/* a trivial fill routine */
#else /* _KERNEL */
#ifdef MULTIPROCESSOR
	mfsprg	%r10, 0			/* Get cpu_info pointer */
#else
	lis	%r10, cpu_info_store@ha
	addi	%r10, %r10, cpu_info_store@l
#endif
	lwz	%r9, CPU_CI+CACHE_INFO_DCACHE_LINE_SIZE(%r10)	/* Load D$ line size */
#endif /* _KERNEL */
	cntlzw	%r10, %r9		/* Calculate shift.. */
	li	%r6, 31
	subf	%r10, %r10, %r6
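	/*
	 * %r10 = 31 - cntlzw(line size), i.e. log2 of the D-cache line
	 * size, assuming the line size is a power of two.
	 */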
	/* Back in memory filling business */

	cmplwi	%cr1, r_len, 0		/* Nothing to do? */
	add	%r5, %r9, %r9
	cmplw	r_len, %r5		/* <= 2*CL bytes to move? */
	beqlr-	%cr1			/* then do nothing */

	blt+	simple_fill		/* a trivial fill routine */
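	/*
	 * The 2*CL threshold presumably guarantees that, even after
	 * spending up to CL-1 bytes on alignment, at least one whole
	 * cache line remains for dcbz below.
	 */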

	/* Word align the block, fill bytewise until dst is word aligned */

	andi.	%r5, r_dst, 0x03
	li	%r6, 4
	beq+	cb_aligned_w		/* already aligned to word? */

	subf	%r5, %r5, %r6		/* bytes to fill to align4 */
#if USE_STSWX
	mtxer	%r5
	stswx	%r0, 0, r_dst
	add	r_dst, %r5, r_dst
#else
	mtctr	%r5

	subi	r_dst, r_dst, 1
1:	stbu	r_val, 1(r_dst)		/* Fill bytewise */
	bdnz	1b

	addi	r_dst, r_dst, 1
#endif
	subf	r_len, %r5, r_len

cb_aligned_w:	/* Cache block align, fill wordwise until dst aligned */

	/* We know there is work to do since r_len was >= 2*CL initially, */
	/* so there is no need to check for r_len == 0 */

	subi	%r6, %r9, 1		/* CL mask */
	and.	%r5, r_dst, %r6
	srwi	%r5, %r5, 2
	srwi	%r6, %r9, 2
	beq	cb_aligned_cb		/* already on CL boundary? */

	subf	%r5, %r5, %r6		/* words to fill to alignment */
	mtctr	%r5
	slwi	%r5, %r5, 2
	subf	r_len, %r5, r_len

	subi	r_dst, r_dst, 4
1:	stwu	r_val, 4(r_dst)		/* Fill wordwise */
	bdnz	1b
	addi	r_dst, r_dst, 4

cb_aligned_cb:	/* no need to check r_len, see above */

	srw.	%r5, r_len, %r10	/* Number of cache blocks */
	mtctr	%r5
	beq	cblocks_done

	slw	%r5, %r5, %r10
	subf	r_len, %r5, r_len

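	/*
	 * dcbz zeroes an entire cache block, which is why only the zero
	 * fill ever reaches this loop; a non-zero memset was diverted to
	 * simple_fill earlier.
	 */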
1:	dcbz	0, r_dst		/* Clear blockwise */
	add	r_dst, r_dst, %r9
	bdnz	1b

cblocks_done:	/* still CL aligned, but less than CL bytes left */
	cmplwi	%cr1, r_len, 0
	cmplwi	r_len, 8
	beq-	%cr1, sf_return

	blt-	sf_bytewise		/* <8 remaining? */
	b	sf_aligned_w

/*----------------------------------------------------------------------*/
wbzero:	li	r_val, 0

	cmplwi	r_len, 0
	beqlr-				/* Nothing to do */

simple_fill:
#if USE_STSWX
	cmplwi	%cr1, r_len, 12		/* < 12 bytes to move? */
#else
	cmplwi	%cr1, r_len, 8		/* < 8 bytes to move? */
#endif
	andi.	%r5, r_dst, 0x03	/* bytes to fill to align4 */
	blt	%cr1, sf_bytewise	/* trivial byte mover */

	li	%r6, 4
	subf	%r5, %r5, %r6
	beq+	sf_aligned_w		/* dest is word aligned */

#if USE_STSWX
	mtxer	%r5
	stswx	%r0, 0, r_dst
	add	r_dst, %r5, r_dst
#else
	mtctr	%r5			/* nope, then fill bytewise */
	subi	r_dst, r_dst, 1		/* until it is */
1:	stbu	r_val, 1(r_dst)
	bdnz	1b

	addi	r_dst, r_dst, 1
#endif
	subf	r_len, %r5, r_len

sf_aligned_w:	/* no need to check r_len since it was >= 8 bytes initially */
#if USE_STSWX
	mr	%r6, %r0
	mr	%r7, %r0

	srwi	%r5, r_len, 3
	mtctr	%r5

	slwi	%r5, %r5, 3		/* adjust len */
	subf.	r_len, %r5, r_len

1:	stswi	%r6, r_dst, 8
	addi	r_dst, r_dst, 8
	bdnz	1b
#else
	srwi	%r5, r_len, 2		/* words to fill */
	mtctr	%r5

	slwi	%r5, %r5, 2
	subf.	r_len, %r5, r_len	/* adjust len for fill */

	subi	r_dst, r_dst, 4
1:	stwu	r_val, 4(r_dst)
	bdnz	1b
	addi	r_dst, r_dst, 4
#endif

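	/*
	 * CR0 still holds the result of the subf. above: not-equal means
	 * a sub-word tail remains and must be stored bytewise.
	 */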
sf_word_done:	bne-	sf_bytewise

sf_return:	mr	%r3, %r8	/* restore orig ptr */
	blr				/* for memset functionality */

sf_bytewise:
#if USE_STSWX
	mr	%r5, %r0
	mr	%r6, %r0
	mr	%r7, %r0

	mtxer	r_len
	stswx	%r5, 0, r_dst
#else
	mtctr	r_len

	subi	r_dst, r_dst, 1
1:	stbu	r_val, 1(r_dst)
	bdnz	1b
#endif
	mr	%r3, %r8		/* restore orig ptr */
	blr				/* for memset functionality */
END(memset)

/*----------------------------------------------------------------------*/
#ifndef _KERNEL
	.data
	.p2align 2
cache_info:	.long -1, -1, -1, -1
cache_sh:	.long 0

#endif
/*----------------------------------------------------------------------*/