/*	$NetBSD: cache_r4k.c,v 1.12 2015/06/09 15:58:38 macallan Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cache_r4k.c,v 1.12 2015/06/09 15:58:38 macallan Exp $");

#include <sys/param.h>

#include <mips/cpuregs.h>
#include <mips/cache.h>
#include <mips/cache_r4k.h>

/*
 * Cache operations for R4000/R4400-style caches:
 *
 *	- Direct-mapped
 *	- Write-back
 *	- Virtually indexed, physically tagged
 *
 * XXX Does not handle split secondary caches.
 */

#define	round_line(x)		(((x) + 15) & ~15)
#define	trunc_line(x)		((x) & ~15)
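/*
 * Example: with the 16-byte lines used in this block,
 * round_line(0x1234) == 0x1240 and trunc_line(0x1234) == 0x1230,
 * i.e. a requested range is widened to whole cache lines before
 * the loops below walk it.
 */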

__asm(".set mips3");

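/*
 * r4k_icache_sync_all_16:
 *
 *	Invalidate the entire primary instruction cache (16-byte lines)
 *	with index ops, after first writing the primary data cache back
 *	to memory so instruction fetches refill from current contents.
 */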
void
r4k_icache_sync_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_picache_size;

	mips_dcache_wbinv_all();

	__asm volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}
}

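/*
 * r4k_icache_sync_range_16:
 *
 *	Hit-invalidate the given range in the primary instruction cache,
 *	writing the data cache back first.  The main loop handles
 *	512-byte chunks (32 lines of 16 bytes per
 *	cache_r4k_op_32lines_16() call); the trailing loop finishes one
 *	line at a time.
 */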
void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

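/*
 * r4k_icache_sync_range_index_16:
 *
 *	As above, but with index ops, for when the supplied address may
 *	not be accessible; the cache index bits are turned into an
 *	equivalent KSEG0 address below.
 */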
void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(orig_va & mips_cache_info.mci_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}

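/*
 * r4k_pdcache_wbinv_all_16:
 *
 *	Write back and invalidate the entire primary data cache
 *	(16-byte lines) with index ops over a KSEG0 window.
 */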
void
r4k_pdcache_wbinv_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}
}

void
r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_cache_info.mci_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}

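/*
 * r4k_pdcache_inv_range_16:
 *
 *	Invalidate the given range in the primary data cache without
 *	writing it back.  The range is widened to whole lines, so dirty
 *	data sharing an edge line with the range is discarded as well.
 */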
void
r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

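/*
 * r4k_pdcache_wb_range_16:
 *
 *	Write the given range back to memory, leaving the lines valid
 *	in the primary data cache.
 */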
void
r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 31) & ~31)
#define	trunc_line(x)		((x) & ~31)

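/*
 * The same operations follow for primary caches with 32-byte lines:
 * each cache_r4k_op_32lines_32() call covers a 1024-byte chunk, and
 * the remainder loops step 32 bytes at a time.
 */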
void
r4k_icache_sync_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_picache_size;

	mips_dcache_wbinv_all();

	__asm volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}
}

void
r4k_icache_sync_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_icache_sync_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.  Use the original (untruncated)
	 * address, as the 16-byte variant does, so the rounding
	 * below covers the full span.
	 */
	va = MIPS_PHYS_TO_KSEG0(orig_va & mips_cache_info.mci_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_pdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_cache_info.mci_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_pdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

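/*
 * Secondary cache variants follow.  These use CACHE_R4K_SD ops and
 * the mci_sdcache_* parameters, but are otherwise structured like the
 * primary data cache routines above.
 */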
void
r4k_sdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_cache_info.mci_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 127) & ~127)
#define	trunc_line(x)		((x) & ~127)

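/*
 * Variants for secondary caches with 128-byte lines; each
 * cache_r4k_op_32lines_128() call covers a 4096-byte chunk.
 */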
void
r4k_sdcache_wbinv_all_128(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}
}

void
r4k_sdcache_wbinv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_cache_info.mci_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_inv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 128;
	}
}

void
r4k_sdcache_wb_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 128;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)	(((x) + mips_cache_info.mci_sdcache_line_size - 1) & ~(mips_cache_info.mci_sdcache_line_size - 1))
#define	trunc_line(x)	((x) & ~(mips_cache_info.mci_sdcache_line_size - 1))

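/*
 * Generic secondary cache variants: the line size is read from
 * mips_cache_info at run time, so these loops are not unrolled and
 * process one line per iteration.
 */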
void
r4k_sdcache_wbinv_all_generic(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_sdcache_size;
	int line_size = mips_cache_info.mci_sdcache_line_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_cache_info.mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva;
	int line_size = mips_cache_info.mci_sdcache_line_size;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_cache_info.mci_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_cache_info.mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_cache_info.mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += line_size;
	}
}

#undef round_line
#undef trunc_line