/*	$NetBSD: cache_r4k.c,v 1.4 2001/11/18 18:48:55 thorpej Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>

#include <mips/cache.h>
#include <mips/cache_r4k.h>

/*
 * Cache operations for R4000/R4400-style caches:
 *
 *	- Direct-mapped
 *	- Write-back
 *	- Virtually indexed, physically tagged
 *
 * XXX Does not handle split secondary caches.
 */

#define	round_line(x)		(((x) + 15) & ~15)
#define	trunc_line(x)		((x) & ~15)
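/*
 * Example (hypothetical address): with a 16-byte line,
 * trunc_line(0x80001234) == 0x80001230 and
 * round_line(0x80001234) == 0x80001240, i.e. a [va, va+size)
 * range is widened outward to whole-line boundaries before the
 * loops below walk it.
 */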

__asm(".set mips3");
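/*
 * The "cache" instruction used by these routines first appeared in
 * the MIPS III ISA (R4000), so the assembler is told above to accept
 * MIPS III instructions regardless of the default target ISA.
 */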

void
r4k_icache_sync_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

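	/*
	 * The I-cache refills from memory, so dirty D-cache lines must
	 * be written back first; the "sync" makes sure those writebacks
	 * complete before the invalidations begin.
	 */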
	mips_dcache_wbinv_all();

	__asm __volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}
}

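/*
 * Usage sketch (hypothetical caller): after storing instructions,
 * e.g. copying a signal trampoline to tva, the range must be pushed
 * out of the D-cache and purged from the I-cache before it may be
 * executed:
 *
 *	memcpy((void *)tva, trampoline, tlen);
 *	r4k_icache_sync_range_16(tva, tlen);
 */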
void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm __volatile("sync");

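	/*
	 * Process 32 lines (512 bytes) per step while at least that
	 * much of the range remains, then finish the tail one line at
	 * a time.  The same two-loop pattern recurs throughout this
	 * file with the line size adjusted.
	 */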
	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm __volatile("sync");

	/*
	 * Since we're doing Index ops, we expect not to be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
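	/*
	 * For example (hypothetical configuration): with an 8KB
	 * direct-mapped I-cache, mips_picache_way_mask would be
	 * 0x1fff, reducing any VA to its index bits and rebasing
	 * it into KSEG0, where the access cannot fault.
	 */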
	va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}

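/*
 * D-cache operation flavors used below (standard MIPS terminology):
 *
 *	wbinv	write dirty lines back to memory, then invalidate them;
 *		the safe default when cache and memory must agree
 *	inv	invalidate without writeback; only correct when memory
 *		already holds the authoritative data, e.g. after a
 *		device-to-memory DMA transfer completes
 *	wb	write dirty lines back but leave them valid, e.g.
 *		before starting a memory-to-device DMA transfer
 */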
void
r4k_pdcache_wbinv_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}
}

void
r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect not to be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
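	/*
	 * The primary D-cache is direct-mapped (see the header comment),
	 * so mips_pdcache_size - 1 is exactly the index mask.
	 */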
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 31) & ~31)
#define	trunc_line(x)		((x) & ~31)
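/*
 * Secondary-cache variants: the same loops as above, instantiated
 * for a 32-byte line, so each 32-line step now covers 1KB (32 * 32).
 */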

void
r4k_sdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect not to be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 127) & ~127)
#define	trunc_line(x)		((x) & ~127)

void
r4k_sdcache_wbinv_all_128(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}
}

void
r4k_sdcache_wbinv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect not to be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_inv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 128;
	}
}

void
r4k_sdcache_wb_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 128;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + mips_sdcache_line_size - 1) & ~(mips_sdcache_line_size - 1))
#define	trunc_line(x)		((x) & ~(mips_sdcache_line_size - 1))
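/*
 * Generic variants for line sizes not handled above.  These walk one
 * line per iteration instead of using the unrolled 32-line helpers.
 * Note that the mask arithmetic above assumes mips_sdcache_line_size
 * is a power of two.
 */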

void
r4k_sdcache_wbinv_all_generic(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += mips_sdcache_line_size;
	}
}

void
r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += mips_sdcache_line_size;
	}
}

void
r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect not to be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += mips_sdcache_line_size;
	}
}

void
r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += mips_sdcache_line_size;
	}
}

void
r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += mips_sdcache_line_size;
	}
}

#undef round_line
#undef trunc_line