/*	$NetBSD: cache_r4k.c,v 1.5 2001/11/20 06:32:21 shin Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
37 1.2 thorpej
38 1.2 thorpej #include <sys/param.h>
39 1.2 thorpej
40 1.2 thorpej #include <mips/cache.h>
41 1.2 thorpej #include <mips/cache_r4k.h>
42 1.2 thorpej
/*
 * Cache operations for R4000/R4400-style caches:
 *
 *	- Direct-mapped
 *	- Write-back
 *	- Virtually indexed, physically tagged
 *
 * XXX Does not handle split secondary caches.
 */

/* Round/truncate an address to a 16-byte cache line boundary. */
#define	round_line(x)	(((x) + 15) & ~15)
#define	trunc_line(x)	((x) & ~15)

/* The `cache' instruction requires the MIPS-III ISA level. */
__asm(".set mips3");

58 1.2 thorpej void
59 1.2 thorpej r4k_icache_sync_all_16(void)
60 1.2 thorpej {
61 1.2 thorpej vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
62 1.2 thorpej vaddr_t eva = va + mips_picache_size;
63 1.2 thorpej
64 1.2 thorpej mips_dcache_wbinv_all();
65 1.2 thorpej
66 1.2 thorpej __asm __volatile("sync");
67 1.2 thorpej
68 1.2 thorpej while (va < eva) {
69 1.2 thorpej cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
70 1.2 thorpej va += (32 * 16);
71 1.2 thorpej }
72 1.2 thorpej }
73 1.2 thorpej
74 1.2 thorpej void
75 1.2 thorpej r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
76 1.2 thorpej {
77 1.2 thorpej vaddr_t eva = round_line(va + size);
78 1.2 thorpej
79 1.2 thorpej va = trunc_line(va);
80 1.2 thorpej
81 1.2 thorpej mips_dcache_wb_range(va, (eva - va));
82 1.2 thorpej
83 1.2 thorpej __asm __volatile("sync");
84 1.2 thorpej
85 1.2 thorpej while ((eva - va) >= (32 * 16)) {
86 1.2 thorpej cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
87 1.2 thorpej va += (32 * 16);
88 1.2 thorpej }
89 1.2 thorpej
90 1.2 thorpej while (va < eva) {
91 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
92 1.2 thorpej va += 16;
93 1.2 thorpej }
94 1.2 thorpej }
95 1.2 thorpej
96 1.2 thorpej void
97 1.2 thorpej r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
98 1.2 thorpej {
99 1.2 thorpej vaddr_t eva;
100 1.2 thorpej
101 1.2 thorpej eva = round_line(va + size);
102 1.2 thorpej va = trunc_line(va);
103 1.2 thorpej
104 1.2 thorpej mips_dcache_wbinv_range_index(va, (eva - va));
105 1.2 thorpej
106 1.2 thorpej __asm __volatile("sync");
107 1.2 thorpej
108 1.2 thorpej /*
109 1.2 thorpej * Since we're doing Index ops, we expect to not be able
110 1.2 thorpej * to access the address we've been given. So, get the
111 1.2 thorpej * bits that determine the cache index, and make a KSEG0
112 1.2 thorpej * address out of them.
113 1.2 thorpej */
114 1.2 thorpej va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);
115 1.2 thorpej
116 1.2 thorpej eva = round_line(va + size);
117 1.2 thorpej va = trunc_line(va);
118 1.2 thorpej
119 1.2 thorpej while ((eva - va) >= (32 * 16)) {
120 1.2 thorpej cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
121 1.2 thorpej va += (32 * 16);
122 1.2 thorpej }
123 1.2 thorpej
124 1.2 thorpej while (va < eva) {
125 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
126 1.2 thorpej va += 16;
127 1.2 thorpej }
128 1.2 thorpej }
129 1.2 thorpej
130 1.2 thorpej void
131 1.2 thorpej r4k_pdcache_wbinv_all_16(void)
132 1.2 thorpej {
133 1.2 thorpej vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
134 1.2 thorpej vaddr_t eva = va + mips_pdcache_size;
135 1.2 thorpej
136 1.2 thorpej while (va < eva) {
137 1.2 thorpej cache_r4k_op_32lines_16(va,
138 1.2 thorpej CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
139 1.2 thorpej va += (32 * 16);
140 1.2 thorpej }
141 1.2 thorpej }
142 1.2 thorpej
143 1.2 thorpej void
144 1.2 thorpej r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
145 1.2 thorpej {
146 1.2 thorpej vaddr_t eva = round_line(va + size);
147 1.2 thorpej
148 1.2 thorpej va = trunc_line(va);
149 1.2 thorpej
150 1.2 thorpej while ((eva - va) >= (32 * 16)) {
151 1.2 thorpej cache_r4k_op_32lines_16(va,
152 1.2 thorpej CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
153 1.2 thorpej va += (32 * 16);
154 1.2 thorpej }
155 1.2 thorpej
156 1.2 thorpej while (va < eva) {
157 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
158 1.2 thorpej va += 16;
159 1.2 thorpej }
160 1.2 thorpej }
161 1.2 thorpej
162 1.2 thorpej void
163 1.2 thorpej r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
164 1.2 thorpej {
165 1.2 thorpej vaddr_t eva;
166 1.2 thorpej
167 1.2 thorpej /*
168 1.2 thorpej * Since we're doing Index ops, we expect to not be able
169 1.2 thorpej * to access the address we've been given. So, get the
170 1.2 thorpej * bits that determine the cache index, and make a KSEG0
171 1.2 thorpej * address out of them.
172 1.2 thorpej */
173 1.2 thorpej va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));
174 1.2 thorpej
175 1.2 thorpej eva = round_line(va + size);
176 1.2 thorpej va = trunc_line(va);
177 1.2 thorpej
178 1.2 thorpej while ((eva - va) >= (32 * 16)) {
179 1.2 thorpej cache_r4k_op_32lines_16(va,
180 1.2 thorpej CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
181 1.2 thorpej va += (32 * 16);
182 1.2 thorpej }
183 1.2 thorpej
184 1.2 thorpej while (va < eva) {
185 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
186 1.2 thorpej va += 16;
187 1.2 thorpej }
188 1.2 thorpej }
189 1.2 thorpej
190 1.2 thorpej void
191 1.2 thorpej r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
192 1.2 thorpej {
193 1.2 thorpej vaddr_t eva = round_line(va + size);
194 1.2 thorpej
195 1.2 thorpej va = trunc_line(va);
196 1.2 thorpej
197 1.2 thorpej while ((eva - va) >= (32 * 16)) {
198 1.2 thorpej cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
199 1.2 thorpej va += (32 * 16);
200 1.2 thorpej }
201 1.2 thorpej
202 1.2 thorpej while (va < eva) {
203 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
204 1.2 thorpej va += 16;
205 1.2 thorpej }
206 1.2 thorpej }
207 1.2 thorpej
208 1.2 thorpej void
209 1.2 thorpej r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
210 1.2 thorpej {
211 1.2 thorpej vaddr_t eva = round_line(va + size);
212 1.2 thorpej
213 1.2 thorpej va = trunc_line(va);
214 1.2 thorpej
215 1.2 thorpej while ((eva - va) >= (32 * 16)) {
216 1.2 thorpej cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
217 1.2 thorpej va += (32 * 16);
218 1.2 thorpej }
219 1.2 thorpej
220 1.2 thorpej while (va < eva) {
221 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
222 1.2 thorpej va += 16;
223 1.2 thorpej }
224 1.2 thorpej }
225 1.2 thorpej
#undef round_line
#undef trunc_line

/* Round/truncate an address to a 32-byte cache line boundary. */
#define	round_line(x)	(((x) + 31) & ~31)
#define	trunc_line(x)	((x) & ~31)

232 1.2 thorpej void
233 1.2 thorpej r4k_sdcache_wbinv_all_32(void)
234 1.2 thorpej {
235 1.2 thorpej vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
236 1.2 thorpej vaddr_t eva = va + mips_sdcache_size;
237 1.2 thorpej
238 1.2 thorpej while (va < eva) {
239 1.2 thorpej cache_r4k_op_32lines_32(va,
240 1.2 thorpej CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
241 1.2 thorpej va += (32 * 32);
242 1.2 thorpej }
243 1.2 thorpej }
244 1.2 thorpej
245 1.2 thorpej void
246 1.2 thorpej r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
247 1.2 thorpej {
248 1.2 thorpej vaddr_t eva = round_line(va + size);
249 1.2 thorpej
250 1.2 thorpej va = trunc_line(va);
251 1.2 thorpej
252 1.2 thorpej while ((eva - va) >= (32 * 32)) {
253 1.2 thorpej cache_r4k_op_32lines_32(va,
254 1.2 thorpej CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
255 1.2 thorpej va += (32 * 32);
256 1.2 thorpej }
257 1.2 thorpej
258 1.2 thorpej while (va < eva) {
259 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
260 1.2 thorpej va += 32;
261 1.2 thorpej }
262 1.2 thorpej }
263 1.2 thorpej
264 1.2 thorpej void
265 1.2 thorpej r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
266 1.2 thorpej {
267 1.2 thorpej vaddr_t eva;
268 1.2 thorpej
269 1.2 thorpej /*
270 1.2 thorpej * Since we're doing Index ops, we expect to not be able
271 1.2 thorpej * to access the address we've been given. So, get the
272 1.2 thorpej * bits that determine the cache index, and make a KSEG0
273 1.2 thorpej * address out of them.
274 1.2 thorpej */
275 1.2 thorpej va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));
276 1.2 thorpej
277 1.2 thorpej eva = round_line(va + size);
278 1.2 thorpej va = trunc_line(va);
279 1.2 thorpej
280 1.2 thorpej while ((eva - va) >= (32 * 32)) {
281 1.4 thorpej cache_r4k_op_32lines_32(va,
282 1.2 thorpej CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
283 1.2 thorpej va += (32 * 32);
284 1.2 thorpej }
285 1.2 thorpej
286 1.2 thorpej while (va < eva) {
287 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
288 1.2 thorpej va += 32;
289 1.2 thorpej }
290 1.2 thorpej }
291 1.2 thorpej
292 1.2 thorpej void
293 1.2 thorpej r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
294 1.2 thorpej {
295 1.2 thorpej vaddr_t eva = round_line(va + size);
296 1.2 thorpej
297 1.2 thorpej va = trunc_line(va);
298 1.2 thorpej
299 1.2 thorpej while ((eva - va) >= (32 * 32)) {
300 1.2 thorpej cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
301 1.2 thorpej va += (32 * 32);
302 1.2 thorpej }
303 1.2 thorpej
304 1.2 thorpej while (va < eva) {
305 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
306 1.2 thorpej va += 32;
307 1.2 thorpej }
308 1.2 thorpej }
309 1.2 thorpej
310 1.2 thorpej void
311 1.2 thorpej r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
312 1.2 thorpej {
313 1.2 thorpej vaddr_t eva = round_line(va + size);
314 1.2 thorpej
315 1.2 thorpej va = trunc_line(va);
316 1.2 thorpej
317 1.2 thorpej while ((eva - va) >= (32 * 32)) {
318 1.2 thorpej cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
319 1.2 thorpej va += (32 * 32);
320 1.2 thorpej }
321 1.2 thorpej
322 1.2 thorpej while (va < eva) {
323 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
324 1.2 thorpej va += 32;
325 1.3 thorpej }
326 1.3 thorpej }
327 1.3 thorpej
#undef round_line
#undef trunc_line

/* Round/truncate an address to a 128-byte cache line boundary. */
#define	round_line(x)	(((x) + 127) & ~127)
#define	trunc_line(x)	((x) & ~127)

334 1.3 thorpej void
335 1.3 thorpej r4k_sdcache_wbinv_all_128(void)
336 1.3 thorpej {
337 1.3 thorpej vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
338 1.3 thorpej vaddr_t eva = va + mips_sdcache_size;
339 1.3 thorpej
340 1.3 thorpej while (va < eva) {
341 1.3 thorpej cache_r4k_op_32lines_128(va,
342 1.3 thorpej CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
343 1.3 thorpej va += (32 * 128);
344 1.3 thorpej }
345 1.3 thorpej }
346 1.3 thorpej
347 1.3 thorpej void
348 1.3 thorpej r4k_sdcache_wbinv_range_128(vaddr_t va, vsize_t size)
349 1.3 thorpej {
350 1.3 thorpej vaddr_t eva = round_line(va + size);
351 1.3 thorpej
352 1.3 thorpej va = trunc_line(va);
353 1.3 thorpej
354 1.3 thorpej while ((eva - va) >= (32 * 128)) {
355 1.3 thorpej cache_r4k_op_32lines_128(va,
356 1.3 thorpej CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
357 1.3 thorpej va += (32 * 128);
358 1.3 thorpej }
359 1.3 thorpej
360 1.3 thorpej while (va < eva) {
361 1.3 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
362 1.3 thorpej va += 128;
363 1.3 thorpej }
364 1.3 thorpej }
365 1.3 thorpej
366 1.3 thorpej void
367 1.3 thorpej r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
368 1.3 thorpej {
369 1.3 thorpej vaddr_t eva;
370 1.3 thorpej
371 1.3 thorpej /*
372 1.3 thorpej * Since we're doing Index ops, we expect to not be able
373 1.3 thorpej * to access the address we've been given. So, get the
374 1.3 thorpej * bits that determine the cache index, and make a KSEG0
375 1.3 thorpej * address out of them.
376 1.3 thorpej */
377 1.3 thorpej va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));
378 1.3 thorpej
379 1.3 thorpej eva = round_line(va + size);
380 1.3 thorpej va = trunc_line(va);
381 1.3 thorpej
382 1.3 thorpej while ((eva - va) >= (32 * 128)) {
383 1.3 thorpej cache_r4k_op_32lines_128(va,
384 1.3 thorpej CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
385 1.3 thorpej va += (32 * 128);
386 1.3 thorpej }
387 1.3 thorpej
388 1.3 thorpej while (va < eva) {
389 1.3 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
390 1.3 thorpej va += 128;
391 1.3 thorpej }
392 1.3 thorpej }
393 1.3 thorpej
394 1.3 thorpej void
395 1.3 thorpej r4k_sdcache_inv_range_128(vaddr_t va, vsize_t size)
396 1.3 thorpej {
397 1.3 thorpej vaddr_t eva = round_line(va + size);
398 1.3 thorpej
399 1.3 thorpej va = trunc_line(va);
400 1.3 thorpej
401 1.3 thorpej while ((eva - va) >= (32 * 128)) {
402 1.3 thorpej cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
403 1.3 thorpej va += (32 * 128);
404 1.3 thorpej }
405 1.3 thorpej
406 1.3 thorpej while (va < eva) {
407 1.3 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
408 1.3 thorpej va += 128;
409 1.3 thorpej }
410 1.3 thorpej }
411 1.3 thorpej
412 1.3 thorpej void
413 1.3 thorpej r4k_sdcache_wb_range_128(vaddr_t va, vsize_t size)
414 1.3 thorpej {
415 1.3 thorpej vaddr_t eva = round_line(va + size);
416 1.3 thorpej
417 1.3 thorpej va = trunc_line(va);
418 1.3 thorpej
419 1.3 thorpej while ((eva - va) >= (32 * 128)) {
420 1.3 thorpej cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
421 1.3 thorpej va += (32 * 128);
422 1.3 thorpej }
423 1.3 thorpej
424 1.3 thorpej while (va < eva) {
425 1.3 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
426 1.3 thorpej va += 128;
427 1.2 thorpej }
428 1.2 thorpej }
429 1.2 thorpej
#undef round_line
#undef trunc_line

/*
 * Round/truncate to the run-time secondary cache line size.
 * Assumes mips_sdcache_line_size is a power of two.
 */
#define	round_line(x)	(((x) + mips_sdcache_line_size - 1) & ~(mips_sdcache_line_size - 1))
#define	trunc_line(x)	((x) & ~(mips_sdcache_line_size - 1))

436 1.2 thorpej void
437 1.2 thorpej r4k_sdcache_wbinv_all_generic(void)
438 1.2 thorpej {
439 1.2 thorpej vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
440 1.2 thorpej vaddr_t eva = va + mips_sdcache_size;
441 1.5 shin int line_size = mips_sdcache_line_size;
442 1.2 thorpej
443 1.2 thorpej while (va < eva) {
444 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
445 1.5 shin va += line_size;
446 1.2 thorpej }
447 1.2 thorpej }
448 1.2 thorpej
449 1.2 thorpej void
450 1.2 thorpej r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
451 1.2 thorpej {
452 1.2 thorpej vaddr_t eva = round_line(va + size);
453 1.5 shin int line_size = mips_sdcache_line_size;
454 1.2 thorpej
455 1.2 thorpej va = trunc_line(va);
456 1.2 thorpej
457 1.2 thorpej while (va < eva) {
458 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
459 1.5 shin va += line_size;
460 1.2 thorpej }
461 1.2 thorpej }
462 1.2 thorpej
463 1.2 thorpej void
464 1.2 thorpej r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
465 1.2 thorpej {
466 1.2 thorpej vaddr_t eva;
467 1.5 shin int line_size = mips_sdcache_line_size;
468 1.2 thorpej
469 1.2 thorpej /*
470 1.2 thorpej * Since we're doing Index ops, we expect to not be able
471 1.2 thorpej * to access the address we've been given. So, get the
472 1.2 thorpej * bits that determine the cache index, and make a KSEG0
473 1.2 thorpej * address out of them.
474 1.2 thorpej */
475 1.2 thorpej va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));
476 1.2 thorpej
477 1.2 thorpej eva = round_line(va + size);
478 1.2 thorpej va = trunc_line(va);
479 1.2 thorpej
480 1.2 thorpej while (va < eva) {
481 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
482 1.5 shin va += line_size;
483 1.2 thorpej }
484 1.2 thorpej }
485 1.2 thorpej
486 1.2 thorpej void
487 1.2 thorpej r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
488 1.2 thorpej {
489 1.2 thorpej vaddr_t eva = round_line(va + size);
490 1.5 shin int line_size = mips_sdcache_line_size;
491 1.2 thorpej
492 1.2 thorpej va = trunc_line(va);
493 1.2 thorpej
494 1.2 thorpej while (va < eva) {
495 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
496 1.5 shin va += line_size;
497 1.2 thorpej }
498 1.2 thorpej }
499 1.2 thorpej
500 1.2 thorpej void
501 1.2 thorpej r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
502 1.2 thorpej {
503 1.2 thorpej vaddr_t eva = round_line(va + size);
504 1.5 shin int line_size = mips_sdcache_line_size;
505 1.2 thorpej
506 1.2 thorpej va = trunc_line(va);
507 1.2 thorpej
508 1.2 thorpej while (va < eva) {
509 1.2 thorpej cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
510 1.5 shin va += line_size;
511 1.2 thorpej }
512 1.2 thorpej }
513 1.2 thorpej
#undef round_line
#undef trunc_line