/*	$NetBSD: cache_r4k.c,v 1.2 2001/11/14 18:26:23 thorpej Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>

#include <mips/cache.h>
#include <mips/cache_r4k.h>

/*
 * Cache operations for R4000/R4400-style caches:
 *
 *	- Direct-mapped
 *	- Write-back
 *	- Virtually indexed, physically tagged
 *
 * XXX Does not handle split secondary caches.
 */
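
/*
 * These routines are not called directly; the machine-dependent cache
 * configuration code installs the appropriate ones in the global
 * cache-ops table.  A minimal sketch of that wiring, assuming the
 * mips_cache_ops structure and mco_* member names from <mips/cache.h>
 * (the actual selection logic lives elsewhere and may differ):
 *
 *	switch (mips_picache_line_size) {
 *	case 16:
 *		mips_cache_ops.mco_icache_sync_all =
 *		    r4k_icache_sync_all_16;
 *		mips_cache_ops.mco_icache_sync_range =
 *		    r4k_icache_sync_range_16;
 *		mips_cache_ops.mco_icache_sync_range_index =
 *		    r4k_icache_sync_range_index_16;
 *		break;
 *	}
 *
 * The round_line()/trunc_line() macros below widen an address range
 * to cache line boundaries so that partial lines at either end are
 * still covered; e.g. with 16-byte lines, trunc_line(0x80000409) is
 * 0x80000400 and round_line(0x80000409) is 0x80000410.
 */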

#define	round_line(x)		(((x) + 15) & ~15)
#define	trunc_line(x)		((x) & ~15)
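
/*
 * The "cache" instruction used by these operations is a MIPS III
 * instruction, so tell the assembler to accept it.
 */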
__asm(".set mips3");

void
r4k_icache_sync_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;
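
	/*
	 * The I-cache does not snoop the D-cache, so write back any
	 * dirty D-cache lines first; the "sync" below ensures those
	 * write-backs complete before the invalidation begins.
	 */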
	mips_dcache_wbinv_all();

	__asm __volatile("sync");
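
	/*
	 * cache_r4k_op_32lines_16() issues a cache op on each of 32
	 * consecutive 16-byte lines, so every pass covers 512 bytes.
	 */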
	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}
}

void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm __volatile("sync");
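
	/*
	 * Invalidate 512-byte chunks (32 lines at a time) while we can,
	 * then finish any remainder one line at a time.
	 */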
	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm __volatile("sync");

	/*
	 * Index ops don't require the target address to be mapped (and
	 * the address we are handed here may not be), so take just the
	 * bits that determine the cache index and turn them into an
	 * always-valid KSEG0 address.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);
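	/*
	 * (Illustrative numbers only: with an 8KB direct-mapped I-cache
	 * the way mask is 0x1fff, so a va of 0xc0012345 yields the
	 * KSEG0 index address 0x80000345.)
	 */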

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}

void
r4k_pdcache_wbinv_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}
}

void
r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Index ops don't require the target address to be mapped (and
	 * the address we are handed here may not be), so take just the
	 * bits that determine the cache index and turn them into an
	 * always-valid KSEG0 address.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}
}

#undef round_line
#undef trunc_line
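
/*
 * Secondary-cache variants for a fixed 32-byte line size.
 */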

#define	round_line(x)		(((x) + 31) & ~31)
#define	trunc_line(x)		((x) & ~31)

void
r4k_sdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Index ops don't require the target address to be mapped (and
	 * the address we are handed here may not be), so take just the
	 * bits that determine the cache index and turn them into an
	 * always-valid KSEG0 address.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);
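
	/*
	 * Note that these are 32-byte secondary-cache lines, so the
	 * 32-byte helper must be used here; the 16-byte helper would
	 * touch only the first half of each 1KB pass.
	 */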
	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

#undef round_line
#undef trunc_line
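
/*
 * Generic secondary-cache variants: the line size is only known at run
 * time (mips_sdcache_line_size), so these loop one line per iteration
 * instead of using the unrolled 32-line helpers.
 */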

#define	round_line(x)		(((x) + mips_sdcache_line_size - 1) & ~(mips_sdcache_line_size - 1))
#define	trunc_line(x)		((x) & ~(mips_sdcache_line_size - 1))

void
r4k_sdcache_wbinv_all_generic(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += mips_sdcache_line_size;
	}
}

void
r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += mips_sdcache_line_size;
	}
}

void
r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Index ops don't require the target address to be mapped (and
	 * the address we are handed here may not be), so take just the
	 * bits that determine the cache index and turn them into an
	 * always-valid KSEG0 address.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += mips_sdcache_line_size;
	}
}

void
r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += mips_sdcache_line_size;
	}
}

void
r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += mips_sdcache_line_size;
	}
}

#undef round_line
#undef trunc_line