/*	$NetBSD: cache_r10k.c,v 1.5 2011/02/20 07:45:47 matt Exp $	*/

/*-
 * Copyright (c) 2003 Takao Shinohara.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>

#include <mips/cache.h>
#include <mips/cache_r4k.h>
#include <mips/cache_r10k.h>

/*
 * Cache operations for R10000-style caches:
 *
 *	2-way, write-back
 *	primary cache: virtual index/physical tag
 *	secondary cache: physical index/physical tag
 */
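/*
 * The CACHE instruction emitted by cache_op_r4k_line() is a MIPS III
 * instruction; tell the assembler to accept MIPS III encodings here
 * regardless of the default ISA level.
 */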
__asm(".set mips3");
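/*
 * The R10000 primary instruction cache uses 64-byte lines.
 */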
#define	round_line(x)	(((x) + 64 - 1) & ~(64 - 1))
#define	trunc_line(x)	((x) & ~(64 - 1))

void
r10k_icache_sync_all(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mci->mci_picache_way_size;

	mips_dcache_wbinv_all();

	__asm volatile("sync");

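	/*
	 * Index Invalidate every line in the cache; on the 2-way
	 * primary caches the low address bit selects the way, so
	 * va+0 targets way 0 and va+1 targets way 1 of each index.
	 */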
	while (va < eva) {
		cache_op_r4k_line(va+0, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 64;
	}
}

void
r10k_icache_sync_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 64;
	}
}

void
r10k_icache_sync_range_index(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm volatile("sync");

	/*
	 * Since we're doing Index ops, the address we've been given
	 * may not be accessible.  So, take the bits that determine
	 * the cache index and build a KSEG0 address from them.
	 */
	va = MIPS_PHYS_TO_KSEG0(orig_va & mci->mci_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

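	/* As above, va+0 and va+1 select way 0 and way 1 of each index. */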
	while (va < eva) {
		cache_op_r4k_line(va+0, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 64;
	}
}

#undef round_line
#undef trunc_line

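/*
 * The R10000 primary data cache uses 32-byte lines.
 */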
#define	round_line(x)	(((x) + 32 - 1) & ~(32 - 1))
#define	trunc_line(x)	((x) & ~(32 - 1))

void
r10k_pdcache_wbinv_all(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mci->mci_pdcache_way_size;

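	/*
	 * Index Writeback Invalidate every line; as in the I-cache
	 * loops, the low address bit selects the way on the 2-way
	 * primary data cache.
	 */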
	while (va < eva) {
		cache_op_r4k_line(va+0, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r10k_pdcache_wbinv_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r10k_pdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, the address we've been given
	 * may not be accessible.  So, take the bits that determine
	 * the cache index and build a KSEG0 address from them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_pdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va+0, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r10k_pdcache_inv_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r10k_pdcache_wb_range(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		/* R10000 does not support HitWriteBack operation */
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

#undef round_line
#undef trunc_line

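/*
 * The secondary cache block size is board-dependent (the R10000 allows
 * 64- or 128-byte blocks), so it is taken from mips_cache_info at run
 * time rather than hard-coded.
 */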
#define	round_line(x)	(((x) + mci->mci_sdcache_line_size - 1) & ~(mci->mci_sdcache_line_size - 1))
#define	trunc_line(x)	((x) & ~(mci->mci_sdcache_line_size - 1))

void
r10k_sdcache_wbinv_all(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mci->mci_sdcache_way_size;
	vsize_t line_size = mci->mci_sdcache_line_size;

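	/*
	 * The secondary cache is also 2-way; va+0 and va+1 are intended
	 * to select the two ways of each index, mirroring the primary
	 * cache loops above.
	 */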
	while (va < eva) {
		cache_op_r4k_line(va+0, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r10k_sdcache_wbinv_range(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

void
r10k_sdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva;
	vsize_t line_size = mci->mci_sdcache_line_size;

	/*
	 * Since we're doing Index ops, the address we've been given
	 * may not be accessible.  So, take the bits that determine
	 * the cache index and build a KSEG0 address from them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_sdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va+0, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r10k_sdcache_inv_range(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}

void
r10k_sdcache_wb_range(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		/* R10000 does not support HitWriteBack operation */
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

#undef round_line
#undef trunc_line