/* $NetBSD: cache_r10k.c,v 1.8 2016/07/13 21:25:15 macallan Exp $ */

/*-
 * Copyright (c) 2003 Takao Shinohara.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>

#include <mips/cpuregs.h>
#include <mips/cache.h>
#include <mips/cache_r4k.h>
#include <mips/cache_r10k.h>

/*
 * Cache operations for R10000-style caches:
 *
 *	2-way, write-back
 *	primary cache: virtual index/physical tag
 *	secondary cache: physical index/physical tag
 */

__asm(".set mips3");

#define round_line(x) (((x) + 64 - 1) & ~(64 - 1))
#define trunc_line(x) ((x) & ~(64 - 1))
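
/*
 * A note on the loops in the "sync_all" and "range_index" routines
 * below: each indexed CACHE op is issued twice, at va and then at
 * va + 1, before va is advanced by a full line.  The R10000 selects
 * the way for indexed ops with the low-order address bit, so doing
 * the op at both addresses covers both ways of the 2-way caches while
 * walking only one way's worth of addresses.
 */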

void
r10k_icache_sync_all(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mci->mci_picache_way_size;

	mips_dcache_wbinv_all();

	__asm volatile("sync");

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 63;
	}
}

void
r10k_icache_sync_range(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm volatile("sync");

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 64;
	}
}
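
/*
 * Index variant: the caller's address may no longer be mapped, so only
 * its cache-index bits are used.  For example, with the R10000's 32KB
 * two-way primary I-cache, mci_picache_way_size is 16KB and
 * mci_picache_way_mask is 0x3fff; masking with it and rebuilding a
 * KSEG0 address yields an address that is safe to use for indexed ops.
 */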

void
r10k_icache_sync_range_index(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(orig_va & mci->mci_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 63;
	}
}

#undef round_line
#undef trunc_line
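
/*
 * Primary data cache.  The R10000 pdcache uses 32-byte lines (the
 * primary I-cache above uses 64-byte lines), so the rounding macros
 * are redefined accordingly.
 */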

#define round_line(x) (((x) + 32 - 1) & ~(32 - 1))
#define trunc_line(x) ((x) & ~(32 - 1))

void
r10k_pdcache_wbinv_all(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mci->mci_pdcache_way_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 31;
	}
}

void
r10k_pdcache_wbinv_range(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r10k_pdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_pdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 31;
	}
}

void
r10k_pdcache_inv_range(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r10k_pdcache_wb_range(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while (va < eva) {
		/* R10000 does not support HitWriteBack operation */
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

#undef round_line
#undef trunc_line
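
/*
 * Secondary (off-chip) cache.  Its line size is not fixed by the CPU
 * core but configured by the system (commonly 64 or 128 bytes), so the
 * rounding macros take it from mips_cache_info at run time.
 */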

#define round_line(x) (((x) + mci->mci_sdcache_line_size - 1) & ~(mci->mci_sdcache_line_size - 1))
#define trunc_line(x) ((x) & ~(mci->mci_sdcache_line_size - 1))

void
r10k_sdcache_wbinv_all(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mci->mci_sdcache_way_size;
	vsize_t line_size = mci->mci_sdcache_line_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size - 1;
	}
}

void
r10k_sdcache_wbinv_range(register_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

void
r10k_sdcache_wbinv_range_index(vaddr_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva;
	vsize_t line_size = mci->mci_sdcache_line_size;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_sdcache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va++;
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size - 1;
	}
}

void
r10k_sdcache_inv_range(register_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}

void
r10k_sdcache_wb_range(register_t va, vsize_t size)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva = round_line(va + size);
	vsize_t line_size = mci->mci_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		/* R10000 does not support HitWriteBack operation */
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

#undef round_line
#undef trunc_line