/*	$NetBSD: cache_r4k.c,v 1.6 2001/11/23 06:21:50 tsutsui Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>

#include <mips/cache.h>
#include <mips/cache_r4k.h>

/*
 * Cache operations for R4000/R4400-style caches:
 *
 *	- Direct-mapped
 *	- Write-back
 *	- Virtually indexed, physically tagged
 *
 * XXX Does not handle split secondary caches.
 */
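
/*
 * Added note: these routines are not called directly by MI code.  The
 * MD cache configuration code probes the cache and line sizes and then
 * installs the matching variant in the cache function pointers declared
 * in <mips/cache.h> (roughly along the lines of
 * "mips_cache_ops.mco_icache_sync_all = r4k_icache_sync_all_16;" --
 * see mips/mips/cache.c for the actual hook names, which may differ
 * between NetBSD versions).
 */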

#define	round_line(x)		(((x) + 15) & ~15)
#define	trunc_line(x)		((x) & ~15)
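
/*
 * round_line()/trunc_line() above align an address range to the cache
 * line size handled by the routines that follow; here that is the
 * 16-byte primary cache line size assumed by the *_16 variants.
 */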

__asm(".set mips3");
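
/*
 * ".set mips3" above lets the assembler accept MIPS-III instructions,
 * in particular the CACHE instruction used by the cache_r4k_op_*()
 * and cache_op_r4k_line() macros from <mips/cache_r4k.h>.
 */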

void
r4k_icache_sync_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

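	/*
	 * Write back (and invalidate) the D-cache and wait for the
	 * writes to reach memory ("sync") before touching the I-cache,
	 * so that subsequent instruction fetches see up-to-date data.
	 * The loop then index-invalidates the entire primary I-cache,
	 * 32 lines (32 * 16 bytes) per iteration.
	 */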
	mips_dcache_wbinv_all();

	__asm __volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}
}

void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm __volatile("sync");

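	/*
	 * Hit-invalidate 32 lines at a time while at least 32 lines
	 * (32 * 16 bytes) remain, then finish the tail one line at a
	 * time.  The same two-loop pattern is used throughout this
	 * file.
	 */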
	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm __volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}

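/*
 * Primary D-cache routines.  The D-cache is write-back, so the
 * "wbinv" operations write dirty lines back to memory as they
 * invalidate, "inv" discards lines without writing them back, and
 * "wb" writes them back without invalidating.
 */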
void
r4k_pdcache_wbinv_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}
}

void
r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
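	/* The cache is direct-mapped, so (size - 1) is the index mask. */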
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 31) & ~31)
#define	trunc_line(x)		((x) & ~31)
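
/*
 * The *_32 routines below mirror the *_16 routines above, differing
 * only in the 32-byte primary cache line size (and therefore in the
 * 32 * 32 byte per-iteration stride).
 */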

void
r4k_icache_sync_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

	mips_dcache_wbinv_all();

	__asm __volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}
}

void
r4k_icache_sync_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm __volatile("sync");

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_icache_sync_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm __volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_pdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_pdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

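/*
 * Secondary cache (CACHE_R4K_SD) routines, 32-byte line size.  These
 * follow the same pattern as the primary D-cache routines above.
 */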
void
r4k_sdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 127) & ~127)
#define	trunc_line(x)		((x) & ~127)
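
/*
 * As above, but for secondary caches with a 128-byte line size.
 */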

void
r4k_sdcache_wbinv_all_128(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}
}

void
r4k_sdcache_wbinv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_inv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 128;
	}
}

void
r4k_sdcache_wb_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 128;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + mips_sdcache_line_size - 1) & ~(mips_sdcache_line_size - 1))
#define	trunc_line(x)		((x) & ~(mips_sdcache_line_size - 1))
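
/*
 * Generic secondary cache routines for line sizes that do not have a
 * dedicated unrolled variant above.  The line size is read from
 * mips_sdcache_line_size at run time, so these operate on one line
 * per CACHE op rather than 32 lines per loop iteration.
 */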

void
r4k_sdcache_wbinv_all_generic(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;
	int line_size = mips_sdcache_line_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva;
	int line_size = mips_sdcache_line_size;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += line_size;
	}
}

#undef round_line
#undef trunc_line