/*	$NetBSD: cache_r4k.c,v 1.8 2003/07/15 02:43:37 lukem Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cache_r4k.c,v 1.8 2003/07/15 02:43:37 lukem Exp $");

#include <sys/param.h>

#include <mips/cache.h>
#include <mips/cache_r4k.h>

/*
 * Cache operations for R4000/R4400-style caches:
 *
 *	- Direct-mapped
 *	- Write-back
 *	- Virtually indexed, physically tagged
 *
 * XXX Does not handle split secondary caches.
 */
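
/*
 * Most of the routines below follow the same pattern: the bulk of the
 * range is processed 32 cache lines at a time with the
 * cache_r4k_op_32lines_*() macros, and any remainder is finished one
 * line at a time with cache_op_r4k_line().
 */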

#define	round_line(x)		(((x) + 15) & ~15)
#define	trunc_line(x)		((x) & ~15)
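
/*
 * For example, with 16-byte lines, round_line(0x1234) == 0x1240 and
 * trunc_line(0x1234) == 0x1230; a partial line at either end of a
 * range is widened out to full cache-line boundaries.
 */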

/*
 * The `cache' instruction used by the cache-op macros is a MIPS III
 * (R4000) instruction; tell the assembler to accept it regardless of
 * the default ISA level for this file.
 */
__asm(".set mips3");

void
r4k_icache_sync_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

	mips_dcache_wbinv_all();

	__asm __volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}
}

void
r4k_icache_sync_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm __volatile("sync");

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm __volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
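	/*
	 * For example, with a (hypothetical) 16KB direct-mapped primary
	 * I-cache, mips_picache_way_mask would be 0x3fff, so the index
	 * bits of 0xc0012345 yield the KSEG0 address 0x80002345.
	 */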
	va = MIPS_PHYS_TO_KSEG0(orig_va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 16;
	}
}

void
r4k_pdcache_wbinv_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}
}

void
r4k_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_wbinv_range_index_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 16;
	}
}

void
r4k_pdcache_inv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 16;
	}
}

void
r4k_pdcache_wb_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 16;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 31) & ~31)
#define	trunc_line(x)		((x) & ~31)

void
r4k_icache_sync_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_size;

	mips_dcache_wbinv_all();

	__asm __volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}
}

void
r4k_icache_sync_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	mips_dcache_wb_range(va, (eva - va));

	__asm __volatile("sync");

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_icache_sync_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm __volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_pdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_pdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_pdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_pdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

void
r4k_sdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}

void
r4k_sdcache_wbinv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_wbinv_range_index_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}

void
r4k_sdcache_inv_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 32;
	}
}

void
r4k_sdcache_wb_range_32(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 32)) {
		cache_r4k_op_32lines_32(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 32);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 32;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + 127) & ~127)
#define	trunc_line(x)		((x) & ~127)

void
r4k_sdcache_wbinv_all_128(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}
}

void
r4k_sdcache_wbinv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 128;
	}
}

void
r4k_sdcache_inv_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += 128;
	}
}

void
r4k_sdcache_wb_range_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += 128;
	}
}

#undef round_line
#undef trunc_line

#define	round_line(x)		(((x) + mips_sdcache_line_size - 1) & ~(mips_sdcache_line_size - 1))
#define	trunc_line(x)		((x) & ~(mips_sdcache_line_size - 1))
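
/*
 * Note that these generic line-size masks assume mips_sdcache_line_size
 * is a power of two.
 */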

void
r4k_sdcache_wbinv_all_generic(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_sdcache_size;
	int line_size = mips_sdcache_line_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wbinv_range_index_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva;
	int line_size = mips_sdcache_line_size;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}

void
r4k_sdcache_inv_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_INV);
		va += line_size;
	}
}

void
r4k_sdcache_wb_range_generic(vaddr_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);
	int line_size = mips_sdcache_line_size;

	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_HIT_WB);
		va += line_size;
	}
}

#undef round_line
#undef trunc_line