/*	$NetBSD: cache_r4k.h,v 1.7 2002/03/05 14:32:26 simonb Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cache definitions/operations for R4000-style caches.
 */

#define	CACHE_R4K_I			0
#define	CACHE_R4K_D			1
#define	CACHE_R4K_SI			2
#define	CACHE_R4K_SD			3

#define	CACHEOP_R4K_INDEX_INV		(0 << 2)	/* I, SI */
#define	CACHEOP_R4K_INDEX_WB_INV	(0 << 2)	/* D, SD */
#define	CACHEOP_R4K_INDEX_LOAD_TAG	(1 << 2)	/* all */
#define	CACHEOP_R4K_INDEX_STORE_TAG	(2 << 2)	/* all */
#define	CACHEOP_R4K_CREATE_DIRTY_EXCL	(3 << 2)	/* D, SD */
#define	CACHEOP_R4K_HIT_INV		(4 << 2)	/* all */
#define	CACHEOP_R4K_HIT_WB_INV		(5 << 2)	/* D, SD */
#define	CACHEOP_R4K_FILL		(5 << 2)	/* I */
#define	CACHEOP_R4K_HIT_WB		(6 << 2)	/* I, D, SD */
#define	CACHEOP_R4K_HIT_SET_VIRTUAL	(7 << 2)	/* SI, SD */

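/*
 * The op argument given to the CACHE instruction (and to the macros
 * below) is the OR of one of the operation codes above (bits 4..2,
 * hence the "<< 2") and one of the target-cache selectors (bits 1..0),
 * for example CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV or
 * CACHE_R4K_I|CACHEOP_R4K_INDEX_INV.
 */
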
#if defined(_KERNEL) && !defined(_LOCORE)

/*
 * cache_op_r4k_line:
 *
 *	Perform the specified cache operation on a single line.
 */
#define	cache_op_r4k_line(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0(%0)				\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

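/*
 * Usage sketch only -- `example_dcache_wbinv_line' is a hypothetical
 * helper, not part of the interface declared by this file: write back
 * and invalidate the single primary D-cache line containing `va'.
 */
static __inline void
example_dcache_wbinv_line(vaddr_t va)
{

	cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
}
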
/*
 * cache_r4k_op_32lines_16:
 *
 *	Perform the specified cache operation on 32 16-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_16(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x010(%0);	\n\t"	\
		"cache %1, 0x020(%0); cache %1, 0x030(%0);	\n\t"	\
		"cache %1, 0x040(%0); cache %1, 0x050(%0);	\n\t"	\
		"cache %1, 0x060(%0); cache %1, 0x070(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x090(%0);	\n\t"	\
		"cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);	\n\t"	\
		"cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);	\n\t"	\
		"cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x110(%0);	\n\t"	\
		"cache %1, 0x120(%0); cache %1, 0x130(%0);	\n\t"	\
		"cache %1, 0x140(%0); cache %1, 0x150(%0);	\n\t"	\
		"cache %1, 0x160(%0); cache %1, 0x170(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x190(%0);	\n\t"	\
		"cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);	\n\t"	\
		"cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);	\n\t"	\
		"cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

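/*
 * Illustrative sketch only -- `example_pdcache_wbinv_range_16' is a
 * hypothetical helper, not part of the interface declared here.  It
 * shows one plausible way the block macro above is used to cover an
 * arbitrary range: round to 16-byte line boundaries, consume the range
 * in 512-byte (32 line) blocks, then finish one line at a time.
 */
static __inline void
example_pdcache_wbinv_range_16(vaddr_t va, vsize_t size)
{
	vaddr_t eva = (va + size + 15) & ~(vaddr_t)15;	/* round end up */

	va &= ~(vaddr_t)15;				/* truncate start */

	while ((eva - va) >= (32 * 16)) {
		cache_r4k_op_32lines_16(va,
		    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += (32 * 16);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
		va += 16;
	}
}
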
/*
 * cache_r4k_op_32lines_32:
 *
 *	Perform the specified cache operation on 32 32-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_32(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x020(%0);	\n\t"	\
		"cache %1, 0x040(%0); cache %1, 0x060(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\n\t"	\
		"cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x120(%0);	\n\t"	\
		"cache %1, 0x140(%0); cache %1, 0x160(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\n\t"	\
		"cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\n\t"	\
		"cache %1, 0x200(%0); cache %1, 0x220(%0);	\n\t"	\
		"cache %1, 0x240(%0); cache %1, 0x260(%0);	\n\t"	\
		"cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\n\t"	\
		"cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\n\t"	\
		"cache %1, 0x300(%0); cache %1, 0x320(%0);	\n\t"	\
		"cache %1, 0x340(%0); cache %1, 0x360(%0);	\n\t"	\
		"cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\n\t"	\
		"cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_128:
 *
 *	Perform the specified cache operation on 32 128-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_128(va, op)				\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x0000(%0); cache %1, 0x0080(%0);	\n\t"	\
		"cache %1, 0x0100(%0); cache %1, 0x0180(%0);	\n\t"	\
		"cache %1, 0x0200(%0); cache %1, 0x0280(%0);	\n\t"	\
		"cache %1, 0x0300(%0); cache %1, 0x0380(%0);	\n\t"	\
		"cache %1, 0x0400(%0); cache %1, 0x0480(%0);	\n\t"	\
		"cache %1, 0x0500(%0); cache %1, 0x0580(%0);	\n\t"	\
		"cache %1, 0x0600(%0); cache %1, 0x0680(%0);	\n\t"	\
		"cache %1, 0x0700(%0); cache %1, 0x0780(%0);	\n\t"	\
		"cache %1, 0x0800(%0); cache %1, 0x0880(%0);	\n\t"	\
		"cache %1, 0x0900(%0); cache %1, 0x0980(%0);	\n\t"	\
		"cache %1, 0x0a00(%0); cache %1, 0x0a80(%0);	\n\t"	\
		"cache %1, 0x0b00(%0); cache %1, 0x0b80(%0);	\n\t"	\
		"cache %1, 0x0c00(%0); cache %1, 0x0c80(%0);	\n\t"	\
		"cache %1, 0x0d00(%0); cache %1, 0x0d80(%0);	\n\t"	\
		"cache %1, 0x0e00(%0); cache %1, 0x0e80(%0);	\n\t"	\
		"cache %1, 0x0f00(%0); cache %1, 0x0f80(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_16lines_16_2way:
 *
 *	Perform the specified cache operation on 16 16-byte
 * 	cache lines, 2-ways.
 */
#define	cache_r4k_op_16lines_16_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %2, 0x000(%0); cache %2, 0x000(%1);	\n\t"	\
		"cache %2, 0x010(%0); cache %2, 0x010(%1);	\n\t"	\
		"cache %2, 0x020(%0); cache %2, 0x020(%1);	\n\t"	\
		"cache %2, 0x030(%0); cache %2, 0x030(%1);	\n\t"	\
		"cache %2, 0x040(%0); cache %2, 0x040(%1);	\n\t"	\
		"cache %2, 0x050(%0); cache %2, 0x050(%1);	\n\t"	\
		"cache %2, 0x060(%0); cache %2, 0x060(%1);	\n\t"	\
		"cache %2, 0x070(%0); cache %2, 0x070(%1);	\n\t"	\
		"cache %2, 0x080(%0); cache %2, 0x080(%1);	\n\t"	\
		"cache %2, 0x090(%0); cache %2, 0x090(%1);	\n\t"	\
		"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);	\n\t"	\
		"cache %2, 0x0b0(%0); cache %2, 0x0b0(%1);	\n\t"	\
		"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);	\n\t"	\
		"cache %2, 0x0d0(%0); cache %2, 0x0d0(%1);	\n\t"	\
		"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);	\n\t"	\
		"cache %2, 0x0f0(%0); cache %2, 0x0f0(%1);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)

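/*
 * Illustrative sketch only -- `example_pdcache_wbinv_index_2way' and
 * its `waysize' argument are hypothetical, not part of the interface
 * declared here.  The two-way macros take one address per way; for
 * index operations the second address is typically the first plus the
 * per-way size, so both ways of each of the 16 sets are covered in a
 * single pass.
 */
static __inline void
example_pdcache_wbinv_index_2way(vaddr_t va, vsize_t waysize)
{

	cache_r4k_op_16lines_16_2way(va, va + waysize,
	    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
}
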
/*
 * cache_r4k_op_16lines_32_2way:
 *
 *	Perform the specified cache operation on 16 32-byte
 * 	cache lines, 2-ways.
 */
#define	cache_r4k_op_16lines_32_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %2, 0x000(%0); cache %2, 0x000(%1);	\n\t"	\
		"cache %2, 0x020(%0); cache %2, 0x020(%1);	\n\t"	\
		"cache %2, 0x040(%0); cache %2, 0x040(%1);	\n\t"	\
		"cache %2, 0x060(%0); cache %2, 0x060(%1);	\n\t"	\
		"cache %2, 0x080(%0); cache %2, 0x080(%1);	\n\t"	\
		"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);	\n\t"	\
		"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);	\n\t"	\
		"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);	\n\t"	\
		"cache %2, 0x100(%0); cache %2, 0x100(%1);	\n\t"	\
		"cache %2, 0x120(%0); cache %2, 0x120(%1);	\n\t"	\
		"cache %2, 0x140(%0); cache %2, 0x140(%1);	\n\t"	\
		"cache %2, 0x160(%0); cache %2, 0x160(%1);	\n\t"	\
		"cache %2, 0x180(%0); cache %2, 0x180(%1);	\n\t"	\
		"cache %2, 0x1a0(%0); cache %2, 0x1a0(%1);	\n\t"	\
		"cache %2, 0x1c0(%0); cache %2, 0x1c0(%1);	\n\t"	\
		"cache %2, 0x1e0(%0); cache %2, 0x1e0(%1);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_16_4way:
 *
 *	Perform the specified cache operation on 8 16-byte
 * 	cache lines, 4-ways.
 */
#define	cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %4, 0x000(%0); cache %4, 0x000(%1);	\n\t"	\
		"cache %4, 0x000(%2); cache %4, 0x000(%3);	\n\t"	\
		"cache %4, 0x010(%0); cache %4, 0x010(%1);	\n\t"	\
		"cache %4, 0x010(%2); cache %4, 0x010(%3);	\n\t"	\
		"cache %4, 0x020(%0); cache %4, 0x020(%1);	\n\t"	\
		"cache %4, 0x020(%2); cache %4, 0x020(%3);	\n\t"	\
		"cache %4, 0x030(%0); cache %4, 0x030(%1);	\n\t"	\
		"cache %4, 0x030(%2); cache %4, 0x030(%3);	\n\t"	\
		"cache %4, 0x040(%0); cache %4, 0x040(%1);	\n\t"	\
		"cache %4, 0x040(%2); cache %4, 0x040(%3);	\n\t"	\
		"cache %4, 0x050(%0); cache %4, 0x050(%1);	\n\t"	\
		"cache %4, 0x050(%2); cache %4, 0x050(%3);	\n\t"	\
		"cache %4, 0x060(%0); cache %4, 0x060(%1);	\n\t"	\
		"cache %4, 0x060(%2); cache %4, 0x060(%3);	\n\t"	\
		"cache %4, 0x070(%0); cache %4, 0x070(%1);	\n\t"	\
		"cache %4, 0x070(%2); cache %4, 0x070(%3);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32_4way:
 *
 *	Perform the specified cache operation on 8 32-byte
 * 	cache lines, 4-ways.
 */
#define	cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %4, 0x000(%0); cache %4, 0x000(%1);	\n\t"	\
		"cache %4, 0x000(%2); cache %4, 0x000(%3);	\n\t"	\
		"cache %4, 0x020(%0); cache %4, 0x020(%1);	\n\t"	\
		"cache %4, 0x020(%2); cache %4, 0x020(%3);	\n\t"	\
		"cache %4, 0x040(%0); cache %4, 0x040(%1);	\n\t"	\
		"cache %4, 0x040(%2); cache %4, 0x040(%3);	\n\t"	\
		"cache %4, 0x060(%0); cache %4, 0x060(%1);	\n\t"	\
		"cache %4, 0x060(%2); cache %4, 0x060(%3);	\n\t"	\
		"cache %4, 0x080(%0); cache %4, 0x080(%1);	\n\t"	\
		"cache %4, 0x080(%2); cache %4, 0x080(%3);	\n\t"	\
		"cache %4, 0x0a0(%0); cache %4, 0x0a0(%1);	\n\t"	\
		"cache %4, 0x0a0(%2); cache %4, 0x0a0(%3);	\n\t"	\
		"cache %4, 0x0c0(%0); cache %4, 0x0c0(%1);	\n\t"	\
		"cache %4, 0x0c0(%2); cache %4, 0x0c0(%3);	\n\t"	\
		"cache %4, 0x0e0(%0); cache %4, 0x0e0(%1);	\n\t"	\
		"cache %4, 0x0e0(%2); cache %4, 0x0e0(%3);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)

void	r4k_icache_sync_all_16(void);
void	r4k_icache_sync_range_16(vaddr_t, vsize_t);
void	r4k_icache_sync_range_index_16(vaddr_t, vsize_t);

void	r4k_icache_sync_all_32(void);
void	r4k_icache_sync_range_32(vaddr_t, vsize_t);
void	r4k_icache_sync_range_index_32(vaddr_t, vsize_t);

void	r4k_pdcache_wbinv_all_16(void);
void	r4k_pdcache_wbinv_range_16(vaddr_t, vsize_t);
void	r4k_pdcache_wbinv_range_index_16(vaddr_t, vsize_t);

void	r4k_pdcache_inv_range_16(vaddr_t, vsize_t);
void	r4k_pdcache_wb_range_16(vaddr_t, vsize_t);

void	r4k_pdcache_wbinv_all_32(void);
void	r4k_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r4k_pdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4k_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r4k_pdcache_wb_range_32(vaddr_t, vsize_t);

void	r5k_icache_sync_all_32(void);
void	r5k_icache_sync_range_32(vaddr_t, vsize_t);
void	r5k_icache_sync_range_index_32(vaddr_t, vsize_t);

void	r5k_pdcache_wbinv_all_16(void);
void	r5k_pdcache_wbinv_all_32(void);
void	r4600v1_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r4600v2_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	vr4131v1_pdcache_wbinv_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_index_16(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4600v1_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r4600v2_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r5k_pdcache_inv_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r4600v1_pdcache_wb_range_32(vaddr_t, vsize_t);
void	r4600v2_pdcache_wb_range_32(vaddr_t, vsize_t);
void	r5k_pdcache_wb_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_wb_range_32(vaddr_t, vsize_t);

void	r4k_sdcache_wbinv_all_32(void);
void	r4k_sdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_32(vaddr_t, vsize_t);
void	r4k_sdcache_wb_range_32(vaddr_t, vsize_t);

void	r4k_sdcache_wbinv_all_128(void);
void	r4k_sdcache_wbinv_range_128(vaddr_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_128(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_128(vaddr_t, vsize_t);
void	r4k_sdcache_wb_range_128(vaddr_t, vsize_t);

void	r4k_sdcache_wbinv_all_generic(void);
void	r4k_sdcache_wbinv_range_generic(vaddr_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_generic(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_generic(vaddr_t, vsize_t);
void	r4k_sdcache_wb_range_generic(vaddr_t, vsize_t);

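/*
 * Illustrative sketch only -- `example_select_icache_sync' and its
 * arguments are hypothetical, not part of the interface declared here.
 * Callers normally install one of the variants prototyped above
 * according to the probed primary I-cache line size, along these
 * lines:
 */
static __inline void
example_select_icache_sync(void (**syncp)(void), u_int picache_line_size)
{

	switch (picache_line_size) {
	case 16:
		*syncp = r4k_icache_sync_all_16;
		break;
	case 32:
		*syncp = r4k_icache_sync_all_32;
		break;
	default:
		/* unsupported line size; leave the pointer unchanged */
		break;
	}
}
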
#endif /* _KERNEL && !_LOCORE */