/*	$NetBSD: cache_r4k.h,v 1.8 2002/11/17 06:40:43 simonb Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cache definitions/operations for R4000-style caches.
 */

#define	CACHE_R4K_I			0
#define	CACHE_R4K_D			1
#define	CACHE_R4K_SI			2
#define	CACHE_R4K_SD			3

#define	CACHEOP_R4K_INDEX_INV		(0 << 2)	/* I, SI */
#define	CACHEOP_R4K_INDEX_WB_INV	(0 << 2)	/* D, SD */
#define	CACHEOP_R4K_INDEX_LOAD_TAG	(1 << 2)	/* all */
#define	CACHEOP_R4K_INDEX_STORE_TAG	(2 << 2)	/* all */
#define	CACHEOP_R4K_CREATE_DIRTY_EXCL	(3 << 2)	/* D, SD */
#define	CACHEOP_R4K_HIT_INV		(4 << 2)	/* all */
#define	CACHEOP_R4K_HIT_WB_INV		(5 << 2)	/* D, SD */
#define	CACHEOP_R4K_FILL		(5 << 2)	/* I */
#define	CACHEOP_R4K_HIT_WB		(6 << 2)	/* I, D, SD */
#define	CACHEOP_R4K_HIT_SET_VIRTUAL	(7 << 2)	/* SI, SD */

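/*
 * Illustrative note (not part of the original header): the operand of
 * the MIPS "cache" instruction is formed by OR-ing a cache selector
 * from CACHE_R4K_* (bits 1..0) with an operation from CACHEOP_R4K_*
 * (bits 4..2).  For example, a hit-writeback-invalidate on the primary
 * data cache is encoded as
 *
 *	CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV == 1 | (5 << 2) == 0x15
 *
 * and that combined constant is what gets passed as the "op" argument
 * to the macros below.
 */
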
#if defined(_KERNEL) && !defined(_LOCORE)

/*
 * cache_op_r4k_line:
 *
 *	Perform the specified cache operation on a single line.
 */
#define	cache_op_r4k_line(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0(%0)				\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

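/*
 * Illustrative sketch (not part of the original header): walking a
 * virtual address range one line at a time with cache_op_r4k_line().
 * The real loops live in the per-CPU cache routines (mips/cache_r4k.c
 * and friends); the 16-byte line size and the function name below are
 * assumptions for illustration only.
 *
 *	void
 *	example_pdcache_hit_wbinv_range_16(vaddr_t va, vsize_t size)
 *	{
 *		vaddr_t eva = (va + size + 15) & ~(vaddr_t)15;
 *
 *		for (va &= ~(vaddr_t)15; va < eva; va += 16)
 *			cache_op_r4k_line(va,
 *			    CACHE_R4K_D|CACHEOP_R4K_HIT_WB_INV);
 *	}
 */
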
/*
 * cache_r4k_op_8lines_16:
 *
 *	Perform the specified cache operation on 8 16-byte cache lines.
 */
#define	cache_r4k_op_8lines_16(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x00(%0); cache %1, 0x10(%0)		\n\t"	\
		"cache %1, 0x20(%0); cache %1, 0x30(%0)		\n\t"	\
		"cache %1, 0x40(%0); cache %1, 0x50(%0)		\n\t"	\
		"cache %1, 0x60(%0); cache %1, 0x70(%0)		\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32:
 *
 *	Perform the specified cache operation on 8 32-byte cache lines.
 */
#define	cache_r4k_op_8lines_32(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x00(%0); cache %1, 0x20(%0)		\n\t"	\
		"cache %1, 0x40(%0); cache %1, 0x60(%0)		\n\t"	\
		"cache %1, 0x80(%0); cache %1, 0xa0(%0)		\n\t"	\
		"cache %1, 0xc0(%0); cache %1, 0xe0(%0)		\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_16:
 *
 *	Perform the specified cache operation on 32 16-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_16(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x010(%0);	\n\t"	\
		"cache %1, 0x020(%0); cache %1, 0x030(%0);	\n\t"	\
		"cache %1, 0x040(%0); cache %1, 0x050(%0);	\n\t"	\
		"cache %1, 0x060(%0); cache %1, 0x070(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x090(%0);	\n\t"	\
		"cache %1, 0x0a0(%0); cache %1, 0x0b0(%0);	\n\t"	\
		"cache %1, 0x0c0(%0); cache %1, 0x0d0(%0);	\n\t"	\
		"cache %1, 0x0e0(%0); cache %1, 0x0f0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x110(%0);	\n\t"	\
		"cache %1, 0x120(%0); cache %1, 0x130(%0);	\n\t"	\
		"cache %1, 0x140(%0); cache %1, 0x150(%0);	\n\t"	\
		"cache %1, 0x160(%0); cache %1, 0x170(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x190(%0);	\n\t"	\
		"cache %1, 0x1a0(%0); cache %1, 0x1b0(%0);	\n\t"	\
		"cache %1, 0x1c0(%0); cache %1, 0x1d0(%0);	\n\t"	\
		"cache %1, 0x1e0(%0); cache %1, 0x1f0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_32:
 *
 *	Perform the specified cache operation on 32 32-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_32(va, op)					\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x000(%0); cache %1, 0x020(%0);	\n\t"	\
		"cache %1, 0x040(%0); cache %1, 0x060(%0);	\n\t"	\
		"cache %1, 0x080(%0); cache %1, 0x0a0(%0);	\n\t"	\
		"cache %1, 0x0c0(%0); cache %1, 0x0e0(%0);	\n\t"	\
		"cache %1, 0x100(%0); cache %1, 0x120(%0);	\n\t"	\
		"cache %1, 0x140(%0); cache %1, 0x160(%0);	\n\t"	\
		"cache %1, 0x180(%0); cache %1, 0x1a0(%0);	\n\t"	\
		"cache %1, 0x1c0(%0); cache %1, 0x1e0(%0);	\n\t"	\
		"cache %1, 0x200(%0); cache %1, 0x220(%0);	\n\t"	\
		"cache %1, 0x240(%0); cache %1, 0x260(%0);	\n\t"	\
		"cache %1, 0x280(%0); cache %1, 0x2a0(%0);	\n\t"	\
		"cache %1, 0x2c0(%0); cache %1, 0x2e0(%0);	\n\t"	\
		"cache %1, 0x300(%0); cache %1, 0x320(%0);	\n\t"	\
		"cache %1, 0x340(%0); cache %1, 0x360(%0);	\n\t"	\
		"cache %1, 0x380(%0); cache %1, 0x3a0(%0);	\n\t"	\
		"cache %1, 0x3c0(%0); cache %1, 0x3e0(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_32lines_128:
 *
 *	Perform the specified cache operation on 32 128-byte
 *	cache lines.
 */
#define	cache_r4k_op_32lines_128(va, op)				\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %1, 0x0000(%0); cache %1, 0x0080(%0);	\n\t"	\
		"cache %1, 0x0100(%0); cache %1, 0x0180(%0);	\n\t"	\
		"cache %1, 0x0200(%0); cache %1, 0x0280(%0);	\n\t"	\
		"cache %1, 0x0300(%0); cache %1, 0x0380(%0);	\n\t"	\
		"cache %1, 0x0400(%0); cache %1, 0x0480(%0);	\n\t"	\
		"cache %1, 0x0500(%0); cache %1, 0x0580(%0);	\n\t"	\
		"cache %1, 0x0600(%0); cache %1, 0x0680(%0);	\n\t"	\
		"cache %1, 0x0700(%0); cache %1, 0x0780(%0);	\n\t"	\
		"cache %1, 0x0800(%0); cache %1, 0x0880(%0);	\n\t"	\
		"cache %1, 0x0900(%0); cache %1, 0x0980(%0);	\n\t"	\
		"cache %1, 0x0a00(%0); cache %1, 0x0a80(%0);	\n\t"	\
		"cache %1, 0x0b00(%0); cache %1, 0x0b80(%0);	\n\t"	\
		"cache %1, 0x0c00(%0); cache %1, 0x0c80(%0);	\n\t"	\
		"cache %1, 0x0d00(%0); cache %1, 0x0d80(%0);	\n\t"	\
		"cache %1, 0x0e00(%0); cache %1, 0x0e80(%0);	\n\t"	\
		"cache %1, 0x0f00(%0); cache %1, 0x0f80(%0);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va), "i" (op)					\
	    : "memory");						\
} while (/*CONSTCOND*/0)

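/*
 * Illustrative sketch (not part of the original header): the unrolled
 * "32 lines" macros are meant for sweeping an entire cache by index
 * through KSEG0.  For example, index-writeback-invalidating a
 * hypothetical 16KB direct-mapped primary data cache with 32-byte
 * lines could look like this (the cache size is an assumption; real
 * callers use the probed configuration):
 *
 *	void
 *	example_pdcache_wbinv_all_32(void)
 *	{
 *		vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
 *		vaddr_t eva = va + (16 * 1024);
 *
 *		while (va < eva) {
 *			cache_r4k_op_32lines_32(va,
 *			    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
 *			va += (32 * 32);
 *		}
 *	}
 */
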
/*
 * cache_r4k_op_16lines_16_2way:
 *
 *	Perform the specified cache operation on 16 16-byte
 *	cache lines, 2-ways.
 */
#define	cache_r4k_op_16lines_16_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %2, 0x000(%0); cache %2, 0x000(%1);	\n\t"	\
		"cache %2, 0x010(%0); cache %2, 0x010(%1);	\n\t"	\
		"cache %2, 0x020(%0); cache %2, 0x020(%1);	\n\t"	\
		"cache %2, 0x030(%0); cache %2, 0x030(%1);	\n\t"	\
		"cache %2, 0x040(%0); cache %2, 0x040(%1);	\n\t"	\
		"cache %2, 0x050(%0); cache %2, 0x050(%1);	\n\t"	\
		"cache %2, 0x060(%0); cache %2, 0x060(%1);	\n\t"	\
		"cache %2, 0x070(%0); cache %2, 0x070(%1);	\n\t"	\
		"cache %2, 0x080(%0); cache %2, 0x080(%1);	\n\t"	\
		"cache %2, 0x090(%0); cache %2, 0x090(%1);	\n\t"	\
		"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);	\n\t"	\
		"cache %2, 0x0b0(%0); cache %2, 0x0b0(%1);	\n\t"	\
		"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);	\n\t"	\
		"cache %2, 0x0d0(%0); cache %2, 0x0d0(%1);	\n\t"	\
		"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);	\n\t"	\
		"cache %2, 0x0f0(%0); cache %2, 0x0f0(%1);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_16lines_32_2way:
 *
 *	Perform the specified cache operation on 16 32-byte
 *	cache lines, 2-ways.
 */
#define	cache_r4k_op_16lines_32_2way(va1, va2, op)			\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %2, 0x000(%0); cache %2, 0x000(%1);	\n\t"	\
		"cache %2, 0x020(%0); cache %2, 0x020(%1);	\n\t"	\
		"cache %2, 0x040(%0); cache %2, 0x040(%1);	\n\t"	\
		"cache %2, 0x060(%0); cache %2, 0x060(%1);	\n\t"	\
		"cache %2, 0x080(%0); cache %2, 0x080(%1);	\n\t"	\
		"cache %2, 0x0a0(%0); cache %2, 0x0a0(%1);	\n\t"	\
		"cache %2, 0x0c0(%0); cache %2, 0x0c0(%1);	\n\t"	\
		"cache %2, 0x0e0(%0); cache %2, 0x0e0(%1);	\n\t"	\
		"cache %2, 0x100(%0); cache %2, 0x100(%1);	\n\t"	\
		"cache %2, 0x120(%0); cache %2, 0x120(%1);	\n\t"	\
		"cache %2, 0x140(%0); cache %2, 0x140(%1);	\n\t"	\
		"cache %2, 0x160(%0); cache %2, 0x160(%1);	\n\t"	\
		"cache %2, 0x180(%0); cache %2, 0x180(%1);	\n\t"	\
		"cache %2, 0x1a0(%0); cache %2, 0x1a0(%1);	\n\t"	\
		"cache %2, 0x1c0(%0); cache %2, 0x1c0(%1);	\n\t"	\
		"cache %2, 0x1e0(%0); cache %2, 0x1e0(%1);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "i" (op)				\
	    : "memory");						\
} while (/*CONSTCOND*/0)

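/*
 * Illustrative sketch (not part of the original header): the 2-way
 * macros take one base address per way, so a single pass of an index
 * operation touches both ways of a set-associative cache.  The second
 * address is simply the first offset by the size of one way, e.g. for
 * one 16-line, 32-byte chunk of a 2-way primary data cache:
 *
 *	cache_r4k_op_16lines_32_2way(va, va + way_size,
 *	    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
 *
 * where "way_size" (cache size divided by the number of ways) is an
 * assumed name; the real callers compute it from the probed cache
 * configuration.  The 4-way macros below extend the same idea to four
 * base addresses.
 */
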
/*
 * cache_r4k_op_8lines_16_4way:
 *
 *	Perform the specified cache operation on 8 16-byte
 *	cache lines, 4-ways.
 */
#define	cache_r4k_op_8lines_16_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %4, 0x000(%0); cache %4, 0x000(%1);	\n\t"	\
		"cache %4, 0x000(%2); cache %4, 0x000(%3);	\n\t"	\
		"cache %4, 0x010(%0); cache %4, 0x010(%1);	\n\t"	\
		"cache %4, 0x010(%2); cache %4, 0x010(%3);	\n\t"	\
		"cache %4, 0x020(%0); cache %4, 0x020(%1);	\n\t"	\
		"cache %4, 0x020(%2); cache %4, 0x020(%3);	\n\t"	\
		"cache %4, 0x030(%0); cache %4, 0x030(%1);	\n\t"	\
		"cache %4, 0x030(%2); cache %4, 0x030(%3);	\n\t"	\
		"cache %4, 0x040(%0); cache %4, 0x040(%1);	\n\t"	\
		"cache %4, 0x040(%2); cache %4, 0x040(%3);	\n\t"	\
		"cache %4, 0x050(%0); cache %4, 0x050(%1);	\n\t"	\
		"cache %4, 0x050(%2); cache %4, 0x050(%3);	\n\t"	\
		"cache %4, 0x060(%0); cache %4, 0x060(%1);	\n\t"	\
		"cache %4, 0x060(%2); cache %4, 0x060(%3);	\n\t"	\
		"cache %4, 0x070(%0); cache %4, 0x070(%1);	\n\t"	\
		"cache %4, 0x070(%2); cache %4, 0x070(%3);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)

/*
 * cache_r4k_op_8lines_32_4way:
 *
 *	Perform the specified cache operation on 8 32-byte
 *	cache lines, 4-ways.
 */
#define	cache_r4k_op_8lines_32_4way(va1, va2, va3, va4, op)		\
do {									\
	__asm __volatile(						\
		".set noreorder					\n\t"	\
		"cache %4, 0x000(%0); cache %4, 0x000(%1);	\n\t"	\
		"cache %4, 0x000(%2); cache %4, 0x000(%3);	\n\t"	\
		"cache %4, 0x020(%0); cache %4, 0x020(%1);	\n\t"	\
		"cache %4, 0x020(%2); cache %4, 0x020(%3);	\n\t"	\
		"cache %4, 0x040(%0); cache %4, 0x040(%1);	\n\t"	\
		"cache %4, 0x040(%2); cache %4, 0x040(%3);	\n\t"	\
		"cache %4, 0x060(%0); cache %4, 0x060(%1);	\n\t"	\
		"cache %4, 0x060(%2); cache %4, 0x060(%3);	\n\t"	\
		"cache %4, 0x080(%0); cache %4, 0x080(%1);	\n\t"	\
		"cache %4, 0x080(%2); cache %4, 0x080(%3);	\n\t"	\
		"cache %4, 0x0a0(%0); cache %4, 0x0a0(%1);	\n\t"	\
		"cache %4, 0x0a0(%2); cache %4, 0x0a0(%3);	\n\t"	\
		"cache %4, 0x0c0(%0); cache %4, 0x0c0(%1);	\n\t"	\
		"cache %4, 0x0c0(%2); cache %4, 0x0c0(%3);	\n\t"	\
		"cache %4, 0x0e0(%0); cache %4, 0x0e0(%1);	\n\t"	\
		"cache %4, 0x0e0(%2); cache %4, 0x0e0(%3);	\n\t"	\
		".set reorder"						\
	    :								\
	    : "r" (va1), "r" (va2), "r" (va3), "r" (va4), "i" (op)	\
	    : "memory");						\
} while (/*CONSTCOND*/0)

void	r4k_icache_sync_all_16(void);
void	r4k_icache_sync_range_16(vaddr_t, vsize_t);
void	r4k_icache_sync_range_index_16(vaddr_t, vsize_t);

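/*
 * Illustrative sketch (not part of the original header): the routines
 * declared in this file are implemented elsewhere (e.g. mips/cache_r4k.c);
 * a range sync such as r4k_icache_sync_range_16() might, roughly, combine
 * the chunked and per-line macros above.  The 32*16-byte chunking, the
 * rounding arithmetic, and the function name below are assumptions for
 * illustration only.
 *
 *	void
 *	example_icache_sync_range_16(vaddr_t va, vsize_t size)
 *	{
 *		vaddr_t eva = (va + size + 15) & ~(vaddr_t)15;
 *
 *		va &= ~(vaddr_t)15;
 *		while ((eva - va) >= (32 * 16)) {
 *			cache_r4k_op_32lines_16(va,
 *			    CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
 *			va += (32 * 16);
 *		}
 *		while (va < eva) {
 *			cache_op_r4k_line(va,
 *			    CACHE_R4K_I|CACHEOP_R4K_HIT_INV);
 *			va += 16;
 *		}
 *	}
 */
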
void	r4k_icache_sync_all_32(void);
void	r4k_icache_sync_range_32(vaddr_t, vsize_t);
void	r4k_icache_sync_range_index_32(vaddr_t, vsize_t);

void	r4k_pdcache_wbinv_all_16(void);
void	r4k_pdcache_wbinv_range_16(vaddr_t, vsize_t);
void	r4k_pdcache_wbinv_range_index_16(vaddr_t, vsize_t);

void	r4k_pdcache_inv_range_16(vaddr_t, vsize_t);
void	r4k_pdcache_wb_range_16(vaddr_t, vsize_t);

void	r4k_pdcache_wbinv_all_32(void);
void	r4k_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r4k_pdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4k_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r4k_pdcache_wb_range_32(vaddr_t, vsize_t);

void	r5k_icache_sync_all_32(void);
void	r5k_icache_sync_range_32(vaddr_t, vsize_t);
void	r5k_icache_sync_range_index_32(vaddr_t, vsize_t);

void	r5k_pdcache_wbinv_all_16(void);
void	r5k_pdcache_wbinv_all_32(void);
void	r4600v1_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r4600v2_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	vr4131v1_pdcache_wbinv_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_index_16(vaddr_t, vsize_t);
void	r5k_pdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4600v1_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r4600v2_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r5k_pdcache_inv_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_inv_range_32(vaddr_t, vsize_t);
void	r4600v1_pdcache_wb_range_32(vaddr_t, vsize_t);
void	r4600v2_pdcache_wb_range_32(vaddr_t, vsize_t);
void	r5k_pdcache_wb_range_16(vaddr_t, vsize_t);
void	r5k_pdcache_wb_range_32(vaddr_t, vsize_t);

void	r4k_sdcache_wbinv_all_32(void);
void	r4k_sdcache_wbinv_range_32(vaddr_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_32(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_32(vaddr_t, vsize_t);
void	r4k_sdcache_wb_range_32(vaddr_t, vsize_t);

void	r4k_sdcache_wbinv_all_128(void);
void	r4k_sdcache_wbinv_range_128(vaddr_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_128(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_128(vaddr_t, vsize_t);
void	r4k_sdcache_wb_range_128(vaddr_t, vsize_t);

void	r4k_sdcache_wbinv_all_generic(void);
void	r4k_sdcache_wbinv_range_generic(vaddr_t, vsize_t);
void	r4k_sdcache_wbinv_range_index_generic(vaddr_t, vsize_t);

void	r4k_sdcache_inv_range_generic(vaddr_t, vsize_t);
void	r4k_sdcache_wb_range_generic(vaddr_t, vsize_t);

#endif /* _KERNEL && !_LOCORE */