/*	$NetBSD: cache.h,v 1.4 2002/11/09 19:34:40 thorpej Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cache operations.
 *
 * We define the following primitives:
 *
 * --- Instruction cache synchronization (mandatory):
 *
 *	icache_sync_all		Synchronize I-cache
 *
 *	icache_sync_range	Synchronize I-cache range
 *
 *	icache_sync_range_index	(index ops)
 *
 * --- Primary data cache (mandatory):
 *
 *	pdcache_wbinv_all	Write-back Invalidate primary D-cache
 *
 *	pdcache_wbinv_range	Write-back Invalidate primary D-cache range
 *
 *	pdcache_wbinv_range_index (index ops)
 *
 *	pdcache_inv_range	Invalidate primary D-cache range
 *
 *	pdcache_wb_range	Write-back primary D-cache range
 *
 * --- Secondary data cache (optional):
 *
 *	sdcache_wbinv_all	Write-back Invalidate secondary D-cache
 *
 *	sdcache_wbinv_range	Write-back Invalidate secondary D-cache range
 *
 *	sdcache_wbinv_range_index (index ops)
 *
 *	sdcache_inv_range	Invalidate secondary D-cache range
 *
 *	sdcache_wb_range	Write-back secondary D-cache range
 *
 * There are some rules that must be followed:
 *
 *	I-cache Synch (all or range):
 *		The goal is to synchronize the instruction stream,
 *		so you may need to write back dirty data cache
 *		blocks first.  If a range is requested, and you
 *		can't synchronize just a range, you have to hit
 *		the whole thing.
 *
 *	D-cache Write-back Invalidate range:
 *		If you can't WB-Inv a range, you must WB-Inv the
 *		entire D-cache.
 *
 *	D-cache Invalidate:
 *		If you can't Inv the D-cache without doing a
 *		Write-back, YOU MUST PANIC.  This is to catch
 *		errors in calling code.  Callers must be aware
 *		of this scenario, and must handle it appropriately
 *		(consider the bus_dma(9) operations).
 *
 *	D-cache Write-back:
 *		If you can't Write-back without doing an invalidate,
 *		that's fine.  Then treat this as a WB-Inv.  Skipping
 *		the invalidate is merely an optimization.
 *
 *	All operations:
 *		Valid virtual addresses must be passed to the
 *		cache operation.
 *
 * Finally, these primitives are grouped together in reasonable
 * ways.  For all operations described here, first the primary
 * cache is frobbed, then the secondary cache is frobbed, if the
 * operation for the secondary cache exists.
 *
 *	mips_icache_sync_all	Synchronize I-cache
 *
 *	mips_icache_sync_range	Synchronize I-cache range
 *
 *	mips_icache_sync_range_index (index ops)
 *
 *	mips_dcache_wbinv_all	Write-back Invalidate D-cache
 *
 *	mips_dcache_wbinv_range	Write-back Invalidate D-cache range
 *
 *	mips_dcache_wbinv_range_index (index ops)
 *
 *	mips_dcache_inv_range	Invalidate D-cache range
 *
 *	mips_dcache_wb_range	Write-back D-cache range
 */
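
/*
 * A minimal usage sketch (not part of the interface proper): the most
 * common consumer of the grouped wrappers below is code that has just
 * written instructions into memory and must make them visible to the
 * instruction stream.  The function and argument names here are
 * hypothetical.
 */
#if 0
void
example_load_code(vaddr_t dst, vsize_t len)
{

	/* ... instructions have just been copied to [dst, dst + len) ... */

	/*
	 * Synchronize the I-cache over the range; per the rules above,
	 * the back-end writes back dirty D-cache blocks first if it
	 * needs to.
	 */
	mips_icache_sync_range(dst, len);
}
#endif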

struct mips_cache_ops {
	void	(*mco_icache_sync_all)(void);
	void	(*mco_icache_sync_range)(vaddr_t, vsize_t);
	void	(*mco_icache_sync_range_index)(vaddr_t, vsize_t);

	void	(*mco_pdcache_wbinv_all)(void);
	void	(*mco_pdcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*mco_pdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_pdcache_inv_range)(vaddr_t, vsize_t);
	void	(*mco_pdcache_wb_range)(vaddr_t, vsize_t);

	void	(*mco_sdcache_wbinv_all)(void);
	void	(*mco_sdcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*mco_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_sdcache_inv_range)(vaddr_t, vsize_t);
	void	(*mco_sdcache_wb_range)(vaddr_t, vsize_t);
};
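
/*
 * A sketch of how this table is meant to be used (the cpu_* back-end
 * names below are hypothetical): machine-dependent configuration code
 * fills in mips_cache_ops with the routines for the CPU at hand, and a
 * CPU without a secondary cache simply leaves the mco_sdcache_*
 * pointers NULL.
 */
#if 0
void
example_cpu_cache_config(void)
{

	mips_cache_ops.mco_icache_sync_all = cpu_icache_sync_all;
	mips_cache_ops.mco_icache_sync_range = cpu_icache_sync_range;
	mips_cache_ops.mco_icache_sync_range_index =
	    cpu_icache_sync_range_index;

	mips_cache_ops.mco_pdcache_wbinv_all = cpu_pdcache_wbinv_all;
	mips_cache_ops.mco_pdcache_wbinv_range = cpu_pdcache_wbinv_range;
	mips_cache_ops.mco_pdcache_wbinv_range_index =
	    cpu_pdcache_wbinv_range_index;
	mips_cache_ops.mco_pdcache_inv_range = cpu_pdcache_inv_range;
	mips_cache_ops.mco_pdcache_wb_range = cpu_pdcache_wb_range;

	/* No secondary cache on this hypothetical CPU. */
}
#endif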

#ifdef _KERNEL
extern struct mips_cache_ops mips_cache_ops;

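/*
 * In each group of variables below, the *_line_size members are cache
 * line sizes in bytes, and *_way_size / *_way_mask are understood to be
 * the per-way span (total size divided by associativity) and the mask
 * used to index within one way.  This reading is inferred from the
 * names; the cache configuration code holds the authoritative values.
 */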
/* PRIMARY CACHE VARIABLES */
extern u_int mips_picache_size;
extern u_int mips_picache_line_size;
extern u_int mips_picache_ways;
extern u_int mips_picache_way_size;
extern u_int mips_picache_way_mask;

extern u_int mips_pdcache_size;		/* and unified */
extern u_int mips_pdcache_line_size;
extern u_int mips_pdcache_ways;
extern u_int mips_pdcache_way_size;
extern u_int mips_pdcache_way_mask;
extern int mips_pdcache_write_through;

extern int mips_pcache_unified;

/* SECONDARY CACHE VARIABLES */
extern u_int mips_sicache_size;
extern u_int mips_sicache_line_size;
extern u_int mips_sicache_ways;
extern u_int mips_sicache_way_size;
extern u_int mips_sicache_way_mask;

extern u_int mips_sdcache_size;		/* and unified */
extern u_int mips_sdcache_line_size;
extern u_int mips_sdcache_ways;
extern u_int mips_sdcache_way_size;
extern u_int mips_sdcache_way_mask;
extern int mips_sdcache_write_through;

extern int mips_scache_unified;

/* TERTIARY CACHE VARIABLES */
extern u_int mips_tcache_size;		/* always unified */
extern u_int mips_tcache_line_size;
extern u_int mips_tcache_ways;
extern u_int mips_tcache_way_size;
extern u_int mips_tcache_way_mask;
extern int mips_tcache_write_through;

extern u_int mips_dcache_align;
extern u_int mips_dcache_align_mask;

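/*
 * mips_cache_alias_mask and mips_cache_prefer_mask below describe the
 * virtual indexing of the primary D-cache: roughly, the VA bits beyond
 * the page offset that select a cache index.  They are intended for
 * detecting (and preferring mappings that avoid) virtual-cache aliases.
 */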
extern u_int mips_cache_alias_mask;
extern u_int mips_cache_prefer_mask;

/*
 * XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX
 */
#define	mips_cache_indexof(x)	(((vaddr_t)(x)) & mips_cache_alias_mask)

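/*
 * The __mco_* helpers below dispatch one logical operation first to the
 * primary-cache routine (always present) and then to the corresponding
 * secondary-cache routine, but only if one has been installed in
 * mips_cache_ops.
 */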
#define	__mco_noargs(x)							\
do {									\
	(*mips_cache_ops.mco_p ## x )();				\
	if (*mips_cache_ops.mco_s ## x )				\
		(*mips_cache_ops.mco_s ## x )();			\
} while (/*CONSTCOND*/0)

#define	__mco_2args(x, a, b)						\
do {									\
	(*mips_cache_ops.mco_p ## x )((a), (b));			\
	if (*mips_cache_ops.mco_s ## x )				\
		(*mips_cache_ops.mco_s ## x )((a), (b));		\
} while (/*CONSTCOND*/0)

#define	mips_icache_sync_all()						\
	(*mips_cache_ops.mco_icache_sync_all)()

#define	mips_icache_sync_range(v, s)					\
	(*mips_cache_ops.mco_icache_sync_range)((v), (s))

#define	mips_icache_sync_range_index(v, s)				\
	(*mips_cache_ops.mco_icache_sync_range_index)((v), (s))

#define	mips_dcache_wbinv_all()						\
	__mco_noargs(dcache_wbinv_all)

#define	mips_dcache_wbinv_range(v, s)					\
	__mco_2args(dcache_wbinv_range, (v), (s))

#define	mips_dcache_wbinv_range_index(v, s)				\
	__mco_2args(dcache_wbinv_range_index, (v), (s))

#define	mips_dcache_inv_range(v, s)					\
	__mco_2args(dcache_inv_range, (v), (s))

#define	mips_dcache_wb_range(v, s)					\
	__mco_2args(dcache_wb_range, (v), (s))
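
/*
 * A minimal usage sketch of the D-cache range wrappers (the function
 * and argument names are hypothetical): these are the operations a
 * bus_dma(9)-style sync would apply around a DMA transfer.
 */
#if 0
void
example_dma_sync(vaddr_t buf_va, vsize_t buf_len, int device_writes)
{

	if (device_writes) {
		/*
		 * Device -> memory transfer: discard any cached copies
		 * so the CPU later reads the freshly DMA'd data.
		 */
		mips_dcache_inv_range(buf_va, buf_len);
	} else {
		/*
		 * Memory -> device transfer: push dirty data out to
		 * memory before the device reads it.
		 */
		mips_dcache_wb_range(buf_va, buf_len);
	}
}
#endif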

void	mips_config_cache(void);
void	mips_dcache_compute_align(void);

#endif /* _KERNEL */