/*	$NetBSD: cache.h,v 1.10 2011/02/20 07:45:47 matt Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Cache operations.
 *
 * We define the following primitives:
 *
 * --- Instruction cache synchronization (mandatory):
 *
 *	icache_sync_all		Synchronize I-cache
 *
 *	icache_sync_range	Synchronize I-cache range
 *
 *	icache_sync_range_index	(index ops)
 *
 * --- Primary data cache (mandatory):
 *
 *	pdcache_wbinv_all	Write-back Invalidate primary D-cache
 *
 *	pdcache_wbinv_range	Write-back Invalidate primary D-cache range
 *
 *	pdcache_wbinv_range_index (index ops)
 *
 *	pdcache_inv_range	Invalidate primary D-cache range
 *
 *	pdcache_wb_range	Write-back primary D-cache range
 *
 * --- Secondary data cache (optional):
 *
 *	sdcache_wbinv_all	Write-back Invalidate secondary D-cache
 *
 *	sdcache_wbinv_range	Write-back Invalidate secondary D-cache range
 *
 *	sdcache_wbinv_range_index (index ops)
 *
 *	sdcache_inv_range	Invalidate secondary D-cache range
 *
 *	sdcache_wb_range	Write-back secondary D-cache range
 *
 * There are some rules that must be followed:
 *
 *	I-cache Sync (all or range):
 *		The goal is to synchronize the instruction stream,
 *		so you may need to write back dirty data cache
 *		blocks first.  If a range is requested, and you
 *		can't synchronize just a range, you have to hit
 *		the whole thing.
 *
 *	D-cache Write-back Invalidate range:
 *		If you can't WB-Inv a range, you must WB-Inv the
 *		entire D-cache.
 *
 *	D-cache Invalidate:
 *		If you can't Inv the D-cache without doing a
 *		Write-back, YOU MUST PANIC.  This is to catch
 *		errors in calling code.  Callers must be aware
 *		of this scenario, and must handle it appropriately
 *		(consider the bus_dma(9) operations; see the sketch
 *		following this comment).
 *
 *	D-cache Write-back:
 *		If you can't Write-back without doing an invalidate,
 *		that's fine; just treat it as a WB-Inv.  Skipping
 *		the invalidate is merely an optimization.
 *
 *	All operations:
 *		Valid virtual addresses must be passed to the
 *		cache operation.
 *
 * Finally, these primitives are grouped together in reasonable
 * ways.  For all of the operations described here, the primary
 * cache is frobbed first, then the secondary cache, if an operation
 * for the secondary cache exists.
 *
 *	mips_icache_sync_all	Synchronize I-cache
 *
 *	mips_icache_sync_range	Synchronize I-cache range
 *
 *	mips_icache_sync_range_index (index ops)
 *
 *	mips_dcache_wbinv_all	Write-back Invalidate D-cache
 *
 *	mips_dcache_wbinv_range	Write-back Invalidate D-cache range
 *
 *	mips_dcache_wbinv_range_index (index ops)
 *
 *	mips_dcache_inv_range	Invalidate D-cache range
 *
 *	mips_dcache_wb_range	Write-back D-cache range
 */

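/*
 * Example (an illustrative sketch only, not the actual bus_dma(9)
 * implementation): the Inv and Write-back rules above are what let a
 * DMA sync routine pick the cheapest safe operation for each
 * direction.  The helper below is hypothetical; BUS_DMASYNC_* are the
 * standard bus_dma(9) flags, and the buffer is assumed to start and
 * end on mci_dcache_align boundaries so that a pure invalidate cannot
 * discard unrelated data sharing a cache line.
 *
 *	static void
 *	example_dma_sync(vaddr_t va, vsize_t len, int ops)
 *	{
 *		if (ops & BUS_DMASYNC_PREWRITE)
 *			mips_dcache_wb_range(va, len);	// CPU -> device
 *		if (ops & BUS_DMASYNC_PREREAD)
 *			mips_dcache_inv_range(va, len);	// device -> CPU
 *	}
 */
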
struct mips_cache_ops {
	void	(*mco_icache_sync_all)(void);
	void	(*mco_icache_sync_range)(vaddr_t, vsize_t);
	void	(*mco_icache_sync_range_index)(vaddr_t, vsize_t);

	void	(*mco_pdcache_wbinv_all)(void);
	void	(*mco_pdcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*mco_pdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_pdcache_inv_range)(vaddr_t, vsize_t);
	void	(*mco_pdcache_wb_range)(vaddr_t, vsize_t);

	/* These are called only by the (mipsNN) icache functions. */
	void	(*mco_intern_pdcache_wbinv_all)(void);
	void	(*mco_intern_pdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_pdcache_wb_range)(vaddr_t, vsize_t);

	void	(*mco_sdcache_wbinv_all)(void);
	void	(*mco_sdcache_wbinv_range)(vaddr_t, vsize_t);
	void	(*mco_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_sdcache_inv_range)(vaddr_t, vsize_t);
	void	(*mco_sdcache_wb_range)(vaddr_t, vsize_t);

	/* These are called only by the (mipsNN) icache functions. */
	void	(*mco_intern_sdcache_wbinv_all)(void);
	void	(*mco_intern_sdcache_wbinv_range_index)(vaddr_t, vsize_t);
	void	(*mco_intern_sdcache_wb_range)(vaddr_t, vsize_t);
};

extern struct mips_cache_ops mips_cache_ops;

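/*
 * A minimal sketch (the backend names are hypothetical) of how a CPU
 * family's cache configuration code, reached from mips_config_cache(),
 * might fill in this table.  Secondary-cache hooks that a system does
 * not have are simply left NULL; the wrapper macros below check for
 * that before calling them.
 *
 *	mips_cache_ops.mco_icache_sync_all   = example_icache_sync_all;
 *	mips_cache_ops.mco_icache_sync_range = example_icache_sync_range;
 *	mips_cache_ops.mco_pdcache_wbinv_all = example_pdcache_wbinv_all;
 *	mips_cache_ops.mco_pdcache_wb_range  = example_pdcache_wb_range;
 *	// ... remaining primary hooks as appropriate ...
 *	mips_cache_ops.mco_sdcache_wbinv_all = NULL;	// no L2 cache
 */
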
/* PRIMARY CACHE VARIABLES */
struct mips_cache_info {
	u_int mci_picache_size;
	u_int mci_picache_line_size;
	u_int mci_picache_ways;
	u_int mci_picache_way_size;
	u_int mci_picache_way_mask;

	u_int mci_pdcache_size;		/* and unified */
	u_int mci_pdcache_line_size;
	u_int mci_pdcache_ways;
	u_int mci_pdcache_way_size;
	u_int mci_pdcache_way_mask;
	bool mci_pdcache_write_through;

	bool mci_pcache_unified;

	/* SECONDARY CACHE VARIABLES */
	u_int mci_sicache_size;
	u_int mci_sicache_line_size;
	u_int mci_sicache_ways;
	u_int mci_sicache_way_size;
	u_int mci_sicache_way_mask;

	u_int mci_sdcache_size;		/* and unified */
	u_int mci_sdcache_line_size;
	u_int mci_sdcache_ways;
	u_int mci_sdcache_way_size;
	u_int mci_sdcache_way_mask;
	bool mci_sdcache_write_through;

	bool mci_scache_unified;

	/* TERTIARY CACHE VARIABLES */
	u_int mci_tcache_size;		/* always unified */
	u_int mci_tcache_line_size;
	u_int mci_tcache_ways;
	u_int mci_tcache_way_size;
	u_int mci_tcache_way_mask;
	bool mci_tcache_write_through;

	/*
	 * These two variables inform the rest of the kernel about the
	 * size of the largest D-cache line present in the system.  The
	 * mask can be used to determine if a region of memory is cache
	 * line size aligned (see the example following this structure).
	 *
	 * Whenever any code updates a data cache line size, it should
	 * call mips_dcache_compute_align() to recompute these values.
	 */
	u_int mci_dcache_align;
	u_int mci_dcache_align_mask;

	u_int mci_cache_prefer_mask;
#if defined(MIPS2) || defined(MIPS3) || defined(MIPS32) || defined(MIPS64)
	u_int mci_cache_alias_mask;

	bool mci_cache_virtual_alias;

#define	MIPS_CACHE_ALIAS_MASK		mips_cache_info.mci_cache_alias_mask
#define	MIPS_CACHE_VIRTUAL_ALIAS	mips_cache_info.mci_cache_virtual_alias
#elif defined(MIPS1)
#define	MIPS_CACHE_ALIAS_MASK		0
#define	MIPS_CACHE_VIRTUAL_ALIAS	false
#else
#error mci_cache screw up
#endif
};

extern struct mips_cache_info mips_cache_info;
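
/*
 * Example use of the alignment variables above (an illustrative
 * sketch; "va" and "len" are hypothetical): a range may safely be
 * invalidated without a write-back only if it starts and ends on
 * largest-line-size boundaries; otherwise the partial lines at the
 * edges must be written back as well.
 *
 *	if (((va | len) & mips_cache_info.mci_dcache_align_mask) == 0)
 *		mips_dcache_inv_range(va, len);
 *	else
 *		mips_dcache_wbinv_range(va, len);
 */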


/*
 * XXX XXX XXX THIS SHOULD NOT EXIST XXX XXX XXX
 */
#define	mips_cache_indexof(x)	(((vaddr_t)(x)) & MIPS_CACHE_ALIAS_MASK)
#define	mips_cache_badalias(x,y) (((vaddr_t)(x)^(vaddr_t)(y)) & MIPS_CACHE_ALIAS_MASK)
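
/*
 * Illustrative sketch (the variable names are hypothetical): on CPUs
 * where MIPS_CACHE_VIRTUAL_ALIAS is true, callers such as the pmap
 * use these macros to detect two virtual mappings of the same
 * physical page landing in different cache "colors", and flush the
 * old mapping by index before the new one is used.
 *
 *	if (MIPS_CACHE_VIRTUAL_ALIAS &&
 *	    mips_cache_badalias(old_va, new_va))
 *		mips_dcache_wbinv_range_index(old_va, PAGE_SIZE);
 */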

#define	__mco_noargs(prefix, x)						\
do {									\
	(*mips_cache_ops.mco_ ## prefix ## p ## x )();			\
	if (*mips_cache_ops.mco_ ## prefix ## s ## x )			\
		(*mips_cache_ops.mco_ ## prefix ## s ## x )();		\
} while (/*CONSTCOND*/0)

#define	__mco_2args(prefix, x, a, b)					\
do {									\
	(*mips_cache_ops.mco_ ## prefix ## p ## x )((a), (b));		\
	if (*mips_cache_ops.mco_ ## prefix ## s ## x )			\
		(*mips_cache_ops.mco_ ## prefix ## s ## x )((a), (b));	\
} while (/*CONSTCOND*/0)
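
/*
 * For example, mips_dcache_wbinv_range(v, s) below passes
 * dcache_wbinv_range through __mco_2args with an empty prefix, which
 * token-pastes "p"/"s" onto the operation name and expands to
 * roughly:
 *
 *	(*mips_cache_ops.mco_pdcache_wbinv_range)((v), (s));
 *	if (*mips_cache_ops.mco_sdcache_wbinv_range)
 *		(*mips_cache_ops.mco_sdcache_wbinv_range)((v), (s));
 *
 * i.e. the primary op is always called, and the secondary op only
 * when a backend has installed one.
 */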

#define	mips_icache_sync_all()						\
	(*mips_cache_ops.mco_icache_sync_all)()

#define	mips_icache_sync_range(v, s)					\
	(*mips_cache_ops.mco_icache_sync_range)((v), (s))

#define	mips_icache_sync_range_index(v, s)				\
	(*mips_cache_ops.mco_icache_sync_range_index)((v), (s))

#define	mips_dcache_wbinv_all()						\
	__mco_noargs(, dcache_wbinv_all)

#define	mips_dcache_wbinv_range(v, s)					\
	__mco_2args(, dcache_wbinv_range, (v), (s))

#define	mips_dcache_wbinv_range_index(v, s)				\
	__mco_2args(, dcache_wbinv_range_index, (v), (s))

#define	mips_dcache_inv_range(v, s)					\
	__mco_2args(, dcache_inv_range, (v), (s))

#define	mips_dcache_wb_range(v, s)					\
	__mco_2args(, dcache_wb_range, (v), (s))

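/*
 * Typical call site (an illustrative sketch; "dst" and "len" are
 * hypothetical): after the kernel stores instructions into memory,
 * e.g. for a breakpoint or a trampoline, the instruction stream must
 * be synchronized before jumping to it.  Per the rules above, the
 * I-cache sync op is responsible for writing back any dirty D-cache
 * lines itself.
 *
 *	memcpy(dst, insns, len);
 *	mips_icache_sync_range((vaddr_t)dst, len);
 */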

/*
 * Private D-cache functions only called from (currently only the
 * mipsNN) I-cache functions.
 */
#define	mips_intern_dcache_wbinv_all()					\
	__mco_noargs(intern_, dcache_wbinv_all)

#define	mips_intern_dcache_wbinv_range_index(v, s)			\
	__mco_2args(intern_, dcache_wbinv_range_index, (v), (s))

#define	mips_intern_dcache_wb_range(v, s)				\
	__mco_2args(intern_, dcache_wb_range, (v), (s))

void	mips_config_cache(void);
void	mips_dcache_compute_align(void);
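
/*
 * Bootstrap sketch (illustrative): platform start-up code calls
 * mips_config_cache() once to probe the caches and install the ops
 * table; any code that later changes a D-cache line size in
 * mips_cache_info is expected to call mips_dcache_compute_align()
 * so that mci_dcache_align/mci_dcache_align_mask stay correct.
 * The assignment below is hypothetical.
 *
 *	mips_config_cache();
 *	// ... later, if a line size is adjusted by hand:
 *	mips_cache_info.mci_sdcache_line_size = new_line_size;
 *	mips_dcache_compute_align();
 */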

#include <mips/cache_mipsNN.h>