      1 /*	$NetBSD: cache.h,v 1.36 2018/01/16 08:23:17 mrg Exp $ */
      2 
      3 /*
      4  * Copyright (c) 1996
      5  * 	The President and Fellows of Harvard College. All rights reserved.
      6  * Copyright (c) 1992, 1993
      7  *	The Regents of the University of California.  All rights reserved.
      8  *
      9  * This software was developed by the Computer Systems Engineering group
     10  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
     11  * contributed to Berkeley.
     12  *
     13  * Redistribution and use in source and binary forms, with or without
     14  * modification, are permitted provided that the following conditions
     15  * are met:
     16  * 1. Redistributions of source code must retain the above copyright
     17  *    notice, this list of conditions and the following disclaimer.
     18  * 2. Redistributions in binary form must reproduce the above copyright
     19  *    notice, this list of conditions and the following disclaimer in the
     20  *    documentation and/or other materials provided with the distribution.
     21  * 3. All advertising materials mentioning features or use of this software
     22  *    must display the following acknowledgement:
     23  *	This product includes software developed by Aaron Brown and
     24  *	Harvard University.
     25  *	This product includes software developed by the University of
     26  *	California, Berkeley and its contributors.
     27  * 4. Neither the name of the University nor the names of its contributors
     28  *    may be used to endorse or promote products derived from this software
     29  *    without specific prior written permission.
     30  *
     31  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     32  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     33  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     34  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     35  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     36  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     37  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     38  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     39  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     40  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     41  * SUCH DAMAGE.
     42  *
     43  *	@(#)cache.h	8.1 (Berkeley) 6/11/93
     44  */
     45 
#ifndef SPARC_CACHE_H
#define SPARC_CACHE_H

#if defined(_KERNEL_OPT)
/* Kernel config options: provides the SUN4/SUN4C/SUN4M/SUN4D defines used below. */
#include "opt_sparc_arch.h"
#endif
     52 
     53 /*
     54  * Cache tags can be written in control space, and must be set to 0
     55  * (or invalid anyway) before turning on the cache.  The tags are
     56  * addressed as an array of 32-bit structures of the form:
     57  *
     58  *	struct cache_tag {
     59  *		u_int	:7,		(unused; must be zero)
     60  *			ct_cid:3,	(context ID)
     61  *			ct_w:1,		(write flag from PTE)
     62  *			ct_s:1,		(supervisor flag from PTE)
     63  *			ct_v:1,		(set => cache entry is valid)
     64  *			:3,		(unused; must be zero)
     65  *			ct_tid:14,	(cache tag ID)
     66  *			:2;		(unused; must be zero)
     67  *	};
     68  *
     69  * (The SS2 has 16 MMU contexts, which makes `ct_cid' one bit wider.)
     70  *
     71  * The SPARCstation 1 cache sees virtual addresses as:
     72  *
     73  *	struct cache_va {
     74  *		u_int	:2,		(unused; probably copies of va_tid<13>)
     75  *			cva_tid:14,	(tag ID)
     76  *			cva_line:12,	(cache line number)
     77  *			cva_byte:4;	(byte in cache line)
     78  *	};
     79  *
     80  * (The SS2 cache is similar but has half as many lines, each twice as long.)
     81  *
     82  * Note that, because the 12-bit line ID is `wider' than the page offset,
     83  * it is possible to have one page map to two different cache lines.
     84  * This can happen whenever two different physical pages have the same bits
     85  * in the part of the virtual address that overlaps the cache line ID, i.e.,
     86  * bits <15:12>.  In order to prevent cache duplication, we have to
     87  * make sure that no one page has more than one virtual address where
     88  * (va1 & 0xf000) != (va2 & 0xf000).  (The cache hardware turns off ct_v
     89  * when a cache miss occurs on a write, i.e., if va1 is in the cache and
     90  * va2 is not, and you write to va2, va1 goes out of the cache.  If va1
     91  * is in the cache and va2 is not, reading va2 also causes va1 to become
     92  * uncached, and the [same] data is then read from main memory into the
     93  * cache.)
     94  *
     95  * The other alternative, of course, is to disable caching of aliased
     96  * pages.  (In a few cases this might be faster anyway, but we do it
     97  * only when forced.)
     98  *
     99  * The Sun4, since it has an 8K pagesize instead of 4K, needs to check
    100  * bits that are one position higher.
    101  */
    102 
/* Some more well-known values: */

/*
 * The "alias distance" is the virtual-address stride at which two mappings
 * of the same physical page stop conflicting in the cache; it equals the
 * total cache size (cf. GUESS_CACHE_ALIAS_DIST below).  The "alias bits"
 * are the VA bits above the page offset that select a cache line: two
 * mappings of one page are bad aliases iff they differ in these bits
 * (see BADALIAS() below and the discussion above).
 */
#define	CACHE_ALIAS_DIST_SUN4	0x20000
#define	CACHE_ALIAS_DIST_SUN4C	0x10000

#define	CACHE_ALIAS_BITS_SUN4	0x1e000		/* 8K pages: VA bits <16:13> */
#define	CACHE_ALIAS_BITS_SUN4C	0xf000		/* 4K pages: VA bits <15:12> */

/* HyperSPARC modules with 128K and 256K caches. */
#define CACHE_ALIAS_DIST_HS128k		0x20000
#define CACHE_ALIAS_BITS_HS128k		0x1f000
#define CACHE_ALIAS_DIST_HS256k		0x40000
#define CACHE_ALIAS_BITS_HS256k		0x3f000
    115 
/*
 * Assuming a tag format where the least significant bits are the byte offset
 * into the cache line, and the next-most significant bits are the line id,
 * we can calculate the appropriate aliasing constants. We also assume that
 * the linesize and total cache size are powers of 2.
 */
/* VA bits above the page offset that fall within the cache index. */
#define GUESS_CACHE_ALIAS_BITS		((cpuinfo.cacheinfo.c_totalsize - 1) & ~PGOFSET)
/* One full cache's worth of address space. */
#define GUESS_CACHE_ALIAS_DIST		(cpuinfo.cacheinfo.c_totalsize)
    124 
extern int cache_alias_dist;		/* run-time alias distance (multi-arch kernels) */
extern int cache_alias_bits;		/* run-time alias bits (multi-arch kernels) */
extern u_long dvma_cachealign;		/* alignment for DVMA mappings
					   (presumably cache-derived; see DVMA code) */
    128 
/*
 * Optimize cache alias macros on single architecture kernels:
 * when exactly one of SUN4/SUN4C is configured (and neither SUN4M nor
 * SUN4D), the alias parameters are compile-time constants; otherwise
 * they must come from the run-time variables above.
 */
#if defined(SUN4) && !defined(SUN4C) && !defined(SUN4M) && !defined(SUN4D)
#define	CACHE_ALIAS_DIST	CACHE_ALIAS_DIST_SUN4
#define	CACHE_ALIAS_BITS	CACHE_ALIAS_BITS_SUN4
#elif !defined(SUN4) && defined(SUN4C) && !defined(SUN4M) && !defined(SUN4D)
#define	CACHE_ALIAS_DIST	CACHE_ALIAS_DIST_SUN4C
#define	CACHE_ALIAS_BITS	CACHE_ALIAS_BITS_SUN4C
#else
#define	CACHE_ALIAS_DIST	cache_alias_dist
#define	CACHE_ALIAS_BITS	cache_alias_bits
#endif
    140 
/*
 * True iff a1 and a2 are `bad' aliases (will cause cache duplication):
 * nonzero when the two virtual addresses differ in the VA bits that
 * select a cache line.  Each argument is evaluated exactly once.
 */
#define	BADALIAS(a1, a2) (((int)(a1) ^ (int)(a2)) & CACHE_ALIAS_BITS)
    145 
/*
 * Routines for dealing with the cache.
 */

/* Turn on the cache, per CPU module type. */
void	sun4_cache_enable(void);
void	ms1_cache_enable(void);
void	viking_cache_enable(void);
void	hypersparc_cache_enable(void);
void	swift_cache_enable(void);
void	cypress_cache_enable(void);
void	turbosparc_cache_enable(void);
    156 
/* Sun4/Sun4c virtual-cache flush routines. */
void	sun4_vcache_flush_context(int);		/* flush current context */
void	sun4_vcache_flush_region(int, int);	/* flush region in cur ctx */
void	sun4_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void	sun4_vcache_flush_page(int va, int);	/* flush page in cur ctx */
void	sun4_vcache_flush_page_hw(int va, int);	/* flush page in cur ctx */
void	sun4_cache_flush(void *, u_int);	/* flush range */
    163 
/* SRMMU virtual-cache flush routines. */
void	srmmu_vcache_flush_context(int);	/* flush current context */
void	srmmu_vcache_flush_region(int, int);	/* flush region in cur ctx */
void	srmmu_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void	srmmu_vcache_flush_page(int va, int);	/* flush page in cur ctx */
void	srmmu_vcache_flush_range(int, int, int);/* flush arbitrary range */
void	srmmu_cache_flush(void *, u_int);	/* flush range */
    170 
/* `Fast trap' versions for use in cross-call cache flushes on MP systems */
#if defined(MULTIPROCESSOR)
void	ft_srmmu_vcache_flush_context(int);	/* flush current context */
void	ft_srmmu_vcache_flush_region(int, int);	/* flush region in cur ctx */
void	ft_srmmu_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void	ft_srmmu_vcache_flush_page(int va, int);/* flush page in cur ctx */
void	ft_srmmu_vcache_flush_range(int, int, int);/* flush range in cur ctx */
#else
/* Uniprocessor kernels never cross-call: null function pointers. */
#define ft_srmmu_vcache_flush_context	0
#define ft_srmmu_vcache_flush_region	0
#define ft_srmmu_vcache_flush_segment	0
#define ft_srmmu_vcache_flush_page	0
#define ft_srmmu_vcache_flush_range	0
#endif /* MULTIPROCESSOR */
    185 
/* Module-specific range/page/line flush routines. */
void	ms1_cache_flush(void *, u_int);		/* flush range */
void	viking_cache_flush(void *, u_int);	/* flush range */
void	viking_pcache_flush_page(paddr_t, int);	/* flush page by paddr */
void	srmmu_pcache_flush_line(int, int);	/* flush single cache line */
void	hypersparc_pure_vcache_flush(void);

/* Whole-cache flushes. */
void	ms1_cache_flush_all(void);
void	srmmu_cache_flush_all(void);
void	cypress_cache_flush_all(void);
void	hypersparc_cache_flush_all(void);
    196 
/* Generic do-nothing routine, for machines that need no flush operation. */
extern void sparc_noop(void);

/*
 * No-op stand-ins for each flush operation, cast to the function-pointer
 * types expected by the cpuinfo dispatch macros below.
 */
#define noop_vcache_flush_context	(void (*)(int))sparc_noop
#define noop_vcache_flush_region	(void (*)(int,int))sparc_noop
#define noop_vcache_flush_segment	(void (*)(int,int,int))sparc_noop
#define noop_vcache_flush_page		(void (*)(int,int))sparc_noop
#define noop_vcache_flush_range		(void (*)(int,int,int))sparc_noop
#define noop_cache_flush		(void (*)(void *,u_int))sparc_noop
#define noop_pcache_flush_page		(void (*)(paddr_t,int))sparc_noop
#define noop_pure_vcache_flush		(void (*)(void))sparc_noop
#define noop_cache_flush_all		(void (*)(void))sparc_noop
    208 
/*
 * The SMP versions of the cache flush functions. These functions
 * send a "cache flush" message to each processor.
 */
void	smp_vcache_flush_context(int);		/* flush current context */
void	smp_vcache_flush_region(int,int);	/* flush region in cur ctx */
void	smp_vcache_flush_segment(int, int, int);/* flush seg in cur ctx */
void	smp_vcache_flush_page(int va,int);	/* flush page in cur ctx */
    217 
    218 
/*
 * Indirect cache-flush entry points: dispatch through the per-CPU
 * function pointers in cpuinfo, which are set up for the CPU module
 * at hand (module routine, no-op, or SMP cross-call version).
 */
#define cache_flush_page(va,ctx)	cpuinfo.vcache_flush_page(va,ctx)
#define cache_flush_segment(vr,vs,ctx)	cpuinfo.vcache_flush_segment(vr,vs,ctx)
#define cache_flush_region(vr,ctx)	cpuinfo.vcache_flush_region(vr,ctx)
#define cache_flush_context(ctx)	cpuinfo.vcache_flush_context(ctx)
#define cache_flush(va,len)		cpuinfo.cache_flush(va,len)

/* Physically-addressed cache flush, likewise dispatched via cpuinfo. */
#define pcache_flush_page(pa,flag)	cpuinfo.pcache_flush_page(pa,flag)

/* Shorthand for this CPU's cache description. */
#define CACHEINFO cpuinfo.cacheinfo

#endif /* SPARC_CACHE_H */
    230