/*	$NetBSD: pmap.h,v 1.20 1996/03/31 22:09:16 pk Exp $ */

/*
 * Copyright (c) 1996
 * 	The President and Fellows of Harvard University. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Aaron Brown and
 *	Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/11/93
 *
 * $Id: pmap.h,v 1.20 1996/03/31 22:09:16 pk Exp $
 */

#ifndef	_SPARC_PMAP_H_
#define _SPARC_PMAP_H_

#include <machine/pte.h>

/*
 * Pmap structure.
 *
 * The pmap structure really comes in two variants, one---a single
 * instance---for kernel virtual memory and the other---up to nproc
 * instances---for user virtual memory.  Unfortunately, we have to mash
 * both into the same structure.  Fortunately, they are almost the same.
 *
 * The kernel begins at 0xf8000000 and runs to 0xffffffff (although
 * some of this is not actually used).  Kernel space, including DVMA
 * space (for now?), is mapped identically into all user contexts.
 * There is no point in duplicating this mapping in each user process
 * so they do not appear in the user structures.
 *
 * User space begins at 0x00000000 and runs through 0x1fffffff,
 * then has a `hole', then resumes at 0xe0000000 and runs until it
 * hits the kernel space at 0xf8000000.  This can be mapped
 * contiguously by ignoring the top two bits and pretending the
 * space goes from 0 to 0x37ffffff.  Typically the lower range is
 * used for text+data and the upper for stack, but the code here
 * makes no such distinction.
 *
 * Since each virtual segment covers 256 kbytes, the user space
 * requires 3584 segments, while the kernel (including DVMA) requires
 * only 512 segments.
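 * (Illustrative arithmetic for those counts, assuming the 256 kbyte
 * (0x40000) segment size stated above: user space is
 * (0x20000000 + 0x18000000) / 0x40000 = 3584 segments, and kernel space
 * is 0x08000000 / 0x40000 = 512 segments.)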
 *
 *
 ** FOR THE SUN4/SUN4C
 *
 * The segment map entry for virtual segment vseg is offset in
 * pmap->pm_rsegmap by 0 if pmap is not the kernel pmap, or by
 * NUSEG if it is.  We keep a pointer called pmap->pm_segmap
 * pre-offset by this value.  pmap->pm_segmap thus contains the
 * values to be loaded into the user portion of the hardware segment
 * map so as to reach the proper PMEGs within the MMU.  The kernel
 * mappings are `set early' and are always valid in every context
 * (every change is always propagated immediately).
 *
 * The PMEGs within the MMU are loaded `on demand'; when a PMEG is
 * taken away from context `c', the pmap for context c has its
 * corresponding pm_segmap[vseg] entry marked invalid (the MMU segment
 * map entry is also made invalid at the same time).  Thus
 * pm_segmap[vseg] is the `invalid pmeg' number (127 or 511) whenever
 * the corresponding PTEs are not actually in the MMU.  On the other
 * hand, pm_pte[vseg] is NULL only if no pages in that virtual segment
 * are in core; otherwise it points to a copy of the 32 or 64 PTEs that
 * must be loaded in the MMU in order to reach those pages.
 * pm_npte[vseg] counts the number of valid pages in each vseg.
 *
 * XXX performance: faster to count valid bits?
 *
 * The kernel pmap cannot malloc() PTEs since malloc() will sometimes
 * allocate a new virtual segment.  Since kernel mappings are never
 * `stolen' out of the MMU, we just keep all its PTEs there, and
 * have no software copies.  Its mmu entries are nonetheless kept on lists
 * so that the code that fiddles with mmu lists has something to fiddle.
 *
 ** FOR THE SUN4M
 *
 * On this architecture, the virtual-to-physical translation (page) tables
 * are *not* stored within the MMU as they are in the earlier Sun
 * architectures; instead, they are maintained entirely within physical
 * memory (a TLB caches translations to avoid the performance hit of
 * walking the in-core page tables on every access). Thus there is no
 * need to dynamically allocate PMEGs or SMEGs; only contexts must be shared.
 *
 * We maintain two parallel sets of tables: one is the actual MMU-edible
 * hierarchy of page tables in allocated kernel memory; these tables refer
 * to each other by physical address pointers in SRMMU format (thus they
 * are not very useful to the kernel's management routines). The other set
 * of tables is similar to those used for the Sun4/100's 3-level MMU; it
 * is a hierarchy of regmap and segmap structures which contain kernel virtual
 * pointers to each other. These must (unfortunately) be kept in sync.
 *
 */
#define NKREG	((int)((-(unsigned)KERNBASE) / NBPRG))	/* i.e., 8 */
#define NUREG	(256 - NKREG)				/* i.e., 248 */

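/*
 * struct mmuhd is a tail-queue head type for lists of struct mmuentry;
 * it is used for the per-pmap pm_reglist/pm_seglist members below.
 */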
TAILQ_HEAD(mmuhd,mmuentry);

/*
 * data appearing in both user and kernel pmaps
 *
 * note: if we want the same binaries to work on the 4/4c and 4m, we have to
 *       include the fields for both to make sure that the struct kproc
 * 	 is the same size.
 */
struct pmap {
	union	ctxinfo *pm_ctx;	/* current context, if any */
	int	pm_ctxnum;		/* current context's number */
#if NCPUS > 1
	simple_lock_data_t pm_lock;	/* spinlock */
#endif
	int	pm_refcount;		/* just what it says */

	struct mmuhd	pm_reglist;	/* MMU regions on this pmap (4/4c) */
	struct mmuhd	pm_seglist;	/* MMU segments on this pmap (4/4c) */

	void		*pm_regstore;
	struct regmap	*pm_regmap;

	int		*pm_reg_ptps;	/* SRMMU-edible region table for 4m */
	int		pm_reg_ptps_pa;	/* _Physical_ address of pm_reg_ptps */

	int		pm_gap_start;	/* Starting with this vreg there's */
	int		pm_gap_end;	/* no valid mapping until here */

	struct pmap_statistics	pm_stats;	/* pmap statistics */
};

struct regmap {
	struct segmap	*rg_segmap;	/* points to NSEGRG segmaps */
	int		*rg_seg_ptps;	/* SRMMU-edible segment tables (NULL
					 * indicates an invalid region) (4m) */
	smeg_t		rg_smeg;	/* the MMU region number (4c) */
	u_char		rg_nsegmap;	/* number of valid PMEGs */
};

struct segmap {
	int	*sg_pte;		/* points to NPTESG PTEs */
	pmeg_t	sg_pmeg;		/* the MMU segment number (4c) */
	u_char	sg_npte;		/* number of valid PTEs per seg */
};

typedef struct pmap *pmap_t;
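
#if 0
/*
 * Illustrative sketch only (never compiled): one plausible way the
 * software tables above can be walked from a user virtual address down
 * to its soft PTE copy.  The function name is hypothetical, and VA_VREG,
 * VA_VSEG and VA_VPG are assumed to be the usual va-decomposition macros
 * from the pmap module; this is not a declaration of any real interface.
 */
static __inline int *
pmap_sketch_va_to_pte(struct pmap *pm, vm_offset_t va)
{
	struct regmap *rp = &pm->pm_regmap[VA_VREG(va)];
	struct segmap *sp;

	if (rp->rg_segmap == NULL)
		return (NULL);			/* whole region is invalid */
	sp = &rp->rg_segmap[VA_VSEG(va)];
	if (sp->sg_pte == NULL)
		return (NULL);			/* no pages in core in this vseg */
	return (&sp->sg_pte[VA_VPG(va)]);	/* software copy of the PTE */
}
#endif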

#if 0
struct kvm_cpustate {
	int		kvm_npmemarr;
	struct memarr	kvm_pmemarr[MA_SIZE];
	int		kvm_seginval;			/* [4,4c] */
	struct segmap	kvm_segmap_store[NKREG*NSEGRG];	/* [4,4c] */
}/*not yet used*/;
#endif

#ifdef _KERNEL

#define PMAP_NULL	((pmap_t)0)

extern struct pmap	kernel_pmap_store;
extern vm_offset_t	vm_first_phys, vm_num_phys;

/*
 * Since PTEs also contain type bits, we have to have some way
 * to tell pmap_enter `this is an IO page' or `this is not to
 * be cached'.  Since physical addresses are always aligned, we
 * can do this with the low order bits.
 *
 * The ordering below is important: PMAP_PGTYPE << PG_TNC must give
 * exactly the PG_NC and PG_TYPE bits.
 */
#define	PMAP_OBIO	1		/* tells pmap_enter to use PG_OBIO */
#define	PMAP_VME16	2		/* etc */
#define	PMAP_VME32	3		/* etc */
#define	PMAP_NC		4		/* tells pmap_enter to set PG_NC */

#define PMAP_TYPE4M	0x78		/* mask to get 4m page type */
#define PMAP_PTESHFT4M	25		/* right shift to put type in pte */
#define PMAP_SHFT4M	0x3		/* left shift to extract type */
#define	PMAP_TNC	\
	(CPU_ISSUN4M?127:7)		/* mask to get PG_TYPE & PG_NC */
/*#define PMAP_IOC      0x00800000      -* IO cacheable, NOT shifted */
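
/*
 * Illustrative example only (not part of this interface): a caller mapping
 * an uncached OBIO device page would encode the space type and the
 * no-cache bit into the low bits of the physical address, roughly as in
 *
 *	pmap_enter(pmap_kernel(), va, pa | PMAP_OBIO | PMAP_NC,
 *	    VM_PROT_READ | VM_PROT_WRITE, 1);
 *
 * where va and pa are hypothetical virtual and physical addresses.
 */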


#if xxx
void		pmap_bootstrap __P((int nmmu, int nctx, int nregion));
int		pmap_count_ptes __P((struct pmap *));
void		pmap_prefer __P((vm_offset_t, vm_offset_t *));
int		pmap_pa_exists __P((vm_offset_t));
#endif
int             pmap_dumpsize __P((void));
int             pmap_dumpmmu __P((int (*)__P((dev_t, daddr_t, caddr_t, size_t)),
                                 daddr_t));

#define	pmap_kernel()	(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	pmap_count_ptes(pmap)
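/*
 * managed() relies on unsigned wrap-around: if pa lies below vm_first_phys
 * the subtraction wraps to a very large value, so the single unsigned
 * compare below checks both ends of the managed physical range at once.
 */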
#define	managed(pa)	((unsigned)((pa) - vm_first_phys) < vm_num_phys)

#define PMAP_ACTIVATE(pmap, pcb, iscurproc)
#define PMAP_DEACTIVATE(pmap, pcb)
#define PMAP_PREFER(fo, ap)		pmap_prefer((fo), (ap))

#define PMAP_EXCLUDE_DECLS	/* tells MI pmap.h *not* to include decls */

/* FUNCTION DECLARATIONS FOR COMMON PMAP MODULE */

void		pmap_bootstrap __P((int nmmu, int nctx, int nregion));
int		pmap_count_ptes __P((struct pmap *));
void		pmap_prefer __P((vm_offset_t, vm_offset_t *));
int		pmap_pa_exists __P((vm_offset_t));
void		*pmap_bootstrap_alloc __P((int));
void            pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t));
void            pmap_collect __P((pmap_t));
void            pmap_copy __P((pmap_t,
			       pmap_t, vm_offset_t, vm_size_t, vm_offset_t));
pmap_t          pmap_create __P((vm_size_t));
void            pmap_destroy __P((pmap_t));
void            pmap_init __P((void));
vm_offset_t     pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
void            pmap_pageable __P((pmap_t,
				   vm_offset_t, vm_offset_t, boolean_t));
vm_offset_t     pmap_phys_address __P((int));
void            pmap_pinit __P((pmap_t));
void            pmap_reference __P((pmap_t));
void            pmap_release __P((pmap_t));
void            pmap_remove __P((pmap_t, vm_offset_t, vm_offset_t));
void            pmap_update __P((void));
u_int           pmap_free_pages __P((void));
boolean_t       pmap_next_page __P((vm_offset_t *));
int		pmap_page_index __P((vm_offset_t));
void            pmap_virtual_space __P((vm_offset_t *, vm_offset_t *));
void		pmap_redzone __P((void));
void		kvm_uncache __P((caddr_t, int));
struct user;
void		switchexit __P((vm_map_t, struct user *, int));


/* SUN4/SUN4C SPECIFIC DECLARATIONS */

#if defined(SUN4) || defined(SUN4C)
void            pmap_clear_modify4_4c __P((vm_offset_t pa));
void            pmap_clear_reference4_4c __P((vm_offset_t pa));
void            pmap_copy_page4_4c __P((vm_offset_t, vm_offset_t));
void            pmap_enter4_4c __P((pmap_t,
                    vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
vm_offset_t     pmap_extract4_4c __P((pmap_t, vm_offset_t));
boolean_t       pmap_is_modified4_4c __P((vm_offset_t pa));
boolean_t       pmap_is_referenced4_4c __P((vm_offset_t pa));
void            pmap_page_protect4_4c __P((vm_offset_t, vm_prot_t));
void            pmap_protect4_4c __P((pmap_t,
                    vm_offset_t, vm_offset_t, vm_prot_t));
void            pmap_zero_page4_4c __P((vm_offset_t));
void		pmap_changeprot4_4c __P((pmap_t, vm_offset_t, vm_prot_t, int));
int		mmu_pagein4_4c __P((pmap_t, int, int));

#endif

/* SIMILAR DECLARATIONS FOR SUN4M MODULE */

#if defined(SUN4M)
void            pmap_clear_modify4m __P((vm_offset_t pa));
void            pmap_clear_reference4m __P((vm_offset_t pa));
void            pmap_copy_page4m __P((vm_offset_t, vm_offset_t));
void            pmap_enter4m __P((pmap_t,
                    vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
vm_offset_t     pmap_extract4m __P((pmap_t, vm_offset_t));
boolean_t       pmap_is_modified4m __P((vm_offset_t pa));
boolean_t       pmap_is_referenced4m __P((vm_offset_t pa));
void            pmap_page_protect4m __P((vm_offset_t, vm_prot_t));
void            pmap_protect4m __P((pmap_t,
                    vm_offset_t, vm_offset_t, vm_prot_t));
void            pmap_zero_page4m __P((vm_offset_t));
void		pmap_changeprot4m __P((pmap_t, vm_offset_t, vm_prot_t, int));
int		mmu_pagein4m __P((pmap_t, int, int));

#endif /* defined SUN4M */

#if !defined(SUN4M) && (defined(SUN4) || defined(SUN4C))

#define	  	pmap_clear_modify	pmap_clear_modify4_4c
#define		pmap_clear_reference	pmap_clear_reference4_4c
#define		pmap_copy_page		pmap_copy_page4_4c
#define		pmap_enter		pmap_enter4_4c
#define		pmap_extract		pmap_extract4_4c
#define		pmap_is_modified	pmap_is_modified4_4c
#define		pmap_is_referenced	pmap_is_referenced4_4c
#define		pmap_page_protect	pmap_page_protect4_4c
#define		pmap_protect		pmap_protect4_4c
#define		pmap_zero_page		pmap_zero_page4_4c
#define		pmap_changeprot		pmap_changeprot4_4c
#define		mmu_pagein		mmu_pagein4_4c

#elif defined(SUN4M) && !(defined(SUN4) || defined(SUN4C))

#define	  	pmap_clear_modify	pmap_clear_modify4m
#define		pmap_clear_reference	pmap_clear_reference4m
#define		pmap_copy_page		pmap_copy_page4m
#define		pmap_enter		pmap_enter4m
#define		pmap_extract		pmap_extract4m
#define		pmap_is_modified	pmap_is_modified4m
#define		pmap_is_referenced	pmap_is_referenced4m
#define		pmap_page_protect	pmap_page_protect4m
#define		pmap_protect		pmap_protect4m
#define		pmap_zero_page		pmap_zero_page4m
#define		pmap_changeprot		pmap_changeprot4m
#define		mmu_pagein		mmu_pagein4m

#else  /* must use function pointers */

extern void            	(*pmap_clear_modify_p) __P((vm_offset_t pa));
extern void            	(*pmap_clear_reference_p) __P((vm_offset_t pa));
extern void            	(*pmap_copy_page_p) __P((vm_offset_t, vm_offset_t));
extern void            	(*pmap_enter_p) __P((pmap_t,
		            vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
extern vm_offset_t     	(*pmap_extract_p) __P((pmap_t, vm_offset_t));
extern boolean_t       	(*pmap_is_modified_p) __P((vm_offset_t pa));
extern boolean_t       	(*pmap_is_referenced_p) __P((vm_offset_t pa));
extern void            	(*pmap_page_protect_p) __P((vm_offset_t, vm_prot_t));
extern void            	(*pmap_protect_p) __P((pmap_t,
		            vm_offset_t, vm_offset_t, vm_prot_t));
extern void            	(*pmap_zero_page_p) __P((vm_offset_t));
extern void	       	(*pmap_changeprot_p) __P((pmap_t, vm_offset_t,
		            vm_prot_t, int));
extern int		(*mmu_pagein_p) __P((pmap_t, int, int));

#define	  	pmap_clear_modify	(*pmap_clear_modify_p)
#define		pmap_clear_reference	(*pmap_clear_reference_p)
#define		pmap_copy_page		(*pmap_copy_page_p)
#define		pmap_enter		(*pmap_enter_p)
#define		pmap_extract		(*pmap_extract_p)
#define		pmap_is_modified	(*pmap_is_modified_p)
#define		pmap_is_referenced	(*pmap_is_referenced_p)
#define		pmap_page_protect	(*pmap_page_protect_p)
#define		pmap_protect		(*pmap_protect_p)
#define		pmap_zero_page		(*pmap_zero_page_p)
#define		pmap_changeprot		(*pmap_changeprot_p)
#define		mmu_pagein		(*mmu_pagein_p)
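
/*
 * Explanatory note (not part of the original header): in a kernel
 * configured for both the sun4/sun4c and sun4m pmap modules, a call such
 * as pmap_enter(pm, va, pa, prot, wired) therefore compiles into an
 * indirect call through pmap_enter_p; the pmap bootstrap code is expected
 * to point these function pointers at the 4_4c or 4m variants appropriate
 * to the machine actually booted.
 */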

#endif

#endif /* _KERNEL */

#endif /* _SPARC_PMAP_H_ */