/*	$NetBSD: pmap.h,v 1.9 1994/12/10 11:44:28 pk Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/11/93
 */

#ifndef	_SPARC_PMAP_H_
#define _SPARC_PMAP_H_

#include <machine/pte.h>

/*
 * Pmap structure.
 *
 * The pmap structure really comes in two variants, one---a single
 * instance---for kernel virtual memory and the other---up to nproc
 * instances---for user virtual memory.  Unfortunately, we have to mash
 * both into the same structure.  Fortunately, they are almost the same.
 *
 * The kernel begins at 0xf8000000 and runs to 0xffffffff (although
 * some of this is not actually used).  Kernel space, including DVMA
 * space (for now?), is mapped identically into all user contexts.
 * There is no point in duplicating this mapping in each user process,
 * so it does not appear in the user structures.
 *
 * User space begins at 0x00000000 and runs through 0x1fffffff,
 * then has a `hole', then resumes at 0xe0000000 and runs until it
 * hits the kernel space at 0xf8000000.  This can be mapped
 * contiguously by ignoring the top two bits and pretending the
 * space goes from 0 to 0x37ffffff.  Typically the lower range is
 * used for text+data and the upper for stack, but the code here
 * makes no such distinction.
 *
 * Since each virtual segment covers 256 kbytes, the user space
 * requires 3584 segments, while the kernel (including DVMA) requires
 * only 512 segments.
 *
 * The segment map entry for virtual segment vseg lives in the backing
 * segment map (us_segmap or ks_segmap below), offset by 0 if pmap is
 * not the kernel pmap, or by NUSEG if it is.  We keep a pointer called
 * pmap->pm_segmap pre-offset by this value.  pmap->pm_segmap thus
 * contains the values to be loaded into the user portion of the
 * hardware segment map so as to reach the proper PMEGs within the MMU.
 * The kernel mappings are `set early' and are always valid in every
 * context (every change is always propagated immediately).
 *
 * The PMEGs within the MMU are loaded `on demand'; when a PMEG is
 * taken away from context `c', the pmap for context c has its
 * corresponding pm_segmap[vseg] entry marked invalid (the MMU segment
 * map entry is also made invalid at the same time).  Thus
 * pm_segmap[vseg] is the `invalid pmeg' number (127 or 511) whenever
 * the corresponding PTEs are not actually in the MMU.  On the other
 * hand, pm_pte[vseg] is NULL only if no pages in that virtual segment
 * are in core; otherwise it points to a copy of the 32 or 64 PTEs that
 * must be loaded in the MMU in order to reach those pages.
 * pm_npte[vseg] counts the number of valid pages in each vseg.
 *
 * XXX performance: faster to count valid bits?
 *
 * The kernel pmap cannot malloc() PTEs since malloc() will sometimes
 * allocate a new virtual segment.  Since kernel mappings are never
 * `stolen' out of the MMU, we just keep all its PTEs there, and
 * have no software copies.  Its MMU entries are nonetheless kept on
 * lists so that the code that fiddles with MMU lists has something
 * to fiddle with.
 */
#define	NKSEG	((int)((-(unsigned)KERNBASE) / NBPSG))	/* i.e., 512 */
#define	NUSEG	(4096 - NKSEG)				/* i.e., 3584 */

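/*
 * Illustrative sketch (not part of the original header): the virtual
 * segment number for an address, with the hole folded out by ignoring
 * the top two address bits as described above.  The shift 18 is
 * log2(NBPSG) for 256 kbyte segments; the macro name is made up here
 * for illustration only.  User vsegs are then 0..NUSEG-1 and kernel
 * vsegs NUSEG..4095.
 */
#define	VA_VSEG_EXAMPLE(va)	((int)(((unsigned)(va) & 0x3fffffff) >> 18))
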
/* data appearing in both user and kernel pmaps */
struct pmap {
	union	ctxinfo *pm_ctx;	/* current context, if any */
	int	pm_ctxnum;		/* current context's number */
#if NCPUS > 1
	simple_lock_data_t pm_lock;	/* spinlock */
#endif
	int	pm_refcount;		/* just what it says */
	struct	mmuentry *pm_mmuforw;	/* pmap pmeg chain */
	struct	mmuentry **pm_mmuback;	/* (two way street) */
	void	*pm_segstore;		/* backing u/ksegmap storage */
	pmeg_t	*pm_segmap;		/* points to us/ks_segmap per above */
	u_char	*pm_npte;		/* points to us/ks_npte */
	int	**pm_pte;		/* points to us/ks_pte */
	int	pm_gap_start;		/* Starting with this vseg there's */
	int	pm_gap_end;		/* no valid mapping until here */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
};
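
/*
 * Illustrative sketch (not part of the original header): fetching the
 * software copy of a PTE through the pointers above.  Only user pmaps
 * keep software PTE copies (ks_pte entries are always NULL); PGSHIFT
 * (12 for 4 kbyte pages) is assumed to come from <machine/param.h>.
 */
#if 0
static int
pmap_swpte_example(struct pmap *pm, unsigned int va)
{
	int vseg = VA_VSEG_EXAMPLE(va);
	int *pte = pm->pm_pte[vseg];

	if (pte == NULL)
		return (0);	/* no pages of this vseg are in core */
	/* index by the page number within the 256k segment */
	return (pte[(va & (NBPSG - 1)) >> PGSHIFT]);
}
#endif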

/* data appearing only in user pmaps */
struct usegmap {
	pmeg_t	us_segmap[NUSEG];	/* segment map */
	u_char	us_npte[NUSEG];		/* number of valid PTEs per seg */
	int	*us_pte[NUSEG];		/* points to PTEs for valid segments */
};

/* data appearing only in the kernel pmap */
struct ksegmap {
	pmeg_t	ks_segmap[NKSEG];	/* segment map */
	u_char	ks_npte[NKSEG];		/* number of valid PTEs per kseg */
	int	*ks_pte[NKSEG];		/* always NULL */
};
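
/*
 * Illustrative sketch (not part of the original header): the pre-offset
 * pm_segmap pointer described above, arranged so that pm_segmap[vseg]
 * works for both kinds of pmap:
 *
 *	user pmap:	pm->pm_segmap = us->us_segmap;		(vseg 0..NUSEG-1)
 *	kernel pmap:	pm->pm_segmap = ks->ks_segmap - NUSEG;	(vseg NUSEG..4095)
 *
 * pm_npte and pm_pte would be offset the same way.
 */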

typedef struct pmap *pmap_t;

#ifdef KERNEL

#define PMAP_NULL	((pmap_t)0)

extern struct pmap	kernel_pmap_store;
extern struct ksegmap	kernel_segmap_store;
extern pmap_t		kernel_pmap;

/* nothing to do on activation/deactivation; these are no-ops here */
#define PMAP_ACTIVATE(pmap, pcb, iscurproc)
#define PMAP_DEACTIVATE(pmap, pcb)

/*
 * Since PTEs also contain type bits, we have to have some way
 * to tell pmap_enter `this is an IO page' or `this is not to
 * be cached'.  Since physical addresses are always aligned, we
 * can do this with the low order bits.
 *
 * The ordering below is important: PMAP_PGTYPE << PG_TNC must give
 * exactly the PG_NC and PG_TYPE bits.
 */
#define	PMAP_OBIO	1		/* tells pmap_enter to use PG_OBIO */
#define	PMAP_VME16	2		/* etc */
#define	PMAP_VME32	3		/* etc */
#define	PMAP_NC		4		/* tells pmap_enter to set PG_NC */
#define	PMAP_TNC	7		/* mask to get PG_TYPE & PG_NC */
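
/*
 * Illustrative sketch (not part of the original header): a caller marks
 * a device page by ORing a type into the low (always-zero) bits of the
 * aligned physical address, e.g.
 *
 *	pmap_enter(kernel_pmap, va, pa | PMAP_OBIO | PMAP_NC,
 *	    VM_PROT_READ | VM_PROT_WRITE, 1);
 *
 * pmap_enter() then strips the PMAP_TNC bits back out of the address
 * and turns them into the PG_TYPE and PG_NC bits of the PTE.
 */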

void	pmap_bootstrap __P((int nmmu, int nctx));
void	pmap_init __P((vm_offset_t phys_start, vm_offset_t phys_end));
int	pmap_count_ptes __P((struct pmap *));
vm_offset_t	pmap_prefer __P((vm_offset_t, vm_offset_t));

#define	pmap_resident_count(pmap)	pmap_count_ptes(pmap)

#define PMAP_PREFER(pa,va)		pmap_prefer((pa),(va))
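
/*
 * (Editorial note, not in the original source: pmap_prefer() is the
 * hook by which the VM system biases the virtual address it picks for
 * a mapping, presumably so that mappings of the same physical page
 * line up in the virtually-addressed cache and avoid aliasing.)
 */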

#endif /* KERNEL */

#endif /* _SPARC_PMAP_H_ */