/*	$NetBSD: pmap.h,v 1.53.2.1 2001/03/05 22:49:16 nathanw Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef	_I386_PMAP_H_
#define	_I386_PMAP_H_

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_user_ldt.h"
#include "opt_largepages.h"
#endif

#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <uvm/uvm_object.h>

/*
 * see pte.h for a description of i386 MMU terminology and hardware
 * interface.
 *
 * a pmap describes a process's 4GB virtual address space.  this
 * virtual address space can be broken up into 1024 4MB regions which
 * are described by PDEs in the PDP.  the PDEs are defined as follows:
 *
 * (ranges are inclusive -> exclusive, just like vm_map_entry start/end)
 * (the following assumes that KERNBASE is 0xc0000000)
 *
 * PDE#s	VA range		usage
 * 0->767	0x0 -> 0xbfc00000	user address space, note that the
 *					max user address is 0xbfbfe000
 *					the final two pages in the last 4MB
 *					used to be reserved for the UAREA
 *					but now are no longer used
 * 767		0xbfc00000->		recursive mapping of PDP (used for
 *			0xc0000000	linear mapping of PTPs)
 * 768->1023	0xc0000000->		kernel address space (constant
 *			0xffc00000	across all pmaps/processes)
 * 1023		0xffc00000->		"alternate" recursive PDP mapping
 *			<end>		(for other pmaps)
 *
 *
 * note: a recursive PDP mapping provides a way to map all the PTEs for
 * a 4GB address space into a linear chunk of virtual memory.  in other
 * words, the PTE for page 0 is the first int mapped into the 4MB recursive
 * area.  the PTE for page 1 is the second int.  the very last int in the
 * 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
 * address).
 *
 * all pmaps' PDs must have the same values in slots 768->1023 so that
 * the kernel is always mapped in every process.  these values are loaded
 * into the PD at pmap creation time.
 *
 * at any one time only one pmap can be active on a processor.  this is
 * the pmap whose PDP is pointed to by processor register %cr3.  this pmap
 * will have all its PTEs mapped into memory at the recursive mapping
 * point (slot #767 as shown above).  when the pmap code wants to find the
 * PTE for a virtual address, all it has to do is the following:
 *
 * address of PTE = (767 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
 *                = 0xbfc00000 + (VA / 4096) * 4
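 *
 * for example, for VA 0xc0100000 (an arbitrary kernel address):
 *
 * address of PTE = 0xbfc00000 + (0xc0100000 / 4096) * 4
 *                = 0xbfc00000 + 0xc0100 * 4
 *                = 0xbff00400
 *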
 * what happens if the pmap layer is asked to perform an operation
 * on a pmap that is not the one which is currently active?  in that
 * case we take the PA of the PDP of the non-active pmap and put it in
 * slot 1023 of the active pmap.  this causes the non-active pmap's
 * PTEs to get mapped in the final 4MB of the 4GB address space
 * (e.g. starting at 0xffc00000).
 *
 * the following figure shows the effects of the recursive PDP mapping:
 *
 *   PDP (%cr3)
 *   +----+
 *   |   0| -> PTP#0 that maps VA 0x0 -> 0x400000
 *   |    |
 *   |    |
 *   | 767| -> points back to PDP (%cr3) mapping VA 0xbfc00000 -> 0xc0000000
 *   | 768| -> first kernel PTP (maps 0xc0000000 -> 0xc0400000)
 *   |    |
 *   |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
 *   +----+
 *
 * note that the PDE#767 VA (0xbfc00000) is defined as "PTE_BASE"
 * note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE"
 *
 * starting at VA 0xbfc00000 the current active PDP (%cr3) acts as a
 * PTP:
 *
 * PTP#767 == PDP(%cr3) => maps VA 0xbfc00000 -> 0xc0000000
 *   +----+
 *   |   0| -> maps the contents of PTP#0 at VA 0xbfc00000->0xbfc01000
 *   |    |
 *   |    |
 *   | 767| -> maps contents of PTP#767 (the PDP) at VA 0xbfeff000
 *   | 768| -> maps contents of first kernel PTP
 *   |    |
 *   |1023|
 *   +----+
 *
 * note that the mapping of the PDP at PTP#767's VA (0xbfeff000) is
 * defined as "PDP_BASE".... within that mapping there are two
 * defines:
 *   "PDP_PDE" (0xbfeffbfc) is the VA of the PDE in the PDP
 *      which points back to itself.
 *   "APDP_PDE" (0xbfeffffc) is the VA of the PDE in the PDP which
 *      establishes the recursive mapping of the alternate pmap.
 *      to set the alternate PDP, one just has to put the correct
 *	PA info in *APDP_PDE.
 *
 * note that in the APTE_BASE space, the APDP appears at VA
 * "APDP_BASE" (0xfffff000).
 */

/*
 * the following defines identify the slots used as described above.
 */

#define PDSLOT_PTE	((KERNBASE/NBPD)-1) /* 767: for recursive PDP map */
#define PDSLOT_KERN	(KERNBASE/NBPD)	    /* 768: start of kernel space */
#define PDSLOT_APTE	((unsigned)1023) /* 1023: alternative recursive slot */

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 */

#define PTE_BASE	((pt_entry_t *)  (PDSLOT_PTE * NBPD) )
#define APTE_BASE	((pt_entry_t *)  (PDSLOT_APTE * NBPD) )
#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE		(PDP_BASE + PDSLOT_PTE)
#define APDP_PDE	(PDP_BASE + PDSLOT_APTE)
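
/*
 * as a concrete check of the defines above (assuming KERNBASE is
 * 0xc0000000, NBPD is 4MB, and NBPG is 4096, as elsewhere in this file):
 *
 *   PTE_BASE  == 767 * 0x400000             == 0xbfc00000
 *   APTE_BASE == 1023 * 0x400000            == 0xffc00000
 *   PDP_BASE  == 0xbfc00000 + 767 * 0x1000  == 0xbfeff000
 *   PDP_PDE   == 0xbfeff000 + 767 * 4       == 0xbfeffbfc
 *   APDP_PDE  == 0xbfeff000 + 1023 * 4      == 0xbfeffffc
 */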

/*
 * XXXCDC: tmp xlate from old names:
 * PTDPTDI -> PDSLOT_PTE
 * KPTDI -> PDSLOT_KERN
 * APTDPTDI -> PDSLOT_APTE
 */

/*
 * the following define determines how many PTPs should be set up for the
 * kernel by locore.s at boot time.  this should be large enough to
 * get the VM system running.  once the VM system is running, the
 * pmap module can add more PTPs to the kernel area on demand.
 */

#ifndef NKPTP
#define NKPTP		4	/* 16MB to start */
#endif
#define NKPTP_MIN	4	/* smallest value we allow */
#define NKPTP_MAX	(1024 - (KERNBASE/NBPD) - 1)
				/* largest value (-1 for APTP space) */

/*
 * pdei/ptei: generate index into PDP/PTP from a VA
 */
#define	pdei(VA)	(((VA) & PD_MASK) >> PDSHIFT)
#define	ptei(VA)	(((VA) & PT_MASK) >> PGSHIFT)
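
/*
 * for example, for VA 0xc0100000 (an arbitrary kernel address):
 * pdei() yields (0xc0100000 & PD_MASK) >> 22 == 768 (the first kernel
 * PDE) and ptei() yields (0xc0100000 & PT_MASK) >> 12 == 256.
 */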

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
 *           NBPD == number of bytes a PTP can map (4MB)
 */

#define ptp_i2o(I)	((I) * NBPG)	/* index => offset */
#define ptp_o2i(O)	((O) / NBPG)	/* offset => index */
#define ptp_i2v(I)	((I) * NBPD)	/* index => VA */
#define ptp_v2i(V)	((V) / NBPD)	/* VA => index (same as pdei) */
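
/*
 * for example, the first kernel PTP has index 768, so ptp_i2v(768) ==
 * 0xc0000000 and ptp_i2o(768) == 0x300000 (its byte offset within the
 * 4MB linear PTE area).
 */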

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
/* PG_AVAIL3 not used */

#ifdef _KERNEL
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 */

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	u_int32_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
};
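
/*
 * a minimal sketch (not the real pmap_activate(); that lives in pmap.c)
 * of how pm_pdirpa is consumed: making a pmap active is just a matter of
 * loading the PA of its PD into %cr3.  "example_activate" is a
 * hypothetical name.
 */
#if 0
static __inline void
example_activate(struct pmap *pmap)
{
	lcr3(pmap->pm_pdirpa);	/* %cr3 := PA of PD (flushes the TLB) */
}
#endif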

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * for each managed physical page we maintain a list of <PMAP,VA> pairs
 * at which it is mapped.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * of which describes one mapping).
 */

struct pv_entry;

struct pv_head {
	simple_lock_data_t pvh_lock;	/* locks every pv on this list */
	struct pv_entry *pvh_list;	/* head of list (locked by pvh_lock) */
};

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};
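
/*
 * a minimal sketch (assuming "pvh" points to a managed page's pv_head
 * and pvh_lock is already held) of walking the mappings of a page; this
 * is the pattern the pv-based operations in pmap.c follow:
 */
#if 0
	struct pv_entry *pve;

	for (pve = pvh->pvh_list; pve != NULL; pve = pve->pv_next) {
		/* pve->pv_pmap maps this page at VA pve->pv_va */
	}
#endif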

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
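
/*
 * for example, on the i386 (and assuming no compiler padding) both
 * structures are 16 bytes, so PVE_PER_PVPAGE works out to
 * (4096 - 16) / 16 == 255 pv_entrys per pv_page.
 */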

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush TLB.  if we have more than PMAP_RR_MAX then we stop recording.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};
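
/*
 * a minimal sketch (hypothetical helper; the real consumers live in
 * pmap.c) of how a filled-in record would be flushed: invalidate page
 * by page while the record is complete, otherwise flush the whole TLB.
 */
#if 0
static __inline void
example_flush_prr(struct pmap_remove_record *prr)
{
	int i;

	if (prr->prr_npages > PMAP_RR_MAX)
		tlbflush();	/* recording stopped; flush everything */
	else
		for (i = 0; i < prr->prr_npages; i++)
			pmap_update_pg(prr->prr_vas[i]);
}
#endif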

/*
 * global kernel variables
 */

/* PTDpaddr: the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int nkpde;			/* current # of PDEs for kernel */
extern int pmap_pg_g;			/* do we support PG_G? */

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_update()			/* nothing (yet) */

#define pmap_clear_modify(pg)		pmap_change_attrs(pg, 0, PG_M)
#define pmap_clear_reference(pg)	pmap_change_attrs(pg, 0, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		i386_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

/*
 * prototypes
 */

void		pmap_activate __P((struct lwp *));
void		pmap_bootstrap __P((vaddr_t));
boolean_t	pmap_change_attrs __P((struct vm_page *, int, int));
void		pmap_deactivate __P((struct lwp *));
static void	pmap_page_protect __P((struct vm_page *, vm_prot_t));
void		pmap_page_remove __P((struct vm_page *));
static void	pmap_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));
void		pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t	pmap_test_attrs __P((struct vm_page *, int));
static void	pmap_update_pg __P((vaddr_t));
static void	pmap_update_2pg __P((vaddr_t, vaddr_t));
void		pmap_write_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));

vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zeroing uncached to avoid polluting the cache.
 */
boolean_t	pmap_zero_page_uncached __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_zero_page_uncached((pa))

/*
 * inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void
pmap_update_pg(va)
	vaddr_t va;
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
		invlpg((u_int) va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void
pmap_update_2pg(va, vb)
	vaddr_t va, vb;
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
	{
		invlpg((u_int) va);
		invlpg((u_int) vb);
	}
}
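
/*
 * a minimal usage sketch (hypothetical; "va" is some mapped kernel VA):
 * after a PTE is modified in place, the stale TLB entry for that VA
 * must be flushed before the change is reliably visible.
 */
#if 0
	pt_entry_t *pte = kvtopte(va);

	*pte &= ~PG_RW;		/* e.g. revoke write access */
	pmap_update_pg(va);	/* flush the stale TLB entry */
#endif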

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_change_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_page_protect(pg, prot)
	struct vm_page *pg;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_change_attrs(pg, PG_RO, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_protect(pmap, sva, eva, prot)
	struct pmap *pmap;
	vaddr_t sva, eva;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t *
vtopte(vaddr_t va)
{

	KASSERT(va < (PDSLOT_KERN << PDSHIFT));

	return (PTE_BASE + i386_btop(va));
}

static __inline pt_entry_t *
kvtopte(vaddr_t va)
{

	KASSERT(va >= (PDSLOT_KERN << PDSHIFT));

#ifdef LARGEPAGES
	{
		pd_entry_t *pde;

		pde = PDP_BASE + pdei(va);
		if (*pde & PG_PS)
			return ((pt_entry_t *)pde);
	}
#endif

	return (PTE_BASE + i386_btop(va));
}
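
/*
 * a minimal sketch (hypothetical helper; the real vtophys() is declared
 * below) of a kernel VA -> PA lookup built on kvtopte(): mask the frame
 * out of the PTE and put back the in-page offset.  note this ignores
 * the LARGEPAGES (4MB page) case.
 */
#if 0
static __inline paddr_t
example_kva_to_pa(vaddr_t va)
{
	pt_entry_t *pte = kvtopte(va);

	KASSERT(pmap_valid_entry(*pte));
	return ((*pte & PG_FRAME) | (va & ~PG_FRAME));
}
#endif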

paddr_t vtophys __P((vaddr_t));
vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));

#if defined(USER_LDT)
void	pmap_ldt_cleanup __P((struct lwp *));
#define	PMAP_FORK
#endif /* USER_LDT */

#endif /* _KERNEL */
#endif	/* _I386_PMAP_H_ */