/*	$NetBSD: pmap.h,v 1.34 1998/02/10 14:11:58 mrg Exp $	*/

/*
 * Copyright (c) 1995 Charles M. Hannum.  All rights reserved.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	7.4 (Berkeley) 5/12/91
 */

/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from hp300:	@(#)pmap.h	7.2 (Berkeley) 12/16/90
 */
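
/*
 * Sketch of the recursive scheme (illustrative only; the real setup is
 * done by locore at boot): one pde slot, PTDPTDI below, is pointed at
 * the physical page of the page directory itself, roughly
 *
 *	PTD[PTDPTDI] = PTD_physaddr | PG_V | PG_RW;
 *
 * so the 4MB of virtual space starting at PTDPTDI << PDSHIFT exposes
 * every pte of the current address space as one linear array (PTmap
 * below), and the page directory itself appears within that window
 * as PTD.
 */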

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_pmap_new.h"
#endif

#ifdef PMAP_NEW			/* redirect */
#include <machine/pmap.new.h>	/* defines _I386_PMAP_H_ */
#endif

#ifndef	_I386_PMAP_H_
#define	_I386_PMAP_H_

#include <machine/cpufunc.h>
#include <machine/pte.h>

/*
 * 386 page table entry and page table directory
 * W.Jolitz, 8/89
 */

/*
 * PG_AVAIL usage ...
 */

#define PG_W         PG_AVAIL1       /* "wired" mapping */

/*
 * One page directory, shared between
 * kernel and user modes.
 */
#define	PTDPTDI		0x3bf		/* ptd entry that points to ptd! */
#define	KPTDI		0x3c0		/* start of kernel virtual pde's */
#define	NKPDE_BASE	4		/* min. # of kernel PDEs */
#define	NKPDE_MAX	63		/* max. # of kernel PDEs */
#define	NKPDE_SCALE	1		/* # of kernel PDEs to add per meg. */
#define	APTDPTDI	0x3ff		/* start of alternate page directory */

#define UPT_MIN_ADDRESS	(PTDPTDI<<PDSHIFT)
#define UPT_MAX_ADDRESS	(UPT_MIN_ADDRESS + (PTDPTDI<<PGSHIFT))
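
/*
 * Worked example (illustrative, assuming the usual i386 values from
 * pte.h: PDSHIFT == 22, PGSHIFT == 12):
 *
 *	UPT_MIN_ADDRESS == 0x3bf << 22                 == 0xefc00000
 *	UPT_MAX_ADDRESS == 0xefc00000 + (0x3bf << 12)  == 0xeffbf000
 *
 * i.e. the recursively mapped page tables occupy the 4MB slot just
 * below KPTDI << 22 == 0xf0000000, where the kernel pde's begin.
 * Note also that KPTDI + NKPDE_MAX == 0x3ff == APTDPTDI, so the last
 * pde slot is left for the alternate map.
 */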

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
extern pt_entry_t	PTmap[], APTmap[], Upte;
extern pd_entry_t	PTD[], APTD[], PTDpde, APTDpde, Upde;
extern pt_entry_t	*Sysmap;

extern int	PTDpaddr;	/* physical address of kernel PTD */

void pmap_bootstrap __P((vm_offset_t start));
boolean_t pmap_testbit __P((vm_offset_t, int));
void pmap_changebit __P((vm_offset_t, int, int));
#endif

/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))
#define	kvtopte(va)	vtopte(va)
#define	ptetov(pt)	(i386_ptob(pt - PTmap))
#define	vtophys(va) \
	((*vtopte(va) & PG_FRAME) | ((unsigned)(va) & ~PG_FRAME))
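
/*
 * Worked example (illustrative; assumes locore places PTmap at
 * UPT_MIN_ADDRESS and that i386_btop(va) == (va) >> PGSHIFT):
 *
 *	vtopte(0xf0123456)
 *	    == PTmap + i386_btop(0xf0123456)
 *	    == (pt_entry_t *)0xefc00000 + 0xf0123
 *	    == (pt_entry_t *)0xeffc048c
 *
 * The hardware resolves that address through PTD[PTDPTDI] (which
 * points back at the page directory) and then PTD[KPTDI], landing on
 * the pte for 0xf0123000 without any software page-table walk.
 * vtophys() then merges the frame bits of that pte with the page
 * offset of va.
 */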

#define	avtopte(va)	(APTmap + i386_btop(va))
#define	ptetoav(pt)	(i386_ptob(pt - APTmap))
#define	avtophys(va) \
	((*avtopte(va) & PG_FRAME) | ((unsigned)(va) & ~PG_FRAME))

/*
 * macros to generate page directory/table indices
 */
#define	pdei(va)	(((va) & PD_MASK) >> PDSHIFT)
#define	ptei(va)	(((va) & PT_MASK) >> PGSHIFT)
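
/*
 * For instance (assuming PD_MASK == 0xffc00000 and PT_MASK ==
 * 0x003ff000 from pte.h):
 *
 *	pdei(0xf0123456) == 0x3c0	(the first kernel pde, i.e. KPTDI)
 *	ptei(0xf0123456) == 0x123
 */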

/*
 * Pmap stuff
 */
typedef struct pmap {
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	short			pm_dref;	/* page directory ref count */
	short			pm_count;	/* pmap reference count */
	simple_lock_data_t	pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_ptpages;	/* more stats: PT pages */
} *pmap_t;

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry, the list is pv_table.
 */
struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
};

struct pv_page;

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};

/*
 * This is basically:
 * ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
 */
#define	NPVPPG	340

struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};
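
/*
 * Quick check of NPVPPG above (illustrative, assuming 32-bit pointers
 * and NBPG == 4096): sizeof(struct pv_page_info) is 16 bytes (two
 * TAILQ pointers, the freelist pointer and pgi_nfree) and
 * sizeof(struct pv_entry) is 12 bytes, so
 *
 *	(4096 - 16) / 12 == 340
 *
 * which matches the constant.
 */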

#ifdef	_KERNEL
extern int		nkpde;		/* number of kernel page dir. ents */
extern struct pmap	kernel_pmap_store;

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_update()			tlbflush()

vm_offset_t reserve_dumppages __P((vm_offset_t));

/* Clear the modified (PG_M) bit in every mapping of physical page pa. */
static __inline void
pmap_clear_modify(vm_offset_t pa)
{
	pmap_changebit(pa, 0, ~PG_M);
}

/* Clear the referenced (PG_U) bit in every mapping of physical page pa. */
static __inline void
pmap_clear_reference(vm_offset_t pa)
{
	pmap_changebit(pa, 0, ~PG_U);
}

/* Revoke write permission from every mapping of physical page pa. */
static __inline void
pmap_copy_on_write(vm_offset_t pa)
{
	pmap_changebit(pa, PG_RO, ~PG_RW);
}

/* Has any mapping of physical page pa been written to? */
static __inline boolean_t
pmap_is_modified(vm_offset_t pa)
{
	return pmap_testbit(pa, PG_M);
}

/* Has any mapping of physical page pa been referenced? */
static __inline boolean_t
pmap_is_referenced(vm_offset_t pa)
{
	return pmap_testbit(pa, PG_U);
}

/* Convert a physical page number to a physical address. */
static __inline vm_offset_t
pmap_phys_address(int ppn)
{
	return i386_ptob(ppn);
}
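
/*
 * Typical use from the machine-independent VM code (an illustrative
 * sketch only, not part of this header):
 *
 *	if (pmap_is_modified(pa)) {
 *		... clean the page ...
 *		pmap_clear_modify(pa);
 *	}
 *
 * All of these take a physical address; pmap_testbit()/pmap_changebit()
 * do the work by walking the pv list for that page and examining or
 * rewriting the relevant ptes.
 */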

#endif	/* _KERNEL */

#endif /* _I386_PMAP_H_ */