/*	$NetBSD: pmap.h,v 1.24 2009/04/22 10:17:48 cegger Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define	_X86_PMAP_H_

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
        (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define	pl_i_roundup(va, lvl)	pl_i((va) + ~ptp_masks[(lvl)-1], (lvl))
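
/*
 * Sketch only (kept under #if 0, never compiled): how the pl*_i
 * indexes are typically consumed.  PTE_BASE is the recursive PTE
 * window declared elsewhere in this port; the function below is a
 * hypothetical illustration, not part of the pmap API.
 */
#if 0
static __inline pt_entry_t
example_pte_of(vaddr_t va)
{

	/* pl1_i() indexes the flat virtual array of all level-1 PTEs */
	return PTE_BASE[pl1_i(va)];
}
#endif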

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)

/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif


#if defined(_KERNEL)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0. Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 *
 * XXX If we ever support processor numbers higher than 31, we'll have
 * XXX to rethink the CPU mask.
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
#ifdef PAE
	paddr_t pm_pdirpa[PDP_SIZE];
#else
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
#endif
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	size_t pm_ldt_len;		/* size of LDT in bytes */
	int pm_ldt_sel;			/* LDT selector */
	uint32_t pm_cpus;		/* mask of CPUs using pmap */
	uint32_t pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
};

/* macro to access pm_pdirpa */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa + (index) * sizeof(pd_entry_t))
#endif
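
/*
 * Sketch only (kept under #if 0, never compiled): pmap_pdirpa() gives
 * the physical address of one PDE slot.  Under PAE, l2tol3()/l2tol2()
 * first split the L2 index across the four page-sized directories.
 * The function below is a hypothetical illustration, not part of the
 * pmap API.
 */
#if 0
static __inline paddr_t
example_pde_pa(struct pmap *pmap, vaddr_t va)
{

	/* physical address of the PDE that maps "va" */
	return pmap_pdirpa(pmap, pl2_i(va));
}
#endif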

/*
 * MD flags that we use for pmap_enter:
 */
#define PMAP_NOCACHE	0x01000000	/* set the non-cacheable bit */

/*
 * global kernel variables
 */

/* PDPpaddr: the physical address of the kernel's PDP */
extern u_long PDPpaddr;

extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		x86_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */


/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);
void		pmap_remove_all(struct pmap *);
void		pmap_ldt_sync(struct pmap *);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
void	pmap_tlb_shootwait(void);

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define PMAP_FORK		/* turn on pmap_fork interface */

/*
 * Do idle page zeroing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __unused
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
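
/*
 * Sketch only (kept under #if 0, never compiled): a typical kvtopte()
 * consumer recovering the physical address behind a kernel VA.  The
 * function below is a hypothetical illustration; note that for a
 * large (PG_PS) mapping kvtopte() returns the PDE itself, so a real
 * caller would mask with the large-page frame instead of PG_FRAME.
 */
#if 0
static __inline paddr_t
example_kva_to_pa(vaddr_t va)
{
	pt_entry_t *pte = kvtopte(va);

	return (*pte & PG_FRAME) | (va & PAGE_MASK);
}
#endif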

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_early(struct cpu_info *);
void	pmap_cpu_init_late(struct cpu_info *);
bool	sse2_idlezero_page(void *);


#ifdef XEN

#define XPTE_MASK	L1_FRAME
/* XPTE_SHIFT = L1_SHIFT - log2(sizeof(pt_entry_t)) */
#if defined(__x86_64__) || defined(PAE)
#define XPTE_SHIFT	9
#else
#define XPTE_SHIFT	10
#endif

/* PTE access inline functions */

/*
 * Get the machine address of the PTE that is pointed to.
 * We use the hardware MMU to look up the value, so this works only
 * for levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) + (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}

/*
 * xpmap_update()
 * Update an active pt entry with Xen.
 * Equivalent to *pte = npte.
 */

static __inline void
xpmap_update(pt_entry_t *pte, pt_entry_t npte)
{
	int s = splvm();

	xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
	xpq_flush_queue();
	splx(s);
}
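
/*
 * Sketch only (kept under #if 0, never compiled): a live PTE under
 * Xen must not be stored to directly; instead its new value is queued
 * by machine address, e.g. for a hypothetical kernel va and pte value:
 */
#if 0
	xpmap_update(kvtopte(va), npte);
#endif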


/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, u_int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);

paddr_t	vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)

#endif	/* XEN */

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

/*
 * TLB shootdown mailbox.
 */

struct pmap_mbox {
	volatile void		*mb_pointer;
	volatile uintptr_t	mb_addr1;
	volatile uintptr_t	mb_addr2;
	volatile uintptr_t	mb_head;
	volatile uintptr_t	mb_tail;
	volatile uintptr_t	mb_global;
};

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */