/*	$NetBSD: pmap.h,v 1.49.2.3 2017/03/06 08:18:44 snj Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define	_X86_PMAP_H_

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 *
 * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X)
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
        (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define	pl_i_roundup(va, lvl)	pl_i((va)+ ~ptp_masks[(lvl)-1], (lvl))
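
/*
 * Illustrative sketch only (assumes amd64-style paging with 512
 * entries per level): for a va whose L1 slot is 5 and L2 slot is 3,
 *
 *	pl1_pi(va) == 5		-- slot within va's L1 PTP page
 *	pl2_pi(va) == 3		-- slot within va's L2 PTP page
 *	pl1_i(va) == pl_i(va, 1)	-- index into the flat virtual
 *					   array of all L1 PTEs, with the
 *					   upper-level slots folded in
 *
 * pl_i_roundup() first rounds va up to the next boundary mapped at
 * that level, then takes the index.
 */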

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
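
/*
 * Example (illustrative): the level-1 PTP holding va's PTEs sits at
 * byte offset ptp_va2o(va, 1) == pl2_i(va) * PAGE_SIZE within the PTE
 * space, because a PTP's index is the index of the next-level PDE
 * (here the L2 PDE) that points to it.
 */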

/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif


#if defined(_KERNEL)
#include <sys/kcpuset.h>

/*
 * pmap data structures: see pmap.c for details of locking.
 */

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * linked list of all non-kernel pmaps
 */
extern struct pmap_head pmaps;
extern kmutex_t pmaps_lock;    /* protects pmaps */

/*
 * pool_cache(9) that PDPs are allocated from
 */
extern struct pool_cache pmap_pdp_cache;

/*
 * the pmap structure
 *
 * note that the pm_obj contains the lock pointer, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the lock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 *
 * XXX If we ever support processor numbers higher than 31, we'll have
 * XXX to rethink the CPU mask.
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
	kmutex_t pm_obj_lock[PTP_LEVELS-1];	/* locks for pm_objs */
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa[PDP_SIZE];	/* PA of PDs (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	size_t pm_ldt_len;		/* size of LDT in bytes */
	int pm_ldt_sel;			/* LDT selector */
	kcpuset_t *pm_cpus;		/* mask of CPUs using pmap */
	kcpuset_t *pm_kernel_cpus;	/* mask of CPUs using kernel part
					 of pmap */
	kcpuset_t *pm_xen_ptp_cpus;	/* mask of CPUs which have this pmap's
					 ptp mapped */
	uint64_t pm_ncsw;		/* for assertions */
	struct vm_page *pm_gc_ptp;	/* pages from pmap g/c */
};

/* macro to access pm_pdirpa slots */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
#endif
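
/*
 * Illustrative sketch only: with PAE there are PDP_SIZE (4) PD pages,
 * so a flat PD index is split by l2tol3()/l2tol2() (defined elsewhere)
 * into a PD page and a slot within that page.  Assuming 512 PDEs per
 * page, pmap_pdirpa(pmap, 513) would resolve to pm_pdirpa[1] plus the
 * byte offset of slot 1.
 */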

#define PG_k 0

/*
 * MD flags that we use for pmap_enter and pmap_kenter_pa:
 */

/*
 * global kernel variables
 */

/*
 * PDPpaddr is the physical address of the kernel's PDP.
 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
 *   value associated with the kernel process, proc0.
 * - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to
 *   the L3 PD, it can no longer be considered the equivalent of a %cr3 value.
 * - Xen: it corresponds to the PFN of the kernel's PDP.
 */
extern u_long PDPpaddr;

extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		(x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		x86_mmap_flags(ppn)
#define pmap_valid_entry(E) 		((E) & PG_V) /* is PDE or PTE valid? */

#if defined(__x86_64__) || defined(PAE)
#define X86_MMAP_FLAG_SHIFT	(64 - PGSHIFT)
#else
#define X86_MMAP_FLAG_SHIFT	(32 - PGSHIFT)
#endif

#define X86_MMAP_FLAG_MASK	0xf
#define X86_MMAP_FLAG_PREFETCH	0x1

/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);
void		pmap_remove_all(struct pmap *);
void		pmap_ldt_sync(struct pmap *);

void		pmap_emap_enter(vaddr_t, paddr_t, vm_prot_t);
void		pmap_emap_remove(vaddr_t, vsize_t);
void		pmap_emap_sync(bool);

void		pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
		    pd_entry_t * const **);
void		pmap_unmap_ptes(struct pmap *, struct pmap *);

int		pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);

u_int		x86_mmap_flags(paddr_t);

bool		pmap_is_curpmap(struct pmap *);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

typedef enum tlbwhy {
	TLBSHOOT_APTE,
	TLBSHOOT_KENTER,
	TLBSHOOT_KREMOVE,
	TLBSHOOT_FREE_PTP1,
	TLBSHOOT_FREE_PTP2,
	TLBSHOOT_REMOVE_PTE,
	TLBSHOOT_REMOVE_PTES,
	TLBSHOOT_SYNC_PV1,
	TLBSHOOT_SYNC_PV2,
	TLBSHOOT_WRITE_PROTECT,
	TLBSHOOT_ENTER,
	TLBSHOOT_UPDATE,
	TLBSHOOT_BUS_DMA,
	TLBSHOOT_BUS_SPACE,
	TLBSHOOT__MAX,
} tlbwhy_t;

void		pmap_tlb_init(void);
void		pmap_tlb_cpu_init(struct cpu_info *);
void		pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void		pmap_tlb_shootnow(void);
void		pmap_tlb_intr(void);
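
/*
 * Illustrative usage sketch only (the real call sites are in pmap.c):
 * after changing a PTE, the old entry and a "why" tag are queued for
 * a cross-CPU shootdown, which is later flushed:
 *
 *	pmap_tlb_shootdown(pmap, va, opte, TLBSHOOT_REMOVE_PTE);
 *	...
 *	pmap_tlb_shootnow();
 *
 * "opte" (the previous PTE value) is a hypothetical local here.
 */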

#define	__HAVE_PMAP_EMAP

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define PMAP_FORK		/* turn on pmap_fork interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

__inline static bool __unused
pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{
	return pmap_pdes_invalid(va, pdes, lastpde) == 0;
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __unused
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
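
/*
 * Example (illustrative): checking whether a kernel VA is currently
 * mapped, using pmap_valid_entry() from above:
 *
 *	pt_entry_t *pte = kvtopte(va);
 *	if (pmap_valid_entry(*pte))
 *		... va is backed by a valid PTE (or PG_PS superpage PDE) ...
 */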

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_late(struct cpu_info *);
bool	sse2_idlezero_page(void *);

#ifdef XEN
#include <sys/bitops.h>

#define XPTE_MASK	L1_FRAME
/* Selects the index of a PTE in (A)PTE_BASE */
#define XPTE_SHIFT	(L1_SHIFT - ilog2(sizeof(pt_entry_t)))

/* PTE access inline functions */

/*
 * Get the machine address of the PTE pointed to by "pte".
 * We go through the hardware MMU (via the recursive PTE mapping) to
 * read the value, so this works only for levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	/* locate the PTE that maps the page holding "pte" */
	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	/* machine frame of that page, plus the offset of "pte" within it */
	return (paddr_t) (((*up_pte) & PG_FRAME) + (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}
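
/*
 * Example (illustrative): a Xen MMU update needs the machine address
 * of the PTE being changed, e.g. for the kernel mapping of "va":
 *
 *	paddr_t ma = xpmap_ptetomach(kvtopte(va));
 *
 * (Sketch only; "va" is a hypothetical kernel virtual address.)
 */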

/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

paddr_t	vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)

void	pmap_apte_flush(struct pmap *);
void	pmap_unmap_apdp(void);
#endif	/* XEN */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, u_int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#ifdef __HAVE_DIRECT_MAP

#define L4_SLOT_DIRECT		509
#define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT

#define PMAP_DIRECT_BASE	(VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))
#define PMAP_DIRECT_END		(VA_SIGN_NEG(((L4_SLOT_DIRECT + 1) * NBPD_L4)))

#define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)

/*
 * Alternate mapping hooks for pool pages.
 */
#define PMAP_MAP_POOLPAGE(pa)	PMAP_DIRECT_MAP((pa))
#define PMAP_UNMAP_POOLPAGE(va)	PMAP_DIRECT_UNMAP((va))
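
/*
 * Example (illustrative): with the direct map, PA <-> VA conversion is
 * a constant offset and needs no MMU work, so the round trip
 *
 *	vaddr_t va = PMAP_MAP_POOLPAGE(pa);
 *	...use the page through va...
 *	paddr_t pa2 = PMAP_UNMAP_POOLPAGE(va);
 *
 * yields pa2 == pa for any pa that fits in the direct-map window.
 */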

void	pagezero(vaddr_t);

#endif /* __HAVE_DIRECT_MAP */

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */