/*	$NetBSD: pmap.h,v 1.80 2018/06/20 11:49:38 maxv Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define	_X86_PMAP_H_

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 *
 * pl_i(va, X) == plX_i(va) <= pl_i_roundup(va, X)
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
        (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define	pl_i_roundup(va, lvl)	pl_i((va)+ ~ptp_masks[(lvl)-1], (lvl))

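/*
 * For illustration (assuming the usual amd64 layout: 9-bit indexes per
 * level, L1_SHIFT == 12), pl1_i() is simply the concatenation of the
 * per-level indexes of a VA:
 *
 *	pl1_i(va) == (pl4_pi(va) << 27) | (pl3_pi(va) << 18) |
 *	    (pl2_pi(va) << 9) | pl1_pi(va)
 *
 * which is why it can be used directly as an index into the recursively
 * mapped PTE window at PTE_BASE (see vtopte() below).
 */
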
/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)

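/*
 * For example, ptp_va2o(va, 1) == pl2_i(va) * PAGE_SIZE: the byte offset,
 * within the L1 PTE space, of the level 1 PTP that maps "va" (consistent
 * with a PTP's index being the index of the PDE that points to it).
 */
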
/* size of a PDP: usually one page, except for PAE */
#ifdef PAE
#define PDP_SIZE 4
#else
#define PDP_SIZE 1
#endif


#if defined(_KERNEL)
#include <sys/kcpuset.h>
#include <uvm/pmap/pmap_pvt.h>

#define BTSEG_NONE	0
#define BTSEG_TEXT	1
#define BTSEG_RODATA	2
#define BTSEG_DATA	3
#define BTSPACE_NSEGS	64

struct bootspace {
	struct {
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} head;

	/* Kernel segments. */
	struct {
		int type;
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} segs[BTSPACE_NSEGS];

	/*
	 * The area used by the early kernel bootstrap. It contains the kernel
	 * symbols, the preloaded modules, the bootstrap tables, and the ISA I/O
	 * mem.
	 */
	struct {
		vaddr_t va;
		paddr_t pa;
		size_t sz;
	} boot;

	/* A magic VA usable by the bootstrap code. */
	vaddr_t spareva;

	/* Virtual address of the page directory. */
	vaddr_t pdir;

	/* Area dedicated to kernel modules (amd64 only). */
	vaddr_t smodule;
	vaddr_t emodule;
};

#ifndef MAXGDTSIZ
#define MAXGDTSIZ 65536 /* XXX */
#endif

struct pcpu_entry {
	uint8_t gdt[MAXGDTSIZ];
	uint8_t tss[PAGE_SIZE];
	uint8_t ist0[PAGE_SIZE];
	uint8_t ist1[PAGE_SIZE];
	uint8_t ist2[PAGE_SIZE];
	uint8_t ist3[PAGE_SIZE];
	uint8_t rsp0[2 * PAGE_SIZE];
} __packed;

struct pcpu_area {
#ifdef SVS
	uint8_t utls[PAGE_SIZE];
#endif
	uint8_t idt[PAGE_SIZE];
	uint8_t ldt[PAGE_SIZE];
	struct pcpu_entry ent[MAXCPUS];
} __packed;

extern struct pcpu_area *pcpuarea;

/*
 * pmap data structures: see pmap.c for details of locking.
 */

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * linked list of all non-kernel pmaps
 */
extern struct pmap_head pmaps;
extern kmutex_t pmaps_lock;    /* protects pmaps */

/*
 * pool_cache(9) that PDPs are allocated from
 */
extern struct pool_cache pmap_pdp_cache;

/*
 * the pmap structure
 *
 * note that the pm_obj contains the lock pointer, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the lock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
	kmutex_t pm_obj_lock[PTP_LEVELS-1];	/* locks for pm_objs */
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa[PDP_SIZE];	/* PA of PDs (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	size_t pm_ldt_len;		/* size of LDT in bytes */
	int pm_ldt_sel;			/* LDT selector */
	kcpuset_t *pm_cpus;		/* mask of CPUs using pmap */
	kcpuset_t *pm_kernel_cpus;	/* mask of CPUs using kernel part
					 of pmap */
	kcpuset_t *pm_xen_ptp_cpus;	/* mask of CPUs which have this pmap's
					 ptp mapped */
	uint64_t pm_ncsw;		/* for assertions */
	struct vm_page *pm_gc_ptp;	/* pages from pmap g/c */
};

/* macro to access pm_pdirpa slots */
#ifdef PAE
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
#endif

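/*
 * For example, pmap_pdirpa(pmap, idx) is the physical address of slot "idx"
 * in the pmap's top-level page directory, where idx is typically an L2 index
 * (pl2_i()) on i386 and an L4 index (pl4_i()) on amd64; with PAE the
 * directory spans PDP_SIZE pages, so l2tol3() selects the page and l2tol2()
 * the slot within it.
 */
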
/*
 * MD flags that we use for pmap_enter and pmap_kenter_pa:
 */

/*
 * global kernel variables
 */

/*
 * PDPpaddr is the physical address of the kernel's PDP.
 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
 * value associated with the kernel process, proc0.
 * - i386 PAE: it still represents the PA of the kernel's PDP (L2). Due to
 * the L3 PD, it can no longer be considered the equivalent of a %cr3.
 * - Xen: it corresponds to the PFN of the kernel's PDP.
 */
extern u_long PDPpaddr;

extern pd_entry_t pmap_pg_g;			/* do we support PG_G? */
extern pd_entry_t pmap_pg_nx;			/* do we support PG_NX? */
extern int pmap_largepages;
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)		__USE(L)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		(x86_ptob(ppn) & ~X86_MMAP_FLAG_MASK)
#define pmap_mmap_flags(ppn)		x86_mmap_flags(ppn)
#define pmap_valid_entry(E) 		((E) & PG_V) /* is PDE or PTE valid? */

#if defined(__x86_64__) || defined(PAE)
#define X86_MMAP_FLAG_SHIFT	(64 - PGSHIFT)
#else
#define X86_MMAP_FLAG_SHIFT	(32 - PGSHIFT)
#endif

#define X86_MMAP_FLAG_MASK	0xf
#define X86_MMAP_FLAG_PREFETCH	0x1

/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
bool		pmap_pv_clear_attrs(paddr_t, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_pv_remove(paddr_t);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);
void		pmap_remove_all(struct pmap *);
void		pmap_ldt_cleanup(struct lwp *);
void		pmap_ldt_sync(struct pmap *);
void		pmap_kremove_local(vaddr_t, vsize_t);

#define	__HAVE_PMAP_PV_TRACK	1
void		pmap_pv_init(void);
void		pmap_pv_track(paddr_t, psize_t);
void		pmap_pv_untrack(paddr_t, psize_t);

void		pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
		    pd_entry_t * const **);
void		pmap_unmap_ptes(struct pmap *, struct pmap *);

int		pmap_pdes_invalid(vaddr_t, pd_entry_t * const *, pd_entry_t *);

u_int		x86_mmap_flags(paddr_t);

bool		pmap_is_curpmap(struct pmap *);

#ifndef __HAVE_DIRECT_MAP
void		pmap_vpage_cpu_init(struct cpu_info *);
#endif

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

typedef enum tlbwhy {
	TLBSHOOT_APTE,
	TLBSHOOT_KENTER,
	TLBSHOOT_KREMOVE,
	TLBSHOOT_FREE_PTP1,
	TLBSHOOT_FREE_PTP2,
	TLBSHOOT_REMOVE_PTE,
	TLBSHOOT_REMOVE_PTES,
	TLBSHOOT_SYNC_PV1,
	TLBSHOOT_SYNC_PV2,
	TLBSHOOT_WRITE_PROTECT,
	TLBSHOOT_ENTER,
	TLBSHOOT_UPDATE,
	TLBSHOOT_BUS_DMA,
	TLBSHOOT_BUS_SPACE,
	TLBSHOOT__MAX,
} tlbwhy_t;

void		pmap_tlb_init(void);
void		pmap_tlb_cpu_init(struct cpu_info *);
void		pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void		pmap_tlb_shootnow(void);
void		pmap_tlb_intr(void);

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define PMAP_FORK		/* turn on pmap_fork interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

__inline static bool __unused
pmap_pdes_valid(vaddr_t va, pd_entry_t * const *pdes, pd_entry_t *lastpde)
{
	return pmap_pdes_invalid(va, pdes, lastpde) == 0;
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_pv_protect: change the protection of all recorded mappings
 *	of an unmanaged page
 */

__inline static void __unused
pmap_pv_protect(paddr_t pa, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_pv_clear_attrs(pa, PG_RW);
		} else {
			pmap_pv_remove(pa);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __unused
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_late(struct cpu_info *);
bool	sse2_idlezero_page(void *);

#ifdef XEN
#include <sys/bitops.h>

#define XPTE_MASK	L1_FRAME
/* Selects the index of a PTE in (A)PTE_BASE */
#define XPTE_SHIFT	(L1_SHIFT - ilog2(sizeof(pt_entry_t)))

/* PTE access inline functions */

/*
 * Get the machine address of the given pte. We use the hardware MMU to
 * read it, so this works only for levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) + (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}

/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

paddr_t	vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif	/* XEN */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t, u_int);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, u_int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
void	pmap_free_ptps(struct vm_page *);

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#ifdef __HAVE_PCPU_AREA
extern struct pcpu_area *pcpuarea;
#define PDIR_SLOT_PCPU		384
#define PMAP_PCPU_BASE		(VA_SIGN_NEG((PDIR_SLOT_PCPU * NBPD_L4)))
#endif

#ifdef __HAVE_DIRECT_MAP

extern vaddr_t pmap_direct_base;
extern vaddr_t pmap_direct_end;

#define L4_SLOT_DIRECT		456
#define PDIR_SLOT_DIRECT	L4_SLOT_DIRECT

#define NL4_SLOT_DIRECT		32

#define PMAP_DIRECT_DEFAULT_BASE (VA_SIGN_NEG((L4_SLOT_DIRECT * NBPD_L4)))

#define PMAP_DIRECT_BASE	pmap_direct_base
#define PMAP_DIRECT_END		pmap_direct_end

#define PMAP_DIRECT_MAP(pa)	((vaddr_t)PMAP_DIRECT_BASE + (pa))
#define PMAP_DIRECT_UNMAP(va)	((paddr_t)(va) - PMAP_DIRECT_BASE)

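/*
 * For example (sketch): with the direct map, a physical address inside the
 * direct-mapped range can be accessed through its permanent kernel virtual
 * alias, without setting up a temporary mapping:
 *
 *	uint8_t *p = (uint8_t *)PMAP_DIRECT_MAP(pa);
 *	memset(p, 0, PAGE_SIZE);
 *	pa = PMAP_DIRECT_UNMAP((vaddr_t)p);
 */
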
/*
 * Alternate mapping hooks for pool pages.
 */
#define PMAP_MAP_POOLPAGE(pa)	PMAP_DIRECT_MAP((pa))
#define PMAP_UNMAP_POOLPAGE(va)	PMAP_DIRECT_UNMAP((va))

void	pagezero(vaddr_t);

#endif /* __HAVE_DIRECT_MAP */

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */