Home | History | Annotate | Line # | Download | only in include
pmap.h revision 1.63.4.1
      1 /*	$NetBSD: pmap.h,v 1.63.4.1 2015/09/22 12:05:47 skrll Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1992, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from software contributed to Berkeley by
      8  * Ralph Campbell.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. Neither the name of the University nor the names of its contributors
     19  *    may be used to endorse or promote products derived from this software
     20  *    without specific prior written permission.
     21  *
     22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     32  * SUCH DAMAGE.
     33  *
     34  *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
     35  */
     36 
     37 /*
     38  * Copyright (c) 1987 Carnegie-Mellon University
     39  *
     40  * This code is derived from software contributed to Berkeley by
     41  * Ralph Campbell.
     42  *
     43  * Redistribution and use in source and binary forms, with or without
     44  * modification, are permitted provided that the following conditions
     45  * are met:
     46  * 1. Redistributions of source code must retain the above copyright
     47  *    notice, this list of conditions and the following disclaimer.
     48  * 2. Redistributions in binary form must reproduce the above copyright
     49  *    notice, this list of conditions and the following disclaimer in the
     50  *    documentation and/or other materials provided with the distribution.
     51  * 3. All advertising materials mentioning features or use of this software
     52  *    must display the following acknowledgement:
     53  *	This product includes software developed by the University of
     54  *	California, Berkeley and its contributors.
     55  * 4. Neither the name of the University nor the names of its contributors
     56  *    may be used to endorse or promote products derived from this software
     57  *    without specific prior written permission.
     58  *
     59  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     60  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     61  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     62  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     63  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     64  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     65  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     66  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     67  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     68  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     69  * SUCH DAMAGE.
     70  *
     71  *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
     72  */
     73 
     74 #ifndef	_MIPS_PMAP_H_
     75 #define	_MIPS_PMAP_H_
     76 
     77 #ifdef _KERNEL_OPT
     78 #include "opt_multiprocessor.h"
     79 #endif
     80 
     81 #include <sys/evcnt.h>
     82 #include <sys/kcpuset.h>
     83 
     84 #include <mips/cpuregs.h>	/* for KSEG0 below */
     85 
     86 /*
     87  * The user address space is 2Gb (0x0 - 0x80000000).
     88  * User programs are laid out in memory as follows:
     89  *			address
     90  *	USRTEXT		0x00001000
     91  *	USRDATA		USRTEXT + text_size
     92  *	USRSTACK	0x7FFFFFFF
     93  *
     94  * The user address space is mapped using a two level structure where
     95  * virtual address bits 30..22 are used to index into a segment table which
      96  * points to a page worth of PTEs (a 4096-byte page can hold 1024 PTEs).
     97  * Bits 21..12 are then used to index a PTE which describes a page within
     98  * a segment.
     99  *
    100  * The wired entries in the TLB will contain the following:
    101  *	0-1	(UPAGES)	for curproc user struct and kernel stack.
    102  *
    103  * Note: The kernel doesn't use the same data structures as user programs.
    104  * All the PTE entries are stored in a single array in Sysmap which is
    105  * dynamically allocated at boot time.
    106  */
    107 
/*
 * Round a virtual address down (trunc) or up (round) to a segment
 * boundary.  SEGOFSET/SEGSHIFT/NSEGPG are defined elsewhere (vmparam).
 */
#define pmap_trunc_seg(x)	((vaddr_t)(x) & ~SEGOFSET)
#define pmap_round_seg(x)	(((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)

/* Sentinel meaning "no segment table allocated for this pmap". */
#define PMAP_INVALID_SEGTAB	NULL
#ifdef _LP64
#define PMAP_SEGTABSIZE		NSEGPG
#else
/* 32-bit: one entry per segment covering the 2GB user address space. */
#define PMAP_SEGTABSIZE		(1 << (31 - SEGSHIFT))
#endif
    117 
union pt_entry;

/*
 * One level of the page-table tree.  A node is either an array of
 * pointers to further segtab nodes (seg_seg, used for the extra levels
 * on _LP64) or an array of pointers to pages of PTEs (seg_tab) -- the
 * union overlays the two interpretations on the same storage.
 */
union pmap_segtab {
#ifdef _LP64
	union pmap_segtab	*seg_seg[PMAP_SEGTABSIZE];
#else
	union pmap_segtab	*seg_seg[1];
#endif
	union pt_entry	*seg_tab[PMAP_SEGTABSIZE];
};

typedef union pmap_segtab pmap_segtab_t;
    130 
/*
 * Structure defining a TLB entry data set (one translation pair).
 */
struct tlb {
	vaddr_t	tlb_hi;		/* should be 64 bits */
	uint32_t tlb_lo0;	/* XXX maybe 64 bits (only 32 really used) */
	uint32_t tlb_lo1;	/* XXX maybe 64 bits (only 32 really used) */
};
    139 
/*
 * Like struct tlb, but additionally carries the entry's page mask.
 * The leading three members deliberately mirror struct tlb.
 */
struct tlbmask {
	vaddr_t	tlb_hi;		/* should be 64 bits */
	uint32_t tlb_lo0;	/* XXX maybe 64 bits (only 32 really used) */
	uint32_t tlb_lo1;	/* XXX maybe 64 bits (only 32 really used) */
	uint32_t tlb_mask;	/* page mask for this entry */
};
    146 
#ifdef _KERNEL
struct pmap;
/*
 * Callback invoked per PTE by pmap_pte_process(); arguments are the
 * pmap, the start/end virtual addresses of the range, the PTE, and an
 * opaque cookie.  Return value semantics are defined by the caller.
 */
typedef bool (*pte_callback_t)(struct pmap *, vaddr_t, vaddr_t,
	union pt_entry *, uintptr_t);
/* Look up the PTE for a VA; NULL-safe lookup vs. allocating reserve. */
union pt_entry *pmap_pte_lookup(struct pmap *, vaddr_t);
union pt_entry *pmap_pte_reserve(struct pmap *, vaddr_t, int);
/* Apply a callback to every PTE in a VA range. */
void pmap_pte_process(struct pmap *, vaddr_t, vaddr_t, pte_callback_t,
	uintptr_t);
/* Segment-table lifecycle: activate for an lwp, init, and destroy. */
void pmap_segtab_activate(struct pmap *, struct lwp *);
void pmap_segtab_init(struct pmap *);
void pmap_segtab_destroy(struct pmap *, pte_callback_t, uintptr_t);
extern kmutex_t pmap_segtab_lock;
#endif /* _KERNEL */
    160 
/*
 * Per TLB (normally same as CPU) asid info
 */
struct pmap_asid_info {
	LIST_ENTRY(pmap_asid_info) pai_link;	/* on tlbinfo's ti_pais list */
	uint32_t	pai_asid;	/* TLB address space tag; 0 = invalid
					 * (see PMAP_PAI_ASIDVALID_P below) */
};
    168 
/* Lock/unlock a pmap_tlb_info's spin mutex. */
#define	TLBINFO_LOCK(ti)		mutex_spin_enter((ti)->ti_lock)
#define	TLBINFO_UNLOCK(ti)		mutex_spin_exit((ti)->ti_lock)
/* ASID 0 is reserved as "no ASID assigned". */
#define	PMAP_PAI_ASIDVALID_P(pai, ti)	((pai)->pai_asid != 0)
/* pmap -> its pmap_asid_info slot for a given TLB. */
#define	PMAP_PAI(pmap, ti)		(&(pmap)->pm_pai[tlbinfo_index(ti)])
/*
 * Inverse of PMAP_PAI: recover the owning pmap from a pmap_asid_info
 * pointer by subtracting the member offset (container-of idiom).
 */
#define	PAI_PMAP(pai, ti)	\
	((pmap_t)((intptr_t)(pai) \
	    - offsetof(struct pmap, pm_pai[tlbinfo_index(ti)])))
    176 
/*
 * Machine dependent pmap structure.
 */
struct pmap {
#ifdef MULTIPROCESSOR
	kcpuset_t		*pm_active;	/* pmap was active on ... */
	kcpuset_t		*pm_onproc;	/* pmap is active on ... */
	volatile u_int		pm_shootdown_pending;	/* TLB shootdown
							 * requested for this
							 * pmap */
#endif
	pmap_segtab_t		*pm_segtab;	/* pointers to pages of PTEs */
	u_int			pm_count;	/* pmap reference count */
	u_int			pm_flags;
#define	PMAP_DEFERRED_ACTIVATE	0x0001
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	vaddr_t			pm_minaddr;	/* lowest managed VA */
	vaddr_t			pm_maxaddr;	/* highest managed VA */
	/*
	 * Per-TLB ASID info.  Nominally one entry, but this is a
	 * variable-length trailer (struct pmap_kernel appends
	 * MAXCPUS-1 more) -- it must remain the last member.
	 */
	struct pmap_asid_info	pm_pai[1];
};
    195 
/*
 * Scope of a pending TLB invalidation request.
 */
enum tlb_invalidate_op {
	TLBINV_NOBODY = 0,	/* nothing to invalidate */
	TLBINV_ONE = 1,		/* a single entry */
	TLBINV_ALLUSER = 2,	/* all user entries */
	TLBINV_ALLKERNEL = 3,	/* all kernel entries */
	TLBINV_ALL = 4		/* every entry */
};
    203 
/*
 * Per-TLB management state (one instance per TLB; normally per CPU,
 * but on MULTIPROCESSOR a TLB may be shared -- see ti_kcpuset).
 */
struct pmap_tlb_info {
	char ti_name[8];		/* name; presumably used for evcnt
					 * attachment -- confirm in
					 * pmap_tlb_info_evcnt_attach() */
	uint32_t ti_asid_hint;		/* probable next ASID to use */
	uint32_t ti_asids_free;		/* # of ASIDs free */
#define	tlbinfo_noasids_p(ti)	((ti)->ti_asids_free == 0)
	kmutex_t *ti_lock;		/* protects this structure
					 * (TLBINFO_LOCK/UNLOCK above) */
	u_int ti_wired;			/* # of wired TLB entries */
	uint32_t ti_asid_mask;		/* mask of valid ASID bits */
	uint32_t ti_asid_max;		/* largest usable ASID value */
	LIST_HEAD(, pmap_asid_info) ti_pais; /* list of active ASIDs */
#ifdef MULTIPROCESSOR
	pmap_t ti_victim;		/* NOTE(review): appears to be the
					 * pmap chosen to lose its ASID --
					 * confirm against pmap_tlb.c */
	uint32_t ti_synci_page_bitmap;	/* page indices needing a syncicache */
	kcpuset_t *ti_kcpuset;		/* bitmask of CPUs sharing this TLB */
	enum tlb_invalidate_op ti_tlbinvop;	/* pending invalidate scope */
	u_int ti_index;			/* index of this TLB (pmap_tlbs[]) */
#define tlbinfo_index(ti)	((ti)->ti_index)
	/* event counters for icache-synchronization activity */
	struct evcnt ti_evcnt_synci_asts;
	struct evcnt ti_evcnt_synci_all;
	struct evcnt ti_evcnt_synci_pages;
	struct evcnt ti_evcnt_synci_deferred;
	struct evcnt ti_evcnt_synci_desired;
	struct evcnt ti_evcnt_synci_duplicate;
#else
#define tlbinfo_index(ti)	(0)
#endif
	struct evcnt ti_evcnt_asid_reinits;	/* ASID space reinit count */
	/* in-use bitmap, one bit per ASID, sized for up to 256 ASIDs */
	u_long ti_asid_bitmap[256 / (sizeof(u_long) * 8)];
};
    233 
    234 #ifdef	_KERNEL
    235 
/*
 * Storage for the kernel pmap.  On MULTIPROCESSOR, kernel_pai extends
 * kernel_pmap.pm_pai[1] to MAXCPUS entries by being placed immediately
 * after it (pm_pai is a variable-length trailer).
 */
struct pmap_kernel {
	struct pmap kernel_pmap;
#ifdef MULTIPROCESSOR
	struct pmap_asid_info kernel_pai[MAXCPUS-1];
#endif
};
    242 
/*
 * Boot-time physical and virtual address-space bounds.
 */
struct pmap_limits {
	paddr_t avail_start;	/* first available physical address */
	paddr_t avail_end;	/* last available physical address */
	vaddr_t virtual_start;	/* start of kernel virtual space */
	vaddr_t virtual_end;	/* end of kernel virtual space */
};
    249 
extern struct pmap_kernel kernel_pmap_store;	/* the kernel pmap */
extern struct pmap_tlb_info pmap_tlb0_info;	/* the primary TLB's info */
extern struct pmap_limits pmap_limits;		/* address-space bounds */
#ifdef MULTIPROCESSOR
extern struct pmap_tlb_info *pmap_tlbs[MAXCPUS];	/* all TLB infos */
extern u_int pmap_ntlbs;			/* # of entries in pmap_tlbs */
#endif

/* Accessors for the standard pmap statistics counters. */
#define	pmap_wired_count(pmap) 	((pmap)->pm_stats.wired_count)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)

/* Page frame number -> physical address. */
#define pmap_phys_address(x)	mips_ptob(x)
    262 
/*
 *	Bootstrap the system enough to run with virtual memory.
 */
void	pmap_bootstrap(void);

void	pmap_remove_all(pmap_t);	/* remove all mappings from a pmap */
void	pmap_set_modified(paddr_t);	/* mark a physical page modified */
void	pmap_procwr(struct proc *, vaddr_t, size_t); /* sync icache for range */
#define	PMAP_NEED_PROCWR

#ifdef MULTIPROCESSOR
/* Cross-CPU TLB shootdown and icache synchronization. */
void	pmap_tlb_shootdown_process(void);
bool	pmap_tlb_shootdown_bystanders(pmap_t pmap);
void	pmap_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *);
void	pmap_tlb_syncicache_ast(struct cpu_info *);
void	pmap_tlb_syncicache_wanted(struct cpu_info *);
void	pmap_tlb_syncicache(vaddr_t, const kcpuset_t *);
#endif
/* TLB-info setup and ASID lifecycle management. */
void	pmap_tlb_info_init(struct pmap_tlb_info *);
void	pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *);
void	pmap_tlb_asid_acquire(pmap_t pmap, struct lwp *l);
void	pmap_tlb_asid_deactivate(pmap_t pmap);
void	pmap_tlb_asid_check(void);
void	pmap_tlb_asid_release_all(pmap_t pmap);
/* Update or invalidate the TLB entry for a single VA. */
int	pmap_tlb_update_addr(pmap_t pmap, vaddr_t, uint32_t, bool);
void	pmap_tlb_invalidate_addr(pmap_t pmap, vaddr_t);
    289 
/*
 * pmap_prefer() helps reduce virtual-coherency exceptions in
 * the virtually-indexed cache on mips3 CPUs.
 */
#ifdef MIPS3_PLUS
#define PMAP_PREFER(pa, va, sz, td)	pmap_prefer((pa), (va), (sz), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, vsize_t, int);
#endif /* MIPS3_PLUS */

#define	PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */
    301 
/* True if the VA lies in a direct-mapped (unmanaged) region. */
bool	pmap_md_direct_mapped_vaddr_p(vaddr_t);

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
vaddr_t mips_pmap_map_poolpage(paddr_t);
paddr_t mips_pmap_unmap_poolpage(vaddr_t);
struct vm_page *mips_pmap_alloc_poolpage(int);
#define	PMAP_ALLOC_POOLPAGE(flags)	mips_pmap_alloc_poolpage(flags)
#define	PMAP_MAP_POOLPAGE(pa)		mips_pmap_map_poolpage(pa)
#define	PMAP_UNMAP_POOLPAGE(va)		mips_pmap_unmap_poolpage(va)
    313 
/*
 * Other hooks for the pool allocator: translate between kernel virtual
 * addresses in the direct-mapped segments and physical addresses.
 * On _LP64 a VA may be in either KSEG0 or XKPHYS; phys->virt uses the
 * cached XKPHYS window.  On 32-bit only KSEG0 is available.
 */
#ifdef _LP64
#define	POOL_VTOPHYS(va)	(MIPS_KSEG0_P(va) \
				    ? MIPS_KSEG0_TO_PHYS(va) \
				    : MIPS_XKPHYS_TO_PHYS(va))
#define	POOL_PHYSTOV(pa)	MIPS_PHYS_TO_XKPHYS_CACHED((paddr_t)(pa))
#else
#define	POOL_VTOPHYS(va)	MIPS_KSEG0_TO_PHYS((vaddr_t)(va))
/*
 * Fixed: this previously expanded to MIPS_PHYS_TO_KSEG0_TO_PHYS(),
 * which is not a real macro (the two conversion names were pasted
 * together); the phys->virt conversion is MIPS_PHYS_TO_KSEG0().
 */
#define	POOL_PHYSTOV(pa)	MIPS_PHYS_TO_KSEG0((paddr_t)(pa))
#endif
    326 
/*
 * Select CCA to use for unmanaged pages.
 */
#define	PMAP_CCA_FOR_PA(pa)	CCA_UNCACHED		/* uncached */

#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
/* High-bit flags carried in 64-bit physical addresses. */
#define PGC_NOCACHE	0x4000000000000000ULL	/* map uncached */
#define PGC_PREFETCH	0x2000000000000000ULL	/* NOTE(review): prefetch
						 * hint? -- confirm users */
#endif
    336 
#define	__HAVE_VM_PAGE_MD

/*
 * pmap-specific data stored in the vm_page structure.
 */
/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 * XXX really should do this as a part of the higher level code.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
/* NOTE(review): flag apparently stored in the low bits of pv_va (page
 * alignment leaves them free) -- confirm against pmap.c. */
#define	PV_KENTER	0x001
} *pv_entry_t;
    353 
/* Page attribute bits kept in vm_page_md.pvh_attrs. */
#define	PG_MD_UNCACHED		0x0001	/* page is mapped uncached */
#define	PG_MD_MODIFIED		0x0002	/* page has been modified */
#define	PG_MD_REFERENCED	0x0004	/* page has been recently referenced */
#define	PG_MD_POOLPAGE		0x0008	/* page is used as a poolpage */
#define	PG_MD_EXECPAGE		0x0010	/* page is exec mapped */

/* Predicates over the attribute bits above. */
#define	PG_MD_CACHED_P(md)	(((md)->pvh_attrs & PG_MD_UNCACHED) == 0)
#define	PG_MD_UNCACHED_P(md)	(((md)->pvh_attrs & PG_MD_UNCACHED) != 0)
#define	PG_MD_MODIFIED_P(md)	(((md)->pvh_attrs & PG_MD_MODIFIED) != 0)
#define	PG_MD_REFERENCED_P(md)	(((md)->pvh_attrs & PG_MD_REFERENCED) != 0)
#define	PG_MD_POOLPAGE_P(md)	(((md)->pvh_attrs & PG_MD_POOLPAGE) != 0)
#define	PG_MD_EXECPAGE_P(md)	(((md)->pvh_attrs & PG_MD_EXECPAGE) != 0)
    366 
/*
 * Per-page machine-dependent data: the head of the pv list plus the
 * attribute bits.  On MULTIPROCESSOR each page carries its own pv-list
 * lock and the upper 16 bits of pvh_attrs hold a generation counter
 * (see PG_MD_PVLIST_GEN); uniprocessor uses a single global mutex.
 */
struct vm_page_md {
	struct pv_entry pvh_first;	/* pv_entry first */
#ifdef MULTIPROCESSOR
	volatile u_int pvh_attrs;	/* page attributes */
	kmutex_t *pvh_lock;		/* pv list lock */
#define	PG_MD_PVLIST_LOCK_INIT(md) 	((md)->pvh_lock = NULL)
#define	PG_MD_PVLIST_LOCKED_P(md)	(mutex_owner((md)->pvh_lock) != 0)
#define	PG_MD_PVLIST_LOCK(md, lc)	pmap_pvlist_lock((md), (lc))
#define	PG_MD_PVLIST_UNLOCK(md)		mutex_spin_exit((md)->pvh_lock)
#define	PG_MD_PVLIST_GEN(md)		((uint16_t)((md)->pvh_attrs >> 16))
#else
	u_int pvh_attrs;		/* page attributes */
#define	PG_MD_PVLIST_LOCK_INIT(md)	do { } while (/*CONSTCOND*/ 0)
#define	PG_MD_PVLIST_LOCKED_P(md)	true
#define	PG_MD_PVLIST_LOCK(md, lc)	(mutex_spin_enter(&pmap_pvlist_mutex), 0)
#define	PG_MD_PVLIST_UNLOCK(md)		mutex_spin_exit(&pmap_pvlist_mutex)
#define	PG_MD_PVLIST_GEN(md)		(0)
#endif
};
    386 
/*
 * Initialize a page's machine-dependent data: empty pv list (the
 * first entry's pv_va caches the page's physical address), lock
 * initialized, all attribute bits clear.
 */
#define VM_MDPAGE_INIT(pg)					\
do {								\
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);	\
	(md)->pvh_first.pv_next = NULL;				\
	(md)->pvh_first.pv_pmap = NULL;				\
	(md)->pvh_first.pv_va = VM_PAGE_TO_PHYS(pg);		\
	PG_MD_PVLIST_LOCK_INIT(md);				\
	(md)->pvh_attrs = 0;					\
} while (/* CONSTCOND */ 0)

/* Acquire a page's pv-list lock; returns the list generation count. */
uint16_t pmap_pvlist_lock(struct vm_page_md *, bool);
    398 
    399 #endif	/* _KERNEL */
    400 #endif	/* _MIPS_PMAP_H_ */
    401