/* $NetBSD: pmap.h,v 1.79 2014/01/01 16:09:04 matt Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000, 2001, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#endif

#include <sys/mutex.h>
#include <sys/queue.h>

#include <machine/pte.h>

/*
 * Machine-dependent virtual memory state.
 *
 * If we ever support processor numbers higher than 63, we'll have to
 * rethink the CPU mask.
 *
 * Note that the per-CPU ASN information (pm_asni) is a variable-length
 * array sized in pmap_create().  Its size is based on the PCS count
 * from the HWRPB, and it is indexed by processor ID (from `whami').
 *
 * The kernel pmap is a special case; it gets statically-allocated
 * arrays which hold enough for ALPHA_MAXPROCS.
 */
struct pmap_asn_info {
	unsigned int		pma_asn;	/* address space number */
	unsigned long		pma_asngen;	/* ASN generation number */
};

struct pmap {
	TAILQ_ENTRY(pmap)	pm_list;	/* list of all pmaps */
	pt_entry_t		*pm_lev1map;	/* level 1 map */
	int			pm_count;	/* pmap reference count */
	kmutex_t		pm_lock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	unsigned long		pm_cpus;	/* mask of CPUs using pmap */
	unsigned long		pm_needisync;	/* mask of CPUs needing isync */
	struct pmap_asn_info	pm_asni[];	/* ASN information */
			/*	variable length		*/
};

/*
 * Compute the size of a pmap structure, including its variable-length
 * pm_asni[] array.
 */
#define	PMAP_SIZEOF(x)							\
	(ALIGN(offsetof(struct pmap, pm_asni[(x)])))

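/*
 * Illustrative sketch (not part of this header's API): because the ASN
 * information is a flexible array member, a pmap must be allocated with
 * PMAP_SIZEOF() rather than sizeof(struct pmap).  The kmem(9) call below
 * is only an assumed allocator choice for the example; the real
 * allocation strategy lives in pmap_create().
 */
#if 0	/* example only -- not compiled */
#include <sys/kmem.h>

static struct pmap *
example_pmap_alloc(u_int nasn)
{
	struct pmap *pm;

	/* Room for the fixed header plus `nasn' pmap_asn_info slots. */
	pm = kmem_zalloc(PMAP_SIZEOF(nasn), KM_SLEEP);
	pm->pm_count = 1;
	return (pm);
}
#endif	/* 0 */
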
#define	PMAP_ASN_RESERVED	0	/* reserved for Lev1map users */

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t; the list head is the
 * pvh_list member of the page's vm_page_md (defined below).
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry on list */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	pt_entry_t	*pv_pte;	/* PTE that maps the VA */
} *pv_entry_t;

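/*
 * Illustrative sketch (not part of the pmap API): walking a page's PV
 * list to find the PTE a particular pmap uses for that page.  Locking
 * is elided, and struct vm_page comes from the UVM headers.
 */
#if 0	/* example only -- not compiled */
static pt_entry_t *
example_pv_lookup(struct pmap *pmap, struct vm_page *pg)
{
	struct pv_entry *pv;

	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
		if (pv->pv_pmap == pmap)
			return (pv->pv_pte);
	}
	return (NULL);
}
#endif	/* 0 */
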
/* pvh_attrs */
#define	PGA_MODIFIED		0x01		/* modified */
#define	PGA_REFERENCED		0x02		/* referenced */

/* pvh_usage */
#define	PGU_NORMAL		0		/* free or normal use */
#define	PGU_PVENT		1		/* PV entries */
#define	PGU_L1PT		2		/* level 1 page table */
#define	PGU_L2PT		3		/* level 2 page table */
#define	PGU_L3PT		4		/* level 3 page table */

#ifdef _KERNEL

#include <sys/atomic.h>

#ifdef _KERNEL_OPT
#include "opt_dec_kn8ae.h"			/* XXX */
#if defined(DEC_KN8AE)
#define	_PMAP_MAY_USE_PROM_CONSOLE
#endif
#else
#define	_PMAP_MAY_USE_PROM_CONSOLE
#endif

#ifndef _LKM
#if defined(MULTIPROCESSOR)
struct cpu_info;
struct trapframe;

void	pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, u_long *);
void	pmap_tlb_shootnow(u_long);
void	pmap_do_tlb_shootdown(struct cpu_info *, struct trapframe *);
#define	PMAP_TLB_SHOOTDOWN_CPUSET_DECL		u_long shootset = 0;
#define	PMAP_TLB_SHOOTDOWN(pm, va, pte)					\
	pmap_tlb_shootdown((pm), (va), (pte), &shootset)
#define	PMAP_TLB_SHOOTNOW()						\
	pmap_tlb_shootnow(shootset)
#else
#define	PMAP_TLB_SHOOTDOWN_CPUSET_DECL		/* nothing */
#define	PMAP_TLB_SHOOTDOWN(pm, va, pte)		/* nothing */
#define	PMAP_TLB_SHOOTNOW()			/* nothing */
#endif /* MULTIPROCESSOR */
#endif /* _LKM */

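/*
 * Illustrative sketch of how the shootdown macros compose (assumed usage
 * pattern; locking and the real PTE manipulation are elided): declare the
 * CPU set, record a shootdown for each changed PTE, then issue the IPIs
 * once at the end.
 */
#if 0	/* example only -- not compiled */
static void
example_zap_pte(pmap_t pmap, vaddr_t va, pt_entry_t *pte)
{
	PMAP_TLB_SHOOTDOWN_CPUSET_DECL

	pt_entry_t opte = *pte;

	*pte = 0;				/* invalidate the mapping */
	PMAP_TLB_SHOOTDOWN(pmap, va, opte);	/* note interested CPUs */
	PMAP_TLB_SHOOTNOW();			/* send the IPIs */
}
#endif	/* 0 */
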
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */
#define	pmap_update(pmap)		/* nothing (yet) */

static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

#define	pmap_is_referenced(pg)						\
	(((pg)->mdpage.pvh_attrs & PGA_REFERENCED) != 0)
#define	pmap_is_modified(pg)						\
	(((pg)->mdpage.pvh_attrs & PGA_MODIFIED) != 0)

#define	PMAP_STEAL_MEMORY		/* enable pmap_steal_memory() */
#define	PMAP_GROWKERNEL			/* enable pmap_growkernel() */

/*
 * Alternate mapping hooks for pool pages.  These map pool pages via the
 * direct-mapped K0SEG segment, which avoids thrashing the TLB.
 */
#define	PMAP_MAP_POOLPAGE(pa)		ALPHA_PHYS_TO_K0SEG((pa))
#define	PMAP_UNMAP_POOLPAGE(va)		ALPHA_K0SEG_TO_PHYS((va))

/*
 * Other hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)		ALPHA_K0SEG_TO_PHYS((vaddr_t) (va))

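/*
 * Illustrative sketch: the pool hooks rely on the Alpha's direct-mapped
 * K0SEG segment, so a physical address converts to a usable VA and back
 * by simple arithmetic, with no PTE or TLB activity.
 */
#if 0	/* example only -- not compiled */
static void
example_poolpage_roundtrip(paddr_t pa)
{
	vaddr_t va = PMAP_MAP_POOLPAGE(pa);

	KASSERT(POOL_VTOPHYS(va) == pa);
	(void)PMAP_UNMAP_POOLPAGE(va);		/* recovers `pa' */
}
#endif	/* 0 */
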
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

paddr_t vtophys(vaddr_t);

/* Machine-specific functions. */
void	pmap_bootstrap(paddr_t, u_int, u_long);
int	pmap_emulate_reference(struct lwp *, vaddr_t, int, int);
#ifdef _PMAP_MAY_USE_PROM_CONSOLE
int	pmap_uses_prom_console(void);
#endif

#define	pmap_pte_pa(pte)	(PG_PFNUM(*(pte)) << PGSHIFT)
#define	pmap_pte_prot(pte)	(*(pte) & PG_PROT)
#define	pmap_pte_w(pte)		(*(pte) & PG_WIRED)
#define	pmap_pte_v(pte)		(*(pte) & PG_V)
#define	pmap_pte_pv(pte)	(*(pte) & PG_PVLIST)
#define	pmap_pte_asm(pte)	(*(pte) & PG_ASM)
#define	pmap_pte_exec(pte)	(*(pte) & PG_EXEC)

#define	pmap_pte_set_w(pte, v)						\
do {									\
	if (v)								\
		*(pte) |= PG_WIRED;					\
	else								\
		*(pte) &= ~PG_WIRED;					\
} while (0)

#define	pmap_pte_w_chg(pte, nw)	((nw) ^ pmap_pte_w(pte))

#define	pmap_pte_set_prot(pte, np)					\
do {									\
	*(pte) &= ~PG_PROT;						\
	*(pte) |= (np);							\
} while (0)

#define	pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))

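/*
 * Illustrative sketch of the accessor idiom: the *_chg() macros let a
 * caller skip the PTE write (and the statistics update) when nothing
 * actually changes.  A pmap_change_wiring()-style helper might look
 * like this (locking elided).
 */
#if 0	/* example only -- not compiled */
static void
example_set_wired(pmap_t pmap, pt_entry_t *pte, bool wired)
{
	if (pmap_pte_w_chg(pte, wired ? PG_WIRED : 0)) {
		pmap_pte_set_w(pte, wired);
		if (wired)
			pmap->pm_stats.wired_count++;
		else
			pmap->pm_stats.wired_count--;
	}
}
#endif	/* 0 */
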
static __inline pt_entry_t *pmap_l2pte(pmap_t, vaddr_t, pt_entry_t *);
static __inline pt_entry_t *pmap_l3pte(pmap_t, vaddr_t, pt_entry_t *);

/*
 * Return a pointer to the level 1 PTE that covers virtual address `v'.
 */
#define	pmap_l1pte(pmap, v)						\
	(&(pmap)->pm_lev1map[l1pte_index((vaddr_t)(v))])

/*
 * Return a pointer to the level 2 PTE that covers `v', or NULL if the
 * level 1 PTE is invalid.  A caller that already has the (valid) level 1
 * PTE in hand may pass it as `l1pte'; otherwise pass NULL and it will be
 * looked up here.
 */
static __inline pt_entry_t *
pmap_l2pte(pmap_t pmap, vaddr_t v, pt_entry_t *l1pte)
{
	pt_entry_t *lev2map;

	if (l1pte == NULL) {
		l1pte = pmap_l1pte(pmap, v);
		if (pmap_pte_v(l1pte) == 0)
			return (NULL);
	}

	lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
	return (&lev2map[l2pte_index(v)]);
}

/*
 * Return a pointer to the level 3 PTE that covers `v', or NULL if an
 * upper-level PTE is invalid.  As above, a known-valid level 2 PTE may
 * be passed in as `l2pte' to skip the upper-level lookups.
 */
static __inline pt_entry_t *
pmap_l3pte(pmap_t pmap, vaddr_t v, pt_entry_t *l2pte)
{
	pt_entry_t *l1pte, *lev2map, *lev3map;

	if (l2pte == NULL) {
		l1pte = pmap_l1pte(pmap, v);
		if (pmap_pte_v(l1pte) == 0)
			return (NULL);

		lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
		l2pte = &lev2map[l2pte_index(v)];
		if (pmap_pte_v(l2pte) == 0)
			return (NULL);
	}

	lev3map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l2pte));
	return (&lev3map[l3pte_index(v)]);
}

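/*
 * Illustrative sketch: a pmap_extract()-style virtual-to-physical lookup
 * built from the level accessors above.  PGOFSET is assumed to be the
 * machine's page-offset mask.
 */
#if 0	/* example only -- not compiled */
static bool
example_va_to_pa(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	pt_entry_t *l3pte;

	l3pte = pmap_l3pte(pmap, va, NULL);
	if (l3pte == NULL || pmap_pte_v(l3pte) == 0)
		return (false);

	*pap = pmap_pte_pa(l3pte) | (va & PGOFSET);
	return (true);
}
#endif	/* 0 */
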
/*
 * Macros for locking pmap structures.
 *
 * Note that if we access the kernel pmap in interrupt context, it
 * is only to update statistics.  Since stats are updated using atomic
 * operations, locking the kernel pmap is not necessary.  Therefore,
 * it is not necessary to block interrupts when locking pmap structures.
 */
#define	PMAP_LOCK(pmap)		mutex_enter(&(pmap)->pm_lock)
#define	PMAP_UNLOCK(pmap)	mutex_exit(&(pmap)->pm_lock)

/*
 * Macro for processing deferred I-stream synchronization.
 *
 * The pmap module may defer syncing the user I-stream until the
 * return to userspace, since the IMB PALcode op can be quite
 * expensive.  Since user instructions won't be executed until
 * the return to userspace, this can be deferred until userret().
 */
#define	PMAP_USERRET(pmap)						\
do {									\
	u_long cpu_mask = (1UL << cpu_number());			\
									\
	if ((pmap)->pm_needisync & cpu_mask) {				\
		atomic_and_ulong(&(pmap)->pm_needisync, ~cpu_mask);	\
		alpha_pal_imb();					\
	}								\
} while (0)

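/*
 * Illustrative sketch (assumed caller, not defined here): the MD userret
 * path applies any deferred I-stream sync for the current CPU on the way
 * back out to user space.
 */
#if 0	/* example only -- not compiled */
static void
example_userret(struct lwp *l)
{
	struct pmap *pmap = l->l_proc->p_vmspace->vm_map.pmap;

	PMAP_USERRET(pmap);
	/* ... remainder of userret() processing ... */
}
#endif	/* 0 */
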
/*
 * pmap-specific data stored in the vm_page structure.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	struct pv_entry *pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
	unsigned pvh_refcnt;
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.pvh_list = NULL;					\
	(pg)->mdpage.pvh_refcnt = 0;					\
} while (/*CONSTCOND*/0)

#endif /* _KERNEL */

#endif /* _PMAP_MACHINE_ */