/*	$NetBSD: pmap.h,v 1.161 2020/02/05 07:37:36 skrll Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#endif
#include <arm/cpufunc.h>
#include <arm/locore.h>
#include <uvm/uvm_object.h>
#include <uvm/pmap/pmap_pvt.h>
#endif

#ifdef ARM_MMU_EXTENDED
#define PMAP_HWPAGEWALKER		1
#define PMAP_TLB_MAX			1
#if PMAP_TLB_MAX > 1
#define PMAP_TLB_NEED_SHOOTDOWN		1
#endif
#define PMAP_TLB_FLUSH_ASID_ON_RESET	(arm_has_tlbiasid_p)
#define PMAP_TLB_NUM_PIDS		256
#define cpu_set_tlb_info(ci, ti)        ((void)((ci)->ci_tlb_info = (ti)))
#if PMAP_TLB_MAX > 1
#define cpu_tlb_info(ci)		((ci)->ci_tlb_info)
#else
#define cpu_tlb_info(ci)		(&pmap_tlb0_info)
#endif
#define pmap_md_tlb_asid_max()		(PMAP_TLB_NUM_PIDS - 1)
#include <uvm/pmap/tlb.h>
#include <uvm/pmap/pmap_tlb.h>

/*
 * If we have an EXTENDED MMU and the address space is split evenly between
 * user and kernel, we can use TTBR0/TTBR1 to have separate L1 tables for
 * the user and kernel address spaces.
 */
#if (KERNEL_BASE & 0x80000000) == 0
#error ARMv6 or later systems must have a KERNEL_BASE >= 0x80000000
#endif
#endif  /* ARM_MMU_EXTENDED */
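
/*
 * Sketch (illustrative only; assumes the even 2GB/2GB split enforced
 * above): with TTBCR.N = 1, TTBR0 translates VAs below 0x80000000 and
 * TTBR1 the kernel half, so a context switch only retargets TTBR0 and
 * the ASID while the kernel L1 table in TTBR1 stays put, roughly:
 *
 *	armreg_ttbr_write(pm->pm_l1_pa | ttb_flags);	// user L1 table
 *	armreg_contextidr_write(pm->pm_pai[0].pai_asid); // new ASID
 *	arm_isb();
 *
 * (ttb_flags stands for the platform's TTBR attribute bits and is a
 * hypothetical name here.)
 */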

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_XLOG2	(L1_S_SHIFT)
#define L2_BUCKET_XSIZE	(1 << L2_BUCKET_XLOG2)
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		(32 - (L2_BUCKET_XLOG2 + L2_BUCKET_LOG2))
#define	L2_SIZE		(1 << L2_LOG2)
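
/*
 * Worked example (with the usual L1_S_SHIFT of 20, i.e. 1MB sections):
 * an l2_dtable holds L2_BUCKET_SIZE = 16 L2 tables, each spanning
 * L2_BUCKET_XSIZE = 1MB, so one l2_dtable covers 16MB, and
 * L2_LOG2 = 32 - (20 + 4) = 8 gives L2_SIZE = 256 l2_dtable slots to
 * cover the full 4GB address space.
 */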

/*
 * Tell MI code that the cache is virtually-indexed.
 * ARMv6 and ARMv7 caches are physically tagged (VIPT); all earlier
 * caches are virtually tagged (VIVT).
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define PMAP_CACHE_VIPT
#else
#define PMAP_CACHE_VIVT
#endif

#ifndef _LOCORE

#ifndef ARM_MMU_EXTENDED
struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			uint8_t csu_cache_b[2];
			uint16_t csu_cache;
		} cs_cache_u;

		union {
			uint8_t csu_tlb_b[2];
			uint16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	uint32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
#endif /* !ARM_MMU_EXTENDED */

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};

#define	DEVMAP_ALIGN(a)	((a) & ~L1_S_OFFSET)
#define	DEVMAP_SIZE(s)	roundup2((s), L1_S_SIZE)
#define	DEVMAP_ENTRY(va, pa, sz)			\
	{						\
		.pd_va = DEVMAP_ALIGN(va),		\
		.pd_pa = DEVMAP_ALIGN(pa),		\
		.pd_size = DEVMAP_SIZE(sz),		\
		.pd_prot = VM_PROT_READ|VM_PROT_WRITE,	\
		.pd_cache = PTE_DEV			\
	}
#define	DEVMAP_ENTRY_END	{ 0 }
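
/*
 * Example (illustrative; the table name and addresses are hypothetical):
 * board code builds a DEVMAP_ENTRY_END-terminated table of L1-section
 * sized device windows and registers it early in initarm(), e.g.
 *
 *	static const struct pmap_devmap exboard_devmap[] = {
 *		DEVMAP_ENTRY(0xfd000000, 0x48000000, 0x00100000),
 *		DEVMAP_ENTRY_END
 *	};
 *	...
 *	pmap_devmap_register(exboard_devmap);
 *
 * DEVMAP_ENTRY() rounds va/pa down and the size up to L1 section
 * boundaries and marks the region VM_PROT_READ|VM_PROT_WRITE, PTE_DEV.
 */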

/*
 * The pmap structure itself
 */
struct pmap {
	struct uvm_object	pm_obj;
	kmutex_t		pm_obj_lock;
#define	pm_lock pm_obj.vmobjlock
#ifndef ARM_HAS_VBAR
	pd_entry_t		*pm_pl1vec;
	pd_entry_t		pm_l1vec;
#endif
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
#ifdef ARM_MMU_EXTENDED
	pd_entry_t		*pm_l1;
	paddr_t			pm_l1_pa;
	bool			pm_remove_all;
#ifdef MULTIPROCESSOR
	kcpuset_t		*pm_onproc;
	kcpuset_t		*pm_active;
#if PMAP_TLB_MAX > 1
	u_int			pm_shootdown_pending;
#endif
#endif
	struct pmap_asid_info	pm_pai[PMAP_TLB_MAX];
#else
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	uint8_t			pm_domain;
	bool			pm_activated;
	bool			pm_remove_all;
#endif
};

struct pmap_kernel {
	struct pmap		kernel_pmap;
};

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
	uint8_t pv_cache;
	uint8_t pv_prot;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelstack;
extern pv_addr_t abtstack;
extern pv_addr_t fiqstack;
extern pv_addr_t irqstack;
extern pv_addr_t undstack;
extern pv_addr_t idlestack;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

#ifdef ARM_MMU_EXTENDED
extern bool arm_has_tlbiasid_p;	/* also in <arm/locore.h> */
#endif

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2
#define	PTE_DEV		3

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#ifdef PMAP_CACHE_VIVT
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
#endif
#ifdef PMAP_CACHE_VIPT
#define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
#define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
#endif
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_KMPAGE	0x0200		/* page is used for kmem */
#define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
#define	PVF_KMOD	0x0800		/* unmanaged page is modified  */
#define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
#define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)
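
/*
 * Example (illustrative): because all of these flags share one word,
 * a single mask can test several attributes at once; the PVF_DMOD
 * composite is the pattern, e.g.
 *
 *	bool mod_p = ((pg)->mdpage.pvh_attrs & PVF_DMOD) != 0;
 *
 * is true for managed (PVF_MOD) as well as unmanaged (PVF_KMOD,
 * PVF_KMPAGE) modifications.
 */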

/*
 * Commonly referenced structures
 */
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */
extern int		arm_poolpage_vmfreelist;

/*
 * Macros that we need to export
 */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(md)	\
	(((md)->pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define pmap_phys_address(ppn)		(arm_ptob((ppn)))
u_int arm32_mmap_flags(paddr_t);
#define ARM32_MMAP_WRITECOMBINE		0x40000000
#define ARM32_MMAP_CACHEABLE		0x20000000
#define ARM_MMAP_WRITECOMBINE		ARM32_MMAP_WRITECOMBINE
#define ARM_MMAP_CACHEABLE		ARM32_MMAP_CACHEABLE
#define pmap_mmap_flags(ppn)		arm32_mmap_flags(ppn)

#define	PMAP_PTE			0x10000000 /* kenter_pa */
#define	PMAP_DEV			0x20000000 /* kenter_pa */
#define	PMAP_DEV_SO			0x40000000 /* kenter_pa */
#define	PMAP_DEV_MASK			(PMAP_DEV | PMAP_DEV_SO)
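
/*
 * Example (hypothetical driver, illustrative only): a device's d_mmap
 * routine can request a write-combined user mapping by OR-ing one of
 * the ARM32_MMAP_* flags into the page number it returns; the MI code
 * recovers the flags via pmap_mmap_flags():
 *
 *	paddr_t
 *	exfb_mmap(dev_t dev, off_t off, int prot)
 *	{
 *		if (off < 0 || off >= EXFB_SIZE)
 *			return (paddr_t)-1;
 *		return arm_btop(EXFB_BASE + off) | ARM32_MMAP_WRITECOMBINE;
 *	}
 */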

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

#ifdef ARM_MMU_EXTENDED
int	pmap_maxproc_set(int);
#endif

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
int	pmap_prefetchabt_fixup(void *);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
bool	pmap_extract_coherency(pmap_t, vaddr_t, paddr_t *, bool *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_unmap_chunk(vaddr_t, vaddr_t, vsize_t);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
/*
 * For the pmap, this is a more useful way to map a direct mapped page.
 * It returns either the direct-mapped VA or the VA supplied if it can't
 * be direct mapped.
 */
vaddr_t	pmap_direct_mapped_phys(paddr_t, bool *, vaddr_t);
#endif

/*
 * used by dumpsys to record the PA of the L1 table
 */
uint32_t pmap_kernel_L1_addr(void);
/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

#if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
/*
 * Ending VA of direct mapped memory (usually KERNEL_VM_BASE).
 */
extern vaddr_t pmap_directlimit;
#endif

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	KASSERT(trunc_page(va) == va);

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page tables are always mapped write-through.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * Perform compile time evaluation of PMAP_NEEDS_PTE_SYNC when only a
 * single MMU type is selected.
 *
 * StrongARM SA-1 caches do not have a write-through mode, so on these
 * we need to do PTE syncs.  V6 MMUs need PTE syncs as well.  The MEMC,
 * GENERIC and XSCALE MMUs do not need PTE syncs.
 *
 * Use run time evaluation for all other cases.
 */
#if (ARM_NMMUS == 1)
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0)
#define	PMAP_INCLUDE_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	1
#elif (ARM_MMU_MEMC + ARM_MMU_GENERIC + ARM_MMU_XSCALE != 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

static inline void
pmap_ptesync(pt_entry_t *ptep, size_t cnt)
{
	if (PMAP_NEEDS_PTE_SYNC) {
		cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
#ifdef SHEEVA_L2_CACHE
		cpu_sdcache_wb_range((vaddr_t)ptep, -1,
		    cnt * sizeof(pt_entry_t));
#endif
	}
	arm_dsb();
}

#define	PDE_SYNC(pdep)			pmap_ptesync((pdep), 1)
#define	PDE_SYNC_RANGE(pdep, cnt)	pmap_ptesync((pdep), (cnt))
#define	PTE_SYNC(ptep)			pmap_ptesync((ptep), PAGE_SIZE / L2_S_SIZE)
#define	PTE_SYNC_RANGE(ptep, cnt)	pmap_ptesync((ptep), (cnt))
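
/*
 * Usage sketch (illustrative, mirroring what the pmap does internally):
 * every store to a live page table entry is followed by a sync so that
 * the hardware table walk sees the update, e.g.
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC(ptep);
 *
 * When PMAP_NEEDS_PTE_SYNC is 0 this degenerates to the arm_dsb()
 * barrier alone.
 */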

#define l1pte_valid_p(pde)	((pde) != 0)
#define l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define l1pte_supersection_p(pde) (l1pte_section_p(pde)	\
				&& ((pde) & L1_S_V6_SUPER) != 0)
#define l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)
#define l1pte_pa(pde)		((pde) & L1_C_ADDR_MASK)
#define l1pte_index(v)		((vaddr_t)(v) >> L1_S_SHIFT)
#define l1pte_pgindex(v)	l1pte_index((v) & L1_ADDR_BITS \
		& ~(PAGE_SIZE * PAGE_SIZE / sizeof(pt_entry_t) - 1))

static inline void
l1pte_setone(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
}

static inline void
l1pte_set(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
	if (l1pte_page_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (PAGE_SIZE / L2_T_SIZE - 1)) == 0, "%p", pdep);
		for (int k = 1; k < PAGE_SIZE / L2_T_SIZE; k++) {
			pde += L2_T_SIZE;
			pdep[k] = pde;
		}
	} else if (l1pte_supersection_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (L1_SS_SIZE / L1_S_SIZE - 1)) == 0, "%p", pdep);
		for (int k = 1; k < L1_SS_SIZE / L1_S_SIZE; k++) {
			pdep[k] = pde;
		}
	}
}

#define l2pte_index(v)		((((v) & L2_ADDR_BITS) >> PGSHIFT) << (PGSHIFT-L2_S_SHIFT))
#define l2pte_valid_p(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l1pte_lpage_p(pte)	(((pte) & L2_TYPE_MASK) == L2_TYPE_L)
#define l2pte_minidata_p(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))

static inline void
l2pte_set(pt_entry_t *ptep, pt_entry_t pte, pt_entry_t opte)
{
	if (l1pte_lpage_p(pte)) {
		KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (L2_L_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
		for (int k = 0; k < L2_L_SIZE / L2_S_SIZE; k++) {
			*ptep++ = pte;
		}
	} else {
		KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
		for (int k = 0; k < PAGE_SIZE / L2_S_SIZE; k++) {
			KASSERTMSG(*ptep == opte, "%#x [*%p] != %#x", *ptep, ptep, opte);
			*ptep++ = pte;
			pte += L2_S_SIZE;
			if (opte)
				opte += L2_S_SIZE;
		}
	}
}

static inline void
l2pte_reset(pt_entry_t *ptep)
{
	KASSERTMSG((((uintptr_t)ptep / sizeof(*ptep)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
	*ptep = 0;
	for (int k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
		ptep[k] = 0;
	}
}

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid_p(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_supersection(pde)	l1pte_supersection_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid_p(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
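
/*
 * Worked example (with the usual 16KB L1 table, 4-byte pd_entry_t and
 * KERNEL_BASE of 0x80000000): KERNEL_BASE >> L1_S_SHIFT = 2048 user
 * slots, so KERNEL_PD_SIZE = 16384 - 2048 * 4 = 8192 bytes of L1
 * entries belonging to the kernel half.
 */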

void	bzero_page(vaddr_t);
void	bcopy_page(vaddr_t, vaddr_t);

#ifdef FPU_VFP
void	bzero_page_vfp(vaddr_t);
void	bcopy_page_vfp(vaddr_t, vaddr_t);
#endif

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11(void);
#endif /* CPU_ARM11 */
#if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11mpcore(void);
#endif
#if ARM_MMU_V6 == 1
void	pmap_pte_init_armv6(void);
#endif /* ARM_MMU_V6 */
#if ARM_MMU_V7 == 1
void	pmap_pte_init_armv7(void);
#endif /* ARM_MMU_V7 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_nocache_mode;
extern pt_entry_t		pte_l2_l_nocache_mode;
extern pt_entry_t		pte_l2_s_nocache_mode;

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mode;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l1_s_wc_mode;
extern pt_entry_t		pte_l2_l_wc_mode;
extern pt_entry_t		pte_l2_s_wc_mode;

extern pt_entry_t		pte_l1_s_cache_mask;
extern pt_entry_t		pte_l2_l_cache_mask;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_prot_u;
extern pt_entry_t		pte_l1_s_prot_w;
extern pt_entry_t		pte_l1_s_prot_ro;
extern pt_entry_t		pte_l1_s_prot_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_ro;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l2_l_prot_u;
extern pt_entry_t		pte_l2_l_prot_w;
extern pt_entry_t		pte_l2_l_prot_ro;
extern pt_entry_t		pte_l2_l_prot_mask;

extern pt_entry_t		pte_l1_ss_proto;
extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

#define	KERNEL_PID		0	/* The kernel uses ASID 0 */

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel pmap uses domain #0 */

#ifdef ARM_MMU_EXTENDED
#define	PMAP_DOMAIN_USER	1	/* User pmaps use domain #1 */
#define	DOMAIN_DEFAULT		((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | (DOMAIN_CLIENT << (PMAP_DOMAIN_USER*2)))
#else
#define	DOMAIN_DEFAULT		((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)))
#endif
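
/*
 * Worked value: DOMAIN_CLIENT is 0x1 in the DACR field encoding, so
 * with ARM_MMU_EXTENDED the default is (1 << 0) | (1 << 2) = 0x5
 * (domains 0 and 1 as clients); otherwise it is 0x1 (domain 0 only).
 */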

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U_generic	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_generic	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_generic	(0)
#define	L1_S_PROT_MASK_generic	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_xscale	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_xscale	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_xscale	(0)
#define	L1_S_PROT_MASK_xscale	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv6	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv6	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv7	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv7	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
#define	L1_S_CACHE_MASK_armv6	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
#define	L1_S_CACHE_MASK_armv6n	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
#define	L1_S_CACHE_MASK_armv7	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)

#define	L2_L_PROT_U_generic	(L2_AP(AP_U))
#define	L2_L_PROT_W_generic	(L2_AP(AP_W))
#define	L2_L_PROT_RO_generic	(0)
#define	L2_L_PROT_MASK_generic	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_xscale	(L2_AP(AP_U))
#define	L2_L_PROT_W_xscale	(L2_AP(AP_W))
#define	L2_L_PROT_RO_xscale	(0)
#define	L2_L_PROT_MASK_xscale	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv6n	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv7	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
#define	L2_L_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
#define	L2_L_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
#define	L2_L_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_RO_generic	(0)
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_xscale	(0)
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv6n	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv7	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
#define	L2_XS_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
#ifdef	ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_CACHE_MASK_armv6c	L2_XS_CACHE_MASK_armv6
#else
#define	L2_S_CACHE_MASK_armv6c	L2_S_CACHE_MASK_generic
#endif
#define	L2_S_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
#define	L2_S_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)


#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)
#define	L1_S_PROTO_armv6	(L1_TYPE_S)
#define	L1_S_PROTO_armv7	(L1_TYPE_S)

#define	L1_SS_PROTO_generic	0
#define	L1_SS_PROTO_xscale	0
#define	L1_SS_PROTO_armv6	(L1_TYPE_S | L1_S_V6_SS)
#define	L1_SS_PROTO_armv7	(L1_TYPE_S | L1_S_V6_SS)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)
#define	L1_C_PROTO_armv6	(L1_TYPE_C)
#define	L1_C_PROTO_armv7	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XS)
#ifdef	ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_PROTO_armv6c	(L2_TYPE_XS)    /* XP=0, extended small page */
#else
#define	L2_S_PROTO_armv6c	(L2_TYPE_S)	/* XP=0, subpage APs */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv6n	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv6n	(L2_TYPE_S)	/* with XP=1 */
#endif
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv7	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv7	(L2_TYPE_S)
#endif

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L1_S_PROT_U		pte_l1_s_prot_u
#define	L1_S_PROT_W		pte_l1_s_prot_w
#define	L1_S_PROT_RO		pte_l1_s_prot_ro
#define	L1_S_PROT_MASK		pte_l1_s_prot_mask

#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_RO		pte_l2_s_prot_ro
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L2_L_PROT_U		pte_l2_l_prot_u
#define	L2_L_PROT_W		pte_l2_l_prot_w
#define	L2_L_PROT_RO		pte_l2_l_prot_ro
#define	L2_L_PROT_MASK		pte_l2_l_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_SS_PROTO		pte_l1_ss_proto
#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_generic
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6N != 0
#define	L1_S_PROT_U		L1_S_PROT_U_armv6
#define	L1_S_PROT_W		L1_S_PROT_W_armv6
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv6
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv6

#define	L2_S_PROT_U		L2_S_PROT_U_armv6n
#define	L2_S_PROT_W		L2_S_PROT_W_armv6n
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv6n
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv6n

#define	L2_L_PROT_U		L2_L_PROT_U_armv6n
#define	L2_L_PROT_W		L2_L_PROT_W_armv6n
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv6n
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv6n

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv6n
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv6n
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv6n

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_armv6
#define	L1_C_PROTO		L1_C_PROTO_armv6
#define	L2_S_PROTO		L2_S_PROTO_armv6n

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6C != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_RO		L2_S_PROT_RO_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_SS_PROTO		L1_SS_PROTO_xscale
#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#elif ARM_MMU_V7 == 1
#define	L1_S_PROT_U		L1_S_PROT_U_armv7
#define	L1_S_PROT_W		L1_S_PROT_W_armv7
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv7

#define	L2_S_PROT_U		L2_S_PROT_U_armv7
#define	L2_S_PROT_W		L2_S_PROT_W_armv7
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv7

#define	L2_L_PROT_U		L2_L_PROT_U_armv7
#define	L2_L_PROT_W		L2_L_PROT_W_armv7
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv7

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv7
#define	L1_S_PROTO		L1_S_PROTO_armv7
#define	L1_C_PROTO		L1_C_PROTO_armv7
#define	L2_S_PROTO		L2_S_PROTO_armv7

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#endif /* ARM_NMMUS > 1 */

/*
 * Macros to set and query the write permission on page descriptors.
 */
#define l1pte_set_writable(pte)	(((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
#define l1pte_set_readonly(pte)	(((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)

#define l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
#define l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)

#define l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
				 (L2_S_PROT_RO == 0 || \
				  ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */

#define	L1_S_PROT(ku, pr)	(					   \
	(((ku) == PTE_USER) ? 						   \
	    L1_S_PROT_U | (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)	   \
	: 								   \
	    (((L1_S_PROT_RO && 						   \
		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
		    L1_S_PROT_RO : L1_S_PROT_W)))			   \
    )

#define	L2_L_PROT(ku, pr)	(					   \
	(((ku) == PTE_USER) ?						   \
	    L2_L_PROT_U | (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)	   \
	:								   \
	    (((L2_L_PROT_RO && 						   \
		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
		    L2_L_PROT_RO : L2_L_PROT_W)))			   \
    )

#define	L2_S_PROT(ku, pr)	(					   \
	(((ku) == PTE_USER) ?						   \
	    L2_S_PROT_U | (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)	   \
	:								   \
	    (((L2_S_PROT_RO &&						   \
		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
		    L2_S_PROT_RO : L2_S_PROT_W)))			   \
    )
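
/*
 * Example (illustrative): a kernel, read/write, cached small-page
 * mapping is assembled by OR-ing the pieces above, which is roughly
 * what the pmap does when it constructs a new PTE:
 *
 *	pt_entry_t npte = L2_S_PROTO | pa
 *	    | L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE)
 *	    | pte_l2_s_cache_mode;
 *
 * Because the names dispatch on MMU class (variables when
 * ARM_NMMUS > 1, constants otherwise), the same expression works on
 * every configuration.
 */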

/*
 * Macros to test if a mapping is mappable with an L1 SuperSection,
 * L1 Section, or an L2 Large Page mapping.
 */
#define	L1_SS_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)

#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)

#define	PMAP_MAPSIZE1	L2_L_SIZE
#define	PMAP_MAPSIZE2	L1_S_SIZE
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_MAPSIZE3	L1_SS_SIZE
#endif

#ifndef _LOCORE
/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
extern paddr_t physical_start, physical_end;
#ifdef PMAP_NEED_ALLOC_POOLPAGE
struct vm_page *arm_pmap_alloc_poolpage(int);
#define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
#endif
#if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
vaddr_t	pmap_map_poolpage(paddr_t);
paddr_t	pmap_unmap_poolpage(vaddr_t);
#define	PMAP_MAP_POOLPAGE(pa)	pmap_map_poolpage(pa)
#define PMAP_UNMAP_POOLPAGE(va)	pmap_unmap_poolpage(va)
#endif

#define __HAVE_PMAP_PV_TRACK	1

void pmap_pv_protect(paddr_t, vm_prot_t);

struct pmap_page {
	SLIST_HEAD(,pv_entry) pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
};
/*
 * pmap-specific data stored in the vm_page structure.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	struct pmap_page pp;
#define	pvh_list	pp.pvh_list
#define	pvh_attrs	pp.pvh_attrs
#define	uro_mappings	pp.uro_mappings
#define	urw_mappings	pp.urw_mappings
#define	kro_mappings	pp.k_u.s_mappings[0]
#define	krw_mappings	pp.k_u.s_mappings[1]
#define	k_mappings	pp.k_u.i_mappings
};

#define PMAP_PAGE_TO_MD(ppage) container_of((ppage), struct vm_page_md, pp)

/*
 * Set the default color of each page.
 */
#if ARM_MMU_V6 > 0
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = VM_PAGE_TO_PHYS(pg) & arm_cache_prefer_mask
#else
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = 0
#endif

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	SLIST_INIT(&(pg)->mdpage.pvh_list);				\
	VM_MDPAGE_PVH_ATTRS_INIT(pg);					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)

#endif /* !_LOCORE */

#endif /* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */