/*	$NetBSD: pmap.h,v 1.128 2014/03/31 01:48:37 matt Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#endif
#include <arm/cpufunc.h>
#include <uvm/uvm_object.h>
#endif

#ifdef ARM_MMU_EXTENDED
#define PMAP_TLB_MAX			1
#define PMAP_TLB_HWPAGEWALKER		1
#if PMAP_TLB_MAX > 1
#define PMAP_TLB_NEED_SHOOTDOWN		1
#endif
#define PMAP_TLB_FLUSH_ASID_ON_RESET	(arm_has_tlbiasid_p)
#define PMAP_TLB_NUM_PIDS		256
#define cpu_set_tlb_info(ci, ti)        ((void)((ci)->ci_tlb_info = (ti)))
#if PMAP_TLB_MAX > 1
#define cpu_tlb_info(ci)		((ci)->ci_tlb_info)
#else
#define cpu_tlb_info(ci)		(&pmap_tlb0_info)
#endif
#define pmap_md_tlb_asid_max()		(PMAP_TLB_NUM_PIDS - 1)
#include <uvm/pmap/tlb.h>
#include <uvm/pmap/pmap_tlb.h>

/*
 * If we have an EXTENDED MMU and the address space is split evenly between
 * user and kernel, we can use TTBR0/TTBR1 to provide separate L1 tables
 * for the user and kernel address spaces.
 */
#if (KERNEL_BASE & 0x80000000) == 0
#error ARMv6 or later systems must have a KERNEL_BASE >= 0x80000000
#endif
#endif  /* ARM_MMU_EXTENDED */

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * address space can be broken up into 4096 1MB regions, each
 * described by an L1 PTE in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
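
/*
 * For illustration: with the 1MB granularity above, a KERNEL_BASE of
 * 0x80000000 puts the user/kernel split exactly half-way through the
 * 4096-slot L1 table:
 *
 *	0x80000000 >> L1_S_SHIFT == 0x800 == 2048
 *
 * so L1 slots 0..2047 are per-process and slots 2048..4095 reference
 * the shared kernel L2 tables.
 */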

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_XLOG2	(L1_S_SHIFT)
#define L2_BUCKET_XSIZE	(1 << L2_BUCKET_XLOG2)
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		(32 - (L2_BUCKET_XLOG2 + L2_BUCKET_LOG2))
#define	L2_SIZE		(1 << L2_LOG2)
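
/*
 * Worked out (sketch): L1_S_SHIFT is 20, so one bucket slot covers a
 * 1MB section and a 16-entry bucket covers 2^(20 + 4) = 16MB.  That
 * leaves
 *
 *	L2_LOG2 == 32 - (20 + 4) == 8,	L2_SIZE == 256
 *
 * l2_dtable pointers per pmap (the pm_l2[] array below).
 */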

/*
 * Tell MI code that the cache is virtually-indexed.
 * ARMv6 and later caches are physically tagged (VIPT); all earlier
 * caches are virtually tagged (VIVT).
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define PMAP_CACHE_VIPT
#else
#define PMAP_CACHE_VIVT
#endif

#ifndef _LOCORE

#ifndef ARM_MMU_EXTENDED
struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			uint8_t csu_cache_b[2];
			uint16_t csu_cache;
		} cs_cache_u;

		union {
			uint8_t csu_tlb_b[2];
			uint16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	uint32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
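
/*
 * e.g. (sketch): code can force subsequent cache/TLB operations for a
 * pmap by marking its occupancy state all-ones:
 *
 *	pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
 *
 * pm_cstate is the per-pmap instance of this union (see struct pmap
 * below).
 */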
#endif /* !ARM_MMU_EXTENDED */

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
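
/*
 * A board's static device mappings are typically described by an array
 * terminated with an all-zero entry and registered before the pmap is
 * bootstrapped.  Sketch (MYDEV_* are hypothetical board constants):
 *
 *	static const struct pmap_devmap mydev_devmap[] = {
 *		{ MYDEV_VBASE, MYDEV_PBASE, MYDEV_SIZE,
 *		  VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 *
 *	pmap_devmap_register(mydev_devmap);
 *
 * pmap_devmap_bootstrap() and pmap_devmap_register() are declared below.
 */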

/*
 * The pmap structure itself
 */
struct pmap {
	struct uvm_object	pm_obj;
	kmutex_t		pm_obj_lock;
#define	pm_lock pm_obj.vmobjlock
#ifndef ARM_HAS_VBAR
	pd_entry_t		*pm_pl1vec;
	pd_entry_t		pm_l1vec;
#endif
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
#ifdef ARM_MMU_EXTENDED
	pd_entry_t		*pm_l1;
	paddr_t			pm_l1_pa;
	bool			pm_remove_all;
#ifdef MULTIPROCESSOR
	kcpuset_t		*pm_onproc;
	kcpuset_t		*pm_active;
#if PMAP_TLB_MAX > 1
	u_int			pm_shootdown_pending;
#endif
#endif
	struct pmap_asid_info	pm_pai[PMAP_TLB_MAX];
#else
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	uint8_t			pm_domain;
	bool			pm_activated;
	bool			pm_remove_all;
#endif
};

struct pmap_kernel {
	struct pmap		kernel_pmap;
};

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
	uint8_t pv_cache;
	uint8_t pv_prot;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelstack;
extern pv_addr_t abtstack;
extern pv_addr_t fiqstack;
extern pv_addr_t irqstack;
extern pv_addr_t undstack;
extern pv_addr_t idlestack;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

#ifdef ARM_MMU_EXTENDED
extern bool arm_has_tlbiasid_p;	/* also in <arm/locore.h> */
#endif

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#ifdef PMAP_CACHE_VIVT
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
#endif
#ifdef PMAP_CACHE_VIPT
#define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
#define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
#endif
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_KMPAGE	0x0200		/* page is used for kmem */
#define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
#define	PVF_KMOD	0x0800		/* unmanaged page is modified  */
#define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
#define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)
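
/*
 * PVF_DMOD groups every route by which a page can be dirtied (managed
 * modification, unmanaged modification, or use as a kmem page), so a
 * sketch of a combined "modified" test is
 *
 *	bool dirty = (md->pvh_attrs & PVF_DMOD) != 0;
 */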

/*
 * Commonly referenced structures
 */
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */
extern int		arm_poolpage_vmfreelist;

/*
 * Macros that we need to export
 */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(md)	\
	(((md)->pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define pmap_phys_address(ppn)		(arm_ptob((ppn)))
u_int arm32_mmap_flags(paddr_t);
#define ARM32_MMAP_WRITECOMBINE	0x40000000
#define ARM32_MMAP_CACHEABLE		0x20000000
#define pmap_mmap_flags(ppn)			arm32_mmap_flags(ppn)

#define	PMAP_PTE			0x10000000 /* kenter_pa */

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
int	pmap_prefetchabt_fixup(void *);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
struct pcb;
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);
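
/*
 * Illustrative bootstrap-time use (a sketch; l1pt_va, va, pa and size
 * are whatever the port's early MD code has computed): map a cacheable,
 * read/write chunk of KVA, letting pmap_map_chunk() pick section or
 * page mappings as alignment allows:
 *
 *	pmap_map_chunk(l1pt_va, va, pa, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 *
 * The final argument takes the PTE_NOCACHE/PTE_CACHE/PTE_PAGETABLE
 * modes defined above.
 */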

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * used by dumpsys to record the PA of the L1 table
 */
uint32_t pmap_kernel_L1_addr(void);
/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	KASSERT(trunc_page(va) == va);

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}
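
/*
 * e.g. (sketch): fetching the physical frame behind a page-aligned
 * kernel VA by hand, using the l2pte_* accessors defined further down:
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	if (ptep != NULL && l2pte_valid_p(*ptep))
 *		pa = l2pte_pa(*ptep);
 *
 * (vtophys() below does the same job via pmap_extract().)
 */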

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page tables are always mapped write-through.
 * Thus, on some platforms we can play fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * StrongARM SA-1 caches do not have a write-through mode, and ARMv6
 * also requires PTE syncs.  If only one such MMU class is configured,
 * evaluate this at compile time.
 */
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)
#define	PMAP_INCLUDE_PTE_SYNC
#if (ARM_MMU_V6 > 0)
#define	PMAP_NEEDS_PTE_SYNC	1
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

static inline void
pmap_ptesync(pt_entry_t *ptep, size_t cnt)
{
	if (PMAP_NEEDS_PTE_SYNC)
		cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
#if ARM_MMU_V7 > 0
	__asm("dsb");
#endif
}

#define	PDE_SYNC(pdep)			pmap_ptesync((pdep), 1)
#define	PDE_SYNC_RANGE(pdep, cnt)	pmap_ptesync((pdep), (cnt))
#define	PTE_SYNC(ptep)			pmap_ptesync((ptep), PAGE_SIZE / L2_S_SIZE)
#define	PTE_SYNC_RANGE(ptep, cnt)	pmap_ptesync((ptep), (cnt))
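
/*
 * Typical update pattern (sketch): store the new PTE value(s) with
 * l2pte_set() (defined below), then sync so the hardware table walker
 * sees the change:
 *
 *	l2pte_set(ptep, npte, opte);
 *	PTE_SYNC(ptep);
 */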

#define l1pte_valid_p(pde)	((pde) != 0)
#define l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define l1pte_supersection_p(pde) (l1pte_section_p(pde)	\
				&& ((pde) & L1_S_V6_SUPER) != 0)
#define l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)
#define l1pte_pa(pde)		((pde) & L1_C_ADDR_MASK)
#define l1pte_index(v)		((vaddr_t)(v) >> L1_S_SHIFT)
#define l1pte_pgindex(v)	l1pte_index((v) & L1_ADDR_BITS \
		& ~(PAGE_SIZE * PAGE_SIZE / sizeof(pt_entry_t) - 1))
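
/*
 * e.g. l1pte_index(0xc0100000) == 0xc01: with L1_S_SHIFT == 20, each
 * of the 4096 L1 slots covers one 1MB section.
 */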

static inline void
l1pte_setone(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
}

static inline void
l1pte_set(pt_entry_t *pdep, pt_entry_t pde)
{
	*pdep = pde;
	if (l1pte_page_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (PAGE_SIZE / L2_T_SIZE - 1)) == 0, "%p", pdep);
		for (size_t k = 1; k < PAGE_SIZE / L2_T_SIZE; k++) {
			pde += L2_T_SIZE;
			pdep[k] = pde;
		}
	} else if (l1pte_supersection_p(pde)) {
		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (L1_SS_SIZE / L1_S_SIZE - 1)) == 0, "%p", pdep);
		for (size_t k = 1; k < L1_SS_SIZE / L1_S_SIZE; k++) {
			pdep[k] = pde;
		}
	}
}

#define l2pte_index(v)		((((v) & L2_ADDR_BITS) >> PGSHIFT) << (PGSHIFT-L2_S_SHIFT))
#define l2pte_valid_p(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l1pte_lpage_p(pte)	(((pte) & L2_TYPE_MASK) == L2_TYPE_L)
#define l2pte_minidata_p(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))

static inline void
l2pte_set(pt_entry_t *ptep, pt_entry_t pte, pt_entry_t opte)
{
	for (size_t k = 0; k < PAGE_SIZE / L2_S_SIZE; k++) {
		KASSERTMSG(*ptep == opte, "%#x [*%p] != %#x", *ptep, ptep, opte);
		*ptep++ = pte;
		pte += L2_S_SIZE;
		if (opte)
			opte += L2_S_SIZE;
	}
}

static inline void
l2pte_reset(pt_entry_t *ptep)
{
	*ptep = 0;
	for (vsize_t k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
		ptep[k] = 0;
	}
}

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid_p(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_supersection(pde)	l1pte_supersection_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid_p(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
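
/*
 * Worked out (sketch): with L1_TABLE_SIZE == 0x4000 (16KB), 4-byte
 * pd_entry_t's, and KERNEL_BASE == 0x80000000:
 *
 *	KERNEL_PD_SIZE == 16384 - 2048 * 4 == 8192 bytes
 *
 * i.e. the kernel owns the upper half of every L1 table.
 */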

void	bzero_page(vaddr_t);
void	bcopy_page(vaddr_t, vaddr_t);

#ifdef FPU_VFP
void	bzero_page_vfp(vaddr_t);
void	bcopy_page_vfp(vaddr_t, vaddr_t);
#endif

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11(void);
#endif /* CPU_ARM11 */
#if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
void	pmap_pte_init_arm11mpcore(void);
#endif
#if ARM_MMU_V7 == 1
void	pmap_pte_init_armv7(void);
#endif /* ARM_MMU_V7 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l1_s_wc_mode;
extern pt_entry_t		pte_l2_l_wc_mode;
extern pt_entry_t		pte_l2_s_wc_mode;

extern pt_entry_t		pte_l1_s_prot_u;
extern pt_entry_t		pte_l1_s_prot_w;
extern pt_entry_t		pte_l1_s_prot_ro;
extern pt_entry_t		pte_l1_s_prot_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_ro;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l2_l_prot_u;
extern pt_entry_t		pte_l2_l_prot_w;
extern pt_entry_t		pte_l2_l_prot_ro;
extern pt_entry_t		pte_l2_l_prot_mask;

extern pt_entry_t		pte_l1_ss_proto;
extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

#define	KERNEL_PID		0	/* The kernel uses ASID 0 */

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel pmap uses domain #0 */
#ifdef ARM_MMU_EXTENDED
#define	PMAP_DOMAIN_USER	1	/* User pmaps use domain #1 */
#endif

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U_generic	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_generic	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_generic	(0)
#define	L1_S_PROT_MASK_generic	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_xscale	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_xscale	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_xscale	(0)
#define	L1_S_PROT_MASK_xscale	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv6	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv6	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv7	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv7	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
#define	L1_S_CACHE_MASK_armv6	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
#define	L1_S_CACHE_MASK_armv7	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)

#define	L2_L_PROT_U_generic	(L2_AP(AP_U))
#define	L2_L_PROT_W_generic	(L2_AP(AP_W))
#define	L2_L_PROT_RO_generic	(0)
#define	L2_L_PROT_MASK_generic	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_xscale	(L2_AP(AP_U))
#define	L2_L_PROT_W_xscale	(L2_AP(AP_W))
#define	L2_L_PROT_RO_xscale	(0)
#define	L2_L_PROT_MASK_xscale	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv6n	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv7	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
#define	L2_L_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
#define	L2_L_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_RO_generic	(0)
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_xscale	(0)
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv6n	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv6n	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv7	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
#define	L2_XS_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
#define	L2_S_CACHE_MASK_armv6n	L2_XS_CACHE_MASK_armv6
#ifdef	ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_CACHE_MASK_armv6c	L2_XS_CACHE_MASK_armv6
#else
#define	L2_S_CACHE_MASK_armv6c	L2_S_CACHE_MASK_generic
#endif
#define	L2_S_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)


#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)
#define	L1_S_PROTO_armv6	(L1_TYPE_S)
#define	L1_S_PROTO_armv7	(L1_TYPE_S)

#define	L1_SS_PROTO_generic	0
#define	L1_SS_PROTO_xscale	0
#define	L1_SS_PROTO_armv6	(L1_TYPE_S | L1_S_V6_SS)
#define	L1_SS_PROTO_armv7	(L1_TYPE_S | L1_S_V6_SS)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)
#define	L1_C_PROTO_armv6	(L1_TYPE_C)
#define	L1_C_PROTO_armv7	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XS)
#ifdef	ARMV6_EXTENDED_SMALL_PAGE
#define	L2_S_PROTO_armv6c	(L2_TYPE_XS)    /* XP=0, extended small page */
#else
#define	L2_S_PROTO_armv6c	(L2_TYPE_S)	/* XP=0, subpage APs */
#endif
#define	L2_S_PROTO_armv6n	(L2_TYPE_S)	/* with XP=1 */
#ifdef ARM_MMU_EXTENDED
#define	L2_S_PROTO_armv7	(L2_TYPE_S|L2_XS_XN)
#else
#define	L2_S_PROTO_armv7	(L2_TYPE_S)
#endif

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L1_S_PROT_U		pte_l1_s_prot_u
#define	L1_S_PROT_W		pte_l1_s_prot_w
#define	L1_S_PROT_RO		pte_l1_s_prot_ro
#define	L1_S_PROT_MASK		pte_l1_s_prot_mask

#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_RO		pte_l2_s_prot_ro
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L2_L_PROT_U		pte_l2_l_prot_u
#define	L2_L_PROT_W		pte_l2_l_prot_w
#define	L2_L_PROT_RO		pte_l2_l_prot_ro
#define	L2_L_PROT_MASK		pte_l2_l_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_SS_PROTO		pte_l1_ss_proto
#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_generic
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6N != 0
#define	L1_S_PROT_U		L1_S_PROT_U_armv6
#define	L1_S_PROT_W		L1_S_PROT_W_armv6
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv6
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv6

#define	L2_S_PROT_U		L2_S_PROT_U_armv6n
#define	L2_S_PROT_W		L2_S_PROT_W_armv6n
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv6n
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv6n

#define	L2_L_PROT_U		L2_L_PROT_U_armv6n
#define	L2_L_PROT_W		L2_L_PROT_W_armv6n
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv6n
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv6n

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv6
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv6
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv6n

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv6
#define	L1_S_PROTO		L1_S_PROTO_armv6
#define	L1_C_PROTO		L1_C_PROTO_armv6
#define	L2_S_PROTO		L2_S_PROTO_armv6n

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_V6C != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_SS_PROTO		L1_SS_PROTO_generic
#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_RO		L2_S_PROT_RO_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_SS_PROTO		L1_SS_PROTO_xscale
#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#elif ARM_MMU_V7 == 1
#define	L1_S_PROT_U		L1_S_PROT_U_armv7
#define	L1_S_PROT_W		L1_S_PROT_W_armv7
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv7

#define	L2_S_PROT_U		L2_S_PROT_U_armv7
#define	L2_S_PROT_W		L2_S_PROT_W_armv7
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv7

#define	L2_L_PROT_U		L2_L_PROT_U_armv7
#define	L2_L_PROT_W		L2_L_PROT_W_armv7
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv7

/*
 * These prototypes make writeable mappings, while the other MMU types
 * make read-only mappings.
 */
#define	L1_SS_PROTO		L1_SS_PROTO_armv7
#define	L1_S_PROTO		L1_S_PROTO_armv7
#define	L1_C_PROTO		L1_C_PROTO_armv7
#define	L2_S_PROTO		L2_S_PROTO_armv7

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#endif /* ARM_NMMUS > 1 */

/*
 * Macros to set and query the write permission on page descriptors.
 */
#define l1pte_set_writable(pte)	(((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
#define l1pte_set_readonly(pte)	(((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)
#define l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
#define l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)

#define l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
				 (L2_S_PROT_RO == 0 || \
				  ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : L1_S_PROT_RO))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : L2_L_PROT_RO))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO))
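
/*
 * For example (sketch), the bits for a user, read-only small-page
 * mapping are
 *
 *	L2_S_PROT(PTE_USER, VM_PROT_READ) == (L2_S_PROT_U | L2_S_PROT_RO)
 */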

/*
 * Macros to test if a mapping is mappable with an L1 SuperSection,
 * L1 Section, or an L2 Large Page mapping.
 */
#define	L1_SS_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)

#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
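
/*
 * A mapping loop (pmap_map_chunk() is the in-tree user) can test these
 * largest-first to pick the biggest mapping the alignment and
 * remaining size allow; sketched:
 *
 *	if (L1_SS_MAPPABLE_P(va, pa, resid))		16MB supersection
 *	else if (L1_S_MAPPABLE_P(va, pa, resid))	1MB section
 *	else if (L2_L_MAPPABLE_P(va, pa, resid))	64KB large page
 *	else						4KB small pages
 */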

#ifndef _LOCORE
/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
extern paddr_t physical_start, physical_end;
#ifdef PMAP_NEED_ALLOC_POOLPAGE
struct vm_page *arm_pmap_alloc_poolpage(int);
#define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
#endif
#if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
#define	PMAP_MAP_POOLPAGE(pa) \
        ((vaddr_t)((paddr_t)(pa) - physical_start + KERNEL_BASE))
#define PMAP_UNMAP_POOLPAGE(va) \
        ((paddr_t)((vaddr_t)(va) - KERNEL_BASE + physical_start))
#endif
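
/*
 * These assume physical memory is mapped linearly from KERNEL_BASE, so
 * the translation is pure arithmetic and the two macros are exact
 * inverses, e.g. (sketch):
 *
 *	vaddr_t va = PMAP_MAP_POOLPAGE(pa);
 *	KASSERT(PMAP_UNMAP_POOLPAGE(va) == pa);
 */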

/*
 * pmap-specific data stored in the vm_page structure.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	SLIST_HEAD(,pv_entry) pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
#define	kro_mappings	k_u.s_mappings[0]
#define	krw_mappings	k_u.s_mappings[1]
#define	k_mappings	k_u.i_mappings
};

/*
 * Set the default color of each page.
 */
#if ARM_MMU_V6 > 0
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
#else
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = 0
#endif

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	SLIST_INIT(&(pg)->mdpage.pvh_list);				\
	VM_MDPAGE_PVH_ATTRS_INIT(pg);					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)

#endif /* !_LOCORE */

#endif /* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */
   1076