/*	$NetBSD: pmap.h,v 1.81.24.1 2007/10/12 02:22:23 matt Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#endif
#include <arm/cpufunc.h>
#include <uvm/uvm_object.h>
#endif

/*
 * a pmap describes a process's 4GB virtual address space.  this
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
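
/*
 * Illustrative sketch (not compiled in): how a virtual address selects
 * its L1 slot.  Assumes the usual L1_S_SHIFT of 20 (1MB sections) from
 * <arm/arm32/pte.h>; "va" and "l1slot" are hypothetical locals.
 */
#if 0
	vaddr_t va = 0xc0123456;		/* some kernel VA */
	u_int l1slot = va >> L1_S_SHIFT;	/* 0xc01: one of 4096 L1 slots */
#endif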

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
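
/*
 * Worked example (assuming L1_S_SHIFT == 20): L2_LOG2 = (32 - 20) - 4
 * == 8, so L2_SIZE == 256.  That is, 256 l2_dtables of 16 buckets each
 * cover all 4096 1MB L1 slots.
 */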

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
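
/*
 * Example (illustrative only; the EXAMPLE_* constants are hypothetical):
 * a board's static devmap table, as handed to pmap_devmap_bootstrap()
 * or pmap_devmap_register().  Such tables are conventionally terminated
 * by an all-zero entry.
 */
#if 0
static const struct pmap_devmap example_devmap[] = {
	{
		EXAMPLE_UART_VBASE,		/* pd_va */
		EXAMPLE_UART_PBASE,		/* pd_pa */
		EXAMPLE_UART_SIZE,		/* pd_size */
		VM_PROT_READ|VM_PROT_WRITE,	/* pd_prot */
		PTE_NOCACHE			/* pd_cache */
	},
	{ 0, 0, 0, 0, 0 }			/* terminator */
};
#endif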

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;
	bool			pm_remove_all;
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	struct uvm_object	pm_obj;
#define	pm_lock pm_obj.vmobjlock
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
};

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelpages;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time; an example follows
 * the flag definitions.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
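
/*
 * Because the flags share one namespace, several attributes can be
 * tested or cleared with a single mask, e.g. (illustrative):
 *
 *	pg->mdpage.pvh_attrs &= ~(PVF_MOD|PVF_REF);
 */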

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_remove(pmap,sva,eva)	pmap_do_remove((pmap),(sva),(eva),0)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define pmap_phys_address(ppn)		(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

#if ARM_MMU_V6 > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page tables are always mapped Write-Thru.
 * Thus, on some platforms we can play fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
 * we need to do PTE syncs.  If only SA-1 is configured, then evaluate
 * this at compile time.
 */
#if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)
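
/*
 * Typical usage sketch (illustrative; "npte" is a hypothetical new PTE
 * value, and the NULL check on vtopte() is omitted for brevity): update
 * a PTE in place, then make sure the MMU will see the change.
 */
#if 0
	pt_entry_t *ptep = vtopte(va);

	*ptep = npte;		/* write the new entry */
	PTE_SYNC(ptep);		/* write back the cache line if required */
#endif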

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
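
/*
 * For example (assuming the common KERNEL_BASE of 0xc0000000, a 16KB
 * L1 table, and L1_S_SHIFT == 20): 0xc0000000 >> 20 == 0xc00 user
 * slots, so KERNEL_PD_SIZE == 16384 - 0xc00 * 4 == 4096 bytes, i.e.
 * the top quarter of the L1 table belongs to the kernel.
 */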

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * tell MI code that the cache is virtually-indexed.
 * ARMv6 is physically-tagged but all others are virtually-tagged.
 */
#if ARM_MMU_V6 > 0
#define PMAP_CACHE_VIPT
#else
#define PMAP_CACHE_VIVT
#endif

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
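
/*
 * For example, on the generic MMU class a writable user mapping
 * expands as:
 *
 *	L2_S_PROT(PTE_USER, VM_PROT_READ|VM_PROT_WRITE)
 *	    == L2_S_PROT_U | L2_S_PROT_W
 *	    == L2_AP(AP_U) | L2_AP(AP_W)
 */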

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
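
/*
 * Sketch of how a caller such as pmap_map_chunk() might pick the
 * largest mapping that fits (illustrative only; "resid" is a
 * hypothetical remaining-size variable):
 */
#if 0
	if (L1_S_MAPPABLE_P(va, pa, resid)) {
		/* map a 1MB section */
	} else if (L2_L_MAPPABLE_P(va, pa, resid)) {
		/* map a 64KB large page */
	} else {
		/* map 4KB small pages */
	}
#endif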

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#endif /* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */