      1  1.154.2.2  pgoyette /*	$NetBSD: pmap.h,v 1.154.2.2 2018/10/20 06:58:25 pgoyette Exp $	*/
      2       1.46   thorpej 
      3       1.46   thorpej /*
      4       1.65       scw  * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
      5       1.46   thorpej  * All rights reserved.
      6       1.46   thorpej  *
      7       1.65       scw  * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
      8       1.46   thorpej  *
      9       1.46   thorpej  * Redistribution and use in source and binary forms, with or without
     10       1.46   thorpej  * modification, are permitted provided that the following conditions
     11       1.46   thorpej  * are met:
     12       1.46   thorpej  * 1. Redistributions of source code must retain the above copyright
     13       1.46   thorpej  *    notice, this list of conditions and the following disclaimer.
     14       1.46   thorpej  * 2. Redistributions in binary form must reproduce the above copyright
     15       1.46   thorpej  *    notice, this list of conditions and the following disclaimer in the
     16       1.46   thorpej  *    documentation and/or other materials provided with the distribution.
     17       1.46   thorpej  * 3. All advertising materials mentioning features or use of this software
     18       1.46   thorpej  *    must display the following acknowledgement:
     19       1.46   thorpej  *	This product includes software developed for the NetBSD Project by
     20       1.46   thorpej  *	Wasabi Systems, Inc.
     21       1.46   thorpej  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22       1.46   thorpej  *    or promote products derived from this software without specific prior
     23       1.46   thorpej  *    written permission.
     24       1.46   thorpej  *
     25       1.46   thorpej  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26       1.46   thorpej  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27       1.46   thorpej  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28       1.46   thorpej  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29       1.46   thorpej  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30       1.46   thorpej  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31       1.46   thorpej  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32       1.46   thorpej  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33       1.46   thorpej  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34       1.46   thorpej  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35       1.46   thorpej  * POSSIBILITY OF SUCH DAMAGE.
     36       1.46   thorpej  */
     37        1.1   reinoud 
     38        1.1   reinoud /*
     39        1.1   reinoud  * Copyright (c) 1994,1995 Mark Brinicombe.
     40        1.1   reinoud  * All rights reserved.
     41        1.1   reinoud  *
     42        1.1   reinoud  * Redistribution and use in source and binary forms, with or without
     43        1.1   reinoud  * modification, are permitted provided that the following conditions
     44        1.1   reinoud  * are met:
     45        1.1   reinoud  * 1. Redistributions of source code must retain the above copyright
     46        1.1   reinoud  *    notice, this list of conditions and the following disclaimer.
     47        1.1   reinoud  * 2. Redistributions in binary form must reproduce the above copyright
     48        1.1   reinoud  *    notice, this list of conditions and the following disclaimer in the
     49        1.1   reinoud  *    documentation and/or other materials provided with the distribution.
     50        1.1   reinoud  * 3. All advertising materials mentioning features or use of this software
     51        1.1   reinoud  *    must display the following acknowledgement:
     52        1.1   reinoud  *	This product includes software developed by Mark Brinicombe
     53        1.1   reinoud  * 4. The name of the author may not be used to endorse or promote products
     54        1.1   reinoud  *    derived from this software without specific prior written permission.
     55        1.1   reinoud  *
     56        1.1   reinoud  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     57        1.1   reinoud  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     58        1.1   reinoud  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     59        1.1   reinoud  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     60        1.1   reinoud  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     61        1.1   reinoud  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     62        1.1   reinoud  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     63        1.1   reinoud  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     64        1.1   reinoud  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     65        1.1   reinoud  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     66        1.1   reinoud  */
     67        1.1   reinoud 
     68        1.1   reinoud #ifndef	_ARM32_PMAP_H_
     69        1.1   reinoud #define	_ARM32_PMAP_H_
     70        1.1   reinoud 
     71       1.18   thorpej #ifdef _KERNEL
     72       1.18   thorpej 
     73       1.52   thorpej #include <arm/cpuconf.h>
     74       1.75       bsh #include <arm/arm32/pte.h>
     75       1.75       bsh #ifndef _LOCORE
     76       1.85      matt #if defined(_KERNEL_OPT)
     77       1.85      matt #include "opt_arm32_pmap.h"
     78      1.136     skrll #include "opt_multiprocessor.h"
     79       1.85      matt #endif
     80       1.19   thorpej #include <arm/cpufunc.h>
     81      1.138     joerg #include <arm/locore.h>
     82       1.12     chris #include <uvm/uvm_object.h>
     83      1.143     skrll #include <uvm/pmap/pmap_pvt.h>
     84       1.75       bsh #endif
     85        1.1   reinoud 
     86      1.124      matt #ifdef ARM_MMU_EXTENDED
     87      1.147     skrll #define PMAP_HWPAGEWALKER		1
     88      1.124      matt #define PMAP_TLB_MAX			1
     89      1.126      matt #if PMAP_TLB_MAX > 1
     90      1.144     skrll #define PMAP_TLB_NEED_SHOOTDOWN		1
     91      1.126      matt #endif
     92      1.126      matt #define PMAP_TLB_FLUSH_ASID_ON_RESET	(arm_has_tlbiasid_p)
     93      1.124      matt #define PMAP_TLB_NUM_PIDS		256
     94      1.124      matt #define cpu_set_tlb_info(ci, ti)        ((void)((ci)->ci_tlb_info = (ti)))
     95      1.124      matt #if PMAP_TLB_MAX > 1
     96      1.124      matt #define cpu_tlb_info(ci)		((ci)->ci_tlb_info)
     97      1.124      matt #else
     98      1.124      matt #define cpu_tlb_info(ci)		(&pmap_tlb0_info)
     99      1.124      matt #endif
    100      1.124      matt #define pmap_md_tlb_asid_max()		(PMAP_TLB_NUM_PIDS - 1)
    101      1.124      matt #include <uvm/pmap/tlb.h>
    102      1.124      matt #include <uvm/pmap/pmap_tlb.h>
    103      1.124      matt 
    104      1.135     skrll /*
    105      1.124      matt  * If we have an EXTENDED MMU and the address space is split evenly between
    106      1.124      matt  * user and kernel, we can use the TTBR0/TTBR1 to have separate L1 tables for
    107      1.124      matt  * user and kernel address spaces.
    108      1.135     skrll  */
    109      1.128      matt #if (KERNEL_BASE & 0x80000000) == 0
    110      1.128      matt #error ARMv6 or later systems must have a KERNEL_BASE >= 0x80000000
    111      1.135     skrll #endif
    112      1.124      matt #endif  /* ARM_MMU_EXTENDED */
    113      1.124      matt 
    114        1.1   reinoud /*
     115       1.11     chris  * a pmap describes a process's 4GB virtual address space.  this
    116       1.11     chris  * virtual address space can be broken up into 4096 1MB regions which
    117       1.38   thorpej  * are described by L1 PTEs in the L1 table.
    118       1.11     chris  *
    119       1.38   thorpej  * There is a line drawn at KERNEL_BASE.  Everything below that line
    120       1.38   thorpej  * changes when the VM context is switched.  Everything above that line
    121       1.38   thorpej  * is the same no matter which VM context is running.  This is achieved
    122       1.38   thorpej  * by making the L1 PTEs for those slots above KERNEL_BASE reference
    123       1.38   thorpej  * kernel L2 tables.
    124       1.11     chris  *
    125       1.38   thorpej  * The basic layout of the virtual address space thus looks like this:
    126       1.38   thorpej  *
    127       1.38   thorpej  *	0xffffffff
    128       1.38   thorpej  *	.
    129       1.38   thorpej  *	.
    130       1.38   thorpej  *	.
    131       1.38   thorpej  *	KERNEL_BASE
    132       1.38   thorpej  *	--------------------
    133       1.38   thorpej  *	.
    134       1.38   thorpej  *	.
    135       1.38   thorpej  *	.
    136       1.38   thorpej  *	0x00000000
    137       1.11     chris  */
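/*
 * Illustrative sketch only (not part of this header's API): how a virtual
 * address selects one of the 4096 1MB L1 slots, and how the KERNEL_BASE
 * line separates the per-process part from the shared kernel part.
 * L1_S_SHIFT (20) comes from <arm/arm32/pte.h>; KERNEL_BASE comes from the
 * port's configuration.  The helper names are hypothetical.
 */
#if 0	/* example, not compiled */
static inline unsigned int
example_l1_slot(vaddr_t va)
{
	/* 4GB / 1MB sections == 4096 L1 slots */
	return va >> L1_S_SHIFT;
}

static inline bool
example_is_kernel_va(vaddr_t va)
{
	/* Everything at or above KERNEL_BASE is the same in every context. */
	return va >= KERNEL_BASE;
}
#endif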
    138       1.11     chris 
    139       1.65       scw /*
    140       1.65       scw  * The number of L2 descriptor tables which can be tracked by an l2_dtable.
    141       1.65       scw  * A bucket size of 16 provides for 16MB of contiguous virtual address
    142       1.65       scw  * space per l2_dtable. Most processes will, therefore, require only two or
    143       1.65       scw  * three of these to map their whole working set.
    144       1.65       scw  */
    145      1.124      matt #define	L2_BUCKET_XLOG2	(L1_S_SHIFT)
    146      1.124      matt #define L2_BUCKET_XSIZE	(1 << L2_BUCKET_XLOG2)
    147       1.65       scw #define	L2_BUCKET_LOG2	4
    148       1.65       scw #define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
    149       1.65       scw 
    150       1.65       scw /*
    151       1.65       scw  * Given the above "L2-descriptors-per-l2_dtable" constant, the number
    152       1.65       scw  * of l2_dtable structures required to track all possible page descriptors
    153       1.65       scw  * mappable by an L1 translation table is given by the following constants:
    154       1.65       scw  */
    155      1.124      matt #define	L2_LOG2		(32 - (L2_BUCKET_XLOG2 + L2_BUCKET_LOG2))
    156       1.65       scw #define	L2_SIZE		(1 << L2_LOG2)
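/*
 * Illustrative sketch only: how a virtual address splits into an l2_dtable
 * index (a pm_l2[] slot) and a bucket index within that l2_dtable.  With
 * L2_BUCKET_XLOG2 == L1_S_SHIFT (20) and L2_BUCKET_LOG2 == 4, each
 * l2_dtable covers 16MB, and L2_SIZE == 1 << (32 - 24) == 256 of them span
 * the whole 4GB space.  The helper names below are hypothetical; the real
 * pmap keeps equivalent private macros in pmap.c.
 */
#if 0	/* example, not compiled */
static inline size_t
example_l2_dtable_index(vaddr_t va)
{
	/* which of the pm_l2[L2_SIZE] slots tracks this address */
	return va >> (L2_BUCKET_XLOG2 + L2_BUCKET_LOG2);
}

static inline size_t
example_l2_bucket_index(vaddr_t va)
{
	/* which of the L2_BUCKET_SIZE descriptor tables within that slot */
	return (va >> L2_BUCKET_XLOG2) & (L2_BUCKET_SIZE - 1);
}
#endif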
    157       1.65       scw 
    158       1.90      matt /*
     159       1.90      matt  * Tell MI code that the cache is virtually-indexed.  ARMv6 and ARMv7
     160       1.90      matt  * caches are physically-tagged (VIPT); all earlier ones are virtually-tagged (VIVT).
    161       1.90      matt  */
    162       1.95  jmcneill #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
    163       1.90      matt #define PMAP_CACHE_VIPT
    164       1.90      matt #else
    165       1.90      matt #define PMAP_CACHE_VIVT
    166       1.90      matt #endif
    167       1.90      matt 
    168       1.75       bsh #ifndef _LOCORE
    169       1.75       bsh 
    170      1.146     skrll #ifndef ARM_MMU_EXTENDED
    171       1.65       scw struct l1_ttable;
    172       1.65       scw struct l2_dtable;
    173       1.65       scw 
    174       1.65       scw /*
    175       1.65       scw  * Track cache/tlb occupancy using the following structure
    176       1.65       scw  */
    177       1.65       scw union pmap_cache_state {
    178       1.65       scw 	struct {
    179       1.65       scw 		union {
    180      1.115     skrll 			uint8_t csu_cache_b[2];
    181      1.115     skrll 			uint16_t csu_cache;
    182       1.65       scw 		} cs_cache_u;
    183       1.65       scw 
    184       1.65       scw 		union {
    185      1.115     skrll 			uint8_t csu_tlb_b[2];
    186      1.115     skrll 			uint16_t csu_tlb;
    187       1.65       scw 		} cs_tlb_u;
    188       1.65       scw 	} cs_s;
    189      1.115     skrll 	uint32_t cs_all;
    190       1.65       scw };
    191       1.65       scw #define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
    192       1.65       scw #define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
    193       1.65       scw #define	cs_cache	cs_s.cs_cache_u.csu_cache
    194       1.65       scw #define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
    195       1.65       scw #define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
    196       1.65       scw #define	cs_tlb		cs_s.cs_tlb_u.csu_tlb
    197       1.65       scw 
    198       1.65       scw /*
    199       1.65       scw  * Assigned to cs_all to force cacheops to work for a particular pmap
    200       1.65       scw  */
    201       1.65       scw #define	PMAP_CACHE_STATE_ALL	0xffffffffu
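/*
 * Illustrative sketch only: the union above gives byte, halfword and word
 * views of the same state, so cache and TLB occupancy can be tested or
 * forced with a single store.  The function name is hypothetical.
 */
#if 0	/* example, not compiled */
static inline void
example_force_cacheops(union pmap_cache_state *cs)
{
	/* One 32-bit store marks I-cache, D-cache, I-TLB and D-TLB occupied. */
	cs->cs_all = PMAP_CACHE_STATE_ALL;

	/* ...which is equivalent to setting every byte-wide flag by hand: */
	cs->cs_cache_id = cs->cs_cache_d = 0xff;
	cs->cs_tlb_id = cs->cs_tlb_d = 0xff;
}
#endif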
    202      1.124      matt #endif /* !ARM_MMU_EXTENDED */
    203       1.65       scw 
    204       1.65       scw /*
    205       1.73   thorpej  * This structure is used by machine-dependent code to describe
    206       1.73   thorpej  * static mappings of devices, created at bootstrap time.
    207       1.73   thorpej  */
    208       1.73   thorpej struct pmap_devmap {
    209       1.73   thorpej 	vaddr_t		pd_va;		/* virtual address */
    210       1.73   thorpej 	paddr_t		pd_pa;		/* physical address */
    211       1.73   thorpej 	psize_t		pd_size;	/* size of region */
    212       1.73   thorpej 	vm_prot_t	pd_prot;	/* protection code */
    213       1.73   thorpej 	int		pd_cache;	/* cache attributes */
    214       1.73   thorpej };
    215       1.73   thorpej 
    216      1.153     skrll #define	DEVMAP_ALIGN(a)	((a) & ~L1_S_OFFSET)
    217      1.153     skrll #define	DEVMAP_SIZE(s)	roundup2((s), L1_S_SIZE)
    218      1.153     skrll #define	DEVMAP_ENTRY(va, pa, sz)			\
    219      1.153     skrll 	{						\
    220      1.153     skrll 		.pd_va = DEVMAP_ALIGN(va),		\
    221      1.153     skrll 		.pd_pa = DEVMAP_ALIGN(pa),		\
    222      1.153     skrll 		.pd_size = DEVMAP_SIZE(sz),		\
    223      1.153     skrll 		.pd_prot = VM_PROT_READ|VM_PROT_WRITE,	\
    224      1.153     skrll 		.pd_cache = PTE_NOCACHE			\
    225      1.153     skrll 	}
    226      1.153     skrll #define	DEVMAP_ENTRY_END	{ 0 }
    227      1.153     skrll 
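/*
 * Illustrative sketch only: a board's static device-mapping table built
 * with the macros above and handed to pmap_devmap_register() or
 * pmap_devmap_bootstrap() (declared further down).  The virtual and
 * physical addresses here are made up for the example.
 */
#if 0	/* example, not compiled */
static const struct pmap_devmap example_devmap[] = {
	/* 1MB of device registers at PA 0x40000000, mapped uncached at VA 0xf0000000 */
	DEVMAP_ENTRY(0xf0000000, 0x40000000, 0x00100000),
	DEVMAP_ENTRY_END
};

/* Typically used from the port's early MD startup code, e.g.:	*/
/*	pmap_devmap_register(example_devmap);			*/
#endif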
    228       1.73   thorpej /*
    229       1.65       scw  * The pmap structure itself
    230       1.65       scw  */
    231       1.65       scw struct pmap {
    232      1.124      matt 	struct uvm_object	pm_obj;
    233      1.124      matt 	kmutex_t		pm_obj_lock;
    234      1.124      matt #define	pm_lock pm_obj.vmobjlock
    235      1.120      matt #ifndef ARM_HAS_VBAR
    236       1.82       scw 	pd_entry_t		*pm_pl1vec;
    237      1.124      matt 	pd_entry_t		pm_l1vec;
    238      1.120      matt #endif
    239       1.65       scw 	struct l2_dtable	*pm_l2[L2_SIZE];
    240       1.65       scw 	struct pmap_statistics	pm_stats;
    241       1.65       scw 	LIST_ENTRY(pmap)	pm_list;
    242      1.124      matt #ifdef ARM_MMU_EXTENDED
    243      1.124      matt 	pd_entry_t		*pm_l1;
    244      1.124      matt 	paddr_t			pm_l1_pa;
    245      1.124      matt 	bool			pm_remove_all;
    246      1.124      matt #ifdef MULTIPROCESSOR
    247      1.124      matt 	kcpuset_t		*pm_onproc;
    248      1.124      matt 	kcpuset_t		*pm_active;
    249      1.126      matt #if PMAP_TLB_MAX > 1
    250      1.126      matt 	u_int			pm_shootdown_pending;
    251      1.126      matt #endif
    252      1.124      matt #endif
    253      1.126      matt 	struct pmap_asid_info	pm_pai[PMAP_TLB_MAX];
    254      1.124      matt #else
    255      1.124      matt 	struct l1_ttable	*pm_l1;
    256      1.124      matt 	union pmap_cache_state	pm_cstate;
    257      1.124      matt 	uint8_t			pm_domain;
    258      1.124      matt 	bool			pm_activated;
    259      1.124      matt 	bool			pm_remove_all;
    260      1.124      matt #endif
    261      1.124      matt };
    262      1.124      matt 
    263      1.124      matt struct pmap_kernel {
    264      1.124      matt 	struct pmap		kernel_pmap;
    265       1.65       scw };
    266       1.65       scw 
    267      1.106    martin /*
    268      1.106    martin  * Physical / virtual address structure. In a number of places (particularly
    269      1.106    martin  * during bootstrapping) we need to keep track of the physical and virtual
     270      1.106    martin  * addresses of various pages.
    271      1.106    martin  */
    272      1.106    martin typedef struct pv_addr {
    273      1.106    martin 	SLIST_ENTRY(pv_addr) pv_list;
    274      1.106    martin 	paddr_t pv_pa;
    275      1.106    martin 	vaddr_t pv_va;
    276      1.106    martin 	vsize_t pv_size;
    277      1.106    martin 	uint8_t pv_cache;
    278      1.106    martin 	uint8_t pv_prot;
    279      1.106    martin } pv_addr_t;
    280      1.106    martin typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;
    281      1.106    martin 
    282       1.85      matt extern pv_addrqh_t pmap_freeq;
    283      1.102      matt extern pv_addr_t kernelstack;
    284      1.102      matt extern pv_addr_t abtstack;
    285      1.102      matt extern pv_addr_t fiqstack;
    286      1.102      matt extern pv_addr_t irqstack;
    287      1.102      matt extern pv_addr_t undstack;
    288      1.103      matt extern pv_addr_t idlestack;
    289       1.85      matt extern pv_addr_t systempage;
    290       1.85      matt extern pv_addr_t kernel_l1pt;
    291        1.1   reinoud 
    292      1.126      matt #ifdef ARM_MMU_EXTENDED
    293      1.126      matt extern bool arm_has_tlbiasid_p;	/* also in <arm/locore.h> */
    294      1.126      matt #endif
    295      1.126      matt 
    296        1.1   reinoud /*
    297       1.24   thorpej  * Determine various modes for PTEs (user vs. kernel, cacheable
    298       1.24   thorpej  * vs. non-cacheable).
    299       1.24   thorpej  */
    300       1.24   thorpej #define	PTE_KERNEL	0
    301       1.24   thorpej #define	PTE_USER	1
    302       1.24   thorpej #define	PTE_NOCACHE	0
    303       1.24   thorpej #define	PTE_CACHE	1
    304       1.65       scw #define	PTE_PAGETABLE	2
    305       1.24   thorpej 
    306       1.24   thorpej /*
    307       1.43   thorpej  * Flags that indicate attributes of pages or mappings of pages.
    308       1.43   thorpej  *
    309       1.43   thorpej  * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
    310       1.43   thorpej  * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
    311       1.43   thorpej  * pv_entry's for each page.  They live in the same "namespace" so
    312       1.43   thorpej  * that we can clear multiple attributes at a time.
    313       1.43   thorpej  *
    314       1.43   thorpej  * Note the "non-cacheable" flag generally means the page has
    315       1.43   thorpej  * multiple mappings in a given address space.
    316       1.43   thorpej  */
    317       1.43   thorpej #define	PVF_MOD		0x01		/* page is modified */
    318       1.43   thorpej #define	PVF_REF		0x02		/* page is referenced */
    319       1.43   thorpej #define	PVF_WIRED	0x04		/* mapping is wired */
    320       1.43   thorpej #define	PVF_WRITE	0x08		/* mapping is writable */
    321       1.56   thorpej #define	PVF_EXEC	0x10		/* mapping is executable */
    322       1.90      matt #ifdef PMAP_CACHE_VIVT
    323       1.65       scw #define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
    324       1.65       scw #define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
    325       1.90      matt #define	PVF_NC		(PVF_UNC|PVF_KNC)
    326       1.90      matt #endif
    327       1.90      matt #ifdef PMAP_CACHE_VIPT
    328       1.90      matt #define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
    329       1.90      matt #define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
    330       1.90      matt #endif
    331       1.85      matt #define	PVF_COLORED	0x80		/* page has or had a color */
    332       1.85      matt #define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
    333       1.86      matt #define	PVF_KMPAGE	0x0200		/* page is used for kmem */
    334       1.87      matt #define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
    335       1.88      matt #define	PVF_KMOD	0x0800		/* unmanaged page is modified  */
    336       1.88      matt #define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
    337       1.88      matt #define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)
    338       1.43   thorpej 
    339       1.43   thorpej /*
    340        1.1   reinoud  * Commonly referenced structures
    341        1.1   reinoud  */
    342        1.4      matt extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */
    343      1.113      matt extern int		arm_poolpage_vmfreelist;
    344        1.1   reinoud 
    345        1.1   reinoud /*
    346        1.1   reinoud  * Macros that we need to export
    347        1.1   reinoud  */
    348        1.1   reinoud #define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
    349        1.1   reinoud #define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
    350       1.31   thorpej 
    351       1.43   thorpej #define	pmap_is_modified(pg)	\
    352       1.43   thorpej 	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
    353       1.43   thorpej #define	pmap_is_referenced(pg)	\
    354       1.43   thorpej 	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
    355       1.96  uebayasi #define	pmap_is_page_colored_p(md)	\
    356       1.96  uebayasi 	(((md)->pvh_attrs & PVF_COLORED) != 0)
    357       1.41   thorpej 
    358       1.41   thorpej #define	pmap_copy(dp, sp, da, l, sa)	/* nothing */
    359       1.60       chs 
    360       1.35   thorpej #define pmap_phys_address(ppn)		(arm_ptob((ppn)))
    361       1.98  macallan u_int arm32_mmap_flags(paddr_t);
    362      1.137     skrll #define ARM32_MMAP_WRITECOMBINE		0x40000000
    363       1.98  macallan #define ARM32_MMAP_CACHEABLE		0x20000000
    364  1.154.2.1  pgoyette #define ARM_MMAP_WRITECOMBINE		ARM32_MMAP_WRITECOMBINE
    365  1.154.2.1  pgoyette #define ARM_MMAP_CACHEABLE		ARM32_MMAP_CACHEABLE
    366      1.137     skrll #define pmap_mmap_flags(ppn)		arm32_mmap_flags(ppn)
    367        1.1   reinoud 
    368      1.123      matt #define	PMAP_PTE			0x10000000 /* kenter_pa */
    369      1.123      matt 
    370        1.1   reinoud /*
    371        1.1   reinoud  * Functions that we need to export
    372        1.1   reinoud  */
    373       1.39   thorpej void	pmap_procwr(struct proc *, vaddr_t, int);
    374       1.65       scw void	pmap_remove_all(pmap_t);
    375       1.80   thorpej bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);
    376       1.39   thorpej 
    377        1.1   reinoud #define	PMAP_NEED_PROCWR
    378       1.29     chris #define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
    379       1.92   thorpej #define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */
    380        1.4      matt 
    381       1.95  jmcneill #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
    382       1.85      matt #define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
    383       1.85      matt void	pmap_prefer(vaddr_t, vaddr_t *, int);
    384       1.85      matt #endif
    385       1.85      matt 
    386       1.85      matt void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);
    387       1.85      matt 
    388       1.39   thorpej /* Functions we use internally. */
    389       1.85      matt #ifdef PMAP_STEAL_MEMORY
    390       1.85      matt void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
    391       1.85      matt void	pmap_boot_pageadd(pv_addr_t *);
    392       1.85      matt vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
    393       1.85      matt #endif
    394       1.85      matt void	pmap_bootstrap(vaddr_t, vaddr_t);
    395       1.65       scw 
    396       1.78       scw void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
    397       1.70       scw int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
    398      1.124      matt int	pmap_prefetchabt_fixup(void *);
    399       1.80   thorpej bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
    400       1.80   thorpej bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
    401  1.154.2.1  pgoyette bool	pmap_extract_coherency(pmap_t, vaddr_t, paddr_t *, bool *);
    402       1.65       scw 
    403       1.65       scw void	pmap_debug(int);
    404       1.39   thorpej void	pmap_postinit(void);
    405       1.42   thorpej 
    406       1.42   thorpej void	vector_page_setprot(int);
    407       1.24   thorpej 
    408       1.73   thorpej const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
    409       1.73   thorpej const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
    410       1.73   thorpej 
    411       1.24   thorpej /* Bootstrapping routines. */
    412       1.24   thorpej void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
    413       1.25   thorpej void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
    414       1.28   thorpej vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
    415  1.154.2.2  pgoyette void	pmap_unmap_chunk(vaddr_t, vaddr_t, vsize_t);
    416       1.28   thorpej void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
    417       1.73   thorpej void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
    418       1.74   thorpej void	pmap_devmap_register(const struct pmap_devmap *);
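/*
 * Illustrative sketch only: how a port's early bootstrap code typically
 * uses the routines above while building the initial L1 table.  The
 * function and parameter names (l1pt_va, kernel_ptpv, vectors_va, ...)
 * are placeholders; only the pmap_* calls and systempage come from this
 * header.
 */
#if 0	/* example, not compiled */
static void
example_bootstrap_mappings(vaddr_t l1pt_va, vaddr_t kernel_pt_va,
    pv_addr_t *kernel_ptpv, paddr_t kernel_pa, vsize_t kernel_size,
    vaddr_t vectors_va)
{
	/* Hook a pre-allocated L2 table into the L1 table at kernel_pt_va. */
	pmap_link_l2pt(l1pt_va, kernel_pt_va, kernel_ptpv);

	/* Map the kernel image with cacheable section/page mappings. */
	pmap_map_chunk(l1pt_va, KERNEL_BASE, kernel_pa, kernel_size,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);

	/* Map the vector page with a single small-page entry. */
	pmap_map_entry(l1pt_va, vectors_va, systempage.pv_pa,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
}
#endif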
    419       1.13     chris 
    420       1.13     chris /*
    421      1.135     skrll  * Special page zero routine for use by the idle loop (no cache cleans).
    422       1.13     chris  */
    423       1.80   thorpej bool	pmap_pageidlezero(paddr_t);
    424       1.13     chris #define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))
    425        1.1   reinoud 
    426      1.131      matt #ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
    427      1.131      matt /*
    428      1.131      matt  * For the pmap, this is a more useful way to map a direct mapped page.
    429      1.131      matt  * It returns either the direct-mapped VA or the VA supplied if it can't
    430      1.131      matt  * be direct mapped.
    431      1.131      matt  */
    432      1.131      matt vaddr_t	pmap_direct_mapped_phys(paddr_t, bool *, vaddr_t);
    433      1.131      matt #endif
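/*
 * Illustrative sketch only: typical use of pmap_direct_mapped_phys(),
 * falling back to a caller-supplied KVA when the page lies outside the
 * direct map.  The wrapper and variable names are hypothetical.
 */
#if 0	/* example, not compiled */
static vaddr_t
example_map_physical_page(paddr_t pa, vaddr_t fallback_va)
{
	bool direct_mapped;
	vaddr_t va = pmap_direct_mapped_phys(pa, &direct_mapped, fallback_va);

	if (!direct_mapped) {
		/*
		 * pa is not direct-mappable: the caller must establish a
		 * mapping at fallback_va itself (e.g. with pmap_kenter_pa).
		 */
	}
	return va;
}
#endif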
    434      1.131      matt 
    435       1.29     chris /*
    436       1.84     chris  * used by dumpsys to record the PA of the L1 table
    437       1.84     chris  */
    438       1.84     chris uint32_t pmap_kernel_L1_addr(void);
    439       1.84     chris /*
    440       1.29     chris  * The current top of kernel VM
    441       1.29     chris  */
    442       1.29     chris extern vaddr_t	pmap_curmaxkvaddr;
    443        1.1   reinoud 
    444      1.131      matt #if defined(ARM_MMU_EXTENDED) && defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
    445      1.131      matt /*
    446      1.141      matt  * Ending VA of direct mapped memory (usually KERNEL_VM_BASE).
    447      1.131      matt  */
    448      1.140      matt extern vaddr_t pmap_directlimit;
    449      1.131      matt #endif
    450      1.131      matt 
    451        1.1   reinoud /*
    452      1.135     skrll  * Useful macros and constants
    453        1.1   reinoud  */
    454       1.59   thorpej 
    455       1.65       scw /* Virtual address to page table entry */
    456       1.79     perry static inline pt_entry_t *
    457       1.65       scw vtopte(vaddr_t va)
    458       1.65       scw {
    459       1.65       scw 	pd_entry_t *pdep;
    460       1.65       scw 	pt_entry_t *ptep;
    461       1.65       scw 
    462      1.124      matt 	KASSERT(trunc_page(va) == va);
    463      1.124      matt 
    464       1.81   thorpej 	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
    465       1.65       scw 		return (NULL);
    466       1.65       scw 	return (ptep);
    467       1.65       scw }
    468       1.65       scw 
    469       1.65       scw /*
    470       1.65       scw  * Virtual address to physical address
    471       1.65       scw  */
    472       1.79     perry static inline paddr_t
    473       1.65       scw vtophys(vaddr_t va)
    474       1.65       scw {
    475       1.65       scw 	paddr_t pa;
    476       1.65       scw 
    477       1.81   thorpej 	if (pmap_extract(pmap_kernel(), va, &pa) == false)
    478       1.65       scw 		return (0);	/* XXXSCW: Panic? */
    479       1.65       scw 
    480       1.65       scw 	return (pa);
    481       1.65       scw }
    482       1.65       scw 
    483       1.65       scw /*
     484       1.65       scw  * The new pmap ensures that page tables are always mapped Write-Thru.
    485       1.65       scw  * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
    486       1.65       scw  * on every change.
    487       1.65       scw  *
    488       1.69   thorpej  * Unfortunately, not all CPUs have a write-through cache mode.  So we
    489       1.69   thorpej  * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
    490       1.69   thorpej  * and if there is the chance for PTE syncs to be needed, we define
    491       1.69   thorpej  * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
    492       1.69   thorpej  * the code.
    493       1.69   thorpej  */
    494       1.69   thorpej extern int pmap_needs_pte_sync;
    495       1.69   thorpej #if defined(_KERNEL_OPT)
    496       1.69   thorpej /*
    497      1.145     skrll  * Perform compile time evaluation of PMAP_NEEDS_PTE_SYNC when only a
    498      1.145     skrll  * single MMU type is selected.
    499      1.145     skrll  *
     500       1.69   thorpej  * StrongARM SA-1 caches do not have a write-through mode, so they need
     501      1.145     skrll  * PTE syncs, and V6 MMUs need them as well.  MEMC, GENERIC and XSCALE
     502      1.145     skrll  * MMUs do not need PTE syncs.
    503      1.145     skrll  *
    504      1.145     skrll  * Use run time evaluation for all other cases.
    505      1.148     skrll  *
    506       1.69   thorpej  */
    507      1.145     skrll #if (ARM_NMMUS == 1)
    508      1.145     skrll #if (ARM_MMU_SA1 + ARM_MMU_V6 != 0)
    509      1.104      matt #define	PMAP_INCLUDE_PTE_SYNC
    510      1.109      matt #define	PMAP_NEEDS_PTE_SYNC	1
    511      1.145     skrll #elif (ARM_MMU_MEMC + ARM_MMU_GENERIC + ARM_MMU_XSCALE != 0)
    512       1.69   thorpej #define	PMAP_NEEDS_PTE_SYNC	0
    513       1.69   thorpej #endif
    514      1.112      matt #endif
    515       1.69   thorpej #endif /* _KERNEL_OPT */
    516       1.69   thorpej 
    517       1.69   thorpej /*
    518       1.69   thorpej  * Provide a fallback in case we were not able to determine it at
    519       1.69   thorpej  * compile-time.
    520       1.65       scw  */
    521       1.69   thorpej #ifndef PMAP_NEEDS_PTE_SYNC
    522       1.69   thorpej #define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
    523       1.69   thorpej #define	PMAP_INCLUDE_PTE_SYNC
    524       1.69   thorpej #endif
    525       1.65       scw 
    526      1.104      matt static inline void
    527      1.104      matt pmap_ptesync(pt_entry_t *ptep, size_t cnt)
    528      1.104      matt {
    529      1.132      matt 	if (PMAP_NEEDS_PTE_SYNC) {
    530      1.104      matt 		cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
    531      1.132      matt #ifdef SHEEVA_L2_CACHE
    532      1.132      matt 		cpu_sdcache_wb_range((vaddr_t)ptep, -1,
    533      1.132      matt 		    cnt * sizeof(pt_entry_t));
    534      1.132      matt #endif
    535      1.132      matt 	}
    536      1.138     joerg 	arm_dsb();
    537      1.104      matt }
    538       1.69   thorpej 
    539      1.124      matt #define	PDE_SYNC(pdep)			pmap_ptesync((pdep), 1)
    540      1.124      matt #define	PDE_SYNC_RANGE(pdep, cnt)	pmap_ptesync((pdep), (cnt))
    541      1.124      matt #define	PTE_SYNC(ptep)			pmap_ptesync((ptep), PAGE_SIZE / L2_S_SIZE)
    542      1.104      matt #define	PTE_SYNC_RANGE(ptep, cnt)	pmap_ptesync((ptep), (cnt))
    543       1.65       scw 
    544      1.124      matt #define l1pte_valid_p(pde)	((pde) != 0)
    545      1.124      matt #define l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
    546      1.124      matt #define l1pte_supersection_p(pde) (l1pte_section_p(pde)	\
    547      1.104      matt 				&& ((pde) & L1_S_V6_SUPER) != 0)
    548      1.124      matt #define l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
    549      1.124      matt #define l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)
    550      1.124      matt #define l1pte_pa(pde)		((pde) & L1_C_ADDR_MASK)
    551      1.124      matt #define l1pte_index(v)		((vaddr_t)(v) >> L1_S_SHIFT)
    552      1.124      matt #define l1pte_pgindex(v)	l1pte_index((v) & L1_ADDR_BITS \
    553      1.124      matt 		& ~(PAGE_SIZE * PAGE_SIZE / sizeof(pt_entry_t) - 1))
    554      1.124      matt 
    555      1.124      matt static inline void
    556      1.124      matt l1pte_setone(pt_entry_t *pdep, pt_entry_t pde)
    557      1.124      matt {
    558      1.124      matt 	*pdep = pde;
    559      1.124      matt }
    560       1.36   thorpej 
    561      1.124      matt static inline void
    562      1.124      matt l1pte_set(pt_entry_t *pdep, pt_entry_t pde)
    563      1.124      matt {
    564      1.124      matt 	*pdep = pde;
    565      1.124      matt 	if (l1pte_page_p(pde)) {
    566      1.124      matt 		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (PAGE_SIZE / L2_T_SIZE - 1)) == 0, "%p", pdep);
    567      1.124      matt 		for (size_t k = 1; k < PAGE_SIZE / L2_T_SIZE; k++) {
    568      1.124      matt 			pde += L2_T_SIZE;
    569      1.124      matt 			pdep[k] = pde;
    570      1.124      matt 		}
    571      1.124      matt 	} else if (l1pte_supersection_p(pde)) {
    572      1.124      matt 		KASSERTMSG((((uintptr_t)pdep / sizeof(pde)) & (L1_SS_SIZE / L1_S_SIZE - 1)) == 0, "%p", pdep);
    573      1.124      matt 		for (size_t k = 1; k < L1_SS_SIZE / L1_S_SIZE; k++) {
    574      1.124      matt 			pdep[k] = pde;
    575      1.124      matt 		}
    576      1.124      matt 	}
    577      1.124      matt }
    578      1.124      matt 
    579      1.124      matt #define l2pte_index(v)		((((v) & L2_ADDR_BITS) >> PGSHIFT) << (PGSHIFT-L2_S_SHIFT))
    580      1.124      matt #define l2pte_valid_p(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
    581      1.124      matt #define l2pte_pa(pte)		((pte) & L2_S_FRAME)
    582      1.124      matt #define l1pte_lpage_p(pte)	(((pte) & L2_TYPE_MASK) == L2_TYPE_L)
    583      1.124      matt #define l2pte_minidata_p(pte)	(((pte) & \
    584       1.85      matt 				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
    585       1.85      matt 				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))
    586       1.35   thorpej 
    587      1.121      matt static inline void
    588      1.121      matt l2pte_set(pt_entry_t *ptep, pt_entry_t pte, pt_entry_t opte)
    589      1.121      matt {
    590      1.129     skrll 	if (l1pte_lpage_p(pte)) {
    591      1.139     skrll 		KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (L2_L_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
    592      1.129     skrll 		for (size_t k = 0; k < L2_L_SIZE / L2_S_SIZE; k++) {
    593      1.129     skrll 			*ptep++ = pte;
    594      1.129     skrll 		}
    595      1.129     skrll 	} else {
    596      1.139     skrll 		KASSERTMSG((((uintptr_t)ptep / sizeof(pte)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
    597      1.129     skrll 		for (size_t k = 0; k < PAGE_SIZE / L2_S_SIZE; k++) {
    598      1.129     skrll 			KASSERTMSG(*ptep == opte, "%#x [*%p] != %#x", *ptep, ptep, opte);
    599      1.129     skrll 			*ptep++ = pte;
    600      1.129     skrll 			pte += L2_S_SIZE;
    601      1.129     skrll 			if (opte)
    602      1.129     skrll 				opte += L2_S_SIZE;
    603      1.129     skrll 		}
    604      1.121      matt 	}
    605      1.129     skrll }
    606      1.121      matt 
    607      1.121      matt static inline void
    608      1.121      matt l2pte_reset(pt_entry_t *ptep)
    609      1.121      matt {
    610      1.139     skrll 	KASSERTMSG((((uintptr_t)ptep / sizeof(*ptep)) & (PAGE_SIZE / L2_S_SIZE - 1)) == 0, "%p", ptep);
    611      1.121      matt 	*ptep = 0;
    612      1.121      matt 	for (vsize_t k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
    613      1.121      matt 		ptep[k] = 0;
    614      1.121      matt 	}
    615      1.135     skrll }
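/*
 * Illustrative sketch only: the usual update sequence for an L2 (small
 * page) PTE.  l2pte_set()/l2pte_reset() write all PAGE_SIZE/L2_S_SIZE
 * hardware entries backing one VM page, and PTE_SYNC() pushes them to
 * memory (plus a barrier) for MMUs without write-through page tables.
 * 'ptep' is assumed to point at the first entry for the page.
 */
#if 0	/* example, not compiled */
static void
example_update_l2_pte(pt_entry_t *ptep, pt_entry_t npte)
{
	const pt_entry_t opte = *ptep;	/* current contents */

	l2pte_set(ptep, npte, opte);	/* install the new mapping */
	PTE_SYNC(ptep);			/* write back + barrier */

	/* ...and to tear the mapping down again: */
	l2pte_reset(ptep);
	PTE_SYNC(ptep);
}
#endif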
    616      1.121      matt 
    617        1.1   reinoud /* L1 and L2 page table macros */
     618       1.36   thorpej #define pmap_pde_v(pde)		l1pte_valid_p(*(pde))
    619       1.36   thorpej #define pmap_pde_section(pde)	l1pte_section_p(*(pde))
    620      1.107      matt #define pmap_pde_supersection(pde)	l1pte_supersection_p(*(pde))
    621       1.36   thorpej #define pmap_pde_page(pde)	l1pte_page_p(*(pde))
    622       1.36   thorpej #define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))
    623       1.16  rearnsha 
    624      1.124      matt #define	pmap_pte_v(pte)		l2pte_valid_p(*(pte))
    625       1.36   thorpej #define	pmap_pte_pa(pte)	l2pte_pa(*(pte))
    626       1.35   thorpej 
    627        1.1   reinoud /* Size of the kernel part of the L1 page table */
    628        1.1   reinoud #define KERNEL_PD_SIZE	\
    629       1.44   thorpej 	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
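/*
 * Worked example (assuming KERNEL_BASE == 0x80000000 and the usual
 * 16KB L1_TABLE_SIZE): the user half of the address space needs
 * 0x80000000 >> 20 == 2048 descriptors of 4 bytes each, so
 * KERNEL_PD_SIZE == 16KB - 8KB == 8KB of the L1 table belongs to the
 * kernel.
 */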
    630       1.20       chs 
    631      1.117      matt void	bzero_page(vaddr_t);
    632      1.117      matt void	bcopy_page(vaddr_t, vaddr_t);
    633       1.46   thorpej 
    634      1.116      matt #ifdef FPU_VFP
    635      1.117      matt void	bzero_page_vfp(vaddr_t);
    636      1.117      matt void	bcopy_page_vfp(vaddr_t, vaddr_t);
    637      1.116      matt #endif
    638      1.116      matt 
    639      1.117      matt /************************* ARM MMU configuration *****************************/
    640      1.117      matt 
    641       1.95  jmcneill #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
    642       1.51   thorpej void	pmap_copy_page_generic(paddr_t, paddr_t);
    643       1.51   thorpej void	pmap_zero_page_generic(paddr_t);
    644       1.51   thorpej 
    645       1.46   thorpej void	pmap_pte_init_generic(void);
    646       1.69   thorpej #if defined(CPU_ARM8)
    647       1.69   thorpej void	pmap_pte_init_arm8(void);
    648       1.69   thorpej #endif
    649       1.46   thorpej #if defined(CPU_ARM9)
    650       1.46   thorpej void	pmap_pte_init_arm9(void);
    651       1.46   thorpej #endif /* CPU_ARM9 */
    652       1.76  rearnsha #if defined(CPU_ARM10)
    653       1.76  rearnsha void	pmap_pte_init_arm10(void);
    654       1.76  rearnsha #endif /* CPU_ARM10 */
    655      1.103      matt #if defined(CPU_ARM11)	/* ARM_MMU_V6 */
    656       1.94  uebayasi void	pmap_pte_init_arm11(void);
    657       1.94  uebayasi #endif /* CPU_ARM11 */
    658      1.103      matt #if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
    659       1.99       bsh void	pmap_pte_init_arm11mpcore(void);
    660       1.99       bsh #endif
    661      1.103      matt #if ARM_MMU_V7 == 1
    662      1.103      matt void	pmap_pte_init_armv7(void);
    663      1.103      matt #endif /* ARM_MMU_V7 */
     664       1.69   thorpej #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
    665       1.69   thorpej 
    666       1.69   thorpej #if ARM_MMU_SA1 == 1
    667       1.69   thorpej void	pmap_pte_init_sa1(void);
    668       1.69   thorpej #endif /* ARM_MMU_SA1 == 1 */
    669       1.46   thorpej 
    670       1.52   thorpej #if ARM_MMU_XSCALE == 1
    671       1.51   thorpej void	pmap_copy_page_xscale(paddr_t, paddr_t);
    672       1.51   thorpej void	pmap_zero_page_xscale(paddr_t);
    673       1.51   thorpej 
    674       1.46   thorpej void	pmap_pte_init_xscale(void);
    675       1.50   thorpej 
    676       1.50   thorpej void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
    677       1.77       scw 
    678       1.77       scw #define	PMAP_UAREA(va)		pmap_uarea(va)
    679       1.77       scw void	pmap_uarea(vaddr_t);
    680       1.52   thorpej #endif /* ARM_MMU_XSCALE == 1 */
    681       1.46   thorpej 
    682       1.49   thorpej extern pt_entry_t		pte_l1_s_cache_mode;
    683       1.49   thorpej extern pt_entry_t		pte_l1_s_cache_mask;
    684       1.49   thorpej 
    685       1.49   thorpej extern pt_entry_t		pte_l2_l_cache_mode;
    686       1.49   thorpej extern pt_entry_t		pte_l2_l_cache_mask;
    687       1.49   thorpej 
    688       1.49   thorpej extern pt_entry_t		pte_l2_s_cache_mode;
    689       1.49   thorpej extern pt_entry_t		pte_l2_s_cache_mask;
    690       1.46   thorpej 
    691       1.65       scw extern pt_entry_t		pte_l1_s_cache_mode_pt;
    692       1.65       scw extern pt_entry_t		pte_l2_l_cache_mode_pt;
    693       1.65       scw extern pt_entry_t		pte_l2_s_cache_mode_pt;
    694       1.65       scw 
    695       1.98  macallan extern pt_entry_t		pte_l1_s_wc_mode;
    696       1.98  macallan extern pt_entry_t		pte_l2_l_wc_mode;
    697       1.98  macallan extern pt_entry_t		pte_l2_s_wc_mode;
    698       1.98  macallan 
    699       1.95  jmcneill extern pt_entry_t		pte_l1_s_prot_u;
    700       1.95  jmcneill extern pt_entry_t		pte_l1_s_prot_w;
    701       1.95  jmcneill extern pt_entry_t		pte_l1_s_prot_ro;
    702       1.95  jmcneill extern pt_entry_t		pte_l1_s_prot_mask;
    703       1.95  jmcneill 
    704       1.46   thorpej extern pt_entry_t		pte_l2_s_prot_u;
    705       1.46   thorpej extern pt_entry_t		pte_l2_s_prot_w;
    706       1.95  jmcneill extern pt_entry_t		pte_l2_s_prot_ro;
    707       1.46   thorpej extern pt_entry_t		pte_l2_s_prot_mask;
    708       1.95  jmcneill 
    709       1.95  jmcneill extern pt_entry_t		pte_l2_l_prot_u;
    710       1.95  jmcneill extern pt_entry_t		pte_l2_l_prot_w;
    711       1.95  jmcneill extern pt_entry_t		pte_l2_l_prot_ro;
    712       1.95  jmcneill extern pt_entry_t		pte_l2_l_prot_mask;
    713       1.95  jmcneill 
    714      1.103      matt extern pt_entry_t		pte_l1_ss_proto;
    715       1.46   thorpej extern pt_entry_t		pte_l1_s_proto;
    716       1.46   thorpej extern pt_entry_t		pte_l1_c_proto;
    717       1.46   thorpej extern pt_entry_t		pte_l2_s_proto;
    718       1.46   thorpej 
    719       1.51   thorpej extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
    720       1.51   thorpej extern void (*pmap_zero_page_func)(paddr_t);
    721       1.75       bsh 
    722       1.75       bsh #endif /* !_LOCORE */
    723       1.51   thorpej 
    724       1.46   thorpej /*****************************************************************************/
    725       1.46   thorpej 
    726      1.124      matt #define	KERNEL_PID		0	/* The kernel uses ASID 0 */
    727      1.124      matt 
    728       1.20       chs /*
    729       1.65       scw  * Definitions for MMU domains
    730       1.65       scw  */
    731      1.103      matt #define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
    732      1.124      matt #define	PMAP_DOMAIN_KERNEL	0	/* The kernel pmap uses domain #0 */
    733  1.154.2.2  pgoyette 
    734      1.124      matt #ifdef ARM_MMU_EXTENDED
    735      1.124      matt #define	PMAP_DOMAIN_USER	1	/* User pmaps use domain #1 */
    736  1.154.2.2  pgoyette #define	DOMAIN_DEFAULT		((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | (DOMAIN_CLIENT << (PMAP_DOMAIN_USER*2)))
    737  1.154.2.2  pgoyette #else
    738  1.154.2.2  pgoyette #define	DOMAIN_DEFAULT		((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)))
    739      1.124      matt #endif
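/*
 * Worked example (assuming DOMAIN_CLIENT == 0x01 from <arm/armreg.h>):
 * each domain occupies two bits of the Domain Access Control Register,
 * so with ARM_MMU_EXTENDED the default is (1 << 0) | (1 << 2) == 0x5
 * (kernel domain 0 and user domain 1 both in client mode); without it,
 * only domain 0 is enabled, giving 0x1.
 */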
    740       1.45   thorpej 
    741       1.45   thorpej /*
    742       1.45   thorpej  * These macros define the various bit masks in the PTE.
    743       1.45   thorpej  *
    744       1.45   thorpej  * We use these macros since we use different bits on different processor
    745       1.45   thorpej  * models.
    746       1.45   thorpej  */
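/*
 * Illustrative sketch only: how pmap.c typically composes a small-page
 * PTE from the MMU-class-independent names (L2_S_PROTO, a cache mode,
 * and the L2_S_PROT() kernel/user protection helper defined later in
 * this header).  The function name is hypothetical.
 */
#if 0	/* example, not compiled */
static pt_entry_t
example_make_l2s_pte(paddr_t pa, vm_prot_t prot)
{
	/* pte_l2_s_cache_mode is set up by the pmap_pte_init_*() routines. */
	return L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot)
	    | pte_l2_s_cache_mode;
}
#endif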
    747       1.95  jmcneill #define	L1_S_PROT_U_generic	(L1_S_AP(AP_U))
    748       1.95  jmcneill #define	L1_S_PROT_W_generic	(L1_S_AP(AP_W))
    749      1.152     skrll #define	L1_S_PROT_RO_generic	(0)
    750       1.95  jmcneill #define	L1_S_PROT_MASK_generic	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
    751       1.95  jmcneill 
    752       1.95  jmcneill #define	L1_S_PROT_U_xscale	(L1_S_AP(AP_U))
    753       1.95  jmcneill #define	L1_S_PROT_W_xscale	(L1_S_AP(AP_W))
    754      1.152     skrll #define	L1_S_PROT_RO_xscale	(0)
    755       1.95  jmcneill #define	L1_S_PROT_MASK_xscale	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
    756       1.95  jmcneill 
    757       1.99       bsh #define	L1_S_PROT_U_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
    758       1.99       bsh #define	L1_S_PROT_W_armv6	(L1_S_AP(AP_W))
    759       1.99       bsh #define	L1_S_PROT_RO_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
    760       1.99       bsh #define	L1_S_PROT_MASK_armv6	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
    761       1.99       bsh 
    762       1.95  jmcneill #define	L1_S_PROT_U_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
    763       1.95  jmcneill #define	L1_S_PROT_W_armv7	(L1_S_AP(AP_W))
    764       1.95  jmcneill #define	L1_S_PROT_RO_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
    765       1.95  jmcneill #define	L1_S_PROT_MASK_armv7	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
    766       1.45   thorpej 
    767       1.49   thorpej #define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
    768       1.85      matt #define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
    769       1.99       bsh #define	L1_S_CACHE_MASK_armv6	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
    770      1.134     skrll #define	L1_S_CACHE_MASK_armv6n	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
    771      1.111      matt #define	L1_S_CACHE_MASK_armv7	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
    772       1.45   thorpej 
    773       1.95  jmcneill #define	L2_L_PROT_U_generic	(L2_AP(AP_U))
    774       1.95  jmcneill #define	L2_L_PROT_W_generic	(L2_AP(AP_W))
    775      1.152     skrll #define	L2_L_PROT_RO_generic	(0)
    776       1.95  jmcneill #define	L2_L_PROT_MASK_generic	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
    777       1.95  jmcneill 
    778       1.95  jmcneill #define	L2_L_PROT_U_xscale	(L2_AP(AP_U))
    779       1.95  jmcneill #define	L2_L_PROT_W_xscale	(L2_AP(AP_W))
    780      1.152     skrll #define	L2_L_PROT_RO_xscale	(0)
    781       1.95  jmcneill #define	L2_L_PROT_MASK_xscale	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
    782       1.95  jmcneill 
    783       1.99       bsh #define	L2_L_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
    784       1.99       bsh #define	L2_L_PROT_W_armv6n	(L2_AP0(AP_W))
    785       1.99       bsh #define	L2_L_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
    786       1.99       bsh #define	L2_L_PROT_MASK_armv6n	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
    787       1.99       bsh 
    788       1.95  jmcneill #define	L2_L_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
    789       1.95  jmcneill #define	L2_L_PROT_W_armv7	(L2_AP0(AP_W))
    790       1.95  jmcneill #define	L2_L_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
    791       1.95  jmcneill #define	L2_L_PROT_MASK_armv7	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
    792       1.45   thorpej 
    793       1.49   thorpej #define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
    794       1.85      matt #define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
    795       1.99       bsh #define	L2_L_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
    796      1.134     skrll #define	L2_L_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
    797      1.111      matt #define	L2_L_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
    798       1.49   thorpej 
    799       1.46   thorpej #define	L2_S_PROT_U_generic	(L2_AP(AP_U))
    800       1.46   thorpej #define	L2_S_PROT_W_generic	(L2_AP(AP_W))
    801      1.152     skrll #define	L2_S_PROT_RO_generic	(0)
    802       1.95  jmcneill #define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
    803       1.46   thorpej 
    804       1.48   thorpej #define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
    805       1.48   thorpej #define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
    806      1.152     skrll #define	L2_S_PROT_RO_xscale	(0)
    807       1.95  jmcneill #define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
    808       1.95  jmcneill 
    809       1.99       bsh #define	L2_S_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
    810       1.99       bsh #define	L2_S_PROT_W_armv6n	(L2_AP0(AP_W))
    811       1.99       bsh #define	L2_S_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
    812       1.99       bsh #define	L2_S_PROT_MASK_armv6n	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
    813       1.99       bsh 
    814       1.95  jmcneill #define	L2_S_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
    815       1.95  jmcneill #define	L2_S_PROT_W_armv7	(L2_AP0(AP_W))
    816       1.95  jmcneill #define	L2_S_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
    817       1.95  jmcneill #define	L2_S_PROT_MASK_armv7	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
    818       1.46   thorpej 
    819       1.49   thorpej #define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
    820       1.85      matt #define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
    821       1.99       bsh #define	L2_XS_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
    822       1.99       bsh #ifdef	ARMV6_EXTENDED_SMALL_PAGE
    823       1.99       bsh #define	L2_S_CACHE_MASK_armv6c	L2_XS_CACHE_MASK_armv6
    824       1.99       bsh #else
    825       1.99       bsh #define	L2_S_CACHE_MASK_armv6c	L2_S_CACHE_MASK_generic
    826       1.99       bsh #endif
    827      1.142     skrll #define	L2_S_CACHE_MASK_armv6n	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
    828      1.111      matt #define	L2_S_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
    829       1.46   thorpej 
    830       1.99       bsh 
    831       1.46   thorpej #define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
    832       1.47   thorpej #define	L1_S_PROTO_xscale	(L1_TYPE_S)
    833       1.99       bsh #define	L1_S_PROTO_armv6	(L1_TYPE_S)
    834       1.95  jmcneill #define	L1_S_PROTO_armv7	(L1_TYPE_S)
    835       1.46   thorpej 
    836      1.103      matt #define	L1_SS_PROTO_generic	0
    837      1.103      matt #define	L1_SS_PROTO_xscale	0
    838      1.103      matt #define	L1_SS_PROTO_armv6	(L1_TYPE_S | L1_S_V6_SS)
    839      1.103      matt #define	L1_SS_PROTO_armv7	(L1_TYPE_S | L1_S_V6_SS)
    840      1.103      matt 
    841       1.46   thorpej #define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
    842       1.47   thorpej #define	L1_C_PROTO_xscale	(L1_TYPE_C)
    843       1.99       bsh #define	L1_C_PROTO_armv6	(L1_TYPE_C)
    844       1.95  jmcneill #define	L1_C_PROTO_armv7	(L1_TYPE_C)
    845       1.46   thorpej 
    846       1.46   thorpej #define	L2_L_PROTO		(L2_TYPE_L)
    847       1.46   thorpej 
    848       1.46   thorpej #define	L2_S_PROTO_generic	(L2_TYPE_S)
    849       1.85      matt #define	L2_S_PROTO_xscale	(L2_TYPE_XS)
    850       1.99       bsh #ifdef	ARMV6_EXTENDED_SMALL_PAGE
    851       1.99       bsh #define	L2_S_PROTO_armv6c	(L2_TYPE_XS)    /* XP=0, extended small page */
    852       1.99       bsh #else
    853       1.99       bsh #define	L2_S_PROTO_armv6c	(L2_TYPE_S)	/* XP=0, subpage APs */
    854       1.99       bsh #endif
    855      1.134     skrll #ifdef ARM_MMU_EXTENDED
    856      1.134     skrll #define	L2_S_PROTO_armv6n	(L2_TYPE_S|L2_XS_XN)
    857      1.134     skrll #else
    858       1.99       bsh #define	L2_S_PROTO_armv6n	(L2_TYPE_S)	/* with XP=1 */
    859      1.134     skrll #endif
    860      1.124      matt #ifdef ARM_MMU_EXTENDED
    861      1.124      matt #define	L2_S_PROTO_armv7	(L2_TYPE_S|L2_XS_XN)
    862      1.124      matt #else
    863       1.95  jmcneill #define	L2_S_PROTO_armv7	(L2_TYPE_S)
    864      1.124      matt #endif
    865       1.45   thorpej 
    866       1.46   thorpej /*
    867       1.46   thorpej  * User-visible names for the ones that vary with MMU class.
    868       1.46   thorpej  */
    869       1.46   thorpej 
    870       1.46   thorpej #if ARM_NMMUS > 1
    871       1.46   thorpej /* More than one MMU class configured; use variables. */
    872       1.95  jmcneill #define	L1_S_PROT_U		pte_l1_s_prot_u
    873       1.95  jmcneill #define	L1_S_PROT_W		pte_l1_s_prot_w
    874       1.95  jmcneill #define	L1_S_PROT_RO		pte_l1_s_prot_ro
    875       1.95  jmcneill #define	L1_S_PROT_MASK		pte_l1_s_prot_mask
    876       1.95  jmcneill 
    877       1.46   thorpej #define	L2_S_PROT_U		pte_l2_s_prot_u
    878       1.46   thorpej #define	L2_S_PROT_W		pte_l2_s_prot_w
    879       1.95  jmcneill #define	L2_S_PROT_RO		pte_l2_s_prot_ro
    880       1.46   thorpej #define	L2_S_PROT_MASK		pte_l2_s_prot_mask
    881       1.46   thorpej 
    882       1.95  jmcneill #define	L2_L_PROT_U		pte_l2_l_prot_u
    883       1.95  jmcneill #define	L2_L_PROT_W		pte_l2_l_prot_w
    884       1.95  jmcneill #define	L2_L_PROT_RO		pte_l2_l_prot_ro
    885       1.95  jmcneill #define	L2_L_PROT_MASK		pte_l2_l_prot_mask
    886       1.95  jmcneill 
    887       1.49   thorpej #define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
    888       1.49   thorpej #define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
    889       1.49   thorpej #define	L2_S_CACHE_MASK		pte_l2_s_cache_mask
    890       1.49   thorpej 
    891      1.103      matt #define	L1_SS_PROTO		pte_l1_ss_proto
    892       1.46   thorpej #define	L1_S_PROTO		pte_l1_s_proto
    893       1.46   thorpej #define	L1_C_PROTO		pte_l1_c_proto
    894       1.46   thorpej #define	L2_S_PROTO		pte_l2_s_proto
    895       1.51   thorpej 
    896       1.51   thorpej #define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
    897       1.51   thorpej #define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
    898       1.99       bsh #elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
    899       1.99       bsh #define	L1_S_PROT_U		L1_S_PROT_U_generic
    900       1.99       bsh #define	L1_S_PROT_W		L1_S_PROT_W_generic
    901       1.99       bsh #define	L1_S_PROT_RO		L1_S_PROT_RO_generic
    902       1.99       bsh #define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic
    903       1.99       bsh 
    904       1.99       bsh #define	L2_S_PROT_U		L2_S_PROT_U_generic
    905       1.99       bsh #define	L2_S_PROT_W		L2_S_PROT_W_generic
    906       1.99       bsh #define	L2_S_PROT_RO		L2_S_PROT_RO_generic
    907       1.99       bsh #define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic
    908       1.99       bsh 
    909       1.99       bsh #define	L2_L_PROT_U		L2_L_PROT_U_generic
    910       1.99       bsh #define	L2_L_PROT_W		L2_L_PROT_W_generic
    911       1.99       bsh #define	L2_L_PROT_RO		L2_L_PROT_RO_generic
    912       1.99       bsh #define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic
    913       1.99       bsh 
    914       1.99       bsh #define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
    915       1.99       bsh #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
    916       1.99       bsh #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic
    917       1.99       bsh 
    918      1.103      matt #define	L1_SS_PROTO		L1_SS_PROTO_generic
    919       1.99       bsh #define	L1_S_PROTO		L1_S_PROTO_generic
    920       1.99       bsh #define	L1_C_PROTO		L1_C_PROTO_generic
    921       1.99       bsh #define	L2_S_PROTO		L2_S_PROTO_generic
    922       1.99       bsh 
    923       1.99       bsh #define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
    924       1.99       bsh #define	pmap_zero_page(d)	pmap_zero_page_generic((d))
    925       1.99       bsh #elif ARM_MMU_V6N != 0
    926       1.99       bsh #define	L1_S_PROT_U		L1_S_PROT_U_armv6
    927       1.99       bsh #define	L1_S_PROT_W		L1_S_PROT_W_armv6
    928       1.99       bsh #define	L1_S_PROT_RO		L1_S_PROT_RO_armv6
    929       1.99       bsh #define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv6
    930       1.99       bsh 
    931       1.99       bsh #define	L2_S_PROT_U		L2_S_PROT_U_armv6n
    932       1.99       bsh #define	L2_S_PROT_W		L2_S_PROT_W_armv6n
    933       1.99       bsh #define	L2_S_PROT_RO		L2_S_PROT_RO_armv6n
    934       1.99       bsh #define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv6n
    935       1.99       bsh 
    936       1.99       bsh #define	L2_L_PROT_U		L2_L_PROT_U_armv6n
    937       1.99       bsh #define	L2_L_PROT_W		L2_L_PROT_W_armv6n
    938       1.99       bsh #define	L2_L_PROT_RO		L2_L_PROT_RO_armv6n
    939       1.99       bsh #define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv6n
    940       1.99       bsh 
    941      1.134     skrll #define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv6n
    942      1.134     skrll #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv6n
    943       1.99       bsh #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv6n
    944       1.99       bsh 
    945      1.150     skrll /*
    946      1.150     skrll  * These *_PROTO templates produce writeable mappings, while those for the
    947      1.150     skrll  * other MMU types produce read-only mappings.
    948      1.150     skrll  */
    949      1.103      matt #define	L1_SS_PROTO		L1_SS_PROTO_armv6
    950       1.99       bsh #define	L1_S_PROTO		L1_S_PROTO_armv6
    951       1.99       bsh #define	L1_C_PROTO		L1_C_PROTO_armv6
    952       1.99       bsh #define	L2_S_PROTO		L2_S_PROTO_armv6n
    953       1.99       bsh 
    954       1.99       bsh #define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
    955       1.99       bsh #define	pmap_zero_page(d)	pmap_zero_page_generic((d))
    956       1.99       bsh #elif ARM_MMU_V6C != 0
    957       1.95  jmcneill #define	L1_S_PROT_U		L1_S_PROT_U_generic
    958       1.95  jmcneill #define	L1_S_PROT_W		L1_S_PROT_W_generic
    959       1.95  jmcneill #define	L1_S_PROT_RO		L1_S_PROT_RO_generic
    960       1.95  jmcneill #define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic
    961       1.95  jmcneill 
    962       1.46   thorpej #define	L2_S_PROT_U		L2_S_PROT_U_generic
    963       1.46   thorpej #define	L2_S_PROT_W		L2_S_PROT_W_generic
    964       1.95  jmcneill #define	L2_S_PROT_RO		L2_S_PROT_RO_generic
    965       1.46   thorpej #define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic
    966       1.46   thorpej 
    967       1.95  jmcneill #define	L2_L_PROT_U		L2_L_PROT_U_generic
    968       1.95  jmcneill #define	L2_L_PROT_W		L2_L_PROT_W_generic
    969       1.95  jmcneill #define	L2_L_PROT_RO		L2_L_PROT_RO_generic
    970       1.95  jmcneill #define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic
    971       1.95  jmcneill 
    972       1.49   thorpej #define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
    973       1.49   thorpej #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
    974       1.49   thorpej #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic
    975       1.49   thorpej 
    976      1.130      matt #define	L1_SS_PROTO		L1_SS_PROTO_armv6
    977       1.46   thorpej #define	L1_S_PROTO		L1_S_PROTO_generic
    978       1.46   thorpej #define	L1_C_PROTO		L1_C_PROTO_generic
    979       1.46   thorpej #define	L2_S_PROTO		L2_S_PROTO_generic
    980       1.51   thorpej 
    981       1.51   thorpej #define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
    982       1.51   thorpej #define	pmap_zero_page(d)	pmap_zero_page_generic((d))
    983       1.46   thorpej #elif ARM_MMU_XSCALE == 1
    984       1.95  jmcneill #define	L1_S_PROT_U		L1_S_PROT_U_generic
    985       1.95  jmcneill #define	L1_S_PROT_W		L1_S_PROT_W_generic
    986       1.95  jmcneill #define	L1_S_PROT_RO		L1_S_PROT_RO_generic
    987       1.95  jmcneill #define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic
    988       1.95  jmcneill 
    989       1.46   thorpej #define	L2_S_PROT_U		L2_S_PROT_U_xscale
    990       1.46   thorpej #define	L2_S_PROT_W		L2_S_PROT_W_xscale
    991       1.95  jmcneill #define	L2_S_PROT_RO		L2_S_PROT_RO_xscale
    992       1.46   thorpej #define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale
    993       1.49   thorpej 
    994       1.95  jmcneill #define	L2_L_PROT_U		L2_L_PROT_U_generic
    995       1.95  jmcneill #define	L2_L_PROT_W		L2_L_PROT_W_generic
    996       1.95  jmcneill #define	L2_L_PROT_RO		L2_L_PROT_RO_generic
    997       1.95  jmcneill #define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic
    998       1.95  jmcneill 
    999       1.49   thorpej #define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
   1000       1.49   thorpej #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
   1001       1.49   thorpej #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale
   1002       1.46   thorpej 
   1003      1.103      matt #define	L1_SS_PROTO		L1_SS_PROTO_xscale
   1004       1.46   thorpej #define	L1_S_PROTO		L1_S_PROTO_xscale
   1005       1.46   thorpej #define	L1_C_PROTO		L1_C_PROTO_xscale
   1006       1.46   thorpej #define	L2_S_PROTO		L2_S_PROTO_xscale
   1007       1.51   thorpej 
   1008       1.51   thorpej #define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
   1009       1.51   thorpej #define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
   1010       1.95  jmcneill #elif ARM_MMU_V7 == 1
   1011       1.95  jmcneill #define	L1_S_PROT_U		L1_S_PROT_U_armv7
   1012       1.95  jmcneill #define	L1_S_PROT_W		L1_S_PROT_W_armv7
   1013       1.95  jmcneill #define	L1_S_PROT_RO		L1_S_PROT_RO_armv7
   1014       1.95  jmcneill #define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv7
   1015       1.95  jmcneill 
   1016       1.95  jmcneill #define	L2_S_PROT_U		L2_S_PROT_U_armv7
   1017       1.95  jmcneill #define	L2_S_PROT_W		L2_S_PROT_W_armv7
   1018       1.95  jmcneill #define	L2_S_PROT_RO		L2_S_PROT_RO_armv7
   1019       1.95  jmcneill #define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv7
   1020       1.95  jmcneill 
   1021       1.95  jmcneill #define	L2_L_PROT_U		L2_L_PROT_U_armv7
   1022       1.95  jmcneill #define	L2_L_PROT_W		L2_L_PROT_W_armv7
   1023       1.95  jmcneill #define	L2_L_PROT_RO		L2_L_PROT_RO_armv7
   1024       1.95  jmcneill #define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv7
   1025       1.95  jmcneill 
   1026       1.95  jmcneill #define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv7
   1027       1.95  jmcneill #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv7
   1028       1.95  jmcneill #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv7
   1029       1.95  jmcneill 
   1030      1.150     skrll /*
   1031      1.150     skrll  * These *_PROTO templates produce writeable mappings, while those for the
   1032      1.150     skrll  * other MMU types produce read-only mappings.
   1033      1.150     skrll  */
   1034      1.103      matt #define	L1_SS_PROTO		L1_SS_PROTO_armv7
   1035       1.95  jmcneill #define	L1_S_PROTO		L1_S_PROTO_armv7
   1036       1.95  jmcneill #define	L1_C_PROTO		L1_C_PROTO_armv7
   1037       1.95  jmcneill #define	L2_S_PROTO		L2_S_PROTO_armv7
   1038       1.95  jmcneill 
   1039       1.95  jmcneill #define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
   1040       1.95  jmcneill #define	pmap_zero_page(d)	pmap_zero_page_generic((d))
   1041       1.46   thorpej #endif /* ARM_NMMUS > 1 */
   1042       1.20       chs 
   1043       1.45   thorpej /*
   1044       1.95  jmcneill  * Macros to set and query the write permission on page descriptors.
   1045       1.95  jmcneill  */
   1046       1.95  jmcneill #define l1pte_set_writable(pte)	(((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
   1047       1.95  jmcneill #define l1pte_set_readonly(pte)	(((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)
   1048      1.149     skrll 
   1049      1.152     skrll #define l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
   1050      1.152     skrll #define l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)
   1051       1.95  jmcneill 
   1052       1.95  jmcneill #define l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
   1053      1.152     skrll 				 (L2_S_PROT_RO == 0 || \
   1054       1.95  jmcneill 				  ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))
   1055       1.95  jmcneill 
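                              /*
                               * Illustrative sketch (not from the original source): flipping an L2
                               * small-page PTE to read-only and checking the result.  "ptep" is a
                               * hypothetical pointer to a valid small-page PTE.
                               *
                               *	pt_entry_t opte = *ptep;
                               *	pt_entry_t npte = l2pte_set_readonly(opte);
                               *	KASSERT(!l2pte_writable_p(npte));
                               *
                               * These macros only compute a new PTE value; the caller must store it
                               * and perform any required TLB/cache maintenance itself.
                               */
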
   1056       1.95  jmcneill /*
   1057       1.45   thorpej  * These macros return the PTE protection bits for kernel/user mappings of
   1058       1.45   thorpej  * a given protection; the compiler will usually fold these at compile time.
   1059       1.45   thorpej  */
   1060      1.152     skrll 
   1061      1.152     skrll #define	L1_S_PROT(ku, pr)	(					   \
   1062      1.152     skrll 	(((ku) == PTE_USER) ? 						   \
   1063      1.152     skrll 	    L1_S_PROT_U | (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)	   \
   1064      1.152     skrll 	: 								   \
   1065      1.152     skrll 	    (((L1_S_PROT_RO && 						   \
   1066      1.152     skrll 		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
   1067      1.152     skrll 		    L1_S_PROT_RO : L1_S_PROT_W)))			   \
   1068      1.152     skrll     )
   1069      1.152     skrll 
   1070      1.152     skrll #define	L2_L_PROT(ku, pr)	(					   \
   1071      1.152     skrll 	(((ku) == PTE_USER) ?						   \
   1072      1.152     skrll 	    L2_L_PROT_U | (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)	   \
   1073      1.152     skrll 	:								   \
   1074      1.152     skrll 	    (((L2_L_PROT_RO && 						   \
   1075      1.152     skrll 		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
   1076      1.152     skrll 		    L2_L_PROT_RO : L2_L_PROT_W)))			   \
   1077      1.152     skrll     )
   1078      1.152     skrll 
   1079      1.152     skrll #define	L2_S_PROT(ku, pr)	(					   \
   1080      1.152     skrll 	(((ku) == PTE_USER) ?						   \
   1081      1.152     skrll 	    L2_S_PROT_U | (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)	   \
   1082      1.152     skrll 	:								   \
   1083      1.152     skrll 	    (((L2_S_PROT_RO &&						   \
   1084      1.152     skrll 		((pr) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ) ? \
   1085      1.152     skrll 		    L2_S_PROT_RO : L2_S_PROT_W)))			   \
   1086      1.152     skrll     )
   1087       1.66   thorpej 
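                              /*
                               * Illustrative sketch (not from the original source): two common uses of
                               * the protection macros.  PTE_KERNEL and PTE_USER are the kernel/user
                               * selectors these macros expect as their first argument.
                               *
                               *	pt_entry_t krw = L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
                               *	pt_entry_t uro = L2_S_PROT(PTE_USER, VM_PROT_READ);
                               *
                               * On MMU classes where L2_S_PROT_RO is non-zero, a read-only kernel
                               * request yields L2_S_PROT_RO rather than L2_S_PROT_W; user mappings
                               * always include L2_S_PROT_U.
                               */
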
   1088       1.66   thorpej /*
   1089      1.103      matt  * Macros to test whether a virtual/physical range can be mapped with an
   1090      1.103      matt  * L1 SuperSection, an L1 Section, or an L2 Large Page mapping.
   1091       1.66   thorpej  */
   1092      1.103      matt #define	L1_SS_MAPPABLE_P(va, pa, size)					\
   1093      1.103      matt 	((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)
   1094      1.103      matt 
   1095       1.66   thorpej #define	L1_S_MAPPABLE_P(va, pa, size)					\
   1096       1.66   thorpej 	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
   1097       1.66   thorpej 
   1098       1.67   thorpej #define	L2_L_MAPPABLE_P(va, pa, size)					\
   1099       1.68   thorpej 	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
   1100       1.64   thorpej 
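                              /*
                               * Illustrative sketch (not from the original source): how a mapping loop
                               * might pick the largest translation size for a physically contiguous
                               * region.  "va", "pa" and "resid" are hypothetical locals.
                               *
                               *	if (L1_SS_MAPPABLE_P(va, pa, resid))
                               *		... map an L1 SuperSection (L1_SS_SIZE bytes) ...
                               *	else if (L1_S_MAPPABLE_P(va, pa, resid))
                               *		... map an L1 Section (L1_S_SIZE bytes) ...
                               *	else if (L2_L_MAPPABLE_P(va, pa, resid))
                               *		... map an L2 Large Page (L2_L_SIZE bytes) ...
                               *	else
                               *		... fall back to L2 small pages ...
                               *
                               * Each predicate requires both addresses to be aligned to the mapping
                               * size and the remaining length to cover at least one such mapping.
                               */
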
   1101  1.154.2.1  pgoyette #define	PMAP_MAPSIZE1	L2_L_SIZE
   1102  1.154.2.1  pgoyette #define	PMAP_MAPSIZE2	L1_S_SIZE
   1103  1.154.2.1  pgoyette #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
   1104  1.154.2.1  pgoyette #define	PMAP_MAPSIZE3	L1_SS_SIZE
   1105  1.154.2.1  pgoyette #endif
   1106  1.154.2.1  pgoyette 
   1107      1.119      matt #ifndef _LOCORE
   1108       1.64   thorpej /*
   1109       1.64   thorpej  * Hooks for the pool allocator.
   1110       1.64   thorpej  */
   1111       1.64   thorpej #define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
   1112      1.117      matt extern paddr_t physical_start, physical_end;
   1113      1.113      matt #ifdef PMAP_NEED_ALLOC_POOLPAGE
   1114      1.114      matt struct vm_page *arm_pmap_alloc_poolpage(int);
   1115      1.113      matt #define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
   1116      1.118      matt #endif
   1117      1.118      matt #if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
   1118      1.131      matt vaddr_t	pmap_map_poolpage(paddr_t);
   1119      1.131      matt paddr_t	pmap_unmap_poolpage(vaddr_t);
   1120      1.131      matt #define	PMAP_MAP_POOLPAGE(pa)	pmap_map_poolpage(pa)
   1121      1.131      matt #define PMAP_UNMAP_POOLPAGE(va)	pmap_unmap_poolpage(va)
   1122      1.113      matt #endif
   1123       1.18   thorpej 
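                              /*
                               * Illustrative sketch (hypothetical caller, not from the original
                               * source): roughly how the MI page/pool code can use these hooks when
                               * PMAP_NEED_ALLOC_POOLPAGE is defined for the platform.
                               *
                               *	struct vm_page *pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_USERESERVE);
                               *	if (pg != NULL) {
                               *		vaddr_t va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
                               *		... use the direct-mapped page at va ...
                               *		paddr_t pa = PMAP_UNMAP_POOLPAGE(va);
                               *		... pa identifies the page again for freeing ...
                               *	}
                               */
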
   1124      1.143     skrll #define __HAVE_PMAP_PV_TRACK	1
   1125      1.143     skrll 
   1126      1.143     skrll void pmap_pv_protect(paddr_t, vm_prot_t);
   1127      1.143     skrll 
   1128      1.143     skrll struct pmap_page {
   1129       1.97  uebayasi 	SLIST_HEAD(,pv_entry) pvh_list;		/* pv_entry list */
   1130       1.97  uebayasi 	int pvh_attrs;				/* page attributes */
   1131       1.97  uebayasi 	u_int uro_mappings;
   1132       1.97  uebayasi 	u_int urw_mappings;
   1133       1.97  uebayasi 	union {
   1134       1.97  uebayasi 		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
   1135       1.97  uebayasi 		u_int i_mappings;
   1136       1.97  uebayasi 	} k_u;
   1137       1.97  uebayasi };
   1138       1.97  uebayasi 
   1139       1.97  uebayasi /*
   1140      1.143     skrll  * pmap-specific data store in the vm_page structure.
   1141      1.143     skrll  */
   1142      1.143     skrll #define	__HAVE_VM_PAGE_MD
   1143      1.143     skrll struct vm_page_md {
   1144      1.143     skrll 	struct pmap_page pp;
   1145      1.143     skrll #define	pvh_list	pp.pvh_list
   1146      1.143     skrll #define	pvh_attrs	pp.pvh_attrs
   1147      1.143     skrll #define	uro_mappings	pp.uro_mappings
   1148      1.143     skrll #define	urw_mappings	pp.urw_mappings
   1149      1.143     skrll #define	kro_mappings	pp.k_u.s_mappings[0]
   1150      1.143     skrll #define	krw_mappings	pp.k_u.s_mappings[1]
   1151      1.143     skrll #define	k_mappings	pp.k_u.i_mappings
   1152      1.143     skrll };
   1153      1.143     skrll 
   1154      1.143     skrll #define PMAP_PAGE_TO_MD(ppage) container_of((ppage), struct vm_page_md, pp)
   1155      1.143     skrll 
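                              /*
                               * Illustrative sketch (not from the original source): recovering the
                               * enclosing vm_page_md from a pmap_page pointer, e.g. one handed to MD
                               * code by the pv-tracking interface declared above.
                               *
                               *	struct pmap_page *pp = ...;
                               *	struct vm_page_md *md = PMAP_PAGE_TO_MD(pp);
                               *	if (SLIST_EMPTY(&md->pvh_list))
                               *		... the page currently has no tracked mappings ...
                               *
                               * The short names #defined above let existing code keep using
                               * md->pvh_list etc. even though the fields now live inside pp.
                               */
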
   1156      1.143     skrll /*
   1157       1.97  uebayasi  * Set the default color of each page.
   1158       1.97  uebayasi  */
   1159       1.97  uebayasi #if ARM_MMU_V6 > 0
   1160       1.97  uebayasi #define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
   1161       1.97  uebayasi 	(pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
   1162       1.97  uebayasi #else
   1163       1.97  uebayasi #define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
   1164       1.97  uebayasi 	(pg)->mdpage.pvh_attrs = 0
   1165       1.97  uebayasi #endif
   1166      1.135     skrll 
   1167       1.97  uebayasi #define	VM_MDPAGE_INIT(pg)						\
   1168       1.97  uebayasi do {									\
   1169       1.97  uebayasi 	SLIST_INIT(&(pg)->mdpage.pvh_list);				\
   1170       1.97  uebayasi 	VM_MDPAGE_PVH_ATTRS_INIT(pg);					\
   1171       1.97  uebayasi 	(pg)->mdpage.uro_mappings = 0;					\
   1172       1.97  uebayasi 	(pg)->mdpage.urw_mappings = 0;					\
   1173       1.97  uebayasi 	(pg)->mdpage.k_mappings = 0;					\
   1174       1.97  uebayasi } while (/*CONSTCOND*/0)
   1175       1.97  uebayasi 
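                              /*
                               * Illustrative note (not from the original source): VM_MDPAGE_INIT() is
                               * meant to run once per page when the vm_page structures are set up; it
                               * empties the pv list, clears the user/kernel mapping counts and, when
                               * ARM_MMU_V6 > 0, seeds pvh_attrs with the page's cache color:
                               *
                               *	struct vm_page *pg = ...;
                               *	VM_MDPAGE_INIT(pg);
                               *	KASSERT(SLIST_EMPTY(&pg->mdpage.pvh_list));
                               */
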
   1176       1.97  uebayasi #endif /* !_LOCORE */
   1177       1.97  uebayasi 
   1178       1.18   thorpej #endif /* _KERNEL */
   1179        1.1   reinoud 
   1180        1.1   reinoud #endif	/* _ARM32_PMAP_H_ */
   1181