Home | History | Annotate | Line # | Download | only in include
pmap.h revision 1.46.2.2
      1  1.46.2.2   thorpej /* $NetBSD: pmap.h,v 1.46.2.2 2021/06/17 04:46:16 thorpej Exp $ */
      2       1.1      matt 
      3       1.1      matt /*-
      4       1.1      matt  * Copyright (c) 2014 The NetBSD Foundation, Inc.
      5       1.1      matt  * All rights reserved.
      6       1.1      matt  *
      7       1.1      matt  * This code is derived from software contributed to The NetBSD Foundation
      8       1.1      matt  * by Matt Thomas of 3am Software Foundry.
      9       1.1      matt  *
     10       1.1      matt  * Redistribution and use in source and binary forms, with or without
     11       1.1      matt  * modification, are permitted provided that the following conditions
     12       1.1      matt  * are met:
     13       1.1      matt  * 1. Redistributions of source code must retain the above copyright
     14       1.1      matt  *    notice, this list of conditions and the following disclaimer.
     15       1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     16       1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     17       1.1      matt  *    documentation and/or other materials provided with the distribution.
     18       1.1      matt  *
     19       1.1      matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20       1.1      matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21       1.1      matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22       1.1      matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23       1.1      matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24       1.1      matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25       1.1      matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26       1.1      matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27       1.1      matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28       1.1      matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29       1.1      matt  * POSSIBILITY OF SUCH DAMAGE.
     30       1.1      matt  */
     31       1.1      matt 
     32       1.1      matt #ifndef _AARCH64_PMAP_H_
     33       1.1      matt #define _AARCH64_PMAP_H_
     34       1.1      matt 
     35       1.1      matt #ifdef __aarch64__
     36       1.1      matt 
     37      1.10  jakllsch #ifdef _KERNEL
     38      1.17      maxv #ifdef _KERNEL_OPT
     39      1.17      maxv #include "opt_kasan.h"
     40      1.17      maxv #endif
     41      1.17      maxv 
     42       1.1      matt #include <sys/types.h>
     43       1.1      matt #include <sys/pool.h>
     44       1.2       ryo #include <sys/queue.h>
     45       1.2       ryo #include <uvm/uvm_pglist.h>
     46       1.1      matt 
     47      1.36       ryo #include <aarch64/armreg.h>
     48       1.2       ryo #include <aarch64/pte.h>
     49       1.1      matt 
/* Capabilities of this pmap advertised to machine-independent code. */
#define PMAP_NEED_PROCWR		/* pmap_procwr() is provided (see below) */
#define PMAP_GROWKERNEL			/* kernel VA space can be grown on demand */
#define PMAP_STEAL_MEMORY		/* pmap may steal memory at bootstrap */

#define __HAVE_VM_PAGE_MD		/* struct vm_page has an MD part (struct vm_page_md) */
#define __HAVE_PMAP_PV_TRACK	1	/* pmap_pv_track()/untrack() are supported */
     56       1.2       ryo 
#ifndef KASAN
/*
 * Without KASAN, pool pages can live in the direct map: translation
 * between PA and KVA is pure arithmetic, no pmap_enter() needed.
 */
#define PMAP_MAP_POOLPAGE(pa)		AARCH64_PA_TO_KVA(pa)
#define PMAP_UNMAP_POOLPAGE(va)		AARCH64_KVA_TO_PA(va)

#define PMAP_DIRECT
     62      1.20  jdolecek static __inline int
     63      1.20  jdolecek pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
     64      1.20  jdolecek     int (*process)(void *, size_t, void *), void *arg)
     65      1.20  jdolecek {
     66      1.20  jdolecek 	vaddr_t va = AARCH64_PA_TO_KVA(pa);
     67      1.20  jdolecek 
     68      1.20  jdolecek 	return process((void *)(va + pgoff), len, arg);
     69      1.20  jdolecek }
     70      1.17      maxv #endif
     71      1.15       ryo 
/*
 * Machine-dependent pmap: one per address space.  Page-table pages are
 * linked on pm_vmlist; every pv_entry of the process is on pm_pvlist.
 */
struct pmap {
	kmutex_t pm_lock;			/* protects this pmap's state */
	struct pool *pm_pvpool;			/* pool backing pv_entry allocations */
	pd_entry_t *pm_l0table;			/* L0 table: 512G*512 */
	paddr_t pm_l0table_pa;			/* physical address of pm_l0table */

	LIST_HEAD(, vm_page) pm_vmlist;		/* for L[0123] tables */
	LIST_HEAD(, pv_entry) pm_pvlist;	/* all pv of this process */

	struct pmap_statistics pm_stats;	/* resident/wired page counts */
	unsigned int pm_refcnt;			/* reference count */
	unsigned int pm_idlepdp;		/* # of idle page descriptor pages
						 * (presumably cached for reuse —
						 * confirm against pmap.c) */
	int pm_asid;				/* address space ID for TLB tagging */
	bool pm_activated;			/* true while this pmap is active */
};
     87       1.1      matt 
     88      1.44       ryo /*
     89      1.44       ryo  * should be kept <=32 bytes sized to reduce memory consumption & cache misses,
     90      1.44       ryo  * but it doesn't...
     91      1.44       ryo  */
struct pv_entry {
	struct pv_entry *pv_next;	/* next mapping of the same physical page */
	struct pmap *pv_pmap;		/* pmap that owns this mapping */
	vaddr_t pv_va;	/* for embedded entry (pp_pv) also includes flags */
	void *pv_ptep;	/* pointer for fast pte lookup */
	LIST_ENTRY(pv_entry) pv_proc;	/* belonging to the process (pm_pvlist) */
};
     99      1.32       ryo 
/* Per-physical-page pmap state: pv-list lock plus an embedded first pv. */
struct pmap_page {
	kmutex_t pp_pvlock;		/* protects this page's pv chain */
	struct pv_entry pp_pv;		/* embedded first pv; pv_next chains the rest */
};
    104      1.32       ryo 
/* try to keep vm_page at or under 128 bytes to reduce cache misses */
struct vm_page_md {
	struct pmap_page mdpg_pp;	/* MD per-page pmap state */
};
/* for page descriptor page only: parent PTE pointer aliases pp_pv.pv_ptep */
#define	mdpg_ptep_parent	mdpg_pp.pp_pv.pv_ptep
    111       1.1      matt 
/* Initialize the MD part of a vm_page; delegates to PMAP_PAGE_INIT. */
#define VM_MDPAGE_INIT(pg)					\
	do {							\
		PMAP_PAGE_INIT(&(pg)->mdpage.mdpg_pp);		\
	} while (/*CONSTCOND*/ 0)
    116       1.1      matt 
/* Initialize a pmap_page: set up the pv lock and empty the embedded pv. */
#define PMAP_PAGE_INIT(pp)						\
	do {								\
		mutex_init(&(pp)->pp_pvlock, MUTEX_NODEBUG, IPL_NONE);	\
		(pp)->pp_pv.pv_next = NULL;				\
		(pp)->pp_pv.pv_pmap = NULL;				\
		(pp)->pp_pv.pv_va = 0;					\
		(pp)->pp_pv.pv_ptep = NULL;				\
	} while (/*CONSTCOND*/ 0)
    125      1.11       ryo 
/* saved permission bit for referenced/modified emulation */
#define LX_BLKPAG_OS_READ		LX_BLKPAG_OS_0
#define LX_BLKPAG_OS_WRITE		LX_BLKPAG_OS_1
#define LX_BLKPAG_OS_WIRED		LX_BLKPAG_OS_2
#define LX_BLKPAG_OS_BOOT		LX_BLKPAG_OS_3
#define LX_BLKPAG_OS_RWMASK		(LX_BLKPAG_OS_WRITE|LX_BLKPAG_OS_READ)

/* symbolic names for OS bits 0-3, matching the assignments above */
#define PMAP_PTE_OS0	"read"
#define PMAP_PTE_OS1	"write"
#define PMAP_PTE_OS2	"wired"
#define PMAP_PTE_OS3	"boot"

/* memory attributes are configured MAIR_EL1 in locore */
#define LX_BLKPAG_ATTR_NORMAL_WB	__SHIFTIN(0, LX_BLKPAG_ATTR_INDX)
#define LX_BLKPAG_ATTR_NORMAL_NC	__SHIFTIN(1, LX_BLKPAG_ATTR_INDX)
#define LX_BLKPAG_ATTR_NORMAL_WT	__SHIFTIN(2, LX_BLKPAG_ATTR_INDX)
#define LX_BLKPAG_ATTR_DEVICE_MEM	__SHIFTIN(3, LX_BLKPAG_ATTR_INDX)
#define LX_BLKPAG_ATTR_DEVICE_MEM_SO	__SHIFTIN(4, LX_BLKPAG_ATTR_INDX)
#define LX_BLKPAG_ATTR_MASK		LX_BLKPAG_ATTR_INDX
/* Accessors common to descriptors at every translation level. */
#define lxpde_pa(pde)		((paddr_t)((pde) & LX_TBL_PA))
#define lxpde_valid(pde)	(((pde) & LX_VALID) == LX_VALID)
/* L0 descriptors */
#define l0pde_pa(pde)		lxpde_pa(pde)
#define l0pde_index(v)		(((vaddr_t)(v) & L0_ADDR_BITS) >> L0_SHIFT)
#define l0pde_valid(pde)	lxpde_valid(pde)
/* l0pte always contains table entries */

/* L1 descriptors: either a 1G block or a pointer to an L2 table */
#define l1pde_pa(pde)		lxpde_pa(pde)
#define l1pde_index(v)		(((vaddr_t)(v) & L1_ADDR_BITS) >> L1_SHIFT)
#define l1pde_valid(pde)	lxpde_valid(pde)
#define l1pde_is_block(pde)	(((pde) & LX_TYPE) == LX_TYPE_BLK)
#define l1pde_is_table(pde)	(((pde) & LX_TYPE) == LX_TYPE_TBL)

/* L2 descriptors: either a 2M block or a pointer to an L3 table */
#define l2pde_pa(pde)		lxpde_pa(pde)
#define l2pde_index(v)		(((vaddr_t)(v) & L2_ADDR_BITS) >> L2_SHIFT)
#define l2pde_valid(pde)	lxpde_valid(pde)
#define l2pde_is_block(pde)	(((pde) & LX_TYPE) == LX_TYPE_BLK)
#define l2pde_is_table(pde)	(((pde) & LX_TYPE) == LX_TYPE_TBL)

/* L3 descriptors: leaf page entries */
#define l3pte_pa(pde)		lxpde_pa(pde)
#define l3pte_executable(pde,user)	\
    (((pde) & ((user) ? LX_BLKPAG_UXN : LX_BLKPAG_PXN)) == 0)
#define l3pte_readable(pde)	((pde) & LX_BLKPAG_AF)
#define l3pte_writable(pde)	\
    (((pde) & (LX_BLKPAG_AF|LX_BLKPAG_AP)) == (LX_BLKPAG_AF|LX_BLKPAG_AP_RW))
#define l3pte_index(v)		(((vaddr_t)(v) & L3_ADDR_BITS) >> L3_SHIFT)
#define l3pte_valid(pde)	lxpde_valid(pde)
#define l3pte_is_page(pde)	(((pde) & LX_TYPE) == L3_TYPE_PAG)
/* l3pte always contains page entries */
    175       1.2       ryo 
/* MD bootstrap / fault handling entry points (implemented in pmap.c) */
void pmap_bootstrap(vaddr_t, vaddr_t);
bool pmap_fault_fixup(struct pmap *, vaddr_t, vm_prot_t, bool user);

/* for ddb */
pt_entry_t *kvtopte(vaddr_t);
void pmap_db_pmap_print(struct pmap *, void (*)(const char *, ...) __printflike(1, 2));
void pmap_db_mdpg_print(struct vm_page *, void (*)(const char *, ...) __printflike(1, 2));

pd_entry_t *pmap_l0table(struct pmap *);
    186  1.46.2.1   thorpej /* change attribute of kernel segment */
    187  1.46.2.1   thorpej static inline pt_entry_t
    188  1.46.2.1   thorpej pmap_kvattr(pt_entry_t *ptep, vm_prot_t prot)
    189  1.46.2.1   thorpej {
    190  1.46.2.1   thorpej 	pt_entry_t pte = *ptep;
    191  1.46.2.1   thorpej 	const pt_entry_t opte = pte;
    192  1.46.2.1   thorpej 
    193  1.46.2.1   thorpej 	pte &= ~(LX_BLKPAG_AF|LX_BLKPAG_AP);
    194  1.46.2.1   thorpej 	switch (prot & (VM_PROT_READ|VM_PROT_WRITE)) {
    195  1.46.2.1   thorpej 	case 0:
    196  1.46.2.1   thorpej 		break;
    197  1.46.2.1   thorpej 	case VM_PROT_READ:
    198  1.46.2.1   thorpej 		pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RO);
    199  1.46.2.1   thorpej 		break;
    200  1.46.2.1   thorpej 	case VM_PROT_WRITE:
    201  1.46.2.1   thorpej 	case VM_PROT_READ|VM_PROT_WRITE:
    202  1.46.2.1   thorpej 		pte |= (LX_BLKPAG_AF|LX_BLKPAG_AP_RW);
    203  1.46.2.1   thorpej 		break;
    204  1.46.2.1   thorpej 	}
    205  1.46.2.1   thorpej 
    206  1.46.2.1   thorpej 	if ((prot & VM_PROT_EXECUTE) == 0) {
    207  1.46.2.1   thorpej 		pte |= LX_BLKPAG_PXN;
    208  1.46.2.1   thorpej 	} else {
    209  1.46.2.1   thorpej 		pte |= LX_BLKPAG_AF;
    210  1.46.2.1   thorpej 		pte &= ~LX_BLKPAG_PXN;
    211  1.46.2.1   thorpej 	}
    212  1.46.2.1   thorpej 
    213  1.46.2.1   thorpej 	*ptep = pte;
    214  1.46.2.1   thorpej 
    215  1.46.2.1   thorpej 	return opte;
    216  1.46.2.1   thorpej }
    217       1.2       ryo 
/* pmapboot.c: early (pre-VM) page table construction helpers */
pd_entry_t *pmapboot_pagealloc(void);
void pmapboot_enter(vaddr_t, paddr_t, psize_t, psize_t, pt_entry_t,
    void (*pr)(const char *, ...) __printflike(1, 2));
void pmapboot_enter_range(vaddr_t, paddr_t, psize_t, pt_entry_t,
    void (*)(const char *, ...) __printflike(1, 2));
int pmapboot_protect(vaddr_t, vaddr_t, vm_prot_t);

/* Hooks for the pool allocator */
paddr_t vtophys(vaddr_t);
#define VTOPHYS_FAILED		((paddr_t)-1L)	/* POOL_PADDR_INVALID */
#define POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
    230       1.2       ryo 
/* devmap: table-driven device register mappings entered at bootstrap */
struct pmap_devmap {
	vaddr_t pd_va;		/* virtual address */
	paddr_t pd_pa;		/* physical address */
	psize_t pd_size;	/* size of region */
	vm_prot_t pd_prot;	/* protection code */
	u_int pd_flags;		/* flags for pmap_kenter_pa() */
};

void pmap_devmap_register(const struct pmap_devmap *);
void pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
vaddr_t pmap_devmap_phystov(paddr_t);
paddr_t pmap_devmap_vtophys(paddr_t);
    246       1.2       ryo 
/* Round an address down/up to an L1/L2/L3 block boundary. */
#define L1_TRUNC_BLOCK(x)	((x) & L1_FRAME)
#define L1_ROUND_BLOCK(x)	L1_TRUNC_BLOCK((x) + L1_SIZE - 1)
#define L2_TRUNC_BLOCK(x)	((x) & L2_FRAME)
#define L2_ROUND_BLOCK(x)	L2_TRUNC_BLOCK((x) + L2_SIZE - 1)
#define L3_TRUNC_BLOCK(x)	((x) & L3_FRAME)
#define L3_ROUND_BLOCK(x)	L3_TRUNC_BLOCK((x) + L3_SIZE - 1)

/* devmap regions are aligned/sized at L3 (page) granularity */
#define DEVMAP_ALIGN(x)		L3_TRUNC_BLOCK((x))
#define DEVMAP_SIZE(x)		L3_ROUND_BLOCK((x))

/* Build one pmap_devmap table entry (RW, device memory). */
#define	DEVMAP_ENTRY(va, pa, sz)			\
	{						\
		.pd_va = DEVMAP_ALIGN(va),		\
		.pd_pa = DEVMAP_ALIGN(pa),		\
		.pd_size = DEVMAP_SIZE(sz),		\
		.pd_prot = VM_PROT_READ|VM_PROT_WRITE,	\
		.pd_flags = PMAP_DEV			\
	}
#define	DEVMAP_ENTRY_END	{ 0 }	/* terminator for a devmap table */
    266       1.2       ryo 
/* mmap cookie and flags: cache-mode selector stored in the top bits of a pgno */
#define AARCH64_MMAP_FLAG_SHIFT		(64 - PGSHIFT)
#define AARCH64_MMAP_FLAG_MASK		0xf
#define AARCH64_MMAP_WRITEBACK		0UL
#define AARCH64_MMAP_NOCACHE		1UL
#define AARCH64_MMAP_WRITECOMBINE	2UL
#define AARCH64_MMAP_DEVICE		3UL

/* shifted-into-place forms of the above, for composing mmap cookies */
#define ARM_MMAP_MASK			__BITS(63, AARCH64_MMAP_FLAG_SHIFT)
#define ARM_MMAP_WRITECOMBINE		__SHIFTIN(AARCH64_MMAP_WRITECOMBINE, ARM_MMAP_MASK)
#define ARM_MMAP_WRITEBACK		__SHIFTIN(AARCH64_MMAP_WRITEBACK, ARM_MMAP_MASK)
#define ARM_MMAP_NOCACHE		__SHIFTIN(AARCH64_MMAP_NOCACHE, ARM_MMAP_MASK)
#define ARM_MMAP_DEVICE			__SHIFTIN(AARCH64_MMAP_DEVICE, ARM_MMAP_MASK)

/* MD flag bits accepted by pmap_kenter_pa() */
#define	PMAP_PTE			0x10000000 /* kenter_pa */
#define	PMAP_DEV			0x20000000 /* kenter_pa */
#define	PMAP_DEV_SO			0x40000000 /* kenter_pa */
#define	PMAP_DEV_MASK			(PMAP_DEV | PMAP_DEV_SO)
    285       1.2       ryo 
    286       1.2       ryo static inline u_int
    287       1.2       ryo aarch64_mmap_flags(paddr_t mdpgno)
    288       1.2       ryo {
    289       1.2       ryo 	u_int nflag, pflag;
    290       1.2       ryo 
    291       1.2       ryo 	/*
    292      1.45     skrll 	 * aarch64 arch has 5 memory attributes defined:
    293       1.2       ryo 	 *
    294       1.2       ryo 	 *  WriteBack      - write back cache
    295      1.31     skrll 	 *  WriteThru      - write through cache
    296       1.2       ryo 	 *  NoCache        - no cache
    297      1.27  jmcneill 	 *  Device(nGnRE)  - no Gathering, no Reordering, Early write ack
    298      1.28  jmcneill 	 *  Device(nGnRnE) - no Gathering, no Reordering, no Early write ack
    299       1.2       ryo 	 *
    300       1.2       ryo 	 * but pmap has PMAP_{NOCACHE,WRITE_COMBINE,WRITE_BACK} flags.
    301       1.2       ryo 	 */
    302       1.2       ryo 
    303       1.2       ryo 	nflag = (mdpgno >> AARCH64_MMAP_FLAG_SHIFT) & AARCH64_MMAP_FLAG_MASK;
    304       1.2       ryo 	switch (nflag) {
    305       1.2       ryo 	case AARCH64_MMAP_DEVICE:
    306       1.2       ryo 		pflag = PMAP_DEV;
    307       1.2       ryo 		break;
    308       1.2       ryo 	case AARCH64_MMAP_WRITECOMBINE:
    309       1.2       ryo 		pflag = PMAP_WRITE_COMBINE;
    310       1.2       ryo 		break;
    311       1.2       ryo 	case AARCH64_MMAP_WRITEBACK:
    312       1.2       ryo 		pflag = PMAP_WRITE_BACK;
    313       1.2       ryo 		break;
    314       1.2       ryo 	case AARCH64_MMAP_NOCACHE:
    315       1.2       ryo 	default:
    316       1.2       ryo 		pflag = PMAP_NOCACHE;
    317       1.2       ryo 		break;
    318       1.2       ryo 	}
    319       1.2       ryo 	return pflag;
    320       1.2       ryo }
    321       1.2       ryo 
/* MI glue macros over the MD implementation */
#define pmap_phys_address(pa)		aarch64_ptob((pa))
#define pmap_mmap_flags(ppn)		aarch64_mmap_flags((ppn))

#define pmap_update(pmap)		((void)0)	/* no deferred work to flush */
#define pmap_copy(dp,sp,d,l,s)		((void)0)	/* optional optimization; unimplemented */
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)

void	pmap_procwr(struct proc *, vaddr_t, int);
bool	pmap_extract_coherency(pmap_t, vaddr_t, paddr_t *, bool *);
void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

/* unmanaged-page pv tracking (__HAVE_PMAP_PV_TRACK) */
void	pmap_pv_init(void);
void	pmap_pv_track(paddr_t, psize_t);
void	pmap_pv_untrack(paddr_t, psize_t);
void	pmap_pv_protect(paddr_t, vm_prot_t);

#define	PMAP_MAPSIZE1	L2_SIZE
    340       1.2       ryo 
    341      1.10  jakllsch #endif /* _KERNEL */
    342      1.10  jakllsch 
    343       1.1      matt #elif defined(__arm__)
    344       1.1      matt 
    345       1.1      matt #include <arm/pmap.h>
    346       1.1      matt 
    347       1.2       ryo #endif /* __arm__/__aarch64__ */
    348       1.1      matt 
    349       1.2       ryo #endif /* !_AARCH64_PMAP_ */
    350