/*	$NetBSD: pmap_machdep.h,v 1.10 2025/10/09 06:18:38 skrll Exp $	*/

/*-
 * Copyright (c) 2022 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_AARCH64_PMAP_MACHDEP_H_
#define	_AARCH64_PMAP_MACHDEP_H_

#include <arm/cpufunc.h>

#define	PMAP_HWPAGEWALKER		1

#define	PMAP_PDETABSIZE	(PAGE_SIZE / sizeof(pd_entry_t))
#define	PMAP_SEGTABSIZE	NSEGPG

#define	PMAP_INVALID_PDETAB_ADDRESS	((pmap_pdetab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
#define	PMAP_INVALID_SEGTAB_ADDRESS	((pmap_segtab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))

#define	NPTEPG		(PAGE_SIZE / sizeof(pt_entry_t))
#define	NPDEPG		(PAGE_SIZE / sizeof(pd_entry_t))

#define	PTPSHIFT	3
#define	PTPLENGTH	(PGSHIFT - PTPSHIFT)
#define	SEGSHIFT	(PGSHIFT + PTPLENGTH)	/* LOG2(NBSEG) */

#define	NBSEG		(1 << SEGSHIFT)		/* bytes/segment */
#define	SEGOFSET	(NBSEG - 1)		/* byte offset into segment */

#define	SEGLENGTH	(PGSHIFT - 3)

#define	XSEGSHIFT	(SEGSHIFT + SEGLENGTH + SEGLENGTH)
						/* LOG2(NBXSEG) */

#define	NBXSEG		(1UL << XSEGSHIFT)	/* bytes/xsegment */
#define	XSEGOFSET	(NBXSEG - 1)		/* byte offset into xsegment */
#define	XSEGLENGTH	(PGSHIFT - 3)
#define	NXSEGPG		(1 << XSEGLENGTH)
#define	NSEGPG		(1 << SEGLENGTH)

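/*
 * With the usual 4 KiB page size (PGSHIFT == 12) these work out to
 * 512 entries per table page (NPTEPG/NPDEPG/NSEGPG/NXSEGPG),
 * SEGSHIFT == 21 (NBSEG == 2 MiB) and XSEGSHIFT == 39 (NBXSEG == 512 GiB).
 */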

#ifndef	__BSD_PTENTRY_T__
#define	__BSD_PTENTRY_T__
#define	PRIxPTE		PRIx64
#endif /* __BSD_PTENTRY_T__ */

#define	KERNEL_PID	0

#define	__HAVE_PMAP_PV_TRACK
#define	__HAVE_PMAP_MD

/* XXX temporary */
#define	__HAVE_UNLOCKED_PMAP

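/*
 * Initialize the machine-dependent part of a pmap page: an empty PV
 * list and no attributes.
 */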
#define	PMAP_PAGE_INIT(pp)				\
do {							\
	(pp)->pp_md.mdpg_first.pv_next = NULL;		\
	(pp)->pp_md.mdpg_first.pv_pmap = NULL;		\
	(pp)->pp_md.mdpg_first.pv_va = 0;		\
	(pp)->pp_md.mdpg_attrs = 0;			\
	VM_PAGEMD_PVLIST_LOCK_INIT(&(pp)->pp_md);	\
} while (/* CONSTCOND */ 0)

struct pmap_md {
	paddr_t			pmd_l0_pa;
};

#define	pm_l0_pa	pm_md.pmd_l0_pa

void	pmap_md_pdetab_init(struct pmap *);
void	pmap_md_pdetab_fini(struct pmap *);

vaddr_t	pmap_md_map_poolpage(paddr_t, size_t);
paddr_t	pmap_md_unmap_poolpage(vaddr_t, size_t);

struct vm_page *
	pmap_md_alloc_poolpage(int);

bool	pmap_md_direct_mapped_vaddr_p(vaddr_t);
paddr_t	pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
vaddr_t	pmap_md_direct_map_paddr(paddr_t);
bool	pmap_md_io_vaddr_p(vaddr_t);

void	pmap_md_activate_efirt(void);
void	pmap_md_deactivate_efirt(void);

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

vsize_t	pmap_kenter_range(vaddr_t, paddr_t, vsize_t, vm_prot_t, u_int flags);

#include <uvm/pmap/vmpagemd.h>
#include <uvm/pmap/pmap.h>
#include <uvm/pmap/pmap_pvt.h>
#include <uvm/pmap/pmap_tlb.h>
#include <uvm/pmap/pmap_synci.h>
#include <uvm/pmap/tlb.h>

#include <uvm/uvm_page.h>

#define	POOL_VTOPHYS(va)	vtophys((vaddr_t)(va))

struct pmap_page {
	struct vm_page_md	pp_md;
};

#define	PMAP_PAGE_TO_MD(ppage)	(&((ppage)->pp_md))

#define	PVLIST_EMPTY_P(pg)	VM_PAGEMD_PVLIST_EMPTY_P(VM_PAGE_TO_MD(pg))

#define	LX_BLKPAG_OS_MODIFIED	LX_BLKPAG_OS_0

#define	PMAP_PTE_OS0	"modified"
#define	PMAP_PTE_OS1	"(unk)"

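/* Return the physical address of the pmap's level 0 (root) table. */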
static inline paddr_t
pmap_l0pa(struct pmap *pm)
{
	return pm->pm_l0_pa;
}

#if defined(__PMAP_PRIVATE)

#include <uvm/uvm_physseg.h>
struct vm_page_md;

void	pmap_md_icache_sync_all(void);
void	pmap_md_icache_sync_range_index(vaddr_t, vsize_t);
void	pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
bool	pmap_md_vca_add(struct vm_page_md *, vaddr_t, pt_entry_t *);
void	pmap_md_vca_clean(struct vm_page_md *, int);
void	pmap_md_vca_remove(struct vm_page_md *, vaddr_t, bool, bool);
bool	pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);

void	pmap_md_xtab_activate(pmap_t, struct lwp *);
void	pmap_md_xtab_deactivate(pmap_t);


#ifdef MULTIPROCESSOR
#define	PMAP_NO_PV_UNCACHED
#endif

static inline void
pmap_md_init(void)
{
	// nothing
}


static inline bool
pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
{
	// The TLB is not walked on this port, so this is never called.
	return false;
}


static inline bool
pmap_md_virtual_cache_aliasing_p(void)
{
	return false;
}


static inline vsize_t
pmap_md_cache_prefer_mask(void)
{
	return 0;
}


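/*
 * PTEs within a page table page are contiguous, so the next PTE is
 * simply the next array element.
 */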
static inline pt_entry_t *
pmap_md_nptep(pt_entry_t *ptep)
{

	return ptep + 1;
}


static inline paddr_t
pte_to_paddr(pt_entry_t pte)
{

	return l3pte_pa(pte);
}


static inline bool
pte_valid_p(pt_entry_t pte)
{

	return l3pte_valid(pte);
}


static inline void
pmap_md_clean_page(struct vm_page_md *md, bool is_src)
{
}


static inline bool
pte_modified_p(pt_entry_t pte)
{

	return (pte & LX_BLKPAG_OS_MODIFIED) != 0;
}


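/*
 * Wiring is tracked in one of the PTE bits that the architecture
 * reserves for operating system use (LX_BLKPAG_OS_WIRED).
 */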
static inline bool
pte_wired_p(pt_entry_t pte)
{

	return (pte & LX_BLKPAG_OS_WIRED) != 0;
}


static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{

	return pte | LX_BLKPAG_OS_WIRED;
}


static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{

	return pte & ~LX_BLKPAG_OS_WIRED;
}


static inline uint64_t
pte_value(pt_entry_t pte)
{

	return pte;
}

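/*
 * Only Normal Write-Back memory counts as "cached" here; non-cacheable
 * and device attributes do not.
 */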
static inline bool
pte_cached_p(pt_entry_t pte)
{

	return ((pte & LX_BLKPAG_ATTR_MASK) == LX_BLKPAG_ATTR_NORMAL_WB);
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{

	return false;
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{

	/* Not valid entry */
	return 0;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t prot)
{

	return (pte & ~LX_BLKPAG_AP)
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);
}

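/*
 * Rather than touching the AP bits this clears the access flag, so
 * any subsequent access will fault.
 */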
static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{

	return pte & ~LX_BLKPAG_AF;
}

static inline pt_entry_t
pte_cached_change(pt_entry_t pte, bool cached)
{
	pte &= ~LX_BLKPAG_ATTR_MASK;
	pte |= (cached ? LX_BLKPAG_ATTR_NORMAL_WB : LX_BLKPAG_ATTR_NORMAL_NC);

	return pte;
}

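/*
 * Publish a new PTE: the dsb orders the store ahead of the barrier so
 * the update is visible to the hardware table walker.
 */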
static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{

	*ptep = pte;
	dsb(ishst);
	/*
	 * If this mapping is going to be used by userland then the eret
	 * back to userland *can* act as the isb, but it might not
	 * (e.g. Apple M1).
	 *
	 * If this is a kernel mapping then an isb is always needed (on
	 * some micro-architectures).
	 */

	isb();
}

static inline pd_entry_t
pte_invalid_pde(void)
{

	return 0;
}


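/*
 * Intermediate-level entries are always table descriptors; entries for
 * user pmaps are additionally marked non-global.
 */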
static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{

	return LX_VALID | LX_TYPE_TBL | (kernel_p ? 0 : LX_BLKPAG_NG) | pa;
}


static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{

	return LX_VALID | LX_TYPE_TBL | (kernel_p ? 0 : LX_BLKPAG_NG) | pa;
}


static inline bool
pte_pde_valid_p(pd_entry_t pde)
{

	return lxpde_valid(pde);
}


static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{

	return lxpde_pa(pde);
}


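/*
 * Install a PDE, atomically on MULTIPROCESSOR kernels.  Returns the
 * entry previously in place (on uniprocessor kernels the caller's
 * expected value is returned unchanged).
 */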
static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pd_entry_t npde)
{
#ifdef MULTIPROCESSOR
	opde = atomic_cas_64(pdep, opde, npde);
#else
	*pdep = npde;
#endif
	return opde;
}


static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{

	*pdep = npde;
}


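/*
 * Translate the MI pmap cacheability flags into the memory attribute
 * index bits of a block/page descriptor.
 */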
static inline pt_entry_t
pte_memattr(u_int flags)
{

	switch (flags & (PMAP_DEV_MASK | PMAP_CACHE_MASK)) {
	case PMAP_DEV_NP ... PMAP_DEV_NP | PMAP_CACHE_MASK:
		/* Device-nGnRnE */
		return LX_BLKPAG_ATTR_DEVICE_MEM_NP;
	case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:
		/* Device-nGnRE */
		return LX_BLKPAG_ATTR_DEVICE_MEM;
	case PMAP_NOCACHE:
	case PMAP_NOCACHE_OVR:
	case PMAP_WRITE_COMBINE:
		/* only no-cache */
		return LX_BLKPAG_ATTR_NORMAL_NC;
	case PMAP_WRITE_BACK:
	case 0:
	default:
		return LX_BLKPAG_ATTR_NORMAL_WB;
	}
}


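/*
 * Construct a PTE for an unmanaged (pmap_kenter_pa) kernel mapping:
 * always valid and wired, with the access flag preset so it never
 * faults for referenced/modified emulation.
 */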
static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    u_int flags)
{
	KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);

	pt_entry_t pte = pa
	    | LX_VALID
#ifdef MULTIPROCESSOR
	    | LX_BLKPAG_SH_IS
#endif
	    | L3_TYPE_PAG
	    | LX_BLKPAG_AF
	    | LX_BLKPAG_UXN | LX_BLKPAG_PXN
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW)
	    | LX_BLKPAG_OS_WIRED;

	if (prot & VM_PROT_EXECUTE)
		pte &= ~LX_BLKPAG_PXN;

	pte &= ~LX_BLKPAG_ATTR_MASK;
	pte |= pte_memattr(flags);

	return pte;
}


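/*
 * Construct a PTE for an EFI runtime services mapping: non-global and,
 * for now, not user-accessible.
 */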
#if defined(EFI_RUNTIME)
static inline pt_entry_t
pte_make_enter_efirt(paddr_t pa, vm_prot_t prot, u_int flags)
{
	KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);

	pt_entry_t npte = pa
	    | LX_VALID
#ifdef MULTIPROCESSOR
	    | LX_BLKPAG_SH_IS
#endif
	    | L3_TYPE_PAG
	    | LX_BLKPAG_AF
	    | LX_BLKPAG_NG /* | LX_BLKPAG_APUSER */
	    | LX_BLKPAG_UXN | LX_BLKPAG_PXN
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);

	if (prot & VM_PROT_EXECUTE)
		npte &= ~LX_BLKPAG_PXN;

	npte &= ~LX_BLKPAG_ATTR_MASK;
	npte |= pte_memattr(flags);

	return npte;
}
#endif


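/*
 * Construct a PTE for a managed (pmap_enter) mapping.  The access flag
 * is only preset when referenced/modified emulation is not needed;
 * otherwise the first access faults so the emulation can record it.
 */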
static inline pt_entry_t
pte_make_enter(paddr_t pa, const struct vm_page_md *mdpg, vm_prot_t prot,
    u_int flags, bool is_kernel_pmap_p)
{
	KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);

	pt_entry_t npte = pa
	    | LX_VALID
#ifdef MULTIPROCESSOR
	    | LX_BLKPAG_SH_IS
#endif
	    | L3_TYPE_PAG
	    | LX_BLKPAG_UXN | LX_BLKPAG_PXN
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);

	if ((prot & VM_PROT_WRITE) != 0 &&
	    ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
		/*
		 * This is a writable mapping, and the page's mod state
		 * indicates it has already been modified.  No need for
		 * modified emulation.
		 */
		npte |= LX_BLKPAG_AF;
	} else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
		/*
		 * - The access type indicates that we don't need to do
		 *   referenced emulation.
		 * OR
		 * - The physical page has already been referenced so no need
		 *   to re-do referenced emulation here.
		 */
		npte |= LX_BLKPAG_AF;
	}

	if (prot & VM_PROT_EXECUTE)
		npte &= (is_kernel_pmap_p ? ~LX_BLKPAG_PXN : ~LX_BLKPAG_UXN);

	npte &= ~LX_BLKPAG_ATTR_MASK;
	npte |= pte_memattr(flags);

	/*
	 * Make sure userland mappings get the right permissions
	 */
	if (!is_kernel_pmap_p) {
		npte |= LX_BLKPAG_NG | LX_BLKPAG_APUSER;
	}

	return npte;
}
#endif /* __PMAP_PRIVATE */

#endif	/* _AARCH64_PMAP_MACHDEP_H_ */
