/*	$NetBSD: pmap_machdep.h,v 1.10 2025/10/09 06:18:38 skrll Exp $	*/

/*-
 * Copyright (c) 2022 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_AARCH64_PMAP_MACHDEP_H_
#define	_AARCH64_PMAP_MACHDEP_H_

#include <arm/cpufunc.h>

#define	PMAP_HWPAGEWALKER		1

#define	PMAP_PDETABSIZE	(PAGE_SIZE / sizeof(pd_entry_t))
#define	PMAP_SEGTABSIZE	NSEGPG

#define	PMAP_INVALID_PDETAB_ADDRESS	((pmap_pdetab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
#define	PMAP_INVALID_SEGTAB_ADDRESS	((pmap_segtab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))

#define	NPTEPG		(PAGE_SIZE / sizeof(pt_entry_t))
#define	NPDEPG		(PAGE_SIZE / sizeof(pd_entry_t))

#define	PTPSHIFT	3
#define	PTPLENGTH	(PGSHIFT - PTPSHIFT)
#define	SEGSHIFT	(PGSHIFT + PTPLENGTH)	/* LOG2(NBSEG) */

#define	NBSEG		(1 << SEGSHIFT)		/* bytes/segment */
#define	SEGOFSET	(NBSEG - 1)		/* byte offset into segment */

#define	SEGLENGTH	(PGSHIFT - 3)

#define	XSEGSHIFT	(SEGSHIFT + SEGLENGTH + SEGLENGTH)
						/* LOG2(NBXSEG) */

#define	NBXSEG		(1UL << XSEGSHIFT)	/* bytes/xsegment */
#define	XSEGOFSET	(NBXSEG - 1)		/* byte offset into xsegment */
#define	XSEGLENGTH	(PGSHIFT - 3)
#define	NXSEGPG		(1 << XSEGLENGTH)
#define	NSEGPG		(1 << SEGLENGTH)
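
/*
 * Worked example, assuming the usual 4KiB page size (PGSHIFT == 12):
 * PTPLENGTH = SEGLENGTH = 12 - 3 = 9, so NPTEPG = NPDEPG = 4096 / 8 = 512
 * entries per page-table page; SEGSHIFT = 12 + 9 = 21, so a segment (one
 * leaf page table) maps NBSEG = 2MiB; XSEGSHIFT = 21 + 9 + 9 = 39, so an
 * xsegment maps NBXSEG = 512GiB.
 */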


#ifndef	__BSD_PTENTRY_T__
#define	__BSD_PTENTRY_T__
#define	PRIxPTE		PRIx64
#endif /* __BSD_PTENTRY_T__ */

#define	KERNEL_PID	0

#define	__HAVE_PMAP_PV_TRACK
#define	__HAVE_PMAP_MD

/* XXX temporary */
#define	__HAVE_UNLOCKED_PMAP

#define	PMAP_PAGE_INIT(pp)				\
do {							\
	(pp)->pp_md.mdpg_first.pv_next = NULL;		\
	(pp)->pp_md.mdpg_first.pv_pmap = NULL;		\
	(pp)->pp_md.mdpg_first.pv_va = 0;		\
	(pp)->pp_md.mdpg_attrs = 0;			\
	VM_PAGEMD_PVLIST_LOCK_INIT(&(pp)->pp_md);	\
} while (/* CONSTCOND */ 0)
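
/*
 * PMAP_PAGE_INIT() above resets the pv_entry embedded in the MD page
 * structure (no mappings, no owning pmap, no attributes) and initialises
 * the pv-list lock, so a page starts out with an empty pv list.
 */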

struct pmap_md {
	paddr_t			pmd_l0_pa;
};

#define	pm_l0_pa	pm_md.pmd_l0_pa
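
/*
 * The MD part of struct pmap only carries the physical address of the
 * pmap's level 0 (root) translation table; pm_l0_pa is shorthand for
 * reaching it through the pm_md member embedded in the MI struct pmap.
 */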

void	pmap_md_pdetab_init(struct pmap *);
void	pmap_md_pdetab_fini(struct pmap *);

vaddr_t	pmap_md_map_poolpage(paddr_t, size_t);
paddr_t	pmap_md_unmap_poolpage(vaddr_t, size_t);

struct vm_page *
	pmap_md_alloc_poolpage(int);

bool	pmap_md_direct_mapped_vaddr_p(vaddr_t);
paddr_t	pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
vaddr_t	pmap_md_direct_map_paddr(paddr_t);
bool	pmap_md_io_vaddr_p(vaddr_t);

void	pmap_md_activate_efirt(void);
void	pmap_md_deactivate_efirt(void);

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

vsize_t	pmap_kenter_range(vaddr_t, paddr_t, vsize_t, vm_prot_t, u_int flags);

#include <uvm/pmap/vmpagemd.h>
#include <uvm/pmap/pmap.h>
#include <uvm/pmap/pmap_pvt.h>
#include <uvm/pmap/pmap_tlb.h>
#include <uvm/pmap/pmap_synci.h>
#include <uvm/pmap/tlb.h>

#include <uvm/uvm_page.h>

#define	POOL_VTOPHYS(va)	vtophys((vaddr_t)(va))

struct pmap_page {
	struct vm_page_md	pp_md;
};

#define	PMAP_PAGE_TO_MD(ppage)	(&((ppage)->pp_md))

#define	PVLIST_EMPTY_P(pg)	VM_PAGEMD_PVLIST_EMPTY_P(VM_PAGE_TO_MD(pg))

#define	LX_BLKPAG_OS_MODIFIED	LX_BLKPAG_OS_0

#define	PMAP_PTE_OS0	"modified"
#define	PMAP_PTE_OS1	"(unk)"
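
/*
 * LX_BLKPAG_OS_0, one of the PTE bits the architecture leaves to software,
 * records the "modified" state; PMAP_PTE_OS0/PMAP_PTE_OS1 name two of the
 * software bits, presumably for use when PTEs are formatted for display.
 */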

static inline paddr_t
pmap_l0pa(struct pmap *pm)
{
	return pm->pm_l0_pa;
}
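
/*
 * pmap_l0pa() returns the physical address of the pmap's root translation
 * table; the expectation is that the MD activation code (see
 * pmap_md_xtab_activate() below) installs this, together with the pmap's
 * ASID, in the translation table base register when switching to the pmap.
 */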

#if defined(__PMAP_PRIVATE)

#include <uvm/uvm_physseg.h>
struct vm_page_md;

void	pmap_md_icache_sync_all(void);
void	pmap_md_icache_sync_range_index(vaddr_t, vsize_t);
void	pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
bool	pmap_md_vca_add(struct vm_page_md *, vaddr_t, pt_entry_t *);
void	pmap_md_vca_clean(struct vm_page_md *, int);
void	pmap_md_vca_remove(struct vm_page_md *, vaddr_t, bool, bool);
bool	pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);

void	pmap_md_xtab_activate(pmap_t, struct lwp *);
void	pmap_md_xtab_deactivate(pmap_t);

vaddr_t	pmap_md_direct_map_paddr(paddr_t);


#ifdef MULTIPROCESSOR
#define	PMAP_NO_PV_UNCACHED
#endif

static inline void
pmap_md_init(void)
{
	// nothing
}


static inline bool
pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
{
	// TLB not walked and so not called.
	return false;
}


static inline bool
pmap_md_virtual_cache_aliasing_p(void)
{
	return false;
}


static inline vsize_t
pmap_md_cache_prefer_mask(void)
{
	return 0;
}


static inline pt_entry_t *
pmap_md_nptep(pt_entry_t *ptep)
{

	return ptep + 1;
}


static __inline paddr_t
pte_to_paddr(pt_entry_t pte)
{

	return l3pte_pa(pte);
}


static inline bool
pte_valid_p(pt_entry_t pte)
{

	return l3pte_valid(pte);
}


static inline void
pmap_md_clean_page(struct vm_page_md *md, bool is_src)
{
}


static inline bool
pte_modified_p(pt_entry_t pte)
{

	return (pte & LX_BLKPAG_OS_MODIFIED) != 0;
}


static inline bool
pte_wired_p(pt_entry_t pte)
{

	return (pte & LX_BLKPAG_OS_WIRED) != 0;
}


static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{

	return pte | LX_BLKPAG_OS_WIRED;
}


static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{

	return pte & ~LX_BLKPAG_OS_WIRED;
}
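
/*
 * Modified and wired state are kept in the software-defined PTE bits
 * (LX_BLKPAG_OS_*), which the MMU ignores, so marking a mapping wired or
 * unwired is simply a bit flip on the entry.
 */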


static inline uint64_t
pte_value(pt_entry_t pte)
{

	return pte;
}

static inline bool
pte_cached_p(pt_entry_t pte)
{

	return ((pte & LX_BLKPAG_ATTR_MASK) == LX_BLKPAG_ATTR_NORMAL_WB);
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{

	return false;
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{

	/* Not valid entry */
	return kernel_p ? 0 : 0;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t prot)
{

	return (pte & ~LX_BLKPAG_AP)
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{

	return pte & ~LX_BLKPAG_AF;
}
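
/*
 * Note that pte_prot_nowrite() clears the Access Flag rather than
 * switching the AP field to read-only; assuming hardware access-flag
 * updates are not enabled, the next reference then faults and the MI
 * pmap gets a chance to re-evaluate the mapping, matching the AF-based
 * referenced/modified emulation in pte_make_enter() below.
 */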

static inline pt_entry_t
pte_cached_change(pt_entry_t pte, bool cached)
{
	pte &= ~LX_BLKPAG_ATTR_MASK;
	pte |= (cached ? LX_BLKPAG_ATTR_NORMAL_WB : LX_BLKPAG_ATTR_NORMAL_NC);

	return pte;
}

static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{

	*ptep = pte;
	dsb(ishst);
	/*
	 * If this mapping is going to be used by userland then the eret *can*
	 * act as the isb, but might not (e.g. Apple M1).
	 *
	 * If this mapping is for the kernel then the isb is always needed
	 * (on some micro-architectures).
	 */

	isb();
}

static inline pd_entry_t
pte_invalid_pde(void)
{

	return 0;
}


static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{

	return LX_VALID | LX_TYPE_TBL | (kernel_p ? 0 : LX_BLKPAG_NG) | pa;
}


static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{

	return LX_VALID | LX_TYPE_TBL | (kernel_p ? 0 : LX_BLKPAG_NG) | pa;
}
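
/*
 * pte_pde_pdetab() and pte_pde_ptpage() build identical descriptors
 * because every intermediate level on AArch64 uses the same
 * table-descriptor format; the MI pmap merely distinguishes tables of
 * PDEs from tables of PTEs at the call sites.
 */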


static inline bool
pte_pde_valid_p(pd_entry_t pde)
{

	return lxpde_valid(pde);
}


static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{

	return lxpde_pa(pde);
}


static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pd_entry_t npde)
{
#ifdef MULTIPROCESSOR
	opde = atomic_cas_64(pdep, opde, npde);
#else
	*pdep = npde;
#endif
	return opde;
}
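
/*
 * On MULTIPROCESSOR kernels the new PDE is installed with a
 * compare-and-swap, so racing page-table growth is resolved by whichever
 * CPU wins the CAS and the caller can compare the returned value with
 * the expected one.  On uniprocessor kernels a plain store suffices and
 * the expected value is returned unchanged.
 */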


static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{

	*pdep = npde;
}


static inline pt_entry_t
pte_memattr(u_int flags)
{

	switch (flags & (PMAP_DEV_MASK | PMAP_CACHE_MASK)) {
	case PMAP_DEV_NP ... PMAP_DEV_NP | PMAP_CACHE_MASK:
		/* Device-nGnRnE */
		return LX_BLKPAG_ATTR_DEVICE_MEM_NP;
	case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:
		/* Device-nGnRE */
		return LX_BLKPAG_ATTR_DEVICE_MEM;
	case PMAP_NOCACHE:
	case PMAP_NOCACHE_OVR:
	case PMAP_WRITE_COMBINE:
		/* only no-cache */
		return LX_BLKPAG_ATTR_NORMAL_NC;
	case PMAP_WRITE_BACK:
	case 0:
	default:
		return LX_BLKPAG_ATTR_NORMAL_WB;
	}
}
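
/*
 * The case ranges above are a GCC/Clang extension: any PMAP_CACHE_* bits
 * combined with a PMAP_DEV* flag still select device memory, i.e. the
 * device flags take precedence over the cacheability hints.  Write
 * combining has no separate attribute here and maps to non-cacheable
 * normal memory.
 */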


static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    u_int flags)
{
	KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);

	pt_entry_t pte = pa
	    | LX_VALID
#ifdef MULTIPROCESSOR
	    | LX_BLKPAG_SH_IS
#endif
	    | L3_TYPE_PAG
	    | LX_BLKPAG_AF
	    | LX_BLKPAG_UXN | LX_BLKPAG_PXN
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW)
	    | LX_BLKPAG_OS_WIRED;

	if (prot & VM_PROT_EXECUTE)
		pte &= ~LX_BLKPAG_PXN;

	pte &= ~LX_BLKPAG_ATTR_MASK;
	pte |= pte_memattr(flags);

	return pte;
}
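
/*
 * pmap_kenter_pa() mappings built above are wired, global (no nG bit) and
 * have the Access Flag pre-set so they never take referenced-emulation
 * faults; they start with both UXN and PXN set, and PXN is cleared only
 * when execute permission is requested, since these mappings are
 * kernel-only.
 */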


#if defined(EFI_RUNTIME)
static inline pt_entry_t
pte_make_enter_efirt(paddr_t pa, vm_prot_t prot, u_int flags)
{
	KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);

	pt_entry_t npte = pa
	    | LX_VALID
#ifdef MULTIPROCESSOR
	    | LX_BLKPAG_SH_IS
#endif
	    | L3_TYPE_PAG
	    | LX_BLKPAG_AF
	    | LX_BLKPAG_NG /* | LX_BLKPAG_APUSER */
	    | LX_BLKPAG_UXN | LX_BLKPAG_PXN
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);

	if (prot & VM_PROT_EXECUTE)
		npte &= ~LX_BLKPAG_PXN;

	npte &= ~LX_BLKPAG_ATTR_MASK;
	npte |= pte_memattr(flags);

	return npte;
}
#endif


static inline pt_entry_t
pte_make_enter(paddr_t pa, const struct vm_page_md *mdpg, vm_prot_t prot,
    u_int flags, bool is_kernel_pmap_p)
{
	KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);

	pt_entry_t npte = pa
	    | LX_VALID
#ifdef MULTIPROCESSOR
	    | LX_BLKPAG_SH_IS
#endif
	    | L3_TYPE_PAG
	    | LX_BLKPAG_UXN | LX_BLKPAG_PXN
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ? LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);

	if ((prot & VM_PROT_WRITE) != 0 &&
	    ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
		/*
		 * This is a writable mapping and either the faulting access
		 * is a write or the page has already been marked modified,
		 * so no modified emulation is needed.
		 */
		npte |= LX_BLKPAG_AF;
	} else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
		/*
		 * - The access type indicates that we don't need to do
		 *   referenced emulation.
		 * OR
		 * - The physical page has already been referenced so no need
		 *   to re-do referenced emulation here.
		 */
		npte |= LX_BLKPAG_AF;
	}

	if (prot & VM_PROT_EXECUTE)
		npte &= (is_kernel_pmap_p ? ~LX_BLKPAG_PXN : ~LX_BLKPAG_UXN);

	npte &= ~LX_BLKPAG_ATTR_MASK;
	npte |= pte_memattr(flags);

	/*
	 * Make sure userland mappings are non-global and accessible
	 * from EL0.
	 */
	if (!is_kernel_pmap_p) {
		npte |= LX_BLKPAG_NG | LX_BLKPAG_APUSER;
	}

	return npte;
}
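
/*
 * Referenced/modified emulation: a new mapping is normally entered with
 * the Access Flag clear, so the first touch faults and lets the MI pmap
 * record the reference (and, for a write, the modification) before the
 * flag is set.  The checks above skip that round trip when the faulting
 * access type or the page's recorded state already gives the answer.
 */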
#endif /* __PMAP_PRIVATE */

#endif	/* _AARCH64_PMAP_MACHDEP_H_ */