/* $NetBSD: pte.h,v 1.14 2024/10/12 12:27:33 skrll Exp $ */

/*
 * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
 * Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_PTE_H_
#define	_RISCV_PTE_H_

#ifdef _LP64	/* Sv39 */
#define	PTE_PPN		__BITS(53, 10)
#define	PTE_PPN0	__BITS(18, 10)
#define	PTE_PPN1	__BITS(27, 19)
#define	PTE_PPN2	__BITS(53, 28)
typedef uint64_t pt_entry_t;
typedef uint64_t pd_entry_t;
#define	atomic_cas_pte	atomic_cas_64
#else		/* Sv32 */
#define	PTE_PPN		__BITS(31, 10)
#define	PTE_PPN0	__BITS(19, 10)
#define	PTE_PPN1	__BITS(31, 20)
typedef uint32_t pt_entry_t;
typedef uint32_t pd_entry_t;
#define	atomic_cas_pte	atomic_cas_32
#endif
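
/*
 * PTE_PPN is the full physical page number field of a PTE; PTE_PPN0/1/2
 * are its per-level slices.  For a superpage leaf, the slices below the
 * leaf level must be zero (RISC-V privileged specification).
 */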

#define	PTE_PPN_SHIFT	10

#define	NPTEPG		(NBPG / sizeof(pt_entry_t))
#define	NSEGPG		NPTEPG
#define	NPDEPG		NPTEPG
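
/*
 * With 4 KiB pages (NBPG == 4096) this is 512 PTEs per page-table page
 * on Sv39 (8-byte entries) and 1024 on Sv32 (4-byte entries).
 */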

/* Hardware PTE bits (Sv39 only). */
#define	PTE_N		__BIT(63)	// Svnapot
#define	PTE_PBMT	__BITS(62, 61)	// Svpbmt
#define	PTE_reserved0	__BITS(60, 54)	// reserved for future use

/* Software PTE bits (stored in the RSW field reserved for software). */
#define	PTE_RSW		__BITS(9, 8)
#define	PTE_WIRED	__BIT(9)

/* Hardware PTE bits (defined by the RISC-V privileged specification). */
#define	PTE_D		__BIT(7)	// Dirty
#define	PTE_A		__BIT(6)	// Accessed
#define	PTE_G		__BIT(5)	// Global
#define	PTE_U		__BIT(4)	// User
#define	PTE_X		__BIT(3)	// eXecute
#define	PTE_W		__BIT(2)	// Write
#define	PTE_R		__BIT(1)	// Read
#define	PTE_V		__BIT(0)	// Valid

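/*
 * PTE_HARDWIRED pre-sets the Accessed and Dirty bits so a wired mapping
 * never takes an A/D update fault, even on implementations that do not
 * update A/D in hardware.
 */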
#define	PTE_HARDWIRED	(PTE_A | PTE_D)
#define	PTE_USER	(PTE_V | PTE_U)
#define	PTE_KERN	(PTE_V | PTE_G)
#define	PTE_RW		(PTE_R | PTE_W)
#define	PTE_RX		(PTE_R | PTE_X)
#define	PTE_RWX		(PTE_R | PTE_W | PTE_X)

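/*
 * A PTE with any of R/W/X set is a leaf mapping; a valid PTE with all
 * three clear is a pointer to the next-level page table.
 */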
#define	PTE_ISLEAF_P(pte) (((pte) & PTE_RWX) != 0)

#define	PA_TO_PTE(pa)	(((pa) >> PGSHIFT) << PTE_PPN_SHIFT)
#define	PTE_TO_PA(pte)	(((pte) >> PTE_PPN_SHIFT) << PGSHIFT)
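
/*
 * Example, assuming 4 KiB pages (PGSHIFT == 12): PA_TO_PTE(0x80200000)
 * yields 0x20080000 and PTE_TO_PA(0x20080000) recovers 0x80200000; the
 * page offset in the low bits of the physical address is discarded.
 */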

#if defined(_KERNEL)

static inline bool
pte_valid_p(pt_entry_t pte)
{
	return (pte & PTE_V) != 0;
}

static inline bool
pte_wired_p(pt_entry_t pte)
{
	return (pte & PTE_WIRED) != 0;
}

static inline bool
pte_modified_p(pt_entry_t pte)
{
	return (pte & PTE_D) != 0;
}

static inline bool
pte_cached_p(pt_entry_t pte)
{
	/* TODO: This seems wrong... */
	return true;
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{
	/* Execute permission is never deferred on riscv. */
	return false;
}

static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{
	return pte | PTE_HARDWIRED | PTE_WIRED;
}

static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{
	return pte & ~(PTE_HARDWIRED | PTE_WIRED);
}

static inline paddr_t
pte_to_paddr(pt_entry_t pte)
{
	return PTE_TO_PA(pte);
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{
	/* A zero PTE has PTE_V clear and so is invalid. */
	return 0;
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{
	return pte & ~PTE_W;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t newprot)
{
	if ((newprot & VM_PROT_READ) == 0)
		pte &= ~PTE_R;
	if ((newprot & VM_PROT_WRITE) == 0)
		pte &= ~PTE_W;
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pte &= ~PTE_X;
	return pte;
}

static inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
{
	/* Every mapping must be readable; execute-only is not supported. */
	KASSERT(prot & VM_PROT_READ);
	pt_entry_t pte = PTE_R;

	if (prot & VM_PROT_EXECUTE) {
		pte |= PTE_X;
	}
	if (prot & VM_PROT_WRITE) {
		pte |= PTE_W;
	}

	return pte;
}

static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
{
	/* No attribute bits are currently derived from flags. */
	return 0;
}

static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags, bool kernel_p)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= kernel_p ? PTE_KERN : PTE_USER;
	pte |= pte_flag_bits(mdpg, flags, kernel_p);
	pte |= pte_prot_bits(mdpg, prot, kernel_p);

	if (mdpg != NULL) {
		if ((prot & VM_PROT_WRITE) != 0 &&
		    ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
			/*
			 * This is a writable mapping, and the page's mod state
			 * indicates it has already been modified.  No need for
			 * modified emulation.
			 */
			pte |= PTE_A | PTE_D;
		} else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
			/*
			 * - The access type indicates that we don't need to do
			 *   referenced emulation.
			 * OR
			 * - The physical page has already been referenced so no
			 *   need to re-do referenced emulation here.
			 */
			pte |= PTE_A;
		}
	} else {
		/* Unmanaged page: no mod/ref tracking, pre-set A and D. */
		pte |= PTE_A | PTE_D;
	}

	return pte;
}
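
/*
 * Illustrative sketch (not code from this file): a pmap-style caller
 * would build and install a mapping roughly as
 *
 *	pt_entry_t npte = pte_make_enter(pa, mdpg, prot, flags, kernel_p);
 *	pte_set(ptep, npte);
 *	// ...followed by an SFENCE.VMA if a stale translation for this
 *	// address could still be cached in the TLB.
 */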

static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= PTE_KERN | PTE_HARDWIRED | PTE_WIRED;
	pte |= pte_flag_bits(NULL, flags, true);
	pte |= pte_prot_bits(NULL, prot, true);

	return pte;
}
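
/*
 * Since PTE_HARDWIRED is PTE_A | PTE_D, kenter mappings start with
 * Accessed and Dirty pre-set and never fault for A/D updates.
 */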

static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	/* Plain store; any required fence or TLB flush is the caller's job. */
	*ptep = pte;
}

static inline pd_entry_t
pte_invalid_pde(void)
{
	return 0;
}

static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{
	/* V set with R/W/X clear: a pointer to the next-level table. */
	return PTE_V | PA_TO_PTE(pa);
}

static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{
	return PTE_V | PA_TO_PTE(pa);
}

static inline bool
pte_pde_valid_p(pd_entry_t pde)
{
	/* Valid and not a leaf (no permission bits set). */
	return (pde & (PTE_X | PTE_W | PTE_R | PTE_V)) == PTE_V;
}

static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{
	return pte_to_paddr((pt_entry_t)pde);
}

static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pd_entry_t npde)
{
#ifdef MULTIPROCESSOR
#ifdef _LP64
	return atomic_cas_64(pdep, opde, npde);
#else
	return atomic_cas_32(pdep, opde, npde);
#endif
#else
	/* No atomicity needed on uniprocessor kernels. */
	*pdep = npde;
	return 0;
#endif
}
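
/*
 * Illustrative sketch (not code from this file; pt_pa stands for a
 * hypothetical new page-table page): racing installers of a PDE would
 * typically do
 *
 *	pd_entry_t opde = pte_invalid_pde();
 *	pd_entry_t npde = pte_pde_ptpage(pt_pa, kernel_p);
 *	if (pte_pde_cas(pdep, opde, npde) != opde) {
 *		// Lost the race: another CPU already installed a
 *		// page-table page here; free pt_pa and use theirs.
 *	}
 */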

static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{

	*pdep = npde;
}

static inline pt_entry_t
pte_value(pt_entry_t pte)
{
	return pte;
}

#endif /* _KERNEL */

#endif /* _RISCV_PTE_H_ */