/* $NetBSD: pte.h,v 1.14 2024/10/12 12:27:33 skrll Exp $ */

/*
 * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
 * Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_PTE_H_
#define	_RISCV_PTE_H_

#ifdef _LP64	/* Sv39 */
#define	PTE_PPN		__BITS(53, 10)
#define	PTE_PPN0	__BITS(18, 10)
#define	PTE_PPN1	__BITS(27, 19)
#define	PTE_PPN2	__BITS(53, 28)
typedef uint64_t pt_entry_t;
typedef uint64_t pd_entry_t;
#define	atomic_cas_pte	atomic_cas_64
#else		/* Sv32 */
#define	PTE_PPN		__BITS(31, 10)
#define	PTE_PPN0	__BITS(19, 10)
#define	PTE_PPN1	__BITS(31, 20)
typedef uint32_t pt_entry_t;
typedef uint32_t pd_entry_t;
#define	atomic_cas_pte	atomic_cas_32
#endif

#define	PTE_PPN_SHIFT	10

#define	NPTEPG		(NBPG / sizeof(pt_entry_t))
#define	NSEGPG		NPTEPG
#define	NPDEPG		NPTEPG

/* Hardware PTE bits, Sv39. */
#define	PTE_N		__BIT(63)	// Svnapot
#define	PTE_PBMT	__BITS(62, 61)	// Svpbmt
#define	PTE_reserved0	__BITS(60, 54)

/* Software PTE bits. */
#define	PTE_RSW		__BITS(9, 8)
#define	PTE_WIRED	__BIT(9)

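/*
 * PTE_WIRED is purely a software flag: it lives in the upper of the two
 * RSW bits (9:8), which the RISC-V privileged spec reserves for
 * supervisor software and which the MMU ignores.
 */
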
/* Hardware PTE bits. */
// These are hardware defined bits
#define	PTE_D		__BIT(7)	// Dirty
#define	PTE_A		__BIT(6)	// Accessed
#define	PTE_G		__BIT(5)	// Global
#define	PTE_U		__BIT(4)	// User
#define	PTE_X		__BIT(3)	// eXecute
#define	PTE_W		__BIT(2)	// Write
#define	PTE_R		__BIT(1)	// Read
#define	PTE_V		__BIT(0)	// Valid

#define	PTE_HARDWIRED	(PTE_A | PTE_D)
#define	PTE_USER	(PTE_V | PTE_U)
#define	PTE_KERN	(PTE_V | PTE_G)
#define	PTE_RW		(PTE_R | PTE_W)
#define	PTE_RX		(PTE_R | PTE_X)
#define	PTE_RWX		(PTE_R | PTE_W | PTE_X)

#define	PTE_ISLEAF_P(pte)	(((pte) & PTE_RWX) != 0)

#define	PA_TO_PTE(pa)	(((pa) >> PGSHIFT) << PTE_PPN_SHIFT)
#define	PTE_TO_PA(pte)	(((pte) >> PTE_PPN_SHIFT) << PGSHIFT)

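/*
 * Worked example, assuming 4 KiB base pages (PGSHIFT == 12): for the
 * physical address 0x80200000, PA_TO_PTE() yields
 * (0x80200000 >> 12) << 10 == 0x20080000, i.e. the PPN shifted into
 * PTE_PPN with bits 9..0 left free for the flag bits above; PTE_TO_PA()
 * reverses the shifts and so discards those flag bits.
 */
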
#if defined(_KERNEL)

static inline bool
pte_valid_p(pt_entry_t pte)
{
	return (pte & PTE_V) != 0;
}

static inline bool
pte_wired_p(pt_entry_t pte)
{
	return (pte & PTE_WIRED) != 0;
}

static inline bool
pte_modified_p(pt_entry_t pte)
{
	return (pte & PTE_D) != 0;
}

static inline bool
pte_cached_p(pt_entry_t pte)
{
	/* TODO: This seems wrong... */
	return true;
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{
	return false;
}

static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{
	return pte | PTE_HARDWIRED | PTE_WIRED;
}

static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{
	return pte & ~(PTE_HARDWIRED | PTE_WIRED);
}

static inline paddr_t
pte_to_paddr(pt_entry_t pte)
{
	return PTE_TO_PA(pte);
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{
	return 0;
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{
	return pte & ~PTE_W;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t newprot)
{
	if ((newprot & VM_PROT_READ) == 0)
		pte &= ~PTE_R;
	if ((newprot & VM_PROT_WRITE) == 0)
		pte &= ~PTE_W;
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pte &= ~PTE_X;
	return pte;
}

static inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
{
	KASSERT(prot & VM_PROT_READ);
	pt_entry_t pte = PTE_R;

	if (prot & VM_PROT_EXECUTE) {
		pte |= PTE_X;
	}
	if (prot & VM_PROT_WRITE) {
		pte |= PTE_W;
	}

	return pte;
}

static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
{
	return 0;
}

static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags, bool kernel_p)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= kernel_p ? PTE_KERN : PTE_USER;
	pte |= pte_flag_bits(mdpg, flags, kernel_p);
	pte |= pte_prot_bits(mdpg, prot, kernel_p);

	if (mdpg != NULL) {

		if ((prot & VM_PROT_WRITE) != 0 &&
		    ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
			/*
			 * This is a writable mapping, and the page's mod state
			 * indicates it has already been modified.  No need for
			 * modified emulation.
			 */
			pte |= PTE_A | PTE_D;
		} else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
			/*
			 * - The access type indicates that we don't need to do
			 *   referenced emulation.
			 * OR
			 * - The physical page has already been referenced so no need
			 *   to re-do referenced emulation here.
			 */
			pte |= PTE_A;
		}
	} else {
		pte |= PTE_A | PTE_D;
	}

	return pte;
}

static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= PTE_KERN | PTE_HARDWIRED | PTE_WIRED;
	pte |= pte_flag_bits(NULL, flags, true);
	pte |= pte_prot_bits(NULL, prot, true);

	return pte;
}

static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

static inline pd_entry_t
pte_invalid_pde(void)
{
	return 0;
}

static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{
	return PTE_V | PA_TO_PTE(pa);
}

static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{
	return PTE_V | PA_TO_PTE(pa);
}

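/*
 * A page-directory entry is a non-leaf PTE: the RISC-V privileged spec
 * treats an entry with V set and R/W/X all clear as a pointer to the
 * next level of the page table.  That is why the two helpers above set
 * only PTE_V alongside the PPN, and why pte_pde_valid_p() below
 * requires R, W and X to be zero.
 */
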
static inline bool
pte_pde_valid_p(pd_entry_t pde)
{
	return (pde & (PTE_X | PTE_W | PTE_R | PTE_V)) == PTE_V;
}

static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{
	return pte_to_paddr((pt_entry_t)pde);
}

static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pt_entry_t npde)
{
#ifdef MULTIPROCESSOR
#ifdef _LP64
	return atomic_cas_64(pdep, opde, npde);
#else
	return atomic_cas_32(pdep, opde, npde);
#endif
#else
	*pdep = npde;
	return 0;
#endif
}

static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{

	*pdep = npde;
}

static inline pt_entry_t
pte_value(pt_entry_t pte)
{
	return pte;
}

#endif /* _KERNEL */

#endif /* _RISCV_PTE_H_ */