/* $NetBSD: pte.h,v 1.15 2025/10/12 04:08:26 thorpej Exp $ */

/*
 * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
 * Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_PTE_H_
#define _RISCV_PTE_H_

#ifdef _LP64 /* Sv39 */
#define PTE_PPN __BITS(53, 10)
#define PTE_PPN0 __BITS(18, 10)
#define PTE_PPN1 __BITS(27, 19)
#define PTE_PPN2 __BITS(53, 28)
typedef uint64_t pt_entry_t;
typedef uint64_t pd_entry_t;
#define atomic_cas_pte atomic_cas_64
#else /* Sv32 */
#define PTE_PPN __BITS(31, 10)
#define PTE_PPN0 __BITS(19, 10)
#define PTE_PPN1 __BITS(31, 20)
typedef uint32_t pt_entry_t;
typedef uint32_t pd_entry_t;
#define atomic_cas_pte atomic_cas_32
#endif

#define PTE_PPN_SHIFT 10
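
/*
 * The PTE_PPNx fields split the physical page number at the same level
 * boundaries as the virtual page number used during a page-table walk
 * (9-bit levels under Sv39, 10-bit under Sv32); the top-level field is
 * wider so that the full physical address range is representable.
 */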

#define NPTEPG (NBPG / sizeof(pt_entry_t))
#define NSEGPG NPTEPG
#define NPDEPG NPTEPG

/* Hardware PTE bits (Sv39) */
#define PTE_N __BIT(63) // Svnapot
#define PTE_PBMT __BITS(62, 61) // Svpbmt
#define PTE_reserved0 __BITS(60, 54) //

/*
 * Svpbmt (Page Based Memory Types) extension:
 *
 * PMA --> adhere to physical memory attributes
 * NC --> non-cacheable, idempotent, weakly-ordered
 * IO --> non-cacheable, non-idempotent, strongly-ordered
 */
#define PTE_PBMT_PMA __SHIFTIN(0, PTE_PBMT)
#define PTE_PBMT_NC __SHIFTIN(1, PTE_PBMT)
#define PTE_PBMT_IO __SHIFTIN(2, PTE_PBMT)
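
/*
 * Illustrative use: on a CPU that implements Svpbmt, a device register
 * mapping would typically combine PTE_PBMT_IO with the usual permission
 * bits (e.g. PTE_KERN | PTE_RW | PTE_PBMT_IO, see below), while normal
 * memory keeps the default PTE_PBMT_PMA (encoded as 0).  CPUs without
 * Svpbmt require these bits to be zero.
 */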

/* XTheadMae (Memory Attribute Extensions) */
#define PTE_XMAE __BITS(63, 59)
#define PTE_XMAE_SO __BIT(63) // Strong Order
#define PTE_XMAE_C __BIT(62) // Cacheable
#define PTE_XMAE_B __BIT(61) // Bufferable
#define PTE_XMAE_SH __BIT(60) // Shareable
#define PTE_XMAE_T __BIT(59) // Trustable

/*
 * Map to the rough PBMT equivalent:
 *
 * PMA (i.e. no specific attribute) --> C B SH
 * NC --> B SH
 * IO --> SO SH
 */
#define PTE_XMAE_PMA ( PTE_XMAE_C | PTE_XMAE_B | PTE_XMAE_SH)
#define PTE_XMAE_NC ( PTE_XMAE_B | PTE_XMAE_SH)
#define PTE_XMAE_IO (PTE_XMAE_SO | PTE_XMAE_SH)
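
/*
 * Note: the XTheadMae bits occupy PTE bits 63..59 and therefore overlap
 * the standard PTE_N, PTE_PBMT and reserved fields above, so only one of
 * the two encodings can be in effect on a given CPU.
 */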

/* Software PTE bits. */
#define PTE_RSW __BITS(9, 8)
#define PTE_WIRED __BIT(9)

/* Hardware-defined PTE bits. */
#define PTE_D __BIT(7) // Dirty
#define PTE_A __BIT(6) // Accessed
#define PTE_G __BIT(5) // Global
#define PTE_U __BIT(4) // User
#define PTE_X __BIT(3) // eXecute
#define PTE_W __BIT(2) // Write
#define PTE_R __BIT(1) // Read
#define PTE_V __BIT(0) // Valid

#define PTE_HARDWIRED (PTE_A | PTE_D)
#define PTE_USER (PTE_V | PTE_U)
#define PTE_KERN (PTE_V | PTE_G)
#define PTE_RW (PTE_R | PTE_W)
#define PTE_RX (PTE_R | PTE_X)
#define PTE_RWX (PTE_R | PTE_W | PTE_X)
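
/*
 * PTE_HARDWIRED (above) pre-sets the Accessed and Dirty bits, so wired
 * mappings never take the page faults otherwise used for referenced/
 * modified emulation.
 */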

#define PTE_ISLEAF_P(pte) (((pte) & PTE_RWX) != 0)
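
/*
 * The test above follows the RISC-V privileged spec: an entry with any of
 * R/W/X set is a leaf mapping; with all three clear (and V set) it is a
 * pointer to the next level of the page table.
 */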

#define PA_TO_PTE(pa) (((pa) >> PGSHIFT) << PTE_PPN_SHIFT)
#define PTE_TO_PA(pte) (__SHIFTOUT((pte), PTE_PPN) << PGSHIFT)
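
/*
 * Worked example (illustrative, assuming 4KiB pages, PGSHIFT == 12):
 * pa 0x80200000 has PPN 0x80200, so PA_TO_PTE(0x80200000) ==
 * 0x80200 << 10 == 0x20080000.  PTE_TO_PA() extracts PTE_PPN and shifts
 * it back up by PGSHIFT to recover 0x80200000.
 */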

#if defined(_KERNEL)

static inline bool
pte_valid_p(pt_entry_t pte)
{
	return (pte & PTE_V) != 0;
}

static inline bool
pte_wired_p(pt_entry_t pte)
{
	return (pte & PTE_WIRED) != 0;
}

static inline bool
pte_modified_p(pt_entry_t pte)
{
	return (pte & PTE_D) != 0;
}

static inline bool
pte_cached_p(pt_entry_t pte)
{
	/* TODO: This seems wrong... */
	return true;
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{
	return false;
}

static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{
	return pte | PTE_HARDWIRED | PTE_WIRED;
}

static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{
	return pte & ~(PTE_HARDWIRED | PTE_WIRED);
}

static inline paddr_t
pte_to_paddr(pt_entry_t pte)
{
	return PTE_TO_PA(pte);
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{
	return 0;
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{
	return pte & ~PTE_W;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t newprot)
{
	if ((newprot & VM_PROT_READ) == 0)
		pte &= ~PTE_R;
	if ((newprot & VM_PROT_WRITE) == 0)
		pte &= ~PTE_W;
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pte &= ~PTE_X;
	return pte;
}

static inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
{
	KASSERT(prot & VM_PROT_READ);
	pt_entry_t pte = PTE_R;

	if (prot & VM_PROT_EXECUTE) {
		pte |= PTE_X;
	}
	if (prot & VM_PROT_WRITE) {
		pte |= PTE_W;
	}

	return pte;
}

static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
{
	return 0;
}

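/*
 * Translate pmap_enter()/pmap_kenter_pa() flags into a memory-type field
 * for the PTE (presumably PTE_PBMT_* bits, or the XTheadMae equivalent on
 * hardware that uses it); the actual mapping is done in the
 * machine-dependent pmap code.
 */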
pt_entry_t pte_enter_flags_to_pbmt(int);

static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags, bool kernel_p)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= kernel_p ? PTE_KERN : PTE_USER;
	pte |= pte_flag_bits(mdpg, flags, kernel_p);
	pte |= pte_prot_bits(mdpg, prot, kernel_p);
	pte |= pte_enter_flags_to_pbmt(flags);

	if (mdpg != NULL) {

		if ((prot & VM_PROT_WRITE) != 0 &&
		    ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
			/*
			 * This is a writable mapping and either the access
			 * itself is a write or the page has already been
			 * marked modified, so no modified emulation is
			 * needed.
			 */
			pte |= PTE_A;
		} else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
			/*
			 * - The access type indicates that we don't need to do
			 *   referenced emulation.
			 * OR
			 * - The physical page has already been referenced so no need
			 *   to re-do referenced emulation here.
			 */
			pte |= PTE_A;
		}
	} else {
		pte |= PTE_A | PTE_D;
	}

	return pte;
}

static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= PTE_KERN | PTE_HARDWIRED | PTE_WIRED;
	pte |= pte_flag_bits(NULL, flags, true);
	pte |= pte_prot_bits(NULL, prot, true);
	pte |= pte_enter_flags_to_pbmt(flags);

	return pte;
}

static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

static inline pd_entry_t
pte_invalid_pde(void)
{
	return 0;
}

static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{
	return PTE_V | PA_TO_PTE(pa);
}

static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{
	return PTE_V | PA_TO_PTE(pa);
}

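/*
 * A page-directory entry is "valid" here only when it points to a
 * next-level table: V set with R/W/X all clear.  An entry with any of
 * R/W/X set would be a leaf (superpage) mapping instead.
 */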
static inline bool
pte_pde_valid_p(pd_entry_t pde)
{
	return (pde & (PTE_X | PTE_W | PTE_R | PTE_V)) == PTE_V;
}

static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{
	return pte_to_paddr((pt_entry_t)pde);
}

static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pt_entry_t npde)
{
#ifdef MULTIPROCESSOR
#ifdef _LP64
	return atomic_cas_64(pdep, opde, npde);
#else
	return atomic_cas_32(pdep, opde, npde);
#endif
#else
	*pdep = npde;
	return 0;
#endif
}

static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{

	*pdep = npde;
}

static inline pt_entry_t
pte_value(pt_entry_t pte)
{
	return pte;
}

#endif /* _KERNEL */

#endif /* _RISCV_PTE_H_ */