/* $NetBSD: pte.h,v 1.16 2025/10/12 19:44:04 skrll Exp $ */

/*
 * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
 * Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_PTE_H_
#define _RISCV_PTE_H_

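/*
 * PTE physical page number (PPN) field layout: an Sv39 PTE carries a
 * 44-bit PPN split into 9/9/26-bit per-level fields, an Sv32 PTE a
 * 22-bit PPN split into 10/12-bit fields.
 */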
#ifdef _LP64	/* Sv39 */
#define	PTE_PPN		__BITS(53, 10)
#define	PTE_PPN0	__BITS(18, 10)
#define	PTE_PPN1	__BITS(27, 19)
#define	PTE_PPN2	__BITS(53, 28)
typedef uint64_t pt_entry_t;
typedef uint64_t pd_entry_t;
#define	atomic_cas_pte	atomic_cas_64
#else	/* Sv32 */
#define	PTE_PPN		__BITS(31, 10)
#define	PTE_PPN0	__BITS(19, 10)
#define	PTE_PPN1	__BITS(31, 20)
typedef uint32_t pt_entry_t;
typedef uint32_t pd_entry_t;
#define	atomic_cas_pte	atomic_cas_32
#endif

#define	PTE_PPN_SHIFT	10

#define	NPTEPG		(NBPG / sizeof(pt_entry_t))
#define	NSEGPG		NPTEPG
#define	NPDEPG		NPTEPG

/* Hardware PTE bits (Sv39). */
#define	PTE_N		__BIT(63)	// Svnapot
#define	PTE_PBMT	__BITS(62, 61)	// Svpbmt
#define	PTE_reserved0	__BITS(60, 54)

/*
 * Svpbmt (Page Based Memory Types) extension:
 *
 *	PMA --> adhere to physical memory attributes
 *	NC  --> non-cacheable, idempotent, weakly-ordered
 *	IO  --> non-cacheable, non-idempotent, strongly-ordered
 */
#define	PTE_PBMT_PMA	__SHIFTIN(0, PTE_PBMT)
#define	PTE_PBMT_NC	__SHIFTIN(1, PTE_PBMT)
#define	PTE_PBMT_IO	__SHIFTIN(2, PTE_PBMT)
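
/*
 * Illustrative example only: a kernel mapping of a device register page
 * would typically combine PTE_PBMT_IO with the usual permission bits,
 * e.g. PTE_KERN | PTE_RW | PTE_PBMT_IO, while ordinary RAM mappings can
 * leave the field at PTE_PBMT_PMA (zero).
 */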

/* XTheadMae (Memory Attribute Extensions) */
#define	PTE_XMAE	__BITS(63, 59)
#define	PTE_XMAE_SO	__BIT(63)	// Strong Order
#define	PTE_XMAE_C	__BIT(62)	// Cacheable
#define	PTE_XMAE_B	__BIT(61)	// Bufferable
#define	PTE_XMAE_SH	__BIT(60)	// Shareable
#define	PTE_XMAE_T	__BIT(59)	// Trustable

/*
 * Map to the rough PBMT equivalent:
 *
 *	PMA (i.e. no specific attribute) --> C B SH
 *	NC                               --> B SH
 *	IO                               --> SO SH
 */
#define	PTE_XMAE_PMA	(PTE_XMAE_C | PTE_XMAE_B | PTE_XMAE_SH)
#define	PTE_XMAE_NC	(PTE_XMAE_B | PTE_XMAE_SH)
#define	PTE_XMAE_IO	(PTE_XMAE_SO | PTE_XMAE_SH)

/* Software PTE bits. */
#define	PTE_RSW		__BITS(9, 8)
#define	PTE_WIRED	__BIT(9)

/* Hardware PTE bits. */
#define	PTE_D		__BIT(7)	// Dirty
#define	PTE_A		__BIT(6)	// Accessed
#define	PTE_G		__BIT(5)	// Global
#define	PTE_U		__BIT(4)	// User
#define	PTE_X		__BIT(3)	// eXecute
#define	PTE_W		__BIT(2)	// Write
#define	PTE_R		__BIT(1)	// Read
#define	PTE_V		__BIT(0)	// Valid

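/*
 * PTE_HARDWIRED presets the accessed and dirty bits so that wired
 * mappings never take a fault for referenced/modified tracking.
 */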
#define	PTE_HARDWIRED	(PTE_A | PTE_D)
#define	PTE_USER	(PTE_V | PTE_U)
#define	PTE_KERN	(PTE_V | PTE_G)
#define	PTE_RW		(PTE_R | PTE_W)
#define	PTE_RX		(PTE_R | PTE_X)
#define	PTE_RWX		(PTE_R | PTE_W | PTE_X)

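/*
 * A PTE is a leaf (it maps a page) if any of R/W/X is set; otherwise it
 * points to the next level of the page table.
 */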
#define	PTE_ISLEAF_P(pte)	(((pte) & PTE_RWX) != 0)

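/*
 * Convert between a physical address and the PPN field of a PTE.  With
 * 4KiB pages (PGSHIFT == 12), e.g. PA_TO_PTE(0x80201000) places PPN
 * 0x80201 at bit 10, giving 0x20080400; PTE_TO_PA() recovers the
 * original physical address.
 */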
#define	PA_TO_PTE(pa)	(((pa) >> PGSHIFT) << PTE_PPN_SHIFT)
#define	PTE_TO_PA(pte)	(__SHIFTOUT((pte), PTE_PPN) << PGSHIFT)

#if defined(_KERNEL)

static inline bool
pte_valid_p(pt_entry_t pte)
{
	return (pte & PTE_V) != 0;
}

static inline bool
pte_wired_p(pt_entry_t pte)
{
	return (pte & PTE_WIRED) != 0;
}

static inline bool
pte_modified_p(pt_entry_t pte)
{
	return (pte & PTE_D) != 0;
}

static inline bool
pte_cached_p(pt_entry_t pte)
{
	/* TODO: This seems wrong... */
	return true;
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{
	return false;
}

static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{
	return pte | PTE_HARDWIRED | PTE_WIRED;
}

static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{
	return pte & ~(PTE_HARDWIRED | PTE_WIRED);
}

static inline paddr_t
pte_to_paddr(pt_entry_t pte)
{
	return PTE_TO_PA(pte);
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{
	return 0;
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{
	return pte & ~PTE_W;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t newprot)
{
	if ((newprot & VM_PROT_READ) == 0)
		pte &= ~PTE_R;
	if ((newprot & VM_PROT_WRITE) == 0)
		pte &= ~PTE_W;
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pte &= ~PTE_X;
	return pte;
}

static inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
{
	KASSERT(prot & VM_PROT_READ);
	pt_entry_t pte = PTE_R;

	if (prot & VM_PROT_EXECUTE) {
		pte |= PTE_X;
	}
	if (prot & VM_PROT_WRITE) {
		pte |= PTE_W;
	}

	return pte;
}

static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
{
	return 0;
}

#ifdef _LP64
pt_entry_t	pte_enter_flags_to_pbmt(int);
#else
static inline pt_entry_t
pte_enter_flags_to_pbmt(int flags)
{
	return 0;
}
#endif

static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags, bool kernel_p)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= kernel_p ? PTE_KERN : PTE_USER;
	pte |= pte_flag_bits(mdpg, flags, kernel_p);
	pte |= pte_prot_bits(mdpg, prot, kernel_p);
	pte |= pte_enter_flags_to_pbmt(flags);

	if (mdpg != NULL) {

		if ((prot & VM_PROT_WRITE) != 0 &&
		    ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
			/*
			 * This is a writable mapping, and the page's mod state
			 * indicates it has already been modified.  No need for
			 * modified emulation.
			 */
			pte |= PTE_A | PTE_D;
		} else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
			/*
			 * - The access type indicates that we don't need to do
			 *   referenced emulation.
			 * OR
			 * - The physical page has already been referenced so no
			 *   need to re-do referenced emulation here.
			 */
			pte |= PTE_A;
		}
	} else {
		pte |= PTE_A | PTE_D;
	}

	return pte;
}

static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= PTE_KERN | PTE_HARDWIRED | PTE_WIRED;
	pte |= pte_flag_bits(NULL, flags, true);
	pte |= pte_prot_bits(NULL, prot, true);
	pte |= pte_enter_flags_to_pbmt(flags);

	return pte;
}

static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

static inline pd_entry_t
pte_invalid_pde(void)
{
	return 0;
}

static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{
	return PTE_V | PA_TO_PTE(pa);
}

static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{
	return PTE_V | PA_TO_PTE(pa);
}

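/*
 * A PDE that points at a next-level page table has PTE_V set and R/W/X
 * all clear; an entry with any of R/W/X set is a leaf mapping and is
 * not a valid PDE here.
 */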
static inline bool
pte_pde_valid_p(pd_entry_t pde)
{
	return (pde & (PTE_X | PTE_W | PTE_R | PTE_V)) == PTE_V;
}

static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{
	return pte_to_paddr((pt_entry_t)pde);
}

static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pd_entry_t npde)
{
#ifdef MULTIPROCESSOR
#ifdef _LP64
	return atomic_cas_64(pdep, opde, npde);
#else
	return atomic_cas_32(pdep, opde, npde);
#endif
#else
	*pdep = npde;
	return 0;
#endif
}

static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{

	*pdep = npde;
}

static inline pt_entry_t
pte_value(pt_entry_t pte)
{
	return pte;
}

#endif /* _KERNEL */

#endif /* _RISCV_PTE_H_ */