/*	$NetBSD: pte.h,v 1.15 2025/10/12 04:08:26 thorpej Exp $	*/

/*
 * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
 * Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_PTE_H_
#define _RISCV_PTE_H_

#ifdef _LP64	/* Sv39 */
#define PTE_PPN __BITS(53, 10)
#define PTE_PPN0 __BITS(18, 10)
#define PTE_PPN1 __BITS(27, 19)
#define PTE_PPN2 __BITS(53, 28)
typedef uint64_t pt_entry_t;
typedef uint64_t pd_entry_t;
#define atomic_cas_pte atomic_cas_64
#else	/* Sv32 */
#define PTE_PPN __BITS(31, 10)
#define PTE_PPN0 __BITS(19, 10)
#define PTE_PPN1 __BITS(31, 20)
typedef uint32_t pt_entry_t;
typedef uint32_t pd_entry_t;
#define atomic_cas_pte atomic_cas_32
#endif
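
/*
 * PTE_PPN0..PPN2 (PPN0/PPN1 on Sv32) are the per-level slices of the
 * physical page number, corresponding to the VPN fields consumed at each
 * level of the page-table walk; a leaf PTE at an upper level (a superpage)
 * must have its lower PPN slices zero.
 */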

#define PTE_PPN_SHIFT 10

#define NPTEPG (NBPG / sizeof(pt_entry_t))
#define NSEGPG NPTEPG
#define NPDEPG NPTEPG

/* Hardware PTE bits (Sv39) */
#define PTE_N __BIT(63) // Svnapot
#define PTE_PBMT __BITS(62, 61) // Svpbmt
#define PTE_reserved0 __BITS(60, 54) //

/*
 * Svpbmt (Page Based Memory Types) extension:
 *
 * PMA --> adhere to physical memory attributes
 * NC --> non-cacheable, idempotent, weakly-ordered
 * IO --> non-cacheable, non-idempotent, strongly-ordered
 */
#define PTE_PBMT_PMA __SHIFTIN(0, PTE_PBMT)
#define PTE_PBMT_NC __SHIFTIN(1, PTE_PBMT)
#define PTE_PBMT_IO __SHIFTIN(2, PTE_PBMT)
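
/*
 * Illustrative use only: on a CPU that implements Svpbmt, a device
 * register mapping would select the IO type, e.g.
 *
 *	pte |= PTE_PBMT_IO;	// uncached, strongly-ordered device mapping
 *
 * while ordinary RAM mappings leave the field at the PTE_PBMT_PMA default
 * (0).  How the riscv pmap actually chooses the type is handled via
 * pte_enter_flags_to_pbmt() below.
 */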

/* XTheadMae (Memory Attribute Extensions) */
#define PTE_XMAE __BITS(63, 59)
#define PTE_XMAE_SO __BIT(63) // Strong Order
#define PTE_XMAE_C __BIT(62) // Cacheable
#define PTE_XMAE_B __BIT(61) // Bufferable
#define PTE_XMAE_SH __BIT(60) // Shareable
#define PTE_XMAE_T __BIT(59) // Trustable

/*
 * Map to the rough PBMT equivalent:
 *
 * PMA (i.e. no specific attribute) --> C B SH
 * NC --> B SH
 * IO --> SO SH
 */
#define PTE_XMAE_PMA ( PTE_XMAE_C | PTE_XMAE_B | PTE_XMAE_SH)
#define PTE_XMAE_NC ( PTE_XMAE_B | PTE_XMAE_SH)
#define PTE_XMAE_IO (PTE_XMAE_SO | PTE_XMAE_SH)
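
/*
 * Note that PTE_XMAE occupies the same high bits as the standard
 * PTE_N/PTE_PBMT fields (and part of PTE_reserved0), so the XTheadMae
 * and Svnapot/Svpbmt interpretations of bits 63..59 are mutually
 * exclusive on a given CPU.
 */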

/* Software PTE bits. */
#define PTE_RSW __BITS(9, 8)
#define PTE_WIRED __BIT(9)

/* Hardware PTE bits. */
#define PTE_D __BIT(7) // Dirty
#define PTE_A __BIT(6) // Accessed
#define PTE_G __BIT(5) // Global
#define PTE_U __BIT(4) // User
#define PTE_X __BIT(3) // eXecute
#define PTE_W __BIT(2) // Write
#define PTE_R __BIT(1) // Read
#define PTE_V __BIT(0) // Valid

#define PTE_HARDWIRED (PTE_A | PTE_D)
#define PTE_USER (PTE_V | PTE_U)
#define PTE_KERN (PTE_V | PTE_G)
#define PTE_RW (PTE_R | PTE_W)
#define PTE_RX (PTE_R | PTE_X)
#define PTE_RWX (PTE_R | PTE_W | PTE_X)

#define PTE_ISLEAF_P(pte) (((pte) & PTE_RWX) != 0)

#define PA_TO_PTE(pa) (((pa) >> PGSHIFT) << PTE_PPN_SHIFT)
#define PTE_TO_PA(pte) (__SHIFTOUT((pte), PTE_PPN) << PGSHIFT)
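
/*
 * Worked example (assuming 4KiB base pages, i.e. PGSHIFT == 12):
 * PA 0x80200000 has page frame number 0x80200, so
 * PA_TO_PTE(0x80200000) == 0x80200 << 10 == 0x20080000, and
 * PTE_TO_PA() recovers 0x80200000 from that PTE's PPN field.
 */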

#if defined(_KERNEL)

static inline bool
pte_valid_p(pt_entry_t pte)
{
	return (pte & PTE_V) != 0;
}

static inline bool
pte_wired_p(pt_entry_t pte)
{
	return (pte & PTE_WIRED) != 0;
}

static inline bool
pte_modified_p(pt_entry_t pte)
{
	return (pte & PTE_D) != 0;
}

static inline bool
pte_cached_p(pt_entry_t pte)
{
	/* TODO: This seems wrong... */
	return true;
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{
	return false;
}

static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{
	return pte | PTE_HARDWIRED | PTE_WIRED;
}

static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{
	return pte & ~(PTE_HARDWIRED | PTE_WIRED);
}

static inline paddr_t
pte_to_paddr(pt_entry_t pte)
{
	return PTE_TO_PA(pte);
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{
	return 0;
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{
	return pte & ~PTE_W;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t newprot)
{
	if ((newprot & VM_PROT_READ) == 0)
		pte &= ~PTE_R;
	if ((newprot & VM_PROT_WRITE) == 0)
		pte &= ~PTE_W;
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pte &= ~PTE_X;
	return pte;
}

static inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
{
	KASSERT(prot & VM_PROT_READ);
	pt_entry_t pte = PTE_R;

	if (prot & VM_PROT_EXECUTE) {
		pte |= PTE_X;
	}
	if (prot & VM_PROT_WRITE) {
		pte |= PTE_W;
	}

	return pte;
}

static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
{
	return 0;
}

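/*
 * Defined elsewhere in the riscv kernel; presumably it maps the
 * pmap_enter()/pmap_kenter_pa() flags argument (e.g. cacheability hints
 * such as PMAP_NOCACHE) to a PBMT (or XTheadMae equivalent) memory-type
 * encoding for the new mapping.
 */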
pt_entry_t pte_enter_flags_to_pbmt(int);

static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags, bool kernel_p)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= kernel_p ? PTE_KERN : PTE_USER;
	pte |= pte_flag_bits(mdpg, flags, kernel_p);
	pte |= pte_prot_bits(mdpg, prot, kernel_p);
	pte |= pte_enter_flags_to_pbmt(flags);

	if (mdpg != NULL) {

		if ((prot & VM_PROT_WRITE) != 0 &&
		    ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
			/*
			 * This is a writable mapping, and the page's mod state
			 * indicates it has already been modified.  No need for
			 * modified emulation.
			 */
			pte |= PTE_A | PTE_D;
		} else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
			/*
			 * - The access type indicates that we don't need to do
			 *   referenced emulation.
			 * OR
			 * - The physical page has already been referenced so no need
			 *   to re-do referenced emulation here.
			 */
			pte |= PTE_A;
		}
	} else {
		pte |= PTE_A | PTE_D;
	}

	return pte;
}

static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= PTE_KERN | PTE_HARDWIRED | PTE_WIRED;
	pte |= pte_flag_bits(NULL, flags, true);
	pte |= pte_prot_bits(NULL, prot, true);
	pte |= pte_enter_flags_to_pbmt(flags);

	return pte;
}

static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

static inline pd_entry_t
pte_invalid_pde(void)
{
	return 0;
}

static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{
	return PTE_V | PA_TO_PTE(pa);
}

static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{
	return PTE_V | PA_TO_PTE(pa);
}

static inline bool
pte_pde_valid_p(pd_entry_t pde)
{
	return (pde & (PTE_X | PTE_W | PTE_R | PTE_V)) == PTE_V;
}

static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{
	return pte_to_paddr((pt_entry_t)pde);
}

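/*
 * Atomically replace a page-directory entry: on MULTIPROCESSOR kernels
 * this is a compare-and-swap that returns the previous value; on
 * uniprocessor kernels a plain store suffices and 0 is returned.
 */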
static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pt_entry_t npde)
{
#ifdef MULTIPROCESSOR
#ifdef _LP64
	return atomic_cas_64(pdep, opde, npde);
#else
	return atomic_cas_32(pdep, opde, npde);
#endif
#else
	*pdep = npde;
	return 0;
#endif
}

static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{

	*pdep = npde;
}

static inline pt_entry_t
pte_value(pt_entry_t pte)
{
	return pte;
}

#endif /* _KERNEL */

#endif /* _RISCV_PTE_H_ */