/* $NetBSD: pte.h,v 1.11 2022/11/12 07:34:18 skrll Exp $ */

/*
 * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
 * Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_PTE_H_
#define _RISCV_PTE_H_

#ifdef _LP64	/* Sv39 */
#define PTE_PPN		__BITS(53, 10)
#define PTE_PPN0	__BITS(18, 10)
#define PTE_PPN1	__BITS(27, 19)
#define PTE_PPN2	__BITS(53, 28)
typedef uint64_t pt_entry_t;
typedef uint64_t pd_entry_t;
#define atomic_cas_pte	atomic_cas_64
#else	/* Sv32 */
#define PTE_PPN		__BITS(31, 10)
#define PTE_PPN0	__BITS(19, 10)
#define PTE_PPN1	__BITS(31, 20)
typedef uint32_t pt_entry_t;
typedef uint32_t pd_entry_t;
#define atomic_cas_pte	atomic_cas_32
#endif
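
/*
 * Layout note (RISC-V privileged spec): the physical page number lives
 * in pte[53:10] under Sv39 and pte[31:10] under Sv32.  Each PTE_PPNn
 * sub-field corresponds to one level of the page-table walk: Sv39
 * splits the PPN 26/9/9 bits, Sv32 splits it 12/10.
 */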

#define PTE_PPN_SHIFT	10

#define NPTEPG		(PAGE_SIZE / sizeof(pt_entry_t))
#define NSEGPG		NPTEPG
#define NPDEPG		NPTEPG

/* Hardware PTE bits (Sv39 extensions). */
#define PTE_N		__BIT(63)	// Svnapot
#define PTE_PBMT	__BITS(62, 61)	// Svpbmt
#define PTE_reserved0	__BITS(60, 54)	// reserved for future standard use

/* Software PTE bits. */
#define PTE_RSW		__BITS(9, 8)
#define PTE_WIRED	__BIT(9)
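
/*
 * Note: the privileged spec reserves pte[9:8] (RSW) for software use,
 * so PTE_WIRED, which claims the upper of those two bits, never
 * collides with a hardware-defined field.
 */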

/* Hardware-defined PTE bits. */
#define PTE_D		__BIT(7)	// Dirty
#define PTE_A		__BIT(6)	// Accessed
#define PTE_G		__BIT(5)	// Global
#define PTE_U		__BIT(4)	// User
#define PTE_X		__BIT(3)	// eXecute
#define PTE_W		__BIT(2)	// Write
#define PTE_R		__BIT(1)	// Read
#define PTE_V		__BIT(0)	// Valid

#define PTE_HARDWIRED	(PTE_A | PTE_D)
#define PTE_KERN	(PTE_V | PTE_G | PTE_A | PTE_D)
#define PTE_RW		(PTE_R | PTE_W)
#define PTE_RX		(PTE_R | PTE_X)

#define PA_TO_PTE(pa)	(((pa) >> PAGE_SHIFT) << PTE_PPN_SHIFT)
#define PTE_TO_PA(pte)	(((pte) >> PTE_PPN_SHIFT) << PAGE_SHIFT)
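
/*
 * Worked example (4 KiB pages, so PAGE_SHIFT == 12): for pa 0x80200000,
 * PA_TO_PTE() yields (0x80200000 >> 12) << 10 == 0x80200 << 10 ==
 * 0x20080000, i.e. PPN 0x80200 placed in the PTE_PPN field.
 * PTE_TO_PA() inverts this exactly, since the low PTE_PPN_SHIFT bits
 * of a PTE carry no address information.
 */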
87
88 #define L2_SHIFT 30
89 #define L1_SHIFT 21
90 #define L0_SHIFT 12
91
92 #define L2_SIZE (1 << L2_SHIFT)
93 #define L1_SIZE (1 << L1_SHIFT)
94 #define L0_SIZE (1 << L0_SHIFT)
95
96 #define L2_OFFSET (L2_SIZE - 1)
97 #define L1_OFFSET (L1_SIZE - 1)
98 #define L0_OFFSET (L0_SIZE - 1)
99
100 #define Ln_ENTRIES (1 << 9)
101 #define Ln_ADDR_MASK (Ln_ENTRIES - 1)
102
103 #define pl2_i(va) (((va) >> L2_SHIFT) & Ln_ADDR_MASK)
104 #define pl1_i(va) (((va) >> L1_SHIFT) & Ln_ADDR_MASK)
105 #define pl0_i(va) (((va) >> L0_SHIFT) & Ln_ADDR_MASK)
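
/*
 * Under Sv39 a virtual address therefore decomposes as
 * va[38:30] -> L2 index, va[29:21] -> L1 index, va[20:12] -> L0 index,
 * va[11:0] -> byte offset within the 4 KiB page.  An L2 leaf maps
 * 1 GiB, an L1 leaf 2 MiB, and an L0 entry 4 KiB.
 */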

static inline size_t
pte_index(vaddr_t va)
{
	return ((va >> PGSHIFT) & (NPTEPG - 1));
}

static inline bool
pte_valid_p(pt_entry_t pte)
{
	return (pte & PTE_V) != 0;
}

static inline bool
pte_wired_p(pt_entry_t pte)
{
	return (pte & PTE_WIRED) != 0;
}

static inline bool
pte_modified_p(pt_entry_t pte)
{
	return (pte & PTE_D) != 0;
}

static inline bool
pte_cached_p(pt_entry_t pte)
{
	/* The base PTE format has no cacheability bit; treat all as cached. */
	return true;
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{
	return false;
}

static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{
	return pte | PTE_WIRED;
}

static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{
	return pte & ~PTE_WIRED;
}

static inline paddr_t
pte_to_paddr(pt_entry_t pte)
{
	return PTE_TO_PA(pte);
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{
	/* An all-zero PTE is the canonical "not valid" entry. */
	return 0;
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{
	return pte & ~PTE_W;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t newprot)
{
	if ((newprot & VM_PROT_READ) == 0)
		pte &= ~PTE_R;
	if ((newprot & VM_PROT_WRITE) == 0)
		pte &= ~PTE_W;
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pte &= ~PTE_X;
	return pte;
}

static inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
{
	pt_entry_t pte;

	KASSERT(prot & VM_PROT_READ);

	pte = PTE_R;
	if (prot & VM_PROT_EXECUTE) {
		pte |= PTE_X;
	}
	if (prot & VM_PROT_WRITE) {
		pte |= PTE_W;
	}

	return pte;
}

static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
{
#if 0
	if (__predict_false(flags & PMAP_NOCACHE)) {
		if (__predict_true(mdpg != NULL)) {
			return pte_nocached_bits();
		} else {
			return pte_ionocached_bits();
		}
	} else {
		if (__predict_false(mdpg != NULL)) {
			return pte_cached_bits();
		} else {
			return pte_iocached_bits();
		}
	}
#else
	return 0;
#endif
}

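/*
 * Note on PTE_V below: a managed page (mdpg != NULL) that has not yet
 * been referenced is entered without PTE_V, so the first access traps
 * and the pmap can record the reference before validating the mapping
 * (software referenced-bit emulation).  Unmanaged mappings are entered
 * valid immediately.
 */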
static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags, bool kernel_p)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= pte_flag_bits(mdpg, flags, kernel_p);
	pte |= pte_prot_bits(mdpg, prot, kernel_p);

	if (mdpg == NULL || VM_PAGEMD_REFERENCED_P(mdpg))
		pte |= PTE_V;

	return pte;
}

static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= PTE_WIRED | PTE_G | PTE_V;
	pte |= pte_flag_bits(NULL, flags, true);
	pte |= pte_prot_bits(NULL, prot, true);	/* pretend unmanaged */

	return pte;
}

static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

static inline pd_entry_t
pte_invalid_pde(void)
{
	return 0;
}

static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{
	return PTE_V | (pa >> PAGE_SHIFT) << PTE_PPN_SHIFT;
}

static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{
	return PTE_V | (pa >> PAGE_SHIFT) << PTE_PPN_SHIFT;
}

static inline bool
pte_pde_valid_p(pd_entry_t pde)
{
	/* A valid non-leaf entry has PTE_V set and PTE_R/W/X all clear. */
	return (pde & (PTE_X | PTE_W | PTE_R | PTE_V)) == PTE_V;
}

static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{
	return pte_to_paddr((pt_entry_t)pde);
}

static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pd_entry_t npde)
{
#ifdef MULTIPROCESSOR
#ifdef _LP64
	return atomic_cas_64(pdep, opde, npde);
#else
	return atomic_cas_32(pdep, opde, npde);
#endif
#else
	/* Uniprocessor: a plain store suffices, no CAS needed. */
	*pdep = npde;
	return 0;
#endif
}

static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{

	*pdep = npde;
}

static inline pt_entry_t
pte_value(pt_entry_t pte)
{
	return pte;
}

#endif /* _RISCV_PTE_H_ */