/* $NetBSD: pte.h,v 1.9 2022/10/15 06:41:43 simonb Exp $ */

/*
 * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
 * Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_PTE_H_
#define _RISCV_PTE_H_

#ifdef _LP64	/* Sv39 */
#define PTE_PPN		__BITS(53, 10)
#define PTE_PPN0	__BITS(18, 10)
#define PTE_PPN1	__BITS(27, 19)
#define PTE_PPN2	__BITS(53, 28)
typedef uint64_t pt_entry_t;
typedef uint64_t pd_entry_t;
#define atomic_cas_pte	atomic_cas_64
#else	/* Sv32 */
#define PTE_PPN		__BITS(31, 10)
#define PTE_PPN0	__BITS(19, 10)
#define PTE_PPN1	__BITS(31, 20)
typedef uint32_t pt_entry_t;
typedef uint32_t pd_entry_t;
#define atomic_cas_pte	atomic_cas_32
#endif
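
/*
 * Illustrative PTE layout, as implied by the field definitions above
 * (see the RISC-V privileged spec for the authoritative format):
 *
 *	Sv39:	| 63..54 (unused here) | 53..10 PPN | 9..8 RSW |
 *	Sv32:	|                        31..10 PPN | 9..8 RSW |
 *		| 7 D | 6 A | 5 G | 4 U | 3 X | 2 W | 1 R | 0 V |
 *
 * Bits 9..8 (PTE_RSW below) are reserved for software use; this port
 * keeps its wired bit there.
 */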

#define PTE_PPN_SHIFT	10

#define NPTEPG		(PAGE_SIZE / sizeof(pt_entry_t))
#define NSEGPG		NPTEPG
#define NPDEPG		NPTEPG

/* Software PTE bits. */
#define PTE_RSW		__BITS(9, 8)
#define PTE_WIRED	__BIT(9)

/* Hardware PTE bits. */
#define PTE_D		__BIT(7)	// Dirty
#define PTE_A		__BIT(6)	// Accessed
#define PTE_G		__BIT(5)	// Global
#define PTE_U		__BIT(4)	// User
#define PTE_X		__BIT(3)	// eXecute
#define PTE_W		__BIT(2)	// Write
#define PTE_R		__BIT(1)	// Read
#define PTE_V		__BIT(0)	// Valid

#define PTE_HARDWIRED	(PTE_A | PTE_D)
#define PTE_KERN	(PTE_V | PTE_G | PTE_A | PTE_D)
#define PTE_RW		(PTE_R | PTE_W)
#define PTE_RX		(PTE_R | PTE_X)
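
/*
 * PTE_HARDWIRED presets the Accessed and Dirty bits: the privileged
 * spec permits an implementation to fault, rather than update A/D in
 * hardware, when they are clear, and wired kernel mappings must never
 * take such a fault.  PTE_KERN is the template for a valid, global
 * kernel mapping with A/D already set.
 */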

#define PA_TO_PTE(pa)	(((pa) >> PAGE_SHIFT) << PTE_PPN_SHIFT)
#define PTE_TO_PA(pte)	(((pte) >> PTE_PPN_SHIFT) << PAGE_SHIFT)
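
/*
 * Worked example (a sketch, assuming PAGE_SHIFT == 12, i.e. 4 KiB
 * pages): for pa == 0x80200000, pa >> PAGE_SHIFT == 0x80200 (the PPN),
 * and 0x80200 << PTE_PPN_SHIFT == 0x20080000, the PPN field of the
 * PTE.  PTE_TO_PA inverts this, discarding the low flag bits.
 */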

#define L2_SHIFT	30
#define L1_SHIFT	21
#define L0_SHIFT	12

#define L2_SIZE		(1 << L2_SHIFT)
#define L1_SIZE		(1 << L1_SHIFT)
#define L0_SIZE		(1 << L0_SHIFT)

#define L2_OFFSET	(L2_SIZE - 1)
#define L1_OFFSET	(L1_SIZE - 1)
#define L0_OFFSET	(L0_SIZE - 1)

#define Ln_ENTRIES	(1 << 9)
#define Ln_ADDR_MASK	(Ln_ENTRIES - 1)

#define pl2_i(va)	(((va) >> L2_SHIFT) & Ln_ADDR_MASK)
#define pl1_i(va)	(((va) >> L1_SHIFT) & Ln_ADDR_MASK)
#define pl0_i(va)	(((va) >> L0_SHIFT) & Ln_ADDR_MASK)
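
/*
 * Per the shifts above, an Sv39 virtual address decomposes as
 *
 *	| L2 index (9 bits) | L1 index (9 bits) | L0 index (9 bits) |
 *	| page offset (12 bits) |
 *
 * so each pl*_i() macro extracts one 9-bit table index and the low
 * L0_OFFSET bits remain the offset within the page.
 */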

static inline size_t
pte_index(vaddr_t va)
{
	return ((va >> PGSHIFT) & (NPTEPG - 1));
}

static inline bool
pte_valid_p(pt_entry_t pte)
{
	return (pte & PTE_V) != 0;
}

static inline bool
pte_wired_p(pt_entry_t pte)
{
	return (pte & PTE_WIRED) != 0;
}

static inline bool
pte_modified_p(pt_entry_t pte)
{
	return (pte & PTE_D) != 0;
}

static inline bool
pte_cached_p(pt_entry_t pte)
{
	/* The base RISC-V PTE format has no cacheability bits. */
	return true;
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{
	return false;
}

static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{
	return pte | PTE_WIRED;
}

static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{
	return pte & ~PTE_WIRED;
}

static inline paddr_t
pte_to_paddr(pt_entry_t pte)
{
	return PTE_TO_PA(pte);
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{
	return kernel_p ? PTE_G : 0;
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{
	return pte & ~PTE_W;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t newprot)
{
	if ((newprot & VM_PROT_READ) == 0)
		pte &= ~PTE_R;
	if ((newprot & VM_PROT_WRITE) == 0)
		pte &= ~PTE_W;
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pte &= ~PTE_X;
	return pte;
}

static inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
{
	pt_entry_t pte;

	KASSERT(prot & VM_PROT_READ);

	pte = PTE_R;
	if (prot & VM_PROT_EXECUTE) {
		pte |= PTE_X;
	}
	if (prot & VM_PROT_WRITE) {
		pte |= PTE_W;
	}

	return pte;
}

static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
{
#if 0
	if (__predict_false(flags & PMAP_NOCACHE)) {
		if (__predict_true(mdpg != NULL)) {
			return pte_nocached_bits();
		} else {
			return pte_ionocached_bits();
		}
	} else {
		if (__predict_true(mdpg != NULL)) {
			return pte_cached_bits();
		} else {
			return pte_iocached_bits();
		}
	}
#else
	return 0;
#endif
}

static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags, bool kernel_p)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= pte_flag_bits(mdpg, flags, kernel_p);
	pte |= pte_prot_bits(mdpg, prot, kernel_p);

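	/*
	 * Managed mappings start out non-valid until the page has been
	 * referenced, so the first access faults and the pmap can
	 * record the reference (mod/ref emulation).
	 */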
	if (mdpg != NULL && VM_PAGEMD_REFERENCED_P(mdpg))
		pte |= PTE_V;

	return pte;
}

static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	/* Valid, global, and A/D preset: kenter mappings never fault. */
	pte |= PTE_KERN | PTE_WIRED;
	pte |= pte_flag_bits(NULL, flags, true);
	pte |= pte_prot_bits(NULL, prot, true);	/* pretend unmanaged */

	return pte;
}

static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

static inline pd_entry_t
pte_invalid_pde(void)
{
	return 0;
}

static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{
	/* A non-leaf entry: V set, R/W/X clear, PPN in its field. */
	return PTE_V | PA_TO_PTE(pa);
}

static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{
	return PTE_V | PA_TO_PTE(pa);
}

static inline bool
pte_pde_valid_p(pd_entry_t pde)
{
	/* Valid and pointing to the next level (R/W/X all clear). */
	return (pde & (PTE_X | PTE_W | PTE_R | PTE_V)) == PTE_V;
}

static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{
	return pte_to_paddr((pt_entry_t)pde);
}

static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pd_entry_t npde)
{
#ifdef MULTIPROCESSOR
#ifdef _LP64
	return atomic_cas_64(pdep, opde, npde);
#else
	return atomic_cas_32(pdep, opde, npde);
#endif
#else
	/* Uniprocessor: the store always wins, so report success. */
	*pdep = npde;
	return opde;
#endif
}
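
/*
 * Hypothetical caller pattern (a sketch; pt_pa and pdep are assumed
 * names): publish a freshly allocated page-table page and detect a
 * lost race.  The update succeeded iff the returned value equals opde:
 *
 *	pd_entry_t opde = pte_invalid_pde();
 *	pd_entry_t npde = pte_pde_ptpage(pt_pa, kernel_p);
 *	if (pte_pde_cas(pdep, opde, npde) != opde) {
 *		// another CPU installed a PDE first; free pt_pa
 *	}
 */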

static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{

	*pdep = npde;
}

static inline pt_entry_t
pte_value(pt_entry_t pte)
{
	return pte;
}

#endif /* _RISCV_PTE_H_ */