/* $NetBSD: pte.h,v 1.4.4.1 2020/12/14 14:38:00 thorpej Exp $ */

/*
 * Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas (of 3am Software Foundry) and Maxime Villard.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RISCV_PTE_H_
#define _RISCV_PTE_H_
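
/*
 * RISC-V page table entry format (privileged spec): the physical page
 * number (PPN) occupies the upper bits starting at bit 10, and the low
 * ten bits hold the flag bits (V/R/W/X/U/G/A/D plus two bits reserved
 * for software).  LP64 kernels use the 64-bit Sv39 format, 32-bit
 * kernels the 32-bit Sv32 format.
 */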
#ifdef _LP64 /* Sv39 */
#define PTE_PPN __BITS(53, 10)
#define PTE_PPN0 __BITS(18, 10)
#define PTE_PPN1 __BITS(27, 19)
#define PTE_PPN2 __BITS(53, 28)
typedef __uint64_t pt_entry_t;
typedef __uint64_t pd_entry_t;
#define atomic_cas_pte atomic_cas_64
#else /* Sv32 */
#define PTE_PPN __BITS(31, 10)
#define PTE_PPN0 __BITS(19, 10)
#define PTE_PPN1 __BITS(31, 20)
typedef __uint32_t pt_entry_t;
typedef __uint32_t pd_entry_t;
#define atomic_cas_pte atomic_cas_32
#endif

#define PTE_PPN_SHIFT 10
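
/* Number of PTEs (and segment/page-directory entries) per page-table page. */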
#define NPTEPG (PAGE_SIZE / sizeof(pt_entry_t))
#define NSEGPG NPTEPG
#define NPDEPG NPTEPG
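
/* PTE bits 9:8 are reserved for software use and ignored by the hardware. */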
/* Software PTE bits. */
#define PTE_WIRED __BIT(8)

/* Hardware PTE bits. */
#define PTE_D __BIT(7) // Dirty
#define PTE_A __BIT(6) // Accessed
#define PTE_G __BIT(5) // Global
#define PTE_U __BIT(4) // User
#define PTE_X __BIT(3) // eXecute
#define PTE_W __BIT(2) // Write
#define PTE_R __BIT(1) // Read
#define PTE_V __BIT(0) // Valid
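
/* Convert between a physical address and the PPN field of a PTE. */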
#define PA_TO_PTE(pa) (((pa) >> PAGE_SHIFT) << PTE_PPN_SHIFT)
#define PTE_TO_PA(pte) (((pte) >> PTE_PPN_SHIFT) << PAGE_SHIFT)
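
/*
 * Sv39 translation levels: an L2 entry maps a 1GB gigapage, an L1 entry
 * a 2MB megapage, and an L0 entry a 4KB base page.
 */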
#define L2_SHIFT 30
#define L1_SHIFT 21
#define L0_SHIFT 12

#define L2_SIZE (1 << L2_SHIFT)
#define L1_SIZE (1 << L1_SHIFT)
#define L0_SIZE (1 << L0_SHIFT)

#define L2_OFFSET (L2_SIZE - 1)
#define L1_OFFSET (L1_SIZE - 1)
#define L0_OFFSET (L0_SIZE - 1)

#define Ln_ENTRIES (1 << 9)
#define Ln_ADDR_MASK (Ln_ENTRIES - 1)
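
/* Page-table index of a virtual address at each translation level. */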
#define pl2_i(va) (((va) >> L2_SHIFT) & Ln_ADDR_MASK)
#define pl1_i(va) (((va) >> L1_SHIFT) & Ln_ADDR_MASK)
#define pl0_i(va) (((va) >> L0_SHIFT) & Ln_ADDR_MASK)

static inline bool
pte_valid_p(pt_entry_t pte)
{
	return (pte & PTE_V) != 0;
}

static inline bool
pte_wired_p(pt_entry_t pte)
{
	return (pte & PTE_WIRED) != 0;
}

static inline bool
pte_modified_p(pt_entry_t pte)
{
	return (pte & PTE_D) != 0;
}
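
/*
 * The base Sv39/Sv32 PTE format carries no cacheability attribute and no
 * deferred-execute bit, so these predicates are constant.
 */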
static inline bool
pte_cached_p(pt_entry_t pte)
{
	return true;
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{
	return false;
}

static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{
	return pte | PTE_WIRED;
}

static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{
	return pte & ~PTE_WIRED;
}

static inline paddr_t
pte_to_paddr(pt_entry_t pte)
{
	return PTE_TO_PA(pte);
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{
	return kernel_p ? PTE_G : 0;
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{
	return pte & ~PTE_W;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t newprot)
{
	if ((newprot & VM_PROT_READ) == 0)
		pte &= ~PTE_R;
	if ((newprot & VM_PROT_WRITE) == 0)
		pte &= ~PTE_W;
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pte &= ~PTE_X;
	return pte;
}

static inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
{
	pt_entry_t pte;

	KASSERT(prot & VM_PROT_READ);

	pte = PTE_R;
	if (prot & VM_PROT_EXECUTE) {
		pte |= PTE_X;
	}
	if (prot & VM_PROT_WRITE) {
		pte |= PTE_W;
	}

	return pte;
}

static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
{
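	/*
	 * The base PTE format has no memory-attribute bits, so the
	 * cacheability selection below stays disabled and no extra
	 * flag bits are generated.
	 */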
#if 0
	if (__predict_false(flags & PMAP_NOCACHE)) {
		if (__predict_true(mdpg != NULL)) {
			return pte_nocached_bits();
		} else {
			return pte_ionocached_bits();
		}
	} else {
		if (__predict_false(mdpg != NULL)) {
			return pte_cached_bits();
		} else {
			return pte_iocached_bits();
		}
	}
#else
	return 0;
#endif
}
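
/*
 * Construct a PTE for pmap_enter().  Managed pages that have not yet been
 * marked referenced are entered without PTE_V, so the first access faults
 * and lets the pmap record the reference; unmanaged pages are entered
 * valid immediately.
 */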
static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags, bool kernel_p)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= pte_flag_bits(mdpg, flags, kernel_p);
	pte |= pte_prot_bits(mdpg, prot, kernel_p);

	if (mdpg == NULL || VM_PAGEMD_REFERENCED_P(mdpg))
		pte |= PTE_V;

	return pte;
}
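
/*
 * Construct a PTE for pmap_kenter_pa(): a wired, immediately valid kernel
 * mapping, treated as unmanaged.
 */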
static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    int flags)
{
	pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);

	pte |= PTE_WIRED | PTE_V;
	pte |= pte_flag_bits(NULL, flags, true);
	pte |= pte_prot_bits(NULL, prot, true); /* pretend unmanaged */

	return pte;
}
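
/*
 * Store a PTE.  This is a plain store; any required TLB invalidation
 * (sfence.vma) is left to the caller.
 */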
static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

static inline pt_entry_t
pte_value(pt_entry_t pte)
{
	return pte;
}

#endif /* _RISCV_PTE_H_ */