/* $NetBSD: pte.h,v 1.4 2020/03/14 16:12:16 skrll Exp $ */
2 1.3 maxv
3 1.3 maxv /*
4 1.3 maxv * Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
5 1.1 matt * All rights reserved.
6 1.1 matt *
7 1.1 matt * This code is derived from software contributed to The NetBSD Foundation
8 1.3 maxv * by Matt Thomas (of 3am Software Foundry) and Maxime Villard.
9 1.1 matt *
10 1.1 matt * Redistribution and use in source and binary forms, with or without
11 1.1 matt * modification, are permitted provided that the following conditions
12 1.1 matt * are met:
13 1.1 matt * 1. Redistributions of source code must retain the above copyright
14 1.1 matt * notice, this list of conditions and the following disclaimer.
15 1.1 matt * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 matt * notice, this list of conditions and the following disclaimer in the
17 1.1 matt * documentation and/or other materials provided with the distribution.
18 1.1 matt *
19 1.1 matt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 1.1 matt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 1.1 matt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 1.1 matt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 1.1 matt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 1.1 matt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 1.1 matt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 1.1 matt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 1.1 matt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 1.1 matt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 1.1 matt * POSSIBILITY OF SUCH DAMAGE.
30 1.1 matt */
31 1.1 matt
32 1.1 matt #ifndef _RISCV_PTE_H_
33 1.1 matt #define _RISCV_PTE_H_
34 1.1 matt
#ifdef _LP64 /* Sv39 */
/* Physical page number field of a 64-bit PTE, and its per-level slices. */
#define PTE_PPN __BITS(53, 10)
#define PTE_PPN0 __BITS(18, 10)
#define PTE_PPN1 __BITS(27, 19)
#define PTE_PPN2 __BITS(53, 28)
typedef __uint64_t pt_entry_t;	/* leaf page-table entry */
typedef __uint64_t pd_entry_t;	/* page-directory (non-leaf) entry */
#define atomic_cas_pte atomic_cas_64
#else /* Sv32 */
/* Physical page number field of a 32-bit PTE, and its per-level slices. */
#define PTE_PPN __BITS(31, 10)
#define PTE_PPN0 __BITS(19, 10)
#define PTE_PPN1 __BITS(31, 20)
typedef __uint32_t pt_entry_t;
typedef __uint32_t pd_entry_t;
#define atomic_cas_pte atomic_cas_32
#endif

/* The PPN field starts at bit 10 in both Sv32 and Sv39 PTE formats. */
#define PTE_PPN_SHIFT 10

/* PTEs per page-table page; segment/directory pages hold the same count. */
#define NPTEPG (PAGE_SIZE / sizeof(pt_entry_t))
#define NSEGPG NPTEPG
#define NPDEPG NPTEPG

/* Software PTE bits. */
/* Wired mapping marker, kept in an RSW bit (9:8) the spec leaves to software. */
#define PTE_WIRED __BIT(8)

/* Hardware PTE bits. */
#define PTE_D __BIT(7)	/* Dirty: page has been written */
#define PTE_A __BIT(6)	/* Accessed: page has been referenced */
#define PTE_G __BIT(5)	/* Global: mapping present in all address spaces */
#define PTE_U __BIT(4)	/* User-mode accessible */
#define PTE_X __BIT(3)	/* Executable */
#define PTE_W __BIT(2)	/* Writable */
#define PTE_R __BIT(1)	/* Readable */
#define PTE_V __BIT(0)	/* Valid */

/* Convert between a physical address and the (shifted) PPN stored in a PTE. */
#define PA_TO_PTE(pa) (((pa) >> PAGE_SHIFT) << PTE_PPN_SHIFT)
#define PTE_TO_PA(pte) (((pte) >> PTE_PPN_SHIFT) << PAGE_SHIFT)

/*
 * Per-level VA shifts.  These match Sv39 (L0 4KiB, L1 2MiB, L2 1GiB).
 * NOTE(review): Sv32's level-1 covers 4MiB (shift 22), so the !_LP64
 * case looks unhandled here — confirm against the Sv32 pmap users.
 */
#define L2_SHIFT 30
#define L1_SHIFT 21
#define L0_SHIFT 12

/* Bytes mapped by one entry at each level. */
#define L2_SIZE (1 << L2_SHIFT)
#define L1_SIZE (1 << L1_SHIFT)
#define L0_SIZE (1 << L0_SHIFT)

/* Offset-within-mapping masks for each level. */
#define L2_OFFSET (L2_SIZE - 1)
#define L1_OFFSET (L1_SIZE - 1)
#define L0_OFFSET (L0_SIZE - 1)

/* Each table level holds 2^9 entries; mask selects the per-level VA index. */
#define Ln_ENTRIES (1 << 9)
#define Ln_ADDR_MASK (Ln_ENTRIES - 1)

/* Index of `va' within the level-2/1/0 page table. */
#define pl2_i(va) (((va) >> L2_SHIFT) & Ln_ADDR_MASK)
#define pl1_i(va) (((va) >> L1_SHIFT) & Ln_ADDR_MASK)
#define pl0_i(va) (((va) >> L0_SHIFT) & Ln_ADDR_MASK)
92 1.1 matt
93 1.1 matt static inline bool
94 1.1 matt pte_valid_p(pt_entry_t pte)
95 1.1 matt {
96 1.1 matt return (pte & PTE_V) != 0;
97 1.1 matt }
98 1.1 matt
99 1.1 matt static inline bool
100 1.1 matt pte_wired_p(pt_entry_t pte)
101 1.1 matt {
102 1.1 matt return (pte & PTE_WIRED) != 0;
103 1.1 matt }
104 1.1 matt
105 1.1 matt static inline bool
106 1.1 matt pte_modified_p(pt_entry_t pte)
107 1.1 matt {
108 1.3 maxv return (pte & PTE_D) != 0;
109 1.1 matt }
110 1.1 matt
111 1.1 matt static inline bool
112 1.1 matt pte_cached_p(pt_entry_t pte)
113 1.1 matt {
114 1.1 matt return true;
115 1.1 matt }
116 1.1 matt
117 1.1 matt static inline bool
118 1.1 matt pte_deferred_exec_p(pt_entry_t pte)
119 1.1 matt {
120 1.3 maxv return false;
121 1.1 matt }
122 1.1 matt
123 1.1 matt static inline pt_entry_t
124 1.1 matt pte_wire_entry(pt_entry_t pte)
125 1.1 matt {
126 1.1 matt return pte | PTE_WIRED;
127 1.1 matt }
128 1.4 skrll
129 1.4 skrll static inline pt_entry_t
130 1.1 matt pte_unwire_entry(pt_entry_t pte)
131 1.1 matt {
132 1.1 matt return pte & ~PTE_WIRED;
133 1.1 matt }
134 1.1 matt
135 1.1 matt static inline paddr_t
136 1.1 matt pte_to_paddr(pt_entry_t pte)
137 1.1 matt {
138 1.1 matt return pte & ~PAGE_MASK;
139 1.1 matt }
140 1.1 matt
141 1.1 matt static inline pt_entry_t
142 1.1 matt pte_nv_entry(bool kernel_p)
143 1.1 matt {
144 1.1 matt return kernel_p ? PTE_G : 0;
145 1.1 matt }
146 1.1 matt
147 1.1 matt static inline pt_entry_t
148 1.1 matt pte_prot_nowrite(pt_entry_t pte)
149 1.1 matt {
150 1.3 maxv return pte & ~PTE_W;
151 1.1 matt }
152 1.1 matt
153 1.1 matt static inline pt_entry_t
154 1.1 matt pte_prot_downgrade(pt_entry_t pte, vm_prot_t newprot)
155 1.1 matt {
156 1.3 maxv if ((newprot & VM_PROT_READ) == 0)
157 1.3 maxv pte &= ~PTE_R;
158 1.3 maxv if ((newprot & VM_PROT_WRITE) == 0)
159 1.3 maxv pte &= ~PTE_W;
160 1.1 matt if ((newprot & VM_PROT_EXECUTE) == 0)
161 1.3 maxv pte &= ~PTE_X;
162 1.1 matt return pte;
163 1.1 matt }
164 1.1 matt
165 1.1 matt static inline pt_entry_t
166 1.1 matt pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
167 1.1 matt {
168 1.3 maxv pt_entry_t pte;
169 1.3 maxv
170 1.1 matt KASSERT(prot & VM_PROT_READ);
171 1.3 maxv
172 1.3 maxv pte = PTE_R;
173 1.1 matt if (prot & VM_PROT_EXECUTE) {
174 1.3 maxv pte |= PTE_X;
175 1.1 matt }
176 1.1 matt if (prot & VM_PROT_WRITE) {
177 1.3 maxv pte |= PTE_W;
178 1.1 matt }
179 1.3 maxv
180 1.3 maxv return pte;
181 1.1 matt }
182 1.1 matt
/*
 * pte_flag_bits: attribute (cacheability) bits for a new mapping.
 *
 * The RISC-V Sv32/Sv39 PTE format carries no cacheability attributes, so
 * this currently contributes nothing.  The #if 0 block sketches the shape
 * this would take on ports that provide cached/uncached attribute helpers
 * (pte_cached_bits() et al. are not defined here).
 */
static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
{
#if 0
	if (__predict_false(flags & PMAP_NOCACHE)) {
		if (__predict_true(mdpg != NULL)) {
			return pte_nocached_bits();
		} else {
			return pte_ionocached_bits();
		}
	} else {
		if (__predict_false(mdpg != NULL)) {
			return pte_cached_bits();
		} else {
			return pte_iocached_bits();
		}
	}
#else
	/* No attribute bits to contribute. */
	return 0;
#endif
}
204 1.1 matt
205 1.1 matt static inline pt_entry_t
206 1.1 matt pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
207 1.3 maxv int flags, bool kernel_p)
208 1.1 matt {
209 1.3 maxv pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);
210 1.1 matt
211 1.1 matt pte |= pte_flag_bits(mdpg, flags, kernel_p);
212 1.1 matt pte |= pte_prot_bits(mdpg, prot, kernel_p);
213 1.1 matt
214 1.1 matt if (mdpg == NULL && VM_PAGEMD_REFERENCED_P(mdpg))
215 1.1 matt pte |= PTE_V;
216 1.1 matt
217 1.1 matt return pte;
218 1.1 matt }
219 1.1 matt
220 1.1 matt static inline pt_entry_t
221 1.1 matt pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
222 1.3 maxv int flags)
223 1.1 matt {
224 1.3 maxv pt_entry_t pte = (pt_entry_t)PA_TO_PTE(pa);
225 1.1 matt
226 1.1 matt pte |= PTE_WIRED | PTE_V;
227 1.1 matt pte |= pte_flag_bits(NULL, flags, true);
228 1.1 matt pte |= pte_prot_bits(NULL, prot, true); /* pretend unmanaged */
229 1.1 matt
230 1.1 matt return pte;
231 1.1 matt }
232 1.1 matt
233 1.1 matt static inline void
234 1.1 matt pte_set(pt_entry_t *ptep, pt_entry_t pte)
235 1.1 matt {
236 1.1 matt *ptep = pte;
237 1.1 matt }
238 1.1 matt
239 1.2 maxv static inline pt_entry_t
240 1.2 maxv pte_value(pt_entry_t pte)
241 1.2 maxv {
242 1.2 maxv return pte;
243 1.2 maxv }
244 1.2 maxv
245 1.1 matt #endif /* _RISCV_PTE_H_ */
246