/*	$NetBSD: pmap_private.h,v 1.4 2022/08/21 09:12:43 riastradh Exp $	*/
2 1.1 riastrad
3 1.1 riastrad /*
4 1.1 riastrad * Copyright (c) 1997 Charles D. Cranor and Washington University.
5 1.1 riastrad * All rights reserved.
6 1.1 riastrad *
7 1.1 riastrad * Redistribution and use in source and binary forms, with or without
8 1.1 riastrad * modification, are permitted provided that the following conditions
9 1.1 riastrad * are met:
10 1.1 riastrad * 1. Redistributions of source code must retain the above copyright
11 1.1 riastrad * notice, this list of conditions and the following disclaimer.
12 1.1 riastrad * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 riastrad * notice, this list of conditions and the following disclaimer in the
14 1.1 riastrad * documentation and/or other materials provided with the distribution.
15 1.1 riastrad *
16 1.1 riastrad * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 1.1 riastrad * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 1.1 riastrad * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 1.1 riastrad * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 1.1 riastrad * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 1.1 riastrad * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 1.1 riastrad * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 1.1 riastrad * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 1.1 riastrad * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 1.1 riastrad * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 1.1 riastrad */
27 1.1 riastrad
28 1.1 riastrad /*
29 1.1 riastrad * Copyright (c) 2001 Wasabi Systems, Inc.
30 1.1 riastrad * All rights reserved.
31 1.1 riastrad *
32 1.1 riastrad * Written by Frank van der Linden for Wasabi Systems, Inc.
33 1.1 riastrad *
34 1.1 riastrad * Redistribution and use in source and binary forms, with or without
35 1.1 riastrad * modification, are permitted provided that the following conditions
36 1.1 riastrad * are met:
37 1.1 riastrad * 1. Redistributions of source code must retain the above copyright
38 1.1 riastrad * notice, this list of conditions and the following disclaimer.
39 1.1 riastrad * 2. Redistributions in binary form must reproduce the above copyright
40 1.1 riastrad * notice, this list of conditions and the following disclaimer in the
41 1.1 riastrad * documentation and/or other materials provided with the distribution.
42 1.1 riastrad * 3. All advertising materials mentioning features or use of this software
43 1.1 riastrad * must display the following acknowledgement:
44 1.1 riastrad * This product includes software developed for the NetBSD Project by
45 1.1 riastrad * Wasabi Systems, Inc.
46 1.1 riastrad * 4. The name of Wasabi Systems, Inc. may not be used to endorse
47 1.1 riastrad * or promote products derived from this software without specific prior
48 1.1 riastrad * written permission.
49 1.1 riastrad *
50 1.1 riastrad * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
51 1.1 riastrad * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
52 1.1 riastrad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
53 1.1 riastrad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
54 1.1 riastrad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
55 1.1 riastrad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
56 1.1 riastrad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
57 1.1 riastrad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
58 1.1 riastrad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
59 1.1 riastrad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
60 1.1 riastrad * POSSIBILITY OF SUCH DAMAGE.
61 1.1 riastrad */
62 1.1 riastrad
63 1.1 riastrad #ifndef _AMD64_PMAP_PRIVATE_H_
64 1.1 riastrad #define _AMD64_PMAP_PRIVATE_H_
65 1.1 riastrad
66 1.1 riastrad #ifdef __x86_64__
67 1.1 riastrad
68 1.1 riastrad #if defined(_KERNEL_OPT)
69 1.1 riastrad #include "opt_xen.h"
70 1.1 riastrad #include "opt_kasan.h"
71 1.1 riastrad #include "opt_kmsan.h"
72 1.1 riastrad #include "opt_kubsan.h"
73 1.1 riastrad #endif
74 1.1 riastrad
75 1.1 riastrad #include <sys/atomic.h>
76 1.1 riastrad
77 1.1 riastrad #include <machine/pte.h>
78 1.1 riastrad #include <machine/segments.h>
79 1.1 riastrad #ifdef _KERNEL
80 1.1 riastrad #include <machine/cpufunc.h>
81 1.1 riastrad #endif
82 1.1 riastrad
83 1.1 riastrad #include <uvm/uvm_object.h>
84 1.1 riastrad #ifdef XENPV
85 1.1 riastrad #include <xen/xenfunc.h>
86 1.1 riastrad #include <xen/xenpmap.h>
87 1.1 riastrad #endif
88 1.1 riastrad
/*
 * L4 (PML4) slot assignments.  NOTE(review): slot 256 appears to be
 * reserved for the sanitizer shadow when KASAN/KMSAN is enabled --
 * confirm against the amd64 VM layout documentation.
 */
89 1.1 riastrad #ifdef KASAN
90 1.1 riastrad #define L4_SLOT_KASAN 256
91 1.1 riastrad #define NL4_SLOT_KASAN 32
92 1.1 riastrad #endif
93 1.1 riastrad
94 1.1 riastrad #ifdef KMSAN
95 1.1 riastrad #define L4_SLOT_KMSAN 256
96 1.1 riastrad #define NL4_SLOT_KMSAN 4
97 1.1 riastrad #endif
98 1.1 riastrad
/* Number of L4 slots used by the direct map. */
99 1.1 riastrad #define NL4_SLOT_DIRECT 32
100 1.1 riastrad
/*
 * Recursive-PTE and main-kernel slots: chosen dynamically from
 * slotspace on native kernels, fixed at compile time under Xen PV.
 */
101 1.1 riastrad #ifndef XENPV
102 1.1 riastrad #define L4_SLOT_PTE slotspace.area[SLAREA_PTE].sslot
103 1.1 riastrad #else
104 1.1 riastrad #define L4_SLOT_PTE 509
105 1.1 riastrad #endif
106 1.1 riastrad #define L4_SLOT_KERN slotspace.area[SLAREA_MAIN].sslot
107 1.1 riastrad #define L4_SLOT_KERNBASE 511 /* pl4_i(KERNBASE) */
108 1.1 riastrad
/* Last L4 slot available to user space; kernel slots lie above it. */
109 1.1 riastrad #define PDIR_SLOT_USERLIM 255
110 1.1 riastrad #define PDIR_SLOT_KERN L4_SLOT_KERN
111 1.1 riastrad #define PDIR_SLOT_PTE L4_SLOT_PTE
112 1.1 riastrad
113 1.1 riastrad /*
114 1.1 riastrad * The following defines give the virtual addresses of various MMU
115 1.1 riastrad * data structures:
116 1.1 riastrad * PTE_BASE: the base VA of the linear PTE mappings
117 1.1 riastrad * PDP_BASE: the base VA of the recursive mapping of the PTD
118 1.1 riastrad */
119 1.1 riastrad
/*
 * Base of the linear PTE mapping: a runtime variable on native kernels
 * (slot chosen from slotspace), a fixed sign-extended VA under Xen PV.
 */
120 1.1 riastrad #ifndef XENPV
121 1.1 riastrad extern pt_entry_t *pte_base;
122 1.1 riastrad #define PTE_BASE pte_base
123 1.1 riastrad #else
124 1.1 riastrad #define PTE_BASE ((pt_entry_t *)VA_SIGN_NEG((L4_SLOT_PTE * NBPD_L4)))
125 1.1 riastrad #endif
126 1.1 riastrad
/*
 * Each deeper level of the recursive mapping is reached by indexing
 * the previous level's window with the recursive slot again.
 */
127 1.1 riastrad #define L1_BASE PTE_BASE
128 1.1 riastrad #define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
129 1.1 riastrad #define L3_BASE ((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
130 1.1 riastrad #define L4_BASE ((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))
131 1.1 riastrad
132 1.1 riastrad #define PDP_BASE L4_BASE
133 1.1 riastrad
/* Maximum number of kernel L4 entries; KMSAN limits kva to one slot. */
134 1.1 riastrad #if defined(KMSAN)
135 1.1 riastrad #define NKL4_MAX_ENTRIES (unsigned long)1 /* 512GB only */
136 1.1 riastrad #else
137 1.1 riastrad #define NKL4_MAX_ENTRIES (unsigned long)64
138 1.1 riastrad #endif
/* Each page-table page holds 512 entries on amd64, hence the * 512. */
139 1.1 riastrad #define NKL3_MAX_ENTRIES (unsigned long)(NKL4_MAX_ENTRIES * 512)
140 1.1 riastrad #define NKL2_MAX_ENTRIES (unsigned long)(NKL3_MAX_ENTRIES * 512)
141 1.1 riastrad #define NKL1_MAX_ENTRIES (unsigned long)(NKL2_MAX_ENTRIES * 512)
142 1.1 riastrad
/* Entries reserved per level for mapping the kernel image itself. */
143 1.1 riastrad #define NKL4_KIMG_ENTRIES 1
144 1.1 riastrad #define NKL3_KIMG_ENTRIES 1
145 1.1 riastrad #if defined(KUBSAN) || defined(KMSAN)
146 1.1 riastrad #define NKL2_KIMG_ENTRIES 64 /* really big kernel */
147 1.1 riastrad #else
148 1.1 riastrad #define NKL2_KIMG_ENTRIES 48
149 1.1 riastrad #endif
150 1.1 riastrad
151 1.1 riastrad /*
152 1.1 riastrad * Since kva space is below the kernel in its entirety, we start off
153 1.1 riastrad * with zero entries on each level.
154 1.1 riastrad */
155 1.1 riastrad #define NKL4_START_ENTRIES 0
156 1.1 riastrad #define NKL3_START_ENTRIES 0
157 1.1 riastrad #define NKL2_START_ENTRIES 0
158 1.1 riastrad #define NKL1_START_ENTRIES 0
159 1.1 riastrad
/* Per-level array initializers; index 0 corresponds to L1. */
160 1.1 riastrad #define PTP_MASK_INITIALIZER { L1_MASK, L2_MASK, L3_MASK, L4_MASK }
161 1.1 riastrad #define PTP_FRAME_INITIALIZER { L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
162 1.1 riastrad #define PTP_SHIFT_INITIALIZER { L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
163 1.1 riastrad #define NKPTP_INITIALIZER { NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
164 1.1 riastrad NKL3_START_ENTRIES, NKL4_START_ENTRIES }
165 1.1 riastrad #define NKPTPMAX_INITIALIZER { NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
166 1.1 riastrad NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
167 1.1 riastrad #define NBPD_INITIALIZER { NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
168 1.1 riastrad #define PDES_INITIALIZER { L2_BASE, L3_BASE, L4_BASE }
169 1.1 riastrad
/* amd64 has a 4-level page-table hierarchy (L1..L4). */
170 1.2 riastrad #define PTP_LEVELS 4
171 1.2 riastrad
172 1.1 riastrad /*
173 1.1 riastrad * PTE_AVL usage: we make use of the ignored bits of the PTE
174 1.1 riastrad */
175 1.1 riastrad #define PTE_WIRED PTE_AVL1 /* Wired Mapping */
176 1.1 riastrad #define PTE_PVLIST PTE_AVL2 /* Mapping has entry on pvlist */
177 1.1 riastrad #define PTE_X 0 /* Dummy */
178 1.1 riastrad
179 1.1 riastrad /* XXX To be deleted. */
180 1.1 riastrad #define PG_W PTE_WIRED
181 1.1 riastrad #define PG_PVLIST PTE_PVLIST
182 1.1 riastrad #define PG_X PTE_X
183 1.1 riastrad
/*
 * SVS hooks implemented elsewhere.  NOTE(review): presumably the
 * kernel page-table isolation machinery (x86 "Separate Virtual
 * Space") -- confirm against sys/arch/x86/x86/svs.c.
 */
184 1.1 riastrad void svs_pmap_sync(struct pmap *, int);
185 1.1 riastrad void svs_ldt_sync(struct pmap *);
186 1.1 riastrad void svs_lwp_switch(struct lwp *, struct lwp *);
187 1.1 riastrad void svs_pdir_switch(struct pmap *);
188 1.1 riastrad void svs_init(void);
189 1.1 riastrad extern bool svs_enabled;
190 1.1 riastrad extern bool svs_pcid;
191 1.1 riastrad
192 1.1 riastrad #define _MACHINE_PMAP_PRIVATE_H_X86
193 1.1 riastrad #include <x86/pmap_private.h>
194 1.1 riastrad #undef _MACHINE_PMAP_PRIVATE_H_X86
195 1.1 riastrad
196 1.1 riastrad #ifndef XENPV
197 1.3 riastrad
198 1.1 riastrad #define pmap_pa2pte(a) (a)
199 1.1 riastrad #define pmap_pte2pa(a) ((a) & PTE_FRAME)
200 1.1 riastrad #define pmap_pte_set(p, n) do { *(p) = (n); } while (0)
201 1.1 riastrad #define pmap_pte_cas(p, o, n) atomic_cas_64((p), (o), (n))
202 1.1 riastrad #define pmap_pte_testset(p, n) \
203 1.1 riastrad atomic_swap_ulong((volatile unsigned long *)p, n)
204 1.1 riastrad #define pmap_pte_setbits(p, b) \
205 1.1 riastrad atomic_or_ulong((volatile unsigned long *)p, b)
206 1.1 riastrad #define pmap_pte_clearbits(p, b) \
207 1.1 riastrad atomic_and_ulong((volatile unsigned long *)p, ~(b))
208 1.1 riastrad #define pmap_pte_flush() /* nothing */
209 1.3 riastrad
210 1.1 riastrad #else
211 1.3 riastrad
212 1.1 riastrad extern kmutex_t pte_lock;
213 1.1 riastrad
214 1.1 riastrad static __inline pt_entry_t
215 1.1 riastrad pmap_pa2pte(paddr_t pa)
216 1.1 riastrad {
217 1.1 riastrad return (pt_entry_t)xpmap_ptom_masked(pa);
218 1.1 riastrad }
219 1.1 riastrad
220 1.1 riastrad static __inline paddr_t
221 1.1 riastrad pmap_pte2pa(pt_entry_t pte)
222 1.1 riastrad {
223 1.1 riastrad return xpmap_mtop_masked(pte & PTE_FRAME);
224 1.1 riastrad }
225 1.1 riastrad
226 1.1 riastrad static __inline void
227 1.1 riastrad pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
228 1.1 riastrad {
229 1.1 riastrad int s = splvm();
230 1.1 riastrad xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
231 1.1 riastrad splx(s);
232 1.1 riastrad }
233 1.1 riastrad
234 1.1 riastrad static __inline pt_entry_t
235 1.1 riastrad pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
236 1.1 riastrad {
237 1.1 riastrad pt_entry_t opte;
238 1.1 riastrad
239 1.1 riastrad mutex_enter(&pte_lock);
240 1.1 riastrad opte = *ptep;
241 1.1 riastrad if (opte == o) {
242 1.1 riastrad xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
243 1.1 riastrad xpq_flush_queue();
244 1.1 riastrad }
245 1.1 riastrad mutex_exit(&pte_lock);
246 1.1 riastrad return opte;
247 1.1 riastrad }
248 1.1 riastrad
249 1.1 riastrad static __inline pt_entry_t
250 1.1 riastrad pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
251 1.1 riastrad {
252 1.1 riastrad pt_entry_t opte;
253 1.1 riastrad
254 1.1 riastrad mutex_enter(&pte_lock);
255 1.1 riastrad opte = *pte;
256 1.1 riastrad xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), npte);
257 1.1 riastrad xpq_flush_queue();
258 1.1 riastrad mutex_exit(&pte_lock);
259 1.1 riastrad return opte;
260 1.1 riastrad }
261 1.1 riastrad
262 1.1 riastrad static __inline void
263 1.1 riastrad pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
264 1.1 riastrad {
265 1.1 riastrad mutex_enter(&pte_lock);
266 1.1 riastrad xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
267 1.1 riastrad xpq_flush_queue();
268 1.1 riastrad mutex_exit(&pte_lock);
269 1.1 riastrad }
270 1.1 riastrad
271 1.1 riastrad static __inline void
272 1.1 riastrad pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
273 1.1 riastrad {
274 1.1 riastrad mutex_enter(&pte_lock);
275 1.1 riastrad xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
276 1.1 riastrad (*pte) & ~bits);
277 1.1 riastrad xpq_flush_queue();
278 1.1 riastrad mutex_exit(&pte_lock);
279 1.1 riastrad }
280 1.1 riastrad
/*
 * pmap_pte_flush: push all pending PTE updates to the hypervisor,
 * with interrupts blocked at splvm around the queue flush.
 */
static __inline void
pmap_pte_flush(void)
{
	int spl;

	spl = splvm();
	xpq_flush_queue();
	splx(spl);
}
288 1.3 riastrad
289 1.1 riastrad #endif
290 1.1 riastrad
291 1.1 riastrad #ifdef __HAVE_DIRECT_MAP
292 1.1 riastrad #define PMAP_DIRECT
293 1.1 riastrad
294 1.1 riastrad static __inline int
295 1.1 riastrad pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
296 1.1 riastrad int (*process)(void *, size_t, void *), void *arg)
297 1.1 riastrad {
298 1.1 riastrad vaddr_t va = PMAP_DIRECT_MAP(pa);
299 1.1 riastrad
300 1.1 riastrad return process((void *)(va + pgoff), len, arg);
301 1.1 riastrad }
302 1.1 riastrad
303 1.1 riastrad #endif /* __HAVE_DIRECT_MAP */
304 1.1 riastrad
305 1.1 riastrad void pmap_changeprot_local(vaddr_t, vm_prot_t);
306 1.1 riastrad
307 1.1 riastrad #else /* !__x86_64__ */
308 1.1 riastrad
309 1.1 riastrad #include <i386/pmap_private.h>
310 1.1 riastrad
311 1.1 riastrad #endif /* __x86_64__ */
312 1.1 riastrad
313 1.1 riastrad #endif /* _AMD64_PMAP_PRIVATE_H_ */
314