/*	$NetBSD: pmap_private.h,v 1.1 2022/08/20 23:48:50 riastradh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_AMD64_PMAP_PRIVATE_H_
#define	_AMD64_PMAP_PRIVATE_H_

#ifdef __x86_64__

#if defined(_KERNEL_OPT)
#include "opt_xen.h"
#include "opt_kasan.h"
#include "opt_kmsan.h"
#include "opt_kubsan.h"
#endif

#include <sys/atomic.h>

#include <machine/pte.h>
#include <machine/segments.h>
#ifdef _KERNEL
#include <machine/cpufunc.h>
#endif

#include <uvm/uvm_object.h>
#ifdef XENPV
#include <xen/xenfunc.h>
#include <xen/xenpmap.h>
#endif

/*
 * Mask to get rid of the sign-extended part of addresses.
 */
#define VA_SIGN_MASK		0xffff000000000000
#define VA_SIGN_NEG(va)		((va) | VA_SIGN_MASK)
/* XXXfvdl this one's not right. */
#define VA_SIGN_POS(va)		((va) & ~VA_SIGN_MASK)
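
/*
 * Illustrative note (not from the original source): on x86-64 a canonical
 * virtual address has bits 63..48 equal to copies of bit 47, so kernel
 * addresses carry 0xffff in the top 16 bits.  With the macros above one
 * would expect, for example:
 *
 *	VA_SIGN_NEG(0x0000800000000000UL) == 0xffff800000000000UL
 *	VA_SIGN_POS(0xffff800000000000UL) == 0x0000800000000000UL
 *
 * VA_SIGN_POS simply masks the top bits off rather than undoing a true
 * sign extension, which is presumably what the XXXfvdl remark refers to.
 */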

#ifdef KASAN
#define L4_SLOT_KASAN		256
#define NL4_SLOT_KASAN		32
#endif

#ifdef KMSAN
#define L4_SLOT_KMSAN		256
#define NL4_SLOT_KMSAN		4
#endif

#define NL4_SLOT_DIRECT		32

#ifndef XENPV
#define L4_SLOT_PTE		slotspace.area[SLAREA_PTE].sslot
#else
#define L4_SLOT_PTE		509
#endif
#define L4_SLOT_KERN		slotspace.area[SLAREA_MAIN].sslot
#define L4_SLOT_KERNBASE	511	/* pl4_i(KERNBASE) */

#define PDIR_SLOT_USERLIM	255
#define PDIR_SLOT_KERN		L4_SLOT_KERN
#define PDIR_SLOT_PTE		L4_SLOT_PTE

/*
 * The following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE: the base VA of the recursive mapping of the PTD
 */

#ifndef XENPV
extern pt_entry_t *pte_base;
#define PTE_BASE	pte_base
#else
#define PTE_BASE	((pt_entry_t *)VA_SIGN_NEG((L4_SLOT_PTE * NBPD_L4)))
#endif

#define L1_BASE		PTE_BASE
#define L2_BASE		((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
#define L3_BASE		((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
#define L4_BASE		((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))

#define PDP_BASE	L4_BASE
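
/*
 * Illustrative note (not from the original source): with the recursive
 * (self-referencing) PML4 slot, the page-table pages themselves appear as
 * ordinary memory at the addresses above, so the entry controlling a given
 * virtual address can be found by indexing instead of walking the tree.
 * Assuming the pl*_i() index macros provided by the x86 pmap headers, the
 * idea is roughly:
 *
 *	pt_entry_t *pte = &PTE_BASE[pl1_i(va)];	(L1 entry mapping va)
 *	pd_entry_t *pde = &L2_BASE[pl2_i(va)];	(L2 entry mapping va)
 *
 * and PDP_BASE points at the PML4 (top-level page directory) itself.
 */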

#if defined(KMSAN)
#define NKL4_MAX_ENTRIES	(unsigned long)1	/* 512GB only */
#else
#define NKL4_MAX_ENTRIES	(unsigned long)64
#endif
#define NKL3_MAX_ENTRIES	(unsigned long)(NKL4_MAX_ENTRIES * 512)
#define NKL2_MAX_ENTRIES	(unsigned long)(NKL3_MAX_ENTRIES * 512)
#define NKL1_MAX_ENTRIES	(unsigned long)(NKL2_MAX_ENTRIES * 512)

#define NKL4_KIMG_ENTRIES	1
#define NKL3_KIMG_ENTRIES	1
#if defined(KUBSAN) || defined(KMSAN)
#define NKL2_KIMG_ENTRIES	64	/* really big kernel */
#else
#define NKL2_KIMG_ENTRIES	48
#endif
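
/*
 * Illustrative note (not from the original source): each L2 (PDE) entry
 * covers NBPD_L2 = 2MB of virtual address space on amd64, so the kernel
 * image window above is roughly 48 * 2MB = 96MB, or 64 * 2MB = 128MB when
 * the instrumented (KUBSAN/KMSAN) kernels need the extra room.
 */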

/*
 * Since kva space is below the kernel in its entirety, we start off
 * with zero entries on each level.
 */
#define NKL4_START_ENTRIES	0
#define NKL3_START_ENTRIES	0
#define NKL2_START_ENTRIES	0
#define NKL1_START_ENTRIES	0

#define PTP_MASK_INITIALIZER	{ L1_MASK, L2_MASK, L3_MASK, L4_MASK }
#define PTP_FRAME_INITIALIZER	{ L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
#define PTP_SHIFT_INITIALIZER	{ L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
#define NKPTP_INITIALIZER	{ NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
				  NKL3_START_ENTRIES, NKL4_START_ENTRIES }
#define NKPTPMAX_INITIALIZER	{ NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
				  NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
#define NBPD_INITIALIZER	{ NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
#define PDES_INITIALIZER	{ L2_BASE, L3_BASE, L4_BASE }

/*
 * PTE_AVL usage: we make use of the ignored bits of the PTE
 */
#define PTE_WIRED	PTE_AVL1	/* Wired Mapping */
#define PTE_PVLIST	PTE_AVL2	/* Mapping has entry on pvlist */
#define PTE_X		0		/* Dummy */

/* XXX To be deleted. */
#define PG_W		PTE_WIRED
#define PG_PVLIST	PTE_PVLIST
#define PG_X		PTE_X
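
/*
 * Illustrative note (not from the original source): these software-defined
 * bits travel in the PTE alongside the hardware-defined ones, so code that
 * already holds a PTE value can test them directly, e.g.
 *
 *	if ((opte & PTE_PVLIST) != 0)
 *		(the mapping has a pv entry that must be removed too)
 *
 * PTE_WIRED marks mappings that are counted as wired and must not be
 * removed by pageout.
 */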

void svs_pmap_sync(struct pmap *, int);
void svs_ldt_sync(struct pmap *);
void svs_lwp_switch(struct lwp *, struct lwp *);
void svs_pdir_switch(struct pmap *);
void svs_init(void);
extern bool svs_enabled;
extern bool svs_pcid;

#define	_MACHINE_PMAP_PRIVATE_H_X86
#include <x86/pmap_private.h>
#undef	_MACHINE_PMAP_PRIVATE_H_X86

#ifndef XENPV
#define pmap_pa2pte(a)			(a)
#define pmap_pte2pa(a)			((a) & PTE_FRAME)
#define pmap_pte_set(p, n)		do { *(p) = (n); } while (0)
#define pmap_pte_cas(p, o, n)		atomic_cas_64((p), (o), (n))
#define pmap_pte_testset(p, n)		\
    atomic_swap_ulong((volatile unsigned long *)p, n)
#define pmap_pte_setbits(p, b)		\
    atomic_or_ulong((volatile unsigned long *)p, b)
#define pmap_pte_clearbits(p, b)	\
    atomic_and_ulong((volatile unsigned long *)p, ~(b))
#define pmap_pte_flush()		/* nothing */
#else
extern kmutex_t pte_lock;

static __inline pt_entry_t
pmap_pa2pte(paddr_t pa)
{
	return (pt_entry_t)xpmap_ptom_masked(pa);
}

static __inline paddr_t
pmap_pte2pa(pt_entry_t pte)
{
	return xpmap_mtop_masked(pte & PTE_FRAME);
}

static __inline void
pmap_pte_set(pt_entry_t *pte, pt_entry_t npte)
{
	int s = splvm();
	xpq_queue_pte_update(xpmap_ptetomach(pte), npte);
	splx(s);
}

static __inline pt_entry_t
pmap_pte_cas(volatile pt_entry_t *ptep, pt_entry_t o, pt_entry_t n)
{
	pt_entry_t opte;

	mutex_enter(&pte_lock);
	opte = *ptep;
	if (opte == o) {
		xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(ptep)), n);
		xpq_flush_queue();
	}

	mutex_exit(&pte_lock);
	return opte;
}

static __inline pt_entry_t
pmap_pte_testset(volatile pt_entry_t *pte, pt_entry_t npte)
{
	pt_entry_t opte;

	mutex_enter(&pte_lock);
	opte = *pte;
	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), npte);
	xpq_flush_queue();
	mutex_exit(&pte_lock);
	return opte;
}

static __inline void
pmap_pte_setbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
	mutex_enter(&pte_lock);
	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)), (*pte) | bits);
	xpq_flush_queue();
	mutex_exit(&pte_lock);
}

static __inline void
pmap_pte_clearbits(volatile pt_entry_t *pte, pt_entry_t bits)
{
	mutex_enter(&pte_lock);
	xpq_queue_pte_update(xpmap_ptetomach(__UNVOLATILE(pte)),
	    (*pte) & ~bits);
	xpq_flush_queue();
	mutex_exit(&pte_lock);
}

static __inline void
pmap_pte_flush(void)
{
	int s = splvm();
	xpq_flush_queue();
	splx(s);
}
#endif
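
/*
 * Illustrative sketch (not part of the original header): the accessors
 * above give native and XENPV kernels the same read-modify-write idiom.
 * A hypothetical caller that wants to clear a bit and act on the old
 * value would loop on the compare-and-swap and then flush:
 *
 *	volatile pt_entry_t *ptep = ...;
 *	pt_entry_t opte, npte;
 *
 *	do {
 *		opte = *ptep;
 *		npte = opte & ~PTE_WIRED;
 *	} while (pmap_pte_cas(ptep, opte, npte) != opte);
 *	pmap_pte_flush();
 *
 * On native hardware the final flush is a no-op; under XENPV it pushes
 * any queued hypervisor PTE updates.
 */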

#ifdef __HAVE_DIRECT_MAP
#define	PMAP_DIRECT

static __inline int
pmap_direct_process(paddr_t pa, voff_t pgoff, size_t len,
    int (*process)(void *, size_t, void *), void *arg)
{
	vaddr_t va = PMAP_DIRECT_MAP(pa);

	return process((void *)(va + pgoff), len, arg);
}

#endif /* __HAVE_DIRECT_MAP */
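
/*
 * Illustrative sketch (not part of the original header): because the
 * direct map exposes physical memory at a fixed virtual offset,
 * pmap_direct_process() can hand a page's contents to a callback without
 * creating a temporary mapping.  A hypothetical caller zeroing part of a
 * page might look like:
 *
 *	static int
 *	zero_cb(void *va, size_t len, void *arg)
 *	{
 *		memset(va, 0, len);
 *		return 0;
 *	}
 *
 *	error = pmap_direct_process(pa, offset, size, zero_cb, NULL);
 */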

void	pmap_changeprot_local(vaddr_t, vm_prot_t);

#else	/* !__x86_64__ */

#include <i386/pmap_private.h>

#endif	/* __x86_64__ */

#endif	/* _AMD64_PMAP_PRIVATE_H_ */