/*	$NetBSD: pmap_segtab.c,v 1.33 2023/07/23 07:25:36 skrll Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.33 2023/07/23 07:25:36 skrll Exp $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */
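
/*
 * Illustrative sketch of the lookup arithmetic this file implements
 * (not part of the build).  The concrete widths are per-port values
 * from pmap.h; the masks below are how the code in this file indexes
 * each level for a virtual address va:
 *
 *	root:	(va >> XSEGSHIFT) & (PMAP_SEGTABSIZE - 1)
 *	seg:	(va >> SEGSHIFT) & (NSEGPG - 1)
 *	pte:	(va >> PGSHIFT) & (NPTEPG - 1)
 *
 * On 32-bit ports only the seg and pte levels exist.
 */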

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm.h>
#include <uvm/pmap/pmap.h>

#if defined(XSEGSHIFT) && XSEGSHIFT == SEGSHIFT
#undef XSEGSHIFT
#undef XSEGLENGTH
#undef NBXSEG
#undef NXSEGPG
#endif

#define MULT_CTASSERT(a,b)	__CTASSERT((a) < (b) || ((a) % (b) == 0))

__CTASSERT(sizeof(pmap_ptpage_t) == NBPG);

#if defined(PMAP_HWPAGEWALKER)
#ifdef _LP64
MULT_CTASSERT(PMAP_PDETABSIZE, NPDEPG);
MULT_CTASSERT(NPDEPG, PMAP_PDETABSIZE);
#endif /* _LP64 */
MULT_CTASSERT(sizeof(pmap_pdetab_t *), sizeof(pd_entry_t));
MULT_CTASSERT(sizeof(pd_entry_t), sizeof(pmap_pdetab_t));

#if 0
#ifdef _LP64
static const bool separate_pdetab_root_p = NPDEPG != PMAP_PDETABSIZE;
#else
static const bool separate_pdetab_root_p = true;
#endif /* _LP64 */
#endif

typedef struct {
	pmap_pdetab_t *free_pdetab0;	/* free list kept locally */
	pmap_pdetab_t *free_pdetab;	/* free list kept locally */
#ifdef DEBUG
	uint32_t nget;
	uint32_t nput;
	uint32_t npage;
#define	PDETAB_ADD(n, v)	(pmap_segtab_info.pdealloc.n += (v))
#else
#define	PDETAB_ADD(n, v)	((void) 0)
#endif /* DEBUG */
} pmap_pdetab_alloc_t;
#endif /* PMAP_HWPAGEWALKER */

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
#ifdef _LP64
__CTASSERT(NSEGPG >= PMAP_SEGTABSIZE);
__CTASSERT(NSEGPG % PMAP_SEGTABSIZE == 0);
#endif
__CTASSERT(NBPG >= sizeof(pmap_segtab_t));

typedef struct {
	pmap_segtab_t *free_segtab0;	/* free list kept locally */
	pmap_segtab_t *free_segtab;	/* free list kept locally */
#ifdef DEBUG
	uint32_t nget;
	uint32_t nput;
	uint32_t npage;
#define	SEGTAB_ADD(n, v)	(pmap_segtab_info.segalloc.n += (v))
#else
#define	SEGTAB_ADD(n, v)	((void) 0)
#endif
} pmap_segtab_alloc_t;
#endif /* !PMAP_HWPAGEWALKER || !PMAP_MAP_PDETABPAGE */

struct pmap_segtab_info {
#if defined(PMAP_HWPAGEWALKER)
	pmap_pdetab_alloc_t pdealloc;
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	pmap_segtab_alloc_t segalloc;
#endif
#ifdef PMAP_PPG_CACHE
	struct pgflist ptp_pgflist;	/* Keep a list of idle page tables. */
#endif
} pmap_segtab_info = {
#ifdef PMAP_PPG_CACHE
	.ptp_pgflist = LIST_HEAD_INITIALIZER(pmap_segtab_info.ptp_pgflist),
#endif
};

kmutex_t pmap_segtab_lock __cacheline_aligned;

#ifndef PMAP_HWPAGEWALKER
/*
 * Check that a seg_ppg[] array is empty.
 *
 * This is used when allocating or freeing a pmap_segtab_t.  The stb
 * should be unused -- meaning, all of its seg_ppg[] pointers should
 * be NULL.  That holds whenever a segtab changes hands: when it is
 * freshly allocated from the pmap pool; when, in the SMP case, two
 * CPUs race to allocate the same underlying segtab and the loser's
 * unused copy is freed by reserve; and when a segtab entry is
 * released to the freelist.
 */
static void
pmap_check_stb(pmap_segtab_t *stb, const char *caller, const char *why)
{
#ifdef DEBUG
	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
		if (stb->seg_ppg[i] != NULL) {
#define DEBUG_NOISY
#ifdef DEBUG_NOISY
			UVMHIST_FUNC(__func__);
			UVMHIST_CALLARGS(pmapxtabhist, "stb=%#jx",
			    (uintptr_t)stb, 0, 0, 0);
			for (size_t j = i; j < PMAP_SEGTABSIZE; j++)
				if (stb->seg_ppg[j] != NULL)
					printf("%s: stb->seg_ppg[%zu] = %p\n",
					    caller, j, stb->seg_ppg[j]);
#endif
			panic("%s: pm_segtab.seg_ppg[%zu] != 0 (%p): %s",
			    caller, i, stb->seg_ppg[i], why);
		}
	}
#endif
}
#endif /* PMAP_HWPAGEWALKER */

static inline struct vm_page *
pmap_pte_pagealloc(void)
{
	struct vm_page *pg;

	pg = pmap_md_alloc_poolpage(UVM_PGA_ZERO | UVM_PGA_USERESERVE);
	if (pg) {
#ifdef UVM_PAGE_TRKOWN
		pg->owner_tag = NULL;
#endif
		UVM_PAGE_OWN(pg, "pmap-ptp");
	}

	return pg;
}

#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
static vaddr_t
pmap_pde_to_va(pd_entry_t pde)
{
	if (!pte_pde_valid_p(pde))
		return 0;

	paddr_t pa = pte_pde_to_paddr(pde);
	return pmap_md_direct_map_paddr(pa);
}

#ifdef _LP64
static pmap_pdetab_t *
pmap_pde_to_pdetab(pd_entry_t pde)
{

	return (pmap_pdetab_t *)pmap_pde_to_va(pde);
}
#endif

static pmap_ptpage_t *
pmap_pde_to_ptpage(pd_entry_t pde)
{

	return (pmap_ptpage_t *)pmap_pde_to_va(pde);
}
#endif

#ifdef _LP64
__CTASSERT((XSEGSHIFT - SEGSHIFT) % (PGSHIFT-3) == 0);
#endif
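
/*
 * A worked instance of the assertion above, under assumed rather than
 * universal values: with PGSHIFT == 12 and 8-byte entries, one page
 * holds 2^(PGSHIFT - 3) == 512 entries, so every intermediate level
 * decodes PGSHIFT - 3 == 9 bits of VA.  The assertion then requires
 * XSEGSHIFT - SEGSHIFT to be a multiple of 9 here, i.e. the span
 * between the top level and the segment level must divide into whole
 * page-table levels, which the walk loops below rely on when they
 * step segshift down by PGSHIFT - 3 per level.
 */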

static inline pmap_ptpage_t *
pmap_ptpage(struct pmap *pmap, vaddr_t va)
{
#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
	vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
	pmap_pdetab_t *ptb = pmap->pm_pdetab;

	// UVMHIST_LOG(pmaphist, "pm_pdetab %#jx", ptb, 0, 0, 0);

	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va),
	    "pmap_kernel: %s, va %#" PRIxVADDR,
	    pmap == pmap_kernel() ? "true" : "false",
	    pmap == pmap_kernel() ? va : 0);

#ifdef _LP64
	for (size_t segshift = XSEGSHIFT;
	    segshift > SEGSHIFT;
	    segshift -= PGSHIFT - 3, pdetab_mask = NSEGPG - 1) {
		ptb = pmap_pde_to_pdetab(ptb->pde_pde[(va >> segshift) & pdetab_mask]);
		if (ptb == NULL)
			return NULL;
	}
#endif
	return pmap_pde_to_ptpage(ptb->pde_pde[(va >> SEGSHIFT) & pdetab_mask]);
#else
	vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
	pmap_segtab_t *stb = pmap->pm_segtab;

	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va),
	    "pmap %p va %#" PRIxVADDR, pmap, va);
#ifdef _LP64
	for (size_t segshift = XSEGSHIFT;
	    segshift > SEGSHIFT;
	    segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
		stb = stb->seg_seg[(va >> segshift) & segtab_mask];
		if (stb == NULL)
			return NULL;
	}
#endif
	return stb->seg_ppg[(va >> SEGSHIFT) & segtab_mask];
#endif
}
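
/*
 * Usage sketch for pmap_ptpage() (illustrative only; "pm" and "va"
 * are placeholder names, not part of this file):
 *
 *	pmap_ptpage_t * const ppg = pmap_ptpage(pm, va);
 *	if (ppg != NULL) {
 *		// ppg->ppg_ptes[] holds the NPTEPG ptes of va's segment
 *	}
 */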

#if defined(PMAP_HWPAGEWALKER)
bool
pmap_pdetab_fixup(struct pmap *pmap, vaddr_t va)
{
	struct pmap * const kpm = pmap_kernel();
	pmap_pdetab_t * const kptb = kpm->pm_pdetab;
	pmap_pdetab_t * const uptb = pmap->pm_pdetab;
	size_t idx = PMAP_PDETABSIZE - 1;
#if !defined(PMAP_MAP_PDETABPAGE)
	__CTASSERT(PMAP_PDETABSIZE == PMAP_SEGTABSIZE);
	pmap_segtab_t * const kstb = &pmap_kern_segtab;
	pmap_segtab_t * const ustb = pmap->pm_segtab;
#endif

	// Regardless of how many levels deep this page table is, we only
	// need to verify the first level PDEs match up.
#ifdef XSEGSHIFT
	idx &= va >> XSEGSHIFT;
#else
	idx &= va >> SEGSHIFT;
#endif
	if (uptb->pde_pde[idx] != kptb->pde_pde[idx]) {
		pte_pde_set(&uptb->pde_pde[idx], kptb->pde_pde[idx]);
#if !defined(PMAP_MAP_PDETABPAGE)
		ustb->seg_seg[idx] = kstb->seg_seg[idx];	// copy KVA of PTP
#endif
		return true;
	}
	return false;
}
#endif /* PMAP_HWPAGEWALKER */


static void
pmap_page_attach(pmap_t pmap, vaddr_t kva, struct vm_page *pg,
    struct pglist *pglist, voff_t off)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx kva %#jx pg %#jx list %#jx",
	    (uintptr_t)pmap, (uintptr_t)kva, (uintptr_t)pg, (uintptr_t)pglist);

	struct uvm_object * const uobj = &pmap->pm_uobject;
	if (pg == NULL) {
		paddr_t pa;

		bool ok __diagused = pmap_extract(pmap_kernel(), kva, &pa);
		KASSERT(ok);

		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg != NULL);
	}

	UVMHIST_LOG(pmapxtabhist, "kva %#jx uobj %#jx pg %#jx list %#jx",
	    (uintptr_t)kva, (uintptr_t)uobj, (uintptr_t)pg, (uintptr_t)pglist);

	pmap_lock(pmap);
	TAILQ_INSERT_TAIL(pglist, pg, pageq.queue);
	uobj->uo_npages++;
	pmap_unlock(pmap);

	/*
	 * Now set each vm_page that maps this page to point to the
	 * pmap and set the offset to what we want.
	 */
	KASSERTMSG(pg->uobject == NULL, "pg %p pg->uobject %p", pg, pg->uobject);
	pg->uobject = uobj;
	pg->offset = off;
}

static struct vm_page *
pmap_page_detach(pmap_t pmap, struct pglist *list, vaddr_t va)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx kva %#jx list %#jx",
	    (uintptr_t)pmap, (uintptr_t)va, (uintptr_t)list, 0);

	paddr_t pa;
	bool ok __diagused = pmap_extract(pmap_kernel(), va, &pa);
	KASSERT(ok);

	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	struct uvm_object * const uobj = &pmap->pm_uobject;

	UVMHIST_LOG(pmapxtabhist, "kva %#jx uobj %#jx pg %#jx list %#jx",
	    (uintptr_t)va, (uintptr_t)uobj, (uintptr_t)pg, (uintptr_t)list);

	KASSERTMSG(pg->uobject == uobj, "pg->uobject %p vs uobj %p",
	    pg->uobject, uobj);

	pmap_lock(pmap);
	TAILQ_REMOVE(list, pg, pageq.queue);
	uobj->uo_npages--;
	pmap_unlock(pmap);

	pg->uobject = NULL;
	pg->offset = 0;

	return pg;
}

#ifndef PMAP_PPG_CACHE
static void
pmap_segtab_pagefree(pmap_t pmap, struct pglist *list, vaddr_t kva, size_t size)
{
#ifdef PMAP_MAP_PTEPAGE
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx list %#jx kva %#jx size %#jx",
	    (uintptr_t)pmap, (uintptr_t)list, kva, size);
	KASSERT(size == PAGE_SIZE);
	if (size == PAGE_SIZE) {
		UVMHIST_LOG(pmapxtabhist, "about to detach (kva %#jx)",
		    kva, 0, 0, 0);
		uvm_pagefree(pmap_page_detach(pmap, list, kva));
		return;
	}
#endif
	for (size_t i = 0; i < size; i += PAGE_SIZE) {
		(void)pmap_page_detach(pmap, list, kva + i);
	}

	uvm_km_free(kernel_map, kva, size, UVM_KMF_WIRED);
}
#endif

pt_entry_t *
pmap_pte_lookup(pmap_t pmap, vaddr_t va)
{
	pmap_ptpage_t * const ppg = pmap_ptpage(pmap, va);
	if (ppg == NULL)
		return NULL;

	const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);

	return ppg->ppg_ptes + pte_idx;
}
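
/*
 * Caller sketch for pmap_pte_lookup() (illustrative only; "pm" and
 * "va" are placeholders, and pte_valid_p() is the validity predicate
 * used elsewhere in the uvm/pmap code):
 *
 *	pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
 *	if (ptep != NULL && pte_valid_p(*ptep)) {
 *		// va is mapped; *ptep is its current pte
 *	}
 */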


static pmap_ptpage_t *
pmap_ptpage_alloc(pmap_t pmap, int flags, paddr_t *pa_p)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx flags %#jx pa_p %#jx",
	    (uintptr_t)pmap, (uintptr_t)flags, (uintptr_t)pa_p, 0);

	pmap_ptpage_t *ppg = NULL;

#ifdef PMAP_MAP_PTEPAGE
	struct vm_page *pg = NULL;
	paddr_t pa;
#ifdef PMAP_PPG_CACHE
	ppg = pmap_pgcache_alloc(&pmap_segtab_info.ppg_flist);
#endif
	if (ppg == NULL) {
		pg = pmap_pte_pagealloc();
		if (pg == NULL) {
			if (flags & PMAP_CANFAIL)
				return NULL;
			panic("%s: cannot allocate page table page",
			    __func__);
		}
		pa = VM_PAGE_TO_PHYS(pg);
		ppg = (pmap_ptpage_t *)PMAP_MAP_PTEPAGE(pa);
	} else {
		bool ok __diagused = pmap_extract(pmap_kernel(), (vaddr_t)ppg, &pa);
		KASSERT(ok);
	}

	UVMHIST_LOG(pmapxtabhist, "about to attach", 0, 0, 0, 0);
	pmap_page_attach(pmap, (vaddr_t)ppg, pg, &pmap->pm_ppg_list, 0);

	*pa_p = pa;
#else
	vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
	    UVM_KMF_WIRED | UVM_KMF_WAITVA
	    | (flags & PMAP_CANFAIL ? UVM_KMF_CANFAIL : 0));
	if (kva == 0) {
		if (flags & PMAP_CANFAIL)
			return NULL;
		panic("%s: cannot allocate page table page", __func__);
	}
	UVMHIST_LOG(pmapxtabhist, "about to attach", 0, 0, 0, 0);
	pmap_page_attach(pmap, kva, NULL, &pmap->pm_ppg_list, 0);
	ppg = (pmap_ptpage_t *)kva;
#endif

	UVMHIST_LOG(pmapxtabhist, "... ppg %#jx", (uintptr_t)ppg, 0, 0, 0);

	return ppg;
}

static void
pmap_ptpage_free(pmap_t pmap, pmap_ptpage_t *ppg, const char *caller)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx va %#jx", (uintptr_t)pmap,
	    (uintptr_t)ppg, 0, 0);

	const vaddr_t kva = (vaddr_t)ppg;
	/*
	 * All pte arrays should be page aligned.
	 */
	if ((kva & PAGE_MASK) != 0) {
		panic("%s: pte entry at %p not page aligned", caller, ppg);
	}

#ifdef DEBUG
	for (size_t j = 0; j < NPTEPG; j++) {
		if (ppg->ppg_ptes[j] != 0) {
			UVMHIST_LOG(pmapxtabhist,
			    "pte entry %#jx not 0 (%#jx)",
			    (uintptr_t)&ppg->ppg_ptes[j],
			    (uintptr_t)ppg->ppg_ptes[j], 0, 0);
			for (size_t i = j + 1; i < NPTEPG; i++)
				if (ppg->ppg_ptes[i] != 0)
					UVMHIST_LOG(pmapxtabhist,
					    "pte[%zu] = %#"PRIxPTE,
					    i, ppg->ppg_ptes[i], 0, 0);

			panic("%s: pte entry at %p not 0 (%#" PRIxPTE ")",
			    __func__, &ppg->ppg_ptes[j],
			    ppg->ppg_ptes[j]);
		}
	}
#endif
	//pmap_md_vca_clean(pg, (vaddr_t)ppg, NBPG);
#ifdef PMAP_PPG_CACHE
	UVMHIST_LOG(pmapxtabhist, "about to detach", 0, 0, 0, 0);
	pmap_page_detach(pmap, &pmap->pm_ppg_list, kva);
	pmap_segtab_pagecache(&pmap_segtab_info.ppg_flist, ppg);
#else
	pmap_segtab_pagefree(pmap, &pmap->pm_ppg_list, kva, PAGE_SIZE);
#endif /* PMAP_PPG_CACHE */
}


#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)

static pmap_pdetab_t *
pmap_pdetab_alloc(struct pmap *pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);

	pmap_pdetab_t *ptb;
#ifdef UVMHIST
	bool found_on_freelist = false;
#endif

again:
	mutex_spin_enter(&pmap_segtab_lock);
	UVMHIST_LOG(pmapxtabhist, "free_pdetab %#jx",
	    (uintptr_t)pmap_segtab_info.pdealloc.free_pdetab, 0, 0, 0);
	if (__predict_true((ptb = pmap_segtab_info.pdealloc.free_pdetab) != NULL)) {
		pmap_segtab_info.pdealloc.free_pdetab = ptb->pde_next;

		UVMHIST_LOG(pmapxtabhist, "freelist ptb=%#jx",
		    (uintptr_t)ptb, 0, 0, 0);

		PDETAB_ADD(nget, 1);
		ptb->pde_next = NULL;
#ifdef UVMHIST
		found_on_freelist = true;
#endif
	}
	mutex_spin_exit(&pmap_segtab_lock);

	struct vm_page *ptb_pg = NULL;
	if (__predict_false(ptb == NULL)) {
		ptb_pg = pmap_pte_pagealloc();

		UVMHIST_LOG(pmapxtabhist, "ptb_pg=%#jx",
		    (uintptr_t)ptb_pg, 0, 0, 0);
		if (__predict_false(ptb_pg == NULL)) {
			/*
			 * XXX What else can we do?  Could we deadlock here?
			 */
			uvm_wait("pdetab");
			goto again;
		}

		UVMHIST_LOG(pmapxtabhist, "ptb_pg=%#jx 2",
		    (uintptr_t)ptb_pg, 0, 0, 0);
		PDETAB_ADD(npage, 1);
		const paddr_t ptb_pa = VM_PAGE_TO_PHYS(ptb_pg);
		UVMHIST_LOG(pmapxtabhist, "ptb_pa=%#jx", (uintptr_t)ptb_pa, 0, 0, 0);
		ptb = (pmap_pdetab_t *)PMAP_MAP_PDETABPAGE(ptb_pa);
		UVMHIST_LOG(pmapxtabhist, "new ptb=%#jx", (uintptr_t)ptb, 0,
		    0, 0);

		if (pte_invalid_pde() != 0) {
			for (size_t i = 0; i < NPDEPG; i++) {
				ptb->pde_pde[i] = pte_invalid_pde();
			}
		}
	}

	UVMHIST_LOG(pmapxtabhist, "about to attach", 0, 0, 0, 0);
	pmap_page_attach(pmap, (vaddr_t)ptb, ptb_pg, &pmap->pm_pdetab_list, 0);

	UVMHIST_LOG(pmapxtabhist, "... ptb %#jx found on freelist %d",
	    (uintptr_t)ptb, found_on_freelist, 0, 0);

	return ptb;
}


#else
/*
 * Allocate a segment table for a pmap.
 *
 * The segtab is taken from the per-module freelist when one is
 * available; otherwise a fresh, zeroed page is allocated from UVM
 * to back it.
 */
static pmap_segtab_t *
pmap_segtab_alloc(struct pmap *pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);

	pmap_segtab_t *stb;
	bool found_on_freelist = false;

again:
	mutex_spin_enter(&pmap_segtab_lock);
	if (__predict_true((stb = pmap_segtab_info.segalloc.free_segtab) != NULL)) {
		pmap_segtab_info.segalloc.free_segtab = stb->seg_next;
		SEGTAB_ADD(nget, 1);
		stb->seg_next = NULL;
		found_on_freelist = true;
		UVMHIST_LOG(pmapxtabhist, "freelist stb=%#jx",
		    (uintptr_t)stb, 0, 0, 0);
	}
	mutex_spin_exit(&pmap_segtab_lock);

	struct vm_page *stb_pg = NULL;
	if (__predict_false(stb == NULL)) {
		stb_pg = pmap_pte_pagealloc();

		if (__predict_false(stb_pg == NULL)) {
			/*
			 * XXX What else can we do?  Could we deadlock here?
			 */
			uvm_wait("segtab");
			goto again;
		}
		SEGTAB_ADD(npage, 1);
		const paddr_t stb_pa = VM_PAGE_TO_PHYS(stb_pg);

		stb = (pmap_segtab_t *)PMAP_MAP_SEGTABPAGE(stb_pa);
		UVMHIST_LOG(pmapxtabhist, "new stb=%#jx", (uintptr_t)stb, 0,
		    0, 0);
#if 0
		CTASSERT(NBPG / sizeof(*stb) == 1);
		const size_t n = NBPG / sizeof(*stb);
		if (n > 1) {
			/*
			 * link all the segtabs in this page together
			 */
			for (size_t i = 1; i < n - 1; i++) {
				stb[i].seg_next = &stb[i + 1];
			}
			/*
			 * Now link the new segtabs into the free segtab list.
			 */
			mutex_spin_enter(&pmap_segtab_lock);
			stb[n - 1].seg_next = pmap_segtab_info.segalloc.free_segtab;
			pmap_segtab_info.segalloc.free_segtab = stb + 1;
			SEGTAB_ADD(nput, n - 1);
			mutex_spin_exit(&pmap_segtab_lock);
		}
#endif
	}

	UVMHIST_LOG(pmapxtabhist, "about to attach", 0, 0, 0, 0);
	pmap_page_attach(pmap, (vaddr_t)stb, stb_pg, &pmap->pm_segtab_list, 0);

	pmap_check_stb(stb, __func__,
	    found_on_freelist ? "from free list" : "allocated");

	UVMHIST_LOG(pmapxtabhist, "... stb %#jx found on freelist %zu",
	    (uintptr_t)stb, found_on_freelist, 0, 0);

	return stb;
}
#endif

#if defined(PMAP_HWPAGEWALKER)
static void
pmap_pdetab_free(pmap_pdetab_t *ptb)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "ptb %#jx", (uintptr_t)ptb, 0, 0, 0);
	/*
	 * Insert the pdetab into the pdetab freelist.
	 */
	mutex_spin_enter(&pmap_segtab_lock);
	ptb->pde_next = pmap_segtab_info.pdealloc.free_pdetab;
	pmap_segtab_info.pdealloc.free_pdetab = ptb;
	PDETAB_ADD(nput, 1);
	mutex_spin_exit(&pmap_segtab_lock);
}
#endif


#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
/*
 * Insert the segtab into the segtab freelist.
 */
static void
pmap_segtab_free(pmap_segtab_t *stb)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "stb %#jx", (uintptr_t)stb, 0, 0, 0);

	mutex_spin_enter(&pmap_segtab_lock);
	stb->seg_next = pmap_segtab_info.segalloc.free_segtab;
	pmap_segtab_info.segalloc.free_segtab = stb;
	SEGTAB_ADD(nput, 1);
	mutex_spin_exit(&pmap_segtab_lock);
}
#endif

#if defined(PMAP_HWPAGEWALKER)
static void
pmap_pdetab_release(pmap_t pmap, pmap_pdetab_t **ptb_p, bool free_ptb,
    vaddr_t va, vsize_t vinc)
{
	const vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
	pmap_pdetab_t *ptb = *ptb_p;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx ptb_p %#jx ptb %#jx free %jd",
	    (uintptr_t)pmap, (uintptr_t)ptb_p, (uintptr_t)ptb, free_ptb);
	UVMHIST_LOG(pmapxtabhist, " va=%#jx vinc=%#jx",
	    (uintptr_t)va, (uintptr_t)vinc, 0, 0);

	for (size_t i = (va / vinc) & pdetab_mask;
	    i < PMAP_PDETABSIZE;
	    i++, va += vinc) {
#ifdef _LP64
		if (vinc > NBSEG) {
			if (pte_pde_valid_p(ptb->pde_pde[i])) {
				pmap_pdetab_t *nptb =
				    pmap_pde_to_pdetab(ptb->pde_pde[i]);
				UVMHIST_LOG(pmapxtabhist,
				    " va %#jx ptp->pde_pde[%jd] (*%#jx) = %#jx "
				    "recursing", va, i, &ptb->pde_pde[i],
				    ptb->pde_pde[i]);
				pmap_pdetab_release(pmap, &nptb, true,
				    va, vinc / NPDEPG);
				ptb->pde_pde[i] = pte_invalid_pde();
				KASSERT(nptb == NULL);
			}
			continue;
		}
#endif
		KASSERT(vinc == NBSEG);

		/* get pointer to PT page */
		pmap_ptpage_t *ppg = pmap_pde_to_ptpage(ptb->pde_pde[i]);
		UVMHIST_LOG(pmapxtabhist,
		    " va %#jx ptb->pde_pde[%jd] (*%#jx) = %#jx", va, i,
		    (uintptr_t)&ptb->pde_pde[i], ptb->pde_pde[i]);
		if (ppg == NULL)
			continue;

		UVMHIST_LOG(pmapxtabhist, " zeroing tab (%#jx)[%jd] (%#jx)",
		    (uintptr_t)ptb->pde_pde, i, (uintptr_t)&ptb->pde_pde[i], 0);

		ptb->pde_pde[i] = pte_invalid_pde();

		pmap_ptpage_free(pmap, ppg, __func__);
	}

	if (free_ptb) {
		UVMHIST_LOG(pmapxtabhist, " ptbp %#jx ptb %#jx",
		    (uintptr_t)ptb_p, (uintptr_t)ptb, 0, 0);
		const vaddr_t kva = (vaddr_t)ptb;
		UVMHIST_LOG(pmapxtabhist, "about to detach", 0, 0, 0, 0);
		pmap_page_detach(pmap, &pmap->pm_pdetab_list, kva);
		pmap_pdetab_free(ptb);
		*ptb_p = NULL;
	}
}
#endif

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
static void
pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stb_p, bool free_stb,
    pte_callback_t callback, uintptr_t flags, vaddr_t va, vsize_t vinc)
{
	pmap_segtab_t *stb = *stb_p;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm=%#jx stb_p=%#jx free=%jd",
	    (uintptr_t)pmap, (uintptr_t)stb, free_stb, 0);
	UVMHIST_LOG(pmapxtabhist, " callback=%#jx flags=%#jx va=%#jx vinc=%#jx",
	    (uintptr_t)callback, flags, (uintptr_t)va, (uintptr_t)vinc);

	for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
	    i < PMAP_SEGTABSIZE;
	    i++, va += vinc) {
#ifdef _LP64
		if (vinc > NBSEG) {
			if (stb->seg_seg[i] != NULL) {
				UVMHIST_LOG(pmapxtabhist,
				    " recursing %jd", i, 0, 0, 0);
				pmap_segtab_release(pmap, &stb->seg_seg[i],
				    true, callback, flags, va, vinc / NSEGPG);
				KASSERT(stb->seg_seg[i] == NULL);
			}
			continue;
		}
#endif
		KASSERT(vinc == NBSEG);

		/* get pointer to segment map */
		pmap_ptpage_t *ppg = stb->seg_ppg[i];
		if (ppg == NULL)
			continue;

		/*
		 * If our caller wants a callback, do so.
		 */
		if (callback != NULL) {
			(*callback)(pmap, va, va + vinc, ppg->ppg_ptes, flags);
		}
		pmap_ptpage_free(pmap, ppg, __func__);
		stb->seg_ppg[i] = NULL;
		UVMHIST_LOG(pmapxtabhist, " zeroing tab[%jd]", i, 0, 0, 0);
	}

	if (free_stb) {
		pmap_check_stb(stb, __func__,
		    vinc == NBSEG ? "release seg" : "release xseg");

		const vaddr_t kva = (vaddr_t)stb;
		UVMHIST_LOG(pmapxtabhist, "about to detach", 0, 0, 0, 0);
		pmap_page_detach(pmap, &pmap->pm_segtab_list, kva);
		pmap_segtab_free(stb);
		*stb_p = NULL;
	}
}
#endif


/*
 * Allocate the top segment table for the pmap.
 */
void
pmap_segtab_init(pmap_t pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	/*
	 * Constantly converting from extracted PA to VA is somewhat expensive
	 * for systems with hardware page walkers and without an inexpensive
	 * way to access arbitrary virtual addresses, so we allocate an extra
	 * root segtab so that it can contain non-virtual addresses.
	 */
	pmap->pm_segtab = pmap_segtab_alloc(pmap);
#endif
#if defined(PMAP_HWPAGEWALKER)
	pmap->pm_pdetab = pmap_pdetab_alloc(pmap);
	pmap_md_pdetab_init(pmap);
#endif
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags)
{
	KASSERT(pmap != pmap_kernel());
#ifdef _LP64
	const vsize_t vinc = NBXSEG;
#else
	const vsize_t vinc = NBSEG;
#endif

#if defined(PMAP_HWPAGEWALKER)
	if (pmap->pm_pdetab != NULL) {
		pmap_md_pdetab_fini(pmap);
		pmap_pdetab_release(pmap, &pmap->pm_pdetab,
		    true, pmap->pm_minaddr, vinc);
	}
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	if (pmap->pm_segtab != NULL) {
		pmap_segtab_release(pmap, &pmap->pm_segtab,
		    func == NULL, func, flags, pmap->pm_minaddr, vinc);
	}
#endif

#if defined(PMAP_HWPAGEWALKER)
#if !defined(PMAP_MAP_PDETABPAGE)
	KASSERT((pmap->pm_segtab == NULL) == (pmap->pm_pdetab == NULL));
#endif
	KASSERT(pmap->pm_pdetab == NULL);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	KASSERT(pmap->pm_segtab == NULL);
#endif
}

/*
 * Make a new pmap (vmspace) active for the given process.
 */
void
pmap_segtab_activate(struct pmap *pm, struct lwp *l)
{
	if (l == curlwp) {
		KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap);
		pmap_md_xtab_activate(pm, l);
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
		struct cpu_info * const ci = l->l_cpu;
		if (pm == pmap_kernel()) {
			ci->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
			ci->ci_pmap_user_seg0tab = PMAP_INVALID_SEGTAB_ADDRESS;
#endif
		} else {
			ci->ci_pmap_user_segtab = pm->pm_segtab;
#ifdef _LP64
			ci->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
#endif
		}
#endif
	}
}

void
pmap_segtab_deactivate(pmap_t pm)
{
	pmap_md_xtab_deactivate(pm);

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
	curcpu()->ci_pmap_user_seg0tab = NULL;
#endif
#endif
}

/*
 * Act on the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly rounded to
 * the page size.
 */
void
pmap_pte_process(pmap_t pmap, vaddr_t sva, vaddr_t eva,
    pte_callback_t callback, uintptr_t flags)
{
#if 0
	printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n",
	    __func__, pmap, sva, eva, callback, flags);
#endif
	while (sva < eva) {
		vaddr_t lastseg_va = pmap_trunc_seg(sva) + NBSEG;
		if (lastseg_va == 0 || lastseg_va > eva)
			lastseg_va = eva;

		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, sva);
		if (ptep != NULL) {
			/*
			 * Callback to deal with the ptes for this segment.
			 */
			(*callback)(pmap, sva, lastseg_va, ptep, flags);
		}
		/*
		 * In theory we could release pages with no entries,
		 * but that takes more effort than we want here.
		 */
		sva = lastseg_va;
	}
}
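
/*
 * Sketch of a pte_callback_t as pmap_pte_process() invokes it above
 * (illustrative only, never compiled): each call covers one run of
 * ptes within a single segment, so the callback may step linearly.
 */
#if 0
static void
example_pte_op(pmap_t pmap, vaddr_t sva, vaddr_t eva, pt_entry_t *ptep,
    uintptr_t flags)
{
	for (; sva < eva; sva += PAGE_SIZE, ptep++) {
		/* inspect or modify *ptep for the page at sva */
	}
}
#endif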

#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
static pd_entry_t *
pmap_pdetab_reserve(struct pmap *pmap, vaddr_t va)
#elif defined(PMAP_HWPAGEWALKER)
static pmap_ptpage_t **
pmap_segtab_reserve(struct pmap *pmap, vaddr_t va, pd_entry_t **pde_p)
#else
static pmap_ptpage_t **
pmap_segtab_reserve(struct pmap *pmap, vaddr_t va)
#endif
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "pm %#jx va %#jx", (uintptr_t)pmap,
	    (uintptr_t)va, 0, 0);

#if defined(PMAP_HWPAGEWALKER)
	pmap_pdetab_t *ptb = pmap->pm_pdetab;
	UVMHIST_LOG(pmaphist, "pm_pdetab %#jx", (uintptr_t)ptb, 0, 0, 0);
#endif
#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
	vaddr_t segtab_mask = PMAP_PDETABSIZE - 1;
#ifdef _LP64
	for (size_t segshift = XSEGSHIFT;
	    segshift > SEGSHIFT;
	    segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
		pd_entry_t * const pde_p =
		    &ptb->pde_pde[(va >> segshift) & segtab_mask];
		pd_entry_t opde = *pde_p;

		UVMHIST_LOG(pmaphist,
		    "ptb %#jx segshift %jd pde_p %#jx opde %#jx",
		    ptb, segshift, pde_p, opde);

		if (__predict_false(!pte_pde_valid_p(opde))) {
			ptb = pmap_pdetab_alloc(pmap);
			pd_entry_t npde = pte_pde_pdetab(
			    pmap_md_direct_mapped_vaddr_to_paddr((vaddr_t)ptb),
			    pmap == pmap_kernel());
			opde = pte_pde_cas(pde_p, opde, npde);
			if (__predict_false(pte_pde_valid_p(opde))) {
				const vaddr_t kva = (vaddr_t)ptb;
				UVMHIST_LOG(pmapxtabhist, "about to detach",
				    0, 0, 0, 0);
				pmap_page_detach(pmap, &pmap->pm_pdetab_list,
				    kva);
				pmap_pdetab_free(ptb);
			} else {
				opde = npde;
			}
		}
		ptb = pmap_pde_to_pdetab(opde);
		UVMHIST_LOG(pmaphist, "opde %#jx ptb %#jx", opde, ptb, 0, 0);
	}
#elif defined(XSEGSHIFT)
	size_t segshift = XSEGSHIFT;

	pd_entry_t opde = ptb->pde_pde[(va >> segshift) & segtab_mask];
	KASSERT(pte_pde_valid_p(opde));
	ptb = pmap_pde_to_pdetab(opde);
	segtab_mask = NSEGPG - 1;
#endif /* _LP64 */
	const size_t idx = (va >> SEGSHIFT) & segtab_mask;

	UVMHIST_LOG(pmaphist, "... returning %#jx (idx %jd)",
	    (uintptr_t)&ptb->pde_pde[idx], idx, 0, 0);

	return &ptb->pde_pde[idx];
#else /* PMAP_HWPAGEWALKER && PMAP_MAP_PDETABPAGE */
	pmap_segtab_t *stb = pmap->pm_segtab;
	vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
#ifdef _LP64
	for (size_t segshift = XSEGSHIFT;
	    segshift > SEGSHIFT;
	    segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
		size_t idx = (va >> segshift) & segtab_mask;
		pmap_segtab_t ** const stb_p = &stb->seg_seg[idx];
#if defined(PMAP_HWPAGEWALKER)
		pmap_pdetab_t ** const ptb_p = &ptb->pde_pde[idx];
#endif /* PMAP_HWPAGEWALKER */
		if (__predict_false((stb = *stb_p) == NULL)) {
			stb = pmap_segtab_alloc(pmap);
#ifdef MULTIPROCESSOR
			pmap_segtab_t *ostb = atomic_cas_ptr(stb_p, NULL, stb);
			if (__predict_false(ostb != NULL)) {
				const vaddr_t kva = (vaddr_t)stb;
				UVMHIST_LOG(pmapxtabhist, "about to detach",
				    0, 0, 0, 0);
				pmap_page_detach(pmap, &pmap->pm_segtab_list,
				    kva);
				pmap_segtab_free(stb);
				stb = ostb;
			}
#else
			*stb_p = stb;
#endif /* MULTIPROCESSOR */
		}
	}
#elif defined(PMAP_HWPAGEWALKER)
	const size_t segshift = XSEGSHIFT;

	pd_entry_t opde = ptb->pde_pde[(va >> segshift) & segtab_mask];
	KASSERT(pte_pde_valid_p(opde));
	ptb = pmap_pde_to_pdetab(opde);
	segtab_mask = NSEGPG - 1;

#endif /* _LP64 */
	size_t idx = (va >> SEGSHIFT) & segtab_mask;
#if defined(PMAP_HWPAGEWALKER)
	*pde_p = &ptb->pde_pde[idx];
#endif /* PMAP_HWPAGEWALKER */
	return &stb->seg_ppg[idx];
#endif
}


/*
 * Return a pointer for the pte that corresponds to the specified virtual
 * address (va) in the target physical map, allocating if needed.
 */
pt_entry_t *
pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "pm=%#jx va=%#jx flags=%#jx",
	    (uintptr_t)pmap, (uintptr_t)va, flags, 0);
	pmap_ptpage_t *ppg;
	paddr_t pa = 0;

#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
	pd_entry_t * const pde_p = pmap_pdetab_reserve(pmap, va);
	ppg = pmap_pde_to_ptpage(*pde_p);
#elif defined(PMAP_HWPAGEWALKER)
	pd_entry_t *pde_p;
	pmap_ptpage_t ** const ppg_p = pmap_segtab_reserve(pmap, va, &pde_p);
	ppg = *ppg_p;
#else
	pmap_ptpage_t ** const ppg_p = pmap_segtab_reserve(pmap, va);
	ppg = *ppg_p;
#endif

	if (__predict_false(ppg == NULL)) {
		ppg = pmap_ptpage_alloc(pmap, flags, &pa);
		if (__predict_false(ppg == NULL))
			return NULL;

#if defined(PMAP_HWPAGEWALKER)
		pd_entry_t npde = pte_pde_ptpage(pa, pmap == pmap_kernel());
#endif
#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
		pd_entry_t opde = pte_pde_cas(pde_p, pte_invalid_pde(), npde);
		if (__predict_false(pte_pde_valid_p(opde))) {
			pmap_ptpage_free(pmap, ppg, __func__);
			ppg = pmap_pde_to_ptpage(opde);
		}
#else
#ifdef MULTIPROCESSOR
		pmap_ptpage_t *oppg = atomic_cas_ptr(ppg_p, NULL, ppg);
		/*
		 * If another thread allocated the segtab needed for this va
		 * free the page we just allocated.
		 */
		if (__predict_false(oppg != NULL)) {
			pmap_ptpage_free(pmap, ppg, __func__);
			ppg = oppg;
#if defined(PMAP_HWPAGEWALKER)
		} else {
			pte_pde_set(pde_p, npde);
#endif
		}
#else /* !MULTIPROCESSOR */
		*ppg_p = ppg;
#endif /* MULTIPROCESSOR */
#endif /* PMAP_HWPAGEWALKER && PMAP_MAP_PDETABPAGE */
	}

	const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);

	return ppg->ppg_ptes + pte_idx;
}
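
/*
 * Caller sketch for pmap_pte_reserve() (illustrative only; "pm", "va"
 * and the ENOMEM return convention are placeholders, not part of this
 * file's API):
 *
 *	pt_entry_t * const ptep = pmap_pte_reserve(pm, va, PMAP_CANFAIL);
 *	if (ptep == NULL)
 *		return ENOMEM;	// page table page allocation failed
 *	// construct the new pte and store it through ptep
 */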