/* $NetBSD: pmap_segtab.c,v 1.28 2022/09/25 06:21:58 skrll Exp $ */

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.28 2022/09/25 06:21:58 skrll Exp $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */
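
/*
 * Overview sketch (illustrative only, not compiled): the full lookup
 * path through the segtab hierarchy, as implemented piecewise by
 * pmap_segmap(), pmap_pte_lookup() and pmap_pte_reserve() below.  The
 * shift/size macros (XSEGSHIFT, SEGSHIFT, PGSHIFT, NSEGPG,
 * PMAP_SEGTABSIZE, NPTEPG) are machine-dependent.
 *
 *	pmap_segtab_t *stb = pmap->pm_segtab;
 * #ifdef _LP64
 *	// 64-bit: an extra level, indexed by the XSEG bits of the va
 *	stb = stb->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
 * #endif
 *	// segment level: each entry names one page worth of PTEs
 *	pt_entry_t *pte = stb->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
 *	// page level: index of the PTE within that page
 *	pte += (va >> PGSHIFT) & (NPTEPG - 1);
 */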

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

CTASSERT(NBPG >= sizeof(pmap_segtab_t));

struct pmap_segtab_info {
	pmap_segtab_t *free_segtab;	/* free list kept locally */
#ifdef DEBUG
	uint32_t nget_segtab;
	uint32_t nput_segtab;
	uint32_t npage_segtab;
#define	SEGTAB_ADD(n, v)	(pmap_segtab_info.n ## _segtab += (v))
#else
#define	SEGTAB_ADD(n, v)	((void) 0)
#endif
#ifdef PMAP_PTP_CACHE
	struct pgflist ptp_pgflist;	/* Keep a list of idle page tables. */
#endif
} pmap_segtab_info = {
#ifdef PMAP_PTP_CACHE
	.ptp_pgflist = LIST_HEAD_INITIALIZER(pmap_segtab_info.ptp_pgflist),
#endif
};

kmutex_t pmap_segtab_lock __cacheline_aligned;
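
/*
 * Free segtabs are threaded onto pmap_segtab_info.free_segtab through
 * their seg_seg[0] pointer; pmap_segtab_alloc() clears that slot again
 * when it takes a segtab off the list.  All freelist manipulation is
 * serialized by pmap_segtab_lock.
 */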

/*
 * Check that a seg_tab[] array is empty.
 *
 * This is used when allocating or freeing a pmap_segtab_t.  The stb
 * should be unused -- meaning, all of its seg_tab[] pointers should be
 * NULL.  This invariant must hold for a segtab freshly allocated from
 * the pmap pool, for the unused segtab in the SMP case where two CPUs
 * raced to allocate the same underlying segtab and the loser's copy
 * goes unused, when a segtab entry is released to the freelist, and,
 * for SMP, when pmap_pte_reserve() frees a freshly allocated but
 * unused entry.
 */
static void
pmap_check_stb(pmap_segtab_t *stb, const char *caller, const char *why)
{
#ifdef DEBUG
	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
		if (stb->seg_tab[i] != NULL) {
#define DEBUG_NOISY
#ifdef DEBUG_NOISY
			UVMHIST_FUNC(__func__);
			UVMHIST_CALLARGS(pmapsegtabhist, "stb=%#jx",
			    (uintptr_t)stb, 0, 0, 0);
			for (size_t j = i; j < PMAP_SEGTABSIZE; j++)
				if (stb->seg_tab[j] != NULL)
					printf("%s: stb->seg_tab[%zu] = %p\n",
					    caller, j, stb->seg_tab[j]);
#endif
			panic("%s: pm_segtab.seg_tab[%zu] != 0 (%p): %s",
			    caller, i, stb->seg_tab[i], why);
		}
	}
#endif
}

/*
 * Check that an array of ptes is actually zero.
 */
static void
pmap_check_ptes(pt_entry_t *pte, const char *caller)
{
	/*
	 * All pte arrays should be page aligned.
	 */
	if (((uintptr_t)pte & PAGE_MASK) != 0) {
		panic("%s: pte entry at %p not page aligned", caller, pte);
	}

#ifdef DEBUG
	for (size_t i = 0; i < NPTEPG; i++)
		if (pte[i] != 0) {
#ifdef DEBUG_NOISY
			UVMHIST_FUNC(__func__);
			UVMHIST_CALLARGS(pmapsegtabhist, "pte=%#jx",
			    (uintptr_t)pte, 0, 0, 0);
			for (size_t j = i + 1; j < NPTEPG; j++)
				if (pte[j] != 0)
					UVMHIST_LOG(pmapsegtabhist,
					    "pte[%zu] = %#"PRIxPTE,
					    j, pte_value(pte[j]), 0, 0);
#endif
			panic("%s: pte[%zu] entry at %p not 0 (%#"PRIxPTE")",
			    caller, i, &pte[i], pte_value(pte[i]));
		}
#endif
}

static inline struct vm_page *
pmap_pte_pagealloc(void)
{
	struct vm_page *pg;

	pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_ZERO|UVM_PGA_USERESERVE);
	if (pg) {
#ifdef UVM_PAGE_TRKOWN
		pg->owner_tag = NULL;
#endif
		UVM_PAGE_OWN(pg, "pmap-ptp");
	}

	return pg;
}

static inline pt_entry_t *
pmap_segmap(struct pmap *pmap, vaddr_t va)
{
	pmap_segtab_t *stb = pmap->pm_segtab;
	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va),
	    "pmap %p va %#" PRIxVADDR, pmap, va);
#ifdef _LP64
	stb = stb->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
	if (stb == NULL)
		return NULL;
#endif

	return stb->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
}

pt_entry_t *
pmap_pte_lookup(pmap_t pmap, vaddr_t va)
{
	pt_entry_t *pte = pmap_segmap(pmap, va);
	if (pte == NULL)
		return NULL;

	return pte + ((va >> PGSHIFT) & (NPTEPG - 1));
}
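
/*
 * Usage sketch (illustrative): pmap_pte_lookup() does not allocate, so
 * a NULL return only means no PTE page exists yet for that segment.
 * A caller checking for an existing mapping might do (pte_valid_p() is
 * the machine-dependent predicate used by the common pmap code):
 *
 *	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
 *	if (ptep == NULL || !pte_valid_p(*ptep))
 *		return false;		// no mapping at va
 *
 * Use pmap_pte_reserve() instead when the PTE page should be created
 * on demand.
 */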

/*
 * Insert the segtab into the segtab freelist.
 */
static void
pmap_segtab_free(pmap_segtab_t *stb)
{
	UVMHIST_FUNC(__func__);

	UVMHIST_CALLARGS(pmapsegtabhist, "stb=%#jx", (uintptr_t)stb, 0, 0, 0);

	mutex_spin_enter(&pmap_segtab_lock);
	stb->seg_seg[0] = pmap_segtab_info.free_segtab;
	pmap_segtab_info.free_segtab = stb;
	SEGTAB_ADD(nput, 1);
	mutex_spin_exit(&pmap_segtab_lock);
}

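/*
 * Walk one level of the segtab tree, invoking the callback on each PTE
 * page present in [va, va + PMAP_SEGTABSIZE * vinc) and then freeing
 * (or, with PMAP_PTP_CACHE, caching) the page.  On _LP64 the function
 * recurses one level per call, shrinking the per-entry span vinc by a
 * factor of NSEGPG each time, until vinc == NBSEG and the entries are
 * PTE pages rather than further segtabs.  If free_stb is true, the
 * emptied segtab itself is returned to the freelist.
 */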
static void
pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stb_p, bool free_stb,
	pte_callback_t callback, uintptr_t flags,
	vaddr_t va, vsize_t vinc)
{
	pmap_segtab_t *stb = *stb_p;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapsegtabhist, "pm=%#jx stb_p=%#jx free=%jd",
	    (uintptr_t)pmap, (uintptr_t)stb_p, free_stb, 0);
	UVMHIST_LOG(pmapsegtabhist, " callback=%#jx flags=%#jx va=%#jx vinc=%#jx",
	    (uintptr_t)callback, flags, (uintptr_t)va, (uintptr_t)vinc);
	for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
	    i < PMAP_SEGTABSIZE;
	    i++, va += vinc) {
#ifdef _LP64
		if (vinc > NBSEG) {
			if (stb->seg_seg[i] != NULL) {
				UVMHIST_LOG(pmapsegtabhist,
				    " recursing %jd", i, 0, 0, 0);
				pmap_segtab_release(pmap, &stb->seg_seg[i],
				    true, callback, flags, va, vinc / NSEGPG);
				KASSERT(stb->seg_seg[i] == NULL);
			}
			continue;
		}
#endif
		KASSERT(vinc == NBSEG);

		/* get pointer to segment map */
		pt_entry_t *pte = stb->seg_tab[i];
		if (pte == NULL)
			continue;
		pmap_check_ptes(pte, __func__);

		/*
		 * If our caller wants a callback, do so.
		 */
		if (callback != NULL) {
			(*callback)(pmap, va, va + vinc, pte, flags);
		}

		// PMAP_UNMAP_POOLPAGE should handle any VCA issues itself
		paddr_t pa = PMAP_UNMAP_POOLPAGE((vaddr_t)pte);
		struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
#ifdef PMAP_PTP_CACHE
		mutex_spin_enter(&pmap_segtab_lock);
		LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist, pg, pageq.list);
		mutex_spin_exit(&pmap_segtab_lock);
#else
		uvm_pagefree(pg);
#endif

		stb->seg_tab[i] = NULL;
		UVMHIST_LOG(pmapsegtabhist, " zeroing tab[%jd]", i, 0, 0, 0);
	}

	if (free_stb) {
		pmap_check_stb(stb, __func__,
		    vinc == NBSEG ? "release seg" : "release xseg");
		pmap_segtab_free(stb);
		*stb_p = NULL;
	}
}

/*
 * Allocate a segment table.
 *
 * The segtab is taken from the per-module freelist if one is
 * available; otherwise a fresh page is allocated, carved into
 * segtabs, and every segtab in the page except the returned one
 * is put on the freelist.
 */
static pmap_segtab_t *
pmap_segtab_alloc(void)
{
	pmap_segtab_t *stb;
	bool found_on_freelist = false;

	UVMHIST_FUNC(__func__);
again:
	mutex_spin_enter(&pmap_segtab_lock);
	if (__predict_true((stb = pmap_segtab_info.free_segtab) != NULL)) {
		pmap_segtab_info.free_segtab = stb->seg_seg[0];
		stb->seg_seg[0] = NULL;
		SEGTAB_ADD(nget, 1);
		found_on_freelist = true;
		UVMHIST_CALLARGS(pmapsegtabhist, "freelist stb=%#jx",
		    (uintptr_t)stb, 0, 0, 0);
	}
	mutex_spin_exit(&pmap_segtab_lock);

	if (__predict_false(stb == NULL)) {
		struct vm_page * const stb_pg = pmap_pte_pagealloc();

		if (__predict_false(stb_pg == NULL)) {
			/*
			 * XXX What else can we do?  Could we deadlock here?
			 */
			uvm_wait("segtab");
			goto again;
		}
		SEGTAB_ADD(npage, 1);
		const paddr_t stb_pa = VM_PAGE_TO_PHYS(stb_pg);

		stb = (pmap_segtab_t *)PMAP_MAP_POOLPAGE(stb_pa);
		UVMHIST_CALLARGS(pmapsegtabhist, "new stb=%#jx",
		    (uintptr_t)stb, 0, 0, 0);
		const size_t n = NBPG / sizeof(*stb);
		if (n > 1) {
			/*
			 * link all the segtabs in this page together
			 */
			for (size_t i = 1; i < n - 1; i++) {
				stb[i].seg_seg[0] = &stb[i+1];
			}
			/*
			 * Now link the new segtabs into the free segtab list.
			 */
			mutex_spin_enter(&pmap_segtab_lock);
			stb[n-1].seg_seg[0] = pmap_segtab_info.free_segtab;
			pmap_segtab_info.free_segtab = stb + 1;
			SEGTAB_ADD(nput, n - 1);
			mutex_spin_exit(&pmap_segtab_lock);
		}
	}

	pmap_check_stb(stb, __func__,
	    found_on_freelist ? "from free list" : "allocated");

	return stb;
}

/*
 * Allocate the top segment table for the pmap.
 */
void
pmap_segtab_init(pmap_t pmap)
{

	pmap->pm_segtab = pmap_segtab_alloc();
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags)
{
	if (pmap->pm_segtab == NULL)
		return;

#ifdef _LP64
	const vsize_t vinc = NBXSEG;
#else
	const vsize_t vinc = NBSEG;
#endif
	pmap_segtab_release(pmap, &pmap->pm_segtab,
	    func == NULL, func, flags, pmap->pm_minaddr, vinc);
}

/*
 * Make a new pmap (vmspace) active for the given process.
 */
void
pmap_segtab_activate(struct pmap *pm, struct lwp *l)
{
	if (l == curlwp) {
		struct cpu_info * const ci = l->l_cpu;
		pmap_md_xtab_activate(pm, l);
		KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap);
		if (pm == pmap_kernel()) {
			ci->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
			ci->ci_pmap_user_seg0tab = PMAP_INVALID_SEGTAB_ADDRESS;
#endif
		} else {
			ci->ci_pmap_user_segtab = pm->pm_segtab;
#ifdef _LP64
			ci->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
#endif
		}
	}
}

void
pmap_segtab_deactivate(pmap_t pm)
{

	pmap_md_xtab_deactivate(pm);

	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
	curcpu()->ci_pmap_user_seg0tab = NULL;
#endif
}

/*
 * Act on the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly rounded to
 * the page size.
 */
void
pmap_pte_process(pmap_t pmap, vaddr_t sva, vaddr_t eva,
	pte_callback_t callback, uintptr_t flags)
{
#if 0
	printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n",
	    __func__, pmap, sva, eva, callback, flags);
#endif
	while (sva < eva) {
		vaddr_t lastseg_va = pmap_trunc_seg(sva) + NBSEG;
		if (lastseg_va == 0 || lastseg_va > eva)
			lastseg_va = eva;

		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, sva);
		if (ptep != NULL) {
			/*
			 * Callback to deal with the ptes for this segment.
			 */
			(*callback)(pmap, sva, lastseg_va, ptep, flags);
		}
		/*
		 * In theory we could release pages with no entries,
		 * but that takes more effort than we want here.
		 */
		sva = lastseg_va;
	}
}
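
/*
 * Callback sketch (hypothetical, not part of this file): the callback
 * receives the clamped range and a pointer to the PTE for sva, so it
 * can step one PTE per page.  For example, counting valid PTEs:
 *
 *	static void
 *	count_valid_ptes(pmap_t pm, vaddr_t sva, vaddr_t eva,
 *	    pt_entry_t *ptep, uintptr_t flags)
 *	{
 *		size_t * const countp = (size_t *)flags;
 *
 *		for (; sva < eva; sva += NBPG, ptep++)
 *			if (pte_valid_p(*ptep))
 *				(*countp)++;
 *	}
 *
 *	size_t n = 0;
 *	pmap_pte_process(pm, sva, eva, count_valid_ptes, (uintptr_t)&n);
 */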

/*
 * Return a pointer to the pte that corresponds to the specified virtual
 * address (va) in the target physical map, allocating if needed.
 */
pt_entry_t *
pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags)
{
	pmap_segtab_t *stb = pmap->pm_segtab;
	pt_entry_t *pte;
	UVMHIST_FUNC(__func__);

	pte = pmap_pte_lookup(pmap, va);
	if (__predict_false(pte == NULL)) {
#ifdef _LP64
		pmap_segtab_t ** const stb_p =
		    &stb->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)];
		if (__predict_false((stb = *stb_p) == NULL)) {
			pmap_segtab_t *nstb = pmap_segtab_alloc();
#ifdef MULTIPROCESSOR
			pmap_segtab_t *ostb = atomic_cas_ptr(stb_p, NULL, nstb);
			if (__predict_false(ostb != NULL)) {
				pmap_check_stb(nstb, __func__, "reserve");
				pmap_segtab_free(nstb);
				nstb = ostb;
			}
#else
			*stb_p = nstb;
#endif /* MULTIPROCESSOR */
			stb = nstb;
		}
		KASSERT(stb == pmap->pm_segtab->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)]);
#endif /* _LP64 */
		struct vm_page *pg = NULL;
#ifdef PMAP_PTP_CACHE
		mutex_spin_enter(&pmap_segtab_lock);
		if ((pg = LIST_FIRST(&pmap_segtab_info.ptp_pgflist)) != NULL) {
			LIST_REMOVE(pg, pageq.list);
			KASSERT(LIST_FIRST(&pmap_segtab_info.ptp_pgflist) != pg);
		}
		mutex_spin_exit(&pmap_segtab_lock);
#endif
		if (pg == NULL)
			pg = pmap_pte_pagealloc();
		if (pg == NULL) {
			if (flags & PMAP_CANFAIL)
				return NULL;
			panic("%s: cannot allocate page table page "
			    "for va %" PRIxVADDR, __func__, va);
		}

		const paddr_t pa = VM_PAGE_TO_PHYS(pg);
		pte = (pt_entry_t *)PMAP_MAP_POOLPAGE(pa);
		pt_entry_t ** const pte_p =
		    &stb->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)];
#ifdef MULTIPROCESSOR
		pt_entry_t *opte = atomic_cas_ptr(pte_p, NULL, pte);
		/*
		 * If another thread beat us to installing the PTE page
		 * for this va, free the page we just allocated.
		 */
		if (__predict_false(opte != NULL)) {
#ifdef PMAP_PTP_CACHE
			mutex_spin_enter(&pmap_segtab_lock);
			LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist,
			    pg, pageq.list);
			mutex_spin_exit(&pmap_segtab_lock);
#else
			PMAP_UNMAP_POOLPAGE((vaddr_t)pte);
			uvm_pagefree(pg);
#endif
			pte = opte;
		}
#else
		*pte_p = pte;
#endif
		KASSERT(pte == stb->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)]);
		UVMHIST_CALLARGS(pmapsegtabhist, "pm=%#jx va=%#jx -> tab[%jd]=%#jx",
		    (uintptr_t)pmap, (uintptr_t)va,
		    (va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1), (uintptr_t)pte);

		pmap_check_ptes(pte, __func__);
		pte += (va >> PGSHIFT) & (NPTEPG - 1);
	}

	return pte;
}