/*	$NetBSD: pmap_segtab.c,v 1.29 2022/10/26 07:35:20 skrll Exp $	*/

/*-
 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.c	8.4 (Berkeley) 1/26/94
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.29 2022/10/26 07:35:20 skrll Exp $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#define __PMAP_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <uvm/uvm.h>
#include <uvm/pmap/pmap.h>

#if defined(XSEGSHIFT) && XSEGSHIFT == SEGSHIFT
#undef XSEGSHIFT
#undef XSEGLENGTH
#undef NBXSEG
#undef NXSEGPG
#endif

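/* Assert that (a) is either smaller than (b) or an exact multiple of it. */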
#define MULT_CTASSERT(a,b)	__CTASSERT((a) < (b) || ((a) % (b) == 0))

__CTASSERT(sizeof(pmap_ptpage_t) == NBPG);

#if defined(PMAP_HWPAGEWALKER)
#ifdef _LP64
MULT_CTASSERT(PMAP_PDETABSIZE, NPDEPG);
MULT_CTASSERT(NPDEPG, PMAP_PDETABSIZE);
#endif /* _LP64 */
MULT_CTASSERT(sizeof(pmap_pdetab_t *), sizeof(pd_entry_t));
MULT_CTASSERT(sizeof(pd_entry_t), sizeof(pmap_pdetab_t));

#if 0
#ifdef _LP64
static const bool separate_pdetab_root_p = NPDEPG != PMAP_PDETABSIZE;
#else
static const bool separate_pdetab_root_p = true;
#endif /* _LP64 */
#endif

typedef struct {
	pmap_pdetab_t *free_pdetab0;	/* free list kept locally */
	pmap_pdetab_t *free_pdetab;	/* free list kept locally */
#ifdef DEBUG
	uint32_t nget;
	uint32_t nput;
	uint32_t npage;
#define	PDETAB_ADD(n, v)	(pmap_segtab_info.pdealloc.n += (v))
#else
#define	PDETAB_ADD(n, v)	((void) 0)
#endif /* DEBUG */
} pmap_pdetab_alloc_t;
#endif /* PMAP_HWPAGEWALKER */

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
#ifdef _LP64
__CTASSERT(NSEGPG >= PMAP_SEGTABSIZE);
__CTASSERT(NSEGPG % PMAP_SEGTABSIZE == 0);
#endif
__CTASSERT(NBPG >= sizeof(pmap_segtab_t));

typedef struct {
	pmap_segtab_t *free_segtab0;	/* free list kept locally */
	pmap_segtab_t *free_segtab;	/* free list kept locally */
#ifdef DEBUG
	uint32_t nget;
	uint32_t nput;
	uint32_t npage;
#define	SEGTAB_ADD(n, v)	(pmap_segtab_info.segalloc.n += (v))
#else
#define	SEGTAB_ADD(n, v)	((void) 0)
#endif
} pmap_segtab_alloc_t;
#endif /* !PMAP_HWPAGEWALKER || !PMAP_MAP_PDETABPAGE */

struct pmap_segtab_info {
#if defined(PMAP_HWPAGEWALKER)
	pmap_pdetab_alloc_t pdealloc;
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	pmap_segtab_alloc_t segalloc;
#endif
#ifdef PMAP_PPG_CACHE
	struct pgflist ptp_pgflist;	/* Keep a list of idle page tables. */
#endif
} pmap_segtab_info = {
#ifdef PMAP_PPG_CACHE
	.ptp_pgflist = LIST_HEAD_INITIALIZER(pmap_segtab_info.ptp_pgflist),
#endif
};

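/* Serializes the pdetab/segtab free lists in pmap_segtab_info. */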
kmutex_t pmap_segtab_lock __cacheline_aligned;

#ifndef PMAP_HWPAGEWALKER
/*
 * Check that a seg_ppg[] array is empty.
 *
 * This is used when allocating or freeing a pmap_segtab_t.  The stb
 * should be unused -- meaning, every seg_ppg[] pointer is NULL.  It is
 * checked whenever a segtab changes hands: on a segtab freshly allocated
 * from the pmap pool, on release of a segtab entry to the freelist, and,
 * in the SMP case, on a segtab that lost the allocation race in reserve
 * (two CPUs attempted to allocate the same underlying segtab) and is
 * being freed while still unused.
 */
static void
pmap_check_stb(pmap_segtab_t *stb, const char *caller, const char *why)
{
#ifdef DEBUG
	for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) {
		if (stb->seg_ppg[i] != NULL) {
#define DEBUG_NOISY
#ifdef DEBUG_NOISY
			UVMHIST_FUNC(__func__);
			UVMHIST_CALLARGS(pmapxtabhist, "stb=%#jx",
			    (uintptr_t)stb, 0, 0, 0);
			for (size_t j = i; j < PMAP_SEGTABSIZE; j++)
				if (stb->seg_ppg[j] != NULL)
					printf("%s: stb->seg_ppg[%zu] = %p\n",
					    caller, j, stb->seg_ppg[j]);
#endif
			panic("%s: pm_segtab.seg_ppg[%zu] != 0 (%p): %s",
			    caller, i, stb->seg_ppg[i], why);
		}
	}
#endif
}
#endif /* PMAP_HWPAGEWALKER */

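/*
 * Allocate a zeroed physical page for use as a page-table page.
 */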
static inline struct vm_page *
pmap_pte_pagealloc(void)
{
	struct vm_page *pg;

	pg = pmap_md_alloc_poolpage(UVM_PGA_ZERO | UVM_PGA_USERESERVE);
	if (pg) {
#ifdef UVM_PAGE_TRKOWN
		pg->owner_tag = NULL;
#endif
		UVM_PAGE_OWN(pg, "pmap-ptp");
	}

	return pg;
}

#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
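/*
 * The helpers below turn a PDE's physical address into a usable KVA;
 * they assume PMAP_MAP_PDETABPAGE implies a cheap direct mapping of
 * physical memory via pmap_md_direct_map_paddr().
 */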
static vaddr_t
pmap_pde_to_va(pd_entry_t pde)
{
	if (!pte_pde_valid_p(pde))
		return 0;

	paddr_t pa = pte_pde_to_paddr(pde);
	return pmap_md_direct_map_paddr(pa);
}

#ifdef _LP64
static pmap_pdetab_t *
pmap_pde_to_pdetab(pd_entry_t pde)
{

	return (pmap_pdetab_t *)pmap_pde_to_va(pde);
}
#endif

static pmap_ptpage_t *
pmap_pde_to_ptpage(pd_entry_t pde)
{

	return (pmap_ptpage_t *)pmap_pde_to_va(pde);
}
#endif

#ifdef _LP64
__CTASSERT((XSEGSHIFT - SEGSHIFT) % (PGSHIFT-3) == 0);
#endif

static inline pmap_ptpage_t *
pmap_ptpage(struct pmap *pmap, vaddr_t va)
{
#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
	vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
	pmap_pdetab_t *ptb = pmap->pm_pdetab;

//	UVMHIST_LOG(pmaphist, "pm_pdetab %#jx", ptb, 0, 0, 0);

	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));

#ifdef _LP64
	for (size_t segshift = XSEGSHIFT;
	    segshift > SEGSHIFT;
	    segshift -= PGSHIFT - 3, pdetab_mask = NSEGPG - 1) {
		ptb = pmap_pde_to_pdetab(ptb->pde_pde[(va >> segshift) & pdetab_mask]);
		if (ptb == NULL)
			return NULL;
	}
#endif
	return pmap_pde_to_ptpage(ptb->pde_pde[(va >> SEGSHIFT) & pdetab_mask]);
#else
	vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
	pmap_segtab_t *stb = pmap->pm_segtab;

	KASSERTMSG(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va),
	    "pmap %p va %#" PRIxVADDR, pmap, va);
#ifdef _LP64
	for (size_t segshift = XSEGSHIFT;
	    segshift > SEGSHIFT;
	    segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
		stb = stb->seg_seg[(va >> segshift) & segtab_mask];
		if (stb == NULL)
			return NULL;
	}
#endif
	return stb->seg_ppg[(va >> SEGSHIFT) & segtab_mask];
#endif
}
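
/*
 * Illustrative sketch (not compiled): how the walks above slice up a
 * 64-bit VA, assuming a single XSEG level.  Each directory level
 * consumes PGSHIFT-3 bits, since in LP64 a page holds
 * NBPG / sizeof(pd_entry_t) == 1 << (PGSHIFT - 3) entries:
 *
 *	[ xseg index | seg index | pte index | page offset ]
 *	             ^XSEGSHIFT  ^SEGSHIFT   ^PGSHIFT
 */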

#if defined(PMAP_HWPAGEWALKER)
bool
pmap_pdetab_fixup(struct pmap *pmap, vaddr_t va)
{
	struct pmap * const kpm = pmap_kernel();
	pmap_pdetab_t * const kptb = kpm->pm_pdetab;
	pmap_pdetab_t * const uptb = pmap->pm_pdetab;
	size_t idx = PMAP_PDETABSIZE - 1;
#if !defined(PMAP_MAP_PDETABPAGE)
	__CTASSERT(PMAP_PDETABSIZE == PMAP_SEGTABSIZE);
	pmap_segtab_t * const kstb = &pmap_kern_segtab;
	pmap_segtab_t * const ustb = pmap->pm_segtab;
#endif

	// Regardless of how many levels deep this page table is, we only
	// need to verify the first level PDEs match up.
#ifdef XSEGSHIFT
	idx &= va >> XSEGSHIFT;
#else
	idx &= va >> SEGSHIFT;
#endif
	if (uptb->pde_pde[idx] != kptb->pde_pde[idx]) {
		pte_pde_set(&uptb->pde_pde[idx], kptb->pde_pde[idx]);
#if !defined(PMAP_MAP_PDETABPAGE)
		ustb->seg_seg[idx] = kstb->seg_seg[idx]; // copy KVA of PTP
#endif
		return true;
	}
	return false;
}
#endif /* PMAP_HWPAGEWALKER */
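
#if 0
/*
 * Usage sketch (illustrative only): a machine-dependent fault handler
 * might try the fixup for a kernel address before treating the fault
 * as fatal, retrying the access if the user pdetab was merely missing
 * the kernel's current top-level PDE.
 */
	if (va >= VM_MIN_KERNEL_ADDRESS && pmap_pdetab_fixup(pmap, va))
		return;		/* retry the faulting access */
#endif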


static void
pmap_page_attach(pmap_t pmap, vaddr_t kva, struct vm_page *pg,
    struct pglist *pglist, voff_t off)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx kva %#jx pg %#jx list %#jx",
	    (uintptr_t)pmap, (uintptr_t)kva, (uintptr_t)pg, (uintptr_t)pglist);

	struct uvm_object * const uobj = &pmap->pm_uobject;
	if (pg == NULL) {
		paddr_t pa;

		bool ok __diagused = pmap_extract(pmap_kernel(), kva, &pa);
		KASSERT(ok);

		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg != NULL);
	}

	UVMHIST_LOG(pmapxtabhist, "kva %#jx uobj %#jx pg %#jx list %#jx",
	    (uintptr_t)kva, (uintptr_t)uobj, (uintptr_t)pg, (uintptr_t)pglist);

	pmap_lock(pmap);
	TAILQ_INSERT_TAIL(pglist, pg, pageq.queue);
	uobj->uo_npages++;
	pmap_unlock(pmap);

	/*
	 * Now set each vm_page that maps this page to point to the
	 * pmap and set the offset to what we want.
	 */
	KASSERTMSG(pg->uobject == NULL, "pg %p pg->uobject %p", pg, pg->uobject);
	pg->uobject = uobj;
	pg->offset = off;
}

static struct vm_page *
pmap_page_detach(pmap_t pmap, struct pglist *list, vaddr_t va)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx kva %#jx list %#jx",
	    (uintptr_t)pmap, (uintptr_t)va, (uintptr_t)list, 0);

	paddr_t pa;
	bool ok __diagused = pmap_extract(pmap_kernel(), va, &pa);
	KASSERT(ok);

	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	struct uvm_object * const uobj = &pmap->pm_uobject;

	UVMHIST_LOG(pmapxtabhist, "kva %#jx uobj %#jx pg %#jx list %#jx",
	    (uintptr_t)va, (uintptr_t)uobj, (uintptr_t)pg, (uintptr_t)list);

	KASSERTMSG(pg->uobject == uobj, "pg->uobject %p vs uobj %p",
	    pg->uobject, uobj);

	pmap_lock(pmap);
	TAILQ_REMOVE(list, pg, pageq.queue);
	uobj->uo_npages--;
	pmap_unlock(pmap);

	pg->uobject = NULL;
	pg->offset = 0;

	return pg;
}

#ifndef PMAP_PPG_CACHE
static void
pmap_segtab_pagefree(pmap_t pmap, struct pglist *list, vaddr_t kva, size_t size)
{
#ifdef PMAP_MAP_PTEPAGE
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx list %#jx kva %#jx size %#jx",
	    (uintptr_t)pmap, (uintptr_t)list, kva, size);
	KASSERT(size == PAGE_SIZE);
	if (size == PAGE_SIZE) {
		UVMHIST_LOG(pmapxtabhist, "about to detach (kva %#jx)",
		    kva, 0, 0, 0);
		uvm_pagefree(pmap_page_detach(pmap, list, kva));
		return;
	}
#endif
	for (size_t i = 0; i < size; i += PAGE_SIZE) {
		(void)pmap_page_detach(pmap, list, kva + i);
	}

	uvm_km_free(kernel_map, kva, size, UVM_KMF_WIRED);
}
#endif

pt_entry_t *
pmap_pte_lookup(pmap_t pmap, vaddr_t va)
{
	pmap_ptpage_t * const ppg = pmap_ptpage(pmap, va);
	if (ppg == NULL)
		return NULL;

	const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);

	return ppg->ppg_ptes + pte_idx;
}
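
#if 0
/*
 * Usage sketch (illustrative only): a non-allocating PTE lookup.
 * pte_valid_p() is part of the MD pte API this framework assumes.
 */
	pt_entry_t * const ptep = pmap_pte_lookup(pmap, va);
	if (ptep != NULL && pte_valid_p(*ptep)) {
		/* examine or modify *ptep, then flush the TLB entry */
	}
#endif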


static pmap_ptpage_t *
pmap_ptpage_alloc(pmap_t pmap, int flags, paddr_t *pa_p)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx flags %#jx pa_p %#jx",
	    (uintptr_t)pmap, (uintptr_t)flags, (uintptr_t)pa_p, 0);

	pmap_ptpage_t *ppg = NULL;

#ifdef PMAP_MAP_PTEPAGE
	struct vm_page *pg = NULL;
	paddr_t pa;
#ifdef PMAP_PPG_CACHE
	ppg = pmap_pgcache_alloc(&pmap_segtab_info.ppg_flist);
#endif
	if (ppg == NULL) {
		pg = pmap_pte_pagealloc();
		if (pg == NULL) {
			if (flags & PMAP_CANFAIL)
				return NULL;
			panic("%s: cannot allocate page table page",
			    __func__);
		}
		pa = VM_PAGE_TO_PHYS(pg);
		ppg = (pmap_ptpage_t *)PMAP_MAP_PTEPAGE(pa);
	} else {
		bool ok __diagused = pmap_extract(pmap_kernel(), (vaddr_t)ppg, &pa);
		KASSERT(ok);
	}

	UVMHIST_LOG(pmapxtabhist, "about to attach",  0, 0, 0, 0);
	pmap_page_attach(pmap, (vaddr_t)ppg, pg, &pmap->pm_ppg_list, 0);

	*pa_p = pa;
#else
	vaddr_t kva = uvm_km_alloc(kernel_map, PAGE_SIZE, PAGE_SIZE,
	    UVM_KMF_WIRED | UVM_KMF_WAITVA
	    | (flags & PMAP_CANFAIL ? UVM_KMF_CANFAIL : 0));
	if (kva == 0) {
		if (flags & PMAP_CANFAIL)
			return NULL;
		panic("%s: cannot allocate page table page", __func__);
	}
	UVMHIST_LOG(pmapxtabhist, "about to attach",  0, 0, 0, 0);
	pmap_page_attach(pmap, kva, NULL, &pmap->pm_ppg_list, 0);
	ppg = (pmap_ptpage_t *)kva;
#endif

	UVMHIST_LOG(pmapxtabhist, "... ppg %#jx", (uintptr_t)ppg, 0, 0, 0);

	return ppg;
}

static void
pmap_ptpage_free(pmap_t pmap, pmap_ptpage_t *ppg, const char *caller)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx va %#jx", (uintptr_t)pmap,
	    (uintptr_t)ppg, 0, 0);

	const vaddr_t kva = (vaddr_t)ppg;
	/*
	 * All pte arrays should be page aligned.
	 */
	if ((kva & PAGE_MASK) != 0) {
		panic("%s: pte entry at %p not page aligned", caller, ppg);
	}

#ifdef DEBUG
	for (size_t j = 0; j < NPTEPG; j++) {
		if (ppg->ppg_ptes[j] != 0) {
			UVMHIST_LOG(pmapxtabhist,
			    "pte entry %#jx not 0 (%#jx)",
			    (uintptr_t)&ppg->ppg_ptes[j],
			    (uintptr_t)ppg->ppg_ptes[j], 0, 0);
			for (size_t i = j + 1; i < NPTEPG; i++)
				if (ppg->ppg_ptes[i] != 0)
					UVMHIST_LOG(pmapxtabhist,
					    "pte[%zu] = %#"PRIxPTE,
					    i, ppg->ppg_ptes[i], 0, 0);

			panic("%s: pte entry at %p not 0 (%#" PRIxPTE ")",
			    __func__, &ppg->ppg_ptes[j],
			    ppg->ppg_ptes[j]);
		}
	}
#endif
	//pmap_md_vca_clean(pg, (vaddr_t)ppg, NBPG);
#ifdef PMAP_PPG_CACHE
	UVMHIST_LOG(pmapxtabhist, "about to detach",  0, 0, 0, 0);
	pmap_page_detach(pmap, &pmap->pm_ppg_list, kva);
	pmap_segtab_pagecache(&pmap_segtab_info.ppg_flist, ppg);
#else
	pmap_segtab_pagefree(pmap, &pmap->pm_ppg_list, kva, PAGE_SIZE);
#endif /* PMAP_PPG_CACHE */
}


#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)

static pmap_pdetab_t *
pmap_pdetab_alloc(struct pmap *pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);

	pmap_pdetab_t *ptb;
#ifdef KERNHIST
	bool found_on_freelist = false;
#endif

 again:
	mutex_spin_enter(&pmap_segtab_lock);
	UVMHIST_LOG(pmapxtabhist, "free_pdetab %#jx",
	    (uintptr_t)pmap_segtab_info.pdealloc.free_pdetab, 0, 0, 0);
	if (__predict_true((ptb = pmap_segtab_info.pdealloc.free_pdetab) != NULL)) {
		pmap_segtab_info.pdealloc.free_pdetab = ptb->pde_next;

		UVMHIST_LOG(pmapxtabhist, "freelist ptb=%#jx",
		    (uintptr_t)ptb, 0, 0, 0);

		PDETAB_ADD(nget, 1);
		ptb->pde_next = NULL;
#ifdef KERNHIST
		found_on_freelist = true;
#endif
	}
	mutex_spin_exit(&pmap_segtab_lock);

	struct vm_page *ptb_pg = NULL;
	if (__predict_false(ptb == NULL)) {
		ptb_pg = pmap_pte_pagealloc();

		UVMHIST_LOG(pmapxtabhist, "ptb_pg=%#jx",
		    (uintptr_t)ptb_pg, 0, 0, 0);
		if (__predict_false(ptb_pg == NULL)) {
			/*
			 * XXX What else can we do?  Could we deadlock here?
			 */
			uvm_wait("pdetab");
			goto again;
		}

		UVMHIST_LOG(pmapxtabhist, "ptb_pg=%#jx 2",
		    (uintptr_t)ptb_pg, 0, 0, 0);
		PDETAB_ADD(npage, 1);
		const paddr_t ptb_pa = VM_PAGE_TO_PHYS(ptb_pg);
		UVMHIST_LOG(pmapxtabhist, "ptb_pa=%#jx",  (uintptr_t)ptb_pa, 0, 0, 0);
		ptb = (pmap_pdetab_t *)PMAP_MAP_PDETABPAGE(ptb_pa);
		UVMHIST_LOG(pmapxtabhist, "new ptb=%#jx", (uintptr_t)ptb, 0,
		    0, 0);

		if (pte_invalid_pde() != 0) {
			for (size_t i = 0; i < NPDEPG; i++) {
				ptb->pde_pde[i] = pte_invalid_pde();
			}
		}
	}

	UVMHIST_LOG(pmapxtabhist, "about to attach",  0, 0, 0, 0);
	pmap_page_attach(pmap, (vaddr_t)ptb, ptb_pg, &pmap->pm_pdetab_list, 0);

	UVMHIST_LOG(pmapxtabhist, "... ptb %#jx found on freelist %d",
	    (uintptr_t)ptb, found_on_freelist, 0, 0);

	return ptb;
}


#else
/*
 *	Allocate a segment table for a pmap, preferring an entry from
 *	the free list over carving one out of a freshly allocated page.
 */
static pmap_segtab_t *
pmap_segtab_alloc(struct pmap *pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);

	pmap_segtab_t *stb;
	bool found_on_freelist = false;

 again:
	mutex_spin_enter(&pmap_segtab_lock);
	if (__predict_true((stb = pmap_segtab_info.segalloc.free_segtab) != NULL)) {
		pmap_segtab_info.segalloc.free_segtab = stb->seg_next;
		SEGTAB_ADD(nget, 1);
		stb->seg_next = NULL;
		found_on_freelist = true;
		UVMHIST_LOG(pmapxtabhist, "freelist stb=%#jx",
		    (uintptr_t)stb, 0, 0, 0);
	}
	mutex_spin_exit(&pmap_segtab_lock);

	struct vm_page *stb_pg = NULL;
	if (__predict_false(stb == NULL)) {
		stb_pg = pmap_pte_pagealloc();

		if (__predict_false(stb_pg == NULL)) {
			/*
			 * XXX What else can we do?  Could we deadlock here?
			 */
			uvm_wait("segtab");
			goto again;
		}
		SEGTAB_ADD(npage, 1);
		const paddr_t stb_pa = VM_PAGE_TO_PHYS(stb_pg);

		stb = (pmap_segtab_t *)PMAP_MAP_SEGTABPAGE(stb_pa);
		UVMHIST_LOG(pmapxtabhist, "new stb=%#jx", (uintptr_t)stb, 0,
		    0, 0);
#if 0
CTASSERT(NBPG / sizeof(*stb) == 1);
		const size_t n = NBPG / sizeof(*stb);
		if (n > 1) {
			/*
			 * link all the segtabs in this page together
			 */
			for (size_t i = 1; i < n - 1; i++) {
				stb[i].seg_next = &stb[i + 1];
			}
			/*
			 * Now link the new segtabs into the free segtab list.
			 */
			mutex_spin_enter(&pmap_segtab_lock);
			stb[n - 1].seg_next = pmap_segtab_info.segalloc.free_segtab;
			pmap_segtab_info.segalloc.free_segtab = stb + 1;
			SEGTAB_ADD(nput, n - 1);
			mutex_spin_exit(&pmap_segtab_lock);
		}
#endif
	}

	UVMHIST_LOG(pmapxtabhist, "about to attach",  0, 0, 0, 0);
	pmap_page_attach(pmap, (vaddr_t)stb, stb_pg, &pmap->pm_segtab_list, 0);

	pmap_check_stb(stb, __func__,
	    found_on_freelist ? "from free list" : "allocated");

	UVMHIST_LOG(pmapxtabhist, "... stb %#jx found on freelist %zu",
	    (uintptr_t)stb, found_on_freelist, 0, 0);

	return stb;
}
#endif

#if defined(PMAP_HWPAGEWALKER)
static void
pmap_pdetab_free(pmap_pdetab_t *ptb)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "ptb %#jx", (uintptr_t)ptb, 0, 0, 0);
	/*
	 * Insert the pdetab into the pdetab freelist.
	 */
	mutex_spin_enter(&pmap_segtab_lock);
	ptb->pde_next = pmap_segtab_info.pdealloc.free_pdetab;
	pmap_segtab_info.pdealloc.free_pdetab = ptb;
	PDETAB_ADD(nput, 1);
	mutex_spin_exit(&pmap_segtab_lock);
}
#endif


#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
/*
 * Insert the segtab into the segtab freelist.
 */
static void
pmap_segtab_free(pmap_segtab_t *stb)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "stb %#jx", (uintptr_t)stb, 0, 0, 0);

	mutex_spin_enter(&pmap_segtab_lock);
	stb->seg_next = pmap_segtab_info.segalloc.free_segtab;
	pmap_segtab_info.segalloc.free_segtab = stb;
	SEGTAB_ADD(nput, 1);
	mutex_spin_exit(&pmap_segtab_lock);
}
#endif

#if defined(PMAP_HWPAGEWALKER)
static void
pmap_pdetab_release(pmap_t pmap, pmap_pdetab_t **ptb_p, bool free_ptb,
    vaddr_t va, vsize_t vinc)
{
	const vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
	pmap_pdetab_t *ptb = *ptb_p;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm %#jx ptb_p %#jx ptb %#jx free %jd",
	    (uintptr_t)pmap, (uintptr_t)ptb_p, (uintptr_t)ptb, free_ptb);
	UVMHIST_LOG(pmapxtabhist, " va=%#jx vinc=%#jx",
	    (uintptr_t)va, (uintptr_t)vinc, 0, 0);

	for (size_t i = (va / vinc) & pdetab_mask;
	    i < PMAP_PDETABSIZE;
	    i++, va += vinc) {
#ifdef _LP64
		if (vinc > NBSEG) {
			if (pte_pde_valid_p(ptb->pde_pde[i])) {
				pmap_pdetab_t *nptb =
				    pmap_pde_to_pdetab(ptb->pde_pde[i]);
				UVMHIST_LOG(pmapxtabhist,
				    " va %#jx ptp->pde_pde[%jd] (*%#jx) = %#jx "
				    "recursing", va, i, &ptb->pde_pde[i],
				    ptb->pde_pde[i]);
				pmap_pdetab_release(pmap, &nptb, true,
				    va, vinc / NPDEPG);
				ptb->pde_pde[i] = pte_invalid_pde();
				KASSERT(nptb == NULL);
			}
			continue;
		}
#endif
		KASSERT(vinc == NBSEG);

		/* get pointer to PT page */
		pmap_ptpage_t *ppg = pmap_pde_to_ptpage(ptb->pde_pde[i]);
		UVMHIST_LOG(pmapxtabhist,
		    "   va %#jx ptb->pde_pde[%jd] (*%#jx) = %#jx", va, i,
		    (uintptr_t)&ptb->pde_pde[i], ptb->pde_pde[i]);
		if (ppg == NULL)
			continue;

		UVMHIST_LOG(pmapxtabhist, " zeroing tab (%#jx)[%jd] (%#jx)",
		    (uintptr_t)ptb->pde_pde, i, (uintptr_t)&ptb->pde_pde[i], 0);

		ptb->pde_pde[i] = pte_invalid_pde();

		pmap_ptpage_free(pmap, ppg, __func__);
	}

	if (free_ptb) {
		UVMHIST_LOG(pmapxtabhist, " ptbp %#jx ptb %#jx",
		    (uintptr_t)ptb_p, (uintptr_t)ptb, 0, 0);
		const vaddr_t kva = (vaddr_t)ptb;
		UVMHIST_LOG(pmapxtabhist, "about to detach",  0, 0, 0, 0);
		pmap_page_detach(pmap, &pmap->pm_pdetab_list, kva);
		pmap_pdetab_free(ptb);
		*ptb_p = NULL;
	}
}
#endif

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
static void
pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stb_p, bool free_stb,
    pte_callback_t callback, uintptr_t flags, vaddr_t va, vsize_t vinc)
{
	pmap_segtab_t *stb = *stb_p;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmapxtabhist, "pm=%#jx stb_p=%#jx free=%jd",
	    (uintptr_t)pmap, (uintptr_t)stb, free_stb, 0);
	UVMHIST_LOG(pmapxtabhist, " callback=%#jx flags=%#jx va=%#jx vinc=%#jx",
	    (uintptr_t)callback, flags, (uintptr_t)va, (uintptr_t)vinc);

	for (size_t i = (va / vinc) & (PMAP_SEGTABSIZE - 1);
	    i < PMAP_SEGTABSIZE;
	    i++, va += vinc) {
#ifdef _LP64
		if (vinc > NBSEG) {
			if (stb->seg_seg[i] != NULL) {
				UVMHIST_LOG(pmapxtabhist,
				    " recursing %jd", i, 0, 0, 0);
				pmap_segtab_release(pmap, &stb->seg_seg[i],
				    true, callback, flags, va, vinc / NSEGPG);
				KASSERT(stb->seg_seg[i] == NULL);
			}
			continue;
		}
#endif
		KASSERT(vinc == NBSEG);

		/* get pointer to segment map */
		pmap_ptpage_t *ppg = stb->seg_ppg[i];
		if (ppg == NULL)
			continue;

		/*
		 * If our caller wants a callback, do so.
		 */
		if (callback != NULL) {
			(*callback)(pmap, va, va + vinc, ppg->ppg_ptes, flags);
		}
		pmap_ptpage_free(pmap, ppg, __func__);
		stb->seg_ppg[i] = NULL;
		UVMHIST_LOG(pmapxtabhist, " zeroing tab[%jd]", i, 0, 0, 0);
	}

	if (free_stb) {
		pmap_check_stb(stb, __func__,
		    vinc == NBSEG ? "release seg" : "release xseg");

		const vaddr_t kva = (vaddr_t)stb;
		UVMHIST_LOG(pmapxtabhist, "about to detach",  0, 0, 0, 0);
		pmap_page_detach(pmap, &pmap->pm_segtab_list, kva);
		pmap_segtab_free(stb);
		*stb_p = NULL;
	}
}
#endif


/*
 * Allocate the top segment table for the pmap.
 */
void
pmap_segtab_init(pmap_t pmap)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "pm %#jx", (uintptr_t)pmap, 0, 0, 0);

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	/*
	 * Constantly converting an extracted PA back to a VA is somewhat
	 * expensive for systems with hardware page walkers but without an
	 * inexpensive way to access arbitrary virtual addresses, so we
	 * also keep a root segtab whose entries are virtual addresses.
	 */
	pmap->pm_segtab = pmap_segtab_alloc(pmap);
#endif
#if defined(PMAP_HWPAGEWALKER)
	pmap->pm_pdetab = pmap_pdetab_alloc(pmap);
	pmap_md_pdetab_init(pmap);
#endif
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags)
{
	KASSERT(pmap != pmap_kernel());
#ifdef _LP64
	const vsize_t vinc = NBXSEG;
#else
	const vsize_t vinc = NBSEG;
#endif

#if defined(PMAP_HWPAGEWALKER)
	if (pmap->pm_pdetab != NULL) {
		pmap_pdetab_release(pmap, &pmap->pm_pdetab,
		    true, pmap->pm_minaddr, vinc);
	}
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	if (pmap->pm_segtab != NULL) {
		pmap_segtab_release(pmap, &pmap->pm_segtab,
		    func == NULL, func, flags, pmap->pm_minaddr, vinc);
	}
#endif

#if defined(PMAP_HWPAGEWALKER)
#if !defined(PMAP_MAP_PDETABPAGE)
	KASSERT((pmap->pm_segtab == NULL) == (pmap->pm_pdetab == NULL));
#endif
	KASSERT(pmap->pm_pdetab == NULL);
#endif
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	KASSERT(pmap->pm_segtab == NULL);
#endif
}

/*
 *	Make a new pmap (vmspace) active for the given process.
 */
void
pmap_segtab_activate(struct pmap *pm, struct lwp *l)
{
	if (l == curlwp) {
		KASSERT(pm == l->l_proc->p_vmspace->vm_map.pmap);
		pmap_md_xtab_activate(pm, l);
#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
		struct cpu_info * const ci = l->l_cpu;
		if (pm == pmap_kernel()) {
			ci->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
			ci->ci_pmap_user_seg0tab = PMAP_INVALID_SEGTAB_ADDRESS;
#endif
		} else {
			ci->ci_pmap_user_segtab = pm->pm_segtab;
#ifdef _LP64
			ci->ci_pmap_user_seg0tab = pm->pm_segtab->seg_seg[0];
#endif
		}
#endif
	}
}

void
pmap_segtab_deactivate(pmap_t pm)
{
	pmap_md_xtab_deactivate(pm);

#if !defined(PMAP_HWPAGEWALKER) || !defined(PMAP_MAP_PDETABPAGE)
	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
#ifdef _LP64
	curcpu()->ci_pmap_user_seg0tab = NULL;
#endif
#endif
}

/*
 *	Act on the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly rounded to
 *	the page size.
 */
void
pmap_pte_process(pmap_t pmap, vaddr_t sva, vaddr_t eva,
    pte_callback_t callback, uintptr_t flags)
{
#if 0
	printf("%s: %p, %"PRIxVADDR", %"PRIxVADDR", %p, %"PRIxPTR"\n",
	    __func__, pmap, sva, eva, callback, flags);
#endif
	while (sva < eva) {
		vaddr_t lastseg_va = pmap_trunc_seg(sva) + NBSEG;
		if (lastseg_va == 0 || lastseg_va > eva)
			lastseg_va = eva;

		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		pt_entry_t * const ptep = pmap_pte_lookup(pmap, sva);
		if (ptep != NULL) {
			/*
			 * Callback to deal with the ptes for this segment.
			 */
			(*callback)(pmap, sva, lastseg_va, ptep, flags);
		}
		/*
		 * In theory we could release pages with no entries,
		 * but that takes more effort than we want here.
		 */
		sva = lastseg_va;
	}
}
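
#if 0
/*
 * Sketch of a pte_callback_t (illustrative only): the callback receives
 * one segment's worth of the range per call, with ptep pointing at the
 * PTE for sva, and steps through the PTEs a page at a time.
 */
static void
pmap_example_pte_callback(pmap_t pmap, vaddr_t sva, vaddr_t eva,
    pt_entry_t *ptep, uintptr_t flags)
{
	for (; sva < eva; sva += PAGE_SIZE, ptep++) {
		/* examine or update *ptep */
	}
}
#endif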

#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
static pd_entry_t *
pmap_pdetab_reserve(struct pmap *pmap, vaddr_t va)
#elif defined(PMAP_HWPAGEWALKER)
static pmap_ptpage_t **
pmap_segtab_reserve(struct pmap *pmap, vaddr_t va, pd_entry_t **pde_p)
#else
static pmap_ptpage_t **
pmap_segtab_reserve(struct pmap *pmap, vaddr_t va)
#endif
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "pm %#jx va %#jx", (uintptr_t)pmap,
	    (uintptr_t)va, 0, 0);

#if defined(PMAP_HWPAGEWALKER)
	pmap_pdetab_t *ptb = pmap->pm_pdetab;
	UVMHIST_LOG(pmaphist, "pm_pdetab %#jx", (uintptr_t)ptb, 0, 0, 0);
#endif
#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
	vaddr_t segtab_mask = PMAP_PDETABSIZE - 1;
#ifdef _LP64
	for (size_t segshift = XSEGSHIFT;
	    segshift > SEGSHIFT;
	    segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
		pd_entry_t * const pde_p =
		    &ptb->pde_pde[(va >> segshift) & segtab_mask];
		pd_entry_t opde = *pde_p;

		UVMHIST_LOG(pmaphist,
		    "ptb %#jx segshift %jd pde_p %#jx opde %#jx",
		    ptb, segshift, pde_p, opde);

		if (__predict_false(!pte_pde_valid_p(opde))) {
			ptb = pmap_pdetab_alloc(pmap);
			pd_entry_t npde = pte_pde_pdetab(
			    pmap_md_direct_mapped_vaddr_to_paddr((vaddr_t)ptb),
			    pmap == pmap_kernel());
			opde = pte_pde_cas(pde_p, opde, npde);
			if (__predict_false(pte_pde_valid_p(opde))) {
				const vaddr_t kva = (vaddr_t)ptb;
				UVMHIST_LOG(pmapxtabhist, "about to detach",
				    0, 0, 0, 0);
				pmap_page_detach(pmap, &pmap->pm_pdetab_list,
				    kva);
				pmap_pdetab_free(ptb);
			} else {
				opde = npde;
			}
		}
		ptb = pmap_pde_to_pdetab(opde);
		UVMHIST_LOG(pmaphist, "opde %#jx ptb %#jx", opde, ptb, 0, 0);
	}
#elif defined(XSEGSHIFT)
	size_t segshift = XSEGSHIFT;

	pd_entry_t opde = ptb->pde_pde[(va >> segshift) & segtab_mask];
	KASSERT(pte_pde_valid_p(opde));
	ptb = pmap_pde_to_pdetab(opde);
	segtab_mask = NSEGPG - 1;
#endif /* _LP64 */
	const size_t idx = (va >> SEGSHIFT) & segtab_mask;

	UVMHIST_LOG(pmaphist, "... returning %#jx (idx %jd)",
	    (uintptr_t)&ptb->pde_pde[idx], idx, 0, 0);

	return &ptb->pde_pde[idx];
#else /* PMAP_HWPAGEWALKER && PMAP_MAP_PDETABPAGE */
	pmap_segtab_t *stb = pmap->pm_segtab;
	vaddr_t segtab_mask = PMAP_SEGTABSIZE - 1;
#ifdef _LP64
	for (size_t segshift = XSEGSHIFT;
	    segshift > SEGSHIFT;
	    segshift -= PGSHIFT - 3, segtab_mask = NSEGPG - 1) {
		size_t idx = (va >> segshift) & segtab_mask;
		pmap_segtab_t ** const stb_p = &stb->seg_seg[idx];
#if defined(PMAP_HWPAGEWALKER)
		pd_entry_t * const ptb_p = &ptb->pde_pde[idx];
#endif	/* PMAP_HWPAGEWALKER */
		if (__predict_false((stb = *stb_p) == NULL)) {
			stb = pmap_segtab_alloc(pmap);
#ifdef MULTIPROCESSOR
			pmap_segtab_t *ostb = atomic_cas_ptr(stb_p, NULL, stb);
			if (__predict_false(ostb != NULL)) {
				const vaddr_t kva = (vaddr_t)stb;
				UVMHIST_LOG(pmapxtabhist, "about to detach",
				    0, 0, 0, 0);
				pmap_page_detach(pmap, &pmap->pm_segtab_list,
				    kva);
				pmap_segtab_free(stb);
				stb = ostb;
			}
#else
			*stb_p = stb;
#endif /* MULTIPROCESSOR */
		}
	}
#elif defined(PMAP_HWPAGEWALKER)
	pd_entry_t opde = ptb->pde_pde[(va >> XSEGSHIFT) & segtab_mask];
	KASSERT(pte_pde_valid_p(opde));
	ptb = pmap_pde_to_pdetab(opde);
	segtab_mask = NSEGPG - 1;

#endif /* _LP64 */
	size_t idx = (va >> SEGSHIFT) & segtab_mask;
#if defined(PMAP_HWPAGEWALKER)
#if defined(XSEGSHIFT) && (XSEGSHIFT != SEGSHIFT)
	*pde_p = &ptb->pde_pde[idx];
#else /* XSEGSHIFT */
	*pde_p = &ptb->pde_pde[idx];
#endif /* XSEGSHIFT */
#endif /* PMAP_HWPAGEWALKER */
	return &stb->seg_ppg[idx];
#endif
}


/*
 *	Return a pointer for the pte that corresponds to the specified virtual
 *	address (va) in the target physical map, allocating if needed.
 */
pt_entry_t *
pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "pm=%#jx va=%#jx flags=%#jx",
	    (uintptr_t)pmap, (uintptr_t)va, flags, 0);
	pmap_ptpage_t *ppg;
	paddr_t pa = 0;

#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
	pd_entry_t * const pde_p = pmap_pdetab_reserve(pmap, va);
	ppg = pmap_pde_to_ptpage(*pde_p);
#elif defined(PMAP_HWPAGEWALKER)
	pd_entry_t *pde_p;
	pmap_ptpage_t ** const ppg_p = pmap_segtab_reserve(pmap, va, &pde_p);
	ppg = *ppg_p;
#else
	pmap_ptpage_t ** const ppg_p = pmap_segtab_reserve(pmap, va);
	ppg = *ppg_p;
#endif

	if (__predict_false(ppg == NULL)) {
		ppg = pmap_ptpage_alloc(pmap, flags, &pa);
		if (__predict_false(ppg == NULL))
			return NULL;

#if defined(PMAP_HWPAGEWALKER)
		pd_entry_t npde = pte_pde_ptpage(pa, pmap == pmap_kernel());
#endif
#if defined(PMAP_HWPAGEWALKER) && defined(PMAP_MAP_PDETABPAGE)
		pd_entry_t opde = *pde_p;
		opde = pte_pde_cas(pde_p, opde, npde);
		if (__predict_false(pte_pde_valid_p(opde))) {
			pmap_ptpage_free(pmap, ppg, __func__);
			ppg = pmap_pde_to_ptpage(opde);
		}
#else
#ifdef MULTIPROCESSOR
		pmap_ptpage_t *oppg = atomic_cas_ptr(ppg_p, NULL, ppg);
		/*
		 * If another thread allocated the segtab needed for this
		 * va, free the page we just allocated.
		 */
		if (__predict_false(oppg != NULL)) {
			pmap_ptpage_free(pmap, ppg, __func__);
			ppg = oppg;
#if defined(PMAP_HWPAGEWALKER)
		} else {
			pte_pde_set(pde_p, npde);
#endif
		}
#else /* !MULTIPROCESSOR */
		*ppg_p = ppg;
#endif /* MULTIPROCESSOR */
#endif /* PMAP_HWPAGEWALKER && PMAP_MAP_PDETABPAGE */
	}

	const size_t pte_idx = (va >> PGSHIFT) & (NPTEPG - 1);

	return ppg->ppg_ptes + pte_idx;
}
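#if 0
/*
 * Usage sketch (illustrative only): pmap_enter()-style code reserves
 * the PTE slot for va, allocating intermediate tables as needed, and
 * then stores the new entry through the returned pointer.
 */
	pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags);
	if (ptep == NULL)
		return ENOMEM;	/* possible only with PMAP_CANFAIL */
	*ptep = npte;
#endif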