/*	$NetBSD: pmap.c,v 1.1 2003/02/03 17:10:10 matt Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt (at) 3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_altivec.h"
#include "opt_pmap.h"
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/device.h>		/* for evcnt */
#include <sys/systm.h>

#if __NetBSD_Version__ < 105010000
#include <vm/vm.h>
#include <vm/vm_kern.h>
#define	splvm()		splimp()
#endif

#include <uvm/uvm.h>

#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <powerpc/spr.h>
#include <powerpc/oea/sr_601.h>
#if __NetBSD_Version__ > 105010000
#include <powerpc/oea/bat.h>
#else
#include <powerpc/bat.h>
#endif

#if defined(DEBUG) || defined(PMAPCHECK)
#define	STATIC
#else
#define	STATIC	static
#endif
struct pteg {
	pte_t pt[8];
};
typedef struct pteg pteg_t;

#ifdef ALTIVEC
int pmap_use_altivec;
#endif

volatile pteg_t *pmap_pteg_table;
unsigned int pmap_pteg_cnt;
unsigned int pmap_pteg_mask;
paddr_t pmap_memlimit = -NBPG;		/* there is no limit */

struct pmap kernel_pmap_;
unsigned int pmap_pages_stolen;
u_long pmap_pte_valid;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
u_long pmap_pvo_enter_depth;
u_long pmap_pvo_remove_depth;
#endif

int physmem;
#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;
static u_int mem_cnt, avail_cnt;

extern sr_t iosrtable[16];

#ifdef __HAVE_PMAP_PHYSSEG
/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTR_SHFT.
 */
#define	ATTR_SHFT	4
struct pmap_physseg pmap_physseg;
#endif
/*
 * The following structure is exactly 32 bytes long (one cacheline).
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
	struct pte pvo_pte;			/* Prebuilt PTE */
	pmap_t pvo_pmap;			/* ptr to owning pmap */
	vaddr_t pvo_vaddr;			/* VA of entry */
#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
#define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
};
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo,i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))

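/*
 * Illustrative sketch (not compiled): how the PTEG slot index is packed
 * into the low, page-offset bits of pvo_vaddr.  Since PVO_VADDR() masks
 * with ~ADDR_POFF, those low bits are free to carry per-entry flags.
 * Only the macros defined above are used here.
 */
#if 0
static void
pvo_flag_example(struct pvo_entry *pvo)
{
	PVO_PTEGIDX_SET(pvo, 5);		/* record slot 5 as valid */
	KASSERT(PVO_PTEGIDX_ISSET(pvo));
	KASSERT(PVO_PTEGIDX_GET(pvo) == 5);	/* slot recovered from low bits */
	PVO_PTEGIDX_CLR(pvo);			/* back to "no resident PTE" */
	KASSERT(!PVO_PTEGIDX_ISSET(pvo));
}
#endif
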
TAILQ_HEAD(pvo_tqhead, pvo_entry);
struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of kernel unmanaged pages */
struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

struct pool pmap_pool;		/* pool for pmap structures */
struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */

/*
 * We keep a cache of unmanaged pages to be used for pvo entries for
 * unmanaged pages.
 */
struct pvo_page {
	SIMPLEQ_ENTRY(pvo_page) pvop_link;
};
SIMPLEQ_HEAD(pvop_head, pvo_page);
struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
u_long pmap_upvop_free;
u_long pmap_upvop_maxfree;
u_long pmap_mpvop_free;
u_long pmap_mpvop_maxfree;

STATIC void *pmap_pool_ualloc(struct pool *, int);
STATIC void *pmap_pool_malloc(struct pool *, int);

STATIC void pmap_pool_ufree(struct pool *, void *);
STATIC void pmap_pool_mfree(struct pool *, void *);

static struct pool_allocator pmap_pool_mallocator = {
	pmap_pool_malloc, pmap_pool_mfree, 0,
};

static struct pool_allocator pmap_pool_uallocator = {
	pmap_pool_ualloc, pmap_pool_ufree, 0,
};

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void pmap_pte_print(volatile pte_t *);
#endif

#ifdef DDB
void pmap_pteg_check(void);
void pmap_pteg_dist(void);
void pmap_print_pte(pmap_t, vaddr_t);
void pmap_print_mmuregs(void);
#endif

#if defined(DEBUG) || defined(PMAPCHECK)
#ifdef PMAPCHECK
int pmapcheck = 1;
#else
int pmapcheck = 0;
#endif
void pmap_pvo_verify(void);
STATIC void pmap_pvo_check(const struct pvo_entry *);
#define	PMAP_PVO_CHECK(pvo)	 		\
	do {					\
		if (pmapcheck)			\
			pmap_pvo_check(pvo);	\
	} while (0)
#else
#define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
#endif
STATIC int pmap_pte_insert(int, pte_t *);
STATIC int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
	vaddr_t, paddr_t, u_int, int);
STATIC void pmap_pvo_remove(struct pvo_entry *, int);
STATIC struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
STATIC volatile pte_t *pmap_pvo_to_pte(const struct pvo_entry *, int);

STATIC void tlbia(void);

STATIC void pmap_release(pmap_t);
STATIC void *pmap_boot_find_memory(psize_t, psize_t, int);

#define	VSID_NBPW	(sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static int pmap_initialized;

#if defined(DEBUG) || defined(PMAPDEBUG)
#define	PMAPDEBUG_BOOT		0x0001
#define	PMAPDEBUG_PTE		0x0002
#define	PMAPDEBUG_EXEC		0x0008
#define	PMAPDEBUG_PVOENTER	0x0010
#define	PMAPDEBUG_PVOREMOVE	0x0020
#define	PMAPDEBUG_ACTIVATE	0x0100
#define	PMAPDEBUG_CREATE	0x0200
#define	PMAPDEBUG_ENTER		0x1000
#define	PMAPDEBUG_KENTER	0x2000
#define	PMAPDEBUG_KREMOVE	0x4000
#define	PMAPDEBUG_REMOVE	0x8000
unsigned int pmapdebug = 0;
# define DPRINTF(x)		printf x
# define DPRINTFN(n, x)		if (pmapdebug & PMAPDEBUG_ ## n) printf x
#else
# define DPRINTF(x)
# define DPRINTFN(n, x)
#endif


#ifdef PMAPCOUNTERS
#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#define	PMAPCOUNT2(ev)	((ev).ev_count++)

struct evcnt pmap_evcnt_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "pages mapped");
struct evcnt pmap_evcnt_unmappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	    "pmap", "pages unmapped");

struct evcnt pmap_evcnt_kernel_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "kernel pages mapped");
struct evcnt pmap_evcnt_kernel_unmappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_kernel_mappings,
	    "pmap", "kernel pages unmapped");

struct evcnt pmap_evcnt_mappings_replaced =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "page mappings replaced");

struct evcnt pmap_evcnt_exec_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	    "pmap", "exec pages mapped");
struct evcnt pmap_evcnt_exec_cached =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	    "pmap", "exec pages cached");

struct evcnt pmap_evcnt_exec_synced =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages synced");
struct evcnt pmap_evcnt_exec_synced_clear_modify =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages synced (CM)");

struct evcnt pmap_evcnt_exec_uncached_page_protect =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (PP)");
struct evcnt pmap_evcnt_exec_uncached_clear_modify =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (CM)");
struct evcnt pmap_evcnt_exec_uncached_zero_page =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (ZP)");
struct evcnt pmap_evcnt_exec_uncached_copy_page =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (CP)");

struct evcnt pmap_evcnt_updates =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "updates");
struct evcnt pmap_evcnt_collects =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "collects");
struct evcnt pmap_evcnt_copies =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "copies");

struct evcnt pmap_evcnt_ptes_spilled =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes spilled from overflow");
struct evcnt pmap_evcnt_ptes_unspilled =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes not spilled");
struct evcnt pmap_evcnt_ptes_evicted =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes evicted");

struct evcnt pmap_evcnt_ptes_primary[8] = {
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[0]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[1]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[2]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[3]"),

    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[4]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[5]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[6]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[7]"),
};
struct evcnt pmap_evcnt_ptes_secondary[8] = {
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[0]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[1]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[2]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[3]"),

    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[4]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[5]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[6]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[7]"),
};
struct evcnt pmap_evcnt_ptes_removed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes removed");
struct evcnt pmap_evcnt_ptes_changed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes changed");

/*
 * From pmap_subr.c
 */
extern struct evcnt pmap_evcnt_zeroed_pages;
extern struct evcnt pmap_evcnt_copied_pages;
extern struct evcnt pmap_evcnt_idlezeroed_pages;
#else
#define	PMAPCOUNT(ev)	((void) 0)
#define	PMAPCOUNT2(ev)	((void) 0)
#endif

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync")
#define	SYNC()		__asm __volatile("sync")
#define	EIEIO()		__asm __volatile("eieio")
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	mtmsr(psl)
#define	MFPVR()		mfpvr()
#define	MFSRIN(va)	mfsrin(va)
#define	MFTB()		mfrtcltbl()

static __inline sr_t
mfsrin(vaddr_t va)
{
	sr_t sr;
	__asm __volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
	return sr;
}

static __inline u_int32_t
pmap_interrupts_off(void)
{
	u_int32_t msr = MFMSR();
	if (msr & PSL_EE)
		MTMSR(msr & ~PSL_EE);
	return msr;
}

static void
pmap_interrupts_restore(u_int32_t msr)
{
	if (msr & PSL_EE)
		MTMSR(msr);
}


static __inline u_int32_t
mfrtcltbl(void)
{

	if ((MFPVR() >> 16) == MPC601)
		return (mfrtcl() >> 7);
	else
		return (mftbl());
}

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

void
tlbia(void)
{
	caddr_t i;

	SYNC();
	/*
	 * Why not use "tlbia"?  Because not all processors implement it.
	 *
	 * This needs to be a per-cpu callback to do the appropriate thing
	 * for the CPU. XXX
	 */
	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
		SYNC();
	}
	TLBSYNC();
	SYNC();
}

static __inline sr_t
va_to_sr(sr_t *sr, vaddr_t va)
{
	return sr[(uintptr_t)va >> ADDR_SR_SHFT];
}

static __inline int
va_to_pteg(sr_t sr, vaddr_t addr)
{
	int hash;

	hash = (sr & SR_VSID) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & pmap_pteg_mask;
}

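/*
 * Illustrative sketch (not compiled): in the PowerPC OEA hashed page
 * table, the secondary PTEG index is the primary hash complemented
 * within the table, i.e. the primary index XORed with pmap_pteg_mask.
 * pmap_pte_insert() below relies on exactly this relationship.
 */
#if 0
static void
pteg_hash_example(sr_t sr, vaddr_t va)
{
	int primary = va_to_pteg(sr, va);
	int secondary = primary ^ pmap_pteg_mask;

	/* flipping twice returns to the primary index */
	KASSERT((secondary ^ pmap_pteg_mask) == primary);
}
#endif
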
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address don't
 * technically exist in the PTE.  But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
 */
static vaddr_t
pmap_pte_to_va(volatile const pte_t *pt)
{
	vaddr_t va;
	uintptr_t ptaddr = (uintptr_t) pt;

	if (pt->pte_hi & PTE_HID)
		ptaddr ^= (pmap_pteg_mask << 6);

	/* PPC Bits 10-19 */
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr >> 6)) & 0x3ff;
	va <<= ADDR_PIDX_SHFT;

	/* PPC Bits 4-9 */
	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;

	/* PPC Bits 0-3 */
	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;

	return va;
}
#endif

static __inline struct pvo_head *
pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
{
#ifdef __HAVE_VM_PAGE_MD
	struct vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg_p != NULL)
		*pg_p = pg;
	if (pg == NULL)
		return &pmap_pvo_unmanaged;
	return &pg->mdpage.mdpg_pvoh;
#endif
#ifdef __HAVE_PMAP_PHYSSEG
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (pg_p != NULL)
		*pg_p = PHYS_TO_VM_PAGE(pa);	/* pg is a segment offset, not a vm_page */
	if (bank == -1)
		return &pmap_pvo_unmanaged;
	return &vm_physmem[bank].pmseg.pvoh[pg];
#endif
}

static __inline struct pvo_head *
vm_page_to_pvoh(struct vm_page *pg)
{
#ifdef __HAVE_VM_PAGE_MD
	return &pg->mdpage.mdpg_pvoh;
#endif
#ifdef __HAVE_PMAP_PHYSSEG
	return pa_to_pvoh(VM_PAGE_TO_PHYS(pg), NULL);
#endif
}


#ifdef __HAVE_PMAP_PHYSSEG
static __inline char *
pa_to_attr(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
}
#endif

static __inline void
pmap_attr_clear(struct vm_page *pg, int ptebit)
{
#ifdef __HAVE_PMAP_PHYSSEG
	*pa_to_attr(VM_PAGE_TO_PHYS(pg)) &= ~(ptebit >> ATTR_SHFT);
#endif
#ifdef __HAVE_VM_PAGE_MD
	pg->mdpage.mdpg_attrs &= ~ptebit;
#endif
}

static __inline int
pmap_attr_fetch(struct vm_page *pg)
{
#ifdef __HAVE_PMAP_PHYSSEG
	return *pa_to_attr(VM_PAGE_TO_PHYS(pg)) << ATTR_SHFT;
#endif
#ifdef __HAVE_VM_PAGE_MD
	return pg->mdpage.mdpg_attrs;
#endif
}

static __inline void
pmap_attr_save(struct vm_page *pg, int ptebit)
{
#ifdef __HAVE_PMAP_PHYSSEG
	*pa_to_attr(VM_PAGE_TO_PHYS(pg)) |= (ptebit >> ATTR_SHFT);
#endif
#ifdef __HAVE_VM_PAGE_MD
	pg->mdpage.mdpg_attrs |= ptebit;
#endif
}

static __inline int
pmap_pte_compare(const volatile pte_t *pt, const pte_t *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi
#if 0
	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
	        ~(PTE_REF|PTE_CHG)) == 0
#endif
	    )
		return 1;
	return 0;
}

static __inline int
pmap_pte_match(volatile pte_t *pt, sr_t sr, vaddr_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID)
		== (  ((sr & SR_VSID) << PTE_VSID_SHFT)
		    | ((va >> ADDR_API_SHFT) & PTE_API)
		    | which);
}

static __inline void
pmap_pte_create(pte_t *pt, sr_t sr, vaddr_t va, u_int pte_lo)
{
	/*
	 * Construct the PTE.  Default to IMB initially.  Valid bit
	 * only gets set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
		| (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
pmap_pte_synch(volatile pte_t *pt, pte_t *pvo_pt)
{
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
}

static __inline void
pmap_pte_clear(volatile pte_t *pt, vaddr_t va, int ptebit)
{
	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
pmap_pte_set(volatile pte_t *pt, pte_t *pvo_pt)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pvo_pt->pte_hi & PTE_VALID)
		panic("pte_set: setting an already valid pte %p", pvo_pt);
#endif
	pvo_pt->pte_hi |= PTE_VALID;
	/*
	 * Update the PTE as defined in section 7.6.3.1
	 * Note that the REF/CHG bits are from pvo_pt and thus should
	 * have been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	pmap_pte_valid++;
}

static __inline void
pmap_pte_unset(volatile pte_t *pt, pte_t *pvo_pt, vaddr_t va)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
	if ((pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
#endif

	pvo_pt->pte_hi &= ~PTE_VALID;
	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();
	/*
	 * Invalidate the pte ... (Section 7.6.3.3)
	 */
	pt->pte_hi &= ~PTE_VALID;
	SYNC();
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
	/*
	 * Save the ref & chg bits ...
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static __inline void
pmap_pte_change(volatile pte_t *pt, pte_t *pvo_pt, vaddr_t va)
{
	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
 * (either primary or secondary location).
 *
 * Note: both the destination and source PTEs must not have PTE_VALID set.
 */

STATIC int
pmap_pte_insert(int ptegidx, pte_t *pvo_pt)
{
	volatile pte_t *pt;
	int i;

#if defined(DEBUG)
	DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%x 0x%x\n",
		ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo));
#endif
	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}
	return -1;
}

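/*
 * Usage sketch (not compiled): how callers consume pmap_pte_insert()'s
 * return value.  On success the slot index is recorded in the PVO; on
 * failure the PVO is left as an "evicted" entry for the spill handler.
 * This mirrors pmap_pvo_enter() below; the function name is hypothetical.
 */
#if 0
static void
pte_insert_example(struct pmap *pm, struct pvo_entry *pvo, int ptegidx)
{
	int i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);

	if (i >= 0)
		PVO_PTEGIDX_SET(pvo, i);	/* PTE resident in slot i */
	else
		pm->pm_evictions++;		/* no room; spill on demand */
}
#endif
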
/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * This runs in either real mode (if dealing with an exception spill)
 * or virtual mode when dealing with manually spilling one of the
 * kernel's pte entries.  In either case, interrupts are already
 * disabled.
 */
int
pmap_pte_spill(struct pmap *pm, vaddr_t addr)
{
	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
	struct pvo_entry *pvo;
	struct pvo_tqhead *pvoh, *vpvoh;
	int ptegidx, i, j;
	sr_t sr;
	volatile pteg_t *pteg;
	volatile pte_t *pt;

	sr = va_to_sr(pm->pm_sr, addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry. Use the primary hash for this.
	 *
	 * Use low bits of timebase as random generator
	 */
	pteg = &pmap_pteg_table[ptegidx];
	i = MFTB() & 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	pvoh = &pmap_pvo_table[ptegidx];
	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {

		/*
		 * We need to find the pvo entry for this address...
		 */
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * If we haven't found the source and we come to a PVO with
		 * a valid PTE, then we know we can't find it because all
		 * evicted PVOs are always first in the list.
		 */
		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
			break;
		if (source_pvo == NULL &&
		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
				   pvo->pvo_pte.pte_hi & PTE_HID)) {

			/*
			 * Now we have found the entry to be spilled into the
			 * pteg.  Attempt to insert it into the page table.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				pvo->pvo_pmap->pm_evictions--;
				PMAPCOUNT(ptes_spilled);
				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
				    ? pmap_evcnt_ptes_secondary
				    : pmap_evcnt_ptes_primary)[j]);

				/*
				 * Since we keep the evicted entries at the
				 * front of the PVO list, we need to move
				 * this (now resident) PVO after the evicted
				 * entries.
				 */
				next_pvo = TAILQ_NEXT(pvo, pvo_olink);

				/*
				 * If we don't have to move (either we were
				 * the last entry or the next entry was
				 * valid), don't change our position.
				 * Otherwise move ourselves to the tail
				 * of the queue.
				 */
				if (next_pvo != NULL &&
				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
				}
				return 1;
			}
			source_pvo = pvo;
			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			vpvoh = pvoh;
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		PMAPCOUNT(ptes_unspilled);
		return 0;
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has "
			    "no pvo entry!", pt);

		/*
		 * If this is a secondary PTE, we need to search
		 * its primary pvo bucket for the matching PVO.
		 */
		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask];
		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
			PMAP_PVO_CHECK(pvo);		/* sanity check */

			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}
		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has "
			    "no pvo entry!", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA of the PVO we are
	 * replacing even though it's valid; if we don't, we lose any
	 * ref/chg bit changes contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	/*
	 * To enforce the PVO list ordering constraint that all
	 * evicted entries should come before all valid entries,
	 * move the source PVO to the tail of its list and the
	 * victim PVO to the head of its list (which might not be
	 * the same list, if the victim was using the secondary hash).
	 */
	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);
	victim_pvo->pvo_pmap->pm_evictions++;
	source_pvo->pvo_pmap->pm_evictions--;

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
	PMAPCOUNT(ptes_spilled);
	PMAPCOUNT(ptes_evicted);
	PMAPCOUNT(ptes_removed);

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);
	return 1;
}

/*
 * Restrict given range to physical memory
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start
		    && *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}

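/*
 * Usage sketch (not compiled): clamping a candidate range to physical
 * memory.  The start/size values here are hypothetical.
 */
#if 0
static void
real_memory_example(void)
{
	paddr_t start = 0x00f00000;	/* hypothetical range */
	psize_t size = 0x00200000;

	pmap_real_memory(&start, &size);
	if (size == 0)
		printf("range does not intersect physical memory\n");
}
#endif
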
/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	int s;
#ifdef __HAVE_PMAP_PHYSSEG
	struct pvo_tqhead *pvoh;
	int bank;
	long sz;
	char *attr;

	s = splvm();
	pvoh = pmap_physseg.pvoh;
	attr = pmap_physseg.attrs;
	for (bank = 0; bank < vm_nphysseg; bank++) {
		sz = vm_physmem[bank].end - vm_physmem[bank].start;
		vm_physmem[bank].pmseg.pvoh = pvoh;
		vm_physmem[bank].pmseg.attrs = attr;
		for (; sz > 0; sz--, pvoh++, attr++) {
			TAILQ_INIT(pvoh);
			*attr = 0;
		}
	}
	splx(s);
#endif

	s = splvm();
	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
	    &pmap_pool_mallocator);

	pool_setlowat(&pmap_mpvo_pool, 1008);

	pmap_initialized = 1;
	splx(s);

#ifdef PMAPCOUNTERS
	evcnt_attach_static(&pmap_evcnt_mappings);
	evcnt_attach_static(&pmap_evcnt_mappings_replaced);
	evcnt_attach_static(&pmap_evcnt_unmappings);

	evcnt_attach_static(&pmap_evcnt_kernel_mappings);
	evcnt_attach_static(&pmap_evcnt_kernel_unmappings);

	evcnt_attach_static(&pmap_evcnt_exec_mappings);
	evcnt_attach_static(&pmap_evcnt_exec_cached);
	evcnt_attach_static(&pmap_evcnt_exec_synced);
	evcnt_attach_static(&pmap_evcnt_exec_synced_clear_modify);

	evcnt_attach_static(&pmap_evcnt_exec_uncached_page_protect);
	evcnt_attach_static(&pmap_evcnt_exec_uncached_clear_modify);
	evcnt_attach_static(&pmap_evcnt_exec_uncached_zero_page);
	evcnt_attach_static(&pmap_evcnt_exec_uncached_copy_page);

	evcnt_attach_static(&pmap_evcnt_zeroed_pages);
	evcnt_attach_static(&pmap_evcnt_copied_pages);
	evcnt_attach_static(&pmap_evcnt_idlezeroed_pages);

	evcnt_attach_static(&pmap_evcnt_updates);
	evcnt_attach_static(&pmap_evcnt_collects);
	evcnt_attach_static(&pmap_evcnt_copies);

	evcnt_attach_static(&pmap_evcnt_ptes_spilled);
	evcnt_attach_static(&pmap_evcnt_ptes_unspilled);
	evcnt_attach_static(&pmap_evcnt_ptes_evicted);
	evcnt_attach_static(&pmap_evcnt_ptes_removed);
	evcnt_attach_static(&pmap_evcnt_ptes_changed);
	evcnt_attach_static(&pmap_evcnt_ptes_primary[0]);
	evcnt_attach_static(&pmap_evcnt_ptes_primary[1]);
	evcnt_attach_static(&pmap_evcnt_ptes_primary[2]);
	evcnt_attach_static(&pmap_evcnt_ptes_primary[3]);
	evcnt_attach_static(&pmap_evcnt_ptes_primary[4]);
	evcnt_attach_static(&pmap_evcnt_ptes_primary[5]);
	evcnt_attach_static(&pmap_evcnt_ptes_primary[6]);
	evcnt_attach_static(&pmap_evcnt_ptes_primary[7]);
	evcnt_attach_static(&pmap_evcnt_ptes_secondary[0]);
	evcnt_attach_static(&pmap_evcnt_ptes_secondary[1]);
	evcnt_attach_static(&pmap_evcnt_ptes_secondary[2]);
	evcnt_attach_static(&pmap_evcnt_ptes_secondary[3]);
	evcnt_attach_static(&pmap_evcnt_ptes_secondary[4]);
	evcnt_attach_static(&pmap_evcnt_ptes_secondary[5]);
	evcnt_attach_static(&pmap_evcnt_ptes_secondary[6]);
	evcnt_attach_static(&pmap_evcnt_ptes_secondary[7]);
#endif
}

/*
 * How much virtual space does the kernel get?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
	/*
	 * For now, reserve one segment (minus some overhead) for kernel
	 * virtual memory
	 */
	*start = VM_MIN_KERNEL_ADDRESS;
	*end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Allocate, initialize, and return a new physical map.
 */
pmap_t
pmap_create(void)
{
	pmap_t pm;

	pm = pool_get(&pmap_pool, PR_WAITOK);
	memset((caddr_t)pm, 0, sizeof *pm);
	pmap_pinit(pm);

	DPRINTFN(CREATE,("pmap_create: pm %p:\n"
	    "\t%06x %06x %06x %06x    %06x %06x %06x %06x\n"
	    "\t%06x %06x %06x %06x    %06x %06x %06x %06x\n", pm,
	    pm->pm_sr[0], pm->pm_sr[1], pm->pm_sr[2], pm->pm_sr[3],
	    pm->pm_sr[4], pm->pm_sr[5], pm->pm_sr[6], pm->pm_sr[7],
	    pm->pm_sr[8], pm->pm_sr[9], pm->pm_sr[10], pm->pm_sr[11],
	    pm->pm_sr[12], pm->pm_sr[13], pm->pm_sr[14], pm->pm_sr[15]));
	return pm;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
unsigned short pmap_context = 0;
void
pmap_pinit(pmap_t pm)
{
	int i, mask;
	unsigned int entropy = MFTB();

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	for (i = 0; i < NPMAPS ; i += VSID_NBPW) {
		static unsigned int pmap_vsidcontext;
		unsigned int hash, n;

		/* Create a new value by multiplying by a prime and adding
		 * in entropy from the timebase register.  This is to make
		 * the VSID more random so that the PT Hash function collides
		 * less often. (note that the prime causes gcc to do shifts
		 * instead of a multiply)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0)			/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW-1));
		hash = (pmap_vsidcontext & 0xfffff);
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (pmap_vsid_bitmap[n] == 0xffffffff) {
				entropy = (pmap_vsidcontext >> 20);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW-1);
			hash |= i;
		}
		pmap_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY;
		return;
	}
	panic("pmap_pinit: out of segments");
}
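
/*
 * Illustrative sketch (not compiled): the bitmap bookkeeping used above.
 * Each 32-bit word of pmap_vsid_bitmap covers VSID_NBPW (32) hash values,
 * so a hash splits into a word index (hash / 32, i.e. hash >> 5) and a
 * bit within that word (hash % 32, i.e. hash & (VSID_NBPW - 1)).
 * The hash value used here is hypothetical.
 */
#if 0
static void
vsid_bitmap_example(void)
{
	unsigned int hash = 0x4b;			/* hypothetical */
	unsigned int n = hash / VSID_NBPW;		/* word 2 */
	unsigned int mask = 1 << (hash % VSID_NBPW);	/* bit 11 */

	pmap_vsid_bitmap[n] |= mask;	/* claim it, as pmap_pinit() does */
}
#endif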

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(pmap_t pm)
{
	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(pmap_t pm)
{
	if (--pm->pm_refs == 0) {
		pmap_release(pm);
		pool_put(&pmap_pool, pm);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(pmap_t pm)
{
	int idx, mask;

	if (pm->pm_sr[0] == 0)
		panic("pmap_release");
	idx = VSID_TO_HASH(pm->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	pmap_vsid_bitmap[idx] &= ~mask;
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
	vsize_t len, vaddr_t src_addr)
{
	PMAPCOUNT(copies);
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
	PMAPCOUNT(updates);
	TLBSYNC();
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap_t pm)
{
	PMAPCOUNT(collects);
}

static __inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;
	/*
	 * We can find the actual pte entry without searching by
	 * grabbing the PTEG slot index stashed in the low bits of
	 * pvo_vaddr (PVO_PTEGIDX_GET) and by noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte_hi & PTE_HID)
		pteidx ^= pmap_pteg_mask * 8;
	return pteidx;
}
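
/*
 * Illustrative sketch (not compiled): a pteidx combines the PTEG index
 * with the slot within the PTEG, so pmap_pvo_to_pte() below can recover
 * the PTE address as &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7].
 * A secondary-hash entry is flagged by PTE_HID and lives in the
 * complementary PTEG, hence the XOR with pmap_pteg_mask * 8 above.
 * The function name here is hypothetical.
 */
#if 0
static void
pvo_pte_index_example(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	volatile pte_t *pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];

	/* for a primary-hash entry the PTEG index is recovered directly */
	if ((pvo->pvo_pte.pte_hi & PTE_HID) == 0)
		KASSERT((pteidx >> 3) == ptegidx);
	(void) pt;
}
#endif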

volatile pte_t *
pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	volatile pte_t *pt;

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
		return NULL;
#endif

	/*
	 * If we haven't been supplied the pteidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		sr_t sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	}

	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];

#if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
	return pt;
#else
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
		    "pvo but no valid pte index", pvo);
	}
	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
		    "pvo but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
			    "pmap_pteg_table %p but invalid in pvo",
			    pvo, pt);
		}
		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
#if defined(DEBUG) || defined(PMAPCHECK)
			pmap_pte_print(pt);
#endif
			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
			    "not match pte %p in pmap_pteg_table",
			    pvo, pt);
		}
		return pt;
	}

	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
#if defined(DEBUG) || defined(PMAPCHECK)
		pmap_pte_print(pt);
#endif
		panic("pmap_pvo_to_pte: pvo %p: has invalid pte %p in "
		    "pmap_pteg_table but valid in pvo", pvo, pt);
	}
	return NULL;
#endif	/* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
}

struct pvo_entry *
pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int ptegidx;
	sr_t sr;

	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
		if ((uintptr_t) pvo >= SEGMENT_LENGTH)
			panic("pmap_pvo_find_va: invalid pvo %p on "
			    "list %#x (%p)", pvo, ptegidx,
			     &pmap_pvo_table[ptegidx]);
#endif
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
			return pvo;
		}
	}
	return NULL;
}

#if defined(DEBUG) || defined(PMAPCHECK)
void
pmap_pvo_check(const struct pvo_entry *pvo)
{
	struct pvo_head *pvo_head;
	struct pvo_entry *pvo0;
	volatile pte_t *pt;
	int failed = 0;

	if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
		panic("pmap_pvo_check: pvo %p: invalid address", pvo);

	if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
		printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
		    pvo, pvo->pvo_pmap);
		failed = 1;
	}

	if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
	    (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
		printf("pmap_pvo_check: pvo %p: invalid olink address %p\n",
		    pvo, TAILQ_NEXT(pvo, pvo_olink));
		failed = 1;
	}

	if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
	    (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
		printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n",
		    pvo, LIST_NEXT(pvo, pvo_vlink));
		failed = 1;
	}

	if (pvo->pvo_vaddr & PVO_MANAGED) {
		pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
	} else {
		if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
			printf("pmap_pvo_check: pvo %p: non kernel address "
			    "on kernel unmanaged list\n", pvo);
			failed = 1;
		}
		pvo_head = &pmap_pvo_kunmanaged;
	}
	LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
		if (pvo0 == pvo)
			break;
	}
	if (pvo0 == NULL) {
		printf("pmap_pvo_check: pvo %p: not present "
		    "on its vlist head %p\n", pvo, pvo_head);
		failed = 1;
	}
	if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
		printf("pmap_pvo_check: pvo %p: not present "
		    "on its olist head\n", pvo);
		failed = 1;
	}
	pt = pmap_pvo_to_pte(pvo, -1);
	if (pt == NULL) {
		if (pvo->pvo_pte.pte_hi & PTE_VALID) {
			printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
			    "no PTE\n", pvo);
			failed = 1;
		}
	} else {
		if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
		    (uintptr_t) pt >=
		    (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
			printf("pmap_pvo_check: pvo %p: pte %p not in "
			    "pteg table\n", pvo, pt);
			failed = 1;
		}
		if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
			printf("pmap_pvo_check: pvo %p: pte %p not at "
			    "the PTEG slot recorded in the pvo\n", pvo, pt);
			failed = 1;
		}
		if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
			printf("pmap_pvo_check: pvo %p: pte_hi differ: "
			    "%#x/%#x\n", pvo, pvo->pvo_pte.pte_hi, pt->pte_hi);
			failed = 1;
		}
		if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
		    (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
			printf("pmap_pvo_check: pvo %p: pte_lo differ: "
			    "%#x/%#x\n", pvo,
			    pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN),
			    pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN));
			failed = 1;
		}
		if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
			printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#lx"
			    " doesn't match PVO's VA %#lx\n",
			    pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
			failed = 1;
		}
		if (failed)
			pmap_pte_print(pt);
	}
	if (failed)
		panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
		    pvo->pvo_pmap);
}
#endif /* DEBUG || PMAPCHECK */

/*
 * This returns whether this is the first mapping of a page.
 */
int
pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
	vaddr_t va, paddr_t pa, u_int pte_lo, int flags)
{
	struct pvo_entry *pvo;
	struct pvo_tqhead *pvoh;
	u_int32_t msr;
	sr_t sr;
	int ptegidx;
	int i;
	int poolflags = PR_NOWAIT;

#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pmap_pvo_remove_depth > 0)
		panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
	if (++pmap_pvo_enter_depth > 1)
		panic("pmap_pvo_enter: called recursively!");
#endif

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	msr = pmap_interrupts_off();
	/*
	 * Remove any existing mapping for this page.  Reuse the
	 * pvo entry if there is already a mapping.
	 */
	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
#ifdef DEBUG
			if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
			    ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
			    ~(PTE_REF|PTE_CHG)) == 0 &&
			   va < VM_MIN_KERNEL_ADDRESS) {
				printf("pmap_pvo_enter: pvo %p: dup %#x/%#lx\n",
				    pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
				printf("pmap_pvo_enter: pte_hi=%#x sr=%#x\n",
				    pvo->pvo_pte.pte_hi,
				    pm->pm_sr[va >> ADDR_SR_SHFT]);
				pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
#ifdef DDBX
				Debugger();
#endif
			}
#endif
			PMAPCOUNT(mappings_replaced);
			pmap_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	pmap_interrupts_restore(msr);
	pvo = pool_get(pl, poolflags);
	msr = pmap_interrupts_off();
	if (pvo == NULL) {
#if 0
		pvo = pmap_pvo_reclaim(pm);
		if (pvo == NULL) {
#endif
			if ((flags & PMAP_CANFAIL) == 0)
				panic("pmap_pvo_enter: failed");
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
			pmap_pvo_enter_depth--;
#endif
			pmap_interrupts_restore(msr);
			return ENOMEM;
#if 0
		}
#endif
	}
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE) {
		PMAPCOUNT(exec_mappings);
		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	}
	if (flags & PMAP_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &pmap_pvo_kunmanaged) {
		pvo->pvo_vaddr |= PVO_MANAGED;
		PMAPCOUNT(mappings);
	} else {
		PMAPCOUNT(kernel_mappings);
	}
	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);

	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count++;
	pvo->pvo_pmap->pm_stats.resident_count++;
#if defined(DEBUG)
	if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS)
		DPRINTFN(PVOENTER,
		    ("pmap_pvo_enter: pvo %p: pm %p va %#lx pa %#lx\n",
		    pvo, pm, va, pa));
#endif

	/*
	 * We hope this succeeds but it isn't required.
	 */
	pvoh = &pmap_pvo_table[ptegidx];
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
		PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
		    ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
		TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
	} else {

		/*
		 * Since we didn't have room for this entry (which makes it
		 * an evicted entry), place it at the head of the list.
		 */
		TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
		PMAPCOUNT(ptes_evicted);
		pm->pm_evictions++;
	}
	PMAP_PVO_CHECK(pvo);		/* sanity check */
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	pmap_pvo_enter_depth--;
#endif
	pmap_interrupts_restore(msr);
	return 0;
}

   1552 void
   1553 pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
   1554 {
   1555 	volatile pte_t *pt;
   1556 	int ptegidx;
   1557 
   1558 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1559 	if (++pmap_pvo_remove_depth > 1)
   1560 		panic("pmap_pvo_remove: called recursively!");
   1561 #endif
   1562 
   1563 	/*
   1564 	 * If we haven't been supplied the ptegidx, calculate it.
   1565 	 */
   1566 	if (pteidx == -1) {
   1567 		sr_t sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
   1568 		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
   1569 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
   1570 	} else {
   1571 		ptegidx = pteidx >> 3;
   1572 		if (pvo->pvo_pte.pte_hi & PTE_HID)
   1573 			ptegidx ^= pmap_pteg_mask;
   1574 	}
   1575 	PMAP_PVO_CHECK(pvo);		/* sanity check */
   1576 
   1577 	/*
   1578 	 * If there is an active pte entry, we need to deactivate it
   1579 	 * (and save the ref & chg bits).
   1580 	 */
   1581 	pt = pmap_pvo_to_pte(pvo, pteidx);
   1582 	if (pt != NULL) {
   1583 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   1584 		PVO_PTEGIDX_CLR(pvo);
   1585 		PMAPCOUNT(ptes_removed);
   1586 	} else {
   1587 		KASSERT(pvo->pvo_pmap->pm_evictions > 0);
   1588 		pvo->pvo_pmap->pm_evictions--;
   1589 	}
   1590 
   1591 	/*
   1592 	 * Update our statistics
   1593 	 */
   1594 	pvo->pvo_pmap->pm_stats.resident_count--;
   1595 	if (pvo->pvo_pte.pte_lo & PVO_WIRED)
   1596 		pvo->pvo_pmap->pm_stats.wired_count--;
   1597 
   1598 	/*
   1599 	 * Save the REF/CHG bits into their cache if the page is managed.
   1600 	 */
   1601 	if (pvo->pvo_vaddr & PVO_MANAGED) {
   1602 		u_int ptelo = pvo->pvo_pte.pte_lo;
   1603 		struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
   1604 
   1605 		if (pg != NULL) {
   1606 			pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
   1607 		}
   1608 		PMAPCOUNT(unmappings);
   1609 	} else {
   1610 		PMAPCOUNT(kernel_unmappings);
   1611 	}
   1612 
   1613 	/*
   1614 	 * Remove the PVO from its lists and return it to the pool.
   1615 	 */
   1616 	LIST_REMOVE(pvo, pvo_vlink);
   1617 	TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
   1618 	pool_put(pvo->pvo_vaddr & PVO_MANAGED
   1619 	    ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
   1620 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1621 	pmap_pvo_remove_depth--;
   1622 #endif
   1623 }
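
         /*
          * A note on the pteidx encoding used above (a sketch matching
          * the arithmetic in pmap_pvo_remove): pteidx packs the PTEG
          * index together with the slot within the 8-entry group,
          *
          *	pteidx  = (ptegidx << 3) | slot;
          *	ptegidx = pteidx >> 3;
          *
          * and for a secondary-hash entry (PTE_HID set) the primary
          * group is recovered with ptegidx ^= pmap_pteg_mask.
          */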
   1624 
   1625 /*
   1626  * Insert physical page at pa into the given pmap at virtual address va.
   1627  */
   1628 int
   1629 pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
   1630 {
   1631 	struct mem_region *mp;
   1632 	struct pvo_head *pvo_head;
   1633 	struct vm_page *pg;
   1634 	struct pool *pl;
   1635 	u_int32_t pte_lo;
   1636 	int s;
   1637 	int error;
   1638 	u_int pvo_flags;
   1639 	u_int was_exec = 0;
   1640 
   1641 	if (__predict_false(!pmap_initialized)) {
   1642 		pvo_head = &pmap_pvo_kunmanaged;
   1643 		pl = &pmap_upvo_pool;
   1644 		pvo_flags = 0;
   1645 		pg = NULL;
   1646 		was_exec = PTE_EXEC;
   1647 	} else {
   1648 		pvo_head = pa_to_pvoh(pa, &pg);
   1649 		pl = &pmap_mpvo_pool;
   1650 		pvo_flags = PVO_MANAGED;
   1651 	}
   1652 
   1653 	DPRINTFN(ENTER,
   1654 	    ("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x):",
   1655 	    pm, va, pa, prot, flags));
   1656 
   1657 	/*
    1658 	 * If this is a managed page and it's the first reference to the
    1659 	 * page, clear the execness of the page.  Otherwise fetch the execness.
   1660 	 */
   1661 	if (pg != NULL)
   1662 		was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
   1663 
   1664 	DPRINTFN(ENTER, (" was_exec=%d", was_exec));
   1665 
   1666 	/*
   1667 	 * Assume the page is cache inhibited and access is guarded unless
   1668 	 * it's in our available memory array.  If it is in the memory array,
    1669 	 * assume it's memory-coherent.
   1670 	 */
   1671 	pte_lo = PTE_IG;
   1672 	if ((flags & PMAP_NC) == 0) {
   1673 		for (mp = mem; mp->size; mp++) {
   1674 			if (pa >= mp->start && pa < mp->start + mp->size) {
   1675 				pte_lo = PTE_M;
   1676 				break;
   1677 			}
   1678 		}
   1679 	}
   1680 
   1681 	if (prot & VM_PROT_WRITE)
   1682 		pte_lo |= PTE_BW;
   1683 	else
   1684 		pte_lo |= PTE_BR;
   1685 
   1686 	/*
   1687 	 * If this was in response to a fault, "pre-fault" the PTE's
   1688 	 * changed/referenced bit appropriately.
   1689 	 */
   1690 	if (flags & VM_PROT_WRITE)
   1691 		pte_lo |= PTE_CHG;
   1692 	if (flags & (VM_PROT_READ|VM_PROT_WRITE))
   1693 		pte_lo |= PTE_REF;
   1694 
   1695 #if 0
   1696 	if (pm == pmap_kernel()) {
   1697 		if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_READ)
   1698 			printf("pmap_pvo_enter: Kernel RO va %#lx pa %#lx\n",
   1699 				va, pa);
   1700 		if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == VM_PROT_NONE)
   1701 			printf("pmap_pvo_enter: Kernel N/A va %#lx pa %#lx\n",
   1702 				va, pa);
   1703 	}
   1704 #endif
   1705 
   1706 	/*
   1707 	 * We need to know if this page can be executable
   1708 	 */
   1709 	flags |= (prot & VM_PROT_EXECUTE);
   1710 
   1711 	/*
   1712 	 * Record mapping for later back-translation and pte spilling.
   1713 	 * This will overwrite any existing mapping.
   1714 	 */
   1715 	s = splvm();
   1716 	error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
   1717 	splx(s);
   1718 
   1719 	/*
   1720 	 * Flush the real page from the instruction cache if this page is
   1721 	 * mapped executable and cacheable and has not been flushed since
   1722 	 * the last time it was modified.
   1723 	 */
   1724 	if (error == 0 &&
    1725 	    (flags & VM_PROT_EXECUTE) &&
    1726 	    (pte_lo & PTE_I) == 0 &&
   1727 	    was_exec == 0) {
   1728 		DPRINTFN(ENTER, (" syncicache"));
   1729 		PMAPCOUNT(exec_synced);
   1730 		pmap_syncicache(pa, NBPG);
   1731 		if (pg != NULL) {
   1732 			pmap_attr_save(pg, PTE_EXEC);
   1733 			PMAPCOUNT(exec_cached);
   1734 #if defined(DEBUG) || defined(PMAPDEBUG)
   1735 			if (pmapdebug & PMAPDEBUG_ENTER)
   1736 				printf(" marked-as-exec");
   1737 			else if (pmapdebug & PMAPDEBUG_EXEC)
   1738 				printf("[pmap_enter: %#lx: marked-as-exec]\n",
   1739 				    pg->phys_addr);
   1740 
   1741 #endif
   1742 		}
   1743 	}
   1744 
   1745 	DPRINTFN(ENTER, (": error=%d\n", error));
   1746 
   1747 	return error;
   1748 }
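
         /*
          * Typical use (illustrative only; va and pa are assumed to be
          * page-aligned values supplied by the caller):
          *
          *	error = pmap_enter(pmap_kernel(), va, pa,
          *	    VM_PROT_READ | VM_PROT_WRITE,
          *	    VM_PROT_WRITE | PMAP_WIRED);
          *
          * Passing the faulting access type in "flags" pre-sets the
          * REF/CHG bits so the first access needn't fault again.
          */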
   1749 
   1750 void
   1751 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
   1752 {
   1753 	struct mem_region *mp;
   1754 	u_int32_t pte_lo;
   1755 	u_int32_t msr;
   1756 	int error;
   1757 	int s;
   1758 
   1759 	if (va < VM_MIN_KERNEL_ADDRESS)
   1760 		panic("pmap_kenter_pa: attempt to enter "
   1761 		    "non-kernel address %#lx!", va);
   1762 
   1763 	DPRINTFN(KENTER,
   1764 	    ("pmap_kenter_pa(%#lx,%#lx,%#x)\n", va, pa, prot));
   1765 
   1766 	/*
   1767 	 * Assume the page is cache inhibited and access is guarded unless
   1768 	 * it's in our available memory array.  If it is in the memory array,
    1769 	 * assume it's memory-coherent.
   1770 	 */
   1771 	pte_lo = PTE_IG;
   1772 	for (mp = mem; mp->size; mp++) {
   1773 		if (pa >= mp->start && pa < mp->start + mp->size) {
   1774 			pte_lo = PTE_M;
   1775 			break;
   1776 		}
   1777 	}
   1778 
   1779 	if (prot & VM_PROT_WRITE)
   1780 		pte_lo |= PTE_BW;
   1781 	else
   1782 		pte_lo |= PTE_BR;
   1783 
   1784 	/*
   1785 	 * We don't care about REF/CHG on PVOs on the unmanaged list.
   1786 	 */
   1787 	s = splvm();
   1788 	msr = pmap_interrupts_off();
   1789 	error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
   1790 	    &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
   1791 	pmap_interrupts_restore(msr);
   1792 	splx(s);
   1793 
   1794 	if (error != 0)
   1795 		panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d",
   1796 		      va, pa, error);
   1797 }
   1798 
   1799 void
   1800 pmap_kremove(vaddr_t va, vsize_t len)
   1801 {
   1802 	if (va < VM_MIN_KERNEL_ADDRESS)
   1803 		panic("pmap_kremove: attempt to remove "
   1804 		    "non-kernel address %#lx!", va);
   1805 
   1806 	DPRINTFN(KREMOVE,("pmap_kremove(%#lx,%#lx)\n", va, len));
   1807 	pmap_remove(pmap_kernel(), va, va + len);
   1808 }
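
         /*
          * Sketch of the kenter/kremove pairing (illustrative): these
          * are the unmanaged fast paths, so no REF/CHG attributes are
          * tracked for the mapping.
          *
          *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
          *	...				(use the mapping)
          *	pmap_kremove(va, PAGE_SIZE);
          */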
   1809 
   1810 /*
   1811  * Remove the given range of mapping entries.
   1812  */
   1813 void
   1814 pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
   1815 {
   1816 	struct pvo_entry *pvo;
   1817 	u_int32_t msr;
   1818 	int pteidx;
   1819 	int s;
   1820 
   1821 	for (; va < endva; va += PAGE_SIZE) {
   1822 		s = splvm();
   1823 		msr = pmap_interrupts_off();
   1824 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
   1825 		if (pvo != NULL) {
   1826 			pmap_pvo_remove(pvo, pteidx);
   1827 		}
   1828 		pmap_interrupts_restore(msr);
   1829 		splx(s);
   1830 	}
   1831 }
   1832 
   1833 /*
   1834  * Get the physical page address for the given pmap/virtual address.
   1835  */
   1836 boolean_t
   1837 pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
   1838 {
   1839 	struct pvo_entry *pvo;
   1840 	u_int32_t msr;
   1841 	int s;
   1842 
   1843 	s = splvm();
   1844 	msr = pmap_interrupts_off();
   1845 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
   1846 	if (pvo != NULL) {
   1847 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   1848 		*pap = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
   1849 	}
   1850 	pmap_interrupts_restore(msr);
   1851 	splx(s);
   1852 	return pvo != NULL;
   1853 }
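
         /*
          * Usage sketch (illustrative): the page offset carries through,
          * so an unaligned lookup yields the matching physical byte:
          *
          *	paddr_t pa;
          *
          *	if (pmap_extract(pmap_kernel(), va, &pa))
          *		pa == (pte_lo & PTE_RPGN) | (va & ADDR_POFF);
          */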
   1854 
   1855 /*
   1856  * Lower the protection on the specified range of this pmap.
   1857  *
   1858  * There are only two cases: either the protection is going to 0,
   1859  * or it is going to read-only.
   1860  */
   1861 void
   1862 pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
   1863 {
   1864 	struct pvo_entry *pvo;
   1865 	volatile pte_t *pt;
   1866 	u_int32_t msr;
   1867 	int s;
   1868 	int pteidx;
   1869 
   1870 	/*
   1871 	 * Since this routine only downgrades protection, we should
    1872 	 * always be called without WRITE permission.
   1873 	 */
   1874 	KASSERT((prot & VM_PROT_WRITE) == 0);
   1875 
   1876 	/*
   1877 	 * If there is no protection, this is equivalent to
    1878 	 * removing the range from the pmap.
   1879 	 */
   1880 	if ((prot & VM_PROT_READ) == 0) {
   1881 		pmap_remove(pm, va, endva);
   1882 		return;
   1883 	}
   1884 
   1885 	s = splvm();
   1886 	msr = pmap_interrupts_off();
   1887 
   1888 	for (; va < endva; va += NBPG) {
   1889 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
   1890 		if (pvo == NULL)
   1891 			continue;
   1892 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   1893 
   1894 		/*
   1895 		 * Revoke executable if asked to do so.
   1896 		 */
   1897 		if ((prot & VM_PROT_EXECUTE) == 0)
   1898 			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
   1899 
   1900 #if 0
   1901 		/*
   1902 		 * If the page is already read-only, no change
   1903 		 * needs to be made.
   1904 		 */
   1905 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
   1906 			continue;
   1907 #endif
   1908 		/*
   1909 		 * Grab the PTE pointer before we diddle with
   1910 		 * the cached PTE copy.
   1911 		 */
   1912 		pt = pmap_pvo_to_pte(pvo, pteidx);
   1913 		/*
   1914 		 * Change the protection of the page.
   1915 		 */
   1916 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
   1917 		pvo->pvo_pte.pte_lo |= PTE_BR;
   1918 
   1919 		/*
   1920 		 * If the PVO is in the page table, update
    1921 	 * that PTE as well.
   1922 		 */
   1923 		if (pt != NULL) {
   1924 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   1925 			PMAPCOUNT(ptes_changed);
   1926 		}
   1927 
   1928 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   1929 	}
   1930 
   1931 	pmap_interrupts_restore(msr);
   1932 	splx(s);
   1933 }
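
         /*
          * Example (illustrative): downgrading a range to read-only,
          * e.g. when write access is revoked; VM_PROT_NONE would instead
          * take the pmap_remove() path above.
          *
          *	pmap_protect(pm, va, va + len, VM_PROT_READ);
          */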
   1934 
   1935 void
   1936 pmap_unwire(pmap_t pm, vaddr_t va)
   1937 {
   1938 	struct pvo_entry *pvo;
   1939 	u_int32_t msr;
   1940 	int s;
   1941 
   1942 	s = splvm();
   1943 	msr = pmap_interrupts_off();
   1944 
   1945 	pvo = pmap_pvo_find_va(pm, va, NULL);
   1946 	if (pvo != NULL) {
   1947 		if (pvo->pvo_vaddr & PVO_WIRED) {
   1948 			pvo->pvo_vaddr &= ~PVO_WIRED;
   1949 			pm->pm_stats.wired_count--;
   1950 		}
   1951 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   1952 	}
   1953 
   1954 	pmap_interrupts_restore(msr);
   1955 	splx(s);
   1956 }
   1957 
   1958 /*
   1959  * Lower the protection on the specified physical page.
   1960  *
   1961  * There are only two cases: either the protection is going to 0,
   1962  * or it is going to read-only.
   1963  */
   1964 void
   1965 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   1966 {
   1967 	struct pvo_head *pvo_head;
   1968 	struct pvo_entry *pvo, *next_pvo;
   1969 	volatile pte_t *pt;
   1970 	u_int32_t msr;
   1971 	int s;
   1972 
   1973 	/*
   1974 	 * Since this routine only downgrades protection, if the
   1975 	 * maximal protection is desired, there isn't any change
   1976 	 * to be made.
   1977 	 */
   1978 	KASSERT((prot & VM_PROT_WRITE) == 0);
   1979 	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE))
   1980 		return;
   1981 
   1982 	s = splvm();
   1983 	msr = pmap_interrupts_off();
   1984 
   1985 	/*
   1986 	 * When UVM reuses a page, it does a pmap_page_protect with
   1987 	 * VM_PROT_NONE.  At that point, we can clear the exec flag
   1988 	 * since we know the page will have different contents.
   1989 	 */
   1990 	if ((prot & VM_PROT_READ) == 0) {
   1991 		DPRINTFN(EXEC, ("[pmap_page_protect: %#lx: clear-exec]\n",
   1992 		    pg->phys_addr));
   1993 		if (pmap_attr_fetch(pg) & PTE_EXEC) {
   1994 			PMAPCOUNT(exec_uncached_page_protect);
   1995 			pmap_attr_clear(pg, PTE_EXEC);
   1996 		}
   1997 	}
   1998 
   1999 	pvo_head = vm_page_to_pvoh(pg);
   2000 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
   2001 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
   2002 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2003 
   2004 		/*
   2005 		 * Downgrading to no mapping at all, we just remove the entry.
   2006 		 */
   2007 		if ((prot & VM_PROT_READ) == 0) {
   2008 			pmap_pvo_remove(pvo, -1);
   2009 			continue;
   2010 		}
   2011 
   2012 		/*
   2013 		 * If EXEC permission is being revoked, just clear the
   2014 		 * flag in the PVO.
   2015 		 */
   2016 		if ((prot & VM_PROT_EXECUTE) == 0)
   2017 			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
   2018 
   2019 		/*
   2020 		 * If this entry is already RO, don't diddle with the
   2021 		 * page table.
   2022 		 */
   2023 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
   2024 			PMAP_PVO_CHECK(pvo);
   2025 			continue;
   2026 		}
   2027 
   2028 		/*
    2029 		 * Grab the PTE before we diddle the bits so
   2030 		 * pvo_to_pte can verify the pte contents are as
   2031 		 * expected.
   2032 		 */
   2033 		pt = pmap_pvo_to_pte(pvo, -1);
   2034 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
   2035 		pvo->pvo_pte.pte_lo |= PTE_BR;
   2036 		if (pt != NULL) {
   2037 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   2038 			PMAPCOUNT(ptes_changed);
   2039 		}
   2040 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2041 	}
   2042 
   2043 	pmap_interrupts_restore(msr);
   2044 	splx(s);
   2045 }
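
         /*
          * Example (illustrative): write-protect every mapping of a page
          * before pageout, or revoke them all when UVM reuses the page
          * (which also clears the cached exec attribute):
          *
          *	pmap_page_protect(pg, VM_PROT_READ);
          *	pmap_page_protect(pg, VM_PROT_NONE);
          */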
   2046 
   2047 /*
   2048  * Activate the address space for the specified process.  If the process
   2049  * is the current process, load the new MMU context.
   2050  */
   2051 void
   2052 pmap_activate(struct lwp *l)
   2053 {
   2054 	struct pcb *pcb = &l->l_addr->u_pcb;
   2055 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
   2056 
   2057 	DPRINTFN(ACTIVATE,
   2058 	    ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp));
   2059 
   2060 	/*
   2061 	 * XXX Normally performed in cpu_fork().
   2062 	 */
   2063 	if (pcb->pcb_pm != pmap) {
   2064 		pcb->pcb_pm = pmap;
   2065 		pcb->pcb_pmreal = pmap;
   2066 	}
   2067 
   2068 	/*
    2069 	 * In theory, the SR registers need only be valid on return
    2070 	 * to user space, so we could wait until then to load them.
   2071 	 */
   2072 	if (l == curlwp) {
   2073 		/* Store pointer to new current pmap. */
   2074 		curpm = pmap;
   2075 	}
   2076 }
   2077 
   2078 /*
   2079  * Deactivate the specified process's address space.
   2080  */
   2081 void
   2082 pmap_deactivate(struct lwp *l)
   2083 {
   2084 }
   2085 
   2086 boolean_t
   2087 pmap_query_bit(struct vm_page *pg, int ptebit)
   2088 {
   2089 	struct pvo_entry *pvo;
   2090 	volatile pte_t *pt;
   2091 	u_int32_t msr;
   2092 	int s;
   2093 
   2094 	if (pmap_attr_fetch(pg) & ptebit)
   2095 		return TRUE;
   2096 	s = splvm();
   2097 	msr = pmap_interrupts_off();
   2098 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
   2099 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2100 		/*
    2101 		 * See if we saved the bit off.  If so, cache it and return
   2102 		 * success.
   2103 		 */
   2104 		if (pvo->pvo_pte.pte_lo & ptebit) {
   2105 			pmap_attr_save(pg, ptebit);
   2106 			PMAP_PVO_CHECK(pvo);		/* sanity check */
   2107 			pmap_interrupts_restore(msr);
   2108 			splx(s);
   2109 			return TRUE;
   2110 		}
   2111 	}
   2112 	/*
   2113 	 * No luck, now go thru the hard part of looking at the ptes
   2114 	 * themselves.  Sync so any pending REF/CHG bits are flushed
   2115 	 * to the PTEs.
   2116 	 */
   2117 	SYNC();
   2118 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
   2119 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2120 		/*
    2121 		 * See if this pvo has a valid PTE.  If so, fetch the
    2122 		 * REF/CHG bits from the valid PTE.  If the appropriate
    2123 		 * ptebit is set, cache it and return success.
   2124 		 */
   2125 		pt = pmap_pvo_to_pte(pvo, -1);
   2126 		if (pt != NULL) {
   2127 			pmap_pte_synch(pt, &pvo->pvo_pte);
   2128 			if (pvo->pvo_pte.pte_lo & ptebit) {
   2129 				pmap_attr_save(pg, ptebit);
   2130 				PMAP_PVO_CHECK(pvo);		/* sanity check */
   2131 				pmap_interrupts_restore(msr);
   2132 				splx(s);
   2133 				return TRUE;
   2134 			}
   2135 		}
   2136 	}
   2137 	pmap_interrupts_restore(msr);
   2138 	splx(s);
   2139 	return FALSE;
   2140 }
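
         /*
          * The MI reference/modify queries are expected to wrap this
          * routine (a sketch; the real macros belong in the
          * machine-dependent pmap.h):
          *
          *	#define	pmap_is_referenced(pg)	pmap_query_bit(pg, PTE_REF)
          *	#define	pmap_is_modified(pg)	pmap_query_bit(pg, PTE_CHG)
          */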
   2141 
   2142 boolean_t
   2143 pmap_clear_bit(struct vm_page *pg, int ptebit)
   2144 {
   2145 	struct pvo_head *pvoh = vm_page_to_pvoh(pg);
   2146 	struct pvo_entry *pvo;
   2147 	volatile pte_t *pt;
   2148 	u_int32_t msr;
   2149 	int rv = 0;
   2150 	int s;
   2151 
   2152 	s = splvm();
   2153 	msr = pmap_interrupts_off();
   2154 
   2155 	/*
   2156 	 * Fetch the cache value
   2157 	 */
   2158 	rv |= pmap_attr_fetch(pg);
   2159 
   2160 	/*
   2161 	 * Clear the cached value.
   2162 	 */
   2163 	pmap_attr_clear(pg, ptebit);
   2164 
   2165 	/*
   2166 	 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
   2167 	 * can reset the right ones).  Note that since the pvo entries and
   2168 	 * list heads are accessed via BAT0 and are never placed in the
   2169 	 * page table, we don't have to worry about further accesses setting
   2170 	 * the REF/CHG bits.
   2171 	 */
   2172 	SYNC();
   2173 
   2174 	/*
    2175 	 * For each pvo entry, clear the cached ptebit.  If the pvo has
    2176 	 * a valid PTE, also clear the ptebit from that PTE.
   2177 	 */
   2178 	LIST_FOREACH(pvo, pvoh, pvo_vlink) {
   2179 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2180 		pt = pmap_pvo_to_pte(pvo, -1);
   2181 		if (pt != NULL) {
   2182 			/*
   2183 			 * Only sync the PTE if the bit we are looking
   2184 			 * for is not already set.
   2185 			 */
   2186 			if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
   2187 				pmap_pte_synch(pt, &pvo->pvo_pte);
   2188 			/*
   2189 			 * If the bit we are looking for was already set,
   2190 			 * clear that bit in the pte.
   2191 			 */
   2192 			if (pvo->pvo_pte.pte_lo & ptebit)
   2193 				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
   2194 		}
   2195 		rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
   2196 		pvo->pvo_pte.pte_lo &= ~ptebit;
   2197 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2198 	}
   2199 	pmap_interrupts_restore(msr);
   2200 	splx(s);
   2201 	/*
   2202 	 * If we are clearing the modify bit and this page was marked EXEC
   2203 	 * and the user of the page thinks the page was modified, then we
   2204 	 * need to clean it from the icache if it's mapped or clear the EXEC
   2205 	 * bit if it's not mapped.  The page itself might not have the CHG
   2206 	 * bit set if the modification was done via DMA to the page.
   2207 	 */
   2208 	if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
   2209 		if (LIST_EMPTY(pvoh)) {
   2210 			DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: clear-exec]\n",
   2211 			    pg->phys_addr));
   2212 			pmap_attr_clear(pg, PTE_EXEC);
   2213 			PMAPCOUNT(exec_uncached_clear_modify);
   2214 		} else {
   2215 			DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: syncicache]\n",
   2216 			    pg->phys_addr));
   2217 			pmap_syncicache(pg->phys_addr, NBPG);
   2218 			PMAPCOUNT(exec_synced_clear_modify);
   2219 		}
   2220 	}
   2221 	return (rv & ptebit) != 0;
   2222 }
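
         /*
          * Likewise for the clearing side (a sketch; the real macros
          * belong in the machine-dependent pmap.h):
          *
          *	#define	pmap_clear_reference(pg) pmap_clear_bit(pg, PTE_REF)
          *	#define	pmap_clear_modify(pg)	pmap_clear_bit(pg, PTE_CHG)
          *
          * The PTE_CHG case is what drives the icache-resync/clear-exec
          * handling above.
          */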
   2223 
   2224 void
   2225 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   2226 {
   2227 	struct pvo_entry *pvo;
   2228 	size_t offset = va & ADDR_POFF;
   2229 	int s;
   2230 
   2231 	s = splvm();
   2232 	while (len > 0) {
   2233 		size_t seglen = NBPG - offset;
   2234 		if (seglen > len)
   2235 			seglen = len;
   2236 		pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
   2237 		if (pvo != NULL && PVO_ISEXECUTABLE(pvo)) {
   2238 			pmap_syncicache(
   2239 			    (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
   2240 			PMAP_PVO_CHECK(pvo);
   2241 		}
   2242 		va += seglen;
   2243 		len -= seglen;
   2244 		offset = 0;
   2245 	}
   2246 	splx(s);
   2247 }
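
         /*
          * Example (illustrative): after ptrace(2) patches a breakpoint
          * into another process's text, the icache for that range must
          * be brought up to date before the process runs again:
          *
          *	pmap_procwr(p, va, sizeof(u_int32_t));
          */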
   2248 
   2249 #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
   2250 void
   2251 pmap_pte_print(volatile pte_t *pt)
   2252 {
   2253 	printf("PTE %p: ", pt);
   2254 	/* High word: */
   2255 	printf("0x%08x: [", pt->pte_hi);
   2256 	printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
   2257 	printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
   2258 	printf("0x%06x 0x%02X",
   2259 	    (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
   2260 	    pt->pte_hi & PTE_API);
   2261 	printf(" (va 0x%08lx)] ", pmap_pte_to_va(pt));
   2262 	/* Low word: */
   2263 	printf(" 0x%08x: [", pt->pte_lo);
   2264 	printf("0x%05x... ", pt->pte_lo >> 12);
   2265 	printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
   2266 	printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
   2267 	printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
   2268 	printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
   2269 	printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
   2270 	printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
   2271 	switch (pt->pte_lo & PTE_PP) {
   2272 	case PTE_BR: printf("br]\n"); break;
   2273 	case PTE_BW: printf("bw]\n"); break;
   2274 	case PTE_SO: printf("so]\n"); break;
   2275 	case PTE_SW: printf("sw]\n"); break;
   2276 	}
   2277 }
   2278 #endif
   2279 
   2280 #if defined(DDB)
   2281 void
   2282 pmap_pteg_check(void)
   2283 {
   2284 	volatile pte_t *pt;
   2285 	int i;
   2286 	int ptegidx;
   2287 	u_int p_valid = 0;
   2288 	u_int s_valid = 0;
   2289 	u_int invalid = 0;
   2290 
   2291 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2292 		for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
   2293 			if (pt->pte_hi & PTE_VALID) {
   2294 				if (pt->pte_hi & PTE_HID)
   2295 					s_valid++;
   2296 				else
   2297 					p_valid++;
   2298 			} else
   2299 				invalid++;
   2300 		}
   2301 	}
   2302 	printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
   2303 		p_valid, p_valid, s_valid, s_valid,
   2304 		invalid, invalid);
   2305 }
   2306 
   2307 void
   2308 pmap_print_mmuregs(void)
   2309 {
   2310 	int i;
   2311 	u_int cpuvers;
   2312 	vaddr_t addr;
   2313 	sr_t soft_sr[16];
   2314 	struct bat soft_ibat[4];
   2315 	struct bat soft_dbat[4];
   2316 	u_int32_t sdr1;
   2317 
   2318 	cpuvers = MFPVR() >> 16;
   2319 
   2320 	__asm __volatile ("mfsdr1 %0" : "=r"(sdr1));
    2321 	for (i = 0, addr = 0; i < 16; i++) {
   2322 		soft_sr[i] = MFSRIN(addr);
   2323 		addr += (1 << ADDR_SR_SHFT);
   2324 	}
   2325 
   2326 	/* read iBAT (601: uBAT) registers */
   2327 	__asm __volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
   2328 	__asm __volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
   2329 	__asm __volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
   2330 	__asm __volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
   2331 	__asm __volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
   2332 	__asm __volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
   2333 	__asm __volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
   2334 	__asm __volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
   2335 
   2336 
   2337 	if (cpuvers != MPC601) {
   2338 		/* read dBAT registers */
   2339 		__asm __volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
   2340 		__asm __volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
   2341 		__asm __volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
   2342 		__asm __volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
   2343 		__asm __volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
   2344 		__asm __volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
   2345 		__asm __volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
   2346 		__asm __volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
   2347 	}
   2348 
   2349 	printf("SDR1:\t0x%x\n", sdr1);
   2350 	printf("SR[]:\t");
   2352 	for (i=0; i<4; i++)
   2353 		printf("0x%08x,   ", soft_sr[i]);
   2354 	printf("\n\t");
   2355 	for ( ; i<8; i++)
   2356 		printf("0x%08x,   ", soft_sr[i]);
   2357 	printf("\n\t");
   2358 	for ( ; i<12; i++)
   2359 		printf("0x%08x,   ", soft_sr[i]);
   2360 	printf("\n\t");
   2361 	for ( ; i<16; i++)
   2362 		printf("0x%08x,   ", soft_sr[i]);
   2363 	printf("\n");
   2364 
   2365 	printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
   2366 	for (i=0; i<4; i++) {
   2367 		printf("0x%08x 0x%08x, ",
   2368 			soft_ibat[i].batu, soft_ibat[i].batl);
   2369 		if (i == 1)
   2370 			printf("\n\t");
   2371 	}
   2372 	if (cpuvers != MPC601) {
   2373 		printf("\ndBAT[]:\t");
   2374 		for (i=0; i<4; i++) {
   2375 			printf("0x%08x 0x%08x, ",
   2376 				soft_dbat[i].batu, soft_dbat[i].batl);
   2377 			if (i == 1)
   2378 				printf("\n\t");
   2379 		}
   2380 	}
   2381 	printf("\n");
   2382 }
   2383 
   2384 void
   2385 pmap_print_pte(pmap_t pm, vaddr_t va)
   2386 {
   2387 	struct pvo_entry *pvo;
   2388 	volatile pte_t *pt;
   2389 	int pteidx;
   2390 
   2391 	pvo = pmap_pvo_find_va(pm, va, &pteidx);
   2392 	if (pvo != NULL) {
   2393 		pt = pmap_pvo_to_pte(pvo, pteidx);
   2394 		if (pt != NULL) {
   2395 			printf("VA %#lx -> %p -> %s %#x, %#x\n",
   2396 				va, pt,
   2397 				pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
   2398 				pt->pte_hi, pt->pte_lo);
   2399 		} else {
   2400 			printf("No valid PTE found\n");
   2401 		}
   2402 	} else {
   2403 		printf("Address not in pmap\n");
   2404 	}
   2405 }
   2406 
   2407 void
   2408 pmap_pteg_dist(void)
   2409 {
   2410 	struct pvo_entry *pvo;
   2411 	int ptegidx;
   2412 	int depth;
   2413 	int max_depth = 0;
   2414 	unsigned int depths[64];
   2415 
   2416 	memset(depths, 0, sizeof(depths));
   2417 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2418 		depth = 0;
   2419 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   2420 			depth++;
   2421 		}
   2422 		if (depth > max_depth)
   2423 			max_depth = depth;
   2424 		if (depth > 63)
   2425 			depth = 63;
   2426 		depths[depth]++;
   2427 	}
   2428 
   2429 	for (depth = 0; depth < 64; depth++) {
   2430 		printf("  [%2d]: %8u", depth, depths[depth]);
   2431 		if ((depth & 3) == 3)
   2432 			printf("\n");
   2433 		if (depth == max_depth)
   2434 			break;
   2435 	}
   2436 	if ((depth & 3) != 3)
   2437 		printf("\n");
   2438 	printf("Max depth found was %d\n", max_depth);
   2439 }
    2440 #endif /* DDB */
   2441 
   2442 #if defined(PMAPCHECK) || defined(DEBUG)
   2443 void
   2444 pmap_pvo_verify(void)
   2445 {
   2446 	int ptegidx;
   2447 	int s;
   2448 
   2449 	s = splvm();
   2450 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2451 		struct pvo_entry *pvo;
   2452 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   2453 			if ((uintptr_t) pvo >= SEGMENT_LENGTH)
   2454 				panic("pmap_pvo_verify: invalid pvo %p "
   2455 				    "on list %#x", pvo, ptegidx);
   2456 			pmap_pvo_check(pvo);
   2457 		}
   2458 	}
   2459 	splx(s);
   2460 }
    2461 #endif /* PMAPCHECK || DEBUG */
   2462 
   2463 
   2464 void *
   2465 pmap_pool_ualloc(struct pool *pp, int flags)
   2466 {
   2467 	struct pvo_page *pvop;
   2468 
   2469 	pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
   2470 	if (pvop != NULL) {
   2471 		pmap_upvop_free--;
   2472 		SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
   2473 		return pvop;
   2474 	}
   2475 	if (uvm.page_init_done != TRUE) {
   2476 		return (void *) uvm_pageboot_alloc(PAGE_SIZE);
   2477 	}
   2478 	return pmap_pool_malloc(pp, flags);
   2479 }
   2480 
   2481 void *
   2482 pmap_pool_malloc(struct pool *pp, int flags)
   2483 {
   2484 	struct pvo_page *pvop;
   2485 	struct vm_page *pg;
   2486 
   2487 	pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
   2488 	if (pvop != NULL) {
   2489 		pmap_mpvop_free--;
   2490 		SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
   2491 		return pvop;
   2492 	}
   2493  again:
   2494 	pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
   2495 	    UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
   2496 	if (__predict_false(pg == NULL)) {
   2497 		if (flags & PR_WAITOK) {
   2498 			uvm_wait("plpg");
   2499 			goto again;
   2500 		} else {
   2501 			return (0);
   2502 		}
   2503 	}
   2504 	return (void *) VM_PAGE_TO_PHYS(pg);
   2505 }
   2506 
   2507 void
   2508 pmap_pool_ufree(struct pool *pp, void *va)
   2509 {
   2510 	struct pvo_page *pvop;
   2511 #if 0
   2512 	if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
   2513 		pmap_pool_mfree(va, size, tag);
   2514 		return;
   2515 	}
   2516 #endif
   2517 	pvop = va;
   2518 	SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
   2519 	pmap_upvop_free++;
   2520 	if (pmap_upvop_free > pmap_upvop_maxfree)
   2521 		pmap_upvop_maxfree = pmap_upvop_free;
   2522 }
   2523 
   2524 void
   2525 pmap_pool_mfree(struct pool *pp, void *va)
   2526 {
   2527 	struct pvo_page *pvop;
   2528 
   2529 	pvop = va;
   2530 	SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
   2531 	pmap_mpvop_free++;
   2532 	if (pmap_mpvop_free > pmap_mpvop_maxfree)
   2533 		pmap_mpvop_maxfree = pmap_mpvop_free;
   2534 #if 0
   2535 	uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
   2536 #endif
   2537 }
   2538 
   2539 /*
    2540  * This routine is used during bootstrapping to steal to-be-managed memory
    2541  * (which will then be unmanaged).  We use it to grab memory from the first
    2542  * 256MB for our pmap needs, leaving memory above 256MB for other stuff.
   2543  */
   2544 vaddr_t
   2545 pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
   2546 {
   2547 	vsize_t size;
   2548 	vaddr_t va;
   2549 	paddr_t pa = 0;
   2550 	int npgs, bank;
   2551 	struct vm_physseg *ps;
   2552 
   2553 	if (uvm.page_init_done == TRUE)
   2554 		panic("pmap_steal_memory: called _after_ bootstrap");
   2555 
   2556 	*vstartp = VM_MIN_KERNEL_ADDRESS;
   2557 	*vendp = VM_MAX_KERNEL_ADDRESS;
   2558 
   2559 	size = round_page(vsize);
   2560 	npgs = atop(size);
   2561 
   2562 	/*
   2563 	 * PA 0 will never be among those given to UVM so we can use it
   2564 	 * to indicate we couldn't steal any memory.
   2565 	 */
   2566 	for (ps = vm_physmem, bank = 0; bank < vm_nphysseg; bank++, ps++) {
   2567 		if (ps->free_list == VM_FREELIST_FIRST256 &&
   2568 		    ps->avail_end - ps->avail_start >= npgs) {
   2569 			pa = ptoa(ps->avail_start);
   2570 			break;
   2571 		}
   2572 	}
   2573 
   2574 	if (pa == 0)
    2575 		panic("pmap_steal_memory: no appropriate memory to steal!");
   2576 
   2577 	ps->avail_start += npgs;
   2578 	ps->start += npgs;
   2579 
   2580 	/*
   2581 	 * If we've used up all the pages in the segment, remove it and
   2582 	 * compact the list.
   2583 	 */
   2584 	if (ps->avail_start == ps->end) {
   2585 		/*
   2586 		 * If this was the last one, then a very bad thing has occurred
   2587 		 */
   2588 		if (--vm_nphysseg == 0)
   2589 			panic("pmap_steal_memory: out of memory!");
   2590 
   2591 		printf("pmap_steal_memory: consumed bank %d\n", bank);
   2592 		for (; bank < vm_nphysseg; bank++, ps++) {
   2593 			ps[0] = ps[1];
   2594 		}
   2595 	}
   2596 
   2597 	va = (vaddr_t) pa;
   2598 	memset((caddr_t) va, 0, size);
   2599 	pmap_pages_stolen += npgs;
   2600 #ifdef DEBUG
   2601 	if (pmapdebug && npgs > 1) {
   2602 		u_int cnt = 0;
   2603 		for (bank = 0, ps = vm_physmem; bank < vm_nphysseg; bank++, ps++)
   2604 			cnt += ps->avail_end - ps->avail_start;
   2605 		printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
   2606 		    npgs, pmap_pages_stolen, cnt);
   2607 	}
   2608 #endif
   2609 
   2610 	return va;
   2611 }
   2612 
   2613 /*
    2614  * Find a chunk of memory with the right size and alignment.
   2615  */
   2616 void *
   2617 pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
   2618 {
   2619 	struct mem_region *mp;
   2620 	paddr_t s, e;
   2621 	int i, j;
   2622 
   2623 	size = round_page(size);
   2624 
   2625 	DPRINTFN(BOOT,
   2626 	    ("pmap_boot_find_memory: size=%lx, alignment=%lx, at_end=%d",
   2627 	    size, alignment, at_end));
   2628 
   2629 	if (alignment < NBPG || (alignment & (alignment-1)) != 0)
   2630 		panic("pmap_boot_find_memory: invalid alignment %lx",
   2631 		    alignment);
   2632 
   2633 	if (at_end) {
   2634 		if (alignment != NBPG)
   2635 			panic("pmap_boot_find_memory: invalid ending "
   2636 			    "alignment %lx", alignment);
   2637 
   2638 		for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
   2639 			s = mp->start + mp->size - size;
   2640 			if (s >= mp->start && mp->size >= size) {
   2641 				DPRINTFN(BOOT,(": %lx\n", s));
   2642 				DPRINTFN(BOOT,
   2643 				    ("pmap_boot_find_memory: b-avail[%d] start "
   2644 				     "0x%lx size 0x%lx\n", mp - avail,
   2645 				     mp->start, mp->size));
   2646 				mp->size -= size;
   2647 				DPRINTFN(BOOT,
   2648 				    ("pmap_boot_find_memory: a-avail[%d] start "
   2649 				     "0x%lx size 0x%lx\n", mp - avail,
   2650 				     mp->start, mp->size));
   2651 				return (void *) s;
   2652 			}
   2653 		}
   2654 		panic("pmap_boot_find_memory: no available memory");
   2655 	}
   2656 
   2657 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   2658 		s = (mp->start + alignment - 1) & ~(alignment-1);
   2659 		e = s + size;
   2660 
   2661 		/*
    2662 		 * Does the calculated block fit entirely within this region?
   2663 		 */
   2664 		if (s < mp->start || e > mp->start + mp->size)
   2665 			continue;
   2666 
   2667 		DPRINTFN(BOOT,(": %lx\n", s));
   2668 		if (s == mp->start) {
   2669 			/*
    2670 			 * If the block starts at the beginning of the region,
   2671 			 * adjust the size & start. (the region may now be
   2672 			 * zero in length)
   2673 			 */
   2674 			DPRINTFN(BOOT,
   2675 			    ("pmap_boot_find_memory: b-avail[%d] start "
   2676 			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
   2677 			mp->start += size;
   2678 			mp->size -= size;
   2679 			DPRINTFN(BOOT,
   2680 			    ("pmap_boot_find_memory: a-avail[%d] start "
   2681 			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
   2682 		} else if (e == mp->start + mp->size) {
   2683 			/*
    2684 			 * If the block ends at the end of the region,
   2685 			 * adjust only the size.
   2686 			 */
   2687 			DPRINTFN(BOOT,
   2688 			    ("pmap_boot_find_memory: b-avail[%d] start "
   2689 			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
   2690 			mp->size -= size;
   2691 			DPRINTFN(BOOT,
   2692 			    ("pmap_boot_find_memory: a-avail[%d] start "
   2693 			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
   2694 		} else {
   2695 			/*
   2696 			 * Block is in the middle of the region, so we
   2697 			 * have to split it in two.
   2698 			 */
   2699 			for (j = avail_cnt; j > i + 1; j--) {
   2700 				avail[j] = avail[j-1];
   2701 			}
   2702 			DPRINTFN(BOOT,
   2703 			    ("pmap_boot_find_memory: b-avail[%d] start "
   2704 			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
   2705 			mp[1].start = e;
   2706 			mp[1].size = mp[0].start + mp[0].size - e;
   2707 			mp[0].size = s - mp[0].start;
   2708 			avail_cnt++;
   2709 			for (; i < avail_cnt; i++) {
   2710 				DPRINTFN(BOOT,
   2711 				    ("pmap_boot_find_memory: a-avail[%d] "
   2712 				     "start 0x%lx size 0x%lx\n", i,
   2713 				     avail[i].start, avail[i].size));
   2714 			}
   2715 		}
   2716 		return (void *) s;
   2717 	}
   2718 	panic("pmap_boot_find_memory: not enough memory for "
   2719 	    "%lx/%lx allocation?", size, alignment);
   2720 }
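
         /*
          * A worked example of the alignment step above (illustrative):
          * with mp->start = 0x3404000 and alignment = 0x10000,
          *
          *	s = (0x3404000 + 0xffff) & ~0xffff = 0x3410000
          *
          * the usual round-up idiom for power-of-two alignments.  The
          * PTEG table allocation in pmap_bootstrap() passes
          * alignment == size because SDR1 requires the hash table to be
          * naturally aligned.
          */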
   2721 
   2722 /*
   2723  * This is not part of the defined PMAP interface and is specific to the
   2724  * PowerPC architecture.  This is called during initppc, before the system
   2725  * is really initialized.
   2726  */
   2727 void
   2728 pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
   2729 {
   2730 	struct mem_region *mp, tmp;
   2731 	paddr_t s, e;
   2732 	psize_t size;
   2733 	int i, j;
   2734 
   2735 	/*
   2736 	 * Get memory.
   2737 	 */
   2738 	mem_regions(&mem, &avail);
   2739 #if defined(DEBUG)
   2740 	if (pmapdebug & PMAPDEBUG_BOOT) {
   2741 		printf("pmap_bootstrap: memory configuration:\n");
   2742 		for (mp = mem; mp->size; mp++) {
   2743 			printf("pmap_bootstrap: mem start 0x%lx size 0x%lx\n",
   2744 				mp->start, mp->size);
   2745 		}
   2746 		for (mp = avail; mp->size; mp++) {
   2747 			printf("pmap_bootstrap: avail start 0x%lx size 0x%lx\n",
   2748 				mp->start, mp->size);
   2749 		}
   2750 	}
   2751 #endif
   2752 
   2753 	/*
   2754 	 * Find out how much physical memory we have and in how many chunks.
   2755 	 */
   2756 	for (mem_cnt = 0, mp = mem; mp->size; mp++) {
   2757 		if (mp->start >= pmap_memlimit)
   2758 			continue;
   2759 		if (mp->start + mp->size > pmap_memlimit) {
   2760 			size = pmap_memlimit - mp->start;
   2761 			physmem += btoc(size);
   2762 		} else {
   2763 			physmem += btoc(mp->size);
   2764 		}
   2765 		mem_cnt++;
   2766 	}
   2767 
   2768 	/*
   2769 	 * Count the number of available entries.
   2770 	 */
   2771 	for (avail_cnt = 0, mp = avail; mp->size; mp++)
   2772 		avail_cnt++;
   2773 
   2774 	/*
   2775 	 * Page align all regions.
   2776 	 */
   2777 	kernelstart = trunc_page(kernelstart);
   2778 	kernelend = round_page(kernelend);
   2779 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   2780 		s = round_page(mp->start);
   2781 		mp->size -= (s - mp->start);
   2782 		mp->size = trunc_page(mp->size);
   2783 		mp->start = s;
   2784 		e = mp->start + mp->size;
   2785 
   2786 		DPRINTFN(BOOT,
   2787 		    ("pmap_bootstrap: b-avail[%d] start 0x%lx size 0x%lx\n",
   2788 		    i, mp->start, mp->size));
   2789 
   2790 		/*
   2791 		 * Don't allow the end to run beyond our artificial limit
   2792 		 */
   2793 		if (e > pmap_memlimit)
   2794 			e = pmap_memlimit;
   2795 
   2796 		/*
    2797 		 * Is this region empty or strange?  If so, skip it.
   2798 		 */
   2799 		if (e <= s) {
   2800 			mp->start = 0;
   2801 			mp->size = 0;
   2802 			continue;
   2803 		}
   2804 
   2805 		/*
    2806 		 * Does this region overlap the beginning of the kernel?
    2807 		 *   Does it extend past the end of the kernel?
   2808 		 */
   2809 		else if (s < kernelstart && e > kernelstart) {
   2810 			if (e > kernelend) {
   2811 				avail[avail_cnt].start = kernelend;
   2812 				avail[avail_cnt].size = e - kernelend;
   2813 				avail_cnt++;
   2814 			}
   2815 			mp->size = kernelstart - s;
   2816 		}
   2817 		/*
   2818 		 * Check whether this region overlaps the end of the kernel.
   2819 		 */
   2820 		else if (s < kernelend && e > kernelend) {
   2821 			mp->start = kernelend;
   2822 			mp->size = e - kernelend;
   2823 		}
   2824 		/*
    2825 		 * Check whether this region is completely inside the kernel.
    2826 		 * Nuke it if so.
   2827 		 */
   2828 		else if (s >= kernelstart && e <= kernelend) {
   2829 			mp->start = 0;
   2830 			mp->size = 0;
   2831 		}
   2832 		/*
   2833 		 * If the user imposed a memory limit, enforce it.
   2834 		 */
   2835 		else if (s >= pmap_memlimit) {
    2836 			mp->start = -NBPG;	/* lets us know why */
   2837 			mp->size = 0;
   2838 		}
   2839 		else {
   2840 			mp->start = s;
   2841 			mp->size = e - s;
   2842 		}
   2843 		DPRINTFN(BOOT,
   2844 		    ("pmap_bootstrap: a-avail[%d] start 0x%lx size 0x%lx\n",
   2845 		    i, mp->start, mp->size));
   2846 	}
   2847 
   2848 	/*
    2849 	 * Move (and uncount) all the null regions to the end.
   2850 	 */
   2851 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   2852 		if (mp->size == 0) {
    2853 			tmp = avail[i];
    2854 			avail[i] = avail[--avail_cnt];
    2855 			avail[avail_cnt] = tmp;
         			i--, mp--;	/* recheck the entry just moved down */
    2856 		}
   2857 	}
   2858 
   2859 	/*
    2860 	 * (Bubble)sort them into ascending order.
   2861 	 */
   2862 	for (i = 0; i < avail_cnt; i++) {
   2863 		for (j = i + 1; j < avail_cnt; j++) {
   2864 			if (avail[i].start > avail[j].start) {
   2865 				tmp = avail[i];
   2866 				avail[i] = avail[j];
   2867 				avail[j] = tmp;
   2868 			}
   2869 		}
   2870 	}
   2871 
   2872 	/*
   2873 	 * Make sure they don't overlap.
   2874 	 */
   2875 	for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
   2876 		if (mp[0].start + mp[0].size > mp[1].start) {
   2877 			mp[0].size = mp[1].start - mp[0].start;
   2878 		}
   2879 		DPRINTFN(BOOT,
   2880 		    ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
   2881 		    i, mp->start, mp->size));
   2882 	}
   2883 	DPRINTFN(BOOT,
   2884 	    ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
   2885 	    i, mp->start, mp->size));
   2886 
   2887 #ifdef	PTEGCOUNT
   2888 	pmap_pteg_cnt = PTEGCOUNT;
   2889 #else /* PTEGCOUNT */
   2890 	pmap_pteg_cnt = 0x1000;
   2891 
   2892 	while (pmap_pteg_cnt < physmem)
   2893 		pmap_pteg_cnt <<= 1;
   2894 
   2895 	pmap_pteg_cnt >>= 1;
   2896 #endif /* PTEGCOUNT */
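
         /*
          * Worked example (illustrative): with 128MB of RAM, physmem is
          * 0x8000 pages, so pmap_pteg_cnt doubles from 0x1000 up to
          * 0x8000 and the final shift leaves 0x4000 PTEGs -- one 64-byte
          * PTEG per two pages, i.e. a 1MB hash table.
          */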
   2897 
   2898 	/*
   2899 	 * Find suitably aligned memory for PTEG hash table.
   2900 	 */
   2901 	size = pmap_pteg_cnt * sizeof(pteg_t);
   2902 	pmap_pteg_table = pmap_boot_find_memory(size, size, 0);
   2903 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   2904 	if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
   2905 		panic("pmap_bootstrap: pmap_pteg_table end (%p + %lx) > 256MB",
   2906 		    pmap_pteg_table, size);
   2907 #endif
   2908 
   2909 	memset((void *)pmap_pteg_table, 0, pmap_pteg_cnt * sizeof(pteg_t));
   2910 	pmap_pteg_mask = pmap_pteg_cnt - 1;
   2911 
   2912 	/*
   2913 	 * We cannot do pmap_steal_memory here since UVM hasn't been loaded
   2914 	 * with pages.  So we just steal them before giving them to UVM.
   2915 	 */
   2916 	size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
   2917 	pmap_pvo_table = pmap_boot_find_memory(size, NBPG, 0);
   2918 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   2919 	if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
   2920 		panic("pmap_bootstrap: pmap_pvo_table end (%p + %lx) > 256MB",
   2921 		    pmap_pvo_table, size);
   2922 #endif
   2923 
   2924 	for (i = 0; i < pmap_pteg_cnt; i++)
   2925 		TAILQ_INIT(&pmap_pvo_table[i]);
   2926 
   2927 #ifndef MSGBUFADDR
   2928 	/*
   2929 	 * Allocate msgbuf in high memory.
   2930 	 */
   2931 	msgbuf_paddr = (paddr_t) pmap_boot_find_memory(MSGBUFSIZE, NBPG, 1);
   2932 #endif
   2933 
   2934 #ifdef __HAVE_PMAP_PHYSSEG
   2935 	{
   2936 		u_int npgs = 0;
   2937 		for (i = 0, mp = avail; i < avail_cnt; i++, mp++)
   2938 			npgs += btoc(mp->size);
   2939 		size = (sizeof(struct pvo_head) + 1) * npgs;
   2940 		pmap_physseg.pvoh = pmap_boot_find_memory(size, NBPG, 0);
   2941 		pmap_physseg.attrs = (char *) &pmap_physseg.pvoh[npgs];
   2942 #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   2943 		if ((uintptr_t)pmap_physseg.pvoh + size > SEGMENT_LENGTH)
   2944 			panic("pmap_bootstrap: PVO list end (%p + %lx) > 256MB",
   2945 			    pmap_physseg.pvoh, size);
   2946 #endif
   2947 	}
   2948 #endif
   2949 
   2950 	for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
   2951 		paddr_t pfstart = atop(mp->start);
   2952 		paddr_t pfend = atop(mp->start + mp->size);
   2953 		if (mp->size == 0)
   2954 			continue;
   2955 		if (mp->start + mp->size <= SEGMENT_LENGTH) {
   2956 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   2957 				VM_FREELIST_FIRST256);
   2958 		} else if (mp->start >= SEGMENT_LENGTH) {
   2959 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   2960 				VM_FREELIST_DEFAULT);
   2961 		} else {
   2962 			pfend = atop(SEGMENT_LENGTH);
   2963 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   2964 				VM_FREELIST_FIRST256);
   2965 			pfstart = atop(SEGMENT_LENGTH);
   2966 			pfend = atop(mp->start + mp->size);
   2967 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   2968 				VM_FREELIST_DEFAULT);
   2969 		}
   2970 	}
   2971 
   2972 	/*
   2973 	 * Make sure kernel vsid is allocated as well as VSID 0.
   2974 	 */
   2975 	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
   2976 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
   2977 	pmap_vsid_bitmap[0] |= 1;
   2978 
   2979 	/*
   2980 	 * Initialize kernel pmap and hardware.
   2981 	 */
   2982 	for (i = 0; i < 16; i++) {
   2983 		pmap_kernel()->pm_sr[i] = EMPTY_SEGMENT;
   2984 		__asm __volatile ("mtsrin %0,%1"
   2985 			      :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
   2986 	}
   2987 
   2988 	pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
   2989 	__asm __volatile ("mtsr %0,%1"
   2990 		      :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
   2991 #ifdef KERNEL2_SR
   2992 	pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
   2993 	__asm __volatile ("mtsr %0,%1"
   2994 		      :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
   2995 #endif
   2996 	for (i = 0; i < 16; i++) {
   2997 		if (iosrtable[i] & SR601_T) {
   2998 			pmap_kernel()->pm_sr[i] = iosrtable[i];
   2999 			__asm __volatile ("mtsrin %0,%1"
   3000 			    :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
   3001 		}
   3002 	}
   3003 
   3004 	__asm __volatile ("sync; mtsdr1 %0; isync"
   3005 		      :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
   3006 	tlbia();
   3007 
   3008 #ifdef ALTIVEC
   3009 	pmap_use_altivec = cpu_altivec;
   3010 #endif
   3011 
   3012 #ifdef DEBUG
   3013 	if (pmapdebug & PMAPDEBUG_BOOT) {
   3014 		u_int cnt;
   3015 		int bank;
   3016 		char pbuf[9];
   3017 		for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
   3018 			cnt += vm_physmem[bank].avail_end - vm_physmem[bank].avail_start;
   3019 			printf("pmap_bootstrap: vm_physmem[%d]=%#lx-%#lx/%#lx\n",
   3020 			    bank,
   3021 			    ptoa(vm_physmem[bank].avail_start),
   3022 			    ptoa(vm_physmem[bank].avail_end),
   3023 			    ptoa(vm_physmem[bank].avail_end - vm_physmem[bank].avail_start));
   3024 		}
   3025 		format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
   3026 		printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
   3027 		    pbuf, cnt);
   3028 #ifdef DDB
   3029 		Debugger();
   3030 #endif
   3031 	}
   3032 #endif
   3033 
   3034 	pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
   3035 	    sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
   3036 	    &pmap_pool_uallocator);
   3037 
   3038 	pool_setlowat(&pmap_upvo_pool, 252);
   3039 
   3040 	pool_init(&pmap_pool, sizeof(struct pmap),
   3041 	    sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator);
   3042 }
   3043