      1 /*	$NetBSD: pmap.c,v 1.39.4.1 2006/09/09 02:42:22 rpaulo Exp $	*/
      2 
      3 /*
      4  * Copyright 2001 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *      This product includes software developed for the NetBSD Project by
     20  *      Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
     40  * Copyright (C) 1995, 1996 TooLs GmbH.
     41  * All rights reserved.
     42  *
     43  * Redistribution and use in source and binary forms, with or without
     44  * modification, are permitted provided that the following conditions
     45  * are met:
     46  * 1. Redistributions of source code must retain the above copyright
     47  *    notice, this list of conditions and the following disclaimer.
     48  * 2. Redistributions in binary form must reproduce the above copyright
     49  *    notice, this list of conditions and the following disclaimer in the
     50  *    documentation and/or other materials provided with the distribution.
     51  * 3. All advertising materials mentioning features or use of this software
     52  *    must display the following acknowledgement:
     53  *	This product includes software developed by TooLs GmbH.
     54  * 4. The name of TooLs GmbH may not be used to endorse or promote products
     55  *    derived from this software without specific prior written permission.
     56  *
     57  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
     58  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     59  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     60  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     61  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     62  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
     63  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     64  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
     65  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     66  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     67  */
     68 
     69 #include <sys/cdefs.h>
     70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.39.4.1 2006/09/09 02:42:22 rpaulo Exp $");
     71 
     72 #include <sys/param.h>
     73 #include <sys/malloc.h>
     74 #include <sys/proc.h>
     75 #include <sys/user.h>
     76 #include <sys/queue.h>
     77 #include <sys/systm.h>
     78 #include <sys/pool.h>
     79 #include <sys/device.h>
     80 
     81 #include <uvm/uvm.h>
     82 
     83 #include <machine/cpu.h>
     84 #include <machine/pcb.h>
     85 #include <machine/powerpc.h>
     86 
     87 #include <powerpc/spr.h>
     88 #include <machine/tlb.h>
     89 
     90 /*
     91  * kernmap is an array of PTEs large enough to map in
     92  * 4GB.  At 16KB/page it is 256K entries or 2MB.
     93  */
     94 #define KERNMAP_SIZE	((0xffffffffU/PAGE_SIZE)+1)
     95 caddr_t kernmap;
     96 
     97 #define MINCTX		2
     98 #define NUMCTX		256
     99 
    100 volatile struct pmap *ctxbusy[NUMCTX];
    101 
    102 #define TLBF_USED	0x1
    103 #define	TLBF_REF	0x2
    104 #define	TLBF_LOCKED	0x4
    105 #define	TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)
    106 
    107 typedef struct tlb_info_s {
    108 	char	ti_flags;
     109 	char	ti_ctx;		/* TLB_PID associated with the entry */
    110 	u_int	ti_va;
    111 } tlb_info_t;
    112 
    113 volatile tlb_info_t tlb_info[NTLB];
     114 /* We'll use a modified FIFO replacement policy because it's cheap */
    115 volatile int tlbnext;
    116 
    117 static int tlb_nreserved = 0;
    118 static int pmap_bootstrap_done = 0;
    119 
    120 /* Event counters */
    121 struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
    122 	NULL, "cpu", "tlbmiss");
    123 struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
    124 	NULL, "cpu", "tlbhit");
    125 struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
    126 	NULL, "cpu", "tlbflush");
    127 struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
    128 	NULL, "cpu", "tlbenter");
    129 
    130 struct pmap kernel_pmap_;
    131 
    132 int physmem;
    133 static int npgs;
    134 static u_int nextavail;
    135 #ifndef MSGBUFADDR
    136 extern paddr_t msgbuf_paddr;
    137 #endif
    138 
    139 static struct mem_region *mem, *avail;
    140 
    141 /*
    142  * This is a cache of referenced/modified bits.
    143  * Bits herein are shifted by ATTRSHFT.
    144  */
    145 static char *pmap_attrib;
    146 
    147 #define PV_WIRED	0x1
    148 #define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
    149 #define PV_UNWIRE(pv)	((pv)->pv_va &= ~PV_WIRED)
    150 #define PV_ISWIRED(pv)	((pv)->pv_va & PV_WIRED)
    151 #define PV_CMPVA(va,pv)	(!(((pv)->pv_va ^ (va)) & (~PV_WIRED)))
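         /*
          * PV_WIRED lives in bit 0 of pv_va: mapped addresses are page
          * aligned, so that bit is otherwise always zero, and PV_CMPVA()
          * masks it off before comparing virtual addresses.
          */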
    152 
    153 struct pv_entry {
    154 	struct pv_entry *pv_next;	/* Linked list of mappings */
    155 	vaddr_t pv_va;			/* virtual address of mapping */
    156 	struct pmap *pv_pm;
    157 };
    158 
    159 /* Each index corresponds to TLB_SIZE_* value. */
    160 static size_t tlbsize[] = {
    161 	1024, 		/* TLB_SIZE_1K */
    162 	4096, 		/* TLB_SIZE_4K */
    163 	16384, 		/* TLB_SIZE_16K */
    164 	65536, 		/* TLB_SIZE_64K */
    165 	262144, 	/* TLB_SIZE_256K */
    166 	1048576, 	/* TLB_SIZE_1M */
    167 	4194304, 	/* TLB_SIZE_4M */
    168 	16777216, 	/* TLB_SIZE_16M */
    169 };
    170 
    171 struct pv_entry *pv_table;
    172 static struct pool pv_pool;
    173 
    174 static int pmap_initialized;
    175 
    176 static int ctx_flush(int);
    177 
    178 inline struct pv_entry *pa_to_pv(paddr_t);
    179 static inline char *pa_to_attr(paddr_t);
    180 
    181 static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
    182 static inline int pte_enter(struct pmap *, vaddr_t, u_int);
    183 
    184 static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, boolean_t);
    185 static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);
    186 
    187 static int ppc4xx_tlb_size_mask(size_t, int *, int *);
    188 
    189 
    190 inline struct pv_entry *
    191 pa_to_pv(paddr_t pa)
    192 {
    193 	int bank, pg;
    194 
    195 	bank = vm_physseg_find(atop(pa), &pg);
    196 	if (bank == -1)
    197 		return NULL;
    198 	return &vm_physmem[bank].pmseg.pvent[pg];
    199 }
    200 
    201 static inline char *
    202 pa_to_attr(paddr_t pa)
    203 {
    204 	int bank, pg;
    205 
    206 	bank = vm_physseg_find(atop(pa), &pg);
    207 	if (bank == -1)
    208 		return NULL;
    209 	return &vm_physmem[bank].pmseg.attrs[pg];
    210 }
    211 
    212 /*
    213  * Insert PTE into page table.
    214  */
    215 int
    216 pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
    217 {
    218 	int seg = STIDX(va);
    219 	int ptn = PTIDX(va);
    220 	u_int oldpte;
    221 
    222 	if (!pm->pm_ptbl[seg]) {
    223 		/* Don't allocate a page to clear a non-existent mapping. */
    224 		if (!pte)
    225 			return (0);
    226 		/* Allocate a page XXXX this will sleep! */
    227 		pm->pm_ptbl[seg] =
    228 		    (uint *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
    229 		    UVM_KMF_WIRED | UVM_KMF_ZERO);
    230 	}
    231 	oldpte = pm->pm_ptbl[seg][ptn];
    232 	pm->pm_ptbl[seg][ptn] = pte;
    233 
    234 	/* Flush entry. */
    235 	ppc4xx_tlb_flush(va, pm->pm_ctx);
    236 	if (oldpte != pte) {
    237 		if (pte == 0)
    238 			pm->pm_stats.resident_count--;
    239 		else
    240 			pm->pm_stats.resident_count++;
    241 	}
    242 	return (1);
    243 }
    244 
    245 /*
    246  * Get a pointer to a PTE in a page table.
    247  */
    248 volatile u_int *
    249 pte_find(struct pmap *pm, vaddr_t va)
    250 {
    251 	int seg = STIDX(va);
    252 	int ptn = PTIDX(va);
    253 
    254 	if (pm->pm_ptbl[seg])
    255 		return (&pm->pm_ptbl[seg][ptn]);
    256 
    257 	return (NULL);
    258 }
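
         /*
          * Together, pte_enter() and pte_find() implement the two-level
          * software page table: STIDX(va) picks one of the STSZ per-pmap
          * directory slots and PTIDX(va) indexes a u_int PTE within that
          * page-sized table (the exact shifts live next to STIDX/PTIDX in
          * the machine headers, not in this file).  A minimal lookup
          * sketch, matching what pmap_extract() does below:
          *
          *	volatile u_int *ptep = pte_find(pm, va);
          *
          *	if (ptep != NULL && *ptep != 0)
          *		pa = TTE_PA(*ptep) | (va & PGOFSET);
          */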
    259 
    260 /*
    261  * This is called during initppc, before the system is really initialized.
    262  */
    263 void
    264 pmap_bootstrap(u_int kernelstart, u_int kernelend)
    265 {
    266 	struct mem_region *mp, *mp1;
    267 	int cnt, i;
    268 	u_int s, e, sz;
    269 
    270 	/* XXXfreza: compat, we used to statically reserve 4 entries. */
    271 	if (tlb_nreserved == 0)
    272 		tlb_nreserved = TLB_NRESERVED;
    273 
    274 	tlbnext = tlb_nreserved;
    275 
    276 	/*
    277 	 * Allocate the kernel page table at the end of
    278 	 * kernel space so it's in the locked TTE.
    279 	 */
    280 	kernmap = (caddr_t)kernelend;
    281 
    282 	/*
    283 	 * Initialize kernel page table.
    284 	 */
    285 	for (i = 0; i < STSZ; i++) {
    286 		pmap_kernel()->pm_ptbl[i] = 0;
    287 	}
    288 	ctxbusy[0] = ctxbusy[1] = pmap_kernel();
    289 
    290 	/*
    291 	 * Announce page-size to the VM-system
    292 	 */
    293 	uvmexp.pagesize = NBPG;
    294 	uvm_setpagesize();
    295 
    296 	/*
    297 	 * Get memory.
    298 	 */
    299 	mem_regions(&mem, &avail);
    300 	for (mp = mem; mp->size; mp++) {
    301 		physmem += btoc(mp->size);
    302 		printf("+%lx,",mp->size);
    303 	}
    304 	printf("\n");
    305 	ppc4xx_tlb_init();
    306 	/*
    307 	 * Count the number of available entries.
    308 	 */
    309 	for (cnt = 0, mp = avail; mp->size; mp++)
    310 		cnt++;
    311 
    312 	/*
    313 	 * Page align all regions.
    314 	 * Non-page aligned memory isn't very interesting to us.
    315 	 * Also, sort the entries for ascending addresses.
    316 	 */
    317 	kernelstart &= ~PGOFSET;
    318 	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
    319 	for (mp = avail; mp->size; mp++) {
    320 		s = mp->start;
    321 		e = mp->start + mp->size;
    322 		printf("%08x-%08x -> ",s,e);
    323 		/*
    324 		 * Check whether this region holds all of the kernel.
    325 		 */
    326 		if (s < kernelstart && e > kernelend) {
    327 			avail[cnt].start = kernelend;
    328 			avail[cnt++].size = e - kernelend;
    329 			e = kernelstart;
    330 		}
    331 		/*
     332 		 * Look whether this region starts within the kernel.
    333 		 */
    334 		if (s >= kernelstart && s < kernelend) {
    335 			if (e <= kernelend)
    336 				goto empty;
    337 			s = kernelend;
    338 		}
    339 		/*
    340 		 * Now look whether this region ends within the kernel.
    341 		 */
    342 		if (e > kernelstart && e <= kernelend) {
    343 			if (s >= kernelstart)
    344 				goto empty;
    345 			e = kernelstart;
    346 		}
    347 		/*
    348 		 * Now page align the start and size of the region.
    349 		 */
    350 		s = round_page(s);
    351 		e = trunc_page(e);
    352 		if (e < s)
    353 			e = s;
    354 		sz = e - s;
    355 		printf("%08x-%08x = %x\n",s,e,sz);
    356 		/*
    357 		 * Check whether some memory is left here.
    358 		 */
    359 		if (sz == 0) {
    360 		empty:
    361 			memmove(mp, mp + 1,
    362 				(cnt - (mp - avail)) * sizeof *mp);
    363 			cnt--;
    364 			mp--;
    365 			continue;
    366 		}
    367 		/*
    368 		 * Do an insertion sort.
    369 		 */
    370 		npgs += btoc(sz);
    371 		for (mp1 = avail; mp1 < mp; mp1++)
    372 			if (s < mp1->start)
    373 				break;
    374 		if (mp1 < mp) {
    375 			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
    376 			mp1->start = s;
    377 			mp1->size = sz;
    378 		} else {
    379 			mp->start = s;
    380 			mp->size = sz;
    381 		}
    382 	}
    383 
    384 	/*
    385 	 * We cannot do pmap_steal_memory here,
    386 	 * since we don't run with translation enabled yet.
    387 	 */
    388 #ifndef MSGBUFADDR
    389 	/*
    390 	 * allow for msgbuf
    391 	 */
    392 	sz = round_page(MSGBUFSIZE);
    393 	mp = NULL;
    394 	for (mp1 = avail; mp1->size; mp1++)
    395 		if (mp1->size >= sz)
    396 			mp = mp1;
    397 	if (mp == NULL)
    398 		panic("not enough memory?");
    399 
    400 	npgs -= btoc(sz);
    401 	msgbuf_paddr = mp->start + mp->size - sz;
    402 	mp->size -= sz;
    403 	if (mp->size <= 0)
    404 		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
    405 #endif
    406 
    407 	for (mp = avail; mp->size; mp++)
    408 		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
    409 			atop(mp->start), atop(mp->start + mp->size),
    410 			VM_FREELIST_DEFAULT);
    411 
    412 	/*
    413 	 * Initialize kernel pmap and hardware.
    414 	 */
     415 	/* Set up the TLB PID allocator so it knows we are already using PID 1 */
    416 	pmap_kernel()->pm_ctx = KERNEL_PID;
    417 	nextavail = avail->start;
    418 
    419 	evcnt_attach_static(&tlbmiss_ev);
    420 	evcnt_attach_static(&tlbhit_ev);
    421 	evcnt_attach_static(&tlbflush_ev);
    422 	evcnt_attach_static(&tlbenter_ev);
    423 
    424 	pmap_bootstrap_done = 1;
    425 }
    426 
    427 /*
    428  * Restrict given range to physical memory
    429  *
    430  * (Used by /dev/mem)
    431  */
    432 void
    433 pmap_real_memory(paddr_t *start, psize_t *size)
    434 {
    435 	struct mem_region *mp;
    436 
    437 	for (mp = mem; mp->size; mp++) {
    438 		if (*start + *size > mp->start &&
    439 		    *start < mp->start + mp->size) {
    440 			if (*start < mp->start) {
    441 				*size -= mp->start - *start;
    442 				*start = mp->start;
    443 			}
    444 			if (*start + *size > mp->start + mp->size)
    445 				*size = mp->start + mp->size - *start;
    446 			return;
    447 		}
    448 	}
    449 	*size = 0;
    450 }
    451 
    452 /*
    453  * Initialize anything else for pmap handling.
    454  * Called during vm_init().
    455  */
    456 void
    457 pmap_init(void)
    458 {
    459 	struct pv_entry *pv;
    460 	vsize_t sz;
    461 	vaddr_t addr;
    462 	int i, s;
    463 	int bank;
    464 	char *attr;
    465 
    466 	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
    467 	sz = round_page(sz);
    468 	addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
    469 	s = splvm();
    470 	pv = pv_table = (struct pv_entry *)addr;
    471 	for (i = npgs; --i >= 0;)
    472 		pv++->pv_pm = NULL;
    473 	pmap_attrib = (char *)pv;
    474 	memset(pv, 0, npgs);
    475 
    476 	pv = pv_table;
    477 	attr = pmap_attrib;
    478 	for (bank = 0; bank < vm_nphysseg; bank++) {
    479 		sz = vm_physmem[bank].end - vm_physmem[bank].start;
    480 		vm_physmem[bank].pmseg.pvent = pv;
    481 		vm_physmem[bank].pmseg.attrs = attr;
    482 		pv += sz;
    483 		attr += sz;
    484 	}
    485 
    486 	pmap_initialized = 1;
    487 	splx(s);
    488 
    489 	/* Setup a pool for additional pvlist structures */
    490 	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL);
    491 }
    492 
    493 /*
    494  * How much virtual space is available to the kernel?
    495  */
    496 void
    497 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
    498 {
    499 
    500 #if 0
    501 	/*
    502 	 * Reserve one segment for kernel virtual memory
    503 	 */
    504 	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
    505 	*end = *start + SEGMENT_LENGTH;
    506 #else
    507 	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
    508 	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
    509 #endif
    510 }
    511 
    512 #ifdef PMAP_GROWKERNEL
    513 /*
    514  * Preallocate kernel page tables to a specified VA.
    515  * This simply loops through the first TTE for each
    516  * page table from the beginning of the kernel pmap,
    517  * reads the entry, and if the result is
    518  * zero (either invalid entry or no page table) it stores
    519  * a zero there, populating page tables in the process.
     520  * This is not the most efficient technique but I don't
    521  * expect it to be called that often.
    522  */
    523 extern struct vm_page *vm_page_alloc1 __P((void));
    524 extern void vm_page_free1 __P((struct vm_page *));
    525 
    526 vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;
    527 
    528 vaddr_t
    529 pmap_growkernel(vaddr_t maxkvaddr)
    530 {
    531 	int s;
    532 	int seg;
    533 	paddr_t pg;
    534 	struct pmap *pm = pmap_kernel();
    535 
    536 	s = splvm();
    537 
    538 	/* Align with the start of a page table */
    539 	for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
    540 	     kbreak += PTMAP) {
    541 		seg = STIDX(kbreak);
    542 
    543 		if (pte_find(pm, kbreak))
    544 			continue;
    545 
    546 		if (uvm.page_init_done) {
    547 			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
    548 		} else {
    549 			if (!uvm_page_physget(&pg))
    550 				panic("pmap_growkernel: no memory");
    551 		}
    552 		if (!pg)
    553 			panic("pmap_growkernel: no pages");
    554 		pmap_zero_page((paddr_t)pg);
    555 
     556 		/* XXX This is based on all physmem being addressable */
    557 		pm->pm_ptbl[seg] = (u_int *)pg;
    558 	}
    559 	splx(s);
    560 	return (kbreak);
    561 }
    562 
    563 /*
    564  *	vm_page_alloc1:
    565  *
    566  *	Allocate and return a memory cell with no associated object.
    567  */
    568 struct vm_page *
    569 vm_page_alloc1(void)
    570 {
    571 	struct vm_page *pg;
    572 
    573 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
    574 	if (pg) {
    575 		pg->wire_count = 1;	/* no mappings yet */
    576 		pg->flags &= ~PG_BUSY;	/* never busy */
    577 	}
    578 	return pg;
    579 }
    580 
    581 /*
    582  *	vm_page_free1:
    583  *
    584  *	Returns the given page to the free list,
     585  *	disassociating it from any VM object.
    586  *
    587  *	Object and page must be locked prior to entry.
    588  */
    589 void
    590 vm_page_free1(struct vm_page *pg)
    591 {
    592 #ifdef DIAGNOSTIC
    593 	if (pg->flags != (PG_CLEAN|PG_FAKE)) {
    594 		printf("Freeing invalid page %p\n", pg);
    595 		printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(pg));
    596 #ifdef DDB
    597 		Debugger();
    598 #endif
    599 		return;
    600 	}
    601 #endif
    602 	pg->flags |= PG_BUSY;
    603 	pg->wire_count = 0;
    604 	uvm_pagefree(pg);
    605 }
    606 #endif
    607 
    608 /*
    609  * Create and return a physical map.
    610  */
    611 struct pmap *
    612 pmap_create(void)
    613 {
    614 	struct pmap *pm;
    615 
    616 	pm = malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
    617 	memset(pm, 0, sizeof *pm);
    618 	pm->pm_refs = 1;
    619 	return pm;
    620 }
    621 
    622 /*
    623  * Add a reference to the given pmap.
    624  */
    625 void
    626 pmap_reference(struct pmap *pm)
    627 {
    628 
    629 	pm->pm_refs++;
    630 }
    631 
    632 /*
    633  * Retire the given pmap from service.
    634  * Should only be called if the map contains no valid mappings.
    635  */
    636 void
    637 pmap_destroy(struct pmap *pm)
    638 {
    639 	int i;
    640 
    641 	if (--pm->pm_refs > 0) {
    642 		return;
    643 	}
    644 	KASSERT(pm->pm_stats.resident_count == 0);
    645 	KASSERT(pm->pm_stats.wired_count == 0);
    646 	for (i = 0; i < STSZ; i++)
    647 		if (pm->pm_ptbl[i]) {
    648 			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
    649 			    PAGE_SIZE, UVM_KMF_WIRED);
    650 			pm->pm_ptbl[i] = NULL;
    651 		}
    652 	if (pm->pm_ctx)
    653 		ctx_free(pm);
    654 	free(pm, M_VMPMAP);
    655 }
    656 
    657 /*
    658  * Copy the range specified by src_addr/len
    659  * from the source map to the range dst_addr/len
    660  * in the destination map.
    661  *
    662  * This routine is only advisory and need not do anything.
    663  */
    664 void
    665 pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
    666 	  vsize_t len, vaddr_t src_addr)
    667 {
    668 }
    669 
    670 /*
    671  * Require that all active physical maps contain no
    672  * incorrect entries NOW.
    673  */
    674 void
    675 pmap_update(struct pmap *pmap)
    676 {
    677 }
    678 
    679 /*
    680  * Garbage collects the physical map system for
    681  * pages which are no longer used.
    682  * Success need not be guaranteed -- that is, there
    683  * may well be pages which are not referenced, but
    684  * others may be collected.
    685  * Called by the pageout daemon when pages are scarce.
    686  */
    687 void
    688 pmap_collect(struct pmap *pm)
    689 {
    690 }
    691 
    692 /*
    693  * Fill the given physical page with zeroes.
    694  */
    695 void
    696 pmap_zero_page(paddr_t pa)
    697 {
    698 
    699 #ifdef PPC_4XX_NOCACHE
    700 	memset((caddr_t)pa, 0, PAGE_SIZE);
    701 #else
    702 	int i;
    703 
    704 	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
    705 		__asm volatile ("dcbz 0,%0" :: "r"(pa));
    706 		pa += CACHELINESIZE;
    707 	}
    708 #endif
    709 }
    710 
    711 /*
    712  * Copy the given physical source page to its destination.
    713  */
    714 void
    715 pmap_copy_page(paddr_t src, paddr_t dst)
    716 {
    717 
    718 	memcpy((caddr_t)dst, (caddr_t)src, PAGE_SIZE);
    719 	dcache_flush_page(dst);
    720 }
    721 
    722 /*
    723  * This returns whether this is the first mapping of a page.
    724  */
    725 static inline int
    726 pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa, boolean_t wired)
    727 {
    728 	struct pv_entry *pv, *npv = NULL;
    729 	int s;
    730 
    731 	if (!pmap_initialized)
    732 		return 0;
    733 
    734 	s = splvm();
    735 	pv = pa_to_pv(pa);
    736 	if (!pv->pv_pm) {
    737 		/*
    738 		 * No entries yet, use header as the first entry.
    739 		 */
    740 		pv->pv_va = va;
    741 		pv->pv_pm = pm;
    742 		pv->pv_next = NULL;
    743 	} else {
    744 		/*
    745 		 * There is at least one other VA mapping this page.
    746 		 * Place this entry after the header.
    747 		 */
    748 		npv = pool_get(&pv_pool, PR_WAITOK);
    749 		npv->pv_va = va;
    750 		npv->pv_pm = pm;
    751 		npv->pv_next = pv->pv_next;
    752 		pv->pv_next = npv;
    753 		pv = npv;
    754 	}
    755 	if (wired) {
    756 		PV_WIRE(pv);
    757 		pm->pm_stats.wired_count++;
    758 	}
    759 	splx(s);
    760 	return (1);
    761 }
    762 
    763 static void
    764 pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
    765 {
    766 	struct pv_entry *pv, *npv;
    767 
    768 	/*
    769 	 * Remove from the PV table.
    770 	 */
    771 	pv = pa_to_pv(pa);
    772 	if (!pv)
    773 		return;
    774 
    775 	/*
    776 	 * If it is the first entry on the list, it is actually
    777 	 * in the header and we must copy the following entry up
    778 	 * to the header.  Otherwise we must search the list for
    779 	 * the entry.  In either case we free the now unused entry.
    780 	 */
    781 	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
    782 		if (PV_ISWIRED(pv)) {
    783 			pm->pm_stats.wired_count--;
    784 		}
    785 		if ((npv = pv->pv_next)) {
    786 			*pv = *npv;
    787 			pool_put(&pv_pool, npv);
    788 		} else
    789 			pv->pv_pm = NULL;
    790 	} else {
    791 		for (; (npv = pv->pv_next) != NULL; pv = npv)
    792 			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
    793 				break;
    794 		if (npv) {
    795 			pv->pv_next = npv->pv_next;
    796 			if (PV_ISWIRED(npv)) {
    797 				pm->pm_stats.wired_count--;
    798 			}
    799 			pool_put(&pv_pool, npv);
    800 		}
    801 	}
    802 }
    803 
    804 /*
    805  * Insert physical page at pa into the given pmap at virtual address va.
    806  */
    807 int
    808 pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
    809 {
    810 	int s;
    811 	u_int tte;
    812 	int managed;
    813 
    814 	/*
    815 	 * Have to remove any existing mapping first.
    816 	 */
    817 	pmap_remove(pm, va, va + PAGE_SIZE);
    818 
    819 	if (flags & PMAP_WIRED)
    820 		flags |= prot;
    821 
    822 	managed = 0;
    823 	if (vm_physseg_find(atop(pa), NULL) != -1)
    824 		managed = 1;
    825 
    826 	/*
    827 	 * Generate TTE.
    828 	 */
    829 	tte = TTE_PA(pa);
    830 	/* XXXX -- need to support multiple page sizes. */
    831 	tte |= TTE_SZ_16K;
    832 #ifdef	DIAGNOSTIC
    833 	if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) ==
    834 		(PME_NOCACHE | PME_WRITETHROUG))
    835 		panic("pmap_enter: uncached & writethrough");
    836 #endif
    837 	if (flags & PME_NOCACHE)
    838 		/* Must be I/O mapping */
    839 		tte |= TTE_I | TTE_G;
    840 #ifdef PPC_4XX_NOCACHE
    841 	tte |= TTE_I;
    842 #else
    843 	else if (flags & PME_WRITETHROUG)
    844 		/* Uncached and writethrough are not compatible */
    845 		tte |= TTE_W;
    846 #endif
    847 	if (pm == pmap_kernel())
    848 		tte |= TTE_ZONE(ZONE_PRIV);
    849 	else
    850 		tte |= TTE_ZONE(ZONE_USER);
    851 
    852 	if (flags & VM_PROT_WRITE)
    853 		tte |= TTE_WR;
    854 
    855 	if (flags & VM_PROT_EXECUTE)
    856 		tte |= TTE_EX;
    857 
    858 	/*
    859 	 * Now record mapping for later back-translation.
    860 	 */
    861 	if (pmap_initialized && managed) {
    862 		char *attr;
    863 
    864 		if (!pmap_enter_pv(pm, va, pa, flags & PMAP_WIRED)) {
    865 			/* Could not enter pv on a managed page */
    866 			return 1;
    867 		}
    868 
    869 		/* Now set attributes. */
    870 		attr = pa_to_attr(pa);
    871 #ifdef DIAGNOSTIC
    872 		if (!attr)
    873 			panic("managed but no attr");
    874 #endif
    875 		if (flags & VM_PROT_ALL)
    876 			*attr |= PMAP_ATTR_REF;
    877 		if (flags & VM_PROT_WRITE)
    878 			*attr |= PMAP_ATTR_CHG;
    879 	}
    880 
    881 	s = splvm();
    882 
    883 	/* Insert page into page table. */
    884 	pte_enter(pm, va, tte);
    885 
    886 	/* If this is a real fault, enter it in the tlb */
    887 	if (tte && ((flags & PMAP_WIRED) == 0)) {
    888 		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
    889 	}
    890 	splx(s);
    891 
    892 	/* Flush the real memory from the instruction cache. */
    893 	if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
    894 		__syncicache((void *)pa, PAGE_SIZE);
    895 
    896 	return 0;
    897 }
    898 
    899 void
    900 pmap_unwire(struct pmap *pm, vaddr_t va)
    901 {
    902 	struct pv_entry *pv;
    903 	paddr_t pa;
    904 	int s;
    905 
    906 	if (!pmap_extract(pm, va, &pa)) {
    907 		return;
    908 	}
    909 
    910 	pv = pa_to_pv(pa);
    911 	if (!pv)
    912 		return;
    913 
    914 	s = splvm();
    915 	while (pv != NULL) {
    916 		if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
    917 			if (PV_ISWIRED(pv)) {
    918 				PV_UNWIRE(pv);
    919 				pm->pm_stats.wired_count--;
    920 			}
    921 			break;
    922 		}
    923 		pv = pv->pv_next;
    924 	}
    925 	splx(s);
    926 }
    927 
    928 void
    929 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
    930 {
    931 	int s;
    932 	u_int tte;
    933 	struct pmap *pm = pmap_kernel();
    934 
    935 	/*
    936 	 * Have to remove any existing mapping first.
    937 	 */
    938 
    939 	/*
    940 	 * Generate TTE.
    941 	 *
    942 	 * XXXX
    943 	 *
    944 	 * Since the kernel does not handle execution privileges properly,
    945 	 * we will handle read and execute permissions together.
    946 	 */
    947 	tte = 0;
    948 	if (prot & VM_PROT_ALL) {
    949 
    950 		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
    951 		/* XXXX -- need to support multiple page sizes. */
    952 		tte |= TTE_SZ_16K;
    953 #ifdef DIAGNOSTIC
    954 		if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) ==
    955 			(PME_NOCACHE | PME_WRITETHROUG))
    956 			panic("pmap_kenter_pa: uncached & writethrough");
    957 #endif
    958 		if (prot & PME_NOCACHE)
    959 			/* Must be I/O mapping */
    960 			tte |= TTE_I | TTE_G;
    961 #ifdef PPC_4XX_NOCACHE
    962 		tte |= TTE_I;
    963 #else
    964 		else if (prot & PME_WRITETHROUG)
    965 			/* Uncached and writethrough are not compatible */
    966 			tte |= TTE_W;
    967 #endif
    968 		if (prot & VM_PROT_WRITE)
    969 			tte |= TTE_WR;
    970 	}
    971 
    972 	s = splvm();
    973 
    974 	/* Insert page into page table. */
    975 	pte_enter(pm, va, tte);
    976 	splx(s);
    977 }
    978 
    979 void
    980 pmap_kremove(vaddr_t va, vsize_t len)
    981 {
    982 
    983 	while (len > 0) {
    984 		pte_enter(pmap_kernel(), va, 0);
    985 		va += PAGE_SIZE;
    986 		len -= PAGE_SIZE;
    987 	}
    988 }
    989 
    990 /*
    991  * Remove the given range of mapping entries.
    992  */
    993 void
    994 pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
    995 {
    996 	int s;
    997 	paddr_t pa;
    998 	volatile u_int *ptp;
    999 
   1000 	s = splvm();
   1001 	while (va < endva) {
   1002 
   1003 		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
   1004 			pa = TTE_PA(pa);
   1005 			pmap_remove_pv(pm, va, pa);
   1006 			*ptp = 0;
   1007 			ppc4xx_tlb_flush(va, pm->pm_ctx);
   1008 			pm->pm_stats.resident_count--;
   1009 		}
   1010 		va += PAGE_SIZE;
   1011 	}
   1012 
   1013 	splx(s);
   1014 }
   1015 
   1016 /*
   1017  * Get the physical page address for the given pmap/virtual address.
   1018  */
   1019 boolean_t
   1020 pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
   1021 {
   1022 	int seg = STIDX(va);
   1023 	int ptn = PTIDX(va);
   1024 	u_int pa = 0;
   1025 	int s;
   1026 
   1027 	s = splvm();
   1028 	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
   1029 		*pap = TTE_PA(pa) | (va & PGOFSET);
   1030 	}
   1031 	splx(s);
   1032 	return (pa != 0);
   1033 }
   1034 
   1035 /*
   1036  * Lower the protection on the specified range of this pmap.
   1037  *
   1038  * There are only two cases: either the protection is going to 0,
   1039  * or it is going to read-only.
   1040  */
   1041 void
   1042 pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   1043 {
   1044 	volatile u_int *ptp;
   1045 	int s, bic;
   1046 
   1047 	if ((prot & VM_PROT_READ) == 0) {
   1048 		pmap_remove(pm, sva, eva);
   1049 		return;
   1050 	}
   1051 	bic = 0;
   1052 	if ((prot & VM_PROT_WRITE) == 0) {
   1053 		bic |= TTE_WR;
   1054 	}
   1055 	if ((prot & VM_PROT_EXECUTE) == 0) {
   1056 		bic |= TTE_EX;
   1057 	}
   1058 	if (bic == 0) {
   1059 		return;
   1060 	}
   1061 	s = splvm();
   1062 	while (sva < eva) {
   1063 		if ((ptp = pte_find(pm, sva)) != NULL) {
   1064 			*ptp &= ~bic;
   1065 			ppc4xx_tlb_flush(sva, pm->pm_ctx);
   1066 		}
   1067 		sva += PAGE_SIZE;
   1068 	}
   1069 	splx(s);
   1070 }
   1071 
   1072 boolean_t
   1073 pmap_check_attr(struct vm_page *pg, u_int mask, int clear)
   1074 {
   1075 	paddr_t pa;
   1076 	char *attr;
   1077 	int s, rv;
   1078 
   1079 	/*
   1080 	 * First modify bits in cache.
   1081 	 */
   1082 	pa = VM_PAGE_TO_PHYS(pg);
   1083 	attr = pa_to_attr(pa);
   1084 	if (attr == NULL)
   1085 		return FALSE;
   1086 
   1087 	s = splvm();
   1088 	rv = ((*attr & mask) != 0);
   1089 	if (clear) {
   1090 		*attr &= ~mask;
   1091 		pmap_page_protect(pg, mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
   1092 	}
   1093 	splx(s);
   1094 	return rv;
   1095 }
   1096 
   1097 
   1098 /*
   1099  * Lower the protection on the specified physical page.
   1100  *
   1101  * There are only two cases: either the protection is going to 0,
   1102  * or it is going to read-only.
   1103  */
   1104 void
   1105 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   1106 {
   1107 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1108 	vaddr_t va;
   1109 	struct pv_entry *pvh, *pv, *npv;
   1110 	struct pmap *pm;
   1111 
   1112 	pvh = pa_to_pv(pa);
   1113 	if (pvh == NULL)
   1114 		return;
   1115 
   1116 	/* Handle extra pvs which may be deleted in the operation */
   1117 	for (pv = pvh->pv_next; pv; pv = npv) {
   1118 		npv = pv->pv_next;
   1119 
   1120 		pm = pv->pv_pm;
   1121 		va = pv->pv_va;
   1122 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
   1123 	}
   1124 	/* Now check the head pv */
   1125 	if (pvh->pv_pm) {
   1126 		pv = pvh;
   1127 		pm = pv->pv_pm;
   1128 		va = pv->pv_va;
   1129 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
   1130 	}
   1131 }
   1132 
   1133 /*
   1134  * Activate the address space for the specified process.  If the process
   1135  * is the current process, load the new MMU context.
   1136  */
   1137 void
   1138 pmap_activate(struct lwp *l)
   1139 {
   1140 #if 0
   1141 	struct pcb *pcb = &l->l_proc->p_addr->u_pcb;
   1142 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
   1143 
   1144 	/*
   1145 	 * XXX Normally performed in cpu_fork().
   1146 	 */
   1147 	printf("pmap_activate(%p), pmap=%p\n",l,pmap);
   1148 	pcb->pcb_pm = pmap;
   1149 #endif
   1150 }
   1151 
   1152 /*
   1153  * Deactivate the specified process's address space.
   1154  */
   1155 void
   1156 pmap_deactivate(struct lwp *l)
   1157 {
   1158 }
   1159 
   1160 /*
   1161  * Synchronize caches corresponding to [addr, addr+len) in p.
   1162  */
   1163 void
   1164 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   1165 {
   1166 	struct pmap *pm = p->p_vmspace->vm_map.pmap;
   1167 	int msr, ctx, opid, step;
   1168 
   1169 	step = CACHELINESIZE;
   1170 
   1171 	/*
   1172 	 * Need to turn off IMMU and switch to user context.
   1173 	 * (icbi uses DMMU).
   1174 	 */
   1175 	if (!(ctx = pm->pm_ctx)) {
   1176 		/* No context -- assign it one */
   1177 		ctx_alloc(pm);
   1178 		ctx = pm->pm_ctx;
   1179 	}
   1180 	__asm volatile("mfmsr %0;"
   1181 		"li %1, %7;"
   1182 		"andc %1,%0,%1;"
   1183 		"mtmsr %1;"
   1184 		"sync;isync;"
   1185 		"mfpid %1;"
   1186 		"mtpid %2;"
   1187 		"sync; isync;"
   1188 		"1:"
   1189 		"dcbf 0,%3;"
   1190 		"icbi 0,%3;"
   1191 		"add %3,%3,%5;"
   1192 		"addc. %4,%4,%6;"
   1193 		"bge 1b;"
   1194 		"mtpid %1;"
   1195 		"mtmsr %0;"
   1196 		"sync; isync"
   1197 		: "=&r" (msr), "=&r" (opid)
   1198 		: "r" (ctx), "r" (va), "r" (len), "r" (step), "r" (-step),
   1199 		  "K" (PSL_IR | PSL_DR));
   1200 }
   1201 
   1202 
   1203 /* This has to be done in real mode !!! */
   1204 void
   1205 ppc4xx_tlb_flush(vaddr_t va, int pid)
   1206 {
   1207 	u_long i, found;
   1208 	u_long msr;
   1209 
   1210 	/* If there's no context then it can't be mapped. */
   1211 	if (!pid)
   1212 		return;
   1213 
   1214 	__asm( 	"mfpid %1;"		/* Save PID */
   1215 		"mfmsr %2;"		/* Save MSR */
   1216 		"li %0,0;"		/* Now clear MSR */
   1217 		"mtmsr %0;"
   1218 		"mtpid %4;"		/* Set PID */
   1219 		"sync;"
   1220 		"tlbsx. %0,0,%3;"	/* Search TLB */
   1221 		"sync;"
   1222 		"mtpid %1;"		/* Restore PID */
   1223 		"mtmsr %2;"		/* Restore MSR */
   1224 		"sync;isync;"
   1225 		"li %1,1;"
   1226 		"beq 1f;"
   1227 		"li %1,0;"
   1228 		"1:"
   1229 		: "=&r" (i), "=&r" (found), "=&r" (msr)
   1230 		: "r" (va), "r" (pid));
   1231 	if (found && !TLB_LOCKED(i)) {
   1232 
   1233 		/* Now flush translation */
   1234 		__asm volatile(
   1235 			"tlbwe %0,%1,0;"
   1236 			"sync;isync;"
   1237 			: : "r" (0), "r" (i));
   1238 
   1239 		tlb_info[i].ti_ctx = 0;
   1240 		tlb_info[i].ti_flags = 0;
   1241 		tlbnext = i;
   1242 		/* Successful flushes */
   1243 		tlbflush_ev.ev_count++;
   1244 	}
   1245 }
   1246 
   1247 void
   1248 ppc4xx_tlb_flush_all(void)
   1249 {
   1250 	u_long i;
   1251 
   1252 	for (i = 0; i < NTLB; i++)
   1253 		if (!TLB_LOCKED(i)) {
   1254 			__asm volatile(
   1255 				"tlbwe %0,%1,0;"
   1256 				"sync;isync;"
   1257 				: : "r" (0), "r" (i));
   1258 			tlb_info[i].ti_ctx = 0;
   1259 			tlb_info[i].ti_flags = 0;
   1260 		}
   1261 
   1262 	__asm volatile("sync;isync");
   1263 }
   1264 
   1265 /* Find a TLB entry to evict. */
   1266 static int
   1267 ppc4xx_tlb_find_victim(void)
   1268 {
   1269 	int flags;
   1270 
   1271 	for (;;) {
   1272 		if (++tlbnext >= NTLB)
   1273 			tlbnext = tlb_nreserved;
   1274 		flags = tlb_info[tlbnext].ti_flags;
   1275 		if (!(flags & TLBF_USED) ||
   1276 			(flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
   1277 			u_long va, stack = (u_long)&va;
   1278 
   1279 			if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) &&
   1280 			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
   1281 			     (flags & TLBF_USED)) {
   1282 				/* Kernel stack page */
   1283 				flags |= TLBF_USED;
   1284 				tlb_info[tlbnext].ti_flags = flags;
   1285 			} else {
   1286 				/* Found it! */
   1287 				return (tlbnext);
   1288 			}
   1289 		} else {
   1290 			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
   1291 		}
   1292 	}
   1293 }
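
         /*
          * In other words: an entry whose TLBF_REF bit is set gets a
          * second chance (the bit is cleared and the scan moves on),
          * which is the "modified FIFO" policy noted near tlbnext above,
          * and the entry mapping the page holding our own stack is
          * re-marked used instead of evicted, so the miss handler never
          * pulls the translation out from under itself.
          */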
   1294 
   1295 void
   1296 ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
   1297 {
   1298 	u_long th, tl, idx;
   1299 	tlbpid_t pid;
   1300 	u_short msr;
   1301 	paddr_t pa;
   1302 	int s, sz;
   1303 
   1304 	tlbenter_ev.ev_count++;
   1305 
   1306 	sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
   1307 	pa = (pte & TTE_RPN_MASK(sz));
   1308 	th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
   1309 	tl = (pte & ~TLB_RPN_MASK) | pa;
   1310 	tl |= ppc4xx_tlbflags(va, pa);
   1311 
   1312 	s = splhigh();
   1313 	idx = ppc4xx_tlb_find_victim();
   1314 
   1315 #ifdef DIAGNOSTIC
   1316 	if ((idx < tlb_nreserved) || (idx >= NTLB)) {
   1317 		panic("ppc4xx_tlb_enter: replacing entry %ld", idx);
   1318 	}
   1319 #endif
   1320 
   1321 	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
   1322 	tlb_info[idx].ti_ctx = ctx;
   1323 	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;
   1324 
   1325 	__asm volatile(
   1326 		"mfmsr %0;"			/* Save MSR */
   1327 		"li %1,0;"
   1328 		"tlbwe %1,%3,0;"		/* Invalidate old entry. */
   1329 		"mtmsr %1;"			/* Clear MSR */
   1330 		"mfpid %1;"			/* Save old PID */
   1331 		"mtpid %2;"			/* Load translation ctx */
   1332 		"sync; isync;"
   1333 #ifdef DEBUG
   1334 		"andi. %3,%3,63;"
   1335 		"tweqi %3,0;" 			/* XXXXX DEBUG trap on index 0 */
   1336 #endif
   1337 		"tlbwe %4,%3,1; tlbwe %5,%3,0;"	/* Set TLB */
   1338 		"sync; isync;"
   1339 		"mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
   1340 		"sync; isync;"
   1341 	: "=&r" (msr), "=&r" (pid)
   1342 	: "r" (ctx), "r" (idx), "r" (tl), "r" (th));
   1343 	splx(s);
   1344 }
   1345 
   1346 void
   1347 ppc4xx_tlb_init(void)
   1348 {
   1349 	int i;
   1350 
   1351 	/* Mark reserved TLB entries */
   1352 	for (i = 0; i < tlb_nreserved; i++) {
   1353 		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
   1354 		tlb_info[i].ti_ctx = KERNEL_PID;
   1355 	}
   1356 
   1357 	/* Setup security zones */
   1358 	/* Z0 - accessible by kernel only if TLB entry permissions allow
   1359 	 * Z1,Z2 - access is controlled by TLB entry permissions
   1360 	 * Z3 - full access regardless of TLB entry permissions
   1361 	 */
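         	/*
         	 * The ZPR value written below (0x1b000000) encodes those
         	 * protections two bits per zone, most significant zone
         	 * first: Z0 = 0b00, Z1 = 0b01, Z2 = 0b10, Z3 = 0b11; the
         	 * remaining zones stay at 0b00.
         	 */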
   1362 
   1363 	__asm volatile(
   1364 		"mtspr %0,%1;"
   1365 		"sync;"
   1366 		::  "K"(SPR_ZPR), "r" (0x1b000000));
   1367 }
   1368 
   1369 /*
   1370  * ppc4xx_tlb_size_mask:
   1371  *
    1372  * 	Round up size to a supported page size, return TLBHI mask and real size.
   1373  */
   1374 static int
   1375 ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz)
   1376 {
   1377 	int 			i;
   1378 
   1379 	for (i = 0; i < __arraycount(tlbsize); i++)
   1380 		if (size <= tlbsize[i]) {
   1381 			*mask = (i << TLB_SIZE_SHFT);
   1382 			*rsiz = tlbsize[i];
   1383 			return (0);
   1384 		}
   1385 	return (EINVAL);
   1386 }
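
         /*
          * Worked example (illustrative only): a 100KB request falls
          * between the 64K and 256K entries, so the loop above stops at
          * tlbsize[4] == 262144 and returns *rsiz = 256KB and
          * *mask = (4 << TLB_SIZE_SHFT), i.e. the TLB_SIZE_256K encoding
          * for the TLBHI size field.
          */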
   1387 
   1388 /*
   1389  * ppc4xx_tlb_mapiodev:
   1390  *
    1391  * 	Look up the virtual address of a mapping previously entered via
   1392  * 	ppc4xx_tlb_reserve. Search TLB directly so that we don't
   1393  * 	need to waste extra storage for reserved mappings. Note
   1394  * 	that reading TLBHI also sets PID, but all reserved mappings
   1395  * 	use KERNEL_PID, so the side effect is nil.
   1396  */
   1397 void *
   1398 ppc4xx_tlb_mapiodev(paddr_t base, psize_t len)
   1399 {
   1400 	paddr_t 		pa;
   1401 	vaddr_t 		va;
   1402 	u_int 			lo, hi, sz;
   1403 	int 			i;
   1404 
   1405 	/* tlb_nreserved is only allowed to grow, so this is safe. */
   1406 	for (i = 0; i < tlb_nreserved; i++) {
   1407 		__asm volatile (
   1408 		    "	tlbre %0,%2,1 	\n" 	/* TLBLO */
   1409 		    "	tlbre %1,%2,0 	\n" 	/* TLBHI */
   1410 		    : "=&r" (lo), "=&r" (hi)
   1411 		    : "r" (i));
   1412 
   1413 		KASSERT(hi & TLB_VALID);
   1414 		KASSERT(mfspr(SPR_PID) == KERNEL_PID);
   1415 
   1416 		pa = (lo & TLB_RPN_MASK);
   1417 		if (base < pa)
   1418 			continue;
   1419 
   1420 		sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT];
   1421 		if ((base + len) > (pa + sz))
   1422 			continue;
   1423 
   1424 		va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); 	/* sz = 2^n */
   1425 		return (void *)(va);
   1426 	}
   1427 
   1428 	return (NULL);
   1429 }
   1430 
   1431 /*
   1432  * ppc4xx_tlb_reserve:
   1433  *
   1434  * 	Map physical range to kernel virtual chunk via reserved TLB entry.
   1435  */
   1436 void
   1437 ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags)
   1438 {
   1439 	u_int 			lo, hi;
   1440 	int 			szmask, rsize;
   1441 
   1442 	/* Called before pmap_bootstrap(), va outside kernel space. */
   1443 	KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS);
   1444 	KASSERT(! pmap_bootstrap_done);
   1445 	KASSERT(tlb_nreserved < NTLB);
   1446 
   1447 	/* Resolve size. */
   1448 	if (ppc4xx_tlb_size_mask(size, &szmask, &rsize) != 0)
   1449 		panic("ppc4xx_tlb_reserve: entry %d, %zuB too large",
    1450 		    tlb_nreserved, size);
   1451 
   1452 	/* Real size will be power of two >= 1024, so this is OK. */
   1453 	pa &= ~(rsize - 1); 	/* RPN */
   1454 	va &= ~(rsize - 1); 	/* EPN */
   1455 
   1456 	lo = pa | TLB_WR | flags;
   1457 	hi = va | TLB_VALID | szmask | KERNEL_PID;
   1458 
   1459 #ifdef PPC_4XX_NOCACHE
   1460 	lo |= TLB_I;
   1461 #endif
   1462 
   1463 	__asm volatile(
   1464 	    "	tlbwe %1,%0,1 	\n" 	/* write TLBLO */
   1465 	    "	tlbwe %2,%0,0 	\n" 	/* write TLBHI */
   1466 	    "   sync 		\n"
   1467 	    "	isync 		\n"
   1468 	    : : "r" (tlb_nreserved), "r" (lo), "r" (hi));
   1469 
   1470 	tlb_nreserved++;
   1471 }
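
         /*
          * Usage sketch (hypothetical addresses and flags, not taken from
          * this file): board-specific early setup is expected to pin its
          * device window before pmap_bootstrap() runs and look the
          * mapping up again later, roughly:
          *
          *	ppc4xx_tlb_reserve(0xef600000, 0xef600000, 64 * 1024, TLB_I);
          *	...
          *	void *va = ppc4xx_tlb_mapiodev(0xef600300, 8);
          *
          * Only the pattern is asserted here; the physical address, the
          * virtual address (which must lie outside the kernel VM range,
          * per the KASSERT above), the size and any extra TLB flags are
          * whatever the particular board needs.
          */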
   1472 
   1473 /*
   1474  * We should pass the ctx in from trap code.
   1475  */
   1476 int
   1477 pmap_tlbmiss(vaddr_t va, int ctx)
   1478 {
   1479 	volatile u_int *pte;
   1480 	u_long tte;
   1481 
   1482 	tlbmiss_ev.ev_count++;
   1483 
   1484 	/*
   1485 	 * XXXX We will reserve 0-0x80000000 for va==pa mappings.
   1486 	 */
   1487 	if (ctx != KERNEL_PID || (va & 0x80000000)) {
   1488 		pte = pte_find((struct pmap *)__UNVOLATILE(ctxbusy[ctx]), va);
   1489 		if (pte == NULL) {
   1490 			/* Map unmanaged addresses directly for kernel access */
   1491 			return 1;
   1492 		}
   1493 		tte = *pte;
   1494 		if (tte == 0) {
   1495 			return 1;
   1496 		}
   1497 	} else {
   1498 		/* Create a 16MB writable mapping. */
   1499 #ifdef PPC_4XX_NOCACHE
   1500 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I | TTE_WR;
   1501 #else
   1502 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
   1503 #endif
   1504 	}
   1505 	tlbhit_ev.ev_count++;
   1506 	ppc4xx_tlb_enter(ctx, va, tte);
   1507 
   1508 	return 0;
   1509 }
   1510 
   1511 /*
   1512  * Flush all the entries matching a context from the TLB.
   1513  */
   1514 static int
   1515 ctx_flush(int cnum)
   1516 {
   1517 	int i;
   1518 
   1519 	/* We gotta steal this context */
   1520 	for (i = tlb_nreserved; i < NTLB; i++) {
   1521 		if (tlb_info[i].ti_ctx == cnum) {
   1522 			/* Can't steal ctx if it has a locked entry. */
   1523 			if (TLB_LOCKED(i)) {
   1524 #ifdef DIAGNOSTIC
   1525 				printf("ctx_flush: can't invalidate "
   1526 					"locked mapping %d "
   1527 					"for context %d\n", i, cnum);
   1528 #ifdef DDB
   1529 				Debugger();
   1530 #endif
   1531 #endif
   1532 				return (1);
   1533 			}
   1534 #ifdef DIAGNOSTIC
   1535 			if (i < tlb_nreserved)
   1536 				panic("TLB entry %d not locked", i);
   1537 #endif
   1538 			/* Invalidate particular TLB entry regardless of locked status */
   1539 			__asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i));
   1540 			tlb_info[i].ti_flags = 0;
   1541 		}
   1542 	}
   1543 	return (0);
   1544 }
   1545 
   1546 /*
   1547  * Allocate a context.  If necessary, steal one from someone else.
   1548  *
   1549  * The new context is flushed from the TLB before returning.
   1550  */
   1551 int
   1552 ctx_alloc(struct pmap *pm)
   1553 {
   1554 	int s, cnum;
   1555 	static int next = MINCTX;
   1556 
   1557 	if (pm == pmap_kernel()) {
   1558 #ifdef DIAGNOSTIC
   1559 		printf("ctx_alloc: kernel pmap!\n");
   1560 #endif
   1561 		return (0);
   1562 	}
   1563 	s = splvm();
   1564 
   1565 	/* Find a likely context. */
   1566 	cnum = next;
   1567 	do {
    1568 		if ((++cnum) >= NUMCTX)
   1569 			cnum = MINCTX;
   1570 	} while (ctxbusy[cnum] != NULL && cnum != next);
   1571 
   1572 	/* Now clean it out */
   1573 oops:
   1574 	if (cnum < MINCTX)
   1575 		cnum = MINCTX; /* Never steal ctx 0 or 1 */
   1576 	if (ctx_flush(cnum)) {
   1577 		/* oops -- something's wired. */
    1578 		if ((++cnum) >= NUMCTX)
   1579 			cnum = MINCTX;
   1580 		goto oops;
   1581 	}
   1582 
   1583 	if (ctxbusy[cnum]) {
   1584 #ifdef DEBUG
   1585 		/* We should identify this pmap and clear it */
   1586 		printf("Warning: stealing context %d\n", cnum);
   1587 #endif
   1588 		ctxbusy[cnum]->pm_ctx = 0;
   1589 	}
   1590 	ctxbusy[cnum] = pm;
   1591 	next = cnum;
   1592 	splx(s);
   1593 	pm->pm_ctx = cnum;
   1594 
   1595 	return cnum;
   1596 }
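
         /*
          * Context numbers are handed out lazily: code that needs a
          * translation for a pmap whose pm_ctx is still 0 allocates one
          * on the spot, much as pmap_procwr() does above:
          *
          *	if (pm->pm_ctx == 0)
          *		ctx_alloc(pm);
          *
          * ppc4xx_tlb_enter() then tags each TLB entry with that context
          * so ctx_flush() can later evict everything belonging to it.
          */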
   1597 
   1598 /*
   1599  * Give away a context.
   1600  */
   1601 void
   1602 ctx_free(struct pmap *pm)
   1603 {
   1604 	int oldctx;
   1605 
   1606 	oldctx = pm->pm_ctx;
   1607 
   1608 	if (oldctx == 0)
   1609 		panic("ctx_free: freeing kernel context");
   1610 #ifdef DIAGNOSTIC
   1611 	if (ctxbusy[oldctx] == 0)
   1612 		printf("ctx_free: freeing free context %d\n", oldctx);
   1613 	if (ctxbusy[oldctx] != pm) {
    1614 		printf("ctx_free: freeing someone else's context\n"
   1615 		       "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
   1616 		       oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
   1617 #ifdef DDB
   1618 		Debugger();
   1619 #endif
   1620 	}
   1621 #endif
   1622 	/* We should verify it has not been stolen and reallocated... */
   1623 	ctxbusy[oldctx] = NULL;
   1624 	ctx_flush(oldctx);
   1625 }
   1626 
   1627 
   1628 #ifdef DEBUG
   1629 /*
   1630  * Test ref/modify handling.
   1631  */
   1632 void pmap_testout __P((void));
   1633 void
   1634 pmap_testout()
   1635 {
   1636 	vaddr_t va;
   1637 	volatile int *loc;
   1638 	int val = 0;
   1639 	paddr_t pa;
   1640 	struct vm_page *pg;
   1641 	int ref, mod;
   1642 
   1643 	/* Allocate a page */
   1644 	va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
   1645 	    UVM_KMF_WIRED | UVM_KMF_ZERO);
   1646 	loc = (int*)va;
   1647 
   1648 	pmap_extract(pmap_kernel(), va, &pa);
   1649 	pg = PHYS_TO_VM_PAGE(pa);
   1650 	pmap_unwire(pmap_kernel(), va);
   1651 
   1652 	pmap_kremove(va, PAGE_SIZE);
   1653 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1654 	pmap_update(pmap_kernel());
   1655 
   1656 	/* Now clear reference and modify */
   1657 	ref = pmap_clear_reference(pg);
   1658 	mod = pmap_clear_modify(pg);
   1659 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1660 	       (void *)(u_long)va, (long)pa,
   1661 	       ref, mod);
   1662 
   1663 	/* Check it's properly cleared */
   1664 	ref = pmap_is_referenced(pg);
   1665 	mod = pmap_is_modified(pg);
   1666 	printf("Checking cleared page: ref %d, mod %d\n",
   1667 	       ref, mod);
   1668 
   1669 	/* Reference page */
   1670 	val = *loc;
   1671 
   1672 	ref = pmap_is_referenced(pg);
   1673 	mod = pmap_is_modified(pg);
   1674 	printf("Referenced page: ref %d, mod %d val %x\n",
   1675 	       ref, mod, val);
   1676 
   1677 	/* Now clear reference and modify */
   1678 	ref = pmap_clear_reference(pg);
   1679 	mod = pmap_clear_modify(pg);
   1680 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1681 	       (void *)(u_long)va, (long)pa,
   1682 	       ref, mod);
   1683 
   1684 	/* Modify page */
   1685 	*loc = 1;
   1686 
   1687 	ref = pmap_is_referenced(pg);
   1688 	mod = pmap_is_modified(pg);
   1689 	printf("Modified page: ref %d, mod %d\n",
   1690 	       ref, mod);
   1691 
   1692 	/* Now clear reference and modify */
   1693 	ref = pmap_clear_reference(pg);
   1694 	mod = pmap_clear_modify(pg);
   1695 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1696 	       (void *)(u_long)va, (long)pa,
   1697 	       ref, mod);
   1698 
   1699 	/* Check it's properly cleared */
   1700 	ref = pmap_is_referenced(pg);
   1701 	mod = pmap_is_modified(pg);
   1702 	printf("Checking cleared page: ref %d, mod %d\n",
   1703 	       ref, mod);
   1704 
   1705 	/* Modify page */
   1706 	*loc = 1;
   1707 
   1708 	ref = pmap_is_referenced(pg);
   1709 	mod = pmap_is_modified(pg);
   1710 	printf("Modified page: ref %d, mod %d\n",
   1711 	       ref, mod);
   1712 
   1713 	/* Check pmap_protect() */
   1714 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
   1715 	pmap_update(pmap_kernel());
   1716 	ref = pmap_is_referenced(pg);
   1717 	mod = pmap_is_modified(pg);
   1718 	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
   1719 	       ref, mod);
   1720 
   1721 	/* Now clear reference and modify */
   1722 	ref = pmap_clear_reference(pg);
   1723 	mod = pmap_clear_modify(pg);
   1724 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1725 	       (void *)(u_long)va, (long)pa,
   1726 	       ref, mod);
   1727 
   1728 	/* Reference page */
   1729 	val = *loc;
   1730 
   1731 	ref = pmap_is_referenced(pg);
   1732 	mod = pmap_is_modified(pg);
   1733 	printf("Referenced page: ref %d, mod %d val %x\n",
   1734 	       ref, mod, val);
   1735 
   1736 	/* Now clear reference and modify */
   1737 	ref = pmap_clear_reference(pg);
   1738 	mod = pmap_clear_modify(pg);
   1739 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1740 	       (void *)(u_long)va, (long)pa,
   1741 	       ref, mod);
   1742 
   1743 	/* Modify page */
   1744 #if 0
   1745 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1746 	pmap_update(pmap_kernel());
   1747 #endif
   1748 	*loc = 1;
   1749 
   1750 	ref = pmap_is_referenced(pg);
   1751 	mod = pmap_is_modified(pg);
   1752 	printf("Modified page: ref %d, mod %d\n",
   1753 	       ref, mod);
   1754 
   1755 	/* Check pmap_protect() */
   1756 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
   1757 	pmap_update(pmap_kernel());
   1758 	ref = pmap_is_referenced(pg);
   1759 	mod = pmap_is_modified(pg);
   1760 	printf("pmap_protect(): ref %d, mod %d\n",
   1761 	       ref, mod);
   1762 
   1763 	/* Now clear reference and modify */
   1764 	ref = pmap_clear_reference(pg);
   1765 	mod = pmap_clear_modify(pg);
   1766 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1767 	       (void *)(u_long)va, (long)pa,
   1768 	       ref, mod);
   1769 
   1770 	/* Reference page */
   1771 	val = *loc;
   1772 
   1773 	ref = pmap_is_referenced(pg);
   1774 	mod = pmap_is_modified(pg);
   1775 	printf("Referenced page: ref %d, mod %d val %x\n",
   1776 	       ref, mod, val);
   1777 
   1778 	/* Now clear reference and modify */
   1779 	ref = pmap_clear_reference(pg);
   1780 	mod = pmap_clear_modify(pg);
   1781 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1782 	       (void *)(u_long)va, (long)pa,
   1783 	       ref, mod);
   1784 
   1785 	/* Modify page */
   1786 #if 0
   1787 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1788 	pmap_update(pmap_kernel());
   1789 #endif
   1790 	*loc = 1;
   1791 
   1792 	ref = pmap_is_referenced(pg);
   1793 	mod = pmap_is_modified(pg);
   1794 	printf("Modified page: ref %d, mod %d\n",
   1795 	       ref, mod);
   1796 
    1797 	/* Check pmap_page_protect() */
   1798 	pmap_page_protect(pg, VM_PROT_READ);
   1799 	ref = pmap_is_referenced(pg);
   1800 	mod = pmap_is_modified(pg);
   1801 	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
   1802 	       ref, mod);
   1803 
   1804 	/* Now clear reference and modify */
   1805 	ref = pmap_clear_reference(pg);
   1806 	mod = pmap_clear_modify(pg);
   1807 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1808 	       (void *)(u_long)va, (long)pa,
   1809 	       ref, mod);
   1810 
   1811 	/* Reference page */
   1812 	val = *loc;
   1813 
   1814 	ref = pmap_is_referenced(pg);
   1815 	mod = pmap_is_modified(pg);
   1816 	printf("Referenced page: ref %d, mod %d val %x\n",
   1817 	       ref, mod, val);
   1818 
   1819 	/* Now clear reference and modify */
   1820 	ref = pmap_clear_reference(pg);
   1821 	mod = pmap_clear_modify(pg);
   1822 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1823 	       (void *)(u_long)va, (long)pa,
   1824 	       ref, mod);
   1825 
   1826 	/* Modify page */
   1827 #if 0
   1828 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1829 	pmap_update(pmap_kernel());
   1830 #endif
   1831 	*loc = 1;
   1832 
   1833 	ref = pmap_is_referenced(pg);
   1834 	mod = pmap_is_modified(pg);
   1835 	printf("Modified page: ref %d, mod %d\n",
   1836 	       ref, mod);
   1837 
    1838 	/* Check pmap_page_protect() */
   1839 	pmap_page_protect(pg, VM_PROT_NONE);
   1840 	ref = pmap_is_referenced(pg);
   1841 	mod = pmap_is_modified(pg);
   1842 	printf("pmap_page_protect(): ref %d, mod %d\n",
   1843 	       ref, mod);
   1844 
   1845 	/* Now clear reference and modify */
   1846 	ref = pmap_clear_reference(pg);
   1847 	mod = pmap_clear_modify(pg);
   1848 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1849 	       (void *)(u_long)va, (long)pa,
   1850 	       ref, mod);
   1851 
   1852 
   1853 	/* Reference page */
   1854 	val = *loc;
   1855 
   1856 	ref = pmap_is_referenced(pg);
   1857 	mod = pmap_is_modified(pg);
   1858 	printf("Referenced page: ref %d, mod %d val %x\n",
   1859 	       ref, mod, val);
   1860 
   1861 	/* Now clear reference and modify */
   1862 	ref = pmap_clear_reference(pg);
   1863 	mod = pmap_clear_modify(pg);
   1864 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1865 	       (void *)(u_long)va, (long)pa,
   1866 	       ref, mod);
   1867 
   1868 	/* Modify page */
   1869 #if 0
   1870 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1871 	pmap_update(pmap_kernel());
   1872 #endif
   1873 	*loc = 1;
   1874 
   1875 	ref = pmap_is_referenced(pg);
   1876 	mod = pmap_is_modified(pg);
   1877 	printf("Modified page: ref %d, mod %d\n",
   1878 	       ref, mod);
   1879 
   1880 	/* Unmap page */
   1881 	pmap_remove(pmap_kernel(), va, va+1);
   1882 	pmap_update(pmap_kernel());
   1883 	ref = pmap_is_referenced(pg);
   1884 	mod = pmap_is_modified(pg);
   1885 	printf("Unmapped page: ref %d, mod %d\n", ref, mod);
   1886 
   1887 	/* Now clear reference and modify */
   1888 	ref = pmap_clear_reference(pg);
   1889 	mod = pmap_clear_modify(pg);
   1890 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1891 	       (void *)(u_long)va, (long)pa, ref, mod);
   1892 
   1893 	/* Check it's properly cleared */
   1894 	ref = pmap_is_referenced(pg);
   1895 	mod = pmap_is_modified(pg);
   1896 	printf("Checking cleared page: ref %d, mod %d\n",
   1897 	       ref, mod);
   1898 
   1899 	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
   1900 	pmap_kenter_pa(va, pa, VM_PROT_ALL);
   1901 	uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE, UVM_KMF_WIRED);
   1902 }
   1903 #endif
   1904