      1 /*	$NetBSD: pmap.c,v 1.49 2007/07/24 15:19:09 hannken Exp $	*/
      2 
      3 /*
      4  * Copyright 2001 Wasabi Systems, Inc.
      5  * All rights reserved.
      6  *
      7  * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *      This product includes software developed for the NetBSD Project by
     20  *      Wasabi Systems, Inc.
     21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  *    or promote products derived from this software without specific prior
     23  *    written permission.
     24  *
     25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  * POSSIBILITY OF SUCH DAMAGE.
     36  */
     37 
     38 /*
     39  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
     40  * Copyright (C) 1995, 1996 TooLs GmbH.
     41  * All rights reserved.
     42  *
     43  * Redistribution and use in source and binary forms, with or without
     44  * modification, are permitted provided that the following conditions
     45  * are met:
     46  * 1. Redistributions of source code must retain the above copyright
     47  *    notice, this list of conditions and the following disclaimer.
     48  * 2. Redistributions in binary form must reproduce the above copyright
     49  *    notice, this list of conditions and the following disclaimer in the
     50  *    documentation and/or other materials provided with the distribution.
     51  * 3. All advertising materials mentioning features or use of this software
     52  *    must display the following acknowledgement:
     53  *	This product includes software developed by TooLs GmbH.
     54  * 4. The name of TooLs GmbH may not be used to endorse or promote products
     55  *    derived from this software without specific prior written permission.
     56  *
     57  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
     58  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     59  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     60  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     61  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     62  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
     63  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     64  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
     65  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     66  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     67  */
     68 
     69 #include <sys/cdefs.h>
     70 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.49 2007/07/24 15:19:09 hannken Exp $");
     71 
     72 #include <sys/param.h>
     73 #include <sys/malloc.h>
     74 #include <sys/proc.h>
     75 #include <sys/user.h>
     76 #include <sys/queue.h>
     77 #include <sys/systm.h>
     78 #include <sys/pool.h>
     79 #include <sys/device.h>
     80 
     81 #include <uvm/uvm.h>
     82 
     83 #include <machine/cpu.h>
     84 #include <machine/pcb.h>
     85 #include <machine/powerpc.h>
     86 
     87 #include <powerpc/spr.h>
     88 #include <machine/tlb.h>
     89 
     90 /*
     91  * kernmap is an array of PTEs large enough to map in
     92  * 4GB.  At 16KB/page it is 256K entries or 2MB.
     93  */
     94 #define KERNMAP_SIZE	((0xffffffffU/PAGE_SIZE)+1)
     95 void *kernmap;
     96 
     97 #define MINCTX		2
     98 #define NUMCTX		256
     99 
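         /*
          * ctxbusy[i] records the pmap that currently owns TLB PID i.
          * PIDs below MINCTX are reserved for the kernel (KERNEL_PID),
          * so user pmaps are handed contexts in the range
          * MINCTX..NUMCTX-1 by ctx_alloc() below.
          */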
    100 volatile struct pmap *ctxbusy[NUMCTX];
    101 
    102 #define TLBF_USED	0x1
    103 #define	TLBF_REF	0x2
    104 #define	TLBF_LOCKED	0x4
    105 #define	TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)
    106 
    107 typedef struct tlb_info_s {
    108 	char	ti_flags;
     109 	char	ti_ctx;		/* TLB_PID associated with the entry */
    110 	u_int	ti_va;
    111 } tlb_info_t;
    112 
    113 volatile tlb_info_t tlb_info[NTLB];
     114 /* We'll use a modified FIFO replacement policy because it's cheap */
    115 volatile int tlbnext;
    116 
    117 static int tlb_nreserved = 0;
    118 static int pmap_bootstrap_done = 0;
    119 
    120 /* Event counters */
    121 struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
    122 	NULL, "cpu", "tlbmiss");
    123 struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
    124 	NULL, "cpu", "tlbhit");
    125 struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
    126 	NULL, "cpu", "tlbflush");
    127 struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
    128 	NULL, "cpu", "tlbenter");
    129 
    130 struct pmap kernel_pmap_;
    131 
    132 int physmem;
    133 static int npgs;
    134 static u_int nextavail;
    135 #ifndef MSGBUFADDR
    136 extern paddr_t msgbuf_paddr;
    137 #endif
    138 
    139 static struct mem_region *mem, *avail;
    140 
    141 /*
    142  * This is a cache of referenced/modified bits.
    143  * Bits herein are shifted by ATTRSHFT.
    144  */
    145 static char *pmap_attrib;
    146 
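         /*
          * The wired bit of a mapping is kept in the low bit of pv_va
          * (virtual addresses are at least page aligned), which is why
          * PV_CMPVA() masks it off before comparing addresses.
          */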
    147 #define PV_WIRED	0x1
    148 #define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
    149 #define PV_UNWIRE(pv)	((pv)->pv_va &= ~PV_WIRED)
    150 #define PV_ISWIRED(pv)	((pv)->pv_va & PV_WIRED)
    151 #define PV_CMPVA(va,pv)	(!(((pv)->pv_va ^ (va)) & (~PV_WIRED)))
    152 
    153 struct pv_entry {
    154 	struct pv_entry *pv_next;	/* Linked list of mappings */
    155 	vaddr_t pv_va;			/* virtual address of mapping */
    156 	struct pmap *pv_pm;
    157 };
    158 
    159 /* Each index corresponds to TLB_SIZE_* value. */
    160 static size_t tlbsize[] = {
    161 	1024, 		/* TLB_SIZE_1K */
    162 	4096, 		/* TLB_SIZE_4K */
    163 	16384, 		/* TLB_SIZE_16K */
    164 	65536, 		/* TLB_SIZE_64K */
    165 	262144, 	/* TLB_SIZE_256K */
    166 	1048576, 	/* TLB_SIZE_1M */
    167 	4194304, 	/* TLB_SIZE_4M */
    168 	16777216, 	/* TLB_SIZE_16M */
    169 };
    170 
    171 struct pv_entry *pv_table;
    172 static struct pool pv_pool;
    173 
    174 static int pmap_initialized;
    175 
    176 static int ctx_flush(int);
    177 
    178 inline struct pv_entry *pa_to_pv(paddr_t);
    179 static inline char *pa_to_attr(paddr_t);
    180 
    181 static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
    182 static inline int pte_enter(struct pmap *, vaddr_t, u_int);
    183 
    184 static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, int);
    185 static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);
    186 
    187 static int ppc4xx_tlb_size_mask(size_t, int *, int *);
    188 
    189 
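         /*
          * Look up the pv list head (or attribute byte) kept for a managed
          * physical page; both return NULL for addresses outside managed RAM.
          */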
    190 inline struct pv_entry *
    191 pa_to_pv(paddr_t pa)
    192 {
    193 	int bank, pg;
    194 
    195 	bank = vm_physseg_find(atop(pa), &pg);
    196 	if (bank == -1)
    197 		return NULL;
    198 	return &vm_physmem[bank].pmseg.pvent[pg];
    199 }
    200 
    201 static inline char *
    202 pa_to_attr(paddr_t pa)
    203 {
    204 	int bank, pg;
    205 
    206 	bank = vm_physseg_find(atop(pa), &pg);
    207 	if (bank == -1)
    208 		return NULL;
    209 	return &vm_physmem[bank].pmseg.attrs[pg];
    210 }
    211 
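         /*
          * Page tables are two-level: STIDX(va) selects a segment pointer
          * in pm_ptbl[], each of which (when present) is a page-sized
          * array of PTEs indexed by PTIDX(va).
          */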
    212 /*
    213  * Insert PTE into page table.
    214  */
    215 int
    216 pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
    217 {
    218 	int seg = STIDX(va);
    219 	int ptn = PTIDX(va);
    220 	u_int oldpte;
    221 
    222 	if (!pm->pm_ptbl[seg]) {
    223 		/* Don't allocate a page to clear a non-existent mapping. */
    224 		if (!pte)
    225 			return (0);
    226 		/* Allocate a page XXXX this will sleep! */
    227 		pm->pm_ptbl[seg] =
    228 		    (uint *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
    229 		    UVM_KMF_WIRED | UVM_KMF_ZERO);
    230 	}
    231 	oldpte = pm->pm_ptbl[seg][ptn];
    232 	pm->pm_ptbl[seg][ptn] = pte;
    233 
    234 	/* Flush entry. */
    235 	ppc4xx_tlb_flush(va, pm->pm_ctx);
    236 	if (oldpte != pte) {
    237 		if (pte == 0)
    238 			pm->pm_stats.resident_count--;
    239 		else
    240 			pm->pm_stats.resident_count++;
    241 	}
    242 	return (1);
    243 }
    244 
    245 /*
    246  * Get a pointer to a PTE in a page table.
    247  */
    248 volatile u_int *
    249 pte_find(struct pmap *pm, vaddr_t va)
    250 {
    251 	int seg = STIDX(va);
    252 	int ptn = PTIDX(va);
    253 
    254 	if (pm->pm_ptbl[seg])
    255 		return (&pm->pm_ptbl[seg][ptn]);
    256 
    257 	return (NULL);
    258 }
    259 
    260 /*
    261  * This is called during initppc, before the system is really initialized.
    262  */
    263 void
    264 pmap_bootstrap(u_int kernelstart, u_int kernelend)
    265 {
    266 	struct mem_region *mp, *mp1;
    267 	int cnt, i;
    268 	u_int s, e, sz;
    269 
    270 	tlbnext = tlb_nreserved;
    271 
    272 	/*
    273 	 * Allocate the kernel page table at the end of
    274 	 * kernel space so it's in the locked TTE.
    275 	 */
    276 	kernmap = (void *)kernelend;
    277 
    278 	/*
    279 	 * Initialize kernel page table.
    280 	 */
    281 	for (i = 0; i < STSZ; i++) {
    282 		pmap_kernel()->pm_ptbl[i] = 0;
    283 	}
    284 	ctxbusy[0] = ctxbusy[1] = pmap_kernel();
    285 
    286 	/*
    287 	 * Announce page-size to the VM-system
    288 	 */
    289 	uvmexp.pagesize = NBPG;
    290 	uvm_setpagesize();
    291 
    292 	/*
    293 	 * Get memory.
    294 	 */
    295 	mem_regions(&mem, &avail);
    296 	for (mp = mem; mp->size; mp++) {
    297 		physmem += btoc(mp->size);
    298 		printf("+%lx,",mp->size);
    299 	}
    300 	printf("\n");
    301 	ppc4xx_tlb_init();
    302 	/*
    303 	 * Count the number of available entries.
    304 	 */
    305 	for (cnt = 0, mp = avail; mp->size; mp++)
    306 		cnt++;
    307 
    308 	/*
    309 	 * Page align all regions.
    310 	 * Non-page aligned memory isn't very interesting to us.
    311 	 * Also, sort the entries for ascending addresses.
    312 	 */
    313 	kernelstart &= ~PGOFSET;
    314 	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
    315 	for (mp = avail; mp->size; mp++) {
    316 		s = mp->start;
    317 		e = mp->start + mp->size;
    318 		printf("%08x-%08x -> ",s,e);
    319 		/*
    320 		 * Check whether this region holds all of the kernel.
    321 		 */
    322 		if (s < kernelstart && e > kernelend) {
    323 			avail[cnt].start = kernelend;
    324 			avail[cnt++].size = e - kernelend;
    325 			e = kernelstart;
    326 		}
    327 		/*
     328 		 * Look whether this region starts within the kernel.
    329 		 */
    330 		if (s >= kernelstart && s < kernelend) {
    331 			if (e <= kernelend)
    332 				goto empty;
    333 			s = kernelend;
    334 		}
    335 		/*
    336 		 * Now look whether this region ends within the kernel.
    337 		 */
    338 		if (e > kernelstart && e <= kernelend) {
    339 			if (s >= kernelstart)
    340 				goto empty;
    341 			e = kernelstart;
    342 		}
    343 		/*
    344 		 * Now page align the start and size of the region.
    345 		 */
    346 		s = round_page(s);
    347 		e = trunc_page(e);
    348 		if (e < s)
    349 			e = s;
    350 		sz = e - s;
    351 		printf("%08x-%08x = %x\n",s,e,sz);
    352 		/*
    353 		 * Check whether some memory is left here.
    354 		 */
    355 		if (sz == 0) {
    356 		empty:
    357 			memmove(mp, mp + 1,
    358 				(cnt - (mp - avail)) * sizeof *mp);
    359 			cnt--;
    360 			mp--;
    361 			continue;
    362 		}
    363 		/*
    364 		 * Do an insertion sort.
    365 		 */
    366 		npgs += btoc(sz);
    367 		for (mp1 = avail; mp1 < mp; mp1++)
    368 			if (s < mp1->start)
    369 				break;
    370 		if (mp1 < mp) {
    371 			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
    372 			mp1->start = s;
    373 			mp1->size = sz;
    374 		} else {
    375 			mp->start = s;
    376 			mp->size = sz;
    377 		}
    378 	}
    379 
    380 	/*
    381 	 * We cannot do pmap_steal_memory here,
    382 	 * since we don't run with translation enabled yet.
    383 	 */
    384 #ifndef MSGBUFADDR
    385 	/*
    386 	 * allow for msgbuf
    387 	 */
    388 	sz = round_page(MSGBUFSIZE);
    389 	mp = NULL;
    390 	for (mp1 = avail; mp1->size; mp1++)
    391 		if (mp1->size >= sz)
    392 			mp = mp1;
    393 	if (mp == NULL)
    394 		panic("not enough memory?");
    395 
    396 	npgs -= btoc(sz);
    397 	msgbuf_paddr = mp->start + mp->size - sz;
    398 	mp->size -= sz;
    399 	if (mp->size <= 0)
    400 		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
    401 #endif
    402 
    403 	for (mp = avail; mp->size; mp++)
    404 		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
    405 			atop(mp->start), atop(mp->start + mp->size),
    406 			VM_FREELIST_DEFAULT);
    407 
    408 	/*
    409 	 * Initialize kernel pmap and hardware.
    410 	 */
     411 	/* Set up the TLB PID allocator so it knows we are already using PID 1 */
    412 	pmap_kernel()->pm_ctx = KERNEL_PID;
    413 	nextavail = avail->start;
    414 
    415 	evcnt_attach_static(&tlbmiss_ev);
    416 	evcnt_attach_static(&tlbhit_ev);
    417 	evcnt_attach_static(&tlbflush_ev);
    418 	evcnt_attach_static(&tlbenter_ev);
    419 
    420 	pmap_bootstrap_done = 1;
    421 }
    422 
    423 /*
    424  * Restrict given range to physical memory
    425  *
    426  * (Used by /dev/mem)
    427  */
    428 void
    429 pmap_real_memory(paddr_t *start, psize_t *size)
    430 {
    431 	struct mem_region *mp;
    432 
    433 	for (mp = mem; mp->size; mp++) {
    434 		if (*start + *size > mp->start &&
    435 		    *start < mp->start + mp->size) {
    436 			if (*start < mp->start) {
    437 				*size -= mp->start - *start;
    438 				*start = mp->start;
    439 			}
    440 			if (*start + *size > mp->start + mp->size)
    441 				*size = mp->start + mp->size - *start;
    442 			return;
    443 		}
    444 	}
    445 	*size = 0;
    446 }
    447 
    448 /*
    449  * Initialize anything else for pmap handling.
    450  * Called during vm_init().
    451  */
    452 void
    453 pmap_init(void)
    454 {
    455 	struct pv_entry *pv;
    456 	vsize_t sz;
    457 	vaddr_t addr;
    458 	int i, s;
    459 	int bank;
    460 	char *attr;
    461 
    462 	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
    463 	sz = round_page(sz);
    464 	addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
    465 	s = splvm();
    466 	pv = pv_table = (struct pv_entry *)addr;
    467 	for (i = npgs; --i >= 0;)
    468 		pv++->pv_pm = NULL;
    469 	pmap_attrib = (char *)pv;
    470 	memset(pv, 0, npgs);
    471 
    472 	pv = pv_table;
    473 	attr = pmap_attrib;
    474 	for (bank = 0; bank < vm_nphysseg; bank++) {
    475 		sz = vm_physmem[bank].end - vm_physmem[bank].start;
    476 		vm_physmem[bank].pmseg.pvent = pv;
    477 		vm_physmem[bank].pmseg.attrs = attr;
    478 		pv += sz;
    479 		attr += sz;
    480 	}
    481 
    482 	pmap_initialized = 1;
    483 	splx(s);
    484 
    485 	/* Setup a pool for additional pvlist structures */
    486 	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL,
    487 	    IPL_VM);
    488 }
    489 
    490 /*
    491  * How much virtual space is available to the kernel?
    492  */
    493 void
    494 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
    495 {
    496 
    497 #if 0
    498 	/*
    499 	 * Reserve one segment for kernel virtual memory
    500 	 */
    501 	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
    502 	*end = *start + SEGMENT_LENGTH;
    503 #else
    504 	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
    505 	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
    506 #endif
    507 }
    508 
    509 #ifdef PMAP_GROWKERNEL
    510 /*
    511  * Preallocate kernel page tables to a specified VA.
    512  * This simply loops through the first TTE for each
    513  * page table from the beginning of the kernel pmap,
    514  * reads the entry, and if the result is
    515  * zero (either invalid entry or no page table) it stores
    516  * a zero there, populating page tables in the process.
     517  * This is not the most efficient technique but I don't
    518  * expect it to be called that often.
    519  */
    520 extern struct vm_page *vm_page_alloc1 __P((void));
    521 extern void vm_page_free1 __P((struct vm_page *));
    522 
    523 vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;
    524 
    525 vaddr_t
    526 pmap_growkernel(vaddr_t maxkvaddr)
    527 {
    528 	int s;
    529 	int seg;
    530 	paddr_t pg;
    531 	struct pmap *pm = pmap_kernel();
    532 
    533 	s = splvm();
    534 
    535 	/* Align with the start of a page table */
    536 	for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
    537 	     kbreak += PTMAP) {
    538 		seg = STIDX(kbreak);
    539 
    540 		if (pte_find(pm, kbreak))
    541 			continue;
    542 
    543 		if (uvm.page_init_done) {
    544 			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
    545 		} else {
    546 			if (!uvm_page_physget(&pg))
    547 				panic("pmap_growkernel: no memory");
    548 		}
    549 		if (!pg)
    550 			panic("pmap_growkernel: no pages");
    551 		pmap_zero_page((paddr_t)pg);
    552 
     553 		/* XXX This is based on all physmem being addressable */
    554 		pm->pm_ptbl[seg] = (u_int *)pg;
    555 	}
    556 	splx(s);
    557 	return (kbreak);
    558 }
    559 
    560 /*
    561  *	vm_page_alloc1:
    562  *
    563  *	Allocate and return a memory cell with no associated object.
    564  */
    565 struct vm_page *
    566 vm_page_alloc1(void)
    567 {
    568 	struct vm_page *pg;
    569 
    570 	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
    571 	if (pg) {
    572 		pg->wire_count = 1;	/* no mappings yet */
    573 		pg->flags &= ~PG_BUSY;	/* never busy */
    574 	}
    575 	return pg;
    576 }
    577 
    578 /*
    579  *	vm_page_free1:
    580  *
    581  *	Returns the given page to the free list,
     582  *	disassociating it from any VM object.
    583  *
    584  *	Object and page must be locked prior to entry.
    585  */
    586 void
    587 vm_page_free1(struct vm_page *pg)
    588 {
    589 #ifdef DIAGNOSTIC
    590 	if (pg->flags != (PG_CLEAN|PG_FAKE)) {
    591 		printf("Freeing invalid page %p\n", pg);
    592 		printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(pg));
    593 #ifdef DDB
    594 		Debugger();
    595 #endif
    596 		return;
    597 	}
    598 #endif
    599 	pg->flags |= PG_BUSY;
    600 	pg->wire_count = 0;
    601 	uvm_pagefree(pg);
    602 }
    603 #endif
    604 
    605 /*
    606  * Create and return a physical map.
    607  */
    608 struct pmap *
    609 pmap_create(void)
    610 {
    611 	struct pmap *pm;
    612 
    613 	pm = malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
    614 	memset(pm, 0, sizeof *pm);
    615 	pm->pm_refs = 1;
    616 	return pm;
    617 }
    618 
    619 /*
    620  * Add a reference to the given pmap.
    621  */
    622 void
    623 pmap_reference(struct pmap *pm)
    624 {
    625 
    626 	pm->pm_refs++;
    627 }
    628 
    629 /*
    630  * Retire the given pmap from service.
    631  * Should only be called if the map contains no valid mappings.
    632  */
    633 void
    634 pmap_destroy(struct pmap *pm)
    635 {
    636 	int i;
    637 
    638 	if (--pm->pm_refs > 0) {
    639 		return;
    640 	}
    641 	KASSERT(pm->pm_stats.resident_count == 0);
    642 	KASSERT(pm->pm_stats.wired_count == 0);
    643 	for (i = 0; i < STSZ; i++)
    644 		if (pm->pm_ptbl[i]) {
    645 			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
    646 			    PAGE_SIZE, UVM_KMF_WIRED);
    647 			pm->pm_ptbl[i] = NULL;
    648 		}
    649 	if (pm->pm_ctx)
    650 		ctx_free(pm);
    651 	free(pm, M_VMPMAP);
    652 }
    653 
    654 /*
    655  * Copy the range specified by src_addr/len
    656  * from the source map to the range dst_addr/len
    657  * in the destination map.
    658  *
    659  * This routine is only advisory and need not do anything.
    660  */
    661 void
    662 pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
    663 	  vsize_t len, vaddr_t src_addr)
    664 {
    665 }
    666 
    667 /*
    668  * Require that all active physical maps contain no
    669  * incorrect entries NOW.
    670  */
    671 void
    672 pmap_update(struct pmap *pmap)
    673 {
    674 }
    675 
    676 /*
    677  * Garbage collects the physical map system for
    678  * pages which are no longer used.
    679  * Success need not be guaranteed -- that is, there
    680  * may well be pages which are not referenced, but
    681  * others may be collected.
    682  * Called by the pageout daemon when pages are scarce.
    683  */
    684 void
    685 pmap_collect(struct pmap *pm)
    686 {
    687 }
    688 
    689 /*
    690  * Fill the given physical page with zeroes.
    691  */
    692 void
    693 pmap_zero_page(paddr_t pa)
    694 {
    695 
    696 #ifdef PPC_4XX_NOCACHE
    697 	memset((void *)pa, 0, PAGE_SIZE);
    698 #else
    699 	int i;
    700 
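         	/*
         	 * dcbz establishes and zeroes an entire cache line without
         	 * first reading the page from memory, so walking the page a
         	 * cache line at a time is cheaper than a plain memset().
         	 */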
    701 	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
    702 		__asm volatile ("dcbz 0,%0" :: "r"(pa));
    703 		pa += CACHELINESIZE;
    704 	}
    705 #endif
    706 }
    707 
    708 /*
    709  * Copy the given physical source page to its destination.
    710  */
    711 void
    712 pmap_copy_page(paddr_t src, paddr_t dst)
    713 {
    714 
    715 	memcpy((void *)dst, (void *)src, PAGE_SIZE);
    716 	dcache_flush_page(dst);
    717 }
    718 
    719 /*
    720  * This returns != 0 on success.
    721  */
    722 static inline int
    723 pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa, int flags)
    724 {
    725 	struct pv_entry *pv, *npv = NULL;
    726 	int s;
    727 
    728 	if (!pmap_initialized)
    729 		return 0;
    730 
    731 	s = splvm();
    732 	pv = pa_to_pv(pa);
    733 	if (!pv->pv_pm) {
    734 		/*
    735 		 * No entries yet, use header as the first entry.
    736 		 */
    737 		pv->pv_va = va;
    738 		pv->pv_pm = pm;
    739 		pv->pv_next = NULL;
    740 	} else {
    741 		/*
    742 		 * There is at least one other VA mapping this page.
    743 		 * Place this entry after the header.
    744 		 */
    745 		npv = pool_get(&pv_pool, PR_NOWAIT);
    746 		if (npv == NULL) {
    747 			if ((flags & PMAP_CANFAIL) == 0)
    748 				panic("pmap_enter_pv: failed");
    749 			splx(s);
    750 			return 0;
    751 		}
    752 		npv->pv_va = va;
    753 		npv->pv_pm = pm;
    754 		npv->pv_next = pv->pv_next;
    755 		pv->pv_next = npv;
    756 		pv = npv;
    757 	}
    758 	if (flags & PMAP_WIRED) {
    759 		PV_WIRE(pv);
    760 		pm->pm_stats.wired_count++;
    761 	}
    762 	splx(s);
    763 	return (1);
    764 }
    765 
    766 static void
    767 pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
    768 {
    769 	struct pv_entry *pv, *npv;
    770 
    771 	/*
    772 	 * Remove from the PV table.
    773 	 */
    774 	pv = pa_to_pv(pa);
    775 	if (!pv)
    776 		return;
    777 
    778 	/*
    779 	 * If it is the first entry on the list, it is actually
    780 	 * in the header and we must copy the following entry up
    781 	 * to the header.  Otherwise we must search the list for
    782 	 * the entry.  In either case we free the now unused entry.
    783 	 */
    784 	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
    785 		if (PV_ISWIRED(pv)) {
    786 			pm->pm_stats.wired_count--;
    787 		}
    788 		if ((npv = pv->pv_next)) {
    789 			*pv = *npv;
    790 			pool_put(&pv_pool, npv);
    791 		} else
    792 			pv->pv_pm = NULL;
    793 	} else {
    794 		for (; (npv = pv->pv_next) != NULL; pv = npv)
    795 			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
    796 				break;
    797 		if (npv) {
    798 			pv->pv_next = npv->pv_next;
    799 			if (PV_ISWIRED(npv)) {
    800 				pm->pm_stats.wired_count--;
    801 			}
    802 			pool_put(&pv_pool, npv);
    803 		}
    804 	}
    805 }
    806 
    807 /*
    808  * Insert physical page at pa into the given pmap at virtual address va.
    809  */
    810 int
    811 pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
    812 {
    813 	int s;
    814 	u_int tte;
    815 	int managed;
    816 
    817 	/*
    818 	 * Have to remove any existing mapping first.
    819 	 */
    820 	pmap_remove(pm, va, va + PAGE_SIZE);
    821 
    822 	if (flags & PMAP_WIRED)
    823 		flags |= prot;
    824 
    825 	managed = 0;
    826 	if (vm_physseg_find(atop(pa), NULL) != -1)
    827 		managed = 1;
    828 
    829 	/*
    830 	 * Generate TTE.
    831 	 */
    832 	tte = TTE_PA(pa);
    833 	/* XXXX -- need to support multiple page sizes. */
    834 	tte |= TTE_SZ_16K;
    835 #ifdef	DIAGNOSTIC
    836 	if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) ==
    837 		(PME_NOCACHE | PME_WRITETHROUG))
    838 		panic("pmap_enter: uncached & writethrough");
    839 #endif
    840 	if (flags & PME_NOCACHE)
    841 		/* Must be I/O mapping */
    842 		tte |= TTE_I | TTE_G;
    843 #ifdef PPC_4XX_NOCACHE
    844 	tte |= TTE_I;
    845 #else
    846 	else if (flags & PME_WRITETHROUG)
    847 		/* Uncached and writethrough are not compatible */
    848 		tte |= TTE_W;
    849 #endif
    850 	if (pm == pmap_kernel())
    851 		tte |= TTE_ZONE(ZONE_PRIV);
    852 	else
    853 		tte |= TTE_ZONE(ZONE_USER);
    854 
    855 	if (flags & VM_PROT_WRITE)
    856 		tte |= TTE_WR;
    857 
    858 	if (flags & VM_PROT_EXECUTE)
    859 		tte |= TTE_EX;
    860 
    861 	/*
    862 	 * Now record mapping for later back-translation.
    863 	 */
    864 	if (pmap_initialized && managed) {
    865 		char *attr;
    866 
    867 		if (!pmap_enter_pv(pm, va, pa, flags)) {
    868 			/* Could not enter pv on a managed page */
    869 			return 1;
    870 		}
    871 
    872 		/* Now set attributes. */
    873 		attr = pa_to_attr(pa);
    874 #ifdef DIAGNOSTIC
    875 		if (!attr)
    876 			panic("managed but no attr");
    877 #endif
    878 		if (flags & VM_PROT_ALL)
    879 			*attr |= PMAP_ATTR_REF;
    880 		if (flags & VM_PROT_WRITE)
    881 			*attr |= PMAP_ATTR_CHG;
    882 	}
    883 
    884 	s = splvm();
    885 
    886 	/* Insert page into page table. */
    887 	pte_enter(pm, va, tte);
    888 
    889 	/* If this is a real fault, enter it in the tlb */
    890 	if (tte && ((flags & PMAP_WIRED) == 0)) {
    891 		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
    892 	}
    893 	splx(s);
    894 
    895 	/* Flush the real memory from the instruction cache. */
    896 	if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
    897 		__syncicache((void *)pa, PAGE_SIZE);
    898 
    899 	return 0;
    900 }
    901 
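         /*
          * Clear the wired attribute of the mapping for the given
          * pmap/virtual-address pair, if such a mapping exists.
          */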
    902 void
    903 pmap_unwire(struct pmap *pm, vaddr_t va)
    904 {
    905 	struct pv_entry *pv;
    906 	paddr_t pa;
    907 	int s;
    908 
    909 	if (!pmap_extract(pm, va, &pa)) {
    910 		return;
    911 	}
    912 
    913 	pv = pa_to_pv(pa);
    914 	if (!pv)
    915 		return;
    916 
    917 	s = splvm();
    918 	while (pv != NULL) {
    919 		if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
    920 			if (PV_ISWIRED(pv)) {
    921 				PV_UNWIRE(pv);
    922 				pm->pm_stats.wired_count--;
    923 			}
    924 			break;
    925 		}
    926 		pv = pv->pv_next;
    927 	}
    928 	splx(s);
    929 }
    930 
    931 void
    932 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
    933 {
    934 	int s;
    935 	u_int tte;
    936 	struct pmap *pm = pmap_kernel();
    937 
    938 	/*
    939 	 * Have to remove any existing mapping first.
    940 	 */
    941 
    942 	/*
    943 	 * Generate TTE.
    944 	 *
    945 	 * XXXX
    946 	 *
    947 	 * Since the kernel does not handle execution privileges properly,
    948 	 * we will handle read and execute permissions together.
    949 	 */
    950 	tte = 0;
    951 	if (prot & VM_PROT_ALL) {
    952 
    953 		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
    954 		/* XXXX -- need to support multiple page sizes. */
    955 		tte |= TTE_SZ_16K;
    956 #ifdef DIAGNOSTIC
    957 		if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) ==
    958 			(PME_NOCACHE | PME_WRITETHROUG))
    959 			panic("pmap_kenter_pa: uncached & writethrough");
    960 #endif
    961 		if (prot & PME_NOCACHE)
    962 			/* Must be I/O mapping */
    963 			tte |= TTE_I | TTE_G;
    964 #ifdef PPC_4XX_NOCACHE
    965 		tte |= TTE_I;
    966 #else
    967 		else if (prot & PME_WRITETHROUG)
    968 			/* Uncached and writethrough are not compatible */
    969 			tte |= TTE_W;
    970 #endif
    971 		if (prot & VM_PROT_WRITE)
    972 			tte |= TTE_WR;
    973 	}
    974 
    975 	s = splvm();
    976 
    977 	/* Insert page into page table. */
    978 	pte_enter(pm, va, tte);
    979 	splx(s);
    980 }
    981 
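         /*
          * Remove kernel mappings previously entered with pmap_kenter_pa().
          */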
    982 void
    983 pmap_kremove(vaddr_t va, vsize_t len)
    984 {
    985 
    986 	while (len > 0) {
    987 		pte_enter(pmap_kernel(), va, 0);
    988 		va += PAGE_SIZE;
    989 		len -= PAGE_SIZE;
    990 	}
    991 }
    992 
    993 /*
    994  * Remove the given range of mapping entries.
    995  */
    996 void
    997 pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
    998 {
    999 	int s;
   1000 	paddr_t pa;
   1001 	volatile u_int *ptp;
   1002 
   1003 	s = splvm();
   1004 	while (va < endva) {
   1005 
   1006 		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
   1007 			pa = TTE_PA(pa);
   1008 			pmap_remove_pv(pm, va, pa);
   1009 			*ptp = 0;
   1010 			ppc4xx_tlb_flush(va, pm->pm_ctx);
   1011 			pm->pm_stats.resident_count--;
   1012 		}
   1013 		va += PAGE_SIZE;
   1014 	}
   1015 
   1016 	splx(s);
   1017 }
   1018 
   1019 /*
   1020  * Get the physical page address for the given pmap/virtual address.
   1021  */
   1022 bool
   1023 pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
   1024 {
   1025 	int seg = STIDX(va);
   1026 	int ptn = PTIDX(va);
   1027 	u_int pa = 0;
   1028 	int s;
   1029 
   1030 	s = splvm();
   1031 	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
   1032 		*pap = TTE_PA(pa) | (va & PGOFSET);
   1033 	}
   1034 	splx(s);
   1035 	return (pa != 0);
   1036 }
   1037 
   1038 /*
   1039  * Lower the protection on the specified range of this pmap.
   1040  *
   1041  * There are only two cases: either the protection is going to 0,
   1042  * or it is going to read-only.
   1043  */
   1044 void
   1045 pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   1046 {
   1047 	volatile u_int *ptp;
   1048 	int s, bic;
   1049 
   1050 	if ((prot & VM_PROT_READ) == 0) {
   1051 		pmap_remove(pm, sva, eva);
   1052 		return;
   1053 	}
   1054 	bic = 0;
   1055 	if ((prot & VM_PROT_WRITE) == 0) {
   1056 		bic |= TTE_WR;
   1057 	}
   1058 	if ((prot & VM_PROT_EXECUTE) == 0) {
   1059 		bic |= TTE_EX;
   1060 	}
   1061 	if (bic == 0) {
   1062 		return;
   1063 	}
   1064 	s = splvm();
   1065 	while (sva < eva) {
   1066 		if ((ptp = pte_find(pm, sva)) != NULL) {
   1067 			*ptp &= ~bic;
   1068 			ppc4xx_tlb_flush(sva, pm->pm_ctx);
   1069 		}
   1070 		sva += PAGE_SIZE;
   1071 	}
   1072 	splx(s);
   1073 }
   1074 
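         /*
          * Test whether any of the given attribute bits (referenced/modified)
          * are cached for the page; if 'clear' is set, also clear them and
          * lower the protection on existing mappings accordingly.
          */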
   1075 bool
   1076 pmap_check_attr(struct vm_page *pg, u_int mask, int clear)
   1077 {
   1078 	paddr_t pa;
   1079 	char *attr;
   1080 	int s, rv;
   1081 
   1082 	/*
   1083 	 * First modify bits in cache.
   1084 	 */
   1085 	pa = VM_PAGE_TO_PHYS(pg);
   1086 	attr = pa_to_attr(pa);
   1087 	if (attr == NULL)
   1088 		return false;
   1089 
   1090 	s = splvm();
   1091 	rv = ((*attr & mask) != 0);
   1092 	if (clear) {
   1093 		*attr &= ~mask;
   1094 		pmap_page_protect(pg, mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
   1095 	}
   1096 	splx(s);
   1097 	return rv;
   1098 }
   1099 
   1100 
   1101 /*
   1102  * Lower the protection on the specified physical page.
   1103  *
   1104  * There are only two cases: either the protection is going to 0,
   1105  * or it is going to read-only.
   1106  */
   1107 void
   1108 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   1109 {
   1110 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1111 	vaddr_t va;
   1112 	struct pv_entry *pvh, *pv, *npv;
   1113 	struct pmap *pm;
   1114 
   1115 	pvh = pa_to_pv(pa);
   1116 	if (pvh == NULL)
   1117 		return;
   1118 
   1119 	/* Handle extra pvs which may be deleted in the operation */
   1120 	for (pv = pvh->pv_next; pv; pv = npv) {
   1121 		npv = pv->pv_next;
   1122 
   1123 		pm = pv->pv_pm;
   1124 		va = pv->pv_va;
   1125 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
   1126 	}
   1127 	/* Now check the head pv */
   1128 	if (pvh->pv_pm) {
   1129 		pv = pvh;
   1130 		pm = pv->pv_pm;
   1131 		va = pv->pv_va;
   1132 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
   1133 	}
   1134 }
   1135 
   1136 /*
   1137  * Activate the address space for the specified process.  If the process
   1138  * is the current process, load the new MMU context.
   1139  */
   1140 void
   1141 pmap_activate(struct lwp *l)
   1142 {
   1143 #if 0
   1144 	struct pcb *pcb = &l->l_proc->p_addr->u_pcb;
   1145 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
   1146 
   1147 	/*
   1148 	 * XXX Normally performed in cpu_fork().
   1149 	 */
   1150 	printf("pmap_activate(%p), pmap=%p\n",l,pmap);
   1151 	pcb->pcb_pm = pmap;
   1152 #endif
   1153 }
   1154 
   1155 /*
   1156  * Deactivate the specified process's address space.
   1157  */
   1158 void
   1159 pmap_deactivate(struct lwp *l)
   1160 {
   1161 }
   1162 
   1163 /*
   1164  * Synchronize caches corresponding to [addr, addr+len) in p.
   1165  */
   1166 void
   1167 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   1168 {
   1169 	struct pmap *pm = p->p_vmspace->vm_map.pmap;
   1170 	int msr, ctx, opid, step;
   1171 
   1172 	step = CACHELINESIZE;
   1173 
   1174 	/*
   1175 	 * Need to turn off IMMU and switch to user context.
   1176 	 * (icbi uses DMMU).
   1177 	 */
   1178 	if (!(ctx = pm->pm_ctx)) {
   1179 		/* No context -- assign it one */
   1180 		ctx_alloc(pm);
   1181 		ctx = pm->pm_ctx;
   1182 	}
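         	/*
         	 * The loop below runs with the MSR translation bits cleared
         	 * and the user's PID loaded, flushing (dcbf) and invalidating
         	 * (icbi) one cache line per iteration until len is exhausted.
         	 */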
   1183 	__asm volatile("mfmsr %0;"
   1184 		"li %1, %7;"
   1185 		"andc %1,%0,%1;"
   1186 		"mtmsr %1;"
   1187 		"sync;isync;"
   1188 		"mfpid %1;"
   1189 		"mtpid %2;"
   1190 		"sync; isync;"
   1191 		"1:"
   1192 		"dcbf 0,%3;"
   1193 		"icbi 0,%3;"
   1194 		"add %3,%3,%5;"
   1195 		"addc. %4,%4,%6;"
   1196 		"bge 1b;"
   1197 		"mtpid %1;"
   1198 		"mtmsr %0;"
   1199 		"sync; isync"
   1200 		: "=&r" (msr), "=&r" (opid)
   1201 		: "r" (ctx), "r" (va), "r" (len), "r" (step), "r" (-step),
   1202 		  "K" (PSL_IR | PSL_DR));
   1203 }
   1204 
   1205 
   1206 /* This has to be done in real mode !!! */
   1207 void
   1208 ppc4xx_tlb_flush(vaddr_t va, int pid)
   1209 {
   1210 	u_long i, found;
   1211 	u_long msr;
   1212 
   1213 	/* If there's no context then it can't be mapped. */
   1214 	if (!pid)
   1215 		return;
   1216 
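         	/*
         	 * tlbsx. searches the TLB for (PID, va) and records the result
         	 * in CR0; the trailing li/beq pair turns that into found = 1
         	 * (hit) or 0 (miss), with the matching entry index left in i.
         	 */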
   1217 	__asm( 	"mfpid %1;"		/* Save PID */
   1218 		"mfmsr %2;"		/* Save MSR */
   1219 		"li %0,0;"		/* Now clear MSR */
   1220 		"mtmsr %0;"
   1221 		"mtpid %4;"		/* Set PID */
   1222 		"sync;"
   1223 		"tlbsx. %0,0,%3;"	/* Search TLB */
   1224 		"sync;"
   1225 		"mtpid %1;"		/* Restore PID */
   1226 		"mtmsr %2;"		/* Restore MSR */
   1227 		"sync;isync;"
   1228 		"li %1,1;"
   1229 		"beq 1f;"
   1230 		"li %1,0;"
   1231 		"1:"
   1232 		: "=&r" (i), "=&r" (found), "=&r" (msr)
   1233 		: "r" (va), "r" (pid));
   1234 	if (found && !TLB_LOCKED(i)) {
   1235 
   1236 		/* Now flush translation */
   1237 		__asm volatile(
   1238 			"tlbwe %0,%1,0;"
   1239 			"sync;isync;"
   1240 			: : "r" (0), "r" (i));
   1241 
   1242 		tlb_info[i].ti_ctx = 0;
   1243 		tlb_info[i].ti_flags = 0;
   1244 		tlbnext = i;
   1245 		/* Successful flushes */
   1246 		tlbflush_ev.ev_count++;
   1247 	}
   1248 }
   1249 
   1250 void
   1251 ppc4xx_tlb_flush_all(void)
   1252 {
   1253 	u_long i;
   1254 
   1255 	for (i = 0; i < NTLB; i++)
   1256 		if (!TLB_LOCKED(i)) {
   1257 			__asm volatile(
   1258 				"tlbwe %0,%1,0;"
   1259 				"sync;isync;"
   1260 				: : "r" (0), "r" (i));
   1261 			tlb_info[i].ti_ctx = 0;
   1262 			tlb_info[i].ti_flags = 0;
   1263 		}
   1264 
   1265 	__asm volatile("sync;isync");
   1266 }
   1267 
   1268 /* Find a TLB entry to evict. */
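         /*
          * This is a second-chance (clock) sweep over the unreserved entries:
          * referenced entries have their TLBF_REF bit cleared and are skipped
          * this time around, and the entry mapping the current kernel stack
          * page is never chosen.
          */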
   1269 static int
   1270 ppc4xx_tlb_find_victim(void)
   1271 {
   1272 	int flags;
   1273 
   1274 	for (;;) {
   1275 		if (++tlbnext >= NTLB)
   1276 			tlbnext = tlb_nreserved;
   1277 		flags = tlb_info[tlbnext].ti_flags;
   1278 		if (!(flags & TLBF_USED) ||
   1279 			(flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
   1280 			u_long va, stack = (u_long)&va;
   1281 
   1282 			if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) &&
   1283 			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
   1284 			     (flags & TLBF_USED)) {
   1285 				/* Kernel stack page */
   1286 				flags |= TLBF_USED;
   1287 				tlb_info[tlbnext].ti_flags = flags;
   1288 			} else {
   1289 				/* Found it! */
   1290 				return (tlbnext);
   1291 			}
   1292 		} else {
   1293 			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
   1294 		}
   1295 	}
   1296 }
   1297 
   1298 void
   1299 ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
   1300 {
   1301 	u_long th, tl, idx;
   1302 	tlbpid_t pid;
   1303 	u_short msr;
   1304 	paddr_t pa;
   1305 	int s, sz;
   1306 
   1307 	tlbenter_ev.ev_count++;
   1308 
   1309 	sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
   1310 	pa = (pte & TTE_RPN_MASK(sz));
   1311 	th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
   1312 	tl = (pte & ~TLB_RPN_MASK) | pa;
   1313 	tl |= ppc4xx_tlbflags(va, pa);
   1314 
   1315 	s = splhigh();
   1316 	idx = ppc4xx_tlb_find_victim();
   1317 
   1318 #ifdef DIAGNOSTIC
   1319 	if ((idx < tlb_nreserved) || (idx >= NTLB)) {
   1320 		panic("ppc4xx_tlb_enter: replacing entry %ld", idx);
   1321 	}
   1322 #endif
   1323 
   1324 	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
   1325 	tlb_info[idx].ti_ctx = ctx;
   1326 	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;
   1327 
   1328 	__asm volatile(
   1329 		"mfmsr %0;"			/* Save MSR */
   1330 		"li %1,0;"
   1331 		"tlbwe %1,%3,0;"		/* Invalidate old entry. */
   1332 		"mtmsr %1;"			/* Clear MSR */
   1333 		"mfpid %1;"			/* Save old PID */
   1334 		"mtpid %2;"			/* Load translation ctx */
   1335 		"sync; isync;"
   1336 #ifdef DEBUG
   1337 		"andi. %3,%3,63;"
   1338 		"tweqi %3,0;" 			/* XXXXX DEBUG trap on index 0 */
   1339 #endif
   1340 		"tlbwe %4,%3,1; tlbwe %5,%3,0;"	/* Set TLB */
   1341 		"sync; isync;"
   1342 		"mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
   1343 		"sync; isync;"
   1344 	: "=&r" (msr), "=&r" (pid)
   1345 	: "r" (ctx), "r" (idx), "r" (tl), "r" (th));
   1346 	splx(s);
   1347 }
   1348 
   1349 void
   1350 ppc4xx_tlb_init(void)
   1351 {
   1352 	int i;
   1353 
   1354 	/* Mark reserved TLB entries */
   1355 	for (i = 0; i < tlb_nreserved; i++) {
   1356 		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
   1357 		tlb_info[i].ti_ctx = KERNEL_PID;
   1358 	}
   1359 
   1360 	/* Setup security zones */
   1361 	/* Z0 - accessible by kernel only if TLB entry permissions allow
   1362 	 * Z1,Z2 - access is controlled by TLB entry permissions
   1363 	 * Z3 - full access regardless of TLB entry permissions
   1364 	 */
   1365 
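         	/*
         	 * 0x1b000000 packs the four 2-bit zone fields into the top
         	 * byte of the ZPR: Z0 = 00, Z1 = 01, Z2 = 10, Z3 = 11,
         	 * matching the policy described above.
         	 */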
   1366 	__asm volatile(
   1367 		"mtspr %0,%1;"
   1368 		"sync;"
   1369 		::  "K"(SPR_ZPR), "r" (0x1b000000));
   1370 }
   1371 
   1372 /*
   1373  * ppc4xx_tlb_size_mask:
   1374  *
   1375  * 	Roundup size to supported page size, return TLBHI mask and real size.
   1376  */
   1377 static int
   1378 ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz)
   1379 {
   1380 	int 			i;
   1381 
   1382 	for (i = 0; i < __arraycount(tlbsize); i++)
   1383 		if (size <= tlbsize[i]) {
   1384 			*mask = (i << TLB_SIZE_SHFT);
   1385 			*rsiz = tlbsize[i];
   1386 			return (0);
   1387 		}
   1388 	return (EINVAL);
   1389 }
   1390 
   1391 /*
   1392  * ppc4xx_tlb_mapiodev:
   1393  *
   1394  * 	Lookup virtual address of mapping previously entered via
   1395  * 	ppc4xx_tlb_reserve. Search TLB directly so that we don't
   1396  * 	need to waste extra storage for reserved mappings. Note
   1397  * 	that reading TLBHI also sets PID, but all reserved mappings
   1398  * 	use KERNEL_PID, so the side effect is nil.
   1399  */
   1400 void *
   1401 ppc4xx_tlb_mapiodev(paddr_t base, psize_t len)
   1402 {
   1403 	paddr_t 		pa;
   1404 	vaddr_t 		va;
   1405 	u_int 			lo, hi, sz;
   1406 	int 			i;
   1407 
   1408 	/* tlb_nreserved is only allowed to grow, so this is safe. */
   1409 	for (i = 0; i < tlb_nreserved; i++) {
   1410 		__asm volatile (
   1411 		    "	tlbre %0,%2,1 	\n" 	/* TLBLO */
   1412 		    "	tlbre %1,%2,0 	\n" 	/* TLBHI */
   1413 		    : "=&r" (lo), "=&r" (hi)
   1414 		    : "r" (i));
   1415 
   1416 		KASSERT(hi & TLB_VALID);
   1417 		KASSERT(mfspr(SPR_PID) == KERNEL_PID);
   1418 
   1419 		pa = (lo & TLB_RPN_MASK);
   1420 		if (base < pa)
   1421 			continue;
   1422 
   1423 		sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT];
   1424 		if ((base + len) > (pa + sz))
   1425 			continue;
   1426 
   1427 		va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); 	/* sz = 2^n */
   1428 		return (void *)(va);
   1429 	}
   1430 
   1431 	return (NULL);
   1432 }
   1433 
   1434 /*
   1435  * ppc4xx_tlb_reserve:
   1436  *
   1437  * 	Map physical range to kernel virtual chunk via reserved TLB entry.
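          *
          * 	For example (arguments hypothetical), early board setup might
          * 	call ppc4xx_tlb_reserve(pa, pa, 0x100000, TLB_I) to get an
          * 	uncached, writable window over on-chip peripherals before the
          * 	pmap is bootstrapped.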
   1438  */
   1439 void
   1440 ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags)
   1441 {
   1442 	u_int 			lo, hi;
   1443 	int 			szmask, rsize;
   1444 
   1445 	/* Called before pmap_bootstrap(), va outside kernel space. */
   1446 	KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS);
   1447 	KASSERT(! pmap_bootstrap_done);
   1448 	KASSERT(tlb_nreserved < NTLB);
   1449 
   1450 	/* Resolve size. */
   1451 	if (ppc4xx_tlb_size_mask(size, &szmask, &rsize) != 0)
   1452 		panic("ppc4xx_tlb_reserve: entry %d, %zuB too large",
    1453 		    tlb_nreserved, size);
   1454 
   1455 	/* Real size will be power of two >= 1024, so this is OK. */
   1456 	pa &= ~(rsize - 1); 	/* RPN */
   1457 	va &= ~(rsize - 1); 	/* EPN */
   1458 
   1459 	lo = pa | TLB_WR | flags;
   1460 	hi = va | TLB_VALID | szmask;
   1461 
   1462 #ifdef PPC_4XX_NOCACHE
   1463 	lo |= TLB_I;
   1464 #endif
   1465 
   1466 	__asm volatile(
   1467 	    "	tlbwe %1,%0,1 	\n" 	/* write TLBLO */
   1468 	    "	tlbwe %2,%0,0 	\n" 	/* write TLBHI */
   1469 	    "   sync 		\n"
   1470 	    "	isync 		\n"
   1471 	    : : "r" (tlb_nreserved), "r" (lo), "r" (hi));
   1472 
   1473 	tlb_nreserved++;
   1474 }
   1475 
   1476 /*
   1477  * We should pass the ctx in from trap code.
   1478  */
   1479 int
   1480 pmap_tlbmiss(vaddr_t va, int ctx)
   1481 {
   1482 	volatile u_int *pte;
   1483 	u_long tte;
   1484 
   1485 	tlbmiss_ev.ev_count++;
   1486 
   1487 	/*
    1488 	 * We will reserve 0 up to VM_MIN_KERNEL_ADDRESS for va == pa mappings.
    1489 	 * Physical RAM is expected to live in this range; care must be taken
    1490 	 * not to clobber 0 up to ${physmem} with device mappings in machdep
   1491 	 * code.
   1492 	 */
   1493 	if (ctx != KERNEL_PID || va >= VM_MIN_KERNEL_ADDRESS) {
   1494 		pte = pte_find((struct pmap *)__UNVOLATILE(ctxbusy[ctx]), va);
   1495 		if (pte == NULL) {
   1496 			/* Map unmanaged addresses directly for kernel access */
   1497 			return 1;
   1498 		}
   1499 		tte = *pte;
   1500 		if (tte == 0) {
   1501 			return 1;
   1502 		}
   1503 	} else {
   1504 		/* Create a 16MB writable mapping. */
   1505 #ifdef PPC_4XX_NOCACHE
   1506 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I |TTE_WR;
   1507 #else
   1508 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
   1509 #endif
   1510 	}
   1511 	tlbhit_ev.ev_count++;
   1512 	ppc4xx_tlb_enter(ctx, va, tte);
   1513 
   1514 	return 0;
   1515 }
   1516 
   1517 /*
   1518  * Flush all the entries matching a context from the TLB.
   1519  */
   1520 static int
   1521 ctx_flush(int cnum)
   1522 {
   1523 	int i;
   1524 
   1525 	/* We gotta steal this context */
   1526 	for (i = tlb_nreserved; i < NTLB; i++) {
   1527 		if (tlb_info[i].ti_ctx == cnum) {
   1528 			/* Can't steal ctx if it has a locked entry. */
   1529 			if (TLB_LOCKED(i)) {
   1530 #ifdef DIAGNOSTIC
   1531 				printf("ctx_flush: can't invalidate "
   1532 					"locked mapping %d "
   1533 					"for context %d\n", i, cnum);
   1534 #ifdef DDB
   1535 				Debugger();
   1536 #endif
   1537 #endif
   1538 				return (1);
   1539 			}
   1540 #ifdef DIAGNOSTIC
   1541 			if (i < tlb_nreserved)
   1542 				panic("TLB entry %d not locked", i);
   1543 #endif
    1544 			/* Invalidate this TLB entry; we know it is not locked. */
   1545 			__asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i));
   1546 			tlb_info[i].ti_flags = 0;
   1547 		}
   1548 	}
   1549 	return (0);
   1550 }
   1551 
   1552 /*
   1553  * Allocate a context.  If necessary, steal one from someone else.
   1554  *
   1555  * The new context is flushed from the TLB before returning.
   1556  */
   1557 int
   1558 ctx_alloc(struct pmap *pm)
   1559 {
   1560 	int s, cnum;
   1561 	static int next = MINCTX;
   1562 
   1563 	if (pm == pmap_kernel()) {
   1564 #ifdef DIAGNOSTIC
   1565 		printf("ctx_alloc: kernel pmap!\n");
   1566 #endif
   1567 		return (0);
   1568 	}
   1569 	s = splvm();
   1570 
   1571 	/* Find a likely context. */
   1572 	cnum = next;
   1573 	do {
    1574 		if ((++cnum) >= NUMCTX)
   1575 			cnum = MINCTX;
   1576 	} while (ctxbusy[cnum] != NULL && cnum != next);
   1577 
   1578 	/* Now clean it out */
   1579 oops:
   1580 	if (cnum < MINCTX)
   1581 		cnum = MINCTX; /* Never steal ctx 0 or 1 */
   1582 	if (ctx_flush(cnum)) {
   1583 		/* oops -- something's wired. */
    1584 		if ((++cnum) >= NUMCTX)
   1585 			cnum = MINCTX;
   1586 		goto oops;
   1587 	}
   1588 
   1589 	if (ctxbusy[cnum]) {
   1590 #ifdef DEBUG
   1591 		/* We should identify this pmap and clear it */
   1592 		printf("Warning: stealing context %d\n", cnum);
   1593 #endif
   1594 		ctxbusy[cnum]->pm_ctx = 0;
   1595 	}
   1596 	ctxbusy[cnum] = pm;
   1597 	next = cnum;
   1598 	splx(s);
   1599 	pm->pm_ctx = cnum;
   1600 
   1601 	return cnum;
   1602 }
   1603 
   1604 /*
   1605  * Give away a context.
   1606  */
   1607 void
   1608 ctx_free(struct pmap *pm)
   1609 {
   1610 	int oldctx;
   1611 
   1612 	oldctx = pm->pm_ctx;
   1613 
   1614 	if (oldctx == 0)
   1615 		panic("ctx_free: freeing kernel context");
   1616 #ifdef DIAGNOSTIC
   1617 	if (ctxbusy[oldctx] == 0)
   1618 		printf("ctx_free: freeing free context %d\n", oldctx);
   1619 	if (ctxbusy[oldctx] != pm) {
    1620 		printf("ctx_free: freeing someone else's context\n "
   1621 		       "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
   1622 		       oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
   1623 #ifdef DDB
   1624 		Debugger();
   1625 #endif
   1626 	}
   1627 #endif
   1628 	/* We should verify it has not been stolen and reallocated... */
   1629 	ctxbusy[oldctx] = NULL;
   1630 	ctx_flush(oldctx);
   1631 }
   1632 
   1633 
   1634 #ifdef DEBUG
   1635 /*
   1636  * Test ref/modify handling.
   1637  */
   1638 void pmap_testout __P((void));
   1639 void
   1640 pmap_testout()
   1641 {
   1642 	vaddr_t va;
   1643 	volatile int *loc;
   1644 	int val = 0;
   1645 	paddr_t pa;
   1646 	struct vm_page *pg;
   1647 	int ref, mod;
   1648 
   1649 	/* Allocate a page */
   1650 	va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
   1651 	    UVM_KMF_WIRED | UVM_KMF_ZERO);
   1652 	loc = (int*)va;
   1653 
   1654 	pmap_extract(pmap_kernel(), va, &pa);
   1655 	pg = PHYS_TO_VM_PAGE(pa);
   1656 	pmap_unwire(pmap_kernel(), va);
   1657 
   1658 	pmap_kremove(va, PAGE_SIZE);
   1659 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1660 	pmap_update(pmap_kernel());
   1661 
   1662 	/* Now clear reference and modify */
   1663 	ref = pmap_clear_reference(pg);
   1664 	mod = pmap_clear_modify(pg);
   1665 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1666 	       (void *)(u_long)va, (long)pa,
   1667 	       ref, mod);
   1668 
   1669 	/* Check it's properly cleared */
   1670 	ref = pmap_is_referenced(pg);
   1671 	mod = pmap_is_modified(pg);
   1672 	printf("Checking cleared page: ref %d, mod %d\n",
   1673 	       ref, mod);
   1674 
   1675 	/* Reference page */
   1676 	val = *loc;
   1677 
   1678 	ref = pmap_is_referenced(pg);
   1679 	mod = pmap_is_modified(pg);
   1680 	printf("Referenced page: ref %d, mod %d val %x\n",
   1681 	       ref, mod, val);
   1682 
   1683 	/* Now clear reference and modify */
   1684 	ref = pmap_clear_reference(pg);
   1685 	mod = pmap_clear_modify(pg);
   1686 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1687 	       (void *)(u_long)va, (long)pa,
   1688 	       ref, mod);
   1689 
   1690 	/* Modify page */
   1691 	*loc = 1;
   1692 
   1693 	ref = pmap_is_referenced(pg);
   1694 	mod = pmap_is_modified(pg);
   1695 	printf("Modified page: ref %d, mod %d\n",
   1696 	       ref, mod);
   1697 
   1698 	/* Now clear reference and modify */
   1699 	ref = pmap_clear_reference(pg);
   1700 	mod = pmap_clear_modify(pg);
   1701 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1702 	       (void *)(u_long)va, (long)pa,
   1703 	       ref, mod);
   1704 
   1705 	/* Check it's properly cleared */
   1706 	ref = pmap_is_referenced(pg);
   1707 	mod = pmap_is_modified(pg);
   1708 	printf("Checking cleared page: ref %d, mod %d\n",
   1709 	       ref, mod);
   1710 
   1711 	/* Modify page */
   1712 	*loc = 1;
   1713 
   1714 	ref = pmap_is_referenced(pg);
   1715 	mod = pmap_is_modified(pg);
   1716 	printf("Modified page: ref %d, mod %d\n",
   1717 	       ref, mod);
   1718 
   1719 	/* Check pmap_protect() */
   1720 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
   1721 	pmap_update(pmap_kernel());
   1722 	ref = pmap_is_referenced(pg);
   1723 	mod = pmap_is_modified(pg);
   1724 	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
   1725 	       ref, mod);
   1726 
   1727 	/* Now clear reference and modify */
   1728 	ref = pmap_clear_reference(pg);
   1729 	mod = pmap_clear_modify(pg);
   1730 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1731 	       (void *)(u_long)va, (long)pa,
   1732 	       ref, mod);
   1733 
   1734 	/* Reference page */
   1735 	val = *loc;
   1736 
   1737 	ref = pmap_is_referenced(pg);
   1738 	mod = pmap_is_modified(pg);
   1739 	printf("Referenced page: ref %d, mod %d val %x\n",
   1740 	       ref, mod, val);
   1741 
   1742 	/* Now clear reference and modify */
   1743 	ref = pmap_clear_reference(pg);
   1744 	mod = pmap_clear_modify(pg);
   1745 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1746 	       (void *)(u_long)va, (long)pa,
   1747 	       ref, mod);
   1748 
   1749 	/* Modify page */
   1750 #if 0
   1751 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1752 	pmap_update(pmap_kernel());
   1753 #endif
   1754 	*loc = 1;
   1755 
   1756 	ref = pmap_is_referenced(pg);
   1757 	mod = pmap_is_modified(pg);
   1758 	printf("Modified page: ref %d, mod %d\n",
   1759 	       ref, mod);
   1760 
   1761 	/* Check pmap_protect() */
   1762 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
   1763 	pmap_update(pmap_kernel());
   1764 	ref = pmap_is_referenced(pg);
   1765 	mod = pmap_is_modified(pg);
   1766 	printf("pmap_protect(): ref %d, mod %d\n",
   1767 	       ref, mod);
   1768 
   1769 	/* Now clear reference and modify */
   1770 	ref = pmap_clear_reference(pg);
   1771 	mod = pmap_clear_modify(pg);
   1772 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1773 	       (void *)(u_long)va, (long)pa,
   1774 	       ref, mod);
   1775 
   1776 	/* Reference page */
   1777 	val = *loc;
   1778 
   1779 	ref = pmap_is_referenced(pg);
   1780 	mod = pmap_is_modified(pg);
   1781 	printf("Referenced page: ref %d, mod %d val %x\n",
   1782 	       ref, mod, val);
   1783 
   1784 	/* Now clear reference and modify */
   1785 	ref = pmap_clear_reference(pg);
   1786 	mod = pmap_clear_modify(pg);
   1787 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1788 	       (void *)(u_long)va, (long)pa,
   1789 	       ref, mod);
   1790 
   1791 	/* Modify page */
   1792 #if 0
   1793 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1794 	pmap_update(pmap_kernel());
   1795 #endif
   1796 	*loc = 1;
   1797 
   1798 	ref = pmap_is_referenced(pg);
   1799 	mod = pmap_is_modified(pg);
   1800 	printf("Modified page: ref %d, mod %d\n",
   1801 	       ref, mod);
   1802 
    1803 	/* Check pmap_page_protect() */
   1804 	pmap_page_protect(pg, VM_PROT_READ);
   1805 	ref = pmap_is_referenced(pg);
   1806 	mod = pmap_is_modified(pg);
   1807 	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
   1808 	       ref, mod);
   1809 
   1810 	/* Now clear reference and modify */
   1811 	ref = pmap_clear_reference(pg);
   1812 	mod = pmap_clear_modify(pg);
   1813 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1814 	       (void *)(u_long)va, (long)pa,
   1815 	       ref, mod);
   1816 
   1817 	/* Reference page */
   1818 	val = *loc;
   1819 
   1820 	ref = pmap_is_referenced(pg);
   1821 	mod = pmap_is_modified(pg);
   1822 	printf("Referenced page: ref %d, mod %d val %x\n",
   1823 	       ref, mod, val);
   1824 
   1825 	/* Now clear reference and modify */
   1826 	ref = pmap_clear_reference(pg);
   1827 	mod = pmap_clear_modify(pg);
   1828 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1829 	       (void *)(u_long)va, (long)pa,
   1830 	       ref, mod);
   1831 
   1832 	/* Modify page */
   1833 #if 0
   1834 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1835 	pmap_update(pmap_kernel());
   1836 #endif
   1837 	*loc = 1;
   1838 
   1839 	ref = pmap_is_referenced(pg);
   1840 	mod = pmap_is_modified(pg);
   1841 	printf("Modified page: ref %d, mod %d\n",
   1842 	       ref, mod);
   1843 
    1844 	/* Check pmap_page_protect() */
   1845 	pmap_page_protect(pg, VM_PROT_NONE);
   1846 	ref = pmap_is_referenced(pg);
   1847 	mod = pmap_is_modified(pg);
   1848 	printf("pmap_page_protect(): ref %d, mod %d\n",
   1849 	       ref, mod);
   1850 
   1851 	/* Now clear reference and modify */
   1852 	ref = pmap_clear_reference(pg);
   1853 	mod = pmap_clear_modify(pg);
   1854 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1855 	       (void *)(u_long)va, (long)pa,
   1856 	       ref, mod);
   1857 
   1858 
   1859 	/* Reference page */
   1860 	val = *loc;
   1861 
   1862 	ref = pmap_is_referenced(pg);
   1863 	mod = pmap_is_modified(pg);
   1864 	printf("Referenced page: ref %d, mod %d val %x\n",
   1865 	       ref, mod, val);
   1866 
   1867 	/* Now clear reference and modify */
   1868 	ref = pmap_clear_reference(pg);
   1869 	mod = pmap_clear_modify(pg);
   1870 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1871 	       (void *)(u_long)va, (long)pa,
   1872 	       ref, mod);
   1873 
   1874 	/* Modify page */
   1875 #if 0
   1876 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1877 	pmap_update(pmap_kernel());
   1878 #endif
   1879 	*loc = 1;
   1880 
   1881 	ref = pmap_is_referenced(pg);
   1882 	mod = pmap_is_modified(pg);
   1883 	printf("Modified page: ref %d, mod %d\n",
   1884 	       ref, mod);
   1885 
   1886 	/* Unmap page */
   1887 	pmap_remove(pmap_kernel(), va, va+1);
   1888 	pmap_update(pmap_kernel());
   1889 	ref = pmap_is_referenced(pg);
   1890 	mod = pmap_is_modified(pg);
   1891 	printf("Unmapped page: ref %d, mod %d\n", ref, mod);
   1892 
   1893 	/* Now clear reference and modify */
   1894 	ref = pmap_clear_reference(pg);
   1895 	mod = pmap_clear_modify(pg);
   1896 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1897 	       (void *)(u_long)va, (long)pa, ref, mod);
   1898 
   1899 	/* Check it's properly cleared */
   1900 	ref = pmap_is_referenced(pg);
   1901 	mod = pmap_is_modified(pg);
   1902 	printf("Checking cleared page: ref %d, mod %d\n",
   1903 	       ref, mod);
   1904 
   1905 	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
   1906 	pmap_kenter_pa(va, pa, VM_PROT_ALL);
   1907 	uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE, UVM_KMF_WIRED);
   1908 }
   1909 #endif
   1910