/*	$NetBSD: pmap.c,v 1.60.2.1 2010/04/27 07:19:29 uebayasi Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.60.2.1 2010/04/27 07:19:29 uebayasi Exp $");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/device.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/powerpc.h>

#include <powerpc/spr.h>
#include <machine/tlb.h>

/*
 * kernmap is an array of PTEs large enough to map in
 * 4GB.  At 16KB/page it is 256K entries or 2MB.
 */
#define KERNMAP_SIZE	((0xffffffffU/PAGE_SIZE)+1)
void *kernmap;

#define MINCTX		2
#define NUMCTX		256

volatile struct pmap *ctxbusy[NUMCTX];

#define TLBF_USED	0x1
#define	TLBF_REF	0x2
#define	TLBF_LOCKED	0x4
#define	TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)

typedef struct tlb_info_s {
	char	ti_flags;
	char	ti_ctx;		/* TLB_PID associated with the entry */
	u_int	ti_va;
} tlb_info_t;

volatile tlb_info_t tlb_info[NTLB];
/* We'll use a modified FIFO replacement policy because it's cheap */
volatile int tlbnext;

static int tlb_nreserved = 0;
static int pmap_bootstrap_done = 0;

/* Event counters */
struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbmiss");
struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbhit");
struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbflush");
struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbenter");

struct pmap kernel_pmap_;
struct pmap *const kernel_pmap_ptr = &kernel_pmap_;

static int npgs;
static u_int nextavail;
#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;

/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTRSHFT.
 */
static char *pmap_attrib;

#define PV_WIRED	0x1
#define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
#define PV_UNWIRE(pv)	((pv)->pv_va &= ~PV_WIRED)
#define PV_ISWIRED(pv)	((pv)->pv_va & PV_WIRED)
#define PV_CMPVA(va,pv)	(!(((pv)->pv_va ^ (va)) & (~PV_WIRED)))

struct pv_entry {
	struct pv_entry *pv_next;	/* Linked list of mappings */
	vaddr_t pv_va;			/* virtual address of mapping */
	struct pmap *pv_pm;
};
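
/*
 * Since pv_va is always page aligned, its low bit is free to carry the
 * "wired" flag; PV_CMPVA() masks that bit back out before comparing.
 * Illustrative sketch only (not part of the build):
 */
#if 0
	struct pv_entry pv = { .pv_va = 0x2000 };	/* page-aligned VA */

	PV_WIRE(&pv);				/* pv_va becomes 0x2001 */
	KASSERT(PV_ISWIRED(&pv));
	KASSERT(PV_CMPVA(0x2000, &pv));		/* still matches the VA */
	PV_UNWIRE(&pv);				/* back to 0x2000 */
#endif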

/* Each index corresponds to TLB_SIZE_* value. */
static size_t tlbsize[] = {
	1024, 		/* TLB_SIZE_1K */
	4096, 		/* TLB_SIZE_4K */
	16384, 		/* TLB_SIZE_16K */
	65536, 		/* TLB_SIZE_64K */
	262144, 	/* TLB_SIZE_256K */
	1048576, 	/* TLB_SIZE_1M */
	4194304, 	/* TLB_SIZE_4M */
	16777216, 	/* TLB_SIZE_16M */
};

struct pv_entry *pv_table;
static struct pool pv_pool;

static int pmap_initialized;

static int ctx_flush(int);

inline struct pv_entry *pa_to_pv(paddr_t);
static inline char *pa_to_attr(paddr_t);

static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
static inline int pte_enter(struct pmap *, vaddr_t, u_int);

static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, int);
static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);

static int ppc4xx_tlb_size_mask(size_t, int *, int *);


inline struct pv_entry *
pa_to_pv(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.pvent[pg];
}

static inline char *
pa_to_attr(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
}
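
/*
 * Both helpers map a physical address to its per-page metadata through
 * the vm_physseg tables that pmap_init() fills in.  A minimal usage
 * sketch (illustrative only; assumes pa is a managed RAM page):
 */
#if 0
	char *attr = pa_to_attr(pa);

	if (attr != NULL)
		*attr |= PMAP_ATTR_REF;		/* note a reference by hand */
#endif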

/*
 * Insert PTE into page table.
 */
int
pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int oldpte;

	if (!pm->pm_ptbl[seg]) {
		/* Don't allocate a page to clear a non-existent mapping. */
		if (!pte)
			return (0);
		/* Allocate a page XXXX this will sleep! */
		pm->pm_ptbl[seg] =
		    (uint *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
		    UVM_KMF_WIRED | UVM_KMF_ZERO);
	}
	oldpte = pm->pm_ptbl[seg][ptn];
	pm->pm_ptbl[seg][ptn] = pte;

	/* Flush entry. */
	ppc4xx_tlb_flush(va, pm->pm_ctx);
	if (oldpte != pte) {
		if (pte == 0)
			pm->pm_stats.resident_count--;
		else
			pm->pm_stats.resident_count++;
	}
	return (1);
}

/*
 * Get a pointer to a PTE in a page table.
 */
volatile u_int *
pte_find(struct pmap *pm, vaddr_t va)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);

	if (pm->pm_ptbl[seg])
		return (&pm->pm_ptbl[seg][ptn]);

	return (NULL);
}
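
/*
 * The page table is two-level: STIDX(va) selects one of the STSZ
 * segment pointers in pm_ptbl[], and PTIDX(va) indexes the PTEs inside
 * that page.  A hand-rolled walk equivalent to pte_find() would be
 * (illustrative sketch, not part of the build):
 */
#if 0
	u_int *ptbl = pm->pm_ptbl[STIDX(va)];	/* level 1: segment page */
	u_int pte = ptbl ? ptbl[PTIDX(va)] : 0;	/* level 2: the PTE itself */
#endif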

/*
 * This is called during initppc, before the system is really initialized.
 */
void
pmap_bootstrap(u_int kernelstart, u_int kernelend)
{
	struct mem_region *mp, *mp1;
	int cnt, i;
	u_int s, e, sz;

	tlbnext = tlb_nreserved;

	/*
	 * Allocate the kernel page table at the end of
	 * kernel space so it's in the locked TTE.
	 */
	kernmap = (void *)kernelend;

	/*
	 * Initialize kernel page table.
	 */
	for (i = 0; i < STSZ; i++) {
		pmap_kernel()->pm_ptbl[i] = 0;
	}
	ctxbusy[0] = ctxbusy[1] = pmap_kernel();

	/*
	 * Announce the page size to the VM system
	 */
	uvmexp.pagesize = NBPG;
	uvm_setpagesize();

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++) {
		physmem += btoc(mp->size);
		printf("+%lx,",mp->size);
	}
	printf("\n");
	ppc4xx_tlb_init();
	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++)
		cnt++;

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PGOFSET;
	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		printf("%08x-%08x -> ",s,e);
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Check whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		printf("%08x-%08x = %x\n",s,e,sz);
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
				(cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}
		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);
		for (mp1 = avail; mp1 < mp; mp1++)
			if (s < mp1->start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
#ifndef MSGBUFADDR
	/*
	 * allow for msgbuf
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;
	for (mp1 = avail; mp1->size; mp1++)
		if (mp1->size >= sz)
			mp = mp1;
	if (mp == NULL)
		panic("not enough memory?");

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;
	if (mp->size <= 0)
		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
#endif

	for (mp = avail; mp->size; mp++)
		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
			atop(mp->start), atop(mp->start + mp->size),
			VM_FREELIST_DEFAULT);

	/*
	 * Initialize kernel pmap and hardware.
	 */
	/* Set up the TLB PID allocator so it knows we are already using PID 1 */
	pmap_kernel()->pm_ctx = KERNEL_PID;
	nextavail = avail->start;

	evcnt_attach_static(&tlbmiss_ev);
	evcnt_attach_static(&tlbhit_ev);
	evcnt_attach_static(&tlbflush_ev);
	evcnt_attach_static(&tlbenter_ev);

	pmap_bootstrap_done = 1;
}

/*
 * Restrict given range to physical memory
 *
 * (Used by /dev/mem)
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start &&
		    *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	struct pv_entry *pv;
	vsize_t sz;
	vaddr_t addr;
	int i, s;
	int bank;
	char *attr;

	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
	sz = round_page(sz);
	addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
	s = splvm();
	pv = pv_table = (struct pv_entry *)addr;
	for (i = npgs; --i >= 0;)
		pv++->pv_pm = NULL;
	pmap_attrib = (char *)pv;
	memset(pv, 0, npgs);

	pv = pv_table;
	attr = pmap_attrib;
	for (bank = 0; bank < vm_nphysseg; bank++) {
		sz = vm_physmem[bank].end - vm_physmem[bank].start;
		vm_physmem[bank].pmseg.pvent = pv;
		vm_physmem[bank].pmseg.attrs = attr;
		pv += sz;
		attr += sz;
	}

	pmap_initialized = 1;
	splx(s);

	/* Setup a pool for additional pvlist structures */
	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL,
	    IPL_VM);
}

/*
 * How much virtual space is available to the kernel?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{

#if 0
	/*
	 * Reserve one segment for kernel virtual memory
	 */
	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
	*end = *start + SEGMENT_LENGTH;
#else
	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
#endif
}

#ifdef PMAP_GROWKERNEL
/*
 * Preallocate kernel page tables to a specified VA.
 * This simply loops through the first TTE for each
 * page table from the beginning of the kernel pmap,
 * reads the entry, and if the result is
 * zero (either invalid entry or no page table) it stores
 * a zero there, populating page tables in the process.
 * This is not the most efficient technique but I don't
 * expect it to be called that often.
 */
extern struct vm_page *vm_page_alloc1(void);
extern void vm_page_free1(struct vm_page *);

vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;

vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	int s;
	int seg;
	paddr_t pg;
	struct pmap *pm = pmap_kernel();

	s = splvm();

	/* Align with the start of a page table */
	for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
	     kbreak += PTMAP) {
		seg = STIDX(kbreak);

		if (pte_find(pm, kbreak))
			continue;

		if (uvm.page_init_done) {
			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
		} else {
			if (!uvm_page_physget(&pg))
				panic("pmap_growkernel: no memory");
		}
		if (!pg)
			panic("pmap_growkernel: no pages");
		pmap_zero_page((paddr_t)pg);
		/* XXX This is based on all physmem being addressable */
		pm->pm_ptbl[seg] = (u_int *)pg;
	}
	splx(s);
	return (kbreak);
}

/*
 *	vm_page_alloc1:
 *
 *	Allocate and return a memory cell with no associated object.
 */
struct vm_page *
vm_page_alloc1(void)
{
	struct vm_page *pg;

	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (pg) {
		pg->wire_count = 1;	/* no mappings yet */
		pg->flags &= ~PG_BUSY;	/* never busy */
	}
	return pg;
}

/*
 *	vm_page_free1:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free1(struct vm_page *pg)
{
#ifdef DIAGNOSTIC
	if (pg->flags != (PG_CLEAN|PG_FAKE)) {
		printf("Freeing invalid page %p\n", pg);
		printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(pg));
#ifdef DDB
		Debugger();
#endif
		return;
	}
#endif
	pg->flags |= PG_BUSY;
	pg->wire_count = 0;
	uvm_pagefree(pg);
}
#endif

/*
 * Create and return a physical map.
 */
struct pmap *
pmap_create(void)
{
	struct pmap *pm;

	pm = malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
	memset(pm, 0, sizeof *pm);
	pm->pm_refs = 1;
	return pm;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{
	int i;

	if (--pm->pm_refs > 0) {
		return;
	}
	KASSERT(pm->pm_stats.resident_count == 0);
	KASSERT(pm->pm_stats.wired_count == 0);
	for (i = 0; i < STSZ; i++)
		if (pm->pm_ptbl[i]) {
			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
			    PAGE_SIZE, UVM_KMF_WIRED);
			pm->pm_ptbl[i] = NULL;
		}
	if (pm->pm_ctx)
		ctx_free(pm);
	free(pm, M_VMPMAP);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
	  vsize_t len, vaddr_t src_addr)
{
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(paddr_t pa)
{

#ifdef PPC_4XX_NOCACHE
	memset((void *)pa, 0, PAGE_SIZE);
#else
	int i;

	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
		__asm volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}
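
/*
 * Note: "dcbz" zeroes a whole data cache line without fetching it from
 * memory first, so the cached variant above establishes and clears
 * PAGE_SIZE/CACHELINESIZE lines rather than issuing per-word stores.
 */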

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{

	memcpy((void *)dst, (void *)src, PAGE_SIZE);
	dcache_flush_page(dst);
}

/*
 * This returns != 0 on success.
 */
static inline int
pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa, int flags)
{
	struct pv_entry *pv, *npv = NULL;
	int s;

	if (!pmap_initialized)
		return 0;

	s = splvm();
	pv = pa_to_pv(pa);
	if (!pv->pv_pm) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_pm = pm;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pool_get(&pv_pool, PR_NOWAIT);
		if (npv == NULL) {
			if ((flags & PMAP_CANFAIL) == 0)
				panic("pmap_enter_pv: failed");
			splx(s);
			return 0;
		}
		npv->pv_va = va;
		npv->pv_pm = pm;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
		pv = npv;
	}
	if (flags & PMAP_WIRED) {
		PV_WIRE(pv);
		pm->pm_stats.wired_count++;
	}
	splx(s);
	return (1);
}

static void
pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
{
	struct pv_entry *pv, *npv;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);
	if (!pv)
		return;

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
		if (PV_ISWIRED(pv)) {
			pm->pm_stats.wired_count--;
		}
		if ((npv = pv->pv_next)) {
			*pv = *npv;
			pool_put(&pv_pool, npv);
		} else
			pv->pv_pm = NULL;
	} else {
		for (; (npv = pv->pv_next) != NULL; pv = npv)
			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
				break;
		if (npv) {
			pv->pv_next = npv->pv_next;
			if (PV_ISWIRED(npv)) {
				pm->pm_stats.wired_count--;
			}
			pool_put(&pv_pool, npv);
		}
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
int
pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	int s;
	u_int tte;
	bool managed;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + PAGE_SIZE);

	if (flags & PMAP_WIRED)
		flags |= prot;

	managed = ((flags & PMAP_UNMANAGED) == 0) && uvm_pageismanaged(pa);

	/*
	 * Generate TTE.
	 */
	tte = TTE_PA(pa);
	/* XXXX -- need to support multiple page sizes. */
	tte |= TTE_SZ_16K;
#ifdef	DIAGNOSTIC
	if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) ==
		(PME_NOCACHE | PME_WRITETHROUG))
		panic("pmap_enter: uncached & writethrough");
#endif
	if (flags & PME_NOCACHE)
		/* Must be I/O mapping */
		tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
	tte |= TTE_I;
#else
	else if (flags & PME_WRITETHROUG)
		/* Uncached and writethrough are not compatible */
		tte |= TTE_W;
#endif
	if (pm == pmap_kernel())
		tte |= TTE_ZONE(ZONE_PRIV);
	else
		tte |= TTE_ZONE(ZONE_USER);

	if (flags & VM_PROT_WRITE)
		tte |= TTE_WR;

	if (flags & VM_PROT_EXECUTE)
		tte |= TTE_EX;

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && managed) {
		char *attr;

		if (!pmap_enter_pv(pm, va, pa, flags)) {
			/* Could not enter pv on a managed page */
			return 1;
		}

		/* Now set attributes. */
		attr = pa_to_attr(pa);
#ifdef DIAGNOSTIC
		if (!attr)
			panic("managed but no attr");
#endif
		if (flags & VM_PROT_ALL)
			*attr |= PMAP_ATTR_REF;
		if (flags & VM_PROT_WRITE)
			*attr |= PMAP_ATTR_CHG;
	}

	s = splvm();

	/* Insert page into page table. */
	pte_enter(pm, va, tte);

	/* If this is a real fault, enter it in the tlb */
	if (tte && ((flags & PMAP_WIRED) == 0)) {
		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
	}
	splx(s);

	/* Flush the real memory from the instruction cache. */
	if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
		__syncicache((void *)pa, PAGE_SIZE);

	return 0;
}
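
/*
 * Typical call sequence for a managed mapping, as seen from UVM
 * (illustrative sketch only; error handling depends on PMAP_CANFAIL):
 */
#if 0
	if (pmap_enter(pm, va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_CANFAIL) != 0) {
		/* pv allocation failed; caller may retry or fail the fault */
	}
	pmap_update(pm);
#endif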

void
pmap_unwire(struct pmap *pm, vaddr_t va)
{
	struct pv_entry *pv;
	paddr_t pa;
	int s;

	if (!pmap_extract(pm, va, &pa)) {
		return;
	}

	pv = pa_to_pv(pa);
	if (!pv)
		return;

	s = splvm();
	while (pv != NULL) {
		if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
			if (PV_ISWIRED(pv)) {
				PV_UNWIRE(pv);
				pm->pm_stats.wired_count--;
			}
			break;
		}
		pv = pv->pv_next;
	}
	splx(s);
}

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	int s;
	u_int tte;
	struct pmap *pm = pmap_kernel();

	/*
	 * Have to remove any existing mapping first.
	 */

	/*
	 * Generate TTE.
	 *
	 * XXXX
	 *
	 * Since the kernel does not handle execution privileges properly,
	 * we will handle read and execute permissions together.
	 */
	tte = 0;
	if (prot & VM_PROT_ALL) {

		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
		/* XXXX -- need to support multiple page sizes. */
		tte |= TTE_SZ_16K;
#ifdef DIAGNOSTIC
		if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) ==
			(PME_NOCACHE | PME_WRITETHROUG))
			panic("pmap_kenter_pa: uncached & writethrough");
#endif
		if (prot & PME_NOCACHE)
			/* Must be I/O mapping */
			tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
		tte |= TTE_I;
#else
		else if (prot & PME_WRITETHROUG)
			/* Uncached and writethrough are not compatible */
			tte |= TTE_W;
#endif
		if (prot & VM_PROT_WRITE)
			tte |= TTE_WR;
	}

	s = splvm();

	/* Insert page into page table. */
	pte_enter(pm, va, tte);
	splx(s);
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{

	while (len > 0) {
		pte_enter(pmap_kernel(), va, 0);
		va += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
}
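
/*
 * pmap_kenter_pa()/pmap_kremove() handle kernel-only mappings that are
 * never managed: no pv list, no ref/mod tracking.  Illustrative pairing
 * (sketch only):
 */
#if 0
	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
	/* ... use the mapping ... */
	pmap_kremove(va, PAGE_SIZE);
#endif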

/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
{
	int s;
	paddr_t pa;
	volatile u_int *ptp;

	s = splvm();
	while (va < endva) {

		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
			pa = TTE_PA(pa);
			pmap_remove_pv(pm, va, pa);
			*ptp = 0;
			ppc4xx_tlb_flush(va, pm->pm_ctx);
			pm->pm_stats.resident_count--;
		}
		va += PAGE_SIZE;
	}

	splx(s);
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
bool
pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int pa = 0;
	int s;

	s = splvm();
	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
		*pap = TTE_PA(pa) | (va & PGOFSET);
	}
	splx(s);
	return (pa != 0);
}
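
/*
 * Example lookup of the physical address behind a kernel virtual
 * address (illustrative sketch only):
 */
#if 0
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa))
		printf("va %#lx -> pa %#lx\n", va, (u_long)pa);
#endif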

/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	volatile u_int *ptp;
	int s, bic;

	if ((prot & VM_PROT_READ) == 0) {
		pmap_remove(pm, sva, eva);
		return;
	}
	bic = 0;
	if ((prot & VM_PROT_WRITE) == 0) {
		bic |= TTE_WR;
	}
	if ((prot & VM_PROT_EXECUTE) == 0) {
		bic |= TTE_EX;
	}
	if (bic == 0) {
		return;
	}
	s = splvm();
	while (sva < eva) {
		if ((ptp = pte_find(pm, sva)) != NULL) {
			*ptp &= ~bic;
			ppc4xx_tlb_flush(sva, pm->pm_ctx);
		}
		sva += PAGE_SIZE;
	}
	splx(s);
}

bool
pmap_check_attr(struct vm_page *pg, u_int mask, int clear)
{
	paddr_t pa;
	char *attr;
	int s, rv;

	/*
	 * First modify bits in cache.
	 */
	pa = VM_PAGE_TO_PHYS(pg);
	attr = pa_to_attr(pa);
	if (attr == NULL)
		return false;

	s = splvm();
	rv = ((*attr & mask) != 0);
	if (clear) {
		*attr &= ~mask;
		pmap_page_protect(pg, mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
	}
	splx(s);
	return rv;
}


/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	paddr_t pa = VM_PAGE_TO_PHYS(pg);
	vaddr_t va;
	struct pv_entry *pvh, *pv, *npv;
	struct pmap *pm;

	pvh = pa_to_pv(pa);
	if (pvh == NULL)
		return;

	/* Handle extra pvs which may be deleted in the operation */
	for (pv = pvh->pv_next; pv; pv = npv) {
		npv = pv->pv_next;

		pm = pv->pv_pm;
		va = pv->pv_va;
		pmap_protect(pm, va, va + PAGE_SIZE, prot);
	}
	/* Now check the head pv */
	if (pvh->pv_pm) {
		pv = pvh;
		pm = pv->pv_pm;
		va = pv->pv_va;
		pmap_protect(pm, va, va + PAGE_SIZE, prot);
	}
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct lwp *l)
{
#if 0
	struct pcb *pcb = &l->l_proc->p_addr->u_pcb;
	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	printf("pmap_activate(%p), pmap=%p\n",l,pmap);
	pcb->pcb_pm = pmap;
#endif
}

/*
 * Deactivate the specified process's address space.
 */
void
pmap_deactivate(struct lwp *l)
{
}

/*
 * Synchronize caches corresponding to [addr, addr+len) in p.
 */
void
pmap_procwr(struct proc *p, vaddr_t va, size_t len)
{
	struct pmap *pm = p->p_vmspace->vm_map.pmap;
	int msr, ctx, opid, step;

	step = CACHELINESIZE;

	/*
	 * Need to turn off IMMU and switch to user context.
	 * (icbi uses DMMU).
	 */
	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}
	__asm volatile("mfmsr %0;"
		"li %1, %7;"
		"andc %1,%0,%1;"
		"mtmsr %1;"
		"sync;isync;"
		"mfpid %1;"
		"mtpid %2;"
		"sync; isync;"
		"1:"
		"dcbf 0,%3;"
		"icbi 0,%3;"
		"add %3,%3,%5;"
		"addc. %4,%4,%6;"
		"bge 1b;"
		"mtpid %1;"
		"mtmsr %0;"
		"sync; isync"
		: "=&r" (msr), "=&r" (opid)
		: "r" (ctx), "r" (va), "r" (len), "r" (step), "r" (-step),
		  "K" (PSL_IR | PSL_DR));
}


/* This has to be done in real mode !!! */
void
ppc4xx_tlb_flush(vaddr_t va, int pid)
{
	u_long i, found;
	u_long msr;

	/* If there's no context then it can't be mapped. */
	if (!pid)
		return;

	__asm(	"mfpid %1;"		/* Save PID */
		"mfmsr %2;"		/* Save MSR */
		"li %0,0;"		/* Now clear MSR */
		"mtmsr %0;"
		"mtpid %4;"		/* Set PID */
		"sync;"
		"tlbsx. %0,0,%3;"	/* Search TLB */
		"sync;"
		"mtpid %1;"		/* Restore PID */
		"mtmsr %2;"		/* Restore MSR */
		"sync;isync;"
		"li %1,1;"
		"beq 1f;"
		"li %1,0;"
		"1:"
		: "=&r" (i), "=&r" (found), "=&r" (msr)
		: "r" (va), "r" (pid));
	if (found && !TLB_LOCKED(i)) {

		/* Now flush translation */
		__asm volatile(
			"tlbwe %0,%1,0;"
			"sync;isync;"
			: : "r" (0), "r" (i));

		tlb_info[i].ti_ctx = 0;
		tlb_info[i].ti_flags = 0;
		tlbnext = i;
		/* Successful flushes */
		tlbflush_ev.ev_count++;
	}
}

void
ppc4xx_tlb_flush_all(void)
{
	u_long i;

	for (i = 0; i < NTLB; i++)
		if (!TLB_LOCKED(i)) {
			__asm volatile(
				"tlbwe %0,%1,0;"
				"sync;isync;"
				: : "r" (0), "r" (i));
			tlb_info[i].ti_ctx = 0;
			tlb_info[i].ti_flags = 0;
		}

	__asm volatile("sync;isync");
}

/* Find a TLB entry to evict. */
static int
ppc4xx_tlb_find_victim(void)
{
	int flags;

	for (;;) {
		if (++tlbnext >= NTLB)
			tlbnext = tlb_nreserved;
		flags = tlb_info[tlbnext].ti_flags;
		if (!(flags & TLBF_USED) ||
			(flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
			u_long va, stack = (u_long)&va;

			if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) &&
			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
			     (flags & TLBF_USED)) {
				/* Kernel stack page */
				flags |= TLBF_USED;
				tlb_info[tlbnext].ti_flags = flags;
			} else {
				/* Found it! */
				return (tlbnext);
			}
		} else {
			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
		}
	}
}

void
ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
{
	u_long th, tl, idx;
	tlbpid_t pid;
	u_short msr;
	paddr_t pa;
	int s, sz;

	tlbenter_ev.ev_count++;

	sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
	pa = (pte & TTE_RPN_MASK(sz));
	th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
	tl = (pte & ~TLB_RPN_MASK) | pa;
	tl |= ppc4xx_tlbflags(va, pa);

	s = splhigh();
	idx = ppc4xx_tlb_find_victim();

#ifdef DIAGNOSTIC
	if ((idx < tlb_nreserved) || (idx >= NTLB)) {
		panic("ppc4xx_tlb_enter: replacing entry %ld", idx);
	}
#endif

	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
	tlb_info[idx].ti_ctx = ctx;
	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;

	__asm volatile(
		"mfmsr %0;"			/* Save MSR */
		"li %1,0;"
		"tlbwe %1,%3,0;"		/* Invalidate old entry. */
		"mtmsr %1;"			/* Clear MSR */
		"mfpid %1;"			/* Save old PID */
		"mtpid %2;"			/* Load translation ctx */
		"sync; isync;"
#ifdef DEBUG
		"andi. %3,%3,63;"
		"tweqi %3,0;" 			/* XXXXX DEBUG trap on index 0 */
#endif
		"tlbwe %4,%3,1; tlbwe %5,%3,0;"	/* Set TLB */
		"sync; isync;"
		"mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
		"sync; isync;"
	: "=&r" (msr), "=&r" (pid)
	: "r" (ctx), "r" (idx), "r" (tl), "r" (th));
	splx(s);
}

void
ppc4xx_tlb_init(void)
{
	int i;

	/* Mark reserved TLB entries */
	for (i = 0; i < tlb_nreserved; i++) {
		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
		tlb_info[i].ti_ctx = KERNEL_PID;
	}

	/* Setup security zones */
	/* Z0 - accessible by kernel only if TLB entry permissions allow
	 * Z1,Z2 - access is controlled by TLB entry permissions
	 * Z3 - full access regardless of TLB entry permissions
	 */

	__asm volatile(
		"mtspr %0,%1;"
		"sync;"
		::  "K"(SPR_ZPR), "r" (0x1b000000));
}

/*
 * ppc4xx_tlb_size_mask:
 *
 * 	Roundup size to supported page size, return TLBHI mask and real size.
 */
static int
ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz)
{
	int 			i;

	for (i = 0; i < __arraycount(tlbsize); i++)
		if (size <= tlbsize[i]) {
			*mask = (i << TLB_SIZE_SHFT);
			*rsiz = tlbsize[i];
			return (0);
		}
	return (EINVAL);
}
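
/*
 * Example: a request for 5000 bytes falls through the 1K and 4K classes
 * and lands on index 2 (TLB_SIZE_16K), so *mask becomes
 * (2 << TLB_SIZE_SHFT) and *rsiz becomes 16384.  Illustrative sketch only:
 */
#if 0
	int mask, rsiz;

	if (ppc4xx_tlb_size_mask(5000, &mask, &rsiz) == 0)
		KASSERT(rsiz == 16384);
#endif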

/*
 * ppc4xx_tlb_mapiodev:
 *
 * 	Lookup virtual address of mapping previously entered via
 * 	ppc4xx_tlb_reserve. Search TLB directly so that we don't
 * 	need to waste extra storage for reserved mappings. Note
 * 	that reading TLBHI also sets PID, but all reserved mappings
 * 	use KERNEL_PID, so the side effect is nil.
 */
void *
ppc4xx_tlb_mapiodev(paddr_t base, psize_t len)
{
	paddr_t 		pa;
	vaddr_t 		va;
	u_int 			lo, hi, sz;
	int 			i;

	/* tlb_nreserved is only allowed to grow, so this is safe. */
	for (i = 0; i < tlb_nreserved; i++) {
		__asm volatile (
		    "	tlbre %0,%2,1 	\n" 	/* TLBLO */
		    "	tlbre %1,%2,0 	\n" 	/* TLBHI */
		    : "=&r" (lo), "=&r" (hi)
		    : "r" (i));

		KASSERT(hi & TLB_VALID);
		KASSERT(mfspr(SPR_PID) == KERNEL_PID);

		pa = (lo & TLB_RPN_MASK);
		if (base < pa)
			continue;

		sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT];
		if ((base + len) > (pa + sz))
			continue;

		va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); 	/* sz = 2^n */
		return (void *)(va);
	}

	return (NULL);
}

/*
 * ppc4xx_tlb_reserve:
 *
 * 	Map physical range to kernel virtual chunk via reserved TLB entry.
 */
void
ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags)
{
	u_int 			lo, hi;
	int 			szmask, rsize;

	/* Called before pmap_bootstrap(), va outside kernel space. */
	KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS);
	KASSERT(! pmap_bootstrap_done);
	KASSERT(tlb_nreserved < NTLB);

	/* Resolve size. */
	if (ppc4xx_tlb_size_mask(size, &szmask, &rsize) != 0)
		panic("ppc4xx_tlb_reserve: entry %d, %zuB too large",
		    tlb_nreserved, size);

	/* Real size will be power of two >= 1024, so this is OK. */
	pa &= ~(rsize - 1); 	/* RPN */
	va &= ~(rsize - 1); 	/* EPN */

	lo = pa | TLB_WR | flags;
	hi = va | TLB_VALID | szmask;

#ifdef PPC_4XX_NOCACHE
	lo |= TLB_I;
#endif

	__asm volatile(
	    "	tlbwe %1,%0,1 	\n" 	/* write TLBLO */
	    "	tlbwe %2,%0,0 	\n" 	/* write TLBHI */
	    "   sync 		\n"
	    "	isync 		\n"
	    : : "r" (tlb_nreserved), "r" (lo), "r" (hi));

	tlb_nreserved++;
}
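
/*
 * Board attach code calls ppc4xx_tlb_reserve() before pmap_bootstrap()
 * to pin device or RAM ranges with locked TLB entries, e.g. an uncached
 * 1:1 mapping of an on-chip UART.  Hypothetical sketch (the address and
 * size below are made up):
 */
#if 0
	ppc4xx_tlb_reserve(0xef600000, 0xef600000, 0x1000, TLB_I);
#endif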

/*
 * We should pass the ctx in from trap code.
 */
int
pmap_tlbmiss(vaddr_t va, int ctx)
{
	volatile u_int *pte;
	u_long tte;

	tlbmiss_ev.ev_count++;

	/*
	 * We will reserve 0 up to VM_MIN_KERNEL_ADDRESS for va == pa mappings.
	 * Physical RAM is expected to live in this range; care must be taken
	 * not to clobber 0 up to ${physmem} with device mappings in machdep
	 * code.
	 */
	if (ctx != KERNEL_PID || va >= VM_MIN_KERNEL_ADDRESS) {
		pte = pte_find((struct pmap *)__UNVOLATILE(ctxbusy[ctx]), va);
		if (pte == NULL) {
			/* Map unmanaged addresses directly for kernel access */
			return 1;
		}
		tte = *pte;
		if (tte == 0) {
			return 1;
		}
	} else {
		/* Create a 16MB writable mapping. */
#ifdef PPC_4XX_NOCACHE
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I |TTE_WR;
#else
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
#endif
	}
	tlbhit_ev.ev_count++;
	ppc4xx_tlb_enter(ctx, va, tte);

	return 0;
}

/*
 * Flush all the entries matching a context from the TLB.
 */
static int
ctx_flush(int cnum)
{
	int i;

	/* We gotta steal this context */
	for (i = tlb_nreserved; i < NTLB; i++) {
		if (tlb_info[i].ti_ctx == cnum) {
			/* Can't steal ctx if it has a locked entry. */
			if (TLB_LOCKED(i)) {
#ifdef DIAGNOSTIC
				printf("ctx_flush: can't invalidate "
					"locked mapping %d "
					"for context %d\n", i, cnum);
#ifdef DDB
				Debugger();
#endif
#endif
				return (1);
			}
#ifdef DIAGNOSTIC
			if (i < tlb_nreserved)
				panic("TLB entry %d not locked", i);
#endif
			/*
			 * Invalidate particular TLB entry regardless of
			 * locked status
			 */
			__asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i));
			tlb_info[i].ti_flags = 0;
		}
	}
	return (0);
}

/*
 * Allocate a context.  If necessary, steal one from someone else.
 *
 * The new context is flushed from the TLB before returning.
 */
int
ctx_alloc(struct pmap *pm)
{
	int s, cnum;
	static int next = MINCTX;

	if (pm == pmap_kernel()) {
#ifdef DIAGNOSTIC
		printf("ctx_alloc: kernel pmap!\n");
#endif
		return (0);
	}
	s = splvm();

	/* Find a likely context. */
	cnum = next;
	do {
		if ((++cnum) >= NUMCTX)
			cnum = MINCTX;
	} while (ctxbusy[cnum] != NULL && cnum != next);

	/* Now clean it out */
oops:
	if (cnum < MINCTX)
		cnum = MINCTX; /* Never steal ctx 0 or 1 */
	if (ctx_flush(cnum)) {
		/* oops -- something's wired. */
		if ((++cnum) >= NUMCTX)
			cnum = MINCTX;
		goto oops;
	}

	if (ctxbusy[cnum]) {
#ifdef DEBUG
		/* We should identify this pmap and clear it */
		printf("Warning: stealing context %d\n", cnum);
#endif
		ctxbusy[cnum]->pm_ctx = 0;
	}
	ctxbusy[cnum] = pm;
	next = cnum;
	splx(s);
	pm->pm_ctx = cnum;

	return cnum;
}

/*
 * Give away a context.
 */
void
ctx_free(struct pmap *pm)
{
	int oldctx;

	oldctx = pm->pm_ctx;

	if (oldctx == 0)
		panic("ctx_free: freeing kernel context");
#ifdef DIAGNOSTIC
	if (ctxbusy[oldctx] == 0)
		printf("ctx_free: freeing free context %d\n", oldctx);
	if (ctxbusy[oldctx] != pm) {
		printf("ctx_free: freeing someone else's context\n "
		       "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
		       oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
#ifdef DDB
		Debugger();
#endif
	}
#endif
	/* We should verify it has not been stolen and reallocated... */
	ctxbusy[oldctx] = NULL;
	ctx_flush(oldctx);
}


#ifdef DEBUG
/*
 * Test ref/modify handling.
 */
void pmap_testout(void);
void
pmap_testout(void)
{
	vaddr_t va;
	volatile int *loc;
	int val = 0;
	paddr_t pa;
	struct vm_page *pg;
	int ref, mod;

	/* Allocate a page */
	va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	loc = (int*)va;

	pmap_extract(pmap_kernel(), va, &pa);
	pg = PHYS_TO_VM_PAGE(pa);
	pmap_unwire(pmap_kernel(), va);

	pmap_kremove(va, PAGE_SIZE);
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	       ref, mod);

	/* Modify page */
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Check pmap_protect() */
	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Check pmap_protect() */
	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_protect(): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Check pmap_page_protect() */
	pmap_page_protect(pg, VM_PROT_READ);
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Check pmap_page_protect() */
	pmap_page_protect(pg, VM_PROT_NONE);
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("pmap_page_protect(): ref %d, mod %d\n",
	       ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);


	/* Reference page */
	val = *loc;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Referenced page: ref %d, mod %d val %x\n",
	       ref, mod, val);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa,
	       ref, mod);

	/* Modify page */
#if 0
	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
	pmap_update(pmap_kernel());
#endif
	*loc = 1;

	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Modified page: ref %d, mod %d\n",
	       ref, mod);

	/* Unmap page */
	pmap_remove(pmap_kernel(), va, va+1);
	pmap_update(pmap_kernel());
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Unmapped page: ref %d, mod %d\n", ref, mod);

	/* Now clear reference and modify */
	ref = pmap_clear_reference(pg);
	mod = pmap_clear_modify(pg);
	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
	       (void *)(u_long)va, (long)pa, ref, mod);

	/* Check it's properly cleared */
	ref = pmap_is_referenced(pg);
	mod = pmap_is_modified(pg);
	printf("Checking cleared page: ref %d, mod %d\n",
	       ref, mod);

	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
	pmap_kenter_pa(va, pa, VM_PROT_ALL, 0);
	uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE, UVM_KMF_WIRED);
}
#endif
