      1  1.5     eeh /*	$NetBSD: pmap.c,v 1.5 2001/09/11 04:35:43 eeh Exp $	*/
      2  1.1  simonb 
      3  1.1  simonb /*
      4  1.1  simonb  * Copyright 2001 Wasabi Systems, Inc.
      5  1.1  simonb  * All rights reserved.
      6  1.1  simonb  *
      7  1.1  simonb  * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
      8  1.1  simonb  *
      9  1.1  simonb  * Redistribution and use in source and binary forms, with or without
     10  1.1  simonb  * modification, are permitted provided that the following conditions
     11  1.1  simonb  * are met:
     12  1.1  simonb  * 1. Redistributions of source code must retain the above copyright
     13  1.1  simonb  *    notice, this list of conditions and the following disclaimer.
     14  1.1  simonb  * 2. Redistributions in binary form must reproduce the above copyright
     15  1.1  simonb  *    notice, this list of conditions and the following disclaimer in the
     16  1.1  simonb  *    documentation and/or other materials provided with the distribution.
     17  1.1  simonb  * 3. All advertising materials mentioning features or use of this software
     18  1.1  simonb  *    must display the following acknowledgement:
     19  1.1  simonb  *      This product includes software developed for the NetBSD Project by
     20  1.1  simonb  *      Wasabi Systems, Inc.
     21  1.1  simonb  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
     22  1.1  simonb  *    or promote products derived from this software without specific prior
     23  1.1  simonb  *    written permission.
     24  1.1  simonb  *
     25  1.1  simonb  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
     26  1.1  simonb  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     27  1.1  simonb  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     28  1.1  simonb  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
     29  1.1  simonb  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     30  1.1  simonb  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     31  1.1  simonb  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     32  1.1  simonb  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     33  1.1  simonb  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     34  1.1  simonb  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     35  1.1  simonb  * POSSIBILITY OF SUCH DAMAGE.
     36  1.1  simonb  */
     37  1.1  simonb 
     38  1.1  simonb /*
     39  1.1  simonb  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
     40  1.1  simonb  * Copyright (C) 1995, 1996 TooLs GmbH.
     41  1.1  simonb  * All rights reserved.
     42  1.1  simonb  *
     43  1.1  simonb  * Redistribution and use in source and binary forms, with or without
     44  1.1  simonb  * modification, are permitted provided that the following conditions
     45  1.1  simonb  * are met:
     46  1.1  simonb  * 1. Redistributions of source code must retain the above copyright
     47  1.1  simonb  *    notice, this list of conditions and the following disclaimer.
     48  1.1  simonb  * 2. Redistributions in binary form must reproduce the above copyright
     49  1.1  simonb  *    notice, this list of conditions and the following disclaimer in the
     50  1.1  simonb  *    documentation and/or other materials provided with the distribution.
     51  1.1  simonb  * 3. All advertising materials mentioning features or use of this software
     52  1.1  simonb  *    must display the following acknowledgement:
     53  1.1  simonb  *	This product includes software developed by TooLs GmbH.
     54  1.1  simonb  * 4. The name of TooLs GmbH may not be used to endorse or promote products
     55  1.1  simonb  *    derived from this software without specific prior written permission.
     56  1.1  simonb  *
     57  1.1  simonb  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
     58  1.1  simonb  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     59  1.1  simonb  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     60  1.1  simonb  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     61  1.1  simonb  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     62  1.1  simonb  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
     63  1.1  simonb  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     64  1.1  simonb  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
     65  1.1  simonb  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     66  1.1  simonb  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     67  1.1  simonb  */
     68  1.1  simonb 
     69  1.1  simonb #undef NOCACHE
     70  1.1  simonb 
     71  1.1  simonb #include <sys/param.h>
     72  1.1  simonb #include <sys/malloc.h>
     73  1.1  simonb #include <sys/proc.h>
     74  1.1  simonb #include <sys/user.h>
     75  1.1  simonb #include <sys/queue.h>
     76  1.1  simonb #include <sys/systm.h>
     77  1.1  simonb #include <sys/pool.h>
     78  1.1  simonb #include <sys/device.h>
     79  1.1  simonb 
     80  1.1  simonb #include <uvm/uvm.h>
     81  1.1  simonb 
     82  1.1  simonb #include <machine/pcb.h>
     83  1.1  simonb #include <machine/powerpc.h>
     84  1.1  simonb 
     85  1.1  simonb #include <powerpc/spr.h>
     86  1.1  simonb #include <powerpc/ibm4xx/tlb.h>
     87  1.1  simonb 
     88  1.1  simonb 
     89  1.1  simonb #define	CACHE_LINE	32
     90  1.1  simonb 
     91  1.1  simonb /*
     92  1.1  simonb  * kernmap is an array of PTEs large enough to map in
     93  1.1  simonb  * 4GB.  At 16KB/page it is 256K entries or 2MB.
     94  1.1  simonb  */
     95  1.1  simonb #define KERNMAP_SIZE	((0xffffffffU/NBPG)+1)
     96  1.1  simonb caddr_t kernmap;
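
/*
 * Worked out (a sketch; assumes the 16KB page size mentioned above and
 * an 8-byte PTE, neither of which is defined in this file):
 *
 *	0x100000000 / 0x4000 = 0x40000 (256K) entries
 *	0x40000 entries * 8 bytes = 2MB
 */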
     97  1.1  simonb 
     98  1.1  simonb #define MINCTX		2
     99  1.1  simonb #define NUMCTX		256
    100  1.1  simonb volatile struct pmap *ctxbusy[NUMCTX];
    101  1.1  simonb 
    102  1.1  simonb #define TLBF_USED	0x1
    103  1.1  simonb #define	TLBF_REF	0x2
    104  1.1  simonb #define	TLBF_LOCKED	0x4
    105  1.1  simonb #define	TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)
    106  1.1  simonb typedef struct tlb_info_s {
    107  1.1  simonb 	char	ti_flags;
     108  1.1  simonb 	char	ti_ctx;		/* TLB_PID associated with the entry */
    109  1.1  simonb 	u_int	ti_va;
    110  1.1  simonb } tlb_info_t;
    111  1.1  simonb 
    112  1.1  simonb volatile tlb_info_t tlb_info[NTLB];
     113  1.1  simonb /* We'll use a modified FIFO replacement policy because it's cheap */
    114  1.1  simonb volatile int tlbnext = TLB_NRESERVED;
    115  1.1  simonb 
    116  1.1  simonb u_long dtlb_miss_count = 0;
    117  1.1  simonb u_long itlb_miss_count = 0;
    118  1.1  simonb u_long ktlb_miss_count = 0;
    119  1.1  simonb u_long utlb_miss_count = 0;
    120  1.1  simonb 
    121  1.1  simonb /* Event counters -- XXX type `INTR' so we can see them with vmstat -i */
    122  1.1  simonb struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
    123  1.1  simonb 	NULL, "cpu", "tlbmiss");
    124  1.1  simonb struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
    125  1.1  simonb 	NULL, "cpu", "tlbhit");
    126  1.1  simonb struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
    127  1.1  simonb 	NULL, "cpu", "tlbflush");
    128  1.1  simonb struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_INTR,
    129  1.1  simonb 	NULL, "cpu", "tlbenter");
    130  1.1  simonb 
    131  1.1  simonb struct pmap kernel_pmap_;
    132  1.1  simonb 
    133  1.1  simonb int physmem;
    134  1.1  simonb static int npgs;
    135  1.1  simonb static u_int nextavail;
    136  1.1  simonb #ifndef MSGBUFADDR
    137  1.1  simonb extern paddr_t msgbuf_paddr;
    138  1.1  simonb #endif
    139  1.1  simonb 
    140  1.1  simonb static struct mem_region *mem, *avail;
    141  1.1  simonb 
    142  1.1  simonb /*
    143  1.1  simonb  * This is a cache of referenced/modified bits.
    144  1.1  simonb  * Bits herein are shifted by ATTRSHFT.
    145  1.1  simonb  */
    146  1.1  simonb static char *pmap_attrib;
    147  1.1  simonb 
    148  1.1  simonb #define PV_WIRED	0x1
    149  1.1  simonb #define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
    150  1.1  simonb #define	PV_CMPVA(va,pv)	(!(((pv)->pv_va^(va))&(~PV_WIRED)))
    151  1.1  simonb 
    152  1.1  simonb struct pv_entry {
    153  1.1  simonb 	struct pv_entry *pv_next;	/* Linked list of mappings */
    154  1.1  simonb 	vaddr_t pv_va;			/* virtual address of mapping */
    155  1.1  simonb 	struct pmap *pv_pm;
    156  1.1  simonb };
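
/*
 * Illustrative fragment (not compiled): the wired flag is stolen from
 * the low bit of pv_va, which is why PV_CMPVA() masks it off before
 * comparing virtual addresses.
 */
#if 0
	struct pv_entry pv;

	pv.pv_va = 0x2000;		/* page-aligned VA */
	PV_WIRE(&pv);			/* pv_va is now 0x2001 */
	/* PV_CMPVA(0x2000, &pv) is still true; PV_WIRED is masked off */
#endif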
    157  1.1  simonb 
    158  1.1  simonb struct pv_entry *pv_table;
    159  1.1  simonb static struct pool pv_pool;
    160  1.1  simonb 
    161  1.1  simonb static int pmap_initialized;
    162  1.1  simonb 
    163  1.1  simonb static int ctx_flush(int);
    164  1.1  simonb 
    165  1.1  simonb static inline void dcache_flush_page(vaddr_t);
    166  1.1  simonb static inline void icache_flush_page(vaddr_t);
    167  1.1  simonb static inline void dcache_flush(vaddr_t, vsize_t);
    168  1.1  simonb static inline void icache_flush(vaddr_t, vsize_t);
    169  1.1  simonb 
    170  1.1  simonb inline struct pv_entry *pa_to_pv(paddr_t);
    171  1.1  simonb static inline char *pa_to_attr(paddr_t);
    172  1.1  simonb 
    173  1.1  simonb static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
    174  1.1  simonb static inline int pte_enter(struct pmap *, vaddr_t, u_int);
    175  1.1  simonb 
    176  1.1  simonb static void pmap_pinit(pmap_t);
    177  1.1  simonb static void pmap_release(pmap_t);
    178  1.1  simonb static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t);
    179  1.1  simonb static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);
    180  1.1  simonb 
    181  1.1  simonb /*
    182  1.1  simonb  * These small routines may have to be replaced,
     183  1.1  simonb  * if/when we support processors other than the 604.
    184  1.1  simonb  */
    185  1.1  simonb 
    186  1.1  simonb static inline void
    187  1.1  simonb dcache_flush_page(vaddr_t va)
    188  1.1  simonb {
    189  1.1  simonb 	int i;
    190  1.1  simonb 
    191  1.1  simonb 	for (i = 0; i < NBPG; i += CACHE_LINE)
    192  1.1  simonb 		asm volatile("dcbf %0,%1" : : "r" (va), "r" (i));
    193  1.1  simonb 	asm volatile("sync;isync" : : );
    194  1.1  simonb }
    195  1.1  simonb 
    196  1.1  simonb static inline void
    197  1.1  simonb icache_flush_page(vaddr_t va)
    198  1.1  simonb {
    199  1.1  simonb 	int i;
    200  1.1  simonb 
    201  1.1  simonb 	for (i = 0; i < NBPG; i += CACHE_LINE)
    202  1.1  simonb 		asm volatile("icbi %0,%1" : : "r" (va), "r" (i));
    203  1.1  simonb 	asm volatile("sync;isync" : : );
    204  1.1  simonb }
    205  1.1  simonb 
    206  1.1  simonb static inline void
    207  1.1  simonb dcache_flush(vaddr_t va, vsize_t len)
    208  1.1  simonb {
    209  1.1  simonb 	int i;
    210  1.1  simonb 
    211  1.1  simonb 	if (len == 0)
    212  1.1  simonb 		return;
    213  1.1  simonb 
    214  1.1  simonb 	/* Make sure we flush all cache lines */
    215  1.1  simonb 	len += va & (CACHE_LINE-1);
    216  1.1  simonb 	for (i = 0; i < len; i += CACHE_LINE)
    217  1.1  simonb 		asm volatile("dcbf %0,%1" : : "r" (va), "r" (i));
    218  1.1  simonb 	asm volatile("sync;isync" : : );
    219  1.1  simonb }
    220  1.1  simonb 
    221  1.1  simonb static inline void
    222  1.1  simonb icache_flush(vaddr_t va, vsize_t len)
    223  1.1  simonb {
    224  1.1  simonb 	int i;
    225  1.1  simonb 
    226  1.1  simonb 	if (len == 0)
    227  1.1  simonb 		return;
    228  1.1  simonb 
    229  1.1  simonb 	/* Make sure we flush all cache lines */
    230  1.1  simonb 	len += va & (CACHE_LINE-1);
    231  1.1  simonb 	for (i = 0; i < len; i += CACHE_LINE)
    232  1.1  simonb 		asm volatile("icbi %0,%1" : : "r" (va), "r" (i));
    233  1.1  simonb 	asm volatile("sync;isync" : : );
    234  1.1  simonb }
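
/*
 * Usage sketch (illustrative, not compiled; dst, src and len are
 * placeholders): after copying instructions into a page, push the data
 * out of the D-cache and invalidate the I-cache so the processor
 * fetches the new code.
 */
#if 0
	memcpy((void *)dst, (const void *)src, len);
	dcache_flush(dst, len);
	icache_flush(dst, len);
#endif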
    235  1.1  simonb 
    236  1.1  simonb inline struct pv_entry *
    237  1.1  simonb pa_to_pv(paddr_t pa)
    238  1.1  simonb {
    239  1.1  simonb 	int bank, pg;
    240  1.1  simonb 
    241  1.1  simonb 	bank = vm_physseg_find(atop(pa), &pg);
    242  1.1  simonb 	if (bank == -1)
    243  1.1  simonb 		return NULL;
    244  1.1  simonb 	return &vm_physmem[bank].pmseg.pvent[pg];
    245  1.1  simonb }
    246  1.1  simonb 
    247  1.1  simonb static inline char *
    248  1.1  simonb pa_to_attr(paddr_t pa)
    249  1.1  simonb {
    250  1.1  simonb 	int bank, pg;
    251  1.1  simonb 
    252  1.1  simonb 	bank = vm_physseg_find(atop(pa), &pg);
    253  1.1  simonb 	if (bank == -1)
    254  1.1  simonb 		return NULL;
    255  1.1  simonb 	return &vm_physmem[bank].pmseg.attrs[pg];
    256  1.1  simonb }
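
/*
 * Usage sketch (illustrative, not compiled; pa is a placeholder): both
 * helpers return NULL for pages outside managed memory, so callers
 * check before touching the PV list or the attribute byte.
 */
#if 0
	struct pv_entry *pv;

	if ((pv = pa_to_pv(pa)) == NULL)
		return;		/* unmanaged page, no PV tracking */
#endif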
    257  1.1  simonb 
    258  1.1  simonb /*
    259  1.1  simonb  * Insert PTE into page table.
    260  1.1  simonb  */
    261  1.1  simonb int
    262  1.1  simonb pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
    263  1.1  simonb {
    264  1.1  simonb 	int seg = STIDX(va);
    265  1.1  simonb 	int ptn = PTIDX(va);
    266  1.1  simonb 	paddr_t pa;
    267  1.1  simonb 
    268  1.1  simonb 	if (!pm->pm_ptbl[seg]) {
    269  1.1  simonb 		/* Don't allocate a page to clear a non-existent mapping. */
    270  1.1  simonb 		if (!pte) return (1);
    271  1.1  simonb 		/* Allocate a page XXXX this will sleep! */
    272  1.1  simonb 		pa = 0;
    273  1.1  simonb 		pm->pm_ptbl[seg] = (uint *)uvm_km_alloc1(kernel_map, NBPG, 1);
    274  1.1  simonb 	}
    275  1.1  simonb 	pm->pm_ptbl[seg][ptn] = pte;
    276  1.1  simonb 
    277  1.1  simonb 	/* Flush entry. */
    278  1.1  simonb 	ppc4xx_tlb_flush(va, pm->pm_ctx);
    279  1.1  simonb 	return (1);
    280  1.1  simonb }
    281  1.1  simonb 
    282  1.1  simonb /*
    283  1.1  simonb  * Get a pointer to a PTE in a page table.
    284  1.1  simonb  */
    285  1.1  simonb volatile u_int *
    286  1.1  simonb pte_find(struct pmap *pm, vaddr_t va)
    287  1.1  simonb {
    288  1.1  simonb 	int seg = STIDX(va);
    289  1.1  simonb 	int ptn = PTIDX(va);
    290  1.1  simonb 
    291  1.1  simonb 	if (pm->pm_ptbl[seg])
    292  1.1  simonb 		return (&pm->pm_ptbl[seg][ptn]);
    293  1.1  simonb 
    294  1.1  simonb 	return (NULL);
    295  1.1  simonb }
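
/*
 * Lookup sketch (illustrative, not compiled; pm and va are
 * placeholders): STIDX() selects the segment-table slot and PTIDX()
 * the PTE within that page-table page; pte_find() returns NULL when
 * no page table has been allocated for the segment yet.
 */
#if 0
	volatile u_int *ptep;
	u_int tte;

	if ((ptep = pte_find(pm, va)) != NULL && (tte = *ptep) != 0)
		printf("va %lx -> pa %lx\n", (u_long)va, (u_long)TTE_PA(tte));
#endif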
    296  1.1  simonb 
    297  1.1  simonb /*
    298  1.1  simonb  * This is called during initppc, before the system is really initialized.
    299  1.1  simonb  */
    300  1.1  simonb void
    301  1.1  simonb pmap_bootstrap(u_int kernelstart, u_int kernelend)
    302  1.1  simonb {
    303  1.1  simonb 	struct mem_region *mp, *mp1;
    304  1.1  simonb 	int cnt, i;
    305  1.1  simonb 	u_int s, e, sz;
    306  1.1  simonb 
    307  1.1  simonb 	/*
    308  1.1  simonb 	 * Allocate the kernel page table at the end of
    309  1.1  simonb 	 * kernel space so it's in the locked TTE.
    310  1.1  simonb 	 */
    311  1.1  simonb 	kernmap = (caddr_t)kernelend;
    312  1.5     eeh //	kernelend += KERNMAP_SIZE*sizeof(struct pte);
    313  1.1  simonb 
    314  1.1  simonb 	/*
    315  1.1  simonb 	 * Initialize kernel page table.
    316  1.1  simonb 	 */
    317  1.5     eeh //	memset(kernmap, 0, KERNMAP_SIZE*sizeof(struct pte));
    318  1.1  simonb 	for (i = 0; i < STSZ; i++) {
    319  1.5     eeh 		pmap_kernel()->pm_ptbl[i] = 0; // (u_int *)(kernmap + i*NBPG);
    320  1.1  simonb 	}
    321  1.1  simonb 	ctxbusy[0] = ctxbusy[1] = pmap_kernel();
    322  1.1  simonb 
    323  1.1  simonb 	/*
    324  1.1  simonb 	 * Announce page-size to the VM-system
    325  1.1  simonb 	 */
    326  1.1  simonb 	uvmexp.pagesize = NBPG;
    327  1.1  simonb 	uvm_setpagesize();
    328  1.1  simonb 
    329  1.1  simonb 	/*
    330  1.1  simonb 	 * Get memory.
    331  1.1  simonb 	 */
    332  1.1  simonb 	mem_regions(&mem, &avail);
    333  1.1  simonb 	for (mp = mem; mp->size; mp++) {
    334  1.1  simonb 		physmem += btoc(mp->size);
    335  1.1  simonb 		printf("+%lx,",mp->size);
    336  1.1  simonb 	}
    337  1.1  simonb 	printf("\n");
    338  1.1  simonb 	ppc4xx_tlb_init();
    339  1.1  simonb 	/*
    340  1.1  simonb 	 * Count the number of available entries.
    341  1.1  simonb 	 */
    342  1.1  simonb 	for (cnt = 0, mp = avail; mp->size; mp++)
    343  1.1  simonb 		cnt++;
    344  1.1  simonb 
    345  1.1  simonb 	/*
    346  1.1  simonb 	 * Page align all regions.
    347  1.1  simonb 	 * Non-page aligned memory isn't very interesting to us.
    348  1.1  simonb 	 * Also, sort the entries for ascending addresses.
    349  1.1  simonb 	 */
    350  1.1  simonb 	kernelstart &= ~PGOFSET;
    351  1.1  simonb 	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
    352  1.1  simonb 	for (mp = avail; mp->size; mp++) {
    353  1.1  simonb 		s = mp->start;
    354  1.1  simonb 		e = mp->start + mp->size;
    355  1.1  simonb 		printf("%08x-%08x -> ",s,e);
    356  1.1  simonb 		/*
    357  1.1  simonb 		 * Check whether this region holds all of the kernel.
    358  1.1  simonb 		 */
    359  1.1  simonb 		if (s < kernelstart && e > kernelend) {
    360  1.1  simonb 			avail[cnt].start = kernelend;
    361  1.1  simonb 			avail[cnt++].size = e - kernelend;
    362  1.1  simonb 			e = kernelstart;
    363  1.1  simonb 		}
    364  1.1  simonb 		/*
     365  1.1  simonb 		 * Look whether this region starts within the kernel.
    366  1.1  simonb 		 */
    367  1.1  simonb 		if (s >= kernelstart && s < kernelend) {
    368  1.1  simonb 			if (e <= kernelend)
    369  1.1  simonb 				goto empty;
    370  1.1  simonb 			s = kernelend;
    371  1.1  simonb 		}
    372  1.1  simonb 		/*
    373  1.1  simonb 		 * Now look whether this region ends within the kernel.
    374  1.1  simonb 		 */
    375  1.1  simonb 		if (e > kernelstart && e <= kernelend) {
    376  1.1  simonb 			if (s >= kernelstart)
    377  1.1  simonb 				goto empty;
    378  1.1  simonb 			e = kernelstart;
    379  1.1  simonb 		}
    380  1.1  simonb 		/*
    381  1.1  simonb 		 * Now page align the start and size of the region.
    382  1.1  simonb 		 */
    383  1.1  simonb 		s = round_page(s);
    384  1.1  simonb 		e = trunc_page(e);
    385  1.1  simonb 		if (e < s)
    386  1.1  simonb 			e = s;
    387  1.1  simonb 		sz = e - s;
    388  1.1  simonb 		printf("%08x-%08x = %x\n",s,e,sz);
    389  1.1  simonb 		/*
    390  1.1  simonb 		 * Check whether some memory is left here.
    391  1.1  simonb 		 */
    392  1.1  simonb 		if (sz == 0) {
    393  1.1  simonb 		empty:
    394  1.3     wiz 			memmove(mp, mp + 1,
    395  1.3     wiz 				(cnt - (mp - avail)) * sizeof *mp);
    396  1.1  simonb 			cnt--;
    397  1.1  simonb 			mp--;
    398  1.1  simonb 			continue;
    399  1.1  simonb 		}
    400  1.1  simonb 		/*
    401  1.1  simonb 		 * Do an insertion sort.
    402  1.1  simonb 		 */
    403  1.1  simonb 		npgs += btoc(sz);
    404  1.1  simonb 		for (mp1 = avail; mp1 < mp; mp1++)
    405  1.1  simonb 			if (s < mp1->start)
    406  1.1  simonb 				break;
    407  1.1  simonb 		if (mp1 < mp) {
    408  1.3     wiz 			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
    409  1.1  simonb 			mp1->start = s;
    410  1.1  simonb 			mp1->size = sz;
    411  1.1  simonb 		} else {
    412  1.1  simonb 			mp->start = s;
    413  1.1  simonb 			mp->size = sz;
    414  1.1  simonb 		}
    415  1.1  simonb 	}
    416  1.1  simonb 
    417  1.1  simonb 	/*
    418  1.1  simonb 	 * We cannot do pmap_steal_memory here,
    419  1.1  simonb 	 * since we don't run with translation enabled yet.
    420  1.1  simonb 	 */
    421  1.1  simonb #ifndef MSGBUFADDR
    422  1.1  simonb 	/*
    423  1.1  simonb 	 * allow for msgbuf
    424  1.1  simonb 	 */
    425  1.1  simonb 	sz = round_page(MSGBUFSIZE);
    426  1.1  simonb 	mp = NULL;
    427  1.1  simonb 	for (mp1 = avail; mp1->size; mp1++)
    428  1.1  simonb 		if (mp1->size >= sz)
    429  1.1  simonb 			mp = mp1;
    430  1.1  simonb 	if (mp == NULL)
    431  1.1  simonb 		panic("not enough memory?");
    432  1.1  simonb 
    433  1.1  simonb 	npgs -= btoc(sz);
    434  1.1  simonb 	msgbuf_paddr = mp->start + mp->size - sz;
    435  1.1  simonb 	mp->size -= sz;
    436  1.1  simonb 	if (mp->size <= 0)
    437  1.3     wiz 		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
    438  1.1  simonb #endif
    439  1.1  simonb 
    440  1.1  simonb 	printf("Loading pages\n");
    441  1.1  simonb 	for (mp = avail; mp->size; mp++)
    442  1.1  simonb 		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
    443  1.1  simonb 			atop(mp->start), atop(mp->start + mp->size),
    444  1.1  simonb 			VM_FREELIST_DEFAULT);
    445  1.1  simonb 
    446  1.1  simonb 	/*
    447  1.1  simonb 	 * Initialize kernel pmap and hardware.
    448  1.1  simonb 	 */
     449  1.1  simonb 	/* Set up the TLB PID allocator so it knows we're already using PID 1 */
    450  1.1  simonb 	pmap_kernel()->pm_ctx = KERNEL_PID;
    451  1.1  simonb 	nextavail = avail->start;
    452  1.1  simonb 
    453  1.1  simonb 
    454  1.1  simonb 	evcnt_attach_static(&tlbhit_ev);
    455  1.1  simonb 	evcnt_attach_static(&tlbmiss_ev);
    456  1.1  simonb 	evcnt_attach_static(&tlbflush_ev);
    457  1.1  simonb 	evcnt_attach_static(&tlbenter_ev);
    458  1.1  simonb 	printf("Done\n");
    459  1.1  simonb }
    460  1.1  simonb 
    461  1.1  simonb /*
    462  1.1  simonb  * Restrict given range to physical memory
    463  1.1  simonb  *
    464  1.1  simonb  * (Used by /dev/mem)
    465  1.1  simonb  */
    466  1.1  simonb void
    467  1.1  simonb pmap_real_memory(paddr_t *start, psize_t *size)
    468  1.1  simonb {
    469  1.1  simonb 	struct mem_region *mp;
    470  1.1  simonb 
    471  1.1  simonb 	for (mp = mem; mp->size; mp++) {
    472  1.1  simonb 		if (*start + *size > mp->start &&
    473  1.1  simonb 		    *start < mp->start + mp->size) {
    474  1.1  simonb 			if (*start < mp->start) {
    475  1.1  simonb 				*size -= mp->start - *start;
    476  1.1  simonb 				*start = mp->start;
    477  1.1  simonb 			}
    478  1.1  simonb 			if (*start + *size > mp->start + mp->size)
    479  1.1  simonb 				*size = mp->start + mp->size - *start;
    480  1.1  simonb 			return;
    481  1.1  simonb 		}
    482  1.1  simonb 	}
    483  1.1  simonb 	*size = 0;
    484  1.1  simonb }
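
/*
 * Usage sketch (illustrative, not compiled; "off" is a placeholder for
 * the requested physical offset): a /dev/mem style caller clamps its
 * request to real memory before mapping it.
 */
#if 0
	paddr_t start = trunc_page(off);
	psize_t size = PAGE_SIZE;

	pmap_real_memory(&start, &size);
	if (size == 0)
		return (EFAULT);	/* not backed by physical memory */
#endif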
    485  1.1  simonb 
    486  1.1  simonb /*
    487  1.1  simonb  * Initialize anything else for pmap handling.
    488  1.1  simonb  * Called during vm_init().
    489  1.1  simonb  */
    490  1.1  simonb void
    491  1.1  simonb pmap_init(void)
    492  1.1  simonb {
    493  1.1  simonb 	struct pv_entry *pv;
    494  1.1  simonb 	vsize_t sz;
    495  1.1  simonb 	vaddr_t addr;
    496  1.1  simonb 	int i, s;
    497  1.1  simonb 	int bank;
    498  1.1  simonb 	char *attr;
    499  1.1  simonb 
    500  1.1  simonb 	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
    501  1.1  simonb 	sz = round_page(sz);
    502  1.1  simonb 	addr = uvm_km_zalloc(kernel_map, sz);
    503  1.1  simonb 	s = splvm();
    504  1.1  simonb 	pv = pv_table = (struct pv_entry *)addr;
    505  1.1  simonb 	for (i = npgs; --i >= 0;)
    506  1.1  simonb 		pv++->pv_pm = NULL;
    507  1.1  simonb 	pmap_attrib = (char *)pv;
    508  1.2     wiz 	memset(pv, 0, npgs);
    509  1.1  simonb 
    510  1.1  simonb 	pv = pv_table;
    511  1.1  simonb 	attr = pmap_attrib;
    512  1.1  simonb 	for (bank = 0; bank < vm_nphysseg; bank++) {
    513  1.1  simonb 		sz = vm_physmem[bank].end - vm_physmem[bank].start;
    514  1.1  simonb 		vm_physmem[bank].pmseg.pvent = pv;
    515  1.1  simonb 		vm_physmem[bank].pmseg.attrs = attr;
    516  1.1  simonb 		pv += sz;
    517  1.1  simonb 		attr += sz;
    518  1.1  simonb 	}
    519  1.1  simonb 
    520  1.1  simonb 	pmap_initialized = 1;
    521  1.1  simonb 	splx(s);
    522  1.1  simonb 
    523  1.1  simonb 	/* Setup a pool for additional pvlist structures */
    524  1.1  simonb 	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", 0,
    525  1.1  simonb 		  NULL, NULL, 0);
    526  1.1  simonb }
    527  1.1  simonb 
    528  1.1  simonb /*
    529  1.1  simonb  * How much virtual space is available to the kernel?
    530  1.1  simonb  */
    531  1.1  simonb void
    532  1.1  simonb pmap_virtual_space(vaddr_t *start, vaddr_t *end)
    533  1.1  simonb {
    534  1.1  simonb 
    535  1.1  simonb #if 0
    536  1.1  simonb 	/*
    537  1.1  simonb 	 * Reserve one segment for kernel virtual memory
    538  1.1  simonb 	 */
    539  1.1  simonb 	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
    540  1.1  simonb 	*end = *start + SEGMENT_LENGTH;
    541  1.1  simonb #else
    542  1.1  simonb 	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
    543  1.1  simonb 	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
    544  1.1  simonb #endif
    545  1.1  simonb }
    546  1.1  simonb 
    547  1.5     eeh #ifdef PMAP_GROWKERNEL
    548  1.5     eeh /*
    549  1.5     eeh  * Preallocate kernel page tables to a specified VA.
    550  1.5     eeh  * This simply loops through the first TTE for each
    551  1.5     eeh  * page table from the beginning of the kernel pmap,
    552  1.5     eeh  * reads the entry, and if the result is
    553  1.5     eeh  * zero (either invalid entry or no page table) it stores
    554  1.5     eeh  * a zero there, populating page tables in the process.
     555  1.5     eeh  * This is not the most efficient technique but I don't
    556  1.5     eeh  * expect it to be called that often.
    557  1.5     eeh  */
    558  1.5     eeh extern struct vm_page *vm_page_alloc1 __P((void));
    559  1.5     eeh extern void vm_page_free1 __P((struct vm_page *));
    560  1.5     eeh 
    561  1.5     eeh vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;
    562  1.5     eeh 
    563  1.5     eeh vaddr_t
    564  1.5     eeh pmap_growkernel(maxkvaddr)
    565  1.5     eeh         vaddr_t maxkvaddr;
    566  1.5     eeh {
    567  1.5     eeh 	int s;
    568  1.5     eeh 	int seg;
    569  1.5     eeh 	paddr_t pg;
    570  1.5     eeh 	struct pmap *pm = pmap_kernel();
    571  1.5     eeh 
    572  1.5     eeh 	s = splvm();
    573  1.5     eeh 
    574  1.5     eeh 	/* Align with the start of a page table */
    575  1.5     eeh 	for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
    576  1.5     eeh 	     kbreak += PTMAP) {
    577  1.5     eeh 		seg = STIDX(kbreak);
    578  1.5     eeh 
    579  1.5     eeh 		if (pte_find(pm, kbreak)) continue;
    580  1.5     eeh 
    581  1.5     eeh 		if (uvm.page_init_done) {
    582  1.5     eeh 			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
    583  1.5     eeh 		} else {
    584  1.5     eeh 			if (!uvm_page_physget(&pg))
    585  1.5     eeh 				panic("pmap_growkernel: no memory");
    586  1.5     eeh 		}
    587  1.5     eeh 		if (!pg) panic("pmap_growkernel: no pages");
    588  1.5     eeh 		pmap_zero_page((paddr_t)pg);
    589  1.5     eeh 
     590  1.5     eeh 		/* XXX This is based on all physmem being addressable */
    591  1.5     eeh 		pm->pm_ptbl[seg] = (u_int *)pg;
    592  1.5     eeh 	}
    593  1.5     eeh 	splx(s);
    594  1.5     eeh 	return (kbreak);
    595  1.5     eeh }
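
/*
 * Usage sketch (illustrative, not compiled; kva_top and maxkvaddr are
 * placeholders): the VM system passes in the new top of kernel VA
 * space and pmap_growkernel() returns how far the kernel page tables
 * now reach.
 */
#if 0
	kva_top = pmap_growkernel(maxkvaddr);
#endif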
    596  1.5     eeh 
    597  1.5     eeh /*
    598  1.5     eeh  *	vm_page_alloc1:
    599  1.5     eeh  *
    600  1.5     eeh  *	Allocate and return a memory cell with no associated object.
    601  1.5     eeh  */
    602  1.5     eeh struct vm_page *
    603  1.5     eeh vm_page_alloc1()
    604  1.5     eeh {
    605  1.5     eeh 	struct vm_page *pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
    606  1.5     eeh 	if (pg) {
    607  1.5     eeh 		pg->wire_count = 1;	/* no mappings yet */
    608  1.5     eeh 		pg->flags &= ~PG_BUSY;	/* never busy */
    609  1.5     eeh 	}
    610  1.5     eeh 	return pg;
    611  1.5     eeh }
    612  1.5     eeh 
    613  1.5     eeh /*
    614  1.5     eeh  *	vm_page_free1:
    615  1.5     eeh  *
    616  1.5     eeh  *	Returns the given page to the free list,
     617  1.5     eeh  *	disassociating it from any VM object.
    618  1.5     eeh  *
    619  1.5     eeh  *	Object and page must be locked prior to entry.
    620  1.5     eeh  */
    621  1.5     eeh void
    622  1.5     eeh vm_page_free1(mem)
    623  1.5     eeh 	struct vm_page *mem;
    624  1.5     eeh {
    625  1.5     eeh 	if (mem->flags != (PG_CLEAN|PG_FAKE)) {
    626  1.5     eeh 		printf("Freeing invalid page %p\n", mem);
    627  1.5     eeh 		printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(mem));
    628  1.5     eeh 		Debugger();
    629  1.5     eeh 		return;
    630  1.5     eeh 	}
    631  1.5     eeh 	mem->flags |= PG_BUSY;
    632  1.5     eeh 	mem->wire_count = 0;
    633  1.5     eeh 	uvm_pagefree(mem);
    634  1.5     eeh }
    635  1.5     eeh #endif
    636  1.5     eeh 
    637  1.1  simonb /*
    638  1.1  simonb  * Create and return a physical map.
    639  1.1  simonb  */
    640  1.1  simonb struct pmap *
    641  1.1  simonb pmap_create(void)
    642  1.1  simonb {
    643  1.1  simonb 	struct pmap *pm;
    644  1.1  simonb 
    645  1.1  simonb 	pm = (struct pmap *)malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
    646  1.2     wiz 	memset((caddr_t)pm, 0, sizeof *pm);
    647  1.1  simonb 	pmap_pinit(pm);
    648  1.1  simonb 	return pm;
    649  1.1  simonb }
    650  1.1  simonb 
    651  1.1  simonb /*
    652  1.1  simonb  * Initialize a preallocated and zeroed pmap structure.
    653  1.1  simonb  */
    654  1.1  simonb void
    655  1.1  simonb pmap_pinit(struct pmap *pm)
    656  1.1  simonb {
    657  1.1  simonb 	int i;
    658  1.1  simonb 
    659  1.1  simonb 	/*
     660  1.1  simonb 	 * Initialize the reference count and clear the page table pointers.
    661  1.1  simonb 	 */
    662  1.1  simonb 	pm->pm_refs = 1;
    663  1.1  simonb 	for (i = 0; i < STSZ; i++)
    664  1.1  simonb 		pm->pm_ptbl[i] = NULL;
    665  1.1  simonb }
    666  1.1  simonb 
    667  1.1  simonb /*
    668  1.1  simonb  * Add a reference to the given pmap.
    669  1.1  simonb  */
    670  1.1  simonb void
    671  1.1  simonb pmap_reference(struct pmap *pm)
    672  1.1  simonb {
    673  1.1  simonb 
    674  1.1  simonb 	pm->pm_refs++;
    675  1.1  simonb }
    676  1.1  simonb 
    677  1.1  simonb /*
    678  1.1  simonb  * Retire the given pmap from service.
    679  1.1  simonb  * Should only be called if the map contains no valid mappings.
    680  1.1  simonb  */
    681  1.1  simonb void
    682  1.1  simonb pmap_destroy(struct pmap *pm)
    683  1.1  simonb {
    684  1.1  simonb 
    685  1.1  simonb 	if (--pm->pm_refs == 0) {
    686  1.1  simonb 		pmap_release(pm);
    687  1.1  simonb 		free((caddr_t)pm, M_VMPMAP);
    688  1.1  simonb 	}
    689  1.1  simonb }
    690  1.1  simonb 
    691  1.1  simonb /*
    692  1.1  simonb  * Release any resources held by the given physical map.
    693  1.1  simonb  * Called when a pmap initialized by pmap_pinit is being released.
    694  1.1  simonb  */
    695  1.1  simonb static void
    696  1.1  simonb pmap_release(struct pmap *pm)
    697  1.1  simonb {
    698  1.1  simonb 	int i;
    699  1.1  simonb 
    700  1.1  simonb 	for (i = 0; i < STSZ; i++)
    701  1.1  simonb 		if (pm->pm_ptbl[i]) {
    702  1.1  simonb 			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i], NBPG);
    703  1.1  simonb 			pm->pm_ptbl[i] = NULL;
    704  1.1  simonb 		}
    705  1.1  simonb 	if (pm->pm_ctx) ctx_free(pm);
    706  1.1  simonb }
    707  1.1  simonb 
    708  1.1  simonb /*
    709  1.1  simonb  * Copy the range specified by src_addr/len
    710  1.1  simonb  * from the source map to the range dst_addr/len
    711  1.1  simonb  * in the destination map.
    712  1.1  simonb  *
    713  1.1  simonb  * This routine is only advisory and need not do anything.
    714  1.1  simonb  */
    715  1.1  simonb void
    716  1.1  simonb pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
    717  1.1  simonb 	  vsize_t len, vaddr_t src_addr)
    718  1.1  simonb {
    719  1.1  simonb }
    720  1.1  simonb 
    721  1.1  simonb /*
    722  1.1  simonb  * Require that all active physical maps contain no
    723  1.1  simonb  * incorrect entries NOW.
    724  1.1  simonb  */
    725  1.1  simonb void
    726  1.4   chris pmap_update(struct pmap *pmap)
    727  1.1  simonb {
    728  1.1  simonb }
    729  1.1  simonb 
    730  1.1  simonb /*
    731  1.1  simonb  * Garbage collects the physical map system for
    732  1.1  simonb  * pages which are no longer used.
    733  1.1  simonb  * Success need not be guaranteed -- that is, there
    734  1.1  simonb  * may well be pages which are not referenced, but
    735  1.1  simonb  * others may be collected.
    736  1.1  simonb  * Called by the pageout daemon when pages are scarce.
    737  1.1  simonb  */
    738  1.1  simonb void
    739  1.1  simonb pmap_collect(struct pmap *pm)
    740  1.1  simonb {
    741  1.1  simonb }
    742  1.1  simonb 
    743  1.1  simonb /*
    744  1.1  simonb  * Fill the given physical page with zeroes.
    745  1.1  simonb  */
    746  1.1  simonb void
    747  1.1  simonb pmap_zero_page(paddr_t pa)
    748  1.1  simonb {
    749  1.1  simonb 
    750  1.1  simonb #ifdef NOCACHE
    751  1.2     wiz 	memset((caddr_t)pa, 0, NBPG);
    752  1.1  simonb #else
    753  1.1  simonb 	int i;
    754  1.1  simonb 
    755  1.1  simonb 	for (i = NBPG/CACHELINESIZE; i > 0; i--) {
    756  1.1  simonb 		__asm __volatile ("dcbz 0,%0" :: "r"(pa));
    757  1.1  simonb 		pa += CACHELINESIZE;
    758  1.1  simonb 	}
    759  1.1  simonb #endif
    760  1.1  simonb }
    761  1.1  simonb 
    762  1.1  simonb /*
    763  1.1  simonb  * Copy the given physical source page to its destination.
    764  1.1  simonb  */
    765  1.1  simonb void
    766  1.1  simonb pmap_copy_page(paddr_t src, paddr_t dst)
    767  1.1  simonb {
    768  1.1  simonb 
    769  1.2     wiz 	memcpy((caddr_t)dst, (caddr_t)src, NBPG);
    770  1.1  simonb 	dcache_flush_page(dst);
    771  1.1  simonb }
    772  1.1  simonb 
    773  1.1  simonb /*
    774  1.1  simonb  * This returns whether this is the first mapping of a page.
    775  1.1  simonb  */
    776  1.1  simonb static inline int
    777  1.1  simonb pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
    778  1.1  simonb {
    779  1.1  simonb 	struct pv_entry *pv, *npv = NULL;
    780  1.1  simonb 	int s;
    781  1.1  simonb 
    782  1.1  simonb 	if (!pmap_initialized)
    783  1.1  simonb 		return 0;
    784  1.1  simonb 
    785  1.1  simonb 	s = splvm();
    786  1.1  simonb 
    787  1.1  simonb 	pv = pa_to_pv(pa);
    788  1.1  simonb 	if (!pv->pv_pm) {
    789  1.1  simonb 		/*
    790  1.1  simonb 		 * No entries yet, use header as the first entry.
    791  1.1  simonb 		 */
    792  1.1  simonb 		pv->pv_va = va;
    793  1.1  simonb 		pv->pv_pm = pm;
    794  1.1  simonb 		pv->pv_next = NULL;
    795  1.1  simonb 	} else {
    796  1.1  simonb 		/*
    797  1.1  simonb 		 * There is at least one other VA mapping this page.
    798  1.1  simonb 		 * Place this entry after the header.
    799  1.1  simonb 		 */
    800  1.1  simonb 		npv = pool_get(&pv_pool, PR_WAITOK);
    801  1.1  simonb 		if (!npv) return (0);
    802  1.1  simonb 		npv->pv_va = va;
    803  1.1  simonb 		npv->pv_pm = pm;
    804  1.1  simonb 		npv->pv_next = pv->pv_next;
    805  1.1  simonb 		pv->pv_next = npv;
    806  1.1  simonb 	}
    807  1.1  simonb 	splx(s);
    808  1.1  simonb 	return (1);
    809  1.1  simonb }
    810  1.1  simonb 
    811  1.1  simonb static void
    812  1.1  simonb pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
    813  1.1  simonb {
    814  1.1  simonb 	struct pv_entry *pv, *npv;
    815  1.1  simonb 
    816  1.1  simonb 	/*
    817  1.1  simonb 	 * Remove from the PV table.
    818  1.1  simonb 	 */
    819  1.1  simonb 	pv = pa_to_pv(pa);
    820  1.1  simonb 	if (!pv) return;
    821  1.1  simonb 
    822  1.1  simonb 	/*
    823  1.1  simonb 	 * If it is the first entry on the list, it is actually
    824  1.1  simonb 	 * in the header and we must copy the following entry up
    825  1.1  simonb 	 * to the header.  Otherwise we must search the list for
    826  1.1  simonb 	 * the entry.  In either case we free the now unused entry.
    827  1.1  simonb 	 */
    828  1.1  simonb 	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
    829  1.1  simonb 		if ((npv = pv->pv_next)) {
    830  1.1  simonb 			*pv = *npv;
    831  1.1  simonb 			pool_put(&pv_pool, npv);
    832  1.1  simonb 		} else
    833  1.1  simonb 			pv->pv_pm = NULL;
    834  1.1  simonb 	} else {
    835  1.1  simonb 		for (; (npv = pv->pv_next) != NULL; pv = npv)
    836  1.1  simonb 			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
    837  1.1  simonb 				break;
    838  1.1  simonb 		if (npv) {
    839  1.1  simonb 			pv->pv_next = npv->pv_next;
    840  1.1  simonb 			pool_put(&pv_pool, npv);
    841  1.1  simonb 		}
    842  1.1  simonb 	}
    843  1.1  simonb }
    844  1.1  simonb 
    845  1.1  simonb /*
    846  1.1  simonb  * Insert physical page at pa into the given pmap at virtual address va.
    847  1.1  simonb  */
    848  1.1  simonb int
    849  1.1  simonb pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
    850  1.1  simonb {
    851  1.1  simonb 	int s;
    852  1.1  simonb 	u_int tte;
    853  1.1  simonb 	int managed;
    854  1.1  simonb 
    855  1.1  simonb 	/*
    856  1.1  simonb 	 * Have to remove any existing mapping first.
    857  1.1  simonb 	 */
    858  1.1  simonb 	pmap_remove(pm, va, va + NBPG);
    859  1.1  simonb 
    860  1.1  simonb 	if (flags & PMAP_WIRED) flags |= prot;
    861  1.1  simonb 
    862  1.1  simonb 	/* If it has no protections don't bother w/the rest */
    863  1.1  simonb 	if (!(flags & VM_PROT_ALL))
    864  1.1  simonb 		return (0);
    865  1.1  simonb 
    866  1.1  simonb 	managed = 0;
    867  1.1  simonb 	if (vm_physseg_find(atop(pa), NULL) != -1)
    868  1.1  simonb 		managed = 1;
    869  1.1  simonb 
    870  1.1  simonb 	/*
    871  1.1  simonb 	 * Generate TTE.
    872  1.1  simonb 	 *
    873  1.1  simonb 	 * XXXX
    874  1.1  simonb 	 *
    875  1.1  simonb 	 * Since the kernel does not handle execution privileges properly,
    876  1.1  simonb 	 * we will handle read and execute permissions together.
    877  1.1  simonb 	 */
    878  1.1  simonb 	tte = TTE_PA(pa) | TTE_EX;
    879  1.1  simonb 	/* XXXX -- need to support multiple page sizes. */
    880  1.1  simonb 	tte |= TTE_SZ_16K;
    881  1.1  simonb #ifdef	DIAGNOSTIC
    882  1.1  simonb 	if ((flags & (PME_NOCACHE | PME_WRITETHROUG)) ==
    883  1.1  simonb 		(PME_NOCACHE | PME_WRITETHROUG))
    884  1.1  simonb 		panic("pmap_enter: uncached & writethrough\n");
    885  1.1  simonb #endif
    886  1.1  simonb 	if (flags & PME_NOCACHE)
    887  1.1  simonb 		/* Must be I/O mapping */
    888  1.1  simonb 		tte |= TTE_I | TTE_G;
    889  1.1  simonb #ifdef NOCACHE
    890  1.1  simonb 	tte |= TTE_I;
    891  1.1  simonb #else
    892  1.1  simonb 	else if (flags & PME_WRITETHROUG)
    893  1.1  simonb 		/* Uncached and writethrough are not compatible */
    894  1.1  simonb 		tte |= TTE_W;
    895  1.1  simonb #endif
    896  1.1  simonb 	if (pm == pmap_kernel())
    897  1.1  simonb 		tte |= TTE_ZONE(ZONE_PRIV);
    898  1.1  simonb 	else
    899  1.1  simonb 		tte |= TTE_ZONE(ZONE_USER);
    900  1.1  simonb 
    901  1.1  simonb 	if (flags & VM_PROT_WRITE)
    902  1.1  simonb 		tte |= TTE_WR;
    903  1.1  simonb 
    904  1.1  simonb 	/*
    905  1.1  simonb 	 * Now record mapping for later back-translation.
    906  1.1  simonb 	 */
    907  1.1  simonb 	if (pmap_initialized && managed) {
    908  1.1  simonb 		char *attr;
    909  1.1  simonb 
    910  1.1  simonb 		if (!pmap_enter_pv(pm, va, pa)) {
    911  1.1  simonb 			/* Could not enter pv on a managed page */
    912  1.1  simonb 			return 1;
    913  1.1  simonb 		}
    914  1.1  simonb 
    915  1.1  simonb 		/* Now set attributes. */
    916  1.1  simonb 		attr = pa_to_attr(pa);
    917  1.1  simonb #ifdef DIAGNOSTIC
    918  1.1  simonb 		if (!attr)
    919  1.1  simonb 			panic("managed but no attr\n");
    920  1.1  simonb #endif
    921  1.1  simonb 		if (flags & VM_PROT_ALL)
    922  1.1  simonb 			*attr |= PTE_HI_REF;
    923  1.1  simonb 		if (flags & VM_PROT_WRITE)
    924  1.1  simonb 			*attr |= PTE_HI_CHG;
    925  1.1  simonb 	}
    926  1.1  simonb 
    927  1.1  simonb 	s = splvm();
    928  1.1  simonb 	pm->pm_stats.resident_count++;
    929  1.1  simonb 
    930  1.1  simonb 	/* Insert page into page table. */
    931  1.1  simonb 	pte_enter(pm, va, tte);
    932  1.1  simonb 
    933  1.1  simonb 	/* If this is a real fault, enter it in the tlb */
    934  1.1  simonb 	if (tte && ((flags & PMAP_WIRED) == 0)) {
    935  1.1  simonb 		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
    936  1.1  simonb 	}
    937  1.1  simonb 	splx(s);
    938  1.1  simonb 	return 0;
    939  1.1  simonb }
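
/*
 * Usage sketch (illustrative, not compiled; va and pa are
 * placeholders): enter a wired, writable mapping into the kernel pmap.
 * PMAP_WIRED makes pmap_enter() install the PTE right away instead of
 * deferring it to the first access fault.
 */
#if 0
	if (pmap_enter(pmap_kernel(), va, pa, VM_PROT_READ | VM_PROT_WRITE,
	    PMAP_WIRED))
		panic("example: pmap_enter failed");
#endif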
    940  1.1  simonb 
    941  1.1  simonb void
    942  1.1  simonb pmap_unwire(struct pmap *pm, vaddr_t va)
    943  1.1  simonb {
    944  1.1  simonb 	struct pv_entry *pv, *npv;
    945  1.1  simonb 	paddr_t pa;
    946  1.1  simonb 	int s = splvm();
    947  1.1  simonb 
    948  1.1  simonb         if (pm == NULL) {
    949  1.1  simonb                 return;
    950  1.1  simonb         }
    951  1.1  simonb 
    952  1.1  simonb 	if (!pmap_extract(pm, va, &pa)) {
    953  1.1  simonb 		return;
    954  1.1  simonb 	}
    955  1.1  simonb 
    956  1.1  simonb 	va |= PV_WIRED;
    957  1.1  simonb 
    958  1.1  simonb 	pv = pa_to_pv(pa);
    959  1.1  simonb 	if (!pv) return;
    960  1.1  simonb 
     961  1.1  simonb 	/*
     962  1.1  simonb 	 * Walk the PV list for this page looking for the entry
     963  1.1  simonb 	 * that matches this mapping, and clear its wired bit.
     964  1.1  simonb 	 * Unlike pmap_remove_pv(), the entry itself stays on the
     965  1.1  simonb 	 * list; nothing is freed here.
     966  1.1  simonb 	 */
    967  1.1  simonb 	for (npv = pv; (npv = pv->pv_next) != NULL; pv = npv) {
    968  1.1  simonb 		if (pm == npv->pv_pm && PV_CMPVA(va, npv)) {
    969  1.1  simonb 			npv->pv_va &= ~PV_WIRED;
    970  1.1  simonb 			break;
    971  1.1  simonb 		}
    972  1.1  simonb 	}
    973  1.1  simonb 	splx(s);
    974  1.1  simonb }
    975  1.1  simonb 
    976  1.1  simonb void
    977  1.1  simonb pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
    978  1.1  simonb {
    979  1.1  simonb 	int s;
    980  1.1  simonb 	u_int tte;
    981  1.1  simonb 	struct pmap *pm = pmap_kernel();
    982  1.1  simonb 
    983  1.1  simonb 	/*
    984  1.1  simonb 	 * Have to remove any existing mapping first.
    985  1.1  simonb 	 */
    986  1.1  simonb 
    987  1.1  simonb 	/*
    988  1.1  simonb 	 * Generate TTE.
    989  1.1  simonb 	 *
    990  1.1  simonb 	 * XXXX
    991  1.1  simonb 	 *
    992  1.1  simonb 	 * Since the kernel does not handle execution privileges properly,
    993  1.1  simonb 	 * we will handle read and execute permissions together.
    994  1.1  simonb 	 */
    995  1.1  simonb 	tte = 0;
    996  1.1  simonb 	if (prot & VM_PROT_ALL) {
    997  1.1  simonb 
    998  1.1  simonb 		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
    999  1.1  simonb 		/* XXXX -- need to support multiple page sizes. */
   1000  1.1  simonb 		tte |= TTE_SZ_16K;
   1001  1.1  simonb #ifdef DIAGNOSTIC
   1002  1.1  simonb 		if ((prot & (PME_NOCACHE | PME_WRITETHROUG)) ==
   1003  1.1  simonb 			(PME_NOCACHE | PME_WRITETHROUG))
   1004  1.1  simonb 			panic("pmap_kenter_pa: uncached & writethrough\n");
   1005  1.1  simonb #endif
   1006  1.1  simonb 		if (prot & PME_NOCACHE)
   1007  1.1  simonb 			/* Must be I/O mapping */
   1008  1.1  simonb 			tte |= TTE_I | TTE_G;
   1009  1.1  simonb #ifdef NOCACHE
   1010  1.1  simonb 		tte |= TTE_I;
   1011  1.1  simonb #else
   1012  1.1  simonb 		else if (prot & PME_WRITETHROUG)
   1013  1.1  simonb 			/* Uncached and writethrough are not compatible */
   1014  1.1  simonb 			tte |= TTE_W;
   1015  1.1  simonb #endif
   1016  1.1  simonb 		if (prot & VM_PROT_WRITE)
   1017  1.1  simonb 			tte |= TTE_WR;
   1018  1.1  simonb 	}
   1019  1.1  simonb 
   1020  1.1  simonb 	s = splvm();
   1021  1.1  simonb 	pm->pm_stats.resident_count++;
   1022  1.1  simonb 
   1023  1.1  simonb 	/* Insert page into page table. */
   1024  1.1  simonb 	pte_enter(pm, va, tte);
   1025  1.1  simonb 	splx(s);
   1026  1.1  simonb }
   1027  1.1  simonb 
   1028  1.1  simonb void
   1029  1.1  simonb pmap_kremove(vaddr_t va, vsize_t len)
   1030  1.1  simonb {
   1031  1.1  simonb 
   1032  1.1  simonb 	while (len > 0) {
   1033  1.1  simonb 		pte_enter(pmap_kernel(), va, 0);
   1034  1.1  simonb 		va += PAGE_SIZE;
   1035  1.1  simonb 		len -= PAGE_SIZE;
   1036  1.1  simonb 	}
   1037  1.1  simonb }
   1038  1.1  simonb 
   1039  1.1  simonb /*
   1040  1.1  simonb  * Remove the given range of mapping entries.
   1041  1.1  simonb  */
   1042  1.1  simonb void
   1043  1.1  simonb pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
   1044  1.1  simonb {
   1045  1.1  simonb 	int s;
   1046  1.1  simonb 	paddr_t pa;
   1047  1.1  simonb 	volatile u_int *ptp;
   1048  1.1  simonb 
   1049  1.1  simonb 	s = splvm();
   1050  1.1  simonb 	while (va < endva) {
   1051  1.1  simonb 
   1052  1.1  simonb 		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
   1053  1.1  simonb 			pa = TTE_PA(pa);
   1054  1.1  simonb 			pmap_remove_pv(pm, va, pa);
   1055  1.1  simonb 			*ptp = 0;
   1056  1.1  simonb 			ppc4xx_tlb_flush(va, pm->pm_ctx);
   1057  1.1  simonb 			pm->pm_stats.resident_count--;
   1058  1.1  simonb 		}
   1059  1.1  simonb 		va += NBPG;
   1060  1.1  simonb 	}
   1061  1.1  simonb 
   1062  1.1  simonb 	splx(s);
   1063  1.1  simonb }
   1064  1.1  simonb 
   1065  1.1  simonb /*
   1066  1.1  simonb  * Get the physical page address for the given pmap/virtual address.
   1067  1.1  simonb  */
   1068  1.1  simonb boolean_t
   1069  1.1  simonb pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
   1070  1.1  simonb {
   1071  1.1  simonb 	int seg = STIDX(va);
   1072  1.1  simonb 	int ptn = PTIDX(va);
   1073  1.1  simonb 	u_int pa = 0;
   1074  1.1  simonb 	int s = splvm();
   1075  1.1  simonb 
   1076  1.1  simonb 	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
   1077  1.1  simonb 		*pap = TTE_PA(pa) | (va & PGOFSET);
   1078  1.1  simonb 	}
   1079  1.1  simonb 	splx(s);
   1080  1.1  simonb 	return (pa != 0);
   1081  1.1  simonb }
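
/*
 * Usage sketch (illustrative, not compiled; va is a placeholder):
 * translate a mapped kernel virtual address back to its physical
 * address.
 */
#if 0
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("example: va %lx is not mapped", (u_long)va);
#endif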
   1082  1.1  simonb 
   1083  1.1  simonb /*
   1084  1.1  simonb  * Lower the protection on the specified range of this pmap.
   1085  1.1  simonb  *
   1086  1.1  simonb  * There are only two cases: either the protection is going to 0,
   1087  1.1  simonb  * or it is going to read-only.
   1088  1.1  simonb  */
   1089  1.1  simonb void
   1090  1.1  simonb pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   1091  1.1  simonb {
   1092  1.1  simonb 	volatile u_int *ptp;
   1093  1.1  simonb 	int s;
   1094  1.1  simonb 
   1095  1.1  simonb 	if (prot & VM_PROT_READ) {
   1096  1.1  simonb 		s = splvm();
   1097  1.1  simonb 		while (sva < eva) {
   1098  1.1  simonb 			if ((ptp = pte_find(pm, sva)) != NULL) {
   1099  1.1  simonb 				*ptp &= ~TTE_WR;
   1100  1.1  simonb 				ppc4xx_tlb_flush(sva, pm->pm_ctx);
   1101  1.1  simonb 			}
   1102  1.1  simonb 			sva += NBPG;
   1103  1.1  simonb 		}
   1104  1.1  simonb 		splx(s);
   1105  1.1  simonb 		return;
   1106  1.1  simonb 	}
   1107  1.1  simonb 	pmap_remove(pm, sva, eva);
   1108  1.1  simonb }
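
/*
 * Usage sketch (illustrative, not compiled; pm, sva and eva are
 * placeholders): dropping write permission keeps the mappings but
 * clears TTE_WR; dropping all permissions falls through to
 * pmap_remove().
 */
#if 0
	pmap_protect(pm, sva, eva, VM_PROT_READ);	/* make read-only */
	pmap_protect(pm, sva, eva, VM_PROT_NONE);	/* unmap the range */
#endif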
   1109  1.1  simonb 
   1110  1.1  simonb boolean_t
   1111  1.1  simonb check_attr(struct vm_page *pg, u_int mask, int clear)
   1112  1.1  simonb {
   1113  1.1  simonb 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1114  1.1  simonb 	int s;
   1115  1.1  simonb 	char *attr;
   1116  1.1  simonb 	int rv;
   1117  1.1  simonb 
   1118  1.1  simonb 	/*
   1119  1.1  simonb 	 * First modify bits in cache.
   1120  1.1  simonb 	 */
   1121  1.1  simonb 	s = splvm();
   1122  1.1  simonb 	attr = pa_to_attr(pa);
   1123  1.1  simonb 	if (attr == NULL)
   1124  1.1  simonb 		return FALSE;
   1125  1.1  simonb 
   1126  1.1  simonb 	rv = ((*attr & mask) != 0);
   1127  1.1  simonb 	if (clear)
   1128  1.1  simonb 		*attr &= ~mask;
   1129  1.1  simonb 
   1130  1.1  simonb 	splx(s);
   1131  1.1  simonb 	return rv;
   1132  1.1  simonb }
   1133  1.1  simonb 
   1134  1.1  simonb 
   1135  1.1  simonb /*
   1136  1.1  simonb  * Lower the protection on the specified physical page.
   1137  1.1  simonb  *
   1138  1.1  simonb  * There are only two cases: either the protection is going to 0,
   1139  1.1  simonb  * or it is going to read-only.
   1140  1.1  simonb  */
   1141  1.1  simonb void
   1142  1.1  simonb pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   1143  1.1  simonb {
   1144  1.1  simonb 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1145  1.1  simonb 	vaddr_t va;
   1146  1.1  simonb 	struct pv_entry *pvh, *pv, *npv;
   1147  1.1  simonb 	struct pmap *pm;
   1148  1.1  simonb 
   1149  1.1  simonb 	pvh = pa_to_pv(pa);
   1150  1.1  simonb 	if (pvh == NULL)
   1151  1.1  simonb 		return;
   1152  1.1  simonb 
   1153  1.1  simonb 	/* Handle extra pvs which may be deleted in the operation */
   1154  1.1  simonb 	for (pv = pvh->pv_next; pv; pv = npv) {
   1155  1.1  simonb 		npv = pv->pv_next;
   1156  1.1  simonb 
   1157  1.1  simonb 		pm = pv->pv_pm;
   1158  1.1  simonb 		va = pv->pv_va;
   1159  1.1  simonb 		pmap_protect(pm, va, va+NBPG, prot);
   1160  1.1  simonb 	}
   1161  1.1  simonb 	/* Now check the head pv */
   1162  1.1  simonb 	if (pvh->pv_pm) {
   1163  1.1  simonb 		pv = pvh;
   1164  1.1  simonb 		pm = pv->pv_pm;
   1165  1.1  simonb 		va = pv->pv_va;
   1166  1.1  simonb 		pmap_protect(pm, va, va+NBPG, prot);
   1167  1.1  simonb 	}
   1168  1.1  simonb }
   1169  1.1  simonb 
   1170  1.1  simonb /*
   1171  1.1  simonb  * Activate the address space for the specified process.  If the process
   1172  1.1  simonb  * is the current process, load the new MMU context.
   1173  1.1  simonb  */
   1174  1.1  simonb void
   1175  1.1  simonb pmap_activate(struct proc *p)
   1176  1.1  simonb {
   1177  1.1  simonb #if 0
   1178  1.1  simonb 	struct pcb *pcb = &p->p_addr->u_pcb;
   1179  1.1  simonb 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
   1180  1.1  simonb 
   1181  1.1  simonb 	/*
   1182  1.1  simonb 	 * XXX Normally performed in cpu_fork().
   1183  1.1  simonb 	 */
   1184  1.1  simonb 	printf("pmap_activate(%p), pmap=%p\n",p,pmap);
   1185  1.1  simonb 	if (pcb->pcb_pm != pmap) {
   1186  1.1  simonb 		pcb->pcb_pm = pmap;
   1187  1.1  simonb 		(void) pmap_extract(pmap_kernel(), (vaddr_t)pcb->pcb_pm,
   1188  1.1  simonb 		    (paddr_t *)&pcb->pcb_pmreal);
   1189  1.1  simonb 	}
   1190  1.1  simonb 
   1191  1.1  simonb 	if (p == curproc) {
   1192  1.1  simonb 		/* Store pointer to new current pmap. */
   1193  1.1  simonb 		curpm = pcb->pcb_pmreal;
   1194  1.1  simonb 	}
   1195  1.1  simonb #endif
   1196  1.1  simonb }
   1197  1.1  simonb 
   1198  1.1  simonb /*
   1199  1.1  simonb  * Deactivate the specified process's address space.
   1200  1.1  simonb  */
   1201  1.1  simonb void
   1202  1.1  simonb pmap_deactivate(struct proc *p)
   1203  1.1  simonb {
   1204  1.1  simonb }
   1205  1.1  simonb 
   1206  1.1  simonb /*
   1207  1.1  simonb  * Synchronize caches corresponding to [addr, addr+len) in p.
   1208  1.1  simonb  */
   1209  1.1  simonb void
   1210  1.1  simonb pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   1211  1.1  simonb {
   1212  1.1  simonb 	struct pmap *pm = p->p_vmspace->vm_map.pmap;
   1213  1.1  simonb 	int msr, ctx, opid;
   1214  1.1  simonb 
   1215  1.1  simonb 
   1216  1.1  simonb 	/*
   1217  1.1  simonb 	 * Need to turn off IMMU and switch to user context.
   1218  1.1  simonb 	 * (icbi uses DMMU).
   1219  1.1  simonb 	 */
   1220  1.1  simonb 	if (!(ctx = pm->pm_ctx)) {
   1221  1.1  simonb 		/* No context -- assign it one */
   1222  1.1  simonb 		ctx_alloc(pm);
   1223  1.1  simonb 		ctx = pm->pm_ctx;
   1224  1.1  simonb 	}
   1225  1.1  simonb 	__asm __volatile("mfmsr %0;"
   1226  1.1  simonb 		"li %1, 0x20;"
   1227  1.1  simonb 		"andc %1,%0,%1;"
   1228  1.1  simonb 		"mtmsr %1;"
   1229  1.1  simonb 		"sync;isync;"
   1230  1.1  simonb 		"mfpid %1;"
   1231  1.1  simonb 		"mtpid %2;"
   1232  1.1  simonb 		"sync; isync;"
   1233  1.1  simonb 		"1:"
   1234  1.1  simonb 		"dcbf 0,%3;"
   1235  1.1  simonb 		"icbi 0,%3;"
   1236  1.1  simonb 		"addi %3,%3,32;"
   1237  1.1  simonb 		"addic. %4,%4,-32;"
   1238  1.1  simonb 		"bge 1b;"
   1239  1.1  simonb 		"mtpid %1;"
   1240  1.1  simonb 		"mtmsr %0;"
   1241  1.1  simonb 		"sync; isync"
   1242  1.1  simonb 		: "=&r" (msr), "=&r" (opid)
   1243  1.1  simonb 		: "r" (ctx), "r" (va), "r" (len));
   1244  1.1  simonb }
   1245  1.1  simonb 
   1246  1.1  simonb 
   1247  1.1  simonb /* This has to be done in real mode !!! */
   1248  1.1  simonb void
   1249  1.1  simonb ppc4xx_tlb_flush(vaddr_t va, int pid)
   1250  1.1  simonb {
   1251  1.1  simonb 	u_long i, found;
   1252  1.1  simonb 	u_long msr;
   1253  1.1  simonb 
   1254  1.1  simonb 	/* If there's no context then it can't be mapped. */
   1255  1.1  simonb 	if (!pid) return;
   1256  1.1  simonb 
   1257  1.1  simonb 	asm("mfpid %1;"			/* Save PID */
   1258  1.1  simonb 		"mfmsr %2;"		/* Save MSR */
   1259  1.1  simonb 		"li %0,0;"		/* Now clear MSR */
   1260  1.1  simonb 		"mtmsr %0;"
   1261  1.1  simonb 		"mtpid %4;"		/* Set PID */
   1262  1.1  simonb 		"sync;"
   1263  1.1  simonb 		"tlbsx. %0,0,%3;"	/* Search TLB */
   1264  1.1  simonb 		"sync;"
   1265  1.1  simonb 		"mtpid %1;"		/* Restore PID */
   1266  1.1  simonb 		"mtmsr %2;"		/* Restore MSR */
   1267  1.1  simonb 		"sync;isync;"
   1268  1.1  simonb 		"li %1,1;"
   1269  1.1  simonb 		"beq 1f;"
   1270  1.1  simonb 		"li %1,0;"
   1271  1.1  simonb 		"1:"
   1272  1.1  simonb 		: "=&r" (i), "=&r" (found), "=&r" (msr)
   1273  1.1  simonb 		: "r" (va), "r" (pid));
   1274  1.1  simonb 	if (found && !TLB_LOCKED(i)) {
   1275  1.1  simonb 
   1276  1.1  simonb 		/* Now flush translation */
   1277  1.1  simonb 		asm volatile(
   1278  1.1  simonb 			"tlbwe %0,%1,0;"
   1279  1.1  simonb 			"sync;isync;"
   1280  1.1  simonb 			: : "r" (0), "r" (i));
   1281  1.1  simonb 
   1282  1.1  simonb 		tlb_info[i].ti_ctx = 0;
   1283  1.1  simonb 		tlb_info[i].ti_flags = 0;
   1284  1.1  simonb 		tlbnext = i;
   1285  1.1  simonb 		/* Successful flushes */
   1286  1.1  simonb 		tlbflush_ev.ev_count++;
   1287  1.1  simonb 	}
   1288  1.1  simonb }
   1289  1.1  simonb 
   1290  1.1  simonb void
   1291  1.1  simonb ppc4xx_tlb_flush_all(void)
   1292  1.1  simonb {
   1293  1.1  simonb 	u_long i;
   1294  1.1  simonb 
   1295  1.1  simonb 	for (i = 0; i < NTLB; i++)
   1296  1.1  simonb 		if (!TLB_LOCKED(i)) {
   1297  1.1  simonb 			asm volatile(
   1298  1.1  simonb 				"tlbwe %0,%1,0;"
   1299  1.1  simonb 				"sync;isync;"
   1300  1.1  simonb 				: : "r" (0), "r" (i));
   1301  1.1  simonb 			tlb_info[i].ti_ctx = 0;
   1302  1.1  simonb 			tlb_info[i].ti_flags = 0;
   1303  1.1  simonb 		}
   1304  1.1  simonb 
   1305  1.1  simonb 	asm volatile("sync;isync");
   1306  1.1  simonb }
   1307  1.1  simonb 
   1308  1.1  simonb /* Find a TLB entry to evict. */
   1309  1.1  simonb static int
   1310  1.1  simonb ppc4xx_tlb_find_victim(void)
   1311  1.1  simonb {
   1312  1.1  simonb 	int flags;
   1313  1.1  simonb 
   1314  1.1  simonb 	for (;;) {
   1315  1.1  simonb 		if (++tlbnext >= NTLB)
   1316  1.1  simonb 			tlbnext = TLB_NRESERVED;
   1317  1.1  simonb 		flags = tlb_info[tlbnext].ti_flags;
   1318  1.1  simonb 		if (!(flags & TLBF_USED) ||
   1319  1.1  simonb 			(flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
   1320  1.1  simonb 			u_long va, stack = (u_long)&va;
   1321  1.1  simonb 
   1322  1.1  simonb 			if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) &&
   1323  1.1  simonb 			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
   1324  1.1  simonb 			     (flags & TLBF_USED)) {
   1325  1.1  simonb 				/* Kernel stack page */
   1326  1.1  simonb 				flags |= TLBF_USED;
   1327  1.1  simonb 				tlb_info[tlbnext].ti_flags = flags;
   1328  1.1  simonb 			} else {
   1329  1.1  simonb 				/* Found it! */
   1330  1.1  simonb 				return (tlbnext);
   1331  1.1  simonb 			}
   1332  1.1  simonb 		} else {
   1333  1.1  simonb 			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
   1334  1.1  simonb 		}
   1335  1.1  simonb 	}
   1336  1.1  simonb }
   1337  1.1  simonb 
   1338  1.1  simonb void
   1339  1.1  simonb ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
   1340  1.1  simonb {
   1341  1.1  simonb 	u_long th, tl, idx;
   1342  1.1  simonb 	tlbpid_t pid;
   1343  1.1  simonb 	u_short msr;
   1344  1.1  simonb 	int s;
   1345  1.1  simonb 
   1346  1.1  simonb 	tlbenter_ev.ev_count++;
   1347  1.1  simonb 
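                      	/*
                      	 * Build the two halves of the TLB entry: the hi word gets the
                      	 * EPN, the page-size field recovered from the PTE and the valid
                      	 * bit, while the lo word is the software PTE itself, which this
                      	 * pmap keeps in (roughly) hardware TLBLO layout -- RPN plus
                      	 * zone, attribute and permission bits.
                      	 */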
   1348  1.1  simonb 	th = (va & TLB_EPN_MASK) |
   1349  1.1  simonb 		(((pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT) << TLB_SIZE_SHFT) |
   1350  1.1  simonb 		TLB_VALID;
   1351  1.1  simonb 	tl = pte;
   1352  1.1  simonb 
   1353  1.1  simonb 	s = splhigh();
   1354  1.1  simonb 	idx = ppc4xx_tlb_find_victim();
   1355  1.1  simonb 
   1356  1.1  simonb #ifdef DIAGNOSTIC
   1357  1.1  simonb 	if ((idx < TLB_NRESERVED) || (idx >= NTLB)) {
    1358  1.1  simonb 		panic("ppc4xx_tlb_enter: replacing entry %ld\n", idx);
   1359  1.1  simonb 	}
   1360  1.1  simonb #endif
   1361  1.1  simonb 
   1362  1.1  simonb 	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
   1363  1.1  simonb 	tlb_info[idx].ti_ctx = ctx;
   1364  1.1  simonb 	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;
   1365  1.1  simonb 
   1366  1.1  simonb 	asm volatile(
   1367  1.1  simonb 		"mfmsr %0;"			/* Save MSR */
   1368  1.1  simonb 		"li %1,0;"
   1369  1.1  simonb 		"tlbwe %1,%3,0;"		/* Invalidate old entry. */
   1370  1.1  simonb 		"mtmsr %1;"			/* Clear MSR */
   1371  1.1  simonb 		"mfpid %1;"			/* Save old PID */
   1372  1.1  simonb 		"mtpid %2;"			/* Load translation ctx */
   1373  1.1  simonb 		"sync; isync;"
   1374  1.1  simonb #ifdef DEBUG
   1375  1.1  simonb 		"andi. %3,%3,63;"
   1376  1.1  simonb 		"tweqi %3,0;" 			/* XXXXX DEBUG trap on index 0 */
   1377  1.1  simonb #endif
   1378  1.1  simonb 		"tlbwe %4,%3,1; tlbwe %5,%3,0;"	/* Set TLB */
   1379  1.1  simonb 		"sync; isync;"
   1380  1.1  simonb 		"mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
   1381  1.1  simonb 		"sync; isync;"
   1382  1.1  simonb 	: "=&r" (msr), "=&r" (pid)
   1383  1.1  simonb 	: "r" (ctx), "r" (idx), "r" (tl), "r" (th));
   1384  1.1  simonb 	splx(s);
   1385  1.1  simonb }
   1386  1.1  simonb 
   1387  1.1  simonb void
   1388  1.1  simonb ppc4xx_tlb_unpin(int i)
   1389  1.1  simonb {
   1390  1.1  simonb 
   1391  1.1  simonb 	if (i == -1)
   1392  1.1  simonb 		for (i = 0; i < TLB_NRESERVED; i++)
   1393  1.1  simonb 			tlb_info[i].ti_flags &= ~TLBF_LOCKED;
   1394  1.1  simonb 	else
   1395  1.1  simonb 		tlb_info[i].ti_flags &= ~TLBF_LOCKED;
   1396  1.1  simonb }
   1397  1.1  simonb 
   1398  1.1  simonb void
   1399  1.1  simonb ppc4xx_tlb_init(void)
   1400  1.1  simonb {
   1401  1.1  simonb 	int i;
   1402  1.1  simonb 
   1403  1.1  simonb 	/* Mark reserved TLB entries */
   1404  1.1  simonb 	for (i = 0; i < TLB_NRESERVED; i++) {
   1405  1.1  simonb 		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
   1406  1.1  simonb 		tlb_info[i].ti_ctx = KERNEL_PID;
   1407  1.1  simonb 	}
   1408  1.1  simonb 
   1409  1.1  simonb 	/* Setup security zones */
   1410  1.1  simonb 	/* Z0 - accessible by kernel only if TLB entry permissions allow
   1411  1.1  simonb 	 * Z1,Z2 - access is controlled by TLB entry permissions
   1412  1.1  simonb 	 * Z3 - full access regardless of TLB entry permissions
   1413  1.1  simonb 	 */
   1414  1.1  simonb 
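                      	/*
                      	 * ZPR holds a 2-bit field per zone, zone 0 in the most
                      	 * significant bit pair.  0x1b000000 is binary 00 01 10 11 in
                      	 * the top byte, i.e. Z0=0, Z1=1, Z2=2 and Z3=3 as described
                      	 * above; the remaining zones are left at 0.
                      	 */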
   1415  1.1  simonb 	asm volatile(
   1416  1.1  simonb 		"mtspr %0,%1;"
   1417  1.1  simonb 		"sync;"
   1418  1.1  simonb 		::  "K"(SPR_ZPR), "r" (0x1b000000));
   1419  1.1  simonb }
   1420  1.1  simonb 
   1421  1.1  simonb 
   1422  1.1  simonb /*
   1423  1.1  simonb  * We should pass the ctx in from trap code.
   1424  1.1  simonb  */
   1425  1.1  simonb int
   1426  1.1  simonb pmap_tlbmiss(vaddr_t va, int ctx)
   1427  1.1  simonb {
   1428  1.1  simonb 	volatile u_int *pte;
   1429  1.1  simonb 	u_long tte;
   1430  1.1  simonb 
   1431  1.1  simonb 	tlbmiss_ev.ev_count++;
   1432  1.1  simonb 
   1433  1.1  simonb 	/*
   1434  1.1  simonb 	 * XXXX We will reserve 0-0x80000000 for va==pa mappings.
   1435  1.1  simonb 	 */
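                      	/*
                      	 * Two cases: a miss in a user context (or on a kernel address
                      	 * at or above 0x80000000) is resolved by looking the PTE up in
                      	 * the owning pmap, while a kernel-context miss below 0x80000000
                      	 * is satisfied by fabricating a va==pa 16MB mapping on the fly.
                      	 * A nonzero return tells the trap code the miss could not be
                      	 * handled here.
                      	 */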
   1436  1.1  simonb 	if (ctx != KERNEL_PID || (va & 0x80000000)) {
   1437  1.1  simonb 		pte = pte_find((struct pmap *)ctxbusy[ctx], va);
   1438  1.1  simonb 		if (pte == NULL) {
   1439  1.1  simonb 			/* Map unmanaged addresses directly for kernel access */
   1440  1.1  simonb 			return 1;
   1441  1.1  simonb 		}
   1442  1.1  simonb 		tte = *pte;
   1443  1.1  simonb 		if (tte == 0) {
   1444  1.1  simonb 			return 1;
   1445  1.1  simonb 		}
   1446  1.1  simonb 	} else {
   1447  1.1  simonb 		/* Create a 16MB writeable mapping. */
   1448  1.1  simonb #ifdef NOCACHE
   1449  1.1  simonb 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I | TTE_WR;
   1450  1.1  simonb #else
   1451  1.1  simonb 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
   1452  1.1  simonb #endif
   1453  1.1  simonb 	}
   1454  1.1  simonb 	tlbhit_ev.ev_count++;
   1455  1.1  simonb 	ppc4xx_tlb_enter(ctx, va, tte);
   1456  1.1  simonb 
   1457  1.1  simonb 	return 0;
   1458  1.1  simonb }
   1459  1.1  simonb 
   1460  1.1  simonb /*
   1461  1.1  simonb  * Flush all the entries matching a context from the TLB.
   1462  1.1  simonb  */
   1463  1.1  simonb static int
   1464  1.1  simonb ctx_flush(int cnum)
   1465  1.1  simonb {
   1466  1.1  simonb 	int i;
   1467  1.1  simonb 
   1468  1.1  simonb 	/* We gotta steal this context */
   1469  1.1  simonb 	for (i = TLB_NRESERVED; i < NTLB; i++) {
   1470  1.1  simonb 		if (tlb_info[i].ti_ctx == cnum) {
   1471  1.1  simonb 			/* Can't steal ctx if it has a locked entry. */
   1472  1.1  simonb 			if (TLB_LOCKED(i)) {
   1473  1.1  simonb #ifdef DIAGNOSTIC
   1474  1.1  simonb 				printf("ctx_flush: can't invalidate "
   1475  1.1  simonb 					"locked mapping %d "
   1476  1.1  simonb 					"for context %d\n", i, cnum);
   1477  1.1  simonb 				Debugger();
   1478  1.1  simonb #endif
   1479  1.1  simonb 				return (1);
   1480  1.1  simonb 			}
   1481  1.1  simonb #ifdef DIAGNOSTIC
   1482  1.1  simonb 			if (i < TLB_NRESERVED)
   1483  1.1  simonb 				panic("TLB entry %d not locked\n", i);
   1484  1.1  simonb #endif
    1485  1.1  simonb 			/* Invalidate this particular TLB entry (we know it is not locked) */
   1486  1.1  simonb 			asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i));
   1487  1.1  simonb 			tlb_info[i].ti_flags = 0;
   1488  1.1  simonb 		}
   1489  1.1  simonb 	}
   1490  1.1  simonb 	return (0);
   1491  1.1  simonb }
   1492  1.1  simonb 
   1493  1.1  simonb /*
   1494  1.1  simonb  * Allocate a context.  If necessary, steal one from someone else.
   1495  1.1  simonb  *
   1496  1.1  simonb  * The new context is flushed from the TLB before returning.
   1497  1.1  simonb  */
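                      /*
                       * The search starts just past the context handed out last time and
                       * sweeps ctxbusy[] round-robin looking for a free slot; if every slot
                       * is busy the next one in line is stolen -- its TLB entries are
                       * flushed via ctx_flush() and the owning pmap's pm_ctx is cleared so
                       * that pmap will reallocate a context on its next activation.  A
                       * context that still has a locked TLB entry cannot be stolen, so a
                       * ctx_flush() failure simply advances to the next candidate.
                       */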
   1498  1.1  simonb int
   1499  1.1  simonb ctx_alloc(struct pmap *pm)
   1500  1.1  simonb {
   1501  1.1  simonb 	int s, cnum;
   1502  1.1  simonb 	static int next = MINCTX;
   1503  1.1  simonb 
   1504  1.1  simonb 	if (pm == pmap_kernel()) {
   1505  1.1  simonb #ifdef DIAGNOSTIC
   1506  1.1  simonb 		printf("ctx_alloc: kernel pmap!\n");
   1507  1.1  simonb #endif
   1508  1.1  simonb 		return (0);
   1509  1.1  simonb 	}
   1510  1.1  simonb 	s = splvm();
   1511  1.1  simonb 
   1512  1.1  simonb 	/* Find a likely context. */
   1513  1.1  simonb 	cnum = next;
   1514  1.1  simonb 	do {
    1515  1.1  simonb 		if ((++cnum) >= NUMCTX)
   1516  1.1  simonb 			cnum = MINCTX;
   1517  1.1  simonb 	} while (ctxbusy[cnum] != NULL && cnum != next);
   1518  1.1  simonb 
   1519  1.1  simonb 	/* Now clean it out */
   1520  1.1  simonb oops:
   1521  1.1  simonb 	if (cnum < MINCTX)
   1522  1.1  simonb 		cnum = MINCTX; /* Never steal ctx 0 or 1 */
   1523  1.1  simonb 	if (ctx_flush(cnum)) {
   1524  1.1  simonb 		/* oops -- something's wired. */
    1525  1.1  simonb 		if ((++cnum) >= NUMCTX)
   1526  1.1  simonb 			cnum = MINCTX;
   1527  1.1  simonb 		goto oops;
   1528  1.1  simonb 	}
   1529  1.1  simonb 
   1530  1.1  simonb 	if (ctxbusy[cnum]) {
   1531  1.1  simonb #ifdef DEBUG
   1532  1.1  simonb 		/* We should identify this pmap and clear it */
   1533  1.1  simonb 		printf("Warning: stealing context %d\n", cnum);
   1534  1.1  simonb #endif
   1535  1.1  simonb 		ctxbusy[cnum]->pm_ctx = 0;
   1536  1.1  simonb 	}
   1537  1.1  simonb 	ctxbusy[cnum] = pm;
   1538  1.1  simonb 	next = cnum;
   1539  1.1  simonb 	splx(s);
   1540  1.1  simonb 	pm->pm_ctx = cnum;
   1541  1.1  simonb 
   1542  1.1  simonb 	return cnum;
   1543  1.1  simonb }
   1544  1.1  simonb 
   1545  1.1  simonb /*
   1546  1.1  simonb  * Give away a context.
   1547  1.1  simonb  */
   1548  1.1  simonb void
   1549  1.1  simonb ctx_free(struct pmap *pm)
   1550  1.1  simonb {
   1551  1.1  simonb 	int oldctx;
   1552  1.1  simonb 
   1553  1.1  simonb 	oldctx = pm->pm_ctx;
   1554  1.1  simonb 
   1555  1.1  simonb 	if (oldctx == 0)
   1556  1.1  simonb 		panic("ctx_free: freeing kernel context");
   1557  1.1  simonb #ifdef DIAGNOSTIC
   1558  1.1  simonb 	if (ctxbusy[oldctx] == 0)
   1559  1.1  simonb 		printf("ctx_free: freeing free context %d\n", oldctx);
   1560  1.1  simonb 	if (ctxbusy[oldctx] != pm) {
    1561  1.1  simonb 		printf("ctx_free: freeing someone else's context\n "
    1562  1.1  simonb 		       "ctxbusy[%d] = %p, pm = %p\n",
   1563  1.1  simonb 		       oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
   1564  1.1  simonb 		Debugger();
   1565  1.1  simonb 	}
   1566  1.1  simonb #endif
   1567  1.1  simonb 	/* We should verify it has not been stolen and reallocated... */
   1568  1.1  simonb 	ctxbusy[oldctx] = NULL;
   1569  1.1  simonb 	ctx_flush(oldctx);
   1570  1.1  simonb }
   1571  1.5     eeh 
   1572  1.1  simonb 
   1573  1.1  simonb #ifdef DEBUG
   1574  1.1  simonb /*
   1575  1.1  simonb  * Test ref/modify handling.
   1576  1.1  simonb  */
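                      /*
                       * pmap_testout() allocates one page and drives it through the
                       * reference/modify machinery: reads and writes through a direct
                       * mapping are interleaved with pmap_clear_reference(),
                       * pmap_clear_modify(), pmap_protect() and pmap_page_protect() calls,
                       * printing the ref/mod bits after each step so the results can be
                       * checked by eye.
                       */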
   1577  1.1  simonb void pmap_testout __P((void));
   1578  1.1  simonb void
   1579  1.1  simonb pmap_testout()
   1580  1.1  simonb {
   1581  1.1  simonb 	vaddr_t va;
   1582  1.1  simonb 	volatile int *loc;
   1583  1.1  simonb 	int val = 0;
   1584  1.1  simonb 	paddr_t pa;
   1585  1.1  simonb 	struct vm_page *pg;
   1586  1.1  simonb 	int ref, mod;
   1587  1.1  simonb 
   1588  1.1  simonb 	/* Allocate a page */
   1589  1.1  simonb 	va = (vaddr_t)uvm_km_alloc1(kernel_map, NBPG, 1);
   1590  1.1  simonb 	loc = (int*)va;
   1591  1.1  simonb 
   1592  1.1  simonb 	pmap_extract(pmap_kernel(), va, &pa);
   1593  1.1  simonb 	pg = PHYS_TO_VM_PAGE(pa);
   1594  1.1  simonb 	pmap_unwire(pmap_kernel(), va);
   1595  1.1  simonb 
   1596  1.1  simonb 	pmap_remove(pmap_kernel(), va, va+1);
   1597  1.1  simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1598  1.4   chris 	pmap_update(pmap_kernel());
   1599  1.1  simonb 
   1600  1.1  simonb 	/* Now clear reference and modify */
   1601  1.1  simonb 	ref = pmap_clear_reference(pg);
   1602  1.1  simonb 	mod = pmap_clear_modify(pg);
   1603  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1604  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1605  1.1  simonb 	       ref, mod);
   1606  1.1  simonb 
   1607  1.1  simonb 	/* Check it's properly cleared */
   1608  1.1  simonb 	ref = pmap_is_referenced(pg);
   1609  1.1  simonb 	mod = pmap_is_modified(pg);
   1610  1.1  simonb 	printf("Checking cleared page: ref %d, mod %d\n",
   1611  1.1  simonb 	       ref, mod);
   1612  1.1  simonb 
   1613  1.1  simonb 	/* Reference page */
   1614  1.1  simonb 	val = *loc;
   1615  1.1  simonb 
   1616  1.1  simonb 	ref = pmap_is_referenced(pg);
   1617  1.1  simonb 	mod = pmap_is_modified(pg);
   1618  1.1  simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1619  1.1  simonb 	       ref, mod, val);
   1620  1.1  simonb 
   1621  1.1  simonb 	/* Now clear reference and modify */
   1622  1.1  simonb 	ref = pmap_clear_reference(pg);
   1623  1.1  simonb 	mod = pmap_clear_modify(pg);
   1624  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1625  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1626  1.1  simonb 	       ref, mod);
   1627  1.1  simonb 
   1628  1.1  simonb 	/* Modify page */
   1629  1.1  simonb 	*loc = 1;
   1630  1.1  simonb 
   1631  1.1  simonb 	ref = pmap_is_referenced(pg);
   1632  1.1  simonb 	mod = pmap_is_modified(pg);
   1633  1.1  simonb 	printf("Modified page: ref %d, mod %d\n",
   1634  1.1  simonb 	       ref, mod);
   1635  1.1  simonb 
   1636  1.1  simonb 	/* Now clear reference and modify */
   1637  1.1  simonb 	ref = pmap_clear_reference(pg);
   1638  1.1  simonb 	mod = pmap_clear_modify(pg);
   1639  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1640  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1641  1.1  simonb 	       ref, mod);
   1642  1.1  simonb 
   1643  1.1  simonb 	/* Check it's properly cleared */
   1644  1.1  simonb 	ref = pmap_is_referenced(pg);
   1645  1.1  simonb 	mod = pmap_is_modified(pg);
   1646  1.1  simonb 	printf("Checking cleared page: ref %d, mod %d\n",
   1647  1.1  simonb 	       ref, mod);
   1648  1.1  simonb 
   1649  1.1  simonb 	/* Modify page */
   1650  1.1  simonb 	*loc = 1;
   1651  1.1  simonb 
   1652  1.1  simonb 	ref = pmap_is_referenced(pg);
   1653  1.1  simonb 	mod = pmap_is_modified(pg);
   1654  1.1  simonb 	printf("Modified page: ref %d, mod %d\n",
   1655  1.1  simonb 	       ref, mod);
   1656  1.1  simonb 
   1657  1.1  simonb 	/* Check pmap_protect() */
   1658  1.1  simonb 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
   1659  1.4   chris 	pmap_update(pmap_kernel());
   1660  1.1  simonb 	ref = pmap_is_referenced(pg);
   1661  1.1  simonb 	mod = pmap_is_modified(pg);
   1662  1.1  simonb 	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
   1663  1.1  simonb 	       ref, mod);
   1664  1.1  simonb 
   1665  1.1  simonb 	/* Now clear reference and modify */
   1666  1.1  simonb 	ref = pmap_clear_reference(pg);
   1667  1.1  simonb 	mod = pmap_clear_modify(pg);
   1668  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1669  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1670  1.1  simonb 	       ref, mod);
   1671  1.1  simonb 
   1672  1.1  simonb 	/* Reference page */
   1673  1.1  simonb 	val = *loc;
   1674  1.1  simonb 
   1675  1.1  simonb 	ref = pmap_is_referenced(pg);
   1676  1.1  simonb 	mod = pmap_is_modified(pg);
   1677  1.1  simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1678  1.1  simonb 	       ref, mod, val);
   1679  1.1  simonb 
   1680  1.1  simonb 	/* Now clear reference and modify */
   1681  1.1  simonb 	ref = pmap_clear_reference(pg);
   1682  1.1  simonb 	mod = pmap_clear_modify(pg);
   1683  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1684  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1685  1.1  simonb 	       ref, mod);
   1686  1.1  simonb 
   1687  1.1  simonb 	/* Modify page */
   1688  1.1  simonb #if 0
   1689  1.1  simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1690  1.4   chris 	pmap_update(pmap_kernel());
   1691  1.1  simonb #endif
   1692  1.1  simonb 	*loc = 1;
   1693  1.1  simonb 
   1694  1.1  simonb 	ref = pmap_is_referenced(pg);
   1695  1.1  simonb 	mod = pmap_is_modified(pg);
   1696  1.1  simonb 	printf("Modified page: ref %d, mod %d\n",
   1697  1.1  simonb 	       ref, mod);
   1698  1.1  simonb 
   1699  1.1  simonb 	/* Check pmap_protect() */
   1700  1.1  simonb 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
   1701  1.4   chris 	pmap_update(pmap_kernel());
   1702  1.1  simonb 	ref = pmap_is_referenced(pg);
   1703  1.1  simonb 	mod = pmap_is_modified(pg);
   1704  1.1  simonb 	printf("pmap_protect(): ref %d, mod %d\n",
   1705  1.1  simonb 	       ref, mod);
   1706  1.1  simonb 
   1707  1.1  simonb 	/* Now clear reference and modify */
   1708  1.1  simonb 	ref = pmap_clear_reference(pg);
   1709  1.1  simonb 	mod = pmap_clear_modify(pg);
   1710  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1711  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1712  1.1  simonb 	       ref, mod);
   1713  1.1  simonb 
   1714  1.1  simonb 	/* Reference page */
   1715  1.1  simonb 	val = *loc;
   1716  1.1  simonb 
   1717  1.1  simonb 	ref = pmap_is_referenced(pg);
   1718  1.1  simonb 	mod = pmap_is_modified(pg);
   1719  1.1  simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1720  1.1  simonb 	       ref, mod, val);
   1721  1.1  simonb 
   1722  1.1  simonb 	/* Now clear reference and modify */
   1723  1.1  simonb 	ref = pmap_clear_reference(pg);
   1724  1.1  simonb 	mod = pmap_clear_modify(pg);
   1725  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1726  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1727  1.1  simonb 	       ref, mod);
   1728  1.1  simonb 
   1729  1.1  simonb 	/* Modify page */
   1730  1.1  simonb #if 0
   1731  1.1  simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1732  1.4   chris 	pmap_update(pmap_kernel());
   1733  1.1  simonb #endif
   1734  1.1  simonb 	*loc = 1;
   1735  1.1  simonb 
   1736  1.1  simonb 	ref = pmap_is_referenced(pg);
   1737  1.1  simonb 	mod = pmap_is_modified(pg);
   1738  1.1  simonb 	printf("Modified page: ref %d, mod %d\n",
   1739  1.1  simonb 	       ref, mod);
   1740  1.1  simonb 
    1741  1.1  simonb 	/* Check pmap_page_protect() */
   1742  1.1  simonb 	pmap_page_protect(pg, VM_PROT_READ);
   1743  1.1  simonb 	ref = pmap_is_referenced(pg);
   1744  1.1  simonb 	mod = pmap_is_modified(pg);
   1745  1.1  simonb 	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
   1746  1.1  simonb 	       ref, mod);
   1747  1.1  simonb 
   1748  1.1  simonb 	/* Now clear reference and modify */
   1749  1.1  simonb 	ref = pmap_clear_reference(pg);
   1750  1.1  simonb 	mod = pmap_clear_modify(pg);
   1751  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1752  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1753  1.1  simonb 	       ref, mod);
   1754  1.1  simonb 
   1755  1.1  simonb 	/* Reference page */
   1756  1.1  simonb 	val = *loc;
   1757  1.1  simonb 
   1758  1.1  simonb 	ref = pmap_is_referenced(pg);
   1759  1.1  simonb 	mod = pmap_is_modified(pg);
   1760  1.1  simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1761  1.1  simonb 	       ref, mod, val);
   1762  1.1  simonb 
   1763  1.1  simonb 	/* Now clear reference and modify */
   1764  1.1  simonb 	ref = pmap_clear_reference(pg);
   1765  1.1  simonb 	mod = pmap_clear_modify(pg);
   1766  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1767  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1768  1.1  simonb 	       ref, mod);
   1769  1.1  simonb 
   1770  1.1  simonb 	/* Modify page */
   1771  1.1  simonb #if 0
   1772  1.1  simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1773  1.4   chris 	pmap_update(pmap_kernel());
   1774  1.1  simonb #endif
   1775  1.1  simonb 	*loc = 1;
   1776  1.1  simonb 
   1777  1.1  simonb 	ref = pmap_is_referenced(pg);
   1778  1.1  simonb 	mod = pmap_is_modified(pg);
   1779  1.1  simonb 	printf("Modified page: ref %d, mod %d\n",
   1780  1.1  simonb 	       ref, mod);
   1781  1.1  simonb 
    1782  1.1  simonb 	/* Check pmap_page_protect() */
   1783  1.1  simonb 	pmap_page_protect(pg, VM_PROT_NONE);
   1784  1.1  simonb 	ref = pmap_is_referenced(pg);
   1785  1.1  simonb 	mod = pmap_is_modified(pg);
   1786  1.1  simonb 	printf("pmap_page_protect(): ref %d, mod %d\n",
   1787  1.1  simonb 	       ref, mod);
   1788  1.1  simonb 
   1789  1.1  simonb 	/* Now clear reference and modify */
   1790  1.1  simonb 	ref = pmap_clear_reference(pg);
   1791  1.1  simonb 	mod = pmap_clear_modify(pg);
   1792  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1793  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1794  1.1  simonb 	       ref, mod);
   1795  1.1  simonb 
   1796  1.1  simonb 
   1797  1.1  simonb 	/* Reference page */
   1798  1.1  simonb 	val = *loc;
   1799  1.1  simonb 
   1800  1.1  simonb 	ref = pmap_is_referenced(pg);
   1801  1.1  simonb 	mod = pmap_is_modified(pg);
   1802  1.1  simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1803  1.1  simonb 	       ref, mod, val);
   1804  1.1  simonb 
   1805  1.1  simonb 	/* Now clear reference and modify */
   1806  1.1  simonb 	ref = pmap_clear_reference(pg);
   1807  1.1  simonb 	mod = pmap_clear_modify(pg);
   1808  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1809  1.1  simonb 	       (void *)(u_long)va, (long)pa,
   1810  1.1  simonb 	       ref, mod);
   1811  1.1  simonb 
   1812  1.1  simonb 	/* Modify page */
   1813  1.1  simonb #if 0
   1814  1.1  simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1815  1.4   chris 	pmap_update(pmap_kernel());
   1816  1.1  simonb #endif
   1817  1.1  simonb 	*loc = 1;
   1818  1.1  simonb 
   1819  1.1  simonb 	ref = pmap_is_referenced(pg);
   1820  1.1  simonb 	mod = pmap_is_modified(pg);
   1821  1.1  simonb 	printf("Modified page: ref %d, mod %d\n",
   1822  1.1  simonb 	       ref, mod);
   1823  1.1  simonb 
   1824  1.1  simonb 	/* Unmap page */
   1825  1.1  simonb 	pmap_remove(pmap_kernel(), va, va+1);
   1826  1.4   chris 	pmap_update(pmap_kernel());
   1827  1.1  simonb 	ref = pmap_is_referenced(pg);
   1828  1.1  simonb 	mod = pmap_is_modified(pg);
   1829  1.1  simonb 	printf("Unmapped page: ref %d, mod %d\n", ref, mod);
   1830  1.1  simonb 
   1831  1.1  simonb 	/* Now clear reference and modify */
   1832  1.1  simonb 	ref = pmap_clear_reference(pg);
   1833  1.1  simonb 	mod = pmap_clear_modify(pg);
   1834  1.1  simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1835  1.1  simonb 	       (void *)(u_long)va, (long)pa, ref, mod);
   1836  1.1  simonb 
   1837  1.1  simonb 	/* Check it's properly cleared */
   1838  1.1  simonb 	ref = pmap_is_referenced(pg);
   1839  1.1  simonb 	mod = pmap_is_modified(pg);
   1840  1.1  simonb 	printf("Checking cleared page: ref %d, mod %d\n",
   1841  1.1  simonb 	       ref, mod);
   1842  1.1  simonb 
   1843  1.1  simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL,
   1844  1.1  simonb 		VM_PROT_ALL|PMAP_WIRED);
   1845  1.1  simonb 	uvm_km_free(kernel_map, (vaddr_t)va, NBPG);
   1846  1.1  simonb }
   1847  1.1  simonb #endif
   1848