/*	$NetBSD: pmap.c,v 1.95.2.1 2021/04/03 22:28:34 thorpej Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.95.2.1 2021/04/03 22:28:34 thorpej Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_pmap.h"
#endif

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <machine/powerpc.h>

#include <powerpc/pcb.h>

#include <powerpc/spr.h>
#include <powerpc/ibm4xx/spr.h>

#include <powerpc/ibm4xx/cpu.h>
#include <powerpc/ibm4xx/tlb.h>

/*
 * kernmap is an array of PTEs large enough to map in
 * 4GB.  At 16KB/page it is 256K entries or 2MB.
 */
#define KERNMAP_SIZE	((0xffffffffU/PAGE_SIZE)+1)
void *kernmap;

#define MINCTX		2
#define NUMCTX		256

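/*
 * MMU contexts (4xx TLB PIDs): contexts 0 and 1 are reserved for the
 * kernel, since pmap_bootstrap() marks both ctxbusy[] slots as owned by
 * pmap_kernel() and kernel mappings run under KERNEL_PID.  User pmaps
 * are handed contexts by ctx_alloc(), from MINCTX upward.
 */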
volatile struct pmap *ctxbusy[NUMCTX];

#define TLBF_USED	0x1
#define	TLBF_REF	0x2
#define	TLBF_LOCKED	0x4
#define	TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)

typedef struct tlb_info_s {
	char	ti_flags;
	char	ti_ctx;		/* TLB_PID associated with the entry */
	u_int	ti_va;
} tlb_info_t;

volatile tlb_info_t tlb_info[NTLB];
/* We'll use a modified FIFO replacement policy because it's cheap */
volatile int tlbnext;

static int tlb_nreserved = 0;
static int pmap_bootstrap_done = 0;
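
/*
 * tlbnext is the next TLB slot the modified-FIFO replacement will
 * consider; pmap_bootstrap() starts it just past the tlb_nreserved
 * entries that stay pinned.  TLB_LOCKED() identifies entries the
 * replacement code must not evict.
 */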

/* Event counters */
struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbmiss");
struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbflush");
struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbenter");
EVCNT_ATTACH_STATIC(tlbmiss_ev);
EVCNT_ATTACH_STATIC(tlbflush_ev);
EVCNT_ATTACH_STATIC(tlbenter_ev);

struct pmap kernel_pmap_;
struct pmap *const kernel_pmap_ptr = &kernel_pmap_;

static int npgs;
static u_int nextavail;
#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;

/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTRSHFT.
 */
static char *pmap_attrib;

#define PV_WIRED	0x1
#define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
#define PV_UNWIRE(pv)	((pv)->pv_va &= ~PV_WIRED)
#define PV_ISWIRED(pv)	((pv)->pv_va & PV_WIRED)
#define PV_VA(pv)	((pv)->pv_va & ~PV_WIRED)
#define PV_CMPVA(va,pv)	(!(PV_VA(pv) ^ (va)))

struct pv_entry {
	struct pv_entry *pv_next;	/* Linked list of mappings */
	struct pmap *pv_pm;
	vaddr_t pv_va;			/* virtual address of mapping */
};

/* Each index corresponds to a TLB_SIZE_* value. */
static size_t tlbsize[] = {
	1024, 		/* TLB_SIZE_1K */
	4096, 		/* TLB_SIZE_4K */
	16384, 		/* TLB_SIZE_16K */
	65536, 		/* TLB_SIZE_64K */
	262144, 	/* TLB_SIZE_256K */
	1048576, 	/* TLB_SIZE_1M */
	4194304, 	/* TLB_SIZE_4M */
	16777216, 	/* TLB_SIZE_16M */
};

struct pv_entry *pv_table;
static struct pool pv_pool;

static int pmap_initialized;

static int ctx_flush(int);

struct pv_entry *pa_to_pv(paddr_t);
static inline char *pa_to_attr(paddr_t);

static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
static inline int pte_enter(struct pmap *, vaddr_t, u_int);

static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, int);
static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);

static inline void tlb_invalidate_entry(int);

static int ppc4xx_tlb_size_mask(size_t, int *, int *);


struct pv_entry *
pa_to_pv(paddr_t pa)
{
	uvm_physseg_t bank;
	psize_t pg;

	bank = uvm_physseg_find(atop(pa), &pg);
	if (bank == UVM_PHYSSEG_TYPE_INVALID)
		return NULL;
	return &uvm_physseg_get_pmseg(bank)->pvent[pg];
}

static inline char *
pa_to_attr(paddr_t pa)
{
	uvm_physseg_t bank;
	psize_t pg;

	bank = uvm_physseg_find(atop(pa), &pg);
	if (bank == UVM_PHYSSEG_TYPE_INVALID)
		return NULL;
	return &uvm_physseg_get_pmseg(bank)->attrs[pg];
}
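
/*
 * Software page tables are a two-level structure hanging off each pmap:
 * pm_ptbl[STIDX(va)] points at a page-sized array of software PTEs and
 * PTIDX(va) selects the entry within it.  The per-segment pages are
 * allocated lazily by pte_enter(), so pte_find() must return NULL when
 * no table exists yet.
 */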

/*
 * Insert PTE into page table.
 */
int
pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int oldpte;

	if (!pm->pm_ptbl[seg]) {
		/* Don't allocate a page to clear a non-existent mapping. */
		if (!pte)
			return (0);
		/* Allocate a page XXXX this will sleep! */
		pm->pm_ptbl[seg] =
		    (uint *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
		    UVM_KMF_WIRED | UVM_KMF_ZERO);
	}
	oldpte = pm->pm_ptbl[seg][ptn];
	pm->pm_ptbl[seg][ptn] = pte;

	/* Flush entry. */
	ppc4xx_tlb_flush(va, pm->pm_ctx);
	if (oldpte != pte) {
		if (pte == 0)
			pm->pm_stats.resident_count--;
		else
			pm->pm_stats.resident_count++;
	}
	return (1);
}

/*
 * Get a pointer to a PTE in a page table.
 */
volatile u_int *
pte_find(struct pmap *pm, vaddr_t va)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);

	if (pm->pm_ptbl[seg])
		return (&pm->pm_ptbl[seg][ptn]);

	return (NULL);
}

/*
 * This is called during initppc, before the system is really initialized.
 */
void
pmap_bootstrap(u_int kernelstart, u_int kernelend)
{
	struct mem_region *mp, *mp1;
	int cnt, i;
	u_int s, e, sz;

	tlbnext = tlb_nreserved;

	/*
	 * Allocate the kernel page table at the end of
	 * kernel space so it's in the locked TTE.
	 */
	kernmap = (void *)kernelend;

	/*
	 * Initialize kernel page table.
	 */
	for (i = 0; i < STSZ; i++) {
		pmap_kernel()->pm_ptbl[i] = 0;
	}
	ctxbusy[0] = ctxbusy[1] = pmap_kernel();

	/*
	 * Announce page-size to the VM-system
	 */
	uvmexp.pagesize = NBPG;
	uvm_md_init();

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++) {
		physmem += btoc(mp->size);
		printf("+%lx,",mp->size);
	}
	printf("\n");
	ppc4xx_tlb_init();
	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++)
		cnt++;

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PGOFSET;
	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		printf("%08x-%08x -> ",s,e);
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Look whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		printf("%08x-%08x = %x\n",s,e,sz);
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
				(cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}
		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);
		for (mp1 = avail; mp1 < mp; mp1++)
			if (s < mp1->start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
#ifndef MSGBUFADDR
	/*
	 * allow for msgbuf
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;
	for (mp1 = avail; mp1->size; mp1++)
		if (mp1->size >= sz)
			mp = mp1;
	if (mp == NULL)
		panic("not enough memory?");

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;
	if (mp->size <= 0)
		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
#endif

	for (mp = avail; mp->size; mp++)
		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
			atop(mp->start), atop(mp->start + mp->size),
			VM_FREELIST_DEFAULT);

	/*
	 * Initialize kernel pmap and hardware.
	 */
	/* Set up the TLB PID allocator so it knows we are already using PID 1 */
	pmap_kernel()->pm_ctx = KERNEL_PID;
	nextavail = avail->start;

	pmap_bootstrap_done = 1;
}

/*
 * Restrict given range to physical memory
 *
 * (Used by /dev/mem)
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start &&
		    *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	struct pv_entry *pv;
	vsize_t sz;
	vaddr_t addr;
	int i, s;
	int bank;
	char *attr;

	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
	sz = round_page(sz);
	addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
	s = splvm();
	pv = pv_table = (struct pv_entry *)addr;
	for (i = npgs; --i >= 0;)
		pv++->pv_pm = NULL;
	pmap_attrib = (char *)pv;
	memset(pv, 0, npgs);

	pv = pv_table;
	attr = pmap_attrib;
	for (bank = uvm_physseg_get_first();
	     uvm_physseg_valid_p(bank);
	     bank = uvm_physseg_get_next(bank)) {
		sz = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
		uvm_physseg_get_pmseg(bank)->pvent = pv;
		uvm_physseg_get_pmseg(bank)->attrs = attr;
		pv += sz;
		attr += sz;
	}

	pmap_initialized = 1;
	splx(s);

	/* Setup a pool for additional pvlist structures */
	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL,
	    IPL_VM);
}

/*
 * How much virtual space is available to the kernel?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{

#if 0
	/*
	 * Reserve one segment for kernel virtual memory
	 */
	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
	*end = *start + SEGMENT_LENGTH;
#else
	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
#endif
}

#ifdef PMAP_GROWKERNEL
/*
 * Preallocate kernel page tables to a specified VA.
 * This simply loops through the first TTE for each
 * page table from the beginning of the kernel pmap,
 * reads the entry, and if the result is
 * zero (either invalid entry or no page table) it stores
 * a zero there, populating page tables in the process.
 * This is not the most efficient technique but I don't
 * expect it to be called that often.
 */
extern struct vm_page *vm_page_alloc1(void);
extern void vm_page_free1(struct vm_page *);

vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;

vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	int s;
	int seg;
	paddr_t pg;
	struct pmap *pm = pmap_kernel();

	s = splvm();

	/* Align with the start of a page table */
	for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
	     kbreak += PTMAP) {
		seg = STIDX(kbreak);

		if (pte_find(pm, kbreak))
			continue;

		if (uvm.page_init_done) {
			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
		} else {
			if (!uvm_page_physget(&pg))
				panic("pmap_growkernel: no memory");
		}
		if (!pg)
			panic("pmap_growkernel: no pages");
		pmap_zero_page((paddr_t)pg);

		/* XXX This is based on all physmem being addressable */
		pm->pm_ptbl[seg] = (u_int *)pg;
	}
	splx(s);
	return (kbreak);
}

/*
 *	vm_page_alloc1:
 *
 *	Allocate and return a memory cell with no associated object.
 */
struct vm_page *
vm_page_alloc1(void)
{
	struct vm_page *pg;

	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (pg) {
		pg->wire_count = 1;	/* no mappings yet */
		pg->flags &= ~PG_BUSY;	/* never busy */
	}
	return pg;
}

/*
 *	vm_page_free1:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free1(struct vm_page *pg)
{
#ifdef DIAGNOSTIC
	if (pg->flags != (PG_CLEAN|PG_FAKE)) {
		printf("Freeing invalid page %p\n", pg);
		printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(pg));
#ifdef DDB
		Debugger();
#endif
		return;
	}
#endif
	pg->flags |= PG_BUSY;
	pg->wire_count = 0;
	uvm_pagefree(pg);
}
#endif

/*
 * Create and return a physical map.
 */
struct pmap *
pmap_create(void)
{
	struct pmap *pm;

	pm = kmem_alloc(sizeof(*pm), KM_SLEEP);
	memset(pm, 0, sizeof *pm);
	pm->pm_refs = 1;
	return pm;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{
	int i;

	if (--pm->pm_refs > 0) {
		return;
	}
	KASSERT(pm->pm_stats.resident_count == 0);
	KASSERT(pm->pm_stats.wired_count == 0);
	for (i = 0; i < STSZ; i++)
		if (pm->pm_ptbl[i]) {
			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
			    PAGE_SIZE, UVM_KMF_WIRED);
			pm->pm_ptbl[i] = NULL;
		}
	if (pm->pm_ctx)
		ctx_free(pm);
	kmem_free(pm, sizeof(*pm));
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
	  vsize_t len, vaddr_t src_addr)
{
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(paddr_t pa)
{

#ifdef PPC_4XX_NOCACHE
	memset((void *)pa, 0, PAGE_SIZE);
#else
	int i;

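	/*
	 * dcbz zeroes an entire data cache line at once, so step through
	 * the page one CACHELINESIZE block per iteration.
	 */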
	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
		__asm volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{

	memcpy((void *)dst, (void *)src, PAGE_SIZE);
	dcache_wbinv_page(dst);
}

/*
 * This returns != 0 on success.
 */
static inline int
pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa, int flags)
{
	struct pv_entry *pv, *npv = NULL;
	int s;

	if (!pmap_initialized)
		return 0;

	s = splvm();
	pv = pa_to_pv(pa);
	if (!pv->pv_pm) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_pm = pm;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pool_get(&pv_pool, PR_NOWAIT);
		if (npv == NULL) {
			if ((flags & PMAP_CANFAIL) == 0)
				panic("pmap_enter_pv: failed");
			splx(s);
			return 0;
		}
		npv->pv_va = va;
		npv->pv_pm = pm;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
		pv = npv;
	}
	if (flags & PMAP_WIRED) {
		PV_WIRE(pv);
		pm->pm_stats.wired_count++;
	}
	splx(s);
	return (1);
}

static void
pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
{
	struct pv_entry *pv, *npv;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);
	if (!pv)
		return;

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
		if (PV_ISWIRED(pv)) {
			pm->pm_stats.wired_count--;
		}
		if ((npv = pv->pv_next)) {
			*pv = *npv;
			pool_put(&pv_pool, npv);
		} else
			pv->pv_pm = NULL;
	} else {
		for (; (npv = pv->pv_next) != NULL; pv = npv)
			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
				break;
		if (npv) {
			pv->pv_next = npv->pv_next;
			if (PV_ISWIRED(npv)) {
				pm->pm_stats.wired_count--;
			}
			pool_put(&pv_pool, npv);
		}
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
int
pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	int s;
	u_int tte;
	bool managed;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + PAGE_SIZE);

	if (flags & PMAP_WIRED)
		flags |= prot;

	managed = uvm_pageismanaged(pa);

	/*
	 * Generate TTE.
	 */
	tte = TTE_PA(pa);
	/* XXXX -- need to support multiple page sizes. */
	tte |= TTE_SZ_16K;
#ifdef	DIAGNOSTIC
	if ((flags & (PMAP_NOCACHE | PME_WRITETHROUG)) ==
		(PMAP_NOCACHE | PME_WRITETHROUG))
		panic("pmap_enter: uncached & writethrough");
#endif
	if (flags & PMAP_NOCACHE)
		/* Must be I/O mapping */
		tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
	tte |= TTE_I;
#else
	else if (flags & PME_WRITETHROUG)
		/* Uncached and writethrough are not compatible */
		tte |= TTE_W;
#endif
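	/*
	 * The TTE zone field selects one of the 4xx protection zones
	 * (configured via the ZPR register): kernel mappings go in
	 * ZONE_PRIV, user mappings in ZONE_USER.
	 */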
    846       1.1    simonb 	if (pm == pmap_kernel())
    847       1.1    simonb 		tte |= TTE_ZONE(ZONE_PRIV);
    848       1.1    simonb 	else
    849       1.1    simonb 		tte |= TTE_ZONE(ZONE_USER);
    850       1.1    simonb 
    851       1.1    simonb 	if (flags & VM_PROT_WRITE)
    852       1.1    simonb 		tte |= TTE_WR;
    853       1.1    simonb 
    854      1.26       chs 	if (flags & VM_PROT_EXECUTE)
    855      1.26       chs 		tte |= TTE_EX;
    856      1.26       chs 
    857       1.1    simonb 	/*
    858       1.1    simonb 	 * Now record mapping for later back-translation.
    859       1.1    simonb 	 */
    860       1.1    simonb 	if (pmap_initialized && managed) {
    861       1.1    simonb 		char *attr;
    862       1.1    simonb 
    863      1.49   hannken 		if (!pmap_enter_pv(pm, va, pa, flags)) {
    864       1.1    simonb 			/* Could not enter pv on a managed page */
    865       1.1    simonb 			return 1;
    866       1.1    simonb 		}
    867       1.1    simonb 
    868       1.1    simonb 		/* Now set attributes. */
    869       1.1    simonb 		attr = pa_to_attr(pa);
    870       1.1    simonb #ifdef DIAGNOSTIC
    871       1.1    simonb 		if (!attr)
    872      1.13    provos 			panic("managed but no attr");
    873       1.1    simonb #endif
    874       1.1    simonb 		if (flags & VM_PROT_ALL)
    875      1.30       chs 			*attr |= PMAP_ATTR_REF;
    876       1.1    simonb 		if (flags & VM_PROT_WRITE)
    877      1.30       chs 			*attr |= PMAP_ATTR_CHG;
    878       1.1    simonb 	}
    879       1.1    simonb 
    880       1.1    simonb 	s = splvm();
    881       1.1    simonb 
    882       1.1    simonb 	/* Insert page into page table. */
    883       1.1    simonb 	pte_enter(pm, va, tte);
    884       1.1    simonb 
    885       1.1    simonb 	/* If this is a real fault, enter it in the tlb */
    886       1.1    simonb 	if (tte && ((flags & PMAP_WIRED) == 0)) {
    887      1.71  kiyohara 		int s2 = splhigh();
    888       1.1    simonb 		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
    889      1.71  kiyohara 		splx(s2);
    890       1.1    simonb 	}
    891       1.1    simonb 	splx(s);
    892       1.6    simonb 
    893       1.6    simonb 	/* Flush the real memory from the instruction cache. */
    894       1.6    simonb 	if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
    895       1.6    simonb 		__syncicache((void *)pa, PAGE_SIZE);
    896       1.6    simonb 
    897       1.1    simonb 	return 0;
    898       1.1    simonb }
    899       1.1    simonb 
    900       1.1    simonb void
    901       1.1    simonb pmap_unwire(struct pmap *pm, vaddr_t va)
    902       1.1    simonb {
    903      1.33       chs 	struct pv_entry *pv;
    904       1.1    simonb 	paddr_t pa;
    905      1.30       chs 	int s;
    906       1.1    simonb 
    907       1.1    simonb 	if (!pmap_extract(pm, va, &pa)) {
    908       1.1    simonb 		return;
    909       1.1    simonb 	}
    910       1.1    simonb 
    911       1.1    simonb 	pv = pa_to_pv(pa);
    912      1.30       chs 	if (!pv)
    913      1.30       chs 		return;
    914       1.1    simonb 
    915      1.30       chs 	s = splvm();
    916      1.33       chs 	while (pv != NULL) {
    917      1.33       chs 		if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
    918      1.33       chs 			if (PV_ISWIRED(pv)) {
    919      1.33       chs 				PV_UNWIRE(pv);
    920      1.30       chs 				pm->pm_stats.wired_count--;
    921      1.30       chs 			}
    922       1.1    simonb 			break;
    923       1.1    simonb 		}
    924      1.33       chs 		pv = pv->pv_next;
    925       1.1    simonb 	}
    926       1.1    simonb 	splx(s);
    927       1.1    simonb }
    928       1.1    simonb 
    929       1.1    simonb void
    930      1.59    cegger pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
    931       1.1    simonb {
    932       1.1    simonb 	int s;
    933       1.1    simonb 	u_int tte;
    934       1.1    simonb 	struct pmap *pm = pmap_kernel();
    935       1.1    simonb 
    936       1.1    simonb 	/*
    937       1.1    simonb 	 * Generate TTE.
    938       1.1    simonb 	 *
    939       1.1    simonb 	 * XXXX
    940       1.1    simonb 	 *
    941       1.1    simonb 	 * Since the kernel does not handle execution privileges properly,
    942       1.1    simonb 	 * we will handle read and execute permissions together.
    943       1.1    simonb 	 */
    944       1.1    simonb 	tte = 0;
    945       1.1    simonb 	if (prot & VM_PROT_ALL) {
    946       1.1    simonb 
    947       1.1    simonb 		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
    948       1.1    simonb 		/* XXXX -- need to support multiple page sizes. */
    949       1.1    simonb 		tte |= TTE_SZ_16K;
    950       1.1    simonb #ifdef DIAGNOSTIC
    951      1.70      matt 		if ((flags & (PMAP_NOCACHE | PME_WRITETHROUG)) ==
    952      1.70      matt 			(PMAP_NOCACHE | PME_WRITETHROUG))
    953      1.13    provos 			panic("pmap_kenter_pa: uncached & writethrough");
    954       1.1    simonb #endif
    955      1.70      matt 		if (flags & PMAP_NOCACHE)
    956       1.1    simonb 			/* Must be I/O mapping */
    957       1.1    simonb 			tte |= TTE_I | TTE_G;
    958       1.8   thorpej #ifdef PPC_4XX_NOCACHE
    959       1.1    simonb 		tte |= TTE_I;
    960       1.1    simonb #else
    961       1.1    simonb 		else if (prot & PME_WRITETHROUG)
    962       1.1    simonb 			/* Uncached and writethrough are not compatible */
    963       1.1    simonb 			tte |= TTE_W;
    964       1.1    simonb #endif
    965       1.1    simonb 		if (prot & VM_PROT_WRITE)
    966       1.1    simonb 			tte |= TTE_WR;
    967       1.1    simonb 	}
    968       1.1    simonb 
    969       1.1    simonb 	s = splvm();
    970       1.1    simonb 
    971       1.1    simonb 	/* Insert page into page table. */
    972       1.1    simonb 	pte_enter(pm, va, tte);
    973       1.1    simonb 	splx(s);
    974       1.1    simonb }
    975       1.1    simonb 
    976       1.1    simonb void
    977       1.1    simonb pmap_kremove(vaddr_t va, vsize_t len)
    978       1.1    simonb {
    979       1.1    simonb 
    980       1.1    simonb 	while (len > 0) {
    981       1.1    simonb 		pte_enter(pmap_kernel(), va, 0);
    982       1.1    simonb 		va += PAGE_SIZE;
    983       1.1    simonb 		len -= PAGE_SIZE;
    984       1.1    simonb 	}
    985       1.1    simonb }
    986       1.1    simonb 
    987       1.1    simonb /*
    988       1.1    simonb  * Remove the given range of mapping entries.
    989       1.1    simonb  */
    990       1.1    simonb void
    991       1.1    simonb pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
    992       1.1    simonb {
    993       1.1    simonb 	int s;
    994       1.1    simonb 	paddr_t pa;
    995       1.1    simonb 	volatile u_int *ptp;
    996       1.1    simonb 
    997       1.1    simonb 	s = splvm();
    998       1.1    simonb 	while (va < endva) {
    999       1.1    simonb 
   1000       1.1    simonb 		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
   1001       1.1    simonb 			pa = TTE_PA(pa);
   1002       1.1    simonb 			pmap_remove_pv(pm, va, pa);
   1003       1.1    simonb 			*ptp = 0;
   1004       1.1    simonb 			ppc4xx_tlb_flush(va, pm->pm_ctx);
   1005       1.1    simonb 			pm->pm_stats.resident_count--;
   1006       1.1    simonb 		}
   1007      1.19   thorpej 		va += PAGE_SIZE;
   1008       1.1    simonb 	}
   1009       1.1    simonb 
   1010       1.1    simonb 	splx(s);
   1011       1.1    simonb }
   1012       1.1    simonb 
   1013       1.1    simonb /*
   1014       1.1    simonb  * Get the physical page address for the given pmap/virtual address.
   1015       1.1    simonb  */
   1016      1.45   thorpej bool
   1017       1.1    simonb pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
   1018       1.1    simonb {
   1019       1.1    simonb 	int seg = STIDX(va);
   1020       1.1    simonb 	int ptn = PTIDX(va);
   1021       1.1    simonb 	u_int pa = 0;
   1022      1.30       chs 	int s;
   1023       1.1    simonb 
   1024      1.30       chs 	s = splvm();
   1025      1.77       rin 	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn]) && pap) {
   1026       1.1    simonb 		*pap = TTE_PA(pa) | (va & PGOFSET);
   1027       1.1    simonb 	}
   1028       1.1    simonb 	splx(s);
   1029       1.1    simonb 	return (pa != 0);
   1030       1.1    simonb }
   1031       1.1    simonb 
   1032       1.1    simonb /*
   1033       1.1    simonb  * Lower the protection on the specified range of this pmap.
   1034       1.1    simonb  *
   1035       1.1    simonb  * There are only two cases: either the protection is going to 0,
   1036       1.1    simonb  * or it is going to read-only.
   1037       1.1    simonb  */
   1038       1.1    simonb void
   1039       1.1    simonb pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   1040       1.1    simonb {
   1041       1.1    simonb 	volatile u_int *ptp;
   1042      1.26       chs 	int s, bic;
   1043       1.1    simonb 
   1044      1.26       chs 	if ((prot & VM_PROT_READ) == 0) {
   1045      1.26       chs 		pmap_remove(pm, sva, eva);
   1046      1.26       chs 		return;
   1047      1.26       chs 	}
   1048      1.26       chs 	bic = 0;
   1049      1.26       chs 	if ((prot & VM_PROT_WRITE) == 0) {
   1050      1.26       chs 		bic |= TTE_WR;
   1051      1.26       chs 	}
   1052      1.26       chs 	if ((prot & VM_PROT_EXECUTE) == 0) {
   1053      1.26       chs 		bic |= TTE_EX;
   1054      1.26       chs 	}
   1055      1.26       chs 	if (bic == 0) {
   1056      1.26       chs 		return;
   1057      1.26       chs 	}
   1058      1.26       chs 	s = splvm();
   1059      1.26       chs 	while (sva < eva) {
   1060      1.26       chs 		if ((ptp = pte_find(pm, sva)) != NULL) {
   1061      1.26       chs 			*ptp &= ~bic;
   1062      1.26       chs 			ppc4xx_tlb_flush(sva, pm->pm_ctx);
   1063       1.1    simonb 		}
   1064      1.26       chs 		sva += PAGE_SIZE;
   1065       1.1    simonb 	}
   1066      1.26       chs 	splx(s);
   1067       1.1    simonb }
   1068       1.1    simonb 
   1069      1.45   thorpej bool
   1070      1.30       chs pmap_check_attr(struct vm_page *pg, u_int mask, int clear)
   1071       1.1    simonb {
   1072      1.30       chs 	paddr_t pa;
   1073       1.1    simonb 	char *attr;
   1074      1.30       chs 	int s, rv;
   1075       1.1    simonb 
   1076       1.1    simonb 	/*
   1077       1.1    simonb 	 * First modify bits in cache.
   1078       1.1    simonb 	 */
   1079      1.30       chs 	pa = VM_PAGE_TO_PHYS(pg);
   1080       1.1    simonb 	attr = pa_to_attr(pa);
   1081       1.1    simonb 	if (attr == NULL)
   1082      1.46   thorpej 		return false;
   1083       1.1    simonb 
   1084      1.30       chs 	s = splvm();
   1085       1.1    simonb 	rv = ((*attr & mask) != 0);
   1086      1.11       eeh 	if (clear) {
   1087       1.1    simonb 		*attr &= ~mask;
   1088      1.30       chs 		pmap_page_protect(pg, mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
   1089      1.11       eeh 	}
   1090       1.1    simonb 	splx(s);
   1091       1.1    simonb 	return rv;
   1092       1.1    simonb }
   1093       1.1    simonb 
   1094       1.1    simonb 
   1095       1.1    simonb /*
   1096       1.1    simonb  * Lower the protection on the specified physical page.
   1097       1.1    simonb  *
   1098       1.1    simonb  * There are only two cases: either the protection is going to 0,
   1099       1.1    simonb  * or it is going to read-only.
   1100       1.1    simonb  */
   1101       1.1    simonb void
   1102       1.1    simonb pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   1103       1.1    simonb {
   1104       1.1    simonb 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1105       1.1    simonb 	vaddr_t va;
   1106       1.1    simonb 	struct pv_entry *pvh, *pv, *npv;
   1107       1.1    simonb 	struct pmap *pm;
   1108       1.1    simonb 
   1109       1.1    simonb 	pvh = pa_to_pv(pa);
   1110       1.1    simonb 	if (pvh == NULL)
   1111       1.1    simonb 		return;
   1112       1.1    simonb 
   1113       1.1    simonb 	/* Handle extra pvs which may be deleted in the operation */
   1114       1.1    simonb 	for (pv = pvh->pv_next; pv; pv = npv) {
   1115       1.1    simonb 		npv = pv->pv_next;
   1116       1.1    simonb 
   1117       1.1    simonb 		pm = pv->pv_pm;
   1118      1.91       rin 		va = PV_VA(pv);
   1119      1.26       chs 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
   1120       1.1    simonb 	}
   1121       1.1    simonb 	/* Now check the head pv */
   1122       1.1    simonb 	if (pvh->pv_pm) {
   1123       1.1    simonb 		pv = pvh;
   1124       1.1    simonb 		pm = pv->pv_pm;
   1125      1.91       rin 		va = PV_VA(pv);
   1126      1.26       chs 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
   1127       1.1    simonb 	}
   1128       1.1    simonb }
   1129       1.1    simonb 
   1130       1.1    simonb /*
   1131       1.1    simonb  * Activate the address space for the specified process.  If the process
   1132       1.1    simonb  * is the current process, load the new MMU context.
   1133       1.1    simonb  */
   1134       1.1    simonb void
   1135      1.17   thorpej pmap_activate(struct lwp *l)
   1136       1.1    simonb {
   1137       1.1    simonb #if 0
   1138      1.65     rmind 	struct pcb *pcb = lwp_getpcb(l);
   1139      1.17   thorpej 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
   1140       1.1    simonb 
   1141       1.1    simonb 	/*
   1142      1.61     skrll 	 * XXX Normally performed in cpu_lwp_fork().
   1143       1.1    simonb 	 */
   1144      1.17   thorpej 	printf("pmap_activate(%p), pmap=%p\n",l,pmap);
   1145      1.25      matt 	pcb->pcb_pm = pmap;
   1146       1.1    simonb #endif
   1147       1.1    simonb }
   1148       1.1    simonb 
   1149       1.1    simonb /*
   1150       1.1    simonb  * Deactivate the specified process's address space.
   1151       1.1    simonb  */
   1152       1.1    simonb void
   1153      1.17   thorpej pmap_deactivate(struct lwp *l)
   1154       1.1    simonb {
   1155       1.1    simonb }
   1156       1.1    simonb 
   1157       1.1    simonb /*
    1158       1.1    simonb  * Synchronize caches corresponding to [va, va + len) in p.
   1159       1.1    simonb  */
   1160       1.1    simonb void
   1161       1.1    simonb pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   1162       1.1    simonb {
   1163      1.95       rin 	struct pmap *pm = p->p_vmspace->vm_map.pmap;
   1164      1.18   hannken 
   1165      1.94       rin 	if (__predict_true(p == curproc)) {
   1166      1.94       rin 		int msr, ctx, opid;
   1167       1.1    simonb 
   1168      1.94       rin 		/*
   1169      1.94       rin 		 * Take it easy! TLB miss handler takes care of us.
   1170      1.94       rin 		 */
   1171      1.94       rin 
   1172      1.94       rin 		/*
   1173      1.94       rin 	 	 * Need to turn off IMMU and switch to user context.
   1174      1.94       rin 		 * (icbi uses DMMU).
   1175      1.94       rin 		 */
   1176      1.94       rin 
   1177      1.94       rin 		if (!(ctx = pm->pm_ctx)) {
   1178      1.94       rin 			/* No context -- assign it one */
   1179      1.94       rin 			ctx_alloc(pm);
   1180      1.94       rin 			ctx = pm->pm_ctx;
   1181      1.94       rin 		}
   1182      1.94       rin 
   1183      1.94       rin 		__asm volatile(
   1184      1.94       rin 			"mfmsr %0;"
   1185      1.94       rin 			"li %1,0x20;"		/* Turn off IMMU */
   1186      1.94       rin 			"andc %1,%0,%1;"
   1187      1.94       rin 			"ori %1,%1,0x10;"	/* Turn on DMMU for sure */
   1188      1.94       rin 			"mtmsr %1;"
   1189      1.94       rin 			"isync;"
   1190      1.94       rin 			"mfpid %1;"
   1191      1.94       rin 			"mtpid %2;"
   1192      1.94       rin 			"isync;"
   1193      1.94       rin 		"1:"
   1194      1.94       rin 			"dcbst 0,%3;"
   1195      1.94       rin 			"icbi 0,%3;"
   1196      1.94       rin 			"add %3,%3,%5;"
   1197      1.94       rin 			"sub. %4,%4,%5;"
   1198      1.94       rin 			"bge 1b;"
   1199      1.94       rin 			"sync;"
   1200      1.94       rin 			"mtpid %1;"
   1201      1.94       rin 			"mtmsr %0;"
   1202      1.94       rin 			"isync;"
   1203      1.94       rin 			: "=&r" (msr), "=&r" (opid)
   1204      1.94       rin 			: "r" (ctx), "r" (va), "r" (len), "r" (CACHELINESIZE));
   1205      1.94       rin 	} else {
   1206      1.94       rin 		paddr_t pa;
   1207      1.94       rin 		vaddr_t tva, eva;
   1208      1.94       rin 		int tlen;
   1209      1.94       rin 
   1210      1.94       rin 		/*
    1211      1.94       rin 		 * For p != curproc, we cannot rely upon the TLB miss handler
    1212      1.94       rin 		 * in user context.  Therefore, extract the pa and operate against it.
   1213      1.94       rin 		 *
   1214      1.94       rin 		 * Note that va below VM_MIN_KERNEL_ADDRESS is reserved for
   1215      1.94       rin 		 * direct mapping.
   1216      1.94       rin 		 */
   1217      1.94       rin 
   1218      1.94       rin 		for (tva = va; len > 0; tva = eva, len -= tlen) {
   1219      1.94       rin 			eva = uimin(tva + len, trunc_page(tva + PAGE_SIZE));
   1220      1.94       rin 			tlen = eva - tva;
   1221      1.94       rin 			if (!pmap_extract(pm, tva, &pa)) {
   1222      1.94       rin 				/* XXX should be already unmapped */
   1223      1.94       rin 				continue;
   1224      1.94       rin 			}
   1225      1.94       rin 			__syncicache((void *)pa, tlen);
   1226      1.94       rin 		}
   1227       1.1    simonb 	}
   1228       1.1    simonb }
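
                             /*
                              * Note: pmap_procwr() is the MI hook called after another process's
                              * instruction stream has been modified (ptrace(2) breakpoints and the
                              * like), which is why the p != curproc case above cannot lean on the
                              * TLB miss handler and instead synchronizes through the physical address.
                              */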
   1229       1.1    simonb 
   1230      1.93       rin static inline void
   1231      1.93       rin tlb_invalidate_entry(int i)
   1232      1.93       rin {
   1233      1.93       rin #ifdef PMAP_TLBDEBUG
   1234      1.93       rin 	/*
   1235      1.93       rin 	 * Clear only TLBHI[V] bit so that we can track invalidated entry.
   1236      1.93       rin 	 */
   1237      1.93       rin 	register_t msr, pid, hi;
   1238      1.93       rin 
   1239      1.93       rin 	KASSERT(mfspr(SPR_PID) == KERNEL_PID);
   1240      1.93       rin 
   1241      1.93       rin 	__asm volatile(
   1242      1.93       rin 		"mfmsr	%0;"
   1243      1.93       rin 		"li	%1,0;"
   1244      1.93       rin 		"mtmsr	%1;"
   1245      1.93       rin 		"mfpid	%1;"
   1246      1.93       rin 		"tlbre	%2,%3,0;"
   1247      1.93       rin 		"andc	%2,%2,%4;"
   1248      1.93       rin 		"tlbwe	%2,%3,0;"
   1249      1.93       rin 		"mtpid	%1;"
   1250      1.93       rin 		"mtmsr	%0;"
   1251      1.93       rin 		"isync;"
   1252      1.93       rin 		: "=&r" (msr), "=&r" (pid), "=&r" (hi)
   1253      1.93       rin 		: "r" (i), "r" (TLB_VALID));
   1254      1.93       rin #else
   1255      1.93       rin 	/*
   1256      1.93       rin 	 * Just clear entire TLBHI register.
   1257      1.93       rin 	 */
   1258      1.93       rin 	__asm volatile(
   1259      1.93       rin 		"tlbwe %0,%1,0;"
   1260      1.93       rin 		"isync;"
   1261      1.93       rin 		: : "r" (0), "r" (i));
   1262      1.93       rin #endif
   1263      1.93       rin 
   1264      1.93       rin 	tlb_info[i].ti_ctx = 0;
   1265      1.93       rin 	tlb_info[i].ti_flags = 0;
   1266      1.93       rin }
   1267       1.1    simonb 
   1268       1.1    simonb /* This has to be done in real mode !!! */
   1269       1.1    simonb void
   1270       1.1    simonb ppc4xx_tlb_flush(vaddr_t va, int pid)
   1271       1.1    simonb {
   1272       1.1    simonb 	u_long i, found;
   1273       1.1    simonb 	u_long msr;
   1274       1.1    simonb 
   1275       1.1    simonb 	/* If there's no context then it can't be mapped. */
   1276      1.26       chs 	if (!pid)
   1277      1.26       chs 		return;
   1278       1.1    simonb 
   1279      1.81       rin 	__asm volatile(
   1280      1.81       rin 		"mfpid %1;"		/* Save PID */
   1281       1.1    simonb 		"mfmsr %2;"		/* Save MSR */
   1282       1.1    simonb 		"li %0,0;"		/* Now clear MSR */
   1283       1.1    simonb 		"mtmsr %0;"
   1284      1.88       rin 		"isync;"
   1285       1.1    simonb 		"mtpid %4;"		/* Set PID */
   1286      1.88       rin 		"isync;"
   1287       1.1    simonb 		"tlbsx. %0,0,%3;"	/* Search TLB */
   1288      1.88       rin 		"isync;"
   1289       1.1    simonb 		"mtpid %1;"		/* Restore PID */
   1290       1.1    simonb 		"mtmsr %2;"		/* Restore MSR */
   1291      1.88       rin 		"isync;"
   1292       1.1    simonb 		"li %1,1;"
   1293       1.1    simonb 		"beq 1f;"
   1294       1.1    simonb 		"li %1,0;"
   1295       1.1    simonb 		"1:"
   1296       1.1    simonb 		: "=&r" (i), "=&r" (found), "=&r" (msr)
   1297       1.1    simonb 		: "r" (va), "r" (pid));
   1298       1.1    simonb 	if (found && !TLB_LOCKED(i)) {
   1299       1.1    simonb 		/* Now flush translation */
   1300      1.93       rin 		tlb_invalidate_entry(i);
   1301       1.1    simonb 		tlbnext = i;
   1302       1.1    simonb 		/* Successful flushes */
   1303       1.1    simonb 		tlbflush_ev.ev_count++;
   1304       1.1    simonb 	}
   1305       1.1    simonb }
   1306       1.1    simonb 
   1307       1.1    simonb void
   1308       1.1    simonb ppc4xx_tlb_flush_all(void)
   1309       1.1    simonb {
   1310       1.1    simonb 	u_long i;
   1311       1.1    simonb 
   1312       1.1    simonb 	for (i = 0; i < NTLB; i++)
   1313      1.93       rin 		if (!TLB_LOCKED(i))
   1314      1.93       rin 			tlb_invalidate_entry(i);
   1315       1.1    simonb 
   1316      1.88       rin 	__asm volatile("isync");
   1317       1.1    simonb }
   1318       1.1    simonb 
    1319       1.1    simonb /* Find a TLB entry to evict, using a second-chance scan of the unreserved entries. */
   1320       1.1    simonb static int
   1321       1.1    simonb ppc4xx_tlb_find_victim(void)
   1322       1.1    simonb {
   1323       1.1    simonb 	int flags;
   1324       1.1    simonb 
   1325       1.1    simonb 	for (;;) {
   1326       1.1    simonb 		if (++tlbnext >= NTLB)
   1327      1.42     freza 			tlbnext = tlb_nreserved;
   1328       1.1    simonb 		flags = tlb_info[tlbnext].ti_flags;
   1329      1.12    simonb 		if (!(flags & TLBF_USED) ||
   1330       1.1    simonb 			(flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
   1331       1.1    simonb 			u_long va, stack = (u_long)&va;
   1332       1.1    simonb 
   1333       1.1    simonb 			if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) &&
   1334       1.1    simonb 			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
   1335       1.1    simonb 			     (flags & TLBF_USED)) {
    1336       1.1    simonb 				/* Current kernel stack page -- don't evict, just re-reference */
   1337      1.80       rin 				flags |= TLBF_REF;
   1338       1.1    simonb 				tlb_info[tlbnext].ti_flags = flags;
   1339       1.1    simonb 			} else {
   1340       1.1    simonb 				/* Found it! */
   1341       1.1    simonb 				return (tlbnext);
   1342       1.1    simonb 			}
   1343       1.1    simonb 		} else {
   1344       1.1    simonb 			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
   1345       1.1    simonb 		}
   1346       1.1    simonb 	}
   1347       1.1    simonb }
   1348       1.1    simonb 
   1349       1.1    simonb void
   1350       1.1    simonb ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
   1351       1.1    simonb {
   1352       1.1    simonb 	u_long th, tl, idx;
   1353      1.84       rin 	int msr, pid;
   1354      1.10       eeh 	paddr_t pa;
   1355      1.71  kiyohara 	int sz;
   1356      1.10       eeh 
   1357       1.1    simonb 	tlbenter_ev.ev_count++;
   1358       1.1    simonb 
   1359      1.10       eeh 	sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
   1360      1.10       eeh 	pa = (pte & TTE_RPN_MASK(sz));
   1361      1.10       eeh 	th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
   1362      1.10       eeh 	tl = (pte & ~TLB_RPN_MASK) | pa;
   1363      1.10       eeh 	tl |= ppc4xx_tlbflags(va, pa);
   1364       1.1    simonb 
   1365       1.1    simonb 	idx = ppc4xx_tlb_find_victim();
   1366       1.1    simonb 
   1367       1.1    simonb #ifdef DIAGNOSTIC
   1368      1.81       rin 	if ((idx < tlb_nreserved) || (idx >= NTLB) || (idx & 63) == 0) {
   1369      1.31    simonb 		panic("ppc4xx_tlb_enter: replacing entry %ld", idx);
   1370       1.1    simonb 	}
   1371       1.1    simonb #endif
   1372      1.12    simonb 
   1373       1.1    simonb 	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
   1374       1.1    simonb 	tlb_info[idx].ti_ctx = ctx;
   1375       1.1    simonb 	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;
   1376       1.1    simonb 
   1377      1.39     perry 	__asm volatile(
   1378       1.1    simonb 		"mfmsr %0;"			/* Save MSR */
   1379       1.1    simonb 		"li %1,0;"
   1380      1.82       rin 		"mtmsr %1;"			/* Clear MSR */
   1381      1.88       rin 		"isync;"
   1382       1.1    simonb 		"tlbwe %1,%3,0;"		/* Invalidate old entry. */
   1383       1.1    simonb 		"mfpid %1;"			/* Save old PID */
   1384       1.1    simonb 		"mtpid %2;"			/* Load translation ctx */
   1385      1.88       rin 		"isync;"
   1386       1.1    simonb 		"tlbwe %4,%3,1; tlbwe %5,%3,0;"	/* Set TLB */
   1387      1.88       rin 		"isync;"
   1388       1.1    simonb 		"mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
   1389      1.88       rin 		"isync;"
   1390       1.1    simonb 	: "=&r" (msr), "=&r" (pid)
   1391       1.1    simonb 	: "r" (ctx), "r" (idx), "r" (tl), "r" (th));
   1392       1.1    simonb }
   1393       1.1    simonb 
   1394       1.1    simonb void
   1395       1.1    simonb ppc4xx_tlb_init(void)
   1396       1.1    simonb {
   1397       1.1    simonb 	int i;
   1398       1.1    simonb 
   1399       1.1    simonb 	/* Mark reserved TLB entries */
   1400      1.42     freza 	for (i = 0; i < tlb_nreserved; i++) {
   1401       1.1    simonb 		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
   1402       1.1    simonb 		tlb_info[i].ti_ctx = KERNEL_PID;
   1403       1.1    simonb 	}
   1404       1.1    simonb 
   1405       1.1    simonb 	/* Setup security zones */
   1406       1.1    simonb 	/* Z0 - accessible by kernel only if TLB entry permissions allow
   1407       1.1    simonb 	 * Z1,Z2 - access is controlled by TLB entry permissions
   1408       1.1    simonb 	 * Z3 - full access regardless of TLB entry permissions
   1409       1.1    simonb 	 */
   1410       1.1    simonb 
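                             	/*
                             	 * The ZPR value below encodes that policy: two bits per zone with
                             	 * zone 0 in the most significant bits, so 0x1b000000 is
                             	 * 0b00_01_10_11 << 24, i.e. Z0=0b00, Z1=0b01, Z2=0b10, Z3=0b11,
                             	 * and the remaining zones are left at 0b00.
                             	 */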
   1411      1.39     perry 	__asm volatile(
   1412       1.1    simonb 		"mtspr %0,%1;"
   1413      1.88       rin 		"isync;"
   1414       1.1    simonb 		::  "K"(SPR_ZPR), "r" (0x1b000000));
   1415       1.1    simonb }
   1416       1.1    simonb 
   1417      1.42     freza /*
   1418      1.42     freza  * ppc4xx_tlb_size_mask:
   1419      1.42     freza  *
   1420      1.42     freza  * 	Roundup size to supported page size, return TLBHI mask and real size.
   1421      1.42     freza  */
   1422      1.42     freza static int
   1423      1.42     freza ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz)
   1424      1.42     freza {
   1425      1.42     freza 	int 			i;
   1426      1.42     freza 
   1427      1.42     freza 	for (i = 0; i < __arraycount(tlbsize); i++)
   1428      1.42     freza 		if (size <= tlbsize[i]) {
   1429      1.42     freza 			*mask = (i << TLB_SIZE_SHFT);
   1430      1.42     freza 			*rsiz = tlbsize[i];
   1431      1.42     freza 			return (0);
   1432      1.42     freza 		}
   1433      1.42     freza 	return (EINVAL);
   1434      1.42     freza }
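
                             /*
                              * For example, assuming the usual 40x page-size table in tlbsize[]
                              * (1KB, 4KB, 16KB, 64KB, 256KB, 1MB, 4MB, 16MB) -- a sketch only:
                              *
                              *	int mask, rsiz;
                              *	ppc4xx_tlb_size_mask(8192, &mask, &rsiz);
                              *		=> rsiz == 16384, mask == (2 << TLB_SIZE_SHFT)
                              *
                              * Sizes larger than the biggest supported page size fail with EINVAL.
                              */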
   1435      1.42     freza 
   1436      1.42     freza /*
   1437      1.42     freza  * ppc4xx_tlb_mapiodev:
   1438      1.42     freza  *
   1439      1.42     freza  * 	Lookup virtual address of mapping previously entered via
   1440      1.42     freza  * 	ppc4xx_tlb_reserve. Search TLB directly so that we don't
   1441      1.42     freza  * 	need to waste extra storage for reserved mappings. Note
   1442      1.42     freza  * 	that reading TLBHI also sets PID, but all reserved mappings
   1443      1.42     freza  * 	use KERNEL_PID, so the side effect is nil.
   1444      1.42     freza  */
   1445      1.42     freza void *
   1446      1.42     freza ppc4xx_tlb_mapiodev(paddr_t base, psize_t len)
   1447      1.42     freza {
   1448      1.42     freza 	paddr_t 		pa;
   1449      1.42     freza 	vaddr_t 		va;
   1450      1.42     freza 	u_int 			lo, hi, sz;
   1451      1.42     freza 	int 			i;
   1452      1.42     freza 
   1453      1.42     freza 	/* tlb_nreserved is only allowed to grow, so this is safe. */
   1454      1.42     freza 	for (i = 0; i < tlb_nreserved; i++) {
   1455      1.42     freza 		__asm volatile (
   1456      1.42     freza 		    "	tlbre %0,%2,1 	\n" 	/* TLBLO */
   1457      1.42     freza 		    "	tlbre %1,%2,0 	\n" 	/* TLBHI */
   1458      1.42     freza 		    : "=&r" (lo), "=&r" (hi)
   1459      1.42     freza 		    : "r" (i));
   1460      1.42     freza 
   1461      1.42     freza 		KASSERT(hi & TLB_VALID);
   1462      1.42     freza 		KASSERT(mfspr(SPR_PID) == KERNEL_PID);
   1463      1.42     freza 
   1464      1.42     freza 		pa = (lo & TLB_RPN_MASK);
   1465      1.42     freza 		if (base < pa)
   1466      1.42     freza 			continue;
   1467      1.42     freza 
   1468      1.42     freza 		sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT];
   1469      1.42     freza 		if ((base + len) > (pa + sz))
   1470      1.42     freza 			continue;
   1471      1.42     freza 
   1472      1.42     freza 		va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); 	/* sz = 2^n */
   1473      1.42     freza 		return (void *)(va);
   1474      1.42     freza 	}
   1475      1.42     freza 
   1476      1.42     freza 	return (NULL);
   1477      1.42     freza }
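
                             /*
                              * A minimal usage sketch (the address and length are hypothetical):
                              * code that knows a device window was wired down by ppc4xx_tlb_reserve()
                              * can recover its kernel virtual address with
                              *
                              *	volatile uint8_t *csr;
                              *
                              *	csr = ppc4xx_tlb_mapiodev(0xef600300, 8);
                              *	if (csr == NULL)
                              *		panic("device window not reserved");
                              */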
   1478      1.42     freza 
   1479      1.42     freza /*
   1480      1.42     freza  * ppc4xx_tlb_reserve:
   1481      1.42     freza  *
   1482      1.42     freza  * 	Map physical range to kernel virtual chunk via reserved TLB entry.
   1483      1.42     freza  */
   1484      1.42     freza void
   1485      1.42     freza ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags)
   1486      1.42     freza {
   1487      1.42     freza 	u_int 			lo, hi;
   1488      1.42     freza 	int 			szmask, rsize;
   1489      1.42     freza 
   1490      1.42     freza 	/* Called before pmap_bootstrap(), va outside kernel space. */
   1491      1.42     freza 	KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS);
   1492      1.42     freza 	KASSERT(! pmap_bootstrap_done);
   1493      1.42     freza 	KASSERT(tlb_nreserved < NTLB);
   1494      1.42     freza 
   1495      1.42     freza 	/* Resolve size. */
   1496      1.42     freza 	if (ppc4xx_tlb_size_mask(size, &szmask, &rsize) != 0)
   1497      1.42     freza 		panic("ppc4xx_tlb_reserve: entry %d, %zuB too large",
    1498      1.42     freza 		    tlb_nreserved, size);
   1499      1.42     freza 
   1500      1.42     freza 	/* Real size will be power of two >= 1024, so this is OK. */
   1501      1.42     freza 	pa &= ~(rsize - 1); 	/* RPN */
   1502      1.42     freza 	va &= ~(rsize - 1); 	/* EPN */
   1503      1.42     freza 
   1504      1.42     freza 	lo = pa | TLB_WR | flags;
   1505      1.43  kiyohara 	hi = va | TLB_VALID | szmask;
   1506      1.42     freza 
   1507      1.42     freza #ifdef PPC_4XX_NOCACHE
   1508      1.42     freza 	lo |= TLB_I;
   1509      1.42     freza #endif
   1510      1.42     freza 
   1511      1.42     freza 	__asm volatile(
   1512      1.42     freza 	    "	tlbwe %1,%0,1 	\n" 	/* write TLBLO */
   1513      1.42     freza 	    "	tlbwe %2,%0,0 	\n" 	/* write TLBHI */
   1514      1.42     freza 	    "	isync 		\n"
   1515      1.42     freza 	    : : "r" (tlb_nreserved), "r" (lo), "r" (hi));
   1516      1.42     freza 
   1517      1.42     freza 	tlb_nreserved++;
   1518      1.42     freza }
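
                             /*
                              * A minimal usage sketch (OPB_BASE_PA/OPB_BASE_VA are hypothetical
                              * constants for a board's peripheral-bus window): machdep code calls
                              * this before pmap_bootstrap() to wire down the window, cache-inhibited:
                              *
                              *	ppc4xx_tlb_reserve(OPB_BASE_PA, OPB_BASE_VA, 0x100000, TLB_I);
                              *
                              * The flags argument supplies extra TLBLO bits (TLB_I above); the chosen
                              * va must lie outside the managed kernel VM range, as the KASSERTs above
                              * insist.
                              */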
   1519       1.1    simonb 
   1520       1.1    simonb /*
   1521       1.1    simonb  * We should pass the ctx in from trap code.
   1522       1.1    simonb  */
   1523       1.1    simonb int
   1524       1.1    simonb pmap_tlbmiss(vaddr_t va, int ctx)
   1525       1.1    simonb {
   1526       1.1    simonb 	volatile u_int *pte;
   1527       1.1    simonb 	u_long tte;
   1528       1.1    simonb 
   1529       1.1    simonb 	tlbmiss_ev.ev_count++;
    1530      1.44     freza 	 * We will reserve 0 up to VM_MIN_KERNEL_ADDRESS for va == pa mappings.
    1531      1.44     freza 	 * Physical RAM is expected to live in this range; care must be taken
    1532      1.44     freza 	 * not to clobber 0 up to ${physmem} with device mappings in machdep
    1533      1.44     freza 	 * code.
   1534      1.44     freza 	 * to not clobber 0 upto ${physmem} with device mappings in machdep
   1535      1.44     freza 	 * code.
   1536       1.1    simonb 	 */
   1537      1.63  uebayasi 	if (ctx != KERNEL_PID ||
   1538      1.63  uebayasi 	    (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)) {
   1539      1.36       scw 		pte = pte_find((struct pmap *)__UNVOLATILE(ctxbusy[ctx]), va);
   1540       1.1    simonb 		if (pte == NULL) {
   1541       1.1    simonb 			/* Map unmanaged addresses directly for kernel access */
   1542       1.1    simonb 			return 1;
   1543       1.1    simonb 		}
   1544       1.1    simonb 		tte = *pte;
   1545       1.1    simonb 		if (tte == 0) {
   1546       1.1    simonb 			return 1;
   1547       1.1    simonb 		}
   1548       1.1    simonb 	} else {
   1549      1.16       wiz 		/* Create a 16MB writable mapping. */
   1550       1.8   thorpej #ifdef PPC_4XX_NOCACHE
    1551      1.44     freza 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_I | TTE_WR;
   1552       1.1    simonb #else
   1553       1.1    simonb 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
   1554       1.1    simonb #endif
   1555       1.1    simonb 	}
   1556       1.1    simonb 	ppc4xx_tlb_enter(ctx, va, tte);
   1557       1.1    simonb 
   1558       1.1    simonb 	return 0;
   1559       1.1    simonb }
   1560       1.1    simonb 
   1561       1.1    simonb /*
    1562       1.1    simonb  * Flush all the entries matching a context from the TLB.  Returns nonzero
    1563       1.1    simonb  * if the context has a locked (reserved) entry and cannot be stolen.
                              */
   1564       1.1    simonb static int
   1565       1.1    simonb ctx_flush(int cnum)
   1566       1.1    simonb {
   1567       1.1    simonb 	int i;
   1568       1.1    simonb 
   1569       1.1    simonb 	/* We gotta steal this context */
   1570      1.42     freza 	for (i = tlb_nreserved; i < NTLB; i++) {
   1571       1.1    simonb 		if (tlb_info[i].ti_ctx == cnum) {
   1572       1.1    simonb 			/* Can't steal ctx if it has a locked entry. */
   1573       1.1    simonb 			if (TLB_LOCKED(i)) {
   1574       1.1    simonb #ifdef DIAGNOSTIC
   1575       1.1    simonb 				printf("ctx_flush: can't invalidate "
   1576       1.1    simonb 					"locked mapping %d "
   1577       1.1    simonb 					"for context %d\n", i, cnum);
   1578      1.10       eeh #ifdef DDB
   1579       1.1    simonb 				Debugger();
   1580       1.1    simonb #endif
   1581      1.10       eeh #endif
   1582       1.1    simonb 				return (1);
   1583       1.1    simonb 			}
   1584       1.1    simonb #ifdef DIAGNOSTIC
   1585      1.42     freza 			if (i < tlb_nreserved)
   1586      1.13    provos 				panic("TLB entry %d not locked", i);
   1587       1.1    simonb #endif
   1588      1.93       rin 			/*
   1589      1.93       rin 			 * Invalidate particular TLB entry regardless of
   1590      1.93       rin 			 * locked status
   1591      1.93       rin 			 */
   1592      1.93       rin 			tlb_invalidate_entry(i);
   1593       1.1    simonb 		}
   1594       1.1    simonb 	}
   1595       1.1    simonb 	return (0);
   1596       1.1    simonb }
   1597       1.1    simonb 
   1598       1.1    simonb /*
   1599       1.1    simonb  * Allocate a context.  If necessary, steal one from someone else.
   1600       1.1    simonb  *
   1601       1.1    simonb  * The new context is flushed from the TLB before returning.
   1602       1.1    simonb  */
   1603       1.1    simonb int
   1604       1.1    simonb ctx_alloc(struct pmap *pm)
   1605       1.1    simonb {
   1606       1.1    simonb 	int s, cnum;
   1607       1.1    simonb 	static int next = MINCTX;
   1608       1.1    simonb 
   1609       1.1    simonb 	if (pm == pmap_kernel()) {
   1610       1.1    simonb #ifdef DIAGNOSTIC
   1611       1.1    simonb 		printf("ctx_alloc: kernel pmap!\n");
   1612       1.1    simonb #endif
   1613       1.1    simonb 		return (0);
   1614       1.1    simonb 	}
   1615       1.1    simonb 	s = splvm();
   1616       1.1    simonb 
   1617       1.1    simonb 	/* Find a likely context. */
   1618       1.1    simonb 	cnum = next;
   1619       1.1    simonb 	do {
   1620      1.78       rin 		if ((++cnum) >= NUMCTX)
   1621       1.1    simonb 			cnum = MINCTX;
   1622       1.1    simonb 	} while (ctxbusy[cnum] != NULL && cnum != next);
   1623       1.1    simonb 
   1624       1.1    simonb 	/* Now clean it out */
   1625       1.1    simonb oops:
   1626       1.1    simonb 	if (cnum < MINCTX)
   1627       1.1    simonb 		cnum = MINCTX; /* Never steal ctx 0 or 1 */
   1628       1.1    simonb 	if (ctx_flush(cnum)) {
   1629       1.1    simonb 		/* oops -- something's wired. */
   1630      1.78       rin 		if ((++cnum) >= NUMCTX)
   1631       1.1    simonb 			cnum = MINCTX;
   1632       1.1    simonb 		goto oops;
   1633       1.1    simonb 	}
   1634       1.1    simonb 
   1635       1.1    simonb 	if (ctxbusy[cnum]) {
   1636       1.1    simonb #ifdef DEBUG
   1637       1.1    simonb 		/* We should identify this pmap and clear it */
   1638       1.1    simonb 		printf("Warning: stealing context %d\n", cnum);
   1639       1.1    simonb #endif
   1640       1.1    simonb 		ctxbusy[cnum]->pm_ctx = 0;
   1641       1.1    simonb 	}
   1642       1.1    simonb 	ctxbusy[cnum] = pm;
   1643       1.1    simonb 	next = cnum;
   1644       1.1    simonb 	splx(s);
   1645       1.1    simonb 	pm->pm_ctx = cnum;
   1646       1.1    simonb 
   1647       1.1    simonb 	return cnum;
   1648       1.1    simonb }
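
                             /*
                              * Typical caller idiom (compare pmap_procwr() above): contexts are
                              * handed out lazily, the first time a pmap actually has to be visible
                              * to the MMU:
                              *
                              *	if ((ctx = pm->pm_ctx) == 0)
                              *		ctx = ctx_alloc(pm);
                              */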
   1649       1.1    simonb 
   1650       1.1    simonb /*
   1651       1.1    simonb  * Give away a context.
   1652       1.1    simonb  */
   1653       1.1    simonb void
   1654       1.1    simonb ctx_free(struct pmap *pm)
   1655       1.1    simonb {
   1656       1.1    simonb 	int oldctx;
   1657       1.1    simonb 
   1658       1.1    simonb 	oldctx = pm->pm_ctx;
   1659       1.1    simonb 
   1660       1.1    simonb 	if (oldctx == 0)
   1661       1.1    simonb 		panic("ctx_free: freeing kernel context");
   1662       1.1    simonb #ifdef DIAGNOSTIC
   1663       1.1    simonb 	if (ctxbusy[oldctx] == 0)
   1664       1.1    simonb 		printf("ctx_free: freeing free context %d\n", oldctx);
   1665       1.1    simonb 	if (ctxbusy[oldctx] != pm) {
    1666       1.1    simonb 		printf("ctx_free: freeing someone else's context\n "
   1667       1.1    simonb 		       "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
   1668       1.1    simonb 		       oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
   1669      1.10       eeh #ifdef DDB
   1670       1.1    simonb 		Debugger();
   1671      1.10       eeh #endif
   1672       1.1    simonb 	}
   1673       1.1    simonb #endif
   1674       1.1    simonb 	/* We should verify it has not been stolen and reallocated... */
   1675       1.1    simonb 	ctxbusy[oldctx] = NULL;
   1676       1.1    simonb 	ctx_flush(oldctx);
   1677       1.1    simonb }
   1678       1.5       eeh 
   1679       1.1    simonb 
   1680       1.1    simonb #ifdef DEBUG
   1681       1.1    simonb /*
   1682       1.1    simonb  * Test ref/modify handling.
   1683       1.1    simonb  */
   1684      1.53       dsl void pmap_testout(void);
   1685       1.1    simonb void
   1686      1.54    cegger pmap_testout(void)
   1687       1.1    simonb {
   1688       1.1    simonb 	vaddr_t va;
   1689       1.1    simonb 	volatile int *loc;
   1690       1.1    simonb 	int val = 0;
   1691       1.1    simonb 	paddr_t pa;
   1692       1.1    simonb 	struct vm_page *pg;
   1693       1.1    simonb 	int ref, mod;
   1694       1.1    simonb 
   1695       1.1    simonb 	/* Allocate a page */
   1696      1.34      yamt 	va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
   1697      1.34      yamt 	    UVM_KMF_WIRED | UVM_KMF_ZERO);
   1698       1.1    simonb 	loc = (int*)va;
   1699       1.1    simonb 
   1700       1.1    simonb 	pmap_extract(pmap_kernel(), va, &pa);
   1701       1.1    simonb 	pg = PHYS_TO_VM_PAGE(pa);
   1702       1.1    simonb 	pmap_unwire(pmap_kernel(), va);
   1703       1.1    simonb 
   1704      1.34      yamt 	pmap_kremove(va, PAGE_SIZE);
   1705       1.1    simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1706       1.4     chris 	pmap_update(pmap_kernel());
   1707       1.1    simonb 
   1708       1.1    simonb 	/* Now clear reference and modify */
   1709       1.1    simonb 	ref = pmap_clear_reference(pg);
   1710       1.1    simonb 	mod = pmap_clear_modify(pg);
   1711       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1712       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1713       1.1    simonb 	       ref, mod);
   1714       1.1    simonb 
   1715       1.1    simonb 	/* Check it's properly cleared */
   1716       1.1    simonb 	ref = pmap_is_referenced(pg);
   1717       1.1    simonb 	mod = pmap_is_modified(pg);
   1718       1.1    simonb 	printf("Checking cleared page: ref %d, mod %d\n",
   1719       1.1    simonb 	       ref, mod);
   1720       1.1    simonb 
   1721       1.1    simonb 	/* Reference page */
   1722       1.1    simonb 	val = *loc;
   1723       1.1    simonb 
   1724       1.1    simonb 	ref = pmap_is_referenced(pg);
   1725       1.1    simonb 	mod = pmap_is_modified(pg);
   1726       1.1    simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1727       1.1    simonb 	       ref, mod, val);
   1728       1.1    simonb 
   1729       1.1    simonb 	/* Now clear reference and modify */
   1730       1.1    simonb 	ref = pmap_clear_reference(pg);
   1731       1.1    simonb 	mod = pmap_clear_modify(pg);
   1732       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1733       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1734       1.1    simonb 	       ref, mod);
   1735      1.12    simonb 
   1736       1.1    simonb 	/* Modify page */
   1737       1.1    simonb 	*loc = 1;
   1738       1.1    simonb 
   1739       1.1    simonb 	ref = pmap_is_referenced(pg);
   1740       1.1    simonb 	mod = pmap_is_modified(pg);
   1741       1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1742       1.1    simonb 	       ref, mod);
   1743       1.1    simonb 
   1744       1.1    simonb 	/* Now clear reference and modify */
   1745       1.1    simonb 	ref = pmap_clear_reference(pg);
   1746       1.1    simonb 	mod = pmap_clear_modify(pg);
   1747       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1748       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1749       1.1    simonb 	       ref, mod);
   1750       1.1    simonb 
   1751       1.1    simonb 	/* Check it's properly cleared */
   1752       1.1    simonb 	ref = pmap_is_referenced(pg);
   1753       1.1    simonb 	mod = pmap_is_modified(pg);
   1754       1.1    simonb 	printf("Checking cleared page: ref %d, mod %d\n",
   1755       1.1    simonb 	       ref, mod);
   1756       1.1    simonb 
   1757       1.1    simonb 	/* Modify page */
   1758       1.1    simonb 	*loc = 1;
   1759       1.1    simonb 
   1760       1.1    simonb 	ref = pmap_is_referenced(pg);
   1761       1.1    simonb 	mod = pmap_is_modified(pg);
   1762       1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1763       1.1    simonb 	       ref, mod);
   1764       1.1    simonb 
   1765       1.1    simonb 	/* Check pmap_protect() */
   1766       1.1    simonb 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
   1767       1.4     chris 	pmap_update(pmap_kernel());
   1768       1.1    simonb 	ref = pmap_is_referenced(pg);
   1769       1.1    simonb 	mod = pmap_is_modified(pg);
   1770       1.1    simonb 	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
   1771       1.1    simonb 	       ref, mod);
   1772       1.1    simonb 
   1773       1.1    simonb 	/* Now clear reference and modify */
   1774       1.1    simonb 	ref = pmap_clear_reference(pg);
   1775       1.1    simonb 	mod = pmap_clear_modify(pg);
   1776       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1777       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1778       1.1    simonb 	       ref, mod);
   1779       1.1    simonb 
   1780       1.1    simonb 	/* Reference page */
   1781       1.1    simonb 	val = *loc;
   1782       1.1    simonb 
   1783       1.1    simonb 	ref = pmap_is_referenced(pg);
   1784       1.1    simonb 	mod = pmap_is_modified(pg);
   1785       1.1    simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1786       1.1    simonb 	       ref, mod, val);
   1787       1.1    simonb 
   1788       1.1    simonb 	/* Now clear reference and modify */
   1789       1.1    simonb 	ref = pmap_clear_reference(pg);
   1790       1.1    simonb 	mod = pmap_clear_modify(pg);
   1791       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1792       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1793       1.1    simonb 	       ref, mod);
   1794      1.12    simonb 
   1795       1.1    simonb 	/* Modify page */
   1796       1.1    simonb #if 0
   1797       1.1    simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1798       1.4     chris 	pmap_update(pmap_kernel());
   1799       1.1    simonb #endif
   1800       1.1    simonb 	*loc = 1;
   1801       1.1    simonb 
   1802       1.1    simonb 	ref = pmap_is_referenced(pg);
   1803       1.1    simonb 	mod = pmap_is_modified(pg);
   1804       1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1805       1.1    simonb 	       ref, mod);
   1806       1.1    simonb 
   1807       1.1    simonb 	/* Check pmap_protect() */
   1808       1.1    simonb 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
   1809       1.4     chris 	pmap_update(pmap_kernel());
   1810       1.1    simonb 	ref = pmap_is_referenced(pg);
   1811       1.1    simonb 	mod = pmap_is_modified(pg);
   1812       1.1    simonb 	printf("pmap_protect(): ref %d, mod %d\n",
   1813       1.1    simonb 	       ref, mod);
   1814       1.1    simonb 
   1815       1.1    simonb 	/* Now clear reference and modify */
   1816       1.1    simonb 	ref = pmap_clear_reference(pg);
   1817       1.1    simonb 	mod = pmap_clear_modify(pg);
   1818       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1819       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1820       1.1    simonb 	       ref, mod);
   1821       1.1    simonb 
   1822       1.1    simonb 	/* Reference page */
   1823       1.1    simonb 	val = *loc;
   1824       1.1    simonb 
   1825       1.1    simonb 	ref = pmap_is_referenced(pg);
   1826       1.1    simonb 	mod = pmap_is_modified(pg);
   1827       1.1    simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1828       1.1    simonb 	       ref, mod, val);
   1829       1.1    simonb 
   1830       1.1    simonb 	/* Now clear reference and modify */
   1831       1.1    simonb 	ref = pmap_clear_reference(pg);
   1832       1.1    simonb 	mod = pmap_clear_modify(pg);
   1833       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1834       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1835       1.1    simonb 	       ref, mod);
   1836      1.12    simonb 
   1837       1.1    simonb 	/* Modify page */
   1838       1.1    simonb #if 0
   1839       1.1    simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1840       1.4     chris 	pmap_update(pmap_kernel());
   1841       1.1    simonb #endif
   1842       1.1    simonb 	*loc = 1;
   1843       1.1    simonb 
   1844       1.1    simonb 	ref = pmap_is_referenced(pg);
   1845       1.1    simonb 	mod = pmap_is_modified(pg);
   1846       1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1847       1.1    simonb 	       ref, mod);
   1848       1.1    simonb 
    1849       1.1    simonb 	/* Check pmap_page_protect() */
   1850       1.1    simonb 	pmap_page_protect(pg, VM_PROT_READ);
   1851       1.1    simonb 	ref = pmap_is_referenced(pg);
   1852       1.1    simonb 	mod = pmap_is_modified(pg);
   1853       1.1    simonb 	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
   1854       1.1    simonb 	       ref, mod);
   1855       1.1    simonb 
   1856       1.1    simonb 	/* Now clear reference and modify */
   1857       1.1    simonb 	ref = pmap_clear_reference(pg);
   1858       1.1    simonb 	mod = pmap_clear_modify(pg);
   1859       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1860       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1861       1.1    simonb 	       ref, mod);
   1862       1.1    simonb 
   1863       1.1    simonb 	/* Reference page */
   1864       1.1    simonb 	val = *loc;
   1865       1.1    simonb 
   1866       1.1    simonb 	ref = pmap_is_referenced(pg);
   1867       1.1    simonb 	mod = pmap_is_modified(pg);
   1868       1.1    simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1869       1.1    simonb 	       ref, mod, val);
   1870       1.1    simonb 
   1871       1.1    simonb 	/* Now clear reference and modify */
   1872       1.1    simonb 	ref = pmap_clear_reference(pg);
   1873       1.1    simonb 	mod = pmap_clear_modify(pg);
   1874       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1875       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1876       1.1    simonb 	       ref, mod);
   1877      1.12    simonb 
   1878       1.1    simonb 	/* Modify page */
   1879       1.1    simonb #if 0
   1880       1.1    simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1881       1.4     chris 	pmap_update(pmap_kernel());
   1882       1.1    simonb #endif
   1883       1.1    simonb 	*loc = 1;
   1884       1.1    simonb 
   1885       1.1    simonb 	ref = pmap_is_referenced(pg);
   1886       1.1    simonb 	mod = pmap_is_modified(pg);
   1887       1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1888       1.1    simonb 	       ref, mod);
   1889       1.1    simonb 
    1890       1.1    simonb 	/* Check pmap_page_protect() */
   1891       1.1    simonb 	pmap_page_protect(pg, VM_PROT_NONE);
   1892       1.1    simonb 	ref = pmap_is_referenced(pg);
   1893       1.1    simonb 	mod = pmap_is_modified(pg);
   1894       1.1    simonb 	printf("pmap_page_protect(): ref %d, mod %d\n",
   1895       1.1    simonb 	       ref, mod);
   1896       1.1    simonb 
   1897       1.1    simonb 	/* Now clear reference and modify */
   1898       1.1    simonb 	ref = pmap_clear_reference(pg);
   1899       1.1    simonb 	mod = pmap_clear_modify(pg);
   1900       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1901       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1902       1.1    simonb 	       ref, mod);
   1903       1.1    simonb 
   1904       1.1    simonb 
   1905       1.1    simonb 	/* Reference page */
   1906       1.1    simonb 	val = *loc;
   1907       1.1    simonb 
   1908       1.1    simonb 	ref = pmap_is_referenced(pg);
   1909       1.1    simonb 	mod = pmap_is_modified(pg);
   1910       1.1    simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1911       1.1    simonb 	       ref, mod, val);
   1912       1.1    simonb 
   1913       1.1    simonb 	/* Now clear reference and modify */
   1914       1.1    simonb 	ref = pmap_clear_reference(pg);
   1915       1.1    simonb 	mod = pmap_clear_modify(pg);
   1916       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1917       1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1918       1.1    simonb 	       ref, mod);
   1919      1.12    simonb 
   1920       1.1    simonb 	/* Modify page */
   1921       1.1    simonb #if 0
   1922       1.1    simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1923       1.4     chris 	pmap_update(pmap_kernel());
   1924       1.1    simonb #endif
   1925       1.1    simonb 	*loc = 1;
   1926       1.1    simonb 
   1927       1.1    simonb 	ref = pmap_is_referenced(pg);
   1928       1.1    simonb 	mod = pmap_is_modified(pg);
   1929       1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1930       1.1    simonb 	       ref, mod);
   1931       1.1    simonb 
   1932       1.1    simonb 	/* Unmap page */
   1933       1.1    simonb 	pmap_remove(pmap_kernel(), va, va+1);
   1934       1.4     chris 	pmap_update(pmap_kernel());
   1935       1.1    simonb 	ref = pmap_is_referenced(pg);
   1936       1.1    simonb 	mod = pmap_is_modified(pg);
   1937       1.1    simonb 	printf("Unmapped page: ref %d, mod %d\n", ref, mod);
   1938       1.1    simonb 
   1939       1.1    simonb 	/* Now clear reference and modify */
   1940       1.1    simonb 	ref = pmap_clear_reference(pg);
   1941       1.1    simonb 	mod = pmap_clear_modify(pg);
   1942       1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1943       1.1    simonb 	       (void *)(u_long)va, (long)pa, ref, mod);
   1944       1.1    simonb 
   1945       1.1    simonb 	/* Check it's properly cleared */
   1946       1.1    simonb 	ref = pmap_is_referenced(pg);
   1947       1.1    simonb 	mod = pmap_is_modified(pg);
   1948       1.1    simonb 	printf("Checking cleared page: ref %d, mod %d\n",
   1949       1.1    simonb 	       ref, mod);
   1950       1.1    simonb 
   1951      1.34      yamt 	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
   1952      1.59    cegger 	pmap_kenter_pa(va, pa, VM_PROT_ALL, 0);
   1953      1.34      yamt 	uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE, UVM_KMF_WIRED);
   1954       1.1    simonb }
   1955       1.1    simonb #endif
   1956