/*	$NetBSD: pmap.c,v 1.72.24.1 2017/02/05 13:40:18 skrll Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.72.24.1 2017/02/05 13:40:18 skrll Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <machine/powerpc.h>
#include <machine/tlb.h>

#include <powerpc/pcb.h>

#include <powerpc/spr.h>
#include <powerpc/ibm4xx/spr.h>

#include <powerpc/ibm4xx/cpu.h>

/*
 * kernmap is an array of PTEs large enough to map in
 * 4GB.  At 16KB/page it is 256K entries or 2MB.
 */
#define KERNMAP_SIZE	((0xffffffffU/PAGE_SIZE)+1)
void *kernmap;

#define MINCTX		2
#define NUMCTX		256

volatile struct pmap *ctxbusy[NUMCTX];

#define TLBF_USED	0x1
#define	TLBF_REF	0x2
#define	TLBF_LOCKED	0x4
#define	TLB_LOCKED(i)	(tlb_info[(i)].ti_flags & TLBF_LOCKED)

typedef struct tlb_info_s {
	char	ti_flags;
	char	ti_ctx;		/* TLB_PID associated with the entry */
	u_int	ti_va;
} tlb_info_t;

volatile tlb_info_t tlb_info[NTLB];
/* We'll use a modified FIFO replacement policy because it's cheap */
volatile int tlbnext;

static int tlb_nreserved = 0;
static int pmap_bootstrap_done = 0;
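
/*
 * Illustrative sketch (not part of the build): one way the modified
 * FIFO policy noted above can choose a victim slot, stepping tlbnext
 * past the reserved entries and never evicting a locked one.  The
 * helper name "tlb_nextvictim" is hypothetical; the real replacement
 * code lives in the 4xx TLB miss handlers.
 *
 *	static int
 *	tlb_nextvictim(void)
 *	{
 *		int i = tlbnext;
 *
 *		do {
 *			if (++i >= NTLB)
 *				i = tlb_nreserved;
 *		} while (TLB_LOCKED(i));
 *		tlbnext = i;
 *		return (i);
 *	}
 */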

/* Event counters */
struct evcnt tlbmiss_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbmiss");
struct evcnt tlbhit_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbhit");
struct evcnt tlbflush_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbflush");
struct evcnt tlbenter_ev = EVCNT_INITIALIZER(EVCNT_TYPE_TRAP,
	NULL, "cpu", "tlbenter");
EVCNT_ATTACH_STATIC(tlbmiss_ev);
EVCNT_ATTACH_STATIC(tlbhit_ev);
EVCNT_ATTACH_STATIC(tlbflush_ev);
EVCNT_ATTACH_STATIC(tlbenter_ev);

struct pmap kernel_pmap_;
struct pmap *const kernel_pmap_ptr = &kernel_pmap_;

static int npgs;
static u_int nextavail;
#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;

/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTRSHFT.
 */
static char *pmap_attrib;

#define PV_WIRED	0x1
#define PV_WIRE(pv)	((pv)->pv_va |= PV_WIRED)
#define PV_UNWIRE(pv)	((pv)->pv_va &= ~PV_WIRED)
#define PV_ISWIRED(pv)	((pv)->pv_va & PV_WIRED)
#define PV_CMPVA(va,pv)	(!(((pv)->pv_va ^ (va)) & (~PV_WIRED)))

struct pv_entry {
	struct pv_entry *pv_next;	/* Linked list of mappings */
	struct pmap *pv_pm;
	vaddr_t pv_va;			/* virtual address of mapping */
};
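
/*
 * Example (illustrative): PV_WIRED is stashed in the low bit of pv_va,
 * which is always zero for a page-aligned address, so PV_CMPVA() masks
 * it off when comparing:
 *
 *	pv->pv_va = 0x12000;		page-aligned mapping
 *	PV_WIRE(pv);			pv_va is now 0x12001
 *	PV_CMPVA(0x12000, pv)		still true, wired bit ignored
 *	PV_ISWIRED(pv)			non-zero
 */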

/* Each index corresponds to TLB_SIZE_* value. */
static size_t tlbsize[] = {
	1024,		/* TLB_SIZE_1K */
	4096,		/* TLB_SIZE_4K */
	16384,		/* TLB_SIZE_16K */
	65536,		/* TLB_SIZE_64K */
	262144,		/* TLB_SIZE_256K */
	1048576,	/* TLB_SIZE_1M */
	4194304,	/* TLB_SIZE_4M */
	16777216,	/* TLB_SIZE_16M */
};
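
/*
 * For example, tlbsize[TLB_SIZE_16K] == 16384, matching the TTE_SZ_16K
 * page size that pmap_enter() below hardcodes for its TTEs.
 */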

struct pv_entry *pv_table;
static struct pool pv_pool;

static int pmap_initialized;

static int ctx_flush(int);

struct pv_entry *pa_to_pv(paddr_t);
static inline char *pa_to_attr(paddr_t);

static inline volatile u_int *pte_find(struct pmap *, vaddr_t);
static inline int pte_enter(struct pmap *, vaddr_t, u_int);

static inline int pmap_enter_pv(struct pmap *, vaddr_t, paddr_t, int);
static void pmap_remove_pv(struct pmap *, vaddr_t, paddr_t);

static int ppc4xx_tlb_size_mask(size_t, int *, int *);


struct pv_entry *
pa_to_pv(paddr_t pa)
{
	uvm_physseg_t bank;
	psize_t pg;

	bank = uvm_physseg_find(atop(pa), &pg);
	if (bank == UVM_PHYSSEG_TYPE_INVALID)
		return NULL;
	return &uvm_physseg_get_pmseg(bank)->pvent[pg];
}

static inline char *
pa_to_attr(paddr_t pa)
{
	uvm_physseg_t bank;
	psize_t pg;

	bank = uvm_physseg_find(atop(pa), &pg);
	if (bank == UVM_PHYSSEG_TYPE_INVALID)
		return NULL;
	return &uvm_physseg_get_pmseg(bank)->attrs[pg];
}

/*
 * Insert PTE into page table.
 */
int
pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int oldpte;

	if (!pm->pm_ptbl[seg]) {
		/* Don't allocate a page to clear a non-existent mapping. */
		if (!pte)
			return (0);
		/* Allocate a page XXXX this will sleep! */
		pm->pm_ptbl[seg] =
		    (uint *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
		    UVM_KMF_WIRED | UVM_KMF_ZERO);
	}
	oldpte = pm->pm_ptbl[seg][ptn];
	pm->pm_ptbl[seg][ptn] = pte;

	/* Flush entry. */
	ppc4xx_tlb_flush(va, pm->pm_ctx);
	if (oldpte != pte) {
		if (pte == 0)
			pm->pm_stats.resident_count--;
		else
			pm->pm_stats.resident_count++;
	}
	return (1);
}
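
/*
 * Usage sketch: pmap_kremove() below tears down kernel mappings one
 * page at a time with exactly this call; a zero PTE both clears the
 * slot and flushes any stale TLB entry:
 *
 *	pte_enter(pmap_kernel(), va, 0);
 */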

/*
 * Get a pointer to a PTE in a page table.
 */
volatile u_int *
pte_find(struct pmap *pm, vaddr_t va)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);

	if (pm->pm_ptbl[seg])
		return (&pm->pm_ptbl[seg][ptn]);

	return (NULL);
}
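
/*
 * Usage sketch (cf. pmap_remove() below): probe for a mapping without
 * touching the TLB:
 *
 *	volatile u_int *ptp;
 *
 *	if ((ptp = pte_find(pm, va)) != NULL && *ptp != 0)
 *		... va is mapped and *ptp is its TTE ...
 */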

/*
 * This is called during initppc, before the system is really initialized.
 */
void
pmap_bootstrap(u_int kernelstart, u_int kernelend)
{
	struct mem_region *mp, *mp1;
	int cnt, i;
	u_int s, e, sz;

	tlbnext = tlb_nreserved;

	/*
	 * Allocate the kernel page table at the end of
	 * kernel space so it's in the locked TTE.
	 */
	kernmap = (void *)kernelend;

	/*
	 * Initialize kernel page table.
	 */
	for (i = 0; i < STSZ; i++) {
		pmap_kernel()->pm_ptbl[i] = 0;
	}
	ctxbusy[0] = ctxbusy[1] = pmap_kernel();

	/*
	 * Announce page-size to the VM-system
	 */
	uvmexp.pagesize = NBPG;
	uvm_md_init();

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++) {
		physmem += btoc(mp->size);
		printf("+%lx,",mp->size);
	}
	printf("\n");
	ppc4xx_tlb_init();
	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++)
		cnt++;

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PGOFSET;
	kernelend = (kernelend + PGOFSET) & ~PGOFSET;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		printf("%08x-%08x -> ",s,e);
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Look whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		printf("%08x-%08x = %x\n",s,e,sz);
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
				(cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}
		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);
		for (mp1 = avail; mp1 < mp; mp1++)
			if (s < mp1->start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
#ifndef MSGBUFADDR
	/*
	 * allow for msgbuf
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;
	for (mp1 = avail; mp1->size; mp1++)
		if (mp1->size >= sz)
			mp = mp1;
	if (mp == NULL)
		panic("not enough memory?");

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;
	if (mp->size <= 0)
		memmove(mp, mp + 1, (cnt - (mp - avail)) * sizeof *mp);
#endif

	for (mp = avail; mp->size; mp++)
		uvm_page_physload(atop(mp->start), atop(mp->start + mp->size),
			atop(mp->start), atop(mp->start + mp->size),
			VM_FREELIST_DEFAULT);

	/*
	 * Initialize kernel pmap and hardware.
	 */
	/* Set up the TLB PID allocator so it knows we're already using PID 1 */
	pmap_kernel()->pm_ctx = KERNEL_PID;
	nextavail = avail->start;

	pmap_bootstrap_done = 1;
}

/*
 * Restrict given range to physical memory
 *
 * (Used by /dev/mem)
 */
void
pmap_real_memory(paddr_t *start, psize_t *size)
{
	struct mem_region *mp;

	for (mp = mem; mp->size; mp++) {
		if (*start + *size > mp->start &&
		    *start < mp->start + mp->size) {
			if (*start < mp->start) {
				*size -= mp->start - *start;
				*start = mp->start;
			}
			if (*start + *size > mp->start + mp->size)
				*size = mp->start + mp->size - *start;
			return;
		}
	}
	*size = 0;
}
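
/*
 * Worked example (hypothetical numbers): with a single RAM region
 * 0x00000000-0x04000000, a caller passing *start = 0x03fff000 and
 * *size = 0x4000 overlaps the top of RAM and has *size clamped to
 * 0x1000; a range entirely outside RAM comes back with *size = 0.
 */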

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(void)
{
	struct pv_entry *pv;
	vsize_t sz;
	vaddr_t addr;
	int i, s;
	int bank;
	char *attr;

	sz = (vsize_t)((sizeof(struct pv_entry) + 1) * npgs);
	sz = round_page(sz);
	addr = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
	s = splvm();
	pv = pv_table = (struct pv_entry *)addr;
	for (i = npgs; --i >= 0;)
		pv++->pv_pm = NULL;
	pmap_attrib = (char *)pv;
	memset(pv, 0, npgs);

	pv = pv_table;
	attr = pmap_attrib;
	for (bank = uvm_physseg_get_first();
	     uvm_physseg_valid_p(bank);
	     bank = uvm_physseg_get_next(bank)) {
		sz = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
		uvm_physseg_get_pmseg(bank)->pvent = pv;
		uvm_physseg_get_pmseg(bank)->attrs = attr;
		pv += sz;
		attr += sz;
	}

	pmap_initialized = 1;
	splx(s);

	/* Setup a pool for additional pvlist structures */
	pool_init(&pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pv_entry", NULL,
	    IPL_VM);
}

/*
 * How much virtual space is available to the kernel?
 */
void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{

#if 0
	/*
	 * Reserve one segment for kernel virtual memory
	 */
	*start = (vaddr_t)(KERNEL_SR << ADDR_SR_SHFT);
	*end = *start + SEGMENT_LENGTH;
#else
	*start = (vaddr_t) VM_MIN_KERNEL_ADDRESS;
	*end = (vaddr_t) VM_MAX_KERNEL_ADDRESS;
#endif
}

#ifdef PMAP_GROWKERNEL
/*
 * Preallocate kernel page tables to a specified VA.
 * This simply loops through the first TTE for each
 * page table from the beginning of the kernel pmap,
 * reads the entry, and if the result is
 * zero (either invalid entry or no page table) it stores
 * a zero there, populating page tables in the process.
 * This is not the most efficient technique but I don't
 * expect it to be called that often.
 */
extern struct vm_page *vm_page_alloc1(void);
extern void vm_page_free1(struct vm_page *);

vaddr_t kbreak = VM_MIN_KERNEL_ADDRESS;

vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	int s;
	int seg;
	paddr_t pg;
	struct pmap *pm = pmap_kernel();

	s = splvm();

	/* Align with the start of a page table */
	for (kbreak &= ~(PTMAP-1); kbreak < maxkvaddr;
	     kbreak += PTMAP) {
		seg = STIDX(kbreak);

		if (pte_find(pm, kbreak))
			continue;

		if (uvm.page_init_done) {
			pg = (paddr_t)VM_PAGE_TO_PHYS(vm_page_alloc1());
		} else {
			if (!uvm_page_physget(&pg))
				panic("pmap_growkernel: no memory");
		}
		if (!pg)
			panic("pmap_growkernel: no pages");
		pmap_zero_page((paddr_t)pg);

		/* XXX This is based on all physmem being addressable */
		pm->pm_ptbl[seg] = (u_int *)pg;
	}
	splx(s);
	return (kbreak);
}
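
/*
 * Call-convention sketch (simplified from UVM's actual use): when the
 * kernel map must grow past kbreak, UVM does roughly
 *
 *	uvm_maxkaddr = pmap_growkernel(new_high_va);
 *
 * after which page tables exist for all VAs below the returned value.
 */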

/*
 *	vm_page_alloc1:
 *
 *	Allocate and return a memory cell with no associated object.
 */
struct vm_page *
vm_page_alloc1(void)
{
	struct vm_page *pg;

	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (pg) {
		pg->wire_count = 1;	/* no mappings yet */
		pg->flags &= ~PG_BUSY;	/* never busy */
	}
	return pg;
}

/*
 *	vm_page_free1:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free1(struct vm_page *pg)
{
#ifdef DIAGNOSTIC
	if (pg->flags != (PG_CLEAN|PG_FAKE)) {
		printf("Freeing invalid page %p\n", pg);
		printf("pa = %llx\n", (unsigned long long)VM_PAGE_TO_PHYS(pg));
#ifdef DDB
		Debugger();
#endif
		return;
	}
#endif
	pg->flags |= PG_BUSY;
	pg->wire_count = 0;
	uvm_pagefree(pg);
}
#endif

/*
 * Create and return a physical map.
 */
struct pmap *
pmap_create(void)
{
	struct pmap *pm;

	pm = kmem_alloc(sizeof(*pm), KM_SLEEP);
	memset(pm, 0, sizeof *pm);
	pm->pm_refs = 1;
	return pm;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{
	int i;

	if (--pm->pm_refs > 0) {
		return;
	}
	KASSERT(pm->pm_stats.resident_count == 0);
	KASSERT(pm->pm_stats.wired_count == 0);
	for (i = 0; i < STSZ; i++)
		if (pm->pm_ptbl[i]) {
			uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
			    PAGE_SIZE, UVM_KMF_WIRED);
			pm->pm_ptbl[i] = NULL;
		}
	if (pm->pm_ctx)
		ctx_free(pm);
	kmem_free(pm, sizeof(*pm));
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vaddr_t dst_addr,
	  vsize_t len, vaddr_t src_addr)
{
}

/*
 * Require that all active physical maps contain no
 * incorrect entries NOW.
 */
void
pmap_update(struct pmap *pmap)
{
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(paddr_t pa)
{

#ifdef PPC_4XX_NOCACHE
	memset((void *)pa, 0, PAGE_SIZE);
#else
	int i;

	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
		__asm volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{

	memcpy((void *)dst, (void *)src, PAGE_SIZE);
	dcache_wbinv_page(dst);
}

/*
 * This returns != 0 on success.
 */
static inline int
pmap_enter_pv(struct pmap *pm, vaddr_t va, paddr_t pa, int flags)
{
	struct pv_entry *pv, *npv = NULL;
	int s;

	if (!pmap_initialized)
		return 0;

	s = splvm();
	pv = pa_to_pv(pa);
	if (!pv->pv_pm) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_pm = pm;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pool_get(&pv_pool, PR_NOWAIT);
		if (npv == NULL) {
			if ((flags & PMAP_CANFAIL) == 0)
				panic("pmap_enter_pv: failed");
			splx(s);
			return 0;
		}
		npv->pv_va = va;
		npv->pv_pm = pm;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
		pv = npv;
	}
	if (flags & PMAP_WIRED) {
		PV_WIRE(pv);
		pm->pm_stats.wired_count++;
	}
	splx(s);
	return (1);
}

static void
pmap_remove_pv(struct pmap *pm, vaddr_t va, paddr_t pa)
{
	struct pv_entry *pv, *npv;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);
	if (!pv)
		return;

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
		if (PV_ISWIRED(pv)) {
			pm->pm_stats.wired_count--;
		}
		if ((npv = pv->pv_next)) {
			*pv = *npv;
			pool_put(&pv_pool, npv);
		} else
			pv->pv_pm = NULL;
	} else {
		for (; (npv = pv->pv_next) != NULL; pv = npv)
			if (pm == npv->pv_pm && PV_CMPVA(va, npv))
				break;
		if (npv) {
			pv->pv_next = npv->pv_next;
			if (PV_ISWIRED(npv)) {
				pm->pm_stats.wired_count--;
			}
			pool_put(&pv_pool, npv);
		}
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
int
pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	int s;
	u_int tte;
	bool managed;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + PAGE_SIZE);

	if (flags & PMAP_WIRED)
		flags |= prot;

	managed = uvm_pageismanaged(pa);

	/*
	 * Generate TTE.
	 */
	tte = TTE_PA(pa);
	/* XXXX -- need to support multiple page sizes. */
	tte |= TTE_SZ_16K;
#ifdef	DIAGNOSTIC
	if ((flags & (PMAP_NOCACHE | PME_WRITETHROUG)) ==
		(PMAP_NOCACHE | PME_WRITETHROUG))
		panic("pmap_enter: uncached & writethrough");
#endif
	if (flags & PMAP_NOCACHE)
		/* Must be I/O mapping */
		tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
	tte |= TTE_I;
#else
	else if (flags & PME_WRITETHROUG)
		/* Uncached and writethrough are not compatible */
		tte |= TTE_W;
#endif
	if (pm == pmap_kernel())
		tte |= TTE_ZONE(ZONE_PRIV);
	else
		tte |= TTE_ZONE(ZONE_USER);

	if (flags & VM_PROT_WRITE)
		tte |= TTE_WR;

	if (flags & VM_PROT_EXECUTE)
		tte |= TTE_EX;

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && managed) {
		char *attr;

		if (!pmap_enter_pv(pm, va, pa, flags)) {
			/* Could not enter pv on a managed page */
			return 1;
		}

		/* Now set attributes. */
		attr = pa_to_attr(pa);
#ifdef DIAGNOSTIC
		if (!attr)
			panic("managed but no attr");
#endif
		if (flags & VM_PROT_ALL)
			*attr |= PMAP_ATTR_REF;
		if (flags & VM_PROT_WRITE)
			*attr |= PMAP_ATTR_CHG;
	}

	s = splvm();

	/* Insert page into page table. */
	pte_enter(pm, va, tte);

	/* If this is a real fault, enter it in the tlb */
	if (tte && ((flags & PMAP_WIRED) == 0)) {
		int s2 = splhigh();
		ppc4xx_tlb_enter(pm->pm_ctx, va, tte);
		splx(s2);
	}
	splx(s);

	/* Flush the real memory from the instruction cache. */
	if ((prot & VM_PROT_EXECUTE) && (tte & TTE_I) == 0)
		__syncicache((void *)pa, PAGE_SIZE);

	return 0;
}
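
/*
 * Example usage (sketch): a wired, uncached device mapping in the
 * kernel pmap, mirroring the PMAP_NOCACHE handling above.  Note that
 * with PMAP_WIRED set, the code above folds prot into flags itself:
 *
 *	(void)pmap_enter(pmap_kernel(), va, pa,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED | PMAP_NOCACHE);
 *	pmap_update(pmap_kernel());
 */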

void
pmap_unwire(struct pmap *pm, vaddr_t va)
{
	struct pv_entry *pv;
	paddr_t pa;
	int s;

	if (!pmap_extract(pm, va, &pa)) {
		return;
	}

	pv = pa_to_pv(pa);
	if (!pv)
		return;

	s = splvm();
	while (pv != NULL) {
		if (pm == pv->pv_pm && PV_CMPVA(va, pv)) {
			if (PV_ISWIRED(pv)) {
				PV_UNWIRE(pv);
				pm->pm_stats.wired_count--;
			}
			break;
		}
		pv = pv->pv_next;
	}
	splx(s);
}

void
pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
{
	int s;
	u_int tte;
	struct pmap *pm = pmap_kernel();

	/*
	 * Have to remove any existing mapping first.
	 */

	/*
	 * Generate TTE.
	 *
	 * XXXX
	 *
	 * Since the kernel does not handle execution privileges properly,
	 * we will handle read and execute permissions together.
	 */
	tte = 0;
	if (prot & VM_PROT_ALL) {

		tte = TTE_PA(pa) | TTE_EX | TTE_ZONE(ZONE_PRIV);
		/* XXXX -- need to support multiple page sizes. */
		tte |= TTE_SZ_16K;
#ifdef DIAGNOSTIC
		if ((flags & (PMAP_NOCACHE | PME_WRITETHROUG)) ==
			(PMAP_NOCACHE | PME_WRITETHROUG))
			panic("pmap_kenter_pa: uncached & writethrough");
#endif
		if (flags & PMAP_NOCACHE)
			/* Must be I/O mapping */
			tte |= TTE_I | TTE_G;
#ifdef PPC_4XX_NOCACHE
		tte |= TTE_I;
#else
		else if (prot & PME_WRITETHROUG)
			/* Uncached and writethrough are not compatible */
			tte |= TTE_W;
#endif
		if (prot & VM_PROT_WRITE)
			tte |= TTE_WR;
	}

	s = splvm();

	/* Insert page into page table. */
	pte_enter(pm, va, tte);
	splx(s);
}

void
pmap_kremove(vaddr_t va, vsize_t len)
{

	while (len > 0) {
		pte_enter(pmap_kernel(), va, 0);
		va += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
}

/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
{
	int s;
	paddr_t pa;
	volatile u_int *ptp;

	s = splvm();
	while (va < endva) {

		if ((ptp = pte_find(pm, va)) && (pa = *ptp)) {
			pa = TTE_PA(pa);
			pmap_remove_pv(pm, va, pa);
			*ptp = 0;
			ppc4xx_tlb_flush(va, pm->pm_ctx);
			pm->pm_stats.resident_count--;
		}
		va += PAGE_SIZE;
	}

	splx(s);
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
bool
pmap_extract(struct pmap *pm, vaddr_t va, paddr_t *pap)
{
	int seg = STIDX(va);
	int ptn = PTIDX(va);
	u_int pa = 0;
	int s;

	s = splvm();
	if (pm->pm_ptbl[seg] && (pa = pm->pm_ptbl[seg][ptn])) {
		*pap = TTE_PA(pa) | (va & PGOFSET);
	}
	splx(s);
	return (pa != 0);
}

/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	volatile u_int *ptp;
	int s, bic;

	if ((prot & VM_PROT_READ) == 0) {
		pmap_remove(pm, sva, eva);
		return;
	}
	bic = 0;
	if ((prot & VM_PROT_WRITE) == 0) {
		bic |= TTE_WR;
	}
	if ((prot & VM_PROT_EXECUTE) == 0) {
		bic |= TTE_EX;
	}
	if (bic == 0) {
		return;
	}
	s = splvm();
	while (sva < eva) {
		if ((ptp = pte_find(pm, sva)) != NULL) {
			*ptp &= ~bic;
			ppc4xx_tlb_flush(sva, pm->pm_ctx);
		}
		sva += PAGE_SIZE;
	}
	splx(s);
}
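
/*
 * Example (sketch): write-protecting one page, e.g. for copy-on-write,
 * clears TTE_WR but keeps the mapping:
 *
 *	pmap_protect(pm, va, va + PAGE_SIZE, VM_PROT_READ);
 *
 * A protection without VM_PROT_READ instead removes the whole range
 * via the pmap_remove() call above.
 */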

bool
pmap_check_attr(struct vm_page *pg, u_int mask, int clear)
{
	paddr_t pa;
	char *attr;
	int s, rv;

	/*
	 * First modify bits in cache.
	 */
	pa = VM_PAGE_TO_PHYS(pg);
	attr = pa_to_attr(pa);
	if (attr == NULL)
		return false;

	s = splvm();
	rv = ((*attr & mask) != 0);
	if (clear) {
		*attr &= ~mask;
		pmap_page_protect(pg, mask == PMAP_ATTR_CHG ? VM_PROT_READ : 0);
	}
	splx(s);
	return rv;
}
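
/*
 * Sketch (assumed layering, not verified against the port's pmap.h):
 * the MI referenced/modified interface is typically built on this
 * helper, along the lines of
 *
 *	pmap_is_modified(pg)	-> pmap_check_attr((pg), PMAP_ATTR_CHG, 0)
 *	pmap_clear_modify(pg)	-> pmap_check_attr((pg), PMAP_ATTR_CHG, 1)
 *	pmap_is_referenced(pg)	-> pmap_check_attr((pg), PMAP_ATTR_REF, 0)
 */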
   1092        1.1    simonb 
   1093        1.1    simonb 
   1094        1.1    simonb /*
   1095        1.1    simonb  * Lower the protection on the specified physical page.
   1096        1.1    simonb  *
   1097        1.1    simonb  * There are only two cases: either the protection is going to 0,
   1098        1.1    simonb  * or it is going to read-only.
   1099        1.1    simonb  */
   1100        1.1    simonb void
   1101        1.1    simonb pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   1102        1.1    simonb {
   1103        1.1    simonb 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1104        1.1    simonb 	vaddr_t va;
   1105        1.1    simonb 	struct pv_entry *pvh, *pv, *npv;
   1106        1.1    simonb 	struct pmap *pm;
   1107        1.1    simonb 
   1108        1.1    simonb 	pvh = pa_to_pv(pa);
   1109        1.1    simonb 	if (pvh == NULL)
   1110        1.1    simonb 		return;
   1111        1.1    simonb 
   1112        1.1    simonb 	/* Handle extra pvs which may be deleted in the operation */
   1113        1.1    simonb 	for (pv = pvh->pv_next; pv; pv = npv) {
   1114        1.1    simonb 		npv = pv->pv_next;
   1115        1.1    simonb 
   1116        1.1    simonb 		pm = pv->pv_pm;
   1117        1.1    simonb 		va = pv->pv_va;
   1118       1.26       chs 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
   1119        1.1    simonb 	}
   1120        1.1    simonb 	/* Now check the head pv */
   1121        1.1    simonb 	if (pvh->pv_pm) {
   1122        1.1    simonb 		pv = pvh;
   1123        1.1    simonb 		pm = pv->pv_pm;
   1124        1.1    simonb 		va = pv->pv_va;
   1125       1.26       chs 		pmap_protect(pm, va, va + PAGE_SIZE, prot);
   1126        1.1    simonb 	}
   1127        1.1    simonb }
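
/*
 * Example (sketch): the VM system typically lowers a page's protection
 * in two steps when paging out -- write-protect first so further
 * modifications are caught, then remove all mappings:
 */
#if 0
	pmap_page_protect(pg, VM_PROT_READ);	/* write-protect everywhere */
	pmap_page_protect(pg, VM_PROT_NONE);	/* remove all mappings */
#endif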
   1128        1.1    simonb 
   1129        1.1    simonb /*
   1130        1.1    simonb  * Activate the address space for the specified process.  If the process
   1131        1.1    simonb  * is the current process, load the new MMU context.
   1132        1.1    simonb  */
   1133        1.1    simonb void
   1134       1.17   thorpej pmap_activate(struct lwp *l)
   1135        1.1    simonb {
   1136        1.1    simonb #if 0
   1137       1.65     rmind 	struct pcb *pcb = lwp_getpcb(l);
   1138       1.17   thorpej 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
   1139        1.1    simonb 
   1140        1.1    simonb 	/*
   1141       1.61     skrll 	 * XXX Normally performed in cpu_lwp_fork().
   1142        1.1    simonb 	 */
   1143       1.17   thorpej 	printf("pmap_activate(%p), pmap=%p\n",l,pmap);
   1144       1.25      matt 	pcb->pcb_pm = pmap;
   1145        1.1    simonb #endif
   1146        1.1    simonb }
   1147        1.1    simonb 
   1148        1.1    simonb /*
   1149        1.1    simonb  * Deactivate the specified process's address space.
   1150        1.1    simonb  */
   1151        1.1    simonb void
   1152       1.17   thorpej pmap_deactivate(struct lwp *l)
   1153        1.1    simonb {
   1154        1.1    simonb }
   1155        1.1    simonb 
   1156        1.1    simonb /*
 * Synchronize caches corresponding to [va, va + len) in process p.
   1158        1.1    simonb  */
   1159        1.1    simonb void
   1160        1.1    simonb pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   1161        1.1    simonb {
   1162        1.1    simonb 	struct pmap *pm = p->p_vmspace->vm_map.pmap;
   1163       1.18   hannken 	int msr, ctx, opid, step;
   1164       1.18   hannken 
   1165       1.18   hannken 	step = CACHELINESIZE;
   1166        1.1    simonb 
   1167        1.1    simonb 	/*
   1168        1.1    simonb 	 * Need to turn off IMMU and switch to user context.
   1169        1.1    simonb 	 * (icbi uses DMMU).
   1170        1.1    simonb 	 */
   1171        1.1    simonb 	if (!(ctx = pm->pm_ctx)) {
   1172        1.1    simonb 		/* No context -- assign it one */
   1173        1.1    simonb 		ctx_alloc(pm);
   1174        1.1    simonb 		ctx = pm->pm_ctx;
   1175        1.1    simonb 	}
	__asm volatile(
		"mfmsr %0;"		/* Save MSR */
		"li %1, %7;"		/* %1 = PSL_IR | PSL_DR */
		"andc %1,%0,%1;"
		"mtmsr %1;"		/* Turn off translation */
		"sync;isync;"
		"mfpid %1;"		/* Save old PID */
		"mtpid %2;"		/* Switch to the user context */
		"sync; isync;"
		"1:"
		"dcbf 0,%3;"		/* Write back dcache line */
		"icbi 0,%3;"		/* Invalidate icache line */
		"add %3,%3,%5;"		/* Advance to the next line */
		"addc. %4,%4,%6;"	/* len -= step; loop while len >= 0 */
		"bge 1b;"
		"mtpid %1;"		/* Restore PID */
		"mtmsr %0;"		/* Restore MSR (translation back on) */
		"sync; isync"
		: "=&r" (msr), "=&r" (opid)
		: "r" (ctx), "r" (va), "r" (len), "r" (step), "r" (-step),
		  "K" (PSL_IR | PSL_DR));
   1196        1.1    simonb }
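
/*
 * Example (sketch): code that patches instructions in another process
 * (a debugger planting a breakpoint, say) must call this afterwards to
 * make the icache coherent.  "insn" and "addr" are hypothetical here.
 */
#if 0
	copyout(&insn, (void *)addr, sizeof(insn));	/* hypothetical patch */
	pmap_procwr(p, addr, sizeof(insn));		/* sync caches */
#endif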
   1197        1.1    simonb 
   1198        1.1    simonb 
   1199        1.1    simonb /* This has to be done in real mode !!! */
   1200        1.1    simonb void
   1201        1.1    simonb ppc4xx_tlb_flush(vaddr_t va, int pid)
   1202        1.1    simonb {
   1203        1.1    simonb 	u_long i, found;
   1204        1.1    simonb 	u_long msr;
   1205        1.1    simonb 
   1206        1.1    simonb 	/* If there's no context then it can't be mapped. */
   1207       1.26       chs 	if (!pid)
   1208       1.26       chs 		return;
   1209        1.1    simonb 
   1210       1.42     freza 	__asm( 	"mfpid %1;"		/* Save PID */
   1211        1.1    simonb 		"mfmsr %2;"		/* Save MSR */
   1212        1.1    simonb 		"li %0,0;"		/* Now clear MSR */
   1213        1.1    simonb 		"mtmsr %0;"
   1214        1.1    simonb 		"mtpid %4;"		/* Set PID */
   1215        1.1    simonb 		"sync;"
   1216        1.1    simonb 		"tlbsx. %0,0,%3;"	/* Search TLB */
   1217        1.1    simonb 		"sync;"
   1218        1.1    simonb 		"mtpid %1;"		/* Restore PID */
   1219        1.1    simonb 		"mtmsr %2;"		/* Restore MSR */
   1220        1.1    simonb 		"sync;isync;"
		"li %1,1;"		/* Assume found... */
		"beq 1f;"		/* ...tlbsx. sets CR0[EQ] on a hit */
		"li %1,0;"		/* Missed: not found */
		"1:"
   1225        1.1    simonb 		: "=&r" (i), "=&r" (found), "=&r" (msr)
   1226        1.1    simonb 		: "r" (va), "r" (pid));
   1227        1.1    simonb 	if (found && !TLB_LOCKED(i)) {
   1228        1.1    simonb 
   1229        1.1    simonb 		/* Now flush translation */
   1230       1.39     perry 		__asm volatile(
   1231        1.1    simonb 			"tlbwe %0,%1,0;"
   1232        1.1    simonb 			"sync;isync;"
   1233        1.1    simonb 			: : "r" (0), "r" (i));
   1234        1.1    simonb 
   1235        1.1    simonb 		tlb_info[i].ti_ctx = 0;
   1236        1.1    simonb 		tlb_info[i].ti_flags = 0;
   1237        1.1    simonb 		tlbnext = i;
   1238        1.1    simonb 		/* Successful flushes */
   1239        1.1    simonb 		tlbflush_ev.ev_count++;
   1240        1.1    simonb 	}
   1241        1.1    simonb }
   1242        1.1    simonb 
   1243        1.1    simonb void
   1244        1.1    simonb ppc4xx_tlb_flush_all(void)
   1245        1.1    simonb {
   1246        1.1    simonb 	u_long i;
   1247        1.1    simonb 
   1248        1.1    simonb 	for (i = 0; i < NTLB; i++)
   1249        1.1    simonb 		if (!TLB_LOCKED(i)) {
   1250       1.39     perry 			__asm volatile(
   1251        1.1    simonb 				"tlbwe %0,%1,0;"
   1252        1.1    simonb 				"sync;isync;"
   1253        1.1    simonb 				: : "r" (0), "r" (i));
   1254        1.1    simonb 			tlb_info[i].ti_ctx = 0;
   1255        1.1    simonb 			tlb_info[i].ti_flags = 0;
   1256        1.1    simonb 		}
   1257        1.1    simonb 
   1258       1.39     perry 	__asm volatile("sync;isync");
   1259        1.1    simonb }
   1260        1.1    simonb 
/*
 * Find a TLB entry to evict, sweeping over the unreserved entries in a
 * second-chance fashion: a referenced entry loses its REF bit and is
 * skipped this time around, and the page holding the current kernel
 * stack is never chosen.
 */
   1262        1.1    simonb static int
   1263        1.1    simonb ppc4xx_tlb_find_victim(void)
   1264        1.1    simonb {
   1265        1.1    simonb 	int flags;
   1266        1.1    simonb 
   1267        1.1    simonb 	for (;;) {
   1268        1.1    simonb 		if (++tlbnext >= NTLB)
   1269       1.42     freza 			tlbnext = tlb_nreserved;
   1270        1.1    simonb 		flags = tlb_info[tlbnext].ti_flags;
   1271       1.12    simonb 		if (!(flags & TLBF_USED) ||
   1272        1.1    simonb 			(flags & (TLBF_LOCKED | TLBF_REF)) == 0) {
   1273        1.1    simonb 			u_long va, stack = (u_long)&va;
   1274        1.1    simonb 
   1275        1.1    simonb 			if (!((tlb_info[tlbnext].ti_va ^ stack) & (~PGOFSET)) &&
   1276        1.1    simonb 			    (tlb_info[tlbnext].ti_ctx == KERNEL_PID) &&
   1277        1.1    simonb 			     (flags & TLBF_USED)) {
   1278        1.1    simonb 				/* Kernel stack page */
   1279        1.1    simonb 				flags |= TLBF_USED;
   1280        1.1    simonb 				tlb_info[tlbnext].ti_flags = flags;
   1281        1.1    simonb 			} else {
   1282        1.1    simonb 				/* Found it! */
   1283        1.1    simonb 				return (tlbnext);
   1284        1.1    simonb 			}
   1285        1.1    simonb 		} else {
   1286        1.1    simonb 			tlb_info[tlbnext].ti_flags = (flags & ~TLBF_REF);
   1287        1.1    simonb 		}
   1288        1.1    simonb 	}
   1289        1.1    simonb }
   1290        1.1    simonb 
   1291        1.1    simonb void
   1292        1.1    simonb ppc4xx_tlb_enter(int ctx, vaddr_t va, u_int pte)
   1293        1.1    simonb {
   1294        1.1    simonb 	u_long th, tl, idx;
   1295        1.1    simonb 	tlbpid_t pid;
   1296        1.1    simonb 	u_short msr;
   1297       1.10       eeh 	paddr_t pa;
   1298       1.71  kiyohara 	int sz;
   1299       1.10       eeh 
   1300        1.1    simonb 	tlbenter_ev.ev_count++;
   1301        1.1    simonb 
   1302       1.10       eeh 	sz = (pte & TTE_SZ_MASK) >> TTE_SZ_SHIFT;
   1303       1.10       eeh 	pa = (pte & TTE_RPN_MASK(sz));
   1304       1.10       eeh 	th = (va & TLB_EPN_MASK) | (sz << TLB_SIZE_SHFT) | TLB_VALID;
   1305       1.10       eeh 	tl = (pte & ~TLB_RPN_MASK) | pa;
   1306       1.10       eeh 	tl |= ppc4xx_tlbflags(va, pa);
   1307        1.1    simonb 
   1308        1.1    simonb 	idx = ppc4xx_tlb_find_victim();
   1309        1.1    simonb 
   1310        1.1    simonb #ifdef DIAGNOSTIC
   1311       1.42     freza 	if ((idx < tlb_nreserved) || (idx >= NTLB)) {
   1312       1.31    simonb 		panic("ppc4xx_tlb_enter: replacing entry %ld", idx);
   1313        1.1    simonb 	}
   1314        1.1    simonb #endif
   1315       1.12    simonb 
   1316        1.1    simonb 	tlb_info[idx].ti_va = (va & TLB_EPN_MASK);
   1317        1.1    simonb 	tlb_info[idx].ti_ctx = ctx;
   1318        1.1    simonb 	tlb_info[idx].ti_flags = TLBF_USED | TLBF_REF;
   1319        1.1    simonb 
   1320       1.39     perry 	__asm volatile(
   1321        1.1    simonb 		"mfmsr %0;"			/* Save MSR */
   1322        1.1    simonb 		"li %1,0;"
   1323        1.1    simonb 		"tlbwe %1,%3,0;"		/* Invalidate old entry. */
   1324        1.1    simonb 		"mtmsr %1;"			/* Clear MSR */
   1325        1.1    simonb 		"mfpid %1;"			/* Save old PID */
   1326        1.1    simonb 		"mtpid %2;"			/* Load translation ctx */
   1327        1.1    simonb 		"sync; isync;"
   1328        1.1    simonb #ifdef DEBUG
   1329        1.1    simonb 		"andi. %3,%3,63;"
   1330        1.1    simonb 		"tweqi %3,0;" 			/* XXXXX DEBUG trap on index 0 */
   1331        1.1    simonb #endif
   1332        1.1    simonb 		"tlbwe %4,%3,1; tlbwe %5,%3,0;"	/* Set TLB */
   1333        1.1    simonb 		"sync; isync;"
   1334        1.1    simonb 		"mtpid %1; mtmsr %0;"		/* Restore PID and MSR */
   1335        1.1    simonb 		"sync; isync;"
   1336        1.1    simonb 	: "=&r" (msr), "=&r" (pid)
   1337        1.1    simonb 	: "r" (ctx), "r" (idx), "r" (tl), "r" (th));
   1338        1.1    simonb }
   1339        1.1    simonb 
   1340        1.1    simonb void
   1341        1.1    simonb ppc4xx_tlb_init(void)
   1342        1.1    simonb {
   1343        1.1    simonb 	int i;
   1344        1.1    simonb 
   1345        1.1    simonb 	/* Mark reserved TLB entries */
   1346       1.42     freza 	for (i = 0; i < tlb_nreserved; i++) {
   1347        1.1    simonb 		tlb_info[i].ti_flags = TLBF_LOCKED | TLBF_USED;
   1348        1.1    simonb 		tlb_info[i].ti_ctx = KERNEL_PID;
   1349        1.1    simonb 	}
   1350        1.1    simonb 
	/*
	 * Set up security zones:
	 *	Z0 - accessible by kernel only if TLB entry permissions allow
	 *	Z1,Z2 - access is controlled by TLB entry permissions
	 *	Z3 - full access regardless of TLB entry permissions
	 */
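	/*
	 * 0x1b000000 below packs four 2-bit fields from the most
	 * significant bit down (assuming the usual 4xx ZPR layout):
	 * Z0 = 0b00, Z1 = 0b01, Z2 = 0b10, Z3 = 0b11, matching the
	 * policy above; the remaining zones are left at 0.
	 */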
   1356        1.1    simonb 
   1357       1.39     perry 	__asm volatile(
   1358        1.1    simonb 		"mtspr %0,%1;"
   1359        1.1    simonb 		"sync;"
   1360        1.1    simonb 		::  "K"(SPR_ZPR), "r" (0x1b000000));
   1361        1.1    simonb }
   1362        1.1    simonb 
   1363       1.42     freza /*
   1364       1.42     freza  * ppc4xx_tlb_size_mask:
   1365       1.42     freza  *
 * 	Round up size to a supported page size; return TLBHI mask and real size.
   1367       1.42     freza  */
   1368       1.42     freza static int
   1369       1.42     freza ppc4xx_tlb_size_mask(size_t size, int *mask, int *rsiz)
   1370       1.42     freza {
   1371       1.42     freza 	int 			i;
   1372       1.42     freza 
   1373       1.42     freza 	for (i = 0; i < __arraycount(tlbsize); i++)
   1374       1.42     freza 		if (size <= tlbsize[i]) {
   1375       1.42     freza 			*mask = (i << TLB_SIZE_SHFT);
   1376       1.42     freza 			*rsiz = tlbsize[i];
   1377       1.42     freza 			return (0);
   1378       1.42     freza 		}
   1379       1.42     freza 	return (EINVAL);
   1380       1.42     freza }
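
/*
 * Example (sketch): a request that falls between supported sizes rounds
 * up to the next one -- with the usual 4xx page sizes (1KB..16MB in
 * powers of four), a 100KB request would come back as 256KB:
 */
#if 0
	int mask, rsiz;

	if (ppc4xx_tlb_size_mask(100 * 1024, &mask, &rsiz) == 0)
		KASSERT(rsiz == 256 * 1024);
#endif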
   1381       1.42     freza 
   1382       1.42     freza /*
   1383       1.42     freza  * ppc4xx_tlb_mapiodev:
   1384       1.42     freza  *
   1385       1.42     freza  * 	Lookup virtual address of mapping previously entered via
   1386       1.42     freza  * 	ppc4xx_tlb_reserve. Search TLB directly so that we don't
   1387       1.42     freza  * 	need to waste extra storage for reserved mappings. Note
   1388       1.42     freza  * 	that reading TLBHI also sets PID, but all reserved mappings
   1389       1.42     freza  * 	use KERNEL_PID, so the side effect is nil.
   1390       1.42     freza  */
   1391       1.42     freza void *
   1392       1.42     freza ppc4xx_tlb_mapiodev(paddr_t base, psize_t len)
   1393       1.42     freza {
   1394       1.42     freza 	paddr_t 		pa;
   1395       1.42     freza 	vaddr_t 		va;
   1396       1.42     freza 	u_int 			lo, hi, sz;
   1397       1.42     freza 	int 			i;
   1398       1.42     freza 
   1399       1.42     freza 	/* tlb_nreserved is only allowed to grow, so this is safe. */
   1400       1.42     freza 	for (i = 0; i < tlb_nreserved; i++) {
   1401       1.42     freza 		__asm volatile (
   1402       1.42     freza 		    "	tlbre %0,%2,1 	\n" 	/* TLBLO */
   1403       1.42     freza 		    "	tlbre %1,%2,0 	\n" 	/* TLBHI */
   1404       1.42     freza 		    : "=&r" (lo), "=&r" (hi)
   1405       1.42     freza 		    : "r" (i));
   1406       1.42     freza 
   1407       1.42     freza 		KASSERT(hi & TLB_VALID);
   1408       1.42     freza 		KASSERT(mfspr(SPR_PID) == KERNEL_PID);
   1409       1.42     freza 
   1410       1.42     freza 		pa = (lo & TLB_RPN_MASK);
   1411       1.42     freza 		if (base < pa)
   1412       1.42     freza 			continue;
   1413       1.42     freza 
   1414       1.42     freza 		sz = tlbsize[(hi & TLB_SIZE_MASK) >> TLB_SIZE_SHFT];
   1415       1.42     freza 		if ((base + len) > (pa + sz))
   1416       1.42     freza 			continue;
   1417       1.42     freza 
   1418       1.42     freza 		va = (hi & TLB_EPN_MASK) + (base & (sz - 1)); 	/* sz = 2^n */
   1419       1.42     freza 		return (void *)(va);
   1420       1.42     freza 	}
   1421       1.42     freza 
   1422       1.42     freza 	return (NULL);
   1423       1.42     freza }
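
/*
 * Example (sketch, hypothetical address): machdep code that reserved a
 * device window at early-init time can recover its virtual address at
 * attach time:
 */
#if 0
	void *uart = ppc4xx_tlb_mapiodev(0xef600300, 8);

	if (uart == NULL)
		panic("UART not covered by any reserved mapping");
#endif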
   1424       1.42     freza 
   1425       1.42     freza /*
   1426       1.42     freza  * ppc4xx_tlb_reserve:
   1427       1.42     freza  *
   1428       1.42     freza  * 	Map physical range to kernel virtual chunk via reserved TLB entry.
   1429       1.42     freza  */
   1430       1.42     freza void
   1431       1.42     freza ppc4xx_tlb_reserve(paddr_t pa, vaddr_t va, size_t size, int flags)
   1432       1.42     freza {
   1433       1.42     freza 	u_int 			lo, hi;
   1434       1.42     freza 	int 			szmask, rsize;
   1435       1.42     freza 
   1436       1.42     freza 	/* Called before pmap_bootstrap(), va outside kernel space. */
   1437       1.42     freza 	KASSERT(va < VM_MIN_KERNEL_ADDRESS || va >= VM_MAX_KERNEL_ADDRESS);
   1438       1.42     freza 	KASSERT(! pmap_bootstrap_done);
   1439       1.42     freza 	KASSERT(tlb_nreserved < NTLB);
   1440       1.42     freza 
   1441       1.42     freza 	/* Resolve size. */
   1442       1.42     freza 	if (ppc4xx_tlb_size_mask(size, &szmask, &rsize) != 0)
		panic("ppc4xx_tlb_reserve: entry %d, %zuB too large",
		    tlb_nreserved, size);
   1445       1.42     freza 
   1446       1.42     freza 	/* Real size will be power of two >= 1024, so this is OK. */
   1447       1.42     freza 	pa &= ~(rsize - 1); 	/* RPN */
   1448       1.42     freza 	va &= ~(rsize - 1); 	/* EPN */
   1449       1.42     freza 
   1450       1.42     freza 	lo = pa | TLB_WR | flags;
   1451       1.43  kiyohara 	hi = va | TLB_VALID | szmask;
   1452       1.42     freza 
   1453       1.42     freza #ifdef PPC_4XX_NOCACHE
   1454       1.42     freza 	lo |= TLB_I;
   1455       1.42     freza #endif
   1456       1.42     freza 
   1457       1.42     freza 	__asm volatile(
   1458       1.42     freza 	    "	tlbwe %1,%0,1 	\n" 	/* write TLBLO */
   1459       1.42     freza 	    "	tlbwe %2,%0,0 	\n" 	/* write TLBHI */
   1460       1.42     freza 	    "   sync 		\n"
   1461       1.42     freza 	    "	isync 		\n"
   1462       1.42     freza 	    : : "r" (tlb_nreserved), "r" (lo), "r" (hi));
   1463       1.42     freza 
   1464       1.42     freza 	tlb_nreserved++;
   1465       1.42     freza }
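
/*
 * Example (sketch, hypothetical addresses and flags): early machdep
 * init might pin RAM and an I/O window va == pa before pmap_bootstrap()
 * runs; TLB_EX and TLB_G are assumed to be the usual 4xx TLBLO execute
 * and guarded bits.
 */
#if 0
	ppc4xx_tlb_reserve(0x00000000, 0x00000000, 16 * 1024 * 1024, TLB_EX);
	ppc4xx_tlb_reserve(0xef600000, 0xef600000, 1024 * 1024,
	    TLB_I | TLB_G);
#endif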
   1466        1.1    simonb 
   1467        1.1    simonb /*
   1468        1.1    simonb  * We should pass the ctx in from trap code.
   1469        1.1    simonb  */
   1470        1.1    simonb int
   1471        1.1    simonb pmap_tlbmiss(vaddr_t va, int ctx)
   1472        1.1    simonb {
   1473        1.1    simonb 	volatile u_int *pte;
   1474        1.1    simonb 	u_long tte;
   1475        1.1    simonb 
   1476        1.1    simonb 	tlbmiss_ev.ev_count++;
   1477        1.1    simonb 
   1478        1.1    simonb 	/*
	 * We will reserve 0 up to VM_MIN_KERNEL_ADDRESS for va == pa mappings.
	 * Physical RAM is expected to live in this range; care must be taken
	 * not to clobber 0 up to ${physmem} with device mappings in machdep
	 * code.
   1483        1.1    simonb 	 */
   1484       1.63  uebayasi 	if (ctx != KERNEL_PID ||
   1485       1.63  uebayasi 	    (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)) {
   1486       1.36       scw 		pte = pte_find((struct pmap *)__UNVOLATILE(ctxbusy[ctx]), va);
   1487        1.1    simonb 		if (pte == NULL) {
   1488        1.1    simonb 			/* Map unmanaged addresses directly for kernel access */
   1489        1.1    simonb 			return 1;
   1490        1.1    simonb 		}
   1491        1.1    simonb 		tte = *pte;
   1492        1.1    simonb 		if (tte == 0) {
   1493        1.1    simonb 			return 1;
   1494        1.1    simonb 		}
   1495        1.1    simonb 	} else {
   1496       1.16       wiz 		/* Create a 16MB writable mapping. */
   1497        1.8   thorpej #ifdef PPC_4XX_NOCACHE
		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M |
		    TTE_I | TTE_WR;
   1499        1.1    simonb #else
   1500        1.1    simonb 		tte = TTE_PA(va) | TTE_ZONE(ZONE_PRIV) | TTE_SZ_16M | TTE_WR;
   1501        1.1    simonb #endif
   1502        1.1    simonb 	}
   1503        1.1    simonb 	tlbhit_ev.ev_count++;
   1504        1.1    simonb 	ppc4xx_tlb_enter(ctx, va, tte);
   1505        1.1    simonb 
   1506        1.1    simonb 	return 0;
   1507        1.1    simonb }
   1508        1.1    simonb 
   1509        1.1    simonb /*
   1510        1.1    simonb  * Flush all the entries matching a context from the TLB.
   1511        1.1    simonb  */
   1512        1.1    simonb static int
   1513        1.1    simonb ctx_flush(int cnum)
   1514        1.1    simonb {
   1515        1.1    simonb 	int i;
   1516        1.1    simonb 
   1517        1.1    simonb 	/* We gotta steal this context */
   1518       1.42     freza 	for (i = tlb_nreserved; i < NTLB; i++) {
   1519        1.1    simonb 		if (tlb_info[i].ti_ctx == cnum) {
   1520        1.1    simonb 			/* Can't steal ctx if it has a locked entry. */
   1521        1.1    simonb 			if (TLB_LOCKED(i)) {
   1522        1.1    simonb #ifdef DIAGNOSTIC
   1523        1.1    simonb 				printf("ctx_flush: can't invalidate "
   1524        1.1    simonb 					"locked mapping %d "
   1525        1.1    simonb 					"for context %d\n", i, cnum);
   1526       1.10       eeh #ifdef DDB
   1527        1.1    simonb 				Debugger();
   1528        1.1    simonb #endif
   1529       1.10       eeh #endif
   1530        1.1    simonb 				return (1);
   1531        1.1    simonb 			}
   1532        1.1    simonb #ifdef DIAGNOSTIC
   1533       1.42     freza 			if (i < tlb_nreserved)
   1534       1.13    provos 				panic("TLB entry %d not locked", i);
   1535        1.1    simonb #endif
			/* Invalidate this TLB entry (known not to be locked) */
   1537       1.39     perry 			__asm volatile("tlbwe %0,%1,0" : :"r"(0),"r"(i));
   1538        1.1    simonb 			tlb_info[i].ti_flags = 0;
   1539        1.1    simonb 		}
   1540        1.1    simonb 	}
   1541        1.1    simonb 	return (0);
   1542        1.1    simonb }
   1543        1.1    simonb 
   1544        1.1    simonb /*
   1545        1.1    simonb  * Allocate a context.  If necessary, steal one from someone else.
   1546        1.1    simonb  *
   1547        1.1    simonb  * The new context is flushed from the TLB before returning.
   1548        1.1    simonb  */
   1549        1.1    simonb int
   1550        1.1    simonb ctx_alloc(struct pmap *pm)
   1551        1.1    simonb {
   1552        1.1    simonb 	int s, cnum;
   1553        1.1    simonb 	static int next = MINCTX;
   1554        1.1    simonb 
   1555        1.1    simonb 	if (pm == pmap_kernel()) {
   1556        1.1    simonb #ifdef DIAGNOSTIC
   1557        1.1    simonb 		printf("ctx_alloc: kernel pmap!\n");
   1558        1.1    simonb #endif
   1559        1.1    simonb 		return (0);
   1560        1.1    simonb 	}
   1561        1.1    simonb 	s = splvm();
   1562        1.1    simonb 
   1563        1.1    simonb 	/* Find a likely context. */
   1564        1.1    simonb 	cnum = next;
   1565        1.1    simonb 	do {
		if ((++cnum) >= NUMCTX)
   1567        1.1    simonb 			cnum = MINCTX;
   1568        1.1    simonb 	} while (ctxbusy[cnum] != NULL && cnum != next);
   1569        1.1    simonb 
   1570        1.1    simonb 	/* Now clean it out */
   1571        1.1    simonb oops:
   1572        1.1    simonb 	if (cnum < MINCTX)
   1573        1.1    simonb 		cnum = MINCTX; /* Never steal ctx 0 or 1 */
   1574        1.1    simonb 	if (ctx_flush(cnum)) {
   1575        1.1    simonb 		/* oops -- something's wired. */
		if ((++cnum) >= NUMCTX)
   1577        1.1    simonb 			cnum = MINCTX;
   1578        1.1    simonb 		goto oops;
   1579        1.1    simonb 	}
   1580        1.1    simonb 
   1581        1.1    simonb 	if (ctxbusy[cnum]) {
   1582        1.1    simonb #ifdef DEBUG
   1583        1.1    simonb 		/* We should identify this pmap and clear it */
   1584        1.1    simonb 		printf("Warning: stealing context %d\n", cnum);
   1585        1.1    simonb #endif
   1586        1.1    simonb 		ctxbusy[cnum]->pm_ctx = 0;
   1587        1.1    simonb 	}
   1588        1.1    simonb 	ctxbusy[cnum] = pm;
   1589        1.1    simonb 	next = cnum;
   1590        1.1    simonb 	splx(s);
   1591        1.1    simonb 	pm->pm_ctx = cnum;
   1592        1.1    simonb 
   1593        1.1    simonb 	return cnum;
   1594        1.1    simonb }
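
/*
 * Example (sketch): callers lazily assign a context on first use, as
 * pmap_procwr() above does:
 */
#if 0
	if ((ctx = pm->pm_ctx) == 0)
		ctx = ctx_alloc(pm);
#endif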
   1595        1.1    simonb 
   1596        1.1    simonb /*
   1597        1.1    simonb  * Give away a context.
   1598        1.1    simonb  */
   1599        1.1    simonb void
   1600        1.1    simonb ctx_free(struct pmap *pm)
   1601        1.1    simonb {
   1602        1.1    simonb 	int oldctx;
   1603        1.1    simonb 
   1604        1.1    simonb 	oldctx = pm->pm_ctx;
   1605        1.1    simonb 
   1606        1.1    simonb 	if (oldctx == 0)
   1607        1.1    simonb 		panic("ctx_free: freeing kernel context");
   1608        1.1    simonb #ifdef DIAGNOSTIC
   1609        1.1    simonb 	if (ctxbusy[oldctx] == 0)
   1610        1.1    simonb 		printf("ctx_free: freeing free context %d\n", oldctx);
   1611        1.1    simonb 	if (ctxbusy[oldctx] != pm) {
		printf("ctx_free: freeing someone else's context\n "
   1613        1.1    simonb 		       "ctxbusy[%d] = %p, pm->pm_ctx = %p\n",
   1614        1.1    simonb 		       oldctx, (void *)(u_long)ctxbusy[oldctx], pm);
   1615       1.10       eeh #ifdef DDB
   1616        1.1    simonb 		Debugger();
   1617       1.10       eeh #endif
   1618        1.1    simonb 	}
   1619        1.1    simonb #endif
   1620        1.1    simonb 	/* We should verify it has not been stolen and reallocated... */
   1621        1.1    simonb 	ctxbusy[oldctx] = NULL;
   1622        1.1    simonb 	ctx_flush(oldctx);
   1623        1.1    simonb }
   1624        1.5       eeh 
   1625        1.1    simonb 
   1626        1.1    simonb #ifdef DEBUG
   1627        1.1    simonb /*
   1628        1.1    simonb  * Test ref/modify handling.
   1629        1.1    simonb  */
   1630       1.53       dsl void pmap_testout(void);
   1631        1.1    simonb void
   1632       1.54    cegger pmap_testout(void)
   1633        1.1    simonb {
   1634        1.1    simonb 	vaddr_t va;
   1635        1.1    simonb 	volatile int *loc;
   1636        1.1    simonb 	int val = 0;
   1637        1.1    simonb 	paddr_t pa;
   1638        1.1    simonb 	struct vm_page *pg;
   1639        1.1    simonb 	int ref, mod;
   1640        1.1    simonb 
   1641        1.1    simonb 	/* Allocate a page */
   1642       1.34      yamt 	va = (vaddr_t)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
   1643       1.34      yamt 	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	loc = (int *)va;
   1645        1.1    simonb 
   1646        1.1    simonb 	pmap_extract(pmap_kernel(), va, &pa);
   1647        1.1    simonb 	pg = PHYS_TO_VM_PAGE(pa);
   1648        1.1    simonb 	pmap_unwire(pmap_kernel(), va);
   1649        1.1    simonb 
   1650       1.34      yamt 	pmap_kremove(va, PAGE_SIZE);
   1651        1.1    simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1652        1.4     chris 	pmap_update(pmap_kernel());
   1653        1.1    simonb 
   1654        1.1    simonb 	/* Now clear reference and modify */
   1655        1.1    simonb 	ref = pmap_clear_reference(pg);
   1656        1.1    simonb 	mod = pmap_clear_modify(pg);
   1657        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1658        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1659        1.1    simonb 	       ref, mod);
   1660        1.1    simonb 
   1661        1.1    simonb 	/* Check it's properly cleared */
   1662        1.1    simonb 	ref = pmap_is_referenced(pg);
   1663        1.1    simonb 	mod = pmap_is_modified(pg);
   1664        1.1    simonb 	printf("Checking cleared page: ref %d, mod %d\n",
   1665        1.1    simonb 	       ref, mod);
   1666        1.1    simonb 
   1667        1.1    simonb 	/* Reference page */
   1668        1.1    simonb 	val = *loc;
   1669        1.1    simonb 
   1670        1.1    simonb 	ref = pmap_is_referenced(pg);
   1671        1.1    simonb 	mod = pmap_is_modified(pg);
   1672        1.1    simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1673        1.1    simonb 	       ref, mod, val);
   1674        1.1    simonb 
   1675        1.1    simonb 	/* Now clear reference and modify */
   1676        1.1    simonb 	ref = pmap_clear_reference(pg);
   1677        1.1    simonb 	mod = pmap_clear_modify(pg);
   1678        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1679        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1680        1.1    simonb 	       ref, mod);
   1681       1.12    simonb 
   1682        1.1    simonb 	/* Modify page */
   1683        1.1    simonb 	*loc = 1;
   1684        1.1    simonb 
   1685        1.1    simonb 	ref = pmap_is_referenced(pg);
   1686        1.1    simonb 	mod = pmap_is_modified(pg);
   1687        1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1688        1.1    simonb 	       ref, mod);
   1689        1.1    simonb 
   1690        1.1    simonb 	/* Now clear reference and modify */
   1691        1.1    simonb 	ref = pmap_clear_reference(pg);
   1692        1.1    simonb 	mod = pmap_clear_modify(pg);
   1693        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1694        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1695        1.1    simonb 	       ref, mod);
   1696        1.1    simonb 
   1697        1.1    simonb 	/* Check it's properly cleared */
   1698        1.1    simonb 	ref = pmap_is_referenced(pg);
   1699        1.1    simonb 	mod = pmap_is_modified(pg);
   1700        1.1    simonb 	printf("Checking cleared page: ref %d, mod %d\n",
   1701        1.1    simonb 	       ref, mod);
   1702        1.1    simonb 
   1703        1.1    simonb 	/* Modify page */
   1704        1.1    simonb 	*loc = 1;
   1705        1.1    simonb 
   1706        1.1    simonb 	ref = pmap_is_referenced(pg);
   1707        1.1    simonb 	mod = pmap_is_modified(pg);
   1708        1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1709        1.1    simonb 	       ref, mod);
   1710        1.1    simonb 
   1711        1.1    simonb 	/* Check pmap_protect() */
   1712        1.1    simonb 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_READ);
   1713        1.4     chris 	pmap_update(pmap_kernel());
   1714        1.1    simonb 	ref = pmap_is_referenced(pg);
   1715        1.1    simonb 	mod = pmap_is_modified(pg);
   1716        1.1    simonb 	printf("pmap_protect(VM_PROT_READ): ref %d, mod %d\n",
   1717        1.1    simonb 	       ref, mod);
   1718        1.1    simonb 
   1719        1.1    simonb 	/* Now clear reference and modify */
   1720        1.1    simonb 	ref = pmap_clear_reference(pg);
   1721        1.1    simonb 	mod = pmap_clear_modify(pg);
   1722        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1723        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1724        1.1    simonb 	       ref, mod);
   1725        1.1    simonb 
   1726        1.1    simonb 	/* Reference page */
   1727        1.1    simonb 	val = *loc;
   1728        1.1    simonb 
   1729        1.1    simonb 	ref = pmap_is_referenced(pg);
   1730        1.1    simonb 	mod = pmap_is_modified(pg);
   1731        1.1    simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1732        1.1    simonb 	       ref, mod, val);
   1733        1.1    simonb 
   1734        1.1    simonb 	/* Now clear reference and modify */
   1735        1.1    simonb 	ref = pmap_clear_reference(pg);
   1736        1.1    simonb 	mod = pmap_clear_modify(pg);
   1737        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1738        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1739        1.1    simonb 	       ref, mod);
   1740       1.12    simonb 
   1741        1.1    simonb 	/* Modify page */
   1742        1.1    simonb #if 0
   1743        1.1    simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1744        1.4     chris 	pmap_update(pmap_kernel());
   1745        1.1    simonb #endif
   1746        1.1    simonb 	*loc = 1;
   1747        1.1    simonb 
   1748        1.1    simonb 	ref = pmap_is_referenced(pg);
   1749        1.1    simonb 	mod = pmap_is_modified(pg);
   1750        1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1751        1.1    simonb 	       ref, mod);
   1752        1.1    simonb 
   1753        1.1    simonb 	/* Check pmap_protect() */
   1754        1.1    simonb 	pmap_protect(pmap_kernel(), va, va+1, VM_PROT_NONE);
   1755        1.4     chris 	pmap_update(pmap_kernel());
   1756        1.1    simonb 	ref = pmap_is_referenced(pg);
   1757        1.1    simonb 	mod = pmap_is_modified(pg);
   1758        1.1    simonb 	printf("pmap_protect(): ref %d, mod %d\n",
   1759        1.1    simonb 	       ref, mod);
   1760        1.1    simonb 
   1761        1.1    simonb 	/* Now clear reference and modify */
   1762        1.1    simonb 	ref = pmap_clear_reference(pg);
   1763        1.1    simonb 	mod = pmap_clear_modify(pg);
   1764        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1765        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1766        1.1    simonb 	       ref, mod);
   1767        1.1    simonb 
   1768        1.1    simonb 	/* Reference page */
   1769        1.1    simonb 	val = *loc;
   1770        1.1    simonb 
   1771        1.1    simonb 	ref = pmap_is_referenced(pg);
   1772        1.1    simonb 	mod = pmap_is_modified(pg);
   1773        1.1    simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1774        1.1    simonb 	       ref, mod, val);
   1775        1.1    simonb 
   1776        1.1    simonb 	/* Now clear reference and modify */
   1777        1.1    simonb 	ref = pmap_clear_reference(pg);
   1778        1.1    simonb 	mod = pmap_clear_modify(pg);
   1779        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1780        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1781        1.1    simonb 	       ref, mod);
   1782       1.12    simonb 
   1783        1.1    simonb 	/* Modify page */
   1784        1.1    simonb #if 0
   1785        1.1    simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1786        1.4     chris 	pmap_update(pmap_kernel());
   1787        1.1    simonb #endif
   1788        1.1    simonb 	*loc = 1;
   1789        1.1    simonb 
   1790        1.1    simonb 	ref = pmap_is_referenced(pg);
   1791        1.1    simonb 	mod = pmap_is_modified(pg);
   1792        1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1793        1.1    simonb 	       ref, mod);
   1794        1.1    simonb 
	/* Check pmap_page_protect() */
   1796        1.1    simonb 	pmap_page_protect(pg, VM_PROT_READ);
   1797        1.1    simonb 	ref = pmap_is_referenced(pg);
   1798        1.1    simonb 	mod = pmap_is_modified(pg);
   1799        1.1    simonb 	printf("pmap_page_protect(VM_PROT_READ): ref %d, mod %d\n",
   1800        1.1    simonb 	       ref, mod);
   1801        1.1    simonb 
   1802        1.1    simonb 	/* Now clear reference and modify */
   1803        1.1    simonb 	ref = pmap_clear_reference(pg);
   1804        1.1    simonb 	mod = pmap_clear_modify(pg);
   1805        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1806        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1807        1.1    simonb 	       ref, mod);
   1808        1.1    simonb 
   1809        1.1    simonb 	/* Reference page */
   1810        1.1    simonb 	val = *loc;
   1811        1.1    simonb 
   1812        1.1    simonb 	ref = pmap_is_referenced(pg);
   1813        1.1    simonb 	mod = pmap_is_modified(pg);
   1814        1.1    simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1815        1.1    simonb 	       ref, mod, val);
   1816        1.1    simonb 
   1817        1.1    simonb 	/* Now clear reference and modify */
   1818        1.1    simonb 	ref = pmap_clear_reference(pg);
   1819        1.1    simonb 	mod = pmap_clear_modify(pg);
   1820        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1821        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1822        1.1    simonb 	       ref, mod);
   1823       1.12    simonb 
   1824        1.1    simonb 	/* Modify page */
   1825        1.1    simonb #if 0
   1826        1.1    simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1827        1.4     chris 	pmap_update(pmap_kernel());
   1828        1.1    simonb #endif
   1829        1.1    simonb 	*loc = 1;
   1830        1.1    simonb 
   1831        1.1    simonb 	ref = pmap_is_referenced(pg);
   1832        1.1    simonb 	mod = pmap_is_modified(pg);
   1833        1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1834        1.1    simonb 	       ref, mod);
   1835        1.1    simonb 
	/* Check pmap_page_protect() */
   1837        1.1    simonb 	pmap_page_protect(pg, VM_PROT_NONE);
   1838        1.1    simonb 	ref = pmap_is_referenced(pg);
   1839        1.1    simonb 	mod = pmap_is_modified(pg);
   1840        1.1    simonb 	printf("pmap_page_protect(): ref %d, mod %d\n",
   1841        1.1    simonb 	       ref, mod);
   1842        1.1    simonb 
   1843        1.1    simonb 	/* Now clear reference and modify */
   1844        1.1    simonb 	ref = pmap_clear_reference(pg);
   1845        1.1    simonb 	mod = pmap_clear_modify(pg);
   1846        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1847        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1848        1.1    simonb 	       ref, mod);
   1849        1.1    simonb 
   1850        1.1    simonb 
   1851        1.1    simonb 	/* Reference page */
   1852        1.1    simonb 	val = *loc;
   1853        1.1    simonb 
   1854        1.1    simonb 	ref = pmap_is_referenced(pg);
   1855        1.1    simonb 	mod = pmap_is_modified(pg);
   1856        1.1    simonb 	printf("Referenced page: ref %d, mod %d val %x\n",
   1857        1.1    simonb 	       ref, mod, val);
   1858        1.1    simonb 
   1859        1.1    simonb 	/* Now clear reference and modify */
   1860        1.1    simonb 	ref = pmap_clear_reference(pg);
   1861        1.1    simonb 	mod = pmap_clear_modify(pg);
   1862        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1863        1.1    simonb 	       (void *)(u_long)va, (long)pa,
   1864        1.1    simonb 	       ref, mod);
   1865       1.12    simonb 
   1866        1.1    simonb 	/* Modify page */
   1867        1.1    simonb #if 0
   1868        1.1    simonb 	pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL, 0);
   1869        1.4     chris 	pmap_update(pmap_kernel());
   1870        1.1    simonb #endif
   1871        1.1    simonb 	*loc = 1;
   1872        1.1    simonb 
   1873        1.1    simonb 	ref = pmap_is_referenced(pg);
   1874        1.1    simonb 	mod = pmap_is_modified(pg);
   1875        1.1    simonb 	printf("Modified page: ref %d, mod %d\n",
   1876        1.1    simonb 	       ref, mod);
   1877        1.1    simonb 
   1878        1.1    simonb 	/* Unmap page */
   1879        1.1    simonb 	pmap_remove(pmap_kernel(), va, va+1);
   1880        1.4     chris 	pmap_update(pmap_kernel());
   1881        1.1    simonb 	ref = pmap_is_referenced(pg);
   1882        1.1    simonb 	mod = pmap_is_modified(pg);
   1883        1.1    simonb 	printf("Unmapped page: ref %d, mod %d\n", ref, mod);
   1884        1.1    simonb 
   1885        1.1    simonb 	/* Now clear reference and modify */
   1886        1.1    simonb 	ref = pmap_clear_reference(pg);
   1887        1.1    simonb 	mod = pmap_clear_modify(pg);
   1888        1.1    simonb 	printf("Clearing page va %p pa %lx: ref %d, mod %d\n",
   1889        1.1    simonb 	       (void *)(u_long)va, (long)pa, ref, mod);
   1890        1.1    simonb 
   1891        1.1    simonb 	/* Check it's properly cleared */
   1892        1.1    simonb 	ref = pmap_is_referenced(pg);
   1893        1.1    simonb 	mod = pmap_is_modified(pg);
   1894        1.1    simonb 	printf("Checking cleared page: ref %d, mod %d\n",
   1895        1.1    simonb 	       ref, mod);
   1896        1.1    simonb 
   1897       1.34      yamt 	pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
   1898       1.59    cegger 	pmap_kenter_pa(va, pa, VM_PROT_ALL, 0);
   1899       1.34      yamt 	uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE, UVM_KMF_WIRED);
   1900        1.1    simonb }
   1901        1.1    simonb #endif