/*	$NetBSD: pmap.c,v 1.16 2001/07/29 12:45:27 chris Exp $	*/

/*
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001 Christopher Gilbert
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the company nor the name of the author may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil@causality.com>.
 */

/*
 * The dram block info is currently referenced from the bootconfig.
 * This should be placed in a separate structure.
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG		- Build in pmap_debug_level code
 */

/* Include header files */

#include "opt_pmap_debug.h"
#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/cdefs.h>

#include <uvm/uvm.h>

#include <machine/bootconfig.h>
#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/pcb.h>
#include <machine/param.h>
#include <machine/katelib.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.16 2001/07/29 12:45:27 chris Exp $");

#ifdef PMAP_DEBUG
#define	PDEBUG(_lev_,_stat_) \
	if (pmap_debug_level >= (_lev_)) \
		((_stat_))
int pmap_debug_level = -2;
#else	/* PMAP_DEBUG */
#define	PDEBUG(_lev_,_stat_) /* Nothing */
#endif	/* PMAP_DEBUG */
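
/*
 * Example: the statement argument of PDEBUG() is executed only when
 * pmap_debug_level is at least the given level, e.g.
 *
 *	PDEBUG(5, printf("pmap_enter_pv: pv %p\n", pv));
 *
 * prints only when pmap_debug_level >= 5.
 */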

struct pmap     kernel_pmap_store;

/*
 * pool that pmap structures are allocated from
 */

struct pool pmap_pmap_pool;

pagehook_t page_hook0;
pagehook_t page_hook1;
char *memhook;
pt_entry_t msgbufpte;
extern caddr_t msgbufaddr;

#ifdef DIAGNOSTIC
boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
#endif

TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;

int pv_nfree = 0;

vsize_t npages;

extern paddr_t physical_start;
extern paddr_t physical_freestart;
extern paddr_t physical_end;
extern paddr_t physical_freeend;
extern unsigned int free_pages;
extern int max_processes;

vaddr_t virtual_start;
vaddr_t virtual_end;

vaddr_t avail_start;
vaddr_t avail_end;

extern pv_addr_t systempage;

#define ALLOC_PAGE_HOOK(x, s) \
	x.va = virtual_start; \
	x.pte = (pt_entry_t *)pmap_pte(pmap_kernel(), virtual_start); \
	virtual_start += s;
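
/*
 * Note: ALLOC_PAGE_HOOK() evaluates pmap_pte() on the current
 * virtual_start before advancing it by s bytes, so it is only usable
 * during bootstrap, while the kernel page tables already cover the
 * virtual range being handed out.
 */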

/* Variables used by the L1 page table queue code */
SIMPLEQ_HEAD(l1pt_queue, l1pt);
struct l1pt_queue l1pt_static_queue;	/* head of our static l1 queue */
int l1pt_static_queue_count;		/* items in the static l1 queue */
int l1pt_static_create_count;		/* static l1 items created */
struct l1pt_queue l1pt_queue;		/* head of our l1 queue */
int l1pt_queue_count;			/* items in the l1 queue */
int l1pt_create_count;			/* stat - L1's create count */
int l1pt_reuse_count;			/* stat - L1's reused count */

/* Local function prototypes (not used outside this file) */
pt_entry_t *pmap_pte __P((struct pmap *pmap, vaddr_t va));
void map_pagetable __P((vaddr_t pagetable, vaddr_t va,
    paddr_t pa, unsigned int flags));
void pmap_copy_on_write __P((paddr_t pa));
void pmap_pinit __P((struct pmap *));
void pmap_freepagedir __P((struct pmap *));
void pmap_release __P((struct pmap *));

/* Other function prototypes */
extern void bzero_page __P((vaddr_t));
extern void bcopy_page __P((vaddr_t, vaddr_t));

struct l1pt *pmap_alloc_l1pt __P((void));
static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
     vaddr_t l2pa));

static pt_entry_t *pmap_map_ptes __P((struct pmap *));
/* eventually this will be a function */
#define pmap_unmap_ptes(a)

void pmap_vac_me_harder __P((struct pmap *, struct pv_entry *,
	    pt_entry_t *, boolean_t));

#ifdef MYCROFT_HACK
int mycroft_hack = 0;
#endif

/* Function to set the debug level of the pmap code */

#ifdef PMAP_DEBUG
void
pmap_debug(level)
	int level;
{
	pmap_debug_level = level;
	printf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif	/* PMAP_DEBUG */

#include "isadma.h"

#if NISADMA > 0
/*
 * Used to protect memory for ISA DMA bounce buffers.  If, when loading
 * pages into the system, memory intersects with any of these ranges,
 * the intersecting memory will be loaded into a lower-priority free list.
 */
bus_dma_segment_t *pmap_isa_dma_ranges;
int pmap_isa_dma_nranges;

boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
	    paddr_t *, psize_t *));

/*
 * Check if a memory range intersects with an ISA DMA range, and
 * return the page-rounded intersection if it does.  The intersection
 * will be placed on a lower-priority free list.
 */
boolean_t
pmap_isa_dma_range_intersect(pa, size, pap, sizep)
	paddr_t pa;
	psize_t size;
	paddr_t *pap;
	psize_t *sizep;
{
	bus_dma_segment_t *ds;
	int i;

	if (pmap_isa_dma_ranges == NULL)
		return (FALSE);

	for (i = 0, ds = pmap_isa_dma_ranges;
	     i < pmap_isa_dma_nranges; i++, ds++) {
		if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    ds->ds_addr + ds->ds_len) - pa);
			return (TRUE);
		}
		if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(ds->ds_addr);
			*sizep = round_page(min((pa + size) - ds->ds_addr,
			    ds->ds_len));
			return (TRUE);
		}
	}

	/*
	 * No intersection found.
	 */
	return (FALSE);
}
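
/*
 * Worked example (illustrative values): with one ISA DMA range of
 * { ds_addr = 0x000000, ds_len = 0x1000000 } and a region pa = 0xfff000,
 * size = 0x2000, the start of the region falls inside the range, so
 * *pap = 0xfff000 and *sizep = round_page(0x1000000 - 0xfff000) = 0x1000:
 * only the first page is diverted to the ISA DMA free list.
 */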
#endif /* NISADMA > 0 */

/*
 * Functions for manipulating pv_entry structures. These are used to keep a
 * record of the mappings of virtual addresses and the associated physical
 * pages.
 */

/*
 * Allocate a new pv_entry structure from the freelist. If the list is
 * empty allocate a new page and fill the freelist.
 */
struct pv_entry *
pmap_alloc_pv()
{
	struct pv_page *pvp;
	struct pv_entry *pv;
	int i;

	/*
	 * Do we have any free pv_entry structures left ?
	 * If not allocate a page of them
	 */

	if (pv_nfree == 0) {
		/* NOTE: can't lock kernel_map here */
		MALLOC(pvp, struct pv_page *, NBPG, M_VMPVENT, M_WAITOK);
		if (pvp == 0)
			panic("pmap_alloc_pv: kmem_alloc() failed");
		pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
		for (i = NPVPPG - 2; i; i--, pv++)
			pv->pv_next = pv + 1;
		pv->pv_next = 0;
		pv_nfree += pvp->pvp_pgi.pgi_nfree = NPVPPG - 1;
		TAILQ_INSERT_HEAD(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
		pv = &pvp->pvp_pv[0];
	} else {
		--pv_nfree;
		pvp = pv_page_freelist.tqh_first;
		if (--pvp->pvp_pgi.pgi_nfree == 0) {
			TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
		}
		pv = pvp->pvp_pgi.pgi_freelist;
#ifdef DIAGNOSTIC
		if (pv == 0)
			panic("pmap_alloc_pv: pgi_nfree inconsistent");
#endif	/* DIAGNOSTIC */
		pvp->pvp_pgi.pgi_freelist = pv->pv_next;
	}
	return pv;
}

/*
 * Release a pv_entry structure putting it back on the freelist.
 */

void
pmap_free_pv(pv)
	struct pv_entry *pv;
{
	struct pv_page *pvp;

	pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
	switch (++pvp->pvp_pgi.pgi_nfree) {
	case 1:
		TAILQ_INSERT_TAIL(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
	default:
		pv->pv_next = pvp->pvp_pgi.pgi_freelist;
		pvp->pvp_pgi.pgi_freelist = pv;
		++pv_nfree;
		break;
	case NPVPPG:
		pv_nfree -= NPVPPG - 1;
		TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
		FREE((vaddr_t)pvp, M_VMPVENT);
		break;
	}
}
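
/*
 * Note on the switch above: pgi_nfree has already been incremented, so
 * case 1 means the page was previously full and must rejoin the free
 * page list (it then falls through to the default case, which chains
 * the entry onto the page's freelist); case NPVPPG means every entry
 * in the page is now free, so the whole page is returned instead.
 */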

#if 0
void
pmap_collect_pv()
{
	struct pv_page_list pv_page_collectlist;
	struct pv_page *pvp, *npvp;
	struct pv_entry *ph, *ppv, *pv, *npv;
	int s;

	TAILQ_INIT(&pv_page_collectlist);

	for (pvp = pv_page_freelist.tqh_first; pvp; pvp = npvp) {
		if (pv_nfree < NPVPPG)
			break;
		npvp = pvp->pvp_pgi.pgi_list.tqe_next;
		if (pvp->pvp_pgi.pgi_nfree > NPVPPG / 3) {
			TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
			TAILQ_INSERT_TAIL(&pv_page_collectlist, pvp,
			    pvp_pgi.pgi_list);
			pv_nfree -= NPVPPG;
			pvp->pvp_pgi.pgi_nfree = -1;
		}
	}

	if (pv_page_collectlist.tqh_first == 0)
		return;

	for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
		if (ph->pv_pmap == 0)
			continue;
		s = splvm();
		for (ppv = ph; (pv = ppv->pv_next) != 0; ) {
			pvp = (struct pv_page *) trunc_page((vaddr_t)pv);
			if (pvp->pvp_pgi.pgi_nfree == -1) {
				pvp = pv_page_freelist.tqh_first;
				if (--pvp->pvp_pgi.pgi_nfree == 0) {
					TAILQ_REMOVE(&pv_page_freelist,
					    pvp, pvp_pgi.pgi_list);
				}
				npv = pvp->pvp_pgi.pgi_freelist;
#ifdef DIAGNOSTIC
				if (npv == 0)
					panic("pmap_collect_pv: pgi_nfree inconsistent");
#endif	/* DIAGNOSTIC */
				pvp->pvp_pgi.pgi_freelist = npv->pv_next;
				*npv = *pv;
				ppv->pv_next = npv;
				ppv = npv;
			} else
				ppv = pv;
		}
		splx(s);
	}

	for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
		npvp = pvp->pvp_pgi.pgi_list.tqe_next;
		FREE((vaddr_t)pvp, M_VMPVENT);
	}
}
#endif

/*
 * Enter a new physical-virtual mapping into the pv table
 */

/*__inline*/ void
pmap_enter_pv(pmap, va, pv, flags)
	struct pmap *pmap;
	vaddr_t va;
	struct pv_entry *pv;
	u_int flags;
{
	struct pv_entry *npv;
	u_int s;

#ifdef DIAGNOSTIC
	if (!pmap_initialized)
		panic("pmap_enter_pv: !pmap_initialized");
#endif

	s = splvm();

	PDEBUG(5, printf("pmap_enter_pv: pv %p: %08lx/%p/%p\n",
	    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));

	if (pv->pv_pmap == NULL) {
		/*
		 * No entries yet, use header as the first entry
		 */
		pv->pv_va = va;
		pv->pv_pmap = pmap;
		pv->pv_next = NULL;
		pv->pv_flags = flags;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
#ifdef PMAP_DEBUG
		for (npv = pv; npv; npv = npv->pv_next)
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				panic("pmap_enter_pv: already in pv_tab pv %p: %08lx/%p/%p",
				    pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		npv = pmap_alloc_pv();
		/* Must make sure that the new entry is before any others
		 * for the same pmap.  Otherwise the vac handling code
		 * will get confused.
		 * XXX this would be better if we used lists like i386 (in fact
		 * this would be a lot simpler)
		 */
		*npv = *pv;
		pv->pv_va = va;
		pv->pv_pmap = pmap;
		pv->pv_flags = flags;
		pv->pv_next = npv;
	}

	if (flags & PT_W)
		++pmap->pm_stats.wired_count;

	splx(s);
}


/*
 * Remove a physical-virtual mapping from the pv table
 */

/*__inline*/ void
pmap_remove_pv(pmap, va, pv)
	struct pmap *pmap;
	vaddr_t va;
	struct pv_entry *pv;
{
	struct pv_entry *npv;
	u_int s;
	u_int flags = 0;

#ifdef DIAGNOSTIC
	if (!pmap_initialized)
		panic("pmap_remove_pv: !pmap_initialized");
#endif

	s = splvm();

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */

	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			flags = npv->pv_flags;
			pmap_free_pv(npv);
		} else {
			flags = pv->pv_flags;
			pv->pv_pmap = NULL;
		}
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				break;
		}
		if (npv) {
			pv->pv_next = npv->pv_next;
			flags = npv->pv_flags;
			pmap_free_pv(npv);
		} else
			panic("pmap_remove_pv: lost entry");
	}

	if (flags & PT_W)
		--pmap->pm_stats.wired_count;

	splx(s);
}

/*
 * Modify a physical-virtual mapping in the pv table
 */

/*__inline */ u_int
pmap_modify_pv(pmap, va, pv, bic_mask, eor_mask)
	struct pmap *pmap;
	vaddr_t va;
	struct pv_entry *pv;
	u_int bic_mask;
	u_int eor_mask;
{
	struct pv_entry *npv;
	u_int s;
	u_int flags, oflags;

	PDEBUG(5, printf("pmap_modify_pv(pmap=%p, va=%08lx, pv=%p, bic_mask=%08x, eor_mask=%08x)\n",
	    pmap, va, pv, bic_mask, eor_mask));

#ifdef DIAGNOSTIC
	if (!pmap_initialized)
		panic("pmap_modify_pv: !pmap_initialized");
#endif

	s = splvm();

	PDEBUG(5, printf("pmap_modify_pv: pv %p: %08lx/%p/%p/%08x ",
	    pv, pv->pv_va, pv->pv_pmap, pv->pv_next, pv->pv_flags));

	/*
	 * There is at least one VA mapping this page.
	 */

	for (npv = pv; npv; npv = npv->pv_next) {
		if (pmap == npv->pv_pmap && va == npv->pv_va) {
			oflags = npv->pv_flags;
			npv->pv_flags = flags =
			    ((oflags & ~bic_mask) ^ eor_mask);
			if ((flags ^ oflags) & PT_W) {
				if (flags & PT_W)
					++pmap->pm_stats.wired_count;
				else
					--pmap->pm_stats.wired_count;
			}
			PDEBUG(0, printf("done flags=%08x\n", flags));
			splx(s);
			return (oflags);
		}
	}

	PDEBUG(0, printf("done.\n"));
	splx(s);
	return (0);
}
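
/*
 * Example: the new flags are computed as (oflags & ~bic_mask) ^ eor_mask,
 * so to wire a mapping without disturbing the other flags use
 *
 *	pmap_modify_pv(pmap, va, pv, PT_W, PT_W);
 *
 * and to unwire it again
 *
 *	pmap_modify_pv(pmap, va, pv, PT_W, 0);
 */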


/*
 * Map the specified level 2 pagetable into the level 1 page table for
 * the given pmap to cover a chunk of virtual address space starting from the
 * address specified.
 */
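/*
 * Background (a sketch of the layout this relies on): each of the 4096
 * word-sized L1 entries maps 1MB of virtual space via a 1KB L2 table.
 * L2 tables are allocated a 4KB page at a time, giving four consecutive
 * 1KB tables, so the index below is rounded down to a multiple of four
 * and the four quarters of the L2 page (+0x000/+0x400/+0x800/+0xc00)
 * are wired into four consecutive L1 slots, covering 4MB.
 */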
static /*__inline*/ void
pmap_map_in_l1(pmap, va, l2pa)
	struct pmap *pmap;
	vaddr_t va, l2pa;
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> PDSHIFT) & ~3;

	PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
	    pmap->pm_pdir, L1_PTE(l2pa), ptva));

	/* Map page table into the L1. */
	pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
	pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
	pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
	pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);

	PDEBUG(0, printf("pt self reference %lx in %lx\n",
	    L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));

	/* Map the page table into the page table area. */
	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_PTE_NC_NB(l2pa, AP_KRW);

	/* XXX should be a purge */
/*	cpu_tlb_flushD();*/
}

#if 0
static /*__inline*/ void
pmap_unmap_in_l1(pmap, va)
	struct pmap *pmap;
	vaddr_t va;
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> PDSHIFT) & ~3;

	/* Unmap page table from the L1. */
	pmap->pm_pdir[ptva + 0] = 0;
	pmap->pm_pdir[ptva + 1] = 0;
	pmap->pm_pdir[ptva + 2] = 0;
	pmap->pm_pdir[ptva + 3] = 0;

	/* Unmap the page table from the page table area. */
	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;

	/* XXX should be a purge */
/*	cpu_tlb_flushD();*/
}
#endif


/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vaddr_t
pmap_map(va, spa, epa, prot)
	vaddr_t va, spa, epa;
	int prot;
{
	while (spa < epa) {
		pmap_enter(pmap_kernel(), va, spa, prot, 0);
		va += NBPG;
		spa += NBPG;
	}
	pmap_update();
	return(va);
}
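
/*
 * Typical usage (illustrative): map the physical range [spa, epa) at va
 * and get back the first unused virtual address:
 *
 *	va = pmap_map(va, spa, epa, VM_PROT_READ | VM_PROT_WRITE);
 */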


/*
 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
 *
 * bootstrap the pmap system. This is called from initarm and allows
 * the pmap system to initialise any structures it requires.
 *
 * Currently this sets up the kernel_pmap that is statically allocated
 * and also allocates virtual addresses for certain page hooks.
 * Currently only one page hook is allocated; it is used
 * to zero physical pages of memory.
 * It also initialises the start and end address of the kernel data space.
 */
extern paddr_t physical_freestart;
extern paddr_t physical_freeend;

struct pv_entry *boot_pvent;
char *boot_attrs;

void
pmap_bootstrap(kernel_l1pt, kernel_ptpt)
	pd_entry_t *kernel_l1pt;
	pv_addr_t kernel_ptpt;
{
	int loop;
	paddr_t start, end;
#if NISADMA > 0
	paddr_t istart;
	psize_t isize;
#endif
	vsize_t size;

	pmap_kernel()->pm_pdir = kernel_l1pt;
	pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
	pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
	simple_lock_init(&pmap_kernel()->pm_lock);
	pmap_kernel()->pm_obj.pgops = NULL;
	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
	pmap_kernel()->pm_obj.uo_npages = 0;
	pmap_kernel()->pm_obj.uo_refs = 1;

	/*
	 * Initialize PAGE_SIZE-dependent variables.
	 */
	uvm_setpagesize();

	npages = 0;
	loop = 0;
	while (loop < bootconfig.dramblocks) {
		start = (paddr_t)bootconfig.dram[loop].address;
		end = start + (bootconfig.dram[loop].pages * NBPG);
		if (start < physical_freestart)
			start = physical_freestart;
		if (end > physical_freeend)
			end = physical_freeend;
#if 0
		printf("%d: %lx -> %lx\n", loop, start, end - 1);
#endif
#if NISADMA > 0
		if (pmap_isa_dma_range_intersect(start, end - start,
		    &istart, &isize)) {
			/*
			 * Place the pages that intersect with the
			 * ISA DMA range onto the ISA DMA free list.
			 */
#if 0
			printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
			    istart + isize - 1);
#endif
			uvm_page_physload(atop(istart),
			    atop(istart + isize), atop(istart),
			    atop(istart + isize), VM_FREELIST_ISADMA);
			npages += atop(istart + isize) - atop(istart);

			/*
			 * Load the pieces that come before
			 * the intersection into the default
			 * free list.
			 */
			if (start < istart) {
#if 0
				printf("    BEFORE 0x%lx -> 0x%lx\n",
				    start, istart - 1);
#endif
				uvm_page_physload(atop(start),
				    atop(istart), atop(start),
				    atop(istart), VM_FREELIST_DEFAULT);
				npages += atop(istart) - atop(start);
			}

			/*
			 * Load the pieces that come after
			 * the intersection into the default
			 * free list.
			 */
			if ((istart + isize) < end) {
#if 0
				printf("     AFTER 0x%lx -> 0x%lx\n",
				    (istart + isize), end - 1);
#endif
				uvm_page_physload(atop(istart + isize),
				    atop(end), atop(istart + isize),
				    atop(end), VM_FREELIST_DEFAULT);
				npages += atop(end) - atop(istart + isize);
			}
		} else {
			uvm_page_physload(atop(start), atop(end),
			    atop(start), atop(end), VM_FREELIST_DEFAULT);
			npages += atop(end) - atop(start);
		}
#else	/* NISADMA > 0 */
		uvm_page_physload(atop(start), atop(end),
		    atop(start), atop(end), VM_FREELIST_DEFAULT);
		npages += atop(end) - atop(start);
#endif /* NISADMA > 0 */
		++loop;
	}

#ifdef MYCROFT_HACK
	printf("npages = %ld\n", npages);
#endif

	virtual_start = KERNEL_VM_BASE;
	virtual_end = virtual_start + KERNEL_VM_SIZE - 1;

	ALLOC_PAGE_HOOK(page_hook0, NBPG);
	ALLOC_PAGE_HOOK(page_hook1, NBPG);

	/*
	 * The mem special device needs a virtual hook but we don't
	 * need a pte
	 */
	memhook = (char *)virtual_start;
	virtual_start += NBPG;

	msgbufaddr = (caddr_t)virtual_start;
	msgbufpte = (pt_entry_t)pmap_pte(pmap_kernel(), virtual_start);
	virtual_start += round_page(MSGBUFSIZE);

	size = npages * sizeof(struct pv_entry);
	boot_pvent = (struct pv_entry *)uvm_pageboot_alloc(size);
	bzero(boot_pvent, size);
	size = npages * sizeof(char);
	boot_attrs = (char *)uvm_pageboot_alloc(size);
	bzero(boot_attrs, size);

	/*
	 * initialize the pmap pool.
	 */

	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
		  0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);

	cpu_cache_cleanD();
}

/*
 * void pmap_init(void)
 *
 * Initialize the pmap module.
 * Called by vm_init() in vm/vm_init.c in order to initialise
 * any structures that the pmap system needs to map virtual memory.
 */

extern int physmem;

void
pmap_init()
{
	int lcv;

#ifdef MYCROFT_HACK
	printf("physmem = %d\n", physmem);
#endif

	/*
	 * Set the available memory vars - These do not map to real memory
	 * addresses and cannot, as the physical memory is fragmented.
	 * They are used by ps for %mem calculations.
	 * One could argue whether this should be the entire memory or just
	 * the memory that is usable in a user process.
	 */
	avail_start = 0;
	avail_end = physmem * NBPG;

	/* Set up pmap info for physsegs. */
	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
		vm_physmem[lcv].pmseg.pvent = boot_pvent;
		boot_pvent += vm_physmem[lcv].end - vm_physmem[lcv].start;
		vm_physmem[lcv].pmseg.attrs = boot_attrs;
		boot_attrs += vm_physmem[lcv].end - vm_physmem[lcv].start;
	}
#ifdef MYCROFT_HACK
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
		    lcv,
		    vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
		    vm_physmem[lcv].start, vm_physmem[lcv].end);
	}
#endif
	TAILQ_INIT(&pv_page_freelist);

#ifdef DIAGNOSTIC
	/* Now it is safe to enable pv_entry recording. */
	pmap_initialized = TRUE;
#endif

	/* Initialise our L1 page table queues and counters */
	SIMPLEQ_INIT(&l1pt_static_queue);
	l1pt_static_queue_count = 0;
	l1pt_static_create_count = 0;
	SIMPLEQ_INIT(&l1pt_queue);
	l1pt_queue_count = 0;
	l1pt_create_count = 0;
	l1pt_reuse_count = 0;
}

/*
 * pmap_postinit()
 *
 * This routine is called after the vm and kmem subsystems have been
 * initialised. This allows the pmap code to perform any initialisation
 * that can only be done once the memory allocation is in place.
 */

void
pmap_postinit()
{
	int loop;
	struct l1pt *pt;

#ifdef PMAP_STATIC_L1S
	for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
#else	/* PMAP_STATIC_L1S */
	for (loop = 0; loop < max_processes; ++loop) {
#endif	/* PMAP_STATIC_L1S */
		/* Allocate an L1 page table */
		pt = pmap_alloc_l1pt();
		if (!pt)
			panic("Cannot allocate static L1 page tables\n");

		/* Clean it */
		bzero((void *)pt->pt_va, PD_SIZE);
		pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
		/* Add the page table to the queue */
		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
		++l1pt_static_queue_count;
		++l1pt_static_create_count;
	}
}


/*
 * Create and return a physical map.
 */

pmap_t
pmap_create()
{
	struct pmap *pmap;

	/*
	 * Fetch pmap entry from the pool
	 */

	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	bzero(pmap, sizeof(*pmap));

	simple_lock_init(&pmap->pm_obj.vmobjlock);
	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
	TAILQ_INIT(&pmap->pm_obj.memq);
	pmap->pm_obj.uo_npages = 0;
	pmap->pm_obj.uo_refs = 1;
	pmap->pm_stats.wired_count = 0;
	pmap->pm_stats.resident_count = 1;

	/* Now init the machine part of the pmap */
	pmap_pinit(pmap);
	return(pmap);
}

/*
 * pmap_alloc_l1pt()
 *
 * This routine allocates physical and virtual memory for an L1 page table
 * and wires it.
 * An l1pt structure is returned to describe the allocated page table.
 *
 * This routine is allowed to fail if the required memory cannot be allocated.
 * In this case NULL is returned.
 */

struct l1pt *
pmap_alloc_l1pt(void)
{
	paddr_t pa;
	vaddr_t va;
	struct l1pt *pt;
	int error;
	struct vm_page *m;
	pt_entry_t *ptes;

	/* Allocate virtual address space for the L1 page table */
	va = uvm_km_valloc(kernel_map, PD_SIZE);
	if (va == 0) {
#ifdef DIAGNOSTIC
		printf("pmap: Cannot allocate pageable memory for L1\n");
#endif	/* DIAGNOSTIC */
		return(NULL);
	}

	/* Allocate memory for the l1pt structure */
	pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&pt->pt_plist);
	error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
	    PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
	if (error) {
#ifdef DIAGNOSTIC
		printf("pmap: Cannot allocate physical memory for L1 (%d)\n",
		    error);
#endif	/* DIAGNOSTIC */
		/* Release the resources we already have claimed */
		free(pt, M_VMPMAP);
		uvm_km_free(kernel_map, va, PD_SIZE);
		return(NULL);
	}

	/* Map our physical pages into our virtual space */
	pt->pt_va = va;
	m = pt->pt_plist.tqh_first;
	ptes = pmap_map_ptes(pmap_kernel());
	while (m && va < (pt->pt_va + PD_SIZE)) {
		pa = VM_PAGE_TO_PHYS(m);

		pmap_enter(pmap_kernel(), va, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);

		/* Revoke cacheability and bufferability */
		/* XXX should be done better than this */
		ptes[arm_byte_to_page(va)] &= ~(PT_C | PT_B);

		va += NBPG;
		m = m->pageq.tqe_next;
	}
	pmap_unmap_ptes(pmap_kernel());
	pmap_update();

#ifdef DIAGNOSTIC
	if (m)
		panic("pmap_alloc_l1pt: pglist not empty\n");
#endif	/* DIAGNOSTIC */

	pt->pt_flags = 0;
	return(pt);
}

/*
 * Free an L1 page table previously allocated with pmap_alloc_l1pt().
 */
void
pmap_free_l1pt(pt)
	struct l1pt *pt;
{
	/* Separate the physical memory from the virtual space */
	pmap_remove(pmap_kernel(), pt->pt_va, pt->pt_va + PD_SIZE);
	pmap_update();

	/* Return the physical memory */
	uvm_pglistfree(&pt->pt_plist);

	/* Free the virtual space */
	uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);

	/* Free the l1pt structure */
	free(pt, M_VMPMAP);
}
   1093   1.1     matt 
   1094   1.1     matt /*
   1095   1.1     matt  * Allocate a page directory.
   1096   1.1     matt  * This routine will either allocate a new page directory from the pool
   1097   1.1     matt  * of L1 page tables currently held by the kernel or it will allocate
   1098   1.1     matt  * a new one via pmap_alloc_l1pt().
   1099   1.1     matt  * It will then initialise the l1 page table for use.
   1100   1.1     matt  */
   1101   1.1     matt int
   1102   1.1     matt pmap_allocpagedir(pmap)
   1103   1.1     matt 	struct pmap *pmap;
   1104   1.1     matt {
   1105   1.2     matt 	paddr_t pa;
   1106   1.1     matt 	struct l1pt *pt;
   1107   1.1     matt 	pt_entry_t *pte;
   1108   1.1     matt 
   1109   1.1     matt 	PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
   1110   1.1     matt 
   1111   1.1     matt 	/* Do we have any spare L1's lying around ? */
   1112   1.1     matt 	if (l1pt_static_queue_count) {
   1113   1.1     matt 		--l1pt_static_queue_count;
   1114   1.1     matt 		pt = l1pt_static_queue.sqh_first;
   1115   1.1     matt 		SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
   1116   1.1     matt 	} else if (l1pt_queue_count) {
   1117   1.1     matt 		--l1pt_queue_count;
   1118   1.1     matt 		pt = l1pt_queue.sqh_first;
   1119   1.1     matt 		SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
   1120   1.1     matt 		++l1pt_reuse_count;
   1121   1.1     matt 	} else {
   1122   1.1     matt 		pt = pmap_alloc_l1pt();
   1123   1.1     matt 		if (!pt)
   1124   1.1     matt 			return(ENOMEM);
   1125   1.1     matt 		++l1pt_create_count;
   1126   1.1     matt 	}
   1127   1.1     matt 
   1128   1.1     matt 	/* Store the pointer to the l1 descriptor in the pmap. */
   1129   1.1     matt 	pmap->pm_l1pt = pt;
   1130   1.1     matt 
   1131   1.1     matt 	/* Get the physical address of the start of the l1 */
   1132   1.1     matt 	pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
   1133   1.1     matt 
   1134   1.1     matt 	/* Store the virtual address of the l1 in the pmap. */
   1135   1.1     matt 	pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
   1136   1.1     matt 
   1137   1.1     matt 	/* Clean the L1 if it is dirty */
   1138   1.1     matt 	if (!(pt->pt_flags & PTFLAG_CLEAN))
   1139   1.1     matt 		bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
   1140   1.1     matt 
   1141   1.1     matt 	/* Do we already have the kernel mappings ? */
   1142   1.1     matt 	if (!(pt->pt_flags & PTFLAG_KPT)) {
   1143   1.1     matt 		/* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
   1144   1.1     matt 
   1145  1.15    chris 		bcopy((char *)pmap_kernel()->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
   1146   1.1     matt 		    (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
   1147   1.1     matt 		    KERNEL_PD_SIZE);
   1148   1.1     matt 		pt->pt_flags |= PTFLAG_KPT;
   1149   1.1     matt 	}
   1150   1.1     matt 
   1151   1.1     matt 	/* Allocate a page table to map all the page tables for this pmap */
   1152   1.1     matt 
   1153   1.1     matt #ifdef DIAGNOSTIC
   1154   1.1     matt 	if (pmap->pm_vptpt) {
   1155   1.1     matt 		/* XXX What if we have one already ? */
    1156   1.1     matt 		panic("pmap_allocpagedir: have pt already");
   1157   1.1     matt 	}
   1158   1.1     matt #endif	/* DIAGNOSTIC */
   1159   1.1     matt 	pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
   1160   1.5   toshii 	if (pmap->pm_vptpt == 0) {
   1161   1.5   toshii 		pmap_freepagedir(pmap);
   1162   1.5   toshii 		return(ENOMEM);
   1163   1.5   toshii 	}
   1164   1.5   toshii 
   1165  1.15    chris 	(void) pmap_extract(pmap_kernel(), pmap->pm_vptpt, &pmap->pm_pptpt);
   1166   1.1     matt 	pmap->pm_pptpt &= PG_FRAME;
   1167   1.1     matt 	/* Revoke cacheability and bufferability */
   1168   1.1     matt 	/* XXX should be done better than this */
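                        	/*
                        	 * The MMU's table walker fetches descriptors straight from
                        	 * memory, so updates to this page table must not linger in
                        	 * the write-back cache; hence the uncached, unbuffered
                        	 * mapping.
                        	 */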
   1169  1.15    chris 	pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
   1170   1.1     matt 	*pte = *pte & ~(PT_C | PT_B);
   1171   1.1     matt 
   1172   1.1     matt 	/* Wire in this page table */
   1173   1.1     matt 	pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt);
   1174   1.1     matt 
   1175   1.1     matt 	pt->pt_flags &= ~PTFLAG_CLEAN;	/* L1 is dirty now */
   1176   1.1     matt 
   1177   1.1     matt 	/*
   1178   1.1     matt 	 * Map the kernel page tables for 0xf0000000 +
   1179   1.1     matt 	 * into the page table used to map the
   1180   1.1     matt 	 * pmap's page tables
   1181   1.1     matt 	 */
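                        	/*
                        	 * Each PTE covers NBPG of VA, so a VA corresponds to a byte
                        	 * offset of (va >> (PGSHIFT - 2)) into the page tables;
                        	 * likewise every 4 bytes of L1 (1MB of VA, 1KB of L2)
                        	 * correspond to 1 byte of ptpt, giving the >> 2 scaling of
                        	 * the L1 offsets below.
                        	 */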
   1182   1.1     matt 	bcopy((char *)(PROCESS_PAGE_TBLS_BASE
   1183   1.1     matt 	    + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
   1184   1.1     matt 	    + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
   1185   1.1     matt 	    (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
   1186   1.1     matt 	    (KERNEL_PD_SIZE >> 2));
   1187   1.1     matt 
   1188   1.1     matt 	return(0);
   1189   1.1     matt }
   1190   1.1     matt 
   1191   1.1     matt 
   1192   1.1     matt /*
   1193   1.1     matt  * Initialize a preallocated and zeroed pmap structure,
   1194   1.1     matt  * such as one in a vmspace structure.
   1195   1.1     matt  */
   1196   1.1     matt 
   1197   1.1     matt static int pmap_pagedir_ident;	/* tsleep() ident */
   1198   1.1     matt 
   1199   1.1     matt void
   1200   1.1     matt pmap_pinit(pmap)
   1201   1.1     matt 	struct pmap *pmap;
   1202   1.1     matt {
   1203   1.1     matt 	PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
   1204   1.1     matt 
   1205   1.1     matt 	/* Keep looping until we succeed in allocating a page directory */
   1206   1.1     matt 	while (pmap_allocpagedir(pmap) != 0) {
   1207   1.1     matt 		/*
   1208   1.1     matt 		 * Ok we failed to allocate a suitable block of memory for an
   1209   1.1     matt 		 * L1 page table. This means that either:
   1210   1.1     matt 		 * 1. 16KB of virtual address space could not be allocated
   1211   1.1     matt 		 * 2. 16KB of physically contiguous memory on a 16KB boundary
   1212   1.1     matt 		 *    could not be allocated.
   1213   1.1     matt 		 *
    1214   1.1     matt 		 * Since we cannot fail, we will sleep for a while and try
    1215   1.1     matt 		 * again.  Although we will be woken when another page table
    1216   1.1     matt 		 * is freed, other memory may be released or swapped out in
    1217   1.1     matt 		 * the meantime, which may let us succeed, so we also retry
    1218   1.1     matt 		 * regularly just in case.
   1219   1.1     matt 		 */
   1220   1.1     matt 
   1221   1.1     matt 		if (tsleep((caddr_t)&pmap_pagedir_ident, PZERO,
   1222   1.1     matt 		   "l1ptwait", 1000) == EWOULDBLOCK)
   1223   1.1     matt 			printf("pmap: Cannot allocate L1 page table, sleeping ...\n");
   1224   1.1     matt 	}
   1225   1.1     matt 
   1226   1.1     matt 	/* Map zero page for the pmap. This will also map the L2 for it */
   1227   1.1     matt 	pmap_enter(pmap, 0x00000000, systempage.pv_pa,
   1228   1.1     matt 	    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
   1229   1.7  thorpej 	pmap_update();
   1230   1.1     matt }
   1231   1.1     matt 
   1232   1.1     matt 
   1233   1.1     matt void
   1234   1.1     matt pmap_freepagedir(pmap)
   1235  1.15    chris 	struct pmap *pmap;
   1236   1.1     matt {
   1237   1.1     matt 	/* Free the memory used for the page table mapping */
   1238   1.5   toshii 	if (pmap->pm_vptpt != 0)
   1239   1.5   toshii 		uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
   1240   1.1     matt 
   1241   1.1     matt 	/* junk the L1 page table */
   1242   1.1     matt 	if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
   1243   1.1     matt 		/* Add the page table to the queue */
   1244   1.1     matt 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
   1245   1.1     matt 		++l1pt_static_queue_count;
    1246   1.1     matt 		/* Wake up any sleeping processes waiting for an L1 page table */
   1247   1.1     matt 		wakeup((caddr_t)&pmap_pagedir_ident);
   1248   1.1     matt 	} else if (l1pt_queue_count < 8) {
   1249   1.1     matt 		/* Add the page table to the queue */
   1250   1.1     matt 		SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
   1251   1.1     matt 		++l1pt_queue_count;
    1252   1.1     matt 		/* Wake up any sleeping processes waiting for an L1 page table */
   1253   1.1     matt 		wakeup((caddr_t)&pmap_pagedir_ident);
   1254   1.1     matt 	} else
   1255   1.1     matt 		pmap_free_l1pt(pmap->pm_l1pt);
   1256   1.1     matt }
   1257   1.1     matt 
   1258   1.1     matt 
   1259   1.1     matt /*
   1260   1.1     matt  * Retire the given physical map from service.
   1261   1.1     matt  * Should only be called if the map contains no valid mappings.
   1262   1.1     matt  */
   1263   1.1     matt 
   1264   1.1     matt void
   1265   1.1     matt pmap_destroy(pmap)
   1266  1.15    chris 	struct pmap *pmap;
   1267   1.1     matt {
   1268   1.1     matt 	int count;
   1269   1.1     matt 
   1270   1.1     matt 	if (pmap == NULL)
   1271   1.1     matt 		return;
   1272   1.1     matt 
   1273   1.1     matt 	PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
   1274   1.1     matt 	simple_lock(&pmap->pm_lock);
   1275  1.16    chris 	count = --pmap->pm_obj.uo_refs;
   1276   1.1     matt 	simple_unlock(&pmap->pm_lock);
   1277   1.1     matt 	if (count == 0) {
   1278   1.1     matt 		pmap_release(pmap);
   1279  1.10    chris 		pool_put(&pmap_pmap_pool, pmap);
   1280   1.1     matt 	}
   1281   1.1     matt }
   1282   1.1     matt 
   1283   1.1     matt 
   1284   1.1     matt /*
   1285   1.1     matt  * Release any resources held by the given physical map.
   1286   1.1     matt  * Called when a pmap initialized by pmap_pinit is being released.
   1287   1.1     matt  * Should only be called if the map contains no valid mappings.
   1288   1.1     matt  */
   1289   1.1     matt 
   1290   1.1     matt void
   1291   1.1     matt pmap_release(pmap)
   1292  1.15    chris 	struct pmap *pmap;
   1293   1.1     matt {
   1294   1.1     matt 	struct vm_page *page;
   1295   1.1     matt 
   1296   1.1     matt 	PDEBUG(0, printf("pmap_release(%p)\n", pmap));
   1297   1.1     matt 
   1298   1.1     matt 	/* Remove the zero page mapping */
   1299   1.1     matt 	pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
   1300   1.7  thorpej 	pmap_update();
   1301   1.1     matt 
   1302   1.1     matt 	/*
    1303   1.1     matt 	 * Free any page tables still mapped.
    1304   1.1     matt 	 * This is only temporary until pmap_enter can count the number
    1305   1.1     matt 	 * of mappings made in a page table.  Then pmap_remove() can
    1306   1.1     matt 	 * reduce the count and free the page table when the count
    1307  1.16    chris 	 * reaches zero.  Note that the entries in this list should match
    1308  1.16    chris 	 * the contents of the ptpt; walking the list, however, is faster
    1309  1.16    chris 	 * than scanning all 1024 ptpt entries looking for page tables.
    1310  1.16    chris 	 * (This scheme is taken from the i386 pmap.c.)
   1311   1.1     matt 	 */
   1312  1.16    chris 	while (pmap->pm_obj.memq.tqh_first != NULL) {
   1313  1.16    chris 		page = pmap->pm_obj.memq.tqh_first;
   1314  1.16    chris #ifdef DIAGNOSTIC
   1315  1.16    chris 		if (page->flags & PG_BUSY)
   1316  1.16    chris 			panic("pmap_release: busy page table page");
   1317  1.16    chris #endif
   1318  1.16    chris 		/* pmap_page_protect?  currently no need for it. */
   1319  1.16    chris 
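                        		/* uvm_pagefree() expects an unwired page; drop the wiring. */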
   1320  1.16    chris 		page->wire_count = 0;
   1321  1.16    chris 		uvm_pagefree(page);
   1322   1.1     matt 	}
   1323  1.16    chris 
   1324   1.1     matt 	/* Free the page dir */
   1325   1.1     matt 	pmap_freepagedir(pmap);
   1326   1.1     matt }
   1327   1.1     matt 
   1328   1.1     matt 
   1329   1.1     matt /*
   1330  1.15    chris  * void pmap_reference(struct pmap *pmap)
   1331   1.1     matt  *
   1332   1.1     matt  * Add a reference to the specified pmap.
   1333   1.1     matt  */
   1334   1.1     matt 
   1335   1.1     matt void
   1336   1.1     matt pmap_reference(pmap)
   1337  1.15    chris 	struct pmap *pmap;
   1338   1.1     matt {
   1339   1.1     matt 	if (pmap == NULL)
   1340   1.1     matt 		return;
   1341   1.1     matt 
   1342   1.1     matt 	simple_lock(&pmap->pm_lock);
   1343  1.16    chris 	pmap->pm_obj.uo_refs++;
   1344   1.1     matt 	simple_unlock(&pmap->pm_lock);
   1345   1.1     matt }
   1346   1.1     matt 
   1347   1.1     matt /*
   1348   1.1     matt  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1349   1.1     matt  *
   1350   1.1     matt  * Return the start and end addresses of the kernel's virtual space.
   1351   1.1     matt  * These values are setup in pmap_bootstrap and are updated as pages
   1352   1.1     matt  * are allocated.
   1353   1.1     matt  */
   1354   1.1     matt 
   1355   1.1     matt void
   1356   1.1     matt pmap_virtual_space(start, end)
   1357   1.1     matt 	vaddr_t *start;
   1358   1.1     matt 	vaddr_t *end;
   1359   1.1     matt {
   1360   1.1     matt 	*start = virtual_start;
   1361   1.1     matt 	*end = virtual_end;
   1362   1.1     matt }
   1363   1.1     matt 
   1364   1.1     matt 
   1365   1.1     matt /*
   1366   1.1     matt  * Activate the address space for the specified process.  If the process
   1367   1.1     matt  * is the current process, load the new MMU context.
   1368   1.1     matt  */
   1369   1.1     matt void
   1370   1.1     matt pmap_activate(p)
   1371   1.1     matt 	struct proc *p;
   1372   1.1     matt {
   1373  1.15    chris 	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
   1374   1.1     matt 	struct pcb *pcb = &p->p_addr->u_pcb;
   1375   1.1     matt 
   1376  1.15    chris 	(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
   1377   1.1     matt 	    (paddr_t *)&pcb->pcb_pagedir);
   1378   1.1     matt 
   1379   1.1     matt 	PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
   1380   1.1     matt 	    p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
   1381   1.1     matt 
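                        	/*
                        	 * If this is the current process, the hardware translation
                        	 * table base must be switched.  With a virtually indexed
                        	 * cache this is only safe once the cache has been cleaned
                        	 * and the TLB flushed, which setttb() is assumed to take
                        	 * care of.
                        	 */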
   1382   1.1     matt 	if (p == curproc) {
   1383   1.1     matt 		PDEBUG(0, printf("pmap_activate: setting TTB\n"));
   1384   1.1     matt 		setttb((u_int)pcb->pcb_pagedir);
   1385   1.1     matt 	}
   1386   1.1     matt #if 0
   1387   1.1     matt 	pmap->pm_pdchanged = FALSE;
   1388   1.1     matt #endif
   1389   1.1     matt }
   1390   1.1     matt 
   1391   1.1     matt 
   1392   1.1     matt /*
   1393   1.1     matt  * Deactivate the address space of the specified process.
   1394   1.1     matt  */
   1395   1.1     matt void
   1396   1.1     matt pmap_deactivate(p)
   1397   1.1     matt 	struct proc *p;
   1398   1.1     matt {
   1399   1.1     matt }
   1400   1.1     matt 
   1401   1.1     matt 
   1402   1.1     matt /*
   1403   1.1     matt  * pmap_clean_page()
   1404   1.1     matt  *
   1405   1.1     matt  * This is a local function used to work out the best strategy to clean
   1406   1.1     matt  * a single page referenced by its entry in the PV table. It's used by
    1407   1.1     matt  * pmap_copy_page, pmap_zero_page and maybe some others later on.
   1408   1.1     matt  *
   1409   1.1     matt  * Its policy is effectively:
   1410   1.1     matt  *  o If there are no mappings, we don't bother doing anything with the cache.
   1411   1.1     matt  *  o If there is one mapping, we clean just that page.
   1412   1.1     matt  *  o If there are multiple mappings, we clean the entire cache.
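                         *    (With a virtually indexed cache the page cannot be cleaned by
                         *    physical address, and purging several VA ranges soon costs more
                         *    than one full clean.)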
   1413   1.1     matt  *
   1414   1.1     matt  * So that some functions can be further optimised, it returns 0 if it didn't
   1415   1.1     matt  * clean the entire cache, or 1 if it did.
   1416   1.1     matt  *
    1417   1.1     matt  * XXX One bug in this routine is that if the pv_entry has a single page
    1418   1.1     matt  * mapped at 0x00000000, a whole cache clean will be performed rather than
    1419   1.1     matt  * just that one page.  This should not occur in everyday use, and if it
    1420   1.1     matt  * does, the only cost is a less efficient clean for that page.
   1421   1.1     matt  */
   1422   1.1     matt static int
   1423   1.1     matt pmap_clean_page(pv)
   1424   1.1     matt 	struct pv_entry *pv;
   1425   1.1     matt {
   1426   1.1     matt 	int s;
   1427   1.1     matt 	int cache_needs_cleaning = 0;
   1428   1.1     matt 	vaddr_t page_to_clean = 0;
   1429   1.1     matt 
    1430   1.1     matt 	/* Go to splvm() so we get an exclusive lock for a moment */
   1431   1.1     matt 	s = splvm();
   1432   1.1     matt 	if (pv->pv_pmap) {
   1433   1.1     matt 		cache_needs_cleaning = 1;
   1434   1.1     matt 		if (!pv->pv_next)
   1435   1.1     matt 			page_to_clean = pv->pv_va;
   1436   1.1     matt 	}
   1437   1.1     matt 	splx(s);
   1438   1.1     matt 
   1439   1.1     matt 	/* Do cache ops outside the splvm. */
   1440   1.1     matt 	if (page_to_clean)
   1441   1.1     matt 		cpu_cache_purgeID_rng(page_to_clean, NBPG);
   1442   1.1     matt 	else if (cache_needs_cleaning) {
   1443   1.1     matt 		cpu_cache_purgeID();
   1444   1.1     matt 		return (1);
   1445   1.1     matt 	}
   1446   1.1     matt 	return (0);
   1447   1.1     matt }
   1448   1.1     matt 
   1449   1.1     matt /*
   1450   1.1     matt  * pmap_find_pv()
   1451   1.1     matt  *
   1452   1.1     matt  * This is a local function that finds a PV entry for a given physical page.
   1453   1.1     matt  * This is a common op, and this function removes loads of ifdefs in the code.
   1454   1.1     matt  */
   1455   1.1     matt static __inline struct pv_entry *
   1456   1.1     matt pmap_find_pv(phys)
   1457   1.2     matt 	paddr_t phys;
   1458   1.1     matt {
   1459   1.1     matt 	int bank, off;
   1460   1.1     matt 	struct pv_entry *pv;
   1461   1.1     matt 
   1462   1.1     matt #ifdef DIAGNOSTIC
   1463   1.1     matt 	if (!pmap_initialized)
   1464   1.1     matt 		panic("pmap_find_pv: !pmap_initialized");
   1465   1.1     matt #endif
   1466   1.1     matt 
   1467   1.1     matt 	if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
   1468   1.1     matt 		panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
   1469   1.1     matt 	pv = &vm_physmem[bank].pmseg.pvent[off];
   1470   1.1     matt 	return (pv);
   1471   1.1     matt }
   1472   1.1     matt 
   1473   1.1     matt /*
   1474   1.1     matt  * pmap_zero_page()
   1475   1.1     matt  *
   1476   1.1     matt  * Zero a given physical page by mapping it at a page hook point.
    1477   1.1     matt  * The page to be zeroed is mapped cacheable, since on the StrongARM
    1478   1.1     matt  * accesses to non-cached pages are non-burst, which makes writing
    1479   1.1     matt  * _any_ bulk data very slow.
   1480   1.1     matt  */
   1481   1.1     matt void
   1482   1.1     matt pmap_zero_page(phys)
   1483   1.2     matt 	paddr_t phys;
   1484   1.1     matt {
   1485   1.1     matt 	struct pv_entry *pv;
   1486   1.1     matt 
    1487   1.1     matt 	/* Get the PV entry for this page, and clean the page. */
   1488   1.1     matt 	pv = pmap_find_pv(phys);
   1489   1.1     matt 	pmap_clean_page(pv);
   1490   1.1     matt 
   1491   1.1     matt 	/*
   1492   1.1     matt 	 * Hook in the page, zero it, and purge the cache for that
   1493   1.1     matt 	 * zeroed page. Invalidate the TLB as needed.
   1494   1.1     matt 	 */
   1495   1.1     matt 	*page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
   1496   1.1     matt 	cpu_tlb_flushD_SE(page_hook0.va);
   1497   1.1     matt 	bzero_page(page_hook0.va);
   1498   1.1     matt 	cpu_cache_purgeD_rng(page_hook0.va, NBPG);
   1499   1.1     matt }
   1500   1.1     matt 
   1501   1.1     matt /*
   1502   1.1     matt  * pmap_copy_page()
   1503   1.1     matt  *
   1504   1.1     matt  * Copy one physical page into another, by mapping the pages into
    1505   1.1     matt  * hook points.  The same comment regarding cacheability as in
   1506   1.1     matt  * pmap_zero_page also applies here.
   1507   1.1     matt  */
   1508   1.1     matt void
   1509   1.1     matt pmap_copy_page(src, dest)
   1510   1.2     matt 	paddr_t src;
   1511   1.2     matt 	paddr_t dest;
   1512   1.1     matt {
   1513   1.1     matt 	struct pv_entry *src_pv, *dest_pv;
   1514   1.1     matt 
    1515   1.1     matt 	/* Get PV entries for the pages and clean them.  A full cache
                        	 * purge for the source covers the destination as well. */
   1516   1.1     matt 	src_pv = pmap_find_pv(src);
   1517   1.1     matt 	dest_pv = pmap_find_pv(dest);
   1518   1.1     matt 	if (!pmap_clean_page(src_pv))
   1519   1.1     matt 		pmap_clean_page(dest_pv);
   1520   1.1     matt 
   1521   1.1     matt 	/*
   1522   1.1     matt 	 * Map the pages into the page hook points, copy them, and purge
   1523   1.1     matt 	 * the cache for the appropriate page. Invalidate the TLB
   1524   1.1     matt 	 * as required.
   1525   1.1     matt 	 */
   1526   1.1     matt 	*page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
   1527   1.1     matt 	*page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
   1528   1.1     matt 	cpu_tlb_flushD_SE(page_hook0.va);
   1529   1.1     matt 	cpu_tlb_flushD_SE(page_hook1.va);
   1530   1.1     matt 	bcopy_page(page_hook0.va, page_hook1.va);
   1531   1.1     matt 	cpu_cache_purgeD_rng(page_hook0.va, NBPG);
   1532   1.1     matt 	cpu_cache_purgeD_rng(page_hook1.va, NBPG);
   1533   1.1     matt }
   1534   1.1     matt 
   1535   1.1     matt /*
    1536   1.2     matt  * paddr_t pmap_next_phys_page(paddr_t addr)
    1537   1.1     matt  *
    1538   1.1     matt  * Return the physical address of the next physical page after addr,
    1539   1.1     matt  * or 0 if there are no more physical pages.
   1540   1.1     matt  */
   1541   1.1     matt 
   1542   1.2     matt paddr_t
   1543   1.1     matt pmap_next_phys_page(addr)
   1544   1.2     matt 	paddr_t addr;
   1546   1.1     matt {
   1547   1.1     matt 	int loop;
   1548   1.1     matt 
   1549   1.1     matt 	if (addr < bootconfig.dram[0].address)
   1550   1.1     matt 		return(bootconfig.dram[0].address);
   1551   1.1     matt 
   1552   1.1     matt 	loop = 0;
   1553   1.1     matt 
   1554   1.1     matt 	while (bootconfig.dram[loop].address != 0
   1555   1.1     matt 	    && addr > (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG))
   1556   1.1     matt 		++loop;
   1557   1.1     matt 
   1558   1.1     matt 	if (bootconfig.dram[loop].address == 0)
   1559   1.1     matt 		return(0);
   1560   1.1     matt 
   1561   1.1     matt 	addr += NBPG;
   1562   1.1     matt 
   1563   1.1     matt 	if (addr >= (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) {
   1564   1.1     matt 		if (bootconfig.dram[loop + 1].address == 0)
   1565   1.1     matt 			return(0);
   1566   1.1     matt 		addr = bootconfig.dram[loop + 1].address;
   1567   1.1     matt 	}
   1568   1.1     matt 
   1569   1.1     matt 	return(addr);
   1570   1.1     matt }
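                        /*
                         * A typical (hypothetical) walk over all of physical memory, assuming
                         * DRAM does not start at physical address 0:
                         *
                         *	for (pa = pmap_next_phys_page(0); pa != 0;
                         *	     pa = pmap_next_phys_page(pa))
                         *		...
                         */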
   1571   1.1     matt 
   1572   1.1     matt #if 0
   1573   1.1     matt void
   1574   1.1     matt pmap_pte_addref(pmap, va)
   1575  1.15    chris 	struct pmap *pmap;
   1576   1.1     matt 	vaddr_t va;
   1577   1.1     matt {
   1578   1.1     matt 	pd_entry_t *pde;
   1579   1.2     matt 	paddr_t pa;
   1580   1.1     matt 	struct vm_page *m;
   1581   1.1     matt 
   1582   1.1     matt 	if (pmap == pmap_kernel())
   1583   1.1     matt 		return;
   1584   1.1     matt 
   1585   1.1     matt 	pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
   1586   1.1     matt 	pa = pmap_pte_pa(pde);
   1587   1.1     matt 	m = PHYS_TO_VM_PAGE(pa);
   1588   1.1     matt 	++m->wire_count;
   1589   1.1     matt #ifdef MYCROFT_HACK
   1590   1.1     matt 	printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   1591   1.1     matt 	    pmap, va, pde, pa, m, m->wire_count);
   1592   1.1     matt #endif
   1593   1.1     matt }
   1594   1.1     matt 
   1595   1.1     matt void
   1596   1.1     matt pmap_pte_delref(pmap, va)
   1597  1.15    chris 	struct pmap *pmap;
   1598   1.1     matt 	vaddr_t va;
   1599   1.1     matt {
   1600   1.1     matt 	pd_entry_t *pde;
   1601   1.2     matt 	paddr_t pa;
   1602   1.1     matt 	struct vm_page *m;
   1603   1.1     matt 
   1604   1.1     matt 	if (pmap == pmap_kernel())
   1605   1.1     matt 		return;
   1606   1.1     matt 
   1607   1.1     matt 	pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
   1608   1.1     matt 	pa = pmap_pte_pa(pde);
   1609   1.1     matt 	m = PHYS_TO_VM_PAGE(pa);
   1610   1.1     matt 	--m->wire_count;
   1611   1.1     matt #ifdef MYCROFT_HACK
   1612   1.1     matt 	printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   1613   1.1     matt 	    pmap, va, pde, pa, m, m->wire_count);
   1614   1.1     matt #endif
   1615   1.1     matt 	if (m->wire_count == 0) {
   1616   1.1     matt #ifdef MYCROFT_HACK
   1617   1.1     matt 		printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
   1618   1.1     matt 		    pmap, va, pde, pa, m);
   1619   1.1     matt #endif
   1620   1.1     matt 		pmap_unmap_in_l1(pmap, va);
   1621   1.1     matt 		uvm_pagefree(m);
   1622   1.1     matt 		--pmap->pm_stats.resident_count;
   1623   1.1     matt 	}
   1624   1.1     matt }
   1625   1.1     matt #else
   1626   1.1     matt #define	pmap_pte_addref(pmap, va)
   1627   1.1     matt #define	pmap_pte_delref(pmap, va)
   1628   1.1     matt #endif
   1629   1.1     matt 
   1630   1.1     matt /*
   1631   1.1     matt  * Since we have a virtually indexed cache, we may need to inhibit caching if
   1632   1.1     matt  * there is more than one mapping and at least one of them is writable.
   1633   1.1     matt  * Since we purge the cache on every context switch, we only need to check for
   1634   1.1     matt  * other mappings within the same pmap, or kernel_pmap.
   1635   1.1     matt  * This function is also called when a page is unmapped, to possibly reenable
   1636   1.1     matt  * caching on any remaining mappings.
   1637  1.11    chris  *
    1638  1.11    chris  * Note that the pmap must have its PTEs mapped in, passed in via "ptes".
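                         *
                         * Example of the hazard being avoided: a page mapped at two VAs in one
                         * pmap, one mapping writable.  A store through one VA lands in a cache
                         * line tagged with that VA; a load through the other VA misses that
                         * line and reads stale data from memory.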
   1639   1.1     matt  */
   1640   1.1     matt void
   1641  1.12    chris pmap_vac_me_harder(struct pmap *pmap, struct pv_entry *pv, pt_entry_t *ptes,
   1642  1.12    chris 	boolean_t clear_cache)
   1643   1.1     matt {
   1644   1.1     matt 	struct pv_entry *npv;
   1645   1.1     matt 	pt_entry_t *pte;
   1646   1.1     matt 	int entries = 0;
   1647   1.1     matt 	int writeable = 0;
   1648  1.12    chris 	int cacheable_entries = 0;
   1649   1.1     matt 
   1650   1.1     matt 	if (pv->pv_pmap == NULL)
   1651   1.1     matt 		return;
   1652  1.11    chris 	KASSERT(ptes != NULL);
   1653   1.1     matt 
   1654   1.1     matt 	/*
   1655   1.1     matt 	 * Count mappings and writable mappings in this pmap.
   1656   1.1     matt 	 * Keep a pointer to the first one.
   1657   1.1     matt 	 */
   1658   1.1     matt 	for (npv = pv; npv; npv = npv->pv_next) {
   1659   1.1     matt 		/* Count mappings in the same pmap */
   1660   1.1     matt 		if (pmap == npv->pv_pmap) {
   1661   1.1     matt 			if (entries++ == 0)
   1662   1.1     matt 				pv = npv;
   1663  1.12    chris 			/* Cacheable mappings */
   1664  1.12    chris 			if ((npv->pv_flags & PT_NC) == 0)
   1665  1.12    chris 				cacheable_entries++;
   1666   1.1     matt 			/* Writeable mappings */
   1667   1.1     matt 			if (npv->pv_flags & PT_Wr)
   1668   1.1     matt 				++writeable;
   1669   1.1     matt 		}
   1670   1.1     matt 	}
   1671   1.1     matt 
   1672  1.12    chris 	PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
   1673  1.12    chris 		"writeable %d cacheable %d %s\n", pmap, entries, writeable,
   1674  1.12    chris 	    	cacheable_entries, clear_cache ? "clean" : "no clean"));
   1675  1.12    chris 
   1676   1.1     matt 	/*
   1677   1.1     matt 	 * Enable or disable caching as necessary.
   1678   1.1     matt 	 * We do a quick check of the first PTE to avoid walking the list if
   1679   1.1     matt 	 * we're already in the right state.
   1680   1.1     matt 	 */
   1681   1.1     matt 	if (entries > 1 && writeable) {
   1682  1.12    chris 		if (cacheable_entries == 0)
   1683  1.12    chris 		    return;
   1684  1.12    chris 		if (pv->pv_flags & PT_NC) {
   1685  1.12    chris #ifdef DIAGNOSTIC
    1686  1.12    chris 			/* We have cacheable entries, but the first one
    1687  1.12    chris 			 * isn't among them.  Something is wrong. */
    1688  1.12    chris 			if (cacheable_entries)
    1689  1.12    chris 				panic("pmap_vac_me_harder: "
    1690  1.12    chris 				    "cacheable inconsistent");
   1691  1.12    chris #endif
   1692   1.1     matt 			return;
   1693  1.11    chris 		}
    1694  1.12    chris 		pte = &ptes[arm_byte_to_page(pv->pv_va)];
   1695  1.11    chris 		*pte &= ~(PT_C | PT_B);
   1696  1.12    chris 		pv->pv_flags |= PT_NC;
   1697  1.12    chris 		if (clear_cache && cacheable_entries < 4) {
   1698  1.12    chris 			cpu_cache_purgeID_rng(pv->pv_va, NBPG);
   1699  1.12    chris 			cpu_tlb_flushID_SE(pv->pv_va);
   1700  1.12    chris 		}
   1701   1.1     matt 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
   1702  1.12    chris 			if (pmap == npv->pv_pmap &&
   1703  1.12    chris 			    (npv->pv_flags & PT_NC) == 0) {
   1704  1.12    chris 				ptes[arm_byte_to_page(npv->pv_va)] &=
   1705  1.11    chris 				    ~(PT_C | PT_B);
   1706  1.12    chris  				npv->pv_flags |= PT_NC;
   1707  1.12    chris 				if (clear_cache && cacheable_entries < 4) {
   1708  1.12    chris 					cpu_cache_purgeID_rng(npv->pv_va,
   1709  1.12    chris 					    NBPG);
   1710  1.12    chris 					cpu_tlb_flushID_SE(npv->pv_va);
   1711  1.12    chris 				}
   1712   1.1     matt 			}
   1713   1.1     matt 		}
   1714  1.12    chris 		if (clear_cache && cacheable_entries >= 4) {
   1715  1.12    chris 			cpu_cache_purgeID();
   1716  1.12    chris 			cpu_tlb_flushID();
   1717  1.12    chris 		}
   1718   1.1     matt 	} else if (entries > 0) {
   1719  1.12    chris 		if ((pv->pv_flags & PT_NC) == 0)
   1720  1.12    chris 			return;
   1721  1.11    chris 		pte = &ptes[arm_byte_to_page(pv->pv_va)];
   1722  1.11    chris 		*pte |= (PT_C | PT_B);
   1723  1.12    chris 		pv->pv_flags &= ~PT_NC;
   1724   1.1     matt 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
   1725  1.12    chris 			if (pmap == npv->pv_pmap &&
   1726  1.12    chris 				(npv->pv_flags & PT_NC)) {
   1727  1.11    chris 				ptes[arm_byte_to_page(npv->pv_va)] |=
   1728  1.12    chris 				    (PT_C | PT_B);
   1729  1.12    chris 				npv->pv_flags &= ~PT_NC;
   1730   1.1     matt 			}
   1731   1.1     matt 		}
   1732   1.1     matt 	}
   1733   1.1     matt }
   1734   1.1     matt 
   1735   1.1     matt /*
   1736   1.1     matt  * pmap_remove()
   1737   1.1     matt  *
   1738   1.1     matt  * pmap_remove is responsible for nuking a number of mappings for a range
   1739   1.1     matt  * of virtual address space in the current pmap. To do this efficiently
   1740   1.1     matt  * is interesting, because in a number of cases a wide virtual address
   1741   1.1     matt  * range may be supplied that contains few actual mappings. So, the
   1742   1.1     matt  * optimisations are:
   1743   1.1     matt  *  1. Try and skip over hunks of address space for which an L1 entry
   1744   1.1     matt  *     does not exist.
   1745   1.1     matt  *  2. Build up a list of pages we've hit, up to a maximum, so we can
   1746   1.1     matt  *     maybe do just a partial cache clean. This path of execution is
   1747   1.1     matt  *     complicated by the fact that the cache must be flushed _before_
   1748   1.1     matt  *     the PTE is nuked, being a VAC :-)
   1749   1.1     matt  *  3. Maybe later fast-case a single page, but I don't think this is
   1750   1.1     matt  *     going to make _that_ much difference overall.
   1751   1.1     matt  */
   1752   1.1     matt 
   1753   1.1     matt #define PMAP_REMOVE_CLEAN_LIST_SIZE	3
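                        /*
                         * cleanlist_idx below doubles as a state flag: a value less than
                         * PMAP_REMOVE_CLEAN_LIST_SIZE means entries are still being
                         * accumulated, equal means the list has just overflowed (forcing a
                         * full cache and TLB purge), and greater means the purge has already
                         * been paid for, so further PTEs can simply be zapped.
                         */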
   1754   1.1     matt 
   1755   1.1     matt void
   1756   1.1     matt pmap_remove(pmap, sva, eva)
   1757  1.15    chris 	struct pmap *pmap;
   1758   1.1     matt 	vaddr_t sva;
   1759   1.1     matt 	vaddr_t eva;
   1760   1.1     matt {
   1761   1.1     matt 	int cleanlist_idx = 0;
   1762   1.1     matt 	struct pagelist {
   1763   1.1     matt 		vaddr_t va;
   1764   1.1     matt 		pt_entry_t *pte;
   1765   1.1     matt 	} cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
   1766  1.11    chris 	pt_entry_t *pte = 0, *ptes;
   1767   1.2     matt 	paddr_t pa;
   1768   1.1     matt 	int pmap_active;
   1769   1.1     matt 	struct pv_entry *pv;
   1770   1.1     matt 
   1771   1.1     matt 	/* Exit quick if there is no pmap */
   1772   1.1     matt 	if (!pmap)
   1773   1.1     matt 		return;
   1774   1.1     matt 
   1775   1.1     matt 	PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
   1776   1.1     matt 
   1777   1.1     matt 	sva &= PG_FRAME;
   1778   1.1     matt 	eva &= PG_FRAME;
   1779   1.1     matt 
   1780  1.11    chris 	ptes = pmap_map_ptes(pmap);
   1781   1.1     matt 	/* Get a page table pointer */
   1782   1.1     matt 	while (sva < eva) {
   1783  1.11    chris 		if (pmap_pde_v(pmap_pde(pmap, sva)))
   1784   1.1     matt 			break;
   1785   1.1     matt 		sva = (sva & PD_MASK) + NBPD;
   1786   1.1     matt 	}
   1787  1.11    chris 
   1788  1.11    chris 	pte = &ptes[arm_byte_to_page(sva)];
   1789   1.1     matt 	/* Note if the pmap is active thus require cache and tlb cleans */
   1790   1.1     matt 	if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
   1791  1.15    chris 	    || (pmap == pmap_kernel()))
   1792   1.1     matt 		pmap_active = 1;
   1793   1.1     matt 	else
   1794   1.1     matt 		pmap_active = 0;
   1795   1.1     matt 
   1796   1.1     matt 	/* Now loop along */
   1797   1.1     matt 	while (sva < eva) {
   1798   1.1     matt 		/* Check if we can move to the next PDE (l1 chunk) */
   1799   1.1     matt 		if (!(sva & PT_MASK))
   1800   1.1     matt 			if (!pmap_pde_v(pmap_pde(pmap, sva))) {
   1801   1.1     matt 				sva += NBPD;
   1802   1.1     matt 				pte += arm_byte_to_page(NBPD);
   1803   1.1     matt 				continue;
   1804   1.1     matt 			}
   1805   1.1     matt 
   1806   1.1     matt 		/* We've found a valid PTE, so this page of PTEs has to go. */
   1807   1.1     matt 		if (pmap_pte_v(pte)) {
   1808   1.1     matt 			int bank, off;
   1809   1.1     matt 
   1810   1.1     matt 			/* Update statistics */
   1811   1.1     matt 			--pmap->pm_stats.resident_count;
   1812   1.1     matt 
   1813   1.1     matt 			/*
   1814   1.1     matt 			 * Add this page to our cache remove list, if we can.
   1815   1.1     matt 			 * If, however the cache remove list is totally full,
   1816   1.1     matt 			 * then do a complete cache invalidation taking note
   1817   1.1     matt 			 * to backtrack the PTE table beforehand, and ignore
   1818   1.1     matt 			 * the lists in future because there's no longer any
   1819   1.1     matt 			 * point in bothering with them (we've paid the
   1820   1.1     matt 			 * penalty, so will carry on unhindered). Otherwise,
   1821   1.1     matt 			 * when we fall out, we just clean the list.
   1822   1.1     matt 			 */
   1823   1.1     matt 			PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
   1824   1.1     matt 			pa = pmap_pte_pa(pte);
   1825   1.1     matt 
   1826   1.1     matt 			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
   1827   1.1     matt 				/* Add to the clean list. */
   1828   1.1     matt 				cleanlist[cleanlist_idx].pte = pte;
   1829   1.1     matt 				cleanlist[cleanlist_idx].va = sva;
   1830   1.1     matt 				cleanlist_idx++;
   1831   1.1     matt 			} else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
   1832   1.1     matt 				int cnt;
   1833   1.1     matt 
   1834   1.1     matt 				/* Nuke everything if needed. */
   1835   1.1     matt 				if (pmap_active) {
   1836   1.1     matt 					cpu_cache_purgeID();
   1837   1.1     matt 					cpu_tlb_flushID();
   1838   1.1     matt 				}
   1839   1.1     matt 
   1840   1.1     matt 				/*
   1841   1.1     matt 				 * Roll back the previous PTE list,
   1842   1.1     matt 				 * and zero out the current PTE.
   1843   1.1     matt 				 */
   1844   1.1     matt 				for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
   1845   1.1     matt 					*cleanlist[cnt].pte = 0;
   1846   1.1     matt 					pmap_pte_delref(pmap, cleanlist[cnt].va);
   1847   1.1     matt 				}
   1848   1.1     matt 				*pte = 0;
   1849   1.1     matt 				pmap_pte_delref(pmap, sva);
   1850   1.1     matt 				cleanlist_idx++;
   1851   1.1     matt 			} else {
   1852   1.1     matt 				/*
   1853   1.1     matt 				 * We've already nuked the cache and
   1854   1.1     matt 				 * TLB, so just carry on regardless,
   1855   1.1     matt 				 * and we won't need to do it again
   1856   1.1     matt 				 */
   1857   1.1     matt 				*pte = 0;
   1858   1.1     matt 				pmap_pte_delref(pmap, sva);
   1859   1.1     matt 			}
   1860   1.1     matt 
   1861   1.1     matt 			/*
   1862   1.1     matt 			 * Update flags. In a number of circumstances,
   1863   1.1     matt 			 * we could cluster a lot of these and do a
   1864   1.1     matt 			 * number of sequential pages in one go.
   1865   1.1     matt 			 */
   1866   1.1     matt 			if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
   1867   1.1     matt 				pv = &vm_physmem[bank].pmseg.pvent[off];
   1868   1.1     matt 				pmap_remove_pv(pmap, sva, pv);
   1869  1.12    chris 				pmap_vac_me_harder(pmap, pv, ptes, FALSE);
   1870   1.1     matt 			}
   1871   1.1     matt 		}
   1872   1.1     matt 		sva += NBPG;
   1873   1.1     matt 		pte++;
   1874   1.1     matt 	}
   1875   1.1     matt 
   1876  1.11    chris 	pmap_unmap_ptes(pmap);
   1877   1.1     matt 	/*
   1878   1.1     matt 	 * Now, if we've fallen through down to here, chances are that there
    1879   1.1     matt 	 * are no more than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
   1880   1.1     matt 	 */
   1881   1.1     matt 	if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
   1882   1.1     matt 		u_int cnt;
   1883   1.1     matt 
   1884   1.1     matt 		for (cnt = 0; cnt < cleanlist_idx; cnt++) {
   1885   1.1     matt 			if (pmap_active) {
   1886   1.1     matt 				cpu_cache_purgeID_rng(cleanlist[cnt].va, NBPG);
   1887   1.1     matt 				*cleanlist[cnt].pte = 0;
   1888   1.1     matt 				cpu_tlb_flushID_SE(cleanlist[cnt].va);
   1889   1.1     matt 			} else
   1890   1.1     matt 				*cleanlist[cnt].pte = 0;
   1891   1.1     matt 			pmap_pte_delref(pmap, cleanlist[cnt].va);
   1892   1.1     matt 		}
   1893   1.1     matt 	}
   1894   1.1     matt }
   1895   1.1     matt 
   1896   1.1     matt /*
   1897   1.1     matt  * Routine:	pmap_remove_all
   1898   1.1     matt  * Function:
   1899   1.1     matt  *		Removes this physical page from
   1900   1.1     matt  *		all physical maps in which it resides.
   1901   1.1     matt  *		Reflects back modify bits to the pager.
   1902   1.1     matt  */
   1903   1.1     matt 
   1904   1.1     matt void
   1905   1.1     matt pmap_remove_all(pa)
   1906   1.2     matt 	paddr_t pa;
   1907   1.1     matt {
   1908   1.1     matt 	struct pv_entry *ph, *pv, *npv;
   1909  1.15    chris 	struct pmap *pmap;
   1910  1.11    chris 	pt_entry_t *pte, *ptes;
   1911   1.1     matt 	int s;
   1912   1.1     matt 
   1913   1.1     matt 	PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
   1914   1.1     matt 
   1915   1.1     matt 	pv = ph = pmap_find_pv(pa);
   1916   1.1     matt 	pmap_clean_page(pv);
   1917   1.1     matt 
   1918   1.1     matt 	s = splvm();
   1919   1.1     matt 
   1920   1.1     matt 	if (ph->pv_pmap == NULL) {
   1921   1.1     matt 		PDEBUG(0, printf("free page\n"));
   1922   1.1     matt 		splx(s);
   1923   1.1     matt 		return;
   1924   1.1     matt 	}
    1925   1.1     matt 
   1928   1.1     matt 	while (pv) {
   1929   1.1     matt 		pmap = pv->pv_pmap;
   1930  1.11    chris 		ptes = pmap_map_ptes(pmap);
   1931  1.11    chris 		pte = &ptes[arm_byte_to_page(pv->pv_va)];
   1932   1.1     matt 
   1933   1.1     matt 		PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
   1934   1.1     matt 		    pv->pv_va, pv->pv_flags));
   1935   1.1     matt #ifdef DEBUG
    1936  1.11    chris 		if (!pmap_pde_v(pmap_pde(pmap, pv->pv_va)) || !pmap_pte_v(pte)
    1937  1.11    chris 			    || pmap_pte_pa(pte) != pa)
   1938   1.1     matt 			panic("pmap_remove_all: bad mapping");
   1939   1.1     matt #endif	/* DEBUG */
   1940   1.1     matt 
   1941   1.1     matt 		/*
   1942   1.1     matt 		 * Update statistics
   1943   1.1     matt 		 */
   1944   1.1     matt 		--pmap->pm_stats.resident_count;
   1945   1.1     matt 
   1946   1.1     matt 		/* Wired bit */
   1947   1.1     matt 		if (pv->pv_flags & PT_W)
   1948   1.1     matt 			--pmap->pm_stats.wired_count;
   1949   1.1     matt 
   1950   1.1     matt 		/*
   1951   1.1     matt 		 * Invalidate the PTEs.
   1952   1.1     matt 		 * XXX: should cluster them up and invalidate as many
   1953   1.1     matt 		 * as possible at once.
   1954   1.1     matt 		 */
   1955   1.1     matt 
   1956   1.1     matt #ifdef needednotdone
   1957   1.1     matt reduce wiring count on page table pages as references drop
   1958   1.1     matt #endif
   1959   1.1     matt 
   1960   1.1     matt 		*pte = 0;
   1961   1.1     matt 		pmap_pte_delref(pmap, pv->pv_va);
   1962   1.1     matt 
   1963   1.1     matt 		npv = pv->pv_next;
   1964   1.1     matt 		if (pv == ph)
   1965   1.1     matt 			ph->pv_pmap = NULL;
   1966   1.1     matt 		else
   1967   1.1     matt 			pmap_free_pv(pv);
   1968   1.1     matt 		pv = npv;
   1969  1.11    chris 		pmap_unmap_ptes(pmap);
   1970   1.1     matt 	}
   1971  1.11    chris 
   1972   1.1     matt 	splx(s);
   1973   1.1     matt 
   1974   1.1     matt 	PDEBUG(0, printf("done\n"));
   1975   1.1     matt 	cpu_tlb_flushID();
   1976   1.1     matt }
   1977   1.1     matt 
   1978   1.1     matt 
   1979   1.1     matt /*
   1980   1.1     matt  * Set the physical protection on the specified range of this map as requested.
   1981   1.1     matt  */
   1982   1.1     matt 
   1983   1.1     matt void
   1984   1.1     matt pmap_protect(pmap, sva, eva, prot)
   1985  1.15    chris 	struct pmap *pmap;
   1986   1.1     matt 	vaddr_t sva;
   1987   1.1     matt 	vaddr_t eva;
   1988   1.1     matt 	vm_prot_t prot;
   1989   1.1     matt {
   1990  1.11    chris 	pt_entry_t *pte = NULL, *ptes;
   1991   1.1     matt 	int armprot;
   1992   1.1     matt 	int flush = 0;
   1993   1.2     matt 	paddr_t pa;
   1994   1.1     matt 	int bank, off;
   1995   1.1     matt 	struct pv_entry *pv;
   1996   1.1     matt 
   1997   1.1     matt 	/*
   1998   1.1     matt 	 * Make sure pmap is valid. -dct
   1999   1.1     matt 	 */
   2000   1.1     matt 	if (pmap == NULL)
   2001   1.1     matt 		return;
   2002   1.1     matt 	PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
   2003   1.1     matt 	    pmap, sva, eva, prot));
   2004   1.1     matt 
   2005   1.1     matt 	if (~prot & VM_PROT_READ) {
   2006   1.1     matt 		/* Just remove the mappings. */
   2007   1.1     matt 		pmap_remove(pmap, sva, eva);
   2008   1.1     matt 		return;
   2009   1.1     matt 	}
   2010   1.1     matt 	if (prot & VM_PROT_WRITE) {
   2011   1.1     matt 		/*
   2012   1.1     matt 		 * If this is a read->write transition, just ignore it and let
   2013   1.1     matt 		 * uvm_fault() take care of it later.
   2014   1.1     matt 		 */
   2015   1.1     matt 		return;
   2016   1.1     matt 	}
   2017   1.1     matt 
   2018   1.1     matt 	sva &= PG_FRAME;
   2019   1.1     matt 	eva &= PG_FRAME;
   2020   1.1     matt 
   2021  1.11    chris 	ptes = pmap_map_ptes(pmap);
   2022   1.1     matt 	/*
   2023   1.1     matt 	 * We need to acquire a pointer to a page table page before entering
   2024   1.1     matt 	 * the following loop.
   2025   1.1     matt 	 */
   2026   1.1     matt 	while (sva < eva) {
   2027  1.11    chris 		if (pmap_pde_v(pmap_pde(pmap, sva)))
   2028   1.1     matt 			break;
   2029   1.1     matt 		sva = (sva & PD_MASK) + NBPD;
   2030   1.1     matt 	}
   2031  1.11    chris 
   2032  1.11    chris 	pte = &ptes[arm_byte_to_page(sva)];
   2033   1.1     matt 
   2034   1.1     matt 	while (sva < eva) {
   2035   1.1     matt 		/* only check once in a while */
   2036   1.1     matt 		if ((sva & PT_MASK) == 0) {
   2037   1.1     matt 			if (!pmap_pde_v(pmap_pde(pmap, sva))) {
   2038   1.1     matt 				/* We can race ahead here, to the next pde. */
   2039   1.1     matt 				sva += NBPD;
   2040   1.1     matt 				pte += arm_byte_to_page(NBPD);
   2041   1.1     matt 				continue;
   2042   1.1     matt 			}
   2043   1.1     matt 		}
   2044   1.1     matt 
   2045   1.1     matt 		if (!pmap_pte_v(pte))
   2046   1.1     matt 			goto next;
   2047   1.1     matt 
   2048   1.1     matt 		flush = 1;
   2049   1.1     matt 
   2050   1.1     matt 		armprot = 0;
   2051   1.1     matt 		if (sva < VM_MAXUSER_ADDRESS)
   2052   1.1     matt 			armprot |= PT_AP(AP_U);
   2053   1.1     matt 		else if (sva < VM_MAX_ADDRESS)
   2054   1.1     matt 			armprot |= PT_AP(AP_W);  /* XXX Ekk what is this ? */
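                        		/*
                        		 * The mask 0xfffff00f clears bits 4-11, i.e. all four
                        		 * AP fields of a small-page descriptor, before the new
                        		 * protection bits are or'ed back in.
                        		 */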
   2055   1.1     matt 		*pte = (*pte & 0xfffff00f) | armprot;
   2056   1.1     matt 
   2057   1.1     matt 		pa = pmap_pte_pa(pte);
   2058   1.1     matt 
   2059   1.1     matt 		/* Get the physical page index */
   2060   1.1     matt 
   2061   1.1     matt 		/* Clear write flag */
   2062   1.1     matt 		if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
   2063   1.1     matt 			pv = &vm_physmem[bank].pmseg.pvent[off];
   2064   1.1     matt 			(void) pmap_modify_pv(pmap, sva, pv, PT_Wr, 0);
   2065  1.12    chris 			pmap_vac_me_harder(pmap, pv, ptes, FALSE);
   2066   1.1     matt 		}
   2067   1.1     matt 
   2068   1.1     matt next:
   2069   1.1     matt 		sva += NBPG;
   2070   1.1     matt 		pte++;
   2071   1.1     matt 	}
   2072  1.11    chris 	pmap_unmap_ptes(pmap);
   2073   1.1     matt 	if (flush)
   2074   1.1     matt 		cpu_tlb_flushID();
   2075   1.1     matt }
   2076   1.1     matt 
   2077   1.1     matt /*
   2078  1.15    chris  * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
   2079   1.1     matt  * int flags)
   2080   1.1     matt  *
    2081   1.1     matt  *      Insert the given physical page (pa) at
    2082   1.1     matt  *      the specified virtual address (va) in the
   2083   1.1     matt  *      target physical map with the protection requested.
   2084   1.1     matt  *
   2085   1.1     matt  *      If specified, the page will be wired down, meaning
   2086   1.1     matt  *      that the related pte can not be reclaimed.
   2087   1.1     matt  *
   2088   1.1     matt  *      NB:  This is the only routine which MAY NOT lazy-evaluate
   2089   1.1     matt  *      or lose information.  That is, this routine must actually
   2090   1.1     matt  *      insert this page into the given map NOW.
   2091   1.1     matt  */
   2092   1.1     matt 
   2093   1.1     matt int
   2094   1.1     matt pmap_enter(pmap, va, pa, prot, flags)
   2095  1.15    chris 	struct pmap *pmap;
   2096   1.1     matt 	vaddr_t va;
   2097   1.2     matt 	paddr_t pa;
   2098   1.1     matt 	vm_prot_t prot;
   2099   1.1     matt 	int flags;
   2100   1.1     matt {
   2101  1.11    chris 	pt_entry_t *pte, *ptes;
   2102   1.1     matt 	u_int npte;
   2103   1.1     matt 	int bank, off;
   2104   1.1     matt 	struct pv_entry *pv = NULL;
   2105   1.2     matt 	paddr_t opa;
   2106   1.1     matt 	int nflags;
   2107   1.1     matt 	boolean_t wired = (flags & PMAP_WIRED) != 0;
   2108   1.1     matt 
   2109   1.1     matt 	PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
   2110   1.1     matt 	    va, pa, pmap, prot, wired));
   2111   1.1     matt 
   2112   1.1     matt #ifdef DIAGNOSTIC
   2113   1.1     matt 	/* Valid address ? */
   2114   1.1     matt 	if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
   2115   1.1     matt 		panic("pmap_enter: too big");
   2116   1.1     matt 	if (pmap != pmap_kernel() && va != 0) {
   2117   1.1     matt 		if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
   2118   1.1     matt 			panic("pmap_enter: kernel page in user map");
   2119   1.1     matt 	} else {
   2120   1.1     matt 		if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
   2121   1.1     matt 			panic("pmap_enter: user page in kernel map");
   2122   1.1     matt 		if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
   2123   1.1     matt 			panic("pmap_enter: entering PT page");
   2124   1.1     matt 	}
   2125   1.1     matt #endif
   2126   1.1     matt 
   2127   1.1     matt 	/*
   2128   1.1     matt 	 * Get a pointer to the pte for this virtual address. If the
   2129   1.1     matt 	 * pte pointer is NULL then we are missing the L2 page table
   2130   1.1     matt 	 * so we need to create one.
   2131   1.1     matt 	 */
   2132   1.1     matt 	pte = pmap_pte(pmap, va);
   2133   1.1     matt 	if (!pte) {
   2134   1.2     matt 		paddr_t l2pa;
   2135   1.1     matt 		struct vm_page *m;
   2136   1.1     matt 
   2137   1.1     matt 		/* Allocate a page table */
   2138   1.1     matt 		for (;;) {
   2139  1.16    chris 			m = uvm_pagealloc(&(pmap->pm_obj), 0, NULL,
   2140  1.16    chris 				UVM_PGA_USERESERVE);
   2141   1.1     matt 			if (m != NULL)
   2142   1.1     matt 				break;
   2143   1.1     matt 
   2144   1.1     matt 			/*
   2145   1.1     matt 			 * No page available.  If we're the kernel
   2146   1.1     matt 			 * pmap, we die, since we might not have
   2147   1.1     matt 			 * a valid thread context.  For user pmaps,
   2148   1.1     matt 			 * we assume that we _do_ have a valid thread
   2149   1.1     matt 			 * context, so we wait here for the pagedaemon
   2150   1.1     matt 			 * to free up some pages.
   2151   1.1     matt 			 *
   2152   1.1     matt 			 * XXX THE VM CODE IS PROBABLY HOLDING LOCKS
   2153   1.1     matt 			 * XXX RIGHT NOW, BUT ONLY ON OUR PARENT VM_MAP
   2154   1.1     matt 			 * XXX SO THIS IS PROBABLY SAFE.  In any case,
   2155   1.1     matt 			 * XXX other pmap modules claim it is safe to
   2156   1.1     matt 			 * XXX sleep here if it's a user pmap.
   2157   1.1     matt 			 */
   2158   1.1     matt 			if (pmap == pmap_kernel())
   2159   1.1     matt 				panic("pmap_enter: no free pages");
   2160   1.1     matt 			else
   2161   1.1     matt 				uvm_wait("pmap_enter");
   2162   1.1     matt 		}
   2163   1.1     matt 
   2164   1.1     matt 		/* Wire this page table into the L1. */
   2165   1.1     matt 		l2pa = VM_PAGE_TO_PHYS(m);
   2166   1.1     matt 		pmap_zero_page(l2pa);
   2167   1.1     matt 		pmap_map_in_l1(pmap, va, l2pa);
   2168   1.1     matt 		++pmap->pm_stats.resident_count;
   2169  1.16    chris 		m->flags &= ~PG_BUSY;	/* never busy */
   2170  1.16    chris 		m->wire_count = 1;	/* no mappings yet */
   2171  1.16    chris 
   2172   1.1     matt 		pte = pmap_pte(pmap, va);
   2173   1.1     matt #ifdef DIAGNOSTIC
   2174   1.1     matt 		if (!pte)
   2175   1.1     matt 			panic("pmap_enter: no pte");
   2176   1.1     matt #endif
   2177   1.1     matt 	}
   2178   1.1     matt 
   2179   1.1     matt 	nflags = 0;
   2180   1.1     matt 	if (prot & VM_PROT_WRITE)
   2181   1.1     matt 		nflags |= PT_Wr;
   2182   1.1     matt 	if (wired)
   2183   1.1     matt 		nflags |= PT_W;
   2184   1.1     matt 
   2185   1.1     matt 	/* More debugging info */
   2186   1.1     matt 	PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
   2187   1.1     matt 	    *pte));
   2188   1.1     matt 
   2189   1.1     matt 	/* Is the pte valid ? If so then this page is already mapped */
   2190   1.1     matt 	if (pmap_pte_v(pte)) {
   2191   1.1     matt 		/* Get the physical address of the current page mapped */
   2192   1.1     matt 		opa = pmap_pte_pa(pte);
   2193   1.1     matt 
   2194   1.1     matt #ifdef MYCROFT_HACK
   2195   1.1     matt 		printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
   2196   1.1     matt #endif
   2197   1.1     matt 
   2198   1.1     matt 		/* Are we mapping the same page ? */
   2199   1.1     matt 		if (opa == pa) {
   2200   1.1     matt 			/* All we must be doing is changing the protection */
   2201   1.1     matt 			PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
   2202   1.1     matt 			    va, pa));
   2203   1.1     matt 
   2204   1.1     matt 			/* Has the wiring changed ? */
   2205   1.1     matt 			if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
   2206   1.1     matt 				pv = &vm_physmem[bank].pmseg.pvent[off];
   2207   1.1     matt 				(void) pmap_modify_pv(pmap, va, pv,
   2208   1.1     matt 				    PT_Wr | PT_W, nflags);
   2209   1.1     matt  			}
   2210   1.1     matt 		} else {
   2211   1.1     matt 			/* We are replacing the page with a new one. */
   2212   1.1     matt 			cpu_cache_purgeID_rng(va, NBPG);
   2213   1.1     matt 
   2214   1.1     matt 			PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
   2215   1.1     matt 			    va, pa, opa));
   2216   1.1     matt 
   2217   1.1     matt 			/*
   2218   1.1     matt 			 * If it is part of our managed memory then we
   2219   1.1     matt 			 * must remove it from the PV list
   2220   1.1     matt 			 */
   2221   1.1     matt 			if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
   2222   1.1     matt 				pv = &vm_physmem[bank].pmseg.pvent[off];
   2223   1.1     matt 				pmap_remove_pv(pmap, va, pv);
   2224   1.1     matt 			}
   2225   1.1     matt 
   2226   1.1     matt 			goto enter;
   2227   1.1     matt 		}
   2228   1.1     matt 	} else {
   2229   1.1     matt 		opa = 0;
   2230   1.1     matt 		pmap_pte_addref(pmap, va);
   2231   1.1     matt 
   2232   1.1     matt 		/* pte is not valid so we must be hooking in a new page */
   2233   1.1     matt 		++pmap->pm_stats.resident_count;
   2234   1.1     matt 
   2235   1.1     matt 	enter:
   2236   1.1     matt 		/*
   2237   1.1     matt 		 * Enter on the PV list if part of our managed memory
   2238   1.1     matt 		 */
   2239   1.1     matt 		if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
   2240   1.1     matt 			pv = &vm_physmem[bank].pmseg.pvent[off];
   2241   1.1     matt 			pmap_enter_pv(pmap, va, pv, nflags);
   2242   1.1     matt 		}
   2243   1.1     matt 	}
   2244   1.1     matt 
   2245   1.1     matt #ifdef MYCROFT_HACK
   2246   1.1     matt 	if (mycroft_hack)
   2247   1.1     matt 		printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv);
   2248   1.1     matt #endif
   2249   1.1     matt 
   2250   1.1     matt 	/* Construct the pte, giving the correct access. */
   2251   1.1     matt 	npte = (pa & PG_FRAME);
   2252   1.1     matt 
   2253   1.1     matt 	/* VA 0 is magic. */
   2254   1.1     matt 	if (pmap != pmap_kernel() && va != 0)
   2255   1.1     matt 		npte |= PT_AP(AP_U);
   2256   1.1     matt 
   2257   1.1     matt 	if (bank != -1) {
   2258   1.1     matt #ifdef DIAGNOSTIC
   2259   1.1     matt 		if ((flags & VM_PROT_ALL) & ~prot)
   2260   1.1     matt 			panic("pmap_enter: access_type exceeds prot");
   2261   1.1     matt #endif
   2262   1.1     matt 		npte |= PT_C | PT_B;
   2263   1.1     matt 		if (flags & VM_PROT_WRITE) {
   2264   1.1     matt 			npte |= L2_SPAGE | PT_AP(AP_W);
   2265   1.1     matt 			vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
   2266   1.1     matt 		} else if (flags & VM_PROT_ALL) {
   2267   1.1     matt 			npte |= L2_SPAGE;
   2268   1.1     matt 			vm_physmem[bank].pmseg.attrs[off] |= PT_H;
   2269   1.1     matt 		} else
   2270   1.1     matt 			npte |= L2_INVAL;
   2271   1.1     matt 	} else {
   2272   1.1     matt 		if (prot & VM_PROT_WRITE)
   2273   1.1     matt 			npte |= L2_SPAGE | PT_AP(AP_W);
   2274   1.1     matt 		else if (prot & VM_PROT_ALL)
   2275   1.1     matt 			npte |= L2_SPAGE;
   2276   1.1     matt 		else
   2277   1.1     matt 			npte |= L2_INVAL;
   2278   1.1     matt 	}
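                        	/*
                        	 * Entering L2_INVAL when the access type gives no usable hint
                        	 * implements referenced/modified emulation: the first real
                        	 * access faults, and the abort handler can then record PT_H
                        	 * (referenced) and/or PT_M (modified) in the attrs and
                        	 * validate the PTE.
                        	 */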
   2279   1.1     matt 
   2280   1.1     matt #ifdef MYCROFT_HACK
   2281   1.1     matt 	if (mycroft_hack)
   2282   1.1     matt 		printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
   2283   1.1     matt #endif
   2284   1.1     matt 
   2285   1.1     matt 	*pte = npte;
   2286   1.1     matt 
   2287   1.1     matt 	if (bank != -1)
   2288  1.11    chris 	{
   2289  1.12    chris 		boolean_t pmap_active = FALSE;
   2290  1.11    chris 		/* XXX this will change once the whole of pmap_enter uses
   2291  1.11    chris 		 * map_ptes
   2292  1.11    chris 		 */
   2293  1.11    chris 		ptes = pmap_map_ptes(pmap);
   2294  1.12    chris 		if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
   2295  1.15    chris 		    || (pmap == pmap_kernel()))
   2296  1.12    chris 			pmap_active = TRUE;
   2297  1.12    chris  		pmap_vac_me_harder(pmap, pv, ptes, pmap_active);
   2298  1.11    chris 		pmap_unmap_ptes(pmap);
   2299  1.11    chris 	}
   2300   1.1     matt 
   2301   1.1     matt 	/* Better flush the TLB ... */
   2302   1.1     matt 	cpu_tlb_flushID_SE(va);
   2303   1.1     matt 
   2304   1.1     matt 	PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
   2305   1.1     matt 
   2306   1.4      chs 	return 0;
   2307   1.1     matt }
   2308   1.1     matt 
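                        /*
                         * pmap_kenter_pa: enter an unmanaged, wired kernel mapping of pa at va.
                         * No PV entry is recorded, so the mapping is invisible to operations
                         * such as pmap_page_protect() and must be removed with pmap_kremove().
                         */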
   2309   1.1     matt void
   2310   1.1     matt pmap_kenter_pa(va, pa, prot)
   2311   1.1     matt 	vaddr_t va;
   2312   1.1     matt 	paddr_t pa;
   2313   1.1     matt 	vm_prot_t prot;
   2314   1.1     matt {
   2315  1.14      chs 	struct pmap *pmap = pmap_kernel();
   2316  1.13    chris 	pt_entry_t *pte;
   2317  1.14      chs 	struct vm_page *pg;
   2318  1.13    chris 
   2319  1.14      chs 	if (!pmap_pde_v(pmap_pde(pmap, va))) {
   2320  1.14      chs 
   2321  1.13    chris 		/*
   2322  1.13    chris 		 * For the kernel pmaps it would be better to ensure
   2323  1.13    chris 		 * that they are always present, and to grow the
   2324  1.13    chris 		 * kernel as required.
   2325  1.13    chris 		 */
   2326  1.13    chris 
   2327  1.13    chris 		/* Allocate a page table */
   2328  1.16    chris 		pg = uvm_pagealloc(&(pmap_kernel()->pm_obj), 0, NULL,
   2329  1.14      chs 		    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
   2330  1.14      chs 		if (pg == NULL) {
   2331  1.13    chris 			panic("pmap_kenter_pa: no free pages");
   2332  1.13    chris 		}
   2333  1.16    chris 		pg->flags &= ~PG_BUSY;	/* never busy */
   2334  1.13    chris 
   2335  1.13    chris 		/* Wire this page table into the L1. */
   2336  1.14      chs 		pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(pg));
   2337  1.13    chris 	}
   2338  1.13    chris 	pte = vtopte(va);
   2339  1.14      chs 	KASSERT(!pmap_pte_v(pte));
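                        	/* XXX prot is ignored; the mapping is entered read/write. */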
   2340  1.13    chris 	*pte = L2_PTE(pa, AP_KRW);
   2341   1.1     matt }
   2342   1.1     matt 
   2343   1.1     matt void
   2344   1.1     matt pmap_kremove(va, len)
   2345   1.1     matt 	vaddr_t va;
   2346   1.1     matt 	vsize_t len;
   2347   1.1     matt {
   2348  1.14      chs 	pt_entry_t *pte;
   2349  1.14      chs 
   2350   1.1     matt 	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
   2351  1.13    chris 
   2352  1.14      chs 		/*
   2353  1.14      chs 		 * We assume that we will only be called with small
   2354  1.14      chs 		 * regions of memory.
   2355  1.14      chs 		 */
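                        		/*
                        		 * (Hence the per-page cache purge and single-entry
                        		 * TLB flush below, rather than a full clean/flush,
                        		 * which would presumably win for large regions.)
                        		 */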
   2356  1.14      chs 
   2357  1.14      chs 		KASSERT(pmap_pde_v(pmap_pde(pmap_kernel(), va)));
   2358  1.13    chris 		pte = vtopte(va);
   2359  1.13    chris 		cpu_cache_purgeID_rng(va, PAGE_SIZE);
   2360  1.13    chris 		*pte = 0;
   2361  1.13    chris 		cpu_tlb_flushID_SE(va);
   2362   1.1     matt 	}
   2363   1.1     matt }
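
                        /*
                         * A sketch of the expected pairing of the two functions above
                         * (hypothetical caller; `pg' and `va' are illustrative only):
                         *
                         *	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                         *	    VM_PROT_READ | VM_PROT_WRITE);
                         *	... use the mapping ...
                         *	pmap_kremove(va, PAGE_SIZE);
                         */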
   2364   1.1     matt 
   2365   1.1     matt /*
   2366   1.1     matt  * pmap_page_protect:
   2367   1.1     matt  *
   2368   1.1     matt  * Lower the permission for all mappings to a given page.
   2369   1.1     matt  */
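                        /*
                         * (Read-only protections are implemented by revoking write
                         * permission via pmap_copy_on_write(), VM_PROT_ALL is a no-op,
                         * and anything else removes every mapping of the page.)
                         */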
   2370   1.1     matt 
   2371   1.1     matt void
   2372   1.1     matt pmap_page_protect(pg, prot)
   2373   1.1     matt 	struct vm_page *pg;
   2374   1.1     matt 	vm_prot_t prot;
   2375   1.1     matt {
   2376   1.1     matt 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   2377   1.1     matt 
   2378   1.1     matt 	PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
   2379   1.1     matt 
    2380   1.1     matt 	switch (prot) {
   2381   1.1     matt 	case VM_PROT_READ:
   2382   1.1     matt 	case VM_PROT_READ|VM_PROT_EXECUTE:
   2383   1.1     matt 		pmap_copy_on_write(pa);
   2384   1.1     matt 		break;
   2385   1.1     matt 
   2386   1.1     matt 	case VM_PROT_ALL:
   2387   1.1     matt 		break;
   2388   1.1     matt 
   2389   1.1     matt 	default:
   2390   1.1     matt 		pmap_remove_all(pa);
   2391   1.1     matt 		break;
   2392   1.1     matt 	}
   2393   1.1     matt }
   2394   1.1     matt 
   2395   1.1     matt 
   2396   1.1     matt /*
   2397   1.1     matt  * Routine:	pmap_unwire
   2398   1.1     matt  * Function:	Clear the wired attribute for a map/virtual-address
   2399   1.1     matt  *		pair.
   2400   1.1     matt  * In/out conditions:
   2401   1.1     matt  *		The mapping must already exist in the pmap.
   2402   1.1     matt  */
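                        /*
                         * (On this port the wired bit is kept in the pv_entry flags
                         * (PT_W) rather than in the PTE itself, so clearing it only
                         * updates the pv list.)
                         */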
   2403   1.1     matt 
   2404   1.1     matt void
   2405   1.1     matt pmap_unwire(pmap, va)
   2406  1.15    chris 	struct pmap *pmap;
   2407   1.1     matt 	vaddr_t va;
   2408   1.1     matt {
   2409   1.1     matt 	pt_entry_t *pte;
   2410   1.2     matt 	paddr_t pa;
   2411   1.1     matt 	int bank, off;
   2412   1.1     matt 	struct pv_entry *pv;
   2413   1.1     matt 
   2414   1.1     matt 	/*
   2415   1.1     matt 	 * Make sure pmap is valid. -dct
   2416   1.1     matt 	 */
   2417   1.1     matt 	if (pmap == NULL)
   2418   1.1     matt 		return;
   2419   1.1     matt 
   2420   1.1     matt 	/* Get the pte */
   2421   1.1     matt 	pte = pmap_pte(pmap, va);
   2422   1.1     matt 	if (!pte)
   2423   1.1     matt 		return;
   2424   1.1     matt 
   2425   1.1     matt 	/* Extract the physical address of the page */
   2426   1.1     matt 	pa = pmap_pte_pa(pte);
   2427   1.1     matt 
   2428   1.1     matt 	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
   2429   1.1     matt 		return;
   2430   1.1     matt 	pv = &vm_physmem[bank].pmseg.pvent[off];
   2431   1.1     matt 	/* Update the wired bit in the pv entry for this page. */
   2432   1.1     matt 	(void) pmap_modify_pv(pmap, va, pv, PT_W, 0);
   2433   1.1     matt }
   2434   1.1     matt 
   2435   1.1     matt /*
   2436  1.15    chris  * pt_entry_t *pmap_pte(struct pmap *pmap, vaddr_t va)
   2437   1.1     matt  *
   2438   1.1     matt  * Return the pointer to a page table entry corresponding to the supplied
   2439   1.1     matt  * virtual address.
   2440   1.1     matt  *
   2441   1.1     matt  * The page directory is first checked to make sure that a page table
   2442   1.1     matt  * for the address in question exists and if it does a pointer to the
   2443   1.1     matt  * entry is returned.
   2444   1.1     matt  *
    2445   1.1     matt  * The way this works is that the kernel page tables are mapped
    2446   1.1     matt  * into the memory map from ALT_PAGE_TBLS_BASE to ALT_PAGE_TBLS_BASE+4MB.
   2447   1.1     matt  * This allows page tables to be located quickly.
   2448   1.1     matt  */
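                        /*
                         * A worked example of the lookup arithmetic used below: with
                         * 4KB pages each PTE is 4 bytes, so the byte offset of va's
                         * PTE within the mapped tables is (va >> PGSHIFT) * 4, i.e.
                         * va >> (PGSHIFT - 2).  That shift drags two page-offset bits
                         * of va into the bottom of the result, which the & ~3 masks
                         * off again to keep the pointer word aligned.
                         */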
   2449   1.1     matt pt_entry_t *
   2450   1.1     matt pmap_pte(pmap, va)
   2451  1.15    chris 	struct pmap *pmap;
   2452   1.1     matt 	vaddr_t va;
   2453   1.1     matt {
   2454   1.1     matt 	pt_entry_t *ptp;
   2455   1.1     matt 	pt_entry_t *result;
   2456   1.1     matt 
   2457   1.1     matt 	/* The pmap must be valid */
   2458   1.1     matt 	if (!pmap)
   2459   1.1     matt 		return(NULL);
   2460   1.1     matt 
   2461   1.1     matt 	/* Return the address of the pte */
   2462   1.1     matt 	PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
   2463   1.1     matt 	    pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
   2464   1.1     matt 
    2465   1.1     matt 	/* Do we have a valid pde? If not, we don't have a page table. */
   2466   1.1     matt 	if (!pmap_pde_v(pmap_pde(pmap, va))) {
   2467   1.1     matt 		PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
   2468   1.1     matt 		    pmap_pde(pmap, va)));
   2469   1.1     matt 		return(NULL);
   2470   1.1     matt 	}
   2471   1.1     matt 
   2472   1.1     matt 	PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
   2473   1.1     matt 	    pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
   2474   1.1     matt 	    + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
   2475   1.1     matt 	    (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
   2476   1.1     matt 
   2477   1.1     matt 	/*
   2478   1.1     matt 	 * If the pmap is the kernel pmap or the pmap is the active one
    2479   1.1     matt 	 * then we can just return a pointer to the entry relative to
   2480   1.1     matt 	 * PROCESS_PAGE_TBLS_BASE.
   2481   1.1     matt 	 * Otherwise we need to map the page tables to an alternative
   2482   1.1     matt 	 * address and reference them there.
   2483   1.1     matt 	 */
   2484  1.15    chris 	if (pmap == pmap_kernel() || pmap->pm_pptpt
   2485   1.1     matt 	    == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
   2486   1.1     matt 	    + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
   2487   1.1     matt 	    ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
   2488   1.1     matt 		ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
   2489   1.1     matt 	} else {
   2490   1.1     matt 		struct proc *p = curproc;
   2491   1.1     matt 
   2492   1.1     matt 		/* If we don't have a valid curproc use proc0 */
   2493   1.1     matt 		/* Perhaps we should just use kernel_pmap instead */
   2494   1.1     matt 		if (p == NULL)
   2495   1.1     matt 			p = &proc0;
   2496   1.1     matt #ifdef DIAGNOSTIC
   2497   1.1     matt 		/*
   2498   1.1     matt 		 * The pmap should always be valid for the process so
   2499   1.1     matt 		 * panic if it is not.
   2500   1.1     matt 		 */
   2501   1.1     matt 		if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
   2502   1.1     matt 			printf("pmap_pte: va=%08lx p=%p vm=%p\n",
   2503   1.1     matt 			    va, p, p->p_vmspace);
   2504   1.1     matt 			console_debugger();
   2505   1.1     matt 		}
   2506   1.1     matt 		/*
   2507   1.1     matt 		 * The pmap for the current process should be mapped. If it
   2508   1.1     matt 		 * is not then we have a problem.
   2509   1.1     matt 		 */
   2510   1.1     matt 		if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
   2511   1.1     matt 		    (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
   2512   1.1     matt 		    + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
   2513   1.1     matt 		    (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
   2514   1.1     matt 			printf("pmap pagetable = P%08lx current = P%08x ",
   2515   1.1     matt 			    pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
   2516   1.1     matt 			    + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
   2517   1.1     matt 			    (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
   2518   1.1     matt 			    PG_FRAME));
   2519   1.1     matt 			printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
    2520   1.1     matt 			panic("pmap_pte: current and pmap mismatch");
   2521   1.1     matt 		}
   2522   1.1     matt #endif
   2523   1.1     matt 
   2524   1.1     matt 		ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
   2525   1.1     matt 		pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
   2526   1.1     matt 		    pmap->pm_pptpt);
   2527   1.1     matt 		cpu_tlb_flushD();
   2528   1.1     matt 	}
   2529   1.1     matt 	PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
   2530   1.1     matt 	    ((va >> (PGSHIFT-2)) & ~3)));
   2531   1.1     matt 	result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
   2532   1.1     matt 	return(result);
   2533   1.1     matt }
   2534   1.1     matt 
   2535   1.1     matt /*
   2536   1.1     matt  * Routine:  pmap_extract
   2537   1.1     matt  * Function:
   2538   1.1     matt  *           Extract the physical page address associated
   2539   1.1     matt  *           with the given map/virtual_address pair.
   2540   1.1     matt  */
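                        /*
                         * A typical (hypothetical) use:
                         *
                         *	paddr_t pa;
                         *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                         *		... no valid mapping at va ...
                         */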
   2541   1.1     matt boolean_t
   2542   1.1     matt pmap_extract(pmap, va, pap)
   2543  1.15    chris 	struct pmap *pmap;
   2544   1.1     matt 	vaddr_t va;
   2545   1.1     matt 	paddr_t *pap;
   2546   1.1     matt {
   2547  1.11    chris 	pt_entry_t *pte, *ptes;
   2548   1.1     matt 	paddr_t pa;
   2549   1.1     matt 
   2550   1.1     matt 	PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
   2551   1.1     matt 
   2552   1.1     matt 	/*
   2553  1.11    chris 	 * Get the pte for this virtual address.
   2554   1.1     matt 	 */
   2555  1.11    chris 	ptes = pmap_map_ptes(pmap);
   2556  1.11    chris 	pte = &ptes[arm_byte_to_page(va)];
   2557   1.1     matt 
    2558  1.11    chris 	/*
    2559  1.11    chris 	 * If there is no pte then there is no page table, etc.
    2560  1.11    chris 	 * Is the pte valid?  If not, then no page is actually mapped here.
    2561  1.11    chris 	 */
    2562  1.11    chris 	if (!pmap_pde_v(pmap_pde(pmap, va)) || !pmap_pte_v(pte)) {
    2563  1.11    chris 		pmap_unmap_ptes(pmap);
    2564  1.11    chris 		return (FALSE);
    2565  1.11    chris 	}
   2566   1.1     matt 
   2567   1.1     matt 	/* Return the physical address depending on the PTE type */
    2568   1.1     matt 	/* XXX What about L1 section mappings? */
   2569   1.1     matt 	if ((*(pte) & L2_MASK) == L2_LPAGE) {
   2570   1.1     matt 		/* Extract the physical address from the pte */
   2571   1.1     matt 		pa = (*(pte)) & ~(L2_LPAGE_SIZE - 1);
   2572   1.1     matt 
   2573   1.1     matt 		PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
   2574   1.1     matt 		    (pa | (va & (L2_LPAGE_SIZE - 1)))));
   2575   1.1     matt 
   2576   1.1     matt 		if (pap != NULL)
   2577   1.1     matt 			*pap = pa | (va & (L2_LPAGE_SIZE - 1));
   2578   1.1     matt 	} else {
   2579   1.1     matt 		/* Extract the physical address from the pte */
   2580   1.1     matt 		pa = pmap_pte_pa(pte);
   2581   1.1     matt 
   2582   1.1     matt 		PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
   2583   1.1     matt 		    (pa | (va & ~PG_FRAME))));
   2584   1.1     matt 
   2585   1.1     matt 		if (pap != NULL)
   2586   1.1     matt 			*pap = pa | (va & ~PG_FRAME);
   2587   1.1     matt 	}
   2588  1.11    chris 	pmap_unmap_ptes(pmap);
   2589  1.11    chris 	return (TRUE);
   2590   1.1     matt }
   2591   1.1     matt 
   2592   1.1     matt 
   2593   1.1     matt /*
   2594   1.1     matt  * Copy the range specified by src_addr/len from the source map to the
   2595   1.1     matt  * range dst_addr/len in the destination map.
   2596   1.1     matt  *
   2597   1.1     matt  * This routine is only advisory and need not do anything.
   2598   1.1     matt  */
   2599   1.1     matt 
   2600   1.1     matt void
   2601   1.1     matt pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
   2602  1.15    chris 	struct pmap *dst_pmap;
   2603  1.15    chris 	struct pmap *src_pmap;
   2604   1.1     matt 	vaddr_t dst_addr;
   2605   1.2     matt 	vsize_t len;
   2606   1.1     matt 	vaddr_t src_addr;
   2607   1.1     matt {
   2608   1.1     matt 	PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
   2609   1.1     matt 	    dst_pmap, src_pmap, dst_addr, len, src_addr));
   2610   1.1     matt }
   2611   1.1     matt 
   2612   1.1     matt #if defined(PMAP_DEBUG)
   2613   1.1     matt void
   2614   1.1     matt pmap_dump_pvlist(phys, m)
   2615   1.1     matt 	vaddr_t phys;
   2616   1.1     matt 	char *m;
   2617   1.1     matt {
   2618   1.1     matt 	struct pv_entry *pv;
   2619   1.1     matt 	int bank, off;
   2620   1.1     matt 
   2621   1.1     matt 	if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
   2622   1.1     matt 		printf("INVALID PA\n");
   2623   1.1     matt 		return;
   2624   1.1     matt 	}
   2625   1.1     matt 	pv = &vm_physmem[bank].pmseg.pvent[off];
   2626   1.1     matt 	printf("%s %08lx:", m, phys);
   2627   1.1     matt 	if (pv->pv_pmap == NULL) {
   2628   1.1     matt 		printf(" no mappings\n");
   2629   1.1     matt 		return;
   2630   1.1     matt 	}
   2631   1.1     matt 
   2632   1.1     matt 	for (; pv; pv = pv->pv_next)
   2633   1.1     matt 		printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
   2634   1.1     matt 		    pv->pv_va, pv->pv_flags);
   2635   1.1     matt 
   2636   1.1     matt 	printf("\n");
   2637   1.1     matt }
   2638   1.1     matt 
   2639   1.1     matt #endif	/* PMAP_DEBUG */
   2640   1.1     matt 
   2641   1.1     matt boolean_t
   2642   1.1     matt pmap_testbit(pa, setbits)
   2643   1.2     matt 	paddr_t pa;
   2644   1.1     matt 	int setbits;
   2645   1.1     matt {
   2646   1.1     matt 	int bank, off;
   2647   1.1     matt 
   2648   1.1     matt 	PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
   2649   1.1     matt 
   2650   1.1     matt 	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
   2651   1.1     matt 		return(FALSE);
   2652   1.1     matt 
   2653   1.1     matt 	/*
   2654   1.1     matt 	 * Check saved info only
   2655   1.1     matt 	 */
   2656   1.1     matt 	if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
   2657   1.1     matt 		PDEBUG(0, printf("pmap_attributes = %02x\n",
   2658   1.1     matt 		    vm_physmem[bank].pmseg.attrs[off]));
   2659   1.1     matt 		return(TRUE);
   2660   1.1     matt 	}
   2661   1.1     matt 
   2662   1.1     matt 	return(FALSE);
   2663   1.1     matt }
   2664   1.1     matt 
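                        /*
                         * pmap_map_ptes: map a pmap's page tables into kernel VA space
                         * and return their base address.  The kernel pmap and the
                         * currently active pmap are already visible at
                         * PROCESS_PAGE_TBLS_BASE; any other pmap's tables are
                         * temporarily borrowed into ALT_PAGE_TBLS_BASE via the current
                         * process's L1.  Callers must pair this with pmap_unmap_ptes().
                         */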
    2665  1.11    chris static pt_entry_t *
    2666  1.11    chris pmap_map_ptes(struct pmap *pmap)
    2667  1.11    chris {
    2668  1.11    chris 	struct proc *p;
    2669  1.11    chris 
    2670  1.11    chris 	/* the kernel's pmap is always accessible */
    2671  1.11    chris 	if (pmap == pmap_kernel())
    2672  1.11    chris 		return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
    2673  1.11    chris 
    2674  1.11    chris 	if (curproc &&
    2675  1.11    chris 	    curproc->p_vmspace->vm_map.pmap == pmap)
    2676  1.11    chris 		return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
    2677  1.11    chris 
    2678  1.11    chris 	p = curproc;
    2679  1.11    chris 	if (p == NULL)
    2680  1.11    chris 		p = &proc0;
    2681  1.11    chris 
    2682  1.11    chris 	pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
    2683  1.11    chris 	    pmap->pm_pptpt);
    2684  1.11    chris 	cpu_tlb_flushD();
    2685  1.11    chris 	return (pt_entry_t *)ALT_PAGE_TBLS_BASE;
    2686  1.11    chris }
   2689   1.1     matt 
   2690   1.1     matt /*
   2691   1.1     matt  * Modify pte bits for all ptes corresponding to the given physical address.
   2692   1.1     matt  * We use `maskbits' rather than `clearbits' because we're always passing
   2693   1.1     matt  * constants and the latter would require an extra inversion at run-time.
   2694   1.1     matt  */
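                        /*
                         * (Callers always pass constants such as PT_M or PT_H; see
                         * pmap_clear_modify() and pmap_clear_reference() below.)
                         */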
   2695   1.1     matt 
   2696   1.1     matt void
   2697   1.1     matt pmap_clearbit(pa, maskbits)
   2698   1.2     matt 	paddr_t pa;
   2699   1.1     matt 	int maskbits;
   2700   1.1     matt {
   2701   1.1     matt 	struct pv_entry *pv;
   2702   1.1     matt 	pt_entry_t *pte;
   2703   1.1     matt 	vaddr_t va;
   2704   1.1     matt 	int bank, off;
   2705   1.1     matt 	int s;
   2706   1.1     matt 
   2707   1.1     matt 	PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
   2708   1.1     matt 	    pa, maskbits));
   2709   1.1     matt 	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
   2710   1.1     matt 		return;
   2711   1.1     matt 	pv = &vm_physmem[bank].pmseg.pvent[off];
   2712   1.1     matt 	s = splvm();
   2713   1.1     matt 
   2714   1.1     matt 	/*
   2715   1.1     matt 	 * Clear saved attributes (modify, reference)
   2716   1.1     matt 	 */
   2717   1.1     matt 	vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
   2718   1.1     matt 
   2719   1.1     matt 	if (pv->pv_pmap == NULL) {
   2720   1.1     matt 		splx(s);
   2721   1.1     matt 		return;
   2722   1.1     matt 	}
   2723   1.1     matt 
   2724   1.1     matt 	/*
    2725   1.1     matt 	 * Loop over all current mappings, setting/clearing as appropriate.
   2726   1.1     matt 	 */
   2727   1.1     matt 	for (; pv; pv = pv->pv_next) {
   2728   1.1     matt 		va = pv->pv_va;
   2729   1.1     matt 
   2730   1.1     matt 		/*
   2731   1.1     matt 		 * XXX don't write protect pager mappings
   2732   1.1     matt 		 */
   2733   1.1     matt 		if (va >= uvm.pager_sva && va < uvm.pager_eva) {
    2734  1.11    chris 			printf("pmap_clearbit: found pager VA on pv_list\n");
   2735   1.1     matt 			continue;
   2736   1.1     matt 		}
   2737   1.1     matt 
   2738   1.1     matt 		pv->pv_flags &= ~maskbits;
   2739   1.1     matt 		pte = pmap_pte(pv->pv_pmap, va);
   2740   1.1     matt 		if (maskbits & (PT_Wr|PT_M))
   2741   1.1     matt 			*pte = *pte & ~PT_AP(AP_W);
   2742   1.1     matt 		if (maskbits & PT_H)
   2743   1.1     matt 			*pte = (*pte & ~L2_MASK) | L2_INVAL;
   2744   1.1     matt 	}
   2745   1.1     matt 	cpu_tlb_flushID();
   2746   1.1     matt 
   2747   1.1     matt 	splx(s);
   2748   1.1     matt }
   2749   1.1     matt 
   2750   1.1     matt 
   2751   1.1     matt boolean_t
   2752   1.1     matt pmap_clear_modify(pg)
   2753   1.1     matt 	struct vm_page *pg;
   2754   1.1     matt {
   2755   1.1     matt 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   2756   1.1     matt 	boolean_t rv;
   2757   1.1     matt 
   2758   1.1     matt 	PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
   2759   1.1     matt 	rv = pmap_testbit(pa, PT_M);
   2760   1.1     matt 	pmap_clearbit(pa, PT_M);
   2761   1.1     matt 	return rv;
   2762   1.1     matt }
   2763   1.1     matt 
   2764   1.1     matt 
   2765   1.1     matt boolean_t
   2766   1.1     matt pmap_clear_reference(pg)
   2767   1.1     matt 	struct vm_page *pg;
   2768   1.1     matt {
   2769   1.1     matt 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   2770   1.1     matt 	boolean_t rv;
   2771   1.1     matt 
   2772   1.1     matt 	PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
   2773   1.1     matt 	rv = pmap_testbit(pa, PT_H);
   2774   1.1     matt 	pmap_clearbit(pa, PT_H);
   2775   1.1     matt 	return rv;
   2776   1.1     matt }
   2777   1.1     matt 
   2778   1.1     matt 
   2779   1.1     matt void
   2780   1.1     matt pmap_copy_on_write(pa)
   2781   1.2     matt 	paddr_t pa;
   2782   1.1     matt {
   2783   1.1     matt 	PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
   2784   1.1     matt 	pmap_clearbit(pa, PT_Wr);
   2785   1.1     matt }
   2786   1.1     matt 
   2787   1.1     matt 
   2788   1.1     matt boolean_t
   2789   1.1     matt pmap_is_modified(pg)
   2790   1.1     matt 	struct vm_page *pg;
   2791   1.1     matt {
   2792   1.1     matt 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   2793   1.1     matt 	boolean_t result;
   2794   1.1     matt 
   2795   1.1     matt 	result = pmap_testbit(pa, PT_M);
   2796   1.1     matt 	PDEBUG(0, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
   2797   1.1     matt 	return (result);
   2798   1.1     matt }
   2799   1.1     matt 
   2800   1.1     matt 
   2801   1.1     matt boolean_t
   2802   1.1     matt pmap_is_referenced(pg)
   2803   1.1     matt 	struct vm_page *pg;
   2804   1.1     matt {
   2805   1.1     matt 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   2806   1.1     matt 	boolean_t result;
   2807   1.1     matt 
   2808   1.1     matt 	result = pmap_testbit(pa, PT_H);
   2809   1.1     matt 	PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
   2810   1.1     matt 	return (result);
   2811   1.1     matt }
   2812   1.1     matt 
   2813   1.1     matt 
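                        /*
                         * Referenced/modified emulation.  The ARM MMU provides no
                         * hardware referenced or modified bits, so pmap_enter() installs
                         * mappings as invalid, or read-only, until the first access; the
                         * resulting faults bring us to the two routines below, which
                         * record PT_H/PT_M in the page attributes and upgrade the PTE.
                         */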
   2814   1.1     matt int
   2815   1.1     matt pmap_modified_emulation(pmap, va)
   2816  1.15    chris 	struct pmap *pmap;
   2817   1.1     matt 	vaddr_t va;
   2818   1.1     matt {
   2819   1.1     matt 	pt_entry_t *pte;
   2820   1.2     matt 	paddr_t pa;
   2821   1.1     matt 	int bank, off;
   2822   1.1     matt 	struct pv_entry *pv;
   2823   1.1     matt 	u_int flags;
   2824   1.1     matt 
   2825   1.1     matt 	PDEBUG(2, printf("pmap_modified_emulation\n"));
   2826   1.1     matt 
   2827   1.1     matt 	/* Get the pte */
   2828   1.1     matt 	pte = pmap_pte(pmap, va);
   2829   1.1     matt 	if (!pte) {
   2830   1.1     matt 		PDEBUG(2, printf("no pte\n"));
   2831   1.1     matt 		return(0);
   2832   1.1     matt 	}
   2833   1.1     matt 
   2834   1.1     matt 	PDEBUG(1, printf("*pte=%08x\n", *pte));
   2835   1.1     matt 
   2836   1.1     matt 	/* Check for a zero pte */
   2837   1.1     matt 	if (*pte == 0)
   2838   1.1     matt 		return(0);
   2839   1.1     matt 
   2840   1.1     matt 	/* This can happen if user code tries to access kernel memory. */
   2841   1.1     matt 	if ((*pte & PT_AP(AP_W)) != 0)
   2842   1.1     matt 		return (0);
   2843   1.1     matt 
   2844   1.1     matt 	/* Extract the physical address of the page */
   2845   1.1     matt 	pa = pmap_pte_pa(pte);
   2846   1.1     matt 	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
   2847   1.1     matt 		return(0);
   2848   1.1     matt 
   2849   1.1     matt 	/* Get the current flags for this page. */
   2850   1.1     matt 	pv = &vm_physmem[bank].pmseg.pvent[off];
   2851   1.1     matt 	flags = pmap_modify_pv(pmap, va, pv, 0, 0);
   2852   1.1     matt 	PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
   2853   1.1     matt 
   2854   1.1     matt 	/*
   2855   1.1     matt 	 * Do the flags say this page is writable ? If not then it is a
   2856   1.1     matt 	 * genuine write fault. If yes then the write fault is our fault
   2857   1.1     matt 	 * as we did not reflect the write access in the PTE. Now we know
   2858   1.1     matt 	 * a write has occurred we can correct this and also set the
   2859   1.1     matt 	 * modified bit
   2860   1.1     matt 	 */
   2861   1.1     matt 	if (~flags & PT_Wr)
   2862   1.1     matt 		return(0);
   2863   1.1     matt 
   2864   1.1     matt 	PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
   2865   1.1     matt 	    va, pte, *pte));
   2866   1.1     matt 	vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
   2867   1.1     matt 	*pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
   2868   1.1     matt 	PDEBUG(0, printf("->(%08x)\n", *pte));
   2869   1.1     matt 
   2870   1.1     matt 	/* Return, indicating the problem has been dealt with */
   2871   1.1     matt 	cpu_tlb_flushID_SE(va);
   2872   1.1     matt 	return(1);
   2873   1.1     matt }
   2874   1.1     matt 
   2875   1.1     matt 
   2876   1.1     matt int
   2877   1.1     matt pmap_handled_emulation(pmap, va)
   2878  1.15    chris 	struct pmap *pmap;
   2879   1.1     matt 	vaddr_t va;
   2880   1.1     matt {
   2881   1.1     matt 	pt_entry_t *pte;
   2882   1.2     matt 	paddr_t pa;
   2883   1.1     matt 	int bank, off;
   2884   1.1     matt 
   2885   1.1     matt 	PDEBUG(2, printf("pmap_handled_emulation\n"));
   2886   1.1     matt 
   2887   1.1     matt 	/* Get the pte */
   2888   1.1     matt 	pte = pmap_pte(pmap, va);
   2889   1.1     matt 	if (!pte) {
   2890   1.1     matt 		PDEBUG(2, printf("no pte\n"));
   2891   1.1     matt 		return(0);
   2892   1.1     matt 	}
   2893   1.1     matt 
   2894   1.1     matt 	PDEBUG(1, printf("*pte=%08x\n", *pte));
   2895   1.1     matt 
   2896   1.1     matt 	/* Check for a zero pte */
   2897   1.1     matt 	if (*pte == 0)
   2898   1.1     matt 		return(0);
   2899   1.1     matt 
   2900   1.1     matt 	/* This can happen if user code tries to access kernel memory. */
   2901   1.1     matt 	if ((*pte & L2_MASK) != L2_INVAL)
   2902   1.1     matt 		return (0);
   2903   1.1     matt 
   2904   1.1     matt 	/* Extract the physical address of the page */
   2905   1.1     matt 	pa = pmap_pte_pa(pte);
   2906   1.1     matt 	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
   2907   1.1     matt 		return(0);
   2908   1.1     matt 
   2909   1.1     matt 	/*
    2910   1.1     matt 	 * OK, we just enable the pte and mark the attributes as handled.
   2911   1.1     matt 	 */
   2912   1.1     matt 	PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
   2913   1.1     matt 	    va, pte, *pte));
   2914   1.1     matt 	vm_physmem[bank].pmseg.attrs[off] |= PT_H;
   2915   1.1     matt 	*pte = (*pte & ~L2_MASK) | L2_SPAGE;
   2916   1.1     matt 	PDEBUG(0, printf("->(%08x)\n", *pte));
   2917   1.1     matt 
   2918   1.1     matt 	/* Return, indicating the problem has been dealt with */
   2919   1.1     matt 	cpu_tlb_flushID_SE(va);
   2920   1.1     matt 	return(1);
   2921   1.1     matt }
   2922   1.1     matt 
   2923   1.1     matt /*
   2924   1.1     matt  * pmap_collect: free resources held by a pmap
   2925   1.1     matt  *
   2926   1.1     matt  * => optional function.
   2927   1.1     matt  * => called when a process is swapped out to free memory.
   2928   1.1     matt  */
   2929   1.1     matt 
   2930   1.1     matt void
   2931   1.1     matt pmap_collect(pmap)
   2932  1.15    chris 	struct pmap *pmap;
   2933   1.1     matt {
   2934   1.1     matt }
   2935   1.1     matt 
   2936   1.1     matt /*
   2937   1.1     matt  * Routine:	pmap_procwr
   2938   1.1     matt  *
   2939   1.1     matt  * Function:
   2940   1.1     matt  *	Synchronize caches corresponding to [addr, addr+len) in p.
   2941   1.1     matt  *
   2942   1.1     matt  */
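                        /*
                         * (A process that is not running presumably holds no live cache
                         * lines, the virtually-indexed caches being cleaned at context
                         * switch; hence only curproc needs the sync.)
                         */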
   2943   1.1     matt void
   2944   1.1     matt pmap_procwr(p, va, len)
   2945   1.1     matt 	struct proc	*p;
   2946   1.1     matt 	vaddr_t		va;
   2947   1.3     matt 	int		len;
   2948   1.1     matt {
   2949   1.1     matt 	/* We only need to do anything if it is the current process. */
   2950   1.1     matt 	if (p == curproc)
   2951   1.1     matt 		cpu_cache_syncI_rng(va, len);
   2952   1.1     matt }
   2953   1.1     matt 
   2954   1.1     matt /* End of pmap.c */
   2955