      1 /*	$NetBSD: pmap.c,v 1.19 2001/09/10 21:19:35 chris Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001 Richard Earnshaw
      5  * Copyright (c) 2001 Christopher Gilbert
      6  * All rights reserved.
      7  *
      8  * 1. Redistributions of source code must retain the above copyright
      9  *    notice, this list of conditions and the following disclaimer.
     10  * 2. Redistributions in binary form must reproduce the above copyright
     11  *    notice, this list of conditions and the following disclaimer in the
     12  *    documentation and/or other materials provided with the distribution.
     13  * 3. The name of the company nor the name of the author may be used to
     14  *    endorse or promote products derived from this software without specific
     15  *    prior written permission.
     16  *
     17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
     18  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     20  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
     21  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     22  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     23  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     27  * SUCH DAMAGE.
     28  */
     29 
     30 /*-
     31  * Copyright (c) 1999 The NetBSD Foundation, Inc.
     32  * All rights reserved.
     33  *
     34  * This code is derived from software contributed to The NetBSD Foundation
     35  * by Charles M. Hannum.
     36  *
     37  * Redistribution and use in source and binary forms, with or without
     38  * modification, are permitted provided that the following conditions
     39  * are met:
     40  * 1. Redistributions of source code must retain the above copyright
     41  *    notice, this list of conditions and the following disclaimer.
     42  * 2. Redistributions in binary form must reproduce the above copyright
     43  *    notice, this list of conditions and the following disclaimer in the
     44  *    documentation and/or other materials provided with the distribution.
     45  * 3. All advertising materials mentioning features or use of this software
     46  *    must display the following acknowledgement:
     47  *        This product includes software developed by the NetBSD
     48  *        Foundation, Inc. and its contributors.
     49  * 4. Neither the name of The NetBSD Foundation nor the names of its
     50  *    contributors may be used to endorse or promote products derived
     51  *    from this software without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     54  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     55  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     56  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     57  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     58  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     59  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     60  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     61  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     62  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     63  * POSSIBILITY OF SUCH DAMAGE.
     64  */
     65 
     66 /*
     67  * Copyright (c) 1994-1998 Mark Brinicombe.
     68  * Copyright (c) 1994 Brini.
     69  * All rights reserved.
     70  *
     71  * This code is derived from software written for Brini by Mark Brinicombe
     72  *
     73  * Redistribution and use in source and binary forms, with or without
     74  * modification, are permitted provided that the following conditions
     75  * are met:
     76  * 1. Redistributions of source code must retain the above copyright
     77  *    notice, this list of conditions and the following disclaimer.
     78  * 2. Redistributions in binary form must reproduce the above copyright
     79  *    notice, this list of conditions and the following disclaimer in the
     80  *    documentation and/or other materials provided with the distribution.
     81  * 3. All advertising materials mentioning features or use of this software
     82  *    must display the following acknowledgement:
     83  *	This product includes software developed by Mark Brinicombe.
     84  * 4. The name of the author may not be used to endorse or promote products
     85  *    derived from this software without specific prior written permission.
     86  *
     87  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     88  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     89  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     90  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     91  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     92  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     93  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     94  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     95  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     96  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
         *
     97  * RiscBSD kernel project
     98  *
     99  * pmap.c
    100  *
    101  * Machine dependent vm stuff
    102  *
    103  * Created      : 20/09/94
    104  */
    105 
    106 /*
    107  * Performance improvements, UVM changes, overhauls and part-rewrites
    108  * were contributed by Neil A. Carson <neil (at) causality.com>.
    109  */
    110 
    111 /*
    112  * The dram block info is currently referenced from the bootconfig.
    113  * This should be placed in a separate structure.
    114  */
    115 
    116 /*
    117  * Special compilation symbols
    118  * PMAP_DEBUG		- Build in pmap_debug_level code
    119  */
    120 
    121 /* Include header files */
    122 
    123 #include "opt_pmap_debug.h"
    124 #include "opt_ddb.h"
    125 
    126 #include <sys/types.h>
    127 #include <sys/param.h>
    128 #include <sys/kernel.h>
    129 #include <sys/systm.h>
    130 #include <sys/proc.h>
    131 #include <sys/malloc.h>
    132 #include <sys/user.h>
    133 #include <sys/pool.h>
    134 #include <sys/cdefs.h>
    135 
    136 #include <uvm/uvm.h>
    137 
    138 #include <machine/bootconfig.h>
    139 #include <machine/bus.h>
    140 #include <machine/pmap.h>
    141 #include <machine/pcb.h>
    142 #include <machine/param.h>
    143 #include <machine/katelib.h>
    144 
    145 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.19 2001/09/10 21:19:35 chris Exp $");
    146 #ifdef PMAP_DEBUG
    147 #define	PDEBUG(_lev_,_stat_) \
    148 	if (pmap_debug_level >= (_lev_)) \
    149         	((_stat_))
    150 int pmap_debug_level = -2;
    151 
    152 /*
    153  * for switching to potentially finer grained debugging
    154  */
    155 #define	PDB_FOLLOW	0x0001
    156 #define	PDB_INIT	0x0002
    157 #define	PDB_ENTER	0x0004
    158 #define	PDB_REMOVE	0x0008
    159 #define	PDB_CREATE	0x0010
    160 #define	PDB_PTPAGE	0x0020
    161 #define	PDB_ASN		0x0040
    162 #define	PDB_BITS	0x0080
    163 #define	PDB_COLLECT	0x0100
    164 #define	PDB_PROTECT	0x0200
    165 #define	PDB_BOOTSTRAP	0x1000
    166 #define	PDB_PARANOIA	0x2000
    167 #define	PDB_WIRING	0x4000
    168 #define	PDB_PVDUMP	0x8000
    169 
    170 int debugmap = 0;
    171 int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
    172 #define	NPDEBUG(_lev_,_stat_) \
    173 	if (pmapdebug & (_lev_)) \
    174         	((_stat_))
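        /*
         * Example usage (a sketch): debug output is guarded by one of the
         * PDB_* category bits above, e.g.
         *
         *	NPDEBUG(PDB_ENTER, printf("pmap_enter called\n"));
         *
         * which only prints when PDB_ENTER is set in pmapdebug.
         */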
    175 
    176 #else	/* PMAP_DEBUG */
    177 #define	PDEBUG(_lev_,_stat_) /* Nothing */
    178 #define NPDEBUG(_lev_,_stat_) /* Nothing */
    179 #endif	/* PMAP_DEBUG */
    180 
    181 struct pmap     kernel_pmap_store;
    182 
    183 /*
    184  * pool that pmap structures are allocated from
    185  */
    186 
    187 struct pool pmap_pmap_pool;
    188 
    189 pagehook_t page_hook0;
    190 pagehook_t page_hook1;
    191 char *memhook;
    192 pt_entry_t msgbufpte;
    193 extern caddr_t msgbufaddr;
    194 
    195 boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
    196 /*
    197  * locking data structures
    198  */
    199 
    200 static struct lock pmap_main_lock;
    201 static struct simplelock pvalloc_lock;
    202 #ifdef LOCKDEBUG
    203 #define PMAP_MAP_TO_HEAD_LOCK() \
    204      (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
    205 #define PMAP_MAP_TO_HEAD_UNLOCK() \
    206      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
    207 
    208 #define PMAP_HEAD_TO_MAP_LOCK() \
    209      (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
    210 #define PMAP_HEAD_TO_MAP_UNLOCK() \
    211      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
    212 #else
    213 #define	PMAP_MAP_TO_HEAD_LOCK()		/* nothing */
    214 #define	PMAP_MAP_TO_HEAD_UNLOCK()	/* nothing */
    215 #define	PMAP_HEAD_TO_MAP_LOCK()		/* nothing */
    216 #define	PMAP_HEAD_TO_MAP_UNLOCK()	/* nothing */
    217 #endif /* LOCKDEBUG */
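        /*
         * A sketch of the intended locking direction (inferred from the lock
         * modes above): code that starts from a pmap and works towards the
         * pv_head lists takes the shared side via PMAP_MAP_TO_HEAD_LOCK(),
         * while code that starts from a pv_head and works back to the pmaps
         * takes the exclusive side via PMAP_HEAD_TO_MAP_LOCK(), so the two
         * lock orderings cannot deadlock against each other.
         */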
    218 
    219 /*
    220  * pv_page management structures: locked by pvalloc_lock
    221  */
    222 
    223 TAILQ_HEAD(pv_pagelist, pv_page);
    224 static struct pv_pagelist pv_freepages;	/* list of pv_pages with free entrys */
    225 static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
    226 static int pv_nfpvents;			/* # of free pv entries */
    227 static struct pv_page *pv_initpage;	/* bootstrap page from kernel_map */
    228 static vaddr_t pv_cachedva;		/* cached VA for later use */
    229 
    230 #define PVE_LOWAT (PVE_PER_PVPAGE / 2)	/* free pv_entry low water mark */
    231 #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
    232 					/* high water mark */
    233 
    234 /*
    235  * local prototypes
    236  */
    237 
    238 static struct pv_entry	*pmap_add_pvpage __P((struct pv_page *, boolean_t));
    239 static struct pv_entry	*pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
    240 #define ALLOCPV_NEED	0	/* need PV now */
    241 #define ALLOCPV_TRY	1	/* just try to allocate, don't steal */
    242 #define ALLOCPV_NONEED	2	/* don't need PV, just growing cache */
    243 static struct pv_entry	*pmap_alloc_pvpage __P((struct pmap *, int));
    244 static void		 pmap_enter_pv __P((struct pv_head *,
    245 					    struct pv_entry *, struct pmap *,
    246 					    vaddr_t, struct vm_page *, int));
    247 static void		 pmap_free_pv __P((struct pmap *, struct pv_entry *));
    248 static void		 pmap_free_pvs __P((struct pmap *, struct pv_entry *));
    249 static void		 pmap_free_pv_doit __P((struct pv_entry *));
    250 static void		 pmap_free_pvpage __P((void));
    251 static boolean_t	 pmap_is_curpmap __P((struct pmap *));
    252 static struct pv_entry	*pmap_remove_pv __P((struct pv_head *, struct pmap *,
    253 			vaddr_t));
    254 #define PMAP_REMOVE_ALL		0	/* remove all mappings */
    255 #define PMAP_REMOVE_SKIPWIRED	1	/* skip wired mappings */
    256 
    257 vsize_t npages;
    258 
    259 static struct vm_page	*pmap_alloc_ptp __P((struct pmap *, vaddr_t, boolean_t));
    260 static struct vm_page	*pmap_get_ptp __P((struct pmap *, vaddr_t, boolean_t));
    261 
    262 extern paddr_t physical_start;
    263 extern paddr_t physical_freestart;
    264 extern paddr_t physical_end;
    265 extern paddr_t physical_freeend;
    266 extern unsigned int free_pages;
    267 extern int max_processes;
    268 
    269 vaddr_t virtual_start;
    270 vaddr_t virtual_end;
    271 
    272 vaddr_t avail_start;
    273 vaddr_t avail_end;
    274 
    275 extern pv_addr_t systempage;
    276 
    277 #define ALLOC_PAGE_HOOK(x, s) \
    278 	x.va = virtual_start; \
    279 	x.pte = (pt_entry_t *)pmap_pte(pmap_kernel(), virtual_start); \
    280 	virtual_start += s;
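        /*
         * For example, ALLOC_PAGE_HOOK(page_hook0, NBPG) (as done in
         * pmap_bootstrap below) reserves NBPG bytes of KVA at the current
         * virtual_start, records a pointer to the kernel pte that maps
         * that address, and advances virtual_start past the reservation.
         */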
    281 
    282 /* Variables used by the L1 page table queue code */
    283 SIMPLEQ_HEAD(l1pt_queue, l1pt);
    284 struct l1pt_queue l1pt_static_queue;	/* head of our static l1 queue */
    285 int l1pt_static_queue_count;		/* items in the static l1 queue */
    286 int l1pt_static_create_count;		/* static l1 items created */
    287 struct l1pt_queue l1pt_queue;		/* head of our l1 queue */
    288 int l1pt_queue_count;			/* items in the l1 queue */
    289 int l1pt_create_count;			/* stat - L1's create count */
    290 int l1pt_reuse_count;			/* stat - L1's reused count */
    291 
    292 /* Local function prototypes (not used outside this file) */
    293 pt_entry_t *pmap_pte __P((struct pmap *pmap, vaddr_t va));
    294 void map_pagetable __P((vaddr_t pagetable, vaddr_t va,
    295     paddr_t pa, unsigned int flags));
    296 void pmap_copy_on_write __P((paddr_t pa));
    297 void pmap_pinit __P((struct pmap *));
    298 void pmap_freepagedir __P((struct pmap *));
    299 
    300 /* Other function prototypes */
    301 extern void bzero_page __P((vaddr_t));
    302 extern void bcopy_page __P((vaddr_t, vaddr_t));
    303 
    304 struct l1pt *pmap_alloc_l1pt __P((void));
    305 static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
    306      vaddr_t l2pa, boolean_t));
    307 
    308 static pt_entry_t *pmap_map_ptes __P((struct pmap *));
    309 static void pmap_unmap_ptes __P((struct pmap *));
    310 
    311 void pmap_vac_me_harder __P((struct pmap *, struct pv_head *,
    312 	    pt_entry_t *, boolean_t));
    313 
    314 /*
    315  * real definition of pv_entry.
    316  */
    317 
    318 struct pv_entry {
    319 	struct pv_entry *pv_next;       /* next pv_entry */
    320 	struct pmap     *pv_pmap;        /* pmap where mapping lies */
    321 	vaddr_t         pv_va;          /* virtual address for mapping */
    322 	int             pv_flags;       /* flags */
    323 	struct vm_page	*pv_ptp;	/* vm_page for the ptp */
    324 };
    325 
    326 /*
    327  * pv_entrys are dynamically allocated in chunks from a single page.
    328  * we keep track of how many pv_entrys are in use for each page and
    329  * we can free pv_entry pages if needed.  there is one lock for the
    330  * entire allocation system.
    331  */
    332 
    333 struct pv_page_info {
    334 	TAILQ_ENTRY(pv_page) pvpi_list;
    335 	struct pv_entry *pvpi_pvfree;
    336 	int pvpi_nfree;
    337 };
    338 
    339 /*
    340  * number of pv_entry's in a pv_page
    341  * (note: won't work on systems where NBPG isn't a constant)
    342  */
    343 
    344 #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
    345 			sizeof(struct pv_entry))
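        /*
         * As a rough worked example (assuming NBPG is 4096 and 32-bit
         * pointers): pv_page_info is about 16 bytes and each pv_entry is
         * about 20 bytes, so PVE_PER_PVPAGE works out to roughly
         * (4096 - 16) / 20 = 204 pv_entrys per pv_page.
         */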
    346 
    347 /*
    348  * a pv_page: where pv_entrys are allocated from
    349  */
    350 
    351 struct pv_page {
    352 	struct pv_page_info pvinfo;
    353 	struct pv_entry pvents[PVE_PER_PVPAGE];
    354 };
    355 
    356 #ifdef MYCROFT_HACK
    357 int mycroft_hack = 0;
    358 #endif
    359 
    360 /* Function to set the debug level of the pmap code */
    361 
    362 #ifdef PMAP_DEBUG
    363 void
    364 pmap_debug(level)
    365 	int level;
    366 {
    367 	pmap_debug_level = level;
    368 	printf("pmap_debug: level=%d\n", pmap_debug_level);
    369 }
    370 #endif	/* PMAP_DEBUG */
    371 
    372 __inline boolean_t
    373 pmap_is_curpmap(struct pmap *pmap)
    374 {
    375     if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
    376 	    || (pmap == pmap_kernel()))
    377 	return (TRUE);
    378     return (FALSE);
    379 }
    380 #include "isadma.h"
    381 
    382 #if NISADMA > 0
    383 /*
    384  * Used to protect memory for ISA DMA bounce buffers.  If, when loading
    385  * pages into the system, memory intersects with any of these ranges,
    386  * the intersecting memory will be loaded into a lower-priority free list.
    387  */
    388 bus_dma_segment_t *pmap_isa_dma_ranges;
    389 int pmap_isa_dma_nranges;
    390 
    391 boolean_t pmap_isa_dma_range_intersect __P((paddr_t, psize_t,
    392 	    paddr_t *, psize_t *));
    393 
    394 /*
    395  * Check if a memory range intersects with an ISA DMA range, and
    396  * return the page-rounded intersection if it does.  The intersection
    397  * will be placed on a lower-priority free list.
    398  */
    399 boolean_t
    400 pmap_isa_dma_range_intersect(pa, size, pap, sizep)
    401 	paddr_t pa;
    402 	psize_t size;
    403 	paddr_t *pap;
    404 	psize_t *sizep;
    405 {
    406 	bus_dma_segment_t *ds;
    407 	int i;
    408 
    409 	if (pmap_isa_dma_ranges == NULL)
    410 		return (FALSE);
    411 
    412 	for (i = 0, ds = pmap_isa_dma_ranges;
    413 	     i < pmap_isa_dma_nranges; i++, ds++) {
    414 		if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
    415 			/*
    416 			 * Beginning of region intersects with this range.
    417 			 */
    418 			*pap = trunc_page(pa);
    419 			*sizep = round_page(min(pa + size,
    420 			    ds->ds_addr + ds->ds_len) - pa);
    421 			return (TRUE);
    422 		}
    423 		if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
    424 			/*
    425 			 * End of region intersects with this range.
    426 			 */
    427 			*pap = trunc_page(ds->ds_addr);
    428 			*sizep = round_page(min((pa + size) - ds->ds_addr,
    429 			    ds->ds_len));
    430 			return (TRUE);
    431 		}
    432 	}
    433 
    434 	/*
    435 	 * No intersection found.
    436 	 */
    437 	return (FALSE);
    438 }
    439 #endif /* NISADMA > 0 */
    440 
    441 /*
    442  * p v _ e n t r y   f u n c t i o n s
    443  */
    444 
    445 /*
    446  * pv_entry allocation functions:
    447  *   the main pv_entry allocation functions are:
    448  *     pmap_alloc_pv: allocate a pv_entry structure
    449  *     pmap_free_pv: free one pv_entry
    450  *     pmap_free_pvs: free a list of pv_entrys
    451  *
    452  * the rest are helper functions
    453  */
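        /*
         * A typical call sequence (a sketch only; real callers appear below
         * and must follow the locking rules documented with each function):
         *
         *	pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
         *	pmap_enter_pv(pvh, pve, pmap, va, ptp, flags);
         *	...
         *	pve = pmap_remove_pv(pvh, pmap, va);
         *	if (pve != NULL)
         *		pmap_free_pv(pmap, pve);
         */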
    454 
    455 /*
    456  * pmap_alloc_pv: inline function to allocate a pv_entry structure
    457  * => we lock pvalloc_lock
    458  * => if we fail, we call out to pmap_alloc_pvpage
    459  * => 3 modes:
    460  *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
    461  *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
    462  *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
    463  *			one now
    464  *
    465  * "try" is for optional functions like pmap_copy().
    466  */
    467 
    468 __inline static struct pv_entry *
    469 pmap_alloc_pv(pmap, mode)
    470 	struct pmap *pmap;
    471 	int mode;
    472 {
    473 	struct pv_page *pvpage;
    474 	struct pv_entry *pv;
    475 
    476 	simple_lock(&pvalloc_lock);
    477 
    478 	if (pv_freepages.tqh_first != NULL) {
    479 		pvpage = pv_freepages.tqh_first;
    480 		pvpage->pvinfo.pvpi_nfree--;
    481 		if (pvpage->pvinfo.pvpi_nfree == 0) {
    482 			/* nothing left in this one? */
    483 			TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
    484 		}
    485 		pv = pvpage->pvinfo.pvpi_pvfree;
    486 #ifdef DIAGNOSTIC
    487 		if (pv == NULL)
    488 			panic("pmap_alloc_pv: pvpi_nfree off");
    489 #endif
    490 		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
    491 		pv_nfpvents--;  /* took one from pool */
    492 	} else {
    493 		pv = NULL;		/* need more of them */
    494 	}
    495 
    496 	/*
    497 	 * if below low water mark or we didn't get a pv_entry we try and
    498 	 * create more pv_entrys ...
    499 	 */
    500 
    501 	if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
    502 		if (pv == NULL)
    503 			pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
    504 					       mode : ALLOCPV_NEED);
    505 		else
    506 			(void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
    507 	}
    508 
    509 	simple_unlock(&pvalloc_lock);
    510 	return(pv);
    511 }
    512 
    513 /*
    514  * pmap_alloc_pvpage: maybe allocate a new pvpage
    515  *
    516  * if need_entry is false: try and allocate a new pv_page
    517  * if need_entry is true: try and allocate a new pv_page and return a
    518  *	new pv_entry from it.   if we are unable to allocate a pv_page
    519  *	we make a last ditch effort to steal a pv_page from some other
    520  *	mapping.    if that fails, we panic...
    521  *
    522  * => we assume that the caller holds pvalloc_lock
    523  */
    524 
    525 static struct pv_entry *
    526 pmap_alloc_pvpage(pmap, mode)
    527 	struct pmap *pmap;
    528 	int mode;
    529 {
    530 	struct vm_page *pg;
    531 	struct pv_page *pvpage;
    532 	struct pv_entry *pv;
    533 	int s;
    534 
    535 	/*
    536 	 * if we need_entry and we've got unused pv_pages, allocate from there
    537 	 */
    538 
    539 	if (mode != ALLOCPV_NONEED && pv_unusedpgs.tqh_first != NULL) {
    540 
    541 		/* move it to pv_freepages list */
    542 		pvpage = pv_unusedpgs.tqh_first;
    543 		TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
    544 		TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);
    545 
    546 		/* allocate a pv_entry */
    547 		pvpage->pvinfo.pvpi_nfree--;	/* can't go to zero */
    548 		pv = pvpage->pvinfo.pvpi_pvfree;
    549 #ifdef DIAGNOSTIC
    550 		if (pv == NULL)
    551 			panic("pmap_alloc_pvpage: pvpi_nfree off");
    552 #endif
    553 		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
    554 
    555 		pv_nfpvents--;  /* took one from pool */
    556 		return(pv);
    557 	}
    558 
    559 	/*
    560 	 *  see if we've got a cached unmapped VA that we can map a page in.
    561 	 * if not, try to allocate one.
    562 	 */
    563 
    564 	s = splvm();   /* must protect kmem_map/kmem_object with splvm! */
    565 	if (pv_cachedva == 0) {
    566 		pv_cachedva = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
    567 		    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
    568 		if (pv_cachedva == 0) {
    569 			splx(s);
    570 			return (NULL);
    571 		}
    572 	}
    573 
    574 	/*
    575 	 * we have a VA, now let's try and allocate a page in the object
    576 	 * note: we are still holding splvm to protect kmem_object
    577 	 */
    578 
    579 	if (!simple_lock_try(&uvmexp.kmem_object->vmobjlock)) {
    580 		splx(s);
    581 		return (NULL);
    582 	}
    583 
    584 	pg = uvm_pagealloc(uvmexp.kmem_object, pv_cachedva -
    585 			   vm_map_min(kernel_map),
    586 			   NULL, UVM_PGA_USERESERVE);
    587 	if (pg)
    588 		pg->flags &= ~PG_BUSY;	/* never busy */
    589 
    590 	simple_unlock(&uvmexp.kmem_object->vmobjlock);
    591 	splx(s);
    592 	/* splvm now dropped */
    593 
    594 	if (pg == NULL)
    595 		return (NULL);
    596 
    597 	/*
    598 	 * add a mapping for our new pv_page and free its entrys (save one!)
    599 	 *
    600 	 * NOTE: If we are allocating a PV page for the kernel pmap, the
    601 	 * pmap is already locked!  (...but entering the mapping is safe...)
    602 	 */
    603 
    604 	pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL);
    605 	pmap_update(pmap_kernel());
    606 	pvpage = (struct pv_page *) pv_cachedva;
    607 	pv_cachedva = 0;
    608 	return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
    609 }
    610 
    611 /*
    612  * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
    613  *
    614  * => caller must hold pvalloc_lock
    615  * => if need_entry is true, we allocate and return one pv_entry
    616  */
    617 
    618 static struct pv_entry *
    619 pmap_add_pvpage(pvp, need_entry)
    620 	struct pv_page *pvp;
    621 	boolean_t need_entry;
    622 {
    623 	int tofree, lcv;
    624 
    625 	/* do we need to return one? */
    626 	tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
    627 
    628 	pvp->pvinfo.pvpi_pvfree = NULL;
    629 	pvp->pvinfo.pvpi_nfree = tofree;
    630 	for (lcv = 0 ; lcv < tofree ; lcv++) {
    631 		pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
    632 		pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
    633 	}
    634 	if (need_entry)
    635 		TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
    636 	else
    637 		TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
    638 	pv_nfpvents += tofree;
    639 	return((need_entry) ? &pvp->pvents[lcv] : NULL);
    640 }
    641 
    642 /*
    643  * pmap_free_pv_doit: actually free a pv_entry
    644  *
    645  * => do not call this directly!  instead use either
    646  *    1. pmap_free_pv ==> free a single pv_entry
    647  *    2. pmap_free_pvs => free a list of pv_entrys
    648  * => we must be holding pvalloc_lock
    649  */
    650 
    651 __inline static void
    652 pmap_free_pv_doit(pv)
    653 	struct pv_entry *pv;
    654 {
    655 	struct pv_page *pvp;
    656 
    657 	pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
    658 	pv_nfpvents++;
    659 	pvp->pvinfo.pvpi_nfree++;
    660 
    661 	/* nfree == 1 => fully allocated page just became partly allocated */
    662 	if (pvp->pvinfo.pvpi_nfree == 1) {
    663 		TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
    664 	}
    665 
    666 	/* free it */
    667 	pv->pv_next = pvp->pvinfo.pvpi_pvfree;
    668 	pvp->pvinfo.pvpi_pvfree = pv;
    669 
    670 	/*
    671 	 * are all pv_page's pv_entry's free?  move it to unused queue.
    672 	 */
    673 
    674 	if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
    675 		TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
    676 		TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
    677 	}
    678 }
    679 
    680 /*
    681  * pmap_free_pv: free a single pv_entry
    682  *
    683  * => we gain the pvalloc_lock
    684  */
    685 
    686 __inline static void
    687 pmap_free_pv(pmap, pv)
    688 	struct pmap *pmap;
    689 	struct pv_entry *pv;
    690 {
    691 	simple_lock(&pvalloc_lock);
    692 	pmap_free_pv_doit(pv);
    693 
    694 	/*
    695 	 * Can't free the PV page if the PV entries were associated with
    696 	 * the kernel pmap; the pmap is already locked.
    697 	 */
    698 	if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
    699 	    pmap != pmap_kernel())
    700 		pmap_free_pvpage();
    701 
    702 	simple_unlock(&pvalloc_lock);
    703 }
    704 
    705 /*
    706  * pmap_free_pvs: free a list of pv_entrys
    707  *
    708  * => we gain the pvalloc_lock
    709  */
    710 
    711 __inline static void
    712 pmap_free_pvs(pmap, pvs)
    713 	struct pmap *pmap;
    714 	struct pv_entry *pvs;
    715 {
    716 	struct pv_entry *nextpv;
    717 
    718 	simple_lock(&pvalloc_lock);
    719 
    720 	for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
    721 		nextpv = pvs->pv_next;
    722 		pmap_free_pv_doit(pvs);
    723 	}
    724 
    725 	/*
    726 	 * Can't free the PV page if the PV entries were associated with
    727 	 * the kernel pmap; the pmap is already locked.
    728 	 */
    729 	if (pv_nfpvents > PVE_HIWAT && pv_unusedpgs.tqh_first != NULL &&
    730 	    pmap != pmap_kernel())
    731 		pmap_free_pvpage();
    732 
    733 	simple_unlock(&pvalloc_lock);
    734 }
    735 
    736 
    737 /*
    738  * pmap_free_pvpage: try and free an unused pv_page structure
    739  *
    740  * => assume caller is holding the pvalloc_lock and that
    741  *	there is a page on the pv_unusedpgs list
    742  * => if we can't get a lock on the kmem_map we try again later
    743  * => note: analysis of MI kmem_map usage [i.e. malloc/free] shows
    744  *	that if we can lock the kmem_map then we are not already
    745  *	holding kmem_object's lock.
    746  */
    747 
    748 static void
    749 pmap_free_pvpage()
    750 {
    751 	int s;
    752 	struct vm_map *map;
    753 	struct vm_map_entry *dead_entries;
    754 	struct pv_page *pvp;
    755 
    756 	s = splvm(); /* protect kmem_map */
    757 
    758 	pvp = pv_unusedpgs.tqh_first;
    759 
    760 	/*
    761 	 * note: watch out for pv_initpage which is allocated out of
    762 	 * kernel_map rather than kmem_map.
    763 	 */
    764 	if (pvp == pv_initpage)
    765 		map = kernel_map;
    766 	else
    767 		map = kmem_map;
    768 
    769 	if (vm_map_lock_try(map)) {
    770 
    771 		/* remove pvp from pv_unusedpgs */
    772 		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
    773 
    774 		/* unmap the page */
    775 		dead_entries = NULL;
    776 		uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
    777 		    &dead_entries);
    778 		vm_map_unlock(map);
    779 
    780 		if (dead_entries != NULL)
    781 			uvm_unmap_detach(dead_entries, 0);
    782 
    783 		pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
    784 	}
    785 
    786 	if (pvp == pv_initpage)
    787 		/* no more initpage, we've freed it */
    788 		pv_initpage = NULL;
    789 
    790 	splx(s);
    791 }
    792 
    793 /*
    794  * main pv_entry manipulation functions:
    795  *   pmap_enter_pv: enter a mapping onto a pv_head list
    796  *   pmap_remove_pv: remove a mapping from a pv_head list
    797  *
    798  * NOTE: pmap_enter_pv expects to lock the pvh itself
    799  *       pmap_remove_pv expects the caller to lock the pvh before calling
    800  */
    801 
    802 /*
    803  * pmap_enter_pv: enter a mapping onto a pv_head list
    804  *
    805  * => caller should hold the proper lock on pmap_main_lock
    806  * => caller should have pmap locked
    807  * => we will gain the lock on the pv_head and allocate the new pv_entry
    808  * => caller should adjust ptp's wire_count before calling
    809  * => caller should not adjust pmap's wire_count
    810  */
    811 
    812 __inline static void
    813 pmap_enter_pv(pvh, pve, pmap, va, ptp, flags)
    814 	struct pv_head *pvh;
    815 	struct pv_entry *pve;	/* preallocated pve for us to use */
    816 	struct pmap *pmap;
    817 	vaddr_t va;
    818 	struct vm_page *ptp;	/* PTP in pmap that maps this VA */
    819 	int flags;
    820 {
    821 	pve->pv_pmap = pmap;
    822 	pve->pv_va = va;
    823 	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
    824 	pve->pv_flags = flags;
    825 	simple_lock(&pvh->pvh_lock);		/* lock pv_head */
    826 	pve->pv_next = pvh->pvh_list;		/* add to ... */
    827 	pvh->pvh_list = pve;			/* ... locked list */
    828 	simple_unlock(&pvh->pvh_lock);		/* unlock, done! */
    829 	if (pve->pv_flags & PT_W)
    830 		++pmap->pm_stats.wired_count;
    831 }
    832 
    833 /*
    834  * pmap_remove_pv: try to remove a mapping from a pv_list
    835  *
    836  * => caller should hold proper lock on pmap_main_lock
    837  * => pmap should be locked
    838  * => caller should hold lock on pv_head [so that attrs can be adjusted]
    839  * => caller should adjust ptp's wire_count and free PTP if needed
    840  * => caller should NOT adjust pmap's wire_count
    841  * => we return the removed pve
    842  */
    843 
    844 __inline static struct pv_entry *
    845 pmap_remove_pv(pvh, pmap, va)
    846 	struct pv_head *pvh;
    847 	struct pmap *pmap;
    848 	vaddr_t va;
    849 {
    850 	struct pv_entry *pve, **prevptr;
    851 
    852 	prevptr = &pvh->pvh_list;		/* previous pv_entry pointer */
    853 	pve = *prevptr;
    854 	while (pve) {
    855 		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
    856 			*prevptr = pve->pv_next;		/* remove it! */
    857 			if (pve->pv_flags & PT_W)
    858 			    --pmap->pm_stats.wired_count;
    859 			break;
    860 		}
    861 		prevptr = &pve->pv_next;		/* previous pointer */
    862 		pve = pve->pv_next;			/* advance */
    863 	}
    864 	return(pve);				/* return removed pve */
    865 }
    866 
    867 /*
    868  *
    869  * pmap_modify_pv: Update pv flags
    870  *
    871  * => caller should hold lock on pv_head [so that attrs can be adjusted]
    872  * => caller should NOT adjust pmap's wire_count
    873  * => we return the old flags
    874  *
    875  * Modify a physical-virtual mapping in the pv table
    876  */
    877 
    878 /*__inline */ u_int
    879 pmap_modify_pv(pmap, va, pvh, bic_mask, eor_mask)
    880 	struct pmap *pmap;
    881 	vaddr_t va;
    882 	struct pv_head *pvh;
    883 	u_int bic_mask;
    884 	u_int eor_mask;
    885 {
    886 	struct pv_entry *npv;
    887 	u_int flags, oflags;
    888 
    889 	/*
    890 	 * There is at least one VA mapping this page.
    891 	 */
    892 
    893 	for (npv = pvh->pvh_list; npv; npv = npv->pv_next) {
    894 		if (pmap == npv->pv_pmap && va == npv->pv_va) {
    895 			oflags = npv->pv_flags;
    896 			npv->pv_flags = flags =
    897 			    ((oflags & ~bic_mask) ^ eor_mask);
    898 			if ((flags ^ oflags) & PT_W) {
    899 				if (flags & PT_W)
    900 					++pmap->pm_stats.wired_count;
    901 				else
    902 					--pmap->pm_stats.wired_count;
    903 			}
    904 			return (oflags);
    905 		}
    906 	}
    907 	return (0);
    908 }
    909 
    910 
    911 /*
    912  * Map the specified level 2 pagetable into the level 1 page table for
    913  * the given pmap to cover a chunk of virtual address space starting from the
    914  * address specified.
    915  */
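        /*
         * For example (assuming PDSHIFT is 20, i.e. 1MB L1 sections): a va
         * of 0x00802000 gives ptva = (0x00802000 >> 20) & ~3 = 8, so L1
         * entries 8..11 are pointed at the four 1KB L2 tables held in the
         * single page at l2pa, together covering 4MB of virtual space.
         */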
    916 static /*__inline*/ void
    917 pmap_map_in_l1(pmap, va, l2pa, selfref)
    918 	struct pmap *pmap;
    919 	vaddr_t va, l2pa;
    920 	boolean_t selfref;
    921 {
    922 	vaddr_t ptva;
    923 
    924 	/* Calculate the index into the L1 page table. */
    925 	ptva = (va >> PDSHIFT) & ~3;
    926 
    927 	PDEBUG(0, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
    928 	    pmap->pm_pdir, L1_PTE(l2pa), ptva));
    929 
    930 	/* Map page table into the L1. */
    931 	pmap->pm_pdir[ptva + 0] = L1_PTE(l2pa + 0x000);
    932 	pmap->pm_pdir[ptva + 1] = L1_PTE(l2pa + 0x400);
    933 	pmap->pm_pdir[ptva + 2] = L1_PTE(l2pa + 0x800);
    934 	pmap->pm_pdir[ptva + 3] = L1_PTE(l2pa + 0xc00);
    935 
    936 	PDEBUG(0, printf("pt self reference %lx in %lx\n",
    937 	    L2_PTE_NC_NB(l2pa, AP_KRW), pmap->pm_vptpt));
    938 
    939 	/* Map the page table into the page table area. */
    940 	if (selfref) {
    941 		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) =
    942 			L2_PTE_NC_NB(l2pa, AP_KRW);
    943 	}
    944 	/* XXX should be a purge */
    945 /*	cpu_tlb_flushD();*/
    946 }
    947 
    948 #if 0
    949 static /*__inline*/ void
    950 pmap_unmap_in_l1(pmap, va)
    951 	struct pmap *pmap;
    952 	vaddr_t va;
    953 {
    954 	vaddr_t ptva;
    955 
    956 	/* Calculate the index into the L1 page table. */
    957 	ptva = (va >> PDSHIFT) & ~3;
    958 
    959 	/* Unmap page table from the L1. */
    960 	pmap->pm_pdir[ptva + 0] = 0;
    961 	pmap->pm_pdir[ptva + 1] = 0;
    962 	pmap->pm_pdir[ptva + 2] = 0;
    963 	pmap->pm_pdir[ptva + 3] = 0;
    964 
    965 	/* Unmap the page table from the page table area. */
    966 	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
    967 
    968 	/* XXX should be a purge */
    969 /*	cpu_tlb_flushD();*/
    970 }
    971 #endif
    972 
    973 
    974 /*
    975  *	Used to map a range of physical addresses into kernel
    976  *	virtual address space.
    977  *
    978  *	For now, VM is already on, we only need to map the
    979  *	specified memory.
    980  */
    981 vaddr_t
    982 pmap_map(va, spa, epa, prot)
    983 	vaddr_t va, spa, epa;
    984 	int prot;
    985 {
    986 	while (spa < epa) {
    987 		pmap_enter(pmap_kernel(), va, spa, prot, 0);
    988 		va += NBPG;
    989 		spa += NBPG;
    990 	}
    991 	pmap_update(pmap_kernel());
    992 	return(va);
    993 }
    994 
    995 
    996 /*
    997  * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
    998  *
    999  * bootstrap the pmap system. This is called from initarm and allows
   1000  * the pmap system to initialise any structures it requires.
   1001  *
   1002  * Currently this sets up the kernel_pmap that is statically allocated
   1003  * and also allocates virtual addresses for certain page hooks.
   1004  * Currently page hooks are allocated that are used
   1005  * to zero and copy physical pages of memory.
   1006  * It also initialises the start and end address of the kernel data space.
   1007  */
   1008 extern paddr_t physical_freestart;
   1009 extern paddr_t physical_freeend;
   1010 
   1011 char *boot_head;
   1012 
   1013 void
   1014 pmap_bootstrap(kernel_l1pt, kernel_ptpt)
   1015 	pd_entry_t *kernel_l1pt;
   1016 	pv_addr_t kernel_ptpt;
   1017 {
   1018 	int loop;
   1019 	paddr_t start, end;
   1020 #if NISADMA > 0
   1021 	paddr_t istart;
   1022 	psize_t isize;
   1023 #endif
   1024 
   1025 	pmap_kernel()->pm_pdir = kernel_l1pt;
   1026 	pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
   1027 	pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
   1028 	simple_lock_init(&pmap_kernel()->pm_lock);
   1029 	pmap_kernel()->pm_obj.pgops = NULL;
   1030 	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
   1031 	pmap_kernel()->pm_obj.uo_npages = 0;
   1032 	pmap_kernel()->pm_obj.uo_refs = 1;
   1033 
   1034 	/*
   1035 	 * Initialize PAGE_SIZE-dependent variables.
   1036 	 */
   1037 	uvm_setpagesize();
   1038 
   1039 	npages = 0;
   1040 	loop = 0;
   1041 	while (loop < bootconfig.dramblocks) {
   1042 		start = (paddr_t)bootconfig.dram[loop].address;
   1043 		end = start + (bootconfig.dram[loop].pages * NBPG);
   1044 		if (start < physical_freestart)
   1045 			start = physical_freestart;
   1046 		if (end > physical_freeend)
   1047 			end = physical_freeend;
   1048 #if 0
   1049 		printf("%d: %lx -> %lx\n", loop, start, end - 1);
   1050 #endif
   1051 #if NISADMA > 0
   1052 		if (pmap_isa_dma_range_intersect(start, end - start,
   1053 		    &istart, &isize)) {
   1054 			/*
   1055 			 * Place the pages that intersect with the
   1056 			 * ISA DMA range onto the ISA DMA free list.
   1057 			 */
   1058 #if 0
   1059 			printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
   1060 			    istart + isize - 1);
   1061 #endif
   1062 			uvm_page_physload(atop(istart),
   1063 			    atop(istart + isize), atop(istart),
   1064 			    atop(istart + isize), VM_FREELIST_ISADMA);
   1065 			npages += atop(istart + isize) - atop(istart);
   1066 
   1067 			/*
   1068 			 * Load the pieces that come before
   1069 			 * the intersection into the default
   1070 			 * free list.
   1071 			 */
   1072 			if (start < istart) {
   1073 #if 0
   1074 				printf("    BEFORE 0x%lx -> 0x%lx\n",
   1075 				    start, istart - 1);
   1076 #endif
   1077 				uvm_page_physload(atop(start),
   1078 				    atop(istart), atop(start),
   1079 				    atop(istart), VM_FREELIST_DEFAULT);
   1080 				npages += atop(istart) - atop(start);
   1081 			}
   1082 
   1083 			/*
   1084 			 * Load the pieces that come after
   1085 			 * the intersection into the default
   1086 			 * free list.
   1087 			 */
   1088 			if ((istart + isize) < end) {
   1089 #if 0
   1090 				printf("     AFTER 0x%lx -> 0x%lx\n",
   1091 				    (istart + isize), end - 1);
   1092 #endif
   1093 				uvm_page_physload(atop(istart + isize),
   1094 				    atop(end), atop(istart + isize),
   1095 				    atop(end), VM_FREELIST_DEFAULT);
   1096 				npages += atop(end) - atop(istart + isize);
   1097 			}
   1098 		} else {
   1099 			uvm_page_physload(atop(start), atop(end),
   1100 			    atop(start), atop(end), VM_FREELIST_DEFAULT);
   1101 			npages += atop(end) - atop(start);
   1102 		}
   1103 #else	/* NISADMA > 0 */
   1104 		uvm_page_physload(atop(start), atop(end),
   1105 		    atop(start), atop(end), VM_FREELIST_DEFAULT);
   1106 		npages += atop(end) - atop(start);
   1107 #endif /* NISADMA > 0 */
   1108 		++loop;
   1109 	}
   1110 
   1111 #ifdef MYCROFT_HACK
   1112 	printf("npages = %ld\n", npages);
   1113 #endif
   1114 
   1115 	virtual_start = KERNEL_VM_BASE;
   1116 	virtual_end = virtual_start + KERNEL_VM_SIZE - 1;
   1117 
   1118 	ALLOC_PAGE_HOOK(page_hook0, NBPG);
   1119 	ALLOC_PAGE_HOOK(page_hook1, NBPG);
   1120 
   1121 	/*
   1122 	 * The mem special device needs a virtual hook but we don't
   1123 	 * need a pte
   1124 	 */
   1125 	memhook = (char *)virtual_start;
   1126 	virtual_start += NBPG;
   1127 
   1128 	msgbufaddr = (caddr_t)virtual_start;
   1129 	msgbufpte = (pt_entry_t)pmap_pte(pmap_kernel(), virtual_start);
   1130 	virtual_start += round_page(MSGBUFSIZE);
   1131 
   1132 	/*
   1133 	 * init the static-global locks and global lists.
   1134 	 */
   1135 	spinlockinit(&pmap_main_lock, "pmaplk", 0);
   1136 	simple_lock_init(&pvalloc_lock);
   1137 	TAILQ_INIT(&pv_freepages);
   1138 	TAILQ_INIT(&pv_unusedpgs);
   1139 
   1140 	/*
   1141 	 * compute the number of pages we have and then allocate RAM
   1142 	 * for each page's pv_head and saved attributes.
   1143 	 */
   1144 	{
   1145 	       	int npages, lcv;
   1146 		vsize_t s;
   1147 
   1148 		npages = 0;
   1149 		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
   1150 			npages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
   1151 		s = (vsize_t) (sizeof(struct pv_head) * npages +
   1152 				sizeof(char) * npages);
   1153 		s = round_page(s); /* round up */
   1154 		boot_head = (char *)uvm_pageboot_alloc(s);
   1155 		if (boot_head == 0)
   1156 			panic("pmap_init: unable to allocate pv_heads");
   1157 		bzero((char *)boot_head, s);
   1158 	}
   1159 
   1160 	/*
   1161 	 * initialize the pmap pool.
   1162 	 */
   1163 
   1164 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
   1165 		  0, pool_page_alloc_nointr, pool_page_free_nointr, M_VMPMAP);
   1166 
   1167 	cpu_cache_cleanD();
   1168 }
   1169 
   1170 /*
   1171  * void pmap_init(void)
   1172  *
   1173  * Initialize the pmap module.
   1174  * Called by vm_init() in vm/vm_init.c in order to initialise
   1175  * any structures that the pmap system needs to map virtual memory.
   1176  */
   1177 
   1178 extern int physmem;
   1179 
   1180 void
   1181 pmap_init()
   1182 {
   1183 	int lcv, i;
   1184 
   1185 #ifdef MYCROFT_HACK
   1186 	printf("physmem = %d\n", physmem);
   1187 #endif
   1188 
   1189 	/*
   1190 	 * Set the available memory vars - These do not map to real memory
   1191 	 * addresses (and cannot, as the physical memory is fragmented).
   1192 	 * They are used by ps for %mem calculations.
   1193 	 * One could argue whether this should be the entire memory or just
   1194 	 * the memory that is usable in a user process.
   1195 	 */
   1196 	avail_start = 0;
   1197 	avail_end = physmem * NBPG;
   1198 
   1199 	/* allocate pv_head stuff first */
   1200 	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
   1201 		vm_physmem[lcv].pmseg.pvhead = (struct pv_head *)boot_head;
   1202 		boot_head = (char *)(vaddr_t)(vm_physmem[lcv].pmseg.pvhead +
   1203 				 (vm_physmem[lcv].end - vm_physmem[lcv].start));
   1204 		for (i = 0;
   1205 		     i < (vm_physmem[lcv].end - vm_physmem[lcv].start); i++) {
   1206 			simple_lock_init(
   1207 			    &vm_physmem[lcv].pmseg.pvhead[i].pvh_lock);
   1208 		}
   1209 	}
   1210 
   1211 	/* now allocate attrs */
   1212 	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
   1213 		vm_physmem[lcv].pmseg.attrs = (char *) boot_head;
   1214 		boot_head = (char *)(vaddr_t)(vm_physmem[lcv].pmseg.attrs +
   1215 				 (vm_physmem[lcv].end - vm_physmem[lcv].start));
   1216 	}
   1217 
   1218 	/*
   1219 	 * now we need to free enough pv_entry structures to allow us to get
   1220 	 * the kmem_map/kmem_object allocated and inited (done after this
   1221 	 * function is finished).  to do this we allocate one bootstrap page out
   1222 	 * of kernel_map and use it to provide an initial pool of pv_entry
   1223 	 * structures.   we never free this page.
   1224 	 */
   1225 
   1226 	pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
   1227 	if (pv_initpage == NULL)
   1228 		panic("pmap_init: pv_initpage");
   1229 	pv_cachedva = 0;   /* a VA we have allocated but not used yet */
   1230 	pv_nfpvents = 0;
   1231 	(void) pmap_add_pvpage(pv_initpage, FALSE);
   1232 
   1233 #ifdef MYCROFT_HACK
   1234 	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
   1235 		printf("physseg[%d] pvent=%p attrs=%p start=%ld end=%ld\n",
   1236 		    lcv,
   1237 		    vm_physmem[lcv].pmseg.pvent, vm_physmem[lcv].pmseg.attrs,
   1238 		    vm_physmem[lcv].start, vm_physmem[lcv].end);
   1239 	}
   1240 #endif
   1241 	pmap_initialized = TRUE;
   1242 
   1243 	/* Initialise our L1 page table queues and counters */
   1244 	SIMPLEQ_INIT(&l1pt_static_queue);
   1245 	l1pt_static_queue_count = 0;
   1246 	l1pt_static_create_count = 0;
   1247 	SIMPLEQ_INIT(&l1pt_queue);
   1248 	l1pt_queue_count = 0;
   1249 	l1pt_create_count = 0;
   1250 	l1pt_reuse_count = 0;
   1251 }
   1252 
   1253 /*
   1254  * pmap_postinit()
   1255  *
   1256  * This routine is called after the vm and kmem subsystems have been
   1257  * initialised. This allows the pmap code to perform any initialisation
   1258  * that can only be done once the memory allocation is in place.
   1259  */
   1260 
   1261 void
   1262 pmap_postinit()
   1263 {
   1264 	int loop;
   1265 	struct l1pt *pt;
   1266 
   1267 #ifdef PMAP_STATIC_L1S
   1268 	for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
   1269 #else	/* PMAP_STATIC_L1S */
   1270 	for (loop = 0; loop < max_processes; ++loop) {
   1271 #endif	/* PMAP_STATIC_L1S */
   1272 		/* Allocate a L1 page table */
   1273 		pt = pmap_alloc_l1pt();
   1274 		if (!pt)
   1275 			panic("Cannot allocate static L1 page tables\n");
   1276 
   1277 		/* Clean it */
   1278 		bzero((void *)pt->pt_va, PD_SIZE);
   1279 		pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
   1280 		/* Add the page table to the queue */
   1281 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
   1282 		++l1pt_static_queue_count;
   1283 		++l1pt_static_create_count;
   1284 	}
   1285 }
   1286 
   1287 
   1288 /*
   1289  * Create and return a physical map.
   1290  *
   1291  * If the size specified for the map is zero, the map is an actual physical
   1292  * map, and may be referenced by the hardware.
   1293  *
   1294  * If the size specified is non-zero, the map will be used in software only,
   1295  * and is bounded by that size.
   1296  */
   1297 
   1298 pmap_t
   1299 pmap_create()
   1300 {
   1301 	struct pmap *pmap;
   1302 
   1303 	/*
   1304 	 * Fetch pmap entry from the pool
   1305 	 */
   1306 
   1307 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
   1308 	/* XXX is this really needed! */
   1309 	memset(pmap, 0, sizeof(*pmap));
   1310 
   1311 	simple_lock_init(&pmap->pm_obj.vmobjlock);
   1312 	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
   1313 	TAILQ_INIT(&pmap->pm_obj.memq);
   1314 	pmap->pm_obj.uo_npages = 0;
   1315 	pmap->pm_obj.uo_refs = 1;
   1316 	pmap->pm_stats.wired_count = 0;
   1317 	pmap->pm_stats.resident_count = 1;
   1318 
   1319 	/* Now init the machine part of the pmap */
   1320 	pmap_pinit(pmap);
   1321 	return(pmap);
   1322 }
   1323 
   1324 /*
   1325  * pmap_alloc_l1pt()
   1326  *
   1327  * This routine allocates physical and virtual memory for an L1 page table
   1328  * and wires it.
   1329  * An l1pt structure is returned to describe the allocated page table.
   1330  *
   1331  * This routine is allowed to fail if the required memory cannot be allocated.
   1332  * In this case NULL is returned.
   1333  */
   1334 
   1335 struct l1pt *
   1336 pmap_alloc_l1pt(void)
   1337 {
   1338 	paddr_t pa;
   1339 	vaddr_t va;
   1340 	struct l1pt *pt;
   1341 	int error;
   1342 	struct vm_page *m;
   1343 	pt_entry_t *ptes;
   1344 
   1345 	/* Allocate virtual address space for the L1 page table */
   1346 	va = uvm_km_valloc(kernel_map, PD_SIZE);
   1347 	if (va == 0) {
   1348 #ifdef DIAGNOSTIC
   1349 		printf("pmap: Cannot allocate pageable memory for L1\n");
   1350 #endif	/* DIAGNOSTIC */
   1351 		return(NULL);
   1352 	}
   1353 
   1354 	/* Allocate memory for the l1pt structure */
   1355 	pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
   1356 
   1357 	/*
   1358 	 * Allocate pages from the VM system.
   1359 	 */
   1360 	TAILQ_INIT(&pt->pt_plist);
   1361 	error = uvm_pglistalloc(PD_SIZE, physical_start, physical_end,
   1362 	    PD_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
   1363 	if (error) {
   1364 #ifdef DIAGNOSTIC
   1365 		printf("pmap: Cannot allocate physical memory for L1 (%d)\n",
   1366 		    error);
   1367 #endif	/* DIAGNOSTIC */
   1368 		/* Release the resources we already have claimed */
   1369 		free(pt, M_VMPMAP);
   1370 		uvm_km_free(kernel_map, va, PD_SIZE);
   1371 		return(NULL);
   1372 	}
   1373 
   1374 	/* Map our physical pages into our virtual space */
   1375 	pt->pt_va = va;
   1376 	m = pt->pt_plist.tqh_first;
   1377 	ptes = pmap_map_ptes(pmap_kernel());
   1378 	while (m && va < (pt->pt_va + PD_SIZE)) {
   1379 		pa = VM_PAGE_TO_PHYS(m);
   1380 
   1381 		pmap_enter(pmap_kernel(), va, pa,
   1382 		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
   1383 
   1384 		/* Revoke cacheability and bufferability */
   1385 		/* XXX should be done better than this */
   1386 		ptes[arm_byte_to_page(va)] &= ~(PT_C | PT_B);
   1387 
   1388 		va += NBPG;
   1389 		m = m->pageq.tqe_next;
   1390 	}
   1391 	pmap_unmap_ptes(pmap_kernel());
   1392 	pmap_update(pmap_kernel());
   1393 
   1394 #ifdef DIAGNOSTIC
   1395 	if (m)
   1396 		panic("pmap_alloc_l1pt: pglist not empty\n");
   1397 #endif	/* DIAGNOSTIC */
   1398 
   1399 	pt->pt_flags = 0;
   1400 	return(pt);
   1401 }
   1402 
   1403 /*
   1404  * Free a L1 page table previously allocated with pmap_alloc_l1pt().
   1405  */
   1406 void
   1407 pmap_free_l1pt(pt)
   1408 	struct l1pt *pt;
   1409 {
   1410 	/* Separate the physical memory from the virtual space */
   1411 	pmap_remove(pmap_kernel(), pt->pt_va, pt->pt_va + PD_SIZE);
   1412 	pmap_update(pmap_kernel());
   1413 
   1414 	/* Return the physical memory */
   1415 	uvm_pglistfree(&pt->pt_plist);
   1416 
   1417 	/* Free the virtual space */
   1418 	uvm_km_free(kernel_map, pt->pt_va, PD_SIZE);
   1419 
   1420 	/* Free the l1pt structure */
   1421 	free(pt, M_VMPMAP);
   1422 }
   1423 
   1424 /*
   1425  * Allocate a page directory.
   1426  * This routine will either allocate a new page directory from the pool
   1427  * of L1 page tables currently held by the kernel or it will allocate
   1428  * a new one via pmap_alloc_l1pt().
   1429  * It will then initialise the l1 page table for use.
   1430  */
   1431 int
   1432 pmap_allocpagedir(pmap)
   1433 	struct pmap *pmap;
   1434 {
   1435 	paddr_t pa;
   1436 	struct l1pt *pt;
   1437 	pt_entry_t *pte;
   1438 
   1439 	PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
   1440 
   1441 	/* Do we have any spare L1's lying around ? */
   1442 	if (l1pt_static_queue_count) {
   1443 		--l1pt_static_queue_count;
   1444 		pt = l1pt_static_queue.sqh_first;
   1445 		SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
   1446 	} else if (l1pt_queue_count) {
   1447 		--l1pt_queue_count;
   1448 		pt = l1pt_queue.sqh_first;
   1449 		SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
   1450 		++l1pt_reuse_count;
   1451 	} else {
   1452 		pt = pmap_alloc_l1pt();
   1453 		if (!pt)
   1454 			return(ENOMEM);
   1455 		++l1pt_create_count;
   1456 	}
   1457 
   1458 	/* Store the pointer to the l1 descriptor in the pmap. */
   1459 	pmap->pm_l1pt = pt;
   1460 
   1461 	/* Get the physical address of the start of the l1 */
   1462 	pa = VM_PAGE_TO_PHYS(pt->pt_plist.tqh_first);
   1463 
   1464 	/* Store the virtual address of the l1 in the pmap. */
   1465 	pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
   1466 
   1467 	/* Clean the L1 if it is dirty */
   1468 	if (!(pt->pt_flags & PTFLAG_CLEAN))
   1469 		bzero((void *)pmap->pm_pdir, (PD_SIZE - KERNEL_PD_SIZE));
   1470 
   1471 	/* Do we already have the kernel mappings ? */
   1472 	if (!(pt->pt_flags & PTFLAG_KPT)) {
   1473 		/* Duplicate the kernel mapping i.e. all mappings 0xf0000000+ */
   1474 
   1475 		bcopy((char *)pmap_kernel()->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
   1476 		    (char *)pmap->pm_pdir + (PD_SIZE - KERNEL_PD_SIZE),
   1477 		    KERNEL_PD_SIZE);
   1478 		pt->pt_flags |= PTFLAG_KPT;
   1479 	}
   1480 
   1481 	/* Allocate a page table to map all the page tables for this pmap */
   1482 
   1483 #ifdef DIAGNOSTIC
   1484 	if (pmap->pm_vptpt) {
   1485 		/* XXX What if we have one already ? */
   1486 		panic("pmap_allocpagedir: have pt already\n");
   1487 	}
   1488 #endif	/* DIAGNOSTIC */
   1489 	pmap->pm_vptpt = uvm_km_zalloc(kernel_map, NBPG);
   1490 	if (pmap->pm_vptpt == 0) {
   1491 		pmap_freepagedir(pmap);
   1492 		return(ENOMEM);
   1493 	}
   1494 
   1495 	(void) pmap_extract(pmap_kernel(), pmap->pm_vptpt, &pmap->pm_pptpt);
   1496 	pmap->pm_pptpt &= PG_FRAME;
   1497 	/* Revoke cacheability and bufferability */
   1498 	/* XXX should be done better than this */
   1499 	pte = pmap_pte(pmap_kernel(), pmap->pm_vptpt);
   1500 	*pte = *pte & ~(PT_C | PT_B);
   1501 
   1502 	/* Wire in this page table */
   1503 	pmap_map_in_l1(pmap, PROCESS_PAGE_TBLS_BASE, pmap->pm_pptpt, TRUE);
   1504 
   1505 	pt->pt_flags &= ~PTFLAG_CLEAN;	/* L1 is dirty now */
   1506 
   1507 	/*
   1508 	 * Map the kernel page tables for 0xf0000000 +
   1509 	 * into the page table used to map the
   1510 	 * pmap's page tables
   1511 	 */
   1512 	bcopy((char *)(PROCESS_PAGE_TBLS_BASE
   1513 	    + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2))
   1514 	    + ((PD_SIZE - KERNEL_PD_SIZE) >> 2)),
   1515 	    (char *)pmap->pm_vptpt + ((PD_SIZE - KERNEL_PD_SIZE) >> 2),
   1516 	    (KERNEL_PD_SIZE >> 2));
   1517 
   1518 	return(0);
   1519 }
   1520 
   1521 
   1522 /*
   1523  * Initialize a preallocated and zeroed pmap structure,
   1524  * such as one in a vmspace structure.
   1525  */
   1526 
   1527 void
   1528 pmap_pinit(pmap)
   1529 	struct pmap *pmap;
   1530 {
   1531 	PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
   1532 
   1533 	/* Keep looping until we succeed in allocating a page directory */
   1534 	while (pmap_allocpagedir(pmap) != 0) {
   1535 		/*
   1536 		 * Ok we failed to allocate a suitable block of memory for an
   1537 		 * L1 page table. This means that either:
   1538 		 * 1. 16KB of virtual address space could not be allocated
   1539 		 * 2. 16KB of physically contiguous memory on a 16KB boundary
   1540 		 *    could not be allocated.
   1541 		 *
   1542 		 * Since we cannot fail we will sleep for a while and try
   1543 		 * again.
   1544 		 */
   1545 		(void) ltsleep(&lbolt, PVM, "l1ptwait", hz >> 3, NULL);
   1546 	}
   1547 
   1548 	/* Map zero page for the pmap. This will also map the L2 for it */
   1549 	pmap_enter(pmap, 0x00000000, systempage.pv_pa,
   1550 	    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
   1551 	pmap_update(pmap);
   1552 }
   1553 
   1554 
   1555 void
   1556 pmap_freepagedir(pmap)
   1557 	struct pmap *pmap;
   1558 {
   1559 	/* Free the memory used for the page table mapping */
   1560 	if (pmap->pm_vptpt != 0)
   1561 		uvm_km_free(kernel_map, (vaddr_t)pmap->pm_vptpt, NBPG);
   1562 
   1563 	/* junk the L1 page table */
   1564 	if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
   1565 		/* Add the page table to the queue */
   1566 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
   1567 		++l1pt_static_queue_count;
   1568 	} else if (l1pt_queue_count < 8) {
   1569 		/* Add the page table to the queue */
   1570 		SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
   1571 		++l1pt_queue_count;
   1572 	} else
   1573 		pmap_free_l1pt(pmap->pm_l1pt);
   1574 }
   1575 
   1576 
   1577 /*
   1578  * Retire the given physical map from service.
   1579  * Should only be called if the map contains no valid mappings.
   1580  */
   1581 
   1582 void
   1583 pmap_destroy(pmap)
   1584 	struct pmap *pmap;
   1585 {
   1586 	struct vm_page *page;
   1587 	int count;
   1588 
   1589 	if (pmap == NULL)
   1590 		return;
   1591 
   1592 	PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
   1593 
   1594 	/*
   1595 	 * Drop reference count
   1596 	 */
   1597 	simple_lock(&pmap->pm_obj.vmobjlock);
   1598 	count = --pmap->pm_obj.uo_refs;
   1599 	simple_unlock(&pmap->pm_obj.vmobjlock);
   1600 	if (count > 0) {
   1601 		return;
   1602 	}
   1603 
   1604 	/*
   1605 	 * reference count is zero, free pmap resources and then free pmap.
   1606 	 */
   1607 
   1608 	/* Remove the zero page mapping */
   1609 	pmap_remove(pmap, 0x00000000, 0x00000000 + NBPG);
   1610 	pmap_update(pmap);
   1611 
   1612 	/*
   1613 	 * Free any page tables still mapped
    1614 	 * This is only temporary until pmap_enter can count the number
    1615 	 * of mappings made in a page table. Then pmap_remove() can
    1616 	 * reduce the count and free the page table when the count
    1617 	 * reaches zero.  Note that entries in this list should match the
    1618 	 * contents of the ptpt, but walking this list is faster than
    1619 	 * scanning all 1024 ptpt entries looking for page tables.
    1620 	 * (Taken from the i386 pmap.c.)
   1621 	 */
   1622 	while (pmap->pm_obj.memq.tqh_first != NULL) {
   1623 		page = pmap->pm_obj.memq.tqh_first;
   1624 #ifdef DIAGNOSTIC
   1625 		if (page->flags & PG_BUSY)
    1626 			panic("pmap_destroy: busy page table page");
   1627 #endif
   1628 		/* pmap_page_protect?  currently no need for it. */
   1629 
   1630 		page->wire_count = 0;
   1631 		uvm_pagefree(page);
   1632 	}
   1633 
   1634 	/* Free the page dir */
   1635 	pmap_freepagedir(pmap);
   1636 
   1637 	/* return the pmap to the pool */
   1638 	pool_put(&pmap_pmap_pool, pmap);
   1639 }
   1640 
   1641 
   1642 /*
   1643  * void pmap_reference(struct pmap *pmap)
   1644  *
   1645  * Add a reference to the specified pmap.
   1646  */
   1647 
   1648 void
   1649 pmap_reference(pmap)
   1650 	struct pmap *pmap;
   1651 {
   1652 	if (pmap == NULL)
   1653 		return;
   1654 
   1655 	simple_lock(&pmap->pm_lock);
   1656 	pmap->pm_obj.uo_refs++;
   1657 	simple_unlock(&pmap->pm_lock);
   1658 }
   1659 
   1660 /*
   1661  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1662  *
   1663  * Return the start and end addresses of the kernel's virtual space.
   1664  * These values are setup in pmap_bootstrap and are updated as pages
   1665  * are allocated.
   1666  */
   1667 
   1668 void
   1669 pmap_virtual_space(start, end)
   1670 	vaddr_t *start;
   1671 	vaddr_t *end;
   1672 {
   1673 	*start = virtual_start;
   1674 	*end = virtual_end;
   1675 }
   1676 
   1677 
   1678 /*
   1679  * Activate the address space for the specified process.  If the process
   1680  * is the current process, load the new MMU context.
   1681  */
   1682 void
   1683 pmap_activate(p)
   1684 	struct proc *p;
   1685 {
   1686 	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
   1687 	struct pcb *pcb = &p->p_addr->u_pcb;
   1688 
   1689 	(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
   1690 	    (paddr_t *)&pcb->pcb_pagedir);
   1691 
   1692 	PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
   1693 	    p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
   1694 
   1695 	if (p == curproc) {
   1696 		PDEBUG(0, printf("pmap_activate: setting TTB\n"));
   1697 		setttb((u_int)pcb->pcb_pagedir);
   1698 	}
   1699 #if 0
   1700 	pmap->pm_pdchanged = FALSE;
   1701 #endif
   1702 }
   1703 
   1704 
   1705 /*
   1706  * Deactivate the address space of the specified process.
   1707  */
   1708 void
   1709 pmap_deactivate(p)
   1710 	struct proc *p;
   1711 {
   1712 }
   1713 
   1714 
   1715 /*
   1716  * pmap_clean_page()
   1717  *
   1718  * This is a local function used to work out the best strategy to clean
   1719  * a single page referenced by its entry in the PV table. It's used by
    1720  * pmap_copy_page, pmap_zero_page and maybe some others later on.
   1721  *
   1722  * Its policy is effectively:
   1723  *  o If there are no mappings, we don't bother doing anything with the cache.
   1724  *  o If there is one mapping, we clean just that page.
   1725  *  o If there are multiple mappings, we clean the entire cache.
   1726  *
   1727  * So that some functions can be further optimised, it returns 0 if it didn't
   1728  * clean the entire cache, or 1 if it did.
   1729  *
    1730  * XXX One bug in this routine is that if the pv_entry has a single page
    1731  * mapped at 0x00000000, a whole cache clean will be performed rather than
    1732  * just that one page. This should not occur in everyday use, and if it
    1733  * does the only cost is a less efficient clean for that page.
   1734  */
   1735 static int
   1736 pmap_clean_page(pv, is_src)
   1737 	struct pv_entry *pv;
   1738 	boolean_t is_src;
   1739 {
   1740 	struct pmap *pmap;
   1741 	struct pv_entry *npv;
   1742 	int cache_needs_cleaning = 0;
   1743 	vaddr_t page_to_clean = 0;
   1744 
   1745 	if (pv == NULL)
   1746 		/* nothing mapped in so nothing to flush */
   1747 		return (0);
   1748 
   1749 	/* Since we flush the cache each time we change curproc, we
   1750 	 * only need to flush the page if it is in the current pmap.
   1751 	 */
   1752 	if (curproc)
   1753 		pmap = curproc->p_vmspace->vm_map.pmap;
   1754 	else
   1755 		pmap = pmap_kernel();
   1756 
   1757 	for (npv = pv; npv; npv = npv->pv_next) {
   1758 		if (npv->pv_pmap == pmap) {
   1759 			/* The page is mapped non-cacheable in
   1760 			 * this map.  No need to flush the cache.
   1761 			 */
   1762 			if (npv->pv_flags & PT_NC) {
   1763 #ifdef DIAGNOSTIC
   1764 				if (cache_needs_cleaning)
   1765 					panic("pmap_clean_page: "
   1766 							"cache inconsistency");
   1767 #endif
   1768 				break;
   1769 			}
   1770 #if 0
   1771 			/* This doesn't work, because pmap_protect
   1772 			   doesn't flush changes on pages that it
   1773 			   has write-protected.  */
   1774 			/* If the page is not writeable and this
   1775 			   is the source, then there is no need
   1776 			   to flush it from the cache.  */
   1777 			else if (is_src && ! (npv->pv_flags & PT_Wr))
   1778 				continue;
   1779 #endif
   1780 			if (cache_needs_cleaning){
   1781 				page_to_clean = 0;
   1782 				break;
   1783 			}
   1784 			else
   1785 				page_to_clean = npv->pv_va;
   1786 			cache_needs_cleaning = 1;
   1787 		}
   1788 	}
   1789 
   1790 	if (page_to_clean)
   1791 		cpu_cache_purgeID_rng(page_to_clean, NBPG);
   1792 	else if (cache_needs_cleaning) {
   1793 		cpu_cache_purgeID();
   1794 		return (1);
   1795 	}
   1796 	return (0);
   1797 }
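
/*
 * A minimal standalone sketch of the cleaning policy described above: no
 * mappings means no cache work, a single mapping in the current pmap means a
 * ranged clean of just that page, and anything more means a full cache
 * clean.  The enum and function names are invented for illustration only.
 */
#if 0
enum ex_clean { EX_CLEAN_NONE, EX_CLEAN_PAGE, EX_CLEAN_ALL };

static enum ex_clean
example_clean_policy(int mappings_in_current_pmap)
{
	if (mappings_in_current_pmap == 0)
		return EX_CLEAN_NONE;		/* nothing cached to flush */
	if (mappings_in_current_pmap == 1)
		return EX_CLEAN_PAGE;		/* clean just that one page */
	return EX_CLEAN_ALL;			/* cheaper to purge the lot */
}
#endif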
   1798 
   1799 /*
    1800  * pmap_find_pvh()
   1801  *
   1802  * This is a local function that finds a PV head for a given physical page.
   1803  * This is a common op, and this function removes loads of ifdefs in the code.
   1804  */
   1805 static __inline struct pv_head *
   1806 pmap_find_pvh(phys)
   1807 	paddr_t phys;
   1808 {
   1809 	int bank, off;
   1810 	struct pv_head *pvh;
   1811 
   1812 #ifdef DIAGNOSTIC
   1813 	if (!pmap_initialized)
   1814 		panic("pmap_find_pv: !pmap_initialized");
   1815 #endif
   1816 
   1817 	if ((bank = vm_physseg_find(atop(phys), &off)) == -1)
   1818 		panic("pmap_find_pv: not a real page, phys=%lx\n", phys);
   1819 	pvh = &vm_physmem[bank].pmseg.pvhead[off];
   1820 	return (pvh);
   1821 }
   1822 
   1823 /*
   1824  * pmap_zero_page()
   1825  *
   1826  * Zero a given physical page by mapping it at a page hook point.
    1827  * In doing the zero page op, the page we zero is mapped cacheable, since on
    1828  * StrongARM accesses to non-cached pages are non-burst, making writing
    1829  * _any_ bulk data very slow.
   1830  */
   1831 void
   1832 pmap_zero_page(phys)
   1833 	paddr_t phys;
   1834 {
   1835 	struct pv_head *pvh;
   1836 
    1837 	/* Get an entry for this page, and clean it. */
   1838 	PMAP_HEAD_TO_MAP_LOCK();
   1839 	pvh = pmap_find_pvh(phys);
   1840 	simple_lock(&pvh->pvh_lock);
   1841 	pmap_clean_page(pvh->pvh_list, FALSE);
   1842 	simple_unlock(&pvh->pvh_lock);
   1843 	PMAP_HEAD_TO_MAP_UNLOCK();
   1844 
   1845 	/*
   1846 	 * Hook in the page, zero it, and purge the cache for that
   1847 	 * zeroed page. Invalidate the TLB as needed.
   1848 	 */
   1849 	*page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
   1850 	cpu_tlb_flushD_SE(page_hook0.va);
   1851 	bzero_page(page_hook0.va);
   1852 	cpu_cache_purgeD_rng(page_hook0.va, NBPG);
   1853 }
   1854 
   1855 /* pmap_pageidlezero()
   1856  *
   1857  * The same as above, except that we assume that the page is not
   1858  * mapped.  This means we never have to flush the cache first.  Called
   1859  * from the idle loop.
   1860  */
   1861 boolean_t
   1862 pmap_pageidlezero(phys)
   1863     paddr_t phys;
   1864 {
   1865 	int i, *ptr;
   1866 	boolean_t rv = TRUE;
   1867 
   1868 #ifdef DIAGNOSTIC
   1869 	struct pv_head *pvh;
   1870 
   1871 	pvh = pmap_find_pvh(phys);
   1872 	if (pvh->pvh_list != NULL)
   1873 		panic("pmap_pageidlezero: zeroing mapped page\n");
   1874 #endif
   1875 
   1876 	/*
   1877 	 * Hook in the page, zero it, and purge the cache for that
   1878 	 * zeroed page. Invalidate the TLB as needed.
   1879 	 */
   1880 	*page_hook0.pte = L2_PTE(phys & PG_FRAME, AP_KRW);
   1881 	cpu_tlb_flushD_SE(page_hook0.va);
   1882 
   1883 	for (i = 0, ptr = (int *)page_hook0.va;
   1884 			i < (NBPG / sizeof(int)); i++) {
   1885 		if (sched_whichqs != 0) {
   1886 			/*
   1887 			 * A process has become ready.  Abort now,
   1888 			 * so we don't keep it waiting while we
   1889 			 * do slow memory access to finish this
   1890 			 * page.
   1891 			 */
   1892 			rv = FALSE;
   1893 			break;
   1894 		}
   1895 		*ptr++ = 0;
   1896 	}
   1897 
   1898 	if (rv)
   1899 		/*
   1900 		 * if we aborted we'll rezero this page again later so don't
   1901 		 * purge it unless we finished it
   1902 		 */
   1903 		cpu_cache_purgeD_rng(page_hook0.va, NBPG);
   1904 	return (rv);
   1905 }
   1906 
   1907 /*
   1908  * pmap_copy_page()
   1909  *
   1910  * Copy one physical page into another, by mapping the pages into
   1911  * hook points. The same comment regarding cachability as in
   1912  * pmap_zero_page also applies here.
   1913  */
   1914 void
   1915 pmap_copy_page(src, dest)
   1916 	paddr_t src;
   1917 	paddr_t dest;
   1918 {
   1919 	struct pv_head *src_pvh, *dest_pvh;
   1920 
   1921 	PMAP_HEAD_TO_MAP_LOCK();
   1922 	/* Get PV entries for the pages, and clean them if needed. */
   1923 	src_pvh = pmap_find_pvh(src);
   1924 	simple_lock(&src_pvh->pvh_lock);
   1925 	dest_pvh = pmap_find_pvh(dest);
   1926 	simple_lock(&dest_pvh->pvh_lock);
   1927 	if (!pmap_clean_page(src_pvh->pvh_list, TRUE))
   1928 		pmap_clean_page(dest_pvh->pvh_list, FALSE);
   1929 
   1930 	simple_unlock(&dest_pvh->pvh_lock);
   1931 	simple_unlock(&src_pvh->pvh_lock);
   1932 	PMAP_HEAD_TO_MAP_UNLOCK();
   1933 
   1934 	/*
   1935 	 * Map the pages into the page hook points, copy them, and purge
   1936 	 * the cache for the appropriate page. Invalidate the TLB
   1937 	 * as required.
   1938 	 */
   1939 	*page_hook0.pte = L2_PTE(src & PG_FRAME, AP_KRW);
   1940 	*page_hook1.pte = L2_PTE(dest & PG_FRAME, AP_KRW);
   1941 	cpu_tlb_flushD_SE(page_hook0.va);
   1942 	cpu_tlb_flushD_SE(page_hook1.va);
   1943 	bcopy_page(page_hook0.va, page_hook1.va);
   1944 	cpu_cache_purgeD_rng(page_hook0.va, NBPG);
   1945 	cpu_cache_purgeD_rng(page_hook1.va, NBPG);
   1946 }
   1947 
   1948 /*
    1949  * paddr_t pmap_next_phys_page(paddr_t addr)
    1950  *
    1951  * Return the physical address of the page following 'addr', or 0 if
    1952  * there are no more physical pages.
   1953  */
   1954 
   1955 paddr_t
   1956 pmap_next_phys_page(addr)
   1957 	paddr_t addr;
   1958 
   1959 {
   1960 	int loop;
   1961 
   1962 	if (addr < bootconfig.dram[0].address)
   1963 		return(bootconfig.dram[0].address);
   1964 
   1965 	loop = 0;
   1966 
   1967 	while (bootconfig.dram[loop].address != 0
   1968 	    && addr > (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG))
   1969 		++loop;
   1970 
   1971 	if (bootconfig.dram[loop].address == 0)
   1972 		return(0);
   1973 
   1974 	addr += NBPG;
   1975 
   1976 	if (addr >= (bootconfig.dram[loop].address + bootconfig.dram[loop].pages * NBPG)) {
   1977 		if (bootconfig.dram[loop + 1].address == 0)
   1978 			return(0);
   1979 		addr = bootconfig.dram[loop + 1].address;
   1980 	}
   1981 
   1982 	return(addr);
   1983 }
   1984 
   1985 #if 0
   1986 void
   1987 pmap_pte_addref(pmap, va)
   1988 	struct pmap *pmap;
   1989 	vaddr_t va;
   1990 {
   1991 	pd_entry_t *pde;
   1992 	paddr_t pa;
   1993 	struct vm_page *m;
   1994 
   1995 	if (pmap == pmap_kernel())
   1996 		return;
   1997 
   1998 	pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
   1999 	pa = pmap_pte_pa(pde);
   2000 	m = PHYS_TO_VM_PAGE(pa);
   2001 	++m->wire_count;
   2002 #ifdef MYCROFT_HACK
   2003 	printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   2004 	    pmap, va, pde, pa, m, m->wire_count);
   2005 #endif
   2006 }
   2007 
   2008 void
   2009 pmap_pte_delref(pmap, va)
   2010 	struct pmap *pmap;
   2011 	vaddr_t va;
   2012 {
   2013 	pd_entry_t *pde;
   2014 	paddr_t pa;
   2015 	struct vm_page *m;
   2016 
   2017 	if (pmap == pmap_kernel())
   2018 		return;
   2019 
   2020 	pde = pmap_pde(pmap, va & ~(3 << PDSHIFT));
   2021 	pa = pmap_pte_pa(pde);
   2022 	m = PHYS_TO_VM_PAGE(pa);
   2023 	--m->wire_count;
   2024 #ifdef MYCROFT_HACK
   2025 	printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   2026 	    pmap, va, pde, pa, m, m->wire_count);
   2027 #endif
   2028 	if (m->wire_count == 0) {
   2029 #ifdef MYCROFT_HACK
   2030 		printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
   2031 		    pmap, va, pde, pa, m);
   2032 #endif
   2033 		pmap_unmap_in_l1(pmap, va);
   2034 		uvm_pagefree(m);
   2035 		--pmap->pm_stats.resident_count;
   2036 	}
   2037 }
   2038 #else
   2039 #define	pmap_pte_addref(pmap, va)
   2040 #define	pmap_pte_delref(pmap, va)
   2041 #endif
   2042 
   2043 /*
   2044  * Since we have a virtually indexed cache, we may need to inhibit caching if
   2045  * there is more than one mapping and at least one of them is writable.
   2046  * Since we purge the cache on every context switch, we only need to check for
   2047  * other mappings within the same pmap, or kernel_pmap.
   2048  * This function is also called when a page is unmapped, to possibly reenable
   2049  * caching on any remaining mappings.
   2050  *
    2051  * Note that the pmap must have its ptes mapped in, and passed with ptes.
   2052  */
   2053 void
   2054 pmap_vac_me_harder(struct pmap *pmap, struct pv_head *pvh, pt_entry_t *ptes,
   2055 	boolean_t clear_cache)
   2056 {
   2057 	struct pv_entry *pv, *npv;
   2058 	pt_entry_t *pte;
   2059 	int entries = 0;
   2060 	int writeable = 0;
   2061 	int cacheable_entries = 0;
   2062 
   2063 	pv = pvh->pvh_list;
   2064 	KASSERT(ptes != NULL);
   2065 
   2066 	/*
   2067 	 * Count mappings and writable mappings in this pmap.
   2068 	 * Keep a pointer to the first one.
   2069 	 */
   2070 	for (npv = pv; npv; npv = npv->pv_next) {
   2071 		/* Count mappings in the same pmap */
   2072 		if (pmap == npv->pv_pmap) {
   2073 			if (entries++ == 0)
   2074 				pv = npv;
   2075 			/* Cacheable mappings */
   2076 			if ((npv->pv_flags & PT_NC) == 0)
   2077 				cacheable_entries++;
   2078 			/* Writeable mappings */
   2079 			if (npv->pv_flags & PT_Wr)
   2080 				++writeable;
   2081 		}
   2082 	}
   2083 
   2084 	PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
   2085 		"writeable %d cacheable %d %s\n", pmap, entries, writeable,
   2086 	    	cacheable_entries, clear_cache ? "clean" : "no clean"));
   2087 
   2088 	/*
   2089 	 * Enable or disable caching as necessary.
   2090 	 * We do a quick check of the first PTE to avoid walking the list if
   2091 	 * we're already in the right state.
   2092 	 */
   2093 	if (entries > 1 && writeable) {
   2094 		if (cacheable_entries == 0)
   2095 		    return;
   2096 		if (pv->pv_flags & PT_NC) {
   2097 #ifdef DIAGNOSTIC
    2098 			/* We have cacheable entries, but the first one
    2099 			 * isn't among them. Something is wrong. */
   2100     			if (cacheable_entries)
   2101 				panic("pmap_vac_me_harder: "
   2102 	    				"cacheable inconsistent");
   2103 #endif
   2104 			return;
   2105 		}
   2106 		pte =  &ptes[arm_byte_to_page(pv->pv_va)];
   2107 		*pte &= ~(PT_C | PT_B);
   2108 		pv->pv_flags |= PT_NC;
   2109 		if (clear_cache && cacheable_entries < 4) {
   2110 			cpu_cache_purgeID_rng(pv->pv_va, NBPG);
   2111 			cpu_tlb_flushID_SE(pv->pv_va);
   2112 		}
   2113 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
   2114 			if (pmap == npv->pv_pmap &&
   2115 			    (npv->pv_flags & PT_NC) == 0) {
   2116 				ptes[arm_byte_to_page(npv->pv_va)] &=
   2117 				    ~(PT_C | PT_B);
   2118  				npv->pv_flags |= PT_NC;
   2119 				if (clear_cache && cacheable_entries < 4) {
   2120 					cpu_cache_purgeID_rng(npv->pv_va,
   2121 					    NBPG);
   2122 					cpu_tlb_flushID_SE(npv->pv_va);
   2123 				}
   2124 			}
   2125 		}
   2126 		if (clear_cache && cacheable_entries >= 4) {
   2127 			cpu_cache_purgeID();
   2128 			cpu_tlb_flushID();
   2129 		}
   2130 	} else if (entries > 0) {
   2131 		if ((pv->pv_flags & PT_NC) == 0)
   2132 			return;
   2133 		pte = &ptes[arm_byte_to_page(pv->pv_va)];
   2134 		*pte |= (PT_C | PT_B);
   2135 		pv->pv_flags &= ~PT_NC;
   2136 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
   2137 			if (pmap == npv->pv_pmap &&
   2138 				(npv->pv_flags & PT_NC)) {
   2139 				ptes[arm_byte_to_page(npv->pv_va)] |=
   2140 				    (PT_C | PT_B);
   2141 				npv->pv_flags &= ~PT_NC;
   2142 			}
   2143 		}
   2144 	}
   2145 }
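
/*
 * A small standalone sketch (names invented) of the rule pmap_vac_me_harder
 * enforces for the virtually indexed cache: mappings of a page within one
 * pmap may remain cacheable only while there is at most one of them, or
 * while none of them is writable.
 */
#if 0
#include <stdbool.h>

static bool
example_may_cache(int entries, int writeable)
{
	/* Multiple mappings with at least one writable can alias dirty
	 * data in the cache, so caching must be inhibited. */
	return !(entries > 1 && writeable > 0);
}
#endif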
   2146 
   2147 /*
   2148  * pmap_remove()
   2149  *
   2150  * pmap_remove is responsible for nuking a number of mappings for a range
   2151  * of virtual address space in the current pmap. To do this efficiently
   2152  * is interesting, because in a number of cases a wide virtual address
   2153  * range may be supplied that contains few actual mappings. So, the
   2154  * optimisations are:
   2155  *  1. Try and skip over hunks of address space for which an L1 entry
   2156  *     does not exist.
   2157  *  2. Build up a list of pages we've hit, up to a maximum, so we can
   2158  *     maybe do just a partial cache clean. This path of execution is
   2159  *     complicated by the fact that the cache must be flushed _before_
   2160  *     the PTE is nuked, being a VAC :-)
   2161  *  3. Maybe later fast-case a single page, but I don't think this is
   2162  *     going to make _that_ much difference overall.
   2163  */
   2164 
   2165 #define PMAP_REMOVE_CLEAN_LIST_SIZE	3
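
/*
 * Illustrative sketch (invented name) of the clean-list strategy described
 * above: removed pages are remembered up to PMAP_REMOVE_CLEAN_LIST_SIZE and
 * each is then cleaned with a ranged operation; if the list would overflow,
 * the whole cache is purged once instead and no further per-page cleaning
 * is done.  Cache operations only matter when the pmap is active.
 */
#if 0
static int
example_remove_strategy(int pages_removed)
{
	/* Returns 1 if a single full cache purge is done, 0 if each
	 * removed page is cleaned with a ranged operation afterwards. */
	if (pages_removed > PMAP_REMOVE_CLEAN_LIST_SIZE)
		return 1;	/* list would overflow: purge everything once */
	return 0;		/* few enough pages: clean them individually */
}
#endif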
   2166 
   2167 void
   2168 pmap_remove(pmap, sva, eva)
   2169 	struct pmap *pmap;
   2170 	vaddr_t sva;
   2171 	vaddr_t eva;
   2172 {
   2173 	int cleanlist_idx = 0;
   2174 	struct pagelist {
   2175 		vaddr_t va;
   2176 		pt_entry_t *pte;
   2177 	} cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
   2178 	pt_entry_t *pte = 0, *ptes;
   2179 	paddr_t pa;
   2180 	int pmap_active;
   2181 	struct pv_head *pvh;
   2182 
   2183 	/* Exit quick if there is no pmap */
   2184 	if (!pmap)
   2185 		return;
   2186 
   2187 	PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", pmap, sva, eva));
   2188 
   2189 	sva &= PG_FRAME;
   2190 	eva &= PG_FRAME;
   2191 
   2192 	/*
   2193 	 * we lock in the pmap => pv_head direction
   2194 	 */
   2195 	PMAP_MAP_TO_HEAD_LOCK();
   2196 
   2197 	ptes = pmap_map_ptes(pmap);
   2198 	/* Get a page table pointer */
   2199 	while (sva < eva) {
   2200 		if (pmap_pde_v(pmap_pde(pmap, sva)))
   2201 			break;
   2202 		sva = (sva & PD_MASK) + NBPD;
   2203 	}
   2204 
   2205 	pte = &ptes[arm_byte_to_page(sva)];
    2206 	/* Note whether the pmap is active, as this requires cache and TLB cleans */
   2207 	if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
   2208 	    || (pmap == pmap_kernel()))
   2209 		pmap_active = 1;
   2210 	else
   2211 		pmap_active = 0;
   2212 
   2213 	/* Now loop along */
   2214 	while (sva < eva) {
   2215 		/* Check if we can move to the next PDE (l1 chunk) */
   2216 		if (!(sva & PT_MASK))
   2217 			if (!pmap_pde_v(pmap_pde(pmap, sva))) {
   2218 				sva += NBPD;
   2219 				pte += arm_byte_to_page(NBPD);
   2220 				continue;
   2221 			}
   2222 
   2223 		/* We've found a valid PTE, so this page of PTEs has to go. */
   2224 		if (pmap_pte_v(pte)) {
   2225 			int bank, off;
   2226 
   2227 			/* Update statistics */
   2228 			--pmap->pm_stats.resident_count;
   2229 
   2230 			/*
   2231 			 * Add this page to our cache remove list, if we can.
   2232 			 * If, however the cache remove list is totally full,
   2233 			 * then do a complete cache invalidation taking note
   2234 			 * to backtrack the PTE table beforehand, and ignore
   2235 			 * the lists in future because there's no longer any
   2236 			 * point in bothering with them (we've paid the
   2237 			 * penalty, so will carry on unhindered). Otherwise,
   2238 			 * when we fall out, we just clean the list.
   2239 			 */
   2240 			PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
   2241 			pa = pmap_pte_pa(pte);
   2242 
   2243 			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2244 				/* Add to the clean list. */
   2245 				cleanlist[cleanlist_idx].pte = pte;
   2246 				cleanlist[cleanlist_idx].va = sva;
   2247 				cleanlist_idx++;
   2248 			} else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2249 				int cnt;
   2250 
   2251 				/* Nuke everything if needed. */
   2252 				if (pmap_active) {
   2253 					cpu_cache_purgeID();
   2254 					cpu_tlb_flushID();
   2255 				}
   2256 
   2257 				/*
   2258 				 * Roll back the previous PTE list,
   2259 				 * and zero out the current PTE.
   2260 				 */
   2261 				for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
   2262 					*cleanlist[cnt].pte = 0;
   2263 					pmap_pte_delref(pmap, cleanlist[cnt].va);
   2264 				}
   2265 				*pte = 0;
   2266 				pmap_pte_delref(pmap, sva);
   2267 				cleanlist_idx++;
   2268 			} else {
   2269 				/*
   2270 				 * We've already nuked the cache and
   2271 				 * TLB, so just carry on regardless,
   2272 				 * and we won't need to do it again
   2273 				 */
   2274 				*pte = 0;
   2275 				pmap_pte_delref(pmap, sva);
   2276 			}
   2277 
   2278 			/*
   2279 			 * Update flags. In a number of circumstances,
   2280 			 * we could cluster a lot of these and do a
   2281 			 * number of sequential pages in one go.
   2282 			 */
   2283 			if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
   2284 				struct pv_entry *pve;
   2285 				pvh = &vm_physmem[bank].pmseg.pvhead[off];
   2286 				simple_lock(&pvh->pvh_lock);
   2287 				pve = pmap_remove_pv(pvh, pmap, sva);
   2288 				pmap_free_pv(pmap, pve);
   2289 				pmap_vac_me_harder(pmap, pvh, ptes, FALSE);
   2290 				simple_unlock(&pvh->pvh_lock);
   2291 			}
   2292 		}
   2293 		sva += NBPG;
   2294 		pte++;
   2295 	}
   2296 
   2297 	pmap_unmap_ptes(pmap);
   2298 	/*
    2299 	 * Now, if we've fallen through to here, chances are that there
    2300 	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
   2301 	 */
   2302 	if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2303 		u_int cnt;
   2304 
   2305 		for (cnt = 0; cnt < cleanlist_idx; cnt++) {
   2306 			if (pmap_active) {
   2307 				cpu_cache_purgeID_rng(cleanlist[cnt].va, NBPG);
   2308 				*cleanlist[cnt].pte = 0;
   2309 				cpu_tlb_flushID_SE(cleanlist[cnt].va);
   2310 			} else
   2311 				*cleanlist[cnt].pte = 0;
   2312 			pmap_pte_delref(pmap, cleanlist[cnt].va);
   2313 		}
   2314 	}
   2315 	PMAP_MAP_TO_HEAD_UNLOCK();
   2316 }
   2317 
   2318 /*
   2319  * Routine:	pmap_remove_all
   2320  * Function:
   2321  *		Removes this physical page from
   2322  *		all physical maps in which it resides.
   2323  *		Reflects back modify bits to the pager.
   2324  */
   2325 
   2326 void
   2327 pmap_remove_all(pa)
   2328 	paddr_t pa;
   2329 {
   2330 	struct pv_entry *pv, *npv;
   2331 	struct pv_head *pvh;
   2332 	struct pmap *pmap;
   2333 	pt_entry_t *pte, *ptes;
   2334 
   2335 	PDEBUG(0, printf("pmap_remove_all: pa=%lx ", pa));
   2336 
   2337 	/* set pv_head => pmap locking */
   2338 	PMAP_HEAD_TO_MAP_LOCK();
   2339 
   2340 	pvh = pmap_find_pvh(pa);
   2341 	simple_lock(&pvh->pvh_lock);
   2342 
   2343 	pv = pvh->pvh_list;
   2344 	if (pv == NULL)
   2345 	{
   2346 	    PDEBUG(0, printf("free page\n"));
   2347 	    simple_unlock(&pvh->pvh_lock);
   2348 	    PMAP_HEAD_TO_MAP_UNLOCK();
   2349 	    return;
   2350 	}
   2351 	pmap_clean_page(pv, FALSE);
   2352 
   2353 	while (pv) {
   2354 		pmap = pv->pv_pmap;
   2355 		ptes = pmap_map_ptes(pmap);
   2356 		pte = &ptes[arm_byte_to_page(pv->pv_va)];
   2357 
   2358 		PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
   2359 		    pv->pv_va, pv->pv_flags));
   2360 #ifdef DEBUG
    2361 		if (!pmap_pde_v(pmap_pde(pmap, pv->pv_va)) || !pmap_pte_v(pte)
   2362 			    || pmap_pte_pa(pte) != pa)
   2363 			panic("pmap_remove_all: bad mapping");
   2364 #endif	/* DEBUG */
   2365 
   2366 		/*
   2367 		 * Update statistics
   2368 		 */
   2369 		--pmap->pm_stats.resident_count;
   2370 
   2371 		/* Wired bit */
   2372 		if (pv->pv_flags & PT_W)
   2373 			--pmap->pm_stats.wired_count;
   2374 
   2375 		/*
   2376 		 * Invalidate the PTEs.
   2377 		 * XXX: should cluster them up and invalidate as many
   2378 		 * as possible at once.
   2379 		 */
   2380 
   2381 #ifdef needednotdone
   2382 reduce wiring count on page table pages as references drop
   2383 #endif
   2384 
   2385 		*pte = 0;
   2386 		pmap_pte_delref(pmap, pv->pv_va);
   2387 
   2388 		npv = pv->pv_next;
   2389 		pmap_free_pv(pmap, pv);
   2390 		pv = npv;
   2391 		pmap_unmap_ptes(pmap);
   2392 	}
   2393 	pvh->pvh_list = NULL;
   2394 	simple_unlock(&pvh->pvh_lock);
   2395 	PMAP_HEAD_TO_MAP_UNLOCK();
   2396 
   2397 	PDEBUG(0, printf("done\n"));
   2398 	cpu_tlb_flushID();
   2399 }
   2400 
   2401 
   2402 /*
   2403  * Set the physical protection on the specified range of this map as requested.
   2404  */
   2405 
   2406 void
   2407 pmap_protect(pmap, sva, eva, prot)
   2408 	struct pmap *pmap;
   2409 	vaddr_t sva;
   2410 	vaddr_t eva;
   2411 	vm_prot_t prot;
   2412 {
   2413 	pt_entry_t *pte = NULL, *ptes;
   2414 	int armprot;
   2415 	int flush = 0;
   2416 	paddr_t pa;
   2417 	int bank, off;
   2418 	struct pv_head *pvh;
   2419 
   2420 	PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
   2421 	    pmap, sva, eva, prot));
   2422 
   2423 	if (~prot & VM_PROT_READ) {
   2424 		/* Just remove the mappings. */
   2425 		pmap_remove(pmap, sva, eva);
   2426 		return;
   2427 	}
   2428 	if (prot & VM_PROT_WRITE) {
   2429 		/*
   2430 		 * If this is a read->write transition, just ignore it and let
   2431 		 * uvm_fault() take care of it later.
   2432 		 */
   2433 		return;
   2434 	}
   2435 
   2436 	sva &= PG_FRAME;
   2437 	eva &= PG_FRAME;
   2438 
   2439 	/* Need to lock map->head */
   2440 	PMAP_MAP_TO_HEAD_LOCK();
   2441 
   2442 	ptes = pmap_map_ptes(pmap);
   2443 	/*
   2444 	 * We need to acquire a pointer to a page table page before entering
   2445 	 * the following loop.
   2446 	 */
   2447 	while (sva < eva) {
   2448 		if (pmap_pde_v(pmap_pde(pmap, sva)))
   2449 			break;
   2450 		sva = (sva & PD_MASK) + NBPD;
   2451 	}
   2452 
   2453 	pte = &ptes[arm_byte_to_page(sva)];
   2454 
   2455 	while (sva < eva) {
   2456 		/* only check once in a while */
   2457 		if ((sva & PT_MASK) == 0) {
   2458 			if (!pmap_pde_v(pmap_pde(pmap, sva))) {
   2459 				/* We can race ahead here, to the next pde. */
   2460 				sva += NBPD;
   2461 				pte += arm_byte_to_page(NBPD);
   2462 				continue;
   2463 			}
   2464 		}
   2465 
   2466 		if (!pmap_pte_v(pte))
   2467 			goto next;
   2468 
   2469 		flush = 1;
   2470 
   2471 		armprot = 0;
   2472 		if (sva < VM_MAXUSER_ADDRESS)
   2473 			armprot |= PT_AP(AP_U);
   2474 		else if (sva < VM_MAX_ADDRESS)
   2475 			armprot |= PT_AP(AP_W);  /* XXX Ekk what is this ? */
   2476 		*pte = (*pte & 0xfffff00f) | armprot;
   2477 
   2478 		pa = pmap_pte_pa(pte);
   2479 
   2480 		/* Get the physical page index */
   2481 
   2482 		/* Clear write flag */
   2483 		if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
   2484 			pvh = &vm_physmem[bank].pmseg.pvhead[off];
   2485 			simple_lock(&pvh->pvh_lock);
   2486 			(void) pmap_modify_pv(pmap, sva, pvh, PT_Wr, 0);
   2487 			pmap_vac_me_harder(pmap, pvh, ptes, FALSE);
   2488 			simple_unlock(&pvh->pvh_lock);
   2489 		}
   2490 
   2491 next:
   2492 		sva += NBPG;
   2493 		pte++;
   2494 	}
   2495 	pmap_unmap_ptes(pmap);
   2496 	PMAP_MAP_TO_HEAD_UNLOCK();
   2497 	if (flush)
   2498 		cpu_tlb_flushID();
   2499 }
   2500 
   2501 /*
   2502  * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
   2503  * int flags)
   2504  *
   2505  *      Insert the given physical page (p) at
   2506  *      the specified virtual address (v) in the
   2507  *      target physical map with the protection requested.
   2508  *
   2509  *      If specified, the page will be wired down, meaning
   2510  *      that the related pte can not be reclaimed.
   2511  *
   2512  *      NB:  This is the only routine which MAY NOT lazy-evaluate
   2513  *      or lose information.  That is, this routine must actually
   2514  *      insert this page into the given map NOW.
   2515  */
   2516 
   2517 int
   2518 pmap_enter(pmap, va, pa, prot, flags)
   2519 	struct pmap *pmap;
   2520 	vaddr_t va;
   2521 	paddr_t pa;
   2522 	vm_prot_t prot;
   2523 	int flags;
   2524 {
   2525 	pt_entry_t *pte, *ptes;
   2526 	u_int npte;
   2527 	int bank, off;
   2528 	paddr_t opa;
   2529 	int nflags;
   2530 	boolean_t wired = (flags & PMAP_WIRED) != 0;
   2531 	struct pv_entry *pve;
   2532 	struct pv_head	*pvh;
   2533 	int error;
   2534 
   2535 	PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
   2536 	    va, pa, pmap, prot, wired));
   2537 
   2538 #ifdef DIAGNOSTIC
   2539 	/* Valid address ? */
   2540 	if (va >= (KERNEL_VM_BASE + KERNEL_VM_SIZE))
   2541 		panic("pmap_enter: too big");
   2542 	if (pmap != pmap_kernel() && va != 0) {
   2543 		if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
   2544 			panic("pmap_enter: kernel page in user map");
   2545 	} else {
   2546 		if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
   2547 			panic("pmap_enter: user page in kernel map");
   2548 		if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
   2549 			panic("pmap_enter: entering PT page");
   2550 	}
   2551 #endif
   2552 	/* get lock */
   2553 	PMAP_MAP_TO_HEAD_LOCK();
   2554 	/*
   2555 	 * Get a pointer to the pte for this virtual address. If the
   2556 	 * pte pointer is NULL then we are missing the L2 page table
   2557 	 * so we need to create one.
   2558 	 */
   2559 	pte = pmap_pte(pmap, va);
   2560 	if (!pte) {
   2561 		struct vm_page *ptp;
   2562 
   2563 		/* if failure is allowed then don't try too hard */
   2564 		ptp = pmap_get_ptp(pmap, va, flags & PMAP_CANFAIL);
   2565 		if (ptp == NULL) {
   2566 			if (flags & PMAP_CANFAIL) {
   2567 				error = ENOMEM;
   2568 				goto out;
   2569 			}
   2570 			panic("pmap_enter: get ptp failed");
   2571 		}
   2572 
   2573 		pte = pmap_pte(pmap, va);
   2574 #ifdef DIAGNOSTIC
   2575 		if (!pte)
   2576 			panic("pmap_enter: no pte");
   2577 #endif
   2578 	}
   2579 
   2580 	nflags = 0;
   2581 	if (prot & VM_PROT_WRITE)
   2582 		nflags |= PT_Wr;
   2583 	if (wired)
   2584 		nflags |= PT_W;
   2585 
   2586 	/* More debugging info */
   2587 	PDEBUG(5, printf("pmap_enter: pte for V%08lx = V%p (%08x)\n", va, pte,
   2588 	    *pte));
   2589 
   2590 	/* Is the pte valid ? If so then this page is already mapped */
   2591 	if (pmap_pte_v(pte)) {
   2592 		/* Get the physical address of the current page mapped */
   2593 		opa = pmap_pte_pa(pte);
   2594 
   2595 #ifdef MYCROFT_HACK
   2596 		printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx\n", pmap, va, pa, opa);
   2597 #endif
   2598 
   2599 		/* Are we mapping the same page ? */
   2600 		if (opa == pa) {
   2601 			/* All we must be doing is changing the protection */
   2602 			PDEBUG(0, printf("Case 02 in pmap_enter (V%08lx P%08lx)\n",
   2603 			    va, pa));
   2604 
   2605 			/* Has the wiring changed ? */
   2606 			if ((bank = vm_physseg_find(atop(pa), &off)) != -1) {
   2607 				pvh = &vm_physmem[bank].pmseg.pvhead[off];
   2608 				simple_lock(&pvh->pvh_lock);
   2609 				(void) pmap_modify_pv(pmap, va, pvh,
   2610 				    PT_Wr | PT_W, nflags);
   2611 				simple_unlock(&pvh->pvh_lock);
   2612  			} else {
   2613 				pvh = NULL;
   2614 			}
   2615 		} else {
   2616 			/* We are replacing the page with a new one. */
   2617 			cpu_cache_purgeID_rng(va, NBPG);
   2618 
   2619 			PDEBUG(0, printf("Case 03 in pmap_enter (V%08lx P%08lx P%08lx)\n",
   2620 			    va, pa, opa));
   2621 
   2622 			/*
   2623 			 * If it is part of our managed memory then we
   2624 			 * must remove it from the PV list
   2625 			 */
   2626 			if ((bank = vm_physseg_find(atop(opa), &off)) != -1) {
   2627 				pvh = &vm_physmem[bank].pmseg.pvhead[off];
   2628 				simple_lock(&pvh->pvh_lock);
   2629 				pve = pmap_remove_pv(pvh, pmap, va);
   2630 				simple_unlock(&pvh->pvh_lock);
   2631 			} else {
   2632 				pve = NULL;
   2633 			}
   2634 
   2635 			goto enter;
   2636 		}
   2637 	} else {
   2638 		opa = 0;
   2639 		pve = NULL;
   2640 		pmap_pte_addref(pmap, va);
   2641 
   2642 		/* pte is not valid so we must be hooking in a new page */
   2643 		++pmap->pm_stats.resident_count;
   2644 
   2645 	enter:
   2646 		/*
   2647 		 * Enter on the PV list if part of our managed memory
   2648 		 */
   2649 		bank = vm_physseg_find(atop(pa), &off);
   2650 
   2651 		if (pmap_initialized && (bank != -1)) {
   2652 			pvh = &vm_physmem[bank].pmseg.pvhead[off];
   2653 			if (pve == NULL) {
   2654 				pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
   2655 				if (pve == NULL) {
   2656 					if (flags & PMAP_CANFAIL) {
   2657 						error = ENOMEM;
   2658 						goto out;
   2659 					}
   2660 					panic("pmap_enter: no pv entries available");
   2661 				}
   2662 			}
   2663 			/* enter_pv locks pvh when adding */
   2664 			pmap_enter_pv(pvh, pve, pmap, va, NULL, nflags);
   2665 		} else {
   2666 			pvh = NULL;
   2667 			if (pve != NULL)
   2668 				pmap_free_pv(pmap, pve);
   2669 		}
   2670 	}
   2671 
   2672 #ifdef MYCROFT_HACK
   2673 	if (mycroft_hack)
   2674 		printf("pmap_enter: pmap=%p va=%lx pa=%lx opa=%lx bank=%d off=%d pv=%p\n", pmap, va, pa, opa, bank, off, pv);
   2675 #endif
   2676 
   2677 	/* Construct the pte, giving the correct access. */
   2678 	npte = (pa & PG_FRAME);
   2679 
   2680 	/* VA 0 is magic. */
   2681 	if (pmap != pmap_kernel() && va != 0)
   2682 		npte |= PT_AP(AP_U);
   2683 
   2684 	if (pmap_initialized && bank != -1) {
   2685 #ifdef DIAGNOSTIC
   2686 		if ((flags & VM_PROT_ALL) & ~prot)
   2687 			panic("pmap_enter: access_type exceeds prot");
   2688 #endif
   2689 		npte |= PT_C | PT_B;
   2690 		if (flags & VM_PROT_WRITE) {
   2691 			npte |= L2_SPAGE | PT_AP(AP_W);
   2692 			vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
   2693 		} else if (flags & VM_PROT_ALL) {
   2694 			npte |= L2_SPAGE;
   2695 			vm_physmem[bank].pmseg.attrs[off] |= PT_H;
   2696 		} else
   2697 			npte |= L2_INVAL;
   2698 	} else {
   2699 		if (prot & VM_PROT_WRITE)
   2700 			npte |= L2_SPAGE | PT_AP(AP_W);
   2701 		else if (prot & VM_PROT_ALL)
   2702 			npte |= L2_SPAGE;
   2703 		else
   2704 			npte |= L2_INVAL;
   2705 	}
   2706 
   2707 #ifdef MYCROFT_HACK
   2708 	if (mycroft_hack)
   2709 		printf("pmap_enter: pmap=%p va=%lx pa=%lx prot=%x wired=%d access_type=%x npte=%08x\n", pmap, va, pa, prot, wired, flags & VM_PROT_ALL, npte);
   2710 #endif
   2711 
   2712 	*pte = npte;
   2713 
   2714 	if (pmap_initialized && bank != -1)
   2715 	{
   2716 		boolean_t pmap_active = FALSE;
   2717 		/* XXX this will change once the whole of pmap_enter uses
   2718 		 * map_ptes
   2719 		 */
   2720 		ptes = pmap_map_ptes(pmap);
   2721 		if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap)
   2722 		    || (pmap == pmap_kernel()))
   2723 			pmap_active = TRUE;
   2724 		simple_lock(&pvh->pvh_lock);
   2725  		pmap_vac_me_harder(pmap, pvh, ptes, pmap_active);
   2726 		simple_unlock(&pvh->pvh_lock);
   2727 		pmap_unmap_ptes(pmap);
   2728 	}
   2729 
   2730 	/* Better flush the TLB ... */
   2731 	cpu_tlb_flushID_SE(va);
   2732 	error = 0;
   2733 out:
   2734 	PMAP_MAP_TO_HEAD_UNLOCK();
   2735 	PDEBUG(5, printf("pmap_enter: pte = V%p %08x\n", pte, *pte));
   2736 
   2737 	return error;
   2738 }
   2739 
   2740 void
   2741 pmap_kenter_pa(va, pa, prot)
   2742 	vaddr_t va;
   2743 	paddr_t pa;
   2744 	vm_prot_t prot;
   2745 {
   2746 	struct pmap *pmap = pmap_kernel();
   2747 	pt_entry_t *pte;
   2748 	struct vm_page *pg;
   2749 
   2750 	if (!pmap_pde_v(pmap_pde(pmap, va))) {
   2751 
   2752 		/*
   2753 		 * For the kernel pmaps it would be better to ensure
   2754 		 * that they are always present, and to grow the
   2755 		 * kernel as required.
   2756 		 */
   2757 
   2758 		/* Allocate a page table */
   2759 		pg = uvm_pagealloc(&(pmap_kernel()->pm_obj), 0, NULL,
   2760 		    UVM_PGA_USERESERVE | UVM_PGA_ZERO);
   2761 		if (pg == NULL) {
   2762 			panic("pmap_kenter_pa: no free pages");
   2763 		}
   2764 		pg->flags &= ~PG_BUSY;	/* never busy */
   2765 
   2766 		/* Wire this page table into the L1. */
   2767 		pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(pg), TRUE);
   2768 	}
   2769 	pte = vtopte(va);
   2770 	KASSERT(!pmap_pte_v(pte));
   2771 	*pte = L2_PTE(pa, AP_KRW);
   2772 }
   2773 
   2774 void
   2775 pmap_kremove(va, len)
   2776 	vaddr_t va;
   2777 	vsize_t len;
   2778 {
   2779 	pt_entry_t *pte;
   2780 
   2781 	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
   2782 
   2783 		/*
   2784 		 * We assume that we will only be called with small
   2785 		 * regions of memory.
   2786 		 */
   2787 
   2788 		KASSERT(pmap_pde_v(pmap_pde(pmap_kernel(), va)));
   2789 		pte = vtopte(va);
   2790 		cpu_cache_purgeID_rng(va, PAGE_SIZE);
   2791 		*pte = 0;
   2792 		cpu_tlb_flushID_SE(va);
   2793 	}
   2794 }
   2795 
   2796 /*
   2797  * pmap_page_protect:
   2798  *
   2799  * Lower the permission for all mappings to a given page.
   2800  */
   2801 
   2802 void
   2803 pmap_page_protect(pg, prot)
   2804 	struct vm_page *pg;
   2805 	vm_prot_t prot;
   2806 {
   2807 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   2808 
   2809 	PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", pa, prot));
   2810 
   2811 	switch(prot) {
   2812 	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
   2813 	case VM_PROT_READ|VM_PROT_WRITE:
   2814 		return;
   2815 
   2816 	case VM_PROT_READ:
   2817 	case VM_PROT_READ|VM_PROT_EXECUTE:
   2818 		pmap_copy_on_write(pa);
   2819 		break;
   2820 
   2821 	default:
   2822 		pmap_remove_all(pa);
   2823 		break;
   2824 	}
   2825 }
   2826 
   2827 
   2828 /*
   2829  * Routine:	pmap_unwire
   2830  * Function:	Clear the wired attribute for a map/virtual-address
   2831  *		pair.
   2832  * In/out conditions:
   2833  *		The mapping must already exist in the pmap.
   2834  */
   2835 
   2836 void
   2837 pmap_unwire(pmap, va)
   2838 	struct pmap *pmap;
   2839 	vaddr_t va;
   2840 {
   2841 	pt_entry_t *pte;
   2842 	paddr_t pa;
   2843 	int bank, off;
   2844 	struct pv_head *pvh;
   2845 
   2846 	/*
   2847 	 * Make sure pmap is valid. -dct
   2848 	 */
   2849 	if (pmap == NULL)
   2850 		return;
   2851 
   2852 	/* Get the pte */
   2853 	pte = pmap_pte(pmap, va);
   2854 	if (!pte)
   2855 		return;
   2856 
   2857 	/* Extract the physical address of the page */
   2858 	pa = pmap_pte_pa(pte);
   2859 
   2860 	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
   2861 		return;
   2862 	pvh = &vm_physmem[bank].pmseg.pvhead[off];
   2863 	simple_lock(&pvh->pvh_lock);
   2864 	/* Update the wired bit in the pv entry for this page. */
   2865 	(void) pmap_modify_pv(pmap, va, pvh, PT_W, 0);
   2866 	simple_unlock(&pvh->pvh_lock);
   2867 }
   2868 
   2869 /*
   2870  * pt_entry_t *pmap_pte(struct pmap *pmap, vaddr_t va)
   2871  *
   2872  * Return the pointer to a page table entry corresponding to the supplied
   2873  * virtual address.
   2874  *
   2875  * The page directory is first checked to make sure that a page table
   2876  * for the address in question exists and if it does a pointer to the
   2877  * entry is returned.
   2878  *
    2879  * The way this works is that the kernel page tables are mapped
   2880  * into the memory map at ALT_PAGE_TBLS_BASE to ALT_PAGE_TBLS_BASE+4MB.
   2881  * This allows page tables to be located quickly.
   2882  */
   2883 pt_entry_t *
   2884 pmap_pte(pmap, va)
   2885 	struct pmap *pmap;
   2886 	vaddr_t va;
   2887 {
   2888 	pt_entry_t *ptp;
   2889 	pt_entry_t *result;
   2890 
   2891 	/* The pmap must be valid */
   2892 	if (!pmap)
   2893 		return(NULL);
   2894 
   2895 	/* Return the address of the pte */
   2896 	PDEBUG(10, printf("pmap_pte: pmap=%p va=V%08lx pde = V%p (%08X)\n",
   2897 	    pmap, va, pmap_pde(pmap, va), *(pmap_pde(pmap, va))));
   2898 
   2899 	/* Do we have a valid pde ? If not we don't have a page table */
   2900 	if (!pmap_pde_v(pmap_pde(pmap, va))) {
   2901 		PDEBUG(0, printf("pmap_pte: failed - pde = %p\n",
   2902 		    pmap_pde(pmap, va)));
   2903 		return(NULL);
   2904 	}
   2905 
   2906 	PDEBUG(10, printf("pmap pagetable = P%08lx current = P%08x\n",
   2907 	    pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
   2908 	    + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
   2909 	    (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)));
   2910 
   2911 	/*
   2912 	 * If the pmap is the kernel pmap or the pmap is the active one
   2913 	 * then we can just return a pointer to entry relative to
   2914 	 * PROCESS_PAGE_TBLS_BASE.
   2915 	 * Otherwise we need to map the page tables to an alternative
   2916 	 * address and reference them there.
   2917 	 */
   2918 	if (pmap == pmap_kernel() || pmap->pm_pptpt
   2919 	    == (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
   2920 	    + ((PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) &
   2921 	    ~3) + (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
   2922 		ptp = (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
   2923 	} else {
   2924 		struct proc *p = curproc;
   2925 
   2926 		/* If we don't have a valid curproc use proc0 */
   2927 		/* Perhaps we should just use kernel_pmap instead */
   2928 		if (p == NULL)
   2929 			p = &proc0;
   2930 #ifdef DIAGNOSTIC
   2931 		/*
   2932 		 * The pmap should always be valid for the process so
   2933 		 * panic if it is not.
   2934 		 */
   2935 		if (!p->p_vmspace || !p->p_vmspace->vm_map.pmap) {
   2936 			printf("pmap_pte: va=%08lx p=%p vm=%p\n",
   2937 			    va, p, p->p_vmspace);
   2938 			console_debugger();
   2939 		}
   2940 		/*
   2941 		 * The pmap for the current process should be mapped. If it
   2942 		 * is not then we have a problem.
   2943 		 */
   2944 		if (p->p_vmspace->vm_map.pmap->pm_pptpt !=
   2945 		    (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
   2946 		    + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
   2947 		    (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) & PG_FRAME)) {
   2948 			printf("pmap pagetable = P%08lx current = P%08x ",
   2949 			    pmap->pm_pptpt, (*((pt_entry_t *)(PROCESS_PAGE_TBLS_BASE
   2950 			    + (PROCESS_PAGE_TBLS_BASE >> (PGSHIFT - 2)) +
   2951 			    (PROCESS_PAGE_TBLS_BASE >> PDSHIFT))) &
   2952 			    PG_FRAME));
   2953 			printf("pptpt=%lx\n", p->p_vmspace->vm_map.pmap->pm_pptpt);
   2954 			panic("pmap_pte: current and pmap mismatch\n");
   2955 		}
   2956 #endif
   2957 
   2958 		ptp = (pt_entry_t *)ALT_PAGE_TBLS_BASE;
   2959 		pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
   2960 		    pmap->pm_pptpt, FALSE);
   2961 		cpu_tlb_flushD();
   2962 	}
   2963 	PDEBUG(10, printf("page tables base = %p offset=%lx\n", ptp,
   2964 	    ((va >> (PGSHIFT-2)) & ~3)));
   2965 	result = (pt_entry_t *)((char *)ptp + ((va >> (PGSHIFT-2)) & ~3));
   2966 	return(result);
   2967 }
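
/*
 * A compilable sketch of the PTE address calculation above, assuming the
 * usual 4KB pages (PGSHIFT == 12) and 4-byte PTEs.  With all the page
 * tables mapped linearly at a fixed base, the PTE for a virtual address
 * sits at base + (va >> 10), rounded down to a PTE boundary.  The names
 * below are invented for the example.
 */
#if 0
#include <stdint.h>
#include <assert.h>

#define EX_PGSHIFT	12		/* 4KB pages (assumed) */

static uintptr_t
example_pte_addr(uintptr_t pt_window_base, uintptr_t va)
{
	return pt_window_base + ((va >> (EX_PGSHIFT - 2)) & ~(uintptr_t)3);
}

static void
example_pte_addr_check(void)
{
	/* The PTE for the second 4KB page is the second 4-byte entry. */
	assert(example_pte_addr(0, 0x2000) == 8);
}
#endif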
   2968 
   2969 /*
   2970  * Routine:  pmap_extract
   2971  * Function:
   2972  *           Extract the physical page address associated
   2973  *           with the given map/virtual_address pair.
   2974  */
   2975 boolean_t
   2976 pmap_extract(pmap, va, pap)
   2977 	struct pmap *pmap;
   2978 	vaddr_t va;
   2979 	paddr_t *pap;
   2980 {
   2981 	pt_entry_t *pte, *ptes;
   2982 	paddr_t pa;
   2983 
   2984 	PDEBUG(5, printf("pmap_extract: pmap=%p, va=V%08lx\n", pmap, va));
   2985 
   2986 	/*
   2987 	 * Get the pte for this virtual address.
   2988 	 */
   2989 	ptes = pmap_map_ptes(pmap);
   2990 	pte = &ptes[arm_byte_to_page(va)];
   2991 
   2992 	/*
   2993 	 * If there is no pte then there is no page table etc.
    2994 	 * Is the pte valid ? If not then no page is actually mapped here
   2995 	 */
   2996 	if (!pmap_pde_v(pmap_pde(pmap, va)) || !pmap_pte_v(pte)){
   2997 	    pmap_unmap_ptes(pmap);
   2998     	    return (FALSE);
   2999 	}
   3000 
   3001 	/* Return the physical address depending on the PTE type */
   3002 	/* XXX What about L1 section mappings ? */
   3003 	if ((*(pte) & L2_MASK) == L2_LPAGE) {
   3004 		/* Extract the physical address from the pte */
   3005 		pa = (*(pte)) & ~(L2_LPAGE_SIZE - 1);
   3006 
   3007 		PDEBUG(5, printf("pmap_extract: LPAGE pa = P%08lx\n",
   3008 		    (pa | (va & (L2_LPAGE_SIZE - 1)))));
   3009 
   3010 		if (pap != NULL)
   3011 			*pap = pa | (va & (L2_LPAGE_SIZE - 1));
   3012 	} else {
   3013 		/* Extract the physical address from the pte */
   3014 		pa = pmap_pte_pa(pte);
   3015 
   3016 		PDEBUG(5, printf("pmap_extract: SPAGE pa = P%08lx\n",
   3017 		    (pa | (va & ~PG_FRAME))));
   3018 
   3019 		if (pap != NULL)
   3020 			*pap = pa | (va & ~PG_FRAME);
   3021 	}
   3022 	pmap_unmap_ptes(pmap);
   3023 	return (TRUE);
   3024 }
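
/*
 * Illustrative sketch (assumed constants, invented names) of how
 * pmap_extract combines the frame address from the PTE with the offset from
 * the virtual address: the offset mask is 4KB - 1 for small pages and
 * 64KB - 1 for large pages.
 */
#if 0
#include <assert.h>

#define EX_SPAGE_SIZE	0x1000UL	/* 4KB small page (assumed)  */
#define EX_LPAGE_SIZE	0x10000UL	/* 64KB large page (assumed) */

static unsigned long
example_pa(unsigned long frame, unsigned long va, unsigned long pgsize)
{
	return (frame & ~(pgsize - 1)) | (va & (pgsize - 1));
}

static void
example_pa_check(void)
{
	assert(example_pa(0x12345000UL, 0x00abc123UL, EX_SPAGE_SIZE)
	    == 0x12345123UL);
	assert(example_pa(0x12340000UL, 0x00abc123UL, EX_LPAGE_SIZE)
	    == 0x1234c123UL);
}
#endif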
   3025 
   3026 
   3027 /*
   3028  * Copy the range specified by src_addr/len from the source map to the
   3029  * range dst_addr/len in the destination map.
   3030  *
   3031  * This routine is only advisory and need not do anything.
   3032  */
   3033 
   3034 void
   3035 pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
   3036 	struct pmap *dst_pmap;
   3037 	struct pmap *src_pmap;
   3038 	vaddr_t dst_addr;
   3039 	vsize_t len;
   3040 	vaddr_t src_addr;
   3041 {
   3042 	PDEBUG(0, printf("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
   3043 	    dst_pmap, src_pmap, dst_addr, len, src_addr));
   3044 }
   3045 
   3046 #if defined(PMAP_DEBUG)
   3047 void
   3048 pmap_dump_pvlist(phys, m)
   3049 	vaddr_t phys;
   3050 	char *m;
   3051 {
   3052 	struct pv_head *pvh;
   3053 	struct pv_entry *pv;
   3054 	int bank, off;
   3055 
   3056 	if ((bank = vm_physseg_find(atop(phys), &off)) == -1) {
   3057 		printf("INVALID PA\n");
   3058 		return;
   3059 	}
   3060 	pvh = &vm_physmem[bank].pmseg.pvhead[off];
   3061 	simple_lock(&pvh->pvh_lock);
   3062 	printf("%s %08lx:", m, phys);
   3063 	if (pvh->pvh_list == NULL) {
    3064 		printf(" no mappings\n");
		simple_unlock(&pvh->pvh_lock);
    3065 		return;
   3066 	}
   3067 
   3068 	for (pv = pvh->pvh_list; pv; pv = pv->pv_next)
   3069 		printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
   3070 		    pv->pv_va, pv->pv_flags);
   3071 
   3072 	printf("\n");
   3073 	simple_unlock(&pvh->pvh_lock);
   3074 }
   3075 
   3076 #endif	/* PMAP_DEBUG */
   3077 
   3078 boolean_t
   3079 pmap_testbit(pa, setbits)
   3080 	paddr_t pa;
   3081 	int setbits;
   3082 {
   3083 	int bank, off;
   3084 
   3085 	PDEBUG(1, printf("pmap_testbit: pa=%08lx set=%08x\n", pa, setbits));
   3086 
   3087 	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
   3088 		return(FALSE);
   3089 
   3090 	/*
   3091 	 * Check saved info only
   3092 	 */
   3093 	if (vm_physmem[bank].pmseg.attrs[off] & setbits) {
   3094 		PDEBUG(0, printf("pmap_attributes = %02x\n",
   3095 		    vm_physmem[bank].pmseg.attrs[off]));
   3096 		return(TRUE);
   3097 	}
   3098 
   3099 	return(FALSE);
   3100 }
   3101 
   3102 static pt_entry_t *
   3103 pmap_map_ptes(struct pmap *pmap)
   3104 {
   3105     	struct proc *p;
   3106 
   3107     	/* the kernel's pmap is always accessible */
   3108 	if (pmap == pmap_kernel()) {
   3109 		return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE ;
   3110 	}
   3111 
   3112 	if (pmap_is_curpmap(pmap)) {
   3113 		simple_lock(&pmap->pm_obj.vmobjlock);
   3114 		return (pt_entry_t *)PROCESS_PAGE_TBLS_BASE;
   3115 	}
   3116 
   3117 	p = curproc;
   3118 
   3119 	if (p == NULL)
   3120 		p = &proc0;
   3121 
   3122 	/* need to lock both curpmap and pmap: use ordered locking */
   3123 	if ((unsigned) pmap < (unsigned) curproc->p_vmspace->vm_map.pmap) {
   3124 		simple_lock(&pmap->pm_obj.vmobjlock);
   3125 		simple_lock(&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3126 	} else {
   3127 		simple_lock(&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3128 		simple_lock(&pmap->pm_obj.vmobjlock);
   3129 	}
   3130 
   3131 	pmap_map_in_l1(p->p_vmspace->vm_map.pmap, ALT_PAGE_TBLS_BASE,
   3132 			pmap->pm_pptpt, FALSE);
   3133 	cpu_tlb_flushD();
   3134 	return (pt_entry_t *)ALT_PAGE_TBLS_BASE;
   3135 }
   3136 
   3137 /*
   3138  * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
   3139  */
   3140 
   3141 static void
   3142 pmap_unmap_ptes(pmap)
   3143 	struct pmap *pmap;
   3144 {
   3145 	if (pmap == pmap_kernel()) {
   3146 		return;
   3147 	}
   3148 	if (pmap_is_curpmap(pmap)) {
   3149 		simple_unlock(&pmap->pm_obj.vmobjlock);
   3150 	} else {
   3151 		simple_unlock(&pmap->pm_obj.vmobjlock);
   3152 		simple_unlock(&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3153 	}
   3154 }
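
/*
 * A minimal sketch (invented lock type and functions) of the ordered
 * locking used in pmap_map_ptes above: when two pmaps must be locked, the
 * one at the lower address is always taken first so that concurrent callers
 * cannot deadlock against each other.
 */
#if 0
struct ex_lock;
void ex_lock_acquire(struct ex_lock *);

static void
example_lock_two(struct ex_lock *a, struct ex_lock *b)
{
	if ((unsigned long)a < (unsigned long)b) {
		ex_lock_acquire(a);	/* lower address first */
		ex_lock_acquire(b);
	} else {
		ex_lock_acquire(b);	/* lower address first */
		ex_lock_acquire(a);
	}
}
#endif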
   3155 
   3156 /*
   3157  * Modify pte bits for all ptes corresponding to the given physical address.
   3158  * We use `maskbits' rather than `clearbits' because we're always passing
   3159  * constants and the latter would require an extra inversion at run-time.
   3160  */
   3161 
   3162 void
   3163 pmap_clearbit(pa, maskbits)
   3164 	paddr_t pa;
   3165 	int maskbits;
   3166 {
   3167 	struct pv_entry *pv;
   3168 	struct pv_head *pvh;
   3169 	pt_entry_t *pte;
   3170 	vaddr_t va;
   3171 	int bank, off;
   3172 
   3173 	PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
   3174 	    pa, maskbits));
   3175 	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
   3176 		return;
   3177 	PMAP_HEAD_TO_MAP_LOCK();
   3178 	pvh = &vm_physmem[bank].pmseg.pvhead[off];
   3179 	simple_lock(&pvh->pvh_lock);
   3180 
   3181 	/*
   3182 	 * Clear saved attributes (modify, reference)
   3183 	 */
   3184 	vm_physmem[bank].pmseg.attrs[off] &= ~maskbits;
   3185 
   3186 	if (pvh->pvh_list == NULL) {
   3187 		simple_unlock(&pvh->pvh_lock);
   3188 		PMAP_HEAD_TO_MAP_UNLOCK();
   3189 		return;
   3190 	}
   3191 
   3192 	/*
    3193 	 * Loop over all current mappings, setting/clearing as appropriate.
   3194 	 */
   3195 	for (pv = pvh->pvh_list; pv; pv = pv->pv_next) {
   3196 		va = pv->pv_va;
   3197 
   3198 		/*
   3199 		 * XXX don't write protect pager mappings
   3200 		 */
   3201 		if (va >= uvm.pager_sva && va < uvm.pager_eva) {
   3202 			printf("pmap_clearbit: found page VA on pv_list\n");
   3203 			continue;
   3204 		}
   3205 
   3206 		pv->pv_flags &= ~maskbits;
   3207 		pte = pmap_pte(pv->pv_pmap, va);
   3208 		KASSERT(pte != NULL);
   3209 		if (maskbits & (PT_Wr|PT_M))
   3210 			*pte &= ~PT_AP(AP_W);
   3211 		if (maskbits & PT_H)
   3212 			*pte = (*pte & ~L2_MASK) | L2_INVAL;
   3213 	}
   3214 	simple_unlock(&pvh->pvh_lock);
   3215 	PMAP_HEAD_TO_MAP_UNLOCK();
   3216 	cpu_tlb_flushID();
   3217 
   3218 }
   3219 
   3220 
   3221 boolean_t
   3222 pmap_clear_modify(pg)
   3223 	struct vm_page *pg;
   3224 {
   3225 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   3226 	boolean_t rv;
   3227 
   3228 	PDEBUG(0, printf("pmap_clear_modify pa=%08lx\n", pa));
   3229 	rv = pmap_testbit(pa, PT_M);
   3230 	pmap_clearbit(pa, PT_M);
   3231 	return rv;
   3232 }
   3233 
   3234 
   3235 boolean_t
   3236 pmap_clear_reference(pg)
   3237 	struct vm_page *pg;
   3238 {
   3239 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   3240 	boolean_t rv;
   3241 
   3242 	PDEBUG(0, printf("pmap_clear_reference pa=%08lx\n", pa));
   3243 	rv = pmap_testbit(pa, PT_H);
   3244 	pmap_clearbit(pa, PT_H);
   3245 	return rv;
   3246 }
   3247 
   3248 
   3249 void
   3250 pmap_copy_on_write(pa)
   3251 	paddr_t pa;
   3252 {
   3253 	PDEBUG(0, printf("pmap_copy_on_write pa=%08lx\n", pa));
   3254 	pmap_clearbit(pa, PT_Wr);
   3255 }
   3256 
   3257 
   3258 boolean_t
   3259 pmap_is_modified(pg)
   3260 	struct vm_page *pg;
   3261 {
   3262 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   3263 	boolean_t result;
   3264 
   3265 	result = pmap_testbit(pa, PT_M);
   3266 	PDEBUG(1, printf("pmap_is_modified pa=%08lx %x\n", pa, result));
   3267 	return (result);
   3268 }
   3269 
   3270 
   3271 boolean_t
   3272 pmap_is_referenced(pg)
   3273 	struct vm_page *pg;
   3274 {
   3275 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   3276 	boolean_t result;
   3277 
   3278 	result = pmap_testbit(pa, PT_H);
   3279 	PDEBUG(0, printf("pmap_is_referenced pa=%08lx %x\n", pa, result));
   3280 	return (result);
   3281 }
   3282 
   3283 
   3284 int
   3285 pmap_modified_emulation(pmap, va)
   3286 	struct pmap *pmap;
   3287 	vaddr_t va;
   3288 {
   3289 	pt_entry_t *pte;
   3290 	paddr_t pa;
   3291 	int bank, off;
   3292 	struct pv_head *pvh;
   3293 	u_int flags;
   3294 
   3295 	PDEBUG(2, printf("pmap_modified_emulation\n"));
   3296 
   3297 	/* Get the pte */
   3298 	pte = pmap_pte(pmap, va);
   3299 	if (!pte) {
   3300 		PDEBUG(2, printf("no pte\n"));
   3301 		return(0);
   3302 	}
   3303 
   3304 	PDEBUG(1, printf("*pte=%08x\n", *pte));
   3305 
   3306 	/* Check for a zero pte */
   3307 	if (*pte == 0)
   3308 		return(0);
   3309 
   3310 	/* This can happen if user code tries to access kernel memory. */
   3311 	if ((*pte & PT_AP(AP_W)) != 0)
   3312 		return (0);
   3313 
   3314 	/* Extract the physical address of the page */
   3315 	pa = pmap_pte_pa(pte);
   3316 	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
   3317 		return(0);
   3318 
   3319 	PMAP_HEAD_TO_MAP_LOCK();
   3320 	/* Get the current flags for this page. */
   3321 	pvh = &vm_physmem[bank].pmseg.pvhead[off];
   3322 	/* XXX: needed if we hold head->map lock? */
   3323 	simple_lock(&pvh->pvh_lock);
   3324 
   3325 	flags = pmap_modify_pv(pmap, va, pvh, 0, 0);
   3326 	PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
   3327 
   3328 	/*
   3329 	 * Do the flags say this page is writable ? If not then it is a
   3330 	 * genuine write fault. If yes then the write fault is our fault
    3331 	 * as we did not reflect the write access in the PTE. Now that we
    3332 	 * know a write has occurred, we can correct this and also set the
    3333 	 * modified bit.
   3334 	 */
   3335 	if (~flags & PT_Wr) {
   3336 	    	simple_unlock(&pvh->pvh_lock);
   3337 		PMAP_HEAD_TO_MAP_UNLOCK();
   3338 		return(0);
   3339 	}
   3340 
   3341 	PDEBUG(0, printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %p (%08x)\n",
   3342 	    va, pte, *pte));
   3343 	vm_physmem[bank].pmseg.attrs[off] |= PT_H | PT_M;
   3344 	*pte = (*pte & ~L2_MASK) | L2_SPAGE | PT_AP(AP_W);
   3345 	PDEBUG(0, printf("->(%08x)\n", *pte));
   3346 
   3347 	simple_unlock(&pvh->pvh_lock);
   3348 	PMAP_HEAD_TO_MAP_UNLOCK();
   3349 	/* Return, indicating the problem has been dealt with */
   3350 	cpu_tlb_flushID_SE(va);
   3351 	return(1);
   3352 }
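
/*
 * A standalone sketch (invented names) of the referenced/modified emulation
 * implemented by pmap_handled_emulation and pmap_modified_emulation: pages
 * start out invalid, the first access makes them valid and records the
 * "referenced" attribute, and the first write to a page mapped read-only
 * but flagged writable grants write permission and records "modified".
 */
#if 0
struct ex_page {
	int valid;		/* PTE marked valid (L2_SPAGE vs L2_INVAL) */
	int writable;		/* PTE grants write access (AP_W)          */
	int referenced;		/* software-maintained PT_H equivalent     */
	int modified;		/* software-maintained PT_M equivalent     */
};

static void
example_handled_fault(struct ex_page *p)
{
	p->valid = 1;		/* allow the access to proceed */
	p->referenced = 1;	/* record that the page was touched */
}

static void
example_modified_fault(struct ex_page *p)
{
	p->writable = 1;	/* reflect the write permission in the PTE */
	p->referenced = 1;
	p->modified = 1;	/* record that the page is now dirty */
}
#endif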
   3353 
   3354 
   3355 int
   3356 pmap_handled_emulation(pmap, va)
   3357 	struct pmap *pmap;
   3358 	vaddr_t va;
   3359 {
   3360 	pt_entry_t *pte;
   3361 	paddr_t pa;
   3362 	int bank, off;
   3363 
   3364 	PDEBUG(2, printf("pmap_handled_emulation\n"));
   3365 
   3366 	/* Get the pte */
   3367 	pte = pmap_pte(pmap, va);
   3368 	if (!pte) {
   3369 		PDEBUG(2, printf("no pte\n"));
   3370 		return(0);
   3371 	}
   3372 
   3373 	PDEBUG(1, printf("*pte=%08x\n", *pte));
   3374 
   3375 	/* Check for a zero pte */
   3376 	if (*pte == 0)
   3377 		return(0);
   3378 
   3379 	/* This can happen if user code tries to access kernel memory. */
   3380 	if ((*pte & L2_MASK) != L2_INVAL)
   3381 		return (0);
   3382 
   3383 	/* Extract the physical address of the page */
   3384 	pa = pmap_pte_pa(pte);
   3385 	if ((bank = vm_physseg_find(atop(pa), &off)) == -1)
   3386 		return(0);
   3387 
    3388 	/*
    3389 	 * OK, we just enable the PTE and mark the attributes as handled.
    3390 	 */
   3391 	PDEBUG(0, printf("pmap_handled_emulation: Got a hit va=%08lx pte = %p (%08x)\n",
   3392 	    va, pte, *pte));
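         	/*
         	 * Record the reference in the page attributes and make the PTE a
         	 * valid small page; the access permissions are left alone, so
         	 * modified-bit emulation still works.
         	 */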
   3393 	vm_physmem[bank].pmseg.attrs[off] |= PT_H;
   3394 	*pte = (*pte & ~L2_MASK) | L2_SPAGE;
   3395 	PDEBUG(0, printf("->(%08x)\n", *pte));
   3396 
   3397 	/* Return, indicating the problem has been dealt with */
   3398 	cpu_tlb_flushID_SE(va);
   3399 	return(1);
   3400 }
    3401 
   3405 /*
   3406  * pmap_collect: free resources held by a pmap
   3407  *
   3408  * => optional function.
   3409  * => called when a process is swapped out to free memory.
   3410  */
   3411 
   3412 void
   3413 pmap_collect(pmap)
   3414 	struct pmap *pmap;
   3415 {
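         	/*
         	 * Nothing to do: this pmap does not currently release any
         	 * resources when a process is swapped out.
         	 */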
   3416 }
   3417 
   3418 /*
   3419  * Routine:	pmap_procwr
   3420  *
   3421  * Function:
    3422  *	Synchronize caches corresponding to [va, va+len) in process p.
   3423  *
   3424  */
   3425 void
   3426 pmap_procwr(p, va, len)
   3427 	struct proc	*p;
   3428 	vaddr_t		va;
   3429 	int		len;
   3430 {
   3431 	/* We only need to do anything if it is the current process. */
   3432 	if (p == curproc)
   3433 		cpu_cache_syncI_rng(va, len);
    3434 }

    3435 /*
   3436  * PTP functions
   3437  */
   3438 
   3439 /*
   3440  * pmap_steal_ptp: Steal a PTP from somewhere else.
   3441  *
    3442  * This is just a placeholder; for now we never steal.
   3443  */
   3444 
   3445 static struct vm_page *
   3446 pmap_steal_ptp(struct pmap *pmap, vaddr_t va)
   3447 {
    3448 	return (NULL);
   3449 }
   3450 
   3451 /*
   3452  * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
   3453  *
   3454  * => pmap should NOT be pmap_kernel()
   3455  * => pmap should be locked
   3456  */
   3457 
   3458 static struct vm_page *
   3459 pmap_get_ptp(struct pmap *pmap, vaddr_t va, boolean_t just_try)
    3460 {
    3461 	struct vm_page *ptp;
    3462 
    3463 	if (pmap_pde_v(pmap_pde(pmap, va))) {
    3464 
    3465 		/* valid... check hint (saves us a PA->PG lookup) */
    3466 #if 0
    3467 		if (pmap->pm_ptphint &&
    3468 		    ((unsigned)pmap_pde(pmap, va) & PG_FRAME) ==
    3469 		    VM_PAGE_TO_PHYS(pmap->pm_ptphint))
    3470 			return (pmap->pm_ptphint);
    3471 #endif
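         		/* No usable hint; look the PTP up by its offset (va) in pm_obj. */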
    3472 		ptp = uvm_pagelookup(&pmap->pm_obj, va);
    3473 #ifdef DIAGNOSTIC
    3474 		if (ptp == NULL)
    3475 			panic("pmap_get_ptp: unmanaged user PTP");
    3476 #endif
    3477 		/* pmap->pm_ptphint = ptp; */
    3478 		return (ptp);
    3479 	}
    3480 
    3481 	/* allocate a new PTP (updates ptphint) */
    3482 	return (pmap_alloc_ptp(pmap, va, just_try));
    3483 }
   3484 
   3485 /*
   3486  * pmap_alloc_ptp: allocate a PTP for a PMAP
   3487  *
   3488  * => pmap should already be locked by caller
   3489  * => we use the ptp's wire_count to count the number of active mappings
   3490  *	in the PTP (we start it at one to prevent any chance this PTP
   3491  *	will ever leak onto the active/inactive queues)
   3492  */
   3493 
   3494 /*__inline */ static struct vm_page *
   3495 pmap_alloc_ptp(struct pmap *pmap, vaddr_t va, boolean_t just_try)
   3496 {
   3497 	struct vm_page *ptp;
   3498 
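         	/*
         	 * Try to allocate a zeroed page from UVM, dipping into the
         	 * reserve pool if necessary.
         	 */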
   3499 	ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
   3500 		UVM_PGA_USERESERVE|UVM_PGA_ZERO);
   3501 	if (ptp == NULL) {
    3502 		if (just_try)
    3503 			return (NULL);
    3504 
    3505 		ptp = pmap_steal_ptp(pmap, va);
    3506 
    3507 		if (ptp == NULL)
    3508 			return (NULL);
    3509 		/* Stole a page, zero it.  */
    3510 		pmap_zero_page(VM_PAGE_TO_PHYS(ptp));
   3511 	}
   3512 
   3513 	/* got one! */
   3514 	ptp->flags &= ~PG_BUSY;	/* never busy */
   3515 	ptp->wire_count = 1;	/* no mappings yet */
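         	/* Hook the new PTP into the L1 table so that it maps va. */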
   3516 	pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
   3517 	pmap->pm_stats.resident_count++;	/* count PTP as resident */
    3518 	/* pmap->pm_ptphint = ptp; */
   3519 	return (ptp);
   3520 }
   3521 
   3522 /* End of pmap.c */
   3523