Home | History | Annotate | Line # | Download | only in arm32
pmap.c revision 1.97.4.5
      1 /*	$NetBSD: pmap.c,v 1.97.4.5 2002/12/07 20:44:23 he Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2002 Wasabi Systems, Inc.
      5  * Copyright (c) 2001 Richard Earnshaw
      6  * Copyright (c) 2001 Christopher Gilbert
      7  * All rights reserved.
      8  *
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  * 3. The name of the company nor the name of the author may be used to
     15  *    endorse or promote products derived from this software without specific
     16  *    prior written permission.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
     19  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     20  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
     22  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     28  * SUCH DAMAGE.
     29  */
     30 
     31 /*-
     32  * Copyright (c) 1999 The NetBSD Foundation, Inc.
     33  * All rights reserved.
     34  *
     35  * This code is derived from software contributed to The NetBSD Foundation
     36  * by Charles M. Hannum.
     37  *
     38  * Redistribution and use in source and binary forms, with or without
     39  * modification, are permitted provided that the following conditions
     40  * are met:
     41  * 1. Redistributions of source code must retain the above copyright
     42  *    notice, this list of conditions and the following disclaimer.
     43  * 2. Redistributions in binary form must reproduce the above copyright
     44  *    notice, this list of conditions and the following disclaimer in the
     45  *    documentation and/or other materials provided with the distribution.
     46  * 3. All advertising materials mentioning features or use of this software
     47  *    must display the following acknowledgement:
     48  *        This product includes software developed by the NetBSD
     49  *        Foundation, Inc. and its contributors.
     50  * 4. Neither the name of The NetBSD Foundation nor the names of its
     51  *    contributors may be used to endorse or promote products derived
     52  *    from this software without specific prior written permission.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     55  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     56  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     57  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     58  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     59  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     60  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     61  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     62  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     63  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     64  * POSSIBILITY OF SUCH DAMAGE.
     65  */
     66 
     67 /*
     68  * Copyright (c) 1994-1998 Mark Brinicombe.
     69  * Copyright (c) 1994 Brini.
     70  * All rights reserved.
     71  *
     72  * This code is derived from software written for Brini by Mark Brinicombe
     73  *
     74  * Redistribution and use in source and binary forms, with or without
     75  * modification, are permitted provided that the following conditions
     76  * are met:
     77  * 1. Redistributions of source code must retain the above copyright
     78  *    notice, this list of conditions and the following disclaimer.
     79  * 2. Redistributions in binary form must reproduce the above copyright
     80  *    notice, this list of conditions and the following disclaimer in the
     81  *    documentation and/or other materials provided with the distribution.
     82  * 3. All advertising materials mentioning features or use of this software
     83  *    must display the following acknowledgement:
     84  *	This product includes software developed by Mark Brinicombe.
     85  * 4. The name of the author may not be used to endorse or promote products
     86  *    derived from this software without specific prior written permission.
     87  *
     88  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     89  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     90  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     91  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     92  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     93  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     94  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     95  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     96  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     97  *
     98  * RiscBSD kernel project
     99  *
    100  * pmap.c
    101  *
    102  * Machine dependant vm stuff
    103  *
    104  * Created      : 20/09/94
    105  */
    106 
    107 /*
    108  * Performance improvements, UVM changes, overhauls and part-rewrites
    109  * were contributed by Neil A. Carson <neil (at) causality.com>.
    110  */
    111 
    112 /*
    113  * The dram block info is currently referenced from the bootconfig.
    114  * This should be placed in a separate structure.
    115  */
    116 
    117 /*
    118  * Special compilation symbols
    119  * PMAP_DEBUG		- Build in pmap_debug_level code
    120  */
    121 
    122 /* Include header files */
    123 
    124 #include "opt_pmap_debug.h"
    125 #include "opt_ddb.h"
    126 
    127 #include <sys/types.h>
    128 #include <sys/param.h>
    129 #include <sys/kernel.h>
    130 #include <sys/systm.h>
    131 #include <sys/proc.h>
    132 #include <sys/malloc.h>
    133 #include <sys/user.h>
    134 #include <sys/pool.h>
    135 #include <sys/cdefs.h>
    136 
    137 #include <uvm/uvm.h>
    138 
    139 #include <machine/bootconfig.h>
    140 #include <machine/bus.h>
    141 #include <machine/pmap.h>
    142 #include <machine/pcb.h>
    143 #include <machine/param.h>
    144 #include <arm/arm32/katelib.h>
    145 
    146 __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.97.4.5 2002/12/07 20:44:23 he Exp $");
    147 #ifdef PMAP_DEBUG
    148 #define	PDEBUG(_lev_,_stat_) \
    149 	if (pmap_debug_level >= (_lev_)) \
    150         	((_stat_))
    151 int pmap_debug_level = -2;
    152 void pmap_dump_pvlist(vaddr_t phys, char *m);
    153 
    154 /*
    155  * for switching to potentially finer grained debugging
    156  */
    157 #define	PDB_FOLLOW	0x0001
    158 #define	PDB_INIT	0x0002
    159 #define	PDB_ENTER	0x0004
    160 #define	PDB_REMOVE	0x0008
    161 #define	PDB_CREATE	0x0010
    162 #define	PDB_PTPAGE	0x0020
    163 #define	PDB_GROWKERN	0x0040
    164 #define	PDB_BITS	0x0080
    165 #define	PDB_COLLECT	0x0100
    166 #define	PDB_PROTECT	0x0200
    167 #define	PDB_MAP_L1	0x0400
    168 #define	PDB_BOOTSTRAP	0x1000
    169 #define	PDB_PARANOIA	0x2000
    170 #define	PDB_WIRING	0x4000
    171 #define	PDB_PVDUMP	0x8000
    172 
    173 int debugmap = 0;
    174 int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
    175 #define	NPDEBUG(_lev_,_stat_) \
    176 	if (pmapdebug & (_lev_)) \
    177         	((_stat_))
    178 
    179 #else	/* PMAP_DEBUG */
    180 #define	PDEBUG(_lev_,_stat_) /* Nothing */
    181 #define NPDEBUG(_lev_,_stat_) /* Nothing */
    182 #endif	/* PMAP_DEBUG */
    183 
    184 struct pmap     kernel_pmap_store;
    185 
    186 /*
    187  * linked list of all non-kernel pmaps
    188  */
    189 
    190 static LIST_HEAD(, pmap) pmaps;
    191 
    192 /*
    193  * pool that pmap structures are allocated from
    194  */
    195 
    196 struct pool pmap_pmap_pool;
    197 
    198 /*
    199  * pool/cache that PT-PT's are allocated from
    200  */
    201 
    202 struct pool pmap_ptpt_pool;
    203 struct pool_cache pmap_ptpt_cache;
    204 u_int pmap_ptpt_cache_generation;
    205 
    206 static void *pmap_ptpt_page_alloc(struct pool *, int);
    207 static void pmap_ptpt_page_free(struct pool *, void *);
    208 
    209 struct pool_allocator pmap_ptpt_allocator = {
    210 	pmap_ptpt_page_alloc, pmap_ptpt_page_free,
    211 };
    212 
    213 static int pmap_ptpt_ctor(void *, void *, int);
    214 
    215 static pt_entry_t *csrc_pte, *cdst_pte;
    216 static vaddr_t csrcp, cdstp;
    217 
    218 char *memhook;
    219 extern caddr_t msgbufaddr;
    220 
    221 boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
    222 /*
    223  * locking data structures
    224  */
    225 
    226 static struct lock pmap_main_lock;
    227 static struct simplelock pvalloc_lock;
    228 static struct simplelock pmaps_lock;
    229 #ifdef LOCKDEBUG
    230 #define PMAP_MAP_TO_HEAD_LOCK() \
    231      (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
    232 #define PMAP_MAP_TO_HEAD_UNLOCK() \
    233      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
    234 
    235 #define PMAP_HEAD_TO_MAP_LOCK() \
    236      (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
    237 #define PMAP_HEAD_TO_MAP_UNLOCK() \
    238      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
    239 #else
    240 #define	PMAP_MAP_TO_HEAD_LOCK()		/* nothing */
    241 #define	PMAP_MAP_TO_HEAD_UNLOCK()	/* nothing */
    242 #define	PMAP_HEAD_TO_MAP_LOCK()		/* nothing */
    243 #define	PMAP_HEAD_TO_MAP_UNLOCK()	/* nothing */
    244 #endif /* LOCKDEBUG */
    245 
    246 /*
    247  * pv_page management structures: locked by pvalloc_lock
    248  */
    249 
    250 TAILQ_HEAD(pv_pagelist, pv_page);
    251 static struct pv_pagelist pv_freepages;	/* list of pv_pages with free entrys */
    252 static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
    253 static int pv_nfpvents;			/* # of free pv entries */
    254 static struct pv_page *pv_initpage;	/* bootstrap page from kernel_map */
    255 static vaddr_t pv_cachedva;		/* cached VA for later use */
    256 
    257 #define PVE_LOWAT (PVE_PER_PVPAGE / 2)	/* free pv_entry low water mark */
    258 #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
    259 					/* high water mark */
    260 
    261 /*
    262  * local prototypes
    263  */
    264 
    265 static struct pv_entry	*pmap_add_pvpage __P((struct pv_page *, boolean_t));
    266 static struct pv_entry	*pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
    267 #define ALLOCPV_NEED	0	/* need PV now */
    268 #define ALLOCPV_TRY	1	/* just try to allocate, don't steal */
    269 #define ALLOCPV_NONEED	2	/* don't need PV, just growing cache */
    270 static struct pv_entry	*pmap_alloc_pvpage __P((struct pmap *, int));
    271 static void		 pmap_enter_pv __P((struct vm_page *,
    272 					    struct pv_entry *, struct pmap *,
    273 					    vaddr_t, struct vm_page *, int));
    274 static void		 pmap_free_pv __P((struct pmap *, struct pv_entry *));
    275 static void		 pmap_free_pvs __P((struct pmap *, struct pv_entry *));
    276 static void		 pmap_free_pv_doit __P((struct pv_entry *));
    277 static void		 pmap_free_pvpage __P((void));
    278 static boolean_t	 pmap_is_curpmap __P((struct pmap *));
    279 static struct pv_entry	*pmap_remove_pv __P((struct vm_page *, struct pmap *,
    280 			vaddr_t));
    281 #define PMAP_REMOVE_ALL		0	/* remove all mappings */
    282 #define PMAP_REMOVE_SKIPWIRED	1	/* skip wired mappings */
    283 
    284 static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
    285 	u_int, u_int));
    286 
    287 /*
    288  * Structure that describes and L1 table.
    289  */
    290 struct l1pt {
    291 	SIMPLEQ_ENTRY(l1pt)	pt_queue;	/* Queue pointers */
    292 	struct pglist		pt_plist;	/* Allocated page list */
    293 	vaddr_t			pt_va;		/* Allocated virtual address */
    294 	int			pt_flags;	/* Flags */
    295 };
    296 #define	PTFLAG_STATIC		0x01		/* Statically allocated */
    297 #define	PTFLAG_KPT		0x02		/* Kernel pt's are mapped */
    298 #define	PTFLAG_CLEAN		0x04		/* L1 is clean */
    299 
    300 static void pmap_free_l1pt __P((struct l1pt *));
    301 static int pmap_allocpagedir __P((struct pmap *));
    302 static int pmap_clean_page __P((struct pv_entry *, boolean_t));
    303 static void pmap_remove_all __P((struct vm_page *));
    304 
    305 static struct vm_page	*pmap_alloc_ptp __P((struct pmap *, vaddr_t));
    306 static struct vm_page	*pmap_get_ptp __P((struct pmap *, vaddr_t));
    307 __inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
    308 
    309 extern paddr_t physical_start;
    310 extern paddr_t physical_freestart;
    311 extern paddr_t physical_end;
    312 extern paddr_t physical_freeend;
    313 extern unsigned int free_pages;
    314 extern int max_processes;
    315 
    316 vaddr_t virtual_avail;
    317 vaddr_t virtual_end;
    318 vaddr_t pmap_curmaxkvaddr;
    319 
    320 vaddr_t avail_start;
    321 vaddr_t avail_end;
    322 
    323 extern pv_addr_t systempage;
    324 
    325 /* Variables used by the L1 page table queue code */
    326 SIMPLEQ_HEAD(l1pt_queue, l1pt);
    327 static struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
    328 static int l1pt_static_queue_count;	    /* items in the static l1 queue */
    329 static int l1pt_static_create_count;	    /* static l1 items created */
    330 static struct l1pt_queue l1pt_queue;	    /* head of our l1 queue */
    331 static int l1pt_queue_count;		    /* items in the l1 queue */
    332 static int l1pt_create_count;		    /* stat - L1's create count */
    333 static int l1pt_reuse_count;		    /* stat - L1's reused count */
    334 
    335 /* Local function prototypes (not used outside this file) */
    336 void pmap_pinit __P((struct pmap *));
    337 void pmap_freepagedir __P((struct pmap *));
    338 
    339 /* Other function prototypes */
    340 extern void bzero_page __P((vaddr_t));
    341 extern void bcopy_page __P((vaddr_t, vaddr_t));
    342 
    343 struct l1pt *pmap_alloc_l1pt __P((void));
    344 static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
    345      vaddr_t l2pa, boolean_t));
    346 
    347 static pt_entry_t *pmap_map_ptes __P((struct pmap *));
    348 static void pmap_unmap_ptes __P((struct pmap *));
    349 
    350 __inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
    351     pt_entry_t *, boolean_t));
    352 static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
    353     pt_entry_t *, boolean_t));
    354 static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
    355     pt_entry_t *, boolean_t));
    356 
    357 /*
    358  * real definition of pv_entry.
    359  */
    360 
    361 struct pv_entry {
    362 	struct pv_entry *pv_next;       /* next pv_entry */
    363 	struct pmap     *pv_pmap;        /* pmap where mapping lies */
    364 	vaddr_t         pv_va;          /* virtual address for mapping */
    365 	int             pv_flags;       /* flags */
    366 	struct vm_page	*pv_ptp;	/* vm_page for the ptp */
    367 };
    368 
    369 /*
    370  * pv_entrys are dynamically allocated in chunks from a single page.
    371  * we keep track of how many pv_entrys are in use for each page and
    372  * we can free pv_entry pages if needed.  there is one lock for the
    373  * entire allocation system.
    374  */
    375 
    376 struct pv_page_info {
    377 	TAILQ_ENTRY(pv_page) pvpi_list;
    378 	struct pv_entry *pvpi_pvfree;
    379 	int pvpi_nfree;
    380 };
    381 
    382 /*
    383  * number of pv_entry's in a pv_page
    384  * (note: won't work on systems where NPBG isn't a constant)
    385  */
    386 
    387 #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
    388 			sizeof(struct pv_entry))
    389 
    390 /*
    391  * a pv_page: where pv_entrys are allocated from
    392  */
    393 
    394 struct pv_page {
    395 	struct pv_page_info pvinfo;
    396 	struct pv_entry pvents[PVE_PER_PVPAGE];
    397 };
    398 
    399 #ifdef MYCROFT_HACK
    400 int mycroft_hack = 0;
    401 #endif
    402 
    403 /* Function to set the debug level of the pmap code */
    404 
    405 #ifdef PMAP_DEBUG
    406 void
    407 pmap_debug(int level)
    408 {
    409 	pmap_debug_level = level;
    410 	printf("pmap_debug: level=%d\n", pmap_debug_level);
    411 }
    412 #endif	/* PMAP_DEBUG */
    413 
    414 __inline static boolean_t
    415 pmap_is_curpmap(struct pmap *pmap)
    416 {
    417 
    418 	if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
    419 	    pmap == pmap_kernel())
    420 		return (TRUE);
    421 
    422 	return (FALSE);
    423 }
    424 
    425 #include "isadma.h"
    426 
    427 #if NISADMA > 0
    428 /*
    429  * Used to protect memory for ISA DMA bounce buffers.  If, when loading
    430  * pages into the system, memory intersects with any of these ranges,
    431  * the intersecting memory will be loaded into a lower-priority free list.
    432  */
    433 bus_dma_segment_t *pmap_isa_dma_ranges;
    434 int pmap_isa_dma_nranges;
    435 
    436 /*
    437  * Check if a memory range intersects with an ISA DMA range, and
    438  * return the page-rounded intersection if it does.  The intersection
    439  * will be placed on a lower-priority free list.
    440  */
    441 static boolean_t
    442 pmap_isa_dma_range_intersect(paddr_t pa, psize_t size, paddr_t *pap,
    443     psize_t *sizep)
    444 {
    445 	bus_dma_segment_t *ds;
    446 	int i;
    447 
    448 	if (pmap_isa_dma_ranges == NULL)
    449 		return (FALSE);
    450 
    451 	for (i = 0, ds = pmap_isa_dma_ranges;
    452 	     i < pmap_isa_dma_nranges; i++, ds++) {
    453 		if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
    454 			/*
    455 			 * Beginning of region intersects with this range.
    456 			 */
    457 			*pap = trunc_page(pa);
    458 			*sizep = round_page(min(pa + size,
    459 			    ds->ds_addr + ds->ds_len) - pa);
    460 			return (TRUE);
    461 		}
    462 		if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
    463 			/*
    464 			 * End of region intersects with this range.
    465 			 */
    466 			*pap = trunc_page(ds->ds_addr);
    467 			*sizep = round_page(min((pa + size) - ds->ds_addr,
    468 			    ds->ds_len));
    469 			return (TRUE);
    470 		}
    471 	}
    472 
    473 	/*
    474 	 * No intersection found.
    475 	 */
    476 	return (FALSE);
    477 }
    478 #endif /* NISADMA > 0 */
    479 
    480 /*
    481  * p v _ e n t r y   f u n c t i o n s
    482  */
    483 
    484 /*
    485  * pv_entry allocation functions:
    486  *   the main pv_entry allocation functions are:
    487  *     pmap_alloc_pv: allocate a pv_entry structure
    488  *     pmap_free_pv: free one pv_entry
    489  *     pmap_free_pvs: free a list of pv_entrys
    490  *
    491  * the rest are helper functions
    492  */
    493 
    494 /*
    495  * pmap_alloc_pv: inline function to allocate a pv_entry structure
    496  * => we lock pvalloc_lock
    497  * => if we fail, we call out to pmap_alloc_pvpage
    498  * => 3 modes:
    499  *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
    500  *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
    501  *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
    502  *			one now
    503  *
    504  * "try" is for optional functions like pmap_copy().
    505  */
    506 
    507 __inline static struct pv_entry *
    508 pmap_alloc_pv(struct pmap *pmap, int mode)
    509 {
    510 	struct pv_page *pvpage;
    511 	struct pv_entry *pv;
    512 
    513 	simple_lock(&pvalloc_lock);
    514 
    515 	pvpage = TAILQ_FIRST(&pv_freepages);
    516 
    517 	if (pvpage != NULL) {
    518 		pvpage->pvinfo.pvpi_nfree--;
    519 		if (pvpage->pvinfo.pvpi_nfree == 0) {
    520 			/* nothing left in this one? */
    521 			TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
    522 		}
    523 		pv = pvpage->pvinfo.pvpi_pvfree;
    524 		KASSERT(pv);
    525 		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
    526 		pv_nfpvents--;  /* took one from pool */
    527 	} else {
    528 		pv = NULL;		/* need more of them */
    529 	}
    530 
    531 	/*
    532 	 * if below low water mark or we didn't get a pv_entry we try and
    533 	 * create more pv_entrys ...
    534 	 */
    535 
    536 	if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
    537 		if (pv == NULL)
    538 			pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
    539 					       mode : ALLOCPV_NEED);
    540 		else
    541 			(void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
    542 	}
    543 
    544 	simple_unlock(&pvalloc_lock);
    545 	return(pv);
    546 }
    547 
    548 /*
    549  * pmap_alloc_pvpage: maybe allocate a new pvpage
    550  *
    551  * if need_entry is false: try and allocate a new pv_page
    552  * if need_entry is true: try and allocate a new pv_page and return a
    553  *	new pv_entry from it.   if we are unable to allocate a pv_page
    554  *	we make a last ditch effort to steal a pv_page from some other
    555  *	mapping.    if that fails, we panic...
    556  *
    557  * => we assume that the caller holds pvalloc_lock
    558  */
    559 
    560 static struct pv_entry *
    561 pmap_alloc_pvpage(struct pmap *pmap, int mode)
    562 {
    563 	struct vm_page *pg;
    564 	struct pv_page *pvpage;
    565 	struct pv_entry *pv;
    566 	int s;
    567 
    568 	/*
    569 	 * if we need_entry and we've got unused pv_pages, allocate from there
    570 	 */
    571 
    572 	pvpage = TAILQ_FIRST(&pv_unusedpgs);
    573 	if (mode != ALLOCPV_NONEED && pvpage != NULL) {
    574 
    575 		/* move it to pv_freepages list */
    576 		TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
    577 		TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);
    578 
    579 		/* allocate a pv_entry */
    580 		pvpage->pvinfo.pvpi_nfree--;	/* can't go to zero */
    581 		pv = pvpage->pvinfo.pvpi_pvfree;
    582 		KASSERT(pv);
    583 		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
    584 
    585 		pv_nfpvents--;  /* took one from pool */
    586 		return(pv);
    587 	}
    588 
    589 	/*
    590 	 *  see if we've got a cached unmapped VA that we can map a page in.
    591 	 * if not, try to allocate one.
    592 	 */
    593 
    594 
    595 	if (pv_cachedva == 0) {
    596 		s = splvm();
    597 		pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
    598 		    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
    599 		splx(s);
    600 		if (pv_cachedva == 0) {
    601 			return (NULL);
    602 		}
    603 	}
    604 
    605 	pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
    606 	    UVM_PGA_USERESERVE);
    607 
    608 	if (pg == NULL)
    609 		return (NULL);
    610 	pg->flags &= ~PG_BUSY;	/* never busy */
    611 
    612 	/*
    613 	 * add a mapping for our new pv_page and free its entrys (save one!)
    614 	 *
    615 	 * NOTE: If we are allocating a PV page for the kernel pmap, the
    616 	 * pmap is already locked!  (...but entering the mapping is safe...)
    617 	 */
    618 
    619 	pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
    620 		VM_PROT_READ|VM_PROT_WRITE);
    621 	pmap_update(pmap_kernel());
    622 	pvpage = (struct pv_page *) pv_cachedva;
    623 	pv_cachedva = 0;
    624 	return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
    625 }
    626 
    627 /*
    628  * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
    629  *
    630  * => caller must hold pvalloc_lock
    631  * => if need_entry is true, we allocate and return one pv_entry
    632  */
    633 
    634 static struct pv_entry *
    635 pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
    636 {
    637 	int tofree, lcv;
    638 
    639 	/* do we need to return one? */
    640 	tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
    641 
    642 	pvp->pvinfo.pvpi_pvfree = NULL;
    643 	pvp->pvinfo.pvpi_nfree = tofree;
    644 	for (lcv = 0 ; lcv < tofree ; lcv++) {
    645 		pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
    646 		pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
    647 	}
    648 	if (need_entry)
    649 		TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
    650 	else
    651 		TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
    652 	pv_nfpvents += tofree;
    653 	return((need_entry) ? &pvp->pvents[lcv] : NULL);
    654 }
    655 
    656 /*
    657  * pmap_free_pv_doit: actually free a pv_entry
    658  *
    659  * => do not call this directly!  instead use either
    660  *    1. pmap_free_pv ==> free a single pv_entry
    661  *    2. pmap_free_pvs => free a list of pv_entrys
    662  * => we must be holding pvalloc_lock
    663  */
    664 
    665 __inline static void
    666 pmap_free_pv_doit(struct pv_entry *pv)
    667 {
    668 	struct pv_page *pvp;
    669 
    670 	pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
    671 	pv_nfpvents++;
    672 	pvp->pvinfo.pvpi_nfree++;
    673 
    674 	/* nfree == 1 => fully allocated page just became partly allocated */
    675 	if (pvp->pvinfo.pvpi_nfree == 1) {
    676 		TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
    677 	}
    678 
    679 	/* free it */
    680 	pv->pv_next = pvp->pvinfo.pvpi_pvfree;
    681 	pvp->pvinfo.pvpi_pvfree = pv;
    682 
    683 	/*
    684 	 * are all pv_page's pv_entry's free?  move it to unused queue.
    685 	 */
    686 
    687 	if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
    688 		TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
    689 		TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
    690 	}
    691 }
    692 
    693 /*
    694  * pmap_free_pv: free a single pv_entry
    695  *
    696  * => we gain the pvalloc_lock
    697  */
    698 
    699 __inline static void
    700 pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
    701 {
    702 	simple_lock(&pvalloc_lock);
    703 	pmap_free_pv_doit(pv);
    704 
    705 	/*
    706 	 * Can't free the PV page if the PV entries were associated with
    707 	 * the kernel pmap; the pmap is already locked.
    708 	 */
    709 	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
    710 	    pmap != pmap_kernel())
    711 		pmap_free_pvpage();
    712 
    713 	simple_unlock(&pvalloc_lock);
    714 }
    715 
    716 /*
    717  * pmap_free_pvs: free a list of pv_entrys
    718  *
    719  * => we gain the pvalloc_lock
    720  */
    721 
    722 __inline static void
    723 pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
    724 {
    725 	struct pv_entry *nextpv;
    726 
    727 	simple_lock(&pvalloc_lock);
    728 
    729 	for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
    730 		nextpv = pvs->pv_next;
    731 		pmap_free_pv_doit(pvs);
    732 	}
    733 
    734 	/*
    735 	 * Can't free the PV page if the PV entries were associated with
    736 	 * the kernel pmap; the pmap is already locked.
    737 	 */
    738 	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
    739 	    pmap != pmap_kernel())
    740 		pmap_free_pvpage();
    741 
    742 	simple_unlock(&pvalloc_lock);
    743 }
    744 
    745 
    746 /*
    747  * pmap_free_pvpage: try and free an unused pv_page structure
    748  *
    749  * => assume caller is holding the pvalloc_lock and that
    750  *	there is a page on the pv_unusedpgs list
    751  * => if we can't get a lock on the kmem_map we try again later
    752  */
    753 
    754 static void
    755 pmap_free_pvpage(void)
    756 {
    757 	int s;
    758 	struct vm_map *map;
    759 	struct vm_map_entry *dead_entries;
    760 	struct pv_page *pvp;
    761 
    762 	s = splvm(); /* protect kmem_map */
    763 
    764 	pvp = TAILQ_FIRST(&pv_unusedpgs);
    765 
    766 	/*
    767 	 * note: watch out for pv_initpage which is allocated out of
    768 	 * kernel_map rather than kmem_map.
    769 	 */
    770 	if (pvp == pv_initpage)
    771 		map = kernel_map;
    772 	else
    773 		map = kmem_map;
    774 	if (vm_map_lock_try(map)) {
    775 
    776 		/* remove pvp from pv_unusedpgs */
    777 		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
    778 
    779 		/* unmap the page */
    780 		dead_entries = NULL;
    781 		uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
    782 		    &dead_entries);
    783 		vm_map_unlock(map);
    784 
    785 		if (dead_entries != NULL)
    786 			uvm_unmap_detach(dead_entries, 0);
    787 
    788 		pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
    789 	}
    790 	if (pvp == pv_initpage)
    791 		/* no more initpage, we've freed it */
    792 		pv_initpage = NULL;
    793 
    794 	splx(s);
    795 }
    796 
    797 /*
    798  * main pv_entry manipulation functions:
    799  *   pmap_enter_pv: enter a mapping onto a vm_page list
    800  *   pmap_remove_pv: remove a mappiing from a vm_page list
    801  *
    802  * NOTE: pmap_enter_pv expects to lock the pvh itself
    803  *       pmap_remove_pv expects te caller to lock the pvh before calling
    804  */
    805 
    806 /*
    807  * pmap_enter_pv: enter a mapping onto a vm_page lst
    808  *
    809  * => caller should hold the proper lock on pmap_main_lock
    810  * => caller should have pmap locked
    811  * => we will gain the lock on the vm_page and allocate the new pv_entry
    812  * => caller should adjust ptp's wire_count before calling
    813  * => caller should not adjust pmap's wire_count
    814  */
    815 
    816 __inline static void
    817 pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
    818     vaddr_t va, struct vm_page *ptp, int flags)
    819 {
    820 	pve->pv_pmap = pmap;
    821 	pve->pv_va = va;
    822 	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
    823 	pve->pv_flags = flags;
    824 	simple_lock(&pg->mdpage.pvh_slock);	/* lock vm_page */
    825 	pve->pv_next = pg->mdpage.pvh_list;	/* add to ... */
    826 	pg->mdpage.pvh_list = pve;		/* ... locked list */
    827 	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */
    828 	if (pve->pv_flags & PVF_WIRED)
    829 		++pmap->pm_stats.wired_count;
    830 #ifdef PMAP_ALIAS_DEBUG
    831     {
    832 	int s = splhigh();
    833 	if (pve->pv_flags & PVF_WRITE)
    834 		pg->mdpage.rw_mappings++;
    835 	else
    836 		pg->mdpage.ro_mappings++;
    837 	if (pg->mdpage.rw_mappings != 0 &&
    838 	    (pg->mdpage.kro_mappings != 0 || pg->mdpage.krw_mappings != 0)) {
    839 		printf("pmap_enter_pv: rw %u, kro %u, krw %u\n",
    840 		    pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
    841 		    pg->mdpage.krw_mappings);
    842 	}
    843 	splx(s);
    844     }
    845 #endif /* PMAP_ALIAS_DEBUG */
    846 }
    847 
    848 /*
    849  * pmap_remove_pv: try to remove a mapping from a pv_list
    850  *
    851  * => caller should hold proper lock on pmap_main_lock
    852  * => pmap should be locked
    853  * => caller should hold lock on vm_page [so that attrs can be adjusted]
    854  * => caller should adjust ptp's wire_count and free PTP if needed
    855  * => caller should NOT adjust pmap's wire_count
    856  * => we return the removed pve
    857  */
    858 
    859 __inline static struct pv_entry *
    860 pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
    861 {
    862 	struct pv_entry *pve, **prevptr;
    863 
    864 	prevptr = &pg->mdpage.pvh_list;		/* previous pv_entry pointer */
    865 	pve = *prevptr;
    866 	while (pve) {
    867 		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
    868 			*prevptr = pve->pv_next;		/* remove it! */
    869 			if (pve->pv_flags & PVF_WIRED)
    870 			    --pmap->pm_stats.wired_count;
    871 #ifdef PMAP_ALIAS_DEBUG
    872     {
    873 			int s = splhigh();
    874 			if (pve->pv_flags & PVF_WRITE) {
    875 				KASSERT(pg->mdpage.rw_mappings != 0);
    876 				pg->mdpage.rw_mappings--;
    877 			} else {
    878 				KASSERT(pg->mdpage.ro_mappings != 0);
    879 				pg->mdpage.ro_mappings--;
    880 			}
    881 			splx(s);
    882     }
    883 #endif /* PMAP_ALIAS_DEBUG */
    884 			break;
    885 		}
    886 		prevptr = &pve->pv_next;		/* previous pointer */
    887 		pve = pve->pv_next;			/* advance */
    888 	}
    889 	return(pve);				/* return removed pve */
    890 }
    891 
    892 /*
    893  *
    894  * pmap_modify_pv: Update pv flags
    895  *
    896  * => caller should hold lock on vm_page [so that attrs can be adjusted]
    897  * => caller should NOT adjust pmap's wire_count
    898  * => caller must call pmap_vac_me_harder() if writable status of a page
    899  *    may have changed.
    900  * => we return the old flags
    901  *
    902  * Modify a physical-virtual mapping in the pv table
    903  */
    904 
    905 static /* __inline */ u_int
    906 pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
    907     u_int bic_mask, u_int eor_mask)
    908 {
    909 	struct pv_entry *npv;
    910 	u_int flags, oflags;
    911 
    912 	/*
    913 	 * There is at least one VA mapping this page.
    914 	 */
    915 
    916 	for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
    917 		if (pmap == npv->pv_pmap && va == npv->pv_va) {
    918 			oflags = npv->pv_flags;
    919 			npv->pv_flags = flags =
    920 			    ((oflags & ~bic_mask) ^ eor_mask);
    921 			if ((flags ^ oflags) & PVF_WIRED) {
    922 				if (flags & PVF_WIRED)
    923 					++pmap->pm_stats.wired_count;
    924 				else
    925 					--pmap->pm_stats.wired_count;
    926 			}
    927 #ifdef PMAP_ALIAS_DEBUG
    928     {
    929 			int s = splhigh();
    930 			if ((flags ^ oflags) & PVF_WRITE) {
    931 				if (flags & PVF_WRITE) {
    932 					pg->mdpage.rw_mappings++;
    933 					pg->mdpage.ro_mappings--;
    934 					if (pg->mdpage.rw_mappings != 0 &&
    935 					    (pg->mdpage.kro_mappings != 0 ||
    936 					     pg->mdpage.krw_mappings != 0)) {
    937 						printf("pmap_modify_pv: rw %u, "
    938 						    "kro %u, krw %u\n",
    939 						    pg->mdpage.rw_mappings,
    940 						    pg->mdpage.kro_mappings,
    941 						    pg->mdpage.krw_mappings);
    942 					}
    943 				} else {
    944 					KASSERT(pg->mdpage.rw_mappings != 0);
    945 					pg->mdpage.rw_mappings--;
    946 					pg->mdpage.ro_mappings++;
    947 				}
    948 			}
    949 			splx(s);
    950     }
    951 #endif /* PMAP_ALIAS_DEBUG */
    952 			return (oflags);
    953 		}
    954 	}
    955 	return (0);
    956 }
    957 
    958 /*
    959  * Map the specified level 2 pagetable into the level 1 page table for
    960  * the given pmap to cover a chunk of virtual address space starting from the
    961  * address specified.
    962  */
    963 static __inline void
    964 pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, boolean_t selfref)
    965 {
    966 	vaddr_t ptva;
    967 
    968 	/* Calculate the index into the L1 page table. */
    969 	ptva = (va >> L1_S_SHIFT) & ~3;
    970 
    971 	/* Map page table into the L1. */
    972 	pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
    973 	pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
    974 	pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
    975 	pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);
    976 	cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
    977 
    978 	/* Map the page table into the page table area. */
    979 	if (selfref)
    980 		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
    981 		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
    982 }
    983 
    984 #if 0
    985 static __inline void
    986 pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
    987 {
    988 	vaddr_t ptva;
    989 
    990 	/* Calculate the index into the L1 page table. */
    991 	ptva = (va >> L1_S_SHIFT) & ~3;
    992 
    993 	/* Unmap page table from the L1. */
    994 	pmap->pm_pdir[ptva + 0] = 0;
    995 	pmap->pm_pdir[ptva + 1] = 0;
    996 	pmap->pm_pdir[ptva + 2] = 0;
    997 	pmap->pm_pdir[ptva + 3] = 0;
    998 	cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
    999 
   1000 	/* Unmap the page table from the page table area. */
   1001 	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
   1002 }
   1003 #endif
   1004 
   1005 /*
   1006  *	Used to map a range of physical addresses into kernel
   1007  *	virtual address space.
   1008  *
   1009  *	For now, VM is already on, we only need to map the
   1010  *	specified memory.
   1011  *
   1012  *	XXX This routine should eventually go away; it's only used
   1013  *	XXX by machine-dependent crash dump code.
   1014  */
   1015 vaddr_t
   1016 pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
   1017 {
   1018 	pt_entry_t *pte;
   1019 
   1020 	while (spa < epa) {
   1021 		pte = vtopte(va);
   1022 
   1023 		*pte = L2_S_PROTO | spa |
   1024 		    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
   1025 		cpu_tlb_flushID_SE(va);
   1026 		va += NBPG;
   1027 		spa += NBPG;
   1028 	}
   1029 	pmap_update(pmap_kernel());
   1030 	return(va);
   1031 }
   1032 
   1033 
   1034 /*
   1035  * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
   1036  *
   1037  * bootstrap the pmap system. This is called from initarm and allows
   1038  * the pmap system to initailise any structures it requires.
   1039  *
   1040  * Currently this sets up the kernel_pmap that is statically allocated
   1041  * and also allocated virtual addresses for certain page hooks.
   1042  * Currently the only one page hook is allocated that is used
   1043  * to zero physical pages of memory.
   1044  * It also initialises the start and end address of the kernel data space.
   1045  */
   1046 extern paddr_t physical_freestart;
   1047 extern paddr_t physical_freeend;
   1048 
   1049 char *boot_head;
   1050 
   1051 void
   1052 pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
   1053 {
   1054 	pt_entry_t *pte;
   1055 	int loop;
   1056 	paddr_t start, end;
   1057 #if NISADMA > 0
   1058 	paddr_t istart;
   1059 	psize_t isize;
   1060 #endif
   1061 
   1062 	pmap_kernel()->pm_pdir = kernel_l1pt;
   1063 	pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
   1064 	pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
   1065 	simple_lock_init(&pmap_kernel()->pm_lock);
   1066 	pmap_kernel()->pm_obj.pgops = NULL;
   1067 	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
   1068 	pmap_kernel()->pm_obj.uo_npages = 0;
   1069 	pmap_kernel()->pm_obj.uo_refs = 1;
   1070 
   1071 	/*
   1072 	 * Initialize PAGE_SIZE-dependent variables.
   1073 	 */
   1074 	uvm_setpagesize();
   1075 
   1076 	loop = 0;
   1077 	while (loop < bootconfig.dramblocks) {
   1078 		start = (paddr_t)bootconfig.dram[loop].address;
   1079 		end = start + (bootconfig.dram[loop].pages * NBPG);
   1080 		if (start < physical_freestart)
   1081 			start = physical_freestart;
   1082 		if (end > physical_freeend)
   1083 			end = physical_freeend;
   1084 #if 0
   1085 		printf("%d: %lx -> %lx\n", loop, start, end - 1);
   1086 #endif
   1087 #if NISADMA > 0
   1088 		if (pmap_isa_dma_range_intersect(start, end - start,
   1089 		    &istart, &isize)) {
   1090 			/*
   1091 			 * Place the pages that intersect with the
   1092 			 * ISA DMA range onto the ISA DMA free list.
   1093 			 */
   1094 #if 0
   1095 			printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
   1096 			    istart + isize - 1);
   1097 #endif
   1098 			uvm_page_physload(atop(istart),
   1099 			    atop(istart + isize), atop(istart),
   1100 			    atop(istart + isize), VM_FREELIST_ISADMA);
   1101 
   1102 			/*
   1103 			 * Load the pieces that come before
   1104 			 * the intersection into the default
   1105 			 * free list.
   1106 			 */
   1107 			if (start < istart) {
   1108 #if 0
   1109 				printf("    BEFORE 0x%lx -> 0x%lx\n",
   1110 				    start, istart - 1);
   1111 #endif
   1112 				uvm_page_physload(atop(start),
   1113 				    atop(istart), atop(start),
   1114 				    atop(istart), VM_FREELIST_DEFAULT);
   1115 			}
   1116 
   1117 			/*
   1118 			 * Load the pieces that come after
   1119 			 * the intersection into the default
   1120 			 * free list.
   1121 			 */
   1122 			if ((istart + isize) < end) {
   1123 #if 0
   1124 				printf("     AFTER 0x%lx -> 0x%lx\n",
   1125 				    (istart + isize), end - 1);
   1126 #endif
   1127 				uvm_page_physload(atop(istart + isize),
   1128 				    atop(end), atop(istart + isize),
   1129 				    atop(end), VM_FREELIST_DEFAULT);
   1130 			}
   1131 		} else {
   1132 			uvm_page_physload(atop(start), atop(end),
   1133 			    atop(start), atop(end), VM_FREELIST_DEFAULT);
   1134 		}
   1135 #else	/* NISADMA > 0 */
   1136 		uvm_page_physload(atop(start), atop(end),
   1137 		    atop(start), atop(end), VM_FREELIST_DEFAULT);
   1138 #endif /* NISADMA > 0 */
   1139 		++loop;
   1140 	}
   1141 
   1142 	virtual_avail = KERNEL_VM_BASE;
   1143 	virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;
   1144 
   1145 	/*
   1146 	 * now we allocate the "special" VAs which are used for tmp mappings
   1147 	 * by the pmap (and other modules).  we allocate the VAs by advancing
   1148 	 * virtual_avail (note that there are no pages mapped at these VAs).
   1149 	 * we find the PTE that maps the allocated VA via the linear PTE
   1150 	 * mapping.
   1151 	 */
   1152 
   1153 	pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);
   1154 
   1155 	csrcp = virtual_avail; csrc_pte = pte;
   1156 	virtual_avail += PAGE_SIZE; pte++;
   1157 
   1158 	cdstp = virtual_avail; cdst_pte = pte;
   1159 	virtual_avail += PAGE_SIZE; pte++;
   1160 
   1161 	memhook = (char *) virtual_avail;	/* don't need pte */
   1162 	virtual_avail += PAGE_SIZE; pte++;
   1163 
   1164 	msgbufaddr = (caddr_t) virtual_avail;	/* don't need pte */
   1165 	virtual_avail += round_page(MSGBUFSIZE);
   1166 	pte += atop(round_page(MSGBUFSIZE));
   1167 
   1168 	/*
   1169 	 * init the static-global locks and global lists.
   1170 	 */
   1171 	spinlockinit(&pmap_main_lock, "pmaplk", 0);
   1172 	simple_lock_init(&pvalloc_lock);
   1173 	simple_lock_init(&pmaps_lock);
   1174 	LIST_INIT(&pmaps);
   1175 	TAILQ_INIT(&pv_freepages);
   1176 	TAILQ_INIT(&pv_unusedpgs);
   1177 
   1178 	/*
   1179 	 * initialize the pmap pool.
   1180 	 */
   1181 
   1182 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
   1183 		  &pool_allocator_nointr);
   1184 
   1185 	/*
   1186 	 * initialize the PT-PT pool and cache.
   1187 	 */
   1188 
   1189 	pool_init(&pmap_ptpt_pool, PAGE_SIZE, 0, 0, 0, "ptptpl",
   1190 		  &pmap_ptpt_allocator);
   1191 	pool_cache_init(&pmap_ptpt_cache, &pmap_ptpt_pool,
   1192 			pmap_ptpt_ctor, NULL, NULL);
   1193 
   1194 	cpu_dcache_wbinv_all();
   1195 }
   1196 
   1197 /*
   1198  * void pmap_init(void)
   1199  *
   1200  * Initialize the pmap module.
   1201  * Called by vm_init() in vm/vm_init.c in order to initialise
   1202  * any structures that the pmap system needs to map virtual memory.
   1203  */
   1204 
   1205 extern int physmem;
   1206 
   1207 void
   1208 pmap_init(void)
   1209 {
   1210 
   1211 	/*
   1212 	 * Set the available memory vars - These do not map to real memory
   1213 	 * addresses and cannot as the physical memory is fragmented.
   1214 	 * They are used by ps for %mem calculations.
   1215 	 * One could argue whether this should be the entire memory or just
   1216 	 * the memory that is useable in a user process.
   1217 	 */
   1218 	avail_start = 0;
   1219 	avail_end = physmem * NBPG;
   1220 
   1221 	/*
   1222 	 * now we need to free enough pv_entry structures to allow us to get
   1223 	 * the kmem_map/kmem_object allocated and inited (done after this
   1224 	 * function is finished).  to do this we allocate one bootstrap page out
   1225 	 * of kernel_map and use it to provide an initial pool of pv_entry
   1226 	 * structures.   we never free this page.
   1227 	 */
   1228 
   1229 	pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
   1230 	if (pv_initpage == NULL)
   1231 		panic("pmap_init: pv_initpage");
   1232 	pv_cachedva = 0;   /* a VA we have allocated but not used yet */
   1233 	pv_nfpvents = 0;
   1234 	(void) pmap_add_pvpage(pv_initpage, FALSE);
   1235 
   1236 	pmap_initialized = TRUE;
   1237 
   1238 	/* Initialise our L1 page table queues and counters */
   1239 	SIMPLEQ_INIT(&l1pt_static_queue);
   1240 	l1pt_static_queue_count = 0;
   1241 	l1pt_static_create_count = 0;
   1242 	SIMPLEQ_INIT(&l1pt_queue);
   1243 	l1pt_queue_count = 0;
   1244 	l1pt_create_count = 0;
   1245 	l1pt_reuse_count = 0;
   1246 }
   1247 
   1248 /*
   1249  * pmap_postinit()
   1250  *
   1251  * This routine is called after the vm and kmem subsystems have been
   1252  * initialised. This allows the pmap code to perform any initialisation
   1253  * that can only be done one the memory allocation is in place.
   1254  */
   1255 
   1256 void
   1257 pmap_postinit(void)
   1258 {
   1259 	int loop;
   1260 	struct l1pt *pt;
   1261 
   1262 #ifdef PMAP_STATIC_L1S
   1263 	for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
   1264 #else	/* PMAP_STATIC_L1S */
   1265 	for (loop = 0; loop < max_processes; ++loop) {
   1266 #endif	/* PMAP_STATIC_L1S */
   1267 		/* Allocate a L1 page table */
   1268 		pt = pmap_alloc_l1pt();
   1269 		if (!pt)
   1270 			panic("Cannot allocate static L1 page tables\n");
   1271 
   1272 		/* Clean it */
   1273 		bzero((void *)pt->pt_va, L1_TABLE_SIZE);
   1274 		pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
   1275 		/* Add the page table to the queue */
   1276 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
   1277 		++l1pt_static_queue_count;
   1278 		++l1pt_static_create_count;
   1279 	}
   1280 }
   1281 
   1282 
   1283 /*
   1284  * Create and return a physical map.
   1285  *
   1286  * If the size specified for the map is zero, the map is an actual physical
   1287  * map, and may be referenced by the hardware.
   1288  *
   1289  * If the size specified is non-zero, the map will be used in software only,
   1290  * and is bounded by that size.
   1291  */
   1292 
   1293 pmap_t
   1294 pmap_create(void)
   1295 {
   1296 	struct pmap *pmap;
   1297 
   1298 	/*
   1299 	 * Fetch pmap entry from the pool
   1300 	 */
   1301 
   1302 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
   1303 	/* XXX is this really needed! */
   1304 	memset(pmap, 0, sizeof(*pmap));
   1305 
   1306 	simple_lock_init(&pmap->pm_obj.vmobjlock);
   1307 	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
   1308 	TAILQ_INIT(&pmap->pm_obj.memq);
   1309 	pmap->pm_obj.uo_npages = 0;
   1310 	pmap->pm_obj.uo_refs = 1;
   1311 	pmap->pm_stats.wired_count = 0;
   1312 	pmap->pm_stats.resident_count = 1;
   1313 	pmap->pm_ptphint = NULL;
   1314 
   1315 	/* Now init the machine part of the pmap */
   1316 	pmap_pinit(pmap);
   1317 	return(pmap);
   1318 }
   1319 
   1320 /*
   1321  * pmap_alloc_l1pt()
   1322  *
   1323  * This routine allocates physical and virtual memory for a L1 page table
   1324  * and wires it.
   1325  * A l1pt structure is returned to describe the allocated page table.
   1326  *
   1327  * This routine is allowed to fail if the required memory cannot be allocated.
   1328  * In this case NULL is returned.
   1329  */
   1330 
   1331 struct l1pt *
   1332 pmap_alloc_l1pt(void)
   1333 {
   1334 	paddr_t pa;
   1335 	vaddr_t va;
   1336 	struct l1pt *pt;
   1337 	int error;
   1338 	struct vm_page *m;
   1339 
   1340 	/* Allocate virtual address space for the L1 page table */
   1341 	va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
   1342 	if (va == 0) {
   1343 #ifdef DIAGNOSTIC
   1344 		PDEBUG(0,
   1345 		    printf("pmap: Cannot allocate pageable memory for L1\n"));
   1346 #endif	/* DIAGNOSTIC */
   1347 		return(NULL);
   1348 	}
   1349 
   1350 	/* Allocate memory for the l1pt structure */
   1351 	pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
   1352 
   1353 	/*
   1354 	 * Allocate pages from the VM system.
   1355 	 */
   1356 	TAILQ_INIT(&pt->pt_plist);
   1357 	error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
   1358 	    L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
   1359 	if (error) {
   1360 #ifdef DIAGNOSTIC
   1361 		PDEBUG(0,
   1362 		    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
   1363 		    error));
   1364 #endif	/* DIAGNOSTIC */
   1365 		/* Release the resources we already have claimed */
   1366 		free(pt, M_VMPMAP);
   1367 		uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
   1368 		return(NULL);
   1369 	}
   1370 
   1371 	/* Map our physical pages into our virtual space */
   1372 	pt->pt_va = va;
   1373 	m = TAILQ_FIRST(&pt->pt_plist);
   1374 	while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
   1375 		pa = VM_PAGE_TO_PHYS(m);
   1376 
   1377 		pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);
   1378 
   1379 		va += NBPG;
   1380 		m = m->pageq.tqe_next;
   1381 	}
   1382 
   1383 #ifdef DIAGNOSTIC
   1384 	if (m)
   1385 		panic("pmap_alloc_l1pt: pglist not empty\n");
   1386 #endif	/* DIAGNOSTIC */
   1387 
   1388 	pt->pt_flags = 0;
   1389 	return(pt);
   1390 }
   1391 
   1392 /*
   1393  * Free a L1 page table previously allocated with pmap_alloc_l1pt().
   1394  */
   1395 static void
   1396 pmap_free_l1pt(struct l1pt *pt)
   1397 {
   1398 	/* Separate the physical memory for the virtual space */
   1399 	pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
   1400 	pmap_update(pmap_kernel());
   1401 
   1402 	/* Return the physical memory */
   1403 	uvm_pglistfree(&pt->pt_plist);
   1404 
   1405 	/* Free the virtual space */
   1406 	uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);
   1407 
   1408 	/* Free the l1pt structure */
   1409 	free(pt, M_VMPMAP);
   1410 }
   1411 
   1412 /*
   1413  * pmap_ptpt_page_alloc:
   1414  *
   1415  *	Back-end page allocator for the PT-PT pool.
   1416  */
   1417 static void *
   1418 pmap_ptpt_page_alloc(struct pool *pp, int flags)
   1419 {
   1420 	struct vm_page *pg;
   1421 	pt_entry_t *pte;
   1422 	vaddr_t va;
   1423 
   1424 	/* XXX PR_WAITOK? */
   1425 	va = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
   1426 	if (va == 0)
   1427 		return (NULL);
   1428 
   1429 	for (;;) {
   1430 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
   1431 		if (pg != NULL)
   1432 			break;
   1433 		if ((flags & PR_WAITOK) == 0) {
   1434 			uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
   1435 			return (NULL);
   1436 		}
   1437 		uvm_wait("pmap_ptpt");
   1438 	}
   1439 
   1440 	pte = vtopte(va);
   1441 	KDASSERT(pmap_pte_v(pte) == 0);
   1442 
   1443 	*pte = L2_S_PROTO | VM_PAGE_TO_PHYS(pg) |
   1444 	     L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
   1445 #ifdef PMAP_ALIAS_DEBUG
   1446     {
   1447 	int s = splhigh();
   1448 	pg->mdpage.krw_mappings++;
   1449 	splx(s);
   1450     }
   1451 #endif /* PMAP_ALIAS_DEBUG */
   1452 
   1453 	return ((void *) va);
   1454 }
   1455 
   1456 /*
   1457  * pmap_ptpt_page_free:
   1458  *
   1459  *	Back-end page free'er for the PT-PT pool.
   1460  */
   1461 static void
   1462 pmap_ptpt_page_free(struct pool *pp, void *v)
   1463 {
   1464 	vaddr_t va = (vaddr_t) v;
   1465 	paddr_t pa;
   1466 
   1467 	pa = vtophys(va);
   1468 
   1469 	pmap_kremove(va, L2_TABLE_SIZE);
   1470 	pmap_update(pmap_kernel());
   1471 
   1472 	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
   1473 
   1474 	uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
   1475 }
   1476 
   1477 /*
   1478  * pmap_ptpt_ctor:
   1479  *
   1480  *	Constructor for the PT-PT cache.
   1481  */
   1482 static int
   1483 pmap_ptpt_ctor(void *arg, void *object, int flags)
   1484 {
   1485 	caddr_t vptpt = object;
   1486 
   1487 	/* Page is already zero'd. */
   1488 
   1489 	/*
   1490 	 * Map in kernel PTs.
   1491 	 *
   1492 	 * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
   1493 	 */
   1494 	memcpy(vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
   1495 	       (char *)(PTE_BASE + (PTE_BASE >> (PGSHIFT - 2)) +
   1496 			((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
   1497 	       (KERNEL_PD_SIZE >> 2));
   1498 
   1499 	return (0);
   1500 }
   1501 
   1502 /*
   1503  * Allocate a page directory.
   1504  * This routine will either allocate a new page directory from the pool
   1505  * of L1 page tables currently held by the kernel or it will allocate
   1506  * a new one via pmap_alloc_l1pt().
   1507  * It will then initialise the l1 page table for use.
   1508  */
   1509 static int
   1510 pmap_allocpagedir(struct pmap *pmap)
   1511 {
   1512 	vaddr_t vptpt;
   1513 	paddr_t pa;
   1514 	struct l1pt *pt;
   1515 	u_int gen;
   1516 
   1517 	PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
   1518 
   1519 	/* Do we have any spare L1's lying around ? */
   1520 	if (l1pt_static_queue_count) {
   1521 		--l1pt_static_queue_count;
   1522 		pt = l1pt_static_queue.sqh_first;
   1523 		SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
   1524 	} else if (l1pt_queue_count) {
   1525 		--l1pt_queue_count;
   1526 		pt = l1pt_queue.sqh_first;
   1527 		SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
   1528 		++l1pt_reuse_count;
   1529 	} else {
   1530 		pt = pmap_alloc_l1pt();
   1531 		if (!pt)
   1532 			return(ENOMEM);
   1533 		++l1pt_create_count;
   1534 	}
   1535 
   1536 	/* Store the pointer to the l1 descriptor in the pmap. */
   1537 	pmap->pm_l1pt = pt;
   1538 
   1539 	/* Get the physical address of the start of the l1 */
   1540 	pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));
   1541 
   1542 	/* Store the virtual address of the l1 in the pmap. */
   1543 	pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
   1544 
   1545 	/* Clean the L1 if it is dirty */
   1546 	if (!(pt->pt_flags & PTFLAG_CLEAN)) {
   1547 		bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
   1548 		cpu_dcache_wb_range((vaddr_t) pmap->pm_pdir,
   1549 		    (L1_TABLE_SIZE - KERNEL_PD_SIZE));
   1550 	}
   1551 
   1552 	/* Allocate a page table to map all the page tables for this pmap */
   1553 	KASSERT(pmap->pm_vptpt == 0);
   1554 
   1555  try_again:
   1556 	gen = pmap_ptpt_cache_generation;
   1557 	vptpt = (vaddr_t) pool_cache_get(&pmap_ptpt_cache, PR_WAITOK);
   1558 	if (vptpt == NULL) {
   1559 		PDEBUG(0, printf("pmap_alloc_pagedir: no KVA for PTPT\n"));
   1560 		pmap_freepagedir(pmap);
   1561 		return (ENOMEM);
   1562 	}
   1563 
   1564 	/* need to lock this all up for growkernel */
   1565 	simple_lock(&pmaps_lock);
   1566 
   1567 	if (gen != pmap_ptpt_cache_generation) {
   1568 		simple_unlock(&pmaps_lock);
   1569 		pool_cache_destruct_object(&pmap_ptpt_cache, (void *) vptpt);
   1570 		goto try_again;
   1571 	}
   1572 
   1573 	pmap->pm_vptpt = vptpt;
   1574 	pmap->pm_pptpt = vtophys(vptpt);
   1575 
   1576 	/* Duplicate the kernel mappings. */
   1577 	bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
   1578 		(char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
   1579 		KERNEL_PD_SIZE);
   1580 	cpu_dcache_wb_range((vaddr_t)pmap->pm_pdir +
   1581 	    (L1_TABLE_SIZE - KERNEL_PD_SIZE), KERNEL_PD_SIZE);
   1582 
   1583 	/* Wire in this page table */
   1584 	pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, TRUE);
   1585 
   1586 	pt->pt_flags &= ~PTFLAG_CLEAN;	/* L1 is dirty now */
   1587 
   1588 	LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
   1589 	simple_unlock(&pmaps_lock);
   1590 
   1591 	return(0);
   1592 }
   1593 
   1594 
   1595 /*
   1596  * Initialize a preallocated and zeroed pmap structure,
   1597  * such as one in a vmspace structure.
   1598  */
   1599 
   1600 void
   1601 pmap_pinit(struct pmap *pmap)
   1602 {
   1603 	int backoff = 6;
   1604 	int retry = 10;
   1605 
   1606 	PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
   1607 
   1608 	/* Keep looping until we succeed in allocating a page directory */
   1609 	while (pmap_allocpagedir(pmap) != 0) {
   1610 		/*
   1611 		 * Ok we failed to allocate a suitable block of memory for an
   1612 		 * L1 page table. This means that either:
   1613 		 * 1. 16KB of virtual address space could not be allocated
   1614 		 * 2. 16KB of physically contiguous memory on a 16KB boundary
   1615 		 *    could not be allocated.
   1616 		 *
   1617 		 * Since we cannot fail we will sleep for a while and try
   1618 		 * again.
   1619 		 *
   1620 		 * Searching for a suitable L1 PT is expensive:
   1621 		 * to avoid hogging the system when memory is really
   1622 		 * scarce, use an exponential back-off so that
   1623 		 * eventually we won't retry more than once every 8
   1624 		 * seconds.  This should allow other processes to run
   1625 		 * to completion and free up resources.
   1626 		 */
   1627 		(void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
   1628 		    NULL);
   1629 		if (--retry == 0) {
   1630 			retry = 10;
   1631 			if (backoff)
   1632 				--backoff;
   1633 		}
   1634 	}
   1635 
   1636 	if (vector_page < KERNEL_BASE) {
   1637 		/*
   1638 		 * Map the vector page.  This will also allocate and map
   1639 		 * an L2 table for it.
   1640 		 */
   1641 		pmap_enter(pmap, vector_page, systempage.pv_pa,
   1642 		    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
   1643 		pmap_update(pmap);
   1644 	}
   1645 }
   1646 
   1647 void
   1648 pmap_freepagedir(struct pmap *pmap)
   1649 {
   1650 	/* Free the memory used for the page table mapping */
   1651 	if (pmap->pm_vptpt != 0) {
   1652 		/*
   1653 		 * XXX Objects freed to a pool cache must be in constructed
   1654 		 * XXX form when freed, but we don't free page tables as we
   1655 		 * XXX go, so we need to zap the mappings here.
   1656 		 *
   1657 		 * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
   1658 		 */
   1659 		memset((caddr_t) pmap->pm_vptpt, 0,
   1660 		       ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2));
   1661 		pool_cache_put(&pmap_ptpt_cache, (void *) pmap->pm_vptpt);
   1662 	}
   1663 
   1664 	/* junk the L1 page table */
   1665 	if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
   1666 		/* Add the page table to the queue */
   1667 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue,
   1668 				    pmap->pm_l1pt, pt_queue);
   1669 		++l1pt_static_queue_count;
   1670 	} else if (l1pt_queue_count < 8) {
   1671 		/* Add the page table to the queue */
   1672 		SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
   1673 		++l1pt_queue_count;
   1674 	} else
   1675 		pmap_free_l1pt(pmap->pm_l1pt);
   1676 }
   1677 
   1678 /*
   1679  * Retire the given physical map from service.
   1680  * Should only be called if the map contains no valid mappings.
   1681  */
   1682 
   1683 void
   1684 pmap_destroy(struct pmap *pmap)
   1685 {
   1686 	struct vm_page *page;
   1687 	int count;
   1688 
   1689 	if (pmap == NULL)
   1690 		return;
   1691 
   1692 	PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
   1693 
   1694 	/*
   1695 	 * Drop reference count
   1696 	 */
   1697 	simple_lock(&pmap->pm_obj.vmobjlock);
   1698 	count = --pmap->pm_obj.uo_refs;
   1699 	simple_unlock(&pmap->pm_obj.vmobjlock);
   1700 	if (count > 0) {
   1701 		return;
   1702 	}
   1703 
   1704 	/*
   1705 	 * reference count is zero, free pmap resources and then free pmap.
   1706 	 */
   1707 
   1708 	/*
   1709 	 * remove it from global list of pmaps
   1710 	 */
   1711 
   1712 	simple_lock(&pmaps_lock);
   1713 	LIST_REMOVE(pmap, pm_list);
   1714 	simple_unlock(&pmaps_lock);
   1715 
   1716 	if (vector_page < KERNEL_BASE) {
   1717 		/* Remove the vector page mapping */
   1718 		pmap_remove(pmap, vector_page, vector_page + NBPG);
   1719 		pmap_update(pmap);
   1720 	}
   1721 
   1722 	/*
   1723 	 * Free any page tables still mapped
   1724 	 * This is only temporay until pmap_enter can count the number
   1725 	 * of mappings made in a page table. Then pmap_remove() can
   1726 	 * reduce the count and free the pagetable when the count
   1727 	 * reaches zero.  Note that entries in this list should match the
   1728 	 * contents of the ptpt, however this is faster than walking a 1024
   1729 	 * entries looking for pt's
   1730 	 * taken from i386 pmap.c
   1731 	 */
   1732 	/*
   1733 	 * vmobjlock must be held while freeing pages
   1734 	 */
   1735 	simple_lock(&pmap->pm_obj.vmobjlock);
   1736 	while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
   1737 		KASSERT((page->flags & PG_BUSY) == 0);
   1738 		page->wire_count = 0;
   1739 		uvm_pagefree(page);
   1740 	}
   1741 	simple_unlock(&pmap->pm_obj.vmobjlock);
   1742 
   1743 	/* Free the page dir */
   1744 	pmap_freepagedir(pmap);
   1745 
   1746 	/* return the pmap to the pool */
   1747 	pool_put(&pmap_pmap_pool, pmap);
   1748 }
   1749 
   1750 
   1751 /*
   1752  * void pmap_reference(struct pmap *pmap)
   1753  *
   1754  * Add a reference to the specified pmap.
   1755  */
   1756 
   1757 void
   1758 pmap_reference(struct pmap *pmap)
   1759 {
   1760 	if (pmap == NULL)
   1761 		return;
   1762 
   1763 	simple_lock(&pmap->pm_lock);
   1764 	pmap->pm_obj.uo_refs++;
   1765 	simple_unlock(&pmap->pm_lock);
   1766 }
   1767 
   1768 /*
   1769  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1770  *
   1771  * Return the start and end addresses of the kernel's virtual space.
   1772  * These values are setup in pmap_bootstrap and are updated as pages
   1773  * are allocated.
   1774  */
   1775 
   1776 void
   1777 pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1778 {
   1779 	*start = virtual_avail;
   1780 	*end = virtual_end;
   1781 }
   1782 
   1783 /*
   1784  * Activate the address space for the specified process.  If the process
   1785  * is the current process, load the new MMU context.
   1786  */
   1787 void
   1788 pmap_activate(struct proc *p)
   1789 {
   1790 	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
   1791 	struct pcb *pcb = &p->p_addr->u_pcb;
   1792 
   1793 	(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
   1794 	    (paddr_t *)&pcb->pcb_pagedir);
   1795 
   1796 	PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
   1797 	    p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
   1798 
   1799 	if (p == curproc) {
   1800 		PDEBUG(0, printf("pmap_activate: setting TTB\n"));
   1801 		setttb((u_int)pcb->pcb_pagedir);
   1802 	}
   1803 }
   1804 
   1805 /*
   1806  * Deactivate the address space of the specified process.
   1807  */
   1808 void
   1809 pmap_deactivate(struct proc *p)
   1810 {
   1811 }
   1812 
   1813 /*
   1814  * Perform any deferred pmap operations.
   1815  */
   1816 void
   1817 pmap_update(struct pmap *pmap)
   1818 {
   1819 
   1820 	/*
   1821 	 * We haven't deferred any pmap operations, but we do need to
   1822 	 * make sure TLB/cache operations have completed.
   1823 	 */
   1824 	cpu_cpwait();
   1825 }
   1826 
   1827 /*
   1828  * pmap_clean_page()
   1829  *
   1830  * This is a local function used to work out the best strategy to clean
   1831  * a single page referenced by its entry in the PV table. It's used by
   1832  * pmap_copy_page, pmap_zero page and maybe some others later on.
   1833  *
   1834  * Its policy is effectively:
   1835  *  o If there are no mappings, we don't bother doing anything with the cache.
   1836  *  o If there is one mapping, we clean just that page.
   1837  *  o If there are multiple mappings, we clean the entire cache.
   1838  *
   1839  * So that some functions can be further optimised, it returns 0 if it didn't
   1840  * clean the entire cache, or 1 if it did.
   1841  *
   1842  * XXX One bug in this routine is that if the pv_entry has a single page
   1843  * mapped at 0x00000000 a whole cache clean will be performed rather than
   1844  * just the 1 page. Since this should not occur in everyday use and if it does
   1845  * it will just result in not the most efficient clean for the page.
   1846  */
   1847 static int
   1848 pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
   1849 {
   1850 	struct pmap *pmap;
   1851 	struct pv_entry *npv;
   1852 	int cache_needs_cleaning = 0;
   1853 	vaddr_t page_to_clean = 0;
   1854 
   1855 	if (pv == NULL)
   1856 		/* nothing mapped in so nothing to flush */
   1857 		return (0);
   1858 
   1859 	/* Since we flush the cache each time we change curproc, we
   1860 	 * only need to flush the page if it is in the current pmap.
   1861 	 */
   1862 	if (curproc)
   1863 		pmap = curproc->p_vmspace->vm_map.pmap;
   1864 	else
   1865 		pmap = pmap_kernel();
   1866 
   1867 	for (npv = pv; npv; npv = npv->pv_next) {
   1868 		if (npv->pv_pmap == pmap) {
   1869 			/* The page is mapped non-cacheable in
   1870 			 * this map.  No need to flush the cache.
   1871 			 */
   1872 			if (npv->pv_flags & PVF_NC) {
   1873 #ifdef DIAGNOSTIC
   1874 				if (cache_needs_cleaning)
   1875 					panic("pmap_clean_page: "
   1876 							"cache inconsistency");
   1877 #endif
   1878 				break;
   1879 			}
   1880 #if 0
   1881 			/*
   1882 			 * XXX Can't do this because pmap_protect doesn't
   1883 			 * XXX clean the page when it does a write-protect.
   1884 			 */
   1885 			else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
   1886 				continue;
   1887 #endif
   1888 			if (cache_needs_cleaning){
   1889 				page_to_clean = 0;
   1890 				break;
   1891 			}
   1892 			else
   1893 				page_to_clean = npv->pv_va;
   1894 			cache_needs_cleaning = 1;
   1895 		}
   1896 	}
   1897 
   1898 	if (page_to_clean)
   1899 		cpu_idcache_wbinv_range(page_to_clean, NBPG);
   1900 	else if (cache_needs_cleaning) {
   1901 		cpu_idcache_wbinv_all();
   1902 		return (1);
   1903 	}
   1904 	return (0);
   1905 }
   1906 
   1907 /*
   1908  * pmap_zero_page()
   1909  *
   1910  * Zero a given physical page by mapping it at a page hook point.
   1911  * In doing the zero page op, the page we zero is mapped cachable, as with
   1912  * StrongARM accesses to non-cached pages are non-burst making writing
   1913  * _any_ bulk data very slow.
   1914  */
   1915 #if ARM_MMU_GENERIC == 1
   1916 void
   1917 pmap_zero_page_generic(paddr_t phys)
   1918 {
   1919 #ifdef DEBUG
   1920 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
   1921 
   1922 	if (pg->mdpage.pvh_list != NULL)
   1923 		panic("pmap_zero_page: page has mappings");
   1924 #endif
   1925 
   1926 	KDASSERT((phys & PGOFSET) == 0);
   1927 
   1928 	/*
   1929 	 * Hook in the page, zero it, and purge the cache for that
   1930 	 * zeroed page. Invalidate the TLB as needed.
   1931 	 */
   1932 	*cdst_pte = L2_S_PROTO | phys |
   1933 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   1934 	cpu_tlb_flushD_SE(cdstp);
   1935 	cpu_cpwait();
   1936 	bzero_page(cdstp);
   1937 	cpu_dcache_wbinv_range(cdstp, NBPG);
   1938 }
   1939 #endif /* ARM_MMU_GENERIC == 1 */
   1940 
   1941 #if ARM_MMU_XSCALE == 1
   1942 void
   1943 pmap_zero_page_xscale(paddr_t phys)
   1944 {
   1945 #ifdef DEBUG
   1946 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
   1947 
   1948 	if (pg->mdpage.pvh_list != NULL)
   1949 		panic("pmap_zero_page: page has mappings");
   1950 #endif
   1951 
   1952 	KDASSERT((phys & PGOFSET) == 0);
   1953 
   1954 	/*
   1955 	 * Hook in the page, zero it, and purge the cache for that
   1956 	 * zeroed page. Invalidate the TLB as needed.
   1957 	 */
   1958 	*cdst_pte = L2_S_PROTO | phys |
   1959 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
   1960 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   1961 	cpu_tlb_flushD_SE(cdstp);
   1962 	cpu_cpwait();
   1963 	bzero_page(cdstp);
   1964 	xscale_cache_clean_minidata();
   1965 }
   1966 #endif /* ARM_MMU_XSCALE == 1 */
   1967 
   1968 /* pmap_pageidlezero()
   1969  *
   1970  * The same as above, except that we assume that the page is not
   1971  * mapped.  This means we never have to flush the cache first.  Called
   1972  * from the idle loop.
   1973  */
   1974 boolean_t
   1975 pmap_pageidlezero(paddr_t phys)
   1976 {
   1977 	int i, *ptr;
   1978 	boolean_t rv = TRUE;
   1979 #ifdef DEBUG
   1980 	struct vm_page *pg;
   1981 
   1982 	pg = PHYS_TO_VM_PAGE(phys);
   1983 	if (pg->mdpage.pvh_list != NULL)
   1984 		panic("pmap_pageidlezero: page has mappings");
   1985 #endif
   1986 
   1987 	KDASSERT((phys & PGOFSET) == 0);
   1988 
   1989 	/*
   1990 	 * Hook in the page, zero it, and purge the cache for that
   1991 	 * zeroed page. Invalidate the TLB as needed.
   1992 	 */
   1993 	*cdst_pte = L2_S_PROTO | phys |
   1994 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   1995 	cpu_tlb_flushD_SE(cdstp);
   1996 	cpu_cpwait();
   1997 
   1998 	for (i = 0, ptr = (int *)cdstp;
   1999 			i < (NBPG / sizeof(int)); i++) {
   2000 		if (sched_whichqs != 0) {
   2001 			/*
   2002 			 * A process has become ready.  Abort now,
   2003 			 * so we don't keep it waiting while we
   2004 			 * do slow memory access to finish this
   2005 			 * page.
   2006 			 */
   2007 			rv = FALSE;
   2008 			break;
   2009 		}
   2010 		*ptr++ = 0;
   2011 	}
   2012 
   2013 	if (rv)
   2014 		/*
   2015 		 * if we aborted we'll rezero this page again later so don't
   2016 		 * purge it unless we finished it
   2017 		 */
   2018 		cpu_dcache_wbinv_range(cdstp, NBPG);
   2019 	return (rv);
   2020 }
   2021 
   2022 /*
   2023  * pmap_copy_page()
   2024  *
   2025  * Copy one physical page into another, by mapping the pages into
   2026  * hook points. The same comment regarding cachability as in
   2027  * pmap_zero_page also applies here.
   2028  */
   2029 #if ARM_MMU_GENERIC == 1
   2030 void
   2031 pmap_copy_page_generic(paddr_t src, paddr_t dst)
   2032 {
   2033 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
   2034 #ifdef DEBUG
   2035 	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
   2036 
   2037 	if (dst_pg->mdpage.pvh_list != NULL)
   2038 		panic("pmap_copy_page: dst page has mappings");
   2039 #endif
   2040 
   2041 	KDASSERT((src & PGOFSET) == 0);
   2042 	KDASSERT((dst & PGOFSET) == 0);
   2043 
   2044 	/*
   2045 	 * Clean the source page.  Hold the source page's lock for
   2046 	 * the duration of the copy so that no other mappings can
   2047 	 * be created while we have a potentially aliased mapping.
   2048 	 */
   2049 	simple_lock(&src_pg->mdpage.pvh_slock);
   2050 	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
   2051 
   2052 	/*
   2053 	 * Map the pages into the page hook points, copy them, and purge
   2054 	 * the cache for the appropriate page. Invalidate the TLB
   2055 	 * as required.
   2056 	 */
   2057 	*csrc_pte = L2_S_PROTO | src |
   2058 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
   2059 	*cdst_pte = L2_S_PROTO | dst |
   2060 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   2061 	cpu_tlb_flushD_SE(csrcp);
   2062 	cpu_tlb_flushD_SE(cdstp);
   2063 	cpu_cpwait();
   2064 	bcopy_page(csrcp, cdstp);
   2065 	cpu_dcache_inv_range(csrcp, NBPG);
   2066 	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
   2067 	cpu_dcache_wbinv_range(cdstp, NBPG);
   2068 }
   2069 #endif /* ARM_MMU_GENERIC == 1 */
   2070 
   2071 #if ARM_MMU_XSCALE == 1
   2072 void
   2073 pmap_copy_page_xscale(paddr_t src, paddr_t dst)
   2074 {
   2075 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
   2076 #ifdef DEBUG
   2077 	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
   2078 
   2079 	if (dst_pg->mdpage.pvh_list != NULL)
   2080 		panic("pmap_copy_page: dst page has mappings");
   2081 #endif
   2082 
   2083 	KDASSERT((src & PGOFSET) == 0);
   2084 	KDASSERT((dst & PGOFSET) == 0);
   2085 
   2086 	/*
   2087 	 * Clean the source page.  Hold the source page's lock for
   2088 	 * the duration of the copy so that no other mappings can
   2089 	 * be created while we have a potentially aliased mapping.
   2090 	 */
   2091 	simple_lock(&src_pg->mdpage.pvh_slock);
   2092 	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
   2093 
   2094 	/*
   2095 	 * Map the pages into the page hook points, copy them, and purge
   2096 	 * the cache for the appropriate page. Invalidate the TLB
   2097 	 * as required.
   2098 	 */
   2099 	*csrc_pte = L2_S_PROTO | src |
   2100 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
   2101 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   2102 	*cdst_pte = L2_S_PROTO | dst |
   2103 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
   2104 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   2105 	cpu_tlb_flushD_SE(csrcp);
   2106 	cpu_tlb_flushD_SE(cdstp);
   2107 	cpu_cpwait();
   2108 	bcopy_page(csrcp, cdstp);
   2109 	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
   2110 	xscale_cache_clean_minidata();
   2111 }
   2112 #endif /* ARM_MMU_XSCALE == 1 */
   2113 
   2114 #if 0
   2115 void
   2116 pmap_pte_addref(struct pmap *pmap, vaddr_t va)
   2117 {
   2118 	pd_entry_t *pde;
   2119 	paddr_t pa;
   2120 	struct vm_page *m;
   2121 
   2122 	if (pmap == pmap_kernel())
   2123 		return;
   2124 
   2125 	pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
   2126 	pa = pmap_pte_pa(pde);
   2127 	m = PHYS_TO_VM_PAGE(pa);
   2128 	++m->wire_count;
   2129 #ifdef MYCROFT_HACK
   2130 	printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   2131 	    pmap, va, pde, pa, m, m->wire_count);
   2132 #endif
   2133 }
   2134 
   2135 void
   2136 pmap_pte_delref(struct pmap *pmap, vaddr_t va)
   2137 {
   2138 	pd_entry_t *pde;
   2139 	paddr_t pa;
   2140 	struct vm_page *m;
   2141 
   2142 	if (pmap == pmap_kernel())
   2143 		return;
   2144 
   2145 	pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
   2146 	pa = pmap_pte_pa(pde);
   2147 	m = PHYS_TO_VM_PAGE(pa);
   2148 	--m->wire_count;
   2149 #ifdef MYCROFT_HACK
   2150 	printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   2151 	    pmap, va, pde, pa, m, m->wire_count);
   2152 #endif
   2153 	if (m->wire_count == 0) {
   2154 #ifdef MYCROFT_HACK
   2155 		printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
   2156 		    pmap, va, pde, pa, m);
   2157 #endif
   2158 		pmap_unmap_in_l1(pmap, va);
   2159 		uvm_pagefree(m);
   2160 		--pmap->pm_stats.resident_count;
   2161 	}
   2162 }
   2163 #else
   2164 #define	pmap_pte_addref(pmap, va)
   2165 #define	pmap_pte_delref(pmap, va)
   2166 #endif
   2167 
   2168 /*
   2169  * Since we have a virtually indexed cache, we may need to inhibit caching if
   2170  * there is more than one mapping and at least one of them is writable.
   2171  * Since we purge the cache on every context switch, we only need to check for
   2172  * other mappings within the same pmap, or kernel_pmap.
   2173  * This function is also called when a page is unmapped, to possibly reenable
   2174  * caching on any remaining mappings.
   2175  *
   2176  * The code implements the following logic, where:
   2177  *
   2178  * KW = # of kernel read/write pages
   2179  * KR = # of kernel read only pages
   2180  * UW = # of user read/write pages
   2181  * UR = # of user read only pages
   2182  * OW = # of user read/write pages in another pmap, then
   2183  *
   2184  * KC = kernel mapping is cacheable
   2185  * UC = user mapping is cacheable
   2186  *
   2187  *                     KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
   2188  *                   +---------------------------------------------
   2189  * UW=0,UR=0,OW=0    | ---        KC=1       KC=1       KC=0
   2190  * UW=0,UR>0,OW=0    | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
   2191  * UW=0,UR>0,OW>0    | UC=1       KC=0,UC=1  KC=0,UC=0  KC=0,UC=0
   2192  * UW=1,UR=0,OW=0    | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
   2193  * UW>1,UR>=0,OW>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
   2194  *
   2195  * Note that the pmap must have it's ptes mapped in, and passed with ptes.
   2196  */
   2197 __inline static void
   2198 pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2199 	boolean_t clear_cache)
   2200 {
   2201 	if (pmap == pmap_kernel())
   2202 		pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
   2203 	else
   2204 		pmap_vac_me_user(pmap, pg, ptes, clear_cache);
   2205 }
   2206 
   2207 static void
   2208 pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2209 	boolean_t clear_cache)
   2210 {
   2211 	int user_entries = 0;
   2212 	int user_writable = 0;
   2213 	int user_cacheable = 0;
   2214 	int kernel_entries = 0;
   2215 	int kernel_writable = 0;
   2216 	int kernel_cacheable = 0;
   2217 	struct pv_entry *pv;
   2218 	struct pmap *last_pmap = pmap;
   2219 
   2220 #ifdef DIAGNOSTIC
   2221 	if (pmap != pmap_kernel())
   2222 		panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
   2223 #endif
   2224 
   2225 	/*
   2226 	 * Pass one, see if there are both kernel and user pmaps for
   2227 	 * this page.  Calculate whether there are user-writable or
   2228 	 * kernel-writable pages.
   2229 	 */
   2230 	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
   2231 		if (pv->pv_pmap != pmap) {
   2232 			user_entries++;
   2233 			if (pv->pv_flags & PVF_WRITE)
   2234 				user_writable++;
   2235 			if ((pv->pv_flags & PVF_NC) == 0)
   2236 				user_cacheable++;
   2237 		} else {
   2238 			kernel_entries++;
   2239 			if (pv->pv_flags & PVF_WRITE)
   2240 				kernel_writable++;
   2241 			if ((pv->pv_flags & PVF_NC) == 0)
   2242 				kernel_cacheable++;
   2243 		}
   2244 	}
   2245 
   2246 	/*
   2247 	 * We know we have just been updating a kernel entry, so if
   2248 	 * all user pages are already cacheable, then there is nothing
   2249 	 * further to do.
   2250 	 */
   2251 	if (kernel_entries == 0 &&
   2252 	    user_cacheable == user_entries)
   2253 		return;
   2254 
   2255 	if (user_entries) {
   2256 		/*
   2257 		 * Scan over the list again, for each entry, if it
   2258 		 * might not be set correctly, call pmap_vac_me_user
   2259 		 * to recalculate the settings.
   2260 		 */
   2261 		for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
   2262 			/*
   2263 			 * We know kernel mappings will get set
   2264 			 * correctly in other calls.  We also know
   2265 			 * that if the pmap is the same as last_pmap
   2266 			 * then we've just handled this entry.
   2267 			 */
   2268 			if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
   2269 				continue;
   2270 			/*
   2271 			 * If there are kernel entries and this page
   2272 			 * is writable but non-cacheable, then we can
   2273 			 * skip this entry also.
   2274 			 */
   2275 			if (kernel_entries > 0 &&
   2276 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
   2277 			    (PVF_NC | PVF_WRITE))
   2278 				continue;
   2279 			/*
   2280 			 * Similarly if there are no kernel-writable
   2281 			 * entries and the page is already
   2282 			 * read-only/cacheable.
   2283 			 */
   2284 			if (kernel_writable == 0 &&
   2285 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
   2286 				continue;
   2287 			/*
   2288 			 * For some of the remaining cases, we know
   2289 			 * that we must recalculate, but for others we
   2290 			 * can't tell if they are correct or not, so
   2291 			 * we recalculate anyway.
   2292 			 */
   2293 			pmap_unmap_ptes(last_pmap);
   2294 			last_pmap = pv->pv_pmap;
   2295 			ptes = pmap_map_ptes(last_pmap);
   2296 			pmap_vac_me_user(last_pmap, pg, ptes,
   2297 			    pmap_is_curpmap(last_pmap));
   2298 		}
   2299 		/* Restore the pte mapping that was passed to us.  */
   2300 		if (last_pmap != pmap) {
   2301 			pmap_unmap_ptes(last_pmap);
   2302 			ptes = pmap_map_ptes(pmap);
   2303 		}
   2304 		if (kernel_entries == 0)
   2305 			return;
   2306 	}
   2307 
   2308 	pmap_vac_me_user(pmap, pg, ptes, clear_cache);
   2309 	return;
   2310 }
   2311 
   2312 static void
   2313 pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2314 	boolean_t clear_cache)
   2315 {
   2316 	struct pmap *kpmap = pmap_kernel();
   2317 	struct pv_entry *pv, *npv;
   2318 	int entries = 0;
   2319 	int writable = 0;
   2320 	int cacheable_entries = 0;
   2321 	int kern_cacheable = 0;
   2322 	int other_writable = 0;
   2323 
   2324 	pv = pg->mdpage.pvh_list;
   2325 	KASSERT(ptes != NULL);
   2326 
   2327 	/*
   2328 	 * Count mappings and writable mappings in this pmap.
   2329 	 * Include kernel mappings as part of our own.
   2330 	 * Keep a pointer to the first one.
   2331 	 */
   2332 	for (npv = pv; npv; npv = npv->pv_next) {
   2333 		/* Count mappings in the same pmap */
   2334 		if (pmap == npv->pv_pmap ||
   2335 		    kpmap == npv->pv_pmap) {
   2336 			if (entries++ == 0)
   2337 				pv = npv;
   2338 			/* Cacheable mappings */
   2339 			if ((npv->pv_flags & PVF_NC) == 0) {
   2340 				cacheable_entries++;
   2341 				if (kpmap == npv->pv_pmap)
   2342 					kern_cacheable++;
   2343 			}
   2344 			/* Writable mappings */
   2345 			if (npv->pv_flags & PVF_WRITE)
   2346 				++writable;
   2347 		} else if (npv->pv_flags & PVF_WRITE)
   2348 			other_writable = 1;
   2349 	}
   2350 
   2351 	PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
   2352 		"writable %d cacheable %d %s\n", pmap, entries, writable,
   2353 	    	cacheable_entries, clear_cache ? "clean" : "no clean"));
   2354 
   2355 	/*
   2356 	 * Enable or disable caching as necessary.
   2357 	 * Note: the first entry might be part of the kernel pmap,
   2358 	 * so we can't assume this is indicative of the state of the
   2359 	 * other (maybe non-kpmap) entries.
   2360 	 */
   2361 	if ((entries > 1 && writable) ||
   2362 	    (entries > 0 && pmap == kpmap && other_writable)) {
   2363 		if (cacheable_entries == 0)
   2364 		    return;
   2365 		for (npv = pv; npv; npv = npv->pv_next) {
   2366 			if ((pmap == npv->pv_pmap
   2367 			    || kpmap == npv->pv_pmap) &&
   2368 			    (npv->pv_flags & PVF_NC) == 0) {
   2369 				ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK;
   2370  				npv->pv_flags |= PVF_NC;
   2371 				/*
   2372 				 * If this page needs flushing from the
   2373 				 * cache, and we aren't going to do it
   2374 				 * below, do it now.
   2375 				 */
   2376 				if ((cacheable_entries < 4 &&
   2377 				    (clear_cache || npv->pv_pmap == kpmap)) ||
   2378 				    (npv->pv_pmap == kpmap &&
   2379 				    !clear_cache && kern_cacheable < 4)) {
   2380 					cpu_idcache_wbinv_range(npv->pv_va,
   2381 					    NBPG);
   2382 					cpu_tlb_flushID_SE(npv->pv_va);
   2383 				}
   2384 			}
   2385 		}
   2386 		if ((clear_cache && cacheable_entries >= 4) ||
   2387 		    kern_cacheable >= 4) {
   2388 			cpu_idcache_wbinv_all();
   2389 			cpu_tlb_flushID();
   2390 		}
   2391 		cpu_cpwait();
   2392 	} else if (entries > 0) {
   2393 		/*
   2394 		 * Turn cacheing back on for some pages.  If it is a kernel
   2395 		 * page, only do so if there are no other writable pages.
   2396 		 */
   2397 		for (npv = pv; npv; npv = npv->pv_next) {
   2398 			if ((pmap == npv->pv_pmap ||
   2399 			    (kpmap == npv->pv_pmap && other_writable == 0)) &&
   2400 			    (npv->pv_flags & PVF_NC)) {
   2401 				ptes[arm_btop(npv->pv_va)] |=
   2402 				    pte_l2_s_cache_mode;
   2403 				npv->pv_flags &= ~PVF_NC;
   2404 			}
   2405 		}
   2406 	}
   2407 }
   2408 
   2409 /*
   2410  * pmap_remove()
   2411  *
   2412  * pmap_remove is responsible for nuking a number of mappings for a range
   2413  * of virtual address space in the current pmap. To do this efficiently
   2414  * is interesting, because in a number of cases a wide virtual address
   2415  * range may be supplied that contains few actual mappings. So, the
   2416  * optimisations are:
   2417  *  1. Try and skip over hunks of address space for which an L1 entry
   2418  *     does not exist.
   2419  *  2. Build up a list of pages we've hit, up to a maximum, so we can
   2420  *     maybe do just a partial cache clean. This path of execution is
   2421  *     complicated by the fact that the cache must be flushed _before_
   2422  *     the PTE is nuked, being a VAC :-)
   2423  *  3. Maybe later fast-case a single page, but I don't think this is
   2424  *     going to make _that_ much difference overall.
   2425  */
   2426 
   2427 #define PMAP_REMOVE_CLEAN_LIST_SIZE	3
   2428 
   2429 void
   2430 pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
   2431 {
   2432 	int cleanlist_idx = 0;
   2433 	struct pagelist {
   2434 		vaddr_t va;
   2435 		pt_entry_t *pte;
   2436 	} cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
   2437 	pt_entry_t *pte = 0, *ptes;
   2438 	paddr_t pa;
   2439 	int pmap_active;
   2440 	struct vm_page *pg;
   2441 
   2442 	/* Exit quick if there is no pmap */
   2443 	if (!pmap)
   2444 		return;
   2445 
   2446 	PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
   2447 	    pmap, sva, eva));
   2448 
   2449 	/*
   2450 	 * we lock in the pmap => vm_page direction
   2451 	 */
   2452 	PMAP_MAP_TO_HEAD_LOCK();
   2453 
   2454 	ptes = pmap_map_ptes(pmap);
   2455 	/* Get a page table pointer */
   2456 	while (sva < eva) {
   2457 		if (pmap_pde_page(pmap_pde(pmap, sva)))
   2458 			break;
   2459 		sva = (sva & L1_S_FRAME) + L1_S_SIZE;
   2460 	}
   2461 
   2462 	pte = &ptes[arm_btop(sva)];
   2463 	/* Note if the pmap is active thus require cache and tlb cleans */
   2464 	pmap_active = pmap_is_curpmap(pmap);
   2465 
   2466 	/* Now loop along */
   2467 	while (sva < eva) {
   2468 		/* Check if we can move to the next PDE (l1 chunk) */
   2469 		if (!(sva & L2_ADDR_BITS))
   2470 			if (!pmap_pde_page(pmap_pde(pmap, sva))) {
   2471 				sva += L1_S_SIZE;
   2472 				pte += arm_btop(L1_S_SIZE);
   2473 				continue;
   2474 			}
   2475 
   2476 		/* We've found a valid PTE, so this page of PTEs has to go. */
   2477 		if (pmap_pte_v(pte)) {
   2478 			/* Update statistics */
   2479 			--pmap->pm_stats.resident_count;
   2480 
   2481 			/*
   2482 			 * Add this page to our cache remove list, if we can.
   2483 			 * If, however the cache remove list is totally full,
   2484 			 * then do a complete cache invalidation taking note
   2485 			 * to backtrack the PTE table beforehand, and ignore
   2486 			 * the lists in future because there's no longer any
   2487 			 * point in bothering with them (we've paid the
   2488 			 * penalty, so will carry on unhindered). Otherwise,
   2489 			 * when we fall out, we just clean the list.
   2490 			 */
   2491 			PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
   2492 			pa = pmap_pte_pa(pte);
   2493 
   2494 			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2495 				/* Add to the clean list. */
   2496 				cleanlist[cleanlist_idx].pte = pte;
   2497 				cleanlist[cleanlist_idx].va = sva;
   2498 				cleanlist_idx++;
   2499 			} else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2500 				int cnt;
   2501 
   2502 				/* Nuke everything if needed. */
   2503 				if (pmap_active) {
   2504 					cpu_idcache_wbinv_all();
   2505 					cpu_tlb_flushID();
   2506 				}
   2507 
   2508 				/*
   2509 				 * Roll back the previous PTE list,
   2510 				 * and zero out the current PTE.
   2511 				 */
   2512 				for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
   2513 					*cleanlist[cnt].pte = 0;
   2514 					pmap_pte_delref(pmap, cleanlist[cnt].va);
   2515 				}
   2516 				*pte = 0;
   2517 				pmap_pte_delref(pmap, sva);
   2518 				cleanlist_idx++;
   2519 			} else {
   2520 				/*
   2521 				 * We've already nuked the cache and
   2522 				 * TLB, so just carry on regardless,
   2523 				 * and we won't need to do it again
   2524 				 */
   2525 				*pte = 0;
   2526 				pmap_pte_delref(pmap, sva);
   2527 			}
   2528 
   2529 			/*
   2530 			 * Update flags. In a number of circumstances,
   2531 			 * we could cluster a lot of these and do a
   2532 			 * number of sequential pages in one go.
   2533 			 */
   2534 			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
   2535 				struct pv_entry *pve;
   2536 				simple_lock(&pg->mdpage.pvh_slock);
   2537 				pve = pmap_remove_pv(pg, pmap, sva);
   2538 				pmap_free_pv(pmap, pve);
   2539 				pmap_vac_me_harder(pmap, pg, ptes, FALSE);
   2540 				simple_unlock(&pg->mdpage.pvh_slock);
   2541 			}
   2542 		}
   2543 		sva += NBPG;
   2544 		pte++;
   2545 	}
   2546 
   2547 	pmap_unmap_ptes(pmap);
   2548 	/*
   2549 	 * Now, if we've fallen through down to here, chances are that there
   2550 	 * are less than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
   2551 	 */
   2552 	if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2553 		u_int cnt;
   2554 
   2555 		for (cnt = 0; cnt < cleanlist_idx; cnt++) {
   2556 			if (pmap_active) {
   2557 				cpu_idcache_wbinv_range(cleanlist[cnt].va,
   2558 				    NBPG);
   2559 				*cleanlist[cnt].pte = 0;
   2560 				cpu_tlb_flushID_SE(cleanlist[cnt].va);
   2561 			} else
   2562 				*cleanlist[cnt].pte = 0;
   2563 			pmap_pte_delref(pmap, cleanlist[cnt].va);
   2564 		}
   2565 	}
   2566 	PMAP_MAP_TO_HEAD_UNLOCK();
   2567 }
   2568 
   2569 /*
   2570  * Routine:	pmap_remove_all
   2571  * Function:
   2572  *		Removes this physical page from
   2573  *		all physical maps in which it resides.
   2574  *		Reflects back modify bits to the pager.
   2575  */
   2576 
   2577 static void
   2578 pmap_remove_all(struct vm_page *pg)
   2579 {
   2580 	struct pv_entry *pv, *npv;
   2581 	struct pmap *pmap;
   2582 	pt_entry_t *pte, *ptes;
   2583 
   2584 	PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
   2585 
   2586 	/* set vm_page => pmap locking */
   2587 	PMAP_HEAD_TO_MAP_LOCK();
   2588 
   2589 	simple_lock(&pg->mdpage.pvh_slock);
   2590 
   2591 	pv = pg->mdpage.pvh_list;
   2592 	if (pv == NULL) {
   2593 		PDEBUG(0, printf("free page\n"));
   2594 		simple_unlock(&pg->mdpage.pvh_slock);
   2595 		PMAP_HEAD_TO_MAP_UNLOCK();
   2596 		return;
   2597 	}
   2598 	pmap_clean_page(pv, FALSE);
   2599 
   2600 	while (pv) {
   2601 		pmap = pv->pv_pmap;
   2602 		ptes = pmap_map_ptes(pmap);
   2603 		pte = &ptes[arm_btop(pv->pv_va)];
   2604 
   2605 		PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
   2606 		    pv->pv_va, pv->pv_flags));
   2607 #ifdef DEBUG
   2608 		if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 ||
   2609 		    pmap_pte_v(pte) == 0 ||
   2610 		    pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
   2611 			panic("pmap_remove_all: bad mapping");
   2612 #endif	/* DEBUG */
   2613 
   2614 		/*
   2615 		 * Update statistics
   2616 		 */
   2617 		--pmap->pm_stats.resident_count;
   2618 
   2619 		/* Wired bit */
   2620 		if (pv->pv_flags & PVF_WIRED)
   2621 			--pmap->pm_stats.wired_count;
   2622 
   2623 		/*
   2624 		 * Invalidate the PTEs.
   2625 		 * XXX: should cluster them up and invalidate as many
   2626 		 * as possible at once.
   2627 		 */
   2628 
   2629 #ifdef needednotdone
   2630 reduce wiring count on page table pages as references drop
   2631 #endif
   2632 
   2633 		*pte = 0;
   2634 		pmap_pte_delref(pmap, pv->pv_va);
   2635 
   2636 		npv = pv->pv_next;
   2637 		pmap_free_pv(pmap, pv);
   2638 		pv = npv;
   2639 		pmap_unmap_ptes(pmap);
   2640 	}
   2641 	pg->mdpage.pvh_list = NULL;
   2642 	simple_unlock(&pg->mdpage.pvh_slock);
   2643 	PMAP_HEAD_TO_MAP_UNLOCK();
   2644 
   2645 	PDEBUG(0, printf("done\n"));
   2646 	cpu_tlb_flushID();
   2647 	cpu_cpwait();
   2648 }
   2649 
   2650 
   2651 /*
   2652  * Set the physical protection on the specified range of this map as requested.
   2653  */
   2654 
   2655 void
   2656 pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   2657 {
   2658 	pt_entry_t *pte = NULL, *ptes;
   2659 	struct vm_page *pg;
   2660 	int flush = 0;
   2661 
   2662 	PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
   2663 	    pmap, sva, eva, prot));
   2664 
   2665 	if (~prot & VM_PROT_READ) {
   2666 		/*
   2667 		 * Just remove the mappings.  pmap_update() is not required
   2668 		 * here since the caller should do it.
   2669 		 */
   2670 		pmap_remove(pmap, sva, eva);
   2671 		return;
   2672 	}
   2673 	if (prot & VM_PROT_WRITE) {
   2674 		/*
   2675 		 * If this is a read->write transition, just ignore it and let
   2676 		 * uvm_fault() take care of it later.
   2677 		 */
   2678 		return;
   2679 	}
   2680 
   2681 	/* Need to lock map->head */
   2682 	PMAP_MAP_TO_HEAD_LOCK();
   2683 
   2684 	ptes = pmap_map_ptes(pmap);
   2685 
   2686 	/*
   2687 	 * OK, at this point, we know we're doing write-protect operation.
   2688 	 * If the pmap is active, write-back the range.
   2689 	 */
   2690 	if (pmap_is_curpmap(pmap))
   2691 		cpu_dcache_wb_range(sva, eva - sva);
   2692 
   2693 	/*
   2694 	 * We need to acquire a pointer to a page table page before entering
   2695 	 * the following loop.
   2696 	 */
   2697 	while (sva < eva) {
   2698 		if (pmap_pde_page(pmap_pde(pmap, sva)))
   2699 			break;
   2700 		sva = (sva & L1_S_FRAME) + L1_S_SIZE;
   2701 	}
   2702 
   2703 	pte = &ptes[arm_btop(sva)];
   2704 
   2705 	while (sva < eva) {
   2706 		/* only check once in a while */
   2707 		if ((sva & L2_ADDR_BITS) == 0) {
   2708 			if (!pmap_pde_page(pmap_pde(pmap, sva))) {
   2709 				/* We can race ahead here, to the next pde. */
   2710 				sva += L1_S_SIZE;
   2711 				pte += arm_btop(L1_S_SIZE);
   2712 				continue;
   2713 			}
   2714 		}
   2715 
   2716 		if (!pmap_pte_v(pte))
   2717 			goto next;
   2718 
   2719 		flush = 1;
   2720 
   2721 		*pte &= ~L2_S_PROT_W;		/* clear write bit */
   2722 
   2723 		/* Clear write flag */
   2724 		if ((pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pte))) != NULL) {
   2725 			simple_lock(&pg->mdpage.pvh_slock);
   2726 			(void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
   2727 			pmap_vac_me_harder(pmap, pg, ptes, FALSE);
   2728 			simple_unlock(&pg->mdpage.pvh_slock);
   2729 		}
   2730 
   2731  next:
   2732 		sva += NBPG;
   2733 		pte++;
   2734 	}
   2735 	pmap_unmap_ptes(pmap);
   2736 	PMAP_MAP_TO_HEAD_UNLOCK();
   2737 	if (flush)
   2738 		cpu_tlb_flushID();
   2739 }
   2740 
   2741 /*
   2742  * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
   2743  * int flags)
   2744  *
   2745  *      Insert the given physical page (p) at
   2746  *      the specified virtual address (v) in the
   2747  *      target physical map with the protection requested.
   2748  *
   2749  *      If specified, the page will be wired down, meaning
   2750  *      that the related pte can not be reclaimed.
   2751  *
   2752  *      NB:  This is the only routine which MAY NOT lazy-evaluate
   2753  *      or lose information.  That is, this routine must actually
   2754  *      insert this page into the given map NOW.
   2755  */
   2756 
   2757 int
   2758 pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
   2759     int flags)
   2760 {
   2761 	pt_entry_t *ptes, opte, npte;
   2762 	paddr_t opa;
   2763 	boolean_t wired = (flags & PMAP_WIRED) != 0;
   2764 	struct vm_page *pg;
   2765 	struct pv_entry *pve;
   2766 	int error, nflags;
   2767 
   2768 	PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
   2769 	    va, pa, pmap, prot, wired));
   2770 
   2771 #ifdef DIAGNOSTIC
   2772 	/* Valid address ? */
   2773 	if (va >= (pmap_curmaxkvaddr))
   2774 		panic("pmap_enter: too big");
   2775 	if (pmap != pmap_kernel() && va != 0) {
   2776 		if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
   2777 			panic("pmap_enter: kernel page in user map");
   2778 	} else {
   2779 		if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
   2780 			panic("pmap_enter: user page in kernel map");
   2781 		if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
   2782 			panic("pmap_enter: entering PT page");
   2783 	}
   2784 #endif
   2785 
   2786 	KDASSERT(((va | pa) & PGOFSET) == 0);
   2787 
   2788 	/*
   2789 	 * Get a pointer to the page.  Later on in this function, we
   2790 	 * test for a managed page by checking pg != NULL.
   2791 	 */
   2792 	pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
   2793 
   2794 	/* get lock */
   2795 	PMAP_MAP_TO_HEAD_LOCK();
   2796 
   2797 	/*
   2798 	 * map the ptes.  If there's not already an L2 table for this
   2799 	 * address, allocate one.
   2800 	 */
   2801 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   2802 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   2803 		struct vm_page *ptp;
   2804 
   2805 		/* kernel should be pre-grown */
   2806 		KASSERT(pmap != pmap_kernel());
   2807 
   2808 		/* if failure is allowed then don't try too hard */
   2809 		ptp = pmap_get_ptp(pmap, va & L1_S_FRAME);
   2810 		if (ptp == NULL) {
   2811 			if (flags & PMAP_CANFAIL) {
   2812 				error = ENOMEM;
   2813 				goto out;
   2814 			}
   2815 			panic("pmap_enter: get ptp failed");
   2816 		}
   2817 	}
   2818 	opte = ptes[arm_btop(va)];
   2819 
   2820 	nflags = 0;
   2821 	if (prot & VM_PROT_WRITE)
   2822 		nflags |= PVF_WRITE;
   2823 	if (wired)
   2824 		nflags |= PVF_WIRED;
   2825 
   2826 	/* Is the pte valid ? If so then this page is already mapped */
   2827 	if (l2pte_valid(opte)) {
   2828 		/* Get the physical address of the current page mapped */
   2829 		opa = l2pte_pa(opte);
   2830 
   2831 		/* Are we mapping the same page ? */
   2832 		if (opa == pa) {
   2833 			/* Has the wiring changed ? */
   2834 			if (pg != NULL) {
   2835 				simple_lock(&pg->mdpage.pvh_slock);
   2836 				(void) pmap_modify_pv(pmap, va, pg,
   2837 				    PVF_WRITE | PVF_WIRED, nflags);
   2838 				simple_unlock(&pg->mdpage.pvh_slock);
   2839  			}
   2840 		} else {
   2841 			struct vm_page *opg;
   2842 
   2843 			/* We are replacing the page with a new one. */
   2844 			cpu_idcache_wbinv_range(va, NBPG);
   2845 
   2846 			/*
   2847 			 * If it is part of our managed memory then we
   2848 			 * must remove it from the PV list
   2849 			 */
   2850 			if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
   2851 				simple_lock(&opg->mdpage.pvh_slock);
   2852 				pve = pmap_remove_pv(opg, pmap, va);
   2853 				simple_unlock(&opg->mdpage.pvh_slock);
   2854 			} else {
   2855 				pve = NULL;
   2856 			}
   2857 
   2858 			goto enter;
   2859 		}
   2860 	} else {
   2861 		opa = 0;
   2862 		pve = NULL;
   2863 		pmap_pte_addref(pmap, va);
   2864 
   2865 		/* pte is not valid so we must be hooking in a new page */
   2866 		++pmap->pm_stats.resident_count;
   2867 
   2868 	enter:
   2869 		/*
   2870 		 * Enter on the PV list if part of our managed memory
   2871 		 */
   2872 		if (pg != NULL) {
   2873 			if (pve == NULL) {
   2874 				pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
   2875 				if (pve == NULL) {
   2876 					if (flags & PMAP_CANFAIL) {
   2877 						error = ENOMEM;
   2878 						goto out;
   2879 					}
   2880 					panic("pmap_enter: no pv entries "
   2881 					    "available");
   2882 				}
   2883 			}
   2884 			/* enter_pv locks pvh when adding */
   2885 			pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
   2886 		} else {
   2887 			if (pve != NULL)
   2888 				pmap_free_pv(pmap, pve);
   2889 		}
   2890 	}
   2891 
   2892 	/* Construct the pte, giving the correct access. */
   2893 	npte = pa;
   2894 
   2895 	/* VA 0 is magic. */
   2896 	if (pmap != pmap_kernel() && va != vector_page)
   2897 		npte |= L2_S_PROT_U;
   2898 
   2899 	if (pg != NULL) {
   2900 #ifdef DIAGNOSTIC
   2901 		if ((flags & VM_PROT_ALL) & ~prot)
   2902 			panic("pmap_enter: access_type exceeds prot");
   2903 #endif
   2904 		npte |= pte_l2_s_cache_mode;
   2905 		if (flags & VM_PROT_WRITE) {
   2906 			npte |= L2_S_PROTO | L2_S_PROT_W;
   2907 			pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
   2908 		} else if (flags & VM_PROT_ALL) {
   2909 			npte |= L2_S_PROTO;
   2910 			pg->mdpage.pvh_attrs |= PVF_REF;
   2911 		} else
   2912 			npte |= L2_TYPE_INV;
   2913 	} else {
   2914 		if (prot & VM_PROT_WRITE)
   2915 			npte |= L2_S_PROTO | L2_S_PROT_W;
   2916 		else if (prot & VM_PROT_ALL)
   2917 			npte |= L2_S_PROTO;
   2918 		else
   2919 			npte |= L2_TYPE_INV;
   2920 	}
   2921 
   2922 	ptes[arm_btop(va)] = npte;
   2923 
   2924 	if (pg != NULL) {
   2925 		simple_lock(&pg->mdpage.pvh_slock);
   2926  		pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
   2927 		simple_unlock(&pg->mdpage.pvh_slock);
   2928 	}
   2929 
   2930 	/* Better flush the TLB ... */
   2931 	cpu_tlb_flushID_SE(va);
   2932 	error = 0;
   2933 out:
   2934 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   2935 	PMAP_MAP_TO_HEAD_UNLOCK();
   2936 
   2937 	return error;
   2938 }
   2939 
   2940 /*
   2941  * pmap_kenter_pa: enter a kernel mapping
   2942  *
   2943  * => no need to lock anything assume va is already allocated
   2944  * => should be faster than normal pmap enter function
   2945  */
   2946 void
   2947 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
   2948 {
   2949 	pt_entry_t *pte;
   2950 
   2951 	pte = vtopte(va);
   2952 	KASSERT(!pmap_pte_v(pte));
   2953 
   2954 #ifdef PMAP_ALIAS_DEBUG
   2955     {
   2956 	struct vm_page *pg;
   2957 	int s;
   2958 
   2959 	pg = PHYS_TO_VM_PAGE(pa);
   2960 	if (pg != NULL) {
   2961 		s = splhigh();
   2962 		if (pg->mdpage.ro_mappings == 0 &&
   2963 		    pg->mdpage.rw_mappings == 0 &&
   2964 		    pg->mdpage.kro_mappings == 0 &&
   2965 		    pg->mdpage.krw_mappings == 0) {
   2966 			/* This case is okay. */
   2967 		} else if (pg->mdpage.rw_mappings == 0 &&
   2968 			   pg->mdpage.krw_mappings == 0 &&
   2969 			   (prot & VM_PROT_WRITE) == 0) {
   2970 			/* This case is okay. */
   2971 		} else {
   2972 			/* Something is awry. */
   2973 			printf("pmap_kenter_pa: ro %u, rw %u, kro %u, krw %u "
   2974 			    "prot 0x%x\n", pg->mdpage.ro_mappings,
   2975 			    pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
   2976 			    pg->mdpage.krw_mappings, prot);
   2977 			Debugger();
   2978 		}
   2979 		if (prot & VM_PROT_WRITE)
   2980 			pg->mdpage.krw_mappings++;
   2981 		else
   2982 			pg->mdpage.kro_mappings++;
   2983 		splx(s);
   2984 	}
   2985     }
   2986 #endif /* PMAP_ALIAS_DEBUG */
   2987 
   2988 	*pte = L2_S_PROTO | pa |
   2989 	    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
   2990 }
   2991 
   2992 void
   2993 pmap_kremove(vaddr_t va, vsize_t len)
   2994 {
   2995 	pt_entry_t *pte;
   2996 
   2997 	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
   2998 
   2999 		/*
   3000 		 * We assume that we will only be called with small
   3001 		 * regions of memory.
   3002 		 */
   3003 
   3004 		KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
   3005 		pte = vtopte(va);
   3006 #ifdef PMAP_ALIAS_DEBUG
   3007     {
   3008 		struct vm_page *pg;
   3009 		int s;
   3010 
   3011 		if ((*pte & L2_TYPE_MASK) != L2_TYPE_INV &&
   3012 		    (pg = PHYS_TO_VM_PAGE(*pte & L2_S_FRAME)) != NULL) {
   3013 			s = splhigh();
   3014 			if (*pte & L2_S_PROT_W) {
   3015 				KASSERT(pg->mdpage.krw_mappings != 0);
   3016 				pg->mdpage.krw_mappings--;
   3017 			} else {
   3018 				KASSERT(pg->mdpage.kro_mappings != 0);
   3019 				pg->mdpage.kro_mappings--;
   3020 			}
   3021 			splx(s);
   3022 		}
   3023     }
   3024 #endif /* PMAP_ALIAS_DEBUG */
   3025 		cpu_idcache_wbinv_range(va, PAGE_SIZE);
   3026 		*pte = 0;
   3027 		cpu_tlb_flushID_SE(va);
   3028 	}
   3029 }
   3030 
   3031 /*
   3032  * pmap_page_protect:
   3033  *
   3034  * Lower the permission for all mappings to a given page.
   3035  */
   3036 
   3037 void
   3038 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   3039 {
   3040 
   3041 	PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
   3042 	    VM_PAGE_TO_PHYS(pg), prot));
   3043 
   3044 	switch(prot) {
   3045 	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
   3046 	case VM_PROT_READ|VM_PROT_WRITE:
   3047 		return;
   3048 
   3049 	case VM_PROT_READ:
   3050 	case VM_PROT_READ|VM_PROT_EXECUTE:
   3051 		pmap_clearbit(pg, PVF_WRITE);
   3052 		break;
   3053 
   3054 	default:
   3055 		pmap_remove_all(pg);
   3056 		break;
   3057 	}
   3058 }
   3059 
   3060 
   3061 /*
   3062  * Routine:	pmap_unwire
   3063  * Function:	Clear the wired attribute for a map/virtual-address
   3064  *		pair.
   3065  * In/out conditions:
   3066  *		The mapping must already exist in the pmap.
   3067  */
   3068 
   3069 void
   3070 pmap_unwire(struct pmap *pmap, vaddr_t va)
   3071 {
   3072 	pt_entry_t *ptes;
   3073 	struct vm_page *pg;
   3074 	paddr_t pa;
   3075 
   3076 	PMAP_MAP_TO_HEAD_LOCK();
   3077 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3078 
   3079 	if (pmap_pde_v(pmap_pde(pmap, va))) {
   3080 #ifdef DIAGNOSTIC
   3081 		if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   3082 			panic("pmap_unwire: invalid L2 PTE");
   3083 #endif
   3084 		/* Extract the physical address of the page */
   3085 		pa = l2pte_pa(ptes[arm_btop(va)]);
   3086 
   3087 		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   3088 			goto out;
   3089 
   3090 		/* Update the wired bit in the pv entry for this page. */
   3091 		simple_lock(&pg->mdpage.pvh_slock);
   3092 		(void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
   3093 		simple_unlock(&pg->mdpage.pvh_slock);
   3094 	}
   3095 #ifdef DIAGNOSTIC
   3096 	else {
   3097 		panic("pmap_unwire: invalid L1 PTE");
   3098 	}
   3099 #endif
   3100  out:
   3101 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3102 	PMAP_MAP_TO_HEAD_UNLOCK();
   3103 }
   3104 
   3105 /*
   3106  * Routine:  pmap_extract
   3107  * Function:
   3108  *           Extract the physical page address associated
   3109  *           with the given map/virtual_address pair.
   3110  */
   3111 boolean_t
   3112 pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
   3113 {
   3114 	pd_entry_t *pde;
   3115 	pt_entry_t *pte, *ptes;
   3116 	paddr_t pa;
   3117 
   3118 	PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va));
   3119 
   3120 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3121 
   3122 	pde = pmap_pde(pmap, va);
   3123 	pte = &ptes[arm_btop(va)];
   3124 
   3125 	if (pmap_pde_section(pde)) {
   3126 		pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
   3127 		PDEBUG(5, printf("section pa=0x%08lx\n", pa));
   3128 		goto out;
   3129 	} else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
   3130 		PDEBUG(5, printf("no mapping\n"));
   3131 		goto failed;
   3132 	}
   3133 
   3134 	if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) {
   3135 		pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET);
   3136 		PDEBUG(5, printf("large page pa=0x%08lx\n", pa));
   3137 		goto out;
   3138 	}
   3139 
   3140 	pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET);
   3141 	PDEBUG(5, printf("small page pa=0x%08lx\n", pa));
   3142 
   3143  out:
   3144 	if (pap != NULL)
   3145 		*pap = pa;
   3146 
   3147 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3148 	return (TRUE);
   3149 
   3150  failed:
   3151 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3152 	return (FALSE);
   3153 }
   3154 
   3155 
   3156 /*
   3157  * pmap_copy:
   3158  *
   3159  *	Copy the range specified by src_addr/len from the source map to the
   3160  *	range dst_addr/len in the destination map.
   3161  *
   3162  *	This routine is only advisory and need not do anything.
   3163  */
   3164 /* Call deleted in <arm/arm32/pmap.h> */
   3165 
   3166 #if defined(PMAP_DEBUG)
   3167 void
   3168 pmap_dump_pvlist(phys, m)
   3169 	vaddr_t phys;
   3170 	char *m;
   3171 {
   3172 	struct vm_page *pg;
   3173 	struct pv_entry *pv;
   3174 
   3175 	if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
   3176 		printf("INVALID PA\n");
   3177 		return;
   3178 	}
   3179 	simple_lock(&pg->mdpage.pvh_slock);
   3180 	printf("%s %08lx:", m, phys);
   3181 	if (pg->mdpage.pvh_list == NULL) {
   3182 		simple_unlock(&pg->mdpage.pvh_slock);
   3183 		printf(" no mappings\n");
   3184 		return;
   3185 	}
   3186 
   3187 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
   3188 		printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
   3189 		    pv->pv_va, pv->pv_flags);
   3190 
   3191 	printf("\n");
   3192 	simple_unlock(&pg->mdpage.pvh_slock);
   3193 }
   3194 
   3195 #endif	/* PMAP_DEBUG */
   3196 
   3197 static pt_entry_t *
   3198 pmap_map_ptes(struct pmap *pmap)
   3199 {
   3200 	struct proc *p;
   3201 
   3202     	/* the kernel's pmap is always accessible */
   3203 	if (pmap == pmap_kernel()) {
   3204 		return (pt_entry_t *)PTE_BASE;
   3205 	}
   3206 
   3207 	if (pmap_is_curpmap(pmap)) {
   3208 		simple_lock(&pmap->pm_obj.vmobjlock);
   3209 		return (pt_entry_t *)PTE_BASE;
   3210 	}
   3211 
   3212 	p = curproc;
   3213 	KDASSERT(p != NULL);
   3214 
   3215 	/* need to lock both curpmap and pmap: use ordered locking */
   3216 	if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) {
   3217 		simple_lock(&pmap->pm_obj.vmobjlock);
   3218 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3219 	} else {
   3220 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3221 		simple_lock(&pmap->pm_obj.vmobjlock);
   3222 	}
   3223 
   3224 	pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE, pmap->pm_pptpt,
   3225 	    FALSE);
   3226 	cpu_tlb_flushD();
   3227 	cpu_cpwait();
   3228 	return (pt_entry_t *)APTE_BASE;
   3229 }
   3230 
   3231 /*
   3232  * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
   3233  */
   3234 
   3235 static void
   3236 pmap_unmap_ptes(struct pmap *pmap)
   3237 {
   3238 
   3239 	if (pmap == pmap_kernel()) {
   3240 		return;
   3241 	}
   3242 	if (pmap_is_curpmap(pmap)) {
   3243 		simple_unlock(&pmap->pm_obj.vmobjlock);
   3244 	} else {
   3245 		KDASSERT(curproc != NULL);
   3246 		simple_unlock(&pmap->pm_obj.vmobjlock);
   3247 		simple_unlock(
   3248 		    &curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3249 	}
   3250 }
   3251 
   3252 /*
   3253  * Modify pte bits for all ptes corresponding to the given physical address.
   3254  * We use `maskbits' rather than `clearbits' because we're always passing
   3255  * constants and the latter would require an extra inversion at run-time.
   3256  */
   3257 
   3258 static void
   3259 pmap_clearbit(struct vm_page *pg, u_int maskbits)
   3260 {
   3261 	struct pv_entry *pv;
   3262 	pt_entry_t *ptes;
   3263 	vaddr_t va;
   3264 	int tlbentry;
   3265 
   3266 	PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
   3267 	    VM_PAGE_TO_PHYS(pg), maskbits));
   3268 
   3269 	tlbentry = 0;
   3270 
   3271 	PMAP_HEAD_TO_MAP_LOCK();
   3272 	simple_lock(&pg->mdpage.pvh_slock);
   3273 
   3274 	/*
   3275 	 * Clear saved attributes (modify, reference)
   3276 	 */
   3277 	pg->mdpage.pvh_attrs &= ~maskbits;
   3278 
   3279 	if (pg->mdpage.pvh_list == NULL) {
   3280 		simple_unlock(&pg->mdpage.pvh_slock);
   3281 		PMAP_HEAD_TO_MAP_UNLOCK();
   3282 		return;
   3283 	}
   3284 
   3285 	/*
   3286 	 * Loop over all current mappings setting/clearing as appropos
   3287 	 */
   3288 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
   3289 #ifdef PMAP_ALIAS_DEBUG
   3290     {
   3291 		int s = splhigh();
   3292 		if ((maskbits & PVF_WRITE) != 0 &&
   3293 		    (pv->pv_flags & PVF_WRITE) != 0) {
   3294 			KASSERT(pg->mdpage.rw_mappings != 0);
   3295 			pg->mdpage.rw_mappings--;
   3296 			pg->mdpage.ro_mappings++;
   3297 		}
   3298 		splx(s);
   3299     }
   3300 #endif /* PMAP_ALIAS_DEBUG */
   3301 		va = pv->pv_va;
   3302 		pv->pv_flags &= ~maskbits;
   3303 		ptes = pmap_map_ptes(pv->pv_pmap);	/* locks pmap */
   3304 		KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
   3305 		if (maskbits & (PVF_WRITE|PVF_MOD)) {
   3306 			if ((pv->pv_flags & PVF_NC)) {
   3307 				/*
   3308 				 * Entry is not cacheable: reenable
   3309 				 * the cache, nothing to flush
   3310 				 *
   3311 				 * Don't turn caching on again if this
   3312 				 * is a modified emulation.  This
   3313 				 * would be inconsitent with the
   3314 				 * settings created by
   3315 				 * pmap_vac_me_harder().
   3316 				 *
   3317 				 * There's no need to call
   3318 				 * pmap_vac_me_harder() here: all
   3319 				 * pages are loosing their write
   3320 				 * permission.
   3321 				 *
   3322 				 */
   3323 				if (maskbits & PVF_WRITE) {
   3324 					ptes[arm_btop(va)] |=
   3325 					    pte_l2_s_cache_mode;
   3326 					pv->pv_flags &= ~PVF_NC;
   3327 				}
   3328 			} else if (pmap_is_curpmap(pv->pv_pmap)) {
   3329 				/*
   3330 				 * Entry is cacheable: check if pmap is
   3331 				 * current if it is flush it,
   3332 				 * otherwise it won't be in the cache
   3333 				 */
   3334 				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
   3335 			}
   3336 
   3337 			/* make the pte read only */
   3338 			ptes[arm_btop(va)] &= ~L2_S_PROT_W;
   3339 		}
   3340 
   3341 		if (maskbits & PVF_REF)
   3342 			ptes[arm_btop(va)] =
   3343 			    (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_TYPE_INV;
   3344 
   3345 		if (pmap_is_curpmap(pv->pv_pmap)) {
   3346 			/*
   3347 			 * if we had cacheable pte's we'd clean the
   3348 			 * pte out to memory here
   3349 			 *
   3350 			 * flush tlb entry as it's in the current pmap
   3351 			 */
   3352 			cpu_tlb_flushID_SE(pv->pv_va);
   3353 		}
   3354 		pmap_unmap_ptes(pv->pv_pmap);		/* unlocks pmap */
   3355 	}
   3356 	cpu_cpwait();
   3357 
   3358 	simple_unlock(&pg->mdpage.pvh_slock);
   3359 	PMAP_HEAD_TO_MAP_UNLOCK();
   3360 }
   3361 
   3362 /*
   3363  * pmap_clear_modify:
   3364  *
   3365  *	Clear the "modified" attribute for a page.
   3366  */
   3367 boolean_t
   3368 pmap_clear_modify(struct vm_page *pg)
   3369 {
   3370 	boolean_t rv;
   3371 
   3372 	if (pg->mdpage.pvh_attrs & PVF_MOD) {
   3373 		rv = TRUE;
   3374 		pmap_clearbit(pg, PVF_MOD);
   3375 	} else
   3376 		rv = FALSE;
   3377 
   3378 	PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
   3379 	    VM_PAGE_TO_PHYS(pg), rv));
   3380 
   3381 	return (rv);
   3382 }
   3383 
   3384 /*
   3385  * pmap_clear_reference:
   3386  *
   3387  *	Clear the "referenced" attribute for a page.
   3388  */
   3389 boolean_t
   3390 pmap_clear_reference(struct vm_page *pg)
   3391 {
   3392 	boolean_t rv;
   3393 
   3394 	if (pg->mdpage.pvh_attrs & PVF_REF) {
   3395 		rv = TRUE;
   3396 		pmap_clearbit(pg, PVF_REF);
   3397 	} else
   3398 		rv = FALSE;
   3399 
   3400 	PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
   3401 	    VM_PAGE_TO_PHYS(pg), rv));
   3402 
   3403 	return (rv);
   3404 }
   3405 
   3406 /*
   3407  * pmap_is_modified:
   3408  *
   3409  *	Test if a page has the "modified" attribute.
   3410  */
   3411 /* See <arm/arm32/pmap.h> */
   3412 
   3413 /*
   3414  * pmap_is_referenced:
   3415  *
   3416  *	Test if a page has the "referenced" attribute.
   3417  */
   3418 /* See <arm/arm32/pmap.h> */
   3419 
   3420 int
   3421 pmap_modified_emulation(struct pmap *pmap, vaddr_t va)
   3422 {
   3423 	pt_entry_t *ptes;
   3424 	struct vm_page *pg;
   3425 	paddr_t pa;
   3426 	u_int flags;
   3427 	int rv = 0;
   3428 
   3429 	PDEBUG(2, printf("pmap_modified_emulation\n"));
   3430 
   3431 	PMAP_MAP_TO_HEAD_LOCK();
   3432 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3433 
   3434 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   3435 		PDEBUG(2, printf("L1 PTE invalid\n"));
   3436 		goto out;
   3437 	}
   3438 
   3439 	PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
   3440 
   3441 	/* Check for a invalid pte */
   3442 	if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   3443 		goto out;
   3444 
   3445 	/* This can happen if user code tries to access kernel memory. */
   3446 	if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0)
   3447 		goto out;
   3448 
   3449 	/* Extract the physical address of the page */
   3450 	pa = l2pte_pa(ptes[arm_btop(va)]);
   3451 	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   3452 		goto out;
   3453 
   3454 	/* Get the current flags for this page. */
   3455 	simple_lock(&pg->mdpage.pvh_slock);
   3456 
   3457 	flags = pmap_modify_pv(pmap, va, pg, 0, 0);
   3458 	PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
   3459 
   3460 	/*
   3461 	 * Do the flags say this page is writable ? If not then it is a
   3462 	 * genuine write fault. If yes then the write fault is our fault
   3463 	 * as we did not reflect the write access in the PTE. Now we know
   3464 	 * a write has occurred we can correct this and also set the
   3465 	 * modified bit
   3466 	 */
   3467 	if (~flags & PVF_WRITE) {
   3468 	    	simple_unlock(&pg->mdpage.pvh_slock);
   3469 		goto out;
   3470 	}
   3471 
   3472 	PDEBUG(0,
   3473 	    printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
   3474 	    va, ptes[arm_btop(va)]));
   3475 	pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
   3476 
   3477 	/*
   3478 	 * Re-enable write permissions for the page.  No need to call
   3479 	 * pmap_vac_me_harder(), since this is just a
   3480 	 * modified-emulation fault, and the PVF_WRITE bit isn't changing.
   3481 	 * We've already set the cacheable bits based on the assumption
   3482 	 * that we can write to this page.
   3483 	 */
   3484 	ptes[arm_btop(va)] =
   3485 	    (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
   3486 	PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
   3487 
   3488 	simple_unlock(&pg->mdpage.pvh_slock);
   3489 
   3490 	cpu_tlb_flushID_SE(va);
   3491 	cpu_cpwait();
   3492 	rv = 1;
   3493  out:
   3494 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3495 	PMAP_MAP_TO_HEAD_UNLOCK();
   3496 	return (rv);
   3497 }
   3498 
   3499 int
   3500 pmap_handled_emulation(struct pmap *pmap, vaddr_t va)
   3501 {
   3502 	pt_entry_t *ptes;
   3503 	struct vm_page *pg;
   3504 	paddr_t pa;
   3505 	int rv = 0;
   3506 
   3507 	PDEBUG(2, printf("pmap_handled_emulation\n"));
   3508 
   3509 	PMAP_MAP_TO_HEAD_LOCK();
   3510 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3511 
   3512 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   3513 		PDEBUG(2, printf("L1 PTE invalid\n"));
   3514 		goto out;
   3515 	}
   3516 
   3517 	PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
   3518 
   3519 	/* Check for invalid pte */
   3520 	if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   3521 		goto out;
   3522 
   3523 	/* This can happen if user code tries to access kernel memory. */
   3524 	if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV)
   3525 		goto out;
   3526 
   3527 	/* Extract the physical address of the page */
   3528 	pa = l2pte_pa(ptes[arm_btop(va)]);
   3529 	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   3530 		goto out;
   3531 
   3532 	simple_lock(&pg->mdpage.pvh_slock);
   3533 
   3534 	/*
   3535 	 * Ok we just enable the pte and mark the attibs as handled
   3536 	 * XXX Should we traverse the PV list and enable all PTEs?
   3537 	 */
   3538 	PDEBUG(0,
   3539 	    printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
   3540 	    va, ptes[arm_btop(va)]));
   3541 	pg->mdpage.pvh_attrs |= PVF_REF;
   3542 
   3543 	ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO;
   3544 	PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
   3545 
   3546 	simple_unlock(&pg->mdpage.pvh_slock);
   3547 
   3548 	cpu_tlb_flushID_SE(va);
   3549 	cpu_cpwait();
   3550 	rv = 1;
   3551  out:
   3552 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3553 	PMAP_MAP_TO_HEAD_UNLOCK();
   3554 	return (rv);
   3555 }
   3556 
   3557 /*
   3558  * pmap_collect: free resources held by a pmap
   3559  *
   3560  * => optional function.
   3561  * => called when a process is swapped out to free memory.
   3562  */
   3563 
   3564 void
   3565 pmap_collect(struct pmap *pmap)
   3566 {
   3567 }
   3568 
   3569 /*
   3570  * Routine:	pmap_procwr
   3571  *
   3572  * Function:
   3573  *	Synchronize caches corresponding to [addr, addr+len) in p.
   3574  *
   3575  */
   3576 void
   3577 pmap_procwr(struct proc *p, vaddr_t va, int len)
   3578 {
   3579 	/* We only need to do anything if it is the current process. */
   3580 	if (p == curproc)
   3581 		cpu_icache_sync_range(va, len);
   3582 }
   3583 /*
   3584  * PTP functions
   3585  */
   3586 
   3587 /*
   3588  * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
   3589  *
   3590  * => pmap should NOT be pmap_kernel()
   3591  * => pmap should be locked
   3592  */
   3593 
   3594 static struct vm_page *
   3595 pmap_get_ptp(struct pmap *pmap, vaddr_t va)
   3596 {
   3597 	struct vm_page *ptp;
   3598 
   3599 	if (pmap_pde_page(pmap_pde(pmap, va))) {
   3600 
   3601 		/* valid... check hint (saves us a PA->PG lookup) */
   3602 		if (pmap->pm_ptphint &&
   3603 		    (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) ==
   3604 		    VM_PAGE_TO_PHYS(pmap->pm_ptphint))
   3605 			return (pmap->pm_ptphint);
   3606 		ptp = uvm_pagelookup(&pmap->pm_obj, va);
   3607 #ifdef DIAGNOSTIC
   3608 		if (ptp == NULL)
   3609 			panic("pmap_get_ptp: unmanaged user PTP");
   3610 #endif
   3611 		pmap->pm_ptphint = ptp;
   3612 		return(ptp);
   3613 	}
   3614 
   3615 	/* allocate a new PTP (updates ptphint) */
   3616 	return(pmap_alloc_ptp(pmap, va));
   3617 }
   3618 
   3619 /*
   3620  * pmap_alloc_ptp: allocate a PTP for a PMAP
   3621  *
   3622  * => pmap should already be locked by caller
   3623  * => we use the ptp's wire_count to count the number of active mappings
   3624  *	in the PTP (we start it at one to prevent any chance this PTP
   3625  *	will ever leak onto the active/inactive queues)
   3626  */
   3627 
   3628 /*__inline */ static struct vm_page *
   3629 pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
   3630 {
   3631 	struct vm_page *ptp;
   3632 
   3633 	ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
   3634 		UVM_PGA_USERESERVE|UVM_PGA_ZERO);
   3635 	if (ptp == NULL)
   3636 		return (NULL);
   3637 
   3638 	/* got one! */
   3639 	ptp->flags &= ~PG_BUSY;	/* never busy */
   3640 	ptp->wire_count = 1;	/* no mappings yet */
   3641 	pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
   3642 	pmap->pm_stats.resident_count++;	/* count PTP as resident */
   3643 	pmap->pm_ptphint = ptp;
   3644 	return (ptp);
   3645 }
   3646 
   3647 vaddr_t
   3648 pmap_growkernel(vaddr_t maxkvaddr)
   3649 {
   3650 	struct pmap *kpm = pmap_kernel(), *pm;
   3651 	int s;
   3652 	paddr_t ptaddr;
   3653 	struct vm_page *ptp;
   3654 
   3655 	if (maxkvaddr <= pmap_curmaxkvaddr)
   3656 		goto out;		/* we are OK */
   3657 	NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
   3658 		    pmap_curmaxkvaddr, maxkvaddr));
   3659 
   3660 	/*
   3661 	 * whoops!   we need to add kernel PTPs
   3662 	 */
   3663 
   3664 	s = splhigh();	/* to be safe */
   3665 	simple_lock(&kpm->pm_obj.vmobjlock);
   3666 	/* due to the way the arm pmap works we map 4MB at a time */
   3667 	for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr;
   3668 	     pmap_curmaxkvaddr += 4 * L1_S_SIZE) {
   3669 
   3670 		if (uvm.page_init_done == FALSE) {
   3671 
   3672 			/*
   3673 			 * we're growing the kernel pmap early (from
   3674 			 * uvm_pageboot_alloc()).  this case must be
   3675 			 * handled a little differently.
   3676 			 */
   3677 
   3678 			if (uvm_page_physget(&ptaddr) == FALSE)
   3679 				panic("pmap_growkernel: out of memory");
   3680 			pmap_zero_page(ptaddr);
   3681 
   3682 			/* map this page in */
   3683 			pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr, TRUE);
   3684 
   3685 			/* count PTP as resident */
   3686 			kpm->pm_stats.resident_count++;
   3687 			continue;
   3688 		}
   3689 
   3690 		/*
   3691 		 * THIS *MUST* BE CODED SO AS TO WORK IN THE
   3692 		 * pmap_initialized == FALSE CASE!  WE MAY BE
   3693 		 * INVOKED WHILE pmap_init() IS RUNNING!
   3694 		 */
   3695 
   3696 		if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL)
   3697 			panic("pmap_growkernel: alloc ptp failed");
   3698 
   3699 		/* distribute new kernel PTP to all active pmaps */
   3700 		simple_lock(&pmaps_lock);
   3701 		LIST_FOREACH(pm, &pmaps, pm_list) {
   3702 			pmap_map_in_l1(pm, pmap_curmaxkvaddr,
   3703 			    VM_PAGE_TO_PHYS(ptp), TRUE);
   3704 		}
   3705 
   3706 		/* Invalidate the PTPT cache. */
   3707 		pool_cache_invalidate(&pmap_ptpt_cache);
   3708 		pmap_ptpt_cache_generation++;
   3709 
   3710 		simple_unlock(&pmaps_lock);
   3711 	}
   3712 
   3713 	/*
   3714 	 * flush out the cache, expensive but growkernel will happen so
   3715 	 * rarely
   3716 	 */
   3717 	cpu_tlb_flushD();
   3718 	cpu_cpwait();
   3719 
   3720 	simple_unlock(&kpm->pm_obj.vmobjlock);
   3721 	splx(s);
   3722 
   3723 out:
   3724 	return (pmap_curmaxkvaddr);
   3725 }
   3726 
   3727 /************************ Utility routines ****************************/
   3728 
   3729 /*
   3730  * vector_page_setprot:
   3731  *
   3732  *	Manipulate the protection of the vector page.
   3733  */
   3734 void
   3735 vector_page_setprot(int prot)
   3736 {
   3737 	pt_entry_t *pte;
   3738 
   3739 	pte = vtopte(vector_page);
   3740 
   3741 	*pte = (*pte & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
   3742 	cpu_tlb_flushD_SE(vector_page);
   3743 	cpu_cpwait();
   3744 }
   3745 
   3746 /************************ Bootstrapping routines ****************************/
   3747 
   3748 /*
   3749  * This list exists for the benefit of pmap_map_chunk().  It keeps track
   3750  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
   3751  * find them as necessary.
   3752  *
   3753  * Note that the data on this list is not valid after initarm() returns.
   3754  */
   3755 SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
   3756 
   3757 static vaddr_t
   3758 kernel_pt_lookup(paddr_t pa)
   3759 {
   3760 	pv_addr_t *pv;
   3761 
   3762 	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
   3763 		if (pv->pv_pa == pa)
   3764 			return (pv->pv_va);
   3765 	}
   3766 	return (0);
   3767 }
   3768 
   3769 /*
   3770  * pmap_map_section:
   3771  *
   3772  *	Create a single section mapping.
   3773  */
   3774 void
   3775 pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
   3776 {
   3777 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3778 	pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
   3779 
   3780 	KASSERT(((va | pa) & L1_S_OFFSET) == 0);
   3781 
   3782 	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
   3783 	    L1_S_PROT(PTE_KERNEL, prot) | fl;
   3784 }
   3785 
   3786 /*
   3787  * pmap_map_entry:
   3788  *
   3789  *	Create a single page mapping.
   3790  */
   3791 void
   3792 pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
   3793 {
   3794 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3795 	pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
   3796 	pt_entry_t *pte;
   3797 
   3798 	KASSERT(((va | pa) & PGOFSET) == 0);
   3799 
   3800 	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
   3801 		panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
   3802 
   3803 	pte = (pt_entry_t *)
   3804 	    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   3805 	if (pte == NULL)
   3806 		panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
   3807 
   3808 	pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   3809 	    L2_S_PROT(PTE_KERNEL, prot) | fl;
   3810 }
   3811 
   3812 /*
   3813  * pmap_link_l2pt:
   3814  *
   3815  *	Link the L2 page table specified by "pa" into the L1
   3816  *	page table at the slot for "va".
   3817  */
   3818 void
   3819 pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
   3820 {
   3821 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3822 	u_int slot = va >> L1_S_SHIFT;
   3823 
   3824 	KASSERT((l2pv->pv_pa & PGOFSET) == 0);
   3825 
   3826 	pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
   3827 	pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
   3828 	pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
   3829 	pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
   3830 
   3831 	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
   3832 }
   3833 
   3834 /*
   3835  * pmap_map_chunk:
   3836  *
   3837  *	Map a chunk of memory using the most efficient mappings
   3838  *	possible (section, large page, small page) into the
   3839  *	provided L1 and L2 tables at the specified virtual address.
   3840  */
   3841 vsize_t
   3842 pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
   3843     int prot, int cache)
   3844 {
   3845 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3846 	pt_entry_t *pte, fl;
   3847 	vsize_t resid;
   3848 	int i;
   3849 
   3850 	resid = (size + (NBPG - 1)) & ~(NBPG - 1);
   3851 
   3852 	if (l1pt == 0)
   3853 		panic("pmap_map_chunk: no L1 table provided");
   3854 
   3855 #ifdef VERBOSE_INIT_ARM
   3856 	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
   3857 	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
   3858 #endif
   3859 
   3860 	size = resid;
   3861 
   3862 	while (resid > 0) {
   3863 		/* See if we can use a section mapping. */
   3864 		if (((pa | va) & L1_S_OFFSET) == 0 &&
   3865 		    resid >= L1_S_SIZE) {
   3866 			fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
   3867 #ifdef VERBOSE_INIT_ARM
   3868 			printf("S");
   3869 #endif
   3870 			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
   3871 			    L1_S_PROT(PTE_KERNEL, prot) | fl;
   3872 			va += L1_S_SIZE;
   3873 			pa += L1_S_SIZE;
   3874 			resid -= L1_S_SIZE;
   3875 			continue;
   3876 		}
   3877 
   3878 		/*
   3879 		 * Ok, we're going to use an L2 table.  Make sure
   3880 		 * one is actually in the corresponding L1 slot
   3881 		 * for the current VA.
   3882 		 */
   3883 		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
   3884 			panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
   3885 
   3886 		pte = (pt_entry_t *)
   3887 		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   3888 		if (pte == NULL)
   3889 			panic("pmap_map_chunk: can't find L2 table for VA"
   3890 			    "0x%08lx", va);
   3891 
   3892 		/* See if we can use a L2 large page mapping. */
   3893 		if (((pa | va) & L2_L_OFFSET) == 0 &&
   3894 		    resid >= L2_L_SIZE) {
   3895 			fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0;
   3896 #ifdef VERBOSE_INIT_ARM
   3897 			printf("L");
   3898 #endif
   3899 			for (i = 0; i < 16; i++) {
   3900 				pte[((va >> PGSHIFT) & 0x3f0) + i] =
   3901 				    L2_L_PROTO | pa |
   3902 				    L2_L_PROT(PTE_KERNEL, prot) | fl;
   3903 			}
   3904 			va += L2_L_SIZE;
   3905 			pa += L2_L_SIZE;
   3906 			resid -= L2_L_SIZE;
   3907 			continue;
   3908 		}
   3909 
   3910 		/* Use a small page mapping. */
   3911 		fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
   3912 #ifdef VERBOSE_INIT_ARM
   3913 		printf("P");
   3914 #endif
   3915 		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   3916 		    L2_S_PROT(PTE_KERNEL, prot) | fl;
   3917 		va += NBPG;
   3918 		pa += NBPG;
   3919 		resid -= NBPG;
   3920 	}
   3921 #ifdef VERBOSE_INIT_ARM
   3922 	printf("\n");
   3923 #endif
   3924 	return (size);
   3925 }
   3926 
   3927 /********************** PTE initialization routines **************************/
   3928 
   3929 /*
   3930  * These routines are called when the CPU type is identified to set up
   3931  * the PTE prototypes, cache modes, etc.
   3932  *
   3933  * The variables are always here, just in case LKMs need to reference
   3934  * them (though, they shouldn't).
   3935  */
   3936 
   3937 pt_entry_t	pte_l1_s_cache_mode;
   3938 pt_entry_t	pte_l1_s_cache_mask;
   3939 
   3940 pt_entry_t	pte_l2_l_cache_mode;
   3941 pt_entry_t	pte_l2_l_cache_mask;
   3942 
   3943 pt_entry_t	pte_l2_s_cache_mode;
   3944 pt_entry_t	pte_l2_s_cache_mask;
   3945 
   3946 pt_entry_t	pte_l2_s_prot_u;
   3947 pt_entry_t	pte_l2_s_prot_w;
   3948 pt_entry_t	pte_l2_s_prot_mask;
   3949 
   3950 pt_entry_t	pte_l1_s_proto;
   3951 pt_entry_t	pte_l1_c_proto;
   3952 pt_entry_t	pte_l2_s_proto;
   3953 
   3954 void		(*pmap_copy_page_func)(paddr_t, paddr_t);
   3955 void		(*pmap_zero_page_func)(paddr_t);
   3956 
   3957 #if ARM_MMU_GENERIC == 1
   3958 void
   3959 pmap_pte_init_generic(void)
   3960 {
   3961 
   3962 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
   3963 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
   3964 
   3965 	pte_l2_l_cache_mode = L2_B|L2_C;
   3966 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
   3967 
   3968 	pte_l2_s_cache_mode = L2_B|L2_C;
   3969 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
   3970 
   3971 	pte_l2_s_prot_u = L2_S_PROT_U_generic;
   3972 	pte_l2_s_prot_w = L2_S_PROT_W_generic;
   3973 	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
   3974 
   3975 	pte_l1_s_proto = L1_S_PROTO_generic;
   3976 	pte_l1_c_proto = L1_C_PROTO_generic;
   3977 	pte_l2_s_proto = L2_S_PROTO_generic;
   3978 
   3979 	pmap_copy_page_func = pmap_copy_page_generic;
   3980 	pmap_zero_page_func = pmap_zero_page_generic;
   3981 }
   3982 
   3983 #if defined(CPU_ARM9)
   3984 void
   3985 pmap_pte_init_arm9(void)
   3986 {
   3987 
   3988 	/*
   3989 	 * ARM9 is compatible with generic, but we want to use
   3990 	 * write-through caching for now.
   3991 	 */
   3992 	pmap_pte_init_generic();
   3993 
   3994 	pte_l1_s_cache_mode = L1_S_C;
   3995 	pte_l2_l_cache_mode = L2_C;
   3996 	pte_l2_s_cache_mode = L2_C;
   3997 }
   3998 #endif /* CPU_ARM9 */
   3999 #endif /* ARM_MMU_GENERIC == 1 */
   4000 
   4001 #if ARM_MMU_XSCALE == 1
   4002 void
   4003 pmap_pte_init_xscale(void)
   4004 {
   4005 	uint32_t auxctl;
   4006 
   4007 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
   4008 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
   4009 
   4010 	pte_l2_l_cache_mode = L2_B|L2_C;
   4011 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
   4012 
   4013 	pte_l2_s_cache_mode = L2_B|L2_C;
   4014 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
   4015 
   4016 #ifdef XSCALE_CACHE_WRITE_THROUGH
   4017 	/*
   4018 	 * Some versions of the XScale core have various bugs in
   4019 	 * their cache units, the work-around for which is to run
   4020 	 * the cache in write-through mode.  Unfortunately, this
   4021 	 * has a major (negative) impact on performance.  So, we
   4022 	 * go ahead and run fast-and-loose, in the hopes that we
   4023 	 * don't line up the planets in a way that will trip the
   4024 	 * bugs.
   4025 	 *
   4026 	 * However, we give you the option to be slow-but-correct.
   4027 	 */
   4028 	pte_l1_s_cache_mode = L1_S_C;
   4029 	pte_l2_l_cache_mode = L2_C;
   4030 	pte_l2_s_cache_mode = L2_C;
   4031 #endif /* XSCALE_CACHE_WRITE_THROUGH */
   4032 
   4033 	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
   4034 	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
   4035 	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
   4036 
   4037 	pte_l1_s_proto = L1_S_PROTO_xscale;
   4038 	pte_l1_c_proto = L1_C_PROTO_xscale;
   4039 	pte_l2_s_proto = L2_S_PROTO_xscale;
   4040 
   4041 	pmap_copy_page_func = pmap_copy_page_xscale;
   4042 	pmap_zero_page_func = pmap_zero_page_xscale;
   4043 
   4044 	/*
   4045 	 * Disable ECC protection of page table access, for now.
   4046 	 */
   4047 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
   4048 		: "=r" (auxctl));
   4049 	auxctl &= ~XSCALE_AUXCTL_P;
   4050 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
   4051 		:
   4052 		: "r" (auxctl));
   4053 }
   4054 
   4055 /*
   4056  * xscale_setup_minidata:
   4057  *
   4058  *	Set up the mini-data cache clean area.  We require the
   4059  *	caller to allocate the right amount of physically and
   4060  *	virtually contiguous space.
   4061  */
   4062 void
   4063 xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
   4064 {
   4065 	extern vaddr_t xscale_minidata_clean_addr;
   4066 	extern vsize_t xscale_minidata_clean_size; /* already initialized */
   4067 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   4068 	pt_entry_t *pte;
   4069 	vsize_t size;
   4070 	uint32_t auxctl;
   4071 
   4072 	xscale_minidata_clean_addr = va;
   4073 
   4074 	/* Round it to page size. */
   4075 	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
   4076 
   4077 	for (; size != 0;
   4078 	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
   4079 		pte = (pt_entry_t *)
   4080 		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   4081 		if (pte == NULL)
   4082 			panic("xscale_setup_minidata: can't find L2 table for "
   4083 			    "VA 0x%08lx", va);
   4084 		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   4085 		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
   4086 		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
   4087 	}
   4088 
   4089 	/*
   4090 	 * Configure the mini-data cache for write-back with
   4091 	 * read/write-allocate.
   4092 	 *
   4093 	 * NOTE: In order to reconfigure the mini-data cache, we must
   4094 	 * make sure it contains no valid data!  In order to do that,
   4095 	 * we must issue a global data cache invalidate command!
   4096 	 *
   4097 	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
   4098 	 * THIS IS VERY IMPORTANT!
   4099 	 */
   4100 
   4101 	/* Invalidate data and mini-data. */
   4102 	__asm __volatile("mcr p15, 0, %0, c7, c6, 0"
   4103 		:
   4104 		: "r" (auxctl));
   4105 
   4106 
   4107 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
   4108 		: "=r" (auxctl));
   4109 	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
   4110 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
   4111 		:
   4112 		: "r" (auxctl));
   4113 }
   4114 #endif /* ARM_MMU_XSCALE == 1 */
   4115