/*	$NetBSD: pmap.c,v 1.112 2002/08/22 01:13:55 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001 Christopher Gilbert
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil@causality.com>.
 */

/*
 * The dram block info is currently referenced from the bootconfig.
 * This should be placed in a separate structure.
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG		- Build in pmap_debug_level code
 */

/* Include header files */

#include "opt_pmap_debug.h"
#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/cdefs.h>

#include <uvm/uvm.h>

#include <machine/bootconfig.h>
#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/pcb.h>
#include <machine/param.h>
#include <arm/arm32/katelib.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.112 2002/08/22 01:13:55 thorpej Exp $");

#ifdef PMAP_DEBUG
#define	PDEBUG(_lev_,_stat_)						\
	do {								\
		if (pmap_debug_level >= (_lev_))			\
			((_stat_));					\
	} while (/*CONSTCOND*/ 0)
int pmap_debug_level = -2;
void pmap_dump_pvlist(vaddr_t phys, char *m);

/*
 * for switching to potentially finer grained debugging
 */
#define	PDB_FOLLOW	0x0001
#define	PDB_INIT	0x0002
#define	PDB_ENTER	0x0004
#define	PDB_REMOVE	0x0008
#define	PDB_CREATE	0x0010
#define	PDB_PTPAGE	0x0020
#define	PDB_GROWKERN	0x0040
#define	PDB_BITS	0x0080
#define	PDB_COLLECT	0x0100
#define	PDB_PROTECT	0x0200
#define	PDB_MAP_L1	0x0400
#define	PDB_BOOTSTRAP	0x1000
#define	PDB_PARANOIA	0x2000
#define	PDB_WIRING	0x4000
#define	PDB_PVDUMP	0x8000

int debugmap = 0;
int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
#define	NPDEBUG(_lev_,_stat_)						\
	do {								\
		if (pmapdebug & (_lev_))				\
			((_stat_));					\
	} while (/*CONSTCOND*/ 0)

#else	/* PMAP_DEBUG */
#define	PDEBUG(_lev_,_stat_)	/* Nothing */
#define	NPDEBUG(_lev_,_stat_)	/* Nothing */
#endif	/* PMAP_DEBUG */
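
/*
 * Example usage (illustrative only; any level/flag combination that
 * matches the paths being debugged will do):
 *
 *	PDEBUG(5, printf("pmap_enter: V%08lx P%08lx\n", va, pa));
 *	NPDEBUG(PDB_ENTER, printf("pmap_enter: pmap=%p\n", pmap));
 */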

struct pmap     kernel_pmap_store;

/*
 * linked list of all non-kernel pmaps
 */

static LIST_HEAD(, pmap) pmaps;

/*
 * pool that pmap structures are allocated from
 */

struct pool pmap_pmap_pool;

/*
 * pool/cache that PT-PT's are allocated from
 */

struct pool pmap_ptpt_pool;
struct pool_cache pmap_ptpt_cache;
u_int pmap_ptpt_cache_generation;

static void *pmap_ptpt_page_alloc(struct pool *, int);
static void pmap_ptpt_page_free(struct pool *, void *);

struct pool_allocator pmap_ptpt_allocator = {
	pmap_ptpt_page_alloc, pmap_ptpt_page_free,
};

static int pmap_ptpt_ctor(void *, void *, int);

static pt_entry_t *csrc_pte, *cdst_pte;
static vaddr_t csrcp, cdstp;

char *memhook;
extern caddr_t msgbufaddr;

boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */

/*
 * locking data structures
 */

static struct lock pmap_main_lock;
static struct simplelock pvalloc_lock;
static struct simplelock pmaps_lock;
#ifdef LOCKDEBUG
#define	PMAP_MAP_TO_HEAD_LOCK() \
     (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
#define	PMAP_MAP_TO_HEAD_UNLOCK() \
     (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)

#define	PMAP_HEAD_TO_MAP_LOCK() \
     (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
#define	PMAP_HEAD_TO_MAP_UNLOCK() \
     (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#else
#define	PMAP_MAP_TO_HEAD_LOCK()		/* nothing */
#define	PMAP_MAP_TO_HEAD_UNLOCK()	/* nothing */
#define	PMAP_HEAD_TO_MAP_LOCK()		/* nothing */
#define	PMAP_HEAD_TO_MAP_UNLOCK()	/* nothing */
#endif /* LOCKDEBUG */
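
/*
 * Sketch of the locking discipline these macros implement: operations
 * that start from a pmap and walk towards the PV lists (e.g.
 * pmap_enter()) bracket their work with
 * PMAP_MAP_TO_HEAD_LOCK()/UNLOCK(), while operations that start from a
 * vm_page and walk towards the pmaps (e.g. pmap_clearbit()) use
 * PMAP_HEAD_TO_MAP_LOCK()/UNLOCK(); taking the shared/exclusive
 * spinlock first keeps the two directions from deadlocking against
 * each other.
 */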

/*
 * pv_page management structures: locked by pvalloc_lock
 */

TAILQ_HEAD(pv_pagelist, pv_page);
static struct pv_pagelist pv_freepages;	/* list of pv_pages with free entrys */
static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
static int pv_nfpvents;			/* # of free pv entries */
static struct pv_page *pv_initpage;	/* bootstrap page from kernel_map */
static vaddr_t pv_cachedva;		/* cached VA for later use */

#define	PVE_LOWAT (PVE_PER_PVPAGE / 2)	/* free pv_entry low water mark */
#define	PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
					/* high water mark */
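
/*
 * For example, with the 204-entry pv_pages worked out below for a
 * typical 32-bit configuration, PVE_LOWAT evaluates to 102 free
 * entries and PVE_HIWAT to 102 + 2 * 204 = 510.
 */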

/*
 * local prototypes
 */

static struct pv_entry	*pmap_add_pvpage __P((struct pv_page *, boolean_t));
static struct pv_entry	*pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
#define	ALLOCPV_NEED	0	/* need PV now */
#define	ALLOCPV_TRY	1	/* just try to allocate, don't steal */
#define	ALLOCPV_NONEED	2	/* don't need PV, just growing cache */
static struct pv_entry	*pmap_alloc_pvpage __P((struct pmap *, int));
static void		 pmap_enter_pv __P((struct vm_page *,
					    struct pv_entry *, struct pmap *,
					    vaddr_t, struct vm_page *, int));
static void		 pmap_free_pv __P((struct pmap *, struct pv_entry *));
static void		 pmap_free_pvs __P((struct pmap *, struct pv_entry *));
static void		 pmap_free_pv_doit __P((struct pv_entry *));
static void		 pmap_free_pvpage __P((void));
static boolean_t	 pmap_is_curpmap __P((struct pmap *));
static struct pv_entry	*pmap_remove_pv __P((struct vm_page *, struct pmap *,
			vaddr_t));
#define	PMAP_REMOVE_ALL		0	/* remove all mappings */
#define	PMAP_REMOVE_SKIPWIRED	1	/* skip wired mappings */

static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
	u_int, u_int));

/*
 * Structure that describes an L1 table.
 */
struct l1pt {
	SIMPLEQ_ENTRY(l1pt)	pt_queue;	/* Queue pointers */
	struct pglist		pt_plist;	/* Allocated page list */
	vaddr_t			pt_va;		/* Allocated virtual address */
	int			pt_flags;	/* Flags */
};
#define	PTFLAG_STATIC		0x01		/* Statically allocated */
#define	PTFLAG_KPT		0x02		/* Kernel pt's are mapped */
#define	PTFLAG_CLEAN		0x04		/* L1 is clean */

static void pmap_free_l1pt __P((struct l1pt *));
static int pmap_allocpagedir __P((struct pmap *));
static int pmap_clean_page __P((struct pv_entry *, boolean_t));
static void pmap_remove_all __P((struct vm_page *));

static struct vm_page	*pmap_alloc_ptp __P((struct pmap *, vaddr_t));
static struct vm_page	*pmap_get_ptp __P((struct pmap *, vaddr_t));
__inline static void pmap_clearbit __P((struct vm_page *, unsigned int));

extern paddr_t physical_start;
extern paddr_t physical_end;
extern unsigned int free_pages;
extern int max_processes;

vaddr_t virtual_avail;
vaddr_t virtual_end;
vaddr_t pmap_curmaxkvaddr;

vaddr_t avail_start;
vaddr_t avail_end;

extern pv_addr_t systempage;

/* Variables used by the L1 page table queue code */
SIMPLEQ_HEAD(l1pt_queue, l1pt);
static struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
static int l1pt_static_queue_count;	    /* items in the static l1 queue */
static int l1pt_static_create_count;	    /* static l1 items created */
static struct l1pt_queue l1pt_queue;	    /* head of our l1 queue */
static int l1pt_queue_count;		    /* items in the l1 queue */
static int l1pt_create_count;		    /* stat - L1's create count */
static int l1pt_reuse_count;		    /* stat - L1's reused count */

/* Local function prototypes (not used outside this file) */
void pmap_pinit __P((struct pmap *));
void pmap_freepagedir __P((struct pmap *));

/* Other function prototypes */
extern void bzero_page __P((vaddr_t));
extern void bcopy_page __P((vaddr_t, vaddr_t));

struct l1pt *pmap_alloc_l1pt __P((void));
static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
     vaddr_t l2pa, boolean_t));

static pt_entry_t *pmap_map_ptes __P((struct pmap *));
static void pmap_unmap_ptes __P((struct pmap *));

__inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));
static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));
static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));

/*
 * real definition of pv_entry.
 */

struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	int		pv_flags;	/* flags */
	struct vm_page	*pv_ptp;	/* vm_page for the ptp */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define	PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
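
/*
 * Worked example (sizes are illustrative and depend on pointer width
 * and compiler padding): with NBPG = 4096, a 16-byte pv_page_info
 * (two TAILQ pointers, a free-list pointer and an int) and a 20-byte
 * pv_entry (five 32-bit fields), PVE_PER_PVPAGE evaluates to
 * (4096 - 16) / 20 = 204 pv_entrys per pv_page.
 */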

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

#ifdef MYCROFT_HACK
int mycroft_hack = 0;
#endif

/* Function to set the debug level of the pmap code */

#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{
	pmap_debug_level = level;
	printf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif	/* PMAP_DEBUG */

__inline static boolean_t
pmap_is_curpmap(struct pmap *pmap)
{

	if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
	    pmap == pmap_kernel())
		return (TRUE);

	return (FALSE);
}

/*
 * p v _ e n t r y   f u n c t i o n s
 */

/*
 * pv_entry allocation functions:
 *   the main pv_entry allocation functions are:
 *     pmap_alloc_pv: allocate a pv_entry structure
 *     pmap_free_pv: free one pv_entry
 *     pmap_free_pvs: free a list of pv_entrys
 *
 * the rest are helper functions
 */

/*
 * pmap_alloc_pv: inline function to allocate a pv_entry structure
 * => we lock pvalloc_lock
 * => if we fail, we call out to pmap_alloc_pvpage
 * => 3 modes:
 *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
 *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
 *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
 *			one now
 *
 * "try" is for optional functions like pmap_copy().
 */

__inline static struct pv_entry *
pmap_alloc_pv(struct pmap *pmap, int mode)
{
	struct pv_page *pvpage;
	struct pv_entry *pv;

	simple_lock(&pvalloc_lock);

	pvpage = TAILQ_FIRST(&pv_freepages);

	if (pvpage != NULL) {
		pvpage->pvinfo.pvpi_nfree--;
		if (pvpage->pvinfo.pvpi_nfree == 0) {
			/* nothing left in this one? */
			TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
		}
		pv = pvpage->pvinfo.pvpi_pvfree;
		KASSERT(pv);
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
		pv_nfpvents--;  /* took one from pool */
	} else {
		pv = NULL;		/* need more of them */
	}

	/*
	 * if below low water mark or we didn't get a pv_entry we try and
	 * create more pv_entrys ...
	 */

	if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
		if (pv == NULL)
			pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
					       mode : ALLOCPV_NEED);
		else
			(void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
	}

	simple_unlock(&pvalloc_lock);
	return(pv);
}
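
/*
 * Illustrative callers (sketch): an optional operation passes
 * ALLOCPV_TRY and simply backs out on failure:
 *
 *	pve = pmap_alloc_pv(pmap, ALLOCPV_TRY);
 *	if (pve == NULL)
 *		return;			(skip the optional mapping)
 *
 * whereas a mandatory caller such as pmap_enter() passes ALLOCPV_NEED
 * and must treat a NULL return as resource exhaustion.
 */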

/*
 * pmap_alloc_pvpage: maybe allocate a new pvpage
 *
 * if need_entry is false: try and allocate a new pv_page
 * if need_entry is true: try and allocate a new pv_page and return a
 *	new pv_entry from it.   if we are unable to allocate a pv_page
 *	we make a last ditch effort to steal a pv_page from some other
 *	mapping.    if that fails, we panic...
 *
 * => we assume that the caller holds pvalloc_lock
 */

static struct pv_entry *
pmap_alloc_pvpage(struct pmap *pmap, int mode)
{
	struct vm_page *pg;
	struct pv_page *pvpage;
	struct pv_entry *pv;
	int s;

	/*
	 * if we need_entry and we've got unused pv_pages, allocate from there
	 */

	pvpage = TAILQ_FIRST(&pv_unusedpgs);
	if (mode != ALLOCPV_NONEED && pvpage != NULL) {

		/* move it to pv_freepages list */
		TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);

		/* allocate a pv_entry */
		pvpage->pvinfo.pvpi_nfree--;	/* can't go to zero */
		pv = pvpage->pvinfo.pvpi_pvfree;
		KASSERT(pv);
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;

		pv_nfpvents--;  /* took one from pool */
		return(pv);
	}

	/*
	 * see if we've got a cached unmapped VA that we can map a page in;
	 * if not, try to allocate one.
	 */

	if (pv_cachedva == 0) {
		s = splvm();
		pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
		    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
		splx(s);
		if (pv_cachedva == 0) {
			return (NULL);
		}
	}

	pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
	    UVM_PGA_USERESERVE);

	if (pg == NULL)
		return (NULL);
	pg->flags &= ~PG_BUSY;	/* never busy */

	/*
	 * add a mapping for our new pv_page and free its entrys (save one!)
	 *
	 * NOTE: If we are allocating a PV page for the kernel pmap, the
	 * pmap is already locked!  (...but entering the mapping is safe...)
	 */

	pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
		VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	pvpage = (struct pv_page *) pv_cachedva;
	pv_cachedva = 0;
	return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
}

/*
 * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
 *
 * => caller must hold pvalloc_lock
 * => if need_entry is true, we allocate and return one pv_entry
 */

static struct pv_entry *
pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
{
	int tofree, lcv;

	/* do we need to return one? */
	tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;

	pvp->pvinfo.pvpi_pvfree = NULL;
	pvp->pvinfo.pvpi_nfree = tofree;
	for (lcv = 0 ; lcv < tofree ; lcv++) {
		pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
		pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
	}
	if (need_entry)
		TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
	else
		TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	pv_nfpvents += tofree;
	return((need_entry) ? &pvp->pvents[lcv] : NULL);
}

/*
 * pmap_free_pv_doit: actually free a pv_entry
 *
 * => do not call this directly!  instead use either
 *    1. pmap_free_pv ==> free a single pv_entry
 *    2. pmap_free_pvs => free a list of pv_entrys
 * => we must be holding pvalloc_lock
 */

__inline static void
pmap_free_pv_doit(struct pv_entry *pv)
{
	struct pv_page *pvp;

	pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
	pv_nfpvents++;
	pvp->pvinfo.pvpi_nfree++;

	/* nfree == 1 => fully allocated page just became partly allocated */
	if (pvp->pvinfo.pvpi_nfree == 1) {
		TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
	}

	/* free it */
	pv->pv_next = pvp->pvinfo.pvpi_pvfree;
	pvp->pvinfo.pvpi_pvfree = pv;

	/*
	 * are all pv_page's pv_entry's free?  move it to unused queue.
	 */

	if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
		TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	}
}

/*
 * pmap_free_pv: free a single pv_entry
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
{
	simple_lock(&pvalloc_lock);
	pmap_free_pv_doit(pv);

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */
	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}

/*
 * pmap_free_pvs: free a list of pv_entrys
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
{
	struct pv_entry *nextpv;

	simple_lock(&pvalloc_lock);

	for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
		nextpv = pvs->pv_next;
		pmap_free_pv_doit(pvs);
	}

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */
	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}


/*
 * pmap_free_pvpage: try and free an unused pv_page structure
 *
 * => assume caller is holding the pvalloc_lock and that
 *	there is a page on the pv_unusedpgs list
 * => if we can't get a lock on the kmem_map we try again later
 */

static void
pmap_free_pvpage(void)
{
	int s;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	struct pv_page *pvp;

	s = splvm(); /* protect kmem_map */

	pvp = TAILQ_FIRST(&pv_unusedpgs);

	/*
	 * note: watch out for pv_initpage which is allocated out of
	 * kernel_map rather than kmem_map.
	 */
	if (pvp == pv_initpage)
		map = kernel_map;
	else
		map = kmem_map;
	if (vm_map_lock_try(map)) {

		/* remove pvp from pv_unusedpgs */
		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);

		/* unmap the page */
		dead_entries = NULL;
		uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
		    &dead_entries);
		vm_map_unlock(map);

		if (dead_entries != NULL)
			uvm_unmap_detach(dead_entries, 0);

		pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
	}
	if (pvp == pv_initpage)
		/* no more initpage, we've freed it */
		pv_initpage = NULL;

	splx(s);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */

__inline static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
    vaddr_t va, struct vm_page *ptp, int flags)
{
	pve->pv_pmap = pmap;
	pve->pv_va = va;
	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
	pve->pv_flags = flags;
	simple_lock(&pg->mdpage.pvh_slock);	/* lock vm_page */
	pve->pv_next = pg->mdpage.pvh_list;	/* add to ... */
	pg->mdpage.pvh_list = pve;		/* ... locked list */
	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */
	if (pve->pv_flags & PVF_WIRED)
		++pmap->pm_stats.wired_count;
#ifdef PMAP_ALIAS_DEBUG
    {
	int s = splhigh();
	if (pve->pv_flags & PVF_WRITE)
		pg->mdpage.rw_mappings++;
	else
		pg->mdpage.ro_mappings++;
	if (pg->mdpage.rw_mappings != 0 &&
	    (pg->mdpage.kro_mappings != 0 || pg->mdpage.krw_mappings != 0)) {
		printf("pmap_enter_pv: rw %u, kro %u, krw %u\n",
		    pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
		    pg->mdpage.krw_mappings);
	}
	splx(s);
    }
#endif /* PMAP_ALIAS_DEBUG */
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */

__inline static struct pv_entry *
pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
{
	struct pv_entry *pve, **prevptr;

	prevptr = &pg->mdpage.pvh_list;		/* previous pv_entry pointer */
	pve = *prevptr;
	while (pve) {
		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
			*prevptr = pve->pv_next;		/* remove it! */
			if (pve->pv_flags & PVF_WIRED)
			    --pmap->pm_stats.wired_count;
#ifdef PMAP_ALIAS_DEBUG
    {
			int s = splhigh();
			if (pve->pv_flags & PVF_WRITE) {
				KASSERT(pg->mdpage.rw_mappings != 0);
				pg->mdpage.rw_mappings--;
			} else {
				KASSERT(pg->mdpage.ro_mappings != 0);
				pg->mdpage.ro_mappings--;
			}
			splx(s);
    }
#endif /* PMAP_ALIAS_DEBUG */
			break;
		}
		prevptr = &pve->pv_next;		/* previous pointer */
		pve = pve->pv_next;			/* advance */
	}
	return(pve);				/* return removed pve */
}

/*
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => caller must call pmap_vac_me_harder() if writable status of a page
 *    may have changed.
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */

static /* __inline */ u_int
pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
    u_int bic_mask, u_int eor_mask)
{
	struct pv_entry *npv;
	u_int flags, oflags;

	/*
	 * There is at least one VA mapping this page.
	 */

	for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
		if (pmap == npv->pv_pmap && va == npv->pv_va) {
			oflags = npv->pv_flags;
			npv->pv_flags = flags =
			    ((oflags & ~bic_mask) ^ eor_mask);
			if ((flags ^ oflags) & PVF_WIRED) {
				if (flags & PVF_WIRED)
					++pmap->pm_stats.wired_count;
				else
					--pmap->pm_stats.wired_count;
			}
#ifdef PMAP_ALIAS_DEBUG
    {
			int s = splhigh();
			if ((flags ^ oflags) & PVF_WRITE) {
				if (flags & PVF_WRITE) {
					pg->mdpage.rw_mappings++;
					pg->mdpage.ro_mappings--;
					if (pg->mdpage.rw_mappings != 0 &&
					    (pg->mdpage.kro_mappings != 0 ||
					     pg->mdpage.krw_mappings != 0)) {
						printf("pmap_modify_pv: rw %u, "
						    "kro %u, krw %u\n",
						    pg->mdpage.rw_mappings,
						    pg->mdpage.kro_mappings,
						    pg->mdpage.krw_mappings);
					}
				} else {
					KASSERT(pg->mdpage.rw_mappings != 0);
					pg->mdpage.rw_mappings--;
					pg->mdpage.ro_mappings++;
				}
			}
			splx(s);
    }
#endif /* PMAP_ALIAS_DEBUG */
			return (oflags);
		}
	}
	return (0);
}
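
/*
 * The flag update above computes "new = (old & ~bic_mask) ^ eor_mask",
 * so, for example:
 *
 *	pmap_modify_pv(pm, va, pg, PVF_WIRED, PVF_WIRED)  sets PVF_WIRED
 *	pmap_modify_pv(pm, va, pg, PVF_WIRED, 0)          clears PVF_WIRED
 *	pmap_modify_pv(pm, va, pg, 0, PVF_WIRED)          toggles PVF_WIRED
 */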

/*
 * Map the specified level 2 page table into the level 1 page table for
 * the given pmap to cover a chunk of virtual address space starting from the
 * address specified.
 */
static __inline void
pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, boolean_t selfref)
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> L1_S_SHIFT) & ~3;

	/* Map page table into the L1. */
	pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
	pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
	pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
	pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);
	cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);

	/* Map the page table into the page table area. */
	if (selfref)
		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
}
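
/*
 * Worked example (hypothetical addresses, assuming the usual 1MB ARM
 * sections, i.e. L1_S_SHIFT == 20): for va = 0xc0100000,
 * ptva = (0xc0100000 >> 20) & ~3 = 0xc00, so the four coarse entries
 * pm_pdir[0xc00..0xc03] are written.  Each L1 entry covers 1MB via one
 * 1KB quarter (256 PTEs) of the 4KB L2 page, so a single L2 page maps
 * the full 4MB range 0xc0000000-0xc03fffff.
 */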

#if 0
static __inline void
pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> L1_S_SHIFT) & ~3;

	/* Unmap page table from the L1. */
	pmap->pm_pdir[ptva + 0] = 0;
	pmap->pm_pdir[ptva + 1] = 0;
	pmap->pm_pdir[ptva + 2] = 0;
	pmap->pm_pdir[ptva + 3] = 0;
	cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);

	/* Unmap the page table from the page table area. */
	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
}
#endif

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 *
 *	XXX This routine should eventually go away; it's only used
 *	XXX by machine-dependent crash dump code.
 */
vaddr_t
pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
{
	pt_entry_t *pte;

	while (spa < epa) {
		pte = vtopte(va);

		*pte = L2_S_PROTO | spa |
		    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
		PTE_SYNC(pte);
		cpu_tlb_flushID_SE(va);
		va += NBPG;
		spa += NBPG;
	}
	pmap_update(pmap_kernel());
	return(va);
}
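
/*
 * Usage sketch (hypothetical names), in the crash-dump style this
 * routine is kept around for:
 *
 *	va = pmap_map(va, dump_pa, dump_pa + dump_size,
 *	    VM_PROT_READ|VM_PROT_WRITE);
 *
 * i.e. map the physical range [spa, epa) at va and return the first
 * unused VA past the mapping.
 */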
    976    1.1      matt 
    977    1.1      matt 
    978    1.1      matt /*
    979    1.3      matt  * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
    980    1.1      matt  *
    981    1.1      matt  * bootstrap the pmap system. This is called from initarm and allows
    982    1.1      matt  * the pmap system to initailise any structures it requires.
    983    1.1      matt  *
    984    1.1      matt  * Currently this sets up the kernel_pmap that is statically allocated
    985    1.1      matt  * and also allocated virtual addresses for certain page hooks.
    986    1.1      matt  * Currently the only one page hook is allocated that is used
    987    1.1      matt  * to zero physical pages of memory.
    988    1.1      matt  * It also initialises the start and end address of the kernel data space.
    989    1.1      matt  */
    990    1.1      matt 
    991   1.17     chris char *boot_head;
    992    1.1      matt 
    993    1.1      matt void
    994   1.73   thorpej pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
    995    1.1      matt {
    996   1.54   thorpej 	pt_entry_t *pte;
    997    1.1      matt 
    998   1.15     chris 	pmap_kernel()->pm_pdir = kernel_l1pt;
    999   1.15     chris 	pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
   1000   1.15     chris 	pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
   1001   1.15     chris 	simple_lock_init(&pmap_kernel()->pm_lock);
   1002   1.16     chris 	pmap_kernel()->pm_obj.pgops = NULL;
   1003   1.16     chris 	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
   1004   1.16     chris 	pmap_kernel()->pm_obj.uo_npages = 0;
   1005   1.16     chris 	pmap_kernel()->pm_obj.uo_refs = 1;
   1006    1.1      matt 
   1007   1.54   thorpej 	virtual_avail = KERNEL_VM_BASE;
   1008   1.74   thorpej 	virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;
   1009    1.1      matt 
   1010    1.1      matt 	/*
   1011   1.54   thorpej 	 * now we allocate the "special" VAs which are used for tmp mappings
   1012   1.54   thorpej 	 * by the pmap (and other modules).  we allocate the VAs by advancing
   1013   1.54   thorpej 	 * virtual_avail (note that there are no pages mapped at these VAs).
   1014   1.54   thorpej 	 * we find the PTE that maps the allocated VA via the linear PTE
   1015   1.54   thorpej 	 * mapping.
   1016    1.1      matt 	 */
   1017    1.1      matt 
   1018   1.54   thorpej 	pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);
   1019   1.54   thorpej 
   1020   1.54   thorpej 	csrcp = virtual_avail; csrc_pte = pte;
   1021   1.54   thorpej 	virtual_avail += PAGE_SIZE; pte++;
   1022   1.54   thorpej 
   1023   1.54   thorpej 	cdstp = virtual_avail; cdst_pte = pte;
   1024   1.54   thorpej 	virtual_avail += PAGE_SIZE; pte++;
   1025   1.54   thorpej 
   1026   1.54   thorpej 	memhook = (char *) virtual_avail;	/* don't need pte */
   1027   1.54   thorpej 	virtual_avail += PAGE_SIZE; pte++;
   1028   1.54   thorpej 
   1029   1.54   thorpej 	msgbufaddr = (caddr_t) virtual_avail;	/* don't need pte */
   1030   1.54   thorpej 	virtual_avail += round_page(MSGBUFSIZE);
   1031   1.54   thorpej 	pte += atop(round_page(MSGBUFSIZE));
   1032    1.1      matt 
   1033   1.17     chris 	/*
   1034   1.17     chris 	 * init the static-global locks and global lists.
   1035   1.17     chris 	 */
   1036   1.17     chris 	spinlockinit(&pmap_main_lock, "pmaplk", 0);
   1037   1.17     chris 	simple_lock_init(&pvalloc_lock);
   1038   1.48     chris 	simple_lock_init(&pmaps_lock);
   1039   1.48     chris 	LIST_INIT(&pmaps);
   1040   1.17     chris 	TAILQ_INIT(&pv_freepages);
   1041   1.17     chris 	TAILQ_INIT(&pv_unusedpgs);
   1042    1.1      matt 
   1043   1.10     chris 	/*
   1044   1.10     chris 	 * initialize the pmap pool.
   1045   1.10     chris 	 */
   1046   1.10     chris 
   1047   1.10     chris 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
   1048   1.52   thorpej 		  &pool_allocator_nointr);
   1049  1.111   thorpej 
   1050  1.111   thorpej 	/*
   1051  1.111   thorpej 	 * initialize the PT-PT pool and cache.
   1052  1.111   thorpej 	 */
   1053  1.111   thorpej 
   1054  1.111   thorpej 	pool_init(&pmap_ptpt_pool, PAGE_SIZE, 0, 0, 0, "ptptpl",
   1055  1.111   thorpej 		  &pmap_ptpt_allocator);
   1056  1.111   thorpej 	pool_cache_init(&pmap_ptpt_cache, &pmap_ptpt_pool,
   1057  1.111   thorpej 			pmap_ptpt_ctor, NULL, NULL);
   1058  1.111   thorpej 
   1059   1.36   thorpej 	cpu_dcache_wbinv_all();
   1060    1.1      matt }
   1061    1.1      matt 
   1062    1.1      matt /*
   1063    1.1      matt  * void pmap_init(void)
   1064    1.1      matt  *
   1065    1.1      matt  * Initialize the pmap module.
    1066    1.1      matt  * Called by uvm_init() in uvm/uvm_init.c in order to initialise
   1067    1.1      matt  * any structures that the pmap system needs to map virtual memory.
   1068    1.1      matt  */
   1069    1.1      matt 
   1070    1.1      matt extern int physmem;
   1071    1.1      matt 
   1072    1.1      matt void
   1073   1.73   thorpej pmap_init(void)
   1074    1.1      matt {
   1075    1.1      matt 
   1076    1.1      matt 	/*
    1077    1.1      matt 	 * Set the available memory vars.  These do not map to real memory
    1078    1.1      matt 	 * addresses, and cannot, as the physical memory is fragmented.
    1079    1.1      matt 	 * They are used by ps for %mem calculations.
    1080    1.1      matt 	 * One could argue whether this should be the entire memory or just
    1081    1.1      matt 	 * the memory that is usable in a user process.
   1082    1.1      matt 	 */
   1083    1.1      matt 	avail_start = 0;
   1084    1.1      matt 	avail_end = physmem * NBPG;
   1085    1.1      matt 
   1086   1.17     chris 	/*
    1087   1.17     chris 	 * now we need enough free pv_entry structures to allow us to get
   1088   1.17     chris 	 * the kmem_map/kmem_object allocated and inited (done after this
   1089   1.17     chris 	 * function is finished).  to do this we allocate one bootstrap page out
   1090   1.17     chris 	 * of kernel_map and use it to provide an initial pool of pv_entry
   1091   1.17     chris 	 * structures.   we never free this page.
   1092   1.17     chris 	 */
   1093   1.17     chris 
   1094   1.17     chris 	pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
   1095   1.17     chris 	if (pv_initpage == NULL)
   1096   1.17     chris 		panic("pmap_init: pv_initpage");
   1097   1.17     chris 	pv_cachedva = 0;   /* a VA we have allocated but not used yet */
   1098   1.17     chris 	pv_nfpvents = 0;
   1099   1.17     chris 	(void) pmap_add_pvpage(pv_initpage, FALSE);
   1100   1.17     chris 
   1101    1.1      matt 	pmap_initialized = TRUE;
   1102    1.1      matt 
   1103    1.1      matt 	/* Initialise our L1 page table queues and counters */
   1104    1.1      matt 	SIMPLEQ_INIT(&l1pt_static_queue);
   1105    1.1      matt 	l1pt_static_queue_count = 0;
   1106    1.1      matt 	l1pt_static_create_count = 0;
   1107    1.1      matt 	SIMPLEQ_INIT(&l1pt_queue);
   1108    1.1      matt 	l1pt_queue_count = 0;
   1109    1.1      matt 	l1pt_create_count = 0;
   1110    1.1      matt 	l1pt_reuse_count = 0;
   1111    1.1      matt }
   1112    1.1      matt 
   1113    1.1      matt /*
   1114    1.1      matt  * pmap_postinit()
   1115    1.1      matt  *
   1116    1.1      matt  * This routine is called after the vm and kmem subsystems have been
   1117    1.1      matt  * initialised. This allows the pmap code to perform any initialisation
    1118    1.1      matt  * that can only be done once the memory allocation is in place.
   1119    1.1      matt  */
   1120    1.1      matt 
   1121    1.1      matt void
   1122   1.73   thorpej pmap_postinit(void)
   1123    1.1      matt {
   1124    1.1      matt 	int loop;
   1125    1.1      matt 	struct l1pt *pt;
   1126    1.1      matt 
   1127    1.1      matt #ifdef PMAP_STATIC_L1S
   1128    1.1      matt 	for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
   1129    1.1      matt #else	/* PMAP_STATIC_L1S */
   1130    1.1      matt 	for (loop = 0; loop < max_processes; ++loop) {
   1131    1.1      matt #endif	/* PMAP_STATIC_L1S */
   1132    1.1      matt 		/* Allocate a L1 page table */
   1133    1.1      matt 		pt = pmap_alloc_l1pt();
   1134    1.1      matt 		if (!pt)
    1135    1.1      matt 			panic("Cannot allocate static L1 page tables");
   1136    1.1      matt 
   1137    1.1      matt 		/* Clean it */
   1138   1.81   thorpej 		bzero((void *)pt->pt_va, L1_TABLE_SIZE);
   1139    1.1      matt 		pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
   1140    1.1      matt 		/* Add the page table to the queue */
   1141    1.1      matt 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
   1142    1.1      matt 		++l1pt_static_queue_count;
   1143    1.1      matt 		++l1pt_static_create_count;
   1144    1.1      matt 	}
   1145    1.1      matt }
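                          
                          /*
                           * PMAP_STATIC_L1S, when defined, bounds the pre-allocation above; it
                           * would normally be supplied by the kernel configuration file, e.g.
                           * (a sketch, not a required option):
                           *
                           *	options 	PMAP_STATIC_L1S=32
                           */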
   1146    1.1      matt 
   1147    1.1      matt 
   1148    1.1      matt /*
   1149    1.1      matt  * Create and return a physical map.
   1150    1.1      matt  *
    1151    1.1      matt  * The map returned is a real physical map that may be referenced by
    1152    1.1      matt  * the hardware; the pmap structure itself comes from the pmap pool,
    1153    1.1      matt  * and its machine-dependent part is initialised by pmap_pinit().
   1156    1.1      matt  */
   1157    1.1      matt 
   1158    1.1      matt pmap_t
   1159   1.73   thorpej pmap_create(void)
   1160    1.1      matt {
   1161   1.15     chris 	struct pmap *pmap;
   1162    1.1      matt 
   1163   1.10     chris 	/*
   1164   1.10     chris 	 * Fetch pmap entry from the pool
   1165   1.10     chris 	 */
   1166   1.10     chris 
   1167   1.10     chris 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
   1168   1.17     chris 	/* XXX is this really needed! */
   1169   1.17     chris 	memset(pmap, 0, sizeof(*pmap));
   1170    1.1      matt 
   1171   1.16     chris 	simple_lock_init(&pmap->pm_obj.vmobjlock);
   1172   1.16     chris 	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
   1173   1.16     chris 	TAILQ_INIT(&pmap->pm_obj.memq);
   1174   1.16     chris 	pmap->pm_obj.uo_npages = 0;
   1175   1.16     chris 	pmap->pm_obj.uo_refs = 1;
   1176   1.16     chris 	pmap->pm_stats.wired_count = 0;
   1177   1.16     chris 	pmap->pm_stats.resident_count = 1;
   1178   1.70   thorpej 	pmap->pm_ptphint = NULL;
   1179   1.16     chris 
   1180    1.1      matt 	/* Now init the machine part of the pmap */
   1181    1.1      matt 	pmap_pinit(pmap);
   1182    1.1      matt 	return(pmap);
   1183    1.1      matt }
   1184    1.1      matt 
   1185    1.1      matt /*
   1186    1.1      matt  * pmap_alloc_l1pt()
   1187    1.1      matt  *
    1188    1.1      matt  * This routine allocates physical and virtual memory for an L1 page table
    1189    1.1      matt  * and wires it.
    1190    1.1      matt  * An l1pt structure is returned to describe the allocated page table.
   1191    1.1      matt  *
   1192    1.1      matt  * This routine is allowed to fail if the required memory cannot be allocated.
   1193    1.1      matt  * In this case NULL is returned.
   1194    1.1      matt  */
   1195    1.1      matt 
   1196    1.1      matt struct l1pt *
   1197    1.1      matt pmap_alloc_l1pt(void)
   1198    1.1      matt {
   1199    1.2      matt 	paddr_t pa;
   1200    1.2      matt 	vaddr_t va;
   1201    1.1      matt 	struct l1pt *pt;
   1202    1.1      matt 	int error;
   1203    1.9       chs 	struct vm_page *m;
   1204    1.1      matt 
   1205    1.1      matt 	/* Allocate virtual address space for the L1 page table */
   1206   1.81   thorpej 	va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
   1207    1.1      matt 	if (va == 0) {
   1208    1.1      matt #ifdef DIAGNOSTIC
   1209   1.26  rearnsha 		PDEBUG(0,
   1210   1.26  rearnsha 		    printf("pmap: Cannot allocate pageable memory for L1\n"));
   1211    1.1      matt #endif	/* DIAGNOSTIC */
   1212    1.1      matt 		return(NULL);
   1213    1.1      matt 	}
   1214    1.1      matt 
   1215    1.1      matt 	/* Allocate memory for the l1pt structure */
   1216    1.1      matt 	pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
   1217    1.1      matt 
   1218    1.1      matt 	/*
   1219    1.1      matt 	 * Allocate pages from the VM system.
   1220    1.1      matt 	 */
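                          	/*
                          	 * Note the alignment argument: the ARM MMU requires an L1
                          	 * translation table to sit on an L1_TABLE_SIZE (16KB) boundary,
                          	 * hence the physically contiguous, 16KB-aligned allocation.
                          	 */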
   1221   1.81   thorpej 	error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
   1222   1.81   thorpej 	    L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
   1223    1.1      matt 	if (error) {
   1224    1.1      matt #ifdef DIAGNOSTIC
   1225   1.26  rearnsha 		PDEBUG(0,
   1226   1.26  rearnsha 		    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
   1227   1.26  rearnsha 		    error));
   1228    1.1      matt #endif	/* DIAGNOSTIC */
   1229    1.1      matt 		/* Release the resources we already have claimed */
   1230    1.1      matt 		free(pt, M_VMPMAP);
   1231   1.81   thorpej 		uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
   1232    1.1      matt 		return(NULL);
   1233    1.1      matt 	}
   1234    1.1      matt 
   1235    1.1      matt 	/* Map our physical pages into our virtual space */
   1236    1.1      matt 	pt->pt_va = va;
   1237   1.51     chris 	m = TAILQ_FIRST(&pt->pt_plist);
   1238   1.81   thorpej 	while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
   1239    1.1      matt 		pa = VM_PAGE_TO_PHYS(m);
   1240    1.1      matt 
   1241  1.110   thorpej 		pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);
   1242    1.1      matt 
   1243    1.1      matt 		va += NBPG;
   1244    1.1      matt 		m = m->pageq.tqe_next;
   1245    1.1      matt 	}
   1246    1.1      matt 
   1247    1.1      matt #ifdef DIAGNOSTIC
   1248    1.1      matt 	if (m)
    1249    1.1      matt 		panic("pmap_alloc_l1pt: pglist not empty");
   1250    1.1      matt #endif	/* DIAGNOSTIC */
   1251    1.1      matt 
   1252    1.1      matt 	pt->pt_flags = 0;
   1253    1.1      matt 	return(pt);
   1254    1.1      matt }
   1255    1.1      matt 
   1256    1.1      matt /*
   1257    1.1      matt  * Free a L1 page table previously allocated with pmap_alloc_l1pt().
   1258    1.1      matt  */
   1259   1.33     chris static void
   1260   1.73   thorpej pmap_free_l1pt(struct l1pt *pt)
   1261    1.1      matt {
    1262    1.1      matt 	/* Separate the physical memory from the virtual space */
   1263   1.81   thorpej 	pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
   1264   1.19     chris 	pmap_update(pmap_kernel());
   1265    1.1      matt 
   1266    1.1      matt 	/* Return the physical memory */
   1267    1.1      matt 	uvm_pglistfree(&pt->pt_plist);
   1268    1.1      matt 
   1269    1.1      matt 	/* Free the virtual space */
   1270   1.81   thorpej 	uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);
   1271    1.1      matt 
   1272    1.1      matt 	/* Free the l1pt structure */
   1273    1.1      matt 	free(pt, M_VMPMAP);
   1274    1.1      matt }
   1275    1.1      matt 
   1276    1.1      matt /*
   1277  1.111   thorpej  * pmap_ptpt_page_alloc:
   1278   1.93   thorpej  *
   1279  1.111   thorpej  *	Back-end page allocator for the PT-PT pool.
   1280   1.93   thorpej  */
   1281  1.111   thorpej static void *
   1282  1.111   thorpej pmap_ptpt_page_alloc(struct pool *pp, int flags)
   1283   1.93   thorpej {
   1284   1.93   thorpej 	struct vm_page *pg;
   1285   1.93   thorpej 	pt_entry_t *pte;
   1286  1.111   thorpej 	vaddr_t va;
   1287   1.93   thorpej 
   1288  1.111   thorpej 	/* XXX PR_WAITOK? */
   1289  1.111   thorpej 	va = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
   1290  1.111   thorpej 	if (va == 0)
   1291  1.111   thorpej 		return (NULL);
   1292   1.93   thorpej 
   1293   1.93   thorpej 	for (;;) {
   1294   1.93   thorpej 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
   1295   1.93   thorpej 		if (pg != NULL)
   1296   1.93   thorpej 			break;
   1297  1.111   thorpej 		if ((flags & PR_WAITOK) == 0) {
   1298  1.111   thorpej 			uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
   1299  1.111   thorpej 			return (NULL);
   1300  1.111   thorpej 		}
   1301   1.93   thorpej 		uvm_wait("pmap_ptpt");
   1302   1.93   thorpej 	}
   1303   1.93   thorpej 
   1304  1.111   thorpej 	pte = vtopte(va);
   1305   1.93   thorpej 	KDASSERT(pmap_pte_v(pte) == 0);
   1306   1.93   thorpej 
   1307  1.111   thorpej 	*pte = L2_S_PROTO | VM_PAGE_TO_PHYS(pg) |
   1308  1.111   thorpej 	     L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
   1309  1.112   thorpej 	PTE_SYNC(pte);
   1310  1.105   thorpej #ifdef PMAP_ALIAS_DEBUG
   1311  1.105   thorpej     {
   1312  1.105   thorpej 	int s = splhigh();
   1313  1.105   thorpej 	pg->mdpage.krw_mappings++;
   1314  1.105   thorpej 	splx(s);
   1315  1.105   thorpej     }
   1316  1.105   thorpej #endif /* PMAP_ALIAS_DEBUG */
   1317   1.93   thorpej 
   1318  1.111   thorpej 	return ((void *) va);
   1319   1.93   thorpej }
   1320   1.93   thorpej 
   1321   1.93   thorpej /*
   1322  1.111   thorpej  * pmap_ptpt_page_free:
   1323   1.93   thorpej  *
   1324  1.111   thorpej  *	Back-end page free'er for the PT-PT pool.
   1325   1.93   thorpej  */
   1326   1.93   thorpej static void
   1327  1.111   thorpej pmap_ptpt_page_free(struct pool *pp, void *v)
   1328   1.93   thorpej {
   1329  1.111   thorpej 	vaddr_t va = (vaddr_t) v;
   1330  1.111   thorpej 	paddr_t pa;
   1331  1.111   thorpej 
   1332  1.111   thorpej 	pa = vtophys(va);
   1333   1.93   thorpej 
   1334  1.111   thorpej 	pmap_kremove(va, L2_TABLE_SIZE);
   1335   1.93   thorpej 	pmap_update(pmap_kernel());
   1336   1.93   thorpej 
   1337  1.111   thorpej 	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
   1338  1.111   thorpej 
   1339  1.111   thorpej 	uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
   1340  1.111   thorpej }
   1341  1.111   thorpej 
   1342  1.111   thorpej /*
   1343  1.111   thorpej  * pmap_ptpt_ctor:
   1344  1.111   thorpej  *
   1345  1.111   thorpej  *	Constructor for the PT-PT cache.
   1346  1.111   thorpej  */
   1347  1.111   thorpej static int
   1348  1.111   thorpej pmap_ptpt_ctor(void *arg, void *object, int flags)
   1349  1.111   thorpej {
   1350  1.111   thorpej 	caddr_t vptpt = object;
   1351  1.111   thorpej 
   1352  1.111   thorpej 	/* Page is already zero'd. */
   1353   1.93   thorpej 
   1354  1.111   thorpej 	/*
   1355  1.111   thorpej 	 * Map in kernel PTs.
   1356  1.111   thorpej 	 *
   1357  1.111   thorpej 	 * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
   1358  1.111   thorpej 	 */
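                          	/*
                          	 * (The source expression below is effectively vtopte(PTE_BASE)
                          	 * plus the kernel-part offset: PTE_BASE >> (PGSHIFT - 2) is the
                          	 * byte offset, within the linear PTE map, of the PTEs that map
                          	 * PTE_BASE itself, i.e. the kernel's own PT-PT.)
                          	 */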
   1359  1.111   thorpej 	memcpy(vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
   1360  1.111   thorpej 	       (char *)(PTE_BASE + (PTE_BASE >> (PGSHIFT - 2)) +
   1361  1.111   thorpej 			((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
   1362  1.111   thorpej 	       (KERNEL_PD_SIZE >> 2));
   1363  1.111   thorpej 
   1364  1.111   thorpej 	return (0);
   1365   1.93   thorpej }
   1366   1.93   thorpej 
   1367   1.93   thorpej /*
   1368    1.1      matt  * Allocate a page directory.
   1369    1.1      matt  * This routine will either allocate a new page directory from the pool
   1370    1.1      matt  * of L1 page tables currently held by the kernel or it will allocate
   1371    1.1      matt  * a new one via pmap_alloc_l1pt().
   1372    1.1      matt  * It will then initialise the l1 page table for use.
   1373    1.1      matt  */
   1374   1.33     chris static int
   1375   1.73   thorpej pmap_allocpagedir(struct pmap *pmap)
   1376    1.1      matt {
   1377  1.111   thorpej 	vaddr_t vptpt;
   1378    1.2      matt 	paddr_t pa;
   1379    1.1      matt 	struct l1pt *pt;
   1380  1.111   thorpej 	u_int gen;
   1381    1.1      matt 
   1382    1.1      matt 	PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
   1383    1.1      matt 
   1384    1.1      matt 	/* Do we have any spare L1's lying around ? */
   1385    1.1      matt 	if (l1pt_static_queue_count) {
   1386    1.1      matt 		--l1pt_static_queue_count;
   1387   1.98     lukem 		pt = SIMPLEQ_FIRST(&l1pt_static_queue);
   1388   1.98     lukem 		SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt_queue);
   1389    1.1      matt 	} else if (l1pt_queue_count) {
   1390    1.1      matt 		--l1pt_queue_count;
   1391   1.98     lukem 		pt = SIMPLEQ_FIRST(&l1pt_queue);
   1392   1.98     lukem 		SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt_queue);
   1393    1.1      matt 		++l1pt_reuse_count;
   1394    1.1      matt 	} else {
   1395    1.1      matt 		pt = pmap_alloc_l1pt();
   1396    1.1      matt 		if (!pt)
   1397    1.1      matt 			return(ENOMEM);
   1398    1.1      matt 		++l1pt_create_count;
   1399    1.1      matt 	}
   1400    1.1      matt 
   1401    1.1      matt 	/* Store the pointer to the l1 descriptor in the pmap. */
   1402    1.1      matt 	pmap->pm_l1pt = pt;
   1403    1.1      matt 
   1404    1.1      matt 	/* Get the physical address of the start of the l1 */
   1405   1.51     chris 	pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));
   1406    1.1      matt 
   1407    1.1      matt 	/* Store the virtual address of the l1 in the pmap. */
   1408    1.1      matt 	pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
   1409    1.1      matt 
   1410    1.1      matt 	/* Clean the L1 if it is dirty */
   1411  1.110   thorpej 	if (!(pt->pt_flags & PTFLAG_CLEAN)) {
   1412   1.81   thorpej 		bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
   1413  1.110   thorpej 		cpu_dcache_wb_range((vaddr_t) pmap->pm_pdir,
   1414  1.110   thorpej 		    (L1_TABLE_SIZE - KERNEL_PD_SIZE));
   1415  1.110   thorpej 	}
   1416    1.1      matt 
   1417    1.1      matt 	/* Allocate a page table to map all the page tables for this pmap */
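                          	/*
                          	 * We may sleep in pool_cache_get(); if the kernel mappings
                          	 * that every PT-PT must contain change in the meantime, the
                          	 * cache generation is bumped and the object we fetched is
                          	 * stale, so we destruct it and try again.
                          	 */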
   1418  1.111   thorpej 	KASSERT(pmap->pm_vptpt == 0);
   1419  1.111   thorpej 
   1420  1.111   thorpej  try_again:
   1421  1.111   thorpej 	gen = pmap_ptpt_cache_generation;
   1422  1.111   thorpej 	vptpt = (vaddr_t) pool_cache_get(&pmap_ptpt_cache, PR_WAITOK);
    1423  1.111   thorpej 	if (vptpt == 0) {
    1424  1.111   thorpej 		PDEBUG(0, printf("pmap_allocpagedir: no KVA for PTPT\n"));
   1425   1.93   thorpej 		pmap_freepagedir(pmap);
   1426  1.111   thorpej 		return (ENOMEM);
   1427    1.5    toshii 	}
   1428    1.5    toshii 
   1429   1.93   thorpej 	/* need to lock this all up for growkernel */
   1430   1.48     chris 	simple_lock(&pmaps_lock);
   1431   1.48     chris 
   1432  1.111   thorpej 	if (gen != pmap_ptpt_cache_generation) {
   1433  1.111   thorpej 		simple_unlock(&pmaps_lock);
   1434  1.111   thorpej 		pool_cache_destruct_object(&pmap_ptpt_cache, (void *) vptpt);
   1435  1.111   thorpej 		goto try_again;
   1436  1.111   thorpej 	}
   1437  1.111   thorpej 
   1438  1.111   thorpej 	pmap->pm_vptpt = vptpt;
   1439  1.111   thorpej 	pmap->pm_pptpt = vtophys(vptpt);
   1440  1.111   thorpej 
   1441   1.64   thorpej 	/* Duplicate the kernel mappings. */
   1442   1.81   thorpej 	bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
   1443   1.81   thorpej 		(char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
   1444   1.48     chris 		KERNEL_PD_SIZE);
   1445  1.110   thorpej 	cpu_dcache_wb_range((vaddr_t)pmap->pm_pdir +
   1446  1.110   thorpej 	    (L1_TABLE_SIZE - KERNEL_PD_SIZE), KERNEL_PD_SIZE);
   1447   1.48     chris 
   1448    1.1      matt 	/* Wire in this page table */
   1449   1.53   thorpej 	pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, TRUE);
   1450    1.1      matt 
   1451    1.1      matt 	pt->pt_flags &= ~PTFLAG_CLEAN;	/* L1 is dirty now */
   1452  1.110   thorpej 
   1453   1.48     chris 	LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
   1454   1.48     chris 	simple_unlock(&pmaps_lock);
   1455   1.48     chris 
   1456    1.1      matt 	return(0);
   1457    1.1      matt }
   1458    1.1      matt 
   1459    1.1      matt 
   1460    1.1      matt /*
   1461    1.1      matt  * Initialize a preallocated and zeroed pmap structure,
   1462    1.1      matt  * such as one in a vmspace structure.
   1463    1.1      matt  */
   1464    1.1      matt 
   1465    1.1      matt void
   1466   1.73   thorpej pmap_pinit(struct pmap *pmap)
   1467    1.1      matt {
   1468   1.26  rearnsha 	int backoff = 6;
   1469   1.26  rearnsha 	int retry = 10;
   1470   1.26  rearnsha 
   1471    1.1      matt 	PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
   1472    1.1      matt 
   1473    1.1      matt 	/* Keep looping until we succeed in allocating a page directory */
   1474    1.1      matt 	while (pmap_allocpagedir(pmap) != 0) {
   1475    1.1      matt 		/*
   1476    1.1      matt 		 * Ok we failed to allocate a suitable block of memory for an
   1477    1.1      matt 		 * L1 page table. This means that either:
   1478    1.1      matt 		 * 1. 16KB of virtual address space could not be allocated
   1479    1.1      matt 		 * 2. 16KB of physically contiguous memory on a 16KB boundary
   1480    1.1      matt 		 *    could not be allocated.
   1481    1.1      matt 		 *
   1482    1.1      matt 		 * Since we cannot fail we will sleep for a while and try
   1483   1.17     chris 		 * again.
   1484   1.26  rearnsha 		 *
   1485   1.26  rearnsha 		 * Searching for a suitable L1 PT is expensive:
   1486   1.26  rearnsha 		 * to avoid hogging the system when memory is really
   1487   1.26  rearnsha 		 * scarce, use an exponential back-off so that
   1488   1.26  rearnsha 		 * eventually we won't retry more than once every 8
   1489   1.26  rearnsha 		 * seconds.  This should allow other processes to run
   1490   1.26  rearnsha 		 * to completion and free up resources.
   1491    1.1      matt 		 */
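                          		/*
                          		 * Concretely: with backoff starting at 6, the timeout
                          		 * below starts at (hz << 3) >> 6 == hz / 8 (1/8 of a
                          		 * second) and doubles after every ten failures, up to
                          		 * a maximum of hz << 3 (8 seconds).
                          		 */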
   1492   1.26  rearnsha 		(void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
   1493   1.26  rearnsha 		    NULL);
   1494   1.26  rearnsha 		if (--retry == 0) {
   1495   1.26  rearnsha 			retry = 10;
   1496   1.26  rearnsha 			if (backoff)
   1497   1.26  rearnsha 				--backoff;
   1498   1.26  rearnsha 		}
   1499    1.1      matt 	}
   1500    1.1      matt 
   1501   1.76   thorpej 	if (vector_page < KERNEL_BASE) {
   1502   1.76   thorpej 		/*
   1503   1.76   thorpej 		 * Map the vector page.  This will also allocate and map
   1504   1.76   thorpej 		 * an L2 table for it.
   1505   1.76   thorpej 		 */
   1506   1.76   thorpej 		pmap_enter(pmap, vector_page, systempage.pv_pa,
   1507   1.76   thorpej 		    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
   1508   1.76   thorpej 		pmap_update(pmap);
   1509   1.76   thorpej 	}
   1510    1.1      matt }
   1511    1.1      matt 
   1512    1.1      matt void
   1513   1.73   thorpej pmap_freepagedir(struct pmap *pmap)
   1514    1.1      matt {
   1515    1.1      matt 	/* Free the memory used for the page table mapping */
   1516  1.111   thorpej 	if (pmap->pm_vptpt != 0) {
   1517  1.111   thorpej 		/*
   1518  1.111   thorpej 		 * XXX Objects freed to a pool cache must be in constructed
   1519  1.111   thorpej 		 * XXX form when freed, but we don't free page tables as we
   1520  1.111   thorpej 		 * XXX go, so we need to zap the mappings here.
   1521  1.111   thorpej 		 *
   1522  1.111   thorpej 		 * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
   1523  1.111   thorpej 		 */
   1524  1.111   thorpej 		memset((caddr_t) pmap->pm_vptpt, 0,
   1525  1.111   thorpej 		       ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2));
   1526  1.111   thorpej 		pool_cache_put(&pmap_ptpt_cache, (void *) pmap->pm_vptpt);
   1527  1.111   thorpej 	}
   1528    1.1      matt 
   1529    1.1      matt 	/* junk the L1 page table */
   1530    1.1      matt 	if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
   1531    1.1      matt 		/* Add the page table to the queue */
   1532  1.111   thorpej 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue,
   1533  1.111   thorpej 				    pmap->pm_l1pt, pt_queue);
   1534    1.1      matt 		++l1pt_static_queue_count;
   1535    1.1      matt 	} else if (l1pt_queue_count < 8) {
   1536    1.1      matt 		/* Add the page table to the queue */
   1537    1.1      matt 		SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
   1538    1.1      matt 		++l1pt_queue_count;
   1539    1.1      matt 	} else
   1540    1.1      matt 		pmap_free_l1pt(pmap->pm_l1pt);
   1541    1.1      matt }
   1542    1.1      matt 
   1543    1.1      matt /*
   1544    1.1      matt  * Retire the given physical map from service.
   1545    1.1      matt  * Should only be called if the map contains no valid mappings.
   1546    1.1      matt  */
   1547    1.1      matt 
   1548    1.1      matt void
   1549   1.73   thorpej pmap_destroy(struct pmap *pmap)
   1550    1.1      matt {
   1551   1.17     chris 	struct vm_page *page;
   1552    1.1      matt 	int count;
   1553    1.1      matt 
   1554    1.1      matt 	if (pmap == NULL)
   1555    1.1      matt 		return;
   1556    1.1      matt 
   1557    1.1      matt 	PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
   1558   1.17     chris 
   1559   1.17     chris 	/*
   1560   1.17     chris 	 * Drop reference count
   1561   1.17     chris 	 */
   1562   1.17     chris 	simple_lock(&pmap->pm_obj.vmobjlock);
   1563   1.16     chris 	count = --pmap->pm_obj.uo_refs;
   1564   1.17     chris 	simple_unlock(&pmap->pm_obj.vmobjlock);
   1565   1.17     chris 	if (count > 0) {
   1566   1.17     chris 		return;
   1567    1.1      matt 	}
   1568    1.1      matt 
   1569   1.17     chris 	/*
   1570   1.17     chris 	 * reference count is zero, free pmap resources and then free pmap.
   1571   1.17     chris 	 */
   1572   1.48     chris 
   1573   1.48     chris 	/*
   1574   1.48     chris 	 * remove it from global list of pmaps
   1575   1.48     chris 	 */
   1576   1.48     chris 
   1577   1.48     chris 	simple_lock(&pmaps_lock);
   1578   1.48     chris 	LIST_REMOVE(pmap, pm_list);
   1579   1.48     chris 	simple_unlock(&pmaps_lock);
   1580   1.17     chris 
   1581   1.77   thorpej 	if (vector_page < KERNEL_BASE) {
   1582   1.77   thorpej 		/* Remove the vector page mapping */
   1583   1.77   thorpej 		pmap_remove(pmap, vector_page, vector_page + NBPG);
   1584   1.77   thorpej 		pmap_update(pmap);
   1585   1.77   thorpej 	}
   1586    1.1      matt 
   1587    1.1      matt 	/*
    1588    1.1      matt 	 * Free any page tables still mapped.
    1589    1.1      matt 	 * This is only temporary until pmap_enter can count the number
    1590    1.1      matt 	 * of mappings made in a page table.  Then pmap_remove() can
    1591    1.1      matt 	 * reduce the count and free the page table when the count
    1592   1.16     chris 	 * reaches zero.  Note that entries in this list should match the
    1593   1.16     chris 	 * contents of the PTPT; however, walking this list is faster than
    1594   1.16     chris 	 * scanning all 1024 PTPT entries looking for page tables.
    1595   1.16     chris 	 * (This approach is taken from the i386 pmap.c.)
   1596    1.1      matt 	 */
   1597   1.97     chris 	/*
   1598   1.97     chris 	 * vmobjlock must be held while freeing pages
   1599   1.97     chris 	 */
   1600   1.97     chris 	simple_lock(&pmap->pm_obj.vmobjlock);
   1601   1.51     chris 	while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
   1602   1.51     chris 		KASSERT((page->flags & PG_BUSY) == 0);
   1603   1.16     chris 		page->wire_count = 0;
   1604   1.16     chris 		uvm_pagefree(page);
   1605    1.1      matt 	}
   1606   1.97     chris 	simple_unlock(&pmap->pm_obj.vmobjlock);
   1607  1.111   thorpej 
   1608    1.1      matt 	/* Free the page dir */
   1609    1.1      matt 	pmap_freepagedir(pmap);
   1610  1.111   thorpej 
   1611   1.17     chris 	/* return the pmap to the pool */
   1612   1.17     chris 	pool_put(&pmap_pmap_pool, pmap);
   1613    1.1      matt }
   1614    1.1      matt 
   1615    1.1      matt 
   1616    1.1      matt /*
   1617   1.15     chris  * void pmap_reference(struct pmap *pmap)
   1618    1.1      matt  *
   1619    1.1      matt  * Add a reference to the specified pmap.
   1620    1.1      matt  */
   1621    1.1      matt 
   1622    1.1      matt void
   1623   1.73   thorpej pmap_reference(struct pmap *pmap)
   1624    1.1      matt {
   1625    1.1      matt 	if (pmap == NULL)
   1626    1.1      matt 		return;
   1627    1.1      matt 
   1628    1.1      matt 	simple_lock(&pmap->pm_lock);
   1629   1.16     chris 	pmap->pm_obj.uo_refs++;
   1630    1.1      matt 	simple_unlock(&pmap->pm_lock);
   1631    1.1      matt }
   1632    1.1      matt 
   1633    1.1      matt /*
   1634    1.1      matt  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1635    1.1      matt  *
   1636    1.1      matt  * Return the start and end addresses of the kernel's virtual space.
   1637    1.1      matt  * These values are setup in pmap_bootstrap and are updated as pages
   1638    1.1      matt  * are allocated.
   1639    1.1      matt  */
   1640    1.1      matt 
   1641    1.1      matt void
   1642   1.73   thorpej pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1643    1.1      matt {
   1644   1.54   thorpej 	*start = virtual_avail;
   1645    1.1      matt 	*end = virtual_end;
   1646    1.1      matt }
   1647    1.1      matt 
   1648    1.1      matt /*
   1649    1.1      matt  * Activate the address space for the specified process.  If the process
   1650    1.1      matt  * is the current process, load the new MMU context.
   1651    1.1      matt  */
   1652    1.1      matt void
   1653   1.73   thorpej pmap_activate(struct proc *p)
   1654    1.1      matt {
   1655   1.15     chris 	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
   1656    1.1      matt 	struct pcb *pcb = &p->p_addr->u_pcb;
   1657    1.1      matt 
   1658   1.15     chris 	(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
   1659    1.1      matt 	    (paddr_t *)&pcb->pcb_pagedir);
   1660    1.1      matt 
   1661    1.1      matt 	PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
   1662    1.1      matt 	    p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
   1663    1.1      matt 
   1664    1.1      matt 	if (p == curproc) {
   1665    1.1      matt 		PDEBUG(0, printf("pmap_activate: setting TTB\n"));
   1666    1.1      matt 		setttb((u_int)pcb->pcb_pagedir);
   1667    1.1      matt 	}
   1668    1.1      matt }
   1669    1.1      matt 
   1670    1.1      matt /*
   1671    1.1      matt  * Deactivate the address space of the specified process.
   1672    1.1      matt  */
   1673    1.1      matt void
   1674   1.73   thorpej pmap_deactivate(struct proc *p)
   1675    1.1      matt {
   1676    1.1      matt }
   1677    1.1      matt 
   1678   1.31   thorpej /*
   1679   1.31   thorpej  * Perform any deferred pmap operations.
   1680   1.31   thorpej  */
   1681   1.31   thorpej void
   1682   1.31   thorpej pmap_update(struct pmap *pmap)
   1683   1.31   thorpej {
   1684   1.31   thorpej 
   1685   1.31   thorpej 	/*
   1686   1.31   thorpej 	 * We haven't deferred any pmap operations, but we do need to
   1687   1.31   thorpej 	 * make sure TLB/cache operations have completed.
   1688   1.31   thorpej 	 */
   1689   1.31   thorpej 	cpu_cpwait();
   1690   1.31   thorpej }
   1691    1.1      matt 
   1692    1.1      matt /*
   1693    1.1      matt  * pmap_clean_page()
   1694    1.1      matt  *
   1695    1.1      matt  * This is a local function used to work out the best strategy to clean
   1696    1.1      matt  * a single page referenced by its entry in the PV table. It's used by
    1697    1.1      matt  * pmap_copy_page, pmap_zero_page and maybe some others later on.
   1698    1.1      matt  *
   1699    1.1      matt  * Its policy is effectively:
   1700    1.1      matt  *  o If there are no mappings, we don't bother doing anything with the cache.
   1701    1.1      matt  *  o If there is one mapping, we clean just that page.
   1702    1.1      matt  *  o If there are multiple mappings, we clean the entire cache.
   1703    1.1      matt  *
   1704    1.1      matt  * So that some functions can be further optimised, it returns 0 if it didn't
   1705    1.1      matt  * clean the entire cache, or 1 if it did.
   1706    1.1      matt  *
   1707    1.1      matt  * XXX One bug in this routine is that if the pv_entry has a single page
    1708    1.1      matt  * mapped at 0x00000000, a whole cache clean will be performed rather than
    1709    1.1      matt  * just the one page.  This should not occur in everyday use, and if it
    1710    1.1      matt  * does, the result is merely a less efficient clean for that page.
   1711    1.1      matt  */
   1712    1.1      matt static int
   1713   1.73   thorpej pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
   1714    1.1      matt {
   1715   1.17     chris 	struct pmap *pmap;
   1716   1.17     chris 	struct pv_entry *npv;
   1717    1.1      matt 	int cache_needs_cleaning = 0;
   1718    1.1      matt 	vaddr_t page_to_clean = 0;
   1719    1.1      matt 
   1720  1.108   thorpej 	if (pv == NULL) {
   1721   1.17     chris 		/* nothing mapped in so nothing to flush */
   1722   1.17     chris 		return (0);
   1723  1.108   thorpej 	}
   1724   1.17     chris 
   1725  1.108   thorpej 	/*
   1726  1.108   thorpej 	 * Since we flush the cache each time we change curproc, we
   1727   1.17     chris 	 * only need to flush the page if it is in the current pmap.
   1728   1.17     chris 	 */
   1729   1.17     chris 	if (curproc)
   1730   1.17     chris 		pmap = curproc->p_vmspace->vm_map.pmap;
   1731   1.17     chris 	else
   1732   1.17     chris 		pmap = pmap_kernel();
   1733   1.17     chris 
   1734   1.17     chris 	for (npv = pv; npv; npv = npv->pv_next) {
   1735   1.17     chris 		if (npv->pv_pmap == pmap) {
   1736  1.108   thorpej 			/*
   1737  1.108   thorpej 			 * The page is mapped non-cacheable in
   1738   1.17     chris 			 * this map.  No need to flush the cache.
   1739   1.17     chris 			 */
   1740   1.78   thorpej 			if (npv->pv_flags & PVF_NC) {
   1741   1.17     chris #ifdef DIAGNOSTIC
   1742   1.17     chris 				if (cache_needs_cleaning)
   1743   1.17     chris 					panic("pmap_clean_page: "
   1744  1.108   thorpej 					    "cache inconsistency");
   1745   1.17     chris #endif
   1746   1.17     chris 				break;
   1747  1.108   thorpej 			} else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
   1748   1.17     chris 				continue;
   1749  1.108   thorpej 			if (cache_needs_cleaning) {
   1750   1.17     chris 				page_to_clean = 0;
   1751   1.17     chris 				break;
   1752  1.108   thorpej 			} else
   1753   1.17     chris 				page_to_clean = npv->pv_va;
   1754   1.17     chris 			cache_needs_cleaning = 1;
   1755   1.17     chris 		}
   1756    1.1      matt 	}
   1757    1.1      matt 
   1758  1.108   thorpej 	if (page_to_clean) {
   1759  1.108   thorpej 		/*
   1760  1.108   thorpej 		 * XXX If is_src, we really only need to write-back,
   1761  1.108   thorpej 		 * XXX not invalidate, too.  Investigate further.
   1762  1.108   thorpej 		 * XXX --thorpej (at) netbsd.org
   1763  1.108   thorpej 		 */
   1764   1.36   thorpej 		cpu_idcache_wbinv_range(page_to_clean, NBPG);
   1765  1.108   thorpej 	} else if (cache_needs_cleaning) {
   1766   1.36   thorpej 		cpu_idcache_wbinv_all();
   1767    1.1      matt 		return (1);
   1768    1.1      matt 	}
   1769    1.1      matt 	return (0);
   1770    1.1      matt }
   1771    1.1      matt 
   1772    1.1      matt /*
   1773    1.1      matt  * pmap_zero_page()
   1774    1.1      matt  *
   1775    1.1      matt  * Zero a given physical page by mapping it at a page hook point.
    1776    1.1      matt  * In doing the zero page op, the page we zero is mapped cacheable, since
    1777    1.1      matt  * on StrongARM accesses to non-cached pages are non-burst, making the
    1778    1.1      matt  * writing of _any_ bulk data very slow.
   1779    1.1      matt  */
   1780   1.88   thorpej #if ARM_MMU_GENERIC == 1
   1781    1.1      matt void
   1782   1.88   thorpej pmap_zero_page_generic(paddr_t phys)
   1783    1.1      matt {
   1784   1.71   thorpej #ifdef DEBUG
   1785   1.71   thorpej 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
   1786   1.71   thorpej 
   1787   1.71   thorpej 	if (pg->mdpage.pvh_list != NULL)
   1788   1.71   thorpej 		panic("pmap_zero_page: page has mappings");
   1789   1.71   thorpej #endif
   1790    1.1      matt 
   1791   1.79   thorpej 	KDASSERT((phys & PGOFSET) == 0);
   1792   1.79   thorpej 
   1793    1.1      matt 	/*
   1794    1.1      matt 	 * Hook in the page, zero it, and purge the cache for that
   1795    1.1      matt 	 * zeroed page. Invalidate the TLB as needed.
   1796    1.1      matt 	 */
   1797   1.83   thorpej 	*cdst_pte = L2_S_PROTO | phys |
   1798   1.86   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   1799   1.54   thorpej 	cpu_tlb_flushD_SE(cdstp);
   1800   1.32   thorpej 	cpu_cpwait();
   1801   1.54   thorpej 	bzero_page(cdstp);
   1802   1.54   thorpej 	cpu_dcache_wbinv_range(cdstp, NBPG);
   1803    1.1      matt }
   1804   1.88   thorpej #endif /* ARM_MMU_GENERIC == 1 */
   1805   1.88   thorpej 
   1806   1.88   thorpej #if ARM_MMU_XSCALE == 1
   1807   1.88   thorpej void
   1808   1.88   thorpej pmap_zero_page_xscale(paddr_t phys)
   1809   1.88   thorpej {
   1810   1.88   thorpej #ifdef DEBUG
   1811   1.88   thorpej 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
   1812   1.88   thorpej 
   1813   1.88   thorpej 	if (pg->mdpage.pvh_list != NULL)
   1814   1.88   thorpej 		panic("pmap_zero_page: page has mappings");
   1815   1.88   thorpej #endif
   1816   1.88   thorpej 
   1817   1.88   thorpej 	KDASSERT((phys & PGOFSET) == 0);
   1818   1.88   thorpej 
   1819   1.88   thorpej 	/*
   1820   1.88   thorpej 	 * Hook in the page, zero it, and purge the cache for that
   1821   1.88   thorpej 	 * zeroed page. Invalidate the TLB as needed.
   1822   1.88   thorpej 	 */
   1823   1.88   thorpej 	*cdst_pte = L2_S_PROTO | phys |
   1824   1.88   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
   1825   1.88   thorpej 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   1826   1.88   thorpej 	cpu_tlb_flushD_SE(cdstp);
   1827   1.88   thorpej 	cpu_cpwait();
   1828   1.88   thorpej 	bzero_page(cdstp);
   1829   1.88   thorpej 	xscale_cache_clean_minidata();
   1830   1.88   thorpej }
   1831   1.88   thorpej #endif /* ARM_MMU_XSCALE == 1 */
   1832    1.1      matt 
    1833   1.17     chris /*
                           * pmap_pageidlezero()
   1834   1.17     chris  *
   1835   1.17     chris  * The same as above, except that we assume that the page is not
   1836   1.17     chris  * mapped.  This means we never have to flush the cache first.  Called
   1837   1.17     chris  * from the idle loop.
   1838   1.17     chris  */
   1839   1.17     chris boolean_t
   1840   1.73   thorpej pmap_pageidlezero(paddr_t phys)
   1841   1.17     chris {
   1842   1.17     chris 	int i, *ptr;
   1843   1.17     chris 	boolean_t rv = TRUE;
   1844   1.71   thorpej #ifdef DEBUG
   1845   1.49   thorpej 	struct vm_page *pg;
   1846   1.17     chris 
   1847   1.49   thorpej 	pg = PHYS_TO_VM_PAGE(phys);
   1848   1.49   thorpej 	if (pg->mdpage.pvh_list != NULL)
   1849   1.71   thorpej 		panic("pmap_pageidlezero: page has mappings");
   1850   1.17     chris #endif
   1851   1.79   thorpej 
   1852   1.79   thorpej 	KDASSERT((phys & PGOFSET) == 0);
   1853   1.79   thorpej 
   1854   1.17     chris 	/*
   1855   1.17     chris 	 * Hook in the page, zero it, and purge the cache for that
   1856   1.17     chris 	 * zeroed page. Invalidate the TLB as needed.
   1857   1.17     chris 	 */
   1858   1.83   thorpej 	*cdst_pte = L2_S_PROTO | phys |
   1859   1.86   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   1860   1.54   thorpej 	cpu_tlb_flushD_SE(cdstp);
   1861   1.32   thorpej 	cpu_cpwait();
   1862   1.32   thorpej 
   1863   1.54   thorpej 	for (i = 0, ptr = (int *)cdstp;
   1864   1.17     chris 			i < (NBPG / sizeof(int)); i++) {
   1865   1.17     chris 		if (sched_whichqs != 0) {
   1866   1.17     chris 			/*
   1867   1.17     chris 			 * A process has become ready.  Abort now,
   1868   1.17     chris 			 * so we don't keep it waiting while we
   1869   1.17     chris 			 * do slow memory access to finish this
   1870   1.17     chris 			 * page.
   1871   1.17     chris 			 */
   1872   1.17     chris 			rv = FALSE;
   1873   1.17     chris 			break;
   1874   1.17     chris 		}
   1875   1.17     chris 		*ptr++ = 0;
   1876   1.17     chris 	}
   1877   1.17     chris 
   1878   1.17     chris 	if (rv)
   1879   1.17     chris 		/*
   1880   1.17     chris 		 * if we aborted we'll rezero this page again later so don't
   1881   1.17     chris 		 * purge it unless we finished it
   1882   1.17     chris 		 */
   1883   1.54   thorpej 		cpu_dcache_wbinv_range(cdstp, NBPG);
   1884   1.17     chris 	return (rv);
   1885   1.17     chris }
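                          
                          /*
                           * (This is reached from the machine-independent idle loop via
                           * uvm_pageidlezero(), assuming the port defines the
                           * PMAP_PAGEIDLEZERO() hook to point here.)
                           */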
   1886   1.17     chris 
   1887    1.1      matt /*
   1888    1.1      matt  * pmap_copy_page()
   1889    1.1      matt  *
   1890    1.1      matt  * Copy one physical page into another, by mapping the pages into
    1891    1.1      matt  * hook points. The same comment regarding cacheability as in
   1892    1.1      matt  * pmap_zero_page also applies here.
   1893    1.1      matt  */
   1894   1.88   thorpej #if ARM_MMU_GENERIC == 1
   1895    1.1      matt void
   1896   1.88   thorpej pmap_copy_page_generic(paddr_t src, paddr_t dst)
   1897    1.1      matt {
   1898   1.71   thorpej 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
   1899   1.71   thorpej #ifdef DEBUG
   1900   1.71   thorpej 	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
   1901   1.71   thorpej 
   1902   1.71   thorpej 	if (dst_pg->mdpage.pvh_list != NULL)
   1903   1.71   thorpej 		panic("pmap_copy_page: dst page has mappings");
   1904   1.71   thorpej #endif
   1905   1.71   thorpej 
   1906   1.79   thorpej 	KDASSERT((src & PGOFSET) == 0);
   1907   1.79   thorpej 	KDASSERT((dst & PGOFSET) == 0);
   1908   1.79   thorpej 
   1909   1.71   thorpej 	/*
   1910   1.71   thorpej 	 * Clean the source page.  Hold the source page's lock for
   1911   1.71   thorpej 	 * the duration of the copy so that no other mappings can
   1912   1.71   thorpej 	 * be created while we have a potentially aliased mapping.
   1913   1.71   thorpej 	 */
   1914   1.49   thorpej 	simple_lock(&src_pg->mdpage.pvh_slock);
   1915   1.71   thorpej 	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
   1916    1.1      matt 
   1917    1.1      matt 	/*
   1918    1.1      matt 	 * Map the pages into the page hook points, copy them, and purge
   1919    1.1      matt 	 * the cache for the appropriate page. Invalidate the TLB
   1920    1.1      matt 	 * as required.
   1921    1.1      matt 	 */
   1922   1.83   thorpej 	*csrc_pte = L2_S_PROTO | src |
   1923   1.86   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
   1924   1.83   thorpej 	*cdst_pte = L2_S_PROTO | dst |
   1925   1.86   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   1926   1.54   thorpej 	cpu_tlb_flushD_SE(csrcp);
   1927   1.54   thorpej 	cpu_tlb_flushD_SE(cdstp);
   1928   1.32   thorpej 	cpu_cpwait();
   1929   1.54   thorpej 	bcopy_page(csrcp, cdstp);
   1930   1.65     chris 	cpu_dcache_inv_range(csrcp, NBPG);
   1931   1.71   thorpej 	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
   1932   1.54   thorpej 	cpu_dcache_wbinv_range(cdstp, NBPG);
   1933    1.1      matt }
   1934   1.88   thorpej #endif /* ARM_MMU_GENERIC == 1 */
   1935   1.88   thorpej 
   1936   1.88   thorpej #if ARM_MMU_XSCALE == 1
   1937   1.88   thorpej void
   1938   1.88   thorpej pmap_copy_page_xscale(paddr_t src, paddr_t dst)
   1939   1.88   thorpej {
   1940   1.88   thorpej 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
   1941   1.88   thorpej #ifdef DEBUG
   1942   1.88   thorpej 	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
   1943   1.88   thorpej 
   1944   1.88   thorpej 	if (dst_pg->mdpage.pvh_list != NULL)
   1945   1.88   thorpej 		panic("pmap_copy_page: dst page has mappings");
   1946   1.88   thorpej #endif
   1947   1.88   thorpej 
   1948   1.88   thorpej 	KDASSERT((src & PGOFSET) == 0);
   1949   1.88   thorpej 	KDASSERT((dst & PGOFSET) == 0);
   1950   1.88   thorpej 
   1951   1.88   thorpej 	/*
   1952   1.88   thorpej 	 * Clean the source page.  Hold the source page's lock for
   1953   1.88   thorpej 	 * the duration of the copy so that no other mappings can
   1954   1.88   thorpej 	 * be created while we have a potentially aliased mapping.
   1955   1.88   thorpej 	 */
   1956   1.88   thorpej 	simple_lock(&src_pg->mdpage.pvh_slock);
   1957   1.88   thorpej 	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
   1958   1.88   thorpej 
   1959   1.88   thorpej 	/*
   1960   1.88   thorpej 	 * Map the pages into the page hook points, copy them, and purge
   1961   1.88   thorpej 	 * the cache for the appropriate page. Invalidate the TLB
   1962   1.88   thorpej 	 * as required.
   1963   1.88   thorpej 	 */
   1964   1.88   thorpej 	*csrc_pte = L2_S_PROTO | src |
   1965   1.89   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
   1966   1.89   thorpej 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   1967   1.88   thorpej 	*cdst_pte = L2_S_PROTO | dst |
   1968   1.88   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
   1969   1.88   thorpej 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   1970   1.88   thorpej 	cpu_tlb_flushD_SE(csrcp);
   1971   1.88   thorpej 	cpu_tlb_flushD_SE(cdstp);
   1972   1.88   thorpej 	cpu_cpwait();
   1973   1.88   thorpej 	bcopy_page(csrcp, cdstp);
   1974   1.88   thorpej 	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
   1975   1.88   thorpej 	xscale_cache_clean_minidata();
   1976   1.88   thorpej }
   1977   1.88   thorpej #endif /* ARM_MMU_XSCALE == 1 */
   1978    1.1      matt 
   1979    1.1      matt #if 0
   1980    1.1      matt void
   1981   1.73   thorpej pmap_pte_addref(struct pmap *pmap, vaddr_t va)
   1982    1.1      matt {
   1983    1.1      matt 	pd_entry_t *pde;
   1984    1.2      matt 	paddr_t pa;
   1985    1.1      matt 	struct vm_page *m;
   1986    1.1      matt 
   1987    1.1      matt 	if (pmap == pmap_kernel())
   1988    1.1      matt 		return;
   1989    1.1      matt 
   1990   1.81   thorpej 	pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
   1991    1.1      matt 	pa = pmap_pte_pa(pde);
   1992    1.1      matt 	m = PHYS_TO_VM_PAGE(pa);
   1993    1.1      matt 	++m->wire_count;
   1994    1.1      matt #ifdef MYCROFT_HACK
   1995    1.1      matt 	printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   1996    1.1      matt 	    pmap, va, pde, pa, m, m->wire_count);
   1997    1.1      matt #endif
   1998    1.1      matt }
   1999    1.1      matt 
   2000    1.1      matt void
   2001   1.73   thorpej pmap_pte_delref(struct pmap *pmap, vaddr_t va)
   2002    1.1      matt {
   2003    1.1      matt 	pd_entry_t *pde;
   2004    1.2      matt 	paddr_t pa;
   2005    1.1      matt 	struct vm_page *m;
   2006    1.1      matt 
   2007    1.1      matt 	if (pmap == pmap_kernel())
   2008    1.1      matt 		return;
   2009    1.1      matt 
   2010   1.81   thorpej 	pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
   2011    1.1      matt 	pa = pmap_pte_pa(pde);
   2012    1.1      matt 	m = PHYS_TO_VM_PAGE(pa);
   2013    1.1      matt 	--m->wire_count;
   2014    1.1      matt #ifdef MYCROFT_HACK
   2015    1.1      matt 	printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   2016    1.1      matt 	    pmap, va, pde, pa, m, m->wire_count);
   2017    1.1      matt #endif
   2018    1.1      matt 	if (m->wire_count == 0) {
   2019    1.1      matt #ifdef MYCROFT_HACK
   2020    1.1      matt 		printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
   2021    1.1      matt 		    pmap, va, pde, pa, m);
   2022    1.1      matt #endif
   2023    1.1      matt 		pmap_unmap_in_l1(pmap, va);
   2024    1.1      matt 		uvm_pagefree(m);
   2025    1.1      matt 		--pmap->pm_stats.resident_count;
   2026    1.1      matt 	}
   2027    1.1      matt }
   2028    1.1      matt #else
   2029    1.1      matt #define	pmap_pte_addref(pmap, va)
   2030    1.1      matt #define	pmap_pte_delref(pmap, va)
   2031    1.1      matt #endif
   2032    1.1      matt 
   2033    1.1      matt /*
   2034    1.1      matt  * Since we have a virtually indexed cache, we may need to inhibit caching if
   2035    1.1      matt  * there is more than one mapping and at least one of them is writable.
   2036    1.1      matt  * Since we purge the cache on every context switch, we only need to check for
   2037    1.1      matt  * other mappings within the same pmap, or kernel_pmap.
   2038    1.1      matt  * This function is also called when a page is unmapped, to possibly reenable
   2039    1.1      matt  * caching on any remaining mappings.
   2040   1.28  rearnsha  *
   2041   1.28  rearnsha  * The code implements the following logic, where:
   2042   1.28  rearnsha  *
   2043   1.28  rearnsha  * KW = # of kernel read/write pages
   2044   1.28  rearnsha  * KR = # of kernel read only pages
   2045   1.28  rearnsha  * UW = # of user read/write pages
   2046   1.28  rearnsha  * UR = # of user read only pages
   2047   1.28  rearnsha  * OW = # of user read/write pages in another pmap, then
   2048   1.28  rearnsha  *
   2049   1.28  rearnsha  * KC = kernel mapping is cacheable
   2050   1.28  rearnsha  * UC = user mapping is cacheable
   2051   1.28  rearnsha  *
   2052   1.28  rearnsha  *                     KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
   2053   1.28  rearnsha  *                   +---------------------------------------------
   2054   1.28  rearnsha  * UW=0,UR=0,OW=0    | ---        KC=1       KC=1       KC=0
   2055   1.28  rearnsha  * UW=0,UR>0,OW=0    | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
   2056   1.28  rearnsha  * UW=0,UR>0,OW>0    | UC=1       KC=0,UC=1  KC=0,UC=0  KC=0,UC=0
   2057   1.28  rearnsha  * UW=1,UR=0,OW=0    | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
   2058   1.28  rearnsha  * UW>1,UR>=0,OW>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
   2059   1.11     chris  *
    2060   1.11     chris  * Note that the pmap must have its PTEs mapped in, and passed in via ptes.
   2061    1.1      matt  */
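                          /*
                           * For example, reading from the table above: a page with one kernel
                           * read/write mapping (KW=1, KR=0) and one user read-only mapping
                           * (UW=0, UR>0, OW=0) must be made non-cacheable in both (KC=0, UC=0).
                           */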
   2062   1.25  rearnsha __inline static void
   2063   1.49   thorpej pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2064   1.12     chris 	boolean_t clear_cache)
   2065    1.1      matt {
   2066   1.25  rearnsha 	if (pmap == pmap_kernel())
   2067   1.49   thorpej 		pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
   2068   1.25  rearnsha 	else
   2069   1.49   thorpej 		pmap_vac_me_user(pmap, pg, ptes, clear_cache);
   2070   1.25  rearnsha }
   2071   1.25  rearnsha 
   2072   1.25  rearnsha static void
   2073   1.49   thorpej pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2074   1.25  rearnsha 	boolean_t clear_cache)
   2075   1.25  rearnsha {
   2076   1.25  rearnsha 	int user_entries = 0;
   2077   1.25  rearnsha 	int user_writable = 0;
   2078   1.25  rearnsha 	int user_cacheable = 0;
   2079   1.25  rearnsha 	int kernel_entries = 0;
   2080   1.25  rearnsha 	int kernel_writable = 0;
   2081   1.25  rearnsha 	int kernel_cacheable = 0;
   2082   1.25  rearnsha 	struct pv_entry *pv;
   2083   1.25  rearnsha 	struct pmap *last_pmap = pmap;
   2084   1.25  rearnsha 
   2085   1.25  rearnsha #ifdef DIAGNOSTIC
   2086   1.25  rearnsha 	if (pmap != pmap_kernel())
   2087   1.25  rearnsha 		panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
   2088   1.25  rearnsha #endif
   2089   1.25  rearnsha 
   2090   1.25  rearnsha 	/*
   2091   1.25  rearnsha 	 * Pass one, see if there are both kernel and user pmaps for
   2092   1.25  rearnsha 	 * this page.  Calculate whether there are user-writable or
   2093   1.25  rearnsha 	 * kernel-writable pages.
   2094   1.25  rearnsha 	 */
   2095   1.49   thorpej 	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
   2096   1.25  rearnsha 		if (pv->pv_pmap != pmap) {
   2097   1.25  rearnsha 			user_entries++;
   2098   1.78   thorpej 			if (pv->pv_flags & PVF_WRITE)
   2099   1.25  rearnsha 				user_writable++;
   2100   1.78   thorpej 			if ((pv->pv_flags & PVF_NC) == 0)
   2101   1.25  rearnsha 				user_cacheable++;
   2102   1.25  rearnsha 		} else {
   2103   1.25  rearnsha 			kernel_entries++;
   2104   1.78   thorpej 			if (pv->pv_flags & PVF_WRITE)
   2105   1.25  rearnsha 				kernel_writable++;
   2106   1.78   thorpej 			if ((pv->pv_flags & PVF_NC) == 0)
   2107   1.25  rearnsha 				kernel_cacheable++;
   2108   1.25  rearnsha 		}
   2109   1.25  rearnsha 	}
   2110   1.25  rearnsha 
   2111   1.25  rearnsha 	/*
   2112   1.25  rearnsha 	 * We know we have just been updating a kernel entry, so if
   2113   1.25  rearnsha 	 * all user pages are already cacheable, then there is nothing
   2114   1.25  rearnsha 	 * further to do.
   2115   1.25  rearnsha 	 */
   2116   1.25  rearnsha 	if (kernel_entries == 0 &&
   2117   1.25  rearnsha 	    user_cacheable == user_entries)
   2118   1.25  rearnsha 		return;
   2119   1.25  rearnsha 
   2120   1.25  rearnsha 	if (user_entries) {
   2121   1.25  rearnsha 		/*
   2122   1.25  rearnsha 		 * Scan over the list again, for each entry, if it
   2123   1.25  rearnsha 		 * might not be set correctly, call pmap_vac_me_user
   2124   1.25  rearnsha 		 * to recalculate the settings.
   2125   1.25  rearnsha 		 */
   2126   1.49   thorpej 		for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
   2127   1.25  rearnsha 			/*
   2128   1.25  rearnsha 			 * We know kernel mappings will get set
   2129   1.25  rearnsha 			 * correctly in other calls.  We also know
   2130   1.25  rearnsha 			 * that if the pmap is the same as last_pmap
   2131   1.25  rearnsha 			 * then we've just handled this entry.
   2132   1.25  rearnsha 			 */
   2133   1.25  rearnsha 			if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
   2134   1.25  rearnsha 				continue;
   2135   1.25  rearnsha 			/*
   2136   1.25  rearnsha 			 * If there are kernel entries and this page
   2137   1.25  rearnsha 			 * is writable but non-cacheable, then we can
   2138   1.25  rearnsha 			 * skip this entry also.
   2139   1.25  rearnsha 			 */
   2140   1.25  rearnsha 			if (kernel_entries > 0 &&
   2141   1.78   thorpej 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
   2142   1.78   thorpej 			    (PVF_NC | PVF_WRITE))
   2143   1.25  rearnsha 				continue;
   2144   1.25  rearnsha 			/*
   2145   1.25  rearnsha 			 * Similarly if there are no kernel-writable
   2146   1.25  rearnsha 			 * entries and the page is already
   2147   1.25  rearnsha 			 * read-only/cacheable.
   2148   1.25  rearnsha 			 */
   2149   1.25  rearnsha 			if (kernel_writable == 0 &&
   2150   1.78   thorpej 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
   2151   1.25  rearnsha 				continue;
   2152   1.25  rearnsha 			/*
   2153   1.25  rearnsha 			 * For some of the remaining cases, we know
   2154   1.25  rearnsha 			 * that we must recalculate, but for others we
   2155   1.25  rearnsha 			 * can't tell if they are correct or not, so
   2156   1.25  rearnsha 			 * we recalculate anyway.
   2157   1.25  rearnsha 			 */
   2158   1.25  rearnsha 			pmap_unmap_ptes(last_pmap);
   2159   1.25  rearnsha 			last_pmap = pv->pv_pmap;
   2160   1.25  rearnsha 			ptes = pmap_map_ptes(last_pmap);
   2161   1.49   thorpej 			pmap_vac_me_user(last_pmap, pg, ptes,
   2162   1.25  rearnsha 			    pmap_is_curpmap(last_pmap));
   2163   1.25  rearnsha 		}
   2164   1.25  rearnsha 		/* Restore the pte mapping that was passed to us.  */
   2165   1.25  rearnsha 		if (last_pmap != pmap) {
   2166   1.25  rearnsha 			pmap_unmap_ptes(last_pmap);
   2167   1.25  rearnsha 			ptes = pmap_map_ptes(pmap);
   2168   1.25  rearnsha 		}
   2169   1.25  rearnsha 		if (kernel_entries == 0)
   2170   1.25  rearnsha 			return;
   2171   1.25  rearnsha 	}
   2172   1.25  rearnsha 
   2173   1.49   thorpej 	pmap_vac_me_user(pmap, pg, ptes, clear_cache);
   2174   1.25  rearnsha 	return;
   2175   1.25  rearnsha }
   2176   1.25  rearnsha 
   2177   1.25  rearnsha static void
   2178   1.49   thorpej pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2179   1.25  rearnsha 	boolean_t clear_cache)
   2180   1.25  rearnsha {
   2181   1.25  rearnsha 	struct pmap *kpmap = pmap_kernel();
   2182   1.17     chris 	struct pv_entry *pv, *npv;
   2183    1.1      matt 	int entries = 0;
   2184   1.25  rearnsha 	int writable = 0;
   2185   1.12     chris 	int cacheable_entries = 0;
   2186   1.25  rearnsha 	int kern_cacheable = 0;
   2187   1.25  rearnsha 	int other_writable = 0;
   2188    1.1      matt 
   2189   1.49   thorpej 	pv = pg->mdpage.pvh_list;
   2190   1.11     chris 	KASSERT(ptes != NULL);
   2191    1.1      matt 
   2192    1.1      matt 	/*
   2193    1.1      matt 	 * Count mappings and writable mappings in this pmap.
   2194   1.25  rearnsha 	 * Include kernel mappings as part of our own.
   2195    1.1      matt 	 * Keep a pointer to the first one.
   2196    1.1      matt 	 */
   2197    1.1      matt 	for (npv = pv; npv; npv = npv->pv_next) {
   2198    1.1      matt 		/* Count mappings in the same pmap */
   2199   1.25  rearnsha 		if (pmap == npv->pv_pmap ||
   2200   1.25  rearnsha 		    kpmap == npv->pv_pmap) {
   2201    1.1      matt 			if (entries++ == 0)
   2202    1.1      matt 				pv = npv;
   2203   1.12     chris 			/* Cacheable mappings */
   2204   1.78   thorpej 			if ((npv->pv_flags & PVF_NC) == 0) {
   2205   1.12     chris 				cacheable_entries++;
   2206   1.25  rearnsha 				if (kpmap == npv->pv_pmap)
   2207   1.25  rearnsha 					kern_cacheable++;
   2208   1.25  rearnsha 			}
   2209   1.25  rearnsha 			/* Writable mappings */
   2210   1.78   thorpej 			if (npv->pv_flags & PVF_WRITE)
   2211   1.25  rearnsha 				++writable;
   2212   1.78   thorpej 		} else if (npv->pv_flags & PVF_WRITE)
   2213   1.25  rearnsha 			other_writable = 1;
   2214    1.1      matt 	}
   2215    1.1      matt 
   2216   1.12     chris 	PDEBUG(3, printf("pmap_vac_me_user: pmap %p Entries %d, "
   2217   1.25  rearnsha 		"writable %d cacheable %d %s\n", pmap, entries, writable,
   2218   1.12     chris 	    	cacheable_entries, clear_cache ? "clean" : "no clean"));
   2219   1.12     chris 
   2220    1.1      matt 	/*
   2221    1.1      matt 	 * Enable or disable caching as necessary.
   2222   1.25  rearnsha 	 * Note: the first entry might be part of the kernel pmap,
   2223   1.25  rearnsha 	 * so we can't assume this is indicative of the state of the
   2224   1.25  rearnsha 	 * other (maybe non-kpmap) entries.
   2225    1.1      matt 	 */
   2226   1.25  rearnsha 	if ((entries > 1 && writable) ||
   2227   1.25  rearnsha 	    (entries > 0 && pmap == kpmap && other_writable)) {
   2228   1.12     chris 		if (cacheable_entries == 0)
   2229   1.12     chris 		    return;
   2230   1.25  rearnsha 		for (npv = pv; npv; npv = npv->pv_next) {
   2231   1.25  rearnsha 			if ((pmap == npv->pv_pmap
   2232   1.25  rearnsha 			    || kpmap == npv->pv_pmap) &&
   2233   1.78   thorpej 			    (npv->pv_flags & PVF_NC) == 0) {
   2234   1.91   thorpej 				ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK;
   2235   1.78   thorpej  				npv->pv_flags |= PVF_NC;
   2236   1.25  rearnsha 				/*
   2237   1.25  rearnsha 				 * If this page needs flushing from the
   2238   1.25  rearnsha 				 * cache, and we aren't going to do it
   2239   1.25  rearnsha 				 * below, do it now.
   2240   1.25  rearnsha 				 */
   2241   1.25  rearnsha 				if ((cacheable_entries < 4 &&
   2242   1.25  rearnsha 				    (clear_cache || npv->pv_pmap == kpmap)) ||
   2243   1.25  rearnsha 				    (npv->pv_pmap == kpmap &&
   2244   1.25  rearnsha 				    !clear_cache && kern_cacheable < 4)) {
   2245   1.36   thorpej 					cpu_idcache_wbinv_range(npv->pv_va,
   2246   1.12     chris 					    NBPG);
   2247   1.12     chris 					cpu_tlb_flushID_SE(npv->pv_va);
   2248   1.12     chris 				}
   2249    1.1      matt 			}
   2250    1.1      matt 		}
   2251   1.25  rearnsha 		if ((clear_cache && cacheable_entries >= 4) ||
   2252   1.25  rearnsha 		    kern_cacheable >= 4) {
   2253   1.36   thorpej 			cpu_idcache_wbinv_all();
   2254   1.12     chris 			cpu_tlb_flushID();
   2255   1.12     chris 		}
   2256   1.32   thorpej 		cpu_cpwait();
   2257    1.1      matt 	} else if (entries > 0) {
   2258   1.25  rearnsha 		/*
   2259   1.25  rearnsha 		 * Turn caching back on for some pages.  If it is a kernel
   2260   1.25  rearnsha 		 * mapping, only do so if there are no other writable mappings.
   2261   1.25  rearnsha 		 */
   2262   1.25  rearnsha 		for (npv = pv; npv; npv = npv->pv_next) {
   2263   1.25  rearnsha 			if ((pmap == npv->pv_pmap ||
   2264   1.25  rearnsha 			    (kpmap == npv->pv_pmap && other_writable == 0)) &&
   2265   1.78   thorpej 			    (npv->pv_flags & PVF_NC)) {
   2266   1.86   thorpej 				ptes[arm_btop(npv->pv_va)] |=
   2267   1.86   thorpej 				    pte_l2_s_cache_mode;
   2268   1.78   thorpej 				npv->pv_flags &= ~PVF_NC;
   2269    1.1      matt 			}
   2270    1.1      matt 		}
   2271    1.1      matt 	}
   2272    1.1      matt }
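                         /*
                          * Illustrative note (not in the original source): with the logic
                          * above, once a page has more than one relevant mapping and at
                          * least one of them is writable, each such mapping has
                          * L2_S_CACHE_MASK stripped from its PTE and PVF_NC set in its
                          * pv entry; when the aliasing goes away, the second loop restores
                          * pte_l2_s_cache_mode and clears PVF_NC again.
                          */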
   2273    1.1      matt 
   2274    1.1      matt /*
   2275    1.1      matt  * pmap_remove()
   2276    1.1      matt  *
   2277    1.1      matt  * pmap_remove is responsible for nuking a number of mappings for a range
   2278    1.1      matt  * of virtual address space in the current pmap. To do this efficiently
   2279    1.1      matt  * is interesting, because in a number of cases a wide virtual address
   2280    1.1      matt  * range may be supplied that contains few actual mappings. So, the
   2281    1.1      matt  * optimisations are:
   2282    1.1      matt  *  1. Try and skip over hunks of address space for which an L1 entry
   2283    1.1      matt  *     does not exist.
   2284    1.1      matt  *  2. Build up a list of pages we've hit, up to a maximum, so we can
   2285    1.1      matt  *     maybe do just a partial cache clean. This path of execution is
   2286    1.1      matt  *     complicated by the fact that the cache must be flushed _before_
   2287    1.1      matt  *     the PTE is nuked, being a VAC :-)
   2288    1.1      matt  *  3. Maybe later fast-case a single page, but I don't think this is
   2289    1.1      matt  *     going to make _that_ much difference overall.
   2290    1.1      matt  */
   2291    1.1      matt 
   2292    1.1      matt #define PMAP_REMOVE_CLEAN_LIST_SIZE	3
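                         /*
                          * Sketch of the clean-list behaviour below (illustrative, not
                          * part of the original code); with the list size set to 3,
                          * removals behave roughly as:
                          *
                          *	cleanlist_idx < 3:  record the VA/PTE pair; the per-page
                          *			    cache clean is deferred to the end
                          *	cleanlist_idx == 3: wbinv the whole cache once (if the
                          *			    pmap is active), zero the recorded
                          *			    PTEs, and stop recording
                          *	cleanlist_idx > 3:  just zero PTEs; the cache and TLB
                          *			    have already been flushed wholesale
                          */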
   2293    1.1      matt 
   2294    1.1      matt void
   2295   1.73   thorpej pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
   2296    1.1      matt {
   2297    1.1      matt 	int cleanlist_idx = 0;
   2298    1.1      matt 	struct pagelist {
   2299    1.1      matt 		vaddr_t va;
   2300    1.1      matt 		pt_entry_t *pte;
   2301    1.1      matt 	} cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
   2302   1.11     chris 	pt_entry_t *pte = 0, *ptes;
   2303    1.2      matt 	paddr_t pa;
   2304    1.1      matt 	int pmap_active;
   2305   1.49   thorpej 	struct vm_page *pg;
   2306    1.1      matt 
   2307    1.1      matt 	/* Exit quick if there is no pmap */
   2308    1.1      matt 	if (!pmap)
   2309    1.1      matt 		return;
   2310    1.1      matt 
   2311   1.79   thorpej 	PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
   2312   1.79   thorpej 	    pmap, sva, eva));
   2313    1.1      matt 
   2314   1.17     chris 	/*
   2315   1.49   thorpej 	 * we lock in the pmap => vm_page direction
   2316   1.17     chris 	 */
   2317   1.17     chris 	PMAP_MAP_TO_HEAD_LOCK();
   2318   1.17     chris 
   2319   1.11     chris 	ptes = pmap_map_ptes(pmap);
   2320    1.1      matt 	/* Get a page table pointer */
   2321    1.1      matt 	while (sva < eva) {
   2322   1.30  rearnsha 		if (pmap_pde_page(pmap_pde(pmap, sva)))
   2323    1.1      matt 			break;
   2324   1.81   thorpej 		sva = (sva & L1_S_FRAME) + L1_S_SIZE;
   2325    1.1      matt 	}
   2326   1.11     chris 
   2327   1.56   thorpej 	pte = &ptes[arm_btop(sva)];
   2328    1.1      matt 	/* Note if the pmap is active; if so, cache and TLB cleans are needed */
   2329   1.58   thorpej 	pmap_active = pmap_is_curpmap(pmap);
   2330    1.1      matt 
   2331    1.1      matt 	/* Now loop along */
   2332    1.1      matt 	while (sva < eva) {
   2333    1.1      matt 		/* Check if we can move to the next PDE (l1 chunk) */
   2334   1.81   thorpej 		if (!(sva & L2_ADDR_BITS))
   2335   1.30  rearnsha 			if (!pmap_pde_page(pmap_pde(pmap, sva))) {
   2336   1.81   thorpej 				sva += L1_S_SIZE;
   2337   1.81   thorpej 				pte += arm_btop(L1_S_SIZE);
   2338    1.1      matt 				continue;
   2339    1.1      matt 			}
   2340    1.1      matt 
   2341    1.1      matt 		/* We've found a valid PTE, so this page of PTEs has to go. */
   2342    1.1      matt 		if (pmap_pte_v(pte)) {
   2343    1.1      matt 			/* Update statistics */
   2344    1.1      matt 			--pmap->pm_stats.resident_count;
   2345    1.1      matt 
   2346    1.1      matt 			/*
   2347    1.1      matt 			 * Add this page to our cache remove list, if we can.
   2348    1.1      matt 			 * If, however, the cache remove list is totally full,
   2349    1.1      matt 			 * then do a complete cache invalidation taking note
   2350    1.1      matt 			 * to backtrack the PTE table beforehand, and ignore
   2351    1.1      matt 			 * the lists in future because there's no longer any
   2352    1.1      matt 			 * point in bothering with them (we've paid the
   2353    1.1      matt 			 * penalty, so will carry on unhindered). Otherwise,
   2354    1.1      matt 			 * when we fall out, we just clean the list.
   2355    1.1      matt 			 */
   2356    1.1      matt 			PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
   2357    1.1      matt 			pa = pmap_pte_pa(pte);
   2358    1.1      matt 
   2359    1.1      matt 			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2360    1.1      matt 				/* Add to the clean list. */
   2361    1.1      matt 				cleanlist[cleanlist_idx].pte = pte;
   2362    1.1      matt 				cleanlist[cleanlist_idx].va = sva;
   2363    1.1      matt 				cleanlist_idx++;
   2364    1.1      matt 			} else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2365    1.1      matt 				int cnt;
   2366    1.1      matt 
   2367    1.1      matt 				/* Nuke everything if needed. */
   2368    1.1      matt 				if (pmap_active) {
   2369   1.36   thorpej 					cpu_idcache_wbinv_all();
   2370    1.1      matt 					cpu_tlb_flushID();
   2371    1.1      matt 				}
   2372    1.1      matt 
   2373    1.1      matt 				/*
   2374    1.1      matt 				 * Roll back the previous PTE list,
   2375    1.1      matt 				 * and zero out the current PTE.
   2376    1.1      matt 				 */
   2377    1.1      matt 				for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
   2378    1.1      matt 					*cleanlist[cnt].pte = 0;
   2379    1.1      matt 					pmap_pte_delref(pmap, cleanlist[cnt].va);
   2380    1.1      matt 				}
   2381    1.1      matt 				*pte = 0;
   2382    1.1      matt 				pmap_pte_delref(pmap, sva);
   2383    1.1      matt 				cleanlist_idx++;
   2384    1.1      matt 			} else {
   2385    1.1      matt 				/*
   2386    1.1      matt 				 * We've already nuked the cache and
   2387    1.1      matt 				 * TLB, so just carry on regardless,
   2388    1.1      matt 				 * and we won't need to do it again
   2389    1.1      matt 				 */
   2390    1.1      matt 				*pte = 0;
   2391    1.1      matt 				pmap_pte_delref(pmap, sva);
   2392    1.1      matt 			}
   2393    1.1      matt 
   2394    1.1      matt 			/*
   2395    1.1      matt 			 * Update flags. In a number of circumstances,
   2396    1.1      matt 			 * we could cluster a lot of these and do a
   2397    1.1      matt 			 * number of sequential pages in one go.
   2398    1.1      matt 			 */
   2399   1.49   thorpej 			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
   2400   1.17     chris 				struct pv_entry *pve;
   2401   1.49   thorpej 				simple_lock(&pg->mdpage.pvh_slock);
   2402   1.49   thorpej 				pve = pmap_remove_pv(pg, pmap, sva);
   2403   1.17     chris 				pmap_free_pv(pmap, pve);
   2404   1.49   thorpej 				pmap_vac_me_harder(pmap, pg, ptes, FALSE);
   2405   1.49   thorpej 				simple_unlock(&pg->mdpage.pvh_slock);
   2406    1.1      matt 			}
   2407    1.1      matt 		}
   2408    1.1      matt 		sva += NBPG;
   2409    1.1      matt 		pte++;
   2410    1.1      matt 	}
   2411    1.1      matt 
   2412    1.1      matt 	/*
   2413    1.1      matt 	 * Now, if we've fallen through to here, chances are that there
   2414    1.1      matt 	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
   2415    1.1      matt 	 */
   2416    1.1      matt 	if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2417    1.1      matt 		u_int cnt;
   2418    1.1      matt 
   2419    1.1      matt 		for (cnt = 0; cnt < cleanlist_idx; cnt++) {
   2420    1.1      matt 			if (pmap_active) {
   2421   1.36   thorpej 				cpu_idcache_wbinv_range(cleanlist[cnt].va,
   2422   1.36   thorpej 				    NBPG);
   2423    1.1      matt 				*cleanlist[cnt].pte = 0;
   2424    1.1      matt 				cpu_tlb_flushID_SE(cleanlist[cnt].va);
   2425    1.1      matt 			} else
   2426    1.1      matt 				*cleanlist[cnt].pte = 0;
   2427    1.1      matt 			pmap_pte_delref(pmap, cleanlist[cnt].va);
   2428    1.1      matt 		}
   2429    1.1      matt 	}
   2430  1.104   thorpej 
   2431  1.104   thorpej 	pmap_unmap_ptes(pmap);
   2432  1.104   thorpej 
   2433   1.17     chris 	PMAP_MAP_TO_HEAD_UNLOCK();
   2434    1.1      matt }
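                         /*
                          * Usage sketch (illustrative only): a caller removing a single
                          * page mapping is expected to follow up with pmap_update(), e.g.
                          *
                          *	pmap_remove(pmap_kernel(), va, va + NBPG);
                          *	pmap_update(pmap_kernel());
                          */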
   2435    1.1      matt 
   2436    1.1      matt /*
   2437    1.1      matt  * Routine:	pmap_remove_all
   2438    1.1      matt  * Function:
   2439    1.1      matt  *		Removes this physical page from
   2440    1.1      matt  *		all physical maps in which it resides.
   2441    1.1      matt  *		Reflects back modify bits to the pager.
   2442    1.1      matt  */
   2443    1.1      matt 
   2444   1.33     chris static void
   2445   1.73   thorpej pmap_remove_all(struct vm_page *pg)
   2446    1.1      matt {
   2447   1.17     chris 	struct pv_entry *pv, *npv;
   2448   1.15     chris 	struct pmap *pmap;
   2449   1.11     chris 	pt_entry_t *pte, *ptes;
   2450    1.1      matt 
   2451   1.49   thorpej 	PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
   2452    1.1      matt 
   2453   1.49   thorpej 	/* set vm_page => pmap locking */
   2454   1.17     chris 	PMAP_HEAD_TO_MAP_LOCK();
   2455    1.1      matt 
   2456   1.49   thorpej 	simple_lock(&pg->mdpage.pvh_slock);
   2457   1.17     chris 
   2458   1.49   thorpej 	pv = pg->mdpage.pvh_list;
   2459   1.49   thorpej 	if (pv == NULL) {
   2460   1.49   thorpej 		PDEBUG(0, printf("free page\n"));
   2461   1.49   thorpej 		simple_unlock(&pg->mdpage.pvh_slock);
   2462   1.49   thorpej 		PMAP_HEAD_TO_MAP_UNLOCK();
   2463   1.49   thorpej 		return;
   2464    1.1      matt 	}
   2465   1.17     chris 	pmap_clean_page(pv, FALSE);
   2466    1.1      matt 
   2467    1.1      matt 	while (pv) {
   2468    1.1      matt 		pmap = pv->pv_pmap;
   2469   1.11     chris 		ptes = pmap_map_ptes(pmap);
   2470   1.56   thorpej 		pte = &ptes[arm_btop(pv->pv_va)];
   2471    1.1      matt 
   2472    1.1      matt 		PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
   2473    1.1      matt 		    pv->pv_va, pv->pv_flags));
   2474    1.1      matt #ifdef DEBUG
   2475   1.79   thorpej 		if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 ||
   2476   1.79   thorpej 		    pmap_pte_v(pte) == 0 ||
   2477   1.79   thorpej 		    pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
   2478    1.1      matt 			panic("pmap_remove_all: bad mapping");
   2479    1.1      matt #endif	/* DEBUG */
   2480    1.1      matt 
   2481    1.1      matt 		/*
   2482    1.1      matt 		 * Update statistics
   2483    1.1      matt 		 */
   2484    1.1      matt 		--pmap->pm_stats.resident_count;
   2485    1.1      matt 
   2486    1.1      matt 		/* Wired bit */
   2487   1.78   thorpej 		if (pv->pv_flags & PVF_WIRED)
   2488    1.1      matt 			--pmap->pm_stats.wired_count;
   2489    1.1      matt 
   2490    1.1      matt 		/*
   2491    1.1      matt 		 * Invalidate the PTEs.
   2492    1.1      matt 		 * XXX: should cluster them up and invalidate as many
   2493    1.1      matt 		 * as possible at once.
   2494    1.1      matt 		 */
   2495    1.1      matt 
   2496    1.1      matt #ifdef needednotdone
   2497    1.1      matt reduce wiring count on page table pages as references drop
   2498    1.1      matt #endif
   2499    1.1      matt 
   2500    1.1      matt 		*pte = 0;
   2501    1.1      matt 		pmap_pte_delref(pmap, pv->pv_va);
   2502    1.1      matt 
   2503    1.1      matt 		npv = pv->pv_next;
   2504   1.17     chris 		pmap_free_pv(pmap, pv);
   2505    1.1      matt 		pv = npv;
   2506   1.11     chris 		pmap_unmap_ptes(pmap);
   2507    1.1      matt 	}
   2508   1.49   thorpej 	pg->mdpage.pvh_list = NULL;
   2509   1.49   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);
   2510   1.17     chris 	PMAP_HEAD_TO_MAP_UNLOCK();
   2511    1.1      matt 
   2512    1.1      matt 	PDEBUG(0, printf("done\n"));
   2513    1.1      matt 	cpu_tlb_flushID();
   2514   1.32   thorpej 	cpu_cpwait();
   2515    1.1      matt }
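                         /*
                          * Note (sketch): pmap_remove_all() is static; the usual way in
                          * is pmap_page_protect(pg, VM_PROT_NONE) below, whose default
                          * case falls through to it.
                          */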
   2516    1.1      matt 
   2517    1.1      matt 
   2518    1.1      matt /*
   2519    1.1      matt  * Set the physical protection on the specified range of this map as requested.
   2520    1.1      matt  */
   2521    1.1      matt 
   2522    1.1      matt void
   2523   1.73   thorpej pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   2524    1.1      matt {
   2525   1.11     chris 	pt_entry_t *pte = NULL, *ptes;
   2526   1.49   thorpej 	struct vm_page *pg;
   2527    1.1      matt 	int flush = 0;
   2528    1.1      matt 
   2529    1.1      matt 	PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
   2530    1.1      matt 	    pmap, sva, eva, prot));
   2531    1.1      matt 
   2532    1.1      matt 	if (~prot & VM_PROT_READ) {
   2533  1.107   thorpej 		/*
   2534  1.107   thorpej 		 * Just remove the mappings.  pmap_update() is not required
   2535  1.107   thorpej 		 * here since the caller should do it.
   2536  1.107   thorpej 		 */
   2537    1.1      matt 		pmap_remove(pmap, sva, eva);
   2538    1.1      matt 		return;
   2539    1.1      matt 	}
   2540    1.1      matt 	if (prot & VM_PROT_WRITE) {
   2541    1.1      matt 		/*
   2542    1.1      matt 		 * If this is a read->write transition, just ignore it and let
   2543    1.1      matt 		 * uvm_fault() take care of it later.
   2544    1.1      matt 		 */
   2545    1.1      matt 		return;
   2546    1.1      matt 	}
   2547    1.1      matt 
   2548   1.17     chris 	/* Need to lock map->head */
   2549   1.17     chris 	PMAP_MAP_TO_HEAD_LOCK();
   2550   1.17     chris 
   2551   1.11     chris 	ptes = pmap_map_ptes(pmap);
   2552   1.96   thorpej 
   2553   1.96   thorpej 	/*
   2554   1.96   thorpej 	 * OK, at this point, we know we're doing write-protect operation.
   2555   1.96   thorpej 	 * If the pmap is active, write-back the range.
   2556   1.96   thorpej 	 */
   2557   1.96   thorpej 	if (pmap_is_curpmap(pmap))
   2558   1.96   thorpej 		cpu_dcache_wb_range(sva, eva - sva);
   2559   1.96   thorpej 
   2560    1.1      matt 	/*
   2561    1.1      matt 	 * We need to acquire a pointer to a page table page before entering
   2562    1.1      matt 	 * the following loop.
   2563    1.1      matt 	 */
   2564    1.1      matt 	while (sva < eva) {
   2565   1.30  rearnsha 		if (pmap_pde_page(pmap_pde(pmap, sva)))
   2566    1.1      matt 			break;
   2567   1.81   thorpej 		sva = (sva & L1_S_FRAME) + L1_S_SIZE;
   2568    1.1      matt 	}
   2569   1.11     chris 
   2570   1.56   thorpej 	pte = &ptes[arm_btop(sva)];
   2571   1.17     chris 
   2572    1.1      matt 	while (sva < eva) {
   2573    1.1      matt 		/* only re-check the PDE at each L2 table boundary */
   2574   1.81   thorpej 		if ((sva & L2_ADDR_BITS) == 0) {
   2575   1.30  rearnsha 			if (!pmap_pde_page(pmap_pde(pmap, sva))) {
   2576    1.1      matt 				/* We can race ahead here, to the next pde. */
   2577   1.81   thorpej 				sva += L1_S_SIZE;
   2578   1.81   thorpej 				pte += arm_btop(L1_S_SIZE);
   2579    1.1      matt 				continue;
   2580    1.1      matt 			}
   2581    1.1      matt 		}
   2582    1.1      matt 
   2583    1.1      matt 		if (!pmap_pte_v(pte))
   2584    1.1      matt 			goto next;
   2585    1.1      matt 
   2586    1.1      matt 		flush = 1;
   2587    1.1      matt 
   2588  1.107   thorpej 		*pte &= ~L2_S_PROT_W;		/* clear write bit */
   2589    1.1      matt 
   2590    1.1      matt 		/* Clear write flag */
   2591  1.107   thorpej 		if ((pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pte))) != NULL) {
   2592   1.49   thorpej 			simple_lock(&pg->mdpage.pvh_slock);
   2593   1.78   thorpej 			(void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
   2594   1.49   thorpej 			pmap_vac_me_harder(pmap, pg, ptes, FALSE);
   2595   1.49   thorpej 			simple_unlock(&pg->mdpage.pvh_slock);
   2596    1.1      matt 		}
   2597    1.1      matt 
   2598  1.107   thorpej  next:
   2599    1.1      matt 		sva += NBPG;
   2600    1.1      matt 		pte++;
   2601    1.1      matt 	}
   2602   1.11     chris 	pmap_unmap_ptes(pmap);
   2603   1.17     chris 	PMAP_MAP_TO_HEAD_UNLOCK();
   2604    1.1      matt 	if (flush)
   2605    1.1      matt 		cpu_tlb_flushID();
   2606    1.1      matt }
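                         /*
                          * Behaviour summary (sketch): only the write-protect case does
                          * real work above; for example
                          *
                          *	pmap_protect(pmap, sva, eva, VM_PROT_READ);
                          *
                          * write-protects the range, while removing read permission falls
                          * through to pmap_remove() and a read->write upgrade is left for
                          * uvm_fault() to handle lazily.
                          */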
   2607    1.1      matt 
   2608    1.1      matt /*
   2609   1.15     chris  * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
   2610    1.1      matt  * int flags)
   2611    1.1      matt  *
   2612    1.1      matt  *      Insert the given physical page (p) at
   2613    1.1      matt  *      the specified virtual address (v) in the
   2614    1.1      matt  *      target physical map with the protection requested.
   2615    1.1      matt  *
   2616    1.1      matt  *      If specified, the page will be wired down, meaning
   2617    1.1      matt  *      that the related pte cannot be reclaimed.
   2618    1.1      matt  *
   2619    1.1      matt  *      NB:  This is the only routine which MAY NOT lazy-evaluate
   2620    1.1      matt  *      or lose information.  That is, this routine must actually
   2621    1.1      matt  *      insert this page into the given map NOW.
   2622    1.1      matt  */
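                         /*
                          * Call sketch (illustrative, not from this file): establishing a
                          * wired, writable mapping that is allowed to fail might look like
                          *
                          *	error = pmap_enter(pmap, va, pa,
                          *	    VM_PROT_READ | VM_PROT_WRITE,
                          *	    VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
                          *	if (error == ENOMEM)
                          *		... back off and retry ...
                          */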
   2623    1.1      matt 
   2624    1.1      matt int
   2625   1.73   thorpej pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
   2626   1.73   thorpej     int flags)
   2627    1.1      matt {
   2628   1.66   thorpej 	pt_entry_t *ptes, opte, npte;
   2629    1.2      matt 	paddr_t opa;
   2630    1.1      matt 	boolean_t wired = (flags & PMAP_WIRED) != 0;
   2631   1.49   thorpej 	struct vm_page *pg;
   2632   1.17     chris 	struct pv_entry *pve;
   2633   1.66   thorpej 	int error, nflags;
   2634    1.1      matt 
   2635    1.1      matt 	PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
   2636    1.1      matt 	    va, pa, pmap, prot, wired));
   2637    1.1      matt 
   2638    1.1      matt #ifdef DIAGNOSTIC
   2639    1.1      matt 	/* Valid address ? */
   2640   1.48     chris 	if (va >= (pmap_curmaxkvaddr))
   2641    1.1      matt 		panic("pmap_enter: too big");
   2642    1.1      matt 	if (pmap != pmap_kernel() && va != 0) {
   2643    1.1      matt 		if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
   2644    1.1      matt 			panic("pmap_enter: kernel page in user map");
   2645    1.1      matt 	} else {
   2646    1.1      matt 		if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
   2647    1.1      matt 			panic("pmap_enter: user page in kernel map");
   2648    1.1      matt 		if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
   2649    1.1      matt 			panic("pmap_enter: entering PT page");
   2650    1.1      matt 	}
   2651    1.1      matt #endif
   2652   1.79   thorpej 
   2653   1.79   thorpej 	KDASSERT(((va | pa) & PGOFSET) == 0);
   2654   1.79   thorpej 
   2655   1.49   thorpej 	/*
   2656   1.49   thorpej 	 * Get a pointer to the page.  Later on in this function, we
   2657   1.49   thorpej 	 * test for a managed page by checking pg != NULL.
   2658   1.49   thorpej 	 */
   2659   1.55   thorpej 	pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
   2660   1.49   thorpej 
   2661   1.17     chris 	/* get lock */
   2662   1.17     chris 	PMAP_MAP_TO_HEAD_LOCK();
   2663   1.66   thorpej 
   2664    1.1      matt 	/*
   2665   1.66   thorpej 	 * map the ptes.  If there's not already an L2 table for this
   2666   1.66   thorpej 	 * address, allocate one.
   2667    1.1      matt 	 */
   2668   1.66   thorpej 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   2669   1.66   thorpej 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   2670   1.17     chris 		struct vm_page *ptp;
   2671   1.57   thorpej 
   2672   1.57   thorpej 		/* kernel should be pre-grown */
   2673   1.57   thorpej 		KASSERT(pmap != pmap_kernel());
   2674   1.17     chris 
   2675   1.17     chris 		/* if failure is allowed then don't try too hard */
   2676   1.81   thorpej 		ptp = pmap_get_ptp(pmap, va & L1_S_FRAME);
   2677   1.17     chris 		if (ptp == NULL) {
   2678   1.17     chris 			if (flags & PMAP_CANFAIL) {
   2679   1.17     chris 				error = ENOMEM;
   2680   1.17     chris 				goto out;
   2681   1.17     chris 			}
   2682   1.17     chris 			panic("pmap_enter: get ptp failed");
   2683    1.1      matt 		}
   2684    1.1      matt 	}
   2685   1.66   thorpej 	opte = ptes[arm_btop(va)];
   2686    1.1      matt 
   2687    1.1      matt 	nflags = 0;
   2688    1.1      matt 	if (prot & VM_PROT_WRITE)
   2689   1.78   thorpej 		nflags |= PVF_WRITE;
   2690    1.1      matt 	if (wired)
   2691   1.78   thorpej 		nflags |= PVF_WIRED;
   2692    1.1      matt 
   2693    1.1      matt 	/* Is the pte valid ? If so then this page is already mapped */
   2694   1.66   thorpej 	if (l2pte_valid(opte)) {
   2695    1.1      matt 		/* Get the physical address of the current page mapped */
   2696   1.66   thorpej 		opa = l2pte_pa(opte);
   2697    1.1      matt 
   2698    1.1      matt 		/* Are we mapping the same page ? */
   2699    1.1      matt 		if (opa == pa) {
   2700  1.104   thorpej 			/* Check to see if we're doing rw->ro. */
   2701  1.104   thorpej 			if ((opte & L2_S_PROT_W) != 0 &&
   2702  1.104   thorpej 			    (prot & VM_PROT_WRITE) == 0) {
   2703  1.104   thorpej 				/* Yup, flush the cache if current pmap. */
   2704  1.104   thorpej 				if (pmap_is_curpmap(pmap))
   2705  1.104   thorpej 					cpu_dcache_wb_range(va, NBPG);
   2706  1.104   thorpej 			}
   2707  1.104   thorpej 
   2708    1.1      matt 			/* Has the wiring changed ? */
   2709   1.49   thorpej 			if (pg != NULL) {
   2710   1.49   thorpej 				simple_lock(&pg->mdpage.pvh_slock);
   2711   1.49   thorpej 				(void) pmap_modify_pv(pmap, va, pg,
   2712   1.78   thorpej 				    PVF_WRITE | PVF_WIRED, nflags);
   2713   1.49   thorpej 				simple_unlock(&pg->mdpage.pvh_slock);
   2714   1.49   thorpej  			}
   2715    1.1      matt 		} else {
   2716   1.49   thorpej 			struct vm_page *opg;
   2717   1.49   thorpej 
   2718    1.1      matt 			/* We are replacing the page with a new one. */
   2719   1.36   thorpej 			cpu_idcache_wbinv_range(va, NBPG);
   2720    1.1      matt 
   2721    1.1      matt 			/*
   2722    1.1      matt 			 * If it is part of our managed memory then we
   2723    1.1      matt 			 * must remove it from the PV list
   2724    1.1      matt 			 */
   2725   1.49   thorpej 			if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
   2726   1.49   thorpej 				simple_lock(&opg->mdpage.pvh_slock);
   2727   1.49   thorpej 				pve = pmap_remove_pv(opg, pmap, va);
   2728   1.49   thorpej 				simple_unlock(&opg->mdpage.pvh_slock);
   2729   1.17     chris 			} else {
   2730   1.17     chris 				pve = NULL;
   2731    1.1      matt 			}
   2732    1.1      matt 
   2733    1.1      matt 			goto enter;
   2734    1.1      matt 		}
   2735    1.1      matt 	} else {
   2736    1.1      matt 		opa = 0;
   2737   1.17     chris 		pve = NULL;
   2738    1.1      matt 		pmap_pte_addref(pmap, va);
   2739    1.1      matt 
   2740    1.1      matt 		/* pte is not valid so we must be hooking in a new page */
   2741    1.1      matt 		++pmap->pm_stats.resident_count;
   2742    1.1      matt 
   2743    1.1      matt 	enter:
   2744    1.1      matt 		/*
   2745    1.1      matt 		 * Enter on the PV list if part of our managed memory
   2746    1.1      matt 		 */
   2747   1.55   thorpej 		if (pg != NULL) {
   2748   1.17     chris 			if (pve == NULL) {
   2749   1.17     chris 				pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
   2750   1.17     chris 				if (pve == NULL) {
   2751   1.17     chris 					if (flags & PMAP_CANFAIL) {
   2752   1.17     chris 						error = ENOMEM;
   2753   1.17     chris 						goto out;
   2754   1.17     chris 					}
   2755   1.66   thorpej 					panic("pmap_enter: no pv entries "
   2756   1.66   thorpej 					    "available");
   2757   1.17     chris 				}
   2758   1.17     chris 			}
   2759   1.17     chris 			/* enter_pv locks pvh when adding */
   2760   1.49   thorpej 			pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
   2761   1.17     chris 		} else {
   2762   1.17     chris 			if (pve != NULL)
   2763   1.17     chris 				pmap_free_pv(pmap, pve);
   2764    1.1      matt 		}
   2765    1.1      matt 	}
   2766    1.1      matt 
   2767    1.1      matt 	/* Construct the pte, giving the correct access. */
   2768   1.79   thorpej 	npte = pa;
   2769    1.1      matt 
   2770    1.1      matt 	/* VA 0 is magic. */
   2771   1.77   thorpej 	if (pmap != pmap_kernel() && va != vector_page)
   2772   1.83   thorpej 		npte |= L2_S_PROT_U;
   2773    1.1      matt 
   2774   1.55   thorpej 	if (pg != NULL) {
   2775    1.1      matt #ifdef DIAGNOSTIC
   2776    1.1      matt 		if ((flags & VM_PROT_ALL) & ~prot)
   2777    1.1      matt 			panic("pmap_enter: access_type exceeds prot");
   2778    1.1      matt #endif
   2779   1.86   thorpej 		npte |= pte_l2_s_cache_mode;
   2780    1.1      matt 		if (flags & VM_PROT_WRITE) {
   2781   1.84   thorpej 			npte |= L2_S_PROTO | L2_S_PROT_W;
   2782   1.78   thorpej 			pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
   2783    1.1      matt 		} else if (flags & VM_PROT_ALL) {
   2784   1.84   thorpej 			npte |= L2_S_PROTO;
   2785   1.78   thorpej 			pg->mdpage.pvh_attrs |= PVF_REF;
   2786    1.1      matt 		} else
   2787   1.81   thorpej 			npte |= L2_TYPE_INV;
   2788    1.1      matt 	} else {
   2789    1.1      matt 		if (prot & VM_PROT_WRITE)
   2790   1.84   thorpej 			npte |= L2_S_PROTO | L2_S_PROT_W;
   2791    1.1      matt 		else if (prot & VM_PROT_ALL)
   2792   1.84   thorpej 			npte |= L2_S_PROTO;
   2793    1.1      matt 		else
   2794   1.81   thorpej 			npte |= L2_TYPE_INV;
   2795    1.1      matt 	}
   2796    1.1      matt 
   2797  1.109   thorpej #if ARM_MMU_XSCALE == 1 && defined(XSCALE_CACHE_READ_WRITE_ALLOCATE)
   2798  1.109   thorpej #if ARM_NMMUS > 1
   2799  1.109   thorpej # error "XXX Unable to use read/write-allocate and configure non-XScale"
   2800  1.109   thorpej #endif
   2801  1.109   thorpej 	/*
   2802  1.109   thorpej 	 * XXX BRUTAL HACK!  This allows us to limp along with
   2803  1.109   thorpej 	 * XXX the read/write-allocate cache mode.
   2804  1.109   thorpej 	 */
   2805  1.109   thorpej 	if (pmap == pmap_kernel())
   2806  1.109   thorpej 		npte &= ~L2_XSCALE_T_TEX(TEX_XSCALE_X);
   2807  1.109   thorpej #endif
   2808   1.66   thorpej 	ptes[arm_btop(va)] = npte;
   2809    1.1      matt 
   2810   1.55   thorpej 	if (pg != NULL) {
   2811   1.49   thorpej 		simple_lock(&pg->mdpage.pvh_slock);
   2812   1.59   thorpej  		pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
   2813   1.49   thorpej 		simple_unlock(&pg->mdpage.pvh_slock);
   2814   1.11     chris 	}
   2815    1.1      matt 
   2816    1.1      matt 	/* Better flush the TLB ... */
   2817    1.1      matt 	cpu_tlb_flushID_SE(va);
   2818   1.17     chris 	error = 0;
   2819   1.17     chris out:
   2820   1.66   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   2821   1.17     chris 	PMAP_MAP_TO_HEAD_UNLOCK();
   2822    1.1      matt 
   2823   1.17     chris 	return error;
   2824    1.1      matt }
   2825    1.1      matt 
   2826   1.48     chris /*
   2827   1.48     chris  * pmap_kenter_pa: enter a kernel mapping
   2828   1.48     chris  *
   2829   1.48     chris  * => no need to lock anything assume va is already allocated
   2830   1.48     chris  * => should be faster than normal pmap enter function
   2831   1.48     chris  */
   2832    1.1      matt void
   2833   1.73   thorpej pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
   2834    1.1      matt {
   2835   1.13     chris 	pt_entry_t *pte;
   2836  1.105   thorpej 
   2837   1.13     chris 	pte = vtopte(va);
   2838   1.14       chs 	KASSERT(!pmap_pte_v(pte));
   2839   1.83   thorpej 
   2840  1.105   thorpej #ifdef PMAP_ALIAS_DEBUG
   2841  1.105   thorpej     {
   2842  1.105   thorpej 	struct vm_page *pg;
   2843  1.105   thorpej 	int s;
   2844  1.105   thorpej 
   2845  1.105   thorpej 	pg = PHYS_TO_VM_PAGE(pa);
   2846  1.105   thorpej 	if (pg != NULL) {
   2847  1.105   thorpej 		s = splhigh();
   2848  1.105   thorpej 		if (pg->mdpage.ro_mappings == 0 &&
   2849  1.105   thorpej 		    pg->mdpage.rw_mappings == 0 &&
   2850  1.105   thorpej 		    pg->mdpage.kro_mappings == 0 &&
   2851  1.105   thorpej 		    pg->mdpage.krw_mappings == 0) {
   2852  1.105   thorpej 			/* This case is okay. */
   2853  1.105   thorpej 		} else if (pg->mdpage.rw_mappings == 0 &&
   2854  1.105   thorpej 			   pg->mdpage.krw_mappings == 0 &&
   2855  1.105   thorpej 			   (prot & VM_PROT_WRITE) == 0) {
   2856  1.105   thorpej 			/* This case is okay. */
   2857  1.105   thorpej 		} else {
   2858  1.105   thorpej 			/* Something is awry. */
   2859  1.105   thorpej 			printf("pmap_kenter_pa: ro %u, rw %u, kro %u, krw %u "
   2860  1.105   thorpej 			    "prot 0x%x\n", pg->mdpage.ro_mappings,
   2861  1.105   thorpej 			    pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
   2862  1.105   thorpej 			    pg->mdpage.krw_mappings, prot);
   2863  1.105   thorpej 			Debugger();
   2864  1.105   thorpej 		}
   2865  1.105   thorpej 		if (prot & VM_PROT_WRITE)
   2866  1.105   thorpej 			pg->mdpage.krw_mappings++;
   2867  1.105   thorpej 		else
   2868  1.105   thorpej 			pg->mdpage.kro_mappings++;
   2869  1.105   thorpej 		splx(s);
   2870  1.105   thorpej 	}
   2871  1.105   thorpej     }
   2872  1.105   thorpej #endif /* PMAP_ALIAS_DEBUG */
   2873  1.105   thorpej 
   2874   1.83   thorpej 	*pte = L2_S_PROTO | pa |
   2875   1.90   thorpej 	    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
   2876  1.112   thorpej 	PTE_SYNC(pte);
   2877    1.1      matt }
   2878    1.1      matt 
   2879    1.1      matt void
   2880   1.73   thorpej pmap_kremove(vaddr_t va, vsize_t len)
   2881    1.1      matt {
   2882   1.14       chs 	pt_entry_t *pte;
   2883  1.112   thorpej 	vaddr_t ova = va;
   2884  1.112   thorpej 	vaddr_t olen = len;
   2885   1.14       chs 
   2886    1.1      matt 	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
   2887   1.13     chris 
   2888   1.14       chs 		/*
   2889   1.14       chs 		 * We assume that we will only be called with small
   2890   1.14       chs 		 * regions of memory.
   2891   1.14       chs 		 */
   2892   1.14       chs 
   2893   1.30  rearnsha 		KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
   2894   1.13     chris 		pte = vtopte(va);
   2895  1.105   thorpej #ifdef PMAP_ALIAS_DEBUG
   2896  1.105   thorpej     {
   2897  1.105   thorpej 		struct vm_page *pg;
   2898  1.105   thorpej 		int s;
   2899  1.105   thorpej 
   2900  1.105   thorpej 		if ((*pte & L2_TYPE_MASK) != L2_TYPE_INV &&
   2901  1.105   thorpej 		    (pg = PHYS_TO_VM_PAGE(*pte & L2_S_FRAME)) != NULL) {
   2902  1.105   thorpej 			s = splhigh();
   2903  1.105   thorpej 			if (*pte & L2_S_PROT_W) {
   2904  1.105   thorpej 				KASSERT(pg->mdpage.krw_mappings != 0);
   2905  1.105   thorpej 				pg->mdpage.krw_mappings--;
   2906  1.105   thorpej 			} else {
   2907  1.105   thorpej 				KASSERT(pg->mdpage.kro_mappings != 0);
   2908  1.105   thorpej 				pg->mdpage.kro_mappings--;
   2909  1.105   thorpej 			}
   2910  1.105   thorpej 			splx(s);
   2911  1.105   thorpej 		}
   2912  1.105   thorpej     }
   2913  1.105   thorpej #endif /* PMAP_ALIAS_DEBUG */
   2914   1.36   thorpej 		cpu_idcache_wbinv_range(va, PAGE_SIZE);
   2915   1.13     chris 		*pte = 0;
   2916   1.13     chris 		cpu_tlb_flushID_SE(va);
   2917    1.1      matt 	}
   2918  1.112   thorpej 	PTE_SYNC_RANGE(vtopte(ova), olen >> PAGE_SHIFT);
   2919    1.1      matt }
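                         /*
                          * Pairing sketch (illustrative): unmanaged kernel mappings made
                          * with pmap_kenter_pa() are torn down with pmap_kremove(), e.g.
                          *
                          *	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
                          *	...
                          *	pmap_kremove(va, PAGE_SIZE);
                          */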
   2920    1.1      matt 
   2921    1.1      matt /*
   2922    1.1      matt  * pmap_page_protect:
   2923    1.1      matt  *
   2924    1.1      matt  * Lower the permission for all mappings to a given page.
   2925    1.1      matt  */
   2926    1.1      matt 
   2927    1.1      matt void
   2928   1.73   thorpej pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   2929    1.1      matt {
   2930    1.1      matt 
   2931   1.49   thorpej 	PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
   2932   1.49   thorpej 	    VM_PAGE_TO_PHYS(pg), prot));
   2933    1.1      matt 
   2934    1.1      matt 	switch(prot) {
   2935   1.17     chris 	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
   2936   1.17     chris 	case VM_PROT_READ|VM_PROT_WRITE:
   2937   1.17     chris 		return;
   2938   1.17     chris 
   2939    1.1      matt 	case VM_PROT_READ:
   2940    1.1      matt 	case VM_PROT_READ|VM_PROT_EXECUTE:
   2941   1.78   thorpej 		pmap_clearbit(pg, PVF_WRITE);
   2942    1.1      matt 		break;
   2943    1.1      matt 
   2944    1.1      matt 	default:
   2945   1.49   thorpej 		pmap_remove_all(pg);
   2946    1.1      matt 		break;
   2947    1.1      matt 	}
   2948    1.1      matt }
   2949    1.1      matt 
   2950    1.1      matt 
   2951    1.1      matt /*
   2952    1.1      matt  * Routine:	pmap_unwire
   2953    1.1      matt  * Function:	Clear the wired attribute for a map/virtual-address
   2954    1.1      matt  *		pair.
   2955    1.1      matt  * In/out conditions:
   2956    1.1      matt  *		The mapping must already exist in the pmap.
   2957    1.1      matt  */
   2958    1.1      matt 
   2959    1.1      matt void
   2960   1.73   thorpej pmap_unwire(struct pmap *pmap, vaddr_t va)
   2961    1.1      matt {
   2962   1.60   thorpej 	pt_entry_t *ptes;
   2963   1.60   thorpej 	struct vm_page *pg;
   2964    1.2      matt 	paddr_t pa;
   2965    1.1      matt 
   2966   1.60   thorpej 	PMAP_MAP_TO_HEAD_LOCK();
   2967   1.60   thorpej 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   2968    1.1      matt 
   2969   1.60   thorpej 	if (pmap_pde_v(pmap_pde(pmap, va))) {
   2970   1.60   thorpej #ifdef DIAGNOSTIC
   2971   1.60   thorpej 		if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   2972   1.60   thorpej 			panic("pmap_unwire: invalid L2 PTE");
   2973   1.60   thorpej #endif
   2974   1.60   thorpej 		/* Extract the physical address of the page */
   2975   1.60   thorpej 		pa = l2pte_pa(ptes[arm_btop(va)]);
   2976    1.1      matt 
   2977   1.60   thorpej 		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   2978   1.60   thorpej 			goto out;
   2979    1.1      matt 
   2980   1.60   thorpej 		/* Update the wired bit in the pv entry for this page. */
   2981   1.60   thorpej 		simple_lock(&pg->mdpage.pvh_slock);
   2982   1.78   thorpej 		(void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
   2983   1.60   thorpej 		simple_unlock(&pg->mdpage.pvh_slock);
   2984   1.60   thorpej 	}
   2985   1.60   thorpej #ifdef DIAGNOSTIC
   2986   1.60   thorpej 	else {
   2987   1.60   thorpej 		panic("pmap_unwire: invalid L1 PTE");
   2988   1.60   thorpej 	}
   2989   1.60   thorpej #endif
   2990   1.60   thorpej  out:
   2991   1.60   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   2992   1.60   thorpej 	PMAP_MAP_TO_HEAD_UNLOCK();
   2993    1.1      matt }
   2994    1.1      matt 
   2995    1.1      matt /*
   2996    1.1      matt  * Routine:  pmap_extract
   2997    1.1      matt  * Function:
   2998    1.1      matt  *           Extract the physical page address associated
   2999    1.1      matt  *           with the given map/virtual_address pair.
   3000    1.1      matt  */
   3001    1.1      matt boolean_t
   3002   1.73   thorpej pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
   3003    1.1      matt {
   3004   1.34   thorpej 	pd_entry_t *pde;
   3005   1.11     chris 	pt_entry_t *pte, *ptes;
   3006    1.1      matt 	paddr_t pa;
   3007    1.1      matt 
   3008   1.82   thorpej 	PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va));
   3009   1.82   thorpej 
   3010   1.82   thorpej 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3011    1.1      matt 
   3012   1.34   thorpej 	pde = pmap_pde(pmap, va);
   3013   1.56   thorpej 	pte = &ptes[arm_btop(va)];
   3014    1.1      matt 
   3015   1.82   thorpej 	if (pmap_pde_section(pde)) {
   3016   1.82   thorpej 		pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
   3017   1.82   thorpej 		PDEBUG(5, printf("section pa=0x%08lx\n", pa));
   3018   1.82   thorpej 		goto out;
   3019   1.82   thorpej 	} else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
   3020   1.82   thorpej 		PDEBUG(5, printf("no mapping\n"));
   3021   1.82   thorpej 		goto failed;
   3022   1.82   thorpej 	}
   3023   1.75   reinoud 
   3024   1.82   thorpej 	if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) {
   3025   1.82   thorpej 		pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET);
   3026   1.82   thorpej 		PDEBUG(5, printf("large page pa=0x%08lx\n", pa));
   3027   1.82   thorpej 		goto out;
   3028   1.82   thorpej 	}
   3029    1.1      matt 
   3030   1.82   thorpej 	pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET);
   3031   1.82   thorpej 	PDEBUG(5, printf("small page pa=0x%08lx\n", pa));
   3032    1.1      matt 
   3033   1.82   thorpej  out:
   3034   1.82   thorpej 	if (pap != NULL)
   3035   1.82   thorpej 		*pap = pa;
   3036    1.1      matt 
   3037   1.82   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3038   1.82   thorpej 	return (TRUE);
   3039   1.34   thorpej 
   3040   1.82   thorpej  failed:
   3041   1.82   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3042   1.82   thorpej 	return (FALSE);
   3043    1.1      matt }
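                         /*
                          * Usage sketch (illustrative):
                          *
                          *	paddr_t pa;
                          *
                          *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                          *		... no valid mapping at va ...
                          */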
   3044    1.1      matt 
   3045    1.1      matt 
   3046    1.1      matt /*
   3047   1.73   thorpej  * pmap_copy:
   3048    1.1      matt  *
   3049   1.73   thorpej  *	Copy the range specified by src_addr/len from the source map to the
   3050   1.73   thorpej  *	range dst_addr/len in the destination map.
   3051   1.73   thorpej  *
   3052   1.73   thorpej  *	This routine is only advisory and need not do anything.
   3053    1.1      matt  */
   3054   1.73   thorpej /* Call deleted in <arm/arm32/pmap.h> */
   3055    1.1      matt 
   3056    1.1      matt #if defined(PMAP_DEBUG)
   3057    1.1      matt void
   3058    1.1      matt pmap_dump_pvlist(vaddr_t phys, char *m)
   3061    1.1      matt {
   3062   1.49   thorpej 	struct vm_page *pg;
   3063    1.1      matt 	struct pv_entry *pv;
   3064    1.1      matt 
   3065   1.49   thorpej 	if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
   3066    1.1      matt 		printf("INVALID PA\n");
   3067    1.1      matt 		return;
   3068    1.1      matt 	}
   3069   1.49   thorpej 	simple_lock(&pg->mdpage.pvh_slock);
   3070    1.1      matt 	printf("%s %08lx:", m, phys);
   3071   1.49   thorpej 	if (pg->mdpage.pvh_list == NULL) {
   3072   1.97     chris 		simple_unlock(&pg->mdpage.pvh_slock);
   3073    1.1      matt 		printf(" no mappings\n");
   3074    1.1      matt 		return;
   3075    1.1      matt 	}
   3076    1.1      matt 
   3077   1.49   thorpej 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
   3078    1.1      matt 		printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
   3079    1.1      matt 		    pv->pv_va, pv->pv_flags);
   3080    1.1      matt 
   3081    1.1      matt 	printf("\n");
   3082   1.49   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);
   3083    1.1      matt }
   3084    1.1      matt 
   3085    1.1      matt #endif	/* PMAP_DEBUG */
   3086    1.1      matt 
   3087   1.11     chris static pt_entry_t *
   3088   1.11     chris pmap_map_ptes(struct pmap *pmap)
   3089   1.11     chris {
   3090   1.72   thorpej 	struct proc *p;
   3091   1.17     chris 
   3092   1.17     chris     	/* the kernel's pmap is always accessible */
   3093   1.17     chris 	if (pmap == pmap_kernel()) {
   3094   1.72   thorpej 		return (pt_entry_t *)PTE_BASE;
   3095   1.17     chris 	}
   3096   1.17     chris 
   3097   1.17     chris 	if (pmap_is_curpmap(pmap)) {
   3098   1.17     chris 		simple_lock(&pmap->pm_obj.vmobjlock);
   3099   1.53   thorpej 		return (pt_entry_t *)PTE_BASE;
   3100   1.17     chris 	}
   3101   1.72   thorpej 
   3102   1.17     chris 	p = curproc;
   3103   1.72   thorpej 	KDASSERT(p != NULL);
   3104   1.17     chris 
   3105   1.17     chris 	/* need to lock both curpmap and pmap: use ordered locking */
   3106   1.72   thorpej 	if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) {
   3107   1.17     chris 		simple_lock(&pmap->pm_obj.vmobjlock);
   3108   1.72   thorpej 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3109   1.17     chris 	} else {
   3110   1.72   thorpej 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3111   1.17     chris 		simple_lock(&pmap->pm_obj.vmobjlock);
   3112   1.17     chris 	}
   3113   1.11     chris 
   3114   1.72   thorpej 	pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE, pmap->pm_pptpt,
   3115   1.72   thorpej 	    FALSE);
   3116   1.17     chris 	cpu_tlb_flushD();
   3117   1.32   thorpej 	cpu_cpwait();
   3118   1.53   thorpej 	return (pt_entry_t *)APTE_BASE;
   3119   1.17     chris }
   3120   1.17     chris 
   3121   1.17     chris /*
   3122   1.17     chris  * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
   3123   1.17     chris  */
   3124   1.17     chris 
   3125   1.17     chris static void
   3126   1.73   thorpej pmap_unmap_ptes(struct pmap *pmap)
   3127   1.17     chris {
   3128   1.72   thorpej 
   3129   1.17     chris 	if (pmap == pmap_kernel()) {
   3130   1.17     chris 		return;
   3131   1.17     chris 	}
   3132   1.17     chris 	if (pmap_is_curpmap(pmap)) {
   3133   1.17     chris 		simple_unlock(&pmap->pm_obj.vmobjlock);
   3134   1.17     chris 	} else {
   3135   1.72   thorpej 		KDASSERT(curproc != NULL);
   3136   1.17     chris 		simple_unlock(&pmap->pm_obj.vmobjlock);
   3137   1.72   thorpej 		simple_unlock(
   3138   1.72   thorpej 		    &curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3139   1.17     chris 	}
   3140   1.11     chris }
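                         /*
                          * Typical pairing (sketch, mirroring the callers in this file);
                          * pmap_map_ptes() locks the pmap, pmap_unmap_ptes() unlocks it:
                          *
                          *	ptes = pmap_map_ptes(pmap);
                          *	... examine or modify ptes[arm_btop(va)] ...
                          *	pmap_unmap_ptes(pmap);
                          */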
   3141    1.1      matt 
   3142    1.1      matt /*
   3143    1.1      matt  * Modify pte bits for all ptes corresponding to the given physical address.
   3144    1.1      matt  * We use `maskbits' rather than `clearbits' because we're always passing
   3145    1.1      matt  * constants and the latter would require an extra inversion at run-time.
   3146    1.1      matt  */
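                         /*
                          * For example (taken from the callers in this file), downgrading
                          * a page to read-only everywhere is spelled
                          *
                          *	pmap_clearbit(pg, PVF_WRITE);
                          *
                          * which clears PVF_WRITE from each pv entry and the write bit
                          * from each corresponding PTE.
                          */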
   3147    1.1      matt 
   3148   1.22     chris static void
   3149   1.73   thorpej pmap_clearbit(struct vm_page *pg, u_int maskbits)
   3150    1.1      matt {
   3151    1.1      matt 	struct pv_entry *pv;
   3152  1.104   thorpej 	pt_entry_t *ptes, npte, opte;
   3153    1.1      matt 	vaddr_t va;
   3154   1.49   thorpej 	int tlbentry;
   3155    1.1      matt 
   3156    1.1      matt 	PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
   3157   1.49   thorpej 	    VM_PAGE_TO_PHYS(pg), maskbits));
   3158   1.21     chris 
   3159   1.21     chris 	tlbentry = 0;
   3160   1.21     chris 
   3161   1.17     chris 	PMAP_HEAD_TO_MAP_LOCK();
   3162   1.49   thorpej 	simple_lock(&pg->mdpage.pvh_slock);
   3163   1.17     chris 
   3164    1.1      matt 	/*
   3165    1.1      matt 	 * Clear saved attributes (modify, reference)
   3166    1.1      matt 	 */
   3167   1.49   thorpej 	pg->mdpage.pvh_attrs &= ~maskbits;
   3168    1.1      matt 
   3169   1.49   thorpej 	if (pg->mdpage.pvh_list == NULL) {
   3170   1.49   thorpej 		simple_unlock(&pg->mdpage.pvh_slock);
   3171   1.17     chris 		PMAP_HEAD_TO_MAP_UNLOCK();
   3172    1.1      matt 		return;
   3173    1.1      matt 	}
   3174    1.1      matt 
   3175    1.1      matt 	/*
   3176    1.1      matt 	 * Loop over all current mappings, setting/clearing as appropriate
   3177    1.1      matt 	 */
   3178   1.49   thorpej 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
   3179  1.105   thorpej #ifdef PMAP_ALIAS_DEBUG
   3180  1.105   thorpej     {
   3181  1.105   thorpej 		int s = splhigh();
   3182  1.105   thorpej 		if ((maskbits & PVF_WRITE) != 0 &&
   3183  1.105   thorpej 		    (pv->pv_flags & PVF_WRITE) != 0) {
   3184  1.105   thorpej 			KASSERT(pg->mdpage.rw_mappings != 0);
   3185  1.105   thorpej 			pg->mdpage.rw_mappings--;
   3186  1.105   thorpej 			pg->mdpage.ro_mappings++;
   3187  1.105   thorpej 		}
   3188  1.105   thorpej 		splx(s);
   3189  1.105   thorpej     }
   3190  1.105   thorpej #endif /* PMAP_ALIAS_DEBUG */
   3191    1.1      matt 		va = pv->pv_va;
   3192    1.1      matt 		pv->pv_flags &= ~maskbits;
   3193   1.59   thorpej 		ptes = pmap_map_ptes(pv->pv_pmap);	/* locks pmap */
   3194   1.59   thorpej 		KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
   3195  1.104   thorpej 		npte = opte = ptes[arm_btop(va)];
   3196   1.78   thorpej 		if (maskbits & (PVF_WRITE|PVF_MOD)) {
   3197   1.78   thorpej 			if ((pv->pv_flags & PVF_NC)) {
   3198   1.29  rearnsha 				/*
   3199   1.29  rearnsha 				 * Entry is not cacheable: re-enable
   3200   1.29  rearnsha 				 * the cache; nothing needs flushing.
   3201   1.29  rearnsha 				 *
   3202   1.29  rearnsha 				 * Don't turn caching on again if this
   3203   1.29  rearnsha 				 * is a modified emulation.  This
   3204   1.29  rearnsha 				 * would be inconsistent with the
   3205   1.29  rearnsha 				 * settings created by
   3206   1.29  rearnsha 				 * pmap_vac_me_harder().
   3207   1.29  rearnsha 				 *
   3208   1.29  rearnsha 				 * There's no need to call
   3209   1.29  rearnsha 				 * pmap_vac_me_harder() here: all
   3210   1.29  rearnsha 				 * pages are losing their write
   3211   1.29  rearnsha 				 * permission.
   3212   1.29  rearnsha 				 *
   3213   1.29  rearnsha 				 */
   3214   1.78   thorpej 				if (maskbits & PVF_WRITE) {
   3215  1.104   thorpej 					npte |= pte_l2_s_cache_mode;
   3216   1.78   thorpej 					pv->pv_flags &= ~PVF_NC;
   3217   1.29  rearnsha 				}
   3218   1.59   thorpej 			} else if (pmap_is_curpmap(pv->pv_pmap)) {
   3219   1.29  rearnsha 				/*
   3220   1.29  rearnsha 				 * Entry is cacheable and the pmap is
   3221   1.29  rearnsha 				 * current: flush it from the cache;
   3222   1.29  rearnsha 				 * otherwise it can't be in the cache.
   3223   1.29  rearnsha 				 */
   3224   1.36   thorpej 				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
   3225   1.59   thorpej 			}
   3226   1.29  rearnsha 
   3227   1.29  rearnsha 			/* make the pte read only */
   3228  1.104   thorpej 			npte &= ~L2_S_PROT_W;
   3229   1.29  rearnsha 		}
   3230   1.29  rearnsha 
   3231  1.104   thorpej 		if (maskbits & PVF_REF) {
   3232  1.104   thorpej 			if (pmap_is_curpmap(pv->pv_pmap) &&
   3233  1.104   thorpej 			    (pv->pv_flags & PVF_NC) == 0) {
   3234  1.104   thorpej 				/*
   3235  1.104   thorpej 				 * Check npte here; we may have already
   3236  1.104   thorpej 				 * done the wbinv above, and the validity
   3237  1.104   thorpej 				 * of the PTE is the same for opte and
   3238  1.104   thorpej 				 * npte.
   3239  1.104   thorpej 				 */
   3240  1.104   thorpej 				if (npte & L2_S_PROT_W) {
   3241  1.104   thorpej 					cpu_idcache_wbinv_range(pv->pv_va,
   3242  1.104   thorpej 					    NBPG);
   3243  1.104   thorpej 				} else if ((npte & L2_TYPE_MASK)
   3244  1.104   thorpej 					   != L2_TYPE_INV) {
   3245  1.104   thorpej 					/* XXXJRT need idcache_inv_range */
   3246  1.104   thorpej 					cpu_idcache_wbinv_range(pv->pv_va,
   3247  1.104   thorpej 					    NBPG);
   3248  1.104   thorpej 				}
   3249  1.104   thorpej 			}
   3250  1.104   thorpej 
   3251  1.104   thorpej 			/* make the pte invalid */
   3252  1.104   thorpej 			npte = (npte & ~L2_TYPE_MASK) | L2_TYPE_INV;
   3253  1.104   thorpej 		}
   3254   1.21     chris 
   3255  1.104   thorpej 		if (npte != opte) {
   3256  1.104   thorpej 			ptes[arm_btop(va)] = npte;
   3257  1.104   thorpej 			/* Flush the TLB entry if a current pmap. */
   3258  1.104   thorpej 			if (pmap_is_curpmap(pv->pv_pmap))
   3259  1.104   thorpej 				cpu_tlb_flushID_SE(pv->pv_va);
   3260   1.59   thorpej 		}
   3261  1.104   thorpej 
   3262   1.59   thorpej 		pmap_unmap_ptes(pv->pv_pmap);		/* unlocks pmap */
   3263   1.29  rearnsha 	}
   3264   1.32   thorpej 	cpu_cpwait();
   3265   1.21     chris 
   3266   1.49   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);
   3267   1.17     chris 	PMAP_HEAD_TO_MAP_UNLOCK();
   3268    1.1      matt }
   3269    1.1      matt 
   3270   1.50   thorpej /*
   3271   1.50   thorpej  * pmap_clear_modify:
   3272   1.50   thorpej  *
   3273   1.50   thorpej  *	Clear the "modified" attribute for a page.
   3274   1.50   thorpej  */
   3275    1.1      matt boolean_t
   3276   1.73   thorpej pmap_clear_modify(struct vm_page *pg)
   3277    1.1      matt {
   3278    1.1      matt 	boolean_t rv;
   3279    1.1      matt 
   3280   1.78   thorpej 	if (pg->mdpage.pvh_attrs & PVF_MOD) {
   3281   1.50   thorpej 		rv = TRUE;
   3282   1.78   thorpej 		pmap_clearbit(pg, PVF_MOD);
   3283   1.50   thorpej 	} else
   3284   1.50   thorpej 		rv = FALSE;
   3285   1.50   thorpej 
   3286   1.50   thorpej 	PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
   3287   1.50   thorpej 	    VM_PAGE_TO_PHYS(pg), rv));
   3288   1.50   thorpej 
   3289   1.50   thorpej 	return (rv);
   3290    1.1      matt }
   3291    1.1      matt 
   3292   1.50   thorpej /*
   3293   1.50   thorpej  * pmap_clear_reference:
   3294   1.50   thorpej  *
   3295   1.50   thorpej  *	Clear the "referenced" attribute for a page.
   3296   1.50   thorpej  */
   3297    1.1      matt boolean_t
   3298   1.73   thorpej pmap_clear_reference(struct vm_page *pg)
   3299    1.1      matt {
   3300    1.1      matt 	boolean_t rv;
   3301    1.1      matt 
   3302   1.78   thorpej 	if (pg->mdpage.pvh_attrs & PVF_REF) {
   3303   1.50   thorpej 		rv = TRUE;
   3304   1.78   thorpej 		pmap_clearbit(pg, PVF_REF);
   3305   1.50   thorpej 	} else
   3306   1.50   thorpej 		rv = FALSE;
   3307   1.50   thorpej 
   3308   1.50   thorpej 	PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
   3309   1.50   thorpej 	    VM_PAGE_TO_PHYS(pg), rv));
   3310   1.50   thorpej 
   3311   1.50   thorpej 	return (rv);
   3312    1.1      matt }
   3313    1.1      matt 
   3314   1.50   thorpej /*
   3315   1.50   thorpej  * pmap_is_modified:
   3316   1.50   thorpej  *
   3317   1.50   thorpej  *	Test if a page has the "modified" attribute.
   3318   1.50   thorpej  */
   3319   1.50   thorpej /* See <arm/arm32/pmap.h> */
   3320   1.39   thorpej 
   3321   1.50   thorpej /*
   3322   1.50   thorpej  * pmap_is_referenced:
   3323   1.50   thorpej  *
   3324   1.50   thorpej  *	Test if a page has the "referenced" attribute.
   3325   1.50   thorpej  */
   3326   1.50   thorpej /* See <arm/arm32/pmap.h> */
   3327    1.1      matt 
   3328    1.1      matt int
   3329   1.73   thorpej pmap_modified_emulation(struct pmap *pmap, vaddr_t va)
   3330    1.1      matt {
   3331   1.61   thorpej 	pt_entry_t *ptes;
   3332   1.61   thorpej 	struct vm_page *pg;
   3333    1.2      matt 	paddr_t pa;
   3334    1.1      matt 	u_int flags;
   3335   1.61   thorpej 	int rv = 0;
   3336    1.1      matt 
   3337    1.1      matt 	PDEBUG(2, printf("pmap_modified_emulation\n"));
   3338    1.1      matt 
   3339   1.61   thorpej 	PMAP_MAP_TO_HEAD_LOCK();
   3340   1.62   thorpej 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3341   1.61   thorpej 
   3342   1.61   thorpej 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   3343   1.61   thorpej 		PDEBUG(2, printf("L1 PTE invalid\n"));
   3344   1.61   thorpej 		goto out;
   3345    1.1      matt 	}
   3346    1.1      matt 
   3347   1.61   thorpej 	PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
   3348    1.1      matt 
   3349   1.61   thorpej 	/* Check for an invalid PTE. */
   3350   1.61   thorpej 	if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   3351   1.61   thorpej 		goto out;
   3352    1.1      matt 
   3353    1.1      matt 	/* This can happen if user code tries to access kernel memory. */
   3354   1.83   thorpej 	if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0)
   3355   1.61   thorpej 		goto out;
   3356    1.1      matt 
   3357    1.1      matt 	/* Extract the physical address of the page */
   3358   1.61   thorpej 	pa = l2pte_pa(ptes[arm_btop(va)]);
   3359   1.49   thorpej 	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   3360   1.61   thorpej 		goto out;
   3361    1.1      matt 
   3362   1.49   thorpej 	/* Get the current flags for this page. */
   3363   1.49   thorpej 	simple_lock(&pg->mdpage.pvh_slock);
   3364   1.17     chris 
   3365   1.49   thorpej 	flags = pmap_modify_pv(pmap, va, pg, 0, 0);
   3366    1.1      matt 	PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
   3367    1.1      matt 
   3368    1.1      matt 	/*
   3369    1.1      matt 	 * Do the flags say this page is writable?  If not, then this is
   3370    1.1      matt 	 * a genuine write fault.  If so, then the write fault is our own
   3371    1.1      matt 	 * doing, since we did not reflect the write access in the PTE.
   3372    1.1      matt 	 * Now that we know a write has occurred, we can correct this
   3373    1.1      matt 	 * and also set the modified bit.
   3374    1.1      matt 	 */
   3375   1.78   thorpej 	if (~flags & PVF_WRITE) {
   3376   1.49   thorpej 	    	simple_unlock(&pg->mdpage.pvh_slock);
   3377   1.61   thorpej 		goto out;
   3378   1.17     chris 	}
   3379    1.1      matt 
   3380   1.61   thorpej 	PDEBUG(0,
   3381   1.61   thorpej 	    printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
   3382   1.61   thorpej 	    va, ptes[arm_btop(va)]));
   3383   1.78   thorpej 	pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
   3384   1.29  rearnsha 
   3385   1.29  rearnsha 	/*
   3386   1.29  rearnsha 	 * Re-enable write permissions for the page.  No need to call
   3387   1.29  rearnsha 	 * pmap_vac_me_harder(), since this is just a
   3388   1.78   thorpej 	 * modified-emulation fault, and the PVF_WRITE bit isn't changing.
   3389   1.78   thorpej 	 * We've already set the cacheable bits based on the assumption
   3390   1.78   thorpej 	 * that we can write to this page.
   3391   1.29  rearnsha 	 */
   3392   1.61   thorpej 	ptes[arm_btop(va)] =
   3393   1.84   thorpej 	    (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
   3394   1.61   thorpej 	PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
   3395    1.1      matt 
   3396   1.49   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);
   3397   1.61   thorpej 
   3398    1.1      matt 	cpu_tlb_flushID_SE(va);
   3399   1.32   thorpej 	cpu_cpwait();
   3400   1.61   thorpej 	rv = 1;
   3401   1.61   thorpej  out:
   3402   1.61   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3403   1.61   thorpej 	PMAP_MAP_TO_HEAD_UNLOCK();
   3404   1.61   thorpej 	return (rv);
   3405    1.1      matt }
   3406    1.1      matt 
   3407    1.1      matt int
   3408   1.73   thorpej pmap_handled_emulation(struct pmap *pmap, vaddr_t va)
   3409    1.1      matt {
   3410   1.62   thorpej 	pt_entry_t *ptes;
   3411   1.62   thorpej 	struct vm_page *pg;
   3412    1.2      matt 	paddr_t pa;
   3413   1.62   thorpej 	int rv = 0;
   3414    1.1      matt 
   3415    1.1      matt 	PDEBUG(2, printf("pmap_handled_emulation\n"));
   3416    1.1      matt 
   3417   1.63   thorpej 	PMAP_MAP_TO_HEAD_LOCK();
   3418   1.62   thorpej 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3419   1.62   thorpej 
   3420   1.62   thorpej 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   3421   1.62   thorpej 		PDEBUG(2, printf("L1 PTE invalid\n"));
   3422   1.62   thorpej 		goto out;
   3423    1.1      matt 	}
   3424    1.1      matt 
   3425   1.62   thorpej 	PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
   3426    1.1      matt 
   3427   1.62   thorpej 	/* Check for an invalid PTE. */
   3428   1.62   thorpej 	if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   3429   1.62   thorpej 		goto out;
   3430    1.1      matt 
   3431    1.1      matt 	/* This can happen if user code tries to access kernel memory. */
   3432   1.81   thorpej 	if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV)
   3433   1.62   thorpej 		goto out;
   3434    1.1      matt 
   3435    1.1      matt 	/* Extract the physical address of the page */
   3436   1.62   thorpej 	pa = l2pte_pa(ptes[arm_btop(va)]);
   3437   1.49   thorpej 	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   3438   1.62   thorpej 		goto out;
   3439    1.1      matt 
   3440   1.63   thorpej 	simple_lock(&pg->mdpage.pvh_slock);
   3441   1.63   thorpej 
   3442    1.1      matt 	/*
   3443    1.1      matt 	 * OK, we just enable the PTE and mark the attributes as handled.
   3444   1.63   thorpej 	 * XXX Should we traverse the PV list and enable all PTEs?
   3445    1.1      matt 	 */
   3446   1.62   thorpej 	PDEBUG(0,
   3447   1.62   thorpej 	    printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
   3448   1.62   thorpej 	    va, ptes[arm_btop(va)]));
   3449   1.78   thorpej 	pg->mdpage.pvh_attrs |= PVF_REF;
   3450    1.1      matt 
   3451   1.84   thorpej 	ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO;
   3452   1.62   thorpej 	PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
   3453   1.62   thorpej 
   3454   1.63   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);
   3455   1.63   thorpej 
   3456    1.1      matt 	cpu_tlb_flushID_SE(va);
   3457   1.32   thorpej 	cpu_cpwait();
   3458   1.62   thorpej 	rv = 1;
   3459   1.62   thorpej  out:
   3460   1.62   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3461   1.63   thorpej 	PMAP_MAP_TO_HEAD_UNLOCK();
   3462   1.62   thorpej 	return (rv);
   3463    1.1      matt }
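
                         /*
                          * An illustrative sketch, not part of this file: the machine-
                          * dependent data abort handler is expected to try these two
                          * emulation routines before treating an abort as a genuine
                          * fault, along these lines (handler shape and variable names
                          * are hypothetical):
                          *
                          *	if (pmap_modified_emulation(pmap, va))
                          *		return;	/* write emulated; retry the access */
                          *	if (pmap_handled_emulation(pmap, va))
                          *		return;	/* reference emulated; retry the access */
                          *	... otherwise hand the fault to uvm_fault() ...
                          */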
   3464   1.17     chris 
   3465    1.1      matt /*
   3466    1.1      matt  * pmap_collect: free resources held by a pmap
   3467    1.1      matt  *
   3468    1.1      matt  * => optional function.
   3469    1.1      matt  * => called when a process is swapped out to free memory.
   3470    1.1      matt  */
   3471    1.1      matt 
   3472    1.1      matt void
   3473   1.73   thorpej pmap_collect(struct pmap *pmap)
   3474    1.1      matt {
   3475    1.1      matt }
   3476    1.1      matt 
   3477    1.1      matt /*
   3478    1.1      matt  * Routine:	pmap_procwr
   3479    1.1      matt  *
   3480    1.1      matt  * Function:
   3481    1.1      matt  *	Synchronize caches corresponding to [va, va+len) in process p.
   3482    1.1      matt  *
   3483    1.1      matt  */
   3484    1.1      matt void
   3485   1.73   thorpej pmap_procwr(struct proc *p, vaddr_t va, int len)
   3486    1.1      matt {
   3487    1.1      matt 	/* We only need to do anything if it is the current process. */
   3488    1.1      matt 	if (p == curproc)
   3489   1.36   thorpej 		cpu_icache_sync_range(va, len);
   3490   1.17     chris }

   3491   1.17     chris /*
   3492   1.17     chris  * PTP functions
   3493   1.17     chris  */
   3494   1.17     chris 
   3495   1.17     chris /*
   3496   1.17     chris  * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
   3497   1.17     chris  *
   3498   1.17     chris  * => pmap should NOT be pmap_kernel()
   3499   1.17     chris  * => pmap should be locked
   3500   1.17     chris  */
   3501   1.17     chris 
   3502   1.17     chris static struct vm_page *
   3503   1.57   thorpej pmap_get_ptp(struct pmap *pmap, vaddr_t va)
   3504   1.17     chris {
   3505   1.57   thorpej 	struct vm_page *ptp;
   3506   1.17     chris 
   3507   1.57   thorpej 	if (pmap_pde_page(pmap_pde(pmap, va))) {
   3508   1.17     chris 
   3509   1.57   thorpej 		/* valid... check hint (saves us a PA->PG lookup) */
   3510   1.57   thorpej 		if (pmap->pm_ptphint &&
   3511   1.81   thorpej 		    (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) ==
   3512   1.57   thorpej 		    VM_PAGE_TO_PHYS(pmap->pm_ptphint))
   3513   1.57   thorpej 			return (pmap->pm_ptphint);
   3514   1.57   thorpej 		ptp = uvm_pagelookup(&pmap->pm_obj, va);
   3515   1.17     chris #ifdef DIAGNOSTIC
   3516   1.57   thorpej 		if (ptp == NULL)
   3517   1.57   thorpej 			panic("pmap_get_ptp: unmanaged user PTP");
   3518   1.17     chris #endif
   3519   1.70   thorpej 		pmap->pm_ptphint = ptp;
   3520   1.57   thorpej 		return (ptp);
   3521   1.57   thorpej 	}
   3522   1.17     chris 
   3523   1.57   thorpej 	/* allocate a new PTP (updates ptphint) */
   3524   1.57   thorpej 	return (pmap_alloc_ptp(pmap, va));
   3525   1.17     chris }
   3526   1.17     chris 
   3527   1.17     chris /*
   3528   1.17     chris  * pmap_alloc_ptp: allocate a PTP for a PMAP
   3529   1.17     chris  *
   3530   1.17     chris  * => pmap should already be locked by caller
   3531   1.17     chris  * => we use the ptp's wire_count to count the number of active mappings
   3532   1.17     chris  *	in the PTP (we start it at one to prevent any chance this PTP
   3533   1.17     chris  *	will ever leak onto the active/inactive queues)
   3534   1.17     chris  */
   3535   1.17     chris 
   3536   1.17     chris /*__inline */ static struct vm_page *
   3537   1.57   thorpej pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
   3538   1.17     chris {
   3539   1.17     chris 	struct vm_page *ptp;
   3540   1.17     chris 
   3541   1.17     chris 	ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
   3542   1.17     chris 		UVM_PGA_USERESERVE|UVM_PGA_ZERO);
   3543   1.57   thorpej 	if (ptp == NULL)
   3544   1.17     chris 		return (NULL);
   3545   1.17     chris 
   3546   1.17     chris 	/* got one! */
   3547   1.17     chris 	ptp->flags &= ~PG_BUSY;	/* never busy */
   3548   1.17     chris 	ptp->wire_count = 1;	/* no mappings yet */
   3549   1.17     chris 	pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
   3550   1.17     chris 	pmap->pm_stats.resident_count++;	/* count PTP as resident */
   3551   1.70   thorpej 	pmap->pm_ptphint = ptp;
   3552   1.17     chris 	return (ptp);
   3553    1.1      matt }
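
                         /*
                          * A rough sketch of the PTP lifecycle implied by the comment
                          * above (illustrative only; the actual enter/remove paths live
                          * elsewhere in this file):
                          *
                          *	ptp = pmap_get_ptp(pmap, va);	/* wire_count starts at 1 */
                          *	ptp->wire_count++;		/* for each mapping entered */
                          *	ptp->wire_count--;		/* for each mapping removed */
                          *
                          * When wire_count drops back to 1, the PTP holds no mappings
                          * and can be reclaimed.
                          */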
   3554   1.48     chris 
   3555   1.48     chris vaddr_t
   3556   1.73   thorpej pmap_growkernel(vaddr_t maxkvaddr)
   3557   1.48     chris {
   3558   1.48     chris 	struct pmap *kpm = pmap_kernel(), *pm;
   3559   1.48     chris 	int s;
   3560   1.48     chris 	paddr_t ptaddr;
   3561   1.48     chris 	struct vm_page *ptp;
   3562   1.48     chris 
   3563   1.48     chris 	if (maxkvaddr <= pmap_curmaxkvaddr)
   3564   1.48     chris 		goto out;		/* we are OK */
   3565   1.48     chris 	NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
   3566   1.48     chris 		    pmap_curmaxkvaddr, maxkvaddr));
   3567   1.48     chris 
   3568   1.48     chris 	/*
   3569   1.48     chris 	 * whoops!   we need to add kernel PTPs
   3570   1.48     chris 	 */
   3571   1.48     chris 
   3572   1.48     chris 	s = splhigh();	/* to be safe */
   3573   1.48     chris 	simple_lock(&kpm->pm_obj.vmobjlock);
   3574   1.48     chris 	/* due to the way the arm pmap works we map 4MB at a time */
   3575   1.70   thorpej 	for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr;
   3576   1.81   thorpej 	     pmap_curmaxkvaddr += 4 * L1_S_SIZE) {
   3577   1.48     chris 
   3578   1.48     chris 		if (uvm.page_init_done == FALSE) {
   3579   1.48     chris 
   3580   1.48     chris 			/*
   3581   1.48     chris 			 * we're growing the kernel pmap early (from
   3582   1.48     chris 			 * uvm_pageboot_alloc()).  this case must be
   3583   1.48     chris 			 * handled a little differently.
   3584   1.48     chris 			 */
   3585   1.48     chris 
   3586   1.48     chris 			if (uvm_page_physget(&ptaddr) == FALSE)
   3587   1.48     chris 				panic("pmap_growkernel: out of memory");
   3588   1.48     chris 			pmap_zero_page(ptaddr);
   3589   1.48     chris 
   3590   1.48     chris 			/* map this page in */
   3591   1.70   thorpej 			pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr, TRUE);
   3592   1.48     chris 
   3593   1.48     chris 			/* count PTP as resident */
   3594   1.48     chris 			kpm->pm_stats.resident_count++;
   3595   1.48     chris 			continue;
   3596   1.48     chris 		}
   3597   1.48     chris 
   3598   1.48     chris 		/*
   3599   1.48     chris 		 * THIS *MUST* BE CODED SO AS TO WORK IN THE
   3600   1.48     chris 		 * pmap_initialized == FALSE CASE!  WE MAY BE
   3601   1.48     chris 		 * INVOKED WHILE pmap_init() IS RUNNING!
   3602   1.48     chris 		 */
   3603   1.48     chris 
   3604   1.70   thorpej 		if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL)
   3605   1.48     chris 			panic("pmap_growkernel: alloc ptp failed");
   3606   1.48     chris 
   3607   1.48     chris 		/* distribute new kernel PTP to all active pmaps */
   3608   1.48     chris 		simple_lock(&pmaps_lock);
   3609   1.48     chris 		LIST_FOREACH(pm, &pmaps, pm_list) {
   3610   1.70   thorpej 			pmap_map_in_l1(pm, pmap_curmaxkvaddr,
   3611   1.70   thorpej 			    VM_PAGE_TO_PHYS(ptp), TRUE);
   3612   1.48     chris 		}
   3613  1.111   thorpej 
   3614  1.111   thorpej 		/* Invalidate the PTPT cache. */
   3615  1.111   thorpej 		pool_cache_invalidate(&pmap_ptpt_cache);
   3616  1.111   thorpej 		pmap_ptpt_cache_generation++;
   3617   1.48     chris 
   3618   1.48     chris 		simple_unlock(&pmaps_lock);
   3619   1.48     chris 	}
   3620   1.48     chris 
   3621   1.48     chris 	/*
   3622   1.48     chris 	 * Flush out the cache.  This is expensive, but pmap_growkernel()
   3623   1.48     chris 	 * happens rarely enough that it doesn't matter.
   3624   1.48     chris 	 */
   3625   1.48     chris 	cpu_tlb_flushD();
   3626   1.48     chris 	cpu_cpwait();
   3627   1.48     chris 
   3628   1.48     chris 	simple_unlock(&kpm->pm_obj.vmobjlock);
   3629   1.48     chris 	splx(s);
   3630   1.48     chris 
   3631   1.48     chris out:
   3632   1.48     chris 	return (pmap_curmaxkvaddr);
   3633   1.48     chris }
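
                         /*
                          * A worked example of the loop stride above: each iteration
                          * allocates one 4KB PTP, which provides four 1KB coarse L2
                          * tables (256 four-byte entries each), and each table maps
                          * one 1MB L1 section.  Hence:
                          *
                          *	4 * L1_S_SIZE = 4 * 0x100000 = 0x400000 (4MB of KVA)
                          */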
   3634   1.48     chris 
   3635   1.76   thorpej /************************ Utility routines ****************************/
   3636   1.76   thorpej 
   3637   1.76   thorpej /*
   3638   1.76   thorpej  * vector_page_setprot:
   3639   1.76   thorpej  *
   3640   1.76   thorpej  *	Manipulate the protection of the vector page.
   3641   1.76   thorpej  */
   3642   1.76   thorpej void
   3643   1.76   thorpej vector_page_setprot(int prot)
   3644   1.76   thorpej {
   3645   1.76   thorpej 	pt_entry_t *pte;
   3646   1.76   thorpej 
   3647   1.76   thorpej 	pte = vtopte(vector_page);
   3648   1.48     chris 
   3649   1.83   thorpej 	*pte = (*pte & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
   3650  1.112   thorpej 	PTE_SYNC(pte);
   3651   1.76   thorpej 	cpu_tlb_flushD_SE(vector_page);
   3652   1.76   thorpej 	cpu_cpwait();
   3653   1.76   thorpej }
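
                         /*
                          * An illustrative (hypothetical) use: bootstrap code can
                          * install the exception vectors with the page writable and
                          * then tighten it to read-only once they are in place:
                          *
                          *	vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE);
                          *	... copy vectors into the vector page ...
                          *	vector_page_setprot(VM_PROT_READ);
                          */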
   3654    1.1      matt 
   3655   1.40   thorpej /************************ Bootstrapping routines ****************************/
   3656   1.40   thorpej 
   3657   1.40   thorpej /*
   3658   1.46   thorpej  * This list exists for the benefit of pmap_map_chunk().  It keeps track
   3659   1.46   thorpej  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
   3660   1.46   thorpej  * find them as necessary.
   3661   1.46   thorpej  *
   3662   1.46   thorpej  * Note that the data on this list is not valid after initarm() returns.
   3663   1.46   thorpej  */
   3664   1.46   thorpej SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
   3665   1.46   thorpej 
   3666   1.46   thorpej static vaddr_t
   3667   1.46   thorpej kernel_pt_lookup(paddr_t pa)
   3668   1.46   thorpej {
   3669   1.46   thorpej 	pv_addr_t *pv;
   3670   1.46   thorpej 
   3671   1.46   thorpej 	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
   3672   1.46   thorpej 		if (pv->pv_pa == pa)
   3673   1.46   thorpej 			return (pv->pv_va);
   3674   1.46   thorpej 	}
   3675   1.46   thorpej 	return (0);
   3676   1.46   thorpej }
   3677   1.46   thorpej 
   3678   1.46   thorpej /*
   3679   1.40   thorpej  * pmap_map_section:
   3680   1.40   thorpej  *
   3681   1.40   thorpej  *	Create a single section mapping.
   3682   1.40   thorpej  */
   3683   1.40   thorpej void
   3684   1.40   thorpej pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
   3685   1.40   thorpej {
   3686   1.40   thorpej 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3687   1.86   thorpej 	pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
   3688   1.40   thorpej 
   3689   1.81   thorpej 	KASSERT(((va | pa) & L1_S_OFFSET) == 0);
   3690   1.40   thorpej 
   3691   1.83   thorpej 	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
   3692   1.83   thorpej 	    L1_S_PROT(PTE_KERNEL, prot) | fl;
   3693   1.41   thorpej }
   3694   1.41   thorpej 
   3695   1.41   thorpej /*
   3696   1.41   thorpej  * pmap_map_entry:
   3697   1.41   thorpej  *
   3698   1.41   thorpej  *	Create a single page mapping.
   3699   1.41   thorpej  */
   3700   1.41   thorpej void
   3701   1.47   thorpej pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
   3702   1.41   thorpej {
   3703   1.47   thorpej 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3704   1.86   thorpej 	pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
   3705   1.47   thorpej 	pt_entry_t *pte;
   3706   1.41   thorpej 
   3707   1.41   thorpej 	KASSERT(((va | pa) & PGOFSET) == 0);
   3708   1.41   thorpej 
   3709   1.81   thorpej 	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
   3710   1.47   thorpej 		panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
   3711   1.47   thorpej 
   3712   1.47   thorpej 	pte = (pt_entry_t *)
   3713   1.81   thorpej 	    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   3714   1.47   thorpej 	if (pte == NULL)
   3715   1.47   thorpej 		panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
   3716   1.47   thorpej 
   3717   1.83   thorpej 	pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   3718   1.83   thorpej 	    L2_S_PROT(PTE_KERNEL, prot) | fl;
   3719   1.42   thorpej }
   3720   1.42   thorpej 
   3721   1.42   thorpej /*
   3722   1.42   thorpej  * pmap_link_l2pt:
   3723   1.42   thorpej  *
   3724   1.42   thorpej  *	Link the L2 page table specified by "pa" into the L1
   3725   1.42   thorpej  *	page table at the slot for "va".
   3726   1.42   thorpej  */
   3727   1.42   thorpej void
   3728   1.46   thorpej pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
   3729   1.42   thorpej {
   3730   1.42   thorpej 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3731   1.81   thorpej 	u_int slot = va >> L1_S_SHIFT;
   3732   1.42   thorpej 
   3733   1.46   thorpej 	KASSERT((l2pv->pv_pa & PGOFSET) == 0);
   3734   1.46   thorpej 
   3735   1.83   thorpej 	pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
   3736   1.83   thorpej 	pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
   3737   1.83   thorpej 	pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
   3738   1.83   thorpej 	pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
   3739   1.42   thorpej 
   3740   1.46   thorpej 	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
   3741   1.43   thorpej }
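
                         /*
                          * Note on the four slots above: each L1 entry maps 1MB of VA
                          * through a 1KB coarse L2 table, so the single 4KB page at
                          * l2pv->pv_pa is carved into four tables (at offsets 0x000,
                          * 0x400, 0x800 and 0xc00) covering four consecutive L1 slots.
                          */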
   3742   1.43   thorpej 
   3743   1.43   thorpej /*
   3744   1.43   thorpej  * pmap_map_chunk:
   3745   1.43   thorpej  *
   3746   1.43   thorpej  *	Map a chunk of memory using the most efficient mappings
   3747   1.43   thorpej  *	possible (section, large page, small page) into the
   3748   1.43   thorpej  *	provided L1 and L2 tables at the specified virtual address.
   3749   1.43   thorpej  */
   3750   1.43   thorpej vsize_t
   3751   1.46   thorpej pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
   3752   1.46   thorpej     int prot, int cache)
   3753   1.43   thorpej {
   3754   1.43   thorpej 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3755   1.86   thorpej 	pt_entry_t *pte, fl;
   3756   1.43   thorpej 	vsize_t resid;
   3757   1.43   thorpej 	int i;
   3758   1.43   thorpej 
   3759   1.43   thorpej 	resid = (size + (NBPG - 1)) & ~(NBPG - 1);
   3760   1.43   thorpej 
   3761   1.44   thorpej 	if (l1pt == 0)
   3762   1.44   thorpej 		panic("pmap_map_chunk: no L1 table provided");
   3763   1.44   thorpej 
   3764   1.43   thorpej #ifdef VERBOSE_INIT_ARM
   3765   1.43   thorpej 	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
   3766   1.43   thorpej 	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
   3767   1.43   thorpej #endif
   3768   1.43   thorpej 
   3769   1.43   thorpej 	size = resid;
   3770   1.43   thorpej 
   3771   1.43   thorpej 	while (resid > 0) {
   3772   1.43   thorpej 		/* See if we can use a section mapping. */
   3773   1.81   thorpej 		if (((pa | va) & L1_S_OFFSET) == 0 &&
   3774   1.81   thorpej 		    resid >= L1_S_SIZE) {
   3775   1.86   thorpej 			fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
   3776   1.43   thorpej #ifdef VERBOSE_INIT_ARM
   3777   1.43   thorpej 			printf("S");
   3778   1.43   thorpej #endif
   3779   1.83   thorpej 			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
   3780   1.83   thorpej 			    L1_S_PROT(PTE_KERNEL, prot) | fl;
   3781   1.81   thorpej 			va += L1_S_SIZE;
   3782   1.81   thorpej 			pa += L1_S_SIZE;
   3783   1.81   thorpej 			resid -= L1_S_SIZE;
   3784   1.43   thorpej 			continue;
   3785   1.43   thorpej 		}
   3786   1.45   thorpej 
   3787   1.45   thorpej 		/*
   3788   1.45   thorpej 		 * Ok, we're going to use an L2 table.  Make sure
   3789   1.45   thorpej 		 * one is actually in the corresponding L1 slot
   3790   1.45   thorpej 		 * for the current VA.
   3791   1.45   thorpej 		 */
   3792   1.81   thorpej 		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
   3793   1.46   thorpej 			panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
   3794   1.46   thorpej 
   3795   1.46   thorpej 		pte = (pt_entry_t *)
   3796   1.81   thorpej 		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   3797   1.46   thorpej 		if (pte == NULL)
   3798   1.46   thorpej 			panic("pmap_map_chunk: can't find L2 table for VA "
   3799   1.46   thorpej 			    "0x%08lx", va);
   3800   1.43   thorpej 
   3801   1.43   thorpej 		/* See if we can use a L2 large page mapping. */
   3802   1.81   thorpej 		if (((pa | va) & L2_L_OFFSET) == 0 &&
   3803   1.81   thorpej 		    resid >= L2_L_SIZE) {
   3804   1.86   thorpej 			fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0;
   3805   1.43   thorpej #ifdef VERBOSE_INIT_ARM
   3806   1.43   thorpej 			printf("L");
   3807   1.43   thorpej #endif
   3808   1.43   thorpej 			for (i = 0; i < 16; i++) {
   3809   1.43   thorpej 				pte[((va >> PGSHIFT) & 0x3f0) + i] =
   3810   1.83   thorpej 				    L2_L_PROTO | pa |
   3811   1.83   thorpej 				    L2_L_PROT(PTE_KERNEL, prot) | fl;
   3812   1.43   thorpej 			}
   3813   1.81   thorpej 			va += L2_L_SIZE;
   3814   1.81   thorpej 			pa += L2_L_SIZE;
   3815   1.81   thorpej 			resid -= L2_L_SIZE;
   3816   1.43   thorpej 			continue;
   3817   1.43   thorpej 		}
   3818   1.43   thorpej 
   3819   1.43   thorpej 		/* Use a small page mapping. */
   3820   1.86   thorpej 		fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
   3821   1.43   thorpej #ifdef VERBOSE_INIT_ARM
   3822   1.43   thorpej 		printf("P");
   3823   1.43   thorpej #endif
   3824   1.83   thorpej 		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   3825   1.83   thorpej 		    L2_S_PROT(PTE_KERNEL, prot) | fl;
   3826   1.43   thorpej 		va += NBPG;
   3827   1.43   thorpej 		pa += NBPG;
   3828   1.43   thorpej 		resid -= NBPG;
   3829   1.43   thorpej 	}
   3830   1.43   thorpej #ifdef VERBOSE_INIT_ARM
   3831   1.43   thorpej 	printf("\n");
   3832   1.43   thorpej #endif
   3833   1.43   thorpej 	return (size);
   3834   1.40   thorpej }
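
                         /*
                          * An illustrative bootstrap call (symbol names hypothetical;
                          * the real call sites are in each port's initarm()):
                          *
                          *	pmap_map_chunk(l1pagetable, KERNEL_BASE, physical_start,
                          *	    kernel_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
                          *
                          * The routine chooses 1MB sections, 64KB large pages or 4KB
                          * small pages as alignment and the remaining size permit, and
                          * returns the page-rounded size actually mapped.
                          */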
   3835   1.85   thorpej 
   3836   1.85   thorpej /********************** PTE initialization routines **************************/
   3837   1.85   thorpej 
   3838   1.85   thorpej /*
   3839   1.85   thorpej  * These routines are called when the CPU type is identified to set up
   3840   1.85   thorpej  * the PTE prototypes, cache modes, etc.
   3841   1.85   thorpej  *
   3842   1.85   thorpej  * The variables are always here, just in case LKMs need to reference
   3843   1.85   thorpej  * them (though, they shouldn't).
   3844   1.85   thorpej  */
   3845   1.85   thorpej 
   3846   1.86   thorpej pt_entry_t	pte_l1_s_cache_mode;
   3847   1.86   thorpej pt_entry_t	pte_l1_s_cache_mask;
   3848   1.86   thorpej 
   3849   1.86   thorpej pt_entry_t	pte_l2_l_cache_mode;
   3850   1.86   thorpej pt_entry_t	pte_l2_l_cache_mask;
   3851   1.86   thorpej 
   3852   1.86   thorpej pt_entry_t	pte_l2_s_cache_mode;
   3853   1.86   thorpej pt_entry_t	pte_l2_s_cache_mask;
   3854   1.85   thorpej 
   3855   1.85   thorpej pt_entry_t	pte_l2_s_prot_u;
   3856   1.85   thorpej pt_entry_t	pte_l2_s_prot_w;
   3857   1.85   thorpej pt_entry_t	pte_l2_s_prot_mask;
   3858   1.85   thorpej 
   3859   1.85   thorpej pt_entry_t	pte_l1_s_proto;
   3860   1.85   thorpej pt_entry_t	pte_l1_c_proto;
   3861   1.85   thorpej pt_entry_t	pte_l2_s_proto;
   3862   1.85   thorpej 
   3863   1.88   thorpej void		(*pmap_copy_page_func)(paddr_t, paddr_t);
   3864   1.88   thorpej void		(*pmap_zero_page_func)(paddr_t);
   3865   1.88   thorpej 
   3866   1.85   thorpej #if ARM_MMU_GENERIC == 1
   3867   1.85   thorpej void
   3868   1.85   thorpej pmap_pte_init_generic(void)
   3869   1.85   thorpej {
   3870   1.85   thorpej 
   3871   1.86   thorpej 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
   3872   1.86   thorpej 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
   3873   1.86   thorpej 
   3874   1.86   thorpej 	pte_l2_l_cache_mode = L2_B|L2_C;
   3875   1.86   thorpej 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
   3876   1.86   thorpej 
   3877   1.86   thorpej 	pte_l2_s_cache_mode = L2_B|L2_C;
   3878   1.86   thorpej 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
   3879   1.85   thorpej 
   3880   1.85   thorpej 	pte_l2_s_prot_u = L2_S_PROT_U_generic;
   3881   1.85   thorpej 	pte_l2_s_prot_w = L2_S_PROT_W_generic;
   3882   1.85   thorpej 	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
   3883   1.85   thorpej 
   3884   1.85   thorpej 	pte_l1_s_proto = L1_S_PROTO_generic;
   3885   1.85   thorpej 	pte_l1_c_proto = L1_C_PROTO_generic;
   3886   1.85   thorpej 	pte_l2_s_proto = L2_S_PROTO_generic;
   3887   1.88   thorpej 
   3888   1.88   thorpej 	pmap_copy_page_func = pmap_copy_page_generic;
   3889   1.88   thorpej 	pmap_zero_page_func = pmap_zero_page_generic;
   3890   1.85   thorpej }
   3891   1.85   thorpej 
   3892   1.85   thorpej #if defined(CPU_ARM9)
   3893   1.85   thorpej void
   3894   1.85   thorpej pmap_pte_init_arm9(void)
   3895   1.85   thorpej {
   3896   1.85   thorpej 
   3897   1.85   thorpej 	/*
   3898   1.85   thorpej 	 * ARM9 is compatible with generic, but we want to use
   3899   1.85   thorpej 	 * write-through caching for now.
   3900   1.85   thorpej 	 */
   3901   1.85   thorpej 	pmap_pte_init_generic();
   3902   1.86   thorpej 
   3903   1.86   thorpej 	pte_l1_s_cache_mode = L1_S_C;
   3904   1.86   thorpej 	pte_l2_l_cache_mode = L2_C;
   3905   1.86   thorpej 	pte_l2_s_cache_mode = L2_C;
   3906   1.85   thorpej }
   3907   1.85   thorpej #endif /* CPU_ARM9 */
   3908   1.85   thorpej #endif /* ARM_MMU_GENERIC == 1 */
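
                         /*
                          * These pmap_pte_init_*() routines are meant to run from the
                          * early CPU identification code (e.g. set_cpufuncs()), before
                          * any mappings are created, so that the prototype PTE bits
                          * above are valid by the time the pmap is bootstrapped.  A
                          * hypothetical selection sketch:
                          *
                          *	if (CPU is an XScale core)
                          *		pmap_pte_init_xscale();
                          *	else
                          *		pmap_pte_init_generic();
                          */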
   3909   1.85   thorpej 
   3910   1.85   thorpej #if ARM_MMU_XSCALE == 1
   3911   1.85   thorpej void
   3912   1.85   thorpej pmap_pte_init_xscale(void)
   3913   1.85   thorpej {
   3914   1.96   thorpej 	uint32_t auxctl;
   3915   1.85   thorpej 
   3916   1.96   thorpej 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
   3917   1.86   thorpej 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
   3918   1.86   thorpej 
   3919   1.96   thorpej 	pte_l2_l_cache_mode = L2_B|L2_C;
   3920   1.86   thorpej 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
   3921   1.86   thorpej 
   3922   1.96   thorpej 	pte_l2_s_cache_mode = L2_B|L2_C;
   3923   1.86   thorpej 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
   3924  1.106   thorpej 
   3925  1.106   thorpej #ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
   3926  1.106   thorpej 	/*
   3927  1.106   thorpej 	 * The XScale core has an enhanced mode where writes that
   3928  1.106   thorpej 	 * miss the cache cause a cache line to be allocated.  This
   3929  1.106   thorpej 	 * is significantly faster than the traditional write-through
   3930  1.106   thorpej 	 * behavior in this case.
   3931  1.106   thorpej 	 *
   3932  1.106   thorpej 	 * However, there is a bug lurking in this pmap module, or in
   3933  1.106   thorpej 	 * other parts of the VM system, or both, which causes corruption
   3934  1.106   thorpej 	 * of NFS-backed files when this cache mode is used.  We have
   3935  1.106   thorpej 	 * an ugly work-around for this problem (disable r/w-allocate
   3936  1.106   thorpej 	 * for managed kernel mappings), but the bug is still evil enough
   3937  1.106   thorpej 	 * to consider this cache mode "experimental".
   3938  1.106   thorpej 	 */
   3939  1.106   thorpej 	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
   3940  1.106   thorpej 	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
   3941  1.106   thorpej 	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
   3942  1.106   thorpej #endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
   3943   1.85   thorpej 
   3944   1.95   thorpej #ifdef XSCALE_CACHE_WRITE_THROUGH
   3945   1.95   thorpej 	/*
   3946   1.95   thorpej 	 * Some versions of the XScale core have various bugs in
   3947   1.95   thorpej 	 * their cache units, the work-around for which is to run
   3948   1.95   thorpej 	 * the cache in write-through mode.  Unfortunately, this
   3949   1.95   thorpej 	 * has a major (negative) impact on performance.  So, we
   3950   1.95   thorpej 	 * go ahead and run fast-and-loose, in the hopes that we
   3951   1.95   thorpej 	 * don't line up the planets in a way that will trip the
   3952   1.95   thorpej 	 * bugs.
   3953   1.95   thorpej 	 *
   3954   1.95   thorpej 	 * However, we give you the option to be slow-but-correct.
   3955   1.95   thorpej 	 */
   3956   1.95   thorpej 	pte_l1_s_cache_mode = L1_S_C;
   3957   1.95   thorpej 	pte_l2_l_cache_mode = L2_C;
   3958   1.95   thorpej 	pte_l2_s_cache_mode = L2_C;
   3959   1.95   thorpej #endif /* XSCALE_CACHE_WRITE_THROUGH */
   3960   1.95   thorpej 
   3961   1.85   thorpej 	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
   3962   1.85   thorpej 	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
   3963   1.85   thorpej 	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
   3964   1.85   thorpej 
   3965   1.85   thorpej 	pte_l1_s_proto = L1_S_PROTO_xscale;
   3966   1.85   thorpej 	pte_l1_c_proto = L1_C_PROTO_xscale;
   3967   1.85   thorpej 	pte_l2_s_proto = L2_S_PROTO_xscale;
   3968   1.88   thorpej 
   3969   1.88   thorpej 	pmap_copy_page_func = pmap_copy_page_xscale;
   3970   1.88   thorpej 	pmap_zero_page_func = pmap_zero_page_xscale;
   3971   1.96   thorpej 
   3972   1.96   thorpej 	/*
   3973   1.96   thorpej 	 * Disable ECC protection of page table access, for now.
   3974   1.96   thorpej 	 */
   3975   1.96   thorpej 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
   3976   1.96   thorpej 		: "=r" (auxctl));
   3977   1.96   thorpej 	auxctl &= ~XSCALE_AUXCTL_P;
   3978   1.96   thorpej 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
   3979   1.96   thorpej 		:
   3980   1.96   thorpej 		: "r" (auxctl));
   3981   1.85   thorpej }
   3982   1.87   thorpej 
   3983   1.87   thorpej /*
   3984   1.87   thorpej  * xscale_setup_minidata:
   3985   1.87   thorpej  *
   3986   1.87   thorpej  *	Set up the mini-data cache clean area.  We require the
   3987   1.87   thorpej  *	caller to allocate the right amount of physically and
   3988   1.87   thorpej  *	virtually contiguous space.
   3989   1.87   thorpej  */
   3990   1.87   thorpej void
   3991   1.87   thorpej xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
   3992   1.87   thorpej {
   3993   1.87   thorpej 	extern vaddr_t xscale_minidata_clean_addr;
   3994   1.87   thorpej 	extern vsize_t xscale_minidata_clean_size; /* already initialized */
   3995   1.87   thorpej 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3996   1.87   thorpej 	pt_entry_t *pte;
   3997   1.87   thorpej 	vsize_t size;
   3998   1.96   thorpej 	uint32_t auxctl;
   3999   1.87   thorpej 
   4000   1.87   thorpej 	xscale_minidata_clean_addr = va;
   4001   1.87   thorpej 
   4002   1.87   thorpej 	/* Round it to page size. */
   4003   1.87   thorpej 	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
   4004   1.87   thorpej 
   4005   1.87   thorpej 	for (; size != 0;
   4006   1.87   thorpej 	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
   4007   1.87   thorpej 		pte = (pt_entry_t *)
   4008   1.87   thorpej 		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   4009   1.87   thorpej 		if (pte == NULL)
   4010   1.87   thorpej 			panic("xscale_setup_minidata: can't find L2 table for "
   4011   1.87   thorpej 			    "VA 0x%08lx", va);
   4012   1.87   thorpej 		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   4013   1.87   thorpej 		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
   4014   1.87   thorpej 		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
   4015   1.87   thorpej 	}
   4016   1.96   thorpej 
   4017   1.96   thorpej 	/*
   4018   1.96   thorpej 	 * Configure the mini-data cache for write-back with
   4019   1.96   thorpej 	 * read/write-allocate.
   4020   1.96   thorpej 	 *
   4021   1.96   thorpej 	 * NOTE: In order to reconfigure the mini-data cache, we must
   4022   1.96   thorpej 	 * make sure it contains no valid data!  In order to do that,
   4023   1.96   thorpej 	 * we must issue a global data cache invalidate command!
   4024   1.96   thorpej 	 *
   4025   1.96   thorpej 	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
   4026   1.96   thorpej 	 * THIS IS VERY IMPORTANT!
   4027   1.96   thorpej 	 */
   4028   1.96   thorpej 
   4029   1.96   thorpej 	/* Invalidate data and mini-data. */
   4030   1.96   thorpej 	__asm __volatile("mcr p15, 0, %0, c7, c6, 0"
   4031   1.96   thorpej 		:
   4032   1.96   thorpej 		: "r" (0));
   4033   1.96   thorpej 
   4035   1.96   thorpej 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
   4036   1.96   thorpej 		: "=r" (auxctl));
   4037   1.96   thorpej 	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
   4038   1.96   thorpej 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
   4039   1.96   thorpej 		:
   4040   1.96   thorpej 		: "r" (auxctl));
   4041   1.87   thorpej }
   4042   1.85   thorpej #endif /* ARM_MMU_XSCALE == 1 */
   4043