/*	$NetBSD: pmap.c,v 1.14.2.7 2002/06/23 17:34:45 jdolecek Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil@causality.com>.
 */

/*
 * The dram block info is currently referenced from the bootconfig.
 * This should be placed in a separate structure.
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG		- Build in pmap_debug_level code
 */

/* Include header files */

#include "opt_pmap_debug.h"
#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/cdefs.h>

#include <uvm/uvm.h>

#include <machine/bootconfig.h>
#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/pcb.h>
#include <machine/param.h>
#include <arm/arm32/katelib.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.14.2.7 2002/06/23 17:34:45 jdolecek Exp $");

#ifdef PMAP_DEBUG
#define	PDEBUG(_lev_,_stat_) \
	if (pmap_debug_level >= (_lev_)) \
        	((_stat_))
int pmap_debug_level = -2;
void pmap_dump_pvlist(vaddr_t phys, char *m);

/*
 * for switching to potentially finer grained debugging
 */
#define	PDB_FOLLOW	0x0001
#define	PDB_INIT	0x0002
#define	PDB_ENTER	0x0004
#define	PDB_REMOVE	0x0008
#define	PDB_CREATE	0x0010
#define	PDB_PTPAGE	0x0020
#define	PDB_GROWKERN	0x0040
#define	PDB_BITS	0x0080
#define	PDB_COLLECT	0x0100
#define	PDB_PROTECT	0x0200
#define	PDB_MAP_L1	0x0400
#define	PDB_BOOTSTRAP	0x1000
#define	PDB_PARANOIA	0x2000
#define	PDB_WIRING	0x4000
#define	PDB_PVDUMP	0x8000

int debugmap = 0;
int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
#define	NPDEBUG(_lev_,_stat_) \
	if (pmapdebug & (_lev_)) \
        	((_stat_))
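
/*
 * Example (illustrative only): with the macro above, a trace point such as
 *
 *	NPDEBUG(PDB_ENTER, printf("pmap_enter: pmap=%p va=%08lx\n",
 *	    pmap, va));
 *
 * prints only when PDB_ENTER is set in pmapdebug; the PDB_* bits can be
 * OR'd into pmapdebug at run time (e.g. by patching the variable from
 * ddb) to widen or narrow the trace.
 */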

#else	/* PMAP_DEBUG */
#define	PDEBUG(_lev_,_stat_) /* Nothing */
#define NPDEBUG(_lev_,_stat_) /* Nothing */
#endif	/* PMAP_DEBUG */

struct pmap     kernel_pmap_store;

/*
 * linked list of all non-kernel pmaps
 */

static LIST_HEAD(, pmap) pmaps;

/*
 * pool that pmap structures are allocated from
 */

struct pool pmap_pmap_pool;

static pt_entry_t *csrc_pte, *cdst_pte;
static vaddr_t csrcp, cdstp;

char *memhook;
extern caddr_t msgbufaddr;

boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
/*
 * locking data structures
 */

static struct lock pmap_main_lock;
static struct simplelock pvalloc_lock;
static struct simplelock pmaps_lock;
#ifdef LOCKDEBUG
#define PMAP_MAP_TO_HEAD_LOCK() \
     (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
#define PMAP_MAP_TO_HEAD_UNLOCK() \
     (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)

#define PMAP_HEAD_TO_MAP_LOCK() \
     (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
#define PMAP_HEAD_TO_MAP_UNLOCK() \
     (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#else
#define	PMAP_MAP_TO_HEAD_LOCK()		/* nothing */
#define	PMAP_MAP_TO_HEAD_UNLOCK()	/* nothing */
#define	PMAP_HEAD_TO_MAP_LOCK()		/* nothing */
#define	PMAP_HEAD_TO_MAP_UNLOCK()	/* nothing */
#endif /* LOCKDEBUG */
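
/*
 * A note on the locking convention these macros encode (a sketch of the
 * scheme this pmap inherits from the i386 pmap): code that walks from a
 * pmap towards the per-page pv lists takes pmap_main_lock shared
 * (MAP_TO_HEAD), while code that walks from a page's pv list back towards
 * the owning pmaps takes it exclusive (HEAD_TO_MAP), so the two lookup
 * directions cannot deadlock against each other.
 */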

/*
 * pv_page management structures: locked by pvalloc_lock
 */

TAILQ_HEAD(pv_pagelist, pv_page);
static struct pv_pagelist pv_freepages;	/* list of pv_pages with free entrys */
static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
static int pv_nfpvents;			/* # of free pv entries */
static struct pv_page *pv_initpage;	/* bootstrap page from kernel_map */
static vaddr_t pv_cachedva;		/* cached VA for later use */

#define PVE_LOWAT (PVE_PER_PVPAGE / 2)	/* free pv_entry low water mark */
#define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
					/* high water mark */

/*
 * local prototypes
 */

static struct pv_entry	*pmap_add_pvpage __P((struct pv_page *, boolean_t));
static struct pv_entry	*pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
#define ALLOCPV_NEED	0	/* need PV now */
#define ALLOCPV_TRY	1	/* just try to allocate, don't steal */
#define ALLOCPV_NONEED	2	/* don't need PV, just growing cache */
static struct pv_entry	*pmap_alloc_pvpage __P((struct pmap *, int));
static void		 pmap_enter_pv __P((struct vm_page *,
					    struct pv_entry *, struct pmap *,
					    vaddr_t, struct vm_page *, int));
static void		 pmap_free_pv __P((struct pmap *, struct pv_entry *));
static void		 pmap_free_pvs __P((struct pmap *, struct pv_entry *));
static void		 pmap_free_pv_doit __P((struct pv_entry *));
static void		 pmap_free_pvpage __P((void));
static boolean_t	 pmap_is_curpmap __P((struct pmap *));
static struct pv_entry	*pmap_remove_pv __P((struct vm_page *, struct pmap *,
			vaddr_t));
#define PMAP_REMOVE_ALL		0	/* remove all mappings */
#define PMAP_REMOVE_SKIPWIRED	1	/* skip wired mappings */

static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
	u_int, u_int));

/*
 * Structure that describes an L1 table.
 */
struct l1pt {
	SIMPLEQ_ENTRY(l1pt)	pt_queue;	/* Queue pointers */
	struct pglist		pt_plist;	/* Allocated page list */
	vaddr_t			pt_va;		/* Allocated virtual address */
	int			pt_flags;	/* Flags */
};
#define	PTFLAG_STATIC		0x01		/* Statically allocated */
#define	PTFLAG_KPT		0x02		/* Kernel pt's are mapped */
#define	PTFLAG_CLEAN		0x04		/* L1 is clean */

static void pmap_free_l1pt __P((struct l1pt *));
static int pmap_allocpagedir __P((struct pmap *));
static int pmap_clean_page __P((struct pv_entry *, boolean_t));
static void pmap_remove_all __P((struct vm_page *));

static int pmap_alloc_ptpt(struct pmap *);
static void pmap_free_ptpt(struct pmap *);

static struct vm_page	*pmap_alloc_ptp __P((struct pmap *, vaddr_t));
static struct vm_page	*pmap_get_ptp __P((struct pmap *, vaddr_t));
__inline static void pmap_clearbit __P((struct vm_page *, unsigned int));

extern paddr_t physical_start;
extern paddr_t physical_freestart;
extern paddr_t physical_end;
extern paddr_t physical_freeend;
extern unsigned int free_pages;
extern int max_processes;

vaddr_t virtual_avail;
vaddr_t virtual_end;
vaddr_t pmap_curmaxkvaddr;

vaddr_t avail_start;
vaddr_t avail_end;

extern pv_addr_t systempage;

/* Variables used by the L1 page table queue code */
SIMPLEQ_HEAD(l1pt_queue, l1pt);
static struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
static int l1pt_static_queue_count;	    /* items in the static l1 queue */
static int l1pt_static_create_count;	    /* static l1 items created */
static struct l1pt_queue l1pt_queue;	    /* head of our l1 queue */
static int l1pt_queue_count;		    /* items in the l1 queue */
static int l1pt_create_count;		    /* stat - L1's create count */
static int l1pt_reuse_count;		    /* stat - L1's reused count */

/* Local function prototypes (not used outside this file) */
void pmap_pinit __P((struct pmap *));
void pmap_freepagedir __P((struct pmap *));

/* Other function prototypes */
extern void bzero_page __P((vaddr_t));
extern void bcopy_page __P((vaddr_t, vaddr_t));

struct l1pt *pmap_alloc_l1pt __P((void));
static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
     vaddr_t l2pa, boolean_t));

static pt_entry_t *pmap_map_ptes __P((struct pmap *));
static void pmap_unmap_ptes __P((struct pmap *));

__inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));
static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));
static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));

/*
 * real definition of pv_entry.
 */

struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	int		pv_flags;	/* flags */
	struct vm_page	*pv_ptp;	/* vm_page for the ptp */
};
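
/*
 * pv_flags above holds the per-mapping PVF_* attribute bits; for example
 * PVF_WIRED, which pmap_enter_pv(), pmap_remove_pv() and pmap_modify_pv()
 * below use to keep pm_stats.wired_count in step with the pv list.
 */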

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
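
/*
 * Worked example (illustrative only, assuming 4KB pages and a 32-bit
 * ABI with no struct padding): sizeof(struct pv_page_info) == 16 and
 * sizeof(struct pv_entry) == 20, so PVE_PER_PVPAGE comes out to
 * (4096 - 16) / 20 == 204 entries per pv_page.
 */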

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

#ifdef MYCROFT_HACK
int mycroft_hack = 0;
#endif

/* Function to set the debug level of the pmap code */

#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{
	pmap_debug_level = level;
	printf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif	/* PMAP_DEBUG */

__inline static boolean_t
pmap_is_curpmap(struct pmap *pmap)
{

	if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
	    pmap == pmap_kernel())
		return (TRUE);

	return (FALSE);
}

#include "isadma.h"

#if NISADMA > 0
/*
 * Used to protect memory for ISA DMA bounce buffers.  If, when loading
 * pages into the system, memory intersects with any of these ranges,
 * the intersecting memory will be loaded into a lower-priority free list.
 */
bus_dma_segment_t *pmap_isa_dma_ranges;
int pmap_isa_dma_nranges;

/*
 * Check if a memory range intersects with an ISA DMA range, and
 * return the page-rounded intersection if it does.  The intersection
 * will be placed on a lower-priority free list.
 */
static boolean_t
pmap_isa_dma_range_intersect(paddr_t pa, psize_t size, paddr_t *pap,
    psize_t *sizep)
{
	bus_dma_segment_t *ds;
	int i;

	if (pmap_isa_dma_ranges == NULL)
		return (FALSE);

	for (i = 0, ds = pmap_isa_dma_ranges;
	     i < pmap_isa_dma_nranges; i++, ds++) {
		if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    ds->ds_addr + ds->ds_len) - pa);
			return (TRUE);
		}
		if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(ds->ds_addr);
			*sizep = round_page(min((pa + size) - ds->ds_addr,
			    ds->ds_len));
			return (TRUE);
		}
	}

	/*
	 * No intersection found.
	 */
	return (FALSE);
}
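
/*
 * Example (hypothetical numbers): with one ISA DMA range covering the
 * first 16MB of RAM, a call with pa = 15MB and size = 4MB sets
 * *pap = 15MB and *sizep = 1MB (the page-rounded piece [15MB, 16MB)),
 * which the caller then loads onto the lower-priority free list; see
 * the uvm_page_physload() loop in pmap_bootstrap() below.
 */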
#endif /* NISADMA > 0 */

/*
 * p v _ e n t r y   f u n c t i o n s
 */

/*
 * pv_entry allocation functions:
 *   the main pv_entry allocation functions are:
 *     pmap_alloc_pv: allocate a pv_entry structure
 *     pmap_free_pv: free one pv_entry
 *     pmap_free_pvs: free a list of pv_entrys
 *
 * the rest are helper functions
 */

/*
 * pmap_alloc_pv: inline function to allocate a pv_entry structure
 * => we lock pvalloc_lock
 * => if we fail, we call out to pmap_alloc_pvpage
 * => 3 modes:
 *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
 *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
 *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
 *			one now
 *
 * "try" is for optional functions like pmap_copy().
 */

__inline static struct pv_entry *
pmap_alloc_pv(struct pmap *pmap, int mode)
{
	struct pv_page *pvpage;
	struct pv_entry *pv;

	simple_lock(&pvalloc_lock);

	pvpage = TAILQ_FIRST(&pv_freepages);

	if (pvpage != NULL) {
		pvpage->pvinfo.pvpi_nfree--;
		if (pvpage->pvinfo.pvpi_nfree == 0) {
			/* nothing left in this one? */
			TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
		}
		pv = pvpage->pvinfo.pvpi_pvfree;
		KASSERT(pv);
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
		pv_nfpvents--;  /* took one from pool */
	} else {
		pv = NULL;		/* need more of them */
	}

	/*
	 * if below low water mark or we didn't get a pv_entry we try and
	 * create more pv_entrys ...
	 */

	if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
		if (pv == NULL)
			pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
					       mode : ALLOCPV_NEED);
		else
			(void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
	}

	simple_unlock(&pvalloc_lock);
	return(pv);
}
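
/*
 * Typical caller pattern (a sketch only; the real callers are in the
 * pmap_enter() path later in this file):
 *
 *	pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
 *	if (pve == NULL)
 *		panic("no pv entries available");
 *
 * ALLOCPV_TRY callers instead treat a NULL return as "skip the
 * optional work".
 */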

/*
 * pmap_alloc_pvpage: maybe allocate a new pvpage
 *
 * if need_entry is false: try and allocate a new pv_page
 * if need_entry is true: try and allocate a new pv_page and return a
 *	new pv_entry from it.   if we are unable to allocate a pv_page
 *	we make a last ditch effort to steal a pv_page from some other
 *	mapping.    if that fails, we panic...
 *
 * => we assume that the caller holds pvalloc_lock
 */

static struct pv_entry *
pmap_alloc_pvpage(struct pmap *pmap, int mode)
{
	struct vm_page *pg;
	struct pv_page *pvpage;
	struct pv_entry *pv;
	int s;

	/*
	 * if we need_entry and we've got unused pv_pages, allocate from there
	 */

	pvpage = TAILQ_FIRST(&pv_unusedpgs);
	if (mode != ALLOCPV_NONEED && pvpage != NULL) {

		/* move it to pv_freepages list */
		TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);

		/* allocate a pv_entry */
		pvpage->pvinfo.pvpi_nfree--;	/* can't go to zero */
		pv = pvpage->pvinfo.pvpi_pvfree;
		KASSERT(pv);
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;

		pv_nfpvents--;  /* took one from pool */
		return(pv);
	}

	/*
	 * see if we've got a cached unmapped VA that we can map a page in.
	 * if not, try to allocate one.
	 */

	if (pv_cachedva == 0) {
		s = splvm();
		pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
		    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
		splx(s);
		if (pv_cachedva == 0) {
			return (NULL);
		}
	}

	pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
	    UVM_PGA_USERESERVE);

	if (pg == NULL)
		return (NULL);
	pg->flags &= ~PG_BUSY;	/* never busy */

	/*
	 * add a mapping for our new pv_page and free its entrys (save one!)
	 *
	 * NOTE: If we are allocating a PV page for the kernel pmap, the
	 * pmap is already locked!  (...but entering the mapping is safe...)
	 */

	pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
		VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	pvpage = (struct pv_page *) pv_cachedva;
	pv_cachedva = 0;
	return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
}

/*
 * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
 *
 * => caller must hold pvalloc_lock
 * => if need_entry is true, we allocate and return one pv_entry
 */

static struct pv_entry *
pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
{
	int tofree, lcv;

	/* do we need to return one? */
	tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
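
	/*
	 * Note: when need_entry is true, the last slot (pvents[tofree])
	 * is deliberately left off the free list; it is the entry handed
	 * back to the caller at the bottom of this function.
	 */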

	pvp->pvinfo.pvpi_pvfree = NULL;
	pvp->pvinfo.pvpi_nfree = tofree;
	for (lcv = 0 ; lcv < tofree ; lcv++) {
		pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
		pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
	}
	if (need_entry)
		TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
	else
		TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	pv_nfpvents += tofree;
	return((need_entry) ? &pvp->pvents[lcv] : NULL);
}

/*
 * pmap_free_pv_doit: actually free a pv_entry
 *
 * => do not call this directly!  instead use either
 *    1. pmap_free_pv ==> free a single pv_entry
 *    2. pmap_free_pvs => free a list of pv_entrys
 * => we must be holding pvalloc_lock
 */

__inline static void
pmap_free_pv_doit(struct pv_entry *pv)
{
	struct pv_page *pvp;

	pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
	pv_nfpvents++;
	pvp->pvinfo.pvpi_nfree++;

	/* nfree == 1 => fully allocated page just became partly allocated */
	if (pvp->pvinfo.pvpi_nfree == 1) {
		TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
	}

	/* free it */
	pv->pv_next = pvp->pvinfo.pvpi_pvfree;
	pvp->pvinfo.pvpi_pvfree = pv;

	/*
	 * are all pv_page's pv_entry's free?  move it to unused queue.
	 */

	if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
		TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	}
}

/*
 * pmap_free_pv: free a single pv_entry
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
{
	simple_lock(&pvalloc_lock);
	pmap_free_pv_doit(pv);

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */
	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}

/*
 * pmap_free_pvs: free a list of pv_entrys
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
{
	struct pv_entry *nextpv;

	simple_lock(&pvalloc_lock);

	for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
		nextpv = pvs->pv_next;
		pmap_free_pv_doit(pvs);
	}

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */
	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}


/*
 * pmap_free_pvpage: try and free an unused pv_page structure
 *
 * => assume caller is holding the pvalloc_lock and that
 *	there is a page on the pv_unusedpgs list
 * => if we can't get a lock on the kmem_map we try again later
 */

static void
pmap_free_pvpage(void)
{
	int s;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	struct pv_page *pvp;

	s = splvm(); /* protect kmem_map */

	pvp = TAILQ_FIRST(&pv_unusedpgs);

	/*
	 * note: watch out for pv_initpage which is allocated out of
	 * kernel_map rather than kmem_map.
	 */
	if (pvp == pv_initpage)
		map = kernel_map;
	else
		map = kmem_map;
	if (vm_map_lock_try(map)) {

		/* remove pvp from pv_unusedpgs */
		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);

		/* unmap the page */
		dead_entries = NULL;
		uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
		    &dead_entries);
		vm_map_unlock(map);

		if (dead_entries != NULL)
			uvm_unmap_detach(dead_entries, 0);

		pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
	}
	if (pvp == pv_initpage)
		/* no more initpage, we've freed it */
		pv_initpage = NULL;

	splx(s);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */

__inline static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
    vaddr_t va, struct vm_page *ptp, int flags)
{
	pve->pv_pmap = pmap;
	pve->pv_va = va;
	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
	pve->pv_flags = flags;
	simple_lock(&pg->mdpage.pvh_slock);	/* lock vm_page */
	pve->pv_next = pg->mdpage.pvh_list;	/* add to ... */
	pg->mdpage.pvh_list = pve;		/* ... locked list */
	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */
	if (pve->pv_flags & PVF_WIRED)
		++pmap->pm_stats.wired_count;
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */

__inline static struct pv_entry *
pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
{
	struct pv_entry *pve, **prevptr;

	prevptr = &pg->mdpage.pvh_list;		/* previous pv_entry pointer */
	pve = *prevptr;
	while (pve) {
		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
			*prevptr = pve->pv_next;		/* remove it! */
			if (pve->pv_flags & PVF_WIRED)
			    --pmap->pm_stats.wired_count;
			break;
		}
		prevptr = &pve->pv_next;		/* previous pointer */
		pve = pve->pv_next;			/* advance */
	}
	return(pve);				/* return removed pve */
}
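
/*
 * Sketch of the expected caller pattern (the real removal loops appear
 * later in this file):
 *
 *	simple_lock(&pg->mdpage.pvh_slock);
 *	pve = pmap_remove_pv(pg, pmap, va);
 *	simple_unlock(&pg->mdpage.pvh_slock);
 *	if (pve != NULL)
 *		pmap_free_pv(pmap, pve);
 *
 * A NULL return simply means no matching mapping was on the list.
 */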

/*
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => caller must call pmap_vac_me_harder() if writable status of a page
 *    may have changed.
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */

static /* __inline */ u_int
pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
    u_int bic_mask, u_int eor_mask)
{
	struct pv_entry *npv;
	u_int flags, oflags;

	/*
	 * There is at least one VA mapping this page.
	 */

	for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
		if (pmap == npv->pv_pmap && va == npv->pv_va) {
			oflags = npv->pv_flags;
			npv->pv_flags = flags =
			    ((oflags & ~bic_mask) ^ eor_mask);
			if ((flags ^ oflags) & PVF_WIRED) {
				if (flags & PVF_WIRED)
					++pmap->pm_stats.wired_count;
				else
					--pmap->pm_stats.wired_count;
			}
			return (oflags);
		}
	}
	return (0);
}
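
/*
 * Example (a sketch): pmap_unwire()-style code can clear the wired bit
 * and inspect the previous state via the returned old flags:
 *
 *	oflags = pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
 *	if (oflags & PVF_WIRED)
 *		... the mapping was wired; wired_count already adjusted ...
 */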

/*
 * Map the specified level 2 pagetable into the level 1 page table for
 * the given pmap to cover a chunk of virtual address space starting from the
 * address specified.
 */
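/*
 * Each ARM L1 entry maps 1MB of virtual space, but a coarse L2 table is
 * only 1KB, so one 4KB page of L2 entries backs four consecutive L1
 * slots.  That is why ptva below is rounded down to a multiple of four
 * and the four L1 entries point 0x400 bytes apart into the L2 page.
 */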
static __inline void
pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, boolean_t selfref)
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> L1_S_SHIFT) & ~3;

	NPDEBUG(PDB_MAP_L1, printf("wiring %08lx in to pd%p pte0x%lx va0x%lx\n", l2pa,
	    pmap->pm_pdir, L1_PTE(l2pa), ptva));

	/* Map page table into the L1. */
	pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
	pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
	pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
	pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);

	/* Map the page table into the page table area. */
	if (selfref)
		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
}

#if 0
static __inline void
pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> L1_S_SHIFT) & ~3;

	/* Unmap page table from the L1. */
	pmap->pm_pdir[ptva + 0] = 0;
	pmap->pm_pdir[ptva + 1] = 0;
	pmap->pm_pdir[ptva + 2] = 0;
	pmap->pm_pdir[ptva + 3] = 0;

	/* Unmap the page table from the page table area. */
	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
}
#endif

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vaddr_t
pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
{
	while (spa < epa) {
		pmap_kenter_pa(va, spa, prot);
		va += NBPG;
		spa += NBPG;
	}
	pmap_update(pmap_kernel());
	return(va);
}
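
/*
 * Example (hypothetical names and addresses): machine-dependent startup
 * code can use this to give a buffer a permanent kernel mapping, e.g.
 *
 *	va = pmap_map(va, msgbufphys, msgbufphys + MSGBUFSIZE,
 *	    VM_PROT_READ|VM_PROT_WRITE);
 *
 * The return value is the first VA past the new mapping, so calls can
 * be chained.
 */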


/*
 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
 *
 * bootstrap the pmap system. This is called from initarm and allows
 * the pmap system to initialise any structures it requires.
 *
 * Currently this sets up the kernel_pmap that is statically allocated
 * and also allocates virtual addresses for certain page hooks.
 * Currently only one page hook is allocated, which is used
 * to zero physical pages of memory.
 * It also initialises the start and end address of the kernel data space.
 */
extern paddr_t physical_freestart;
extern paddr_t physical_freeend;

char *boot_head;

void
pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
{
	pt_entry_t *pte;
	int loop;
	paddr_t start, end;
#if NISADMA > 0
	paddr_t istart;
	psize_t isize;
#endif

	pmap_kernel()->pm_pdir = kernel_l1pt;
	pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
	pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
	simple_lock_init(&pmap_kernel()->pm_lock);
	pmap_kernel()->pm_obj.pgops = NULL;
	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
	pmap_kernel()->pm_obj.uo_npages = 0;
	pmap_kernel()->pm_obj.uo_refs = 1;

	/*
	 * Initialize PAGE_SIZE-dependent variables.
	 */
	uvm_setpagesize();

	loop = 0;
	while (loop < bootconfig.dramblocks) {
		start = (paddr_t)bootconfig.dram[loop].address;
		end = start + (bootconfig.dram[loop].pages * NBPG);
		if (start < physical_freestart)
			start = physical_freestart;
		if (end > physical_freeend)
			end = physical_freeend;
#if 0
		printf("%d: %lx -> %lx\n", loop, start, end - 1);
#endif
#if NISADMA > 0
		if (pmap_isa_dma_range_intersect(start, end - start,
		    &istart, &isize)) {
			/*
			 * Place the pages that intersect with the
			 * ISA DMA range onto the ISA DMA free list.
			 */
#if 0
			printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
			    istart + isize - 1);
#endif
			uvm_page_physload(atop(istart),
			    atop(istart + isize), atop(istart),
			    atop(istart + isize), VM_FREELIST_ISADMA);

			/*
			 * Load the pieces that come before
			 * the intersection into the default
			 * free list.
			 */
			if (start < istart) {
#if 0
				printf("    BEFORE 0x%lx -> 0x%lx\n",
				    start, istart - 1);
#endif
				uvm_page_physload(atop(start),
				    atop(istart), atop(start),
				    atop(istart), VM_FREELIST_DEFAULT);
			}

			/*
			 * Load the pieces that come after
			 * the intersection into the default
			 * free list.
			 */
			if ((istart + isize) < end) {
#if 0
				printf("     AFTER 0x%lx -> 0x%lx\n",
				    (istart + isize), end - 1);
#endif
				uvm_page_physload(atop(istart + isize),
				    atop(end), atop(istart + isize),
				    atop(end), VM_FREELIST_DEFAULT);
			}
		} else {
   1057       1.1      matt 			uvm_page_physload(atop(start), atop(end),
   1058       1.1      matt 			    atop(start), atop(end), VM_FREELIST_DEFAULT);
   1059       1.1      matt 		}
   1060       1.1      matt #else	/* NISADMA > 0 */
   1061       1.1      matt 		uvm_page_physload(atop(start), atop(end),
   1062       1.1      matt 		    atop(start), atop(end), VM_FREELIST_DEFAULT);
   1063       1.1      matt #endif /* NISADMA > 0 */
   1064       1.1      matt 		++loop;
   1065       1.1      matt 	}
   1066       1.1      matt 
   1067  1.14.2.7  jdolecek 	virtual_avail = KERNEL_VM_BASE;
   1068  1.14.2.7  jdolecek 	virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;
   1069       1.1      matt 
   1070       1.1      matt 	/*
   1071  1.14.2.7  jdolecek 	 * now we allocate the "special" VAs which are used for tmp mappings
   1072  1.14.2.7  jdolecek 	 * by the pmap (and other modules).  we allocate the VAs by advancing
   1073  1.14.2.7  jdolecek 	 * virtual_avail (note that there are no pages mapped at these VAs).
   1074  1.14.2.7  jdolecek 	 * we find the PTE that maps the allocated VA via the linear PTE
   1075  1.14.2.7  jdolecek 	 * mapping.
   1076       1.1      matt 	 */
   1077       1.1      matt 
   1078  1.14.2.7  jdolecek 	pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);
   1079  1.14.2.7  jdolecek 
   1080  1.14.2.7  jdolecek 	csrcp = virtual_avail; csrc_pte = pte;
   1081  1.14.2.7  jdolecek 	virtual_avail += PAGE_SIZE; pte++;
   1082  1.14.2.7  jdolecek 
   1083  1.14.2.7  jdolecek 	cdstp = virtual_avail; cdst_pte = pte;
   1084  1.14.2.7  jdolecek 	virtual_avail += PAGE_SIZE; pte++;
   1085  1.14.2.7  jdolecek 
   1086  1.14.2.7  jdolecek 	memhook = (char *) virtual_avail;	/* don't need pte */
   1087  1.14.2.7  jdolecek 	virtual_avail += PAGE_SIZE; pte++;
   1088  1.14.2.7  jdolecek 
   1089  1.14.2.7  jdolecek 	msgbufaddr = (caddr_t) virtual_avail;	/* don't need pte */
   1090  1.14.2.7  jdolecek 	virtual_avail += round_page(MSGBUFSIZE);
   1091  1.14.2.7  jdolecek 	pte += atop(round_page(MSGBUFSIZE));
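                             	/*
                             	 * To illustrate the linear PTE mapping used above: the PTE for
                             	 * a virtual address "v" lives atop(v) entries past PTE_BASE,
                             	 * so csrc_pte == vtopte(csrcp) and cdst_pte == vtopte(cdstp).
                             	 */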
   1092       1.1      matt 
   1093  1.14.2.2   thorpej 	/*
   1094  1.14.2.2   thorpej 	 * init the static-global locks and global lists.
   1095  1.14.2.2   thorpej 	 */
   1096  1.14.2.2   thorpej 	spinlockinit(&pmap_main_lock, "pmaplk", 0);
   1097  1.14.2.2   thorpej 	simple_lock_init(&pvalloc_lock);
   1098  1.14.2.6  jdolecek 	simple_lock_init(&pmaps_lock);
   1099  1.14.2.6  jdolecek 	LIST_INIT(&pmaps);
   1100  1.14.2.2   thorpej 	TAILQ_INIT(&pv_freepages);
   1101  1.14.2.2   thorpej 	TAILQ_INIT(&pv_unusedpgs);
   1102       1.1      matt 
   1103      1.10     chris 	/*
   1104      1.10     chris 	 * initialize the pmap pool.
   1105      1.10     chris 	 */
   1106      1.10     chris 
   1107      1.10     chris 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
   1108  1.14.2.6  jdolecek 		  &pool_allocator_nointr);
   1109      1.10     chris 
   1110  1.14.2.5  jdolecek 	cpu_dcache_wbinv_all();
   1111       1.1      matt }
   1112       1.1      matt 
   1113       1.1      matt /*
   1114       1.1      matt  * void pmap_init(void)
   1115       1.1      matt  *
   1116       1.1      matt  * Initialize the pmap module.
   1117       1.1      matt  * Called by vm_init() in vm/vm_init.c in order to initialise
   1118       1.1      matt  * any structures that the pmap system needs to map virtual memory.
   1119       1.1      matt  */
   1120       1.1      matt 
   1121       1.1      matt extern int physmem;
   1122       1.1      matt 
   1123       1.1      matt void
   1124  1.14.2.7  jdolecek pmap_init(void)
   1125       1.1      matt {
   1126       1.1      matt 
   1127       1.1      matt 	/*
    1128       1.1      matt 	 * Set the available memory vars - these do not map to real memory
    1129       1.1      matt 	 * addresses and cannot, as the physical memory is fragmented.
    1130       1.1      matt 	 * They are used by ps for %mem calculations.
    1131       1.1      matt 	 * One could argue whether this should be the entire memory or just
    1132       1.1      matt 	 * the memory that is usable in a user process.
   1133       1.1      matt 	 */
   1134       1.1      matt 	avail_start = 0;
   1135       1.1      matt 	avail_end = physmem * NBPG;
   1136       1.1      matt 
   1137  1.14.2.2   thorpej 	/*
    1138  1.14.2.2   thorpej 	 * now we need enough free pv_entry structures to allow us to get
   1139  1.14.2.2   thorpej 	 * the kmem_map/kmem_object allocated and inited (done after this
   1140  1.14.2.2   thorpej 	 * function is finished).  to do this we allocate one bootstrap page out
   1141  1.14.2.2   thorpej 	 * of kernel_map and use it to provide an initial pool of pv_entry
   1142  1.14.2.2   thorpej 	 * structures.   we never free this page.
   1143  1.14.2.2   thorpej 	 */
   1144  1.14.2.2   thorpej 
   1145  1.14.2.2   thorpej 	pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
   1146  1.14.2.2   thorpej 	if (pv_initpage == NULL)
   1147  1.14.2.2   thorpej 		panic("pmap_init: pv_initpage");
   1148  1.14.2.2   thorpej 	pv_cachedva = 0;   /* a VA we have allocated but not used yet */
   1149  1.14.2.2   thorpej 	pv_nfpvents = 0;
   1150  1.14.2.2   thorpej 	(void) pmap_add_pvpage(pv_initpage, FALSE);
   1151  1.14.2.2   thorpej 
   1152       1.1      matt 	pmap_initialized = TRUE;
   1153       1.1      matt 
   1154       1.1      matt 	/* Initialise our L1 page table queues and counters */
   1155       1.1      matt 	SIMPLEQ_INIT(&l1pt_static_queue);
   1156       1.1      matt 	l1pt_static_queue_count = 0;
   1157       1.1      matt 	l1pt_static_create_count = 0;
   1158       1.1      matt 	SIMPLEQ_INIT(&l1pt_queue);
   1159       1.1      matt 	l1pt_queue_count = 0;
   1160       1.1      matt 	l1pt_create_count = 0;
   1161       1.1      matt 	l1pt_reuse_count = 0;
   1162       1.1      matt }
   1163       1.1      matt 
   1164       1.1      matt /*
   1165       1.1      matt  * pmap_postinit()
   1166       1.1      matt  *
   1167       1.1      matt  * This routine is called after the vm and kmem subsystems have been
   1168       1.1      matt  * initialised. This allows the pmap code to perform any initialisation
    1169       1.1      matt  * that can only be done once the memory allocation is in place.
   1170       1.1      matt  */
   1171       1.1      matt 
   1172       1.1      matt void
   1173  1.14.2.7  jdolecek pmap_postinit(void)
   1174       1.1      matt {
   1175       1.1      matt 	int loop;
   1176       1.1      matt 	struct l1pt *pt;
   1177       1.1      matt 
   1178       1.1      matt #ifdef PMAP_STATIC_L1S
   1179       1.1      matt 	for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
   1180       1.1      matt #else	/* PMAP_STATIC_L1S */
   1181       1.1      matt 	for (loop = 0; loop < max_processes; ++loop) {
   1182       1.1      matt #endif	/* PMAP_STATIC_L1S */
   1183       1.1      matt 		/* Allocate a L1 page table */
   1184       1.1      matt 		pt = pmap_alloc_l1pt();
   1185       1.1      matt 		if (!pt)
    1186       1.1      matt 			panic("Cannot allocate static L1 page tables");
   1187       1.1      matt 
   1188       1.1      matt 		/* Clean it */
   1189  1.14.2.7  jdolecek 		bzero((void *)pt->pt_va, L1_TABLE_SIZE);
   1190       1.1      matt 		pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
   1191       1.1      matt 		/* Add the page table to the queue */
   1192       1.1      matt 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
   1193       1.1      matt 		++l1pt_static_queue_count;
   1194       1.1      matt 		++l1pt_static_create_count;
   1195       1.1      matt 	}
   1196       1.1      matt }
   1197       1.1      matt 
   1198       1.1      matt 
   1199       1.1      matt /*
   1200       1.1      matt  * Create and return a physical map.
   1201       1.1      matt  *
    1202       1.1      matt  * The map is an actual physical map, and may be referenced by the
    1203       1.1      matt  * hardware.
   1207       1.1      matt  */
   1208       1.1      matt 
   1209       1.1      matt pmap_t
   1210  1.14.2.7  jdolecek pmap_create(void)
   1211       1.1      matt {
   1212  1.14.2.1     lukem 	struct pmap *pmap;
   1213       1.1      matt 
   1214      1.10     chris 	/*
   1215      1.10     chris 	 * Fetch pmap entry from the pool
   1216      1.10     chris 	 */
   1217      1.10     chris 
   1218      1.10     chris 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
    1219  1.14.2.2   thorpej 	/* XXX is this really needed? */
   1220  1.14.2.2   thorpej 	memset(pmap, 0, sizeof(*pmap));
   1221       1.1      matt 
   1222  1.14.2.1     lukem 	simple_lock_init(&pmap->pm_obj.vmobjlock);
   1223  1.14.2.1     lukem 	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
   1224  1.14.2.1     lukem 	TAILQ_INIT(&pmap->pm_obj.memq);
   1225  1.14.2.1     lukem 	pmap->pm_obj.uo_npages = 0;
   1226  1.14.2.1     lukem 	pmap->pm_obj.uo_refs = 1;
   1227  1.14.2.1     lukem 	pmap->pm_stats.wired_count = 0;
   1228  1.14.2.1     lukem 	pmap->pm_stats.resident_count = 1;
   1229  1.14.2.7  jdolecek 	pmap->pm_ptphint = NULL;
   1230  1.14.2.1     lukem 
   1231       1.1      matt 	/* Now init the machine part of the pmap */
   1232       1.1      matt 	pmap_pinit(pmap);
   1233       1.1      matt 	return(pmap);
   1234       1.1      matt }
   1235       1.1      matt 
   1236       1.1      matt /*
   1237       1.1      matt  * pmap_alloc_l1pt()
   1238       1.1      matt  *
    1239       1.1      matt  * This routine allocates physical and virtual memory for an L1 page table
    1240       1.1      matt  * and wires it.
    1241       1.1      matt  * An l1pt structure is returned to describe the allocated page table.
   1242       1.1      matt  *
   1243       1.1      matt  * This routine is allowed to fail if the required memory cannot be allocated.
   1244       1.1      matt  * In this case NULL is returned.
   1245       1.1      matt  */
   1246       1.1      matt 
   1247       1.1      matt struct l1pt *
   1248       1.1      matt pmap_alloc_l1pt(void)
   1249       1.1      matt {
   1250       1.2      matt 	paddr_t pa;
   1251       1.2      matt 	vaddr_t va;
   1252       1.1      matt 	struct l1pt *pt;
   1253       1.1      matt 	int error;
   1254       1.9       chs 	struct vm_page *m;
   1255  1.14.2.7  jdolecek 	pt_entry_t *pte;
   1256       1.1      matt 
   1257       1.1      matt 	/* Allocate virtual address space for the L1 page table */
   1258  1.14.2.7  jdolecek 	va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
   1259       1.1      matt 	if (va == 0) {
   1260       1.1      matt #ifdef DIAGNOSTIC
   1261  1.14.2.4   thorpej 		PDEBUG(0,
   1262  1.14.2.4   thorpej 		    printf("pmap: Cannot allocate pageable memory for L1\n"));
   1263       1.1      matt #endif	/* DIAGNOSTIC */
   1264       1.1      matt 		return(NULL);
   1265       1.1      matt 	}
   1266       1.1      matt 
   1267       1.1      matt 	/* Allocate memory for the l1pt structure */
   1268       1.1      matt 	pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
   1269       1.1      matt 
   1270       1.1      matt 	/*
   1271       1.1      matt 	 * Allocate pages from the VM system.
   1272       1.1      matt 	 */
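                             	/*
                             	 * The L1 table must be physically contiguous and aligned to
                             	 * L1_TABLE_SIZE (16KB); the translation table base register
                             	 * holds only bits [31:14] of the table's physical address.
                             	 */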
   1273  1.14.2.7  jdolecek 	error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
   1274  1.14.2.7  jdolecek 	    L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
   1275       1.1      matt 	if (error) {
   1276       1.1      matt #ifdef DIAGNOSTIC
   1277  1.14.2.4   thorpej 		PDEBUG(0,
   1278  1.14.2.4   thorpej 		    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
   1279  1.14.2.4   thorpej 		    error));
   1280       1.1      matt #endif	/* DIAGNOSTIC */
   1281       1.1      matt 		/* Release the resources we already have claimed */
   1282       1.1      matt 		free(pt, M_VMPMAP);
   1283  1.14.2.7  jdolecek 		uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
   1284       1.1      matt 		return(NULL);
   1285       1.1      matt 	}
   1286       1.1      matt 
   1287       1.1      matt 	/* Map our physical pages into our virtual space */
   1288       1.1      matt 	pt->pt_va = va;
   1289  1.14.2.6  jdolecek 	m = TAILQ_FIRST(&pt->pt_plist);
   1290  1.14.2.7  jdolecek 	while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
   1291       1.1      matt 		pa = VM_PAGE_TO_PHYS(m);
   1292       1.1      matt 
   1293  1.14.2.7  jdolecek 		pte = vtopte(va);
   1294       1.1      matt 
   1295  1.14.2.7  jdolecek 		/*
   1296  1.14.2.7  jdolecek 		 * Assert that the PTE is invalid.  If it's invalid,
   1297  1.14.2.7  jdolecek 		 * then we are guaranteed that there won't be an entry
   1298  1.14.2.7  jdolecek 		 * for this VA in the TLB.
   1299  1.14.2.7  jdolecek 		 */
   1300  1.14.2.7  jdolecek 		KDASSERT(pmap_pte_v(pte) == 0);
   1301  1.14.2.7  jdolecek 
   1302  1.14.2.7  jdolecek 		*pte = L2_S_PROTO | VM_PAGE_TO_PHYS(m) |
   1303  1.14.2.7  jdolecek 		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
   1304       1.1      matt 
   1305       1.1      matt 		va += NBPG;
    1306       1.1      matt 		m = TAILQ_NEXT(m, pageq);
   1307       1.1      matt 	}
   1308       1.1      matt 
   1309       1.1      matt #ifdef DIAGNOSTIC
   1310       1.1      matt 	if (m)
    1311       1.1      matt 		panic("pmap_alloc_l1pt: pglist not empty");
   1312       1.1      matt #endif	/* DIAGNOSTIC */
   1313       1.1      matt 
   1314       1.1      matt 	pt->pt_flags = 0;
   1315       1.1      matt 	return(pt);
   1316       1.1      matt }
   1317       1.1      matt 
   1318       1.1      matt /*
   1319       1.1      matt  * Free a L1 page table previously allocated with pmap_alloc_l1pt().
   1320       1.1      matt  */
   1321  1.14.2.4   thorpej static void
   1322  1.14.2.7  jdolecek pmap_free_l1pt(struct l1pt *pt)
   1323       1.1      matt {
    1324       1.1      matt 	/* Separate the physical memory from the virtual space */
   1325  1.14.2.7  jdolecek 	pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
   1326  1.14.2.3   thorpej 	pmap_update(pmap_kernel());
   1327       1.1      matt 
   1328       1.1      matt 	/* Return the physical memory */
   1329       1.1      matt 	uvm_pglistfree(&pt->pt_plist);
   1330       1.1      matt 
   1331       1.1      matt 	/* Free the virtual space */
   1332  1.14.2.7  jdolecek 	uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);
   1333       1.1      matt 
   1334       1.1      matt 	/* Free the l1pt structure */
   1335       1.1      matt 	free(pt, M_VMPMAP);
   1336       1.1      matt }
   1337       1.1      matt 
   1338       1.1      matt /*
   1339  1.14.2.7  jdolecek  * pmap_alloc_ptpt:
   1340  1.14.2.7  jdolecek  *
   1341  1.14.2.7  jdolecek  *	Allocate the page table that maps the PTE array.
   1342  1.14.2.7  jdolecek  */
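                             /*
                              * (The PTPT is a single L2 page table whose 1024 entries map the 4MB
                              * PT map: the linearly-addressed array of the pmap's PTEs that appears
                              * at PTE_BASE.  Each 4-byte PTPT entry maps one page of that array,
                              * i.e. the PTEs covering 4MB of address space.)
                              */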
   1343  1.14.2.7  jdolecek static int
   1344  1.14.2.7  jdolecek pmap_alloc_ptpt(struct pmap *pmap)
   1345  1.14.2.7  jdolecek {
   1346  1.14.2.7  jdolecek 	struct vm_page *pg;
   1347  1.14.2.7  jdolecek 	pt_entry_t *pte;
   1348  1.14.2.7  jdolecek 
   1349  1.14.2.7  jdolecek 	KASSERT(pmap->pm_vptpt == 0);
   1350  1.14.2.7  jdolecek 
   1351  1.14.2.7  jdolecek 	pmap->pm_vptpt = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
   1352  1.14.2.7  jdolecek 	if (pmap->pm_vptpt == 0) {
   1353  1.14.2.7  jdolecek 		PDEBUG(0,
   1354  1.14.2.7  jdolecek 		    printf("pmap_alloc_ptpt: no KVA for PTPT\n"));
   1355  1.14.2.7  jdolecek 		return (ENOMEM);
   1356  1.14.2.7  jdolecek 	}
   1357  1.14.2.7  jdolecek 
   1358  1.14.2.7  jdolecek 	for (;;) {
   1359  1.14.2.7  jdolecek 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
   1360  1.14.2.7  jdolecek 		if (pg != NULL)
   1361  1.14.2.7  jdolecek 			break;
   1362  1.14.2.7  jdolecek 		uvm_wait("pmap_ptpt");
   1363  1.14.2.7  jdolecek 	}
   1364  1.14.2.7  jdolecek 
   1365  1.14.2.7  jdolecek 	pmap->pm_pptpt = VM_PAGE_TO_PHYS(pg);
   1366  1.14.2.7  jdolecek 
   1367  1.14.2.7  jdolecek 	pte = vtopte(pmap->pm_vptpt);
   1368  1.14.2.7  jdolecek 
   1369  1.14.2.7  jdolecek 	KDASSERT(pmap_pte_v(pte) == 0);
   1370  1.14.2.7  jdolecek 
   1371  1.14.2.7  jdolecek 	*pte = L2_S_PROTO | pmap->pm_pptpt |
   1372  1.14.2.7  jdolecek 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
   1373  1.14.2.7  jdolecek 
   1374  1.14.2.7  jdolecek 	return (0);
   1375  1.14.2.7  jdolecek }
   1376  1.14.2.7  jdolecek 
   1377  1.14.2.7  jdolecek /*
   1378  1.14.2.7  jdolecek  * pmap_free_ptpt:
   1379  1.14.2.7  jdolecek  *
   1380  1.14.2.7  jdolecek  *	Free the page table that maps the PTE array.
   1381  1.14.2.7  jdolecek  */
   1382  1.14.2.7  jdolecek static void
   1383  1.14.2.7  jdolecek pmap_free_ptpt(struct pmap *pmap)
   1384  1.14.2.7  jdolecek {
   1385  1.14.2.7  jdolecek 
   1386  1.14.2.7  jdolecek 	pmap_kremove(pmap->pm_vptpt, L2_TABLE_SIZE);
   1387  1.14.2.7  jdolecek 	pmap_update(pmap_kernel());
   1388  1.14.2.7  jdolecek 
   1389  1.14.2.7  jdolecek 	uvm_pagefree(PHYS_TO_VM_PAGE(pmap->pm_pptpt));
   1390  1.14.2.7  jdolecek 
   1391  1.14.2.7  jdolecek 	uvm_km_free(kernel_map, pmap->pm_vptpt, L2_TABLE_SIZE);
   1392  1.14.2.7  jdolecek }
   1393  1.14.2.7  jdolecek 
   1394  1.14.2.7  jdolecek /*
   1395       1.1      matt  * Allocate a page directory.
   1396       1.1      matt  * This routine will either allocate a new page directory from the pool
   1397       1.1      matt  * of L1 page tables currently held by the kernel or it will allocate
   1398       1.1      matt  * a new one via pmap_alloc_l1pt().
   1399       1.1      matt  * It will then initialise the l1 page table for use.
   1400       1.1      matt  */
   1401  1.14.2.4   thorpej static int
   1402  1.14.2.7  jdolecek pmap_allocpagedir(struct pmap *pmap)
   1403       1.1      matt {
   1404       1.2      matt 	paddr_t pa;
   1405       1.1      matt 	struct l1pt *pt;
   1406  1.14.2.7  jdolecek 	int error;
   1407       1.1      matt 
   1408       1.1      matt 	PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
   1409       1.1      matt 
    1410       1.1      matt 	/* Do we have any spare L1s lying around? */
   1411       1.1      matt 	if (l1pt_static_queue_count) {
   1412       1.1      matt 		--l1pt_static_queue_count;
   1413  1.14.2.7  jdolecek 		pt = SIMPLEQ_FIRST(&l1pt_static_queue);
   1414  1.14.2.7  jdolecek 		SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt_queue);
   1415       1.1      matt 	} else if (l1pt_queue_count) {
   1416       1.1      matt 		--l1pt_queue_count;
   1417  1.14.2.7  jdolecek 		pt = SIMPLEQ_FIRST(&l1pt_queue);
   1418  1.14.2.7  jdolecek 		SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt_queue);
   1419       1.1      matt 		++l1pt_reuse_count;
   1420       1.1      matt 	} else {
   1421       1.1      matt 		pt = pmap_alloc_l1pt();
   1422       1.1      matt 		if (!pt)
   1423       1.1      matt 			return(ENOMEM);
   1424       1.1      matt 		++l1pt_create_count;
   1425       1.1      matt 	}
   1426       1.1      matt 
   1427       1.1      matt 	/* Store the pointer to the l1 descriptor in the pmap. */
   1428       1.1      matt 	pmap->pm_l1pt = pt;
   1429       1.1      matt 
   1430       1.1      matt 	/* Get the physical address of the start of the l1 */
   1431  1.14.2.6  jdolecek 	pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));
   1432       1.1      matt 
   1433       1.1      matt 	/* Store the virtual address of the l1 in the pmap. */
   1434       1.1      matt 	pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
   1435       1.1      matt 
   1436       1.1      matt 	/* Clean the L1 if it is dirty */
   1437       1.1      matt 	if (!(pt->pt_flags & PTFLAG_CLEAN))
   1438  1.14.2.7  jdolecek 		bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
   1439       1.1      matt 
   1440       1.1      matt 	/* Allocate a page table to map all the page tables for this pmap */
   1441  1.14.2.7  jdolecek 	if ((error = pmap_alloc_ptpt(pmap)) != 0) {
   1442  1.14.2.7  jdolecek 		pmap_freepagedir(pmap);
   1443  1.14.2.7  jdolecek 		return (error);
   1444       1.5    toshii 	}
   1445       1.5    toshii 
   1446  1.14.2.7  jdolecek 	/* need to lock this all up for growkernel */
   1447  1.14.2.6  jdolecek 	simple_lock(&pmaps_lock);
   1448  1.14.2.6  jdolecek 
   1449  1.14.2.7  jdolecek 	/* Duplicate the kernel mappings. */
   1450  1.14.2.7  jdolecek 	bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
   1451  1.14.2.7  jdolecek 		(char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
   1452  1.14.2.6  jdolecek 		KERNEL_PD_SIZE);
   1453  1.14.2.6  jdolecek 
   1454       1.1      matt 	/* Wire in this page table */
   1455  1.14.2.7  jdolecek 	pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, TRUE);
   1456       1.1      matt 
   1457       1.1      matt 	pt->pt_flags &= ~PTFLAG_CLEAN;	/* L1 is dirty now */
   1458  1.14.2.6  jdolecek 
   1459       1.1      matt 	/*
   1460  1.14.2.7  jdolecek 	 * Map the kernel page tables into the new PT map.
   1461  1.14.2.7  jdolecek 	 */
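                             	/*
                             	 * The source address is derived as follows: PTE_BASE >>
                             	 * (PGSHIFT - 2) is the offset within the PT map of the PTEs
                             	 * that map the PT map itself (the PTPT).  Each 4-byte PTPT
                             	 * entry describes 16 bytes of L1 (four 1MB sections), so the
                             	 * L1 byte offsets below are scaled by ">> 2" to index the PTPT.
                             	 */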
   1462  1.14.2.7  jdolecek 	bcopy((char *)(PTE_BASE
   1463  1.14.2.7  jdolecek 	    + (PTE_BASE >> (PGSHIFT - 2))
   1464  1.14.2.7  jdolecek 	    + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
   1465  1.14.2.7  jdolecek 	    (char *)pmap->pm_vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
   1466       1.1      matt 	    (KERNEL_PD_SIZE >> 2));
   1467       1.1      matt 
   1468  1.14.2.6  jdolecek 	LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
   1469  1.14.2.6  jdolecek 	simple_unlock(&pmaps_lock);
   1470  1.14.2.6  jdolecek 
   1471       1.1      matt 	return(0);
   1472       1.1      matt }
   1473       1.1      matt 
   1474       1.1      matt 
   1475       1.1      matt /*
   1476       1.1      matt  * Initialize a preallocated and zeroed pmap structure,
   1477       1.1      matt  * such as one in a vmspace structure.
   1478       1.1      matt  */
   1479       1.1      matt 
   1480       1.1      matt void
   1481  1.14.2.7  jdolecek pmap_pinit(struct pmap *pmap)
   1482       1.1      matt {
   1483  1.14.2.4   thorpej 	int backoff = 6;
   1484  1.14.2.4   thorpej 	int retry = 10;
   1485  1.14.2.4   thorpej 
   1486       1.1      matt 	PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
   1487       1.1      matt 
   1488       1.1      matt 	/* Keep looping until we succeed in allocating a page directory */
   1489       1.1      matt 	while (pmap_allocpagedir(pmap) != 0) {
   1490       1.1      matt 		/*
   1491       1.1      matt 		 * Ok we failed to allocate a suitable block of memory for an
   1492       1.1      matt 		 * L1 page table. This means that either:
   1493       1.1      matt 		 * 1. 16KB of virtual address space could not be allocated
   1494       1.1      matt 		 * 2. 16KB of physically contiguous memory on a 16KB boundary
   1495       1.1      matt 		 *    could not be allocated.
   1496       1.1      matt 		 *
    1497  1.14.2.2   thorpej 		 * Since we cannot fail, we will sleep for a while and try
   1498  1.14.2.2   thorpej 		 * again.
   1499  1.14.2.4   thorpej 		 *
   1500  1.14.2.4   thorpej 		 * Searching for a suitable L1 PT is expensive:
   1501  1.14.2.4   thorpej 		 * to avoid hogging the system when memory is really
   1502  1.14.2.4   thorpej 		 * scarce, use an exponential back-off so that
   1503  1.14.2.4   thorpej 		 * eventually we won't retry more than once every 8
   1504  1.14.2.4   thorpej 		 * seconds.  This should allow other processes to run
   1505  1.14.2.4   thorpej 		 * to completion and free up resources.
   1506       1.1      matt 		 */
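                             		/*
                             		 * e.g. with the initial backoff of 6 the timeout below
                             		 * is (hz << 3) >> 6 == hz / 8 (1/8 second); once backoff
                             		 * has decayed to 0 it is hz << 3 (8 seconds).
                             		 */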
   1507  1.14.2.4   thorpej 		(void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
   1508  1.14.2.4   thorpej 		    NULL);
   1509  1.14.2.4   thorpej 		if (--retry == 0) {
   1510  1.14.2.4   thorpej 			retry = 10;
   1511  1.14.2.4   thorpej 			if (backoff)
   1512  1.14.2.4   thorpej 				--backoff;
   1513  1.14.2.4   thorpej 		}
   1514       1.1      matt 	}
   1515       1.1      matt 
   1516  1.14.2.7  jdolecek 	if (vector_page < KERNEL_BASE) {
   1517  1.14.2.7  jdolecek 		/*
   1518  1.14.2.7  jdolecek 		 * Map the vector page.  This will also allocate and map
   1519  1.14.2.7  jdolecek 		 * an L2 table for it.
   1520  1.14.2.7  jdolecek 		 */
   1521  1.14.2.7  jdolecek 		pmap_enter(pmap, vector_page, systempage.pv_pa,
   1522  1.14.2.7  jdolecek 		    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
   1523  1.14.2.7  jdolecek 		pmap_update(pmap);
   1524  1.14.2.7  jdolecek 	}
   1525       1.1      matt }
   1526       1.1      matt 
   1527       1.1      matt 
   1528       1.1      matt void
   1529  1.14.2.7  jdolecek pmap_freepagedir(struct pmap *pmap)
   1530       1.1      matt {
   1531       1.1      matt 	/* Free the memory used for the page table mapping */
   1532       1.5    toshii 	if (pmap->pm_vptpt != 0)
   1533  1.14.2.7  jdolecek 		pmap_free_ptpt(pmap);
   1534       1.1      matt 
   1535       1.1      matt 	/* junk the L1 page table */
   1536       1.1      matt 	if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
   1537       1.1      matt 		/* Add the page table to the queue */
   1538       1.1      matt 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
   1539       1.1      matt 		++l1pt_static_queue_count;
   1540       1.1      matt 	} else if (l1pt_queue_count < 8) {
   1541       1.1      matt 		/* Add the page table to the queue */
   1542       1.1      matt 		SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
   1543       1.1      matt 		++l1pt_queue_count;
   1544       1.1      matt 	} else
   1545       1.1      matt 		pmap_free_l1pt(pmap->pm_l1pt);
   1546       1.1      matt }
   1547       1.1      matt 
   1548       1.1      matt 
   1549       1.1      matt /*
   1550       1.1      matt  * Retire the given physical map from service.
   1551       1.1      matt  * Should only be called if the map contains no valid mappings.
   1552       1.1      matt  */
   1553       1.1      matt 
   1554       1.1      matt void
   1555  1.14.2.7  jdolecek pmap_destroy(struct pmap *pmap)
   1556       1.1      matt {
   1557  1.14.2.2   thorpej 	struct vm_page *page;
   1558       1.1      matt 	int count;
   1559       1.1      matt 
   1560       1.1      matt 	if (pmap == NULL)
   1561       1.1      matt 		return;
   1562       1.1      matt 
   1563       1.1      matt 	PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
   1564  1.14.2.2   thorpej 
   1565  1.14.2.2   thorpej 	/*
   1566  1.14.2.2   thorpej 	 * Drop reference count
   1567  1.14.2.2   thorpej 	 */
   1568  1.14.2.2   thorpej 	simple_lock(&pmap->pm_obj.vmobjlock);
   1569  1.14.2.1     lukem 	count = --pmap->pm_obj.uo_refs;
   1570  1.14.2.2   thorpej 	simple_unlock(&pmap->pm_obj.vmobjlock);
   1571  1.14.2.2   thorpej 	if (count > 0) {
   1572  1.14.2.2   thorpej 		return;
   1573       1.1      matt 	}
   1574       1.1      matt 
   1575  1.14.2.2   thorpej 	/*
   1576  1.14.2.2   thorpej 	 * reference count is zero, free pmap resources and then free pmap.
   1577  1.14.2.2   thorpej 	 */
   1578  1.14.2.6  jdolecek 
   1579  1.14.2.6  jdolecek 	/*
   1580  1.14.2.6  jdolecek 	 * remove it from global list of pmaps
   1581  1.14.2.6  jdolecek 	 */
   1582  1.14.2.6  jdolecek 
   1583  1.14.2.6  jdolecek 	simple_lock(&pmaps_lock);
   1584  1.14.2.6  jdolecek 	LIST_REMOVE(pmap, pm_list);
   1585  1.14.2.6  jdolecek 	simple_unlock(&pmaps_lock);
   1586  1.14.2.2   thorpej 
   1587  1.14.2.7  jdolecek 	if (vector_page < KERNEL_BASE) {
   1588  1.14.2.7  jdolecek 		/* Remove the vector page mapping */
   1589  1.14.2.7  jdolecek 		pmap_remove(pmap, vector_page, vector_page + NBPG);
   1590  1.14.2.7  jdolecek 		pmap_update(pmap);
   1591  1.14.2.7  jdolecek 	}
   1592       1.1      matt 
   1593       1.1      matt 	/*
    1594       1.1      matt 	 * Free any page tables still mapped.
    1595       1.1      matt 	 * This is only temporary until pmap_enter can count the number
    1596       1.1      matt 	 * of mappings made in a page table.  Then pmap_remove() can
    1597       1.1      matt 	 * reduce the count and free the page table when the count
    1598  1.14.2.1     lukem 	 * reaches zero.  Note that entries in this list should match the
    1599  1.14.2.1     lukem 	 * contents of the PTPT; however, walking this list is faster than
    1600  1.14.2.1     lukem 	 * scanning all 1024 PTPT entries looking for page tables.
    1601  1.14.2.1     lukem 	 * (Taken from i386 pmap.c.)
   1602       1.1      matt 	 */
   1603  1.14.2.7  jdolecek 	/*
   1604  1.14.2.7  jdolecek 	 * vmobjlock must be held while freeing pages
   1605  1.14.2.7  jdolecek 	 */
   1606  1.14.2.7  jdolecek 	simple_lock(&pmap->pm_obj.vmobjlock);
   1607  1.14.2.6  jdolecek 	while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
   1608  1.14.2.6  jdolecek 		KASSERT((page->flags & PG_BUSY) == 0);
   1609  1.14.2.1     lukem 		page->wire_count = 0;
   1610  1.14.2.1     lukem 		uvm_pagefree(page);
   1611       1.1      matt 	}
   1612  1.14.2.7  jdolecek 	simple_unlock(&pmap->pm_obj.vmobjlock);
   1613  1.14.2.1     lukem 
   1614       1.1      matt 	/* Free the page dir */
   1615       1.1      matt 	pmap_freepagedir(pmap);
   1616  1.14.2.2   thorpej 
   1617  1.14.2.2   thorpej 	/* return the pmap to the pool */
   1618  1.14.2.2   thorpej 	pool_put(&pmap_pmap_pool, pmap);
   1619       1.1      matt }
   1620       1.1      matt 
   1621       1.1      matt 
   1622       1.1      matt /*
   1623  1.14.2.1     lukem  * void pmap_reference(struct pmap *pmap)
   1624       1.1      matt  *
   1625       1.1      matt  * Add a reference to the specified pmap.
   1626       1.1      matt  */
   1627       1.1      matt 
   1628       1.1      matt void
   1629  1.14.2.7  jdolecek pmap_reference(struct pmap *pmap)
   1630       1.1      matt {
   1631       1.1      matt 	if (pmap == NULL)
   1632       1.1      matt 		return;
   1633       1.1      matt 
   1634       1.1      matt 	simple_lock(&pmap->pm_lock);
   1635  1.14.2.1     lukem 	pmap->pm_obj.uo_refs++;
   1636       1.1      matt 	simple_unlock(&pmap->pm_lock);
   1637       1.1      matt }
   1638       1.1      matt 
   1639       1.1      matt /*
   1640       1.1      matt  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1641       1.1      matt  *
   1642       1.1      matt  * Return the start and end addresses of the kernel's virtual space.
    1643       1.1      matt  * These values are set up in pmap_bootstrap and are updated as pages
   1644       1.1      matt  * are allocated.
   1645       1.1      matt  */
   1646       1.1      matt 
   1647       1.1      matt void
   1648  1.14.2.7  jdolecek pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1649       1.1      matt {
   1650  1.14.2.7  jdolecek 	*start = virtual_avail;
   1651       1.1      matt 	*end = virtual_end;
   1652       1.1      matt }
   1653       1.1      matt 
   1654       1.1      matt /*
   1655       1.1      matt  * Activate the address space for the specified process.  If the process
   1656       1.1      matt  * is the current process, load the new MMU context.
   1657       1.1      matt  */
   1658       1.1      matt void
   1659  1.14.2.7  jdolecek pmap_activate(struct proc *p)
   1660       1.1      matt {
   1661  1.14.2.1     lukem 	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
   1662       1.1      matt 	struct pcb *pcb = &p->p_addr->u_pcb;
   1663       1.1      matt 
   1664  1.14.2.1     lukem 	(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
   1665       1.1      matt 	    (paddr_t *)&pcb->pcb_pagedir);
   1666       1.1      matt 
   1667       1.1      matt 	PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
   1668       1.1      matt 	    p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
   1669       1.1      matt 
   1670       1.1      matt 	if (p == curproc) {
   1671       1.1      matt 		PDEBUG(0, printf("pmap_activate: setting TTB\n"));
   1672       1.1      matt 		setttb((u_int)pcb->pcb_pagedir);
   1673       1.1      matt 	}
   1674       1.1      matt }
   1675       1.1      matt 
   1676       1.1      matt /*
   1677       1.1      matt  * Deactivate the address space of the specified process.
   1678       1.1      matt  */
   1679       1.1      matt void
   1680  1.14.2.7  jdolecek pmap_deactivate(struct proc *p)
   1681       1.1      matt {
   1682       1.1      matt }
   1683       1.1      matt 
   1684  1.14.2.4   thorpej /*
   1685  1.14.2.4   thorpej  * Perform any deferred pmap operations.
   1686  1.14.2.4   thorpej  */
   1687  1.14.2.4   thorpej void
   1688  1.14.2.4   thorpej pmap_update(struct pmap *pmap)
   1689  1.14.2.4   thorpej {
   1690  1.14.2.4   thorpej 
   1691  1.14.2.4   thorpej 	/*
   1692  1.14.2.4   thorpej 	 * We haven't deferred any pmap operations, but we do need to
   1693  1.14.2.4   thorpej 	 * make sure TLB/cache operations have completed.
   1694  1.14.2.4   thorpej 	 */
   1695  1.14.2.4   thorpej 	cpu_cpwait();
   1696  1.14.2.4   thorpej }
   1697       1.1      matt 
   1698       1.1      matt /*
   1699       1.1      matt  * pmap_clean_page()
   1700       1.1      matt  *
   1701       1.1      matt  * This is a local function used to work out the best strategy to clean
   1702       1.1      matt  * a single page referenced by its entry in the PV table. It's used by
    1703       1.1      matt  * pmap_copy_page, pmap_zero_page and maybe some others later on.
   1704       1.1      matt  *
   1705       1.1      matt  * Its policy is effectively:
   1706       1.1      matt  *  o If there are no mappings, we don't bother doing anything with the cache.
   1707       1.1      matt  *  o If there is one mapping, we clean just that page.
   1708       1.1      matt  *  o If there are multiple mappings, we clean the entire cache.
   1709       1.1      matt  *
   1710       1.1      matt  * So that some functions can be further optimised, it returns 0 if it didn't
   1711       1.1      matt  * clean the entire cache, or 1 if it did.
   1712       1.1      matt  *
    1713       1.1      matt  * XXX One bug in this routine is that if the pv_entry has a single page
    1714       1.1      matt  * mapped at 0x00000000, a whole-cache clean will be performed rather than
    1715       1.1      matt  * just cleaning that one page.  This should not occur in everyday use,
    1716       1.1      matt  * and if it does, the result is merely a less efficient clean for the page.
   1717       1.1      matt  */
   1718       1.1      matt static int
   1719  1.14.2.7  jdolecek pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
   1720       1.1      matt {
   1721  1.14.2.2   thorpej 	struct pmap *pmap;
   1722  1.14.2.2   thorpej 	struct pv_entry *npv;
   1723       1.1      matt 	int cache_needs_cleaning = 0;
   1724       1.1      matt 	vaddr_t page_to_clean = 0;
   1725       1.1      matt 
   1726  1.14.2.2   thorpej 	if (pv == NULL)
   1727  1.14.2.2   thorpej 		/* nothing mapped in so nothing to flush */
   1728  1.14.2.2   thorpej 		return (0);
   1729  1.14.2.2   thorpej 
   1730  1.14.2.2   thorpej 	/* Since we flush the cache each time we change curproc, we
   1731  1.14.2.2   thorpej 	 * only need to flush the page if it is in the current pmap.
   1732  1.14.2.2   thorpej 	 */
   1733  1.14.2.2   thorpej 	if (curproc)
   1734  1.14.2.2   thorpej 		pmap = curproc->p_vmspace->vm_map.pmap;
   1735  1.14.2.2   thorpej 	else
   1736  1.14.2.2   thorpej 		pmap = pmap_kernel();
   1737  1.14.2.2   thorpej 
   1738  1.14.2.2   thorpej 	for (npv = pv; npv; npv = npv->pv_next) {
   1739  1.14.2.2   thorpej 		if (npv->pv_pmap == pmap) {
   1740  1.14.2.2   thorpej 			/* The page is mapped non-cacheable in
   1741  1.14.2.2   thorpej 			 * this map.  No need to flush the cache.
   1742  1.14.2.2   thorpej 			 */
   1743  1.14.2.7  jdolecek 			if (npv->pv_flags & PVF_NC) {
   1744  1.14.2.2   thorpej #ifdef DIAGNOSTIC
   1745  1.14.2.2   thorpej 				if (cache_needs_cleaning)
   1746  1.14.2.2   thorpej 					panic("pmap_clean_page: "
   1747  1.14.2.2   thorpej 							"cache inconsistency");
   1748  1.14.2.2   thorpej #endif
   1749  1.14.2.2   thorpej 				break;
   1750  1.14.2.2   thorpej 			}
   1751  1.14.2.2   thorpej #if 0
   1752  1.14.2.7  jdolecek 			/*
   1753  1.14.2.7  jdolecek 			 * XXX Can't do this because pmap_protect doesn't
   1754  1.14.2.7  jdolecek 			 * XXX clean the page when it does a write-protect.
   1755  1.14.2.7  jdolecek 			 */
   1756  1.14.2.7  jdolecek 			else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
   1757  1.14.2.2   thorpej 				continue;
   1758  1.14.2.2   thorpej #endif
    1759  1.14.2.2   thorpej 			if (cache_needs_cleaning) {
    1760  1.14.2.2   thorpej 				page_to_clean = 0;
    1761  1.14.2.2   thorpej 				break;
    1762  1.14.2.2   thorpej 			} else
    1764  1.14.2.2   thorpej 				page_to_clean = npv->pv_va;
   1765  1.14.2.2   thorpej 			cache_needs_cleaning = 1;
   1766  1.14.2.2   thorpej 		}
   1767       1.1      matt 	}
   1768       1.1      matt 
   1769       1.1      matt 	if (page_to_clean)
   1770  1.14.2.5  jdolecek 		cpu_idcache_wbinv_range(page_to_clean, NBPG);
   1771       1.1      matt 	else if (cache_needs_cleaning) {
   1772  1.14.2.5  jdolecek 		cpu_idcache_wbinv_all();
   1773       1.1      matt 		return (1);
   1774       1.1      matt 	}
   1775       1.1      matt 	return (0);
   1776       1.1      matt }
   1777       1.1      matt 
   1778       1.1      matt /*
   1779       1.1      matt  * pmap_zero_page()
   1780       1.1      matt  *
   1781       1.1      matt  * Zero a given physical page by mapping it at a page hook point.
    1782       1.1      matt  * In doing the zero page op, the page we zero is mapped cacheable, since
    1783       1.1      matt  * on StrongARM accesses to non-cached pages are non-burst, making the
    1784       1.1      matt  * writing of _any_ bulk data very slow.
   1785       1.1      matt  */
   1786  1.14.2.7  jdolecek #if ARM_MMU_GENERIC == 1
   1787       1.1      matt void
   1788  1.14.2.7  jdolecek pmap_zero_page_generic(paddr_t phys)
   1789       1.1      matt {
   1790  1.14.2.7  jdolecek #ifdef DEBUG
   1791  1.14.2.7  jdolecek 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
   1792  1.14.2.7  jdolecek 
   1793  1.14.2.7  jdolecek 	if (pg->mdpage.pvh_list != NULL)
   1794  1.14.2.7  jdolecek 		panic("pmap_zero_page: page has mappings");
   1795  1.14.2.7  jdolecek #endif
   1796  1.14.2.7  jdolecek 
   1797  1.14.2.7  jdolecek 	KDASSERT((phys & PGOFSET) == 0);
   1798  1.14.2.7  jdolecek 
   1799  1.14.2.7  jdolecek 	/*
   1800  1.14.2.7  jdolecek 	 * Hook in the page, zero it, and purge the cache for that
   1801  1.14.2.7  jdolecek 	 * zeroed page. Invalidate the TLB as needed.
   1802  1.14.2.7  jdolecek 	 */
   1803  1.14.2.7  jdolecek 	*cdst_pte = L2_S_PROTO | phys |
   1804  1.14.2.7  jdolecek 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   1805  1.14.2.7  jdolecek 	cpu_tlb_flushD_SE(cdstp);
   1806  1.14.2.7  jdolecek 	cpu_cpwait();
   1807  1.14.2.7  jdolecek 	bzero_page(cdstp);
   1808  1.14.2.7  jdolecek 	cpu_dcache_wbinv_range(cdstp, NBPG);
   1809  1.14.2.7  jdolecek }
   1810  1.14.2.7  jdolecek #endif /* ARM_MMU_GENERIC == 1 */
   1811  1.14.2.7  jdolecek 
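                             /*
                              * The XScale variant below maps the destination page with the
                              * mini-data cache attribute (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)) so
                              * that the zeroing traffic is confined to the mini-data cache rather
                              * than displacing the main data cache, and then cleans the mini-data
                              * cache with xscale_cache_clean_minidata().
                              */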
   1812  1.14.2.7  jdolecek #if ARM_MMU_XSCALE == 1
   1813  1.14.2.7  jdolecek void
   1814  1.14.2.7  jdolecek pmap_zero_page_xscale(paddr_t phys)
   1815  1.14.2.7  jdolecek {
   1816  1.14.2.7  jdolecek #ifdef DEBUG
   1817  1.14.2.7  jdolecek 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
   1818  1.14.2.7  jdolecek 
   1819  1.14.2.7  jdolecek 	if (pg->mdpage.pvh_list != NULL)
   1820  1.14.2.7  jdolecek 		panic("pmap_zero_page: page has mappings");
   1821  1.14.2.7  jdolecek #endif
   1822  1.14.2.7  jdolecek 
   1823  1.14.2.7  jdolecek 	KDASSERT((phys & PGOFSET) == 0);
   1824       1.1      matt 
   1825       1.1      matt 	/*
   1826       1.1      matt 	 * Hook in the page, zero it, and purge the cache for that
   1827       1.1      matt 	 * zeroed page. Invalidate the TLB as needed.
   1828       1.1      matt 	 */
   1829  1.14.2.7  jdolecek 	*cdst_pte = L2_S_PROTO | phys |
   1830  1.14.2.7  jdolecek 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
   1831  1.14.2.7  jdolecek 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   1832  1.14.2.7  jdolecek 	cpu_tlb_flushD_SE(cdstp);
   1833  1.14.2.4   thorpej 	cpu_cpwait();
   1834  1.14.2.7  jdolecek 	bzero_page(cdstp);
   1835  1.14.2.7  jdolecek 	xscale_cache_clean_minidata();
   1836       1.1      matt }
   1837  1.14.2.7  jdolecek #endif /* ARM_MMU_XSCALE == 1 */
   1838       1.1      matt 
    1839  1.14.2.2   thorpej /*
    1839  1.14.2.2   thorpej  * pmap_pageidlezero()
   1840  1.14.2.2   thorpej  *
   1841  1.14.2.2   thorpej  * The same as above, except that we assume that the page is not
   1842  1.14.2.2   thorpej  * mapped.  This means we never have to flush the cache first.  Called
   1843  1.14.2.2   thorpej  * from the idle loop.
   1844  1.14.2.2   thorpej  */
   1845  1.14.2.2   thorpej boolean_t
   1846  1.14.2.7  jdolecek pmap_pageidlezero(paddr_t phys)
   1847  1.14.2.2   thorpej {
   1848  1.14.2.2   thorpej 	int i, *ptr;
   1849  1.14.2.2   thorpej 	boolean_t rv = TRUE;
   1850  1.14.2.7  jdolecek #ifdef DEBUG
   1851  1.14.2.6  jdolecek 	struct vm_page *pg;
   1852  1.14.2.2   thorpej 
   1853  1.14.2.6  jdolecek 	pg = PHYS_TO_VM_PAGE(phys);
   1854  1.14.2.6  jdolecek 	if (pg->mdpage.pvh_list != NULL)
   1855  1.14.2.7  jdolecek 		panic("pmap_pageidlezero: page has mappings");
   1856  1.14.2.2   thorpej #endif
   1857  1.14.2.7  jdolecek 
   1858  1.14.2.7  jdolecek 	KDASSERT((phys & PGOFSET) == 0);
   1859  1.14.2.7  jdolecek 
   1860  1.14.2.2   thorpej 	/*
   1861  1.14.2.2   thorpej 	 * Hook in the page, zero it, and purge the cache for that
   1862  1.14.2.2   thorpej 	 * zeroed page. Invalidate the TLB as needed.
   1863  1.14.2.2   thorpej 	 */
   1864  1.14.2.7  jdolecek 	*cdst_pte = L2_S_PROTO | phys |
   1865  1.14.2.7  jdolecek 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   1866  1.14.2.7  jdolecek 	cpu_tlb_flushD_SE(cdstp);
   1867  1.14.2.4   thorpej 	cpu_cpwait();
   1868  1.14.2.4   thorpej 
    1869  1.14.2.7  jdolecek 	for (i = 0, ptr = (int *)cdstp;
    1870  1.14.2.2   thorpej 	    i < (NBPG / sizeof(int)); i++) {
   1871  1.14.2.2   thorpej 		if (sched_whichqs != 0) {
   1872  1.14.2.2   thorpej 			/*
   1873  1.14.2.2   thorpej 			 * A process has become ready.  Abort now,
   1874  1.14.2.2   thorpej 			 * so we don't keep it waiting while we
   1875  1.14.2.2   thorpej 			 * do slow memory access to finish this
   1876  1.14.2.2   thorpej 			 * page.
   1877  1.14.2.2   thorpej 			 */
   1878  1.14.2.2   thorpej 			rv = FALSE;
   1879  1.14.2.2   thorpej 			break;
   1880  1.14.2.2   thorpej 		}
   1881  1.14.2.2   thorpej 		*ptr++ = 0;
   1882  1.14.2.2   thorpej 	}
   1883  1.14.2.2   thorpej 
   1884  1.14.2.2   thorpej 	if (rv)
   1885  1.14.2.2   thorpej 		/*
   1886  1.14.2.2   thorpej 		 * if we aborted we'll rezero this page again later so don't
   1887  1.14.2.2   thorpej 		 * purge it unless we finished it
   1888  1.14.2.2   thorpej 		 */
   1889  1.14.2.7  jdolecek 		cpu_dcache_wbinv_range(cdstp, NBPG);
   1890  1.14.2.2   thorpej 	return (rv);
   1891  1.14.2.2   thorpej }
   1892  1.14.2.2   thorpej 
   1893       1.1      matt /*
   1894       1.1      matt  * pmap_copy_page()
   1895       1.1      matt  *
   1896       1.1      matt  * Copy one physical page into another, by mapping the pages into
    1897       1.1      matt  * hook points. The same comment regarding cacheability as in
   1898       1.1      matt  * pmap_zero_page also applies here.
   1899       1.1      matt  */
   1900  1.14.2.7  jdolecek #if ARM_MMU_GENERIC == 1
   1901       1.1      matt void
   1902  1.14.2.7  jdolecek pmap_copy_page_generic(paddr_t src, paddr_t dst)
   1903       1.1      matt {
   1904  1.14.2.7  jdolecek 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
   1905  1.14.2.7  jdolecek #ifdef DEBUG
   1906  1.14.2.7  jdolecek 	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
   1907  1.14.2.7  jdolecek 
   1908  1.14.2.7  jdolecek 	if (dst_pg->mdpage.pvh_list != NULL)
   1909  1.14.2.7  jdolecek 		panic("pmap_copy_page: dst page has mappings");
   1910  1.14.2.7  jdolecek #endif
   1911  1.14.2.7  jdolecek 
   1912  1.14.2.7  jdolecek 	KDASSERT((src & PGOFSET) == 0);
   1913  1.14.2.7  jdolecek 	KDASSERT((dst & PGOFSET) == 0);
   1914  1.14.2.7  jdolecek 
   1915  1.14.2.7  jdolecek 	/*
   1916  1.14.2.7  jdolecek 	 * Clean the source page.  Hold the source page's lock for
   1917  1.14.2.7  jdolecek 	 * the duration of the copy so that no other mappings can
   1918  1.14.2.7  jdolecek 	 * be created while we have a potentially aliased mapping.
   1919  1.14.2.7  jdolecek 	 */
   1920  1.14.2.6  jdolecek 	simple_lock(&src_pg->mdpage.pvh_slock);
   1921  1.14.2.7  jdolecek 	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
   1922       1.1      matt 
   1923       1.1      matt 	/*
   1924       1.1      matt 	 * Map the pages into the page hook points, copy them, and purge
   1925       1.1      matt 	 * the cache for the appropriate page. Invalidate the TLB
   1926       1.1      matt 	 * as required.
   1927       1.1      matt 	 */
   1928  1.14.2.7  jdolecek 	*csrc_pte = L2_S_PROTO | src |
   1929  1.14.2.7  jdolecek 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
   1930  1.14.2.7  jdolecek 	*cdst_pte = L2_S_PROTO | dst |
   1931  1.14.2.7  jdolecek 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   1932  1.14.2.7  jdolecek 	cpu_tlb_flushD_SE(csrcp);
   1933  1.14.2.7  jdolecek 	cpu_tlb_flushD_SE(cdstp);
   1934  1.14.2.4   thorpej 	cpu_cpwait();
   1935  1.14.2.7  jdolecek 	bcopy_page(csrcp, cdstp);
   1936  1.14.2.7  jdolecek 	cpu_dcache_inv_range(csrcp, NBPG);
   1937  1.14.2.7  jdolecek 	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
   1938  1.14.2.7  jdolecek 	cpu_dcache_wbinv_range(cdstp, NBPG);
   1939       1.1      matt }
   1940  1.14.2.7  jdolecek #endif /* ARM_MMU_GENERIC == 1 */
   1941  1.14.2.7  jdolecek 
   1942  1.14.2.7  jdolecek #if ARM_MMU_XSCALE == 1
   1943  1.14.2.7  jdolecek void
   1944  1.14.2.7  jdolecek pmap_copy_page_xscale(paddr_t src, paddr_t dst)
   1945  1.14.2.7  jdolecek {
   1946  1.14.2.7  jdolecek 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
   1947  1.14.2.7  jdolecek #ifdef DEBUG
   1948  1.14.2.7  jdolecek 	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
   1949  1.14.2.7  jdolecek 
   1950  1.14.2.7  jdolecek 	if (dst_pg->mdpage.pvh_list != NULL)
   1951  1.14.2.7  jdolecek 		panic("pmap_copy_page: dst page has mappings");
   1952  1.14.2.7  jdolecek #endif
   1953  1.14.2.7  jdolecek 
   1954  1.14.2.7  jdolecek 	KDASSERT((src & PGOFSET) == 0);
   1955  1.14.2.7  jdolecek 	KDASSERT((dst & PGOFSET) == 0);
   1956  1.14.2.7  jdolecek 
   1957  1.14.2.7  jdolecek 	/*
   1958  1.14.2.7  jdolecek 	 * Clean the source page.  Hold the source page's lock for
   1959  1.14.2.7  jdolecek 	 * the duration of the copy so that no other mappings can
   1960  1.14.2.7  jdolecek 	 * be created while we have a potentially aliased mapping.
   1961  1.14.2.7  jdolecek 	 */
   1962  1.14.2.7  jdolecek 	simple_lock(&src_pg->mdpage.pvh_slock);
   1963  1.14.2.7  jdolecek 	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
   1964  1.14.2.7  jdolecek 
   1965  1.14.2.7  jdolecek 	/*
   1966  1.14.2.7  jdolecek 	 * Map the pages into the page hook points, copy them, and purge
   1967  1.14.2.7  jdolecek 	 * the cache for the appropriate page. Invalidate the TLB
   1968  1.14.2.7  jdolecek 	 * as required.
   1969  1.14.2.7  jdolecek 	 */
   1970  1.14.2.7  jdolecek 	*csrc_pte = L2_S_PROTO | src |
   1971  1.14.2.7  jdolecek 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
   1972  1.14.2.7  jdolecek 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   1973  1.14.2.7  jdolecek 	*cdst_pte = L2_S_PROTO | dst |
   1974  1.14.2.7  jdolecek 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
   1975  1.14.2.7  jdolecek 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   1976  1.14.2.7  jdolecek 	cpu_tlb_flushD_SE(csrcp);
   1977  1.14.2.7  jdolecek 	cpu_tlb_flushD_SE(cdstp);
   1978  1.14.2.7  jdolecek 	cpu_cpwait();
   1979  1.14.2.7  jdolecek 	bcopy_page(csrcp, cdstp);
   1980  1.14.2.7  jdolecek 	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
   1981  1.14.2.7  jdolecek 	xscale_cache_clean_minidata();
   1982  1.14.2.7  jdolecek }
   1983  1.14.2.7  jdolecek #endif /* ARM_MMU_XSCALE == 1 */
   1984       1.1      matt 
   1985       1.1      matt #if 0
   1986       1.1      matt void
   1987  1.14.2.7  jdolecek pmap_pte_addref(struct pmap *pmap, vaddr_t va)
   1988       1.1      matt {
   1989       1.1      matt 	pd_entry_t *pde;
   1990       1.2      matt 	paddr_t pa;
   1991       1.1      matt 	struct vm_page *m;
   1992       1.1      matt 
   1993       1.1      matt 	if (pmap == pmap_kernel())
   1994       1.1      matt 		return;
   1995       1.1      matt 
   1996  1.14.2.7  jdolecek 	pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
   1997       1.1      matt 	pa = pmap_pte_pa(pde);
   1998       1.1      matt 	m = PHYS_TO_VM_PAGE(pa);
   1999       1.1      matt 	++m->wire_count;
   2000       1.1      matt #ifdef MYCROFT_HACK
   2001       1.1      matt 	printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   2002       1.1      matt 	    pmap, va, pde, pa, m, m->wire_count);
   2003       1.1      matt #endif
   2004       1.1      matt }
   2005       1.1      matt 
   2006       1.1      matt void
   2007  1.14.2.7  jdolecek pmap_pte_delref(struct pmap *pmap, vaddr_t va)
   2008       1.1      matt {
   2009       1.1      matt 	pd_entry_t *pde;
   2010       1.2      matt 	paddr_t pa;
   2011       1.1      matt 	struct vm_page *m;
   2012       1.1      matt 
   2013       1.1      matt 	if (pmap == pmap_kernel())
   2014       1.1      matt 		return;
   2015       1.1      matt 
   2016  1.14.2.7  jdolecek 	pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
   2017       1.1      matt 	pa = pmap_pte_pa(pde);
   2018       1.1      matt 	m = PHYS_TO_VM_PAGE(pa);
   2019       1.1      matt 	--m->wire_count;
   2020       1.1      matt #ifdef MYCROFT_HACK
   2021       1.1      matt 	printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   2022       1.1      matt 	    pmap, va, pde, pa, m, m->wire_count);
   2023       1.1      matt #endif
   2024       1.1      matt 	if (m->wire_count == 0) {
   2025       1.1      matt #ifdef MYCROFT_HACK
   2026       1.1      matt 		printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
   2027       1.1      matt 		    pmap, va, pde, pa, m);
   2028       1.1      matt #endif
   2029       1.1      matt 		pmap_unmap_in_l1(pmap, va);
   2030       1.1      matt 		uvm_pagefree(m);
   2031       1.1      matt 		--pmap->pm_stats.resident_count;
   2032       1.1      matt 	}
   2033       1.1      matt }
   2034       1.1      matt #else
   2035       1.1      matt #define	pmap_pte_addref(pmap, va)
   2036       1.1      matt #define	pmap_pte_delref(pmap, va)
   2037       1.1      matt #endif
   2038       1.1      matt 
   2039       1.1      matt /*
   2040       1.1      matt  * Since we have a virtually indexed cache, we may need to inhibit caching if
   2041       1.1      matt  * there is more than one mapping and at least one of them is writable.
   2042       1.1      matt  * Since we purge the cache on every context switch, we only need to check for
   2043       1.1      matt  * other mappings within the same pmap, or kernel_pmap.
   2044       1.1      matt  * This function is also called when a page is unmapped, to possibly reenable
   2045       1.1      matt  * caching on any remaining mappings.
   2046      1.11     chris  *
   2047  1.14.2.4   thorpej  * The code implements the following logic, where:
   2048  1.14.2.4   thorpej  *
   2049  1.14.2.4   thorpej  * KW = # of kernel read/write pages
   2050  1.14.2.4   thorpej  * KR = # of kernel read only pages
   2051  1.14.2.4   thorpej  * UW = # of user read/write pages
   2052  1.14.2.4   thorpej  * UR = # of user read only pages
   2053  1.14.2.4   thorpej  * OW = # of user read/write pages in another pmap, then
   2054  1.14.2.4   thorpej  *
   2055  1.14.2.4   thorpej  * KC = kernel mapping is cacheable
   2056  1.14.2.4   thorpej  * UC = user mapping is cacheable
   2057  1.14.2.4   thorpej  *
   2058  1.14.2.4   thorpej  *                     KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
   2059  1.14.2.4   thorpej  *                   +---------------------------------------------
   2060  1.14.2.4   thorpej  * UW=0,UR=0,OW=0    | ---        KC=1       KC=1       KC=0
   2061  1.14.2.4   thorpej  * UW=0,UR>0,OW=0    | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
   2062  1.14.2.4   thorpej  * UW=0,UR>0,OW>0    | UC=1       KC=0,UC=1  KC=0,UC=0  KC=0,UC=0
   2063  1.14.2.4   thorpej  * UW=1,UR=0,OW=0    | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
   2064  1.14.2.4   thorpej  * UW>1,UR>=0,OW>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
   2065  1.14.2.4   thorpej  *
    2066      1.11     chris  * Note that the pmap must have its ptes mapped in, and passed in via 'ptes'.
   2067       1.1      matt  */
   2068  1.14.2.4   thorpej __inline static void
   2069  1.14.2.6  jdolecek pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2070      1.12     chris 	boolean_t clear_cache)
   2071       1.1      matt {
   2072  1.14.2.4   thorpej 	if (pmap == pmap_kernel())
   2073  1.14.2.6  jdolecek 		pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
   2074  1.14.2.4   thorpej 	else
   2075  1.14.2.6  jdolecek 		pmap_vac_me_user(pmap, pg, ptes, clear_cache);
   2076  1.14.2.4   thorpej }
   2077  1.14.2.4   thorpej 
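                             /*
                              * As an aside, the table above can be restated as two predicates.
                              * The sketch below (illustrative names, not part of the build) is
                              * one way to write it; pmap_vac_me_kpmap() and pmap_vac_me_user()
                              * below compute the same answer incrementally from the pv list.
                              */
                             #if 0	/* illustrative sketch only */
                             __inline static void
                             vac_table_sketch(int kw, int kr, int uw, int ur, int ow,
                                 int *kc, int *uc)
                             {
                             	/* Kernel mapping cacheable? */
                             	*kc = (kw + kr > 0) && uw == 0 && ow == 0 &&
                             	    (kw == 0 || (kw == 1 && ur == 0));
                             	/*
                             	 * User mapping cacheable?  OW mappings live in another
                             	 * address space, and the cache is purged on context
                             	 * switch, so they only matter through the UW>1 row.
                             	 */
                             	*uc = (uw + ur > 0) && kw == 0 && uw <= 1 &&
                             	    (uw == 0 || kr == 0);
                             }
                             #endif
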
   2078  1.14.2.4   thorpej static void
   2079  1.14.2.6  jdolecek pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2080  1.14.2.4   thorpej 	boolean_t clear_cache)
   2081  1.14.2.4   thorpej {
   2082  1.14.2.4   thorpej 	int user_entries = 0;
   2083  1.14.2.4   thorpej 	int user_writable = 0;
   2084  1.14.2.4   thorpej 	int user_cacheable = 0;
   2085  1.14.2.4   thorpej 	int kernel_entries = 0;
   2086  1.14.2.4   thorpej 	int kernel_writable = 0;
   2087  1.14.2.4   thorpej 	int kernel_cacheable = 0;
   2088  1.14.2.4   thorpej 	struct pv_entry *pv;
   2089  1.14.2.4   thorpej 	struct pmap *last_pmap = pmap;
   2090  1.14.2.4   thorpej 
   2091  1.14.2.4   thorpej #ifdef DIAGNOSTIC
   2092  1.14.2.4   thorpej 	if (pmap != pmap_kernel())
   2093  1.14.2.4   thorpej 		panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
   2094  1.14.2.4   thorpej #endif
   2095  1.14.2.4   thorpej 
   2096  1.14.2.4   thorpej 	/*
    2097  1.14.2.4   thorpej 	 * Pass one: see if there are both kernel and user pmaps for
   2098  1.14.2.4   thorpej 	 * this page.  Calculate whether there are user-writable or
   2099  1.14.2.4   thorpej 	 * kernel-writable pages.
   2100  1.14.2.4   thorpej 	 */
   2101  1.14.2.6  jdolecek 	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
   2102  1.14.2.4   thorpej 		if (pv->pv_pmap != pmap) {
   2103  1.14.2.4   thorpej 			user_entries++;
   2104  1.14.2.7  jdolecek 			if (pv->pv_flags & PVF_WRITE)
   2105  1.14.2.4   thorpej 				user_writable++;
   2106  1.14.2.7  jdolecek 			if ((pv->pv_flags & PVF_NC) == 0)
   2107  1.14.2.4   thorpej 				user_cacheable++;
   2108  1.14.2.4   thorpej 		} else {
   2109  1.14.2.4   thorpej 			kernel_entries++;
   2110  1.14.2.7  jdolecek 			if (pv->pv_flags & PVF_WRITE)
   2111  1.14.2.4   thorpej 				kernel_writable++;
   2112  1.14.2.7  jdolecek 			if ((pv->pv_flags & PVF_NC) == 0)
   2113  1.14.2.4   thorpej 				kernel_cacheable++;
   2114  1.14.2.4   thorpej 		}
   2115  1.14.2.4   thorpej 	}
   2116  1.14.2.4   thorpej 
   2117  1.14.2.4   thorpej 	/*
   2118  1.14.2.4   thorpej 	 * We know we have just been updating a kernel entry, so if
   2119  1.14.2.4   thorpej 	 * all user pages are already cacheable, then there is nothing
   2120  1.14.2.4   thorpej 	 * further to do.
   2121  1.14.2.4   thorpej 	 */
   2122  1.14.2.4   thorpej 	if (kernel_entries == 0 &&
   2123  1.14.2.4   thorpej 	    user_cacheable == user_entries)
   2124  1.14.2.4   thorpej 		return;
   2125  1.14.2.4   thorpej 
   2126  1.14.2.4   thorpej 	if (user_entries) {
   2127  1.14.2.4   thorpej 		/*
   2128  1.14.2.4   thorpej 		 * Scan over the list again, for each entry, if it
   2129  1.14.2.4   thorpej 		 * might not be set correctly, call pmap_vac_me_user
   2130  1.14.2.4   thorpej 		 * to recalculate the settings.
   2131  1.14.2.4   thorpej 		 */
   2132  1.14.2.6  jdolecek 		for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
   2133  1.14.2.4   thorpej 			/*
   2134  1.14.2.4   thorpej 			 * We know kernel mappings will get set
   2135  1.14.2.4   thorpej 			 * correctly in other calls.  We also know
   2136  1.14.2.4   thorpej 			 * that if the pmap is the same as last_pmap
   2137  1.14.2.4   thorpej 			 * then we've just handled this entry.
   2138  1.14.2.4   thorpej 			 */
   2139  1.14.2.4   thorpej 			if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
   2140  1.14.2.4   thorpej 				continue;
   2141  1.14.2.4   thorpej 			/*
   2142  1.14.2.4   thorpej 			 * If there are kernel entries and this page
   2143  1.14.2.4   thorpej 			 * is writable but non-cacheable, then we can
   2144  1.14.2.4   thorpej 			 * skip this entry also.
   2145  1.14.2.4   thorpej 			 */
   2146  1.14.2.4   thorpej 			if (kernel_entries > 0 &&
   2147  1.14.2.7  jdolecek 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
   2148  1.14.2.7  jdolecek 			    (PVF_NC | PVF_WRITE))
   2149  1.14.2.4   thorpej 				continue;
   2150  1.14.2.4   thorpej 			/*
   2151  1.14.2.4   thorpej 			 * Similarly if there are no kernel-writable
   2152  1.14.2.4   thorpej 			 * entries and the page is already
   2153  1.14.2.4   thorpej 			 * read-only/cacheable.
   2154  1.14.2.4   thorpej 			 */
   2155  1.14.2.4   thorpej 			if (kernel_writable == 0 &&
   2156  1.14.2.7  jdolecek 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
   2157  1.14.2.4   thorpej 				continue;
   2158  1.14.2.4   thorpej 			/*
   2159  1.14.2.4   thorpej 			 * For some of the remaining cases, we know
   2160  1.14.2.4   thorpej 			 * that we must recalculate, but for others we
   2161  1.14.2.4   thorpej 			 * can't tell if they are correct or not, so
   2162  1.14.2.4   thorpej 			 * we recalculate anyway.
   2163  1.14.2.4   thorpej 			 */
   2164  1.14.2.4   thorpej 			pmap_unmap_ptes(last_pmap);
   2165  1.14.2.4   thorpej 			last_pmap = pv->pv_pmap;
   2166  1.14.2.4   thorpej 			ptes = pmap_map_ptes(last_pmap);
   2167  1.14.2.6  jdolecek 			pmap_vac_me_user(last_pmap, pg, ptes,
   2168  1.14.2.4   thorpej 			    pmap_is_curpmap(last_pmap));
   2169  1.14.2.4   thorpej 		}
   2170  1.14.2.4   thorpej 		/* Restore the pte mapping that was passed to us.  */
   2171  1.14.2.4   thorpej 		if (last_pmap != pmap) {
   2172  1.14.2.4   thorpej 			pmap_unmap_ptes(last_pmap);
   2173  1.14.2.4   thorpej 			ptes = pmap_map_ptes(pmap);
   2174  1.14.2.4   thorpej 		}
   2175  1.14.2.4   thorpej 		if (kernel_entries == 0)
   2176  1.14.2.4   thorpej 			return;
   2177  1.14.2.4   thorpej 	}
   2178  1.14.2.4   thorpej 
   2179  1.14.2.6  jdolecek 	pmap_vac_me_user(pmap, pg, ptes, clear_cache);
   2180  1.14.2.4   thorpej 	return;
   2181  1.14.2.4   thorpej }
   2182  1.14.2.4   thorpej 
   2183  1.14.2.4   thorpej static void
   2184  1.14.2.6  jdolecek pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2185  1.14.2.4   thorpej 	boolean_t clear_cache)
   2186  1.14.2.4   thorpej {
   2187  1.14.2.4   thorpej 	struct pmap *kpmap = pmap_kernel();
   2188  1.14.2.2   thorpej 	struct pv_entry *pv, *npv;
   2189       1.1      matt 	int entries = 0;
   2190  1.14.2.4   thorpej 	int writable = 0;
   2191      1.12     chris 	int cacheable_entries = 0;
   2192  1.14.2.4   thorpej 	int kern_cacheable = 0;
   2193  1.14.2.4   thorpej 	int other_writable = 0;
   2194       1.1      matt 
   2195  1.14.2.6  jdolecek 	pv = pg->mdpage.pvh_list;
   2196      1.11     chris 	KASSERT(ptes != NULL);
   2197       1.1      matt 
   2198       1.1      matt 	/*
   2199       1.1      matt 	 * Count mappings and writable mappings in this pmap.
   2200  1.14.2.4   thorpej 	 * Include kernel mappings as part of our own.
   2201       1.1      matt 	 * Keep a pointer to the first one.
   2202       1.1      matt 	 */
   2203       1.1      matt 	for (npv = pv; npv; npv = npv->pv_next) {
   2204       1.1      matt 		/* Count mappings in the same pmap */
   2205  1.14.2.4   thorpej 		if (pmap == npv->pv_pmap ||
   2206  1.14.2.4   thorpej 		    kpmap == npv->pv_pmap) {
   2207       1.1      matt 			if (entries++ == 0)
   2208       1.1      matt 				pv = npv;
   2209      1.12     chris 			/* Cacheable mappings */
   2210  1.14.2.7  jdolecek 			if ((npv->pv_flags & PVF_NC) == 0) {
   2211      1.12     chris 				cacheable_entries++;
   2212  1.14.2.4   thorpej 				if (kpmap == npv->pv_pmap)
   2213  1.14.2.4   thorpej 					kern_cacheable++;
   2214  1.14.2.4   thorpej 			}
   2215  1.14.2.4   thorpej 			/* Writable mappings */
   2216  1.14.2.7  jdolecek 			if (npv->pv_flags & PVF_WRITE)
   2217  1.14.2.4   thorpej 				++writable;
   2218  1.14.2.7  jdolecek 		} else if (npv->pv_flags & PVF_WRITE)
   2219  1.14.2.4   thorpej 			other_writable = 1;
   2220       1.1      matt 	}
   2221       1.1      matt 
   2222      1.12     chris 	PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
   2223  1.14.2.4   thorpej 		"writable %d cacheable %d %s\n", pmap, entries, writable,
   2224      1.12     chris 	    	cacheable_entries, clear_cache ? "clean" : "no clean"));
   2225      1.12     chris 
   2226       1.1      matt 	/*
   2227       1.1      matt 	 * Enable or disable caching as necessary.
   2228  1.14.2.4   thorpej 	 * Note: the first entry might be part of the kernel pmap,
   2229  1.14.2.4   thorpej 	 * so we can't assume this is indicative of the state of the
   2230  1.14.2.4   thorpej 	 * other (maybe non-kpmap) entries.
   2231       1.1      matt 	 */
   2232  1.14.2.4   thorpej 	if ((entries > 1 && writable) ||
   2233  1.14.2.4   thorpej 	    (entries > 0 && pmap == kpmap && other_writable)) {
   2234      1.12     chris 		if (cacheable_entries == 0)
   2235      1.12     chris 		    return;
   2236  1.14.2.4   thorpej 		for (npv = pv; npv; npv = npv->pv_next) {
   2237  1.14.2.4   thorpej 			if ((pmap == npv->pv_pmap
   2238  1.14.2.4   thorpej 			    || kpmap == npv->pv_pmap) &&
   2239  1.14.2.7  jdolecek 			    (npv->pv_flags & PVF_NC) == 0) {
   2240  1.14.2.7  jdolecek 				ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK;
    2241  1.14.2.7  jdolecek 				npv->pv_flags |= PVF_NC;
   2242  1.14.2.4   thorpej 				/*
   2243  1.14.2.4   thorpej 				 * If this page needs flushing from the
   2244  1.14.2.4   thorpej 				 * cache, and we aren't going to do it
   2245  1.14.2.4   thorpej 				 * below, do it now.
   2246  1.14.2.4   thorpej 				 */
   2247  1.14.2.4   thorpej 				if ((cacheable_entries < 4 &&
   2248  1.14.2.4   thorpej 				    (clear_cache || npv->pv_pmap == kpmap)) ||
   2249  1.14.2.4   thorpej 				    (npv->pv_pmap == kpmap &&
   2250  1.14.2.4   thorpej 				    !clear_cache && kern_cacheable < 4)) {
   2251  1.14.2.5  jdolecek 					cpu_idcache_wbinv_range(npv->pv_va,
   2252      1.12     chris 					    NBPG);
   2253      1.12     chris 					cpu_tlb_flushID_SE(npv->pv_va);
   2254      1.12     chris 				}
   2255       1.1      matt 			}
   2256       1.1      matt 		}
   2257  1.14.2.4   thorpej 		if ((clear_cache && cacheable_entries >= 4) ||
   2258  1.14.2.4   thorpej 		    kern_cacheable >= 4) {
   2259  1.14.2.5  jdolecek 			cpu_idcache_wbinv_all();
   2260      1.12     chris 			cpu_tlb_flushID();
   2261      1.12     chris 		}
   2262  1.14.2.4   thorpej 		cpu_cpwait();
   2263       1.1      matt 	} else if (entries > 0) {
   2264  1.14.2.4   thorpej 		/*
    2265  1.14.2.4   thorpej 		 * Turn caching back on for some pages.  If it is a kernel
   2266  1.14.2.4   thorpej 		 * page, only do so if there are no other writable pages.
   2267  1.14.2.4   thorpej 		 */
   2268  1.14.2.4   thorpej 		for (npv = pv; npv; npv = npv->pv_next) {
   2269  1.14.2.4   thorpej 			if ((pmap == npv->pv_pmap ||
   2270  1.14.2.4   thorpej 			    (kpmap == npv->pv_pmap && other_writable == 0)) &&
   2271  1.14.2.7  jdolecek 			    (npv->pv_flags & PVF_NC)) {
   2272  1.14.2.7  jdolecek 				ptes[arm_btop(npv->pv_va)] |=
   2273  1.14.2.7  jdolecek 				    pte_l2_s_cache_mode;
   2274  1.14.2.7  jdolecek 				npv->pv_flags &= ~PVF_NC;
   2275       1.1      matt 			}
   2276       1.1      matt 		}
   2277       1.1      matt 	}
   2278       1.1      matt }
   2279       1.1      matt 
   2280       1.1      matt /*
   2281       1.1      matt  * pmap_remove()
   2282       1.1      matt  *
   2283       1.1      matt  * pmap_remove is responsible for nuking a number of mappings for a range
   2284       1.1      matt  * of virtual address space in the current pmap. To do this efficiently
   2285       1.1      matt  * is interesting, because in a number of cases a wide virtual address
   2286       1.1      matt  * range may be supplied that contains few actual mappings. So, the
   2287       1.1      matt  * optimisations are:
   2288       1.1      matt  *  1. Try and skip over hunks of address space for which an L1 entry
   2289       1.1      matt  *     does not exist.
   2290       1.1      matt  *  2. Build up a list of pages we've hit, up to a maximum, so we can
   2291       1.1      matt  *     maybe do just a partial cache clean. This path of execution is
   2292       1.1      matt  *     complicated by the fact that the cache must be flushed _before_
   2293       1.1      matt  *     the PTE is nuked, being a VAC :-)
   2294       1.1      matt  *  3. Maybe later fast-case a single page, but I don't think this is
   2295       1.1      matt  *     going to make _that_ much difference overall.
   2296       1.1      matt  */
   2297       1.1      matt 
   2298       1.1      matt #define PMAP_REMOVE_CLEAN_LIST_SIZE	3
   2299       1.1      matt 
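                             /*
                              * In isolation, the clean-list heuristic from optimisation 2 above
                              * amounts to the decision sketched here; flush_one()/flush_all()
                              * are hypothetical stand-ins for the cache/TLB primitives the real
                              * loop uses:
                              */
                             #if 0	/* illustrative sketch only */
                             	if (nflushed <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
                             		/* few mappings: cheap per-page cleans */
                             		for (i = 0; i < nflushed; i++)
                             			flush_one(cleanlist[i].va);
                             	} else {
                             		/* many mappings: one full clean beats many partial ones */
                             		flush_all();
                             	}
                             #endif
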
   2300       1.1      matt void
   2301  1.14.2.7  jdolecek pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
   2302       1.1      matt {
   2303       1.1      matt 	int cleanlist_idx = 0;
   2304       1.1      matt 	struct pagelist {
   2305       1.1      matt 		vaddr_t va;
   2306       1.1      matt 		pt_entry_t *pte;
   2307       1.1      matt 	} cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
    2308      1.11     chris 	pt_entry_t *pte = NULL, *ptes;
   2309       1.2      matt 	paddr_t pa;
   2310       1.1      matt 	int pmap_active;
   2311  1.14.2.6  jdolecek 	struct vm_page *pg;
   2312       1.1      matt 
   2313       1.1      matt 	/* Exit quick if there is no pmap */
   2314       1.1      matt 	if (!pmap)
   2315       1.1      matt 		return;
   2316       1.1      matt 
   2317  1.14.2.7  jdolecek 	PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
   2318  1.14.2.7  jdolecek 	    pmap, sva, eva));
   2319       1.1      matt 
   2320  1.14.2.2   thorpej 	/*
   2321  1.14.2.6  jdolecek 	 * we lock in the pmap => vm_page direction
   2322  1.14.2.2   thorpej 	 */
   2323  1.14.2.2   thorpej 	PMAP_MAP_TO_HEAD_LOCK();
   2324  1.14.2.2   thorpej 
   2325      1.11     chris 	ptes = pmap_map_ptes(pmap);
   2326       1.1      matt 	/* Get a page table pointer */
   2327       1.1      matt 	while (sva < eva) {
   2328  1.14.2.4   thorpej 		if (pmap_pde_page(pmap_pde(pmap, sva)))
   2329       1.1      matt 			break;
   2330  1.14.2.7  jdolecek 		sva = (sva & L1_S_FRAME) + L1_S_SIZE;
   2331       1.1      matt 	}
   2332      1.11     chris 
   2333  1.14.2.7  jdolecek 	pte = &ptes[arm_btop(sva)];
    2334       1.1      matt 	/* Note whether the pmap is active; if so, cache and TLB cleans are needed */
   2335  1.14.2.7  jdolecek 	pmap_active = pmap_is_curpmap(pmap);
   2336       1.1      matt 
   2337       1.1      matt 	/* Now loop along */
   2338       1.1      matt 	while (sva < eva) {
   2339       1.1      matt 		/* Check if we can move to the next PDE (l1 chunk) */
   2340  1.14.2.7  jdolecek 		if (!(sva & L2_ADDR_BITS))
   2341  1.14.2.4   thorpej 			if (!pmap_pde_page(pmap_pde(pmap, sva))) {
   2342  1.14.2.7  jdolecek 				sva += L1_S_SIZE;
   2343  1.14.2.7  jdolecek 				pte += arm_btop(L1_S_SIZE);
   2344       1.1      matt 				continue;
   2345       1.1      matt 			}
   2346       1.1      matt 
   2347       1.1      matt 		/* We've found a valid PTE, so this page of PTEs has to go. */
   2348       1.1      matt 		if (pmap_pte_v(pte)) {
   2349       1.1      matt 			/* Update statistics */
   2350       1.1      matt 			--pmap->pm_stats.resident_count;
   2351       1.1      matt 
   2352       1.1      matt 			/*
   2353       1.1      matt 			 * Add this page to our cache remove list, if we can.
    2354       1.1      matt 			 * If, however, the cache remove list is totally full,
    2355       1.1      matt 			 * then do a complete cache invalidation, taking note
   2356       1.1      matt 			 * to backtrack the PTE table beforehand, and ignore
   2357       1.1      matt 			 * the lists in future because there's no longer any
   2358       1.1      matt 			 * point in bothering with them (we've paid the
   2359       1.1      matt 			 * penalty, so will carry on unhindered). Otherwise,
   2360       1.1      matt 			 * when we fall out, we just clean the list.
   2361       1.1      matt 			 */
   2362       1.1      matt 			PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
   2363       1.1      matt 			pa = pmap_pte_pa(pte);
   2364       1.1      matt 
   2365       1.1      matt 			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2366       1.1      matt 				/* Add to the clean list. */
   2367       1.1      matt 				cleanlist[cleanlist_idx].pte = pte;
   2368       1.1      matt 				cleanlist[cleanlist_idx].va = sva;
   2369       1.1      matt 				cleanlist_idx++;
   2370       1.1      matt 			} else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2371       1.1      matt 				int cnt;
   2372       1.1      matt 
   2373       1.1      matt 				/* Nuke everything if needed. */
   2374       1.1      matt 				if (pmap_active) {
   2375  1.14.2.5  jdolecek 					cpu_idcache_wbinv_all();
   2376       1.1      matt 					cpu_tlb_flushID();
   2377       1.1      matt 				}
   2378       1.1      matt 
   2379       1.1      matt 				/*
   2380       1.1      matt 				 * Roll back the previous PTE list,
   2381       1.1      matt 				 * and zero out the current PTE.
   2382       1.1      matt 				 */
   2383       1.1      matt 				for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
   2384       1.1      matt 					*cleanlist[cnt].pte = 0;
   2385       1.1      matt 					pmap_pte_delref(pmap, cleanlist[cnt].va);
   2386       1.1      matt 				}
   2387       1.1      matt 				*pte = 0;
   2388       1.1      matt 				pmap_pte_delref(pmap, sva);
   2389       1.1      matt 				cleanlist_idx++;
   2390       1.1      matt 			} else {
   2391       1.1      matt 				/*
   2392       1.1      matt 				 * We've already nuked the cache and
   2393       1.1      matt 				 * TLB, so just carry on regardless,
   2394       1.1      matt 				 * and we won't need to do it again
   2395       1.1      matt 				 */
   2396       1.1      matt 				*pte = 0;
   2397       1.1      matt 				pmap_pte_delref(pmap, sva);
   2398       1.1      matt 			}
   2399       1.1      matt 
   2400       1.1      matt 			/*
   2401       1.1      matt 			 * Update flags. In a number of circumstances,
   2402       1.1      matt 			 * we could cluster a lot of these and do a
   2403       1.1      matt 			 * number of sequential pages in one go.
   2404       1.1      matt 			 */
   2405  1.14.2.6  jdolecek 			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
   2406  1.14.2.2   thorpej 				struct pv_entry *pve;
   2407  1.14.2.6  jdolecek 				simple_lock(&pg->mdpage.pvh_slock);
   2408  1.14.2.6  jdolecek 				pve = pmap_remove_pv(pg, pmap, sva);
   2409  1.14.2.2   thorpej 				pmap_free_pv(pmap, pve);
   2410  1.14.2.6  jdolecek 				pmap_vac_me_harder(pmap, pg, ptes, FALSE);
   2411  1.14.2.6  jdolecek 				simple_unlock(&pg->mdpage.pvh_slock);
   2412       1.1      matt 			}
   2413       1.1      matt 		}
   2414       1.1      matt 		sva += NBPG;
   2415       1.1      matt 		pte++;
   2416       1.1      matt 	}
   2417       1.1      matt 
   2418      1.11     chris 	pmap_unmap_ptes(pmap);
   2419       1.1      matt 	/*
    2420       1.1      matt 	 * Now, if we've fallen through to here, chances are that there
    2421       1.1      matt 	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
   2422       1.1      matt 	 */
   2423       1.1      matt 	if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2424       1.1      matt 		u_int cnt;
   2425       1.1      matt 
   2426       1.1      matt 		for (cnt = 0; cnt < cleanlist_idx; cnt++) {
   2427       1.1      matt 			if (pmap_active) {
   2428  1.14.2.5  jdolecek 				cpu_idcache_wbinv_range(cleanlist[cnt].va,
   2429  1.14.2.5  jdolecek 				    NBPG);
   2430       1.1      matt 				*cleanlist[cnt].pte = 0;
   2431       1.1      matt 				cpu_tlb_flushID_SE(cleanlist[cnt].va);
   2432       1.1      matt 			} else
   2433       1.1      matt 				*cleanlist[cnt].pte = 0;
   2434       1.1      matt 			pmap_pte_delref(pmap, cleanlist[cnt].va);
   2435       1.1      matt 		}
   2436       1.1      matt 	}
   2437  1.14.2.2   thorpej 	PMAP_MAP_TO_HEAD_UNLOCK();
   2438       1.1      matt }
   2439       1.1      matt 
   2440       1.1      matt /*
   2441       1.1      matt  * Routine:	pmap_remove_all
   2442       1.1      matt  * Function:
   2443       1.1      matt  *		Removes this physical page from
   2444       1.1      matt  *		all physical maps in which it resides.
   2445       1.1      matt  *		Reflects back modify bits to the pager.
   2446       1.1      matt  */
   2447       1.1      matt 
   2448  1.14.2.4   thorpej static void
   2449  1.14.2.7  jdolecek pmap_remove_all(struct vm_page *pg)
   2450       1.1      matt {
   2451  1.14.2.2   thorpej 	struct pv_entry *pv, *npv;
   2452  1.14.2.1     lukem 	struct pmap *pmap;
   2453      1.11     chris 	pt_entry_t *pte, *ptes;
   2454       1.1      matt 
   2455  1.14.2.6  jdolecek 	PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
   2456       1.1      matt 
   2457  1.14.2.6  jdolecek 	/* set vm_page => pmap locking */
   2458  1.14.2.2   thorpej 	PMAP_HEAD_TO_MAP_LOCK();
   2459       1.1      matt 
   2460  1.14.2.6  jdolecek 	simple_lock(&pg->mdpage.pvh_slock);
   2461  1.14.2.2   thorpej 
   2462  1.14.2.6  jdolecek 	pv = pg->mdpage.pvh_list;
   2463  1.14.2.6  jdolecek 	if (pv == NULL) {
   2464  1.14.2.6  jdolecek 		PDEBUG(0, printf("free page\n"));
   2465  1.14.2.6  jdolecek 		simple_unlock(&pg->mdpage.pvh_slock);
   2466  1.14.2.6  jdolecek 		PMAP_HEAD_TO_MAP_UNLOCK();
   2467  1.14.2.6  jdolecek 		return;
   2468       1.1      matt 	}
   2469  1.14.2.2   thorpej 	pmap_clean_page(pv, FALSE);
   2470       1.1      matt 
   2471       1.1      matt 	while (pv) {
   2472       1.1      matt 		pmap = pv->pv_pmap;
   2473      1.11     chris 		ptes = pmap_map_ptes(pmap);
   2474  1.14.2.7  jdolecek 		pte = &ptes[arm_btop(pv->pv_va)];
   2475       1.1      matt 
   2476       1.1      matt 		PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
   2477       1.1      matt 		    pv->pv_va, pv->pv_flags));
   2478       1.1      matt #ifdef DEBUG
   2479  1.14.2.7  jdolecek 		if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 ||
   2480  1.14.2.7  jdolecek 		    pmap_pte_v(pte) == 0 ||
   2481  1.14.2.7  jdolecek 		    pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
   2482       1.1      matt 			panic("pmap_remove_all: bad mapping");
   2483       1.1      matt #endif	/* DEBUG */
   2484       1.1      matt 
   2485       1.1      matt 		/*
   2486       1.1      matt 		 * Update statistics
   2487       1.1      matt 		 */
   2488       1.1      matt 		--pmap->pm_stats.resident_count;
   2489       1.1      matt 
   2490       1.1      matt 		/* Wired bit */
   2491  1.14.2.7  jdolecek 		if (pv->pv_flags & PVF_WIRED)
   2492       1.1      matt 			--pmap->pm_stats.wired_count;
   2493       1.1      matt 
   2494       1.1      matt 		/*
   2495       1.1      matt 		 * Invalidate the PTEs.
   2496       1.1      matt 		 * XXX: should cluster them up and invalidate as many
   2497       1.1      matt 		 * as possible at once.
   2498       1.1      matt 		 */
   2499       1.1      matt 
   2500       1.1      matt #ifdef needednotdone
   2501       1.1      matt reduce wiring count on page table pages as references drop
   2502       1.1      matt #endif
   2503       1.1      matt 
   2504       1.1      matt 		*pte = 0;
   2505       1.1      matt 		pmap_pte_delref(pmap, pv->pv_va);
   2506       1.1      matt 
   2507       1.1      matt 		npv = pv->pv_next;
   2508  1.14.2.2   thorpej 		pmap_free_pv(pmap, pv);
   2509       1.1      matt 		pv = npv;
   2510      1.11     chris 		pmap_unmap_ptes(pmap);
   2511       1.1      matt 	}
   2512  1.14.2.6  jdolecek 	pg->mdpage.pvh_list = NULL;
   2513  1.14.2.6  jdolecek 	simple_unlock(&pg->mdpage.pvh_slock);
   2514  1.14.2.2   thorpej 	PMAP_HEAD_TO_MAP_UNLOCK();
   2515       1.1      matt 
   2516       1.1      matt 	PDEBUG(0, printf("done\n"));
   2517       1.1      matt 	cpu_tlb_flushID();
   2518  1.14.2.4   thorpej 	cpu_cpwait();
   2519       1.1      matt }
   2520       1.1      matt 
   2521       1.1      matt 
   2522       1.1      matt /*
   2523       1.1      matt  * Set the physical protection on the specified range of this map as requested.
   2524       1.1      matt  */
   2525       1.1      matt 
   2526       1.1      matt void
   2527  1.14.2.7  jdolecek pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   2528       1.1      matt {
   2529      1.11     chris 	pt_entry_t *pte = NULL, *ptes;
   2530  1.14.2.6  jdolecek 	struct vm_page *pg;
   2531       1.1      matt 	int armprot;
   2532       1.1      matt 	int flush = 0;
   2533       1.2      matt 	paddr_t pa;
   2534       1.1      matt 
   2535       1.1      matt 	PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
   2536       1.1      matt 	    pmap, sva, eva, prot));
   2537       1.1      matt 
   2538       1.1      matt 	if (~prot & VM_PROT_READ) {
   2539       1.1      matt 		/* Just remove the mappings. */
   2540       1.1      matt 		pmap_remove(pmap, sva, eva);
    2541  1.14.2.4   thorpej 		/* pmap_update() is not needed here; the caller of
    2542  1.14.2.4   thorpej 		 * pmap_protect() is expected to call it. */
   2543       1.1      matt 		return;
   2544       1.1      matt 	}
   2545       1.1      matt 	if (prot & VM_PROT_WRITE) {
   2546       1.1      matt 		/*
   2547       1.1      matt 		 * If this is a read->write transition, just ignore it and let
   2548       1.1      matt 		 * uvm_fault() take care of it later.
   2549       1.1      matt 		 */
   2550       1.1      matt 		return;
   2551       1.1      matt 	}
   2552       1.1      matt 
   2553  1.14.2.2   thorpej 	/* Need to lock map->head */
   2554  1.14.2.2   thorpej 	PMAP_MAP_TO_HEAD_LOCK();
   2555  1.14.2.2   thorpej 
   2556      1.11     chris 	ptes = pmap_map_ptes(pmap);
   2557  1.14.2.7  jdolecek 
   2558  1.14.2.7  jdolecek 	/*
    2559  1.14.2.7  jdolecek 	 * OK, at this point, we know we're doing a write-protect operation.
   2560  1.14.2.7  jdolecek 	 * If the pmap is active, write-back the range.
   2561  1.14.2.7  jdolecek 	 */
   2562  1.14.2.7  jdolecek 	if (pmap_is_curpmap(pmap))
   2563  1.14.2.7  jdolecek 		cpu_dcache_wb_range(sva, eva - sva);
   2564  1.14.2.7  jdolecek 
   2565       1.1      matt 	/*
   2566       1.1      matt 	 * We need to acquire a pointer to a page table page before entering
   2567       1.1      matt 	 * the following loop.
   2568       1.1      matt 	 */
   2569       1.1      matt 	while (sva < eva) {
   2570  1.14.2.4   thorpej 		if (pmap_pde_page(pmap_pde(pmap, sva)))
   2571       1.1      matt 			break;
   2572  1.14.2.7  jdolecek 		sva = (sva & L1_S_FRAME) + L1_S_SIZE;
   2573       1.1      matt 	}
   2574      1.11     chris 
   2575  1.14.2.7  jdolecek 	pte = &ptes[arm_btop(sva)];
   2576  1.14.2.2   thorpej 
   2577       1.1      matt 	while (sva < eva) {
   2578       1.1      matt 		/* only check once in a while */
   2579  1.14.2.7  jdolecek 		if ((sva & L2_ADDR_BITS) == 0) {
   2580  1.14.2.4   thorpej 			if (!pmap_pde_page(pmap_pde(pmap, sva))) {
   2581       1.1      matt 				/* We can race ahead here, to the next pde. */
   2582  1.14.2.7  jdolecek 				sva += L1_S_SIZE;
   2583  1.14.2.7  jdolecek 				pte += arm_btop(L1_S_SIZE);
   2584       1.1      matt 				continue;
   2585       1.1      matt 			}
   2586       1.1      matt 		}
   2587       1.1      matt 
   2588       1.1      matt 		if (!pmap_pte_v(pte))
   2589       1.1      matt 			goto next;
   2590       1.1      matt 
   2591       1.1      matt 		flush = 1;
   2592       1.1      matt 
   2593       1.1      matt 		armprot = 0;
   2594       1.1      matt 		if (sva < VM_MAXUSER_ADDRESS)
   2595  1.14.2.7  jdolecek 			armprot |= L2_S_PROT_U;
   2596       1.1      matt 		else if (sva < VM_MAX_ADDRESS)
   2597  1.14.2.7  jdolecek 			armprot |= L2_S_PROT_W;  /* XXX Ekk what is this ? */
   2598       1.1      matt 		*pte = (*pte & 0xfffff00f) | armprot;
   2599       1.1      matt 
   2600       1.1      matt 		pa = pmap_pte_pa(pte);
   2601       1.1      matt 
   2602       1.1      matt 		/* Get the physical page index */
   2603       1.1      matt 
   2604       1.1      matt 		/* Clear write flag */
   2605  1.14.2.6  jdolecek 		if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
   2606  1.14.2.6  jdolecek 			simple_lock(&pg->mdpage.pvh_slock);
   2607  1.14.2.7  jdolecek 			(void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
   2608  1.14.2.6  jdolecek 			pmap_vac_me_harder(pmap, pg, ptes, FALSE);
   2609  1.14.2.6  jdolecek 			simple_unlock(&pg->mdpage.pvh_slock);
   2610       1.1      matt 		}
   2611       1.1      matt 
   2612       1.1      matt next:
   2613       1.1      matt 		sva += NBPG;
   2614       1.1      matt 		pte++;
   2615       1.1      matt 	}
   2616      1.11     chris 	pmap_unmap_ptes(pmap);
   2617  1.14.2.2   thorpej 	PMAP_MAP_TO_HEAD_UNLOCK();
   2618       1.1      matt 	if (flush)
   2619       1.1      matt 		cpu_tlb_flushID();
   2620       1.1      matt }
   2621       1.1      matt 
   2622       1.1      matt /*
   2623  1.14.2.1     lukem  * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
   2624       1.1      matt  * int flags)
   2625       1.1      matt  *
   2626       1.1      matt  *      Insert the given physical page (p) at
   2627       1.1      matt  *      the specified virtual address (v) in the
   2628       1.1      matt  *      target physical map with the protection requested.
   2629       1.1      matt  *
   2630       1.1      matt  *      If specified, the page will be wired down, meaning
   2631       1.1      matt  *      that the related pte can not be reclaimed.
   2632       1.1      matt  *
   2633       1.1      matt  *      NB:  This is the only routine which MAY NOT lazy-evaluate
   2634       1.1      matt  *      or lose information.  That is, this routine must actually
   2635       1.1      matt  *      insert this page into the given map NOW.
   2636       1.1      matt  */
   2637       1.1      matt 
   2638       1.1      matt int
   2639  1.14.2.7  jdolecek pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
   2640  1.14.2.7  jdolecek     int flags)
   2641       1.1      matt {
   2642  1.14.2.7  jdolecek 	pt_entry_t *ptes, opte, npte;
   2643       1.2      matt 	paddr_t opa;
   2644       1.1      matt 	boolean_t wired = (flags & PMAP_WIRED) != 0;
   2645  1.14.2.6  jdolecek 	struct vm_page *pg;
   2646  1.14.2.2   thorpej 	struct pv_entry *pve;
   2647  1.14.2.7  jdolecek 	int error, nflags;
   2648       1.1      matt 
   2649       1.1      matt 	PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
   2650       1.1      matt 	    va, pa, pmap, prot, wired));
   2651       1.1      matt 
   2652       1.1      matt #ifdef DIAGNOSTIC
   2653       1.1      matt 	/* Valid address ? */
   2654  1.14.2.6  jdolecek 	if (va >= (pmap_curmaxkvaddr))
   2655       1.1      matt 		panic("pmap_enter: too big");
   2656       1.1      matt 	if (pmap != pmap_kernel() && va != 0) {
   2657       1.1      matt 		if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
   2658       1.1      matt 			panic("pmap_enter: kernel page in user map");
   2659       1.1      matt 	} else {
   2660       1.1      matt 		if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
   2661       1.1      matt 			panic("pmap_enter: user page in kernel map");
   2662       1.1      matt 		if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
   2663       1.1      matt 			panic("pmap_enter: entering PT page");
   2664       1.1      matt 	}
   2665       1.1      matt #endif
   2666  1.14.2.7  jdolecek 
   2667  1.14.2.7  jdolecek 	KDASSERT(((va | pa) & PGOFSET) == 0);
   2668  1.14.2.7  jdolecek 
   2669  1.14.2.6  jdolecek 	/*
   2670  1.14.2.6  jdolecek 	 * Get a pointer to the page.  Later on in this function, we
   2671  1.14.2.6  jdolecek 	 * test for a managed page by checking pg != NULL.
   2672  1.14.2.6  jdolecek 	 */
   2673  1.14.2.7  jdolecek 	pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
   2674  1.14.2.6  jdolecek 
   2675  1.14.2.2   thorpej 	/* get lock */
   2676  1.14.2.2   thorpej 	PMAP_MAP_TO_HEAD_LOCK();
   2677  1.14.2.7  jdolecek 
   2678       1.1      matt 	/*
   2679  1.14.2.7  jdolecek 	 * map the ptes.  If there's not already an L2 table for this
   2680  1.14.2.7  jdolecek 	 * address, allocate one.
   2681       1.1      matt 	 */
   2682  1.14.2.7  jdolecek 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   2683  1.14.2.7  jdolecek 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   2684  1.14.2.2   thorpej 		struct vm_page *ptp;
   2685  1.14.2.7  jdolecek 
   2686  1.14.2.7  jdolecek 		/* kernel should be pre-grown */
   2687  1.14.2.7  jdolecek 		KASSERT(pmap != pmap_kernel());
   2688       1.1      matt 
   2689  1.14.2.2   thorpej 		/* if failure is allowed then don't try too hard */
   2690  1.14.2.7  jdolecek 		ptp = pmap_get_ptp(pmap, va & L1_S_FRAME);
   2691  1.14.2.2   thorpej 		if (ptp == NULL) {
   2692  1.14.2.2   thorpej 			if (flags & PMAP_CANFAIL) {
   2693  1.14.2.2   thorpej 				error = ENOMEM;
   2694  1.14.2.2   thorpej 				goto out;
   2695  1.14.2.2   thorpej 			}
   2696  1.14.2.2   thorpej 			panic("pmap_enter: get ptp failed");
   2697  1.14.2.2   thorpej 		}
   2698       1.1      matt 	}
   2699  1.14.2.7  jdolecek 	opte = ptes[arm_btop(va)];
   2700       1.1      matt 
   2701       1.1      matt 	nflags = 0;
   2702       1.1      matt 	if (prot & VM_PROT_WRITE)
   2703  1.14.2.7  jdolecek 		nflags |= PVF_WRITE;
   2704       1.1      matt 	if (wired)
   2705  1.14.2.7  jdolecek 		nflags |= PVF_WIRED;
   2706       1.1      matt 
   2707       1.1      matt 	/* Is the pte valid ? If so then this page is already mapped */
   2708  1.14.2.7  jdolecek 	if (l2pte_valid(opte)) {
   2709       1.1      matt 		/* Get the physical address of the current page mapped */
   2710  1.14.2.7  jdolecek 		opa = l2pte_pa(opte);
   2711       1.1      matt 
   2712       1.1      matt 		/* Are we mapping the same page ? */
   2713       1.1      matt 		if (opa == pa) {
   2714       1.1      matt 			/* Has the wiring changed ? */
   2715  1.14.2.6  jdolecek 			if (pg != NULL) {
   2716  1.14.2.6  jdolecek 				simple_lock(&pg->mdpage.pvh_slock);
   2717  1.14.2.6  jdolecek 				(void) pmap_modify_pv(pmap, va, pg,
   2718  1.14.2.7  jdolecek 				    PVF_WRITE | PVF_WIRED, nflags);
   2719  1.14.2.6  jdolecek 				simple_unlock(&pg->mdpage.pvh_slock);
    2720  1.14.2.6  jdolecek 			}
   2721       1.1      matt 		} else {
   2722  1.14.2.6  jdolecek 			struct vm_page *opg;
   2723  1.14.2.6  jdolecek 
   2724       1.1      matt 			/* We are replacing the page with a new one. */
   2725  1.14.2.5  jdolecek 			cpu_idcache_wbinv_range(va, NBPG);
   2726       1.1      matt 
   2727       1.1      matt 			/*
   2728       1.1      matt 			 * If it is part of our managed memory then we
   2729       1.1      matt 			 * must remove it from the PV list
   2730       1.1      matt 			 */
   2731  1.14.2.6  jdolecek 			if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
   2732  1.14.2.6  jdolecek 				simple_lock(&opg->mdpage.pvh_slock);
   2733  1.14.2.6  jdolecek 				pve = pmap_remove_pv(opg, pmap, va);
   2734  1.14.2.6  jdolecek 				simple_unlock(&opg->mdpage.pvh_slock);
   2735  1.14.2.2   thorpej 			} else {
   2736  1.14.2.2   thorpej 				pve = NULL;
   2737       1.1      matt 			}
   2738       1.1      matt 
   2739       1.1      matt 			goto enter;
   2740       1.1      matt 		}
   2741       1.1      matt 	} else {
   2742       1.1      matt 		opa = 0;
   2743  1.14.2.2   thorpej 		pve = NULL;
   2744       1.1      matt 		pmap_pte_addref(pmap, va);
   2745       1.1      matt 
   2746       1.1      matt 		/* pte is not valid so we must be hooking in a new page */
   2747       1.1      matt 		++pmap->pm_stats.resident_count;
   2748       1.1      matt 
   2749       1.1      matt 	enter:
   2750       1.1      matt 		/*
   2751       1.1      matt 		 * Enter on the PV list if part of our managed memory
   2752       1.1      matt 		 */
   2753  1.14.2.7  jdolecek 		if (pg != NULL) {
   2754  1.14.2.2   thorpej 			if (pve == NULL) {
   2755  1.14.2.2   thorpej 				pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
   2756  1.14.2.2   thorpej 				if (pve == NULL) {
   2757  1.14.2.2   thorpej 					if (flags & PMAP_CANFAIL) {
   2758  1.14.2.2   thorpej 						error = ENOMEM;
   2759  1.14.2.2   thorpej 						goto out;
   2760  1.14.2.2   thorpej 					}
   2761  1.14.2.7  jdolecek 					panic("pmap_enter: no pv entries "
   2762  1.14.2.7  jdolecek 					    "available");
   2763  1.14.2.2   thorpej 				}
   2764  1.14.2.2   thorpej 			}
   2765  1.14.2.2   thorpej 			/* enter_pv locks pvh when adding */
   2766  1.14.2.6  jdolecek 			pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
   2767  1.14.2.2   thorpej 		} else {
   2768  1.14.2.2   thorpej 			if (pve != NULL)
   2769  1.14.2.2   thorpej 				pmap_free_pv(pmap, pve);
   2770       1.1      matt 		}
   2771       1.1      matt 	}
   2772       1.1      matt 
   2773       1.1      matt 	/* Construct the pte, giving the correct access. */
   2774  1.14.2.7  jdolecek 	npte = pa;
   2775       1.1      matt 
   2776       1.1      matt 	/* VA 0 is magic. */
   2777  1.14.2.7  jdolecek 	if (pmap != pmap_kernel() && va != vector_page)
   2778  1.14.2.7  jdolecek 		npte |= L2_S_PROT_U;
   2779       1.1      matt 
   2780  1.14.2.7  jdolecek 	if (pg != NULL) {
   2781       1.1      matt #ifdef DIAGNOSTIC
   2782       1.1      matt 		if ((flags & VM_PROT_ALL) & ~prot)
   2783       1.1      matt 			panic("pmap_enter: access_type exceeds prot");
   2784       1.1      matt #endif
   2785  1.14.2.7  jdolecek 		npte |= pte_l2_s_cache_mode;
   2786       1.1      matt 		if (flags & VM_PROT_WRITE) {
   2787  1.14.2.7  jdolecek 			npte |= L2_S_PROTO | L2_S_PROT_W;
   2788  1.14.2.7  jdolecek 			pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
   2789       1.1      matt 		} else if (flags & VM_PROT_ALL) {
   2790  1.14.2.7  jdolecek 			npte |= L2_S_PROTO;
   2791  1.14.2.7  jdolecek 			pg->mdpage.pvh_attrs |= PVF_REF;
   2792       1.1      matt 		} else
   2793  1.14.2.7  jdolecek 			npte |= L2_TYPE_INV;
   2794       1.1      matt 	} else {
   2795       1.1      matt 		if (prot & VM_PROT_WRITE)
   2796  1.14.2.7  jdolecek 			npte |= L2_S_PROTO | L2_S_PROT_W;
   2797       1.1      matt 		else if (prot & VM_PROT_ALL)
   2798  1.14.2.7  jdolecek 			npte |= L2_S_PROTO;
   2799       1.1      matt 		else
   2800  1.14.2.7  jdolecek 			npte |= L2_TYPE_INV;
   2801       1.1      matt 	}
   2802       1.1      matt 
   2803  1.14.2.7  jdolecek 	ptes[arm_btop(va)] = npte;
   2804       1.1      matt 
   2805  1.14.2.7  jdolecek 	if (pg != NULL) {
   2806  1.14.2.6  jdolecek 		simple_lock(&pg->mdpage.pvh_slock);
    2807  1.14.2.7  jdolecek 		pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
   2808  1.14.2.6  jdolecek 		simple_unlock(&pg->mdpage.pvh_slock);
   2809      1.11     chris 	}
   2810       1.1      matt 
   2811       1.1      matt 	/* Better flush the TLB ... */
   2812       1.1      matt 	cpu_tlb_flushID_SE(va);
   2813  1.14.2.2   thorpej 	error = 0;
   2814  1.14.2.2   thorpej out:
   2815  1.14.2.7  jdolecek 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   2816  1.14.2.2   thorpej 	PMAP_MAP_TO_HEAD_UNLOCK();
   2817       1.1      matt 
   2818  1.14.2.2   thorpej 	return error;
   2819       1.1      matt }
   2820       1.1      matt 
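                             /*
                              * For reference, a typical pmap_enter() call from machine-independent
                              * code looks like this sketch ('va' and 'pa' page aligned; PMAP_CANFAIL
                              * requests ENOMEM instead of a panic on resource shortage; the caller
                              * is responsible for the eventual pmap_update()):
                              */
                             #if 0	/* illustrative sketch only */
                             	error = pmap_enter(pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE,
                             	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED | PMAP_CANFAIL);
                             	if (error != 0)
                             		goto fail;		/* hypothetical error path */
                             	pmap_update(pmap);
                             #endif
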
   2821  1.14.2.6  jdolecek /*
   2822  1.14.2.6  jdolecek  * pmap_kenter_pa: enter a kernel mapping
   2823  1.14.2.6  jdolecek  *
    2824  1.14.2.6  jdolecek  * => no need to lock anything; assume va is already allocated
   2825  1.14.2.6  jdolecek  * => should be faster than normal pmap enter function
   2826  1.14.2.6  jdolecek  */
   2827       1.1      matt void
   2828  1.14.2.7  jdolecek pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
   2829       1.1      matt {
   2830      1.13     chris 	pt_entry_t *pte;
   2831      1.13     chris 
   2832      1.13     chris 	pte = vtopte(va);
   2833      1.14       chs 	KASSERT(!pmap_pte_v(pte));
   2834  1.14.2.7  jdolecek 
   2835  1.14.2.7  jdolecek 	*pte = L2_S_PROTO | pa |
   2836  1.14.2.7  jdolecek 	    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
   2837       1.1      matt }
   2838       1.1      matt 
   2839       1.1      matt void
   2840  1.14.2.7  jdolecek pmap_kremove(vaddr_t va, vsize_t len)
   2841       1.1      matt {
   2842      1.14       chs 	pt_entry_t *pte;
   2843      1.14       chs 
   2844       1.1      matt 	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
   2845      1.13     chris 
   2846      1.14       chs 		/*
   2847      1.14       chs 		 * We assume that we will only be called with small
   2848      1.14       chs 		 * regions of memory.
   2849      1.14       chs 		 */
   2850      1.14       chs 
   2851  1.14.2.4   thorpej 		KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
   2852      1.13     chris 		pte = vtopte(va);
   2853  1.14.2.5  jdolecek 		cpu_idcache_wbinv_range(va, PAGE_SIZE);
   2854      1.13     chris 		*pte = 0;
   2855      1.13     chris 		cpu_tlb_flushID_SE(va);
   2856       1.1      matt 	}
   2857       1.1      matt }
   2858       1.1      matt 
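                             /*
                              * pmap_kenter_pa() and pmap_kremove() are intended to be paired, as
                              * in this sketch of a transient kernel window onto a physical page
                              * (no pv tracking is done, so any cache aliasing is the caller's
                              * problem):
                              */
                             #if 0	/* illustrative sketch only */
                             	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
                             	pmap_update(pmap_kernel());
                             	/* ... access the page through 'va' ... */
                             	pmap_kremove(va, PAGE_SIZE);
                             	pmap_update(pmap_kernel());
                             #endif
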
   2859       1.1      matt /*
   2860       1.1      matt  * pmap_page_protect:
   2861       1.1      matt  *
   2862       1.1      matt  * Lower the permission for all mappings to a given page.
   2863       1.1      matt  */
   2864       1.1      matt 
   2865       1.1      matt void
   2866  1.14.2.7  jdolecek pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   2867       1.1      matt {
   2868       1.1      matt 
   2869  1.14.2.6  jdolecek 	PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
   2870  1.14.2.6  jdolecek 	    VM_PAGE_TO_PHYS(pg), prot));
   2871       1.1      matt 
   2872       1.1      matt 	switch(prot) {
   2873  1.14.2.2   thorpej 	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
   2874  1.14.2.2   thorpej 	case VM_PROT_READ|VM_PROT_WRITE:
   2875  1.14.2.2   thorpej 		return;
   2876  1.14.2.2   thorpej 
   2877       1.1      matt 	case VM_PROT_READ:
   2878       1.1      matt 	case VM_PROT_READ|VM_PROT_EXECUTE:
   2879  1.14.2.7  jdolecek 		pmap_clearbit(pg, PVF_WRITE);
   2880       1.1      matt 		break;
   2881       1.1      matt 
   2882       1.1      matt 	default:
   2883  1.14.2.6  jdolecek 		pmap_remove_all(pg);
   2884       1.1      matt 		break;
   2885       1.1      matt 	}
   2886       1.1      matt }
   2887       1.1      matt 
   2888       1.1      matt 
   2889       1.1      matt /*
   2890       1.1      matt  * Routine:	pmap_unwire
   2891       1.1      matt  * Function:	Clear the wired attribute for a map/virtual-address
   2892       1.1      matt  *		pair.
   2893       1.1      matt  * In/out conditions:
   2894       1.1      matt  *		The mapping must already exist in the pmap.
   2895       1.1      matt  */
   2896       1.1      matt 
   2897       1.1      matt void
   2898  1.14.2.7  jdolecek pmap_unwire(struct pmap *pmap, vaddr_t va)
   2899       1.1      matt {
   2900  1.14.2.7  jdolecek 	pt_entry_t *ptes;
   2901  1.14.2.6  jdolecek 	struct vm_page *pg;
   2902  1.14.2.7  jdolecek 	paddr_t pa;
   2903       1.1      matt 
   2904  1.14.2.7  jdolecek 	PMAP_MAP_TO_HEAD_LOCK();
   2905  1.14.2.7  jdolecek 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   2906       1.1      matt 
   2907  1.14.2.7  jdolecek 	if (pmap_pde_v(pmap_pde(pmap, va))) {
   2908       1.1      matt #ifdef DIAGNOSTIC
   2909  1.14.2.7  jdolecek 		if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   2910  1.14.2.7  jdolecek 			panic("pmap_unwire: invalid L2 PTE");
   2911       1.1      matt #endif
   2912  1.14.2.7  jdolecek 		/* Extract the physical address of the page */
   2913  1.14.2.7  jdolecek 		pa = l2pte_pa(ptes[arm_btop(va)]);
   2914       1.1      matt 
   2915  1.14.2.7  jdolecek 		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   2916  1.14.2.7  jdolecek 			goto out;
   2917  1.14.2.7  jdolecek 
   2918  1.14.2.7  jdolecek 		/* Update the wired bit in the pv entry for this page. */
   2919  1.14.2.7  jdolecek 		simple_lock(&pg->mdpage.pvh_slock);
   2920  1.14.2.7  jdolecek 		(void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
   2921  1.14.2.7  jdolecek 		simple_unlock(&pg->mdpage.pvh_slock);
   2922       1.1      matt 	}
   2923  1.14.2.7  jdolecek #ifdef DIAGNOSTIC
   2924  1.14.2.7  jdolecek 	else {
   2925  1.14.2.7  jdolecek 		panic("pmap_unwire: invalid L1 PTE");
   2926  1.14.2.7  jdolecek 	}
   2927  1.14.2.7  jdolecek #endif
   2928  1.14.2.7  jdolecek  out:
   2929  1.14.2.7  jdolecek 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   2930  1.14.2.7  jdolecek 	PMAP_MAP_TO_HEAD_UNLOCK();
   2931       1.1      matt }
   2932       1.1      matt 
   2933       1.1      matt /*
   2934       1.1      matt  * Routine:  pmap_extract
   2935       1.1      matt  * Function:
   2936       1.1      matt  *           Extract the physical page address associated
   2937       1.1      matt  *           with the given map/virtual_address pair.
   2938       1.1      matt  */
   2939       1.1      matt boolean_t
   2940  1.14.2.7  jdolecek pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
   2941       1.1      matt {
   2942  1.14.2.5  jdolecek 	pd_entry_t *pde;
   2943      1.11     chris 	pt_entry_t *pte, *ptes;
   2944       1.1      matt 	paddr_t pa;
   2945       1.1      matt 
   2946  1.14.2.7  jdolecek 	PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va));
   2947  1.14.2.7  jdolecek 
   2948  1.14.2.7  jdolecek 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   2949       1.1      matt 
   2950  1.14.2.5  jdolecek 	pde = pmap_pde(pmap, va);
   2951  1.14.2.7  jdolecek 	pte = &ptes[arm_btop(va)];
   2952       1.1      matt 
   2953  1.14.2.5  jdolecek 	if (pmap_pde_section(pde)) {
   2954  1.14.2.7  jdolecek 		pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
   2955  1.14.2.7  jdolecek 		PDEBUG(5, printf("section pa=0x%08lx\n", pa));
   2956  1.14.2.5  jdolecek 		goto out;
   2957  1.14.2.5  jdolecek 	} else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
   2958  1.14.2.7  jdolecek 		PDEBUG(5, printf("no mapping\n"));
   2959  1.14.2.7  jdolecek 		goto failed;
   2960      1.11     chris 	}
   2961       1.1      matt 
   2962  1.14.2.7  jdolecek 	if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) {
   2963  1.14.2.7  jdolecek 		pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET);
   2964  1.14.2.7  jdolecek 		PDEBUG(5, printf("large page pa=0x%08lx\n", pa));
   2965  1.14.2.5  jdolecek 		goto out;
   2966  1.14.2.5  jdolecek 	}
   2967       1.1      matt 
   2968  1.14.2.7  jdolecek 	pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET);
   2969  1.14.2.7  jdolecek 	PDEBUG(5, printf("small page pa=0x%08lx\n", pa));
   2970  1.14.2.5  jdolecek 
   2971  1.14.2.5  jdolecek  out:
   2972  1.14.2.7  jdolecek 	if (pap != NULL)
   2973  1.14.2.7  jdolecek 		*pap = pa;
   2974  1.14.2.7  jdolecek 
   2975  1.14.2.7  jdolecek 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   2976  1.14.2.7  jdolecek 	return (TRUE);
   2977  1.14.2.7  jdolecek 
   2978  1.14.2.7  jdolecek  failed:
   2979  1.14.2.7  jdolecek 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   2980  1.14.2.7  jdolecek 	return (FALSE);
   2981       1.1      matt }
   2982       1.1      matt 
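                             /*
                              * Typical use of pmap_extract(), as a sketch: translate a virtual
                              * address, bailing out when nothing (section, large or small page)
                              * is mapped there:
                              */
                             #if 0	/* illustrative sketch only */
                             	paddr_t pa;

                             	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
                             		return (EFAULT);	/* hypothetical error path */
                             #endif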
   2983       1.1      matt 
   2984       1.1      matt /*
   2985  1.14.2.7  jdolecek  * pmap_copy:
   2986  1.14.2.7  jdolecek  *
   2987  1.14.2.7  jdolecek  *	Copy the range specified by src_addr/len from the source map to the
   2988  1.14.2.7  jdolecek  *	range dst_addr/len in the destination map.
   2989       1.1      matt  *
   2990  1.14.2.7  jdolecek  *	This routine is only advisory and need not do anything.
   2991       1.1      matt  */
   2992  1.14.2.7  jdolecek /* Call deleted in <arm/arm32/pmap.h> */
   2993       1.1      matt 
   2994       1.1      matt #if defined(PMAP_DEBUG)
   2995       1.1      matt void
   2996       1.1      matt pmap_dump_pvlist(phys, m)
   2997       1.1      matt 	vaddr_t phys;
   2998       1.1      matt 	char *m;
   2999       1.1      matt {
   3000  1.14.2.6  jdolecek 	struct vm_page *pg;
   3001       1.1      matt 	struct pv_entry *pv;
   3002       1.1      matt 
   3003  1.14.2.6  jdolecek 	if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
   3004       1.1      matt 		printf("INVALID PA\n");
   3005       1.1      matt 		return;
   3006       1.1      matt 	}
   3007  1.14.2.6  jdolecek 	simple_lock(&pg->mdpage.pvh_slock);
   3008       1.1      matt 	printf("%s %08lx:", m, phys);
   3009  1.14.2.6  jdolecek 	if (pg->mdpage.pvh_list == NULL) {
   3010  1.14.2.7  jdolecek 		simple_unlock(&pg->mdpage.pvh_slock);
   3011       1.1      matt 		printf(" no mappings\n");
   3012       1.1      matt 		return;
   3013       1.1      matt 	}
   3014       1.1      matt 
   3015  1.14.2.6  jdolecek 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
   3016       1.1      matt 		printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
   3017       1.1      matt 		    pv->pv_va, pv->pv_flags);
   3018       1.1      matt 
   3019       1.1      matt 	printf("\n");
   3020  1.14.2.6  jdolecek 	simple_unlock(&pg->mdpage.pvh_slock);
   3021       1.1      matt }
   3022       1.1      matt 
   3023       1.1      matt #endif	/* PMAP_DEBUG */
   3024       1.1      matt 
   3025      1.11     chris static pt_entry_t *
   3026      1.11     chris pmap_map_ptes(struct pmap *pmap)
   3027      1.11     chris {
   3028  1.14.2.7  jdolecek 	struct proc *p;
   3029  1.14.2.2   thorpej 
    3030  1.14.2.2   thorpej 	/* the kernel's pmap is always accessible */
   3031  1.14.2.2   thorpej 	if (pmap == pmap_kernel()) {
   3032  1.14.2.7  jdolecek 		return (pt_entry_t *)PTE_BASE;
   3033  1.14.2.2   thorpej 	}
   3034  1.14.2.2   thorpej 
   3035  1.14.2.2   thorpej 	if (pmap_is_curpmap(pmap)) {
   3036  1.14.2.2   thorpej 		simple_lock(&pmap->pm_obj.vmobjlock);
   3037  1.14.2.7  jdolecek 		return (pt_entry_t *)PTE_BASE;
   3038  1.14.2.2   thorpej 	}
   3039  1.14.2.7  jdolecek 
   3040  1.14.2.2   thorpej 	p = curproc;
   3041  1.14.2.7  jdolecek 	KDASSERT(p != NULL);
   3042  1.14.2.2   thorpej 
   3043  1.14.2.2   thorpej 	/* need to lock both curpmap and pmap: use ordered locking */
   3044  1.14.2.7  jdolecek 	if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) {
   3045  1.14.2.2   thorpej 		simple_lock(&pmap->pm_obj.vmobjlock);
   3046  1.14.2.7  jdolecek 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3047  1.14.2.2   thorpej 	} else {
   3048  1.14.2.7  jdolecek 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3049  1.14.2.2   thorpej 		simple_lock(&pmap->pm_obj.vmobjlock);
   3050  1.14.2.2   thorpej 	}
   3051      1.11     chris 
   3052  1.14.2.7  jdolecek 	pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE, pmap->pm_pptpt,
   3053  1.14.2.7  jdolecek 	    FALSE);
   3054  1.14.2.2   thorpej 	cpu_tlb_flushD();
   3055  1.14.2.4   thorpej 	cpu_cpwait();
   3056  1.14.2.7  jdolecek 	return (pt_entry_t *)APTE_BASE;
   3057  1.14.2.2   thorpej }
   3058  1.14.2.2   thorpej 
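                             /*
                              * The ordered locking above, in isolation: when two locks must be
                              * held at once, every thread takes the lower-addressed pmap's lock
                              * first, so no two threads can each hold one lock while waiting for
                              * the other.  A minimal sketch (illustrative name):
                              */
                             #if 0	/* illustrative sketch only */
                             static void
                             lock_two_pmaps(struct pmap *a, struct pmap *b)
                             {
                             	if ((vaddr_t)a < (vaddr_t)b) {
                             		simple_lock(&a->pm_obj.vmobjlock);
                             		simple_lock(&b->pm_obj.vmobjlock);
                             	} else {
                             		simple_lock(&b->pm_obj.vmobjlock);
                             		simple_lock(&a->pm_obj.vmobjlock);
                             	}
                             }
                             #endif
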
   3059  1.14.2.2   thorpej /*
   3060  1.14.2.2   thorpej  * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
   3061  1.14.2.2   thorpej  */
   3062  1.14.2.2   thorpej 
   3063  1.14.2.2   thorpej static void
   3064  1.14.2.7  jdolecek pmap_unmap_ptes(struct pmap *pmap)
   3065  1.14.2.2   thorpej {
   3066  1.14.2.7  jdolecek 
   3067  1.14.2.2   thorpej 	if (pmap == pmap_kernel()) {
   3068  1.14.2.2   thorpej 		return;
   3069  1.14.2.2   thorpej 	}
   3070  1.14.2.2   thorpej 	if (pmap_is_curpmap(pmap)) {
   3071  1.14.2.2   thorpej 		simple_unlock(&pmap->pm_obj.vmobjlock);
   3072  1.14.2.2   thorpej 	} else {
   3073  1.14.2.7  jdolecek 		KDASSERT(curproc != NULL);
   3074  1.14.2.2   thorpej 		simple_unlock(&pmap->pm_obj.vmobjlock);
   3075  1.14.2.7  jdolecek 		simple_unlock(
   3076  1.14.2.7  jdolecek 		    &curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3077  1.14.2.2   thorpej 	}
   3078      1.11     chris }
   3079       1.1      matt 
   3080       1.1      matt /*
   3081       1.1      matt  * Modify pte bits for all ptes corresponding to the given physical address.
   3082       1.1      matt  * We use `maskbits' rather than `clearbits' because we're always passing
   3083       1.1      matt  * constants and the latter would require an extra inversion at run-time.
   3084       1.1      matt  */
   3085       1.1      matt 
   3086  1.14.2.4   thorpej static void
   3087  1.14.2.7  jdolecek pmap_clearbit(struct vm_page *pg, u_int maskbits)
   3088       1.1      matt {
   3089       1.1      matt 	struct pv_entry *pv;
   3090  1.14.2.7  jdolecek 	pt_entry_t *ptes;
   3091       1.1      matt 	vaddr_t va;
   3092  1.14.2.6  jdolecek 	int tlbentry;
   3093       1.1      matt 
   3094       1.1      matt 	PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
   3095  1.14.2.6  jdolecek 	    VM_PAGE_TO_PHYS(pg), maskbits));
   3096  1.14.2.4   thorpej 
   3097  1.14.2.4   thorpej 	tlbentry = 0;
   3098  1.14.2.4   thorpej 
   3099  1.14.2.2   thorpej 	PMAP_HEAD_TO_MAP_LOCK();
   3100  1.14.2.6  jdolecek 	simple_lock(&pg->mdpage.pvh_slock);
   3101  1.14.2.2   thorpej 
   3102       1.1      matt 	/*
   3103       1.1      matt 	 * Clear saved attributes (modify, reference)
   3104       1.1      matt 	 */
   3105  1.14.2.6  jdolecek 	pg->mdpage.pvh_attrs &= ~maskbits;
   3106       1.1      matt 
   3107  1.14.2.6  jdolecek 	if (pg->mdpage.pvh_list == NULL) {
   3108  1.14.2.6  jdolecek 		simple_unlock(&pg->mdpage.pvh_slock);
   3109  1.14.2.2   thorpej 		PMAP_HEAD_TO_MAP_UNLOCK();
   3110       1.1      matt 		return;
   3111       1.1      matt 	}
   3112       1.1      matt 
   3113       1.1      matt 	/*
    3114       1.1      matt 	 * Loop over all current mappings, setting/clearing as appropriate
   3115       1.1      matt 	 */
   3116  1.14.2.6  jdolecek 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
   3117       1.1      matt 		va = pv->pv_va;
   3118       1.1      matt 		pv->pv_flags &= ~maskbits;
   3119  1.14.2.7  jdolecek 		ptes = pmap_map_ptes(pv->pv_pmap);	/* locks pmap */
   3120  1.14.2.7  jdolecek 		KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
   3121  1.14.2.7  jdolecek 		if (maskbits & (PVF_WRITE|PVF_MOD)) {
   3122  1.14.2.7  jdolecek 			if ((pv->pv_flags & PVF_NC)) {
   3123  1.14.2.4   thorpej 				/*
   3124  1.14.2.4   thorpej 				 * Entry is not cacheable: reenable
   3125  1.14.2.4   thorpej 				 * the cache, nothing to flush
   3126  1.14.2.4   thorpej 				 *
   3127  1.14.2.4   thorpej 				 * Don't turn caching on again if this
   3128  1.14.2.4   thorpej 				 * is a modified emulation.  This
    3129  1.14.2.4   thorpej 				 * would be inconsistent with the
   3130  1.14.2.4   thorpej 				 * settings created by
   3131  1.14.2.4   thorpej 				 * pmap_vac_me_harder().
   3132  1.14.2.4   thorpej 				 *
   3133  1.14.2.4   thorpej 				 * There's no need to call
   3134  1.14.2.4   thorpej 				 * pmap_vac_me_harder() here: all
    3135  1.14.2.4   thorpej 				 * pages are losing their write
   3136  1.14.2.4   thorpej 				 * permission.
   3137  1.14.2.4   thorpej 				 *
   3138  1.14.2.4   thorpej 				 */
   3139  1.14.2.7  jdolecek 				if (maskbits & PVF_WRITE) {
   3140  1.14.2.7  jdolecek 					ptes[arm_btop(va)] |=
   3141  1.14.2.7  jdolecek 					    pte_l2_s_cache_mode;
   3142  1.14.2.7  jdolecek 					pv->pv_flags &= ~PVF_NC;
   3143  1.14.2.4   thorpej 				}
   3144  1.14.2.7  jdolecek 			} else if (pmap_is_curpmap(pv->pv_pmap)) {
   3145  1.14.2.4   thorpej 				/*
    3146  1.14.2.4   thorpej 				 * Entry is cacheable and the pmap is
    3147  1.14.2.4   thorpej 				 * current: flush the page from the cache
    3148  1.14.2.4   thorpej 				 * (a non-current pmap's pages aren't cached)
   3149  1.14.2.4   thorpej 				 */
   3150  1.14.2.5  jdolecek 				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
   3151  1.14.2.7  jdolecek 			}
   3152  1.14.2.4   thorpej 
   3153  1.14.2.4   thorpej 			/* make the pte read only */
   3154  1.14.2.7  jdolecek 			ptes[arm_btop(va)] &= ~L2_S_PROT_W;
   3155  1.14.2.4   thorpej 		}
   3156  1.14.2.4   thorpej 
   3157  1.14.2.7  jdolecek 		if (maskbits & PVF_REF)
   3158  1.14.2.7  jdolecek 			ptes[arm_btop(va)] =
   3159  1.14.2.7  jdolecek 			    (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_TYPE_INV;
   3160  1.14.2.4   thorpej 
   3161  1.14.2.7  jdolecek 		if (pmap_is_curpmap(pv->pv_pmap)) {
   3162  1.14.2.4   thorpej 			/*
    3163  1.14.2.4   thorpej 			 * If we had cacheable PTEs, we'd clean the
    3164  1.14.2.4   thorpej 			 * PTE out to memory here.
    3165  1.14.2.4   thorpej 			 *
    3166  1.14.2.4   thorpej 			 * Flush the TLB entry, as it's in the current pmap.
   3167  1.14.2.4   thorpej 			 */
   3168  1.14.2.4   thorpej 			cpu_tlb_flushID_SE(pv->pv_va);
   3169  1.14.2.7  jdolecek 		}
   3170  1.14.2.7  jdolecek 		pmap_unmap_ptes(pv->pv_pmap);		/* unlocks pmap */
   3171       1.1      matt 	}
   3172  1.14.2.4   thorpej 	cpu_cpwait();
   3173  1.14.2.4   thorpej 
   3174  1.14.2.6  jdolecek 	simple_unlock(&pg->mdpage.pvh_slock);
   3175  1.14.2.2   thorpej 	PMAP_HEAD_TO_MAP_UNLOCK();
   3176       1.1      matt }
   3177       1.1      matt 
   3178  1.14.2.6  jdolecek /*
   3179  1.14.2.6  jdolecek  * pmap_clear_modify:
   3180  1.14.2.6  jdolecek  *
   3181  1.14.2.6  jdolecek  *	Clear the "modified" attribute for a page.
   3182  1.14.2.6  jdolecek  */
   3183       1.1      matt boolean_t
   3184  1.14.2.7  jdolecek pmap_clear_modify(struct vm_page *pg)
   3185       1.1      matt {
   3186       1.1      matt 	boolean_t rv;
   3187       1.1      matt 
   3188  1.14.2.7  jdolecek 	if (pg->mdpage.pvh_attrs & PVF_MOD) {
   3189  1.14.2.6  jdolecek 		rv = TRUE;
   3190  1.14.2.7  jdolecek 		pmap_clearbit(pg, PVF_MOD);
   3191  1.14.2.6  jdolecek 	} else
   3192  1.14.2.6  jdolecek 		rv = FALSE;
   3193  1.14.2.6  jdolecek 
   3194  1.14.2.6  jdolecek 	PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
   3195  1.14.2.6  jdolecek 	    VM_PAGE_TO_PHYS(pg), rv));
   3196       1.1      matt 
   3197  1.14.2.6  jdolecek 	return (rv);
   3198  1.14.2.6  jdolecek }
   3199       1.1      matt 
   3200  1.14.2.6  jdolecek /*
   3201  1.14.2.6  jdolecek  * pmap_clear_reference:
   3202  1.14.2.6  jdolecek  *
   3203  1.14.2.6  jdolecek  *	Clear the "referenced" attribute for a page.
   3204  1.14.2.6  jdolecek  */
   3205       1.1      matt boolean_t
   3206  1.14.2.7  jdolecek pmap_clear_reference(struct vm_page *pg)
   3207       1.1      matt {
   3208       1.1      matt 	boolean_t rv;
   3209       1.1      matt 
   3210  1.14.2.7  jdolecek 	if (pg->mdpage.pvh_attrs & PVF_REF) {
   3211  1.14.2.6  jdolecek 		rv = TRUE;
   3212  1.14.2.7  jdolecek 		pmap_clearbit(pg, PVF_REF);
   3213  1.14.2.6  jdolecek 	} else
   3214  1.14.2.6  jdolecek 		rv = FALSE;
   3215       1.1      matt 
   3216  1.14.2.6  jdolecek 	PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
   3217  1.14.2.6  jdolecek 	    VM_PAGE_TO_PHYS(pg), rv));
   3218       1.1      matt 
   3219  1.14.2.6  jdolecek 	return (rv);
   3220       1.1      matt }
   3221       1.1      matt 
   3222  1.14.2.6  jdolecek /*
   3223  1.14.2.6  jdolecek  * pmap_is_modified:
   3224  1.14.2.6  jdolecek  *
   3225  1.14.2.6  jdolecek  *	Test if a page has the "modified" attribute.
   3226  1.14.2.6  jdolecek  */
   3227  1.14.2.6  jdolecek /* See <arm/arm32/pmap.h> */
   3228       1.1      matt 
   3229  1.14.2.6  jdolecek /*
   3230  1.14.2.6  jdolecek  * pmap_is_referenced:
   3231  1.14.2.6  jdolecek  *
   3232  1.14.2.6  jdolecek  *	Test if a page has the "referenced" attribute.
   3233  1.14.2.6  jdolecek  */
   3234  1.14.2.6  jdolecek /* See <arm/arm32/pmap.h> */
   3235       1.1      matt 
   3236       1.1      matt int
   3237  1.14.2.7  jdolecek pmap_modified_emulation(struct pmap *pmap, vaddr_t va)
   3238       1.1      matt {
   3239  1.14.2.7  jdolecek 	pt_entry_t *ptes;
   3240  1.14.2.6  jdolecek 	struct vm_page *pg;
   3241  1.14.2.7  jdolecek 	paddr_t pa;
   3242       1.1      matt 	u_int flags;
   3243  1.14.2.7  jdolecek 	int rv = 0;
   3244       1.1      matt 
   3245       1.1      matt 	PDEBUG(2, printf("pmap_modified_emulation\n"));
   3246       1.1      matt 
   3247  1.14.2.7  jdolecek 	PMAP_MAP_TO_HEAD_LOCK();
   3248  1.14.2.7  jdolecek 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3249  1.14.2.7  jdolecek 
   3250  1.14.2.7  jdolecek 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   3251  1.14.2.7  jdolecek 		PDEBUG(2, printf("L1 PTE invalid\n"));
   3252  1.14.2.7  jdolecek 		goto out;
   3253       1.1      matt 	}
   3254       1.1      matt 
   3255  1.14.2.7  jdolecek 	PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
   3256  1.14.2.7  jdolecek 
    3257  1.14.2.7  jdolecek 	/* Check for an invalid PTE */
   3258  1.14.2.7  jdolecek 	if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   3259  1.14.2.7  jdolecek 		goto out;
   3260       1.1      matt 
   3261       1.1      matt 	/* This can happen if user code tries to access kernel memory. */
   3262  1.14.2.7  jdolecek 	if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0)
   3263  1.14.2.7  jdolecek 		goto out;
   3264       1.1      matt 
   3265       1.1      matt 	/* Extract the physical address of the page */
   3266  1.14.2.7  jdolecek 	pa = l2pte_pa(ptes[arm_btop(va)]);
   3267  1.14.2.6  jdolecek 	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   3268  1.14.2.7  jdolecek 		goto out;
   3269       1.1      matt 
   3270       1.1      matt 	/* Get the current flags for this page. */
   3271  1.14.2.6  jdolecek 	simple_lock(&pg->mdpage.pvh_slock);
   3272  1.14.2.2   thorpej 
   3273  1.14.2.6  jdolecek 	flags = pmap_modify_pv(pmap, va, pg, 0, 0);
   3274       1.1      matt 	PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
   3275       1.1      matt 
   3276       1.1      matt 	/*
    3277       1.1      matt 	 * Do the flags say this page is writable?  If not, then it is a
    3278       1.1      matt 	 * genuine write fault.  If so, then the write fault is our fault,
    3279       1.1      matt 	 * as we did not reflect the write access in the PTE.  Now that we
    3280       1.1      matt 	 * know a write has occurred, we can correct this and also set
    3281       1.1      matt 	 * the modified bit.
   3282       1.1      matt 	 */
   3283  1.14.2.7  jdolecek 	if (~flags & PVF_WRITE) {
   3284  1.14.2.6  jdolecek 	    	simple_unlock(&pg->mdpage.pvh_slock);
   3285  1.14.2.7  jdolecek 		goto out;
   3286  1.14.2.2   thorpej 	}
   3287       1.1      matt 
   3288  1.14.2.7  jdolecek 	PDEBUG(0,
   3289  1.14.2.7  jdolecek 	    printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
   3290  1.14.2.7  jdolecek 	    va, ptes[arm_btop(va)]));
   3291  1.14.2.7  jdolecek 	pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
   3292  1.14.2.4   thorpej 
   3293  1.14.2.4   thorpej 	/*
   3294  1.14.2.4   thorpej 	 * Re-enable write permissions for the page.  No need to call
   3295  1.14.2.4   thorpej 	 * pmap_vac_me_harder(), since this is just a
   3296  1.14.2.7  jdolecek 	 * modified-emulation fault, and the PVF_WRITE bit isn't changing.
   3297  1.14.2.7  jdolecek 	 * We've already set the cacheable bits based on the assumption
   3298  1.14.2.7  jdolecek 	 * that we can write to this page.
   3299  1.14.2.7  jdolecek 	 */
   3300  1.14.2.7  jdolecek 	ptes[arm_btop(va)] =
   3301  1.14.2.7  jdolecek 	    (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
   3302  1.14.2.7  jdolecek 	PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
   3303       1.1      matt 
   3304  1.14.2.6  jdolecek 	simple_unlock(&pg->mdpage.pvh_slock);
   3305  1.14.2.7  jdolecek 
   3306       1.1      matt 	cpu_tlb_flushID_SE(va);
   3307  1.14.2.4   thorpej 	cpu_cpwait();
   3308  1.14.2.7  jdolecek 	rv = 1;
   3309  1.14.2.7  jdolecek  out:
   3310  1.14.2.7  jdolecek 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3311  1.14.2.7  jdolecek 	PMAP_MAP_TO_HEAD_UNLOCK();
   3312  1.14.2.7  jdolecek 	return (rv);
   3313       1.1      matt }
   3314       1.1      matt 
   3315       1.1      matt int
   3316  1.14.2.7  jdolecek pmap_handled_emulation(struct pmap *pmap, vaddr_t va)
   3317       1.1      matt {
   3318  1.14.2.7  jdolecek 	pt_entry_t *ptes;
   3319  1.14.2.6  jdolecek 	struct vm_page *pg;
   3320  1.14.2.7  jdolecek 	paddr_t pa;
   3321  1.14.2.7  jdolecek 	int rv = 0;
   3322       1.1      matt 
   3323       1.1      matt 	PDEBUG(2, printf("pmap_handled_emulation\n"));
   3324       1.1      matt 
   3325  1.14.2.7  jdolecek 	PMAP_MAP_TO_HEAD_LOCK();
   3326  1.14.2.7  jdolecek 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3327  1.14.2.7  jdolecek 
   3328  1.14.2.7  jdolecek 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   3329  1.14.2.7  jdolecek 		PDEBUG(2, printf("L1 PTE invalid\n"));
   3330  1.14.2.7  jdolecek 		goto out;
   3331       1.1      matt 	}
   3332       1.1      matt 
   3333  1.14.2.7  jdolecek 	PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
   3334  1.14.2.7  jdolecek 
    3335  1.14.2.7  jdolecek 	/* Check for an invalid PTE */
   3336  1.14.2.7  jdolecek 	if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   3337  1.14.2.7  jdolecek 		goto out;
   3338       1.1      matt 
   3339       1.1      matt 	/* This can happen if user code tries to access kernel memory. */
   3340  1.14.2.7  jdolecek 	if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV)
   3341  1.14.2.7  jdolecek 		goto out;
   3342       1.1      matt 
   3343       1.1      matt 	/* Extract the physical address of the page */
   3344  1.14.2.7  jdolecek 	pa = l2pte_pa(ptes[arm_btop(va)]);
   3345  1.14.2.6  jdolecek 	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   3346  1.14.2.7  jdolecek 		goto out;
   3347  1.14.2.7  jdolecek 
   3348  1.14.2.7  jdolecek 	simple_lock(&pg->mdpage.pvh_slock);
   3349       1.1      matt 
   3350       1.1      matt 	/*
    3351       1.1      matt 	 * OK, we just enable the PTE and mark the attributes as handled.
   3352  1.14.2.7  jdolecek 	 * XXX Should we traverse the PV list and enable all PTEs?
   3353       1.1      matt 	 */
   3354  1.14.2.7  jdolecek 	PDEBUG(0,
   3355  1.14.2.7  jdolecek 	    printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
   3356  1.14.2.7  jdolecek 	    va, ptes[arm_btop(va)]));
   3357  1.14.2.7  jdolecek 	pg->mdpage.pvh_attrs |= PVF_REF;
   3358  1.14.2.7  jdolecek 
   3359  1.14.2.7  jdolecek 	ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO;
   3360  1.14.2.7  jdolecek 	PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
   3361  1.14.2.7  jdolecek 
   3362  1.14.2.7  jdolecek 	simple_unlock(&pg->mdpage.pvh_slock);
   3363       1.1      matt 
   3364       1.1      matt 	cpu_tlb_flushID_SE(va);
   3365  1.14.2.4   thorpej 	cpu_cpwait();
   3366  1.14.2.7  jdolecek 	rv = 1;
   3367  1.14.2.7  jdolecek  out:
   3368  1.14.2.7  jdolecek 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3369  1.14.2.7  jdolecek 	PMAP_MAP_TO_HEAD_UNLOCK();
   3370  1.14.2.7  jdolecek 	return (rv);
   3371       1.1      matt }
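
                             /*
                              * Caller sketch (assumed; the real caller is the arm32 data abort
                              * handler, which lives outside this file):
                              *
                              *	if (pmap_modified_emulation(pmap, va))
                              *		return;		(fault was mod/ref bookkeeping)
                              *	if (pmap_handled_emulation(pmap, va))
                              *		return;		(fault was ref bookkeeping)
                              *
                              * A non-zero return means the fault has been resolved and the
                              * faulting instruction can simply be restarted.
                              */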
   3372       1.1      matt 
   3373       1.1      matt /*
   3374       1.1      matt  * pmap_collect: free resources held by a pmap
   3375       1.1      matt  *
   3376       1.1      matt  * => optional function.
   3377       1.1      matt  * => called when a process is swapped out to free memory.
   3378       1.1      matt  */
   3379       1.1      matt 
   3380       1.1      matt void
   3381  1.14.2.7  jdolecek pmap_collect(struct pmap *pmap)
   3382       1.1      matt {
   3383       1.1      matt }
   3384       1.1      matt 
   3385       1.1      matt /*
   3386       1.1      matt  * Routine:	pmap_procwr
   3387       1.1      matt  *
   3388       1.1      matt  * Function:
    3389       1.1      matt  *	Synchronize caches corresponding to [va, va + len) in process p.
   3390       1.1      matt  *
   3391       1.1      matt  */
   3392       1.1      matt void
   3393  1.14.2.7  jdolecek pmap_procwr(struct proc *p, vaddr_t va, int len)
   3394       1.1      matt {
   3395       1.1      matt 	/* We only need to do anything if it is the current process. */
   3396       1.1      matt 	if (p == curproc)
   3397  1.14.2.5  jdolecek 		cpu_icache_sync_range(va, len);
   3398  1.14.2.2   thorpej }
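
                             /*
                              * Usage sketch (assumed caller): code that writes another process's
                              * text, e.g. a debugger planting a breakpoint via ptrace(2), would
                              * follow the write with
                              *
                              *	pmap_procwr(p, va, len);
                              *
                              * so the I-cache becomes coherent with the modified instructions.
                              */
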
   3399  1.14.2.2   thorpej /*
   3400  1.14.2.2   thorpej  * PTP functions
   3401  1.14.2.2   thorpej  */
   3402  1.14.2.2   thorpej 
   3403  1.14.2.2   thorpej /*
   3404  1.14.2.2   thorpej  * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
   3405  1.14.2.2   thorpej  *
   3406  1.14.2.2   thorpej  * => pmap should NOT be pmap_kernel()
   3407  1.14.2.2   thorpej  * => pmap should be locked
   3408  1.14.2.2   thorpej  */
   3409  1.14.2.2   thorpej 
   3410  1.14.2.2   thorpej static struct vm_page *
   3411  1.14.2.7  jdolecek pmap_get_ptp(struct pmap *pmap, vaddr_t va)
   3412  1.14.2.2   thorpej {
   3413  1.14.2.7  jdolecek 	struct vm_page *ptp;
   3414  1.14.2.2   thorpej 
   3415  1.14.2.7  jdolecek 	if (pmap_pde_page(pmap_pde(pmap, va))) {
   3416  1.14.2.2   thorpej 
   3417  1.14.2.7  jdolecek 		/* valid... check hint (saves us a PA->PG lookup) */
   3418  1.14.2.7  jdolecek 		if (pmap->pm_ptphint &&
   3419  1.14.2.7  jdolecek 		    (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) ==
   3420  1.14.2.7  jdolecek 		    VM_PAGE_TO_PHYS(pmap->pm_ptphint))
   3421  1.14.2.7  jdolecek 			return (pmap->pm_ptphint);
   3422  1.14.2.7  jdolecek 		ptp = uvm_pagelookup(&pmap->pm_obj, va);
   3423  1.14.2.2   thorpej #ifdef DIAGNOSTIC
   3424  1.14.2.7  jdolecek 		if (ptp == NULL)
   3425  1.14.2.7  jdolecek 			panic("pmap_get_ptp: unmanaged user PTP");
   3426  1.14.2.2   thorpej #endif
   3427  1.14.2.7  jdolecek 		pmap->pm_ptphint = ptp;
    3428  1.14.2.7  jdolecek 		return (ptp);
   3429  1.14.2.7  jdolecek 	}
   3430  1.14.2.2   thorpej 
   3431  1.14.2.7  jdolecek 	/* allocate a new PTP (updates ptphint) */
    3432  1.14.2.7  jdolecek 	return (pmap_alloc_ptp(pmap, va));
   3433  1.14.2.2   thorpej }
   3434  1.14.2.2   thorpej 
   3435  1.14.2.2   thorpej /*
   3436  1.14.2.2   thorpej  * pmap_alloc_ptp: allocate a PTP for a PMAP
   3437  1.14.2.2   thorpej  *
   3438  1.14.2.2   thorpej  * => pmap should already be locked by caller
   3439  1.14.2.2   thorpej  * => we use the ptp's wire_count to count the number of active mappings
   3440  1.14.2.2   thorpej  *	in the PTP (we start it at one to prevent any chance this PTP
   3441  1.14.2.2   thorpej  *	will ever leak onto the active/inactive queues)
   3442  1.14.2.2   thorpej  */
   3443  1.14.2.2   thorpej 
   3444  1.14.2.2   thorpej /*__inline */ static struct vm_page *
   3445  1.14.2.7  jdolecek pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
   3446  1.14.2.2   thorpej {
   3447  1.14.2.2   thorpej 	struct vm_page *ptp;
   3448  1.14.2.2   thorpej 
   3449  1.14.2.2   thorpej 	ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
   3450  1.14.2.2   thorpej 		UVM_PGA_USERESERVE|UVM_PGA_ZERO);
   3451  1.14.2.7  jdolecek 	if (ptp == NULL)
   3452  1.14.2.2   thorpej 		return (NULL);
   3453  1.14.2.2   thorpej 
   3454  1.14.2.2   thorpej 	/* got one! */
   3455  1.14.2.2   thorpej 	ptp->flags &= ~PG_BUSY;	/* never busy */
   3456  1.14.2.2   thorpej 	ptp->wire_count = 1;	/* no mappings yet */
   3457  1.14.2.2   thorpej 	pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
   3458  1.14.2.2   thorpej 	pmap->pm_stats.resident_count++;	/* count PTP as resident */
   3459  1.14.2.7  jdolecek 	pmap->pm_ptphint = ptp;
   3460  1.14.2.2   thorpej 	return (ptp);
   3461       1.1      matt }
   3462       1.1      matt 
   3463  1.14.2.6  jdolecek vaddr_t
   3464  1.14.2.7  jdolecek pmap_growkernel(vaddr_t maxkvaddr)
   3465  1.14.2.6  jdolecek {
   3466  1.14.2.6  jdolecek 	struct pmap *kpm = pmap_kernel(), *pm;
   3467  1.14.2.6  jdolecek 	int s;
   3468  1.14.2.6  jdolecek 	paddr_t ptaddr;
   3469  1.14.2.6  jdolecek 	struct vm_page *ptp;
   3470  1.14.2.6  jdolecek 
   3471  1.14.2.6  jdolecek 	if (maxkvaddr <= pmap_curmaxkvaddr)
   3472  1.14.2.6  jdolecek 		goto out;		/* we are OK */
   3473  1.14.2.6  jdolecek 	NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
   3474  1.14.2.6  jdolecek 		    pmap_curmaxkvaddr, maxkvaddr));
   3475  1.14.2.6  jdolecek 
   3476  1.14.2.6  jdolecek 	/*
   3477  1.14.2.6  jdolecek 	 * whoops!   we need to add kernel PTPs
   3478  1.14.2.6  jdolecek 	 */
   3479  1.14.2.6  jdolecek 
   3480  1.14.2.6  jdolecek 	s = splhigh();	/* to be safe */
   3481  1.14.2.6  jdolecek 	simple_lock(&kpm->pm_obj.vmobjlock);
    3482  1.14.2.6  jdolecek 	/* Due to the way the ARM pmap works, we map 4MB at a time. */
   3483  1.14.2.7  jdolecek 	for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr;
   3484  1.14.2.7  jdolecek 	     pmap_curmaxkvaddr += 4 * L1_S_SIZE) {
   3485  1.14.2.6  jdolecek 
   3486  1.14.2.6  jdolecek 		if (uvm.page_init_done == FALSE) {
   3487  1.14.2.6  jdolecek 
   3488  1.14.2.6  jdolecek 			/*
   3489  1.14.2.6  jdolecek 			 * we're growing the kernel pmap early (from
   3490  1.14.2.6  jdolecek 			 * uvm_pageboot_alloc()).  this case must be
   3491  1.14.2.6  jdolecek 			 * handled a little differently.
   3492  1.14.2.6  jdolecek 			 */
   3493  1.14.2.6  jdolecek 
   3494  1.14.2.6  jdolecek 			if (uvm_page_physget(&ptaddr) == FALSE)
   3495  1.14.2.6  jdolecek 				panic("pmap_growkernel: out of memory");
   3496  1.14.2.6  jdolecek 			pmap_zero_page(ptaddr);
   3497  1.14.2.6  jdolecek 
   3498  1.14.2.6  jdolecek 			/* map this page in */
   3499  1.14.2.7  jdolecek 			pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr, TRUE);
   3500  1.14.2.6  jdolecek 
   3501  1.14.2.6  jdolecek 			/* count PTP as resident */
   3502  1.14.2.6  jdolecek 			kpm->pm_stats.resident_count++;
   3503  1.14.2.6  jdolecek 			continue;
   3504  1.14.2.6  jdolecek 		}
   3505  1.14.2.6  jdolecek 
   3506  1.14.2.6  jdolecek 		/*
   3507  1.14.2.6  jdolecek 		 * THIS *MUST* BE CODED SO AS TO WORK IN THE
   3508  1.14.2.6  jdolecek 		 * pmap_initialized == FALSE CASE!  WE MAY BE
   3509  1.14.2.6  jdolecek 		 * INVOKED WHILE pmap_init() IS RUNNING!
   3510  1.14.2.6  jdolecek 		 */
   3511  1.14.2.6  jdolecek 
   3512  1.14.2.7  jdolecek 		if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL)
   3513  1.14.2.6  jdolecek 			panic("pmap_growkernel: alloc ptp failed");
   3514  1.14.2.6  jdolecek 
   3515  1.14.2.6  jdolecek 		/* distribute new kernel PTP to all active pmaps */
   3516  1.14.2.6  jdolecek 		simple_lock(&pmaps_lock);
   3517  1.14.2.6  jdolecek 		LIST_FOREACH(pm, &pmaps, pm_list) {
   3518  1.14.2.7  jdolecek 			pmap_map_in_l1(pm, pmap_curmaxkvaddr,
   3519  1.14.2.7  jdolecek 			    VM_PAGE_TO_PHYS(ptp), TRUE);
   3520  1.14.2.6  jdolecek 		}
   3521  1.14.2.6  jdolecek 
   3522  1.14.2.6  jdolecek 		simple_unlock(&pmaps_lock);
   3523  1.14.2.6  jdolecek 	}
   3524  1.14.2.6  jdolecek 
   3525  1.14.2.6  jdolecek 	/*
    3526  1.14.2.6  jdolecek 	 * flush out the TLB; expensive, but growkernel happens so
    3527  1.14.2.6  jdolecek 	 * rarely
   3528  1.14.2.6  jdolecek 	 */
   3529  1.14.2.6  jdolecek 	cpu_tlb_flushD();
   3530  1.14.2.6  jdolecek 	cpu_cpwait();
   3531  1.14.2.6  jdolecek 
   3532  1.14.2.6  jdolecek 	simple_unlock(&kpm->pm_obj.vmobjlock);
   3533  1.14.2.6  jdolecek 	splx(s);
   3534  1.14.2.6  jdolecek 
   3535  1.14.2.6  jdolecek out:
   3536  1.14.2.6  jdolecek 	return (pmap_curmaxkvaddr);
   3537  1.14.2.6  jdolecek }
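
                             /*
                              * Usage sketch (assumed UVM caller): kernel VA allocations grow the
                              * managed range on demand, roughly:
                              *
                              *	if (addr + size > uvm_maxkaddr)
                              *		uvm_maxkaddr = pmap_growkernel(addr + size);
                              *
                              * Every address below the returned pmap_curmaxkvaddr has a kernel
                              * PTP in place in all pmaps.
                              */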
   3538  1.14.2.6  jdolecek 
   3539  1.14.2.7  jdolecek /************************ Utility routines ****************************/
   3540  1.14.2.7  jdolecek 
   3541  1.14.2.7  jdolecek /*
   3542  1.14.2.7  jdolecek  * vector_page_setprot:
   3543  1.14.2.7  jdolecek  *
   3544  1.14.2.7  jdolecek  *	Manipulate the protection of the vector page.
   3545  1.14.2.7  jdolecek  */
   3546  1.14.2.7  jdolecek void
   3547  1.14.2.7  jdolecek vector_page_setprot(int prot)
   3548  1.14.2.7  jdolecek {
   3549  1.14.2.7  jdolecek 	pt_entry_t *pte;
   3550  1.14.2.6  jdolecek 
   3551  1.14.2.7  jdolecek 	pte = vtopte(vector_page);
   3552  1.14.2.7  jdolecek 
    3553  1.14.2.7  jdolecek 	*pte = (*pte & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
   3554  1.14.2.7  jdolecek 	cpu_tlb_flushD_SE(vector_page);
   3555  1.14.2.7  jdolecek 	cpu_cpwait();
   3556  1.14.2.7  jdolecek }
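
                             /*
                              * Usage sketch (assumed caller): installing exception vectors might
                              * briefly make the vector page writable, then protect it again:
                              *
                              *	vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE);
                              *	(copy the vectors into place)
                              *	vector_page_setprot(VM_PROT_READ);
                              */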
   3557  1.14.2.6  jdolecek 
   3558  1.14.2.6  jdolecek /************************ Bootstrapping routines ****************************/
   3559  1.14.2.6  jdolecek 
   3560  1.14.2.6  jdolecek /*
   3561  1.14.2.6  jdolecek  * This list exists for the benefit of pmap_map_chunk().  It keeps track
   3562  1.14.2.6  jdolecek  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
   3563  1.14.2.6  jdolecek  * find them as necessary.
   3564  1.14.2.6  jdolecek  *
   3565  1.14.2.6  jdolecek  * Note that the data on this list is not valid after initarm() returns.
   3566  1.14.2.6  jdolecek  */
   3567  1.14.2.6  jdolecek SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
   3568  1.14.2.6  jdolecek 
   3569  1.14.2.6  jdolecek static vaddr_t
   3570  1.14.2.6  jdolecek kernel_pt_lookup(paddr_t pa)
   3571  1.14.2.6  jdolecek {
   3572  1.14.2.6  jdolecek 	pv_addr_t *pv;
   3573  1.14.2.6  jdolecek 
   3574  1.14.2.6  jdolecek 	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
   3575  1.14.2.6  jdolecek 		if (pv->pv_pa == pa)
   3576  1.14.2.6  jdolecek 			return (pv->pv_va);
   3577  1.14.2.6  jdolecek 	}
   3578  1.14.2.6  jdolecek 	return (0);
   3579  1.14.2.6  jdolecek }
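
                             /*
                              * Bootstrap sketch (assumed initarm()-style caller): L2 tables are
                              * registered via pmap_link_l2pt(), which is what lets
                              * kernel_pt_lookup() turn the physical table address found in an
                              * L1 slot back into a virtual one:
                              *
                              *	pmap_link_l2pt(l1pt_va, kva, &kernel_pt);
                              *	pmap_map_chunk(l1pt_va, kva, pa, size,
                              *	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
                              */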
   3580  1.14.2.6  jdolecek 
   3581  1.14.2.6  jdolecek /*
   3582  1.14.2.6  jdolecek  * pmap_map_section:
   3583  1.14.2.6  jdolecek  *
   3584  1.14.2.6  jdolecek  *	Create a single section mapping.
   3585  1.14.2.6  jdolecek  */
   3586  1.14.2.6  jdolecek void
   3587  1.14.2.6  jdolecek pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
   3588  1.14.2.6  jdolecek {
   3589  1.14.2.6  jdolecek 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3590  1.14.2.7  jdolecek 	pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
   3591  1.14.2.6  jdolecek 
   3592  1.14.2.7  jdolecek 	KASSERT(((va | pa) & L1_S_OFFSET) == 0);
   3593  1.14.2.6  jdolecek 
   3594  1.14.2.7  jdolecek 	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
   3595  1.14.2.7  jdolecek 	    L1_S_PROT(PTE_KERNEL, prot) | fl;
   3596  1.14.2.6  jdolecek }
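
                             /*
                              * Example (a sketch; PTE_NOCACHE is assumed to be the non-cached
                              * value of the cache argument): map 1MB of device registers,
                              * uncached, during bootstrap:
                              *
                              *	pmap_map_section(l1pt_va, dev_va, dev_pa,
                              *	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
                              *
                              * Both dev_va and dev_pa must be 1MB-aligned, per the KASSERT.
                              */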
   3597  1.14.2.6  jdolecek 
   3598  1.14.2.6  jdolecek /*
   3599  1.14.2.6  jdolecek  * pmap_map_entry:
   3600  1.14.2.6  jdolecek  *
   3601  1.14.2.6  jdolecek  *	Create a single page mapping.
   3602  1.14.2.6  jdolecek  */
   3603  1.14.2.6  jdolecek void
   3604  1.14.2.6  jdolecek pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
   3605  1.14.2.6  jdolecek {
   3606  1.14.2.6  jdolecek 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3607  1.14.2.7  jdolecek 	pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
   3608  1.14.2.6  jdolecek 	pt_entry_t *pte;
   3609  1.14.2.6  jdolecek 
   3610  1.14.2.6  jdolecek 	KASSERT(((va | pa) & PGOFSET) == 0);
   3611  1.14.2.6  jdolecek 
   3612  1.14.2.7  jdolecek 	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
   3613  1.14.2.6  jdolecek 		panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
   3614  1.14.2.6  jdolecek 
   3615  1.14.2.6  jdolecek 	pte = (pt_entry_t *)
   3616  1.14.2.7  jdolecek 	    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   3617  1.14.2.6  jdolecek 	if (pte == NULL)
   3618  1.14.2.6  jdolecek 		panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
   3619  1.14.2.6  jdolecek 
   3620  1.14.2.7  jdolecek 	pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   3621  1.14.2.7  jdolecek 	    L2_S_PROT(PTE_KERNEL, prot) | fl;
   3622  1.14.2.6  jdolecek }
   3623  1.14.2.6  jdolecek 
   3624  1.14.2.6  jdolecek /*
   3625  1.14.2.6  jdolecek  * pmap_link_l2pt:
   3626  1.14.2.6  jdolecek  *
   3627  1.14.2.6  jdolecek  *	Link the L2 page table specified by "pa" into the L1
   3628  1.14.2.6  jdolecek  *	page table at the slot for "va".
   3629  1.14.2.6  jdolecek  */
   3630  1.14.2.6  jdolecek void
   3631  1.14.2.6  jdolecek pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
   3632  1.14.2.6  jdolecek {
   3633  1.14.2.6  jdolecek 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3634  1.14.2.7  jdolecek 	u_int slot = va >> L1_S_SHIFT;
   3635  1.14.2.6  jdolecek 
   3636  1.14.2.6  jdolecek 	KASSERT((l2pv->pv_pa & PGOFSET) == 0);
   3637  1.14.2.6  jdolecek 
   3638  1.14.2.7  jdolecek 	pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
   3639  1.14.2.7  jdolecek 	pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
   3640  1.14.2.7  jdolecek 	pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
   3641  1.14.2.7  jdolecek 	pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
   3642  1.14.2.6  jdolecek 
   3643  1.14.2.6  jdolecek 	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
   3644  1.14.2.6  jdolecek }
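
                             /*
                              * Note: a 4KB page holds four 1KB coarse L2 tables, and each L1
                              * slot spans L1_S_SIZE (1MB) of VA, so the four slots written
                              * above together cover 4MB:
                              *
                              *	4 * L1_S_SIZE == 0x00400000
                              *
                              * This matches the 4MB stride pmap_growkernel() uses.
                              */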
   3645  1.14.2.6  jdolecek 
   3646  1.14.2.6  jdolecek /*
   3647  1.14.2.6  jdolecek  * pmap_map_chunk:
   3648  1.14.2.6  jdolecek  *
   3649  1.14.2.6  jdolecek  *	Map a chunk of memory using the most efficient mappings
   3650  1.14.2.6  jdolecek  *	possible (section, large page, small page) into the
   3651  1.14.2.6  jdolecek  *	provided L1 and L2 tables at the specified virtual address.
   3652  1.14.2.6  jdolecek  */
   3653  1.14.2.6  jdolecek vsize_t
   3654  1.14.2.6  jdolecek pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
   3655  1.14.2.6  jdolecek     int prot, int cache)
   3656  1.14.2.6  jdolecek {
   3657  1.14.2.6  jdolecek 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3658  1.14.2.7  jdolecek 	pt_entry_t *pte, fl;
   3659  1.14.2.6  jdolecek 	vsize_t resid;
   3660  1.14.2.6  jdolecek 	int i;
   3661  1.14.2.6  jdolecek 
   3662  1.14.2.6  jdolecek 	resid = (size + (NBPG - 1)) & ~(NBPG - 1);
   3663  1.14.2.6  jdolecek 
   3664  1.14.2.6  jdolecek 	if (l1pt == 0)
   3665  1.14.2.6  jdolecek 		panic("pmap_map_chunk: no L1 table provided");
   3666  1.14.2.6  jdolecek 
   3667  1.14.2.6  jdolecek #ifdef VERBOSE_INIT_ARM
   3668  1.14.2.6  jdolecek 	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
   3669  1.14.2.6  jdolecek 	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
   3670  1.14.2.6  jdolecek #endif
   3671  1.14.2.6  jdolecek 
   3672  1.14.2.6  jdolecek 	size = resid;
   3673  1.14.2.6  jdolecek 
   3674  1.14.2.6  jdolecek 	while (resid > 0) {
   3675  1.14.2.6  jdolecek 		/* See if we can use a section mapping. */
   3676  1.14.2.7  jdolecek 		if (((pa | va) & L1_S_OFFSET) == 0 &&
   3677  1.14.2.7  jdolecek 		    resid >= L1_S_SIZE) {
   3678  1.14.2.7  jdolecek 			fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
   3679  1.14.2.6  jdolecek #ifdef VERBOSE_INIT_ARM
   3680  1.14.2.6  jdolecek 			printf("S");
   3681  1.14.2.6  jdolecek #endif
   3682  1.14.2.7  jdolecek 			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
   3683  1.14.2.7  jdolecek 			    L1_S_PROT(PTE_KERNEL, prot) | fl;
   3684  1.14.2.7  jdolecek 			va += L1_S_SIZE;
   3685  1.14.2.7  jdolecek 			pa += L1_S_SIZE;
   3686  1.14.2.7  jdolecek 			resid -= L1_S_SIZE;
   3687  1.14.2.6  jdolecek 			continue;
   3688  1.14.2.6  jdolecek 		}
   3689  1.14.2.6  jdolecek 
   3690  1.14.2.6  jdolecek 		/*
   3691  1.14.2.6  jdolecek 		 * Ok, we're going to use an L2 table.  Make sure
   3692  1.14.2.6  jdolecek 		 * one is actually in the corresponding L1 slot
   3693  1.14.2.6  jdolecek 		 * for the current VA.
   3694  1.14.2.6  jdolecek 		 */
   3695  1.14.2.7  jdolecek 		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
   3696  1.14.2.6  jdolecek 			panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
   3697  1.14.2.6  jdolecek 
   3698  1.14.2.6  jdolecek 		pte = (pt_entry_t *)
   3699  1.14.2.7  jdolecek 		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   3700  1.14.2.6  jdolecek 		if (pte == NULL)
   3701  1.14.2.6  jdolecek 			panic("pmap_map_chunk: can't find L2 table for VA"
    3702  1.14.2.6  jdolecek 			    " 0x%08lx", va);
   3703  1.14.2.6  jdolecek 
   3704  1.14.2.6  jdolecek 		/* See if we can use a L2 large page mapping. */
   3705  1.14.2.7  jdolecek 		if (((pa | va) & L2_L_OFFSET) == 0 &&
   3706  1.14.2.7  jdolecek 		    resid >= L2_L_SIZE) {
   3707  1.14.2.7  jdolecek 			fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0;
   3708  1.14.2.6  jdolecek #ifdef VERBOSE_INIT_ARM
   3709  1.14.2.6  jdolecek 			printf("L");
   3710  1.14.2.6  jdolecek #endif
   3711  1.14.2.6  jdolecek 			for (i = 0; i < 16; i++) {
   3712  1.14.2.6  jdolecek 				pte[((va >> PGSHIFT) & 0x3f0) + i] =
   3713  1.14.2.7  jdolecek 				    L2_L_PROTO | pa |
   3714  1.14.2.7  jdolecek 				    L2_L_PROT(PTE_KERNEL, prot) | fl;
   3715  1.14.2.6  jdolecek 			}
   3716  1.14.2.7  jdolecek 			va += L2_L_SIZE;
   3717  1.14.2.7  jdolecek 			pa += L2_L_SIZE;
   3718  1.14.2.7  jdolecek 			resid -= L2_L_SIZE;
   3719  1.14.2.6  jdolecek 			continue;
   3720  1.14.2.6  jdolecek 		}
   3721  1.14.2.6  jdolecek 
   3722  1.14.2.6  jdolecek 		/* Use a small page mapping. */
   3723  1.14.2.7  jdolecek 		fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
   3724  1.14.2.6  jdolecek #ifdef VERBOSE_INIT_ARM
   3725  1.14.2.6  jdolecek 		printf("P");
   3726  1.14.2.6  jdolecek #endif
   3727  1.14.2.7  jdolecek 		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   3728  1.14.2.7  jdolecek 		    L2_S_PROT(PTE_KERNEL, prot) | fl;
   3729  1.14.2.6  jdolecek 		va += NBPG;
   3730  1.14.2.6  jdolecek 		pa += NBPG;
   3731  1.14.2.6  jdolecek 		resid -= NBPG;
   3732  1.14.2.6  jdolecek 	}
   3733  1.14.2.6  jdolecek #ifdef VERBOSE_INIT_ARM
   3734  1.14.2.6  jdolecek 	printf("\n");
   3735  1.14.2.6  jdolecek #endif
   3736  1.14.2.6  jdolecek 	return (size);
   3737  1.14.2.6  jdolecek }
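
                             /*
                              * Usage sketch (assumed initarm()-style caller): map the kernel
                              * image with the largest mappings available:
                              *
                              *	vsize_t mapped;
                              *
                              *	mapped = pmap_map_chunk(l1pt_va, kernel_va, kernel_pa,
                              *	    kernel_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
                              *
                              * The return value is the requested size rounded up to whole
                              * pages, i.e. the amount of VA actually consumed.
                              */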
   3738  1.14.2.7  jdolecek 
   3739  1.14.2.7  jdolecek /********************** PTE initialization routines **************************/
   3740  1.14.2.7  jdolecek 
   3741  1.14.2.7  jdolecek /*
   3742  1.14.2.7  jdolecek  * These routines are called when the CPU type is identified to set up
   3743  1.14.2.7  jdolecek  * the PTE prototypes, cache modes, etc.
   3744  1.14.2.7  jdolecek  *
   3745  1.14.2.7  jdolecek  * The variables are always here, just in case LKMs need to reference
    3746  1.14.2.7  jdolecek  * them (though they shouldn't).
   3747  1.14.2.7  jdolecek  */
   3748  1.14.2.7  jdolecek 
   3749  1.14.2.7  jdolecek pt_entry_t	pte_l1_s_cache_mode;
   3750  1.14.2.7  jdolecek pt_entry_t	pte_l1_s_cache_mask;
   3751  1.14.2.7  jdolecek 
   3752  1.14.2.7  jdolecek pt_entry_t	pte_l2_l_cache_mode;
   3753  1.14.2.7  jdolecek pt_entry_t	pte_l2_l_cache_mask;
   3754  1.14.2.7  jdolecek 
   3755  1.14.2.7  jdolecek pt_entry_t	pte_l2_s_cache_mode;
   3756  1.14.2.7  jdolecek pt_entry_t	pte_l2_s_cache_mask;
   3757  1.14.2.7  jdolecek 
   3758  1.14.2.7  jdolecek pt_entry_t	pte_l2_s_prot_u;
   3759  1.14.2.7  jdolecek pt_entry_t	pte_l2_s_prot_w;
   3760  1.14.2.7  jdolecek pt_entry_t	pte_l2_s_prot_mask;
   3761  1.14.2.7  jdolecek 
   3762  1.14.2.7  jdolecek pt_entry_t	pte_l1_s_proto;
   3763  1.14.2.7  jdolecek pt_entry_t	pte_l1_c_proto;
   3764  1.14.2.7  jdolecek pt_entry_t	pte_l2_s_proto;
   3765  1.14.2.7  jdolecek 
   3766  1.14.2.7  jdolecek void		(*pmap_copy_page_func)(paddr_t, paddr_t);
   3767  1.14.2.7  jdolecek void		(*pmap_zero_page_func)(paddr_t);
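
                             /*
                              * Consumption sketch (drawn from uses elsewhere in this file):
                              * PTE construction uses these run-time selected values rather
                              * than fixed constants, e.g.:
                              *
                              *	npte = L2_S_PROTO | pa |
                              *	    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
                              *	(*pmap_zero_page_func)(pa);
                              */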
   3768  1.14.2.7  jdolecek 
   3769  1.14.2.7  jdolecek #if ARM_MMU_GENERIC == 1
   3770  1.14.2.7  jdolecek void
   3771  1.14.2.7  jdolecek pmap_pte_init_generic(void)
   3772  1.14.2.7  jdolecek {
   3773  1.14.2.7  jdolecek 
   3774  1.14.2.7  jdolecek 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
   3775  1.14.2.7  jdolecek 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
   3776  1.14.2.7  jdolecek 
   3777  1.14.2.7  jdolecek 	pte_l2_l_cache_mode = L2_B|L2_C;
   3778  1.14.2.7  jdolecek 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
   3779  1.14.2.7  jdolecek 
   3780  1.14.2.7  jdolecek 	pte_l2_s_cache_mode = L2_B|L2_C;
   3781  1.14.2.7  jdolecek 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
   3782  1.14.2.7  jdolecek 
   3783  1.14.2.7  jdolecek 	pte_l2_s_prot_u = L2_S_PROT_U_generic;
   3784  1.14.2.7  jdolecek 	pte_l2_s_prot_w = L2_S_PROT_W_generic;
   3785  1.14.2.7  jdolecek 	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
   3786  1.14.2.7  jdolecek 
   3787  1.14.2.7  jdolecek 	pte_l1_s_proto = L1_S_PROTO_generic;
   3788  1.14.2.7  jdolecek 	pte_l1_c_proto = L1_C_PROTO_generic;
   3789  1.14.2.7  jdolecek 	pte_l2_s_proto = L2_S_PROTO_generic;
   3790  1.14.2.7  jdolecek 
   3791  1.14.2.7  jdolecek 	pmap_copy_page_func = pmap_copy_page_generic;
   3792  1.14.2.7  jdolecek 	pmap_zero_page_func = pmap_zero_page_generic;
   3793  1.14.2.7  jdolecek }
   3794  1.14.2.7  jdolecek 
   3795  1.14.2.7  jdolecek #if defined(CPU_ARM9)
   3796  1.14.2.7  jdolecek void
   3797  1.14.2.7  jdolecek pmap_pte_init_arm9(void)
   3798  1.14.2.7  jdolecek {
   3799  1.14.2.7  jdolecek 
   3800  1.14.2.7  jdolecek 	/*
   3801  1.14.2.7  jdolecek 	 * ARM9 is compatible with generic, but we want to use
   3802  1.14.2.7  jdolecek 	 * write-through caching for now.
   3803  1.14.2.7  jdolecek 	 */
   3804  1.14.2.7  jdolecek 	pmap_pte_init_generic();
   3805  1.14.2.7  jdolecek 
   3806  1.14.2.7  jdolecek 	pte_l1_s_cache_mode = L1_S_C;
   3807  1.14.2.7  jdolecek 	pte_l2_l_cache_mode = L2_C;
   3808  1.14.2.7  jdolecek 	pte_l2_s_cache_mode = L2_C;
   3809  1.14.2.7  jdolecek }
   3810  1.14.2.7  jdolecek #endif /* CPU_ARM9 */
   3811  1.14.2.7  jdolecek #endif /* ARM_MMU_GENERIC == 1 */
   3812  1.14.2.7  jdolecek 
   3813  1.14.2.7  jdolecek #if ARM_MMU_XSCALE == 1
   3814  1.14.2.7  jdolecek void
   3815  1.14.2.7  jdolecek pmap_pte_init_xscale(void)
   3816  1.14.2.7  jdolecek {
   3817  1.14.2.7  jdolecek 	uint32_t auxctl;
   3818  1.14.2.7  jdolecek 
   3819  1.14.2.7  jdolecek 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
   3820  1.14.2.7  jdolecek 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
   3821  1.14.2.7  jdolecek 
   3822  1.14.2.7  jdolecek 	pte_l2_l_cache_mode = L2_B|L2_C;
   3823  1.14.2.7  jdolecek 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
   3824  1.14.2.7  jdolecek 
   3825  1.14.2.7  jdolecek 	pte_l2_s_cache_mode = L2_B|L2_C;
   3826  1.14.2.7  jdolecek 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
   3827  1.14.2.7  jdolecek 
   3828  1.14.2.7  jdolecek #ifdef XSCALE_CACHE_WRITE_THROUGH
   3829  1.14.2.7  jdolecek 	/*
   3830  1.14.2.7  jdolecek 	 * Some versions of the XScale core have various bugs in
   3831  1.14.2.7  jdolecek 	 * their cache units, the work-around for which is to run
   3832  1.14.2.7  jdolecek 	 * the cache in write-through mode.  Unfortunately, this
   3833  1.14.2.7  jdolecek 	 * has a major (negative) impact on performance.  So, we
   3834  1.14.2.7  jdolecek 	 * go ahead and run fast-and-loose, in the hopes that we
   3835  1.14.2.7  jdolecek 	 * don't line up the planets in a way that will trip the
   3836  1.14.2.7  jdolecek 	 * bugs.
   3837  1.14.2.7  jdolecek 	 *
   3838  1.14.2.7  jdolecek 	 * However, we give you the option to be slow-but-correct.
   3839  1.14.2.7  jdolecek 	 */
   3840  1.14.2.7  jdolecek 	pte_l1_s_cache_mode = L1_S_C;
   3841  1.14.2.7  jdolecek 	pte_l2_l_cache_mode = L2_C;
   3842  1.14.2.7  jdolecek 	pte_l2_s_cache_mode = L2_C;
   3843  1.14.2.7  jdolecek #endif /* XSCALE_CACHE_WRITE_THROUGH */
   3844  1.14.2.7  jdolecek 
   3845  1.14.2.7  jdolecek 	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
   3846  1.14.2.7  jdolecek 	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
   3847  1.14.2.7  jdolecek 	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
   3848  1.14.2.7  jdolecek 
   3849  1.14.2.7  jdolecek 	pte_l1_s_proto = L1_S_PROTO_xscale;
   3850  1.14.2.7  jdolecek 	pte_l1_c_proto = L1_C_PROTO_xscale;
   3851  1.14.2.7  jdolecek 	pte_l2_s_proto = L2_S_PROTO_xscale;
   3852  1.14.2.7  jdolecek 
   3853  1.14.2.7  jdolecek 	pmap_copy_page_func = pmap_copy_page_xscale;
   3854  1.14.2.7  jdolecek 	pmap_zero_page_func = pmap_zero_page_xscale;
   3855  1.14.2.7  jdolecek 
   3856  1.14.2.7  jdolecek 	/*
   3857  1.14.2.7  jdolecek 	 * Disable ECC protection of page table access, for now.
   3858  1.14.2.7  jdolecek 	 */
   3859  1.14.2.7  jdolecek 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
   3860  1.14.2.7  jdolecek 		: "=r" (auxctl));
   3861  1.14.2.7  jdolecek 	auxctl &= ~XSCALE_AUXCTL_P;
   3862  1.14.2.7  jdolecek 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
   3863  1.14.2.7  jdolecek 		:
   3864  1.14.2.7  jdolecek 		: "r" (auxctl));
   3865  1.14.2.7  jdolecek }
   3866  1.14.2.7  jdolecek 
   3867  1.14.2.7  jdolecek /*
   3868  1.14.2.7  jdolecek  * xscale_setup_minidata:
   3869  1.14.2.7  jdolecek  *
   3870  1.14.2.7  jdolecek  *	Set up the mini-data cache clean area.  We require the
   3871  1.14.2.7  jdolecek  *	caller to allocate the right amount of physically and
   3872  1.14.2.7  jdolecek  *	virtually contiguous space.
   3873  1.14.2.7  jdolecek  */
   3874  1.14.2.7  jdolecek void
   3875  1.14.2.7  jdolecek xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
   3876  1.14.2.7  jdolecek {
   3877  1.14.2.7  jdolecek 	extern vaddr_t xscale_minidata_clean_addr;
   3878  1.14.2.7  jdolecek 	extern vsize_t xscale_minidata_clean_size; /* already initialized */
   3879  1.14.2.7  jdolecek 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3880  1.14.2.7  jdolecek 	pt_entry_t *pte;
   3881  1.14.2.7  jdolecek 	vsize_t size;
   3882  1.14.2.7  jdolecek 	uint32_t auxctl;
   3883  1.14.2.7  jdolecek 
   3884  1.14.2.7  jdolecek 	xscale_minidata_clean_addr = va;
   3885  1.14.2.7  jdolecek 
   3886  1.14.2.7  jdolecek 	/* Round it to page size. */
   3887  1.14.2.7  jdolecek 	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
   3888  1.14.2.7  jdolecek 
   3889  1.14.2.7  jdolecek 	for (; size != 0;
   3890  1.14.2.7  jdolecek 	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
   3891  1.14.2.7  jdolecek 		pte = (pt_entry_t *)
   3892  1.14.2.7  jdolecek 		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   3893  1.14.2.7  jdolecek 		if (pte == NULL)
   3894  1.14.2.7  jdolecek 			panic("xscale_setup_minidata: can't find L2 table for "
   3895  1.14.2.7  jdolecek 			    "VA 0x%08lx", va);
   3896  1.14.2.7  jdolecek 		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   3897  1.14.2.7  jdolecek 		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
   3898  1.14.2.7  jdolecek 		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
   3899  1.14.2.7  jdolecek 	}
   3900  1.14.2.7  jdolecek 
   3901  1.14.2.7  jdolecek 	/*
   3902  1.14.2.7  jdolecek 	 * Configure the mini-data cache for write-back with
   3903  1.14.2.7  jdolecek 	 * read/write-allocate.
   3904  1.14.2.7  jdolecek 	 *
   3905  1.14.2.7  jdolecek 	 * NOTE: In order to reconfigure the mini-data cache, we must
   3906  1.14.2.7  jdolecek 	 * make sure it contains no valid data!  In order to do that,
   3907  1.14.2.7  jdolecek 	 * we must issue a global data cache invalidate command!
   3908  1.14.2.7  jdolecek 	 *
   3909  1.14.2.7  jdolecek 	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
   3910  1.14.2.7  jdolecek 	 * THIS IS VERY IMPORTANT!
   3911  1.14.2.7  jdolecek 	 */
   3912  1.14.2.7  jdolecek 
   3913  1.14.2.7  jdolecek 	/* Invalidate data and mini-data. */
   3914  1.14.2.7  jdolecek 	__asm __volatile("mcr p15, 0, %0, c7, c6, 0"
   3915  1.14.2.7  jdolecek 		:
    3916  1.14.2.7  jdolecek 		: "r" (0));	/* source register value is ignored */
   3917  1.14.2.7  jdolecek 
   3919  1.14.2.7  jdolecek 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
   3920  1.14.2.7  jdolecek 		: "=r" (auxctl));
   3921  1.14.2.7  jdolecek 	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
   3922  1.14.2.7  jdolecek 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
   3923  1.14.2.7  jdolecek 		:
   3924  1.14.2.7  jdolecek 		: "r" (auxctl));
   3925  1.14.2.7  jdolecek }
   3926  1.14.2.7  jdolecek #endif /* ARM_MMU_XSCALE == 1 */
   3927