      1  1.97.4.5        he /*	$NetBSD: pmap.c,v 1.97.4.5 2002/12/07 20:44:23 he Exp $	*/
      2      1.12     chris 
      3      1.12     chris /*
      4      1.49   thorpej  * Copyright (c) 2002 Wasabi Systems, Inc.
      5      1.12     chris  * Copyright (c) 2001 Richard Earnshaw
      6      1.12     chris  * Copyright (c) 2001 Christopher Gilbert
      7      1.12     chris  * All rights reserved.
       8      1.12     chris  *
                              * Redistribution and use in source and binary forms, with or without
                              * modification, are permitted provided that the following conditions
                              * are met:
      9      1.12     chris  * 1. Redistributions of source code must retain the above copyright
     10      1.12     chris  *    notice, this list of conditions and the following disclaimer.
     11      1.12     chris  * 2. Redistributions in binary form must reproduce the above copyright
     12      1.12     chris  *    notice, this list of conditions and the following disclaimer in the
     13      1.12     chris  *    documentation and/or other materials provided with the distribution.
      14      1.12     chris  * 3. Neither the name of the company nor the name of the author may be used to
     15      1.12     chris  *    endorse or promote products derived from this software without specific
     16      1.12     chris  *    prior written permission.
     17      1.12     chris  *
     18      1.12     chris  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
     19      1.12     chris  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
     20      1.12     chris  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21      1.12     chris  * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
     22      1.12     chris  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     23      1.12     chris  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     24      1.12     chris  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     25      1.12     chris  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     26      1.12     chris  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     27      1.12     chris  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     28      1.12     chris  * SUCH DAMAGE.
     29      1.12     chris  */
     30       1.1      matt 
     31       1.1      matt /*-
     32       1.1      matt  * Copyright (c) 1999 The NetBSD Foundation, Inc.
     33       1.1      matt  * All rights reserved.
     34       1.1      matt  *
     35       1.1      matt  * This code is derived from software contributed to The NetBSD Foundation
     36       1.1      matt  * by Charles M. Hannum.
     37       1.1      matt  *
     38       1.1      matt  * Redistribution and use in source and binary forms, with or without
     39       1.1      matt  * modification, are permitted provided that the following conditions
     40       1.1      matt  * are met:
     41       1.1      matt  * 1. Redistributions of source code must retain the above copyright
     42       1.1      matt  *    notice, this list of conditions and the following disclaimer.
     43       1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     44       1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     45       1.1      matt  *    documentation and/or other materials provided with the distribution.
     46       1.1      matt  * 3. All advertising materials mentioning features or use of this software
     47       1.1      matt  *    must display the following acknowledgement:
     48       1.1      matt  *        This product includes software developed by the NetBSD
     49       1.1      matt  *        Foundation, Inc. and its contributors.
     50       1.1      matt  * 4. Neither the name of The NetBSD Foundation nor the names of its
     51       1.1      matt  *    contributors may be used to endorse or promote products derived
     52       1.1      matt  *    from this software without specific prior written permission.
     53       1.1      matt  *
     54       1.1      matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     55       1.1      matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     56       1.1      matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     57       1.1      matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     58       1.1      matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     59       1.1      matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     60       1.1      matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     61       1.1      matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     62       1.1      matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     63       1.1      matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     64       1.1      matt  * POSSIBILITY OF SUCH DAMAGE.
     65       1.1      matt  */
     66       1.1      matt 
     67       1.1      matt /*
     68       1.1      matt  * Copyright (c) 1994-1998 Mark Brinicombe.
     69       1.1      matt  * Copyright (c) 1994 Brini.
     70       1.1      matt  * All rights reserved.
     71       1.1      matt  *
     72       1.1      matt  * This code is derived from software written for Brini by Mark Brinicombe
     73       1.1      matt  *
     74       1.1      matt  * Redistribution and use in source and binary forms, with or without
     75       1.1      matt  * modification, are permitted provided that the following conditions
     76       1.1      matt  * are met:
     77       1.1      matt  * 1. Redistributions of source code must retain the above copyright
     78       1.1      matt  *    notice, this list of conditions and the following disclaimer.
     79       1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     80       1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     81       1.1      matt  *    documentation and/or other materials provided with the distribution.
     82       1.1      matt  * 3. All advertising materials mentioning features or use of this software
     83       1.1      matt  *    must display the following acknowledgement:
     84       1.1      matt  *	This product includes software developed by Mark Brinicombe.
     85       1.1      matt  * 4. The name of the author may not be used to endorse or promote products
     86       1.1      matt  *    derived from this software without specific prior written permission.
     87       1.1      matt  *
     88       1.1      matt  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     89       1.1      matt  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     90       1.1      matt  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     91       1.1      matt  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     92       1.1      matt  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     93       1.1      matt  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     94       1.1      matt  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     95       1.1      matt  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
      96       1.1      matt  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
                              * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
      97       1.1      matt  *
     98       1.1      matt  * RiscBSD kernel project
     99       1.1      matt  *
    100       1.1      matt  * pmap.c
    101       1.1      matt  *
     102       1.1      matt  * Machine dependent vm stuff
    103       1.1      matt  *
    104       1.1      matt  * Created      : 20/09/94
    105       1.1      matt  */
    106       1.1      matt 
    107       1.1      matt /*
    108       1.1      matt  * Performance improvements, UVM changes, overhauls and part-rewrites
    109       1.1      matt  * were contributed by Neil A. Carson <neil (at) causality.com>.
    110       1.1      matt  */
    111       1.1      matt 
    112       1.1      matt /*
     113       1.1      matt  * The DRAM block info is currently referenced from the bootconfig.
    114       1.1      matt  * This should be placed in a separate structure.
    115       1.1      matt  */
    116       1.1      matt 
    117       1.1      matt /*
    118       1.1      matt  * Special compilation symbols
    119       1.1      matt  * PMAP_DEBUG		- Build in pmap_debug_level code
    120       1.1      matt  */
    121       1.1      matt 
    122       1.1      matt /* Include header files */
    123       1.1      matt 
    124       1.1      matt #include "opt_pmap_debug.h"
    125       1.1      matt #include "opt_ddb.h"
    126       1.1      matt 
    127       1.1      matt #include <sys/types.h>
    128       1.1      matt #include <sys/param.h>
    129       1.1      matt #include <sys/kernel.h>
    130       1.1      matt #include <sys/systm.h>
    131       1.1      matt #include <sys/proc.h>
    132       1.1      matt #include <sys/malloc.h>
    133       1.1      matt #include <sys/user.h>
    134      1.10     chris #include <sys/pool.h>
    135      1.16     chris #include <sys/cdefs.h>
    136      1.16     chris 
    137       1.1      matt #include <uvm/uvm.h>
    138       1.1      matt 
    139       1.1      matt #include <machine/bootconfig.h>
    140       1.1      matt #include <machine/bus.h>
    141       1.1      matt #include <machine/pmap.h>
    142       1.1      matt #include <machine/pcb.h>
    143       1.1      matt #include <machine/param.h>
    144      1.32   thorpej #include <arm/arm32/katelib.h>
    145      1.16     chris 
    146  1.97.4.5        he __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.97.4.5 2002/12/07 20:44:23 he Exp $");
    147       1.1      matt #ifdef PMAP_DEBUG
    148       1.1      matt #define	PDEBUG(_lev_,_stat_) \
    149       1.1      matt 	if (pmap_debug_level >= (_lev_)) \
    150       1.1      matt         	((_stat_))
    151       1.1      matt int pmap_debug_level = -2;
    152      1.48     chris void pmap_dump_pvlist(vaddr_t phys, char *m);
    153      1.17     chris 
    154      1.17     chris /*
    155      1.17     chris  * for switching to potentially finer grained debugging
    156      1.17     chris  */
    157      1.17     chris #define	PDB_FOLLOW	0x0001
    158      1.17     chris #define	PDB_INIT	0x0002
    159      1.17     chris #define	PDB_ENTER	0x0004
    160      1.17     chris #define	PDB_REMOVE	0x0008
    161      1.17     chris #define	PDB_CREATE	0x0010
    162      1.17     chris #define	PDB_PTPAGE	0x0020
    163      1.48     chris #define	PDB_GROWKERN	0x0040
    164      1.17     chris #define	PDB_BITS	0x0080
    165      1.17     chris #define	PDB_COLLECT	0x0100
    166      1.17     chris #define	PDB_PROTECT	0x0200
    167      1.48     chris #define	PDB_MAP_L1	0x0400
    168      1.17     chris #define	PDB_BOOTSTRAP	0x1000
    169      1.17     chris #define	PDB_PARANOIA	0x2000
    170      1.17     chris #define	PDB_WIRING	0x4000
    171      1.17     chris #define	PDB_PVDUMP	0x8000
    172      1.17     chris 
    173      1.17     chris int debugmap = 0;
    174      1.17     chris int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
    175      1.17     chris #define	NPDEBUG(_lev_,_stat_) \
    176      1.17     chris 	if (pmapdebug & (_lev_)) \
    177      1.17     chris         	((_stat_))
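
                             /*
                              * Illustrative use of NPDEBUG (example added for clarity, not part of
                              * the original source): log pmap_enter() activity only when the
                              * PDB_ENTER bit is set in pmapdebug:
                              *
                              *	NPDEBUG(PDB_ENTER,
                              *	    printf("pmap_enter: pmap %p va %08lx\n", pmap, va));
                              */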
    178      1.17     chris 
    179       1.1      matt #else	/* PMAP_DEBUG */
    180       1.1      matt #define	PDEBUG(_lev_,_stat_) /* Nothing */
    181      1.48     chris #define NPDEBUG(_lev_,_stat_) /* Nothing */
    182       1.1      matt #endif	/* PMAP_DEBUG */
    183       1.1      matt 
    184       1.1      matt struct pmap     kernel_pmap_store;
    185       1.1      matt 
    186      1.10     chris /*
    187      1.48     chris  * linked list of all non-kernel pmaps
    188      1.48     chris  */
    189      1.48     chris 
    190      1.69   thorpej static LIST_HEAD(, pmap) pmaps;
    191      1.48     chris 
    192      1.48     chris /*
    193      1.10     chris  * pool that pmap structures are allocated from
    194      1.10     chris  */
    195      1.10     chris 
    196      1.10     chris struct pool pmap_pmap_pool;
    197      1.10     chris 
    198  1.97.4.5        he /*
    199  1.97.4.5        he  * pool/cache that PT-PT's are allocated from
    200  1.97.4.5        he  */
    201  1.97.4.5        he 
    202  1.97.4.5        he struct pool pmap_ptpt_pool;
    203  1.97.4.5        he struct pool_cache pmap_ptpt_cache;
    204  1.97.4.5        he u_int pmap_ptpt_cache_generation;
    205  1.97.4.5        he 
    206  1.97.4.5        he static void *pmap_ptpt_page_alloc(struct pool *, int);
    207  1.97.4.5        he static void pmap_ptpt_page_free(struct pool *, void *);
    208  1.97.4.5        he 
    209  1.97.4.5        he struct pool_allocator pmap_ptpt_allocator = {
    210  1.97.4.5        he 	pmap_ptpt_page_alloc, pmap_ptpt_page_free,
    211  1.97.4.5        he };
    212  1.97.4.5        he 
    213  1.97.4.5        he static int pmap_ptpt_ctor(void *, void *, int);
    214  1.97.4.5        he 
    215      1.54   thorpej static pt_entry_t *csrc_pte, *cdst_pte;
    216      1.54   thorpej static vaddr_t csrcp, cdstp;
    217      1.54   thorpej 
    218       1.1      matt char *memhook;
    219       1.1      matt extern caddr_t msgbufaddr;
    220       1.1      matt 
    221       1.1      matt boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
    222      1.17     chris /*
    223      1.17     chris  * locking data structures
    224      1.17     chris  */
    225       1.1      matt 
    226      1.17     chris static struct lock pmap_main_lock;
    227      1.17     chris static struct simplelock pvalloc_lock;
    228      1.48     chris static struct simplelock pmaps_lock;
    229      1.17     chris #ifdef LOCKDEBUG
    230      1.17     chris #define PMAP_MAP_TO_HEAD_LOCK() \
    231      1.17     chris      (void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
    232      1.17     chris #define PMAP_MAP_TO_HEAD_UNLOCK() \
    233      1.17     chris      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
    234      1.17     chris 
    235      1.17     chris #define PMAP_HEAD_TO_MAP_LOCK() \
    236      1.17     chris      (void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
    237      1.17     chris #define PMAP_HEAD_TO_MAP_UNLOCK() \
    238      1.17     chris      (void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
    239      1.17     chris #else
    240      1.17     chris #define	PMAP_MAP_TO_HEAD_LOCK()		/* nothing */
    241      1.17     chris #define	PMAP_MAP_TO_HEAD_UNLOCK()	/* nothing */
    242      1.17     chris #define	PMAP_HEAD_TO_MAP_LOCK()		/* nothing */
    243      1.17     chris #define	PMAP_HEAD_TO_MAP_UNLOCK()	/* nothing */
    244      1.17     chris #endif /* LOCKDEBUG */
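
                             /*
                              * A sketch of the intended protocol (an inference from the macro
                              * names above, stated here for clarity): functions that start from a
                              * pmap and may need to look at pv lists take the map-to-head (shared)
                              * lock, while functions that start from a vm_page and may need to
                              * visit several pmaps take the head-to-map (exclusive) lock, so the
                              * two traversal directions cannot deadlock against each other.
                              */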
    245      1.17     chris 
    246      1.17     chris /*
    247      1.17     chris  * pv_page management structures: locked by pvalloc_lock
    248      1.17     chris  */
    249       1.1      matt 
    250      1.17     chris TAILQ_HEAD(pv_pagelist, pv_page);
    251      1.17     chris static struct pv_pagelist pv_freepages;	/* list of pv_pages with free entrys */
    252      1.17     chris static struct pv_pagelist pv_unusedpgs; /* list of unused pv_pages */
    253      1.17     chris static int pv_nfpvents;			/* # of free pv entries */
    254      1.17     chris static struct pv_page *pv_initpage;	/* bootstrap page from kernel_map */
    255      1.17     chris static vaddr_t pv_cachedva;		/* cached VA for later use */
    256      1.17     chris 
    257      1.17     chris #define PVE_LOWAT (PVE_PER_PVPAGE / 2)	/* free pv_entry low water mark */
    258      1.17     chris #define PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
    259      1.17     chris 					/* high water mark */
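
                             /*
                              * Worked example (assuming PVE_PER_PVPAGE evaluates to 204, as in
                              * the note where it is defined below): PVE_LOWAT == 102 and
                              * PVE_HIWAT == 102 + 408 == 510.  That is, we try to grow the cache
                              * once fewer than half a page's worth of entries remain, and we only
                              * free whole pv_pages once roughly 2.5 pages' worth sit idle.
                              */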
    260      1.17     chris 
    261      1.17     chris /*
    262      1.17     chris  * local prototypes
    263      1.17     chris  */
    264      1.17     chris 
    265      1.17     chris static struct pv_entry	*pmap_add_pvpage __P((struct pv_page *, boolean_t));
    266      1.17     chris static struct pv_entry	*pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
    267      1.17     chris #define ALLOCPV_NEED	0	/* need PV now */
    268      1.17     chris #define ALLOCPV_TRY	1	/* just try to allocate, don't steal */
    269      1.17     chris #define ALLOCPV_NONEED	2	/* don't need PV, just growing cache */
    270      1.17     chris static struct pv_entry	*pmap_alloc_pvpage __P((struct pmap *, int));
    271      1.49   thorpej static void		 pmap_enter_pv __P((struct vm_page *,
    272      1.17     chris 					    struct pv_entry *, struct pmap *,
    273      1.17     chris 					    vaddr_t, struct vm_page *, int));
    274      1.17     chris static void		 pmap_free_pv __P((struct pmap *, struct pv_entry *));
    275      1.17     chris static void		 pmap_free_pvs __P((struct pmap *, struct pv_entry *));
    276      1.17     chris static void		 pmap_free_pv_doit __P((struct pv_entry *));
    277      1.17     chris static void		 pmap_free_pvpage __P((void));
    278      1.17     chris static boolean_t	 pmap_is_curpmap __P((struct pmap *));
    279      1.49   thorpej static struct pv_entry	*pmap_remove_pv __P((struct vm_page *, struct pmap *,
    280      1.17     chris 			vaddr_t));
    281      1.17     chris #define PMAP_REMOVE_ALL		0	/* remove all mappings */
    282      1.17     chris #define PMAP_REMOVE_SKIPWIRED	1	/* skip wired mappings */
    283       1.1      matt 
    284      1.49   thorpej static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
    285      1.33     chris 	u_int, u_int));
    286      1.33     chris 
    287      1.69   thorpej /*
      288      1.69   thorpej  * Structure that describes an L1 table.
    289      1.69   thorpej  */
    290      1.69   thorpej struct l1pt {
    291      1.69   thorpej 	SIMPLEQ_ENTRY(l1pt)	pt_queue;	/* Queue pointers */
    292      1.69   thorpej 	struct pglist		pt_plist;	/* Allocated page list */
    293      1.69   thorpej 	vaddr_t			pt_va;		/* Allocated virtual address */
    294      1.69   thorpej 	int			pt_flags;	/* Flags */
    295      1.69   thorpej };
    296      1.69   thorpej #define	PTFLAG_STATIC		0x01		/* Statically allocated */
    297      1.69   thorpej #define	PTFLAG_KPT		0x02		/* Kernel pt's are mapped */
    298      1.69   thorpej #define	PTFLAG_CLEAN		0x04		/* L1 is clean */
    299      1.69   thorpej 
    300      1.33     chris static void pmap_free_l1pt __P((struct l1pt *));
    301      1.33     chris static int pmap_allocpagedir __P((struct pmap *));
    302      1.33     chris static int pmap_clean_page __P((struct pv_entry *, boolean_t));
    303      1.49   thorpej static void pmap_remove_all __P((struct vm_page *));
    304      1.33     chris 
    305      1.57   thorpej static struct vm_page	*pmap_alloc_ptp __P((struct pmap *, vaddr_t));
    306      1.57   thorpej static struct vm_page	*pmap_get_ptp __P((struct pmap *, vaddr_t));
    307      1.49   thorpej __inline static void pmap_clearbit __P((struct vm_page *, unsigned int));
    308      1.17     chris 
    309       1.2      matt extern paddr_t physical_start;
    310       1.2      matt extern paddr_t physical_freestart;
    311       1.2      matt extern paddr_t physical_end;
    312       1.2      matt extern paddr_t physical_freeend;
    313       1.1      matt extern unsigned int free_pages;
    314       1.1      matt extern int max_processes;
    315       1.1      matt 
    316      1.54   thorpej vaddr_t virtual_avail;
    317       1.1      matt vaddr_t virtual_end;
    318      1.48     chris vaddr_t pmap_curmaxkvaddr;
    319       1.1      matt 
    320       1.1      matt vaddr_t avail_start;
    321       1.1      matt vaddr_t avail_end;
    322       1.1      matt 
    323       1.1      matt extern pv_addr_t systempage;
    324       1.1      matt 
    325       1.1      matt /* Variables used by the L1 page table queue code */
    326       1.1      matt SIMPLEQ_HEAD(l1pt_queue, l1pt);
    327      1.73   thorpej static struct l1pt_queue l1pt_static_queue; /* head of our static l1 queue */
    328      1.73   thorpej static int l1pt_static_queue_count;	    /* items in the static l1 queue */
    329      1.73   thorpej static int l1pt_static_create_count;	    /* static l1 items created */
    330      1.73   thorpej static struct l1pt_queue l1pt_queue;	    /* head of our l1 queue */
    331      1.73   thorpej static int l1pt_queue_count;		    /* items in the l1 queue */
    332      1.73   thorpej static int l1pt_create_count;		    /* stat - L1's create count */
    333      1.73   thorpej static int l1pt_reuse_count;		    /* stat - L1's reused count */
    334       1.1      matt 
    335       1.1      matt /* Local function prototypes (not used outside this file) */
    336      1.15     chris void pmap_pinit __P((struct pmap *));
    337      1.15     chris void pmap_freepagedir __P((struct pmap *));
    338       1.1      matt 
    339       1.1      matt /* Other function prototypes */
    340       1.1      matt extern void bzero_page __P((vaddr_t));
    341       1.1      matt extern void bcopy_page __P((vaddr_t, vaddr_t));
    342       1.1      matt 
    343       1.1      matt struct l1pt *pmap_alloc_l1pt __P((void));
    344      1.15     chris static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
    345      1.17     chris      vaddr_t l2pa, boolean_t));
    346       1.1      matt 
    347      1.11     chris static pt_entry_t *pmap_map_ptes __P((struct pmap *));
    348      1.17     chris static void pmap_unmap_ptes __P((struct pmap *));
    349      1.11     chris 
    350      1.49   thorpej __inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
    351      1.25  rearnsha     pt_entry_t *, boolean_t));
    352      1.49   thorpej static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
    353      1.25  rearnsha     pt_entry_t *, boolean_t));
    354      1.49   thorpej static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
    355      1.25  rearnsha     pt_entry_t *, boolean_t));
    356      1.11     chris 
    357      1.17     chris /*
    358      1.17     chris  * real definition of pv_entry.
    359      1.17     chris  */
    360      1.17     chris 
    361      1.17     chris struct pv_entry {
    362      1.17     chris 	struct pv_entry *pv_next;       /* next pv_entry */
    363      1.17     chris 	struct pmap     *pv_pmap;        /* pmap where mapping lies */
    364      1.17     chris 	vaddr_t         pv_va;          /* virtual address for mapping */
    365      1.17     chris 	int             pv_flags;       /* flags */
    366      1.17     chris 	struct vm_page	*pv_ptp;	/* vm_page for the ptp */
    367      1.17     chris };
    368      1.17     chris 
    369      1.17     chris /*
    370      1.17     chris  * pv_entrys are dynamically allocated in chunks from a single page.
    371      1.17     chris  * we keep track of how many pv_entrys are in use for each page and
    372      1.17     chris  * we can free pv_entry pages if needed.  there is one lock for the
    373      1.17     chris  * entire allocation system.
    374      1.17     chris  */
    375      1.17     chris 
    376      1.17     chris struct pv_page_info {
    377      1.17     chris 	TAILQ_ENTRY(pv_page) pvpi_list;
    378      1.17     chris 	struct pv_entry *pvpi_pvfree;
    379      1.17     chris 	int pvpi_nfree;
    380      1.17     chris };
    381      1.17     chris 
    382      1.17     chris /*
    383      1.17     chris  * number of pv_entry's in a pv_page
      384      1.17     chris  * (note: won't work on systems where NBPG isn't a constant)
    385      1.17     chris  */
    386      1.17     chris 
    387      1.17     chris #define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
    388      1.17     chris 			sizeof(struct pv_entry))
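
                             /*
                              * Worked example (assumption: arm32 with NBPG == 4096, 4-byte
                              * pointers and no structure padding): sizeof(struct pv_page_info)
                              * == 16 and sizeof(struct pv_entry) == 20, giving
                              * PVE_PER_PVPAGE == (4096 - 16) / 20 == 204 entries per page.
                              */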
    389      1.17     chris 
    390      1.17     chris /*
    391      1.17     chris  * a pv_page: where pv_entrys are allocated from
    392      1.17     chris  */
    393      1.17     chris 
    394      1.17     chris struct pv_page {
    395      1.17     chris 	struct pv_page_info pvinfo;
    396      1.17     chris 	struct pv_entry pvents[PVE_PER_PVPAGE];
    397      1.17     chris };
    398      1.17     chris 
    399       1.1      matt #ifdef MYCROFT_HACK
    400       1.1      matt int mycroft_hack = 0;
    401       1.1      matt #endif
    402       1.1      matt 
    403       1.1      matt /* Function to set the debug level of the pmap code */
    404       1.1      matt 
    405       1.1      matt #ifdef PMAP_DEBUG
    406       1.1      matt void
    407      1.73   thorpej pmap_debug(int level)
    408       1.1      matt {
    409       1.1      matt 	pmap_debug_level = level;
    410       1.1      matt 	printf("pmap_debug: level=%d\n", pmap_debug_level);
    411       1.1      matt }
    412       1.1      matt #endif	/* PMAP_DEBUG */
    413       1.1      matt 
    414      1.22     chris __inline static boolean_t
    415      1.17     chris pmap_is_curpmap(struct pmap *pmap)
    416      1.17     chris {
    417      1.58   thorpej 
    418      1.58   thorpej 	if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
    419      1.58   thorpej 	    pmap == pmap_kernel())
    420      1.58   thorpej 		return (TRUE);
    421      1.58   thorpej 
    422      1.58   thorpej 	return (FALSE);
    423      1.17     chris }
    424      1.58   thorpej 
    425       1.1      matt #include "isadma.h"
    426       1.1      matt 
    427       1.1      matt #if NISADMA > 0
    428       1.1      matt /*
    429       1.1      matt  * Used to protect memory for ISA DMA bounce buffers.  If, when loading
    430       1.1      matt  * pages into the system, memory intersects with any of these ranges,
    431       1.1      matt  * the intersecting memory will be loaded into a lower-priority free list.
    432       1.1      matt  */
    433       1.1      matt bus_dma_segment_t *pmap_isa_dma_ranges;
    434       1.1      matt int pmap_isa_dma_nranges;
    435       1.1      matt 
    436       1.1      matt /*
    437       1.1      matt  * Check if a memory range intersects with an ISA DMA range, and
    438       1.1      matt  * return the page-rounded intersection if it does.  The intersection
    439       1.1      matt  * will be placed on a lower-priority free list.
    440       1.1      matt  */
    441      1.73   thorpej static boolean_t
    442      1.73   thorpej pmap_isa_dma_range_intersect(paddr_t pa, psize_t size, paddr_t *pap,
    443      1.73   thorpej     psize_t *sizep)
    444       1.1      matt {
    445       1.1      matt 	bus_dma_segment_t *ds;
    446       1.1      matt 	int i;
    447       1.1      matt 
    448       1.1      matt 	if (pmap_isa_dma_ranges == NULL)
    449       1.1      matt 		return (FALSE);
    450       1.1      matt 
    451       1.1      matt 	for (i = 0, ds = pmap_isa_dma_ranges;
    452       1.1      matt 	     i < pmap_isa_dma_nranges; i++, ds++) {
    453       1.1      matt 		if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
    454       1.1      matt 			/*
    455       1.1      matt 			 * Beginning of region intersects with this range.
    456       1.1      matt 			 */
    457       1.1      matt 			*pap = trunc_page(pa);
    458       1.1      matt 			*sizep = round_page(min(pa + size,
    459       1.1      matt 			    ds->ds_addr + ds->ds_len) - pa);
    460       1.1      matt 			return (TRUE);
    461       1.1      matt 		}
    462       1.1      matt 		if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
    463       1.1      matt 			/*
    464       1.1      matt 			 * End of region intersects with this range.
    465       1.1      matt 			 */
    466       1.1      matt 			*pap = trunc_page(ds->ds_addr);
    467       1.1      matt 			*sizep = round_page(min((pa + size) - ds->ds_addr,
    468       1.1      matt 			    ds->ds_len));
    469       1.1      matt 			return (TRUE);
    470       1.1      matt 		}
    471       1.1      matt 	}
    472       1.1      matt 
    473       1.1      matt 	/*
    474       1.1      matt 	 * No intersection found.
    475       1.1      matt 	 */
    476       1.1      matt 	return (FALSE);
    477       1.1      matt }
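
                             /*
                              * Hypothetical caller, for illustration only (the real callers live
                              * in machine-dependent memory-loading code): split off the portion
                              * of a physical segment that overlaps an ISA DMA range so it can go
                              * to a lower-priority free list:
                              *
                              *	paddr_t istart;
                              *	psize_t isize;
                              *
                              *	if (pmap_isa_dma_range_intersect(start, size, &istart, &isize))
                              *		... load [istart, istart + isize) separately ...
                              */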
    478       1.1      matt #endif /* NISADMA > 0 */
    479       1.1      matt 
    480       1.1      matt /*
    481      1.17     chris  * p v _ e n t r y   f u n c t i o n s
    482      1.17     chris  */
    483      1.17     chris 
    484      1.17     chris /*
    485      1.17     chris  * pv_entry allocation functions:
    486      1.17     chris  *   the main pv_entry allocation functions are:
    487      1.17     chris  *     pmap_alloc_pv: allocate a pv_entry structure
    488      1.17     chris  *     pmap_free_pv: free one pv_entry
    489      1.17     chris  *     pmap_free_pvs: free a list of pv_entrys
    490      1.17     chris  *
    491      1.17     chris  * the rest are helper functions
    492       1.1      matt  */
    493       1.1      matt 
    494       1.1      matt /*
    495      1.17     chris  * pmap_alloc_pv: inline function to allocate a pv_entry structure
    496      1.17     chris  * => we lock pvalloc_lock
    497      1.17     chris  * => if we fail, we call out to pmap_alloc_pvpage
    498      1.17     chris  * => 3 modes:
    499      1.17     chris  *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
    500      1.17     chris  *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
    501      1.17     chris  *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
    502      1.17     chris  *			one now
    503      1.17     chris  *
    504      1.17     chris  * "try" is for optional functions like pmap_copy().
    505       1.1      matt  */
    506      1.17     chris 
    507      1.17     chris __inline static struct pv_entry *
    508      1.73   thorpej pmap_alloc_pv(struct pmap *pmap, int mode)
    509       1.1      matt {
    510      1.17     chris 	struct pv_page *pvpage;
    511      1.17     chris 	struct pv_entry *pv;
    512      1.17     chris 
    513      1.17     chris 	simple_lock(&pvalloc_lock);
    514      1.17     chris 
    515      1.51     chris 	pvpage = TAILQ_FIRST(&pv_freepages);
    516      1.51     chris 
    517      1.51     chris 	if (pvpage != NULL) {
    518      1.17     chris 		pvpage->pvinfo.pvpi_nfree--;
    519      1.17     chris 		if (pvpage->pvinfo.pvpi_nfree == 0) {
    520      1.17     chris 			/* nothing left in this one? */
    521      1.17     chris 			TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
    522      1.17     chris 		}
    523      1.17     chris 		pv = pvpage->pvinfo.pvpi_pvfree;
    524      1.51     chris 		KASSERT(pv);
    525      1.17     chris 		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
    526      1.17     chris 		pv_nfpvents--;  /* took one from pool */
    527      1.17     chris 	} else {
    528      1.17     chris 		pv = NULL;		/* need more of them */
    529      1.17     chris 	}
    530      1.17     chris 
    531      1.17     chris 	/*
    532      1.17     chris 	 * if below low water mark or we didn't get a pv_entry we try and
    533      1.17     chris 	 * create more pv_entrys ...
    534      1.17     chris 	 */
    535      1.17     chris 
    536      1.17     chris 	if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
    537      1.17     chris 		if (pv == NULL)
    538      1.17     chris 			pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
    539      1.17     chris 					       mode : ALLOCPV_NEED);
    540      1.17     chris 		else
    541      1.17     chris 			(void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
    542      1.17     chris 	}
    543      1.17     chris 
    544      1.17     chris 	simple_unlock(&pvalloc_lock);
    545      1.17     chris 	return(pv);
    546      1.17     chris }
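
                             /*
                              * Illustrative only: with ALLOCPV_NEED the allocator steals or
                              * panics rather than fail, so those callers can mostly assume
                              * success; an optional operation uses ALLOCPV_TRY and must be
                              * prepared to back out:
                              *
                              *	if ((pve = pmap_alloc_pv(pmap, ALLOCPV_TRY)) == NULL)
                              *		return;		... skip the optional work ...
                              */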
    547      1.17     chris 
    548      1.17     chris /*
    549      1.17     chris  * pmap_alloc_pvpage: maybe allocate a new pvpage
    550      1.17     chris  *
    551      1.17     chris  * if need_entry is false: try and allocate a new pv_page
    552      1.17     chris  * if need_entry is true: try and allocate a new pv_page and return a
    553      1.17     chris  *	new pv_entry from it.   if we are unable to allocate a pv_page
    554      1.17     chris  *	we make a last ditch effort to steal a pv_page from some other
    555      1.17     chris  *	mapping.    if that fails, we panic...
    556      1.17     chris  *
    557      1.17     chris  * => we assume that the caller holds pvalloc_lock
    558      1.17     chris  */
    559      1.17     chris 
    560      1.17     chris static struct pv_entry *
    561      1.73   thorpej pmap_alloc_pvpage(struct pmap *pmap, int mode)
    562      1.17     chris {
    563      1.17     chris 	struct vm_page *pg;
    564      1.17     chris 	struct pv_page *pvpage;
    565       1.1      matt 	struct pv_entry *pv;
    566      1.17     chris 	int s;
    567      1.17     chris 
    568      1.17     chris 	/*
    569      1.17     chris 	 * if we need_entry and we've got unused pv_pages, allocate from there
    570      1.17     chris 	 */
    571      1.17     chris 
    572      1.51     chris 	pvpage = TAILQ_FIRST(&pv_unusedpgs);
    573      1.51     chris 	if (mode != ALLOCPV_NONEED && pvpage != NULL) {
    574      1.17     chris 
    575      1.17     chris 		/* move it to pv_freepages list */
    576      1.17     chris 		TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
    577      1.17     chris 		TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);
    578      1.17     chris 
    579      1.17     chris 		/* allocate a pv_entry */
    580      1.17     chris 		pvpage->pvinfo.pvpi_nfree--;	/* can't go to zero */
    581      1.17     chris 		pv = pvpage->pvinfo.pvpi_pvfree;
    582      1.51     chris 		KASSERT(pv);
    583      1.17     chris 		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
    584      1.17     chris 
    585      1.17     chris 		pv_nfpvents--;  /* took one from pool */
    586      1.17     chris 		return(pv);
    587      1.17     chris 	}
    588       1.1      matt 
    589       1.1      matt 	/*
     590      1.17     chris 	 * see if we've got a cached unmapped VA that we can map a page in;
     591      1.17     chris 	 * if not, try to allocate one.
    592       1.1      matt 	 */
    593       1.1      matt 
    594      1.23       chs 
    595      1.17     chris 	if (pv_cachedva == 0) {
    596      1.23       chs 		s = splvm();
    597      1.23       chs 		pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
    598      1.17     chris 		    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
    599      1.23       chs 		splx(s);
    600      1.17     chris 		if (pv_cachedva == 0) {
    601      1.17     chris 			return (NULL);
    602       1.1      matt 		}
    603       1.1      matt 	}
    604      1.17     chris 
    605      1.23       chs 	pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
    606      1.23       chs 	    UVM_PGA_USERESERVE);
    607      1.17     chris 
    608      1.17     chris 	if (pg == NULL)
    609      1.17     chris 		return (NULL);
    610      1.51     chris 	pg->flags &= ~PG_BUSY;	/* never busy */
    611      1.17     chris 
    612      1.17     chris 	/*
    613      1.17     chris 	 * add a mapping for our new pv_page and free its entrys (save one!)
    614      1.17     chris 	 *
    615      1.17     chris 	 * NOTE: If we are allocating a PV page for the kernel pmap, the
    616      1.17     chris 	 * pmap is already locked!  (...but entering the mapping is safe...)
    617      1.17     chris 	 */
    618      1.17     chris 
    619      1.51     chris 	pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
    620      1.51     chris 		VM_PROT_READ|VM_PROT_WRITE);
    621      1.19     chris 	pmap_update(pmap_kernel());
    622      1.17     chris 	pvpage = (struct pv_page *) pv_cachedva;
    623      1.17     chris 	pv_cachedva = 0;
    624      1.17     chris 	return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
    625       1.1      matt }
    626       1.1      matt 
    627       1.1      matt /*
    628      1.17     chris  * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
    629      1.17     chris  *
    630      1.17     chris  * => caller must hold pvalloc_lock
    631      1.17     chris  * => if need_entry is true, we allocate and return one pv_entry
    632       1.1      matt  */
    633       1.1      matt 
    634      1.17     chris static struct pv_entry *
    635      1.73   thorpej pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
    636       1.1      matt {
    637      1.17     chris 	int tofree, lcv;
    638      1.17     chris 
    639      1.17     chris 	/* do we need to return one? */
    640      1.17     chris 	tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;
    641       1.1      matt 
    642      1.17     chris 	pvp->pvinfo.pvpi_pvfree = NULL;
    643      1.17     chris 	pvp->pvinfo.pvpi_nfree = tofree;
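                             	/*
                              	 * Thread entries 0 .. tofree-1 onto the page's free list (LIFO).
                              	 * When need_entry is true, tofree is PVE_PER_PVPAGE - 1, so the
                              	 * last slot, pvents[tofree], stays off the list and is returned
                              	 * to the caller below.
                              	 */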
    644      1.17     chris 	for (lcv = 0 ; lcv < tofree ; lcv++) {
    645      1.17     chris 		pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
    646      1.17     chris 		pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
    647       1.1      matt 	}
    648      1.17     chris 	if (need_entry)
    649      1.17     chris 		TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
    650      1.17     chris 	else
    651      1.17     chris 		TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
    652      1.17     chris 	pv_nfpvents += tofree;
    653      1.17     chris 	return((need_entry) ? &pvp->pvents[lcv] : NULL);
    654       1.1      matt }
    655       1.1      matt 
    656      1.17     chris /*
    657      1.17     chris  * pmap_free_pv_doit: actually free a pv_entry
    658      1.17     chris  *
    659      1.17     chris  * => do not call this directly!  instead use either
    660      1.17     chris  *    1. pmap_free_pv ==> free a single pv_entry
    661      1.17     chris  *    2. pmap_free_pvs => free a list of pv_entrys
    662      1.17     chris  * => we must be holding pvalloc_lock
    663      1.17     chris  */
    664      1.17     chris 
    665      1.17     chris __inline static void
    666      1.73   thorpej pmap_free_pv_doit(struct pv_entry *pv)
    667       1.1      matt {
    668      1.17     chris 	struct pv_page *pvp;
    669       1.1      matt 
    670      1.17     chris 	pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
    671      1.17     chris 	pv_nfpvents++;
    672      1.17     chris 	pvp->pvinfo.pvpi_nfree++;
    673       1.1      matt 
    674      1.17     chris 	/* nfree == 1 => fully allocated page just became partly allocated */
    675      1.17     chris 	if (pvp->pvinfo.pvpi_nfree == 1) {
    676      1.17     chris 		TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
    677       1.1      matt 	}
    678       1.1      matt 
    679      1.17     chris 	/* free it */
    680      1.17     chris 	pv->pv_next = pvp->pvinfo.pvpi_pvfree;
    681      1.17     chris 	pvp->pvinfo.pvpi_pvfree = pv;
    682       1.1      matt 
    683      1.17     chris 	/*
    684      1.17     chris 	 * are all pv_page's pv_entry's free?  move it to unused queue.
    685      1.17     chris 	 */
    686       1.1      matt 
    687      1.17     chris 	if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
    688      1.17     chris 		TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
    689      1.17     chris 		TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
    690       1.1      matt 	}
    691       1.1      matt }
    692       1.1      matt 
    693       1.1      matt /*
    694      1.17     chris  * pmap_free_pv: free a single pv_entry
    695      1.17     chris  *
    696      1.17     chris  * => we gain the pvalloc_lock
    697       1.1      matt  */
    698       1.1      matt 
    699      1.17     chris __inline static void
    700      1.73   thorpej pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
    701       1.1      matt {
    702      1.17     chris 	simple_lock(&pvalloc_lock);
    703      1.17     chris 	pmap_free_pv_doit(pv);
    704      1.17     chris 
    705      1.17     chris 	/*
    706      1.17     chris 	 * Can't free the PV page if the PV entries were associated with
    707      1.17     chris 	 * the kernel pmap; the pmap is already locked.
    708      1.17     chris 	 */
    709      1.51     chris 	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
    710      1.17     chris 	    pmap != pmap_kernel())
    711      1.17     chris 		pmap_free_pvpage();
    712      1.17     chris 
    713      1.17     chris 	simple_unlock(&pvalloc_lock);
    714      1.17     chris }
    715       1.1      matt 
    716      1.17     chris /*
    717      1.17     chris  * pmap_free_pvs: free a list of pv_entrys
    718      1.17     chris  *
    719      1.17     chris  * => we gain the pvalloc_lock
    720      1.17     chris  */
    721       1.1      matt 
    722      1.17     chris __inline static void
    723      1.73   thorpej pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
    724      1.17     chris {
    725      1.17     chris 	struct pv_entry *nextpv;
    726       1.1      matt 
    727      1.17     chris 	simple_lock(&pvalloc_lock);
    728       1.1      matt 
    729      1.17     chris 	for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
    730      1.17     chris 		nextpv = pvs->pv_next;
    731      1.17     chris 		pmap_free_pv_doit(pvs);
    732       1.1      matt 	}
    733       1.1      matt 
    734      1.17     chris 	/*
    735      1.17     chris 	 * Can't free the PV page if the PV entries were associated with
    736      1.17     chris 	 * the kernel pmap; the pmap is already locked.
    737      1.17     chris 	 */
    738      1.51     chris 	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
    739      1.17     chris 	    pmap != pmap_kernel())
    740      1.17     chris 		pmap_free_pvpage();
    741       1.1      matt 
    742      1.17     chris 	simple_unlock(&pvalloc_lock);
    743       1.1      matt }
    744       1.1      matt 
    745       1.1      matt 
    746       1.1      matt /*
    747      1.17     chris  * pmap_free_pvpage: try and free an unused pv_page structure
    748      1.17     chris  *
    749      1.17     chris  * => assume caller is holding the pvalloc_lock and that
    750      1.17     chris  *	there is a page on the pv_unusedpgs list
    751      1.17     chris  * => if we can't get a lock on the kmem_map we try again later
    752       1.1      matt  */
    753       1.1      matt 
    754      1.17     chris static void
    755      1.73   thorpej pmap_free_pvpage(void)
    756       1.1      matt {
    757      1.17     chris 	int s;
    758      1.17     chris 	struct vm_map *map;
    759      1.17     chris 	struct vm_map_entry *dead_entries;
    760      1.17     chris 	struct pv_page *pvp;
    761      1.17     chris 
    762      1.17     chris 	s = splvm(); /* protect kmem_map */
    763       1.1      matt 
    764      1.51     chris 	pvp = TAILQ_FIRST(&pv_unusedpgs);
    765       1.1      matt 
    766       1.1      matt 	/*
    767      1.17     chris 	 * note: watch out for pv_initpage which is allocated out of
    768      1.17     chris 	 * kernel_map rather than kmem_map.
    769       1.1      matt 	 */
    770      1.17     chris 	if (pvp == pv_initpage)
    771      1.17     chris 		map = kernel_map;
    772      1.17     chris 	else
    773      1.17     chris 		map = kmem_map;
    774      1.17     chris 	if (vm_map_lock_try(map)) {
    775      1.17     chris 
    776      1.17     chris 		/* remove pvp from pv_unusedpgs */
    777      1.17     chris 		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
    778      1.17     chris 
    779      1.17     chris 		/* unmap the page */
    780      1.17     chris 		dead_entries = NULL;
    781      1.17     chris 		uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
    782      1.17     chris 		    &dead_entries);
    783      1.17     chris 		vm_map_unlock(map);
    784      1.17     chris 
    785      1.17     chris 		if (dead_entries != NULL)
    786      1.17     chris 			uvm_unmap_detach(dead_entries, 0);
    787       1.1      matt 
      788       1.1      matt 		pv_nfpvents -= PVE_PER_PVPAGE;  /* update free count */
                             
      790      1.17     chris 		if (pvp == pv_initpage)
      791      1.17     chris 			/* no more initpage, we've freed it */
      792      1.17     chris 			pv_initpage = NULL;
      789       1.1      matt 	}
    793       1.1      matt 
    794       1.1      matt 	splx(s);
    795       1.1      matt }
    796       1.1      matt 
    797       1.1      matt /*
    798      1.17     chris  * main pv_entry manipulation functions:
    799      1.49   thorpej  *   pmap_enter_pv: enter a mapping onto a vm_page list
     800      1.49   thorpej  *   pmap_remove_pv: remove a mapping from a vm_page list
    801      1.17     chris  *
    802      1.17     chris  * NOTE: pmap_enter_pv expects to lock the pvh itself
     803      1.17     chris  *       pmap_remove_pv expects the caller to lock the pvh before calling
    804      1.17     chris  */
    805      1.17     chris 
    806      1.17     chris /*
     807      1.49   thorpej  * pmap_enter_pv: enter a mapping onto a vm_page list
    808      1.17     chris  *
    809      1.17     chris  * => caller should hold the proper lock on pmap_main_lock
    810      1.17     chris  * => caller should have pmap locked
    811      1.49   thorpej  * => we will gain the lock on the vm_page and allocate the new pv_entry
    812      1.17     chris  * => caller should adjust ptp's wire_count before calling
    813      1.17     chris  * => caller should not adjust pmap's wire_count
    814      1.17     chris  */
    815      1.17     chris 
    816      1.17     chris __inline static void
    817      1.73   thorpej pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
    818      1.73   thorpej     vaddr_t va, struct vm_page *ptp, int flags)
    819      1.17     chris {
    820      1.17     chris 	pve->pv_pmap = pmap;
    821      1.17     chris 	pve->pv_va = va;
    822      1.17     chris 	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
    823      1.17     chris 	pve->pv_flags = flags;
    824      1.49   thorpej 	simple_lock(&pg->mdpage.pvh_slock);	/* lock vm_page */
    825      1.49   thorpej 	pve->pv_next = pg->mdpage.pvh_list;	/* add to ... */
    826      1.49   thorpej 	pg->mdpage.pvh_list = pve;		/* ... locked list */
    827      1.49   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */
    828      1.78   thorpej 	if (pve->pv_flags & PVF_WIRED)
    829      1.17     chris 		++pmap->pm_stats.wired_count;
    830  1.97.4.4        he #ifdef PMAP_ALIAS_DEBUG
    831  1.97.4.4        he     {
    832  1.97.4.4        he 	int s = splhigh();
    833  1.97.4.4        he 	if (pve->pv_flags & PVF_WRITE)
    834  1.97.4.4        he 		pg->mdpage.rw_mappings++;
    835  1.97.4.4        he 	else
    836  1.97.4.4        he 		pg->mdpage.ro_mappings++;
    837  1.97.4.4        he 	if (pg->mdpage.rw_mappings != 0 &&
    838  1.97.4.4        he 	    (pg->mdpage.kro_mappings != 0 || pg->mdpage.krw_mappings != 0)) {
    839  1.97.4.4        he 		printf("pmap_enter_pv: rw %u, kro %u, krw %u\n",
    840  1.97.4.4        he 		    pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
    841  1.97.4.4        he 		    pg->mdpage.krw_mappings);
    842  1.97.4.4        he 	}
    843  1.97.4.4        he 	splx(s);
    844  1.97.4.4        he     }
    845  1.97.4.4        he #endif /* PMAP_ALIAS_DEBUG */
    846      1.17     chris }
    847      1.17     chris 
    848      1.17     chris /*
    849      1.17     chris  * pmap_remove_pv: try to remove a mapping from a pv_list
    850      1.17     chris  *
    851      1.17     chris  * => caller should hold proper lock on pmap_main_lock
    852      1.17     chris  * => pmap should be locked
    853      1.49   thorpej  * => caller should hold lock on vm_page [so that attrs can be adjusted]
    854      1.17     chris  * => caller should adjust ptp's wire_count and free PTP if needed
    855      1.17     chris  * => caller should NOT adjust pmap's wire_count
    856      1.17     chris  * => we return the removed pve
    857      1.17     chris  */
    858      1.17     chris 
    859      1.17     chris __inline static struct pv_entry *
    860      1.73   thorpej pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
    861      1.17     chris {
    862      1.17     chris 	struct pv_entry *pve, **prevptr;
    863      1.17     chris 
    864      1.49   thorpej 	prevptr = &pg->mdpage.pvh_list;		/* previous pv_entry pointer */
    865      1.17     chris 	pve = *prevptr;
    866      1.17     chris 	while (pve) {
    867      1.17     chris 		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
    868      1.17     chris 			*prevptr = pve->pv_next;		/* remove it! */
    869      1.78   thorpej 			if (pve->pv_flags & PVF_WIRED)
    870      1.17     chris 			    --pmap->pm_stats.wired_count;
    871  1.97.4.4        he #ifdef PMAP_ALIAS_DEBUG
    872  1.97.4.4        he     {
    873  1.97.4.4        he 			int s = splhigh();
    874  1.97.4.4        he 			if (pve->pv_flags & PVF_WRITE) {
    875  1.97.4.4        he 				KASSERT(pg->mdpage.rw_mappings != 0);
    876  1.97.4.4        he 				pg->mdpage.rw_mappings--;
    877  1.97.4.4        he 			} else {
    878  1.97.4.4        he 				KASSERT(pg->mdpage.ro_mappings != 0);
    879  1.97.4.4        he 				pg->mdpage.ro_mappings--;
    880  1.97.4.4        he 			}
    881  1.97.4.4        he 			splx(s);
    882  1.97.4.4        he     }
    883  1.97.4.4        he #endif /* PMAP_ALIAS_DEBUG */
    884      1.17     chris 			break;
    885      1.17     chris 		}
    886      1.17     chris 		prevptr = &pve->pv_next;		/* previous pointer */
    887      1.17     chris 		pve = pve->pv_next;			/* advance */
    888      1.17     chris 	}
    889      1.17     chris 	return(pve);				/* return removed pve */
    890      1.17     chris }
    891      1.17     chris 
    892      1.17     chris /*
    893      1.17     chris  *
    894      1.17     chris  * pmap_modify_pv: Update pv flags
    895      1.17     chris  *
    896      1.49   thorpej  * => caller should hold lock on vm_page [so that attrs can be adjusted]
    897      1.17     chris  * => caller should NOT adjust pmap's wire_count
    898      1.29  rearnsha  * => caller must call pmap_vac_me_harder() if writable status of a page
    899      1.29  rearnsha  *    may have changed.
    900      1.17     chris  * => we return the old flags
    901      1.17     chris  *
    902       1.1      matt  * Modify a physical-virtual mapping in the pv table
    903       1.1      matt  */
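
                             /*
                              * Example, for clarity: the new flags are (old & ~bic_mask) ^
                              * eor_mask, so to clear PVF_WIRED pass bic_mask = PVF_WIRED and
                              * eor_mask = 0, and to set it pass bic_mask = eor_mask = PVF_WIRED
                              * (clear the bit, then toggle it back on).
                              */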
    904       1.1      matt 
    905      1.73   thorpej static /* __inline */ u_int
    906      1.73   thorpej pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
    907      1.73   thorpej     u_int bic_mask, u_int eor_mask)
    908       1.1      matt {
    909       1.1      matt 	struct pv_entry *npv;
    910       1.1      matt 	u_int flags, oflags;
    911       1.1      matt 
    912       1.1      matt 	/*
    913       1.1      matt 	 * There is at least one VA mapping this page.
    914       1.1      matt 	 */
    915       1.1      matt 
    916      1.49   thorpej 	for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
    917       1.1      matt 		if (pmap == npv->pv_pmap && va == npv->pv_va) {
    918       1.1      matt 			oflags = npv->pv_flags;
    919       1.1      matt 			npv->pv_flags = flags =
    920       1.1      matt 			    ((oflags & ~bic_mask) ^ eor_mask);
    921      1.78   thorpej 			if ((flags ^ oflags) & PVF_WIRED) {
    922      1.78   thorpej 				if (flags & PVF_WIRED)
    923       1.1      matt 					++pmap->pm_stats.wired_count;
    924       1.1      matt 				else
    925       1.1      matt 					--pmap->pm_stats.wired_count;
    926       1.1      matt 			}
    927  1.97.4.4        he #ifdef PMAP_ALIAS_DEBUG
    928  1.97.4.4        he     {
    929  1.97.4.4        he 			int s = splhigh();
    930  1.97.4.4        he 			if ((flags ^ oflags) & PVF_WRITE) {
    931  1.97.4.4        he 				if (flags & PVF_WRITE) {
    932  1.97.4.4        he 					pg->mdpage.rw_mappings++;
    933  1.97.4.4        he 					pg->mdpage.ro_mappings--;
    934  1.97.4.4        he 					if (pg->mdpage.rw_mappings != 0 &&
    935  1.97.4.4        he 					    (pg->mdpage.kro_mappings != 0 ||
    936  1.97.4.4        he 					     pg->mdpage.krw_mappings != 0)) {
    937  1.97.4.4        he 						printf("pmap_modify_pv: rw %u, "
    938  1.97.4.4        he 						    "kro %u, krw %u\n",
    939  1.97.4.4        he 						    pg->mdpage.rw_mappings,
    940  1.97.4.4        he 						    pg->mdpage.kro_mappings,
    941  1.97.4.4        he 						    pg->mdpage.krw_mappings);
    942  1.97.4.4        he 					}
    943  1.97.4.4        he 				} else {
    944  1.97.4.4        he 					KASSERT(pg->mdpage.rw_mappings != 0);
    945  1.97.4.4        he 					pg->mdpage.rw_mappings--;
    946  1.97.4.4        he 					pg->mdpage.ro_mappings++;
    947  1.97.4.4        he 				}
    948  1.97.4.4        he 			}
    949  1.97.4.4        he 			splx(s);
    950  1.97.4.4        he     }
    951  1.97.4.4        he #endif /* PMAP_ALIAS_DEBUG */
    952       1.1      matt 			return (oflags);
    953       1.1      matt 		}
    954       1.1      matt 	}
    955       1.1      matt 	return (0);
    956       1.1      matt }
    957       1.1      matt 
    958       1.1      matt /*
    959       1.1      matt  * Map the specified level 2 pagetable into the level 1 page table for
    960       1.1      matt  * the given pmap to cover a chunk of virtual address space starting from the
    961       1.1      matt  * address specified.
    962       1.1      matt  */
    963      1.73   thorpej static __inline void
    964      1.73   thorpej pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, boolean_t selfref)
    965       1.1      matt {
    966       1.1      matt 	vaddr_t ptva;
    967       1.1      matt 
    968       1.1      matt 	/* Calculate the index into the L1 page table. */
    969      1.81   thorpej 	ptva = (va >> L1_S_SHIFT) & ~3;
    970       1.1      matt 
    971       1.1      matt 	/* Map page table into the L1. */
    972      1.83   thorpej 	pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
    973      1.83   thorpej 	pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
    974      1.83   thorpej 	pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
    975      1.83   thorpej 	pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);
    976  1.97.4.3        he 	cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
    977       1.1      matt 
    978       1.1      matt 	/* Map the page table into the page table area. */
    979      1.73   thorpej 	if (selfref)
    980      1.83   thorpej 		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
    981      1.83   thorpej 		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
    982       1.1      matt }
    983       1.1      matt 
    984       1.1      matt #if 0
    985      1.73   thorpej static __inline void
    986      1.73   thorpej pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
    987       1.1      matt {
    988       1.1      matt 	vaddr_t ptva;
    989       1.1      matt 
    990       1.1      matt 	/* Calculate the index into the L1 page table. */
    991      1.81   thorpej 	ptva = (va >> L1_S_SHIFT) & ~3;
    992       1.1      matt 
    993       1.1      matt 	/* Unmap page table from the L1. */
    994       1.1      matt 	pmap->pm_pdir[ptva + 0] = 0;
    995       1.1      matt 	pmap->pm_pdir[ptva + 1] = 0;
    996       1.1      matt 	pmap->pm_pdir[ptva + 2] = 0;
    997       1.1      matt 	pmap->pm_pdir[ptva + 3] = 0;
    998  1.97.4.3        he 	cpu_dcache_wb_range((vaddr_t) &pmap->pm_pdir[ptva + 0], 16);
    999       1.1      matt 
   1000       1.1      matt 	/* Unmap the page table from the page table area. */
   1001       1.1      matt 	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
   1002       1.1      matt }
   1003       1.1      matt #endif
   1004       1.1      matt 
   1005       1.1      matt /*
   1006       1.1      matt  *	Used to map a range of physical addresses into kernel
   1007       1.1      matt  *	virtual address space.
   1008       1.1      matt  *
   1009       1.1      matt  *	For now, VM is already on, we only need to map the
   1010       1.1      matt  *	specified memory.
   1011  1.97.4.1     lukem  *
   1012  1.97.4.1     lukem  *	XXX This routine should eventually go away; it's only used
   1013  1.97.4.1     lukem  *	XXX by machine-dependent crash dump code.
   1014       1.1      matt  */
   1015       1.1      matt vaddr_t
   1016      1.73   thorpej pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
   1017       1.1      matt {
   1018  1.97.4.1     lukem 	pt_entry_t *pte;
   1019  1.97.4.1     lukem 
   1020       1.1      matt 	while (spa < epa) {
   1021  1.97.4.1     lukem 		pte = vtopte(va);
   1022  1.97.4.1     lukem 
   1023  1.97.4.1     lukem 		*pte = L2_S_PROTO | spa |
   1024  1.97.4.1     lukem 		    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
   1025  1.97.4.1     lukem 		cpu_tlb_flushID_SE(va);
   1026       1.1      matt 		va += NBPG;
   1027       1.1      matt 		spa += NBPG;
   1028       1.1      matt 	}
   1029      1.19     chris 	pmap_update(pmap_kernel());
   1030       1.1      matt 	return(va);
   1031       1.1      matt }
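                            /*
                             * A hypothetical call from crash dump code (the only remaining user,
                             * per the comment above) might look like this, where "dump_pa" is an
                             * illustrative name and not something defined in this file:
                             *
                             *	va = pmap_map(va, dump_pa, dump_pa + NBPG,
                             *	    VM_PROT_READ|VM_PROT_WRITE);
                             */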
   1032       1.1      matt 
   1033       1.1      matt 
   1034       1.1      matt /*
   1035       1.3      matt  * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
   1036       1.1      matt  *
   1037       1.1      matt  * bootstrap the pmap system. This is called from initarm and allows
    1038       1.1      matt  * the pmap system to initialise any structures it requires.
   1039       1.1      matt  *
   1040       1.1      matt  * Currently this sets up the kernel_pmap that is statically allocated
    1041       1.1      matt  * and also allocates virtual addresses for certain page hooks.
    1042       1.1      matt  * Currently page hooks are allocated for zeroing and copying physical
    1043       1.1      matt  * pages of memory.
    1044       1.1      matt  * It also initialises the start and end addresses of the kernel data space.
   1045       1.1      matt  */
   1046       1.2      matt extern paddr_t physical_freestart;
   1047       1.2      matt extern paddr_t physical_freeend;
   1048       1.1      matt 
   1049      1.17     chris char *boot_head;
   1050       1.1      matt 
   1051       1.1      matt void
   1052      1.73   thorpej pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
   1053       1.1      matt {
   1054      1.54   thorpej 	pt_entry_t *pte;
   1055       1.1      matt 	int loop;
   1056       1.2      matt 	paddr_t start, end;
   1057       1.1      matt #if NISADMA > 0
   1058       1.2      matt 	paddr_t istart;
   1059       1.2      matt 	psize_t isize;
   1060       1.1      matt #endif
   1061       1.1      matt 
   1062      1.15     chris 	pmap_kernel()->pm_pdir = kernel_l1pt;
   1063      1.15     chris 	pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
   1064      1.15     chris 	pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
   1065      1.15     chris 	simple_lock_init(&pmap_kernel()->pm_lock);
   1066      1.16     chris 	pmap_kernel()->pm_obj.pgops = NULL;
   1067      1.16     chris 	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
   1068      1.16     chris 	pmap_kernel()->pm_obj.uo_npages = 0;
   1069      1.16     chris 	pmap_kernel()->pm_obj.uo_refs = 1;
   1070  1.97.4.1     lukem 
   1071       1.1      matt 	/*
   1072       1.1      matt 	 * Initialize PAGE_SIZE-dependent variables.
   1073       1.1      matt 	 */
   1074       1.1      matt 	uvm_setpagesize();
   1075       1.1      matt 
   1076       1.1      matt 	loop = 0;
   1077       1.1      matt 	while (loop < bootconfig.dramblocks) {
   1078       1.2      matt 		start = (paddr_t)bootconfig.dram[loop].address;
   1079       1.1      matt 		end = start + (bootconfig.dram[loop].pages * NBPG);
   1080       1.1      matt 		if (start < physical_freestart)
   1081       1.1      matt 			start = physical_freestart;
   1082       1.1      matt 		if (end > physical_freeend)
   1083       1.1      matt 			end = physical_freeend;
   1084       1.1      matt #if 0
   1085       1.1      matt 		printf("%d: %lx -> %lx\n", loop, start, end - 1);
   1086       1.1      matt #endif
   1087       1.1      matt #if NISADMA > 0
   1088       1.1      matt 		if (pmap_isa_dma_range_intersect(start, end - start,
   1089       1.1      matt 		    &istart, &isize)) {
   1090       1.1      matt 			/*
   1091       1.1      matt 			 * Place the pages that intersect with the
   1092       1.1      matt 			 * ISA DMA range onto the ISA DMA free list.
   1093       1.1      matt 			 */
   1094       1.1      matt #if 0
   1095       1.1      matt 			printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
   1096       1.1      matt 			    istart + isize - 1);
   1097       1.1      matt #endif
   1098       1.1      matt 			uvm_page_physload(atop(istart),
   1099       1.1      matt 			    atop(istart + isize), atop(istart),
   1100       1.1      matt 			    atop(istart + isize), VM_FREELIST_ISADMA);
   1101      1.73   thorpej 
   1102       1.1      matt 			/*
   1103       1.1      matt 			 * Load the pieces that come before
   1104       1.1      matt 			 * the intersection into the default
   1105       1.1      matt 			 * free list.
   1106       1.1      matt 			 */
   1107       1.1      matt 			if (start < istart) {
   1108       1.1      matt #if 0
   1109       1.1      matt 				printf("    BEFORE 0x%lx -> 0x%lx\n",
   1110       1.1      matt 				    start, istart - 1);
   1111       1.1      matt #endif
   1112       1.1      matt 				uvm_page_physload(atop(start),
   1113       1.1      matt 				    atop(istart), atop(start),
   1114       1.1      matt 				    atop(istart), VM_FREELIST_DEFAULT);
   1115       1.1      matt 			}
   1116       1.1      matt 
   1117       1.1      matt 			/*
   1118       1.1      matt 			 * Load the pieces that come after
   1119       1.1      matt 			 * the intersection into the default
   1120       1.1      matt 			 * free list.
   1121       1.1      matt 			 */
   1122       1.1      matt 			if ((istart + isize) < end) {
   1123       1.1      matt #if 0
   1124       1.1      matt 				printf("     AFTER 0x%lx -> 0x%lx\n",
   1125       1.1      matt 				    (istart + isize), end - 1);
   1126       1.1      matt #endif
   1127       1.1      matt 				uvm_page_physload(atop(istart + isize),
   1128       1.1      matt 				    atop(end), atop(istart + isize),
   1129       1.1      matt 				    atop(end), VM_FREELIST_DEFAULT);
   1130       1.1      matt 			}
   1131       1.1      matt 		} else {
   1132       1.1      matt 			uvm_page_physload(atop(start), atop(end),
   1133       1.1      matt 			    atop(start), atop(end), VM_FREELIST_DEFAULT);
   1134       1.1      matt 		}
   1135       1.1      matt #else	/* NISADMA > 0 */
   1136       1.1      matt 		uvm_page_physload(atop(start), atop(end),
   1137       1.1      matt 		    atop(start), atop(end), VM_FREELIST_DEFAULT);
   1138       1.1      matt #endif /* NISADMA > 0 */
   1139       1.1      matt 		++loop;
   1140       1.1      matt 	}
   1141       1.1      matt 
   1142      1.54   thorpej 	virtual_avail = KERNEL_VM_BASE;
   1143      1.74   thorpej 	virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;
   1144       1.1      matt 
   1145       1.1      matt 	/*
   1146      1.54   thorpej 	 * now we allocate the "special" VAs which are used for tmp mappings
   1147      1.54   thorpej 	 * by the pmap (and other modules).  we allocate the VAs by advancing
   1148      1.54   thorpej 	 * virtual_avail (note that there are no pages mapped at these VAs).
   1149      1.54   thorpej 	 * we find the PTE that maps the allocated VA via the linear PTE
   1150      1.54   thorpej 	 * mapping.
   1151       1.1      matt 	 */
   1152       1.1      matt 
   1153      1.54   thorpej 	pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);
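                            	/*
                            	 * (This is just vtopte(virtual_avail): the linear map rooted
                            	 * at PTE_BASE holds one 4-byte PTE per page of VA, so indexing
                            	 * by atop(va) yields the PTE that maps va.)
                            	 */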
   1154      1.54   thorpej 
   1155      1.54   thorpej 	csrcp = virtual_avail; csrc_pte = pte;
   1156      1.54   thorpej 	virtual_avail += PAGE_SIZE; pte++;
   1157      1.54   thorpej 
   1158      1.54   thorpej 	cdstp = virtual_avail; cdst_pte = pte;
   1159      1.54   thorpej 	virtual_avail += PAGE_SIZE; pte++;
   1160      1.54   thorpej 
   1161      1.54   thorpej 	memhook = (char *) virtual_avail;	/* don't need pte */
   1162      1.54   thorpej 	virtual_avail += PAGE_SIZE; pte++;
   1163      1.54   thorpej 
   1164      1.54   thorpej 	msgbufaddr = (caddr_t) virtual_avail;	/* don't need pte */
   1165      1.54   thorpej 	virtual_avail += round_page(MSGBUFSIZE);
   1166      1.54   thorpej 	pte += atop(round_page(MSGBUFSIZE));
   1167       1.1      matt 
   1168      1.17     chris 	/*
   1169      1.17     chris 	 * init the static-global locks and global lists.
   1170      1.17     chris 	 */
   1171      1.17     chris 	spinlockinit(&pmap_main_lock, "pmaplk", 0);
   1172      1.17     chris 	simple_lock_init(&pvalloc_lock);
   1173      1.48     chris 	simple_lock_init(&pmaps_lock);
   1174      1.48     chris 	LIST_INIT(&pmaps);
   1175      1.17     chris 	TAILQ_INIT(&pv_freepages);
   1176      1.17     chris 	TAILQ_INIT(&pv_unusedpgs);
   1177       1.1      matt 
   1178      1.10     chris 	/*
   1179      1.10     chris 	 * initialize the pmap pool.
   1180      1.10     chris 	 */
   1181      1.10     chris 
   1182      1.10     chris 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
   1183      1.52   thorpej 		  &pool_allocator_nointr);
   1184  1.97.4.5        he 
   1185  1.97.4.5        he 	/*
   1186  1.97.4.5        he 	 * initialize the PT-PT pool and cache.
   1187  1.97.4.5        he 	 */
   1188  1.97.4.5        he 
   1189  1.97.4.5        he 	pool_init(&pmap_ptpt_pool, PAGE_SIZE, 0, 0, 0, "ptptpl",
   1190  1.97.4.5        he 		  &pmap_ptpt_allocator);
   1191  1.97.4.5        he 	pool_cache_init(&pmap_ptpt_cache, &pmap_ptpt_pool,
   1192  1.97.4.5        he 			pmap_ptpt_ctor, NULL, NULL);
   1193  1.97.4.5        he 
   1194      1.36   thorpej 	cpu_dcache_wbinv_all();
   1195       1.1      matt }
   1196       1.1      matt 
   1197       1.1      matt /*
   1198       1.1      matt  * void pmap_init(void)
   1199       1.1      matt  *
   1200       1.1      matt  * Initialize the pmap module.
   1201       1.1      matt  * Called by vm_init() in vm/vm_init.c in order to initialise
   1202       1.1      matt  * any structures that the pmap system needs to map virtual memory.
   1203       1.1      matt  */
   1204       1.1      matt 
   1205       1.1      matt extern int physmem;
   1206       1.1      matt 
   1207       1.1      matt void
   1208      1.73   thorpej pmap_init(void)
   1209       1.1      matt {
   1210       1.1      matt 
   1211       1.1      matt 	/*
    1212       1.1      matt 	 * Set the available memory vars - these do not map to real memory
    1213       1.1      matt 	 * addresses and cannot, as the physical memory is fragmented.
    1214       1.1      matt 	 * They are used by ps for %mem calculations.
    1215       1.1      matt 	 * One could argue whether this should be the entire memory or just
    1216       1.1      matt 	 * the memory that is usable in a user process.
   1217       1.1      matt 	 */
   1218       1.1      matt 	avail_start = 0;
   1219       1.1      matt 	avail_end = physmem * NBPG;
   1220       1.1      matt 
   1221      1.17     chris 	/*
   1222      1.17     chris 	 * now we need to free enough pv_entry structures to allow us to get
   1223      1.17     chris 	 * the kmem_map/kmem_object allocated and inited (done after this
   1224      1.17     chris 	 * function is finished).  to do this we allocate one bootstrap page out
   1225      1.17     chris 	 * of kernel_map and use it to provide an initial pool of pv_entry
   1226      1.17     chris 	 * structures.   we never free this page.
   1227      1.17     chris 	 */
   1228      1.17     chris 
   1229      1.17     chris 	pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
   1230      1.17     chris 	if (pv_initpage == NULL)
   1231      1.17     chris 		panic("pmap_init: pv_initpage");
   1232      1.17     chris 	pv_cachedva = 0;   /* a VA we have allocated but not used yet */
   1233      1.17     chris 	pv_nfpvents = 0;
   1234      1.17     chris 	(void) pmap_add_pvpage(pv_initpage, FALSE);
   1235      1.17     chris 
   1236       1.1      matt 	pmap_initialized = TRUE;
   1237       1.1      matt 
   1238       1.1      matt 	/* Initialise our L1 page table queues and counters */
   1239       1.1      matt 	SIMPLEQ_INIT(&l1pt_static_queue);
   1240       1.1      matt 	l1pt_static_queue_count = 0;
   1241       1.1      matt 	l1pt_static_create_count = 0;
   1242       1.1      matt 	SIMPLEQ_INIT(&l1pt_queue);
   1243       1.1      matt 	l1pt_queue_count = 0;
   1244       1.1      matt 	l1pt_create_count = 0;
   1245       1.1      matt 	l1pt_reuse_count = 0;
   1246       1.1      matt }
   1247       1.1      matt 
   1248       1.1      matt /*
   1249       1.1      matt  * pmap_postinit()
   1250       1.1      matt  *
   1251       1.1      matt  * This routine is called after the vm and kmem subsystems have been
   1252       1.1      matt  * initialised. This allows the pmap code to perform any initialisation
    1253       1.1      matt  * that can only be done once the memory allocation is in place.
   1254       1.1      matt  */
   1255       1.1      matt 
   1256       1.1      matt void
   1257      1.73   thorpej pmap_postinit(void)
   1258       1.1      matt {
   1259       1.1      matt 	int loop;
   1260       1.1      matt 	struct l1pt *pt;
   1261       1.1      matt 
   1262       1.1      matt #ifdef PMAP_STATIC_L1S
   1263       1.1      matt 	for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
   1264       1.1      matt #else	/* PMAP_STATIC_L1S */
   1265       1.1      matt 	for (loop = 0; loop < max_processes; ++loop) {
   1266       1.1      matt #endif	/* PMAP_STATIC_L1S */
   1267       1.1      matt 		/* Allocate a L1 page table */
   1268       1.1      matt 		pt = pmap_alloc_l1pt();
   1269       1.1      matt 		if (!pt)
   1270       1.1      matt 			panic("Cannot allocate static L1 page tables\n");
   1271       1.1      matt 
   1272       1.1      matt 		/* Clean it */
   1273      1.81   thorpej 		bzero((void *)pt->pt_va, L1_TABLE_SIZE);
   1274       1.1      matt 		pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
   1275       1.1      matt 		/* Add the page table to the queue */
   1276       1.1      matt 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
   1277       1.1      matt 		++l1pt_static_queue_count;
   1278       1.1      matt 		++l1pt_static_create_count;
   1279       1.1      matt 	}
   1280       1.1      matt }
   1281       1.1      matt 
   1282       1.1      matt 
   1283       1.1      matt /*
   1284       1.1      matt  * Create and return a physical map.
   1285       1.1      matt  *
    1286       1.1      matt  * The map is an actual physical map, and may be referenced by the
    1287       1.1      matt  * hardware.  (pmap_create() here takes no size argument; the traditional
    1288       1.1      matt  * size-zero/software-only distinction therefore does not apply.)
   1291       1.1      matt  */
   1292       1.1      matt 
   1293       1.1      matt pmap_t
   1294      1.73   thorpej pmap_create(void)
   1295       1.1      matt {
   1296      1.15     chris 	struct pmap *pmap;
   1297       1.1      matt 
   1298      1.10     chris 	/*
   1299      1.10     chris 	 * Fetch pmap entry from the pool
   1300      1.10     chris 	 */
   1301      1.10     chris 
   1302      1.10     chris 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
   1303      1.17     chris 	/* XXX is this really needed! */
   1304      1.17     chris 	memset(pmap, 0, sizeof(*pmap));
   1305       1.1      matt 
   1306      1.16     chris 	simple_lock_init(&pmap->pm_obj.vmobjlock);
   1307      1.16     chris 	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
   1308      1.16     chris 	TAILQ_INIT(&pmap->pm_obj.memq);
   1309      1.16     chris 	pmap->pm_obj.uo_npages = 0;
   1310      1.16     chris 	pmap->pm_obj.uo_refs = 1;
   1311      1.16     chris 	pmap->pm_stats.wired_count = 0;
   1312      1.16     chris 	pmap->pm_stats.resident_count = 1;
   1313      1.70   thorpej 	pmap->pm_ptphint = NULL;
   1314      1.16     chris 
   1315       1.1      matt 	/* Now init the machine part of the pmap */
   1316       1.1      matt 	pmap_pinit(pmap);
   1317       1.1      matt 	return(pmap);
   1318       1.1      matt }
   1319       1.1      matt 
   1320       1.1      matt /*
   1321       1.1      matt  * pmap_alloc_l1pt()
   1322       1.1      matt  *
    1323       1.1      matt  * This routine allocates physical and virtual memory for an L1 page table
    1324       1.1      matt  * and wires it.
    1325       1.1      matt  * An l1pt structure is returned to describe the allocated page table.
   1326       1.1      matt  *
   1327       1.1      matt  * This routine is allowed to fail if the required memory cannot be allocated.
   1328       1.1      matt  * In this case NULL is returned.
   1329       1.1      matt  */
   1330       1.1      matt 
   1331       1.1      matt struct l1pt *
   1332       1.1      matt pmap_alloc_l1pt(void)
   1333       1.1      matt {
   1334       1.2      matt 	paddr_t pa;
   1335       1.2      matt 	vaddr_t va;
   1336       1.1      matt 	struct l1pt *pt;
   1337       1.1      matt 	int error;
   1338       1.9       chs 	struct vm_page *m;
   1339       1.1      matt 
   1340       1.1      matt 	/* Allocate virtual address space for the L1 page table */
   1341      1.81   thorpej 	va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
   1342       1.1      matt 	if (va == 0) {
   1343       1.1      matt #ifdef DIAGNOSTIC
   1344      1.26  rearnsha 		PDEBUG(0,
   1345      1.26  rearnsha 		    printf("pmap: Cannot allocate pageable memory for L1\n"));
   1346       1.1      matt #endif	/* DIAGNOSTIC */
   1347       1.1      matt 		return(NULL);
   1348       1.1      matt 	}
   1349       1.1      matt 
   1350       1.1      matt 	/* Allocate memory for the l1pt structure */
   1351       1.1      matt 	pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);
   1352       1.1      matt 
   1353       1.1      matt 	/*
   1354       1.1      matt 	 * Allocate pages from the VM system.
   1355       1.1      matt 	 */
   1356       1.1      matt 	TAILQ_INIT(&pt->pt_plist);
   1357      1.81   thorpej 	error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
   1358      1.81   thorpej 	    L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
   1359       1.1      matt 	if (error) {
   1360       1.1      matt #ifdef DIAGNOSTIC
   1361      1.26  rearnsha 		PDEBUG(0,
   1362      1.26  rearnsha 		    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
   1363      1.26  rearnsha 		    error));
   1364       1.1      matt #endif	/* DIAGNOSTIC */
   1365       1.1      matt 		/* Release the resources we already have claimed */
   1366       1.1      matt 		free(pt, M_VMPMAP);
   1367      1.81   thorpej 		uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
   1368       1.1      matt 		return(NULL);
   1369       1.1      matt 	}
   1370       1.1      matt 
   1371       1.1      matt 	/* Map our physical pages into our virtual space */
   1372       1.1      matt 	pt->pt_va = va;
   1373      1.51     chris 	m = TAILQ_FIRST(&pt->pt_plist);
   1374      1.81   thorpej 	while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
   1375       1.1      matt 		pa = VM_PAGE_TO_PHYS(m);
   1376       1.1      matt 
   1377  1.97.4.3        he 		pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE);
   1378       1.1      matt 
   1379       1.1      matt 		va += NBPG;
   1380       1.1      matt 		m = m->pageq.tqe_next;
   1381       1.1      matt 	}
   1382       1.1      matt 
   1383       1.1      matt #ifdef DIAGNOSTIC
   1384       1.1      matt 	if (m)
   1385       1.1      matt 		panic("pmap_alloc_l1pt: pglist not empty\n");
   1386       1.1      matt #endif	/* DIAGNOSTIC */
   1387       1.1      matt 
   1388       1.1      matt 	pt->pt_flags = 0;
   1389       1.1      matt 	return(pt);
   1390       1.1      matt }
   1391       1.1      matt 
   1392       1.1      matt /*
   1393       1.1      matt  * Free a L1 page table previously allocated with pmap_alloc_l1pt().
   1394       1.1      matt  */
   1395      1.33     chris static void
   1396      1.73   thorpej pmap_free_l1pt(struct l1pt *pt)
   1397       1.1      matt {
    1398       1.1      matt 	/* Separate the physical memory from the virtual space */
   1399      1.81   thorpej 	pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
   1400      1.19     chris 	pmap_update(pmap_kernel());
   1401       1.1      matt 
   1402       1.1      matt 	/* Return the physical memory */
   1403       1.1      matt 	uvm_pglistfree(&pt->pt_plist);
   1404       1.1      matt 
   1405       1.1      matt 	/* Free the virtual space */
   1406      1.81   thorpej 	uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);
   1407       1.1      matt 
   1408       1.1      matt 	/* Free the l1pt structure */
   1409       1.1      matt 	free(pt, M_VMPMAP);
   1410       1.1      matt }
   1411       1.1      matt 
   1412       1.1      matt /*
   1413  1.97.4.5        he  * pmap_ptpt_page_alloc:
   1414      1.93   thorpej  *
   1415  1.97.4.5        he  *	Back-end page allocator for the PT-PT pool.
   1416      1.93   thorpej  */
   1417  1.97.4.5        he static void *
   1418  1.97.4.5        he pmap_ptpt_page_alloc(struct pool *pp, int flags)
   1419      1.93   thorpej {
   1420      1.93   thorpej 	struct vm_page *pg;
   1421      1.93   thorpej 	pt_entry_t *pte;
   1422  1.97.4.5        he 	vaddr_t va;
   1423      1.93   thorpej 
   1424  1.97.4.5        he 	/* XXX PR_WAITOK? */
   1425  1.97.4.5        he 	va = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
   1426  1.97.4.5        he 	if (va == 0)
   1427  1.97.4.5        he 		return (NULL);
   1428      1.93   thorpej 
   1429      1.93   thorpej 	for (;;) {
   1430      1.93   thorpej 		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
   1431      1.93   thorpej 		if (pg != NULL)
   1432      1.93   thorpej 			break;
   1433  1.97.4.5        he 		if ((flags & PR_WAITOK) == 0) {
   1434  1.97.4.5        he 			uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
   1435  1.97.4.5        he 			return (NULL);
   1436  1.97.4.5        he 		}
   1437      1.93   thorpej 		uvm_wait("pmap_ptpt");
   1438      1.93   thorpej 	}
   1439      1.93   thorpej 
   1440  1.97.4.5        he 	pte = vtopte(va);
   1441      1.93   thorpej 	KDASSERT(pmap_pte_v(pte) == 0);
   1442      1.93   thorpej 
   1443  1.97.4.5        he 	*pte = L2_S_PROTO | VM_PAGE_TO_PHYS(pg) |
   1444  1.97.4.5        he 	     L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
   1445  1.97.4.4        he #ifdef PMAP_ALIAS_DEBUG
   1446  1.97.4.4        he     {
   1447  1.97.4.4        he 	int s = splhigh();
   1448  1.97.4.4        he 	pg->mdpage.krw_mappings++;
   1449  1.97.4.4        he 	splx(s);
   1450  1.97.4.4        he     }
   1451  1.97.4.4        he #endif /* PMAP_ALIAS_DEBUG */
   1452      1.93   thorpej 
   1453  1.97.4.5        he 	return ((void *) va);
   1454      1.93   thorpej }
   1455      1.93   thorpej 
   1456      1.93   thorpej /*
   1457  1.97.4.5        he  * pmap_ptpt_page_free:
   1458      1.93   thorpej  *
   1459  1.97.4.5        he  *	Back-end page free'er for the PT-PT pool.
   1460      1.93   thorpej  */
   1461      1.93   thorpej static void
   1462  1.97.4.5        he pmap_ptpt_page_free(struct pool *pp, void *v)
   1463      1.93   thorpej {
   1464  1.97.4.5        he 	vaddr_t va = (vaddr_t) v;
   1465  1.97.4.5        he 	paddr_t pa;
   1466  1.97.4.5        he 
   1467  1.97.4.5        he 	pa = vtophys(va);
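                            	/*
                            	 * The physical address must be looked up while the mapping is
                            	 * still intact; after the pmap_kremove() below, vtophys() could
                            	 * no longer resolve this VA.
                            	 */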
   1468      1.93   thorpej 
   1469  1.97.4.5        he 	pmap_kremove(va, L2_TABLE_SIZE);
   1470      1.93   thorpej 	pmap_update(pmap_kernel());
   1471      1.93   thorpej 
   1472  1.97.4.5        he 	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
   1473  1.97.4.5        he 
   1474  1.97.4.5        he 	uvm_km_free(kernel_map, va, L2_TABLE_SIZE);
   1475  1.97.4.5        he }
   1476  1.97.4.5        he 
   1477  1.97.4.5        he /*
   1478  1.97.4.5        he  * pmap_ptpt_ctor:
   1479  1.97.4.5        he  *
   1480  1.97.4.5        he  *	Constructor for the PT-PT cache.
   1481  1.97.4.5        he  */
   1482  1.97.4.5        he static int
   1483  1.97.4.5        he pmap_ptpt_ctor(void *arg, void *object, int flags)
   1484  1.97.4.5        he {
   1485  1.97.4.5        he 	caddr_t vptpt = object;
   1486  1.97.4.5        he 
   1487  1.97.4.5        he 	/* Page is already zero'd. */
   1488      1.93   thorpej 
   1489  1.97.4.5        he 	/*
   1490  1.97.4.5        he 	 * Map in kernel PTs.
   1491  1.97.4.5        he 	 *
   1492  1.97.4.5        he 	 * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
   1493  1.97.4.5        he 	 */
   1494  1.97.4.5        he 	memcpy(vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
   1495  1.97.4.5        he 	       (char *)(PTE_BASE + (PTE_BASE >> (PGSHIFT - 2)) +
   1496  1.97.4.5        he 			((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
   1497  1.97.4.5        he 	       (KERNEL_PD_SIZE >> 2));
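                            	/*
                            	 * The ">> 2" scaling works because each 4-byte PT-PT entry
                            	 * maps a 4KB page of PTEs, which is in turn described by 16
                            	 * bytes of L1 (four coarse-table pointers); PT-PT offsets are
                            	 * therefore 1/4 of the corresponding L1 offsets.  The source
                            	 * address is effectively vtopte(PTE_BASE) plus the same kernel
                            	 * offset, i.e. the kernel's own PT-PT entries seen through the
                            	 * linear map.
                            	 */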
   1498  1.97.4.5        he 
   1499  1.97.4.5        he 	return (0);
   1500      1.93   thorpej }
   1501      1.93   thorpej 
   1502      1.93   thorpej /*
   1503       1.1      matt  * Allocate a page directory.
   1504       1.1      matt  * This routine will either allocate a new page directory from the pool
   1505       1.1      matt  * of L1 page tables currently held by the kernel or it will allocate
   1506       1.1      matt  * a new one via pmap_alloc_l1pt().
   1507       1.1      matt  * It will then initialise the l1 page table for use.
   1508       1.1      matt  */
   1509      1.33     chris static int
   1510      1.73   thorpej pmap_allocpagedir(struct pmap *pmap)
   1511       1.1      matt {
   1512  1.97.4.5        he 	vaddr_t vptpt;
   1513       1.2      matt 	paddr_t pa;
   1514       1.1      matt 	struct l1pt *pt;
   1515  1.97.4.5        he 	u_int gen;
   1516       1.1      matt 
   1517       1.1      matt 	PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));
   1518       1.1      matt 
   1519       1.1      matt 	/* Do we have any spare L1's lying around ? */
   1520       1.1      matt 	if (l1pt_static_queue_count) {
   1521       1.1      matt 		--l1pt_static_queue_count;
   1522       1.1      matt 		pt = l1pt_static_queue.sqh_first;
   1523       1.1      matt 		SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
   1524       1.1      matt 	} else if (l1pt_queue_count) {
   1525       1.1      matt 		--l1pt_queue_count;
   1526       1.1      matt 		pt = l1pt_queue.sqh_first;
   1527       1.1      matt 		SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
   1528       1.1      matt 		++l1pt_reuse_count;
   1529       1.1      matt 	} else {
   1530       1.1      matt 		pt = pmap_alloc_l1pt();
   1531       1.1      matt 		if (!pt)
   1532       1.1      matt 			return(ENOMEM);
   1533       1.1      matt 		++l1pt_create_count;
   1534       1.1      matt 	}
   1535       1.1      matt 
   1536       1.1      matt 	/* Store the pointer to the l1 descriptor in the pmap. */
   1537       1.1      matt 	pmap->pm_l1pt = pt;
   1538       1.1      matt 
   1539       1.1      matt 	/* Get the physical address of the start of the l1 */
   1540      1.51     chris 	pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));
   1541       1.1      matt 
   1542       1.1      matt 	/* Store the virtual address of the l1 in the pmap. */
   1543       1.1      matt 	pmap->pm_pdir = (pd_entry_t *)pt->pt_va;
   1544       1.1      matt 
   1545       1.1      matt 	/* Clean the L1 if it is dirty */
   1546  1.97.4.3        he 	if (!(pt->pt_flags & PTFLAG_CLEAN)) {
   1547      1.81   thorpej 		bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));
   1548  1.97.4.3        he 		cpu_dcache_wb_range((vaddr_t) pmap->pm_pdir,
   1549  1.97.4.3        he 		    (L1_TABLE_SIZE - KERNEL_PD_SIZE));
   1550  1.97.4.3        he 	}
   1551       1.1      matt 
   1552       1.1      matt 	/* Allocate a page table to map all the page tables for this pmap */
   1553  1.97.4.5        he 	KASSERT(pmap->pm_vptpt == 0);
   1554  1.97.4.5        he 
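                            	/*
                            	 * The cache may hand back a PT-PT whose kernel entries pre-date
                            	 * a pmap_growkernel() that ran while we slept in
                            	 * pool_cache_get().  The generation number (presumably bumped
                            	 * whenever the cached objects are invalidated) detects that
                            	 * race; on a mismatch the stale object is destroyed and we
                            	 * start over.
                            	 */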
   1555  1.97.4.5        he  try_again:
   1556  1.97.4.5        he 	gen = pmap_ptpt_cache_generation;
   1557  1.97.4.5        he 	vptpt = (vaddr_t) pool_cache_get(&pmap_ptpt_cache, PR_WAITOK);
   1558  1.97.4.5        he 	if (vptpt == NULL) {
   1559  1.97.4.5        he 		PDEBUG(0, printf("pmap_alloc_pagedir: no KVA for PTPT\n"));
   1560      1.93   thorpej 		pmap_freepagedir(pmap);
   1561  1.97.4.5        he 		return (ENOMEM);
   1562       1.5    toshii 	}
   1563       1.5    toshii 
   1564      1.93   thorpej 	/* need to lock this all up for growkernel */
   1565      1.48     chris 	simple_lock(&pmaps_lock);
   1566      1.48     chris 
   1567  1.97.4.5        he 	if (gen != pmap_ptpt_cache_generation) {
   1568  1.97.4.5        he 		simple_unlock(&pmaps_lock);
   1569  1.97.4.5        he 		pool_cache_destruct_object(&pmap_ptpt_cache, (void *) vptpt);
   1570  1.97.4.5        he 		goto try_again;
   1571  1.97.4.5        he 	}
   1572  1.97.4.5        he 
   1573  1.97.4.5        he 	pmap->pm_vptpt = vptpt;
   1574  1.97.4.5        he 	pmap->pm_pptpt = vtophys(vptpt);
   1575  1.97.4.5        he 
   1576      1.64   thorpej 	/* Duplicate the kernel mappings. */
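                            	/*
                            	 * (The top KERNEL_PD_SIZE bytes of every L1 hold the kernel's
                            	 * descriptors, so the kernel appears at the same addresses in
                            	 * every address space.)
                            	 */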
   1577      1.81   thorpej 	bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
   1578      1.81   thorpej 		(char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
   1579      1.48     chris 		KERNEL_PD_SIZE);
   1580  1.97.4.3        he 	cpu_dcache_wb_range((vaddr_t)pmap->pm_pdir +
   1581  1.97.4.3        he 	    (L1_TABLE_SIZE - KERNEL_PD_SIZE), KERNEL_PD_SIZE);
   1582      1.48     chris 
   1583       1.1      matt 	/* Wire in this page table */
   1584      1.53   thorpej 	pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, TRUE);
   1585       1.1      matt 
   1586       1.1      matt 	pt->pt_flags &= ~PTFLAG_CLEAN;	/* L1 is dirty now */
   1587  1.97.4.3        he 
   1588      1.48     chris 	LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
   1589      1.48     chris 	simple_unlock(&pmaps_lock);
   1590      1.48     chris 
   1591       1.1      matt 	return(0);
   1592       1.1      matt }
   1593       1.1      matt 
   1594       1.1      matt 
   1595       1.1      matt /*
   1596       1.1      matt  * Initialize a preallocated and zeroed pmap structure,
   1597       1.1      matt  * such as one in a vmspace structure.
   1598       1.1      matt  */
   1599       1.1      matt 
   1600       1.1      matt void
   1601      1.73   thorpej pmap_pinit(struct pmap *pmap)
   1602       1.1      matt {
   1603      1.26  rearnsha 	int backoff = 6;
   1604      1.26  rearnsha 	int retry = 10;
   1605      1.26  rearnsha 
   1606       1.1      matt 	PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));
   1607       1.1      matt 
   1608       1.1      matt 	/* Keep looping until we succeed in allocating a page directory */
   1609       1.1      matt 	while (pmap_allocpagedir(pmap) != 0) {
   1610       1.1      matt 		/*
   1611       1.1      matt 		 * Ok we failed to allocate a suitable block of memory for an
   1612       1.1      matt 		 * L1 page table. This means that either:
   1613       1.1      matt 		 * 1. 16KB of virtual address space could not be allocated
   1614       1.1      matt 		 * 2. 16KB of physically contiguous memory on a 16KB boundary
   1615       1.1      matt 		 *    could not be allocated.
   1616       1.1      matt 		 *
   1617       1.1      matt 		 * Since we cannot fail we will sleep for a while and try
   1618      1.17     chris 		 * again.
   1619      1.26  rearnsha 		 *
   1620      1.26  rearnsha 		 * Searching for a suitable L1 PT is expensive:
   1621      1.26  rearnsha 		 * to avoid hogging the system when memory is really
   1622      1.26  rearnsha 		 * scarce, use an exponential back-off so that
   1623      1.26  rearnsha 		 * eventually we won't retry more than once every 8
   1624      1.26  rearnsha 		 * seconds.  This should allow other processes to run
   1625      1.26  rearnsha 		 * to completion and free up resources.
   1626       1.1      matt 		 */
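                            		/*
                            		 * Concretely: backoff starts at 6, so the first sleep
                            		 * lasts (hz << 3) >> 6 == hz/8 ticks; every 10 failed
                            		 * attempts the timeout doubles, topping out at hz << 3,
                            		 * i.e. 8 seconds.
                            		 */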
   1627      1.26  rearnsha 		(void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
   1628      1.26  rearnsha 		    NULL);
   1629      1.26  rearnsha 		if (--retry == 0) {
   1630      1.26  rearnsha 			retry = 10;
   1631      1.26  rearnsha 			if (backoff)
   1632      1.26  rearnsha 				--backoff;
   1633      1.26  rearnsha 		}
   1634       1.1      matt 	}
   1635       1.1      matt 
   1636      1.76   thorpej 	if (vector_page < KERNEL_BASE) {
   1637      1.76   thorpej 		/*
   1638      1.76   thorpej 		 * Map the vector page.  This will also allocate and map
   1639      1.76   thorpej 		 * an L2 table for it.
   1640      1.76   thorpej 		 */
   1641      1.76   thorpej 		pmap_enter(pmap, vector_page, systempage.pv_pa,
   1642      1.76   thorpej 		    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
   1643      1.76   thorpej 		pmap_update(pmap);
   1644      1.76   thorpej 	}
   1645       1.1      matt }
   1646       1.1      matt 
   1647       1.1      matt void
   1648      1.73   thorpej pmap_freepagedir(struct pmap *pmap)
   1649       1.1      matt {
   1650       1.1      matt 	/* Free the memory used for the page table mapping */
   1651  1.97.4.5        he 	if (pmap->pm_vptpt != 0) {
   1652  1.97.4.5        he 		/*
   1653  1.97.4.5        he 		 * XXX Objects freed to a pool cache must be in constructed
   1654  1.97.4.5        he 		 * XXX form when freed, but we don't free page tables as we
   1655  1.97.4.5        he 		 * XXX go, so we need to zap the mappings here.
   1656  1.97.4.5        he 		 *
   1657  1.97.4.5        he 		 * XXX THIS IS CURRENTLY DONE AS UNCACHED MEMORY ACCESS.
   1658  1.97.4.5        he 		 */
   1659  1.97.4.5        he 		memset((caddr_t) pmap->pm_vptpt, 0,
   1660  1.97.4.5        he 		       ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2));
   1661  1.97.4.5        he 		pool_cache_put(&pmap_ptpt_cache, (void *) pmap->pm_vptpt);
   1662  1.97.4.5        he 	}
   1663       1.1      matt 
   1664       1.1      matt 	/* junk the L1 page table */
   1665       1.1      matt 	if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
   1666       1.1      matt 		/* Add the page table to the queue */
   1667  1.97.4.5        he 		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue,
   1668  1.97.4.5        he 				    pmap->pm_l1pt, pt_queue);
   1669       1.1      matt 		++l1pt_static_queue_count;
   1670       1.1      matt 	} else if (l1pt_queue_count < 8) {
   1671       1.1      matt 		/* Add the page table to the queue */
   1672       1.1      matt 		SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
   1673       1.1      matt 		++l1pt_queue_count;
   1674       1.1      matt 	} else
   1675       1.1      matt 		pmap_free_l1pt(pmap->pm_l1pt);
   1676       1.1      matt }
   1677       1.1      matt 
   1678       1.1      matt /*
   1679       1.1      matt  * Retire the given physical map from service.
   1680       1.1      matt  * Should only be called if the map contains no valid mappings.
   1681       1.1      matt  */
   1682       1.1      matt 
   1683       1.1      matt void
   1684      1.73   thorpej pmap_destroy(struct pmap *pmap)
   1685       1.1      matt {
   1686      1.17     chris 	struct vm_page *page;
   1687       1.1      matt 	int count;
   1688       1.1      matt 
   1689       1.1      matt 	if (pmap == NULL)
   1690       1.1      matt 		return;
   1691       1.1      matt 
   1692       1.1      matt 	PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));
   1693      1.17     chris 
   1694      1.17     chris 	/*
   1695      1.17     chris 	 * Drop reference count
   1696      1.17     chris 	 */
   1697      1.17     chris 	simple_lock(&pmap->pm_obj.vmobjlock);
   1698      1.16     chris 	count = --pmap->pm_obj.uo_refs;
   1699      1.17     chris 	simple_unlock(&pmap->pm_obj.vmobjlock);
   1700      1.17     chris 	if (count > 0) {
   1701      1.17     chris 		return;
   1702       1.1      matt 	}
   1703       1.1      matt 
   1704      1.17     chris 	/*
   1705      1.17     chris 	 * reference count is zero, free pmap resources and then free pmap.
   1706      1.17     chris 	 */
   1707      1.48     chris 
   1708      1.48     chris 	/*
   1709      1.48     chris 	 * remove it from global list of pmaps
   1710      1.48     chris 	 */
   1711      1.48     chris 
   1712      1.48     chris 	simple_lock(&pmaps_lock);
   1713      1.48     chris 	LIST_REMOVE(pmap, pm_list);
   1714      1.48     chris 	simple_unlock(&pmaps_lock);
   1715      1.17     chris 
   1716      1.77   thorpej 	if (vector_page < KERNEL_BASE) {
   1717      1.77   thorpej 		/* Remove the vector page mapping */
   1718      1.77   thorpej 		pmap_remove(pmap, vector_page, vector_page + NBPG);
   1719      1.77   thorpej 		pmap_update(pmap);
   1720      1.77   thorpej 	}
   1721       1.1      matt 
   1722       1.1      matt 	/*
    1723       1.1      matt 	 * Free any page tables still mapped.
    1724       1.1      matt 	 * This is only temporary until pmap_enter can count the number
    1725       1.1      matt 	 * of mappings made in a page table. Then pmap_remove() can
    1726       1.1      matt 	 * reduce the count and free the pagetable when the count
    1727      1.16     chris 	 * reaches zero.  Note that entries in this list should match the
    1728      1.16     chris 	 * contents of the ptpt, however this is faster than walking 1024
    1729      1.16     chris 	 * entries looking for pt's.
    1730      1.16     chris 	 * (Taken from i386 pmap.c.)
   1731       1.1      matt 	 */
   1732      1.97     chris 	/*
   1733      1.97     chris 	 * vmobjlock must be held while freeing pages
   1734      1.97     chris 	 */
   1735      1.97     chris 	simple_lock(&pmap->pm_obj.vmobjlock);
   1736      1.51     chris 	while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
   1737      1.51     chris 		KASSERT((page->flags & PG_BUSY) == 0);
   1738      1.16     chris 		page->wire_count = 0;
   1739      1.16     chris 		uvm_pagefree(page);
   1740       1.1      matt 	}
   1741      1.97     chris 	simple_unlock(&pmap->pm_obj.vmobjlock);
   1742  1.97.4.5        he 
   1743       1.1      matt 	/* Free the page dir */
   1744       1.1      matt 	pmap_freepagedir(pmap);
   1745  1.97.4.5        he 
   1746      1.17     chris 	/* return the pmap to the pool */
   1747      1.17     chris 	pool_put(&pmap_pmap_pool, pmap);
   1748       1.1      matt }
   1749       1.1      matt 
   1750       1.1      matt 
   1751       1.1      matt /*
   1752      1.15     chris  * void pmap_reference(struct pmap *pmap)
   1753       1.1      matt  *
   1754       1.1      matt  * Add a reference to the specified pmap.
   1755       1.1      matt  */
   1756       1.1      matt 
   1757       1.1      matt void
   1758      1.73   thorpej pmap_reference(struct pmap *pmap)
   1759       1.1      matt {
   1760       1.1      matt 	if (pmap == NULL)
   1761       1.1      matt 		return;
   1762       1.1      matt 
   1763       1.1      matt 	simple_lock(&pmap->pm_lock);
   1764      1.16     chris 	pmap->pm_obj.uo_refs++;
   1765       1.1      matt 	simple_unlock(&pmap->pm_lock);
   1766       1.1      matt }
   1767       1.1      matt 
   1768       1.1      matt /*
   1769       1.1      matt  * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1770       1.1      matt  *
   1771       1.1      matt  * Return the start and end addresses of the kernel's virtual space.
   1772       1.1      matt  * These values are setup in pmap_bootstrap and are updated as pages
   1773       1.1      matt  * are allocated.
   1774       1.1      matt  */
   1775       1.1      matt 
   1776       1.1      matt void
   1777      1.73   thorpej pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1778       1.1      matt {
   1779      1.54   thorpej 	*start = virtual_avail;
   1780       1.1      matt 	*end = virtual_end;
   1781       1.1      matt }
   1782       1.1      matt 
   1783       1.1      matt /*
   1784       1.1      matt  * Activate the address space for the specified process.  If the process
   1785       1.1      matt  * is the current process, load the new MMU context.
   1786       1.1      matt  */
   1787       1.1      matt void
   1788      1.73   thorpej pmap_activate(struct proc *p)
   1789       1.1      matt {
   1790      1.15     chris 	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
   1791       1.1      matt 	struct pcb *pcb = &p->p_addr->u_pcb;
   1792       1.1      matt 
   1793      1.15     chris 	(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
   1794       1.1      matt 	    (paddr_t *)&pcb->pcb_pagedir);
   1795       1.1      matt 
   1796       1.1      matt 	PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
   1797       1.1      matt 	    p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));
   1798       1.1      matt 
   1799       1.1      matt 	if (p == curproc) {
   1800       1.1      matt 		PDEBUG(0, printf("pmap_activate: setting TTB\n"));
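                            		/*
                            		 * setttb() loads the new L1 into the MMU's translation
                            		 * table base register; on arm32 this typically entails
                            		 * a cache clean and TLB flush as well, which is why it
                            		 * is only done when the hardware context actually
                            		 * changes.
                            		 */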
   1801       1.1      matt 		setttb((u_int)pcb->pcb_pagedir);
   1802       1.1      matt 	}
   1803       1.1      matt }
   1804       1.1      matt 
   1805       1.1      matt /*
   1806       1.1      matt  * Deactivate the address space of the specified process.
   1807       1.1      matt  */
   1808       1.1      matt void
   1809      1.73   thorpej pmap_deactivate(struct proc *p)
   1810       1.1      matt {
   1811       1.1      matt }
   1812       1.1      matt 
   1813      1.31   thorpej /*
   1814      1.31   thorpej  * Perform any deferred pmap operations.
   1815      1.31   thorpej  */
   1816      1.31   thorpej void
   1817      1.31   thorpej pmap_update(struct pmap *pmap)
   1818      1.31   thorpej {
   1819      1.31   thorpej 
   1820      1.31   thorpej 	/*
   1821      1.31   thorpej 	 * We haven't deferred any pmap operations, but we do need to
   1822      1.31   thorpej 	 * make sure TLB/cache operations have completed.
   1823      1.31   thorpej 	 */
   1824      1.31   thorpej 	cpu_cpwait();
   1825      1.31   thorpej }
   1826       1.1      matt 
   1827       1.1      matt /*
   1828       1.1      matt  * pmap_clean_page()
   1829       1.1      matt  *
   1830       1.1      matt  * This is a local function used to work out the best strategy to clean
   1831       1.1      matt  * a single page referenced by its entry in the PV table. It's used by
    1832       1.1      matt  * pmap_copy_page, pmap_zero_page and maybe some others later on.
   1833       1.1      matt  *
   1834       1.1      matt  * Its policy is effectively:
   1835       1.1      matt  *  o If there are no mappings, we don't bother doing anything with the cache.
   1836       1.1      matt  *  o If there is one mapping, we clean just that page.
   1837       1.1      matt  *  o If there are multiple mappings, we clean the entire cache.
   1838       1.1      matt  *
   1839       1.1      matt  * So that some functions can be further optimised, it returns 0 if it didn't
   1840       1.1      matt  * clean the entire cache, or 1 if it did.
   1841       1.1      matt  *
   1842       1.1      matt  * XXX One bug in this routine is that if the pv_entry has a single page
    1843       1.1      matt  * just the 1 page.  This should not occur in everyday use, and if it
    1844       1.1      matt  * does, it merely results in a less than optimal clean of the page.
   1845       1.1      matt  * it will just result in not the most efficient clean for the page.
   1846       1.1      matt  */
   1847       1.1      matt static int
   1848      1.73   thorpej pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
   1849       1.1      matt {
   1850      1.17     chris 	struct pmap *pmap;
   1851      1.17     chris 	struct pv_entry *npv;
   1852       1.1      matt 	int cache_needs_cleaning = 0;
   1853       1.1      matt 	vaddr_t page_to_clean = 0;
   1854       1.1      matt 
   1855      1.17     chris 	if (pv == NULL)
   1856      1.17     chris 		/* nothing mapped in so nothing to flush */
   1857      1.17     chris 		return (0);
   1858      1.17     chris 
   1859      1.17     chris 	/* Since we flush the cache each time we change curproc, we
   1860      1.17     chris 	 * only need to flush the page if it is in the current pmap.
   1861      1.17     chris 	 */
   1862      1.17     chris 	if (curproc)
   1863      1.17     chris 		pmap = curproc->p_vmspace->vm_map.pmap;
   1864      1.17     chris 	else
   1865      1.17     chris 		pmap = pmap_kernel();
   1866      1.17     chris 
   1867      1.17     chris 	for (npv = pv; npv; npv = npv->pv_next) {
   1868      1.17     chris 		if (npv->pv_pmap == pmap) {
   1869      1.17     chris 			/* The page is mapped non-cacheable in
   1870      1.17     chris 			 * this map.  No need to flush the cache.
   1871      1.17     chris 			 */
   1872      1.78   thorpej 			if (npv->pv_flags & PVF_NC) {
   1873      1.17     chris #ifdef DIAGNOSTIC
   1874      1.17     chris 				if (cache_needs_cleaning)
   1875      1.17     chris 					panic("pmap_clean_page: "
   1876      1.17     chris 							"cache inconsistency");
   1877      1.17     chris #endif
   1878      1.17     chris 				break;
   1879      1.17     chris 			}
   1880      1.17     chris #if 0
   1881      1.96   thorpej 			/*
   1882      1.96   thorpej 			 * XXX Can't do this because pmap_protect doesn't
   1883      1.96   thorpej 			 * XXX clean the page when it does a write-protect.
   1884      1.96   thorpej 			 */
   1885      1.96   thorpej 			else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
   1886      1.17     chris 				continue;
   1887      1.17     chris #endif
   1888      1.17     chris 			if (cache_needs_cleaning){
   1889      1.17     chris 				page_to_clean = 0;
   1890      1.17     chris 				break;
   1891      1.17     chris 			}
   1892      1.17     chris 			else
   1893      1.17     chris 				page_to_clean = npv->pv_va;
   1894      1.17     chris 			cache_needs_cleaning = 1;
   1895      1.17     chris 		}
   1896       1.1      matt 	}
   1897       1.1      matt 
   1898       1.1      matt 	if (page_to_clean)
   1899      1.36   thorpej 		cpu_idcache_wbinv_range(page_to_clean, NBPG);
   1900       1.1      matt 	else if (cache_needs_cleaning) {
   1901      1.36   thorpej 		cpu_idcache_wbinv_all();
   1902       1.1      matt 		return (1);
   1903       1.1      matt 	}
   1904       1.1      matt 	return (0);
   1905       1.1      matt }
   1906       1.1      matt 
   1907       1.1      matt /*
   1908       1.1      matt  * pmap_zero_page()
   1909       1.1      matt  *
   1910       1.1      matt  * Zero a given physical page by mapping it at a page hook point.
    1911       1.1      matt  * In doing the zero page op, the page we zero is mapped cacheable, since
    1912       1.1      matt  * on StrongARM accesses to non-cached pages are non-burst, making writing
    1913       1.1      matt  * _any_ bulk data very slow.
   1914       1.1      matt  */
   1915      1.88   thorpej #if ARM_MMU_GENERIC == 1
   1916       1.1      matt void
   1917      1.88   thorpej pmap_zero_page_generic(paddr_t phys)
   1918       1.1      matt {
   1919      1.71   thorpej #ifdef DEBUG
   1920      1.71   thorpej 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
   1921      1.71   thorpej 
   1922      1.71   thorpej 	if (pg->mdpage.pvh_list != NULL)
   1923      1.71   thorpej 		panic("pmap_zero_page: page has mappings");
   1924      1.71   thorpej #endif
   1925       1.1      matt 
   1926      1.79   thorpej 	KDASSERT((phys & PGOFSET) == 0);
   1927      1.79   thorpej 
   1928       1.1      matt 	/*
   1929       1.1      matt 	 * Hook in the page, zero it, and purge the cache for that
   1930       1.1      matt 	 * zeroed page. Invalidate the TLB as needed.
   1931       1.1      matt 	 */
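                            	/*
                            	 * (cdstp is the scratch VA reserved in pmap_bootstrap(), and
                            	 * cdst_pte the pre-looked-up PTE slot that maps it.)
                            	 */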
   1932      1.83   thorpej 	*cdst_pte = L2_S_PROTO | phys |
   1933      1.86   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   1934      1.54   thorpej 	cpu_tlb_flushD_SE(cdstp);
   1935      1.32   thorpej 	cpu_cpwait();
   1936      1.54   thorpej 	bzero_page(cdstp);
   1937      1.54   thorpej 	cpu_dcache_wbinv_range(cdstp, NBPG);
   1938       1.1      matt }
   1939      1.88   thorpej #endif /* ARM_MMU_GENERIC == 1 */
   1940      1.88   thorpej 
   1941      1.88   thorpej #if ARM_MMU_XSCALE == 1
   1942      1.88   thorpej void
   1943      1.88   thorpej pmap_zero_page_xscale(paddr_t phys)
   1944      1.88   thorpej {
   1945      1.88   thorpej #ifdef DEBUG
   1946      1.88   thorpej 	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);
   1947      1.88   thorpej 
   1948      1.88   thorpej 	if (pg->mdpage.pvh_list != NULL)
   1949      1.88   thorpej 		panic("pmap_zero_page: page has mappings");
   1950      1.88   thorpej #endif
   1951      1.88   thorpej 
   1952      1.88   thorpej 	KDASSERT((phys & PGOFSET) == 0);
   1953      1.88   thorpej 
   1954      1.88   thorpej 	/*
   1955      1.88   thorpej 	 * Hook in the page, zero it, and purge the cache for that
   1956      1.88   thorpej 	 * zeroed page. Invalidate the TLB as needed.
   1957      1.88   thorpej 	 */
   1958      1.88   thorpej 	*cdst_pte = L2_S_PROTO | phys |
   1959      1.88   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
   1960      1.88   thorpej 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   1961      1.88   thorpej 	cpu_tlb_flushD_SE(cdstp);
   1962      1.88   thorpej 	cpu_cpwait();
   1963      1.88   thorpej 	bzero_page(cdstp);
   1964      1.88   thorpej 	xscale_cache_clean_minidata();
   1965      1.88   thorpej }
   1966      1.88   thorpej #endif /* ARM_MMU_XSCALE == 1 */
   1967       1.1      matt 
   1968      1.17     chris /* pmap_pageidlezero()
   1969      1.17     chris  *
   1970      1.17     chris  * The same as above, except that we assume that the page is not
   1971      1.17     chris  * mapped.  This means we never have to flush the cache first.  Called
   1972      1.17     chris  * from the idle loop.
   1973      1.17     chris  */
   1974      1.17     chris boolean_t
   1975      1.73   thorpej pmap_pageidlezero(paddr_t phys)
   1976      1.17     chris {
   1977      1.17     chris 	int i, *ptr;
   1978      1.17     chris 	boolean_t rv = TRUE;
   1979      1.71   thorpej #ifdef DEBUG
   1980      1.49   thorpej 	struct vm_page *pg;
   1981      1.17     chris 
   1982      1.49   thorpej 	pg = PHYS_TO_VM_PAGE(phys);
   1983      1.49   thorpej 	if (pg->mdpage.pvh_list != NULL)
   1984      1.71   thorpej 		panic("pmap_pageidlezero: page has mappings");
   1985      1.17     chris #endif
   1986      1.79   thorpej 
   1987      1.79   thorpej 	KDASSERT((phys & PGOFSET) == 0);
   1988      1.79   thorpej 
   1989      1.17     chris 	/*
   1990      1.17     chris 	 * Hook in the page, zero it, and purge the cache for that
   1991      1.17     chris 	 * zeroed page. Invalidate the TLB as needed.
   1992      1.17     chris 	 */
   1993      1.83   thorpej 	*cdst_pte = L2_S_PROTO | phys |
   1994      1.86   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   1995      1.54   thorpej 	cpu_tlb_flushD_SE(cdstp);
   1996      1.32   thorpej 	cpu_cpwait();
   1997      1.32   thorpej 
   1998      1.54   thorpej 	for (i = 0, ptr = (int *)cdstp;
   1999      1.17     chris 			i < (NBPG / sizeof(int)); i++) {
   2000      1.17     chris 		if (sched_whichqs != 0) {
   2001      1.17     chris 			/*
   2002      1.17     chris 			 * A process has become ready.  Abort now,
   2003      1.17     chris 			 * so we don't keep it waiting while we
   2004      1.17     chris 			 * do slow memory access to finish this
   2005      1.17     chris 			 * page.
   2006      1.17     chris 			 */
   2007      1.17     chris 			rv = FALSE;
   2008      1.17     chris 			break;
   2009      1.17     chris 		}
   2010      1.17     chris 		*ptr++ = 0;
   2011      1.17     chris 	}
   2012      1.17     chris 
   2013      1.17     chris 	if (rv)
   2014      1.17     chris 		/*
   2015      1.17     chris 		 * if we aborted we'll rezero this page again later so don't
   2016      1.17     chris 		 * purge it unless we finished it
   2017      1.17     chris 		 */
   2018      1.54   thorpej 		cpu_dcache_wbinv_range(cdstp, NBPG);
   2019      1.17     chris 	return (rv);
   2020      1.17     chris }
   2021      1.17     chris 
   2022       1.1      matt /*
   2023       1.1      matt  * pmap_copy_page()
   2024       1.1      matt  *
   2025       1.1      matt  * Copy one physical page into another, by mapping the pages into
    2026       1.1      matt  * hook points. The same comment regarding cacheability as in
   2027       1.1      matt  * pmap_zero_page also applies here.
   2028       1.1      matt  */
   2029      1.88   thorpej #if ARM_MMU_GENERIC == 1
   2030       1.1      matt void
   2031      1.88   thorpej pmap_copy_page_generic(paddr_t src, paddr_t dst)
   2032       1.1      matt {
   2033      1.71   thorpej 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
   2034      1.71   thorpej #ifdef DEBUG
   2035      1.71   thorpej 	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
   2036      1.71   thorpej 
   2037      1.71   thorpej 	if (dst_pg->mdpage.pvh_list != NULL)
   2038      1.71   thorpej 		panic("pmap_copy_page: dst page has mappings");
   2039      1.71   thorpej #endif
   2040      1.71   thorpej 
   2041      1.79   thorpej 	KDASSERT((src & PGOFSET) == 0);
   2042      1.79   thorpej 	KDASSERT((dst & PGOFSET) == 0);
   2043      1.79   thorpej 
   2044      1.71   thorpej 	/*
   2045      1.71   thorpej 	 * Clean the source page.  Hold the source page's lock for
   2046      1.71   thorpej 	 * the duration of the copy so that no other mappings can
   2047      1.71   thorpej 	 * be created while we have a potentially aliased mapping.
   2048      1.71   thorpej 	 */
   2049      1.49   thorpej 	simple_lock(&src_pg->mdpage.pvh_slock);
   2050      1.71   thorpej 	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
   2051       1.1      matt 
   2052       1.1      matt 	/*
   2053       1.1      matt 	 * Map the pages into the page hook points, copy them, and purge
   2054       1.1      matt 	 * the cache for the appropriate page. Invalidate the TLB
   2055       1.1      matt 	 * as required.
   2056       1.1      matt 	 */
   2057      1.83   thorpej 	*csrc_pte = L2_S_PROTO | src |
   2058      1.86   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
   2059      1.83   thorpej 	*cdst_pte = L2_S_PROTO | dst |
   2060      1.86   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
   2061      1.54   thorpej 	cpu_tlb_flushD_SE(csrcp);
   2062      1.54   thorpej 	cpu_tlb_flushD_SE(cdstp);
   2063      1.32   thorpej 	cpu_cpwait();
   2064      1.54   thorpej 	bcopy_page(csrcp, cdstp);
   2065      1.65     chris 	cpu_dcache_inv_range(csrcp, NBPG);
   2066      1.71   thorpej 	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
   2067      1.54   thorpej 	cpu_dcache_wbinv_range(cdstp, NBPG);
   2068       1.1      matt }
   2069      1.88   thorpej #endif /* ARM_MMU_GENERIC == 1 */
   2070      1.88   thorpej 
   2071      1.88   thorpej #if ARM_MMU_XSCALE == 1
   2072      1.88   thorpej void
   2073      1.88   thorpej pmap_copy_page_xscale(paddr_t src, paddr_t dst)
   2074      1.88   thorpej {
   2075      1.88   thorpej 	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
   2076      1.88   thorpej #ifdef DEBUG
   2077      1.88   thorpej 	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);
   2078      1.88   thorpej 
   2079      1.88   thorpej 	if (dst_pg->mdpage.pvh_list != NULL)
   2080      1.88   thorpej 		panic("pmap_copy_page: dst page has mappings");
   2081      1.88   thorpej #endif
   2082      1.88   thorpej 
   2083      1.88   thorpej 	KDASSERT((src & PGOFSET) == 0);
   2084      1.88   thorpej 	KDASSERT((dst & PGOFSET) == 0);
   2085      1.88   thorpej 
   2086      1.88   thorpej 	/*
   2087      1.88   thorpej 	 * Clean the source page.  Hold the source page's lock for
   2088      1.88   thorpej 	 * the duration of the copy so that no other mappings can
   2089      1.88   thorpej 	 * be created while we have a potentially aliased mapping.
   2090      1.88   thorpej 	 */
   2091      1.88   thorpej 	simple_lock(&src_pg->mdpage.pvh_slock);
   2092      1.88   thorpej 	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);
   2093      1.88   thorpej 
   2094      1.88   thorpej 	/*
   2095      1.88   thorpej 	 * Map the pages into the page hook points, copy them, and purge
   2096      1.88   thorpej 	 * the cache for the appropriate page. Invalidate the TLB
   2097      1.88   thorpej 	 * as required.
   2098      1.88   thorpej 	 */
   2099      1.88   thorpej 	*csrc_pte = L2_S_PROTO | src |
   2100      1.89   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
   2101      1.89   thorpej 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   2102      1.88   thorpej 	*cdst_pte = L2_S_PROTO | dst |
   2103      1.88   thorpej 	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
   2104      1.88   thorpej 	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
   2105      1.88   thorpej 	cpu_tlb_flushD_SE(csrcp);
   2106      1.88   thorpej 	cpu_tlb_flushD_SE(cdstp);
   2107      1.88   thorpej 	cpu_cpwait();
   2108      1.88   thorpej 	bcopy_page(csrcp, cdstp);
   2109      1.88   thorpej 	simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */
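                             	/*
                             	 * The copy above went through the XScale mini-data cache
                             	 * (selected by the TEX bits in the hook PTEs), so clean it
                             	 * to push the destination data out to memory.
                             	 */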
   2110      1.88   thorpej 	xscale_cache_clean_minidata();
   2111      1.88   thorpej }
   2112      1.88   thorpej #endif /* ARM_MMU_XSCALE == 1 */
   2113       1.1      matt 
   2114       1.1      matt #if 0
   2115       1.1      matt void
   2116      1.73   thorpej pmap_pte_addref(struct pmap *pmap, vaddr_t va)
   2117       1.1      matt {
   2118       1.1      matt 	pd_entry_t *pde;
   2119       1.2      matt 	paddr_t pa;
   2120       1.1      matt 	struct vm_page *m;
   2121       1.1      matt 
   2122       1.1      matt 	if (pmap == pmap_kernel())
   2123       1.1      matt 		return;
   2124       1.1      matt 
   2125      1.81   thorpej 	pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
   2126       1.1      matt 	pa = pmap_pte_pa(pde);
   2127       1.1      matt 	m = PHYS_TO_VM_PAGE(pa);
   2128       1.1      matt 	++m->wire_count;
   2129       1.1      matt #ifdef MYCROFT_HACK
   2130       1.1      matt 	printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   2131       1.1      matt 	    pmap, va, pde, pa, m, m->wire_count);
   2132       1.1      matt #endif
   2133       1.1      matt }
   2134       1.1      matt 
   2135       1.1      matt void
   2136      1.73   thorpej pmap_pte_delref(struct pmap *pmap, vaddr_t va)
   2137       1.1      matt {
   2138       1.1      matt 	pd_entry_t *pde;
   2139       1.2      matt 	paddr_t pa;
   2140       1.1      matt 	struct vm_page *m;
   2141       1.1      matt 
   2142       1.1      matt 	if (pmap == pmap_kernel())
   2143       1.1      matt 		return;
   2144       1.1      matt 
   2145      1.81   thorpej 	pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT));
   2146       1.1      matt 	pa = pmap_pte_pa(pde);
   2147       1.1      matt 	m = PHYS_TO_VM_PAGE(pa);
   2148       1.1      matt 	--m->wire_count;
   2149       1.1      matt #ifdef MYCROFT_HACK
   2150       1.1      matt 	printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n",
   2151       1.1      matt 	    pmap, va, pde, pa, m, m->wire_count);
   2152       1.1      matt #endif
   2153       1.1      matt 	if (m->wire_count == 0) {
   2154       1.1      matt #ifdef MYCROFT_HACK
   2155       1.1      matt 		printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n",
   2156       1.1      matt 		    pmap, va, pde, pa, m);
   2157       1.1      matt #endif
   2158       1.1      matt 		pmap_unmap_in_l1(pmap, va);
   2159       1.1      matt 		uvm_pagefree(m);
   2160       1.1      matt 		--pmap->pm_stats.resident_count;
   2161       1.1      matt 	}
   2162       1.1      matt }
   2163       1.1      matt #else
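                             /*
                              * The PT-page reference counting above is disabled; these stubs
                              * expand to nothing, so page table pages are never freed this way.
                              */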
   2164       1.1      matt #define	pmap_pte_addref(pmap, va)
   2165       1.1      matt #define	pmap_pte_delref(pmap, va)
   2166       1.1      matt #endif
   2167       1.1      matt 
   2168       1.1      matt /*
   2169       1.1      matt  * Since we have a virtually indexed cache, we may need to inhibit caching if
   2170       1.1      matt  * there is more than one mapping and at least one of them is writable.
   2171       1.1      matt  * Since we purge the cache on every context switch, we only need to check for
   2172       1.1      matt  * other mappings within the same pmap, or kernel_pmap.
    2173       1.1      matt  * This function is also called when a page is unmapped, to possibly re-enable
   2174       1.1      matt  * caching on any remaining mappings.
   2175      1.28  rearnsha  *
   2176      1.28  rearnsha  * The code implements the following logic, where:
   2177      1.28  rearnsha  *
   2178      1.28  rearnsha  * KW = # of kernel read/write pages
   2179      1.28  rearnsha  * KR = # of kernel read only pages
   2180      1.28  rearnsha  * UW = # of user read/write pages
   2181      1.28  rearnsha  * UR = # of user read only pages
   2182      1.28  rearnsha  * OW = # of user read/write pages in another pmap, then
   2183      1.28  rearnsha  *
   2184      1.28  rearnsha  * KC = kernel mapping is cacheable
   2185      1.28  rearnsha  * UC = user mapping is cacheable
   2186      1.28  rearnsha  *
   2187      1.28  rearnsha  *                     KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
   2188      1.28  rearnsha  *                   +---------------------------------------------
   2189      1.28  rearnsha  * UW=0,UR=0,OW=0    | ---        KC=1       KC=1       KC=0
   2190      1.28  rearnsha  * UW=0,UR>0,OW=0    | UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
   2191      1.28  rearnsha  * UW=0,UR>0,OW>0    | UC=1       KC=0,UC=1  KC=0,UC=0  KC=0,UC=0
   2192      1.28  rearnsha  * UW=1,UR=0,OW=0    | UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
   2193      1.28  rearnsha  * UW>1,UR>=0,OW>=0  | UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
   2194      1.11     chris  *
    2195      1.11     chris  * Note that the pmap must have its PTEs mapped in, passed via the ptes argument.
   2196       1.1      matt  */
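                             /*
                              * For example, a page with one kernel read/write mapping (KW=1,
                              * KR=0) and one user read-only mapping (UW=0, UR>0, OW=0) falls
                              * in the KW=1,KR=0 column of the UW=0,UR>0,OW=0 row above: both
                              * the kernel and the user mapping must be made non-cacheable
                              * (KC=0, UC=0).
                              */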
   2197      1.25  rearnsha __inline static void
   2198      1.49   thorpej pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2199      1.12     chris 	boolean_t clear_cache)
   2200       1.1      matt {
   2201      1.25  rearnsha 	if (pmap == pmap_kernel())
   2202      1.49   thorpej 		pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache);
   2203      1.25  rearnsha 	else
   2204      1.49   thorpej 		pmap_vac_me_user(pmap, pg, ptes, clear_cache);
   2205      1.25  rearnsha }
   2206      1.25  rearnsha 
   2207      1.25  rearnsha static void
   2208      1.49   thorpej pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2209      1.25  rearnsha 	boolean_t clear_cache)
   2210      1.25  rearnsha {
   2211      1.25  rearnsha 	int user_entries = 0;
   2212      1.25  rearnsha 	int user_writable = 0;
   2213      1.25  rearnsha 	int user_cacheable = 0;
   2214      1.25  rearnsha 	int kernel_entries = 0;
   2215      1.25  rearnsha 	int kernel_writable = 0;
   2216      1.25  rearnsha 	int kernel_cacheable = 0;
   2217      1.25  rearnsha 	struct pv_entry *pv;
   2218      1.25  rearnsha 	struct pmap *last_pmap = pmap;
   2219      1.25  rearnsha 
   2220      1.25  rearnsha #ifdef DIAGNOSTIC
   2221      1.25  rearnsha 	if (pmap != pmap_kernel())
   2222      1.25  rearnsha 		panic("pmap_vac_me_kpmap: pmap != pmap_kernel()");
   2223      1.25  rearnsha #endif
   2224      1.25  rearnsha 
   2225      1.25  rearnsha 	/*
   2226      1.25  rearnsha 	 * Pass one, see if there are both kernel and user pmaps for
   2227      1.25  rearnsha 	 * this page.  Calculate whether there are user-writable or
   2228      1.25  rearnsha 	 * kernel-writable pages.
   2229      1.25  rearnsha 	 */
   2230      1.49   thorpej 	for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) {
   2231      1.25  rearnsha 		if (pv->pv_pmap != pmap) {
   2232      1.25  rearnsha 			user_entries++;
   2233      1.78   thorpej 			if (pv->pv_flags & PVF_WRITE)
   2234      1.25  rearnsha 				user_writable++;
   2235      1.78   thorpej 			if ((pv->pv_flags & PVF_NC) == 0)
   2236      1.25  rearnsha 				user_cacheable++;
   2237      1.25  rearnsha 		} else {
   2238      1.25  rearnsha 			kernel_entries++;
   2239      1.78   thorpej 			if (pv->pv_flags & PVF_WRITE)
   2240      1.25  rearnsha 				kernel_writable++;
   2241      1.78   thorpej 			if ((pv->pv_flags & PVF_NC) == 0)
   2242      1.25  rearnsha 				kernel_cacheable++;
   2243      1.25  rearnsha 		}
   2244      1.25  rearnsha 	}
   2245      1.25  rearnsha 
   2246      1.25  rearnsha 	/*
   2247      1.25  rearnsha 	 * We know we have just been updating a kernel entry, so if
   2248      1.25  rearnsha 	 * all user pages are already cacheable, then there is nothing
   2249      1.25  rearnsha 	 * further to do.
   2250      1.25  rearnsha 	 */
   2251      1.25  rearnsha 	if (kernel_entries == 0 &&
   2252      1.25  rearnsha 	    user_cacheable == user_entries)
   2253      1.25  rearnsha 		return;
   2254      1.25  rearnsha 
   2255      1.25  rearnsha 	if (user_entries) {
   2256      1.25  rearnsha 		/*
   2257      1.25  rearnsha 		 * Scan over the list again, for each entry, if it
   2258      1.25  rearnsha 		 * might not be set correctly, call pmap_vac_me_user
   2259      1.25  rearnsha 		 * to recalculate the settings.
   2260      1.25  rearnsha 		 */
   2261      1.49   thorpej 		for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
   2262      1.25  rearnsha 			/*
   2263      1.25  rearnsha 			 * We know kernel mappings will get set
   2264      1.25  rearnsha 			 * correctly in other calls.  We also know
   2265      1.25  rearnsha 			 * that if the pmap is the same as last_pmap
   2266      1.25  rearnsha 			 * then we've just handled this entry.
   2267      1.25  rearnsha 			 */
   2268      1.25  rearnsha 			if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap)
   2269      1.25  rearnsha 				continue;
   2270      1.25  rearnsha 			/*
   2271      1.25  rearnsha 			 * If there are kernel entries and this page
   2272      1.25  rearnsha 			 * is writable but non-cacheable, then we can
   2273      1.25  rearnsha 			 * skip this entry also.
   2274      1.25  rearnsha 			 */
   2275      1.25  rearnsha 			if (kernel_entries > 0 &&
   2276      1.78   thorpej 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
   2277      1.78   thorpej 			    (PVF_NC | PVF_WRITE))
   2278      1.25  rearnsha 				continue;
   2279      1.25  rearnsha 			/*
   2280      1.25  rearnsha 			 * Similarly if there are no kernel-writable
   2281      1.25  rearnsha 			 * entries and the page is already
   2282      1.25  rearnsha 			 * read-only/cacheable.
   2283      1.25  rearnsha 			 */
   2284      1.25  rearnsha 			if (kernel_writable == 0 &&
   2285      1.78   thorpej 			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
   2286      1.25  rearnsha 				continue;
   2287      1.25  rearnsha 			/*
   2288      1.25  rearnsha 			 * For some of the remaining cases, we know
   2289      1.25  rearnsha 			 * that we must recalculate, but for others we
   2290      1.25  rearnsha 			 * can't tell if they are correct or not, so
   2291      1.25  rearnsha 			 * we recalculate anyway.
   2292      1.25  rearnsha 			 */
   2293      1.25  rearnsha 			pmap_unmap_ptes(last_pmap);
   2294      1.25  rearnsha 			last_pmap = pv->pv_pmap;
   2295      1.25  rearnsha 			ptes = pmap_map_ptes(last_pmap);
   2296      1.49   thorpej 			pmap_vac_me_user(last_pmap, pg, ptes,
   2297      1.25  rearnsha 			    pmap_is_curpmap(last_pmap));
   2298      1.25  rearnsha 		}
   2299      1.25  rearnsha 		/* Restore the pte mapping that was passed to us.  */
   2300      1.25  rearnsha 		if (last_pmap != pmap) {
   2301      1.25  rearnsha 			pmap_unmap_ptes(last_pmap);
   2302      1.25  rearnsha 			ptes = pmap_map_ptes(pmap);
   2303      1.25  rearnsha 		}
   2304      1.25  rearnsha 		if (kernel_entries == 0)
   2305      1.25  rearnsha 			return;
   2306      1.25  rearnsha 	}
   2307      1.25  rearnsha 
   2308      1.49   thorpej 	pmap_vac_me_user(pmap, pg, ptes, clear_cache);
   2309      1.25  rearnsha 	return;
   2310      1.25  rearnsha }
   2311      1.25  rearnsha 
   2312      1.25  rearnsha static void
   2313      1.49   thorpej pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes,
   2314      1.25  rearnsha 	boolean_t clear_cache)
   2315      1.25  rearnsha {
   2316      1.25  rearnsha 	struct pmap *kpmap = pmap_kernel();
   2317      1.17     chris 	struct pv_entry *pv, *npv;
   2318       1.1      matt 	int entries = 0;
   2319      1.25  rearnsha 	int writable = 0;
   2320      1.12     chris 	int cacheable_entries = 0;
   2321      1.25  rearnsha 	int kern_cacheable = 0;
   2322      1.25  rearnsha 	int other_writable = 0;
   2323       1.1      matt 
   2324      1.49   thorpej 	pv = pg->mdpage.pvh_list;
   2325      1.11     chris 	KASSERT(ptes != NULL);
   2326       1.1      matt 
   2327       1.1      matt 	/*
   2328       1.1      matt 	 * Count mappings and writable mappings in this pmap.
   2329      1.25  rearnsha 	 * Include kernel mappings as part of our own.
   2330       1.1      matt 	 * Keep a pointer to the first one.
   2331       1.1      matt 	 */
   2332       1.1      matt 	for (npv = pv; npv; npv = npv->pv_next) {
   2333       1.1      matt 		/* Count mappings in the same pmap */
   2334      1.25  rearnsha 		if (pmap == npv->pv_pmap ||
   2335      1.25  rearnsha 		    kpmap == npv->pv_pmap) {
   2336       1.1      matt 			if (entries++ == 0)
   2337       1.1      matt 				pv = npv;
   2338      1.12     chris 			/* Cacheable mappings */
   2339      1.78   thorpej 			if ((npv->pv_flags & PVF_NC) == 0) {
   2340      1.12     chris 				cacheable_entries++;
   2341      1.25  rearnsha 				if (kpmap == npv->pv_pmap)
   2342      1.25  rearnsha 					kern_cacheable++;
   2343      1.25  rearnsha 			}
   2344      1.25  rearnsha 			/* Writable mappings */
   2345      1.78   thorpej 			if (npv->pv_flags & PVF_WRITE)
   2346      1.25  rearnsha 				++writable;
   2347      1.78   thorpej 		} else if (npv->pv_flags & PVF_WRITE)
   2348      1.25  rearnsha 			other_writable = 1;
   2349       1.1      matt 	}
   2350       1.1      matt 
   2351      1.12     chris 	PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, "
   2352      1.25  rearnsha 		"writable %d cacheable %d %s\n", pmap, entries, writable,
   2353      1.12     chris 	    	cacheable_entries, clear_cache ? "clean" : "no clean"));
   2354      1.12     chris 
   2355       1.1      matt 	/*
   2356       1.1      matt 	 * Enable or disable caching as necessary.
   2357      1.25  rearnsha 	 * Note: the first entry might be part of the kernel pmap,
   2358      1.25  rearnsha 	 * so we can't assume this is indicative of the state of the
   2359      1.25  rearnsha 	 * other (maybe non-kpmap) entries.
   2360       1.1      matt 	 */
   2361      1.25  rearnsha 	if ((entries > 1 && writable) ||
   2362      1.25  rearnsha 	    (entries > 0 && pmap == kpmap && other_writable)) {
   2363      1.12     chris 		if (cacheable_entries == 0)
   2364      1.12     chris 		    return;
   2365      1.25  rearnsha 		for (npv = pv; npv; npv = npv->pv_next) {
   2366      1.25  rearnsha 			if ((pmap == npv->pv_pmap
   2367      1.25  rearnsha 			    || kpmap == npv->pv_pmap) &&
   2368      1.78   thorpej 			    (npv->pv_flags & PVF_NC) == 0) {
   2369      1.91   thorpej 				ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK;
   2370      1.78   thorpej  				npv->pv_flags |= PVF_NC;
   2371      1.25  rearnsha 				/*
   2372      1.25  rearnsha 				 * If this page needs flushing from the
   2373      1.25  rearnsha 				 * cache, and we aren't going to do it
   2374      1.25  rearnsha 				 * below, do it now.
   2375      1.25  rearnsha 				 */
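                             				/*
                             				 * (Fewer than four single-page
                             				 * flushes are assumed cheaper than
                             				 * the full cache clean done below.)
                             				 */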
   2376      1.25  rearnsha 				if ((cacheable_entries < 4 &&
   2377      1.25  rearnsha 				    (clear_cache || npv->pv_pmap == kpmap)) ||
   2378      1.25  rearnsha 				    (npv->pv_pmap == kpmap &&
   2379      1.25  rearnsha 				    !clear_cache && kern_cacheable < 4)) {
   2380      1.36   thorpej 					cpu_idcache_wbinv_range(npv->pv_va,
   2381      1.12     chris 					    NBPG);
   2382      1.12     chris 					cpu_tlb_flushID_SE(npv->pv_va);
   2383      1.12     chris 				}
   2384       1.1      matt 			}
   2385       1.1      matt 		}
   2386      1.25  rearnsha 		if ((clear_cache && cacheable_entries >= 4) ||
   2387      1.25  rearnsha 		    kern_cacheable >= 4) {
   2388      1.36   thorpej 			cpu_idcache_wbinv_all();
   2389      1.12     chris 			cpu_tlb_flushID();
   2390      1.12     chris 		}
   2391      1.32   thorpej 		cpu_cpwait();
   2392       1.1      matt 	} else if (entries > 0) {
   2393      1.25  rearnsha 		/*
    2394      1.25  rearnsha 		 * Turn caching back on for some pages.  If it is a kernel
    2395      1.25  rearnsha 		 * page, only do so if no other pmap has a writable mapping.
   2396      1.25  rearnsha 		 */
   2397      1.25  rearnsha 		for (npv = pv; npv; npv = npv->pv_next) {
   2398      1.25  rearnsha 			if ((pmap == npv->pv_pmap ||
   2399      1.25  rearnsha 			    (kpmap == npv->pv_pmap && other_writable == 0)) &&
   2400      1.78   thorpej 			    (npv->pv_flags & PVF_NC)) {
   2401      1.86   thorpej 				ptes[arm_btop(npv->pv_va)] |=
   2402      1.86   thorpej 				    pte_l2_s_cache_mode;
   2403      1.78   thorpej 				npv->pv_flags &= ~PVF_NC;
   2404       1.1      matt 			}
   2405       1.1      matt 		}
   2406       1.1      matt 	}
   2407       1.1      matt }
   2408       1.1      matt 
   2409       1.1      matt /*
   2410       1.1      matt  * pmap_remove()
   2411       1.1      matt  *
   2412       1.1      matt  * pmap_remove is responsible for nuking a number of mappings for a range
    2413       1.1      matt  * of virtual address space in the given pmap. To do this efficiently
   2414       1.1      matt  * is interesting, because in a number of cases a wide virtual address
   2415       1.1      matt  * range may be supplied that contains few actual mappings. So, the
   2416       1.1      matt  * optimisations are:
    2417       1.1      matt  *  1. Try to skip over hunks of address space for which an L1 entry
   2418       1.1      matt  *     does not exist.
   2419       1.1      matt  *  2. Build up a list of pages we've hit, up to a maximum, so we can
   2420       1.1      matt  *     maybe do just a partial cache clean. This path of execution is
   2421       1.1      matt  *     complicated by the fact that the cache must be flushed _before_
   2422       1.1      matt  *     the PTE is nuked, being a VAC :-)
   2423       1.1      matt  *  3. Maybe later fast-case a single page, but I don't think this is
   2424       1.1      matt  *     going to make _that_ much difference overall.
   2425       1.1      matt  */
   2426       1.1      matt 
   2427       1.1      matt #define PMAP_REMOVE_CLEAN_LIST_SIZE	3
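                             /*
                              * The list size above is a heuristic: up to three pages are
                              * flushed from the cache individually; once the list overflows,
                              * a single full cache clean is assumed to be cheaper.
                              */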
   2428       1.1      matt 
   2429       1.1      matt void
   2430      1.73   thorpej pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva)
   2431       1.1      matt {
   2432       1.1      matt 	int cleanlist_idx = 0;
   2433       1.1      matt 	struct pagelist {
   2434       1.1      matt 		vaddr_t va;
   2435       1.1      matt 		pt_entry_t *pte;
   2436       1.1      matt 	} cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
   2437      1.11     chris 	pt_entry_t *pte = 0, *ptes;
   2438       1.2      matt 	paddr_t pa;
   2439       1.1      matt 	int pmap_active;
   2440      1.49   thorpej 	struct vm_page *pg;
   2441       1.1      matt 
    2442       1.1      matt 	/* Exit quickly if there is no pmap */
   2443       1.1      matt 	if (!pmap)
   2444       1.1      matt 		return;
   2445       1.1      matt 
   2446      1.79   thorpej 	PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n",
   2447      1.79   thorpej 	    pmap, sva, eva));
   2448       1.1      matt 
   2449      1.17     chris 	/*
   2450      1.49   thorpej 	 * we lock in the pmap => vm_page direction
   2451      1.17     chris 	 */
   2452      1.17     chris 	PMAP_MAP_TO_HEAD_LOCK();
   2453      1.17     chris 
   2454      1.11     chris 	ptes = pmap_map_ptes(pmap);
   2455       1.1      matt 	/* Get a page table pointer */
   2456       1.1      matt 	while (sva < eva) {
   2457      1.30  rearnsha 		if (pmap_pde_page(pmap_pde(pmap, sva)))
   2458       1.1      matt 			break;
   2459      1.81   thorpej 		sva = (sva & L1_S_FRAME) + L1_S_SIZE;
   2460       1.1      matt 	}
   2461      1.11     chris 
   2462      1.56   thorpej 	pte = &ptes[arm_btop(sva)];
    2463       1.1      matt 	/* Note whether the pmap is active; if so, cache and TLB cleans are required */
   2464      1.58   thorpej 	pmap_active = pmap_is_curpmap(pmap);
   2465       1.1      matt 
   2466       1.1      matt 	/* Now loop along */
   2467       1.1      matt 	while (sva < eva) {
   2468       1.1      matt 		/* Check if we can move to the next PDE (l1 chunk) */
   2469      1.81   thorpej 		if (!(sva & L2_ADDR_BITS))
   2470      1.30  rearnsha 			if (!pmap_pde_page(pmap_pde(pmap, sva))) {
   2471      1.81   thorpej 				sva += L1_S_SIZE;
   2472      1.81   thorpej 				pte += arm_btop(L1_S_SIZE);
   2473       1.1      matt 				continue;
   2474       1.1      matt 			}
   2475       1.1      matt 
   2476       1.1      matt 		/* We've found a valid PTE, so this page of PTEs has to go. */
   2477       1.1      matt 		if (pmap_pte_v(pte)) {
   2478       1.1      matt 			/* Update statistics */
   2479       1.1      matt 			--pmap->pm_stats.resident_count;
   2480       1.1      matt 
   2481       1.1      matt 			/*
   2482       1.1      matt 			 * Add this page to our cache remove list, if we can.
   2483       1.1      matt 			 * If, however the cache remove list is totally full,
    2484       1.1      matt 			 * If, however, the cache remove list is totally full,
    2485       1.1      matt 			 * then do a complete cache invalidation, taking care
    2486       1.1      matt 			 * to backtrack the PTE table beforehand, and ignore
    2487       1.1      matt 			 * the lists in the future because there's no longer any
   2488       1.1      matt 			 * penalty, so will carry on unhindered). Otherwise,
   2489       1.1      matt 			 * when we fall out, we just clean the list.
   2490       1.1      matt 			 */
   2491       1.1      matt 			PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte));
   2492       1.1      matt 			pa = pmap_pte_pa(pte);
   2493       1.1      matt 
   2494       1.1      matt 			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2495       1.1      matt 				/* Add to the clean list. */
   2496       1.1      matt 				cleanlist[cleanlist_idx].pte = pte;
   2497       1.1      matt 				cleanlist[cleanlist_idx].va = sva;
   2498       1.1      matt 				cleanlist_idx++;
   2499       1.1      matt 			} else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2500       1.1      matt 				int cnt;
   2501       1.1      matt 
   2502       1.1      matt 				/* Nuke everything if needed. */
   2503       1.1      matt 				if (pmap_active) {
   2504      1.36   thorpej 					cpu_idcache_wbinv_all();
   2505       1.1      matt 					cpu_tlb_flushID();
   2506       1.1      matt 				}
   2507       1.1      matt 
   2508       1.1      matt 				/*
   2509       1.1      matt 				 * Roll back the previous PTE list,
   2510       1.1      matt 				 * and zero out the current PTE.
   2511       1.1      matt 				 */
   2512       1.1      matt 				for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
   2513       1.1      matt 					*cleanlist[cnt].pte = 0;
   2514       1.1      matt 					pmap_pte_delref(pmap, cleanlist[cnt].va);
   2515       1.1      matt 				}
   2516       1.1      matt 				*pte = 0;
   2517       1.1      matt 				pmap_pte_delref(pmap, sva);
   2518       1.1      matt 				cleanlist_idx++;
   2519       1.1      matt 			} else {
   2520       1.1      matt 				/*
   2521       1.1      matt 				 * We've already nuked the cache and
   2522       1.1      matt 				 * TLB, so just carry on regardless,
   2523       1.1      matt 				 * and we won't need to do it again
   2524       1.1      matt 				 */
   2525       1.1      matt 				*pte = 0;
   2526       1.1      matt 				pmap_pte_delref(pmap, sva);
   2527       1.1      matt 			}
   2528       1.1      matt 
   2529       1.1      matt 			/*
   2530       1.1      matt 			 * Update flags. In a number of circumstances,
   2531       1.1      matt 			 * we could cluster a lot of these and do a
   2532       1.1      matt 			 * number of sequential pages in one go.
   2533       1.1      matt 			 */
   2534      1.49   thorpej 			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
   2535      1.17     chris 				struct pv_entry *pve;
   2536      1.49   thorpej 				simple_lock(&pg->mdpage.pvh_slock);
   2537      1.49   thorpej 				pve = pmap_remove_pv(pg, pmap, sva);
   2538      1.17     chris 				pmap_free_pv(pmap, pve);
   2539      1.49   thorpej 				pmap_vac_me_harder(pmap, pg, ptes, FALSE);
   2540      1.49   thorpej 				simple_unlock(&pg->mdpage.pvh_slock);
   2541       1.1      matt 			}
   2542       1.1      matt 		}
   2543       1.1      matt 		sva += NBPG;
   2544       1.1      matt 		pte++;
   2545       1.1      matt 	}
   2546       1.1      matt 
   2547      1.11     chris 	pmap_unmap_ptes(pmap);
   2548       1.1      matt 	/*
   2549       1.1      matt 	 * Now, if we've fallen through down to here, chances are that there
    2550       1.1      matt 	 * are fewer than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left.
   2551       1.1      matt 	 */
   2552       1.1      matt 	if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
   2553       1.1      matt 		u_int cnt;
   2554       1.1      matt 
   2555       1.1      matt 		for (cnt = 0; cnt < cleanlist_idx; cnt++) {
   2556       1.1      matt 			if (pmap_active) {
   2557      1.36   thorpej 				cpu_idcache_wbinv_range(cleanlist[cnt].va,
   2558      1.36   thorpej 				    NBPG);
   2559       1.1      matt 				*cleanlist[cnt].pte = 0;
   2560       1.1      matt 				cpu_tlb_flushID_SE(cleanlist[cnt].va);
   2561       1.1      matt 			} else
   2562       1.1      matt 				*cleanlist[cnt].pte = 0;
   2563       1.1      matt 			pmap_pte_delref(pmap, cleanlist[cnt].va);
   2564       1.1      matt 		}
   2565       1.1      matt 	}
   2566      1.17     chris 	PMAP_MAP_TO_HEAD_UNLOCK();
   2567       1.1      matt }
   2568       1.1      matt 
   2569       1.1      matt /*
   2570       1.1      matt  * Routine:	pmap_remove_all
   2571       1.1      matt  * Function:
   2572       1.1      matt  *		Removes this physical page from
   2573       1.1      matt  *		all physical maps in which it resides.
   2574       1.1      matt  *		Reflects back modify bits to the pager.
   2575       1.1      matt  */
   2576       1.1      matt 
   2577      1.33     chris static void
   2578      1.73   thorpej pmap_remove_all(struct vm_page *pg)
   2579       1.1      matt {
   2580      1.17     chris 	struct pv_entry *pv, *npv;
   2581      1.15     chris 	struct pmap *pmap;
   2582      1.11     chris 	pt_entry_t *pte, *ptes;
   2583       1.1      matt 
   2584      1.49   thorpej 	PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg)));
   2585       1.1      matt 
   2586      1.49   thorpej 	/* set vm_page => pmap locking */
   2587      1.17     chris 	PMAP_HEAD_TO_MAP_LOCK();
   2588       1.1      matt 
   2589      1.49   thorpej 	simple_lock(&pg->mdpage.pvh_slock);
   2590      1.17     chris 
   2591      1.49   thorpej 	pv = pg->mdpage.pvh_list;
   2592      1.49   thorpej 	if (pv == NULL) {
   2593      1.49   thorpej 		PDEBUG(0, printf("free page\n"));
   2594      1.49   thorpej 		simple_unlock(&pg->mdpage.pvh_slock);
   2595      1.49   thorpej 		PMAP_HEAD_TO_MAP_UNLOCK();
   2596      1.49   thorpej 		return;
   2597       1.1      matt 	}
   2598      1.17     chris 	pmap_clean_page(pv, FALSE);
   2599       1.1      matt 
   2600       1.1      matt 	while (pv) {
   2601       1.1      matt 		pmap = pv->pv_pmap;
   2602      1.11     chris 		ptes = pmap_map_ptes(pmap);
   2603      1.56   thorpej 		pte = &ptes[arm_btop(pv->pv_va)];
   2604       1.1      matt 
   2605       1.1      matt 		PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte,
   2606       1.1      matt 		    pv->pv_va, pv->pv_flags));
   2607       1.1      matt #ifdef DEBUG
   2608      1.79   thorpej 		if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 ||
   2609      1.79   thorpej 		    pmap_pte_v(pte) == 0 ||
   2610      1.79   thorpej 		    pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg))
   2611       1.1      matt 			panic("pmap_remove_all: bad mapping");
   2612       1.1      matt #endif	/* DEBUG */
   2613       1.1      matt 
   2614       1.1      matt 		/*
   2615       1.1      matt 		 * Update statistics
   2616       1.1      matt 		 */
   2617       1.1      matt 		--pmap->pm_stats.resident_count;
   2618       1.1      matt 
   2619       1.1      matt 		/* Wired bit */
   2620      1.78   thorpej 		if (pv->pv_flags & PVF_WIRED)
   2621       1.1      matt 			--pmap->pm_stats.wired_count;
   2622       1.1      matt 
   2623       1.1      matt 		/*
   2624       1.1      matt 		 * Invalidate the PTEs.
   2625       1.1      matt 		 * XXX: should cluster them up and invalidate as many
   2626       1.1      matt 		 * as possible at once.
   2627       1.1      matt 		 */
   2628       1.1      matt 
   2629       1.1      matt #ifdef needednotdone
   2630       1.1      matt reduce wiring count on page table pages as references drop
   2631       1.1      matt #endif
   2632       1.1      matt 
   2633       1.1      matt 		*pte = 0;
   2634       1.1      matt 		pmap_pte_delref(pmap, pv->pv_va);
   2635       1.1      matt 
   2636       1.1      matt 		npv = pv->pv_next;
   2637      1.17     chris 		pmap_free_pv(pmap, pv);
   2638       1.1      matt 		pv = npv;
   2639      1.11     chris 		pmap_unmap_ptes(pmap);
   2640       1.1      matt 	}
   2641      1.49   thorpej 	pg->mdpage.pvh_list = NULL;
   2642      1.49   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);
   2643      1.17     chris 	PMAP_HEAD_TO_MAP_UNLOCK();
   2644       1.1      matt 
   2645       1.1      matt 	PDEBUG(0, printf("done\n"));
   2646       1.1      matt 	cpu_tlb_flushID();
   2647      1.32   thorpej 	cpu_cpwait();
   2648       1.1      matt }
   2649       1.1      matt 
   2650       1.1      matt 
   2651       1.1      matt /*
   2652       1.1      matt  * Set the physical protection on the specified range of this map as requested.
   2653       1.1      matt  */
   2654       1.1      matt 
   2655       1.1      matt void
   2656      1.73   thorpej pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   2657       1.1      matt {
   2658      1.11     chris 	pt_entry_t *pte = NULL, *ptes;
   2659      1.49   thorpej 	struct vm_page *pg;
   2660       1.1      matt 	int flush = 0;
   2661       1.1      matt 
   2662       1.1      matt 	PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n",
   2663       1.1      matt 	    pmap, sva, eva, prot));
   2664       1.1      matt 
   2665       1.1      matt 	if (~prot & VM_PROT_READ) {
   2666  1.97.4.2        he 		/*
   2667  1.97.4.2        he 		 * Just remove the mappings.  pmap_update() is not required
   2668  1.97.4.2        he 		 * here since the caller should do it.
   2669  1.97.4.2        he 		 */
   2670       1.1      matt 		pmap_remove(pmap, sva, eva);
   2671       1.1      matt 		return;
   2672       1.1      matt 	}
   2673       1.1      matt 	if (prot & VM_PROT_WRITE) {
   2674       1.1      matt 		/*
   2675       1.1      matt 		 * If this is a read->write transition, just ignore it and let
   2676       1.1      matt 		 * uvm_fault() take care of it later.
   2677       1.1      matt 		 */
   2678       1.1      matt 		return;
   2679       1.1      matt 	}
   2680       1.1      matt 
   2681      1.17     chris 	/* Need to lock map->head */
   2682      1.17     chris 	PMAP_MAP_TO_HEAD_LOCK();
   2683      1.17     chris 
   2684      1.11     chris 	ptes = pmap_map_ptes(pmap);
   2685      1.96   thorpej 
   2686      1.96   thorpej 	/*
    2687      1.96   thorpej 	 * OK, at this point, we know we're doing a write-protect operation.
   2688      1.96   thorpej 	 * If the pmap is active, write-back the range.
   2689      1.96   thorpej 	 */
   2690      1.96   thorpej 	if (pmap_is_curpmap(pmap))
   2691      1.96   thorpej 		cpu_dcache_wb_range(sva, eva - sva);
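                             	/*
                             	 * A plain write-back (rather than write-back-and-invalidate)
                             	 * is sufficient: the mappings stay readable and cacheable; we
                             	 * only need any dirty data to reach memory before write
                             	 * permission is revoked.
                             	 */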
   2692      1.96   thorpej 
   2693       1.1      matt 	/*
   2694       1.1      matt 	 * We need to acquire a pointer to a page table page before entering
   2695       1.1      matt 	 * the following loop.
   2696       1.1      matt 	 */
   2697       1.1      matt 	while (sva < eva) {
   2698      1.30  rearnsha 		if (pmap_pde_page(pmap_pde(pmap, sva)))
   2699       1.1      matt 			break;
   2700      1.81   thorpej 		sva = (sva & L1_S_FRAME) + L1_S_SIZE;
   2701       1.1      matt 	}
   2702      1.11     chris 
   2703      1.56   thorpej 	pte = &ptes[arm_btop(sva)];
   2704      1.17     chris 
   2705       1.1      matt 	while (sva < eva) {
   2706       1.1      matt 		/* only check once in a while */
   2707      1.81   thorpej 		if ((sva & L2_ADDR_BITS) == 0) {
   2708      1.30  rearnsha 			if (!pmap_pde_page(pmap_pde(pmap, sva))) {
   2709       1.1      matt 				/* We can race ahead here, to the next pde. */
   2710      1.81   thorpej 				sva += L1_S_SIZE;
   2711      1.81   thorpej 				pte += arm_btop(L1_S_SIZE);
   2712       1.1      matt 				continue;
   2713       1.1      matt 			}
   2714       1.1      matt 		}
   2715       1.1      matt 
   2716       1.1      matt 		if (!pmap_pte_v(pte))
   2717       1.1      matt 			goto next;
   2718       1.1      matt 
   2719       1.1      matt 		flush = 1;
   2720       1.1      matt 
   2721  1.97.4.2        he 		*pte &= ~L2_S_PROT_W;		/* clear write bit */
   2722       1.1      matt 
   2723       1.1      matt 		/* Clear write flag */
   2724  1.97.4.2        he 		if ((pg = PHYS_TO_VM_PAGE(pmap_pte_pa(pte))) != NULL) {
   2725      1.49   thorpej 			simple_lock(&pg->mdpage.pvh_slock);
   2726      1.78   thorpej 			(void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0);
   2727      1.49   thorpej 			pmap_vac_me_harder(pmap, pg, ptes, FALSE);
   2728      1.49   thorpej 			simple_unlock(&pg->mdpage.pvh_slock);
   2729       1.1      matt 		}
   2730       1.1      matt 
   2731  1.97.4.2        he  next:
   2732       1.1      matt 		sva += NBPG;
   2733       1.1      matt 		pte++;
   2734       1.1      matt 	}
   2735      1.11     chris 	pmap_unmap_ptes(pmap);
   2736      1.17     chris 	PMAP_MAP_TO_HEAD_UNLOCK();
   2737       1.1      matt 	if (flush)
   2738       1.1      matt 		cpu_tlb_flushID();
   2739       1.1      matt }
   2740       1.1      matt 
   2741       1.1      matt /*
   2742      1.15     chris  * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
   2743       1.1      matt  * int flags)
   2744       1.1      matt  *
    2745       1.1      matt  *      Insert the given physical page (pa) at
    2746       1.1      matt  *      the specified virtual address (va) in the
   2747       1.1      matt  *      target physical map with the protection requested.
   2748       1.1      matt  *
   2749       1.1      matt  *      If specified, the page will be wired down, meaning
    2750       1.1      matt  *      that the related pte cannot be reclaimed.
   2751       1.1      matt  *
   2752       1.1      matt  *      NB:  This is the only routine which MAY NOT lazy-evaluate
   2753       1.1      matt  *      or lose information.  That is, this routine must actually
   2754       1.1      matt  *      insert this page into the given map NOW.
   2755       1.1      matt  */
   2756       1.1      matt 
   2757       1.1      matt int
   2758      1.73   thorpej pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot,
   2759      1.73   thorpej     int flags)
   2760       1.1      matt {
   2761      1.66   thorpej 	pt_entry_t *ptes, opte, npte;
   2762       1.2      matt 	paddr_t opa;
   2763       1.1      matt 	boolean_t wired = (flags & PMAP_WIRED) != 0;
   2764      1.49   thorpej 	struct vm_page *pg;
   2765      1.17     chris 	struct pv_entry *pve;
   2766      1.66   thorpej 	int error, nflags;
   2767       1.1      matt 
   2768       1.1      matt 	PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n",
   2769       1.1      matt 	    va, pa, pmap, prot, wired));
   2770       1.1      matt 
   2771       1.1      matt #ifdef DIAGNOSTIC
   2772       1.1      matt 	/* Valid address ? */
   2773      1.48     chris 	if (va >= (pmap_curmaxkvaddr))
   2774       1.1      matt 		panic("pmap_enter: too big");
   2775       1.1      matt 	if (pmap != pmap_kernel() && va != 0) {
   2776       1.1      matt 		if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS)
   2777       1.1      matt 			panic("pmap_enter: kernel page in user map");
   2778       1.1      matt 	} else {
   2779       1.1      matt 		if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS)
   2780       1.1      matt 			panic("pmap_enter: user page in kernel map");
   2781       1.1      matt 		if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS)
   2782       1.1      matt 			panic("pmap_enter: entering PT page");
   2783       1.1      matt 	}
   2784       1.1      matt #endif
   2785      1.79   thorpej 
   2786      1.79   thorpej 	KDASSERT(((va | pa) & PGOFSET) == 0);
   2787      1.79   thorpej 
   2788      1.49   thorpej 	/*
   2789      1.49   thorpej 	 * Get a pointer to the page.  Later on in this function, we
   2790      1.49   thorpej 	 * test for a managed page by checking pg != NULL.
   2791      1.49   thorpej 	 */
   2792      1.55   thorpej 	pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL;
   2793      1.49   thorpej 
   2794      1.17     chris 	/* get lock */
   2795      1.17     chris 	PMAP_MAP_TO_HEAD_LOCK();
   2796      1.66   thorpej 
   2797       1.1      matt 	/*
   2798      1.66   thorpej 	 * map the ptes.  If there's not already an L2 table for this
   2799      1.66   thorpej 	 * address, allocate one.
   2800       1.1      matt 	 */
   2801      1.66   thorpej 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   2802      1.66   thorpej 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   2803      1.17     chris 		struct vm_page *ptp;
   2804      1.57   thorpej 
   2805      1.57   thorpej 		/* kernel should be pre-grown */
   2806      1.57   thorpej 		KASSERT(pmap != pmap_kernel());
   2807      1.17     chris 
   2808      1.17     chris 		/* if failure is allowed then don't try too hard */
   2809      1.81   thorpej 		ptp = pmap_get_ptp(pmap, va & L1_S_FRAME);
   2810      1.17     chris 		if (ptp == NULL) {
   2811      1.17     chris 			if (flags & PMAP_CANFAIL) {
   2812      1.17     chris 				error = ENOMEM;
   2813      1.17     chris 				goto out;
   2814      1.17     chris 			}
   2815      1.17     chris 			panic("pmap_enter: get ptp failed");
   2816       1.1      matt 		}
   2817       1.1      matt 	}
   2818      1.66   thorpej 	opte = ptes[arm_btop(va)];
   2819       1.1      matt 
   2820       1.1      matt 	nflags = 0;
   2821       1.1      matt 	if (prot & VM_PROT_WRITE)
   2822      1.78   thorpej 		nflags |= PVF_WRITE;
   2823       1.1      matt 	if (wired)
   2824      1.78   thorpej 		nflags |= PVF_WIRED;
   2825       1.1      matt 
   2826       1.1      matt 	/* Is the pte valid ? If so then this page is already mapped */
   2827      1.66   thorpej 	if (l2pte_valid(opte)) {
   2828       1.1      matt 		/* Get the physical address of the current page mapped */
   2829      1.66   thorpej 		opa = l2pte_pa(opte);
   2830       1.1      matt 
   2831       1.1      matt 		/* Are we mapping the same page ? */
   2832       1.1      matt 		if (opa == pa) {
   2833       1.1      matt 			/* Has the wiring changed ? */
   2834      1.49   thorpej 			if (pg != NULL) {
   2835      1.49   thorpej 				simple_lock(&pg->mdpage.pvh_slock);
   2836      1.49   thorpej 				(void) pmap_modify_pv(pmap, va, pg,
   2837      1.78   thorpej 				    PVF_WRITE | PVF_WIRED, nflags);
   2838      1.49   thorpej 				simple_unlock(&pg->mdpage.pvh_slock);
   2839      1.49   thorpej  			}
   2840       1.1      matt 		} else {
   2841      1.49   thorpej 			struct vm_page *opg;
   2842      1.49   thorpej 
   2843       1.1      matt 			/* We are replacing the page with a new one. */
   2844      1.36   thorpej 			cpu_idcache_wbinv_range(va, NBPG);
   2845       1.1      matt 
   2846       1.1      matt 			/*
   2847       1.1      matt 			 * If it is part of our managed memory then we
   2848       1.1      matt 			 * must remove it from the PV list
   2849       1.1      matt 			 */
   2850      1.49   thorpej 			if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) {
   2851      1.49   thorpej 				simple_lock(&opg->mdpage.pvh_slock);
   2852      1.49   thorpej 				pve = pmap_remove_pv(opg, pmap, va);
   2853      1.49   thorpej 				simple_unlock(&opg->mdpage.pvh_slock);
   2854      1.17     chris 			} else {
   2855      1.17     chris 				pve = NULL;
   2856       1.1      matt 			}
   2857       1.1      matt 
   2858       1.1      matt 			goto enter;
   2859       1.1      matt 		}
   2860       1.1      matt 	} else {
   2861       1.1      matt 		opa = 0;
   2862      1.17     chris 		pve = NULL;
   2863       1.1      matt 		pmap_pte_addref(pmap, va);
   2864       1.1      matt 
   2865       1.1      matt 		/* pte is not valid so we must be hooking in a new page */
   2866       1.1      matt 		++pmap->pm_stats.resident_count;
   2867       1.1      matt 
   2868       1.1      matt 	enter:
   2869       1.1      matt 		/*
   2870       1.1      matt 		 * Enter on the PV list if part of our managed memory
   2871       1.1      matt 		 */
   2872      1.55   thorpej 		if (pg != NULL) {
   2873      1.17     chris 			if (pve == NULL) {
   2874      1.17     chris 				pve = pmap_alloc_pv(pmap, ALLOCPV_NEED);
   2875      1.17     chris 				if (pve == NULL) {
   2876      1.17     chris 					if (flags & PMAP_CANFAIL) {
   2877      1.17     chris 						error = ENOMEM;
   2878      1.17     chris 						goto out;
   2879      1.17     chris 					}
   2880      1.66   thorpej 					panic("pmap_enter: no pv entries "
   2881      1.66   thorpej 					    "available");
   2882      1.17     chris 				}
   2883      1.17     chris 			}
   2884      1.17     chris 			/* enter_pv locks pvh when adding */
   2885      1.49   thorpej 			pmap_enter_pv(pg, pve, pmap, va, NULL, nflags);
   2886      1.17     chris 		} else {
   2887      1.17     chris 			if (pve != NULL)
   2888      1.17     chris 				pmap_free_pv(pmap, pve);
   2889       1.1      matt 		}
   2890       1.1      matt 	}
   2891       1.1      matt 
   2892       1.1      matt 	/* Construct the pte, giving the correct access. */
   2893      1.79   thorpej 	npte = pa;
   2894       1.1      matt 
    2895       1.1      matt 	/* The vector page is magic; never grant user access to it. */
   2896      1.77   thorpej 	if (pmap != pmap_kernel() && va != vector_page)
   2897      1.83   thorpej 		npte |= L2_S_PROT_U;
   2898       1.1      matt 
   2899      1.55   thorpej 	if (pg != NULL) {
   2900       1.1      matt #ifdef DIAGNOSTIC
   2901       1.1      matt 		if ((flags & VM_PROT_ALL) & ~prot)
   2902       1.1      matt 			panic("pmap_enter: access_type exceeds prot");
   2903       1.1      matt #endif
   2904      1.86   thorpej 		npte |= pte_l2_s_cache_mode;
   2905       1.1      matt 		if (flags & VM_PROT_WRITE) {
   2906      1.84   thorpej 			npte |= L2_S_PROTO | L2_S_PROT_W;
   2907      1.78   thorpej 			pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
   2908       1.1      matt 		} else if (flags & VM_PROT_ALL) {
   2909      1.84   thorpej 			npte |= L2_S_PROTO;
   2910      1.78   thorpej 			pg->mdpage.pvh_attrs |= PVF_REF;
   2911       1.1      matt 		} else
   2912      1.81   thorpej 			npte |= L2_TYPE_INV;
   2913       1.1      matt 	} else {
   2914       1.1      matt 		if (prot & VM_PROT_WRITE)
   2915      1.84   thorpej 			npte |= L2_S_PROTO | L2_S_PROT_W;
   2916       1.1      matt 		else if (prot & VM_PROT_ALL)
   2917      1.84   thorpej 			npte |= L2_S_PROTO;
   2918       1.1      matt 		else
   2919      1.81   thorpej 			npte |= L2_TYPE_INV;
   2920       1.1      matt 	}
   2921       1.1      matt 
   2922      1.66   thorpej 	ptes[arm_btop(va)] = npte;
   2923       1.1      matt 
   2924      1.55   thorpej 	if (pg != NULL) {
   2925      1.49   thorpej 		simple_lock(&pg->mdpage.pvh_slock);
   2926      1.59   thorpej  		pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap));
   2927      1.49   thorpej 		simple_unlock(&pg->mdpage.pvh_slock);
   2928      1.11     chris 	}
   2929       1.1      matt 
   2930       1.1      matt 	/* Better flush the TLB ... */
   2931       1.1      matt 	cpu_tlb_flushID_SE(va);
   2932      1.17     chris 	error = 0;
   2933      1.17     chris out:
   2934      1.66   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   2935      1.17     chris 	PMAP_MAP_TO_HEAD_UNLOCK();
   2936       1.1      matt 
   2937      1.17     chris 	return error;
   2938       1.1      matt }
   2939       1.1      matt 
   2940      1.48     chris /*
   2941      1.48     chris  * pmap_kenter_pa: enter a kernel mapping
   2942      1.48     chris  *
   2943      1.48     chris  * => no need to lock anything assume va is already allocated
   2944      1.48     chris  * => should be faster than normal pmap enter function
   2945      1.48     chris  */
   2946       1.1      matt void
   2947      1.73   thorpej pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
   2948       1.1      matt {
   2949      1.13     chris 	pt_entry_t *pte;
   2950  1.97.4.4        he 
   2951      1.13     chris 	pte = vtopte(va);
   2952      1.14       chs 	KASSERT(!pmap_pte_v(pte));
   2953      1.83   thorpej 
   2954  1.97.4.4        he #ifdef PMAP_ALIAS_DEBUG
   2955  1.97.4.4        he     {
   2956  1.97.4.4        he 	struct vm_page *pg;
   2957  1.97.4.4        he 	int s;
   2958  1.97.4.4        he 
   2959  1.97.4.4        he 	pg = PHYS_TO_VM_PAGE(pa);
   2960  1.97.4.4        he 	if (pg != NULL) {
   2961  1.97.4.4        he 		s = splhigh();
   2962  1.97.4.4        he 		if (pg->mdpage.ro_mappings == 0 &&
   2963  1.97.4.4        he 		    pg->mdpage.rw_mappings == 0 &&
   2964  1.97.4.4        he 		    pg->mdpage.kro_mappings == 0 &&
   2965  1.97.4.4        he 		    pg->mdpage.krw_mappings == 0) {
   2966  1.97.4.4        he 			/* This case is okay. */
   2967  1.97.4.4        he 		} else if (pg->mdpage.rw_mappings == 0 &&
   2968  1.97.4.4        he 			   pg->mdpage.krw_mappings == 0 &&
   2969  1.97.4.4        he 			   (prot & VM_PROT_WRITE) == 0) {
   2970  1.97.4.4        he 			/* This case is okay. */
   2971  1.97.4.4        he 		} else {
   2972  1.97.4.4        he 			/* Something is awry. */
   2973  1.97.4.4        he 			printf("pmap_kenter_pa: ro %u, rw %u, kro %u, krw %u "
   2974  1.97.4.4        he 			    "prot 0x%x\n", pg->mdpage.ro_mappings,
   2975  1.97.4.4        he 			    pg->mdpage.rw_mappings, pg->mdpage.kro_mappings,
   2976  1.97.4.4        he 			    pg->mdpage.krw_mappings, prot);
   2977  1.97.4.4        he 			Debugger();
   2978  1.97.4.4        he 		}
   2979  1.97.4.4        he 		if (prot & VM_PROT_WRITE)
   2980  1.97.4.4        he 			pg->mdpage.krw_mappings++;
   2981  1.97.4.4        he 		else
   2982  1.97.4.4        he 			pg->mdpage.kro_mappings++;
   2983  1.97.4.4        he 		splx(s);
   2984  1.97.4.4        he 	}
   2985  1.97.4.4        he     }
   2986  1.97.4.4        he #endif /* PMAP_ALIAS_DEBUG */
   2987  1.97.4.4        he 
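                             	/*
                             	 * Build and install the PTE directly; unlike pmap_enter(),
                             	 * kernel mappings made here are not entered on the PV lists.
                             	 */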
   2988      1.83   thorpej 	*pte = L2_S_PROTO | pa |
   2989      1.90   thorpej 	    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
   2990       1.1      matt }
   2991       1.1      matt 
   2992       1.1      matt void
   2993      1.73   thorpej pmap_kremove(vaddr_t va, vsize_t len)
   2994       1.1      matt {
   2995      1.14       chs 	pt_entry_t *pte;
   2996      1.14       chs 
   2997       1.1      matt 	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
   2998      1.13     chris 
   2999      1.14       chs 		/*
   3000      1.14       chs 		 * We assume that we will only be called with small
   3001      1.14       chs 		 * regions of memory.
   3002      1.14       chs 		 */
   3003      1.14       chs 
   3004      1.30  rearnsha 		KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va)));
   3005      1.13     chris 		pte = vtopte(va);
   3006  1.97.4.4        he #ifdef PMAP_ALIAS_DEBUG
   3007  1.97.4.4        he     {
   3008  1.97.4.4        he 		struct vm_page *pg;
   3009  1.97.4.4        he 		int s;
   3010  1.97.4.4        he 
   3011  1.97.4.4        he 		if ((*pte & L2_TYPE_MASK) != L2_TYPE_INV &&
   3012  1.97.4.4        he 		    (pg = PHYS_TO_VM_PAGE(*pte & L2_S_FRAME)) != NULL) {
   3013  1.97.4.4        he 			s = splhigh();
   3014  1.97.4.4        he 			if (*pte & L2_S_PROT_W) {
   3015  1.97.4.4        he 				KASSERT(pg->mdpage.krw_mappings != 0);
   3016  1.97.4.4        he 				pg->mdpage.krw_mappings--;
   3017  1.97.4.4        he 			} else {
   3018  1.97.4.4        he 				KASSERT(pg->mdpage.kro_mappings != 0);
   3019  1.97.4.4        he 				pg->mdpage.kro_mappings--;
   3020  1.97.4.4        he 			}
   3021  1.97.4.4        he 			splx(s);
   3022  1.97.4.4        he 		}
   3023  1.97.4.4        he     }
   3024  1.97.4.4        he #endif /* PMAP_ALIAS_DEBUG */
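                             		/*
                             		 * Flush the cache before zapping the PTE, then knock
                             		 * the stale entry out of the TLB.
                             		 */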
   3025      1.36   thorpej 		cpu_idcache_wbinv_range(va, PAGE_SIZE);
   3026      1.13     chris 		*pte = 0;
   3027      1.13     chris 		cpu_tlb_flushID_SE(va);
   3028       1.1      matt 	}
   3029       1.1      matt }
   3030       1.1      matt 
   3031       1.1      matt /*
   3032       1.1      matt  * pmap_page_protect:
   3033       1.1      matt  *
   3034       1.1      matt  * Lower the permission for all mappings to a given page.
   3035       1.1      matt  */
   3036       1.1      matt 
   3037       1.1      matt void
   3038      1.73   thorpej pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   3039       1.1      matt {
   3040       1.1      matt 
   3041      1.49   thorpej 	PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n",
   3042      1.49   thorpej 	    VM_PAGE_TO_PHYS(pg), prot));
   3043       1.1      matt 
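                             	/*
                             	 * Retaining write access is a no-op; dropping to read-only
                             	 * clears the write bit on every mapping; anything less
                             	 * removes the page from all pmaps.
                             	 */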
    3044       1.1      matt 	switch (prot) {
   3045      1.17     chris 	case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
   3046      1.17     chris 	case VM_PROT_READ|VM_PROT_WRITE:
   3047      1.17     chris 		return;
   3048      1.17     chris 
   3049       1.1      matt 	case VM_PROT_READ:
   3050       1.1      matt 	case VM_PROT_READ|VM_PROT_EXECUTE:
   3051      1.78   thorpej 		pmap_clearbit(pg, PVF_WRITE);
   3052       1.1      matt 		break;
   3053       1.1      matt 
   3054       1.1      matt 	default:
   3055      1.49   thorpej 		pmap_remove_all(pg);
   3056       1.1      matt 		break;
   3057       1.1      matt 	}
   3058       1.1      matt }
   3059       1.1      matt 
   3060       1.1      matt 
   3061       1.1      matt /*
   3062       1.1      matt  * Routine:	pmap_unwire
   3063       1.1      matt  * Function:	Clear the wired attribute for a map/virtual-address
   3064       1.1      matt  *		pair.
   3065       1.1      matt  * In/out conditions:
   3066       1.1      matt  *		The mapping must already exist in the pmap.
   3067       1.1      matt  */
   3068       1.1      matt 
   3069       1.1      matt void
   3070      1.73   thorpej pmap_unwire(struct pmap *pmap, vaddr_t va)
   3071       1.1      matt {
   3072      1.60   thorpej 	pt_entry_t *ptes;
   3073      1.60   thorpej 	struct vm_page *pg;
   3074       1.2      matt 	paddr_t pa;
   3075       1.1      matt 
   3076      1.60   thorpej 	PMAP_MAP_TO_HEAD_LOCK();
   3077      1.60   thorpej 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3078       1.1      matt 
   3079      1.60   thorpej 	if (pmap_pde_v(pmap_pde(pmap, va))) {
   3080      1.60   thorpej #ifdef DIAGNOSTIC
   3081      1.60   thorpej 		if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   3082      1.60   thorpej 			panic("pmap_unwire: invalid L2 PTE");
   3083      1.60   thorpej #endif
   3084      1.60   thorpej 		/* Extract the physical address of the page */
   3085      1.60   thorpej 		pa = l2pte_pa(ptes[arm_btop(va)]);
   3086       1.1      matt 
   3087      1.60   thorpej 		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   3088      1.60   thorpej 			goto out;
   3089       1.1      matt 
   3090      1.60   thorpej 		/* Update the wired bit in the pv entry for this page. */
   3091      1.60   thorpej 		simple_lock(&pg->mdpage.pvh_slock);
   3092      1.78   thorpej 		(void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0);
   3093      1.60   thorpej 		simple_unlock(&pg->mdpage.pvh_slock);
   3094      1.60   thorpej 	}
   3095      1.60   thorpej #ifdef DIAGNOSTIC
   3096      1.60   thorpej 	else {
   3097      1.60   thorpej 		panic("pmap_unwire: invalid L1 PTE");
   3098      1.60   thorpej 	}
   3099      1.60   thorpej #endif
   3100      1.60   thorpej  out:
   3101      1.60   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3102      1.60   thorpej 	PMAP_MAP_TO_HEAD_UNLOCK();
   3103       1.1      matt }
   3104       1.1      matt 
   3105       1.1      matt /*
   3106       1.1      matt  * Routine:  pmap_extract
   3107       1.1      matt  * Function:
   3108       1.1      matt  *           Extract the physical page address associated
   3109       1.1      matt  *           with the given map/virtual_address pair.
   3110       1.1      matt  */
   3111       1.1      matt boolean_t
   3112      1.73   thorpej pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap)
   3113       1.1      matt {
   3114      1.34   thorpej 	pd_entry_t *pde;
   3115      1.11     chris 	pt_entry_t *pte, *ptes;
   3116       1.1      matt 	paddr_t pa;
   3117       1.1      matt 
   3118      1.82   thorpej 	PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va));
   3119      1.82   thorpej 
   3120      1.82   thorpej 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3121       1.1      matt 
   3122      1.34   thorpej 	pde = pmap_pde(pmap, va);
   3123      1.56   thorpej 	pte = &ptes[arm_btop(va)];
   3124       1.1      matt 
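                             	/*
                             	 * Resolve the mapping in size order: a 1MB section in the
                             	 * L1, else a 64KB large page or a 4KB small page in the L2
                             	 * page table.
                             	 */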
   3125      1.82   thorpej 	if (pmap_pde_section(pde)) {
   3126      1.82   thorpej 		pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
   3127      1.82   thorpej 		PDEBUG(5, printf("section pa=0x%08lx\n", pa));
   3128      1.82   thorpej 		goto out;
   3129      1.82   thorpej 	} else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) {
   3130      1.82   thorpej 		PDEBUG(5, printf("no mapping\n"));
   3131      1.82   thorpej 		goto failed;
   3132      1.82   thorpej 	}
   3133      1.75   reinoud 
   3134      1.82   thorpej 	if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) {
   3135      1.82   thorpej 		pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET);
   3136      1.82   thorpej 		PDEBUG(5, printf("large page pa=0x%08lx\n", pa));
   3137      1.82   thorpej 		goto out;
   3138      1.82   thorpej 	}
   3139       1.1      matt 
   3140      1.82   thorpej 	pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET);
   3141      1.82   thorpej 	PDEBUG(5, printf("small page pa=0x%08lx\n", pa));
   3142       1.1      matt 
   3143      1.82   thorpej  out:
   3144      1.82   thorpej 	if (pap != NULL)
   3145      1.82   thorpej 		*pap = pa;
   3146       1.1      matt 
   3147      1.82   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3148      1.82   thorpej 	return (TRUE);
   3149      1.34   thorpej 
   3150      1.82   thorpej  failed:
   3151      1.82   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3152      1.82   thorpej 	return (FALSE);
   3153       1.1      matt }
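
/*
 * Illustrative sketch: a typical pmap_extract() caller, translating a
 * virtual address into the physical address backing it.  The wrapper
 * function and the printf reporting are hypothetical; only
 * pmap_extract() itself comes from this file.
 */
#if 0
static void
pmap_extract_example(struct pmap *pm, vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pm, va, &pa))
		printf("va 0x%08lx -> pa 0x%08lx\n", va, pa);
	else
		printf("va 0x%08lx is unmapped\n", va);
}
#endif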
   3154       1.1      matt 
   3155       1.1      matt 
   3156       1.1      matt /*
   3157      1.73   thorpej  * pmap_copy:
   3158       1.1      matt  *
   3159      1.73   thorpej  *	Copy the range specified by src_addr/len from the source map to the
   3160      1.73   thorpej  *	range dst_addr/len in the destination map.
   3161      1.73   thorpej  *
   3162      1.73   thorpej  *	This routine is only advisory and need not do anything.
   3163       1.1      matt  */
   3164      1.73   thorpej /* Call deleted in <arm/arm32/pmap.h> */
   3165       1.1      matt 
   3166       1.1      matt #if defined(PMAP_DEBUG)
   3167       1.1      matt void
    3168       1.1      matt pmap_dump_pvlist(vaddr_t phys, char *m)
   3171       1.1      matt {
   3172      1.49   thorpej 	struct vm_page *pg;
   3173       1.1      matt 	struct pv_entry *pv;
   3174       1.1      matt 
   3175      1.49   thorpej 	if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) {
   3176       1.1      matt 		printf("INVALID PA\n");
   3177       1.1      matt 		return;
   3178       1.1      matt 	}
   3179      1.49   thorpej 	simple_lock(&pg->mdpage.pvh_slock);
   3180       1.1      matt 	printf("%s %08lx:", m, phys);
   3181      1.49   thorpej 	if (pg->mdpage.pvh_list == NULL) {
   3182      1.97     chris 		simple_unlock(&pg->mdpage.pvh_slock);
   3183       1.1      matt 		printf(" no mappings\n");
   3184       1.1      matt 		return;
   3185       1.1      matt 	}
   3186       1.1      matt 
   3187      1.49   thorpej 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next)
   3188       1.1      matt 		printf(" pmap %p va %08lx flags %08x", pv->pv_pmap,
   3189       1.1      matt 		    pv->pv_va, pv->pv_flags);
   3190       1.1      matt 
   3191       1.1      matt 	printf("\n");
   3192      1.49   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);
   3193       1.1      matt }
   3194       1.1      matt 
   3195       1.1      matt #endif	/* PMAP_DEBUG */
   3196       1.1      matt 
   3197      1.11     chris static pt_entry_t *
   3198      1.11     chris pmap_map_ptes(struct pmap *pmap)
   3199      1.11     chris {
   3200      1.72   thorpej 	struct proc *p;
   3201      1.17     chris 
    3202      1.17     chris 	/* the kernel's pmap is always accessible */
   3203      1.17     chris 	if (pmap == pmap_kernel()) {
   3204      1.72   thorpej 		return (pt_entry_t *)PTE_BASE;
   3205      1.17     chris 	}
   3206      1.17     chris 
   3207      1.17     chris 	if (pmap_is_curpmap(pmap)) {
   3208      1.17     chris 		simple_lock(&pmap->pm_obj.vmobjlock);
   3209      1.53   thorpej 		return (pt_entry_t *)PTE_BASE;
   3210      1.17     chris 	}
   3211      1.72   thorpej 
   3212      1.17     chris 	p = curproc;
   3213      1.72   thorpej 	KDASSERT(p != NULL);
   3214      1.17     chris 
   3215      1.17     chris 	/* need to lock both curpmap and pmap: use ordered locking */
   3216      1.72   thorpej 	if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) {
   3217      1.17     chris 		simple_lock(&pmap->pm_obj.vmobjlock);
   3218      1.72   thorpej 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3219      1.17     chris 	} else {
   3220      1.72   thorpej 		simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3221      1.17     chris 		simple_lock(&pmap->pm_obj.vmobjlock);
   3222      1.17     chris 	}
   3223      1.11     chris 
   3224      1.72   thorpej 	pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE, pmap->pm_pptpt,
   3225      1.72   thorpej 	    FALSE);
   3226      1.17     chris 	cpu_tlb_flushD();
   3227      1.32   thorpej 	cpu_cpwait();
   3228      1.53   thorpej 	return (pt_entry_t *)APTE_BASE;
   3229      1.17     chris }
   3230      1.17     chris 
   3231      1.17     chris /*
   3232      1.17     chris  * pmap_unmap_ptes: unlock the PTE mapping of "pmap"
   3233      1.17     chris  */
   3234      1.17     chris 
   3235      1.17     chris static void
   3236      1.73   thorpej pmap_unmap_ptes(struct pmap *pmap)
   3237      1.17     chris {
   3238      1.72   thorpej 
   3239      1.17     chris 	if (pmap == pmap_kernel()) {
   3240      1.17     chris 		return;
   3241      1.17     chris 	}
   3242      1.17     chris 	if (pmap_is_curpmap(pmap)) {
   3243      1.17     chris 		simple_unlock(&pmap->pm_obj.vmobjlock);
   3244      1.17     chris 	} else {
   3245      1.72   thorpej 		KDASSERT(curproc != NULL);
   3246      1.17     chris 		simple_unlock(&pmap->pm_obj.vmobjlock);
   3247      1.72   thorpej 		simple_unlock(
   3248      1.72   thorpej 		    &curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
   3249      1.17     chris 	}
   3250      1.11     chris }
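
/*
 * Illustrative sketch of the deadlock-avoidance idiom used by
 * pmap_map_ptes() above: whenever two pmaps must be held at once, take
 * the locks in ascending address order, so any two threads locking the
 * same pair agree on the order.  The helper below is hypothetical; the
 * technique is exactly the one in pmap_map_ptes().
 */
#if 0
static void
pmap_lock_pair(struct pmap *a, struct pmap *b)
{

	if ((vaddr_t) a < (vaddr_t) b) {
		simple_lock(&a->pm_obj.vmobjlock);
		simple_lock(&b->pm_obj.vmobjlock);
	} else {
		simple_lock(&b->pm_obj.vmobjlock);
		simple_lock(&a->pm_obj.vmobjlock);
	}
}
#endif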
   3251       1.1      matt 
   3252       1.1      matt /*
   3253       1.1      matt  * Modify pte bits for all ptes corresponding to the given physical address.
   3254       1.1      matt  * We use `maskbits' rather than `clearbits' because we're always passing
   3255       1.1      matt  * constants and the latter would require an extra inversion at run-time.
   3256       1.1      matt  */
   3257       1.1      matt 
   3258      1.22     chris static void
   3259      1.73   thorpej pmap_clearbit(struct vm_page *pg, u_int maskbits)
   3260       1.1      matt {
   3261       1.1      matt 	struct pv_entry *pv;
   3262      1.59   thorpej 	pt_entry_t *ptes;
   3263       1.1      matt 	vaddr_t va;
   3264      1.49   thorpej 	int tlbentry;
   3265       1.1      matt 
   3266       1.1      matt 	PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
   3267      1.49   thorpej 	    VM_PAGE_TO_PHYS(pg), maskbits));
   3268      1.21     chris 
   3269      1.21     chris 	tlbentry = 0;
   3270      1.21     chris 
   3271      1.17     chris 	PMAP_HEAD_TO_MAP_LOCK();
   3272      1.49   thorpej 	simple_lock(&pg->mdpage.pvh_slock);
   3273      1.17     chris 
   3274       1.1      matt 	/*
   3275       1.1      matt 	 * Clear saved attributes (modify, reference)
   3276       1.1      matt 	 */
   3277      1.49   thorpej 	pg->mdpage.pvh_attrs &= ~maskbits;
   3278       1.1      matt 
   3279      1.49   thorpej 	if (pg->mdpage.pvh_list == NULL) {
   3280      1.49   thorpej 		simple_unlock(&pg->mdpage.pvh_slock);
   3281      1.17     chris 		PMAP_HEAD_TO_MAP_UNLOCK();
   3282       1.1      matt 		return;
   3283       1.1      matt 	}
   3284       1.1      matt 
   3285       1.1      matt 	/*
    3286       1.1      matt 	 * Loop over all current mappings, setting/clearing as appropriate
   3287       1.1      matt 	 */
   3288      1.49   thorpej 	for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
   3289  1.97.4.4        he #ifdef PMAP_ALIAS_DEBUG
   3290  1.97.4.4        he     {
   3291  1.97.4.4        he 		int s = splhigh();
   3292  1.97.4.4        he 		if ((maskbits & PVF_WRITE) != 0 &&
   3293  1.97.4.4        he 		    (pv->pv_flags & PVF_WRITE) != 0) {
   3294  1.97.4.4        he 			KASSERT(pg->mdpage.rw_mappings != 0);
   3295  1.97.4.4        he 			pg->mdpage.rw_mappings--;
   3296  1.97.4.4        he 			pg->mdpage.ro_mappings++;
   3297  1.97.4.4        he 		}
   3298  1.97.4.4        he 		splx(s);
   3299  1.97.4.4        he     }
   3300  1.97.4.4        he #endif /* PMAP_ALIAS_DEBUG */
   3301       1.1      matt 		va = pv->pv_va;
   3302       1.1      matt 		pv->pv_flags &= ~maskbits;
   3303      1.59   thorpej 		ptes = pmap_map_ptes(pv->pv_pmap);	/* locks pmap */
   3304      1.59   thorpej 		KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
   3305      1.78   thorpej 		if (maskbits & (PVF_WRITE|PVF_MOD)) {
   3306      1.78   thorpej 			if ((pv->pv_flags & PVF_NC)) {
   3307      1.29  rearnsha 				/*
   3308      1.29  rearnsha 				 * Entry is not cacheable: reenable
   3309      1.29  rearnsha 				 * the cache, nothing to flush
   3310      1.29  rearnsha 				 *
   3311      1.29  rearnsha 				 * Don't turn caching on again if this
   3312      1.29  rearnsha 				 * is a modified emulation.  This
    3313      1.29  rearnsha 				 * would be inconsistent with the
   3314      1.29  rearnsha 				 * settings created by
   3315      1.29  rearnsha 				 * pmap_vac_me_harder().
   3316      1.29  rearnsha 				 *
   3317      1.29  rearnsha 				 * There's no need to call
   3318      1.29  rearnsha 				 * pmap_vac_me_harder() here: all
    3319      1.29  rearnsha 				 * pages are losing their write
   3320      1.29  rearnsha 				 * permission.
   3321      1.29  rearnsha 				 *
   3322      1.29  rearnsha 				 */
   3323      1.78   thorpej 				if (maskbits & PVF_WRITE) {
   3324      1.86   thorpej 					ptes[arm_btop(va)] |=
   3325      1.86   thorpej 					    pte_l2_s_cache_mode;
   3326      1.78   thorpej 					pv->pv_flags &= ~PVF_NC;
   3327      1.29  rearnsha 				}
   3328      1.59   thorpej 			} else if (pmap_is_curpmap(pv->pv_pmap)) {
   3329      1.29  rearnsha 				/*
    3330      1.29  rearnsha 				 * Entry is cacheable: if the pmap is
    3331      1.29  rearnsha 				 * the current one, flush the cache entry;
    3332      1.29  rearnsha 				 * otherwise it won't be in the cache.
   3333      1.29  rearnsha 				 */
   3334      1.36   thorpej 				cpu_idcache_wbinv_range(pv->pv_va, NBPG);
   3335      1.59   thorpej 			}
   3336      1.29  rearnsha 
   3337      1.29  rearnsha 			/* make the pte read only */
   3338      1.83   thorpej 			ptes[arm_btop(va)] &= ~L2_S_PROT_W;
   3339      1.29  rearnsha 		}
   3340      1.29  rearnsha 
   3341      1.78   thorpej 		if (maskbits & PVF_REF)
   3342      1.59   thorpej 			ptes[arm_btop(va)] =
   3343      1.81   thorpej 			    (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_TYPE_INV;
   3344      1.21     chris 
   3345      1.59   thorpej 		if (pmap_is_curpmap(pv->pv_pmap)) {
   3346      1.21     chris 			/*
    3347      1.29  rearnsha 			 * if we had cacheable PTEs we'd clean the
    3348      1.29  rearnsha 			 * PTE out to memory here
    3349      1.29  rearnsha 			 *
    3350      1.21     chris 			 * flush the TLB entry as it's in the current pmap
   3351      1.21     chris 			 */
   3352      1.21     chris 			cpu_tlb_flushID_SE(pv->pv_va);
   3353      1.59   thorpej 		}
   3354      1.59   thorpej 		pmap_unmap_ptes(pv->pv_pmap);		/* unlocks pmap */
   3355      1.29  rearnsha 	}
   3356      1.32   thorpej 	cpu_cpwait();
   3357      1.21     chris 
   3358      1.49   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);
   3359      1.17     chris 	PMAP_HEAD_TO_MAP_UNLOCK();
   3360       1.1      matt }
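
/*
 * Illustrative note on the maskbits choice explained above
 * pmap_clearbit(): callers pass compile-time constants, so the
 * complement in "pvh_attrs &= ~maskbits" folds away.  The second call
 * below shows the hypothetical "clearbits" alternative, which would
 * force every caller to invert at run time:
 */
#if 0
	pmap_clearbit(pg, PVF_MOD);		/* actual interface */
	pmap_clearbits_inv(pg, ~PVF_MOD);	/* hypothetical alternative */
#endif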
   3361       1.1      matt 
   3362      1.50   thorpej /*
   3363      1.50   thorpej  * pmap_clear_modify:
   3364      1.50   thorpej  *
   3365      1.50   thorpej  *	Clear the "modified" attribute for a page.
   3366      1.50   thorpej  */
   3367       1.1      matt boolean_t
   3368      1.73   thorpej pmap_clear_modify(struct vm_page *pg)
   3369       1.1      matt {
   3370       1.1      matt 	boolean_t rv;
   3371       1.1      matt 
   3372      1.78   thorpej 	if (pg->mdpage.pvh_attrs & PVF_MOD) {
   3373      1.50   thorpej 		rv = TRUE;
   3374      1.78   thorpej 		pmap_clearbit(pg, PVF_MOD);
   3375      1.50   thorpej 	} else
   3376      1.50   thorpej 		rv = FALSE;
   3377      1.50   thorpej 
   3378      1.50   thorpej 	PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
   3379      1.50   thorpej 	    VM_PAGE_TO_PHYS(pg), rv));
   3380      1.50   thorpej 
   3381      1.50   thorpej 	return (rv);
   3382       1.1      matt }
   3383       1.1      matt 
   3384      1.50   thorpej /*
   3385      1.50   thorpej  * pmap_clear_reference:
   3386      1.50   thorpej  *
   3387      1.50   thorpej  *	Clear the "referenced" attribute for a page.
   3388      1.50   thorpej  */
   3389       1.1      matt boolean_t
   3390      1.73   thorpej pmap_clear_reference(struct vm_page *pg)
   3391       1.1      matt {
   3392       1.1      matt 	boolean_t rv;
   3393       1.1      matt 
   3394      1.78   thorpej 	if (pg->mdpage.pvh_attrs & PVF_REF) {
   3395      1.50   thorpej 		rv = TRUE;
   3396      1.78   thorpej 		pmap_clearbit(pg, PVF_REF);
   3397      1.50   thorpej 	} else
   3398      1.50   thorpej 		rv = FALSE;
   3399      1.50   thorpej 
   3400      1.50   thorpej 	PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n",
   3401      1.50   thorpej 	    VM_PAGE_TO_PHYS(pg), rv));
   3402      1.50   thorpej 
   3403      1.50   thorpej 	return (rv);
   3404       1.1      matt }
   3405       1.1      matt 
   3406      1.50   thorpej /*
   3407      1.50   thorpej  * pmap_is_modified:
   3408      1.50   thorpej  *
   3409      1.50   thorpej  *	Test if a page has the "modified" attribute.
   3410      1.50   thorpej  */
   3411      1.50   thorpej /* See <arm/arm32/pmap.h> */
   3412      1.39   thorpej 
   3413      1.50   thorpej /*
   3414      1.50   thorpej  * pmap_is_referenced:
   3415      1.50   thorpej  *
   3416      1.50   thorpej  *	Test if a page has the "referenced" attribute.
   3417      1.50   thorpej  */
   3418      1.50   thorpej /* See <arm/arm32/pmap.h> */
   3419       1.1      matt 
   3420       1.1      matt int
   3421      1.73   thorpej pmap_modified_emulation(struct pmap *pmap, vaddr_t va)
   3422       1.1      matt {
   3423      1.61   thorpej 	pt_entry_t *ptes;
   3424      1.61   thorpej 	struct vm_page *pg;
   3425       1.2      matt 	paddr_t pa;
   3426       1.1      matt 	u_int flags;
   3427      1.61   thorpej 	int rv = 0;
   3428       1.1      matt 
   3429       1.1      matt 	PDEBUG(2, printf("pmap_modified_emulation\n"));
   3430       1.1      matt 
   3431      1.61   thorpej 	PMAP_MAP_TO_HEAD_LOCK();
   3432      1.62   thorpej 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3433      1.61   thorpej 
   3434      1.61   thorpej 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   3435      1.61   thorpej 		PDEBUG(2, printf("L1 PTE invalid\n"));
   3436      1.61   thorpej 		goto out;
   3437       1.1      matt 	}
   3438       1.1      matt 
   3439      1.61   thorpej 	PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
   3440       1.1      matt 
    3441      1.61   thorpej 	/* Check for an invalid PTE */
   3442      1.61   thorpej 	if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   3443      1.61   thorpej 		goto out;
   3444       1.1      matt 
   3445       1.1      matt 	/* This can happen if user code tries to access kernel memory. */
   3446      1.83   thorpej 	if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0)
   3447      1.61   thorpej 		goto out;
   3448       1.1      matt 
   3449       1.1      matt 	/* Extract the physical address of the page */
   3450      1.61   thorpej 	pa = l2pte_pa(ptes[arm_btop(va)]);
   3451      1.49   thorpej 	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   3452      1.61   thorpej 		goto out;
   3453       1.1      matt 
   3454      1.49   thorpej 	/* Get the current flags for this page. */
   3455      1.49   thorpej 	simple_lock(&pg->mdpage.pvh_slock);
   3456      1.17     chris 
   3457      1.49   thorpej 	flags = pmap_modify_pv(pmap, va, pg, 0, 0);
   3458       1.1      matt 	PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags));
   3459       1.1      matt 
   3460       1.1      matt 	/*
    3461       1.1      matt 	 * Do the flags say this page is writable? If not, then this is a
    3462       1.1      matt 	 * genuine write fault. If so, then the write fault is our fault:
    3463       1.1      matt 	 * we did not reflect the write permission in the PTE. Now that we
    3464       1.1      matt 	 * know a write has occurred, we can correct this and also set the
    3465       1.1      matt 	 * modified bit.
   3466       1.1      matt 	 */
   3467      1.78   thorpej 	if (~flags & PVF_WRITE) {
   3468      1.49   thorpej 	    	simple_unlock(&pg->mdpage.pvh_slock);
   3469      1.61   thorpej 		goto out;
   3470      1.17     chris 	}
   3471       1.1      matt 
   3472      1.61   thorpej 	PDEBUG(0,
   3473      1.61   thorpej 	    printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n",
   3474      1.61   thorpej 	    va, ptes[arm_btop(va)]));
   3475      1.78   thorpej 	pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD;
   3476      1.29  rearnsha 
   3477      1.29  rearnsha 	/*
   3478      1.29  rearnsha 	 * Re-enable write permissions for the page.  No need to call
   3479      1.29  rearnsha 	 * pmap_vac_me_harder(), since this is just a
   3480      1.78   thorpej 	 * modified-emulation fault, and the PVF_WRITE bit isn't changing.
   3481      1.78   thorpej 	 * We've already set the cacheable bits based on the assumption
   3482      1.78   thorpej 	 * that we can write to this page.
   3483      1.29  rearnsha 	 */
   3484      1.61   thorpej 	ptes[arm_btop(va)] =
   3485      1.84   thorpej 	    (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
   3486      1.61   thorpej 	PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
   3487       1.1      matt 
   3488      1.49   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);
   3489      1.61   thorpej 
   3490       1.1      matt 	cpu_tlb_flushID_SE(va);
   3491      1.32   thorpej 	cpu_cpwait();
   3492      1.61   thorpej 	rv = 1;
   3493      1.61   thorpej  out:
   3494      1.61   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3495      1.61   thorpej 	PMAP_MAP_TO_HEAD_UNLOCK();
   3496      1.61   thorpej 	return (rv);
   3497       1.1      matt }
   3498       1.1      matt 
   3499       1.1      matt int
   3500      1.73   thorpej pmap_handled_emulation(struct pmap *pmap, vaddr_t va)
   3501       1.1      matt {
   3502      1.62   thorpej 	pt_entry_t *ptes;
   3503      1.62   thorpej 	struct vm_page *pg;
   3504       1.2      matt 	paddr_t pa;
   3505      1.62   thorpej 	int rv = 0;
   3506       1.1      matt 
   3507       1.1      matt 	PDEBUG(2, printf("pmap_handled_emulation\n"));
   3508       1.1      matt 
   3509      1.63   thorpej 	PMAP_MAP_TO_HEAD_LOCK();
   3510      1.62   thorpej 	ptes = pmap_map_ptes(pmap);		/* locks pmap */
   3511      1.62   thorpej 
   3512      1.62   thorpej 	if (pmap_pde_v(pmap_pde(pmap, va)) == 0) {
   3513      1.62   thorpej 		PDEBUG(2, printf("L1 PTE invalid\n"));
   3514      1.62   thorpej 		goto out;
   3515       1.1      matt 	}
   3516       1.1      matt 
   3517      1.62   thorpej 	PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)]));
   3518       1.1      matt 
    3519      1.62   thorpej 	/* Check for an invalid PTE */
   3520      1.62   thorpej 	if (l2pte_valid(ptes[arm_btop(va)]) == 0)
   3521      1.62   thorpej 		goto out;
   3522       1.1      matt 
   3523       1.1      matt 	/* This can happen if user code tries to access kernel memory. */
   3524      1.81   thorpej 	if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV)
   3525      1.62   thorpej 		goto out;
   3526       1.1      matt 
   3527       1.1      matt 	/* Extract the physical address of the page */
   3528      1.62   thorpej 	pa = l2pte_pa(ptes[arm_btop(va)]);
   3529      1.49   thorpej 	if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
   3530      1.62   thorpej 		goto out;
   3531       1.1      matt 
   3532      1.63   thorpej 	simple_lock(&pg->mdpage.pvh_slock);
   3533      1.63   thorpej 
   3534       1.1      matt 	/*
    3535       1.1      matt 	 * OK, we just enable the PTE and mark the attributes as handled.
   3536      1.63   thorpej 	 * XXX Should we traverse the PV list and enable all PTEs?
   3537       1.1      matt 	 */
   3538      1.62   thorpej 	PDEBUG(0,
   3539      1.62   thorpej 	    printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n",
   3540      1.62   thorpej 	    va, ptes[arm_btop(va)]));
   3541      1.78   thorpej 	pg->mdpage.pvh_attrs |= PVF_REF;
   3542       1.1      matt 
   3543      1.84   thorpej 	ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO;
   3544      1.62   thorpej 	PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)]));
   3545      1.62   thorpej 
   3546      1.63   thorpej 	simple_unlock(&pg->mdpage.pvh_slock);
   3547      1.63   thorpej 
   3548       1.1      matt 	cpu_tlb_flushID_SE(va);
   3549      1.32   thorpej 	cpu_cpwait();
   3550      1.62   thorpej 	rv = 1;
   3551      1.62   thorpej  out:
   3552      1.62   thorpej 	pmap_unmap_ptes(pmap);			/* unlocks pmap */
   3553      1.63   thorpej 	PMAP_MAP_TO_HEAD_UNLOCK();
   3554      1.62   thorpej 	return (rv);
   3555       1.1      matt }
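
/*
 * Illustrative sketch: pmap_modified_emulation() and
 * pmap_handled_emulation() exist because the ARM MMU has no hardware
 * modified/referenced bits; pages stay read-only or invalid until a
 * fault proves the access happened.  A data-abort handler would
 * typically try both emulations before taking the real fault path.
 * The names and control flow below are illustrative, not the actual
 * trap code:
 */
#if 0
	if (pmap_modified_emulation(map->pmap, va))
		return;		/* page was writable; modified bit now set */
	if (pmap_handled_emulation(map->pmap, va))
		return;		/* page was valid; referenced bit now set */
	error = uvm_fault(map, va, 0, ftype);
#endif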
   3556      1.17     chris 
   3557       1.1      matt /*
   3558       1.1      matt  * pmap_collect: free resources held by a pmap
   3559       1.1      matt  *
   3560       1.1      matt  * => optional function.
   3561       1.1      matt  * => called when a process is swapped out to free memory.
   3562       1.1      matt  */
   3563       1.1      matt 
   3564       1.1      matt void
   3565      1.73   thorpej pmap_collect(struct pmap *pmap)
   3566       1.1      matt {
   3567       1.1      matt }
   3568       1.1      matt 
   3569       1.1      matt /*
   3570       1.1      matt  * Routine:	pmap_procwr
   3571       1.1      matt  *
   3572       1.1      matt  * Function:
    3573       1.1      matt  *	Synchronize caches corresponding to [va, va+len) in p.
   3574       1.1      matt  *
   3575       1.1      matt  */
   3576       1.1      matt void
   3577      1.73   thorpej pmap_procwr(struct proc *p, vaddr_t va, int len)
   3578       1.1      matt {
   3579       1.1      matt 	/* We only need to do anything if it is the current process. */
   3580       1.1      matt 	if (p == curproc)
   3581      1.36   thorpej 		cpu_icache_sync_range(va, len);
   3582      1.17     chris }
   3583      1.17     chris /*
   3584      1.17     chris  * PTP functions
   3585      1.17     chris  */
   3586      1.17     chris 
   3587      1.17     chris /*
   3588      1.17     chris  * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one)
   3589      1.17     chris  *
   3590      1.17     chris  * => pmap should NOT be pmap_kernel()
   3591      1.17     chris  * => pmap should be locked
   3592      1.17     chris  */
   3593      1.17     chris 
   3594      1.17     chris static struct vm_page *
   3595      1.57   thorpej pmap_get_ptp(struct pmap *pmap, vaddr_t va)
   3596      1.17     chris {
   3597      1.57   thorpej 	struct vm_page *ptp;
   3598      1.17     chris 
   3599      1.57   thorpej 	if (pmap_pde_page(pmap_pde(pmap, va))) {
   3600      1.17     chris 
   3601      1.57   thorpej 		/* valid... check hint (saves us a PA->PG lookup) */
   3602      1.57   thorpej 		if (pmap->pm_ptphint &&
   3603      1.81   thorpej 		    (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) ==
   3604      1.57   thorpej 		    VM_PAGE_TO_PHYS(pmap->pm_ptphint))
   3605      1.57   thorpej 			return (pmap->pm_ptphint);
   3606      1.57   thorpej 		ptp = uvm_pagelookup(&pmap->pm_obj, va);
   3607      1.17     chris #ifdef DIAGNOSTIC
   3608      1.57   thorpej 		if (ptp == NULL)
   3609      1.57   thorpej 			panic("pmap_get_ptp: unmanaged user PTP");
   3610      1.17     chris #endif
   3611      1.70   thorpej 		pmap->pm_ptphint = ptp;
   3612      1.57   thorpej 		return(ptp);
   3613      1.57   thorpej 	}
   3614      1.17     chris 
   3615      1.57   thorpej 	/* allocate a new PTP (updates ptphint) */
   3616      1.57   thorpej 	return(pmap_alloc_ptp(pmap, va));
   3617      1.17     chris }
   3618      1.17     chris 
   3619      1.17     chris /*
   3620      1.17     chris  * pmap_alloc_ptp: allocate a PTP for a PMAP
   3621      1.17     chris  *
   3622      1.17     chris  * => pmap should already be locked by caller
   3623      1.17     chris  * => we use the ptp's wire_count to count the number of active mappings
   3624      1.17     chris  *	in the PTP (we start it at one to prevent any chance this PTP
   3625      1.17     chris  *	will ever leak onto the active/inactive queues)
   3626      1.17     chris  */
   3627      1.17     chris 
   3628      1.17     chris /*__inline */ static struct vm_page *
   3629      1.57   thorpej pmap_alloc_ptp(struct pmap *pmap, vaddr_t va)
   3630      1.17     chris {
   3631      1.17     chris 	struct vm_page *ptp;
   3632      1.17     chris 
   3633      1.17     chris 	ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL,
   3634      1.17     chris 		UVM_PGA_USERESERVE|UVM_PGA_ZERO);
   3635      1.57   thorpej 	if (ptp == NULL)
   3636      1.17     chris 		return (NULL);
   3637      1.17     chris 
   3638      1.17     chris 	/* got one! */
   3639      1.17     chris 	ptp->flags &= ~PG_BUSY;	/* never busy */
   3640      1.17     chris 	ptp->wire_count = 1;	/* no mappings yet */
   3641      1.17     chris 	pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE);
   3642      1.17     chris 	pmap->pm_stats.resident_count++;	/* count PTP as resident */
   3643      1.70   thorpej 	pmap->pm_ptphint = ptp;
   3644      1.17     chris 	return (ptp);
   3645       1.1      matt }
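
/*
 * Illustrative note on the wire_count convention above: starting at 1
 * means a PTP carrying N live mappings has wire_count N + 1, so the
 * page always looks wired to UVM and can never drift onto the
 * active/inactive queues.  A hypothetical teardown test therefore
 * checks for <= 1 rather than 0:
 */
#if 0
	if (--ptp->wire_count <= 1) {
		/* no user mappings left; the PTP itself may be freed */
	}
#endif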
   3646      1.48     chris 
   3647      1.48     chris vaddr_t
   3648      1.73   thorpej pmap_growkernel(vaddr_t maxkvaddr)
   3649      1.48     chris {
   3650      1.48     chris 	struct pmap *kpm = pmap_kernel(), *pm;
   3651      1.48     chris 	int s;
   3652      1.48     chris 	paddr_t ptaddr;
   3653      1.48     chris 	struct vm_page *ptp;
   3654      1.48     chris 
   3655      1.48     chris 	if (maxkvaddr <= pmap_curmaxkvaddr)
   3656      1.48     chris 		goto out;		/* we are OK */
   3657      1.48     chris 	NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n",
   3658      1.48     chris 		    pmap_curmaxkvaddr, maxkvaddr));
   3659      1.48     chris 
   3660      1.48     chris 	/*
   3661      1.48     chris 	 * whoops!   we need to add kernel PTPs
   3662      1.48     chris 	 */
   3663      1.48     chris 
   3664      1.48     chris 	s = splhigh();	/* to be safe */
   3665      1.48     chris 	simple_lock(&kpm->pm_obj.vmobjlock);
    3666      1.48     chris 	/* due to the way the ARM pmap works, we map 4MB at a time */
   3667      1.70   thorpej 	for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr;
   3668      1.81   thorpej 	     pmap_curmaxkvaddr += 4 * L1_S_SIZE) {
   3669      1.48     chris 
   3670      1.48     chris 		if (uvm.page_init_done == FALSE) {
   3671      1.48     chris 
   3672      1.48     chris 			/*
   3673      1.48     chris 			 * we're growing the kernel pmap early (from
   3674      1.48     chris 			 * uvm_pageboot_alloc()).  this case must be
   3675      1.48     chris 			 * handled a little differently.
   3676      1.48     chris 			 */
   3677      1.48     chris 
   3678      1.48     chris 			if (uvm_page_physget(&ptaddr) == FALSE)
   3679      1.48     chris 				panic("pmap_growkernel: out of memory");
   3680      1.48     chris 			pmap_zero_page(ptaddr);
   3681      1.48     chris 
   3682      1.48     chris 			/* map this page in */
   3683      1.70   thorpej 			pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr, TRUE);
   3684      1.48     chris 
   3685      1.48     chris 			/* count PTP as resident */
   3686      1.48     chris 			kpm->pm_stats.resident_count++;
   3687      1.48     chris 			continue;
   3688      1.48     chris 		}
   3689      1.48     chris 
   3690      1.48     chris 		/*
   3691      1.48     chris 		 * THIS *MUST* BE CODED SO AS TO WORK IN THE
   3692      1.48     chris 		 * pmap_initialized == FALSE CASE!  WE MAY BE
   3693      1.48     chris 		 * INVOKED WHILE pmap_init() IS RUNNING!
   3694      1.48     chris 		 */
   3695      1.48     chris 
   3696      1.70   thorpej 		if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL)
   3697      1.48     chris 			panic("pmap_growkernel: alloc ptp failed");
   3698      1.48     chris 
   3699      1.48     chris 		/* distribute new kernel PTP to all active pmaps */
   3700      1.48     chris 		simple_lock(&pmaps_lock);
   3701      1.48     chris 		LIST_FOREACH(pm, &pmaps, pm_list) {
   3702      1.70   thorpej 			pmap_map_in_l1(pm, pmap_curmaxkvaddr,
   3703      1.70   thorpej 			    VM_PAGE_TO_PHYS(ptp), TRUE);
   3704      1.48     chris 		}
   3705  1.97.4.5        he 
   3706  1.97.4.5        he 		/* Invalidate the PTPT cache. */
   3707  1.97.4.5        he 		pool_cache_invalidate(&pmap_ptpt_cache);
   3708  1.97.4.5        he 		pmap_ptpt_cache_generation++;
   3709      1.48     chris 
   3710      1.48     chris 		simple_unlock(&pmaps_lock);
   3711      1.48     chris 	}
   3712      1.48     chris 
   3713      1.48     chris 	/*
    3714      1.48     chris 	 * flush out the TLB: expensive, but pmap_growkernel() happens
    3715      1.48     chris 	 * so rarely that the cost is acceptable
   3716      1.48     chris 	 */
   3717      1.48     chris 	cpu_tlb_flushD();
   3718      1.48     chris 	cpu_cpwait();
   3719      1.48     chris 
   3720      1.48     chris 	simple_unlock(&kpm->pm_obj.vmobjlock);
   3721      1.48     chris 	splx(s);
   3722      1.48     chris 
   3723      1.48     chris out:
   3724      1.48     chris 	return (pmap_curmaxkvaddr);
   3725      1.48     chris }
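
/*
 * Illustrative arithmetic for the 4MB step above: one 4KB PTP holds
 * 1024 small-page PTEs, i.e. four 1KB coarse L2 tables, and each
 * coarse table maps 1MB of VA, hence 4 * L1_S_SIZE per PTP.  A
 * hypothetical count of the PTPs a given growth needs:
 */
#if 0
	vsize_t need = maxkvaddr - pmap_curmaxkvaddr;
	int nptps = (need + (4 * L1_S_SIZE) - 1) / (4 * L1_S_SIZE);
#endif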
   3726      1.48     chris 
   3727      1.76   thorpej /************************ Utility routines ****************************/
   3728      1.76   thorpej 
   3729      1.76   thorpej /*
   3730      1.76   thorpej  * vector_page_setprot:
   3731      1.76   thorpej  *
   3732      1.76   thorpej  *	Manipulate the protection of the vector page.
   3733      1.76   thorpej  */
   3734      1.76   thorpej void
   3735      1.76   thorpej vector_page_setprot(int prot)
   3736      1.76   thorpej {
   3737      1.76   thorpej 	pt_entry_t *pte;
   3738      1.76   thorpej 
   3739      1.76   thorpej 	pte = vtopte(vector_page);
   3740      1.48     chris 
    3741      1.83   thorpej 	*pte = (*pte & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
   3742      1.76   thorpej 	cpu_tlb_flushD_SE(vector_page);
   3743      1.76   thorpej 	cpu_cpwait();
   3744      1.76   thorpej }
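
/*
 * Illustrative (hypothetical) use of vector_page_setprot(): a caller
 * installing new exception vectors might briefly open up the page and
 * then restore the stricter protection:
 */
#if 0
	vector_page_setprot(VM_PROT_READ|VM_PROT_WRITE);
	/* ... copy new exception vectors into place ... */
	vector_page_setprot(VM_PROT_READ);
#endif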
   3745       1.1      matt 
   3746      1.40   thorpej /************************ Bootstrapping routines ****************************/
   3747      1.40   thorpej 
   3748      1.40   thorpej /*
   3749      1.46   thorpej  * This list exists for the benefit of pmap_map_chunk().  It keeps track
   3750      1.46   thorpej  * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
   3751      1.46   thorpej  * find them as necessary.
   3752      1.46   thorpej  *
   3753      1.46   thorpej  * Note that the data on this list is not valid after initarm() returns.
   3754      1.46   thorpej  */
   3755      1.46   thorpej SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);
   3756      1.46   thorpej 
   3757      1.46   thorpej static vaddr_t
   3758      1.46   thorpej kernel_pt_lookup(paddr_t pa)
   3759      1.46   thorpej {
   3760      1.46   thorpej 	pv_addr_t *pv;
   3761      1.46   thorpej 
   3762      1.46   thorpej 	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
   3763      1.46   thorpej 		if (pv->pv_pa == pa)
   3764      1.46   thorpej 			return (pv->pv_va);
   3765      1.46   thorpej 	}
   3766      1.46   thorpej 	return (0);
   3767      1.46   thorpej }
   3768      1.46   thorpej 
   3769      1.46   thorpej /*
   3770      1.40   thorpej  * pmap_map_section:
   3771      1.40   thorpej  *
   3772      1.40   thorpej  *	Create a single section mapping.
   3773      1.40   thorpej  */
   3774      1.40   thorpej void
   3775      1.40   thorpej pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
   3776      1.40   thorpej {
   3777      1.40   thorpej 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3778      1.86   thorpej 	pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
   3779      1.40   thorpej 
   3780      1.81   thorpej 	KASSERT(((va | pa) & L1_S_OFFSET) == 0);
   3781      1.40   thorpej 
   3782      1.83   thorpej 	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
   3783      1.83   thorpej 	    L1_S_PROT(PTE_KERNEL, prot) | fl;
   3784      1.41   thorpej }
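
/*
 * Worked example (hypothetical addresses): mapping the 1MB section at
 * VA 0xc0000000 to PA 0x10000000 fills L1 slot 0xc0000000 >> L1_S_SHIFT
 * == 0xc00 with a section descriptor:
 */
#if 0
	pmap_map_section(l1pt, 0xc0000000, 0x10000000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	/* pde[0xc00] == L1_S_PROTO | 0x10000000 | prot bits | cache bits */
#endif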
   3785      1.41   thorpej 
   3786      1.41   thorpej /*
   3787      1.41   thorpej  * pmap_map_entry:
   3788      1.41   thorpej  *
   3789      1.41   thorpej  *	Create a single page mapping.
   3790      1.41   thorpej  */
   3791      1.41   thorpej void
   3792      1.47   thorpej pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache)
   3793      1.41   thorpej {
   3794      1.47   thorpej 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3795      1.86   thorpej 	pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
   3796      1.47   thorpej 	pt_entry_t *pte;
   3797      1.41   thorpej 
   3798      1.41   thorpej 	KASSERT(((va | pa) & PGOFSET) == 0);
   3799      1.41   thorpej 
   3800      1.81   thorpej 	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
   3801      1.47   thorpej 		panic("pmap_map_entry: no L2 table for VA 0x%08lx", va);
   3802      1.47   thorpej 
   3803      1.47   thorpej 	pte = (pt_entry_t *)
   3804      1.81   thorpej 	    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   3805      1.47   thorpej 	if (pte == NULL)
   3806      1.47   thorpej 		panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va);
   3807      1.47   thorpej 
   3808      1.83   thorpej 	pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   3809      1.83   thorpej 	    L2_S_PROT(PTE_KERNEL, prot) | fl;
   3810      1.42   thorpej }
   3811      1.42   thorpej 
   3812      1.42   thorpej /*
   3813      1.42   thorpej  * pmap_link_l2pt:
   3814      1.42   thorpej  *
   3815      1.42   thorpej  *	Link the L2 page table specified by "pa" into the L1
   3816      1.42   thorpej  *	page table at the slot for "va".
   3817      1.42   thorpej  */
   3818      1.42   thorpej void
   3819      1.46   thorpej pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv)
   3820      1.42   thorpej {
   3821      1.42   thorpej 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3822      1.81   thorpej 	u_int slot = va >> L1_S_SHIFT;
   3823      1.42   thorpej 
   3824      1.46   thorpej 	KASSERT((l2pv->pv_pa & PGOFSET) == 0);
   3825      1.46   thorpej 
   3826      1.83   thorpej 	pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000);
   3827      1.83   thorpej 	pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400);
   3828      1.83   thorpej 	pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800);
   3829      1.83   thorpej 	pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00);
   3830      1.42   thorpej 
   3831      1.46   thorpej 	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
   3832      1.43   thorpej }
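
/*
 * Illustrative note: an L1 slot covers 1MB and points at a 1KB coarse
 * L2 table, so the 4KB page of PTEs supplied here backs four
 * consecutive slots; that is why pmap_link_l2pt() fills four entries
 * at offsets 0x000, 0x400, 0x800 and 0xc00.  A hypothetical call
 * ("kernel_pt" being a page-aligned 4KB L2 page) covers
 * VA 0xc0400000-0xc07fffff:
 */
#if 0
	pmap_link_l2pt(l1pt, 0xc0400000, &kernel_pt);
#endif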
   3833      1.43   thorpej 
   3834      1.43   thorpej /*
   3835      1.43   thorpej  * pmap_map_chunk:
   3836      1.43   thorpej  *
   3837      1.43   thorpej  *	Map a chunk of memory using the most efficient mappings
   3838      1.43   thorpej  *	possible (section, large page, small page) into the
   3839      1.43   thorpej  *	provided L1 and L2 tables at the specified virtual address.
   3840      1.43   thorpej  */
   3841      1.43   thorpej vsize_t
   3842      1.46   thorpej pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size,
   3843      1.46   thorpej     int prot, int cache)
   3844      1.43   thorpej {
   3845      1.43   thorpej 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   3846      1.86   thorpej 	pt_entry_t *pte, fl;
   3847      1.43   thorpej 	vsize_t resid;
   3848      1.43   thorpej 	int i;
   3849      1.43   thorpej 
   3850      1.43   thorpej 	resid = (size + (NBPG - 1)) & ~(NBPG - 1);
   3851      1.43   thorpej 
   3852      1.44   thorpej 	if (l1pt == 0)
   3853      1.44   thorpej 		panic("pmap_map_chunk: no L1 table provided");
   3854      1.44   thorpej 
   3855      1.43   thorpej #ifdef VERBOSE_INIT_ARM
   3856      1.43   thorpej 	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
   3857      1.43   thorpej 	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
   3858      1.43   thorpej #endif
   3859      1.43   thorpej 
   3860      1.43   thorpej 	size = resid;
   3861      1.43   thorpej 
   3862      1.43   thorpej 	while (resid > 0) {
   3863      1.43   thorpej 		/* See if we can use a section mapping. */
   3864      1.81   thorpej 		if (((pa | va) & L1_S_OFFSET) == 0 &&
   3865      1.81   thorpej 		    resid >= L1_S_SIZE) {
   3866      1.86   thorpej 			fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0;
   3867      1.43   thorpej #ifdef VERBOSE_INIT_ARM
   3868      1.43   thorpej 			printf("S");
   3869      1.43   thorpej #endif
   3870      1.83   thorpej 			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
   3871      1.83   thorpej 			    L1_S_PROT(PTE_KERNEL, prot) | fl;
   3872      1.81   thorpej 			va += L1_S_SIZE;
   3873      1.81   thorpej 			pa += L1_S_SIZE;
   3874      1.81   thorpej 			resid -= L1_S_SIZE;
   3875      1.43   thorpej 			continue;
   3876      1.43   thorpej 		}
   3877      1.45   thorpej 
   3878      1.45   thorpej 		/*
   3879      1.45   thorpej 		 * Ok, we're going to use an L2 table.  Make sure
   3880      1.45   thorpej 		 * one is actually in the corresponding L1 slot
   3881      1.45   thorpej 		 * for the current VA.
   3882      1.45   thorpej 		 */
   3883      1.81   thorpej 		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
   3884      1.46   thorpej 			panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va);
   3885      1.46   thorpej 
   3886      1.46   thorpej 		pte = (pt_entry_t *)
   3887      1.81   thorpej 		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   3888      1.46   thorpej 		if (pte == NULL)
   3889      1.46   thorpej 			panic("pmap_map_chunk: can't find L2 table for VA"
    3890      1.46   thorpej 			    " 0x%08lx", va);
   3891      1.43   thorpej 
   3892      1.43   thorpej 		/* See if we can use a L2 large page mapping. */
   3893      1.81   thorpej 		if (((pa | va) & L2_L_OFFSET) == 0 &&
   3894      1.81   thorpej 		    resid >= L2_L_SIZE) {
   3895      1.86   thorpej 			fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0;
   3896      1.43   thorpej #ifdef VERBOSE_INIT_ARM
   3897      1.43   thorpej 			printf("L");
   3898      1.43   thorpej #endif
   3899      1.43   thorpej 			for (i = 0; i < 16; i++) {
   3900      1.43   thorpej 				pte[((va >> PGSHIFT) & 0x3f0) + i] =
   3901      1.83   thorpej 				    L2_L_PROTO | pa |
   3902      1.83   thorpej 				    L2_L_PROT(PTE_KERNEL, prot) | fl;
   3903      1.43   thorpej 			}
   3904      1.81   thorpej 			va += L2_L_SIZE;
   3905      1.81   thorpej 			pa += L2_L_SIZE;
   3906      1.81   thorpej 			resid -= L2_L_SIZE;
   3907      1.43   thorpej 			continue;
   3908      1.43   thorpej 		}
   3909      1.43   thorpej 
   3910      1.43   thorpej 		/* Use a small page mapping. */
   3911      1.86   thorpej 		fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0;
   3912      1.43   thorpej #ifdef VERBOSE_INIT_ARM
   3913      1.43   thorpej 		printf("P");
   3914      1.43   thorpej #endif
   3915      1.83   thorpej 		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   3916      1.83   thorpej 		    L2_S_PROT(PTE_KERNEL, prot) | fl;
   3917      1.43   thorpej 		va += NBPG;
   3918      1.43   thorpej 		pa += NBPG;
   3919      1.43   thorpej 		resid -= NBPG;
   3920      1.43   thorpej 	}
   3921      1.43   thorpej #ifdef VERBOSE_INIT_ARM
   3922      1.43   thorpej 	printf("\n");
   3923      1.43   thorpej #endif
   3924      1.43   thorpej 	return (size);
   3925      1.40   thorpej }
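
/*
 * Worked example (hypothetical, section-aligned va/pa): a chunk of
 * 0x141000 bytes decomposes greedily into one 1MB section, four 64KB
 * large pages and one 4KB small page, printing "SLLLLP" under
 * VERBOSE_INIT_ARM:
 */
#if 0
	pmap_map_chunk(l1pt, va, pa, 0x00141000,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
#endif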
   3926      1.85   thorpej 
   3927      1.85   thorpej /********************** PTE initialization routines **************************/
   3928      1.85   thorpej 
   3929      1.85   thorpej /*
   3930      1.85   thorpej  * These routines are called when the CPU type is identified to set up
   3931      1.85   thorpej  * the PTE prototypes, cache modes, etc.
   3932      1.85   thorpej  *
   3933      1.85   thorpej  * The variables are always here, just in case LKMs need to reference
    3934      1.85   thorpej  * them (though they shouldn't).
   3935      1.85   thorpej  */
   3936      1.85   thorpej 
   3937      1.86   thorpej pt_entry_t	pte_l1_s_cache_mode;
   3938      1.86   thorpej pt_entry_t	pte_l1_s_cache_mask;
   3939      1.86   thorpej 
   3940      1.86   thorpej pt_entry_t	pte_l2_l_cache_mode;
   3941      1.86   thorpej pt_entry_t	pte_l2_l_cache_mask;
   3942      1.86   thorpej 
   3943      1.86   thorpej pt_entry_t	pte_l2_s_cache_mode;
   3944      1.86   thorpej pt_entry_t	pte_l2_s_cache_mask;
   3945      1.85   thorpej 
   3946      1.85   thorpej pt_entry_t	pte_l2_s_prot_u;
   3947      1.85   thorpej pt_entry_t	pte_l2_s_prot_w;
   3948      1.85   thorpej pt_entry_t	pte_l2_s_prot_mask;
   3949      1.85   thorpej 
   3950      1.85   thorpej pt_entry_t	pte_l1_s_proto;
   3951      1.85   thorpej pt_entry_t	pte_l1_c_proto;
   3952      1.85   thorpej pt_entry_t	pte_l2_s_proto;
   3953      1.85   thorpej 
   3954      1.88   thorpej void		(*pmap_copy_page_func)(paddr_t, paddr_t);
   3955      1.88   thorpej void		(*pmap_zero_page_func)(paddr_t);
   3956      1.88   thorpej 
   3957      1.85   thorpej #if ARM_MMU_GENERIC == 1
   3958      1.85   thorpej void
   3959      1.85   thorpej pmap_pte_init_generic(void)
   3960      1.85   thorpej {
   3961      1.85   thorpej 
   3962      1.86   thorpej 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
   3963      1.86   thorpej 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;
   3964      1.86   thorpej 
   3965      1.86   thorpej 	pte_l2_l_cache_mode = L2_B|L2_C;
   3966      1.86   thorpej 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;
   3967      1.86   thorpej 
   3968      1.86   thorpej 	pte_l2_s_cache_mode = L2_B|L2_C;
   3969      1.86   thorpej 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;
   3970      1.85   thorpej 
   3971      1.85   thorpej 	pte_l2_s_prot_u = L2_S_PROT_U_generic;
   3972      1.85   thorpej 	pte_l2_s_prot_w = L2_S_PROT_W_generic;
   3973      1.85   thorpej 	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;
   3974      1.85   thorpej 
   3975      1.85   thorpej 	pte_l1_s_proto = L1_S_PROTO_generic;
   3976      1.85   thorpej 	pte_l1_c_proto = L1_C_PROTO_generic;
   3977      1.85   thorpej 	pte_l2_s_proto = L2_S_PROTO_generic;
   3978      1.88   thorpej 
   3979      1.88   thorpej 	pmap_copy_page_func = pmap_copy_page_generic;
   3980      1.88   thorpej 	pmap_zero_page_func = pmap_zero_page_generic;
   3981      1.85   thorpej }
   3982      1.85   thorpej 
   3983      1.85   thorpej #if defined(CPU_ARM9)
   3984      1.85   thorpej void
   3985      1.85   thorpej pmap_pte_init_arm9(void)
   3986      1.85   thorpej {
   3987      1.85   thorpej 
   3988      1.85   thorpej 	/*
   3989      1.85   thorpej 	 * ARM9 is compatible with generic, but we want to use
   3990      1.85   thorpej 	 * write-through caching for now.
   3991      1.85   thorpej 	 */
   3992      1.85   thorpej 	pmap_pte_init_generic();
   3993      1.86   thorpej 
   3994      1.86   thorpej 	pte_l1_s_cache_mode = L1_S_C;
   3995      1.86   thorpej 	pte_l2_l_cache_mode = L2_C;
   3996      1.86   thorpej 	pte_l2_s_cache_mode = L2_C;
   3997      1.85   thorpej }
   3998      1.85   thorpej #endif /* CPU_ARM9 */
   3999      1.85   thorpej #endif /* ARM_MMU_GENERIC == 1 */
   4000      1.85   thorpej 
   4001      1.85   thorpej #if ARM_MMU_XSCALE == 1
   4002      1.85   thorpej void
   4003      1.85   thorpej pmap_pte_init_xscale(void)
   4004      1.85   thorpej {
   4005      1.96   thorpej 	uint32_t auxctl;
   4006      1.85   thorpej 
   4007      1.96   thorpej 	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
   4008      1.86   thorpej 	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;
   4009      1.86   thorpej 
   4010      1.96   thorpej 	pte_l2_l_cache_mode = L2_B|L2_C;
   4011      1.86   thorpej 	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;
   4012      1.86   thorpej 
   4013      1.96   thorpej 	pte_l2_s_cache_mode = L2_B|L2_C;
   4014      1.86   thorpej 	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;
   4015      1.85   thorpej 
   4016      1.95   thorpej #ifdef XSCALE_CACHE_WRITE_THROUGH
   4017      1.95   thorpej 	/*
   4018      1.95   thorpej 	 * Some versions of the XScale core have various bugs in
   4019      1.95   thorpej 	 * their cache units, the work-around for which is to run
   4020      1.95   thorpej 	 * the cache in write-through mode.  Unfortunately, this
   4021      1.95   thorpej 	 * has a major (negative) impact on performance.  So, we
   4022      1.95   thorpej 	 * go ahead and run fast-and-loose, in the hopes that we
   4023      1.95   thorpej 	 * don't line up the planets in a way that will trip the
   4024      1.95   thorpej 	 * bugs.
   4025      1.95   thorpej 	 *
   4026      1.95   thorpej 	 * However, we give you the option to be slow-but-correct.
   4027      1.95   thorpej 	 */
   4028      1.95   thorpej 	pte_l1_s_cache_mode = L1_S_C;
   4029      1.95   thorpej 	pte_l2_l_cache_mode = L2_C;
   4030      1.95   thorpej 	pte_l2_s_cache_mode = L2_C;
   4031      1.95   thorpej #endif /* XSCALE_CACHE_WRITE_THROUGH */
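
	/*
	 * Note: XSCALE_CACHE_WRITE_THROUGH is a build-time choice; a
	 * kernel configuration would request the slow-but-correct mode
	 * with a line such as (hypothetical placement):
	 *
	 *	options 	XSCALE_CACHE_WRITE_THROUGH
	 */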
   4032      1.95   thorpej 
   4033      1.85   thorpej 	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
   4034      1.85   thorpej 	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
   4035      1.85   thorpej 	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;
   4036      1.85   thorpej 
   4037      1.85   thorpej 	pte_l1_s_proto = L1_S_PROTO_xscale;
   4038      1.85   thorpej 	pte_l1_c_proto = L1_C_PROTO_xscale;
   4039      1.85   thorpej 	pte_l2_s_proto = L2_S_PROTO_xscale;
   4040      1.88   thorpej 
   4041      1.88   thorpej 	pmap_copy_page_func = pmap_copy_page_xscale;
   4042      1.88   thorpej 	pmap_zero_page_func = pmap_zero_page_xscale;
   4043      1.96   thorpej 
   4044      1.96   thorpej 	/*
   4045      1.96   thorpej 	 * Disable ECC protection of page table access, for now.
   4046      1.96   thorpej 	 */
   4047      1.96   thorpej 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
   4048      1.96   thorpej 		: "=r" (auxctl));
   4049      1.96   thorpej 	auxctl &= ~XSCALE_AUXCTL_P;
   4050      1.96   thorpej 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
   4051      1.96   thorpej 		:
   4052      1.96   thorpej 		: "r" (auxctl));
   4053      1.85   thorpej }
   4054      1.87   thorpej 
   4055      1.87   thorpej /*
   4056      1.87   thorpej  * xscale_setup_minidata:
   4057      1.87   thorpej  *
   4058      1.87   thorpej  *	Set up the mini-data cache clean area.  We require the
   4059      1.87   thorpej  *	caller to allocate the right amount of physically and
   4060      1.87   thorpej  *	virtually contiguous space.
   4061      1.87   thorpej  */
   4062      1.87   thorpej void
   4063      1.87   thorpej xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa)
   4064      1.87   thorpej {
   4065      1.87   thorpej 	extern vaddr_t xscale_minidata_clean_addr;
   4066      1.87   thorpej 	extern vsize_t xscale_minidata_clean_size; /* already initialized */
   4067      1.87   thorpej 	pd_entry_t *pde = (pd_entry_t *) l1pt;
   4068      1.87   thorpej 	pt_entry_t *pte;
   4069      1.87   thorpej 	vsize_t size;
   4070      1.96   thorpej 	uint32_t auxctl;
   4071      1.87   thorpej 
   4072      1.87   thorpej 	xscale_minidata_clean_addr = va;
   4073      1.87   thorpej 
   4074      1.87   thorpej 	/* Round it to page size. */
   4075      1.87   thorpej 	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;
   4076      1.87   thorpej 
   4077      1.87   thorpej 	for (; size != 0;
   4078      1.87   thorpej 	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
   4079      1.87   thorpej 		pte = (pt_entry_t *)
   4080      1.87   thorpej 		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
   4081      1.87   thorpej 		if (pte == NULL)
   4082      1.87   thorpej 			panic("xscale_setup_minidata: can't find L2 table for "
   4083      1.87   thorpej 			    "VA 0x%08lx", va);
   4084      1.87   thorpej 		pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa |
   4085      1.87   thorpej 		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
   4086      1.87   thorpej 		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
   4087      1.87   thorpej 	}
   4088      1.96   thorpej 
   4089      1.96   thorpej 	/*
   4090      1.96   thorpej 	 * Configure the mini-data cache for write-back with
   4091      1.96   thorpej 	 * read/write-allocate.
   4092      1.96   thorpej 	 *
   4093      1.96   thorpej 	 * NOTE: In order to reconfigure the mini-data cache, we must
   4094      1.96   thorpej 	 * make sure it contains no valid data!  In order to do that,
   4095      1.96   thorpej 	 * we must issue a global data cache invalidate command!
   4096      1.96   thorpej 	 *
   4097      1.96   thorpej 	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
   4098      1.96   thorpej 	 * THIS IS VERY IMPORTANT!
   4099      1.96   thorpej 	 */
   4100      1.96   thorpej 
   4101      1.96   thorpej 	/* Invalidate data and mini-data. */
    4102      1.96   thorpej 	__asm __volatile("mcr p15, 0, %0, c7, c6, 0"
    4103      1.96   thorpej 		:
    4104      1.96   thorpej 		: "r" (0));	/* the register value is ignored here */
    4105      1.96   thorpej 
   4107      1.96   thorpej 	__asm __volatile("mrc p15, 0, %0, c1, c0, 1"
   4108      1.96   thorpej 		: "=r" (auxctl));
   4109      1.96   thorpej 	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
   4110      1.96   thorpej 	__asm __volatile("mcr p15, 0, %0, c1, c0, 1"
   4111      1.96   thorpej 		:
   4112      1.96   thorpej 		: "r" (auxctl));
   4113      1.87   thorpej }
   4114      1.85   thorpej #endif /* ARM_MMU_XSCALE == 1 */
   4115