/*	$NetBSD: pmap.c,v 1.48.18.1 2008/01/09 01:47:53 matt Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt (at) 3am-software.com> of Allegro Networks, Inc.
 *
 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl (at) kymasys.com>
 * of Kyma Systems LLC.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.48.18.1 2008/01/09 01:47:53 matt Exp $");

#include "opt_ppcarch.h"
#include "opt_altivec.h"
#include "opt_pmap.h"
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/device.h>		/* for evcnt */
#include <sys/systm.h>
#include <sys/atomic.h>

#if __NetBSD_Version__ < 105010000
#include <vm/vm.h>
#include <vm/vm_kern.h>
#define	splvm()		splimp()
#endif

#include <uvm/uvm.h>

#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <powerpc/spr.h>
#include <powerpc/oea/sr_601.h>
#include <powerpc/bat.h>
#include <powerpc/stdarg.h>

#if defined(DEBUG) || defined(PMAPCHECK)
#define	STATIC
#else
#define	STATIC	static
#endif

#ifdef ALTIVEC
int pmap_use_altivec;
#endif

volatile struct pteg *pmap_pteg_table;
unsigned int pmap_pteg_cnt;
unsigned int pmap_pteg_mask;
#ifdef PMAP_MEMLIMIT
paddr_t pmap_memlimit = PMAP_MEMLIMIT;
#else
paddr_t pmap_memlimit = -PAGE_SIZE;		/* there is no limit */
#endif

struct pmap kernel_pmap_;
unsigned int pmap_pages_stolen;
u_long pmap_pte_valid;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
u_long pmap_pvo_enter_depth;
u_long pmap_pvo_remove_depth;
#endif

int physmem;
#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;
static u_int mem_cnt, avail_cnt;

#ifdef __HAVE_PMAP_PHYSSEG
/*
 * This is a cache of referenced/modified bits.
 * Bits herein are shifted by ATTR_SHFT.
 */
#define	ATTR_SHFT	4
struct pmap_physseg pmap_physseg;
#endif

/*
 * The following structure is aligned to 32 bytes
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
	struct pte pvo_pte;			/* Prebuilt PTE */
	pmap_t pvo_pmap;			/* ptr to owning pmap */
	vaddr_t pvo_vaddr;			/* VA of entry */
#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
#define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
#define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
#define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
#define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ENTER_INSERT	0		/* PVO has been inserted */
#define	PVO_SPILL_UNSET		1		/* PVO has been evicted */
#define	PVO_SPILL_SET		2		/* PVO has been spilled */
#define	PVO_SPILL_INSERT	3		/* PVO has been inserted */
#define	PVO_PMAP_PAGE_PROTECT	4		/* PVO has changed */
#define	PVO_PMAP_PROTECT	5		/* PVO has changed */
#define	PVO_REMOVE		6		/* PVO has been removed */
#define	PVO_WHERE_MASK		15
#define	PVO_WHERE_SHFT		8
} __attribute__ ((aligned (32)));
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo,i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_WHERE(pvo,w)	\
	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
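
/*
 * A note on the encoding above (illustrative, derived from the
 * definitions): PVO_VADDR() masks off ADDR_POFF, so pvo_vaddr keeps the
 * page-aligned VA in its upper bits and reuses the low bits for state:
 *
 *	bits 0-2	PTEG slot index (PVO_PTEGIDX_MASK)
 *	bit  3		slot index is valid (PVO_PTEGIDX_VALID)
 *	bits 4-6	PVO_WIRED / PVO_MANAGED / PVO_EXECUTABLE
 *	bits 8-11	"where" code (PVO_WHERE_MASK << PVO_WHERE_SHFT)
 *
 * e.g. PVO_WHERE(pvo, SPILL_INSERT) records PVO_SPILL_INSERT (3) in
 * bits 8-11 without disturbing the VA or the other flags.
 */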

TAILQ_HEAD(pvo_tqhead, pvo_entry);
struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of kernel unmanaged pages */
struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

struct pool pmap_pool;		/* pool for pmap structures */
struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */

/*
 * We keep a cache of unmanaged pages to be used for pvo entries for
 * unmanaged pages.
 */
struct pvo_page {
	SIMPLEQ_ENTRY(pvo_page) pvop_link;
};
SIMPLEQ_HEAD(pvop_head, pvo_page);
struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
u_long pmap_upvop_free;
u_long pmap_upvop_maxfree;
u_long pmap_mpvop_free;
u_long pmap_mpvop_maxfree;
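
/*
 * A minimal sketch (illustrative only, not compiled; the helper name
 * pvop_get is hypothetical) of how such a SIMPLEQ-backed cache is
 * consumed.  The real pmap_pool_ualloc()/pmap_pool_ufree() appear
 * later in this file:
 */
#if 0
static struct pvo_page *
pvop_get(struct pvop_head *head, u_long *freecnt)
{
	struct pvo_page *pvop = SIMPLEQ_FIRST(head);

	if (pvop != NULL) {
		/* Pop the cached page and account for it. */
		SIMPLEQ_REMOVE_HEAD(head, pvop_link);
		(*freecnt)--;
	}
	return pvop;
}
#endif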

STATIC void *pmap_pool_ualloc(struct pool *, int);
STATIC void *pmap_pool_malloc(struct pool *, int);

STATIC void pmap_pool_ufree(struct pool *, void *);
STATIC void pmap_pool_mfree(struct pool *, void *);

static struct pool_allocator pmap_pool_mallocator = {
	.pa_alloc = pmap_pool_malloc,
	.pa_free = pmap_pool_mfree,
	.pa_pagesz = 0,
};

static struct pool_allocator pmap_pool_uallocator = {
	.pa_alloc = pmap_pool_ualloc,
	.pa_free = pmap_pool_ufree,
	.pa_pagesz = 0,
};

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void pmap_pte_print(volatile struct pte *);
void pmap_pteg_check(void);
void pmap_pteg_dist(void);
void pmap_print_pte(pmap_t, vaddr_t);
void pmap_print_mmuregs(void);
#endif

#if defined(DEBUG) || defined(PMAPCHECK)
#ifdef PMAPCHECK
int pmapcheck = 1;
#else
int pmapcheck = 0;
#endif
void pmap_pvo_verify(void);
STATIC void pmap_pvo_check(const struct pvo_entry *);
#define	PMAP_PVO_CHECK(pvo)	 		\
	do {					\
		if (pmapcheck)			\
			pmap_pvo_check(pvo);	\
	} while (0)
#else
#define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
#endif
STATIC int pmap_pte_insert(int, struct pte *);
STATIC int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
	vaddr_t, paddr_t, register_t, int);
STATIC void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
STATIC void pmap_pvo_free(struct pvo_entry *);
STATIC void pmap_pvo_free_list(struct pvo_head *);
STATIC struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
STATIC volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
STATIC struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
STATIC void pvo_set_exec(struct pvo_entry *);
STATIC void pvo_clear_exec(struct pvo_entry *);

STATIC void tlbia(void);

STATIC void pmap_release(pmap_t);
STATIC void *pmap_boot_find_memory(psize_t, psize_t, int);

static uint32_t pmap_pvo_reclaim_nextidx;
#ifdef DEBUG
static int pmap_pvo_reclaim_debugctr;
#endif

#define	VSID_NBPW	(sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static int pmap_initialized;

#if defined(DEBUG) || defined(PMAPDEBUG)
#define	PMAPDEBUG_BOOT		0x0001
#define	PMAPDEBUG_PTE		0x0002
#define	PMAPDEBUG_EXEC		0x0008
#define	PMAPDEBUG_PVOENTER	0x0010
#define	PMAPDEBUG_PVOREMOVE	0x0020
#define	PMAPDEBUG_ACTIVATE	0x0100
#define	PMAPDEBUG_CREATE	0x0200
#define	PMAPDEBUG_ENTER		0x1000
#define	PMAPDEBUG_KENTER	0x2000
#define	PMAPDEBUG_KREMOVE	0x4000
#define	PMAPDEBUG_REMOVE	0x8000

unsigned int pmapdebug = 0;

# define DPRINTF(x)		printf x
# define DPRINTFN(n, x)		do { if (pmapdebug & PMAPDEBUG_ ## n) printf x; } while (0)
#else
# define DPRINTF(x)
# define DPRINTFN(n, x)
#endif


#ifdef PMAPCOUNTERS
#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#define	PMAPCOUNT2(ev)	((ev).ev_count++)

struct evcnt pmap_evcnt_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "pages mapped");
struct evcnt pmap_evcnt_unmappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	    "pmap", "pages unmapped");

struct evcnt pmap_evcnt_kernel_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "kernel pages mapped");
struct evcnt pmap_evcnt_kernel_unmappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_kernel_mappings,
	    "pmap", "kernel pages unmapped");

struct evcnt pmap_evcnt_mappings_replaced =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "page mappings replaced");

struct evcnt pmap_evcnt_exec_mappings =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	    "pmap", "exec pages mapped");
struct evcnt pmap_evcnt_exec_cached =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_mappings,
	    "pmap", "exec pages cached");

struct evcnt pmap_evcnt_exec_synced =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages synced");
struct evcnt pmap_evcnt_exec_synced_clear_modify =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages synced (CM)");
struct evcnt pmap_evcnt_exec_synced_pvo_remove =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages synced (PR)");

struct evcnt pmap_evcnt_exec_uncached_page_protect =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (PP)");
struct evcnt pmap_evcnt_exec_uncached_clear_modify =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (CM)");
struct evcnt pmap_evcnt_exec_uncached_zero_page =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (ZP)");
struct evcnt pmap_evcnt_exec_uncached_copy_page =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (CP)");
struct evcnt pmap_evcnt_exec_uncached_pvo_remove =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, &pmap_evcnt_exec_mappings,
	    "pmap", "exec pages uncached (PR)");

struct evcnt pmap_evcnt_updates =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "updates");
struct evcnt pmap_evcnt_collects =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "collects");
struct evcnt pmap_evcnt_copies =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "copies");

struct evcnt pmap_evcnt_ptes_spilled =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes spilled from overflow");
struct evcnt pmap_evcnt_ptes_unspilled =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes not spilled");
struct evcnt pmap_evcnt_ptes_evicted =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes evicted");

struct evcnt pmap_evcnt_ptes_primary[8] = {
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[0]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[1]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[2]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[3]"),

    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[4]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[5]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[6]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at primary[7]"),
};
struct evcnt pmap_evcnt_ptes_secondary[8] = {
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[0]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[1]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[2]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[3]"),

    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[4]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[5]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[6]"),
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes added at secondary[7]"),
};
struct evcnt pmap_evcnt_ptes_removed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes removed");
struct evcnt pmap_evcnt_ptes_changed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "ptes changed");
struct evcnt pmap_evcnt_pvos_reclaimed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "pvos reclaimed");
struct evcnt pmap_evcnt_pvos_failed =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL,
	    "pmap", "pvo allocation failures");

/*
 * From pmap_subr.c
 */
extern struct evcnt pmap_evcnt_zeroed_pages;
extern struct evcnt pmap_evcnt_copied_pages;
extern struct evcnt pmap_evcnt_idlezeroed_pages;

EVCNT_ATTACH_STATIC(pmap_evcnt_mappings);
EVCNT_ATTACH_STATIC(pmap_evcnt_mappings_replaced);
EVCNT_ATTACH_STATIC(pmap_evcnt_unmappings);

EVCNT_ATTACH_STATIC(pmap_evcnt_kernel_mappings);
EVCNT_ATTACH_STATIC(pmap_evcnt_kernel_unmappings);

EVCNT_ATTACH_STATIC(pmap_evcnt_exec_mappings);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_cached);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_synced);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_synced_clear_modify);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_synced_pvo_remove);

EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_page_protect);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_clear_modify);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_zero_page);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_copy_page);
EVCNT_ATTACH_STATIC(pmap_evcnt_exec_uncached_pvo_remove);

EVCNT_ATTACH_STATIC(pmap_evcnt_zeroed_pages);
EVCNT_ATTACH_STATIC(pmap_evcnt_copied_pages);
EVCNT_ATTACH_STATIC(pmap_evcnt_idlezeroed_pages);

EVCNT_ATTACH_STATIC(pmap_evcnt_updates);
EVCNT_ATTACH_STATIC(pmap_evcnt_collects);
EVCNT_ATTACH_STATIC(pmap_evcnt_copies);

EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_spilled);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_unspilled);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_evicted);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_removed);
EVCNT_ATTACH_STATIC(pmap_evcnt_ptes_changed);

EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 0);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 1);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 2);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 3);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 4);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 5);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 6);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_primary, 7);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 0);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 1);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 2);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 3);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 4);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 5);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 6);
EVCNT_ATTACH_STATIC2(pmap_evcnt_ptes_secondary, 7);

EVCNT_ATTACH_STATIC(pmap_evcnt_pvos_reclaimed);
EVCNT_ATTACH_STATIC(pmap_evcnt_pvos_failed);
#else
#define	PMAPCOUNT(ev)	((void) 0)
#define	PMAPCOUNT2(ev)	((void) 0)
#endif

#define	TLBIE(va)	__asm volatile("tlbie %0" :: "r"(va))

/* XXXSL: this needs to be moved to assembler */
#define	TLBIEL(va)	__asm volatile("tlbie %0" :: "r"(va))

#define	TLBSYNC()	__asm volatile("tlbsync")
#define	SYNC()		__asm volatile("sync")
#define	EIEIO()		__asm volatile("eieio")
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	mtmsr(psl)
#define	MFPVR()		mfpvr()
#define	MFSRIN(va)	mfsrin(va)
#define	MFTB()		mfrtcltbl()

#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
static inline register_t
mfsrin(vaddr_t va)
{
	register_t sr;
	__asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
	return sr;
}
#endif	/* PPC_OEA || PPC_OEA64_BRIDGE */

#if defined (PPC_OEA64_BRIDGE)
extern void mfmsr64 (register64_t *result);
#endif /* PPC_OEA64_BRIDGE */

#define	PMAP_LOCK()		KERNEL_LOCK(1, NULL)
#define	PMAP_UNLOCK()		KERNEL_UNLOCK_ONE(NULL)

static inline register_t
pmap_interrupts_off(void)
{
	register_t msr = MFMSR();
	if (msr & PSL_EE)
		MTMSR(msr & ~PSL_EE);
	return msr;
}

static void
pmap_interrupts_restore(register_t msr)
{
	if (msr & PSL_EE)
		MTMSR(msr);
}
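
/*
 * Typical usage of the pair above (a sketch; callers later in this
 * file follow this shape around PTE/PTEG manipulation):
 *
 *	register_t msr = pmap_interrupts_off();
 *	... modify pmap_pteg_table entries ...
 *	pmap_interrupts_restore(msr);
 *
 * pmap_interrupts_restore() re-enables external interrupts only if
 * PSL_EE was set in the saved MSR, so the pair nests safely.
 */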

static inline u_int32_t
mfrtcltbl(void)
{

	if ((MFPVR() >> 16) == MPC601)
		return (mfrtcl() >> 7);
	else
		return (mftbl());
}

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

void
tlbia(void)
{
	char *i;

	SYNC();
#if defined(PPC_OEA)
	/*
	 * Why not use "tlbia"?  Because not all processors implement it.
	 *
	 * This needs to be a per-CPU callback to do the appropriate thing
	 * for the CPU. XXX
	 */
	for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
		SYNC();
	}
#elif defined (PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
	printf("Invalidating ALL TLB entries......\n");
	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
	for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
		TLBIEL(i);
		EIEIO();
		SYNC();
	}
#endif
	TLBSYNC();
	SYNC();
}

static inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#else /* PPC_OEA64 */
#if 0
	const struct ste *ste;
	register_t hash;
	int i;

	hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;

	/*
	 * Try the primary group first
	 */
	ste = pm->pm_stes[hash].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return ste;
	}

	/*
	 * Then the secondary group.
	 */
	ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return addr;
	}

	return NULL;
#else
	/*
	 * Rather than searching the STE groups for the VSID, we know
	 * how we generate that from the ESID and so do that.
	 */
	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#endif /* PPC_OEA */
}

static inline register_t
va_to_pteg(const struct pmap *pm, vaddr_t addr)
{
	register_t hash;

	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & pmap_pteg_mask;
}
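
/*
 * Illustrative note: the value above is the *primary* PTEG index for
 * (pm, addr).  The secondary index is its complement within the table;
 * pmap_pte_insert() below forms it as ptegidx ^ pmap_pteg_mask.
 */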

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address don't
 * technically exist in the PTE.  But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
 */
static vaddr_t
pmap_pte_to_va(volatile const struct pte *pt)
{
	vaddr_t va;
	uintptr_t ptaddr = (uintptr_t) pt;

	if (pt->pte_hi & PTE_HID)
		ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));

	/* PPC Bits 10-19  PPC64 Bits 42-51 */
#if defined(PPC_OEA)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
#elif defined (PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
#endif
	va <<= ADDR_PIDX_SHFT;

	/* PPC Bits 4-9  PPC64 Bits 36-41 */
	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;

#if defined(PPC_OEA64)
	/* PPC64 Bits 0-35 */
	/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
#elif defined(PPC_OEA) || defined(PPC_OEA64_BRIDGE)
	/* PPC Bits 0-3 */
	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
#endif

	return va;
}
#endif
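
/*
 * A sketch of why the XOR above works: pmap_pte_insert() stores a PTE
 * either at the primary PTEG (hash) or, with PTE_HID set, at the
 * secondary PTEG (hash ^ pmap_pteg_mask).  Undoing the HID XOR first
 * and then XORing the PTEG index with the VSID recovers the original
 * page-index hash, which is shifted back into place above.
 */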

static inline struct pvo_head *
pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
{
#ifdef __HAVE_VM_PAGE_MD
	struct vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg_p != NULL)
		*pg_p = pg;
	if (pg == NULL)
		return &pmap_pvo_unmanaged;
	return &pg->mdpage.mdpg_pvoh;
#endif
#ifdef __HAVE_PMAP_PHYSSEG
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (pg_p != NULL)
		*pg_p = pg;
	if (bank == -1)
		return &pmap_pvo_unmanaged;
	return &vm_physmem[bank].pmseg.pvoh[pg];
#endif
}

static inline struct pvo_head *
vm_page_to_pvoh(struct vm_page *pg)
{
#ifdef __HAVE_VM_PAGE_MD
	return &pg->mdpage.mdpg_pvoh;
#endif
#ifdef __HAVE_PMAP_PHYSSEG
	return pa_to_pvoh(VM_PAGE_TO_PHYS(pg), NULL);
#endif
}


#ifdef __HAVE_PMAP_PHYSSEG
static inline char *
pa_to_attr(paddr_t pa)
{
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
}
#endif

static inline void
pmap_attr_clear(struct vm_page *pg, int ptebit)
{
#ifdef __HAVE_PMAP_PHYSSEG
	*pa_to_attr(VM_PAGE_TO_PHYS(pg)) &= ~(ptebit >> ATTR_SHFT);
#endif
#ifdef __HAVE_VM_PAGE_MD
	pg->mdpage.mdpg_attrs &= ~ptebit;
#endif
}

static inline int
pmap_attr_fetch(struct vm_page *pg)
{
#ifdef __HAVE_PMAP_PHYSSEG
	return *pa_to_attr(VM_PAGE_TO_PHYS(pg)) << ATTR_SHFT;
#endif
#ifdef __HAVE_VM_PAGE_MD
	return pg->mdpage.mdpg_attrs;
#endif
}

static inline void
pmap_attr_save(struct vm_page *pg, int ptebit)
{
#ifdef __HAVE_PMAP_PHYSSEG
	*pa_to_attr(VM_PAGE_TO_PHYS(pg)) |= (ptebit >> ATTR_SHFT);
#endif
#ifdef __HAVE_VM_PAGE_MD
	pg->mdpage.mdpg_attrs |= ptebit;
#endif
}

static inline int
pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi
#if 0
	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
	        ~(PTE_REF|PTE_CHG)) == 0
#endif
	    )
		return 1;
	return 0;
}

static inline void
pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
{
	/*
	 * Construct the PTE.  Default to IMB initially.  Valid bit
	 * only gets set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
#if defined(PPC_OEA)
	pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = pte_lo;
#elif defined (PPC_OEA64_BRIDGE)
	pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = (u_int64_t) pte_lo;
#elif defined (PPC_OEA64)
#error PPC_OEA64 not supported
#endif /* PPC_OEA */
}

static inline void
pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
{
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
}

static inline void
pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
{
	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
}

static inline void
pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pvo_pt->pte_hi & PTE_VALID)
		panic("pte_set: setting an already valid pte %p", pvo_pt);
#endif
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1
	 * Note that the REF/CHG bits are from pvo_pt and thus should
	 * have been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	TLBSYNC();
	SYNC();
	pmap_pte_valid++;
}

static inline void
pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
	if ((pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
#endif

	pvo_pt->pte_hi &= ~PTE_VALID;
	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();
	/*
	 * Invalidate the pte ... (Section 7.6.3.3)
	 */
	pt->pte_hi &= ~PTE_VALID;
	SYNC();
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
	/*
	 * Save the ref & chg bits ...
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static inline void
pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}
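
/*
 * Summary of the update protocols above (per the architecture sections
 * cited in the comments): pmap_pte_set() makes a PTE valid by writing
 * pte_lo first, an eieio, then pte_hi; pmap_pte_unset() tears one down
 * by clearing PTE_VALID and flushing with tlbie/tlbsync before saving
 * REF/CHG; pmap_pte_change() is simply unset followed by set.
 */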

/*
 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
 * (either primary or secondary location).
 *
 * Note: both the destination and source PTEs must not have PTE_VALID set.
 */

STATIC int
pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
{
	volatile struct pte *pt;
	int i;

#if defined(DEBUG)
#if defined (PPC_OEA)
	DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%x 0x%x\n",
		ptegidx, (unsigned int) pvo_pt->pte_hi, (unsigned int) pvo_pt->pte_lo));
#elif defined (PPC_OEA64_BRIDGE)
	DPRINTFN(PTE, ("pmap_pte_insert: idx 0x%x, pte 0x%016llx 0x%016llx\n",
		ptegidx, (unsigned long long) pvo_pt->pte_hi,
		(unsigned long long) pvo_pt->pte_lo));

#endif
#endif
	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}
	return -1;
}
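
/*
 * A sketch of the caller's contract (see pmap_pte_spill() below for a
 * real use): a return of 0..7 is the slot index, to be recorded with
 * PVO_PTEGIDX_SET(); PTE_HID in pvo_pt->pte_hi records which hash won;
 * -1 means both PTEGs are full and the caller must evict or spill:
 *
 *	j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
 *	if (j >= 0)
 *		PVO_PTEGIDX_SET(pvo, j);
 */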

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * This runs in either real mode (when handling an exception spill)
 * or virtual mode (when manually spilling one of the kernel's pte
 * entries).  In either case, interrupts are already
 * disabled.
 */

int
pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
{
	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
	struct pvo_entry *pvo;
	/* XXX: gcc -- vpvoh is always set at either *1* or *2* */
	struct pvo_tqhead *pvoh, *vpvoh = NULL;
	int ptegidx, i, j;
	volatile struct pteg *pteg;
	volatile struct pte *pt;

	PMAP_LOCK();

	ptegidx = va_to_pteg(pm, addr);

	/*
	 * Have to substitute some entry. Use the primary hash for this.
	 * Use the low bits of the timebase as a random generator.  Make sure
	 * we are not picking a kernel pte for replacement.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	i = MFTB() & 7;
	for (j = 0; j < 8; j++) {
		pt = &pteg->pt[i];
		if ((pt->pte_hi & PTE_VALID) == 0 ||
		    VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
				!= KERNEL_VSIDBITS)
			break;
		i = (i + 1) & 7;
	}
	KASSERT(j < 8);

	source_pvo = NULL;
	victim_pvo = NULL;
	pvoh = &pmap_pvo_table[ptegidx];
	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {

		/*
     975        1.1      matt 		 * We need to find the pvo entry for this address...
    976        1.1      matt 		 */
    977        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
    978        1.1      matt 
    979        1.1      matt 		/*
    980        1.1      matt 		 * If we haven't found the source and we come to a PVO with
    981        1.1      matt 		 * a valid PTE, then we know we can't find it because all
     982        1.1      matt 		 * evicted PVOs are always first in the list.
    983        1.1      matt 		 */
    984        1.1      matt 		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
    985        1.1      matt 			break;
    986        1.2      matt 		if (source_pvo == NULL && pm == pvo->pvo_pmap &&
    987        1.2      matt 		    addr == PVO_VADDR(pvo)) {
    988        1.1      matt 
    989        1.1      matt 			/*
    990        1.1      matt 			 * Now we have found the entry to be spilled into the
    991        1.1      matt 			 * pteg.  Attempt to insert it into the page table.
    992        1.1      matt 			 */
    993        1.1      matt 			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
    994        1.1      matt 			if (j >= 0) {
    995        1.1      matt 				PVO_PTEGIDX_SET(pvo, j);
    996        1.1      matt 				PMAP_PVO_CHECK(pvo);	/* sanity check */
    997       1.12      matt 				PVO_WHERE(pvo, SPILL_INSERT);
    998        1.1      matt 				pvo->pvo_pmap->pm_evictions--;
    999        1.1      matt 				PMAPCOUNT(ptes_spilled);
   1000        1.1      matt 				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
   1001        1.1      matt 				    ? pmap_evcnt_ptes_secondary
   1002        1.1      matt 				    : pmap_evcnt_ptes_primary)[j]);
   1003        1.1      matt 
   1004        1.1      matt 				/*
   1005        1.1      matt 				 * Since we keep the evicted entries at the
    1006        1.1      matt 				 * front of the PVO list, we need to move this
   1007        1.1      matt 				 * (now resident) PVO after the evicted
   1008        1.1      matt 				 * entries.
   1009        1.1      matt 				 */
   1010        1.1      matt 				next_pvo = TAILQ_NEXT(pvo, pvo_olink);
   1011        1.1      matt 
   1012        1.1      matt 				/*
   1013        1.5      matt 				 * If we don't have to move (either we were the
   1014        1.5      matt 				 * last entry or the next entry was valid),
   1015        1.1      matt 				 * don't change our position.  Otherwise
   1016        1.1      matt 				 * move ourselves to the tail of the queue.
   1017        1.1      matt 				 */
   1018        1.1      matt 				if (next_pvo != NULL &&
   1019        1.1      matt 				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
   1020        1.1      matt 					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
   1021        1.1      matt 					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
   1022        1.1      matt 				}
   1023  1.48.18.1      matt 				PMAP_UNLOCK();
   1024        1.1      matt 				return 1;
   1025        1.1      matt 			}
   1026        1.1      matt 			source_pvo = pvo;
    1027       1.39      matt 			if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
                              				/* Don't leak the pmap lock on this early return. */
                              				PMAP_UNLOCK();
    1028       1.14       chs 				return 0;
    1029       1.14       chs 			}
   1030        1.1      matt 			if (victim_pvo != NULL)
   1031        1.1      matt 				break;
   1032        1.1      matt 		}
   1033        1.1      matt 
   1034        1.1      matt 		/*
   1035        1.1      matt 		 * We also need the pvo entry of the victim we are replacing
   1036        1.1      matt 		 * so save the R & C bits of the PTE.
   1037        1.1      matt 		 */
   1038        1.1      matt 		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
   1039        1.1      matt 		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
   1040       1.15    dyoung 			vpvoh = pvoh;			/* *1* */
   1041        1.1      matt 			victim_pvo = pvo;
   1042        1.1      matt 			if (source_pvo != NULL)
   1043        1.1      matt 				break;
   1044        1.1      matt 		}
   1045        1.1      matt 	}
   1046        1.1      matt 
   1047        1.1      matt 	if (source_pvo == NULL) {
   1048        1.1      matt 		PMAPCOUNT(ptes_unspilled);
   1049  1.48.18.1      matt 		PMAP_UNLOCK();
   1050        1.1      matt 		return 0;
   1051        1.1      matt 	}
   1052        1.1      matt 
   1053        1.1      matt 	if (victim_pvo == NULL) {
   1054        1.1      matt 		if ((pt->pte_hi & PTE_HID) == 0)
   1055        1.1      matt 			panic("pmap_pte_spill: victim p-pte (%p) has "
   1056        1.1      matt 			    "no pvo entry!", pt);
   1057        1.1      matt 
   1058        1.1      matt 		/*
   1059        1.1      matt 		 * If this is a secondary PTE, we need to search
   1060        1.1      matt 		 * its primary pvo bucket for the matching PVO.
   1061        1.1      matt 		 */
   1062       1.15    dyoung 		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; /* *2* */
   1063        1.1      matt 		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
   1064        1.1      matt 			PMAP_PVO_CHECK(pvo);		/* sanity check */
   1065        1.1      matt 
   1066        1.1      matt 			/*
   1067        1.1      matt 			 * We also need the pvo entry of the victim we are
   1068        1.1      matt 			 * replacing so save the R & C bits of the PTE.
   1069        1.1      matt 			 */
   1070        1.1      matt 			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
   1071        1.1      matt 				victim_pvo = pvo;
   1072        1.1      matt 				break;
   1073        1.1      matt 			}
   1074        1.1      matt 		}
   1075        1.1      matt 		if (victim_pvo == NULL)
   1076        1.1      matt 			panic("pmap_pte_spill: victim s-pte (%p) has "
   1077        1.1      matt 			    "no pvo entry!", pt);
   1078        1.1      matt 	}
   1079        1.1      matt 
   1080        1.1      matt 	/*
    1081       1.12      matt 	 * The victim should not be a kernel PVO/PTE entry.
   1082       1.12      matt 	 */
   1083       1.12      matt 	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
   1084       1.12      matt 	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
   1085       1.12      matt 	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);
   1086       1.12      matt 
   1087       1.12      matt 	/*
    1088        1.1      matt 	 * We are invalidating the TLB entry for the EA of the PTE
    1089        1.1      matt 	 * we are replacing even though it's valid; if we don't,
    1090        1.1      matt 	 * we lose any ref/chg bit changes contained in the TLB
    1091        1.1      matt 	 * entry.
   1092        1.1      matt 	 */
   1093        1.1      matt 	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
   1094        1.1      matt 
   1095        1.1      matt 	/*
   1096        1.1      matt 	 * To enforce the PVO list ordering constraint that all
   1097        1.1      matt 	 * evicted entries should come before all valid entries,
   1098        1.1      matt 	 * move the source PVO to the tail of its list and the
   1099        1.1      matt 	 * victim PVO to the head of its list (which might not be
   1100        1.1      matt 	 * the same list, if the victim was using the secondary hash).
   1101        1.1      matt 	 */
   1102        1.1      matt 	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
   1103        1.1      matt 	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
   1104        1.1      matt 	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
   1105        1.1      matt 	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
   1106        1.1      matt 	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
   1107        1.1      matt 	pmap_pte_set(pt, &source_pvo->pvo_pte);
   1108        1.1      matt 	victim_pvo->pvo_pmap->pm_evictions++;
   1109        1.1      matt 	source_pvo->pvo_pmap->pm_evictions--;
   1110       1.12      matt 	PVO_WHERE(victim_pvo, SPILL_UNSET);
   1111       1.12      matt 	PVO_WHERE(source_pvo, SPILL_SET);
   1112        1.1      matt 
   1113        1.1      matt 	PVO_PTEGIDX_CLR(victim_pvo);
   1114        1.1      matt 	PVO_PTEGIDX_SET(source_pvo, i);
   1115        1.1      matt 	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
   1116        1.1      matt 	PMAPCOUNT(ptes_spilled);
   1117        1.1      matt 	PMAPCOUNT(ptes_evicted);
   1118        1.1      matt 	PMAPCOUNT(ptes_removed);
   1119        1.1      matt 
   1120        1.1      matt 	PMAP_PVO_CHECK(victim_pvo);
   1121        1.1      matt 	PMAP_PVO_CHECK(source_pvo);
   1122  1.48.18.1      matt 
   1123  1.48.18.1      matt 	PMAP_UNLOCK();
   1124        1.1      matt 	return 1;
   1125        1.1      matt }
   1126        1.1      matt 
   1127        1.1      matt /*
   1128        1.1      matt  * Restrict given range to physical memory
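                               *
                               * For example, with a memory region [0x100000, 0x200000), a request
                               * of *start = 0xf0000, *size = 0x30000 is clamped to
                               * *start = 0x100000, *size = 0x20000; a range that overlaps no
                               * region ends up with *size = 0.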
   1129        1.1      matt  */
   1130        1.1      matt void
   1131        1.1      matt pmap_real_memory(paddr_t *start, psize_t *size)
   1132        1.1      matt {
   1133        1.1      matt 	struct mem_region *mp;
   1134        1.1      matt 
   1135        1.1      matt 	for (mp = mem; mp->size; mp++) {
   1136        1.1      matt 		if (*start + *size > mp->start
   1137        1.1      matt 		    && *start < mp->start + mp->size) {
   1138        1.1      matt 			if (*start < mp->start) {
   1139        1.1      matt 				*size -= mp->start - *start;
   1140        1.1      matt 				*start = mp->start;
   1141        1.1      matt 			}
   1142        1.1      matt 			if (*start + *size > mp->start + mp->size)
   1143        1.1      matt 				*size = mp->start + mp->size - *start;
   1144        1.1      matt 			return;
   1145        1.1      matt 		}
   1146        1.1      matt 	}
   1147        1.1      matt 	*size = 0;
   1148        1.1      matt }
   1149        1.1      matt 
   1150        1.1      matt /*
   1151        1.1      matt  * Initialize anything else for pmap handling.
   1152        1.1      matt  * Called during vm_init().
   1153        1.1      matt  */
   1154        1.1      matt void
   1155        1.1      matt pmap_init(void)
   1156        1.1      matt {
   1157        1.1      matt #ifdef __HAVE_PMAP_PHYSSEG
   1158        1.1      matt 	struct pvo_tqhead *pvoh;
   1159        1.1      matt 	int bank;
   1160        1.1      matt 	long sz;
   1161        1.1      matt 	char *attr;
   1162        1.1      matt 
   1163        1.1      matt 	pvoh = pmap_physseg.pvoh;
   1164        1.1      matt 	attr = pmap_physseg.attrs;
   1165        1.1      matt 	for (bank = 0; bank < vm_nphysseg; bank++) {
   1166        1.1      matt 		sz = vm_physmem[bank].end - vm_physmem[bank].start;
   1167        1.1      matt 		vm_physmem[bank].pmseg.pvoh = pvoh;
   1168        1.1      matt 		vm_physmem[bank].pmseg.attrs = attr;
   1169        1.1      matt 		for (; sz > 0; sz--, pvoh++, attr++) {
   1170        1.1      matt 			TAILQ_INIT(pvoh);
   1171        1.1      matt 			*attr = 0;
   1172        1.1      matt 		}
   1173        1.1      matt 	}
   1174        1.1      matt #endif
   1175        1.1      matt 
   1176        1.1      matt 	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
   1177        1.1      matt 	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
   1178       1.48        ad 	    &pmap_pool_mallocator, IPL_NONE);
   1179        1.1      matt 
   1180        1.1      matt 	pool_setlowat(&pmap_mpvo_pool, 1008);
   1181        1.1      matt 
   1182        1.1      matt 	pmap_initialized = 1;
   1183        1.1      matt 
   1184        1.1      matt }
   1185        1.1      matt 
   1186        1.1      matt /*
   1187       1.10   thorpej  * How much virtual space does the kernel get?
   1188       1.10   thorpej  */
   1189       1.10   thorpej void
   1190       1.10   thorpej pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1191       1.10   thorpej {
   1192       1.10   thorpej 	/*
   1193       1.10   thorpej 	 * For now, reserve one segment (minus some overhead) for kernel
   1194       1.10   thorpej 	 * virtual memory
   1195       1.10   thorpej 	 */
   1196       1.10   thorpej 	*start = VM_MIN_KERNEL_ADDRESS;
   1197       1.10   thorpej 	*end = VM_MAX_KERNEL_ADDRESS;
   1198       1.10   thorpej }
   1199       1.10   thorpej 
   1200       1.10   thorpej /*
   1201        1.1      matt  * Allocate, initialize, and return a new physical map.
   1202        1.1      matt  */
   1203        1.1      matt pmap_t
   1204        1.1      matt pmap_create(void)
   1205        1.1      matt {
   1206        1.1      matt 	pmap_t pm;
   1207       1.38   sanjayl 
   1208        1.1      matt 	pm = pool_get(&pmap_pool, PR_WAITOK);
   1209       1.46  christos 	memset((void *)pm, 0, sizeof *pm);
   1210        1.1      matt 	pmap_pinit(pm);
   1211        1.1      matt 
   1212        1.1      matt 	DPRINTFN(CREATE,("pmap_create: pm %p:\n"
   1213       1.18      matt 	    "\t%06x %06x %06x %06x    %06x %06x %06x %06x\n"
   1214       1.18      matt 	    "\t%06x %06x %06x %06x    %06x %06x %06x %06x\n", pm,
   1215       1.19       mjl 	    (unsigned int) pm->pm_sr[0], (unsigned int) pm->pm_sr[1],
   1216       1.19       mjl 	    (unsigned int) pm->pm_sr[2], (unsigned int) pm->pm_sr[3],
   1217       1.19       mjl 	    (unsigned int) pm->pm_sr[4], (unsigned int) pm->pm_sr[5],
   1218       1.19       mjl 	    (unsigned int) pm->pm_sr[6], (unsigned int) pm->pm_sr[7],
   1219       1.19       mjl 	    (unsigned int) pm->pm_sr[8], (unsigned int) pm->pm_sr[9],
   1220       1.19       mjl 	    (unsigned int) pm->pm_sr[10], (unsigned int) pm->pm_sr[11],
   1221       1.19       mjl 	    (unsigned int) pm->pm_sr[12], (unsigned int) pm->pm_sr[13],
   1222       1.19       mjl 	    (unsigned int) pm->pm_sr[14], (unsigned int) pm->pm_sr[15]));
   1223        1.1      matt 	return pm;
   1224        1.1      matt }
   1225        1.1      matt 
   1226        1.1      matt /*
   1227        1.1      matt  * Initialize a preallocated and zeroed pmap structure.
   1228        1.1      matt  */
   1229        1.1      matt void
   1230        1.1      matt pmap_pinit(pmap_t pm)
   1231        1.1      matt {
   1232        1.2      matt 	register_t entropy = MFTB();
   1233        1.2      matt 	register_t mask;
   1234        1.2      matt 	int i;
   1235        1.1      matt 
   1236        1.1      matt 	/*
   1237        1.1      matt 	 * Allocate some segment registers for this pmap.
   1238        1.1      matt 	 */
   1239        1.1      matt 	pm->pm_refs = 1;
   1240  1.48.18.1      matt 	PMAP_LOCK();
   1241        1.2      matt 	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
   1242        1.2      matt 		static register_t pmap_vsidcontext;
   1243        1.2      matt 		register_t hash;
   1244        1.2      matt 		unsigned int n;
   1245        1.1      matt 
    1246        1.1      matt 		/* Create a new value by multiplying by a prime and adding in
   1247        1.1      matt 		 * entropy from the timebase register.  This is to make the
   1248        1.1      matt 		 * VSID more random so that the PT Hash function collides
   1249        1.1      matt 		 * less often. (note that the prime causes gcc to do shifts
   1250        1.1      matt 		 * instead of a multiply)
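                              		 *
                              		 * hash & (NPMAPS-1) selects a candidate VSID; the in-use
                              		 * map is pmap_vsid_bitmap[], where "n" indexes the word
                              		 * (the >> 5 below matches VSID_NBPW == 32) and "mask"
                              		 * selects the bit within it.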
   1251        1.1      matt 		 */
   1252        1.1      matt 		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
   1253        1.1      matt 		hash = pmap_vsidcontext & (NPMAPS - 1);
   1254       1.23   aymeric 		if (hash == 0) {		/* 0 is special, avoid it */
   1255       1.23   aymeric 			entropy += 0xbadf00d;
   1256        1.1      matt 			continue;
   1257       1.23   aymeric 		}
   1258        1.1      matt 		n = hash >> 5;
   1259        1.2      matt 		mask = 1L << (hash & (VSID_NBPW-1));
   1260        1.2      matt 		hash = pmap_vsidcontext;
   1261        1.1      matt 		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
   1262        1.1      matt 			/* anything free in this bucket? */
   1263        1.2      matt 			if (~pmap_vsid_bitmap[n] == 0) {
   1264       1.23   aymeric 				entropy = hash ^ (hash >> 16);
   1265        1.1      matt 				continue;
   1266        1.1      matt 			}
   1267        1.1      matt 			i = ffs(~pmap_vsid_bitmap[n]) - 1;
   1268        1.2      matt 			mask = 1L << i;
   1269        1.2      matt 			hash &= ~(VSID_NBPW-1);
   1270        1.1      matt 			hash |= i;
   1271        1.1      matt 		}
   1272       1.18      matt 		hash &= PTE_VSID >> PTE_VSID_SHFT;
   1273        1.1      matt 		pmap_vsid_bitmap[n] |= mask;
   1274       1.18      matt 		pm->pm_vsid = hash;
   1275       1.38   sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
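                              		/*
                              		 * Install the 16 segment registers: each embeds this
                              		 * pmap's VSID and protection key, and starts out
                              		 * no-execute (pvo_set_exec() clears SR_NOEXEC on the
                              		 * first executable mapping in a segment).
                              		 */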
   1276        1.1      matt 		for (i = 0; i < 16; i++)
   1277       1.14       chs 			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
   1278       1.14       chs 			    SR_NOEXEC;
   1279       1.18      matt #endif
   1280  1.48.18.1      matt 		PMAP_UNLOCK();
   1281        1.1      matt 		return;
   1282        1.1      matt 	}
   1283  1.48.18.1      matt 	PMAP_UNLOCK();
   1284        1.1      matt 	panic("pmap_pinit: out of segments");
   1285        1.1      matt }
   1286        1.1      matt 
   1287        1.1      matt /*
   1288        1.1      matt  * Add a reference to the given pmap.
   1289        1.1      matt  */
   1290        1.1      matt void
   1291        1.1      matt pmap_reference(pmap_t pm)
   1292        1.1      matt {
   1293  1.48.18.1      matt 	atomic_inc_uint(&pm->pm_refs);
   1294        1.1      matt }
   1295        1.1      matt 
   1296        1.1      matt /*
   1297        1.1      matt  * Retire the given pmap from service.
   1298        1.1      matt  * Should only be called if the map contains no valid mappings.
   1299        1.1      matt  */
   1300        1.1      matt void
   1301        1.1      matt pmap_destroy(pmap_t pm)
   1302        1.1      matt {
   1303  1.48.18.1      matt 	if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
   1304        1.1      matt 		pmap_release(pm);
   1305        1.1      matt 		pool_put(&pmap_pool, pm);
   1306        1.1      matt 	}
   1307        1.1      matt }
   1308        1.1      matt 
   1309        1.1      matt /*
   1310        1.1      matt  * Release any resources held by the given physical map.
   1311        1.1      matt  * Called when a pmap initialized by pmap_pinit is being released.
   1312        1.1      matt  */
   1313        1.1      matt void
   1314        1.1      matt pmap_release(pmap_t pm)
   1315        1.1      matt {
   1316        1.1      matt 	int idx, mask;
   1317       1.39      matt 
   1318       1.39      matt 	KASSERT(pm->pm_stats.resident_count == 0);
   1319       1.39      matt 	KASSERT(pm->pm_stats.wired_count == 0);
   1320        1.1      matt 
   1321  1.48.18.1      matt 	PMAP_LOCK();
   1322        1.1      matt 	if (pm->pm_sr[0] == 0)
   1323        1.1      matt 		panic("pmap_release");
   1324       1.22   aymeric 	idx = pm->pm_vsid & (NPMAPS-1);
   1325        1.1      matt 	mask = 1 << (idx % VSID_NBPW);
   1326        1.1      matt 	idx /= VSID_NBPW;
   1327       1.22   aymeric 
   1328       1.22   aymeric 	KASSERT(pmap_vsid_bitmap[idx] & mask);
   1329        1.1      matt 	pmap_vsid_bitmap[idx] &= ~mask;
   1330  1.48.18.1      matt 	PMAP_UNLOCK();
   1331        1.1      matt }
   1332        1.1      matt 
   1333        1.1      matt /*
   1334        1.1      matt  * Copy the range specified by src_addr/len
   1335        1.1      matt  * from the source map to the range dst_addr/len
   1336        1.1      matt  * in the destination map.
   1337        1.1      matt  *
   1338        1.1      matt  * This routine is only advisory and need not do anything.
   1339        1.1      matt  */
   1340        1.1      matt void
   1341        1.1      matt pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
   1342        1.1      matt 	vsize_t len, vaddr_t src_addr)
   1343        1.1      matt {
   1344        1.1      matt 	PMAPCOUNT(copies);
   1345        1.1      matt }
   1346        1.1      matt 
   1347        1.1      matt /*
   1348        1.1      matt  * Require that all active physical maps contain no
   1349        1.1      matt  * incorrect entries NOW.
   1350        1.1      matt  */
   1351        1.1      matt void
   1352        1.1      matt pmap_update(struct pmap *pmap)
   1353        1.1      matt {
   1354        1.1      matt 	PMAPCOUNT(updates);
   1355        1.1      matt 	TLBSYNC();
   1356        1.1      matt }
   1357        1.1      matt 
   1358        1.1      matt /*
   1359        1.1      matt  * Garbage collects the physical map system for
   1360        1.1      matt  * pages which are no longer used.
   1361        1.1      matt  * Success need not be guaranteed -- that is, there
   1362        1.1      matt  * may well be pages which are not referenced, but
   1363        1.1      matt  * others may be collected.
   1364        1.1      matt  * Called by the pageout daemon when pages are scarce.
   1365        1.1      matt  */
   1366        1.1      matt void
   1367        1.1      matt pmap_collect(pmap_t pm)
   1368        1.1      matt {
   1369        1.1      matt 	PMAPCOUNT(collects);
   1370        1.1      matt }
   1371        1.1      matt 
   1372       1.35     perry static inline int
   1373        1.1      matt pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
   1374        1.1      matt {
   1375        1.1      matt 	int pteidx;
   1376        1.1      matt 	/*
   1377        1.1      matt 	 * We can find the actual pte entry without searching by
   1378        1.1      matt 	 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9]
   1379        1.1      matt 	 * and by noticing the HID bit.
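                              	 *
                              	 * Each PTEG holds 8 PTEs, so the flat index is ptegidx * 8 plus
                              	 * the slot.  If HID is set, the entry lives in the alternate
                              	 * PTEG, whose index is (ptegidx ^ pmap_pteg_mask), hence the
                              	 * XOR with pmap_pteg_mask * 8.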
   1380        1.1      matt 	 */
   1381        1.1      matt 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
   1382        1.1      matt 	if (pvo->pvo_pte.pte_hi & PTE_HID)
   1383        1.1      matt 		pteidx ^= pmap_pteg_mask * 8;
   1384        1.1      matt 	return pteidx;
   1385        1.1      matt }
   1386        1.1      matt 
   1387        1.2      matt volatile struct pte *
   1388        1.1      matt pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
   1389        1.1      matt {
   1390        1.2      matt 	volatile struct pte *pt;
   1391        1.1      matt 
   1392        1.1      matt #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
   1393        1.1      matt 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
   1394        1.1      matt 		return NULL;
   1395        1.1      matt #endif
   1396        1.1      matt 
   1397        1.1      matt 	/*
   1398        1.1      matt 	 * If we haven't been supplied the ptegidx, calculate it.
   1399        1.1      matt 	 */
   1400        1.1      matt 	if (pteidx == -1) {
   1401        1.1      matt 		int ptegidx;
   1402        1.2      matt 		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
   1403        1.1      matt 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
   1404        1.1      matt 	}
   1405        1.1      matt 
   1406        1.1      matt 	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
   1407        1.1      matt 
   1408        1.1      matt #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
   1409        1.1      matt 	return pt;
   1410        1.1      matt #else
   1411        1.1      matt 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
   1412        1.1      matt 		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
   1413        1.1      matt 		    "pvo but no valid pte index", pvo);
   1414        1.1      matt 	}
   1415        1.1      matt 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
   1416        1.1      matt 		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
   1417        1.1      matt 		    "pvo but no valid pte", pvo);
   1418        1.1      matt 	}
   1419        1.1      matt 
   1420        1.1      matt 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
   1421        1.1      matt 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
   1422        1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1423        1.1      matt 			pmap_pte_print(pt);
   1424        1.1      matt #endif
   1425        1.1      matt 			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
   1426        1.1      matt 			    "pmap_pteg_table %p but invalid in pvo",
   1427        1.1      matt 			    pvo, pt);
   1428        1.1      matt 		}
   1429        1.1      matt 		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
   1430        1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1431        1.1      matt 			pmap_pte_print(pt);
   1432        1.1      matt #endif
   1433        1.1      matt 			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
   1434        1.1      matt 			    "not match pte %p in pmap_pteg_table",
   1435        1.1      matt 			    pvo, pt);
   1436        1.1      matt 		}
   1437        1.1      matt 		return pt;
   1438        1.1      matt 	}
   1439        1.1      matt 
   1440        1.1      matt 	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
   1441        1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1442        1.1      matt 		pmap_pte_print(pt);
   1443        1.1      matt #endif
    1444       1.12      matt 		panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
   1445        1.1      matt 		    "pmap_pteg_table but valid in pvo", pvo, pt);
   1446        1.1      matt 	}
   1447        1.1      matt 	return NULL;
   1448        1.1      matt #endif	/* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
   1449        1.1      matt }
   1450        1.1      matt 
   1451        1.1      matt struct pvo_entry *
   1452        1.1      matt pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
   1453        1.1      matt {
   1454        1.1      matt 	struct pvo_entry *pvo;
   1455        1.1      matt 	int ptegidx;
   1456        1.1      matt 
   1457        1.1      matt 	va &= ~ADDR_POFF;
   1458        1.2      matt 	ptegidx = va_to_pteg(pm, va);
   1459        1.1      matt 
   1460        1.1      matt 	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   1461        1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1462        1.1      matt 		if ((uintptr_t) pvo >= SEGMENT_LENGTH)
   1463        1.1      matt 			panic("pmap_pvo_find_va: invalid pvo %p on "
   1464        1.1      matt 			    "list %#x (%p)", pvo, ptegidx,
   1465        1.1      matt 			     &pmap_pvo_table[ptegidx]);
   1466        1.1      matt #endif
   1467        1.1      matt 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
   1468        1.1      matt 			if (pteidx_p)
   1469        1.1      matt 				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
   1470        1.1      matt 			return pvo;
   1471        1.1      matt 		}
   1472        1.1      matt 	}
   1473       1.38   sanjayl 	if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
   1474  1.48.18.1      matt 		panic("%s: returning NULL for %s pmap, va: 0x%08lx\n", __func__,
    1475       1.38   sanjayl 		    (pm == pmap_kernel() ? "kernel" : "user"), va);
   1476        1.1      matt 	return NULL;
   1477        1.1      matt }
   1478        1.1      matt 
   1479        1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1480        1.1      matt void
   1481        1.1      matt pmap_pvo_check(const struct pvo_entry *pvo)
   1482        1.1      matt {
   1483        1.1      matt 	struct pvo_head *pvo_head;
   1484        1.1      matt 	struct pvo_entry *pvo0;
   1485        1.2      matt 	volatile struct pte *pt;
   1486        1.1      matt 	int failed = 0;
   1487        1.1      matt 
   1488  1.48.18.1      matt 	PMAP_LOCK();
   1489  1.48.18.1      matt 
   1490        1.1      matt 	if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
   1491        1.1      matt 		panic("pmap_pvo_check: pvo %p: invalid address", pvo);
   1492        1.1      matt 
   1493        1.1      matt 	if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
   1494        1.1      matt 		printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
   1495        1.1      matt 		    pvo, pvo->pvo_pmap);
   1496        1.1      matt 		failed = 1;
   1497        1.1      matt 	}
   1498        1.1      matt 
   1499        1.1      matt 	if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
   1500        1.1      matt 	    (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
   1501        1.1      matt 		printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
   1502        1.1      matt 		    pvo, TAILQ_NEXT(pvo, pvo_olink));
   1503        1.1      matt 		failed = 1;
   1504        1.1      matt 	}
   1505        1.1      matt 
   1506        1.1      matt 	if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
   1507        1.1      matt 	    (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
    1508        1.1      matt 		printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n",
   1509        1.1      matt 		    pvo, LIST_NEXT(pvo, pvo_vlink));
   1510        1.1      matt 		failed = 1;
   1511        1.1      matt 	}
   1512        1.1      matt 
   1513       1.39      matt 	if (PVO_MANAGED_P(pvo)) {
   1514        1.1      matt 		pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
   1515        1.1      matt 	} else {
   1516        1.1      matt 		if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
   1517        1.1      matt 			printf("pmap_pvo_check: pvo %p: non kernel address "
   1518        1.1      matt 			    "on kernel unmanaged list\n", pvo);
   1519        1.1      matt 			failed = 1;
   1520        1.1      matt 		}
   1521        1.1      matt 		pvo_head = &pmap_pvo_kunmanaged;
   1522        1.1      matt 	}
   1523        1.1      matt 	LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
   1524        1.1      matt 		if (pvo0 == pvo)
   1525        1.1      matt 			break;
   1526        1.1      matt 	}
   1527        1.1      matt 	if (pvo0 == NULL) {
   1528        1.1      matt 		printf("pmap_pvo_check: pvo %p: not present "
   1529        1.1      matt 		    "on its vlist head %p\n", pvo, pvo_head);
   1530        1.1      matt 		failed = 1;
   1531        1.1      matt 	}
   1532        1.1      matt 	if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
   1533        1.1      matt 		printf("pmap_pvo_check: pvo %p: not present "
   1534        1.1      matt 		    "on its olist head\n", pvo);
   1535        1.1      matt 		failed = 1;
   1536        1.1      matt 	}
   1537        1.1      matt 	pt = pmap_pvo_to_pte(pvo, -1);
   1538        1.1      matt 	if (pt == NULL) {
   1539        1.1      matt 		if (pvo->pvo_pte.pte_hi & PTE_VALID) {
   1540        1.1      matt 			printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
   1541        1.1      matt 			    "no PTE\n", pvo);
   1542        1.1      matt 			failed = 1;
   1543        1.1      matt 		}
   1544        1.1      matt 	} else {
   1545        1.1      matt 		if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
   1546        1.1      matt 		    (uintptr_t) pt >=
   1547        1.1      matt 		    (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
   1548        1.1      matt 			printf("pmap_pvo_check: pvo %p: pte %p not in "
   1549        1.1      matt 			    "pteg table\n", pvo, pt);
   1550        1.1      matt 			failed = 1;
   1551        1.1      matt 		}
   1552        1.1      matt 		if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
    1553        1.1      matt 			printf("pmap_pvo_check: pvo %p: pte slot index does "
    1554        1.1      matt 			    "not match PVO_PTEGIDX\n", pvo);
   1555        1.1      matt 			failed = 1;
   1556        1.1      matt 		}
   1557        1.1      matt 		if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
   1558        1.1      matt 			printf("pmap_pvo_check: pvo %p: pte_hi differ: "
    1559       1.19       mjl 			    "%#x/%#x\n", pvo, (unsigned int) pvo->pvo_pte.pte_hi,
                              			    (unsigned int) pt->pte_hi);
   1560        1.1      matt 			failed = 1;
   1561        1.1      matt 		}
   1562        1.1      matt 		if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
   1563        1.1      matt 		    (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
   1564        1.1      matt 			printf("pmap_pvo_check: pvo %p: pte_lo differ: "
   1565       1.18      matt 			    "%#x/%#x\n", pvo,
   1566       1.19       mjl 			    (unsigned int) (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)),
   1567       1.19       mjl 			    (unsigned int) (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)));
   1568        1.1      matt 			failed = 1;
   1569        1.1      matt 		}
   1570        1.1      matt 		if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
   1571        1.1      matt 			printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#lx"
    1572        1.1      matt 			    " doesn't match PVO's VA %#lx\n",
   1573        1.1      matt 			    pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
   1574        1.1      matt 			failed = 1;
   1575        1.1      matt 		}
   1576        1.1      matt 		if (failed)
   1577        1.1      matt 			pmap_pte_print(pt);
   1578        1.1      matt 	}
   1579        1.1      matt 	if (failed)
   1580        1.1      matt 		panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
   1581        1.1      matt 		    pvo->pvo_pmap);
   1582  1.48.18.1      matt 
   1583  1.48.18.1      matt 	PMAP_UNLOCK();
   1584        1.1      matt }
   1585        1.1      matt #endif /* DEBUG || PMAPCHECK */
   1586        1.1      matt 
   1587        1.1      matt /*
   1588       1.25       chs  * Search the PVO table looking for a non-wired entry.
   1589       1.25       chs  * If we find one, remove it and return it.
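                               * The scan is round-robin: it resumes just past the bucket where the
                               * previous reclaim stopped (pmap_pvo_reclaim_nextidx), so evictions
                               * are spread across the whole table.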
   1590       1.25       chs  */
   1591       1.25       chs 
   1592       1.25       chs struct pvo_entry *
   1593       1.25       chs pmap_pvo_reclaim(struct pmap *pm)
   1594       1.25       chs {
   1595       1.25       chs 	struct pvo_tqhead *pvoh;
   1596       1.25       chs 	struct pvo_entry *pvo;
   1597       1.25       chs 	uint32_t idx, endidx;
   1598       1.25       chs 
   1599       1.25       chs 	endidx = pmap_pvo_reclaim_nextidx;
   1600       1.25       chs 	for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
   1601       1.25       chs 	     idx = (idx + 1) & pmap_pteg_mask) {
   1602       1.25       chs 		pvoh = &pmap_pvo_table[idx];
   1603       1.25       chs 		TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
   1604       1.39      matt 			if (!PVO_WIRED_P(pvo)) {
   1605       1.33       chs 				pmap_pvo_remove(pvo, -1, NULL);
   1606       1.25       chs 				pmap_pvo_reclaim_nextidx = idx;
   1607       1.26      matt 				PMAPCOUNT(pvos_reclaimed);
   1608       1.25       chs 				return pvo;
   1609       1.25       chs 			}
   1610       1.25       chs 		}
   1611       1.25       chs 	}
   1612       1.25       chs 	return NULL;
   1613       1.25       chs }
   1614       1.25       chs 
   1615       1.25       chs /*
    1616        1.1      matt  * Enter a mapping into the PVO table.  Returns 0 on success, or
                               * ENOMEM if no pvo entry could be allocated and PMAP_CANFAIL was set.
   1617        1.1      matt  */
   1618        1.1      matt int
   1619        1.1      matt pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
   1620        1.2      matt 	vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
   1621        1.1      matt {
   1622        1.1      matt 	struct pvo_entry *pvo;
   1623        1.1      matt 	struct pvo_tqhead *pvoh;
   1624        1.2      matt 	register_t msr;
   1625        1.1      matt 	int ptegidx;
   1626        1.1      matt 	int i;
   1627        1.1      matt 	int poolflags = PR_NOWAIT;
   1628        1.1      matt 
   1629       1.28       chs 	/*
   1630       1.28       chs 	 * Compute the PTE Group index.
   1631       1.28       chs 	 */
   1632       1.28       chs 	va &= ~ADDR_POFF;
   1633       1.28       chs 	ptegidx = va_to_pteg(pm, va);
   1634       1.28       chs 
   1635       1.28       chs 	msr = pmap_interrupts_off();
   1636       1.28       chs 
   1637        1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1638        1.1      matt 	if (pmap_pvo_remove_depth > 0)
   1639        1.1      matt 		panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
   1640        1.1      matt 	if (++pmap_pvo_enter_depth > 1)
   1641        1.1      matt 		panic("pmap_pvo_enter: called recursively!");
   1642        1.1      matt #endif
   1643        1.1      matt 
   1644        1.1      matt 	/*
    1645        1.1      matt 	 * Remove any existing mapping for this page.  The old pvo
    1646        1.1      matt 	 * entry is freed and a fresh one is allocated below.
   1647        1.1      matt 	 */
   1648        1.1      matt 	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   1649        1.1      matt 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
   1650        1.1      matt #ifdef DEBUG
   1651        1.1      matt 			if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
   1652        1.1      matt 			    ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
   1653        1.1      matt 			    ~(PTE_REF|PTE_CHG)) == 0 &&
   1654        1.1      matt 			   va < VM_MIN_KERNEL_ADDRESS) {
   1655       1.18      matt 				printf("pmap_pvo_enter: pvo %p: dup %#x/%#lx\n",
    1656       1.19       mjl 				    pvo, (unsigned int) pvo->pvo_pte.pte_lo,
                              				    (unsigned long) (pte_lo|pa));
   1657       1.18      matt 				printf("pmap_pvo_enter: pte_hi=%#x sr=%#x\n",
   1658       1.19       mjl 				    (unsigned int) pvo->pvo_pte.pte_hi,
   1659       1.19       mjl 				    (unsigned int) pm->pm_sr[va >> ADDR_SR_SHFT]);
   1660        1.1      matt 				pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
   1661        1.1      matt #ifdef DDBX
   1662        1.1      matt 				Debugger();
   1663        1.1      matt #endif
   1664        1.1      matt 			}
   1665        1.1      matt #endif
   1666        1.1      matt 			PMAPCOUNT(mappings_replaced);
   1667       1.33       chs 			pmap_pvo_remove(pvo, -1, NULL);
   1668        1.1      matt 			break;
   1669        1.1      matt 		}
   1670        1.1      matt 	}
   1671        1.1      matt 
   1672        1.1      matt 	/*
    1673        1.1      matt 	 * Allocate a pvo entry; interrupts are restored across the allocation.
   1674        1.1      matt 	 */
   1675       1.26      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1676       1.26      matt 	--pmap_pvo_enter_depth;
   1677       1.26      matt #endif
   1678        1.1      matt 	pmap_interrupts_restore(msr);
   1679       1.33       chs 	if (pvo) {
   1680       1.33       chs 		pmap_pvo_free(pvo);
   1681       1.33       chs 	}
   1682        1.1      matt 	pvo = pool_get(pl, poolflags);
   1683       1.25       chs 
   1684       1.25       chs #ifdef DEBUG
   1685       1.25       chs 	/*
   1686       1.25       chs 	 * Exercise pmap_pvo_reclaim() a little.
   1687       1.25       chs 	 */
   1688       1.25       chs 	if (pvo && (flags & PMAP_CANFAIL) != 0 &&
   1689       1.25       chs 	    pmap_pvo_reclaim_debugctr++ > 0x1000 &&
   1690       1.25       chs 	    (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
   1691       1.25       chs 		pool_put(pl, pvo);
   1692       1.25       chs 		pvo = NULL;
   1693       1.25       chs 	}
   1694       1.25       chs #endif
   1695       1.25       chs 
   1696        1.1      matt 	msr = pmap_interrupts_off();
   1697       1.26      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1698       1.26      matt 	++pmap_pvo_enter_depth;
   1699       1.26      matt #endif
   1700        1.1      matt 	if (pvo == NULL) {
   1701        1.1      matt 		pvo = pmap_pvo_reclaim(pm);
   1702        1.1      matt 		if (pvo == NULL) {
   1703        1.1      matt 			if ((flags & PMAP_CANFAIL) == 0)
   1704        1.1      matt 				panic("pmap_pvo_enter: failed");
   1705        1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1706        1.1      matt 			pmap_pvo_enter_depth--;
   1707        1.1      matt #endif
   1708       1.26      matt 			PMAPCOUNT(pvos_failed);
   1709        1.1      matt 			pmap_interrupts_restore(msr);
   1710        1.1      matt 			return ENOMEM;
   1711        1.1      matt 		}
   1712        1.1      matt 	}
   1713       1.25       chs 
   1714        1.1      matt 	pvo->pvo_vaddr = va;
   1715        1.1      matt 	pvo->pvo_pmap = pm;
   1716        1.1      matt 	pvo->pvo_vaddr &= ~ADDR_POFF;
   1717        1.1      matt 	if (flags & VM_PROT_EXECUTE) {
   1718        1.1      matt 		PMAPCOUNT(exec_mappings);
   1719       1.14       chs 		pvo_set_exec(pvo);
   1720        1.1      matt 	}
   1721        1.1      matt 	if (flags & PMAP_WIRED)
   1722        1.1      matt 		pvo->pvo_vaddr |= PVO_WIRED;
   1723        1.1      matt 	if (pvo_head != &pmap_pvo_kunmanaged) {
   1724        1.1      matt 		pvo->pvo_vaddr |= PVO_MANAGED;
   1725        1.1      matt 		PMAPCOUNT(mappings);
   1726        1.1      matt 	} else {
   1727        1.1      matt 		PMAPCOUNT(kernel_mappings);
   1728        1.1      matt 	}
   1729        1.2      matt 	pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
   1730        1.1      matt 
   1731        1.1      matt 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
   1732       1.39      matt 	if (PVO_WIRED_P(pvo))
   1733        1.1      matt 		pvo->pvo_pmap->pm_stats.wired_count++;
   1734        1.1      matt 	pvo->pvo_pmap->pm_stats.resident_count++;
   1735        1.1      matt #if defined(DEBUG)
   1736       1.38   sanjayl /*	if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
   1737        1.1      matt 		DPRINTFN(PVOENTER,
   1738        1.1      matt 		    ("pmap_pvo_enter: pvo %p: pm %p va %#lx pa %#lx\n",
   1739        1.1      matt 		    pvo, pm, va, pa));
   1740        1.1      matt #endif
   1741        1.1      matt 
   1742        1.1      matt 	/*
   1743        1.1      matt 	 * We hope this succeeds but it isn't required.
   1744        1.1      matt 	 */
   1745        1.1      matt 	pvoh = &pmap_pvo_table[ptegidx];
   1746        1.1      matt 	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
   1747        1.1      matt 	if (i >= 0) {
   1748        1.1      matt 		PVO_PTEGIDX_SET(pvo, i);
   1749       1.12      matt 		PVO_WHERE(pvo, ENTER_INSERT);
   1750        1.1      matt 		PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
   1751        1.1      matt 		    ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
   1752        1.1      matt 		TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
   1753       1.38   sanjayl 
   1754        1.1      matt 	} else {
   1755        1.1      matt 		/*
   1756        1.1      matt 		 * Since we didn't have room for this entry (which makes it
    1757        1.1      matt 		 * an evicted entry), place it at the head of the list.
   1758        1.1      matt 		 */
   1759        1.1      matt 		TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
   1760        1.1      matt 		PMAPCOUNT(ptes_evicted);
   1761        1.1      matt 		pm->pm_evictions++;
   1762       1.12      matt 		/*
   1763       1.12      matt 		 * If this is a kernel page, make sure it's active.
   1764       1.12      matt 		 */
   1765       1.12      matt 		if (pm == pmap_kernel()) {
   1766       1.45   thorpej 			i = pmap_pte_spill(pm, va, false);
   1767       1.12      matt 			KASSERT(i);
   1768       1.12      matt 		}
   1769        1.1      matt 	}
   1770        1.1      matt 	PMAP_PVO_CHECK(pvo);		/* sanity check */
   1771        1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1772        1.1      matt 	pmap_pvo_enter_depth--;
   1773        1.1      matt #endif
   1774        1.1      matt 	pmap_interrupts_restore(msr);
   1775        1.1      matt 	return 0;
   1776        1.1      matt }
   1777        1.1      matt 
   1778        1.1      matt void
   1779       1.33       chs pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
   1780        1.1      matt {
   1781        1.2      matt 	volatile struct pte *pt;
   1782        1.1      matt 	int ptegidx;
   1783        1.1      matt 
   1784        1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1785        1.1      matt 	if (++pmap_pvo_remove_depth > 1)
   1786        1.1      matt 		panic("pmap_pvo_remove: called recursively!");
   1787        1.1      matt #endif
   1788        1.1      matt 
   1789        1.1      matt 	/*
   1790        1.1      matt 	 * If we haven't been supplied the ptegidx, calculate it.
   1791        1.1      matt 	 */
   1792        1.1      matt 	if (pteidx == -1) {
   1793        1.2      matt 		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
   1794        1.1      matt 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
   1795        1.1      matt 	} else {
   1796        1.1      matt 		ptegidx = pteidx >> 3;
   1797        1.1      matt 		if (pvo->pvo_pte.pte_hi & PTE_HID)
   1798        1.1      matt 			ptegidx ^= pmap_pteg_mask;
   1799        1.1      matt 	}
   1800        1.1      matt 	PMAP_PVO_CHECK(pvo);		/* sanity check */
   1801        1.1      matt 
   1802        1.1      matt 	/*
   1803        1.1      matt 	 * If there is an active pte entry, we need to deactivate it
   1804        1.1      matt 	 * (and save the ref & chg bits).
   1805        1.1      matt 	 */
   1806        1.1      matt 	pt = pmap_pvo_to_pte(pvo, pteidx);
   1807        1.1      matt 	if (pt != NULL) {
   1808        1.1      matt 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   1809       1.12      matt 		PVO_WHERE(pvo, REMOVE);
   1810        1.1      matt 		PVO_PTEGIDX_CLR(pvo);
   1811        1.1      matt 		PMAPCOUNT(ptes_removed);
   1812        1.1      matt 	} else {
   1813        1.1      matt 		KASSERT(pvo->pvo_pmap->pm_evictions > 0);
   1814        1.1      matt 		pvo->pvo_pmap->pm_evictions--;
   1815        1.1      matt 	}
   1816        1.1      matt 
   1817        1.1      matt 	/*
   1818       1.14       chs 	 * Account for executable mappings.
   1819       1.14       chs 	 */
   1820       1.39      matt 	if (PVO_EXECUTABLE_P(pvo))
   1821       1.14       chs 		pvo_clear_exec(pvo);
   1822       1.14       chs 
   1823       1.14       chs 	/*
   1824       1.14       chs 	 * Update our statistics.
   1825        1.1      matt 	 */
   1826        1.1      matt 	pvo->pvo_pmap->pm_stats.resident_count--;
   1827       1.39      matt 	if (PVO_WIRED_P(pvo))
   1828        1.1      matt 		pvo->pvo_pmap->pm_stats.wired_count--;
   1829        1.1      matt 
   1830        1.1      matt 	/*
   1831        1.1      matt 	 * Save the REF/CHG bits into their cache if the page is managed.
   1832        1.1      matt 	 */
   1833       1.39      matt 	if (PVO_MANAGED_P(pvo)) {
   1834        1.2      matt 		register_t ptelo = pvo->pvo_pte.pte_lo;
   1835        1.1      matt 		struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
   1836        1.1      matt 
   1837        1.1      matt 		if (pg != NULL) {
   1838       1.37      matt 			/*
   1839       1.37      matt 			 * If this page was changed and it is mapped exec,
   1840       1.37      matt 			 * invalidate it.
   1841       1.37      matt 			 */
   1842       1.37      matt 			if ((ptelo & PTE_CHG) &&
   1843       1.37      matt 			    (pmap_attr_fetch(pg) & PTE_EXEC)) {
   1844       1.37      matt 				struct pvo_head *pvoh = vm_page_to_pvoh(pg);
   1845       1.37      matt 				if (LIST_EMPTY(pvoh)) {
   1846       1.37      matt 					DPRINTFN(EXEC, ("[pmap_pvo_remove: "
   1847       1.37      matt 					    "%#lx: clear-exec]\n",
   1848       1.37      matt 					    VM_PAGE_TO_PHYS(pg)));
   1849       1.37      matt 					pmap_attr_clear(pg, PTE_EXEC);
   1850       1.37      matt 					PMAPCOUNT(exec_uncached_pvo_remove);
   1851       1.37      matt 				} else {
   1852       1.37      matt 					DPRINTFN(EXEC, ("[pmap_pvo_remove: "
   1853       1.37      matt 					    "%#lx: syncicache]\n",
   1854       1.37      matt 					    VM_PAGE_TO_PHYS(pg)));
   1855       1.37      matt 					pmap_syncicache(VM_PAGE_TO_PHYS(pg),
   1856       1.37      matt 					    PAGE_SIZE);
   1857       1.37      matt 					PMAPCOUNT(exec_synced_pvo_remove);
   1858       1.37      matt 				}
   1859       1.37      matt 			}
   1860       1.37      matt 
   1861        1.1      matt 			pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
   1862        1.1      matt 		}
   1863        1.1      matt 		PMAPCOUNT(unmappings);
   1864        1.1      matt 	} else {
   1865        1.1      matt 		PMAPCOUNT(kernel_unmappings);
   1866        1.1      matt 	}
   1867        1.1      matt 
   1868        1.1      matt 	/*
   1869        1.1      matt 	 * Remove the PVO from its lists and return it to the pool.
   1870        1.1      matt 	 */
   1871        1.1      matt 	LIST_REMOVE(pvo, pvo_vlink);
   1872        1.1      matt 	TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
   1873       1.33       chs 	if (pvol) {
   1874       1.33       chs 		LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
   1875       1.25       chs 	}
   1876        1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1877        1.1      matt 	pmap_pvo_remove_depth--;
   1878        1.1      matt #endif
   1879        1.1      matt }
   1880        1.1      matt 
   1881       1.33       chs void
   1882       1.33       chs pmap_pvo_free(struct pvo_entry *pvo)
   1883       1.33       chs {
   1884       1.33       chs 
   1885       1.39      matt 	pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
   1886       1.33       chs }
   1887       1.33       chs 
   1888       1.33       chs void
   1889       1.33       chs pmap_pvo_free_list(struct pvo_head *pvol)
   1890       1.33       chs {
   1891       1.33       chs 	struct pvo_entry *pvo, *npvo;
   1892       1.33       chs 
   1893       1.33       chs 	for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
   1894       1.33       chs 		npvo = LIST_NEXT(pvo, pvo_vlink);
   1895       1.33       chs 		LIST_REMOVE(pvo, pvo_vlink);
   1896       1.33       chs 		pmap_pvo_free(pvo);
   1897       1.33       chs 	}
   1898       1.33       chs }
   1899       1.33       chs 
   1900        1.1      matt /*
   1901       1.14       chs  * Mark a mapping as executable.
   1902       1.14       chs  * If this is the first executable mapping in the segment,
   1903       1.14       chs  * clear the noexec flag.
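                               * pm_exec[] counts executable mappings per 256MB segment; SR_NOEXEC
                               * in the corresponding segment register is toggled only on the
                               * 0 <-> 1 transitions (here and in pvo_clear_exec()).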
   1904       1.14       chs  */
   1905       1.14       chs STATIC void
   1906       1.14       chs pvo_set_exec(struct pvo_entry *pvo)
   1907       1.14       chs {
   1908       1.14       chs 	struct pmap *pm = pvo->pvo_pmap;
   1909       1.14       chs 
   1910       1.39      matt 	if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
   1911       1.14       chs 		return;
   1912       1.14       chs 	}
   1913       1.14       chs 	pvo->pvo_vaddr |= PVO_EXECUTABLE;
   1914       1.38   sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
   1915       1.18      matt 	{
   1916       1.18      matt 		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
   1917       1.18      matt 		if (pm->pm_exec[sr]++ == 0) {
   1918       1.18      matt 			pm->pm_sr[sr] &= ~SR_NOEXEC;
   1919       1.18      matt 		}
   1920       1.14       chs 	}
   1921       1.18      matt #endif
   1922       1.14       chs }
   1923       1.14       chs 
   1924       1.14       chs /*
   1925       1.14       chs  * Mark a mapping as non-executable.
   1926       1.14       chs  * If this was the last executable mapping in the segment,
   1927       1.14       chs  * set the noexec flag.
   1928       1.14       chs  */
   1929       1.14       chs STATIC void
   1930       1.14       chs pvo_clear_exec(struct pvo_entry *pvo)
   1931       1.14       chs {
   1932       1.14       chs 	struct pmap *pm = pvo->pvo_pmap;
   1933       1.14       chs 
   1934       1.39      matt 	if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
   1935       1.14       chs 		return;
   1936       1.14       chs 	}
   1937       1.14       chs 	pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
   1938       1.38   sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
   1939       1.18      matt 	{
   1940       1.18      matt 		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
   1941       1.18      matt 		if (--pm->pm_exec[sr] == 0) {
   1942       1.18      matt 			pm->pm_sr[sr] |= SR_NOEXEC;
   1943       1.18      matt 		}
   1944       1.14       chs 	}
   1945       1.18      matt #endif
   1946       1.14       chs }
   1947       1.14       chs 
   1948       1.14       chs /*
   1949        1.1      matt  * Insert physical page at pa into the given pmap at virtual address va.
   1950        1.1      matt  */
   1951        1.1      matt int
   1952        1.1      matt pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
   1953        1.1      matt {
   1954        1.1      matt 	struct mem_region *mp;
   1955        1.1      matt 	struct pvo_head *pvo_head;
   1956        1.1      matt 	struct vm_page *pg;
   1957        1.1      matt 	struct pool *pl;
   1958        1.2      matt 	register_t pte_lo;
   1959        1.1      matt 	int error;
   1960        1.1      matt 	u_int pvo_flags;
   1961        1.1      matt 	u_int was_exec = 0;
   1962        1.1      matt 
   1963  1.48.18.1      matt 	PMAP_LOCK();
   1964  1.48.18.1      matt 
   1965        1.1      matt 	if (__predict_false(!pmap_initialized)) {
   1966        1.1      matt 		pvo_head = &pmap_pvo_kunmanaged;
   1967        1.1      matt 		pl = &pmap_upvo_pool;
   1968        1.1      matt 		pvo_flags = 0;
   1969        1.1      matt 		pg = NULL;
   1970        1.1      matt 		was_exec = PTE_EXEC;
   1971        1.1      matt 	} else {
   1972        1.1      matt 		pvo_head = pa_to_pvoh(pa, &pg);
   1973        1.1      matt 		pl = &pmap_mpvo_pool;
   1974        1.1      matt 		pvo_flags = PVO_MANAGED;
   1975        1.1      matt 	}
   1976        1.1      matt 
   1977        1.1      matt 	DPRINTFN(ENTER,
   1978        1.1      matt 	    ("pmap_enter(%p, 0x%lx, 0x%lx, 0x%x, 0x%x):",
   1979        1.1      matt 	    pm, va, pa, prot, flags));
   1980        1.1      matt 
   1981        1.1      matt 	/*
   1982        1.1      matt 	 * If this is a managed page, and it's the first reference to the
    1983        1.1      matt 	 * page, clear the execness of the page.  Otherwise fetch the execness.
   1984        1.1      matt 	 */
   1985        1.1      matt 	if (pg != NULL)
   1986        1.1      matt 		was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
   1987        1.1      matt 
   1988        1.1      matt 	DPRINTFN(ENTER, (" was_exec=%d", was_exec));
   1989        1.1      matt 
   1990        1.1      matt 	/*
   1991        1.1      matt 	 * Assume the page is cache inhibited and access is guarded unless
   1992        1.1      matt 	 * it's in our available memory array.  If it is in the memory array,
    1993        1.1      matt 	 * assume it's in memory-coherent memory.
   1994        1.1      matt 	 */
   1995        1.1      matt 	pte_lo = PTE_IG;
   1996        1.1      matt 	if ((flags & PMAP_NC) == 0) {
   1997        1.1      matt 		for (mp = mem; mp->size; mp++) {
   1998        1.1      matt 			if (pa >= mp->start && pa < mp->start + mp->size) {
   1999        1.1      matt 				pte_lo = PTE_M;
   2000        1.1      matt 				break;
   2001        1.1      matt 			}
   2002        1.1      matt 		}
   2003        1.1      matt 	}
   2004        1.1      matt 
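                              	/*
                              	 * Select the page-protection (PP) bits: PTE_BW allows
                              	 * read/write access, PTE_BR read-only.
                              	 */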
   2005        1.1      matt 	if (prot & VM_PROT_WRITE)
   2006        1.1      matt 		pte_lo |= PTE_BW;
   2007        1.1      matt 	else
   2008        1.1      matt 		pte_lo |= PTE_BR;
   2009        1.1      matt 
   2010        1.1      matt 	/*
   2011        1.1      matt 	 * If this was in response to a fault, "pre-fault" the PTE's
   2012        1.1      matt 	 * changed/referenced bit appropriately.
   2013        1.1      matt 	 */
   2014        1.1      matt 	if (flags & VM_PROT_WRITE)
   2015        1.1      matt 		pte_lo |= PTE_CHG;
   2016       1.30       chs 	if (flags & VM_PROT_ALL)
   2017        1.1      matt 		pte_lo |= PTE_REF;
   2018        1.1      matt 
   2019        1.1      matt 	/*
   2020        1.1      matt 	 * We need to know if this page can be executable
   2021        1.1      matt 	 */
   2022        1.1      matt 	flags |= (prot & VM_PROT_EXECUTE);
   2023        1.1      matt 
   2024        1.1      matt 	/*
   2025        1.1      matt 	 * Record mapping for later back-translation and pte spilling.
   2026        1.1      matt 	 * This will overwrite any existing mapping.
   2027        1.1      matt 	 */
   2028        1.1      matt 	error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
   2029        1.1      matt 
   2030        1.1      matt 	/*
   2031        1.1      matt 	 * Flush the real page from the instruction cache if this page is
   2032        1.1      matt 	 * mapped executable and cacheable and has not been flushed since
   2033        1.1      matt 	 * the last time it was modified.
   2034        1.1      matt 	 */
   2035        1.1      matt 	if (error == 0 &&
   2036        1.1      matt             (flags & VM_PROT_EXECUTE) &&
   2037        1.1      matt             (pte_lo & PTE_I) == 0 &&
   2038        1.1      matt 	    was_exec == 0) {
   2039        1.1      matt 		DPRINTFN(ENTER, (" syncicache"));
   2040        1.1      matt 		PMAPCOUNT(exec_synced);
   2041        1.6   thorpej 		pmap_syncicache(pa, PAGE_SIZE);
   2042        1.1      matt 		if (pg != NULL) {
   2043        1.1      matt 			pmap_attr_save(pg, PTE_EXEC);
   2044        1.1      matt 			PMAPCOUNT(exec_cached);
   2045        1.1      matt #if defined(DEBUG) || defined(PMAPDEBUG)
   2046        1.1      matt 			if (pmapdebug & PMAPDEBUG_ENTER)
   2047        1.1      matt 				printf(" marked-as-exec");
   2048        1.1      matt 			else if (pmapdebug & PMAPDEBUG_EXEC)
   2049        1.1      matt 				printf("[pmap_enter: %#lx: marked-as-exec]\n",
   2050       1.34      yamt 				    VM_PAGE_TO_PHYS(pg));
   2051        1.1      matt 
   2052        1.1      matt #endif
   2053        1.1      matt 		}
   2054        1.1      matt 	}
   2055        1.1      matt 
   2056        1.1      matt 	DPRINTFN(ENTER, (": error=%d\n", error));
   2057        1.1      matt 
   2058  1.48.18.1      matt 	PMAP_UNLOCK();
   2059  1.48.18.1      matt 
   2060        1.1      matt 	return error;
   2061        1.1      matt }
   2062        1.1      matt 
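                              /*
                               * Enter an unmanaged, wired kernel mapping of pa at va.  Unlike
                               * pmap_enter(), this cannot fail: if the mapping cannot be entered,
                               * we panic.
                               */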
   2063        1.1      matt void
   2064        1.1      matt pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
   2065        1.1      matt {
   2066        1.1      matt 	struct mem_region *mp;
   2067        1.2      matt 	register_t pte_lo;
   2068        1.1      matt 	int error;
   2069        1.1      matt 
   2070       1.38   sanjayl #if defined (PPC_OEA64_BRIDGE)
   2071        1.1      matt 	if (va < VM_MIN_KERNEL_ADDRESS)
   2072        1.1      matt 		panic("pmap_kenter_pa: attempt to enter "
   2073        1.1      matt 		    "non-kernel address %#lx!", va);
   2074       1.38   sanjayl #endif
   2075        1.1      matt 
   2076        1.1      matt 	DPRINTFN(KENTER,
   2077        1.1      matt 	    ("pmap_kenter_pa(%#lx,%#lx,%#x)\n", va, pa, prot));
   2078        1.1      matt 
   2079  1.48.18.1      matt 	PMAP_LOCK();
   2080  1.48.18.1      matt 
   2081        1.1      matt 	/*
   2082        1.1      matt 	 * Assume the page is cache inhibited and access is guarded unless
   2083        1.1      matt 	 * it's in our available memory array.  If it is in the memory array,
    2084        1.1      matt 	 * assume it's in memory-coherent memory.
   2085        1.1      matt 	 */
   2086        1.1      matt 	pte_lo = PTE_IG;
   2087        1.4      matt 	if ((prot & PMAP_NC) == 0) {
   2088        1.4      matt 		for (mp = mem; mp->size; mp++) {
   2089        1.4      matt 			if (pa >= mp->start && pa < mp->start + mp->size) {
   2090        1.4      matt 				pte_lo = PTE_M;
   2091        1.4      matt 				break;
   2092        1.4      matt 			}
   2093        1.1      matt 		}
   2094        1.1      matt 	}
   2095        1.1      matt 
   2096        1.1      matt 	if (prot & VM_PROT_WRITE)
   2097        1.1      matt 		pte_lo |= PTE_BW;
   2098        1.1      matt 	else
   2099        1.1      matt 		pte_lo |= PTE_BR;
   2100        1.1      matt 
   2101        1.1      matt 	/*
   2102        1.1      matt 	 * We don't care about REF/CHG on PVOs on the unmanaged list.
   2103        1.1      matt 	 */
   2104        1.1      matt 	error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
   2105        1.1      matt 	    &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
   2106        1.1      matt 
   2107        1.1      matt 	if (error != 0)
   2108        1.1      matt 		panic("pmap_kenter_pa: failed to enter va %#lx pa %#lx: %d",
   2109        1.1      matt 		      va, pa, error);
   2110  1.48.18.1      matt 
   2111  1.48.18.1      matt 	PMAP_UNLOCK();
   2112        1.1      matt }
   2113        1.1      matt 
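                              /*
                               * Remove kernel mappings (entered with pmap_kenter_pa()) for len
                               * bytes starting at va.
                               */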
   2114        1.1      matt void
   2115        1.1      matt pmap_kremove(vaddr_t va, vsize_t len)
   2116        1.1      matt {
   2117        1.1      matt 	if (va < VM_MIN_KERNEL_ADDRESS)
   2118        1.1      matt 		panic("pmap_kremove: attempt to remove "
   2119        1.1      matt 		    "non-kernel address %#lx!", va);
   2120        1.1      matt 
   2121        1.1      matt 	DPRINTFN(KREMOVE,("pmap_kremove(%#lx,%#lx)\n", va, len));
   2122        1.1      matt 	pmap_remove(pmap_kernel(), va, va + len);
   2123        1.1      matt }
   2124        1.1      matt 
   2125        1.1      matt /*
   2126        1.1      matt  * Remove the given range of mapping entries.
   2127        1.1      matt  */
   2128        1.1      matt void
   2129        1.1      matt pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
   2130        1.1      matt {
   2131       1.33       chs 	struct pvo_head pvol;
   2132        1.1      matt 	struct pvo_entry *pvo;
   2133        1.2      matt 	register_t msr;
   2134        1.1      matt 	int pteidx;
   2135        1.1      matt 
   2136  1.48.18.1      matt 	PMAP_LOCK();
   2137       1.33       chs 	LIST_INIT(&pvol);
   2138       1.14       chs 	msr = pmap_interrupts_off();
   2139        1.1      matt 	for (; va < endva; va += PAGE_SIZE) {
   2140        1.1      matt 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
   2141        1.1      matt 		if (pvo != NULL) {
   2142       1.33       chs 			pmap_pvo_remove(pvo, pteidx, &pvol);
   2143        1.1      matt 		}
   2144        1.1      matt 	}
   2145       1.14       chs 	pmap_interrupts_restore(msr);
   2146       1.33       chs 	pmap_pvo_free_list(&pvol);
   2147  1.48.18.1      matt 	PMAP_UNLOCK();
   2148        1.1      matt }
   2149        1.1      matt 
   2150        1.1      matt /*
   2151        1.1      matt  * Get the physical page address for the given pmap/virtual address.
   2152        1.1      matt  */
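                              /*
                               * Illustrative use (sketch only; "va" is a hypothetical caller value):
                               *
                               *	paddr_t pa;
                               *	if (pmap_extract(pmap_kernel(), va, &pa))
                               *		... pa now holds the physical address backing va ...
                               */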
   2153       1.44   thorpej bool
   2154        1.1      matt pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
   2155        1.1      matt {
   2156        1.1      matt 	struct pvo_entry *pvo;
   2157        1.2      matt 	register_t msr;
   2158        1.7      matt 
   2159  1.48.18.1      matt 	PMAP_LOCK();
   2160       1.38   sanjayl 
   2161        1.7      matt 	/*
   2162        1.7      matt 	 * If this is a kernel pmap lookup, also check the battable
   2163        1.7      matt 	 * and if we get a hit, translate the VA to a PA using the
    2164       1.36   nathanw 	 * BAT entries.  Don't check against VM_MAX_KERNEL_ADDRESS when
    2165        1.7      matt 	 * it would wrap back to 0 (i.e. when KERNEL2_SR is 15).
   2166        1.7      matt 	 */
   2167        1.7      matt 	if (pm == pmap_kernel() &&
   2168        1.7      matt 	    (va < VM_MIN_KERNEL_ADDRESS ||
   2169        1.7      matt 	     (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
   2170        1.8      matt 		KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
   2171       1.38   sanjayl #if defined (PPC_OEA)
   2172       1.24    kleink 		if ((MFPVR() >> 16) != MPC601) {
   2173       1.24    kleink 			register_t batu = battable[va >> ADDR_SR_SHFT].batu;
   2174       1.24    kleink 			if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) {
   2175       1.24    kleink 				register_t batl =
   2176       1.24    kleink 				    battable[va >> ADDR_SR_SHFT].batl;
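                              				/*
                              				 * Convert the BAT block-length (BL)
                              				 * field into an address mask; BAT
                              				 * blocks range from 128KB to 256MB.
                              				 */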
   2177       1.24    kleink 				register_t mask =
   2178       1.24    kleink 				    (~(batu & BAT_BL) << 15) & ~0x1ffffL;
   2179       1.29    briggs 				if (pap)
   2180       1.29    briggs 					*pap = (batl & mask) | (va & ~mask);
   2181  1.48.18.1      matt 				PMAP_UNLOCK();
   2182       1.45   thorpej 				return true;
   2183       1.24    kleink 			}
   2184       1.24    kleink 		} else {
   2185       1.24    kleink 			register_t batu = battable[va >> 23].batu;
   2186       1.24    kleink 			register_t batl = battable[va >> 23].batl;
   2187       1.24    kleink 			register_t sr = iosrtable[va >> ADDR_SR_SHFT];
   2188       1.24    kleink 			if (BAT601_VALID_P(batl) &&
   2189       1.24    kleink 			    BAT601_VA_MATCH_P(batu, batl, va)) {
   2190       1.24    kleink 				register_t mask =
   2191       1.24    kleink 				    (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
   2192       1.29    briggs 				if (pap)
   2193       1.29    briggs 					*pap = (batl & mask) | (va & ~mask);
   2194  1.48.18.1      matt 				PMAP_UNLOCK();
   2195       1.45   thorpej 				return true;
   2196       1.24    kleink 			} else if (SR601_VALID_P(sr) &&
   2197       1.24    kleink 				   SR601_PA_MATCH_P(sr, va)) {
   2198       1.29    briggs 				if (pap)
   2199       1.29    briggs 					*pap = va;
   2200  1.48.18.1      matt 				PMAP_UNLOCK();
   2201       1.45   thorpej 				return true;
   2202       1.24    kleink 			}
   2203        1.7      matt 		}
   2204  1.48.18.1      matt 		PMAP_UNLOCK();
   2205       1.45   thorpej 		return false;
   2206       1.38   sanjayl #elif defined (PPC_OEA64_BRIDGE)
   2207  1.48.18.1      matt 	panic("%s: pm: %s, va: 0x%08lx\n", __func__,
   2208       1.38   sanjayl 		(pm == pmap_kernel() ? "kernel" : "user"), va);
   2209       1.38   sanjayl #elif defined (PPC_OEA64)
   2210       1.38   sanjayl #error PPC_OEA64 not supported
   2211       1.38   sanjayl #endif /* PPC_OEA */
   2212        1.7      matt 	}
   2213        1.1      matt 
   2214        1.1      matt 	msr = pmap_interrupts_off();
   2215        1.1      matt 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
   2216        1.1      matt 	if (pvo != NULL) {
   2217        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2218       1.29    briggs 		if (pap)
   2219       1.29    briggs 			*pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
   2220       1.29    briggs 			    | (va & ADDR_POFF);
   2221        1.1      matt 	}
   2222        1.1      matt 	pmap_interrupts_restore(msr);
   2223  1.48.18.1      matt 	PMAP_UNLOCK();
   2224        1.1      matt 	return pvo != NULL;
   2225        1.1      matt }
   2226        1.1      matt 
   2227        1.1      matt /*
   2228        1.1      matt  * Lower the protection on the specified range of this pmap.
   2229        1.1      matt  */
   2230        1.1      matt void
   2231        1.1      matt pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
   2232        1.1      matt {
   2233        1.1      matt 	struct pvo_entry *pvo;
   2234        1.2      matt 	volatile struct pte *pt;
   2235        1.2      matt 	register_t msr;
   2236        1.1      matt 	int pteidx;
   2237        1.1      matt 
   2238        1.1      matt 	/*
   2239        1.1      matt 	 * Since this routine only downgrades protection, we should
   2240       1.14       chs 	 * always be called with at least one bit not set.
   2241        1.1      matt 	 */
   2242       1.14       chs 	KASSERT(prot != VM_PROT_ALL);
   2243        1.1      matt 
   2244        1.1      matt 	/*
    2245        1.1      matt 	 * If read permission is being revoked, this is equivalent to
    2246        1.1      matt 	 * removing the range from the pmap.
   2247        1.1      matt 	 */
   2248        1.1      matt 	if ((prot & VM_PROT_READ) == 0) {
   2249        1.1      matt 		pmap_remove(pm, va, endva);
   2250        1.1      matt 		return;
   2251        1.1      matt 	}
   2252        1.1      matt 
   2253  1.48.18.1      matt 	PMAP_LOCK();
   2254  1.48.18.1      matt 
   2255        1.1      matt 	msr = pmap_interrupts_off();
   2256        1.6   thorpej 	for (; va < endva; va += PAGE_SIZE) {
   2257        1.1      matt 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
   2258        1.1      matt 		if (pvo == NULL)
   2259        1.1      matt 			continue;
   2260        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2261        1.1      matt 
   2262        1.1      matt 		/*
   2263        1.1      matt 		 * Revoke executable if asked to do so.
   2264        1.1      matt 		 */
   2265        1.1      matt 		if ((prot & VM_PROT_EXECUTE) == 0)
   2266       1.14       chs 			pvo_clear_exec(pvo);
   2267        1.1      matt 
   2268        1.1      matt #if 0
   2269        1.1      matt 		/*
   2270        1.1      matt 		 * If the page is already read-only, no change
   2271        1.1      matt 		 * needs to be made.
   2272        1.1      matt 		 */
   2273        1.1      matt 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
   2274        1.1      matt 			continue;
   2275        1.1      matt #endif
   2276        1.1      matt 		/*
   2277        1.1      matt 		 * Grab the PTE pointer before we diddle with
   2278        1.1      matt 		 * the cached PTE copy.
   2279        1.1      matt 		 */
   2280        1.1      matt 		pt = pmap_pvo_to_pte(pvo, pteidx);
   2281        1.1      matt 		/*
   2282        1.1      matt 		 * Change the protection of the page.
   2283        1.1      matt 		 */
   2284        1.1      matt 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
   2285        1.1      matt 		pvo->pvo_pte.pte_lo |= PTE_BR;
   2286        1.1      matt 
   2287        1.1      matt 		/*
   2288        1.1      matt 		 * If the PVO is in the page table, update
    2289        1.1      matt 		 * that PTE as well.
   2290        1.1      matt 		 */
   2291        1.1      matt 		if (pt != NULL) {
   2292        1.1      matt 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   2293       1.12      matt 			PVO_WHERE(pvo, PMAP_PROTECT);
   2294        1.1      matt 			PMAPCOUNT(ptes_changed);
   2295        1.1      matt 		}
   2296        1.1      matt 
   2297        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2298        1.1      matt 	}
   2299        1.1      matt 	pmap_interrupts_restore(msr);
   2300  1.48.18.1      matt 	PMAP_UNLOCK();
   2301        1.1      matt }
   2302        1.1      matt 
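                              /*
                               * Clear the wired attribute of the mapping for va, if one exists,
                               * and update the pmap's wired-page count.
                               */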
   2303        1.1      matt void
   2304        1.1      matt pmap_unwire(pmap_t pm, vaddr_t va)
   2305        1.1      matt {
   2306        1.1      matt 	struct pvo_entry *pvo;
   2307        1.2      matt 	register_t msr;
   2308        1.1      matt 
   2309  1.48.18.1      matt 	PMAP_LOCK();
   2310        1.1      matt 	msr = pmap_interrupts_off();
   2311        1.1      matt 	pvo = pmap_pvo_find_va(pm, va, NULL);
   2312        1.1      matt 	if (pvo != NULL) {
   2313       1.39      matt 		if (PVO_WIRED_P(pvo)) {
   2314        1.1      matt 			pvo->pvo_vaddr &= ~PVO_WIRED;
   2315        1.1      matt 			pm->pm_stats.wired_count--;
   2316        1.1      matt 		}
   2317        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2318        1.1      matt 	}
   2319        1.1      matt 	pmap_interrupts_restore(msr);
   2320  1.48.18.1      matt 	PMAP_UNLOCK();
   2321        1.1      matt }
   2322        1.1      matt 
   2323        1.1      matt /*
   2324        1.1      matt  * Lower the protection on the specified physical page.
   2325        1.1      matt  */
   2326        1.1      matt void
   2327        1.1      matt pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   2328        1.1      matt {
   2329       1.33       chs 	struct pvo_head *pvo_head, pvol;
   2330        1.1      matt 	struct pvo_entry *pvo, *next_pvo;
   2331        1.2      matt 	volatile struct pte *pt;
   2332        1.2      matt 	register_t msr;
   2333        1.1      matt 
   2334  1.48.18.1      matt 	PMAP_LOCK();
   2335  1.48.18.1      matt 
   2336       1.14       chs 	KASSERT(prot != VM_PROT_ALL);
   2337       1.33       chs 	LIST_INIT(&pvol);
   2338        1.1      matt 	msr = pmap_interrupts_off();
   2339        1.1      matt 
   2340        1.1      matt 	/*
   2341        1.1      matt 	 * When UVM reuses a page, it does a pmap_page_protect with
   2342        1.1      matt 	 * VM_PROT_NONE.  At that point, we can clear the exec flag
   2343        1.1      matt 	 * since we know the page will have different contents.
   2344        1.1      matt 	 */
   2345        1.1      matt 	if ((prot & VM_PROT_READ) == 0) {
   2346        1.1      matt 		DPRINTFN(EXEC, ("[pmap_page_protect: %#lx: clear-exec]\n",
   2347       1.34      yamt 		    VM_PAGE_TO_PHYS(pg)));
   2348        1.1      matt 		if (pmap_attr_fetch(pg) & PTE_EXEC) {
   2349        1.1      matt 			PMAPCOUNT(exec_uncached_page_protect);
   2350        1.1      matt 			pmap_attr_clear(pg, PTE_EXEC);
   2351        1.1      matt 		}
   2352        1.1      matt 	}
   2353        1.1      matt 
   2354        1.1      matt 	pvo_head = vm_page_to_pvoh(pg);
   2355        1.1      matt 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
   2356        1.1      matt 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
   2357        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2358        1.1      matt 
   2359        1.1      matt 		/*
   2360        1.1      matt 		 * Downgrading to no mapping at all, we just remove the entry.
   2361        1.1      matt 		 */
   2362        1.1      matt 		if ((prot & VM_PROT_READ) == 0) {
   2363       1.33       chs 			pmap_pvo_remove(pvo, -1, &pvol);
   2364        1.1      matt 			continue;
   2365        1.1      matt 		}
   2366        1.1      matt 
   2367        1.1      matt 		/*
   2368        1.1      matt 		 * If EXEC permission is being revoked, just clear the
   2369        1.1      matt 		 * flag in the PVO.
   2370        1.1      matt 		 */
   2371        1.1      matt 		if ((prot & VM_PROT_EXECUTE) == 0)
   2372       1.14       chs 			pvo_clear_exec(pvo);
   2373        1.1      matt 
   2374        1.1      matt 		/*
   2375        1.1      matt 		 * If this entry is already RO, don't diddle with the
   2376        1.1      matt 		 * page table.
   2377        1.1      matt 		 */
   2378        1.1      matt 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
   2379        1.1      matt 			PMAP_PVO_CHECK(pvo);
   2380        1.1      matt 			continue;
   2381        1.1      matt 		}
   2382        1.1      matt 
   2383        1.1      matt 		/*
    2384        1.1      matt 		 * Grab the PTE before we diddle the bits so
   2385        1.1      matt 		 * pvo_to_pte can verify the pte contents are as
   2386        1.1      matt 		 * expected.
   2387        1.1      matt 		 */
   2388        1.1      matt 		pt = pmap_pvo_to_pte(pvo, -1);
   2389        1.1      matt 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
   2390        1.1      matt 		pvo->pvo_pte.pte_lo |= PTE_BR;
   2391        1.1      matt 		if (pt != NULL) {
   2392        1.1      matt 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   2393       1.12      matt 			PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
   2394        1.1      matt 			PMAPCOUNT(ptes_changed);
   2395        1.1      matt 		}
   2396        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2397        1.1      matt 	}
   2398        1.1      matt 	pmap_interrupts_restore(msr);
   2399       1.33       chs 	pmap_pvo_free_list(&pvol);
   2400  1.48.18.1      matt 
   2401  1.48.18.1      matt 	PMAP_UNLOCK();
   2402        1.1      matt }
   2403        1.1      matt 
   2404        1.1      matt /*
   2405        1.1      matt  * Activate the address space for the specified process.  If the process
   2406        1.1      matt  * is the current process, load the new MMU context.
   2407        1.1      matt  */
   2408        1.1      matt void
   2409        1.1      matt pmap_activate(struct lwp *l)
   2410        1.1      matt {
   2411        1.1      matt 	struct pcb *pcb = &l->l_addr->u_pcb;
   2412        1.1      matt 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
   2413        1.1      matt 
   2414        1.1      matt 	DPRINTFN(ACTIVATE,
   2415        1.1      matt 	    ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp));
   2416        1.1      matt 
   2417        1.1      matt 	/*
   2418        1.1      matt 	 * XXX Normally performed in cpu_fork().
   2419        1.1      matt 	 */
   2420       1.13      matt 	pcb->pcb_pm = pmap;
   2421       1.17      matt 
    2422       1.17      matt 	/*
    2423       1.17      matt 	 * In theory, the SR registers need only be valid on return
    2424       1.17      matt 	 * to user space, so wait to do them there.
    2425       1.17      matt 	 */
   2426       1.17      matt 	if (l == curlwp) {
   2427       1.17      matt 		/* Store pointer to new current pmap. */
   2428       1.17      matt 		curpm = pmap;
   2429       1.17      matt 	}
   2430        1.1      matt }
   2431        1.1      matt 
   2432        1.1      matt /*
   2433        1.1      matt  * Deactivate the specified process's address space.
   2434        1.1      matt  */
   2435        1.1      matt void
   2436        1.1      matt pmap_deactivate(struct lwp *l)
   2437        1.1      matt {
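                              	/* Nothing to do here. */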
   2438        1.1      matt }
   2439        1.1      matt 
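                              /*
                               * Test whether the given REF/CHG attribute bit is set for pg, first
                               * consulting the cached attributes, then the PTE of each mapping of
                               * the page.
                               */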
   2440       1.44   thorpej bool
   2441        1.1      matt pmap_query_bit(struct vm_page *pg, int ptebit)
   2442        1.1      matt {
   2443        1.1      matt 	struct pvo_entry *pvo;
   2444        1.2      matt 	volatile struct pte *pt;
   2445        1.2      matt 	register_t msr;
   2446        1.1      matt 
   2447  1.48.18.1      matt 	PMAP_LOCK();
   2448  1.48.18.1      matt 
   2449  1.48.18.1      matt 	if (pmap_attr_fetch(pg) & ptebit) {
   2450  1.48.18.1      matt 		PMAP_UNLOCK();
   2451       1.45   thorpej 		return true;
   2452  1.48.18.1      matt 	}
   2453       1.14       chs 
   2454        1.1      matt 	msr = pmap_interrupts_off();
   2455        1.1      matt 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
   2456        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2457        1.1      matt 		/*
    2458        1.1      matt 		 * See if we saved the bit off.  If so, cache it and return
   2459        1.1      matt 		 * success.
   2460        1.1      matt 		 */
   2461        1.1      matt 		if (pvo->pvo_pte.pte_lo & ptebit) {
   2462        1.1      matt 			pmap_attr_save(pg, ptebit);
   2463        1.1      matt 			PMAP_PVO_CHECK(pvo);		/* sanity check */
   2464        1.1      matt 			pmap_interrupts_restore(msr);
   2465  1.48.18.1      matt 			PMAP_UNLOCK();
   2466       1.45   thorpej 			return true;
   2467        1.1      matt 		}
   2468        1.1      matt 	}
   2469        1.1      matt 	/*
   2470        1.1      matt 	 * No luck, now go thru the hard part of looking at the ptes
   2471        1.1      matt 	 * themselves.  Sync so any pending REF/CHG bits are flushed
   2472        1.1      matt 	 * to the PTEs.
   2473        1.1      matt 	 */
   2474        1.1      matt 	SYNC();
   2475        1.1      matt 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
   2476        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2477        1.1      matt 		/*
    2478        1.1      matt 		 * See if this pvo has a valid PTE.  If so, fetch the
    2479        1.1      matt 		 * REF/CHG bits from the valid PTE.  If the appropriate
    2480        1.1      matt 		 * ptebit is set, cache it and return success.
   2481        1.1      matt 		 */
   2482        1.1      matt 		pt = pmap_pvo_to_pte(pvo, -1);
   2483        1.1      matt 		if (pt != NULL) {
   2484        1.1      matt 			pmap_pte_synch(pt, &pvo->pvo_pte);
   2485        1.1      matt 			if (pvo->pvo_pte.pte_lo & ptebit) {
   2486        1.1      matt 				pmap_attr_save(pg, ptebit);
   2487        1.1      matt 				PMAP_PVO_CHECK(pvo);		/* sanity check */
   2488        1.1      matt 				pmap_interrupts_restore(msr);
   2489  1.48.18.1      matt 				PMAP_UNLOCK();
   2490       1.45   thorpej 				return true;
   2491        1.1      matt 			}
   2492        1.1      matt 		}
   2493        1.1      matt 	}
   2494        1.1      matt 	pmap_interrupts_restore(msr);
   2495  1.48.18.1      matt 	PMAP_UNLOCK();
   2496       1.45   thorpej 	return false;
   2497        1.1      matt }
   2498        1.1      matt 
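                              /*
                               * Clear the given REF/CHG attribute bit for pg, both in the cached
                               * attributes and in every valid PTE mapping the page.  Return
                               * whether the bit was set.
                               */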
   2499       1.44   thorpej bool
   2500        1.1      matt pmap_clear_bit(struct vm_page *pg, int ptebit)
   2501        1.1      matt {
   2502        1.1      matt 	struct pvo_head *pvoh = vm_page_to_pvoh(pg);
   2503        1.1      matt 	struct pvo_entry *pvo;
   2504        1.2      matt 	volatile struct pte *pt;
   2505        1.2      matt 	register_t msr;
   2506        1.1      matt 	int rv = 0;
   2507        1.1      matt 
   2508  1.48.18.1      matt 	PMAP_LOCK();
   2509        1.1      matt 	msr = pmap_interrupts_off();
   2510        1.1      matt 
   2511        1.1      matt 	/*
   2512        1.1      matt 	 * Fetch the cache value
   2513        1.1      matt 	 */
   2514        1.1      matt 	rv |= pmap_attr_fetch(pg);
   2515        1.1      matt 
   2516        1.1      matt 	/*
   2517        1.1      matt 	 * Clear the cached value.
   2518        1.1      matt 	 */
   2519        1.1      matt 	pmap_attr_clear(pg, ptebit);
   2520        1.1      matt 
   2521        1.1      matt 	/*
   2522        1.1      matt 	 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
   2523        1.1      matt 	 * can reset the right ones).  Note that since the pvo entries and
   2524        1.1      matt 	 * list heads are accessed via BAT0 and are never placed in the
   2525        1.1      matt 	 * page table, we don't have to worry about further accesses setting
   2526        1.1      matt 	 * the REF/CHG bits.
   2527        1.1      matt 	 */
   2528        1.1      matt 	SYNC();
   2529        1.1      matt 
   2530        1.1      matt 	/*
    2531        1.1      matt 	 * For each pvo entry, clear pvo's ptebit.  If this pvo has a
    2532        1.1      matt 	 * valid PTE, also clear the ptebit from the valid PTE.
   2533        1.1      matt 	 */
   2534        1.1      matt 	LIST_FOREACH(pvo, pvoh, pvo_vlink) {
   2535        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2536        1.1      matt 		pt = pmap_pvo_to_pte(pvo, -1);
   2537        1.1      matt 		if (pt != NULL) {
   2538        1.1      matt 			/*
   2539        1.1      matt 			 * Only sync the PTE if the bit we are looking
   2540        1.1      matt 			 * for is not already set.
   2541        1.1      matt 			 */
   2542        1.1      matt 			if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
   2543        1.1      matt 				pmap_pte_synch(pt, &pvo->pvo_pte);
   2544        1.1      matt 			/*
   2545        1.1      matt 			 * If the bit we are looking for was already set,
   2546        1.1      matt 			 * clear that bit in the pte.
   2547        1.1      matt 			 */
   2548        1.1      matt 			if (pvo->pvo_pte.pte_lo & ptebit)
   2549        1.1      matt 				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
   2550        1.1      matt 		}
   2551        1.1      matt 		rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
   2552        1.1      matt 		pvo->pvo_pte.pte_lo &= ~ptebit;
   2553        1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2554        1.1      matt 	}
   2555        1.1      matt 	pmap_interrupts_restore(msr);
   2556       1.14       chs 
   2557        1.1      matt 	/*
   2558        1.1      matt 	 * If we are clearing the modify bit and this page was marked EXEC
   2559        1.1      matt 	 * and the user of the page thinks the page was modified, then we
   2560        1.1      matt 	 * need to clean it from the icache if it's mapped or clear the EXEC
   2561        1.1      matt 	 * bit if it's not mapped.  The page itself might not have the CHG
   2562        1.1      matt 	 * bit set if the modification was done via DMA to the page.
   2563        1.1      matt 	 */
   2564        1.1      matt 	if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
   2565        1.1      matt 		if (LIST_EMPTY(pvoh)) {
   2566        1.1      matt 			DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: clear-exec]\n",
   2567       1.34      yamt 			    VM_PAGE_TO_PHYS(pg)));
   2568        1.1      matt 			pmap_attr_clear(pg, PTE_EXEC);
   2569        1.1      matt 			PMAPCOUNT(exec_uncached_clear_modify);
   2570        1.1      matt 		} else {
   2571        1.1      matt 			DPRINTFN(EXEC, ("[pmap_clear_bit: %#lx: syncicache]\n",
   2572       1.34      yamt 			    VM_PAGE_TO_PHYS(pg)));
   2573       1.34      yamt 			pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
   2574        1.1      matt 			PMAPCOUNT(exec_synced_clear_modify);
   2575        1.1      matt 		}
   2576        1.1      matt 	}
   2577  1.48.18.1      matt 	PMAP_UNLOCK();
   2578        1.1      matt 	return (rv & ptebit) != 0;
   2579        1.1      matt }
   2580        1.1      matt 
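                              /*
                               * Synchronize the instruction cache for the given range of p's
                               * address space, a page at a time, for each executable mapping
                               * found.
                               */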
   2581        1.1      matt void
   2582        1.1      matt pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   2583        1.1      matt {
   2584        1.1      matt 	struct pvo_entry *pvo;
   2585        1.1      matt 	size_t offset = va & ADDR_POFF;
   2586        1.1      matt 	int s;
   2587        1.1      matt 
   2588  1.48.18.1      matt 	PMAP_LOCK();
   2589        1.1      matt 	s = splvm();
   2590        1.1      matt 	while (len > 0) {
   2591        1.6   thorpej 		size_t seglen = PAGE_SIZE - offset;
   2592        1.1      matt 		if (seglen > len)
   2593        1.1      matt 			seglen = len;
   2594        1.1      matt 		pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
   2595       1.39      matt 		if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
   2596        1.1      matt 			pmap_syncicache(
   2597        1.1      matt 			    (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
   2598        1.1      matt 			PMAP_PVO_CHECK(pvo);
   2599        1.1      matt 		}
   2600        1.1      matt 		va += seglen;
   2601        1.1      matt 		len -= seglen;
   2602        1.1      matt 		offset = 0;
   2603        1.1      matt 	}
   2604        1.1      matt 	splx(s);
   2605  1.48.18.1      matt 	PMAP_UNLOCK();
   2606        1.1      matt }
   2607        1.1      matt 
   2608        1.1      matt #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
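                              /*
                               * Debug helper: decode and print a single page table entry,
                               * including its valid/hash bits, VSID/API, and WIMG/protection bits.
                               */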
   2609        1.1      matt void
   2610        1.2      matt pmap_pte_print(volatile struct pte *pt)
   2611        1.1      matt {
   2612        1.1      matt 	printf("PTE %p: ", pt);
   2613       1.38   sanjayl 
   2614       1.38   sanjayl #if defined(PPC_OEA)
   2615        1.1      matt 	/* High word: */
   2616        1.2      matt 	printf("0x%08lx: [", pt->pte_hi);
   2617       1.38   sanjayl #elif defined (PPC_OEA64_BRIDGE)
   2618       1.38   sanjayl 	printf("0x%016llx: [", pt->pte_hi);
   2619       1.38   sanjayl #else /* PPC_OEA64 */
   2620       1.38   sanjayl 	printf("0x%016lx: [", pt->pte_hi);
   2621       1.38   sanjayl #endif /* PPC_OEA */
   2622       1.38   sanjayl 
   2623        1.1      matt 	printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
   2624        1.1      matt 	printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
   2625       1.38   sanjayl 
   2626       1.38   sanjayl #if defined (PPC_OEA)
   2627        1.2      matt 	printf("0x%06lx 0x%02lx",
   2628        1.1      matt 	    (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
   2629        1.1      matt 	    pt->pte_hi & PTE_API);
   2630        1.1      matt 	printf(" (va 0x%08lx)] ", pmap_pte_to_va(pt));
   2631       1.38   sanjayl #elif defined (PPC_OEA64)
   2632       1.38   sanjayl 	printf("0x%06lx 0x%02lx",
   2633       1.38   sanjayl 	    (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
   2634       1.38   sanjayl 	    pt->pte_hi & PTE_API);
   2635       1.38   sanjayl 	printf(" (va 0x%016lx)] ", pmap_pte_to_va(pt));
   2636       1.38   sanjayl #else
   2637       1.38   sanjayl 	/* PPC_OEA64_BRIDGE */
   2638       1.38   sanjayl 	printf("0x%06llx 0x%02llx",
   2639       1.38   sanjayl 	    (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
   2640       1.38   sanjayl 	    pt->pte_hi & PTE_API);
   2641       1.38   sanjayl 	printf(" (va 0x%08lx)] ", pmap_pte_to_va(pt));
   2642       1.38   sanjayl #endif /* PPC_OEA */
   2643       1.38   sanjayl 
   2644        1.1      matt 	/* Low word: */
   2645       1.38   sanjayl #if defined (PPC_OEA)
   2646        1.2      matt 	printf(" 0x%08lx: [", pt->pte_lo);
   2647        1.2      matt 	printf("0x%05lx... ", pt->pte_lo >> 12);
   2648       1.38   sanjayl #elif defined (PPC_OEA64)
   2649       1.38   sanjayl 	printf(" 0x%016lx: [", pt->pte_lo);
   2650       1.38   sanjayl 	printf("0x%012lx... ", pt->pte_lo >> 12);
   2651       1.38   sanjayl #else	/* PPC_OEA64_BRIDGE */
   2652       1.38   sanjayl 	printf(" 0x%016llx: [", pt->pte_lo);
   2653       1.38   sanjayl 	printf("0x%012llx... ", pt->pte_lo >> 12);
   2654       1.38   sanjayl #endif
   2655        1.1      matt 	printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
   2656        1.1      matt 	printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
   2657        1.1      matt 	printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
   2658        1.1      matt 	printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
   2659        1.1      matt 	printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
   2660        1.1      matt 	printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
   2661        1.1      matt 	switch (pt->pte_lo & PTE_PP) {
   2662        1.1      matt 	case PTE_BR: printf("br]\n"); break;
   2663        1.1      matt 	case PTE_BW: printf("bw]\n"); break;
   2664        1.1      matt 	case PTE_SO: printf("so]\n"); break;
   2665        1.1      matt 	case PTE_SW: printf("sw]\n"); break;
   2666        1.1      matt 	}
   2667        1.1      matt }
   2668        1.1      matt #endif
   2669        1.1      matt 
   2670        1.1      matt #if defined(DDB)
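                              /*
                               * DDB helper: walk the hashed page table and count how many PTE
                               * slots are valid in the primary hash, valid in the secondary hash,
                               * or invalid.
                               */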
   2671        1.1      matt void
   2672        1.1      matt pmap_pteg_check(void)
   2673        1.1      matt {
   2674        1.2      matt 	volatile struct pte *pt;
   2675        1.1      matt 	int i;
   2676        1.1      matt 	int ptegidx;
   2677        1.1      matt 	u_int p_valid = 0;
   2678        1.1      matt 	u_int s_valid = 0;
   2679        1.1      matt 	u_int invalid = 0;
   2680       1.38   sanjayl 
   2681        1.1      matt 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2682        1.1      matt 		for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
   2683        1.1      matt 			if (pt->pte_hi & PTE_VALID) {
   2684        1.1      matt 				if (pt->pte_hi & PTE_HID)
   2685        1.1      matt 					s_valid++;
    2686        1.1      matt 				else
    2688        1.1      matt 					p_valid++;
   2690        1.1      matt 			} else
   2691        1.1      matt 				invalid++;
   2692        1.1      matt 		}
   2693        1.1      matt 	}
   2694        1.1      matt 	printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
   2695        1.1      matt 		p_valid, p_valid, s_valid, s_valid,
   2696        1.1      matt 		invalid, invalid);
   2697        1.1      matt }
   2698        1.1      matt 
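                              /*
                               * DDB helper: dump the MMU state: SDR1, the segment registers, and
                               * (where the CPU has them) the BAT registers.
                               */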
   2699        1.1      matt void
   2700        1.1      matt pmap_print_mmuregs(void)
   2701        1.1      matt {
   2702        1.1      matt 	int i;
   2703        1.1      matt 	u_int cpuvers;
   2704       1.18      matt #ifndef PPC_OEA64
   2705        1.1      matt 	vaddr_t addr;
   2706        1.2      matt 	register_t soft_sr[16];
   2707       1.18      matt #endif
   2708       1.38   sanjayl #if defined (PPC_OEA) && !defined (PPC_OEA64) && !defined (PPC_OEA64_BRIDGE)
   2709        1.1      matt 	struct bat soft_ibat[4];
   2710        1.1      matt 	struct bat soft_dbat[4];
   2711       1.38   sanjayl #endif
   2712        1.2      matt 	register_t sdr1;
   2713        1.1      matt 
   2714        1.1      matt 	cpuvers = MFPVR() >> 16;
   2715       1.35     perry 	__asm volatile ("mfsdr1 %0" : "=r"(sdr1));
   2716       1.18      matt #ifndef PPC_OEA64
   2717       1.16    kleink 	addr = 0;
   2718       1.27       chs 	for (i = 0; i < 16; i++) {
   2719        1.1      matt 		soft_sr[i] = MFSRIN(addr);
   2720        1.1      matt 		addr += (1 << ADDR_SR_SHFT);
   2721        1.1      matt 	}
   2722       1.18      matt #endif
   2723        1.1      matt 
   2724       1.38   sanjayl #if defined(PPC_OEA) && !defined (PPC_OEA64) && !defined (PPC_OEA64_BRIDGE)
   2725        1.1      matt 	/* read iBAT (601: uBAT) registers */
   2726       1.35     perry 	__asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
   2727       1.35     perry 	__asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
   2728       1.35     perry 	__asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
   2729       1.35     perry 	__asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
   2730       1.35     perry 	__asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
   2731       1.35     perry 	__asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
   2732       1.35     perry 	__asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
   2733       1.35     perry 	__asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
   2734        1.1      matt 
   2735        1.1      matt 
   2736        1.1      matt 	if (cpuvers != MPC601) {
   2737        1.1      matt 		/* read dBAT registers */
   2738       1.35     perry 		__asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
   2739       1.35     perry 		__asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
   2740       1.35     perry 		__asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
   2741       1.35     perry 		__asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
   2742       1.35     perry 		__asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
   2743       1.35     perry 		__asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
   2744       1.35     perry 		__asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
   2745       1.35     perry 		__asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
   2746        1.1      matt 	}
   2747       1.38   sanjayl #endif
   2748        1.1      matt 
   2749       1.18      matt 	printf("SDR1:\t0x%lx\n", (long) sdr1);
   2750       1.18      matt #ifndef PPC_OEA64
   2751        1.1      matt 	printf("SR[]:\t");
   2752       1.27       chs 	for (i = 0; i < 4; i++)
   2753       1.38   sanjayl 		printf("0x%08lx,   ", (long) soft_sr[i]);
   2754        1.1      matt 	printf("\n\t");
   2755       1.27       chs 	for ( ; i < 8; i++)
   2756       1.38   sanjayl 		printf("0x%08lx,   ", (long) soft_sr[i]);
   2757        1.1      matt 	printf("\n\t");
   2758       1.27       chs 	for ( ; i < 12; i++)
   2759       1.38   sanjayl 		printf("0x%08lx,   ", (long) soft_sr[i]);
   2760        1.1      matt 	printf("\n\t");
   2761       1.27       chs 	for ( ; i < 16; i++)
   2762       1.38   sanjayl 		printf("0x%08lx,   ", (long) soft_sr[i]);
   2763        1.1      matt 	printf("\n");
   2764       1.18      matt #endif
   2765        1.1      matt 
   2766       1.38   sanjayl #if defined(PPC_OEA) && !defined (PPC_OEA64) && !defined (PPC_OEA64_BRIDGE)
   2767        1.1      matt 	printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
   2768       1.27       chs 	for (i = 0; i < 4; i++) {
   2769        1.2      matt 		printf("0x%08lx 0x%08lx, ",
   2770        1.1      matt 			soft_ibat[i].batu, soft_ibat[i].batl);
   2771        1.1      matt 		if (i == 1)
   2772        1.1      matt 			printf("\n\t");
   2773        1.1      matt 	}
   2774        1.1      matt 	if (cpuvers != MPC601) {
   2775        1.1      matt 		printf("\ndBAT[]:\t");
   2776       1.27       chs 		for (i = 0; i < 4; i++) {
   2777        1.2      matt 			printf("0x%08lx 0x%08lx, ",
   2778        1.1      matt 				soft_dbat[i].batu, soft_dbat[i].batl);
   2779        1.1      matt 			if (i == 1)
   2780        1.1      matt 				printf("\n\t");
   2781        1.1      matt 		}
   2782        1.1      matt 	}
   2783        1.1      matt 	printf("\n");
   2784       1.38   sanjayl #endif /* PPC_OEA... */
   2785        1.1      matt }
   2786        1.1      matt 
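                              /*
                               * DDB helper: look up va in the given pmap and print the page table
                               * entry that maps it, if any.
                               */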
   2787        1.1      matt void
   2788        1.1      matt pmap_print_pte(pmap_t pm, vaddr_t va)
   2789        1.1      matt {
   2790        1.1      matt 	struct pvo_entry *pvo;
   2791        1.2      matt 	volatile struct pte *pt;
   2792        1.1      matt 	int pteidx;
   2793        1.1      matt 
   2794        1.1      matt 	pvo = pmap_pvo_find_va(pm, va, &pteidx);
   2795        1.1      matt 	if (pvo != NULL) {
   2796        1.1      matt 		pt = pmap_pvo_to_pte(pvo, pteidx);
   2797        1.1      matt 		if (pt != NULL) {
   2798       1.38   sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64)
   2799        1.2      matt 			printf("VA %#lx -> %p -> %s %#lx, %#lx\n",
   2800        1.1      matt 				va, pt,
   2801        1.1      matt 				pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
   2802        1.1      matt 				pt->pte_hi, pt->pte_lo);
   2803       1.38   sanjayl #else	/* PPC_OEA64_BRIDGE */
   2804       1.38   sanjayl 			printf("VA %#lx -> %p -> %s %#llx, %#llx\n",
   2805       1.38   sanjayl 				va, pt,
   2806       1.38   sanjayl 				pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
   2807       1.38   sanjayl 				pt->pte_hi, pt->pte_lo);
   2808       1.38   sanjayl #endif
   2809        1.1      matt 		} else {
   2810        1.1      matt 			printf("No valid PTE found\n");
   2811        1.1      matt 		}
   2812        1.1      matt 	} else {
   2813        1.1      matt 		printf("Address not in pmap\n");
   2814        1.1      matt 	}
   2815        1.1      matt }
   2816        1.1      matt 
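                              /*
                               * DDB helper: print a histogram of PVO overflow-chain depths across
                               * all PTE groups.
                               */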
   2817        1.1      matt void
   2818        1.1      matt pmap_pteg_dist(void)
   2819        1.1      matt {
   2820        1.1      matt 	struct pvo_entry *pvo;
   2821        1.1      matt 	int ptegidx;
   2822        1.1      matt 	int depth;
   2823        1.1      matt 	int max_depth = 0;
   2824        1.1      matt 	unsigned int depths[64];
   2825        1.1      matt 
   2826        1.1      matt 	memset(depths, 0, sizeof(depths));
   2827        1.1      matt 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2828        1.1      matt 		depth = 0;
   2829        1.1      matt 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   2830        1.1      matt 			depth++;
   2831        1.1      matt 		}
   2832        1.1      matt 		if (depth > max_depth)
   2833        1.1      matt 			max_depth = depth;
   2834        1.1      matt 		if (depth > 63)
   2835        1.1      matt 			depth = 63;
   2836        1.1      matt 		depths[depth]++;
   2837        1.1      matt 	}
   2838        1.1      matt 
   2839        1.1      matt 	for (depth = 0; depth < 64; depth++) {
   2840        1.1      matt 		printf("  [%2d]: %8u", depth, depths[depth]);
   2841        1.1      matt 		if ((depth & 3) == 3)
   2842        1.1      matt 			printf("\n");
   2843        1.1      matt 		if (depth == max_depth)
   2844        1.1      matt 			break;
   2845        1.1      matt 	}
   2846        1.1      matt 	if ((depth & 3) != 3)
   2847        1.1      matt 		printf("\n");
   2848        1.1      matt 	printf("Max depth found was %d\n", max_depth);
   2849        1.1      matt }
   2850        1.1      matt #endif /* DEBUG */
   2851        1.1      matt 
   2852        1.1      matt #if defined(PMAPCHECK) || defined(DEBUG)
   2853        1.1      matt void
   2854        1.1      matt pmap_pvo_verify(void)
   2855        1.1      matt {
   2856        1.1      matt 	int ptegidx;
   2857        1.1      matt 	int s;
   2858        1.1      matt 
   2859        1.1      matt 	s = splvm();
   2860        1.1      matt 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2861        1.1      matt 		struct pvo_entry *pvo;
   2862        1.1      matt 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   2863        1.1      matt 			if ((uintptr_t) pvo >= SEGMENT_LENGTH)
   2864        1.1      matt 				panic("pmap_pvo_verify: invalid pvo %p "
   2865        1.1      matt 				    "on list %#x", pvo, ptegidx);
   2866        1.1      matt 			pmap_pvo_check(pvo);
   2867        1.1      matt 		}
   2868        1.1      matt 	}
   2869        1.1      matt 	splx(s);
   2870        1.1      matt }
   2871        1.1      matt #endif /* PMAPCHECK */
   2872        1.1      matt 
   2873        1.1      matt 
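                              /*
                               * Pool back-end allocator for unmanaged PVO entries.  Before UVM is
                               * initialized, steal a page with uvm_pageboot_alloc(); afterwards,
                               * recycle a cached page or fall back to pmap_pool_malloc().
                               */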
   2874        1.1      matt void *
   2875        1.1      matt pmap_pool_ualloc(struct pool *pp, int flags)
   2876        1.1      matt {
   2877        1.1      matt 	struct pvo_page *pvop;
   2878        1.1      matt 
   2879  1.48.18.1      matt 	if (uvm.page_init_done != true) {
   2880  1.48.18.1      matt 		return (void *) uvm_pageboot_alloc(PAGE_SIZE);
   2881  1.48.18.1      matt 	}
   2882  1.48.18.1      matt 
   2883  1.48.18.1      matt 	PMAP_LOCK();
   2884        1.1      matt 	pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
   2885        1.1      matt 	if (pvop != NULL) {
   2886        1.1      matt 		pmap_upvop_free--;
   2887        1.1      matt 		SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
   2888  1.48.18.1      matt 		PMAP_UNLOCK();
   2889        1.1      matt 		return pvop;
   2890        1.1      matt 	}
   2891  1.48.18.1      matt 	PMAP_UNLOCK();
   2892        1.1      matt 	return pmap_pool_malloc(pp, flags);
   2893        1.1      matt }
   2894        1.1      matt 
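                              /*
                               * Pool back-end allocator for managed PVO entries.  Recycle a cached
                               * page if one is available; otherwise allocate a page from the
                               * VM_FREELIST_FIRST256 free list and use its physical address
                               * directly.
                               */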
   2895        1.1      matt void *
   2896        1.1      matt pmap_pool_malloc(struct pool *pp, int flags)
   2897        1.1      matt {
   2898        1.1      matt 	struct pvo_page *pvop;
   2899        1.1      matt 	struct vm_page *pg;
   2900        1.1      matt 
   2901  1.48.18.1      matt 	PMAP_LOCK();
   2902        1.1      matt 	pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
   2903        1.1      matt 	if (pvop != NULL) {
   2904        1.1      matt 		pmap_mpvop_free--;
   2905        1.1      matt 		SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
   2906  1.48.18.1      matt 		PMAP_UNLOCK();
   2907        1.1      matt 		return pvop;
   2908        1.1      matt 	}
   2909  1.48.18.1      matt 	PMAP_UNLOCK();
   2910        1.1      matt  again:
   2911        1.1      matt 	pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
   2912        1.1      matt 	    UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
   2913        1.1      matt 	if (__predict_false(pg == NULL)) {
   2914        1.1      matt 		if (flags & PR_WAITOK) {
   2915        1.1      matt 			uvm_wait("plpg");
   2916        1.1      matt 			goto again;
   2917        1.1      matt 		} else {
   2918        1.1      matt 			return (0);
   2919        1.1      matt 		}
   2920        1.1      matt 	}
   2921        1.1      matt 	return (void *) VM_PAGE_TO_PHYS(pg);
   2922        1.1      matt }
   2923        1.1      matt 
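                              /*
                               * Return an unmanaged PVO page to the local free list for reuse.
                               */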
   2924        1.1      matt void
   2925        1.1      matt pmap_pool_ufree(struct pool *pp, void *va)
   2926        1.1      matt {
   2927        1.1      matt 	struct pvo_page *pvop;
   2928        1.1      matt #if 0
   2929        1.1      matt 	if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
   2930        1.1      matt 		pmap_pool_mfree(va, size, tag);
   2931        1.1      matt 		return;
   2932        1.1      matt 	}
   2933        1.1      matt #endif
   2934  1.48.18.1      matt 	PMAP_LOCK();
   2935        1.1      matt 	pvop = va;
   2936        1.1      matt 	SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
   2937        1.1      matt 	pmap_upvop_free++;
   2938        1.1      matt 	if (pmap_upvop_free > pmap_upvop_maxfree)
   2939        1.1      matt 		pmap_upvop_maxfree = pmap_upvop_free;
   2940  1.48.18.1      matt 	PMAP_UNLOCK();
   2941        1.1      matt }
   2942        1.1      matt 
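                              /*
                               * Return a managed PVO page to the local free list for reuse.
                               */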
   2943        1.1      matt void
   2944        1.1      matt pmap_pool_mfree(struct pool *pp, void *va)
   2945        1.1      matt {
   2946        1.1      matt 	struct pvo_page *pvop;
   2947        1.1      matt 
   2948  1.48.18.1      matt 	PMAP_LOCK();
   2949        1.1      matt 	pvop = va;
   2950        1.1      matt 	SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
   2951        1.1      matt 	pmap_mpvop_free++;
   2952        1.1      matt 	if (pmap_mpvop_free > pmap_mpvop_maxfree)
   2953        1.1      matt 		pmap_mpvop_maxfree = pmap_mpvop_free;
   2954  1.48.18.1      matt 	PMAP_UNLOCK();
   2955        1.1      matt #if 0
   2956        1.1      matt 	uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
   2957        1.1      matt #endif
   2958        1.1      matt }
   2959        1.1      matt 
   2960        1.1      matt /*
    2961        1.1      matt  * This routine is used during bootstrap to steal to-be-managed memory
    2962        1.1      matt  * (which will then be unmanaged).  We use it to grab memory from the
    2963        1.1      matt  * first 256MB for our pmap needs, leaving memory above 256MB for other uses.
   2964        1.1      matt  */
   2965        1.1      matt vaddr_t
   2966       1.10   thorpej pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
   2967        1.1      matt {
   2968        1.1      matt 	vsize_t size;
   2969        1.1      matt 	vaddr_t va;
   2970        1.1      matt 	paddr_t pa = 0;
   2971        1.1      matt 	int npgs, bank;
   2972        1.1      matt 	struct vm_physseg *ps;
   2973        1.1      matt 
   2974       1.45   thorpej 	if (uvm.page_init_done == true)
   2975        1.1      matt 		panic("pmap_steal_memory: called _after_ bootstrap");
   2976        1.1      matt 
   2977       1.10   thorpej 	*vstartp = VM_MIN_KERNEL_ADDRESS;
   2978       1.10   thorpej 	*vendp = VM_MAX_KERNEL_ADDRESS;
   2979       1.10   thorpej 
   2980        1.1      matt 	size = round_page(vsize);
   2981        1.1      matt 	npgs = atop(size);
   2982        1.1      matt 
   2983        1.1      matt 	/*
   2984        1.1      matt 	 * PA 0 will never be among those given to UVM so we can use it
   2985        1.1      matt 	 * to indicate we couldn't steal any memory.
   2986        1.1      matt 	 */
   2987        1.1      matt 	for (ps = vm_physmem, bank = 0; bank < vm_nphysseg; bank++, ps++) {
   2988        1.1      matt 		if (ps->free_list == VM_FREELIST_FIRST256 &&
   2989        1.1      matt 		    ps->avail_end - ps->avail_start >= npgs) {
   2990        1.1      matt 			pa = ptoa(ps->avail_start);
   2991        1.1      matt 			break;
   2992        1.1      matt 		}
   2993        1.1      matt 	}
   2994        1.1      matt 
   2995        1.1      matt 	if (pa == 0)
    2996        1.1      matt 		panic("pmap_steal_memory: no appropriate memory to steal!");
   2997        1.1      matt 
   2998        1.1      matt 	ps->avail_start += npgs;
   2999        1.1      matt 	ps->start += npgs;
   3000        1.1      matt 
   3001        1.1      matt 	/*
   3002        1.1      matt 	 * If we've used up all the pages in the segment, remove it and
   3003        1.1      matt 	 * compact the list.
   3004        1.1      matt 	 */
   3005        1.1      matt 	if (ps->avail_start == ps->end) {
   3006        1.1      matt 		/*
   3007        1.1      matt 		 * If this was the last one, then a very bad thing has occurred
   3008        1.1      matt 		 */
   3009        1.1      matt 		if (--vm_nphysseg == 0)
   3010        1.1      matt 			panic("pmap_steal_memory: out of memory!");
   3011        1.1      matt 
   3012        1.1      matt 		printf("pmap_steal_memory: consumed bank %d\n", bank);
   3013        1.1      matt 		for (; bank < vm_nphysseg; bank++, ps++) {
   3014        1.1      matt 			ps[0] = ps[1];
   3015        1.1      matt 		}
   3016        1.1      matt 	}
   3017        1.1      matt 
   3018        1.1      matt 	va = (vaddr_t) pa;
   3019       1.46  christos 	memset((void *) va, 0, size);
   3020        1.1      matt 	pmap_pages_stolen += npgs;
   3021        1.1      matt #ifdef DEBUG
   3022        1.1      matt 	if (pmapdebug && npgs > 1) {
   3023        1.1      matt 		u_int cnt = 0;
   3024        1.1      matt 		for (bank = 0, ps = vm_physmem; bank < vm_nphysseg; bank++, ps++)
   3025        1.1      matt 			cnt += ps->avail_end - ps->avail_start;
   3026        1.1      matt 		printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
   3027        1.1      matt 		    npgs, pmap_pages_stolen, cnt);
   3028        1.1      matt 	}
   3029        1.1      matt #endif
   3030        1.1      matt 
   3031        1.1      matt 	return va;
   3032        1.1      matt }
   3033        1.1      matt 
   3034        1.1      matt /*
    3035        1.1      matt  * Find a chunk of memory with the right size and alignment.
   3036        1.1      matt  */
   3037        1.1      matt void *
   3038        1.1      matt pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
   3039        1.1      matt {
   3040        1.1      matt 	struct mem_region *mp;
   3041        1.1      matt 	paddr_t s, e;
   3042        1.1      matt 	int i, j;
   3043        1.1      matt 
   3044        1.1      matt 	size = round_page(size);
   3045        1.1      matt 
   3046        1.1      matt 	DPRINTFN(BOOT,
   3047        1.1      matt 	    ("pmap_boot_find_memory: size=%lx, alignment=%lx, at_end=%d",
   3048        1.1      matt 	    size, alignment, at_end));
   3049        1.1      matt 
   3050        1.6   thorpej 	if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
   3051        1.1      matt 		panic("pmap_boot_find_memory: invalid alignment %lx",
   3052        1.1      matt 		    alignment);
   3053        1.1      matt 
   3054        1.1      matt 	if (at_end) {
   3055        1.6   thorpej 		if (alignment != PAGE_SIZE)
   3056        1.1      matt 			panic("pmap_boot_find_memory: invalid ending "
   3057        1.1      matt 			    "alignment %lx", alignment);
   3058        1.1      matt 
   3059        1.1      matt 		for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
   3060        1.1      matt 			s = mp->start + mp->size - size;
   3061        1.1      matt 			if (s >= mp->start && mp->size >= size) {
   3062        1.1      matt 				DPRINTFN(BOOT,(": %lx\n", s));
   3063        1.1      matt 				DPRINTFN(BOOT,
   3064        1.1      matt 				    ("pmap_boot_find_memory: b-avail[%d] start "
   3065        1.1      matt 				     "0x%lx size 0x%lx\n", mp - avail,
   3066        1.1      matt 				     mp->start, mp->size));
   3067        1.1      matt 				mp->size -= size;
   3068        1.1      matt 				DPRINTFN(BOOT,
   3069        1.1      matt 				    ("pmap_boot_find_memory: a-avail[%d] start "
   3070        1.1      matt 				     "0x%lx size 0x%lx\n", mp - avail,
   3071        1.1      matt 				     mp->start, mp->size));
   3072        1.1      matt 				return (void *) s;
   3073        1.1      matt 			}
   3074        1.1      matt 		}
   3075        1.1      matt 		panic("pmap_boot_find_memory: no available memory");
   3076        1.1      matt 	}
   3077        1.1      matt 
   3078        1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   3079        1.1      matt 		s = (mp->start + alignment - 1) & ~(alignment-1);
   3080        1.1      matt 		e = s + size;
   3081        1.1      matt 
   3082        1.1      matt 		/*
   3083        1.1      matt 		 * Is the calculated region entirely within the region?
   3084        1.1      matt 		 */
   3085        1.1      matt 		if (s < mp->start || e > mp->start + mp->size)
   3086        1.1      matt 			continue;
   3087        1.1      matt 
   3088        1.1      matt 		DPRINTFN(BOOT,(": %lx\n", s));
   3089        1.1      matt 		if (s == mp->start) {
   3090        1.1      matt 			/*
   3091        1.1      matt 			 * If the block starts at the beginning of the region,
   3092        1.1      matt 			 * adjust the size & start. (the region may now be
   3093        1.1      matt 			 * zero in length)
   3094        1.1      matt 			 */
   3095        1.1      matt 			DPRINTFN(BOOT,
   3096        1.1      matt 			    ("pmap_boot_find_memory: b-avail[%d] start "
   3097        1.1      matt 			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
   3098        1.1      matt 			mp->start += size;
   3099        1.1      matt 			mp->size -= size;
   3100        1.1      matt 			DPRINTFN(BOOT,
   3101        1.1      matt 			    ("pmap_boot_find_memory: a-avail[%d] start "
   3102        1.1      matt 			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
   3103        1.1      matt 		} else if (e == mp->start + mp->size) {
   3104        1.1      matt 			/*
   3105        1.1      matt 			 * If the block ends at the end of the region,
   3106        1.1      matt 			 * adjust only the size.
   3107        1.1      matt 			 */
   3108        1.1      matt 			DPRINTFN(BOOT,
   3109        1.1      matt 			    ("pmap_boot_find_memory: b-avail[%d] start "
   3110        1.1      matt 			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
   3111        1.1      matt 			mp->size -= size;
   3112        1.1      matt 			DPRINTFN(BOOT,
   3113        1.1      matt 			    ("pmap_boot_find_memory: a-avail[%d] start "
   3114        1.1      matt 			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
   3115        1.1      matt 		} else {
   3116        1.1      matt 			/*
   3117        1.1      matt 			 * Block is in the middle of the region, so we
   3118        1.1      matt 			 * have to split it in two.
   3119        1.1      matt 			 */
   3120        1.1      matt 			for (j = avail_cnt; j > i + 1; j--) {
   3121        1.1      matt 				avail[j] = avail[j-1];
   3122        1.1      matt 			}
   3123        1.1      matt 			DPRINTFN(BOOT,
   3124        1.1      matt 			    ("pmap_boot_find_memory: b-avail[%d] start "
   3125        1.1      matt 			     "0x%lx size 0x%lx\n", i, mp->start, mp->size));
   3126        1.1      matt 			mp[1].start = e;
   3127        1.1      matt 			mp[1].size = mp[0].start + mp[0].size - e;
   3128        1.1      matt 			mp[0].size = s - mp[0].start;
   3129        1.1      matt 			avail_cnt++;
   3130        1.1      matt 			for (; i < avail_cnt; i++) {
   3131        1.1      matt 				DPRINTFN(BOOT,
   3132        1.1      matt 				    ("pmap_boot_find_memory: a-avail[%d] "
   3133        1.1      matt 				     "start 0x%lx size 0x%lx\n", i,
   3134        1.1      matt 				     avail[i].start, avail[i].size));
   3135        1.1      matt 			}
   3136        1.1      matt 		}
   3137        1.1      matt 		return (void *) s;
   3138        1.1      matt 	}
   3139        1.1      matt 	panic("pmap_boot_find_memory: not enough memory for "
   3140        1.1      matt 	    "%lx/%lx allocation?", size, alignment);
   3141        1.1      matt }
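                             
                             /*
                              * Illustrative sketch (not part of the build; the numbers are made up):
                              * how the carving above reshapes avail[].  Given
                              * avail[0] = { start 0x3000, size 0x5000 } and a request for size 0x1000
                              * with alignment 0x2000, then
                              *
                              *	s = (0x3000 + 0x2000-1) & ~(0x2000-1) = 0x4000,  e = 0x5000
                              *
                              * Since s != start and e != end, the middle case splits the region into
                              * avail[0] = { 0x3000, 0x1000 } and avail[1] = { 0x5000, 0x3000 },
                              * and (void *)0x4000 is returned.
                              */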
   3142        1.1      matt 
   3143       1.38   sanjayl /* XXXSL: we don't have any BATs to do this, so map in Segment 0 1:1 using page tables */
   3144       1.38   sanjayl #if defined (PPC_OEA64_BRIDGE)
   3145       1.38   sanjayl int
   3146       1.38   sanjayl pmap_setup_segment0_map(int use_large_pages, ...)
   3147       1.38   sanjayl {
   3148       1.38   sanjayl     vaddr_t va;
   3149       1.38   sanjayl 
   3150       1.38   sanjayl     register_t pte_lo = 0x0;
   3151       1.38   sanjayl     int ptegidx = 0, i = 0;
   3152       1.38   sanjayl     struct pte pte;
   3153       1.38   sanjayl     va_list ap;
   3154       1.38   sanjayl 
   3155       1.38   sanjayl     /* Coherent + Supervisor RW, no user access */
   3156       1.38   sanjayl     pte_lo = PTE_M;
   3157       1.38   sanjayl 
   3158       1.38   sanjayl     /* XXXSL
   3159       1.38   sanjayl      * Map in the 1st segment 1:1; we'll be careful not to spill kernel
   3160       1.38   sanjayl      * entries later, since those have to take priority.
   3161       1.38   sanjayl      */
   3162       1.38   sanjayl     for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
   3163       1.38   sanjayl         ptegidx = va_to_pteg(pmap_kernel(), va);
   3164       1.38   sanjayl         pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
   3165       1.38   sanjayl         i = pmap_pte_insert(ptegidx, &pte);
   3166       1.38   sanjayl     }
   3167       1.38   sanjayl 
   3168       1.38   sanjayl     va_start(ap, use_large_pages);
   3169       1.38   sanjayl     while (1) {
   3170       1.38   sanjayl         paddr_t pa;
   3171       1.38   sanjayl         size_t size;
                                     vaddr_t va_end;
   3172       1.38   sanjayl 
   3173       1.38   sanjayl         va = va_arg(ap, vaddr_t);
   3174       1.38   sanjayl 
   3175       1.38   sanjayl         if (va == 0)
   3176       1.38   sanjayl             break;
   3177       1.38   sanjayl 
   3178       1.38   sanjayl         pa = va_arg(ap, paddr_t);
   3179       1.38   sanjayl         size = va_arg(ap, size_t);
                                     /* compute the end once; "va < (va + size)" was always true */
                                     va_end = va + size;
   3180       1.38   sanjayl 
   3181       1.38   sanjayl         for (; va < va_end; va += 0x1000, pa += 0x1000) {
   3182       1.38   sanjayl #if 0
   3183  1.48.18.1      matt 	    printf("%s: Inserting: va: 0x%08lx, pa: 0x%08lx\n", __func__,  va, pa);
   3184       1.38   sanjayl #endif
   3185       1.38   sanjayl             ptegidx = va_to_pteg(pmap_kernel(), va);
   3186       1.38   sanjayl             pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
   3187       1.38   sanjayl             i = pmap_pte_insert(ptegidx, &pte);
   3188       1.38   sanjayl         }
   3189       1.38   sanjayl     }
   3190       1.38   sanjayl 
   3191       1.38   sanjayl     TLBSYNC();
   3192       1.38   sanjayl     SYNC();
   3193       1.38   sanjayl     return (0);
   3194       1.38   sanjayl }
   3195       1.38   sanjayl #endif /* PPC_OEA64_BRIDGE */
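                             
                             /*
                              * Illustrative call (a sketch, not part of the build; the window below
                              * is made up): the variable arguments to pmap_setup_segment0_map() are
                              * (va, pa, size) triples terminated by a zero va, e.g.
                              *
                              *	pmap_setup_segment0_map(0,
                              *	    (vaddr_t)0xf0000000, (paddr_t)0xf0000000, (size_t)0x00100000,
                              *	    (vaddr_t)0);
                              *
                              * which maps an extra 1MB range 1:1 on top of the segment 0 1:1 map.
                              */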
   3196       1.38   sanjayl 
   3197        1.1      matt /*
   3198        1.1      matt  * This is not part of the defined PMAP interface and is specific to the
   3199        1.1      matt  * PowerPC architecture.  This is called during initppc, before the system
   3200        1.1      matt  * is really initialized.
   3201        1.1      matt  */
   3202        1.1      matt void
   3203        1.1      matt pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
   3204        1.1      matt {
   3205        1.1      matt 	struct mem_region *mp, tmp;
   3206        1.1      matt 	paddr_t s, e;
   3207        1.1      matt 	psize_t size;
   3208        1.1      matt 	int i, j;
   3209        1.1      matt 
   3210        1.1      matt 	/*
   3211        1.1      matt 	 * Get memory.
   3212        1.1      matt 	 */
   3213        1.1      matt 	mem_regions(&mem, &avail);
   3214        1.1      matt #if defined(DEBUG)
   3215        1.1      matt 	if (pmapdebug & PMAPDEBUG_BOOT) {
   3216        1.1      matt 		printf("pmap_bootstrap: memory configuration:\n");
   3217        1.1      matt 		for (mp = mem; mp->size; mp++) {
   3218        1.1      matt 			printf("pmap_bootstrap: mem start 0x%lx size 0x%lx\n",
   3219        1.1      matt 				mp->start, mp->size);
   3220        1.1      matt 		}
   3221        1.1      matt 		for (mp = avail; mp->size; mp++) {
   3222        1.1      matt 			printf("pmap_bootstrap: avail start 0x%lx size 0x%lx\n",
   3223        1.1      matt 				mp->start, mp->size);
   3224        1.1      matt 		}
   3225        1.1      matt 	}
   3226        1.1      matt #endif
   3227        1.1      matt 
   3228        1.1      matt 	/*
   3229        1.1      matt 	 * Find out how much physical memory we have and in how many chunks.
   3230        1.1      matt 	 */
   3231        1.1      matt 	for (mem_cnt = 0, mp = mem; mp->size; mp++) {
   3232        1.1      matt 		if (mp->start >= pmap_memlimit)
   3233        1.1      matt 			continue;
   3234        1.1      matt 		if (mp->start + mp->size > pmap_memlimit) {
   3235        1.1      matt 			size = pmap_memlimit - mp->start;
   3236        1.1      matt 			physmem += btoc(size);
   3237        1.1      matt 		} else {
   3238        1.1      matt 			physmem += btoc(mp->size);
   3239        1.1      matt 		}
   3240        1.1      matt 		mem_cnt++;
   3241        1.1      matt 	}
   3242        1.1      matt 
   3243        1.1      matt 	/*
   3244        1.1      matt 	 * Count the number of available entries.
   3245        1.1      matt 	 */
   3246        1.1      matt 	for (avail_cnt = 0, mp = avail; mp->size; mp++)
   3247        1.1      matt 		avail_cnt++;
   3248        1.1      matt 
   3249        1.1      matt 	/*
   3250        1.1      matt 	 * Page align all regions.
   3251        1.1      matt 	 */
   3252        1.1      matt 	kernelstart = trunc_page(kernelstart);
   3253        1.1      matt 	kernelend = round_page(kernelend);
   3254        1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   3255        1.1      matt 		s = round_page(mp->start);
   3256        1.1      matt 		mp->size -= (s - mp->start);
   3257        1.1      matt 		mp->size = trunc_page(mp->size);
   3258        1.1      matt 		mp->start = s;
   3259        1.1      matt 		e = mp->start + mp->size;
   3260        1.1      matt 
   3261        1.1      matt 		DPRINTFN(BOOT,
   3262        1.1      matt 		    ("pmap_bootstrap: b-avail[%d] start 0x%lx size 0x%lx\n",
   3263        1.1      matt 		    i, mp->start, mp->size));
   3264        1.1      matt 
   3265        1.1      matt 		/*
   3266        1.1      matt 		 * Don't allow the end to run beyond our artificial limit
   3267        1.1      matt 		 */
   3268        1.1      matt 		if (e > pmap_memlimit)
   3269        1.1      matt 			e = pmap_memlimit;
   3270        1.1      matt 
   3271        1.1      matt 		/*
   3272        1.1      matt 		 * Is this region empty or strange?  skip it.
   3273        1.1      matt 		 */
   3274        1.1      matt 		if (e <= s) {
   3275        1.1      matt 			mp->start = 0;
   3276        1.1      matt 			mp->size = 0;
   3277        1.1      matt 			continue;
   3278        1.1      matt 		}
   3279        1.1      matt 
   3280        1.1      matt 		/*
   3281        1.1      matt 		 * Does this region overlap the beginning of the kernel?
   3282        1.1      matt 		 * Does it extend past the end of the kernel?
   3283        1.1      matt 		 */
   3284        1.1      matt 		else if (s < kernelstart && e > kernelstart) {
   3285        1.1      matt 			if (e > kernelend) {
   3286        1.1      matt 				avail[avail_cnt].start = kernelend;
   3287        1.1      matt 				avail[avail_cnt].size = e - kernelend;
   3288        1.1      matt 				avail_cnt++;
   3289        1.1      matt 			}
   3290        1.1      matt 			mp->size = kernelstart - s;
   3291        1.1      matt 		}
   3292        1.1      matt 		/*
   3293        1.1      matt 		 * Check whether this region overlaps the end of the kernel.
   3294        1.1      matt 		 */
   3295        1.1      matt 		else if (s < kernelend && e > kernelend) {
   3296        1.1      matt 			mp->start = kernelend;
   3297        1.1      matt 			mp->size = e - kernelend;
   3298        1.1      matt 		}
   3299        1.1      matt 		/*
   3300        1.1      matt 		 * Check whether this region is completely inside the kernel.
   3301        1.1      matt 		 * Nuke it if so.
   3302        1.1      matt 		 */
   3303        1.1      matt 		else if (s >= kernelstart && e <= kernelend) {
   3304        1.1      matt 			mp->start = 0;
   3305        1.1      matt 			mp->size = 0;
   3306        1.1      matt 		}
   3307        1.1      matt 		/*
   3308        1.1      matt 		 * If the user imposed a memory limit, enforce it.
   3309        1.1      matt 		 */
   3310        1.1      matt 		else if (s >= pmap_memlimit) {
   3311        1.6   thorpej 			mp->start = -PAGE_SIZE;	/* mark it so we know why */
   3312        1.1      matt 			mp->size = 0;
   3313        1.1      matt 		}
   3314        1.1      matt 		else {
   3315        1.1      matt 			mp->start = s;
   3316        1.1      matt 			mp->size = e - s;
   3317        1.1      matt 		}
   3318        1.1      matt 		DPRINTFN(BOOT,
   3319        1.1      matt 		    ("pmap_bootstrap: a-avail[%d] start 0x%lx size 0x%lx\n",
   3320        1.1      matt 		    i, mp->start, mp->size));
   3321        1.1      matt 	}
   3322        1.1      matt 
   3323        1.1      matt 	/*
   3324        1.1      matt 	 * Move (and uncount) all the null regions to the end.
   3325        1.1      matt 	 */
   3326        1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   3327        1.1      matt 		if (mp->size == 0) {
   3328        1.1      matt 			tmp = avail[i];
   3329        1.1      matt 			avail[i] = avail[--avail_cnt];
   3330        1.1      matt 			avail[avail_cnt] = tmp;	/* swap, don't duplicate */
                             			i--; mp--;	/* recheck the entry we swapped in */
   3331        1.1      matt 		}
   3332        1.1      matt 	}
   3333        1.1      matt 
   3334        1.1      matt 	/*
   3335        1.1      matt 	 * (Bubble)sort them into ascending order.
   3336        1.1      matt 	 */
   3337        1.1      matt 	for (i = 0; i < avail_cnt; i++) {
   3338        1.1      matt 		for (j = i + 1; j < avail_cnt; j++) {
   3339        1.1      matt 			if (avail[i].start > avail[j].start) {
   3340        1.1      matt 				tmp = avail[i];
   3341        1.1      matt 				avail[i] = avail[j];
   3342        1.1      matt 				avail[j] = tmp;
   3343        1.1      matt 			}
   3344        1.1      matt 		}
   3345        1.1      matt 	}
   3346        1.1      matt 
   3347        1.1      matt 	/*
   3348        1.1      matt 	 * Make sure they don't overlap.
   3349        1.1      matt 	 */
   3350        1.1      matt 	for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
   3351        1.1      matt 		if (mp[0].start + mp[0].size > mp[1].start) {
   3352        1.1      matt 			mp[0].size = mp[1].start - mp[0].start;
   3353        1.1      matt 		}
   3354        1.1      matt 		DPRINTFN(BOOT,
   3355        1.1      matt 		    ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
   3356        1.1      matt 		    i, mp->start, mp->size));
   3357        1.1      matt 	}
   3358        1.1      matt 	DPRINTFN(BOOT,
   3359        1.1      matt 	    ("pmap_bootstrap: avail[%d] start 0x%lx size 0x%lx\n",
   3360        1.1      matt 	    i, mp->start, mp->size));
   3361        1.1      matt 
   3362        1.1      matt #ifdef	PTEGCOUNT
   3363        1.1      matt 	pmap_pteg_cnt = PTEGCOUNT;
   3364        1.1      matt #else /* PTEGCOUNT */
   3365       1.38   sanjayl 
   3366        1.1      matt 	pmap_pteg_cnt = 0x1000;
   3367        1.1      matt 
   3368        1.1      matt 	while (pmap_pteg_cnt < physmem)
   3369        1.1      matt 		pmap_pteg_cnt <<= 1;
   3370        1.1      matt 
   3371        1.1      matt 	pmap_pteg_cnt >>= 1;
   3372        1.1      matt #endif /* PTEGCOUNT */
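                             
                             	/*
                             	 * Worked example (made-up numbers): with physmem = 0x8000 pages
                             	 * (128MB of 4KB pages), the loop above grows pmap_pteg_cnt from
                             	 * 0x1000 to 0x8000 and the final shift leaves 0x4000 PTEGs, i.e.
                             	 * one PTEG per two physical pages.  At 64 bytes per 32-bit PTEG
                             	 * that is a 1MB hash table.
                             	 */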
   3373        1.1      matt 
   3374       1.38   sanjayl #ifdef DEBUG
   3375       1.38   sanjayl 	DPRINTFN(BOOT,
   3376       1.38   sanjayl 		("pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt));
   3377       1.38   sanjayl #endif
   3378       1.38   sanjayl 
   3379        1.1      matt 	/*
   3380        1.1      matt 	 * Find suitably aligned memory for PTEG hash table.
   3381        1.1      matt 	 */
   3382        1.2      matt 	size = pmap_pteg_cnt * sizeof(struct pteg);
   3383        1.1      matt 	pmap_pteg_table = pmap_boot_find_memory(size, size, 0);
   3384       1.38   sanjayl 
   3385       1.38   sanjayl #ifdef DEBUG
   3386       1.38   sanjayl 	DPRINTFN(BOOT,
   3387       1.38   sanjayl 		("PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n",
                             		pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table));
   3388       1.38   sanjayl #endif
   3389       1.38   sanjayl 
   3391        1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   3392        1.1      matt 	if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
   3393        1.1      matt 		panic("pmap_bootstrap: pmap_pteg_table end (%p + %lx) > 256MB",
   3394        1.1      matt 		    pmap_pteg_table, size);
   3395        1.1      matt #endif
   3396        1.1      matt 
   3397       1.32        he 	memset(__UNVOLATILE(pmap_pteg_table), 0,
   3398       1.32        he 		pmap_pteg_cnt * sizeof(struct pteg));
   3399        1.1      matt 	pmap_pteg_mask = pmap_pteg_cnt - 1;
   3400        1.1      matt 
   3401        1.1      matt 	/*
   3402        1.1      matt 	 * We cannot do pmap_steal_memory here since UVM hasn't been loaded
   3403        1.1      matt 	 * with pages.  So we just steal them before giving them to UVM.
   3404        1.1      matt 	 */
   3405        1.1      matt 	size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
   3406        1.6   thorpej 	pmap_pvo_table = pmap_boot_find_memory(size, PAGE_SIZE, 0);
   3407        1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   3408        1.1      matt 	if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
   3409        1.1      matt 		panic("pmap_bootstrap: pmap_pvo_table end (%p + %lx) > 256MB",
   3410        1.1      matt 		    pmap_pvo_table, size);
   3411        1.1      matt #endif
   3412        1.1      matt 
   3413        1.1      matt 	for (i = 0; i < pmap_pteg_cnt; i++)
   3414        1.1      matt 		TAILQ_INIT(&pmap_pvo_table[i]);
   3415        1.1      matt 
   3416        1.1      matt #ifndef MSGBUFADDR
   3417        1.1      matt 	/*
   3418        1.1      matt 	 * Allocate msgbuf in high memory.
   3419        1.1      matt 	 */
   3420        1.6   thorpej 	msgbuf_paddr =
   3421        1.6   thorpej 	    (paddr_t) pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
   3422        1.1      matt #endif
   3423        1.1      matt 
   3424        1.1      matt #ifdef __HAVE_PMAP_PHYSSEG
   3425        1.1      matt 	{
   3426        1.1      matt 		u_int npgs = 0;
   3427        1.1      matt 		for (i = 0, mp = avail; i < avail_cnt; i++, mp++)
   3428        1.1      matt 			npgs += btoc(mp->size);
   3429        1.1      matt 		size = (sizeof(struct pvo_head) + 1) * npgs;
   3430        1.6   thorpej 		pmap_physseg.pvoh = pmap_boot_find_memory(size, PAGE_SIZE, 0);
   3431        1.1      matt 		pmap_physseg.attrs = (char *) &pmap_physseg.pvoh[npgs];
   3432        1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   3433        1.1      matt 		if ((uintptr_t)pmap_physseg.pvoh + size > SEGMENT_LENGTH)
   3434        1.1      matt 			panic("pmap_bootstrap: PVO list end (%p + %lx) > 256MB",
   3435        1.1      matt 			    pmap_physseg.pvoh, size);
   3436        1.1      matt #endif
   3437        1.1      matt 	}
   3438        1.1      matt #endif
   3439        1.1      matt 
   3441        1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
   3442        1.1      matt 		paddr_t pfstart = atop(mp->start);
   3443        1.1      matt 		paddr_t pfend = atop(mp->start + mp->size);
   3444        1.1      matt 		if (mp->size == 0)
   3445        1.1      matt 			continue;
   3446        1.1      matt 		if (mp->start + mp->size <= SEGMENT_LENGTH) {
   3447        1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3448        1.1      matt 				VM_FREELIST_FIRST256);
   3449        1.1      matt 		} else if (mp->start >= SEGMENT_LENGTH) {
   3450        1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3451        1.1      matt 				VM_FREELIST_DEFAULT);
   3452        1.1      matt 		} else {
   3453        1.1      matt 			pfend = atop(SEGMENT_LENGTH);
   3454        1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3455        1.1      matt 				VM_FREELIST_FIRST256);
   3456        1.1      matt 			pfstart = atop(SEGMENT_LENGTH);
   3457        1.1      matt 			pfend = atop(mp->start + mp->size);
   3458        1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3459        1.1      matt 				VM_FREELIST_DEFAULT);
   3460        1.1      matt 		}
   3461        1.1      matt 	}
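                             
                             	/*
                             	 * Illustrative sketch (made-up numbers): a region spanning the
                             	 * 256MB boundary, say { start 0x0ff00000, size 0x00200000 }, is
                             	 * loaded as two banks: 0x0ff00000-0x10000000 goes to
                             	 * VM_FREELIST_FIRST256 and the rest, 0x10000000-0x10100000, to
                             	 * VM_FREELIST_DEFAULT.
                             	 */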
   3462        1.1      matt 
   3463        1.1      matt 	/*
   3464        1.1      matt 	 * Make sure the kernel VSID is allocated, as well as VSID 0.
   3465        1.1      matt 	 */
   3466        1.1      matt 	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
   3467        1.1      matt 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
   3468        1.1      matt 	pmap_vsid_bitmap[0] |= 1;
   3469        1.1      matt 
   3470        1.1      matt 	/*
   3471        1.1      matt 	 * Initialize kernel pmap and hardware.
   3472        1.1      matt 	 */
   3473       1.38   sanjayl 
   3474       1.38   sanjayl 	/* PPC_OEA64_BRIDGE also supports these instructions */
   3475       1.38   sanjayl #if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
   3476        1.1      matt 	for (i = 0; i < 16; i++) {
   3477       1.38   sanjayl  		pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
   3478       1.35     perry 		__asm volatile ("mtsrin %0,%1"
   3479       1.38   sanjayl  			      :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT));
   3480        1.1      matt 	}
   3481        1.1      matt 
   3482        1.1      matt 	pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
   3483       1.35     perry 	__asm volatile ("mtsr %0,%1"
   3484        1.1      matt 		      :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
   3485        1.1      matt #ifdef KERNEL2_SR
   3486        1.1      matt 	pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
   3487       1.35     perry 	__asm volatile ("mtsr %0,%1"
   3488        1.1      matt 		      :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
   3489        1.1      matt #endif
   3490        1.1      matt 	for (i = 0; i < 16; i++) {
   3491        1.1      matt 		if (iosrtable[i] & SR601_T) {
   3492        1.1      matt 			pmap_kernel()->pm_sr[i] = iosrtable[i];
   3493       1.35     perry 			__asm volatile ("mtsrin %0,%1"
   3494        1.1      matt 			    :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
   3495        1.1      matt 		}
   3496        1.1      matt 	}
   3497       1.38   sanjayl #endif /* PPC_OEA || PPC_OEA64_BRIDGE */
   3498       1.38   sanjayl #if defined (PPC_OEA)
   3499       1.35     perry 	__asm volatile ("sync; mtsdr1 %0; isync"
   3500        1.2      matt 		      :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
   3501       1.38   sanjayl #elif defined (PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
   3502       1.38   sanjayl 	__asm volatile ("sync; mtsdr1 %0; isync"
   3503       1.38   sanjayl  		      :: "r"((uintptr_t)pmap_pteg_table | (32 - cntlzw(pmap_pteg_mask >> 11))));
   3504       1.38   sanjayl #endif
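                             
                             	/*
                             	 * Worked SDR1 example (made-up numbers): with pmap_pteg_cnt =
                             	 * 0x4000, pmap_pteg_mask = 0x3fff, so the 32-bit form above ORs
                             	 * in HTABMASK 0x3fff >> 10 = 0xf, while the bridge form encodes
                             	 * HTABSIZE as 32 - cntlzw(0x3fff >> 11) = 32 - cntlzw(0x7) = 3.
                             	 */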
   3505        1.1      matt 	tlbia();
   3506        1.1      matt 
   3507        1.1      matt #ifdef ALTIVEC
   3508        1.1      matt 	pmap_use_altivec = cpu_altivec;
   3509        1.1      matt #endif
   3510        1.1      matt 
   3511        1.1      matt #ifdef DEBUG
   3512        1.1      matt 	if (pmapdebug & PMAPDEBUG_BOOT) {
   3513        1.1      matt 		u_int cnt;
   3514        1.1      matt 		int bank;
   3515        1.1      matt 		char pbuf[9];
   3516        1.1      matt 		for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
   3517        1.1      matt 			cnt += vm_physmem[bank].avail_end - vm_physmem[bank].avail_start;
   3518        1.1      matt 			printf("pmap_bootstrap: vm_physmem[%d]=%#lx-%#lx/%#lx\n",
   3519        1.1      matt 			    bank,
   3520        1.1      matt 			    ptoa(vm_physmem[bank].avail_start),
   3521        1.1      matt 			    ptoa(vm_physmem[bank].avail_end),
   3522        1.1      matt 			    ptoa(vm_physmem[bank].avail_end - vm_physmem[bank].avail_start));
   3523        1.1      matt 		}
   3524        1.1      matt 		format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
   3525        1.1      matt 		printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
   3526        1.1      matt 		    pbuf, cnt);
   3527        1.1      matt 	}
   3528        1.1      matt #endif
   3529        1.1      matt 
   3530        1.1      matt 	pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
   3531        1.1      matt 	    sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
   3532       1.48        ad 	    &pmap_pool_uallocator, IPL_NONE);
   3533        1.1      matt 
   3534        1.1      matt 	pool_setlowat(&pmap_upvo_pool, 252);
   3535        1.1      matt 
   3536        1.1      matt 	pool_init(&pmap_pool, sizeof(struct pmap),
   3537       1.48        ad 	    sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator,
   3538       1.48        ad 	    IPL_NONE);
   3539       1.41      matt 
   3540       1.41      matt #if defined(PMAP_NEED_MAPKERNEL)
   3541       1.41      matt 	{
   3542       1.41      matt 		extern int etext[], kernel_text[];
   3543       1.41      matt 		vaddr_t va, va_etext = (vaddr_t) etext;
   3544       1.41      matt 		paddr_t pa;
   3545       1.42      matt 		register_t sr;
   3546       1.42      matt 
   3547       1.42      matt 		sr = KERNELN_SEGMENT(kernelstart >> ADDR_SR_SHFT)
   3548       1.42      matt 		    |SR_SUKEY|SR_PRKEY;
   3549       1.41      matt 
   3550       1.41      matt 		va = (vaddr_t) kernel_text;
   3551       1.41      matt 
   3552       1.41      matt 		for (pa = kernelstart; va < va_etext;
   3553       1.41      matt 		     pa += PAGE_SIZE, va += PAGE_SIZE)
   3554       1.41      matt 			pmap_enter(pmap_kernel(), va, pa,
   3555       1.41      matt 			    VM_PROT_READ|VM_PROT_EXECUTE, 0);
   3556       1.41      matt 
   3557       1.41      matt 		for (; pa < kernelend;
   3558       1.41      matt 		     pa += PAGE_SIZE, va += PAGE_SIZE)
   3559       1.41      matt 			pmap_enter(pmap_kernel(), va, pa,
   3560       1.41      matt 			    VM_PROT_READ|VM_PROT_WRITE, 0);
   3561       1.42      matt 
   3562       1.42      matt 		pmap_kernel()->pm_sr[kernelstart >> ADDR_SR_SHFT] = sr;
   3563       1.42      matt 		__asm volatile ("mtsrin %0,%1"
   3564       1.42      matt  			      :: "r"(sr), "r"(kernelstart));
   3565       1.41      matt 	}
   3566       1.41      matt #endif
   3567        1.1      matt }
   3568