Home | History | Annotate | Line # | Download | only in oea
pmap.c revision 1.56.6.1
      1  1.56.6.1       mjf /*	$NetBSD: pmap.c,v 1.56.6.1 2008/04/03 12:42:23 mjf Exp $	*/
      2       1.1      matt /*-
      3       1.1      matt  * Copyright (c) 2001 The NetBSD Foundation, Inc.
      4       1.1      matt  * All rights reserved.
      5       1.1      matt  *
      6       1.1      matt  * This code is derived from software contributed to The NetBSD Foundation
      7       1.1      matt  * by Matt Thomas <matt (at) 3am-software.com> of Allegro Networks, Inc.
      8       1.1      matt  *
      9      1.38   sanjayl  * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl (at) kymasys.com>
     10      1.38   sanjayl  * of Kyma Systems LLC.
     11      1.38   sanjayl  *
     12       1.1      matt  * Redistribution and use in source and binary forms, with or without
     13       1.1      matt  * modification, are permitted provided that the following conditions
     14       1.1      matt  * are met:
     15       1.1      matt  * 1. Redistributions of source code must retain the above copyright
     16       1.1      matt  *    notice, this list of conditions and the following disclaimer.
     17       1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     18       1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     19       1.1      matt  *    documentation and/or other materials provided with the distribution.
     20       1.1      matt  * 3. All advertising materials mentioning features or use of this software
     21       1.1      matt  *    must display the following acknowledgement:
     22       1.1      matt  *        This product includes software developed by the NetBSD
     23       1.1      matt  *        Foundation, Inc. and its contributors.
     24       1.1      matt  * 4. Neither the name of The NetBSD Foundation nor the names of its
     25       1.1      matt  *    contributors may be used to endorse or promote products derived
     26       1.1      matt  *    from this software without specific prior written permission.
     27       1.1      matt  *
     28       1.1      matt  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     29       1.1      matt  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     30       1.1      matt  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     31       1.1      matt  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     32       1.1      matt  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     33       1.1      matt  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     34       1.1      matt  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     35       1.1      matt  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     36       1.1      matt  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     37       1.1      matt  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     38       1.1      matt  * POSSIBILITY OF SUCH DAMAGE.
     39       1.1      matt  */
     40       1.1      matt 
     41       1.1      matt /*
     42       1.1      matt  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
     43       1.1      matt  * Copyright (C) 1995, 1996 TooLs GmbH.
     44       1.1      matt  * All rights reserved.
     45       1.1      matt  *
     46       1.1      matt  * Redistribution and use in source and binary forms, with or without
     47       1.1      matt  * modification, are permitted provided that the following conditions
     48       1.1      matt  * are met:
     49       1.1      matt  * 1. Redistributions of source code must retain the above copyright
     50       1.1      matt  *    notice, this list of conditions and the following disclaimer.
     51       1.1      matt  * 2. Redistributions in binary form must reproduce the above copyright
     52       1.1      matt  *    notice, this list of conditions and the following disclaimer in the
     53       1.1      matt  *    documentation and/or other materials provided with the distribution.
     54       1.1      matt  * 3. All advertising materials mentioning features or use of this software
     55       1.1      matt  *    must display the following acknowledgement:
     56       1.1      matt  *	This product includes software developed by TooLs GmbH.
     57       1.1      matt  * 4. The name of TooLs GmbH may not be used to endorse or promote products
     58       1.1      matt  *    derived from this software without specific prior written permission.
     59       1.1      matt  *
     60       1.1      matt  * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
     61       1.1      matt  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     62       1.1      matt  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     63       1.1      matt  * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     64       1.1      matt  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
     65       1.1      matt  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
     66       1.1      matt  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
     67       1.1      matt  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
     68       1.1      matt  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
     69       1.1      matt  * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     70       1.1      matt  */
     71      1.11     lukem 
     72      1.11     lukem #include <sys/cdefs.h>
     73  1.56.6.1       mjf __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.56.6.1 2008/04/03 12:42:23 mjf Exp $");
     74      1.53   garbled 
     75      1.53   garbled #define	PMAP_NOOPNAMES
     76       1.1      matt 
     77      1.18      matt #include "opt_ppcarch.h"
     78       1.1      matt #include "opt_altivec.h"
     79  1.56.6.1       mjf #include "opt_multiprocessor.h"
     80       1.1      matt #include "opt_pmap.h"
     81  1.56.6.1       mjf 
     82       1.1      matt #include <sys/param.h>
     83       1.1      matt #include <sys/malloc.h>
     84       1.1      matt #include <sys/proc.h>
     85       1.1      matt #include <sys/user.h>
     86       1.1      matt #include <sys/pool.h>
     87       1.1      matt #include <sys/queue.h>
     88       1.1      matt #include <sys/device.h>		/* for evcnt */
     89       1.1      matt #include <sys/systm.h>
     90      1.50        ad #include <sys/atomic.h>
     91       1.1      matt 
     92       1.1      matt #include <uvm/uvm.h>
     93       1.1      matt 
     94       1.1      matt #include <machine/pcb.h>
     95       1.1      matt #include <machine/powerpc.h>
     96       1.1      matt #include <powerpc/spr.h>
     97       1.1      matt #include <powerpc/oea/sr_601.h>
     98       1.1      matt #include <powerpc/bat.h>
     99      1.38   sanjayl #include <powerpc/stdarg.h>
    100       1.1      matt 
    101       1.1      matt #ifdef ALTIVEC
    102       1.1      matt int pmap_use_altivec;
    103       1.1      matt #endif
    104       1.1      matt 
    105       1.2      matt volatile struct pteg *pmap_pteg_table;
    106       1.1      matt unsigned int pmap_pteg_cnt;
    107       1.1      matt unsigned int pmap_pteg_mask;
    108      1.21   aymeric #ifdef PMAP_MEMLIMIT
    109      1.53   garbled static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
    110      1.21   aymeric #else
    111      1.53   garbled static paddr_t pmap_memlimit = -PAGE_SIZE;		/* there is no limit */
    112      1.21   aymeric #endif
    113       1.1      matt 
    114       1.1      matt struct pmap kernel_pmap_;
    115       1.1      matt unsigned int pmap_pages_stolen;
    116       1.1      matt u_long pmap_pte_valid;
    117       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
    118       1.1      matt u_long pmap_pvo_enter_depth;
    119       1.1      matt u_long pmap_pvo_remove_depth;
    120       1.1      matt #endif
    121       1.1      matt 
    122       1.1      matt int physmem;
    123       1.1      matt #ifndef MSGBUFADDR
    124       1.1      matt extern paddr_t msgbuf_paddr;
    125       1.1      matt #endif
    126       1.1      matt 
    127       1.1      matt static struct mem_region *mem, *avail;
    128       1.1      matt static u_int mem_cnt, avail_cnt;
    129       1.1      matt 
    130      1.53   garbled #if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
    131      1.53   garbled # define	PMAP_OEA 1
    132      1.53   garbled # if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA64) && !defined(PPC_OEA64_BRIDGE)
    133      1.53   garbled #  define	PMAPNAME(name)	pmap_##name
    134      1.53   garbled # endif
    135      1.53   garbled #endif
    136      1.53   garbled 
    137      1.53   garbled #if defined(PMAP_OEA64)
    138      1.53   garbled # if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA) && !defined(PPC_OEA64_BRIDGE)
    139      1.53   garbled #  define	PMAPNAME(name)	pmap_##name
    140      1.53   garbled # endif
    141      1.53   garbled #endif
    142      1.53   garbled 
    143      1.53   garbled #if defined(PMAP_OEA64_BRIDGE)
    144      1.53   garbled # if defined(PMAP_EXCLUDE_DECLS) && !defined(PPC_OEA) && !defined(PPC_OEA64)
    145      1.53   garbled #  define	PMAPNAME(name)	pmap_##name
    146      1.53   garbled # endif
    147      1.53   garbled #endif
    148      1.53   garbled 
    149      1.53   garbled #if defined(PMAP_OEA)
    150      1.53   garbled #define	_PRIxpte	"lx"
    151      1.53   garbled #else
    152      1.53   garbled #define	_PRIxpte	PRIx64
    153      1.53   garbled #endif
    154      1.53   garbled #define	_PRIxpa		"lx"
    155      1.53   garbled #define	_PRIxva		"lx"
    156      1.54   mlelstv #define	_PRIsr  	"lx"
    157      1.53   garbled 
    158      1.53   garbled #if defined(PMAP_EXCLUDE_DECLS) && !defined(PMAPNAME)
    159      1.53   garbled #if defined(PMAP_OEA)
    160      1.53   garbled #define	PMAPNAME(name)	pmap32_##name
    161      1.53   garbled #elif defined(PMAP_OEA64)
    162      1.53   garbled #define	PMAPNAME(name)	pmap64_##name
    163      1.53   garbled #elif defined(PMAP_OEA64_BRIDGE)
    164      1.53   garbled #define	PMAPNAME(name)	pmap64bridge_##name
    165      1.53   garbled #else
    166      1.53   garbled #error unknown variant for pmap
    167      1.53   garbled #endif
    168      1.53   garbled #endif /* PMAP_EXLCUDE_DECLS && !PMAPNAME */
    169      1.53   garbled 
    170      1.53   garbled #if defined(PMAPNAME)
    171      1.53   garbled #define	STATIC			static
    172      1.53   garbled #define pmap_pte_spill		PMAPNAME(pte_spill)
    173      1.53   garbled #define pmap_real_memory	PMAPNAME(real_memory)
    174      1.53   garbled #define pmap_init		PMAPNAME(init)
    175      1.53   garbled #define pmap_virtual_space	PMAPNAME(virtual_space)
    176      1.53   garbled #define pmap_create		PMAPNAME(create)
    177      1.53   garbled #define pmap_reference		PMAPNAME(reference)
    178      1.53   garbled #define pmap_destroy		PMAPNAME(destroy)
    179      1.53   garbled #define pmap_copy		PMAPNAME(copy)
    180      1.53   garbled #define pmap_update		PMAPNAME(update)
    181      1.53   garbled #define pmap_collect		PMAPNAME(collect)
    182      1.53   garbled #define pmap_enter		PMAPNAME(enter)
    183      1.53   garbled #define pmap_remove		PMAPNAME(remove)
    184      1.53   garbled #define pmap_kenter_pa		PMAPNAME(kenter_pa)
    185      1.53   garbled #define pmap_kremove		PMAPNAME(kremove)
    186      1.53   garbled #define pmap_extract		PMAPNAME(extract)
    187      1.53   garbled #define pmap_protect		PMAPNAME(protect)
    188      1.53   garbled #define pmap_unwire		PMAPNAME(unwire)
    189      1.53   garbled #define pmap_page_protect	PMAPNAME(page_protect)
    190      1.53   garbled #define pmap_query_bit		PMAPNAME(query_bit)
    191      1.53   garbled #define pmap_clear_bit		PMAPNAME(clear_bit)
    192      1.53   garbled 
    193      1.53   garbled #define pmap_activate		PMAPNAME(activate)
    194      1.53   garbled #define pmap_deactivate		PMAPNAME(deactivate)
    195      1.53   garbled 
    196      1.53   garbled #define pmap_pinit		PMAPNAME(pinit)
    197      1.53   garbled #define pmap_procwr		PMAPNAME(procwr)
    198      1.53   garbled 
    199      1.53   garbled #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
    200      1.53   garbled #define pmap_pte_print		PMAPNAME(pte_print)
    201      1.53   garbled #define pmap_pteg_check		PMAPNAME(pteg_check)
    202      1.53   garbled #define pmap_print_mmruregs	PMAPNAME(print_mmuregs)
    203      1.53   garbled #define pmap_print_pte		PMAPNAME(print_pte)
    204      1.53   garbled #define pmap_pteg_dist		PMAPNAME(pteg_dist)
    205      1.53   garbled #endif
    206      1.53   garbled #if defined(DEBUG) || defined(PMAPCHECK)
    207      1.53   garbled #define	pmap_pvo_verify		PMAPNAME(pvo_verify)
    208      1.56       phx #define pmapcheck		PMAPNAME(check)
    209      1.56       phx #endif
    210      1.56       phx #if defined(DEBUG) || defined(PMAPDEBUG)
    211      1.56       phx #define pmapdebug		PMAPNAME(debug)
    212      1.53   garbled #endif
    213      1.53   garbled #define pmap_steal_memory	PMAPNAME(steal_memory)
    214      1.53   garbled #define pmap_bootstrap		PMAPNAME(bootstrap)
    215      1.53   garbled #else
    216      1.53   garbled #define	STATIC			/* nothing */
    217      1.53   garbled #endif /* PMAPNAME */
    218      1.53   garbled 
    219      1.53   garbled STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
    220      1.53   garbled STATIC void pmap_real_memory(paddr_t *, psize_t *);
    221      1.53   garbled STATIC void pmap_init(void);
    222      1.53   garbled STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
    223      1.53   garbled STATIC pmap_t pmap_create(void);
    224      1.53   garbled STATIC void pmap_reference(pmap_t);
    225      1.53   garbled STATIC void pmap_destroy(pmap_t);
    226      1.53   garbled STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
    227      1.53   garbled STATIC void pmap_update(pmap_t);
    228      1.53   garbled STATIC void pmap_collect(pmap_t);
    229      1.53   garbled STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
    230      1.53   garbled STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
    231      1.53   garbled STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t);
    232      1.53   garbled STATIC void pmap_kremove(vaddr_t, vsize_t);
    233      1.53   garbled STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
    234      1.53   garbled 
    235      1.53   garbled STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
    236      1.53   garbled STATIC void pmap_unwire(pmap_t, vaddr_t);
    237      1.53   garbled STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
    238      1.53   garbled STATIC bool pmap_query_bit(struct vm_page *, int);
    239      1.53   garbled STATIC bool pmap_clear_bit(struct vm_page *, int);
    240      1.53   garbled 
    241      1.53   garbled STATIC void pmap_activate(struct lwp *);
    242      1.53   garbled STATIC void pmap_deactivate(struct lwp *);
    243      1.53   garbled 
    244      1.53   garbled STATIC void pmap_pinit(pmap_t pm);
    245      1.53   garbled STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);
    246      1.53   garbled 
    247      1.53   garbled #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
    248      1.53   garbled STATIC void pmap_pte_print(volatile struct pte *);
    249      1.53   garbled STATIC void pmap_pteg_check(void);
    250      1.53   garbled STATIC void pmap_print_mmuregs(void);
    251      1.53   garbled STATIC void pmap_print_pte(pmap_t, vaddr_t);
    252      1.53   garbled STATIC void pmap_pteg_dist(void);
    253      1.53   garbled #endif
    254      1.53   garbled #if defined(DEBUG) || defined(PMAPCHECK)
    255      1.53   garbled STATIC void pmap_pvo_verify(void);
    256      1.53   garbled #endif
    257      1.53   garbled STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
    258      1.53   garbled STATIC void pmap_bootstrap(paddr_t, paddr_t);
    259      1.53   garbled 
    260      1.53   garbled #ifdef PMAPNAME
    261      1.53   garbled const struct pmap_ops PMAPNAME(ops) = {
    262      1.53   garbled 	.pmapop_pte_spill = pmap_pte_spill,
    263      1.53   garbled 	.pmapop_real_memory = pmap_real_memory,
    264      1.53   garbled 	.pmapop_init = pmap_init,
    265      1.53   garbled 	.pmapop_virtual_space = pmap_virtual_space,
    266      1.53   garbled 	.pmapop_create = pmap_create,
    267      1.53   garbled 	.pmapop_reference = pmap_reference,
    268      1.53   garbled 	.pmapop_destroy = pmap_destroy,
    269      1.53   garbled 	.pmapop_copy = pmap_copy,
    270      1.53   garbled 	.pmapop_update = pmap_update,
    271      1.53   garbled 	.pmapop_collect = pmap_collect,
    272      1.53   garbled 	.pmapop_enter = pmap_enter,
    273      1.53   garbled 	.pmapop_remove = pmap_remove,
    274      1.53   garbled 	.pmapop_kenter_pa = pmap_kenter_pa,
    275      1.53   garbled 	.pmapop_kremove = pmap_kremove,
    276      1.53   garbled 	.pmapop_extract = pmap_extract,
    277      1.53   garbled 	.pmapop_protect = pmap_protect,
    278      1.53   garbled 	.pmapop_unwire = pmap_unwire,
    279      1.53   garbled 	.pmapop_page_protect = pmap_page_protect,
    280      1.53   garbled 	.pmapop_query_bit = pmap_query_bit,
    281      1.53   garbled 	.pmapop_clear_bit = pmap_clear_bit,
    282      1.53   garbled 	.pmapop_activate = pmap_activate,
    283      1.53   garbled 	.pmapop_deactivate = pmap_deactivate,
    284      1.53   garbled 	.pmapop_pinit = pmap_pinit,
    285      1.53   garbled 	.pmapop_procwr = pmap_procwr,
    286      1.53   garbled #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
    287      1.53   garbled 	.pmapop_pte_print = pmap_pte_print,
    288      1.53   garbled 	.pmapop_pteg_check = pmap_pteg_check,
    289      1.53   garbled 	.pmapop_print_mmuregs = pmap_print_mmuregs,
    290      1.53   garbled 	.pmapop_print_pte = pmap_print_pte,
    291      1.53   garbled 	.pmapop_pteg_dist = pmap_pteg_dist,
    292      1.53   garbled #else
    293      1.53   garbled 	.pmapop_pte_print = NULL,
    294      1.53   garbled 	.pmapop_pteg_check = NULL,
    295      1.53   garbled 	.pmapop_print_mmuregs = NULL,
    296      1.53   garbled 	.pmapop_print_pte = NULL,
    297      1.53   garbled 	.pmapop_pteg_dist = NULL,
    298      1.53   garbled #endif
    299      1.53   garbled #if defined(DEBUG) || defined(PMAPCHECK)
    300      1.53   garbled 	.pmapop_pvo_verify = pmap_pvo_verify,
    301      1.53   garbled #else
    302      1.53   garbled 	.pmapop_pvo_verify = NULL,
    303       1.1      matt #endif
    304      1.53   garbled 	.pmapop_steal_memory = pmap_steal_memory,
    305      1.53   garbled 	.pmapop_bootstrap = pmap_bootstrap,
    306      1.53   garbled };
    307      1.53   garbled #endif /* !PMAPNAME */
    308       1.1      matt 
    309       1.1      matt /*
    310      1.38   sanjayl  * The following structure is aligned to 32 bytes
    311       1.1      matt  */
    312       1.1      matt struct pvo_entry {
    313       1.1      matt 	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
    314       1.1      matt 	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
    315       1.1      matt 	struct pte pvo_pte;			/* Prebuilt PTE */
    316       1.1      matt 	pmap_t pvo_pmap;			/* ptr to owning pmap */
    317       1.1      matt 	vaddr_t pvo_vaddr;			/* VA of entry */
    318       1.1      matt #define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
    319       1.1      matt #define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
    320       1.1      matt #define	PVO_WIRED		0x0010		/* PVO entry is wired */
    321       1.1      matt #define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
    322       1.1      matt #define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
    323      1.39      matt #define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
    324      1.39      matt #define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
    325      1.39      matt #define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
    326      1.12      matt #define	PVO_ENTER_INSERT	0		/* PVO has been removed */
    327      1.12      matt #define	PVO_SPILL_UNSET		1		/* PVO has been evicted */
    328      1.12      matt #define	PVO_SPILL_SET		2		/* PVO has been spilled */
    329      1.12      matt #define	PVO_SPILL_INSERT	3		/* PVO has been inserted */
    330      1.12      matt #define	PVO_PMAP_PAGE_PROTECT	4		/* PVO has changed */
    331      1.12      matt #define	PVO_PMAP_PROTECT	5		/* PVO has changed */
    332      1.12      matt #define	PVO_REMOVE		6		/* PVO has been removed */
    333      1.12      matt #define	PVO_WHERE_MASK		15
    334      1.12      matt #define	PVO_WHERE_SHFT		8
    335      1.38   sanjayl } __attribute__ ((aligned (32)));
    336       1.1      matt #define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
    337       1.1      matt #define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
    338       1.1      matt #define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
    339       1.1      matt #define	PVO_PTEGIDX_CLR(pvo)	\
    340       1.1      matt 	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
    341       1.1      matt #define	PVO_PTEGIDX_SET(pvo,i)	\
    342       1.1      matt 	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
    343      1.12      matt #define	PVO_WHERE(pvo,w)	\
    344      1.12      matt 	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
    345      1.12      matt 	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
    346       1.1      matt 
    347       1.1      matt TAILQ_HEAD(pvo_tqhead, pvo_entry);
    348       1.1      matt struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
    349      1.53   garbled static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
    350      1.53   garbled static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */
    351       1.1      matt 
    352       1.1      matt struct pool pmap_pool;		/* pool for pmap structures */
    353       1.1      matt struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
    354       1.1      matt struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */
    355       1.1      matt 
    356       1.1      matt /*
    357       1.1      matt  * We keep a cache of unmanaged pages to be used for pvo entries for
    358       1.1      matt  * unmanaged pages.
    359       1.1      matt  */
    360       1.1      matt struct pvo_page {
    361       1.1      matt 	SIMPLEQ_ENTRY(pvo_page) pvop_link;
    362       1.1      matt };
    363       1.1      matt SIMPLEQ_HEAD(pvop_head, pvo_page);
    364      1.53   garbled static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
    365      1.53   garbled static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
    366       1.1      matt u_long pmap_upvop_free;
    367       1.1      matt u_long pmap_upvop_maxfree;
    368       1.1      matt u_long pmap_mpvop_free;
    369       1.1      matt u_long pmap_mpvop_maxfree;
    370       1.1      matt 
    371      1.53   garbled static void *pmap_pool_ualloc(struct pool *, int);
    372      1.53   garbled static void *pmap_pool_malloc(struct pool *, int);
    373       1.1      matt 
    374      1.53   garbled static void pmap_pool_ufree(struct pool *, void *);
    375      1.53   garbled static void pmap_pool_mfree(struct pool *, void *);
    376       1.1      matt 
    377       1.1      matt static struct pool_allocator pmap_pool_mallocator = {
    378      1.43   garbled 	.pa_alloc = pmap_pool_malloc,
    379      1.43   garbled 	.pa_free = pmap_pool_mfree,
    380      1.43   garbled 	.pa_pagesz = 0,
    381       1.1      matt };
    382       1.1      matt 
    383       1.1      matt static struct pool_allocator pmap_pool_uallocator = {
    384      1.43   garbled 	.pa_alloc = pmap_pool_ualloc,
    385      1.43   garbled 	.pa_free = pmap_pool_ufree,
    386      1.43   garbled 	.pa_pagesz = 0,
    387       1.1      matt };
    388       1.1      matt 
    389       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
    390       1.2      matt void pmap_pte_print(volatile struct pte *);
    391       1.1      matt void pmap_pteg_check(void);
    392       1.1      matt void pmap_pteg_dist(void);
    393       1.1      matt void pmap_print_pte(pmap_t, vaddr_t);
    394       1.1      matt void pmap_print_mmuregs(void);
    395       1.1      matt #endif
    396       1.1      matt 
    397       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
    398       1.1      matt #ifdef PMAPCHECK
    399       1.1      matt int pmapcheck = 1;
    400       1.1      matt #else
    401       1.1      matt int pmapcheck = 0;
    402       1.1      matt #endif
    403       1.1      matt void pmap_pvo_verify(void);
    404      1.53   garbled static void pmap_pvo_check(const struct pvo_entry *);
    405       1.1      matt #define	PMAP_PVO_CHECK(pvo)	 		\
    406       1.1      matt 	do {					\
    407       1.1      matt 		if (pmapcheck)			\
    408       1.1      matt 			pmap_pvo_check(pvo);	\
    409       1.1      matt 	} while (0)
    410       1.1      matt #else
    411       1.1      matt #define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
    412       1.1      matt #endif
    413      1.53   garbled static int pmap_pte_insert(int, struct pte *);
    414      1.53   garbled static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
    415       1.2      matt 	vaddr_t, paddr_t, register_t, int);
    416      1.53   garbled static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
    417      1.53   garbled static void pmap_pvo_free(struct pvo_entry *);
    418      1.53   garbled static void pmap_pvo_free_list(struct pvo_head *);
    419      1.53   garbled static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
    420      1.53   garbled static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
    421      1.53   garbled static struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
    422      1.53   garbled static void pvo_set_exec(struct pvo_entry *);
    423      1.53   garbled static void pvo_clear_exec(struct pvo_entry *);
    424       1.1      matt 
    425      1.53   garbled static void tlbia(void);
    426       1.1      matt 
    427      1.53   garbled static void pmap_release(pmap_t);
    428      1.53   garbled static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);
    429       1.1      matt 
    430      1.25       chs static uint32_t pmap_pvo_reclaim_nextidx;
    431      1.25       chs #ifdef DEBUG
    432      1.25       chs static int pmap_pvo_reclaim_debugctr;
    433      1.25       chs #endif
    434      1.25       chs 
    435       1.1      matt #define	VSID_NBPW	(sizeof(uint32_t) * 8)
    436       1.1      matt static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];
    437       1.1      matt 
    438       1.1      matt static int pmap_initialized;
    439       1.1      matt 
    440       1.1      matt #if defined(DEBUG) || defined(PMAPDEBUG)
    441       1.1      matt #define	PMAPDEBUG_BOOT		0x0001
    442       1.1      matt #define	PMAPDEBUG_PTE		0x0002
    443       1.1      matt #define	PMAPDEBUG_EXEC		0x0008
    444       1.1      matt #define	PMAPDEBUG_PVOENTER	0x0010
    445       1.1      matt #define	PMAPDEBUG_PVOREMOVE	0x0020
    446       1.1      matt #define	PMAPDEBUG_ACTIVATE	0x0100
    447       1.1      matt #define	PMAPDEBUG_CREATE	0x0200
    448       1.1      matt #define	PMAPDEBUG_ENTER		0x1000
    449       1.1      matt #define	PMAPDEBUG_KENTER	0x2000
    450       1.1      matt #define	PMAPDEBUG_KREMOVE	0x4000
    451       1.1      matt #define	PMAPDEBUG_REMOVE	0x8000
    452      1.38   sanjayl 
    453       1.1      matt unsigned int pmapdebug = 0;
    454      1.38   sanjayl 
    455       1.1      matt # define DPRINTF(x)		printf x
    456       1.1      matt # define DPRINTFN(n, x)		if (pmapdebug & PMAPDEBUG_ ## n) printf x
    457       1.1      matt #else
    458       1.1      matt # define DPRINTF(x)
    459       1.1      matt # define DPRINTFN(n, x)
    460       1.1      matt #endif
    461       1.1      matt 
    462       1.1      matt 
    463       1.1      matt #ifdef PMAPCOUNTERS
    464       1.1      matt /*
    465       1.1      matt  * From pmap_subr.c
    466       1.1      matt  */
    467      1.53   garbled extern struct evcnt pmap_evcnt_mappings;
    468      1.53   garbled extern struct evcnt pmap_evcnt_unmappings;
    469      1.53   garbled 
    470      1.53   garbled extern struct evcnt pmap_evcnt_kernel_mappings;
    471      1.53   garbled extern struct evcnt pmap_evcnt_kernel_unmappings;
    472      1.53   garbled 
    473      1.53   garbled extern struct evcnt pmap_evcnt_mappings_replaced;
    474      1.53   garbled 
    475      1.53   garbled extern struct evcnt pmap_evcnt_exec_mappings;
    476      1.53   garbled extern struct evcnt pmap_evcnt_exec_cached;
    477      1.53   garbled 
    478      1.53   garbled extern struct evcnt pmap_evcnt_exec_synced;
    479      1.53   garbled extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
    480      1.53   garbled extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;
    481      1.53   garbled 
    482      1.53   garbled extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
    483      1.53   garbled extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
    484      1.53   garbled extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
    485      1.53   garbled extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
    486      1.53   garbled extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;
    487      1.53   garbled 
    488      1.53   garbled extern struct evcnt pmap_evcnt_updates;
    489      1.53   garbled extern struct evcnt pmap_evcnt_collects;
    490      1.53   garbled extern struct evcnt pmap_evcnt_copies;
    491      1.53   garbled 
    492      1.53   garbled extern struct evcnt pmap_evcnt_ptes_spilled;
    493      1.53   garbled extern struct evcnt pmap_evcnt_ptes_unspilled;
    494      1.53   garbled extern struct evcnt pmap_evcnt_ptes_evicted;
    495      1.53   garbled 
    496      1.53   garbled extern struct evcnt pmap_evcnt_ptes_primary[8];
    497      1.53   garbled extern struct evcnt pmap_evcnt_ptes_secondary[8];
    498      1.53   garbled extern struct evcnt pmap_evcnt_ptes_removed;
    499      1.53   garbled extern struct evcnt pmap_evcnt_ptes_changed;
    500      1.53   garbled extern struct evcnt pmap_evcnt_pvos_reclaimed;
    501      1.53   garbled extern struct evcnt pmap_evcnt_pvos_failed;
    502      1.53   garbled 
    503       1.1      matt extern struct evcnt pmap_evcnt_zeroed_pages;
    504       1.1      matt extern struct evcnt pmap_evcnt_copied_pages;
    505       1.1      matt extern struct evcnt pmap_evcnt_idlezeroed_pages;
    506      1.26      matt 
    507      1.53   garbled #define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
    508      1.53   garbled #define	PMAPCOUNT2(ev)	((ev).ev_count++)
    509       1.1      matt #else
    510       1.1      matt #define	PMAPCOUNT(ev)	((void) 0)
    511       1.1      matt #define	PMAPCOUNT2(ev)	((void) 0)
    512       1.1      matt #endif
    513       1.1      matt 
    514      1.35     perry #define	TLBIE(va)	__asm volatile("tlbie %0" :: "r"(va))
    515      1.38   sanjayl 
    516      1.38   sanjayl /* XXXSL: this needs to be moved to assembler */
    517      1.38   sanjayl #define	TLBIEL(va)	__asm __volatile("tlbie %0" :: "r"(va))
    518      1.38   sanjayl 
    519      1.35     perry #define	TLBSYNC()	__asm volatile("tlbsync")
    520      1.35     perry #define	SYNC()		__asm volatile("sync")
    521      1.35     perry #define	EIEIO()		__asm volatile("eieio")
    522  1.56.6.1       mjf #define	DCBST(va)	__asm __volatile("dcbst 0,%0" :: "r"(va))
    523       1.1      matt #define	MFMSR()		mfmsr()
    524       1.1      matt #define	MTMSR(psl)	mtmsr(psl)
    525       1.1      matt #define	MFPVR()		mfpvr()
    526       1.1      matt #define	MFSRIN(va)	mfsrin(va)
    527       1.1      matt #define	MFTB()		mfrtcltbl()
    528       1.1      matt 
    529      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
    530      1.35     perry static inline register_t
    531       1.1      matt mfsrin(vaddr_t va)
    532       1.1      matt {
    533       1.2      matt 	register_t sr;
    534      1.35     perry 	__asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
    535       1.1      matt 	return sr;
    536       1.1      matt }
    537      1.53   garbled #endif	/* PMAP_OEA*/
    538      1.38   sanjayl 
    539      1.53   garbled #if defined (PMAP_OEA64_BRIDGE)
    540      1.38   sanjayl extern void mfmsr64 (register64_t *result);
    541      1.53   garbled #endif /* PMAP_OEA64_BRIDGE */
    542      1.38   sanjayl 
    543      1.50        ad #define	PMAP_LOCK()		KERNEL_LOCK(1, NULL)
    544      1.50        ad #define	PMAP_UNLOCK()		KERNEL_UNLOCK_ONE(NULL)
    545       1.1      matt 
    546      1.35     perry static inline register_t
    547       1.1      matt pmap_interrupts_off(void)
    548       1.1      matt {
    549       1.2      matt 	register_t msr = MFMSR();
    550       1.1      matt 	if (msr & PSL_EE)
    551       1.1      matt 		MTMSR(msr & ~PSL_EE);
    552       1.1      matt 	return msr;
    553       1.1      matt }
    554       1.1      matt 
    555       1.1      matt static void
    556       1.2      matt pmap_interrupts_restore(register_t msr)
    557       1.1      matt {
    558       1.1      matt 	if (msr & PSL_EE)
    559       1.1      matt 		MTMSR(msr);
    560       1.1      matt }
    561       1.1      matt 
    562      1.35     perry static inline u_int32_t
    563       1.1      matt mfrtcltbl(void)
    564       1.1      matt {
    565      1.55   garbled #ifdef PPC_OEA601
    566       1.1      matt 	if ((MFPVR() >> 16) == MPC601)
    567       1.1      matt 		return (mfrtcl() >> 7);
    568       1.1      matt 	else
    569      1.55   garbled #endif
    570       1.1      matt 		return (mftbl());
    571       1.1      matt }
    572       1.1      matt 
    573       1.1      matt /*
    574       1.1      matt  * These small routines may have to be replaced,
    575       1.1      matt  * if/when we support processors other that the 604.
    576       1.1      matt  */
    577       1.1      matt 
    578       1.1      matt void
    579       1.1      matt tlbia(void)
    580       1.1      matt {
    581      1.47  macallan 	char *i;
    582       1.1      matt 
    583       1.1      matt 	SYNC();
    584      1.53   garbled #if defined(PMAP_OEA)
    585       1.1      matt 	/*
    586       1.1      matt 	 * Why not use "tlbia"?  Because not all processors implement it.
    587       1.1      matt 	 *
    588      1.20       wiz 	 * This needs to be a per-CPU callback to do the appropriate thing
    589       1.1      matt 	 * for the CPU. XXX
    590       1.1      matt 	 */
    591      1.47  macallan 	for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
    592       1.1      matt 		TLBIE(i);
    593       1.1      matt 		EIEIO();
    594       1.1      matt 		SYNC();
    595       1.1      matt 	}
    596      1.53   garbled #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
    597      1.38   sanjayl 	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
    598      1.51   garbled 	for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
    599      1.38   sanjayl 		TLBIEL(i);
    600      1.38   sanjayl 		EIEIO();
    601      1.38   sanjayl 		SYNC();
    602      1.38   sanjayl 	}
    603      1.38   sanjayl #endif
    604       1.1      matt 	TLBSYNC();
    605       1.1      matt 	SYNC();
    606       1.1      matt }
    607       1.1      matt 
    608      1.35     perry static inline register_t
    609       1.2      matt va_to_vsid(const struct pmap *pm, vaddr_t addr)
    610       1.1      matt {
    611      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
    612      1.38   sanjayl 	return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
    613      1.53   garbled #else /* PMAP_OEA64 */
    614      1.18      matt #if 0
    615      1.18      matt 	const struct ste *ste;
    616      1.18      matt 	register_t hash;
    617      1.18      matt 	int i;
    618      1.18      matt 
    619      1.18      matt 	hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;
    620      1.18      matt 
    621      1.18      matt 	/*
    622      1.18      matt 	 * Try the primary group first
    623      1.18      matt 	 */
    624      1.18      matt 	ste = pm->pm_stes[hash].stes;
    625      1.18      matt 	for (i = 0; i < 8; i++, ste++) {
    626      1.18      matt 		if (ste->ste_hi & STE_V) &&
    627      1.18      matt 		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
    628      1.18      matt 			return ste;
    629      1.18      matt 	}
    630      1.18      matt 
    631      1.18      matt 	/*
    632      1.18      matt 	 * Then the secondary group.
    633      1.18      matt 	 */
    634      1.18      matt 	ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
    635      1.18      matt 	for (i = 0; i < 8; i++, ste++) {
    636      1.18      matt 		if (ste->ste_hi & STE_V) &&
    637      1.18      matt 		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
    638      1.18      matt 			return addr;
    639      1.18      matt 	}
    640      1.18      matt 
    641      1.18      matt 	return NULL;
    642      1.18      matt #else
    643      1.18      matt 	/*
    644      1.18      matt 	 * Rather than searching the STE groups for the VSID, we know
    645      1.18      matt 	 * how we generate that from the ESID and so do that.
    646      1.18      matt 	 */
    647      1.18      matt 	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
    648      1.18      matt #endif
    649      1.53   garbled #endif /* PMAP_OEA */
    650       1.1      matt }
    651       1.1      matt 
    652      1.35     perry static inline register_t
    653       1.2      matt va_to_pteg(const struct pmap *pm, vaddr_t addr)
    654       1.1      matt {
    655       1.2      matt 	register_t hash;
    656       1.2      matt 
    657       1.2      matt 	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
    658       1.1      matt 	return hash & pmap_pteg_mask;
    659       1.1      matt }
    660       1.1      matt 
    661       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
    662       1.1      matt /*
    663       1.1      matt  * Given a PTE in the page table, calculate the VADDR that hashes to it.
    664       1.1      matt  * The only bit of magic is that the top 4 bits of the address doesn't
    665       1.1      matt  * technically exist in the PTE.  But we know we reserved 4 bits of the
    666       1.1      matt  * VSID for it so that's how we get it.
    667       1.1      matt  */
    668       1.1      matt static vaddr_t
    669       1.2      matt pmap_pte_to_va(volatile const struct pte *pt)
    670       1.1      matt {
    671       1.1      matt 	vaddr_t va;
    672       1.1      matt 	uintptr_t ptaddr = (uintptr_t) pt;
    673       1.1      matt 
    674       1.1      matt 	if (pt->pte_hi & PTE_HID)
    675       1.2      matt 		ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));
    676       1.1      matt 
    677      1.18      matt 	/* PPC Bits 10-19  PPC64 Bits 42-51 */
    678      1.53   garbled #if defined(PMAP_OEA)
    679       1.4      matt 	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
    680      1.53   garbled #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
    681      1.38   sanjayl 	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
    682      1.38   sanjayl #endif
    683       1.1      matt 	va <<= ADDR_PIDX_SHFT;
    684       1.1      matt 
    685      1.18      matt 	/* PPC Bits 4-9  PPC64 Bits 36-41 */
    686       1.1      matt 	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;
    687       1.1      matt 
    688      1.53   garbled #if defined(PMAP_OEA64)
    689      1.18      matt 	/* PPC63 Bits 0-35 */
    690      1.18      matt 	/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
    691      1.53   garbled #elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
    692       1.1      matt 	/* PPC Bits 0-3 */
    693       1.1      matt 	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
    694      1.18      matt #endif
    695       1.1      matt 
    696       1.1      matt 	return va;
    697       1.1      matt }
    698       1.1      matt #endif
    699       1.1      matt 
    700      1.35     perry static inline struct pvo_head *
    701       1.1      matt pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
    702       1.1      matt {
    703       1.1      matt 	struct vm_page *pg;
    704       1.1      matt 
    705       1.1      matt 	pg = PHYS_TO_VM_PAGE(pa);
    706       1.1      matt 	if (pg_p != NULL)
    707       1.1      matt 		*pg_p = pg;
    708       1.1      matt 	if (pg == NULL)
    709       1.1      matt 		return &pmap_pvo_unmanaged;
    710       1.1      matt 	return &pg->mdpage.mdpg_pvoh;
    711       1.1      matt }
    712       1.1      matt 
    713      1.35     perry static inline struct pvo_head *
    714       1.1      matt vm_page_to_pvoh(struct vm_page *pg)
    715       1.1      matt {
    716       1.1      matt 	return &pg->mdpage.mdpg_pvoh;
    717       1.1      matt }
    718       1.1      matt 
    719       1.1      matt 
    720      1.35     perry static inline void
    721       1.1      matt pmap_attr_clear(struct vm_page *pg, int ptebit)
    722       1.1      matt {
    723       1.1      matt 	pg->mdpage.mdpg_attrs &= ~ptebit;
    724       1.1      matt }
    725       1.1      matt 
    726      1.35     perry static inline int
    727       1.1      matt pmap_attr_fetch(struct vm_page *pg)
    728       1.1      matt {
    729       1.1      matt 	return pg->mdpage.mdpg_attrs;
    730       1.1      matt }
    731       1.1      matt 
    732      1.35     perry static inline void
    733       1.1      matt pmap_attr_save(struct vm_page *pg, int ptebit)
    734       1.1      matt {
    735       1.1      matt 	pg->mdpage.mdpg_attrs |= ptebit;
    736       1.1      matt }
    737       1.1      matt 
    738      1.35     perry static inline int
    739       1.2      matt pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
    740       1.1      matt {
    741       1.1      matt 	if (pt->pte_hi == pvo_pt->pte_hi
    742       1.1      matt #if 0
    743       1.1      matt 	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
    744       1.1      matt 	        ~(PTE_REF|PTE_CHG)) == 0
    745       1.1      matt #endif
    746       1.1      matt 	    )
    747       1.1      matt 		return 1;
    748       1.1      matt 	return 0;
    749       1.1      matt }
    750       1.1      matt 
    751      1.35     perry static inline void
    752       1.2      matt pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
    753       1.1      matt {
    754       1.1      matt 	/*
    755       1.1      matt 	 * Construct the PTE.  Default to IMB initially.  Valid bit
    756       1.1      matt 	 * only gets set when the real pte is set in memory.
    757       1.1      matt 	 *
    758       1.1      matt 	 * Note: Don't set the valid bit for correct operation of tlb update.
    759       1.1      matt 	 */
    760      1.53   garbled #if defined(PMAP_OEA)
    761       1.2      matt 	pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
    762       1.2      matt 	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
    763       1.1      matt 	pt->pte_lo = pte_lo;
    764      1.53   garbled #elif defined (PMAP_OEA64_BRIDGE)
    765      1.38   sanjayl 	pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
    766      1.38   sanjayl 	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
    767      1.38   sanjayl 	pt->pte_lo = (u_int64_t) pte_lo;
    768      1.53   garbled #elif defined (PMAP_OEA64)
    769      1.53   garbled #error PMAP_OEA64 not supported
    770      1.53   garbled #endif /* PMAP_OEA */
    771       1.1      matt }
    772       1.1      matt 
    773      1.35     perry static inline void
    774       1.2      matt pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
    775       1.1      matt {
    776       1.1      matt 	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
    777       1.1      matt }
    778       1.1      matt 
    779      1.35     perry static inline void
    780       1.2      matt pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
    781       1.1      matt {
    782       1.1      matt 	/*
    783       1.1      matt 	 * As shown in Section 7.6.3.2.3
    784       1.1      matt 	 */
    785       1.1      matt 	pt->pte_lo &= ~ptebit;
    786       1.1      matt 	TLBIE(va);
    787       1.1      matt 	SYNC();
    788       1.1      matt 	EIEIO();
    789       1.1      matt 	TLBSYNC();
    790       1.1      matt 	SYNC();
    791  1.56.6.1       mjf #ifdef MULTIPROCESSOR
    792  1.56.6.1       mjf 	DCBST(pt);
    793  1.56.6.1       mjf #endif
    794       1.1      matt }
    795       1.1      matt 
    796      1.35     perry static inline void
    797       1.2      matt pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
    798       1.1      matt {
    799       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
    800       1.1      matt 	if (pvo_pt->pte_hi & PTE_VALID)
    801       1.1      matt 		panic("pte_set: setting an already valid pte %p", pvo_pt);
    802       1.1      matt #endif
    803       1.1      matt 	pvo_pt->pte_hi |= PTE_VALID;
    804      1.38   sanjayl 
    805       1.1      matt 	/*
    806       1.1      matt 	 * Update the PTE as defined in section 7.6.3.1
    807       1.1      matt 	 * Note that the REF/CHG bits are from pvo_pt and thus should
    808       1.1      matt 	 * have been saved so this routine can restore them (if desired).
    809       1.1      matt 	 */
    810       1.1      matt 	pt->pte_lo = pvo_pt->pte_lo;
    811       1.1      matt 	EIEIO();
    812       1.1      matt 	pt->pte_hi = pvo_pt->pte_hi;
    813      1.38   sanjayl 	TLBSYNC();
    814       1.1      matt 	SYNC();
    815  1.56.6.1       mjf #ifdef MULTIPROCESSOR
    816  1.56.6.1       mjf 	DCBST(pt);
    817  1.56.6.1       mjf #endif
    818       1.1      matt 	pmap_pte_valid++;
    819       1.1      matt }
    820       1.1      matt 
    821      1.35     perry static inline void
    822       1.2      matt pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
    823       1.1      matt {
    824       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
    825       1.1      matt 	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
    826       1.1      matt 		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
    827       1.1      matt 	if ((pt->pte_hi & PTE_VALID) == 0)
    828       1.1      matt 		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
    829       1.1      matt #endif
    830       1.1      matt 
    831       1.1      matt 	pvo_pt->pte_hi &= ~PTE_VALID;
    832       1.1      matt 	/*
    833       1.1      matt 	 * Force the ref & chg bits back into the PTEs.
    834       1.1      matt 	 */
    835       1.1      matt 	SYNC();
    836       1.1      matt 	/*
    837       1.1      matt 	 * Invalidate the pte ... (Section 7.6.3.3)
    838       1.1      matt 	 */
    839       1.1      matt 	pt->pte_hi &= ~PTE_VALID;
    840       1.1      matt 	SYNC();
    841       1.1      matt 	TLBIE(va);
    842       1.1      matt 	SYNC();
    843       1.1      matt 	EIEIO();
    844       1.1      matt 	TLBSYNC();
    845       1.1      matt 	SYNC();
    846       1.1      matt 	/*
    847       1.1      matt 	 * Save the ref & chg bits ...
    848       1.1      matt 	 */
    849       1.1      matt 	pmap_pte_synch(pt, pvo_pt);
    850       1.1      matt 	pmap_pte_valid--;
    851       1.1      matt }
    852       1.1      matt 
    853      1.35     perry static inline void
    854       1.2      matt pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
    855       1.1      matt {
    856       1.1      matt 	/*
    857       1.1      matt 	 * Invalidate the PTE
    858       1.1      matt 	 */
    859       1.1      matt 	pmap_pte_unset(pt, pvo_pt, va);
    860       1.1      matt 	pmap_pte_set(pt, pvo_pt);
    861       1.1      matt }
    862       1.1      matt 
    863       1.1      matt /*
    864       1.1      matt  * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
    865       1.1      matt  * (either primary or secondary location).
    866       1.1      matt  *
    867       1.1      matt  * Note: both the destination and source PTEs must not have PTE_VALID set.
    868       1.1      matt  */
    869       1.1      matt 
    870      1.53   garbled static int
    871       1.2      matt pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
    872       1.1      matt {
    873       1.2      matt 	volatile struct pte *pt;
    874       1.1      matt 	int i;
    875       1.1      matt 
    876       1.1      matt #if defined(DEBUG)
    877      1.54   mlelstv 	DPRINTFN(PTE, ("pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
    878      1.53   garbled 		ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo));
    879       1.1      matt #endif
    880       1.1      matt 	/*
    881       1.1      matt 	 * First try primary hash.
    882       1.1      matt 	 */
    883       1.1      matt 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
    884       1.1      matt 		if ((pt->pte_hi & PTE_VALID) == 0) {
    885       1.1      matt 			pvo_pt->pte_hi &= ~PTE_HID;
    886       1.1      matt 			pmap_pte_set(pt, pvo_pt);
    887       1.1      matt 			return i;
    888       1.1      matt 		}
    889       1.1      matt 	}
    890       1.1      matt 
    891       1.1      matt 	/*
    892       1.1      matt 	 * Now try secondary hash.
    893       1.1      matt 	 */
    894       1.1      matt 	ptegidx ^= pmap_pteg_mask;
    895       1.1      matt 	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
    896       1.1      matt 		if ((pt->pte_hi & PTE_VALID) == 0) {
    897       1.1      matt 			pvo_pt->pte_hi |= PTE_HID;
    898       1.1      matt 			pmap_pte_set(pt, pvo_pt);
    899       1.1      matt 			return i;
    900       1.1      matt 		}
    901       1.1      matt 	}
    902       1.1      matt 	return -1;
    903       1.1      matt }
    904       1.1      matt 
    905       1.1      matt /*
    906       1.1      matt  * Spill handler.
    907       1.1      matt  *
    908       1.1      matt  * Tries to spill a page table entry from the overflow area.
    909       1.1      matt  * This runs in either real mode (if dealing with a exception spill)
    910       1.1      matt  * or virtual mode when dealing with manually spilling one of the
    911       1.1      matt  * kernel's pte entries.  In either case, interrupts are already
    912       1.1      matt  * disabled.
    913       1.1      matt  */
    914      1.14       chs 
    915       1.1      matt int
    916      1.44   thorpej pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
    917       1.1      matt {
    918       1.1      matt 	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
    919       1.1      matt 	struct pvo_entry *pvo;
    920      1.15    dyoung 	/* XXX: gcc -- vpvoh is always set at either *1* or *2* */
    921      1.15    dyoung 	struct pvo_tqhead *pvoh, *vpvoh = NULL;
    922       1.1      matt 	int ptegidx, i, j;
    923       1.2      matt 	volatile struct pteg *pteg;
    924       1.2      matt 	volatile struct pte *pt;
    925       1.1      matt 
    926      1.50        ad 	PMAP_LOCK();
    927      1.50        ad 
    928       1.2      matt 	ptegidx = va_to_pteg(pm, addr);
    929       1.1      matt 
    930       1.1      matt 	/*
    931       1.1      matt 	 * Have to substitute some entry. Use the primary hash for this.
    932      1.12      matt 	 * Use low bits of timebase as random generator.  Make sure we are
    933      1.12      matt 	 * not picking a kernel pte for replacement.
    934       1.1      matt 	 */
    935       1.1      matt 	pteg = &pmap_pteg_table[ptegidx];
    936       1.1      matt 	i = MFTB() & 7;
    937      1.12      matt 	for (j = 0; j < 8; j++) {
    938      1.12      matt 		pt = &pteg->pt[i];
    939      1.53   garbled 		if ((pt->pte_hi & PTE_VALID) == 0)
    940      1.53   garbled 			break;
    941      1.53   garbled 		if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
    942      1.53   garbled 				< PHYSMAP_VSIDBITS)
    943      1.12      matt 			break;
    944      1.12      matt 		i = (i + 1) & 7;
    945      1.12      matt 	}
    946      1.12      matt 	KASSERT(j < 8);
    947       1.1      matt 
    948       1.1      matt 	source_pvo = NULL;
    949       1.1      matt 	victim_pvo = NULL;
    950       1.1      matt 	pvoh = &pmap_pvo_table[ptegidx];
    951       1.1      matt 	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
    952       1.1      matt 
    953       1.1      matt 		/*
    954       1.1      matt 		 * We need to find pvo entry for this address...
    955       1.1      matt 		 */
    956       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
    957       1.1      matt 
    958       1.1      matt 		/*
    959       1.1      matt 		 * If we haven't found the source and we come to a PVO with
    960       1.1      matt 		 * a valid PTE, then we know we can't find it because all
    961       1.1      matt 		 * evicted PVOs always are first in the list.
    962       1.1      matt 		 */
    963       1.1      matt 		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
    964       1.1      matt 			break;
    965       1.2      matt 		if (source_pvo == NULL && pm == pvo->pvo_pmap &&
    966       1.2      matt 		    addr == PVO_VADDR(pvo)) {
    967       1.1      matt 
    968       1.1      matt 			/*
    969       1.1      matt 			 * Now we have found the entry to be spilled into the
    970       1.1      matt 			 * pteg.  Attempt to insert it into the page table.
    971       1.1      matt 			 */
    972       1.1      matt 			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
    973       1.1      matt 			if (j >= 0) {
    974       1.1      matt 				PVO_PTEGIDX_SET(pvo, j);
    975       1.1      matt 				PMAP_PVO_CHECK(pvo);	/* sanity check */
    976      1.12      matt 				PVO_WHERE(pvo, SPILL_INSERT);
    977       1.1      matt 				pvo->pvo_pmap->pm_evictions--;
    978       1.1      matt 				PMAPCOUNT(ptes_spilled);
    979       1.1      matt 				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
    980       1.1      matt 				    ? pmap_evcnt_ptes_secondary
    981       1.1      matt 				    : pmap_evcnt_ptes_primary)[j]);
    982       1.1      matt 
    983       1.1      matt 				/*
    984       1.1      matt 				 * Since we keep the evicted entries at the
    985       1.1      matt 				 * from of the PVO list, we need move this
    986       1.1      matt 				 * (now resident) PVO after the evicted
    987       1.1      matt 				 * entries.
    988       1.1      matt 				 */
    989       1.1      matt 				next_pvo = TAILQ_NEXT(pvo, pvo_olink);
    990       1.1      matt 
    991       1.1      matt 				/*
    992       1.5      matt 				 * If we don't have to move (either we were the
    993       1.5      matt 				 * last entry or the next entry was valid),
    994       1.1      matt 				 * don't change our position.  Otherwise
    995       1.1      matt 				 * move ourselves to the tail of the queue.
    996       1.1      matt 				 */
    997       1.1      matt 				if (next_pvo != NULL &&
    998       1.1      matt 				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
    999       1.1      matt 					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
   1000       1.1      matt 					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
   1001       1.1      matt 				}
   1002      1.50        ad 				PMAP_UNLOCK();
   1003       1.1      matt 				return 1;
   1004       1.1      matt 			}
   1005       1.1      matt 			source_pvo = pvo;
   1006      1.39      matt 			if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
   1007      1.14       chs 				return 0;
   1008      1.14       chs 			}
   1009       1.1      matt 			if (victim_pvo != NULL)
   1010       1.1      matt 				break;
   1011       1.1      matt 		}
   1012       1.1      matt 
   1013       1.1      matt 		/*
   1014       1.1      matt 		 * We also need the pvo entry of the victim we are replacing
   1015       1.1      matt 		 * so save the R & C bits of the PTE.
   1016       1.1      matt 		 */
   1017       1.1      matt 		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
   1018       1.1      matt 		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
   1019      1.15    dyoung 			vpvoh = pvoh;			/* *1* */
   1020       1.1      matt 			victim_pvo = pvo;
   1021       1.1      matt 			if (source_pvo != NULL)
   1022       1.1      matt 				break;
   1023       1.1      matt 		}
   1024       1.1      matt 	}
   1025       1.1      matt 
   1026       1.1      matt 	if (source_pvo == NULL) {
   1027       1.1      matt 		PMAPCOUNT(ptes_unspilled);
   1028      1.50        ad 		PMAP_UNLOCK();
   1029       1.1      matt 		return 0;
   1030       1.1      matt 	}
   1031       1.1      matt 
   1032       1.1      matt 	if (victim_pvo == NULL) {
   1033       1.1      matt 		if ((pt->pte_hi & PTE_HID) == 0)
   1034       1.1      matt 			panic("pmap_pte_spill: victim p-pte (%p) has "
   1035       1.1      matt 			    "no pvo entry!", pt);
   1036       1.1      matt 
   1037       1.1      matt 		/*
   1038       1.1      matt 		 * If this is a secondary PTE, we need to search
   1039       1.1      matt 		 * its primary pvo bucket for the matching PVO.
   1040       1.1      matt 		 */
   1041      1.15    dyoung 		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; /* *2* */
   1042       1.1      matt 		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
   1043       1.1      matt 			PMAP_PVO_CHECK(pvo);		/* sanity check */
   1044       1.1      matt 
   1045       1.1      matt 			/*
   1046       1.1      matt 			 * We also need the pvo entry of the victim we are
   1047       1.1      matt 			 * replacing so save the R & C bits of the PTE.
   1048       1.1      matt 			 */
   1049       1.1      matt 			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
   1050       1.1      matt 				victim_pvo = pvo;
   1051       1.1      matt 				break;
   1052       1.1      matt 			}
   1053       1.1      matt 		}
   1054       1.1      matt 		if (victim_pvo == NULL)
   1055       1.1      matt 			panic("pmap_pte_spill: victim s-pte (%p) has "
   1056       1.1      matt 			    "no pvo entry!", pt);
   1057       1.1      matt 	}
   1058       1.1      matt 
   1059       1.1      matt 	/*
   1060      1.12      matt 	 * The victim should be not be a kernel PVO/PTE entry.
   1061      1.12      matt 	 */
   1062      1.12      matt 	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
   1063      1.12      matt 	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
   1064      1.12      matt 	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);
   1065      1.12      matt 
   1066      1.12      matt 	/*
   1067       1.1      matt 	 * We are invalidating the TLB entry for the EA for the
   1068       1.1      matt 	 * we are replacing even though its valid; If we don't
   1069       1.1      matt 	 * we lose any ref/chg bit changes contained in the TLB
   1070       1.1      matt 	 * entry.
   1071       1.1      matt 	 */
   1072       1.1      matt 	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
   1073       1.1      matt 
   1074       1.1      matt 	/*
   1075       1.1      matt 	 * To enforce the PVO list ordering constraint that all
   1076       1.1      matt 	 * evicted entries should come before all valid entries,
   1077       1.1      matt 	 * move the source PVO to the tail of its list and the
   1078       1.1      matt 	 * victim PVO to the head of its list (which might not be
   1079       1.1      matt 	 * the same list, if the victim was using the secondary hash).
   1080       1.1      matt 	 */
   1081       1.1      matt 	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
   1082       1.1      matt 	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
   1083       1.1      matt 	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
   1084       1.1      matt 	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
   1085       1.1      matt 	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
   1086       1.1      matt 	pmap_pte_set(pt, &source_pvo->pvo_pte);
   1087       1.1      matt 	victim_pvo->pvo_pmap->pm_evictions++;
   1088       1.1      matt 	source_pvo->pvo_pmap->pm_evictions--;
   1089      1.12      matt 	PVO_WHERE(victim_pvo, SPILL_UNSET);
   1090      1.12      matt 	PVO_WHERE(source_pvo, SPILL_SET);
   1091       1.1      matt 
   1092       1.1      matt 	PVO_PTEGIDX_CLR(victim_pvo);
   1093       1.1      matt 	PVO_PTEGIDX_SET(source_pvo, i);
   1094       1.1      matt 	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
   1095       1.1      matt 	PMAPCOUNT(ptes_spilled);
   1096       1.1      matt 	PMAPCOUNT(ptes_evicted);
   1097       1.1      matt 	PMAPCOUNT(ptes_removed);
   1098       1.1      matt 
   1099       1.1      matt 	PMAP_PVO_CHECK(victim_pvo);
   1100       1.1      matt 	PMAP_PVO_CHECK(source_pvo);
   1101      1.50        ad 
   1102      1.50        ad 	PMAP_UNLOCK();
   1103       1.1      matt 	return 1;
   1104       1.1      matt }
   1105       1.1      matt 
   1106       1.1      matt /*
   1107       1.1      matt  * Restrict given range to physical memory
   1108       1.1      matt  */
   1109       1.1      matt void
   1110       1.1      matt pmap_real_memory(paddr_t *start, psize_t *size)
   1111       1.1      matt {
   1112       1.1      matt 	struct mem_region *mp;
   1113       1.1      matt 
   1114       1.1      matt 	for (mp = mem; mp->size; mp++) {
   1115       1.1      matt 		if (*start + *size > mp->start
   1116       1.1      matt 		    && *start < mp->start + mp->size) {
   1117       1.1      matt 			if (*start < mp->start) {
   1118       1.1      matt 				*size -= mp->start - *start;
   1119       1.1      matt 				*start = mp->start;
   1120       1.1      matt 			}
   1121       1.1      matt 			if (*start + *size > mp->start + mp->size)
   1122       1.1      matt 				*size = mp->start + mp->size - *start;
   1123       1.1      matt 			return;
   1124       1.1      matt 		}
   1125       1.1      matt 	}
   1126       1.1      matt 	*size = 0;
   1127       1.1      matt }
   1128       1.1      matt 
   1129       1.1      matt /*
   1130       1.1      matt  * Initialize anything else for pmap handling.
   1131       1.1      matt  * Called during vm_init().
   1132       1.1      matt  */
   1133       1.1      matt void
   1134       1.1      matt pmap_init(void)
   1135       1.1      matt {
   1136       1.1      matt 	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
   1137       1.1      matt 	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
   1138      1.48        ad 	    &pmap_pool_mallocator, IPL_NONE);
   1139       1.1      matt 
   1140       1.1      matt 	pool_setlowat(&pmap_mpvo_pool, 1008);
   1141       1.1      matt 
   1142       1.1      matt 	pmap_initialized = 1;
   1143       1.1      matt 
   1144       1.1      matt }
   1145       1.1      matt 
   1146       1.1      matt /*
   1147      1.10   thorpej  * How much virtual space does the kernel get?
   1148      1.10   thorpej  */
   1149      1.10   thorpej void
   1150      1.10   thorpej pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1151      1.10   thorpej {
   1152      1.10   thorpej 	/*
   1153      1.10   thorpej 	 * For now, reserve one segment (minus some overhead) for kernel
   1154      1.10   thorpej 	 * virtual memory
   1155      1.10   thorpej 	 */
   1156      1.10   thorpej 	*start = VM_MIN_KERNEL_ADDRESS;
   1157      1.10   thorpej 	*end = VM_MAX_KERNEL_ADDRESS;
   1158      1.10   thorpej }
   1159      1.10   thorpej 
   1160      1.10   thorpej /*
   1161       1.1      matt  * Allocate, initialize, and return a new physical map.
   1162       1.1      matt  */
   1163       1.1      matt pmap_t
   1164       1.1      matt pmap_create(void)
   1165       1.1      matt {
   1166       1.1      matt 	pmap_t pm;
   1167      1.38   sanjayl 
   1168       1.1      matt 	pm = pool_get(&pmap_pool, PR_WAITOK);
   1169      1.46  christos 	memset((void *)pm, 0, sizeof *pm);
   1170       1.1      matt 	pmap_pinit(pm);
   1171       1.1      matt 
   1172       1.1      matt 	DPRINTFN(CREATE,("pmap_create: pm %p:\n"
   1173      1.54   mlelstv 	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
   1174      1.54   mlelstv 	    "    %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
   1175      1.54   mlelstv 	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
   1176      1.54   mlelstv 	    "    %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
   1177      1.54   mlelstv 	    pm,
   1178      1.54   mlelstv 	    pm->pm_sr[0], pm->pm_sr[1],
   1179      1.54   mlelstv 	    pm->pm_sr[2], pm->pm_sr[3],
   1180      1.54   mlelstv 	    pm->pm_sr[4], pm->pm_sr[5],
   1181      1.54   mlelstv 	    pm->pm_sr[6], pm->pm_sr[7],
   1182      1.54   mlelstv 	    pm->pm_sr[8], pm->pm_sr[9],
   1183      1.54   mlelstv 	    pm->pm_sr[10], pm->pm_sr[11],
   1184      1.54   mlelstv 	    pm->pm_sr[12], pm->pm_sr[13],
   1185      1.54   mlelstv 	    pm->pm_sr[14], pm->pm_sr[15]));
   1186       1.1      matt 	return pm;
   1187       1.1      matt }
   1188       1.1      matt 
   1189       1.1      matt /*
   1190       1.1      matt  * Initialize a preallocated and zeroed pmap structure.
   1191       1.1      matt  */
   1192       1.1      matt void
   1193       1.1      matt pmap_pinit(pmap_t pm)
   1194       1.1      matt {
   1195       1.2      matt 	register_t entropy = MFTB();
   1196       1.2      matt 	register_t mask;
   1197       1.2      matt 	int i;
   1198       1.1      matt 
   1199       1.1      matt 	/*
   1200       1.1      matt 	 * Allocate some segment registers for this pmap.
   1201       1.1      matt 	 */
   1202       1.1      matt 	pm->pm_refs = 1;
   1203      1.50        ad 	PMAP_LOCK();
   1204       1.2      matt 	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
   1205       1.2      matt 		static register_t pmap_vsidcontext;
   1206       1.2      matt 		register_t hash;
   1207       1.2      matt 		unsigned int n;
   1208       1.1      matt 
   1209       1.1      matt 		/* Create a new value by multiplying by a prime adding in
   1210       1.1      matt 		 * entropy from the timebase register.  This is to make the
   1211       1.1      matt 		 * VSID more random so that the PT Hash function collides
   1212       1.1      matt 		 * less often. (note that the prime causes gcc to do shifts
   1213       1.1      matt 		 * instead of a multiply)
   1214       1.1      matt 		 */
   1215       1.1      matt 		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
   1216       1.1      matt 		hash = pmap_vsidcontext & (NPMAPS - 1);
   1217      1.23   aymeric 		if (hash == 0) {		/* 0 is special, avoid it */
   1218      1.23   aymeric 			entropy += 0xbadf00d;
   1219       1.1      matt 			continue;
   1220      1.23   aymeric 		}
   1221       1.1      matt 		n = hash >> 5;
   1222       1.2      matt 		mask = 1L << (hash & (VSID_NBPW-1));
   1223       1.2      matt 		hash = pmap_vsidcontext;
   1224       1.1      matt 		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
   1225       1.1      matt 			/* anything free in this bucket? */
   1226       1.2      matt 			if (~pmap_vsid_bitmap[n] == 0) {
   1227      1.23   aymeric 				entropy = hash ^ (hash >> 16);
   1228       1.1      matt 				continue;
   1229       1.1      matt 			}
   1230       1.1      matt 			i = ffs(~pmap_vsid_bitmap[n]) - 1;
   1231       1.2      matt 			mask = 1L << i;
   1232       1.2      matt 			hash &= ~(VSID_NBPW-1);
   1233       1.1      matt 			hash |= i;
   1234       1.1      matt 		}
   1235      1.18      matt 		hash &= PTE_VSID >> PTE_VSID_SHFT;
   1236       1.1      matt 		pmap_vsid_bitmap[n] |= mask;
   1237      1.18      matt 		pm->pm_vsid = hash;
   1238      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
   1239       1.1      matt 		for (i = 0; i < 16; i++)
   1240      1.14       chs 			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
   1241      1.14       chs 			    SR_NOEXEC;
   1242      1.18      matt #endif
   1243      1.50        ad 		PMAP_UNLOCK();
   1244       1.1      matt 		return;
   1245       1.1      matt 	}
   1246      1.50        ad 	PMAP_UNLOCK();
   1247       1.1      matt 	panic("pmap_pinit: out of segments");
   1248       1.1      matt }
   1249       1.1      matt 
   1250       1.1      matt /*
   1251       1.1      matt  * Add a reference to the given pmap.
   1252       1.1      matt  */
   1253       1.1      matt void
   1254       1.1      matt pmap_reference(pmap_t pm)
   1255       1.1      matt {
   1256      1.50        ad 	atomic_inc_uint(&pm->pm_refs);
   1257       1.1      matt }
   1258       1.1      matt 
   1259       1.1      matt /*
   1260       1.1      matt  * Retire the given pmap from service.
   1261       1.1      matt  * Should only be called if the map contains no valid mappings.
   1262       1.1      matt  */
   1263       1.1      matt void
   1264       1.1      matt pmap_destroy(pmap_t pm)
   1265       1.1      matt {
   1266      1.50        ad 	if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
   1267       1.1      matt 		pmap_release(pm);
   1268       1.1      matt 		pool_put(&pmap_pool, pm);
   1269       1.1      matt 	}
   1270       1.1      matt }
   1271       1.1      matt 
   1272       1.1      matt /*
   1273       1.1      matt  * Release any resources held by the given physical map.
   1274       1.1      matt  * Called when a pmap initialized by pmap_pinit is being released.
   1275       1.1      matt  */
   1276       1.1      matt void
   1277       1.1      matt pmap_release(pmap_t pm)
   1278       1.1      matt {
   1279       1.1      matt 	int idx, mask;
   1280      1.39      matt 
   1281      1.39      matt 	KASSERT(pm->pm_stats.resident_count == 0);
   1282      1.39      matt 	KASSERT(pm->pm_stats.wired_count == 0);
   1283       1.1      matt 
   1284      1.50        ad 	PMAP_LOCK();
   1285       1.1      matt 	if (pm->pm_sr[0] == 0)
   1286       1.1      matt 		panic("pmap_release");
   1287      1.22   aymeric 	idx = pm->pm_vsid & (NPMAPS-1);
   1288       1.1      matt 	mask = 1 << (idx % VSID_NBPW);
   1289       1.1      matt 	idx /= VSID_NBPW;
   1290      1.22   aymeric 
   1291      1.22   aymeric 	KASSERT(pmap_vsid_bitmap[idx] & mask);
   1292       1.1      matt 	pmap_vsid_bitmap[idx] &= ~mask;
   1293      1.50        ad 	PMAP_UNLOCK();
   1294       1.1      matt }
   1295       1.1      matt 
   1296       1.1      matt /*
   1297       1.1      matt  * Copy the range specified by src_addr/len
   1298       1.1      matt  * from the source map to the range dst_addr/len
   1299       1.1      matt  * in the destination map.
   1300       1.1      matt  *
   1301       1.1      matt  * This routine is only advisory and need not do anything.
   1302       1.1      matt  */
   1303       1.1      matt void
   1304       1.1      matt pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
   1305       1.1      matt 	vsize_t len, vaddr_t src_addr)
   1306       1.1      matt {
   1307       1.1      matt 	PMAPCOUNT(copies);
   1308       1.1      matt }
   1309       1.1      matt 
   1310       1.1      matt /*
   1311       1.1      matt  * Require that all active physical maps contain no
   1312       1.1      matt  * incorrect entries NOW.
   1313       1.1      matt  */
   1314       1.1      matt void
   1315       1.1      matt pmap_update(struct pmap *pmap)
   1316       1.1      matt {
   1317       1.1      matt 	PMAPCOUNT(updates);
   1318       1.1      matt 	TLBSYNC();
   1319       1.1      matt }
   1320       1.1      matt 
   1321       1.1      matt /*
   1322       1.1      matt  * Garbage collects the physical map system for
   1323       1.1      matt  * pages which are no longer used.
   1324       1.1      matt  * Success need not be guaranteed -- that is, there
   1325       1.1      matt  * may well be pages which are not referenced, but
   1326       1.1      matt  * others may be collected.
   1327       1.1      matt  * Called by the pageout daemon when pages are scarce.
   1328       1.1      matt  */
   1329       1.1      matt void
   1330       1.1      matt pmap_collect(pmap_t pm)
   1331       1.1      matt {
   1332       1.1      matt 	PMAPCOUNT(collects);
   1333       1.1      matt }
   1334       1.1      matt 
   1335      1.35     perry static inline int
   1336       1.1      matt pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
   1337       1.1      matt {
   1338       1.1      matt 	int pteidx;
   1339       1.1      matt 	/*
   1340       1.1      matt 	 * We can find the actual pte entry without searching by
   1341       1.1      matt 	 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9]
   1342       1.1      matt 	 * and by noticing the HID bit.
   1343       1.1      matt 	 */
   1344       1.1      matt 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
   1345       1.1      matt 	if (pvo->pvo_pte.pte_hi & PTE_HID)
   1346       1.1      matt 		pteidx ^= pmap_pteg_mask * 8;
   1347       1.1      matt 	return pteidx;
   1348       1.1      matt }
   1349       1.1      matt 
   1350       1.2      matt volatile struct pte *
   1351       1.1      matt pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
   1352       1.1      matt {
   1353       1.2      matt 	volatile struct pte *pt;
   1354       1.1      matt 
   1355       1.1      matt #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
   1356       1.1      matt 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
   1357       1.1      matt 		return NULL;
   1358       1.1      matt #endif
   1359       1.1      matt 
   1360       1.1      matt 	/*
   1361       1.1      matt 	 * If we haven't been supplied the ptegidx, calculate it.
   1362       1.1      matt 	 */
   1363       1.1      matt 	if (pteidx == -1) {
   1364       1.1      matt 		int ptegidx;
   1365       1.2      matt 		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
   1366       1.1      matt 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
   1367       1.1      matt 	}
   1368       1.1      matt 
   1369       1.1      matt 	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
   1370       1.1      matt 
   1371       1.1      matt #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
   1372       1.1      matt 	return pt;
   1373       1.1      matt #else
   1374       1.1      matt 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
   1375       1.1      matt 		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
   1376       1.1      matt 		    "pvo but no valid pte index", pvo);
   1377       1.1      matt 	}
   1378       1.1      matt 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
   1379       1.1      matt 		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
   1380       1.1      matt 		    "pvo but no valid pte", pvo);
   1381       1.1      matt 	}
   1382       1.1      matt 
   1383       1.1      matt 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
   1384       1.1      matt 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
   1385       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1386       1.1      matt 			pmap_pte_print(pt);
   1387       1.1      matt #endif
   1388       1.1      matt 			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
   1389       1.1      matt 			    "pmap_pteg_table %p but invalid in pvo",
   1390       1.1      matt 			    pvo, pt);
   1391       1.1      matt 		}
   1392       1.1      matt 		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
   1393       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1394       1.1      matt 			pmap_pte_print(pt);
   1395       1.1      matt #endif
   1396       1.1      matt 			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
   1397       1.1      matt 			    "not match pte %p in pmap_pteg_table",
   1398       1.1      matt 			    pvo, pt);
   1399       1.1      matt 		}
   1400       1.1      matt 		return pt;
   1401       1.1      matt 	}
   1402       1.1      matt 
   1403       1.1      matt 	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
   1404       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1405       1.1      matt 		pmap_pte_print(pt);
   1406       1.1      matt #endif
   1407      1.12      matt 		panic("pmap_pvo_to_pte: pvo %p: has nomatching pte %p in "
   1408       1.1      matt 		    "pmap_pteg_table but valid in pvo", pvo, pt);
   1409       1.1      matt 	}
   1410       1.1      matt 	return NULL;
   1411       1.1      matt #endif	/* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
   1412       1.1      matt }
   1413       1.1      matt 
   1414       1.1      matt struct pvo_entry *
   1415       1.1      matt pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
   1416       1.1      matt {
   1417       1.1      matt 	struct pvo_entry *pvo;
   1418       1.1      matt 	int ptegidx;
   1419       1.1      matt 
   1420       1.1      matt 	va &= ~ADDR_POFF;
   1421       1.2      matt 	ptegidx = va_to_pteg(pm, va);
   1422       1.1      matt 
   1423       1.1      matt 	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   1424       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1425       1.1      matt 		if ((uintptr_t) pvo >= SEGMENT_LENGTH)
   1426       1.1      matt 			panic("pmap_pvo_find_va: invalid pvo %p on "
   1427       1.1      matt 			    "list %#x (%p)", pvo, ptegidx,
   1428       1.1      matt 			     &pmap_pvo_table[ptegidx]);
   1429       1.1      matt #endif
   1430       1.1      matt 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
   1431       1.1      matt 			if (pteidx_p)
   1432       1.1      matt 				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
   1433       1.1      matt 			return pvo;
   1434       1.1      matt 		}
   1435       1.1      matt 	}
   1436      1.38   sanjayl 	if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
   1437      1.54   mlelstv 		panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n",
   1438      1.53   garbled 		    __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
   1439       1.1      matt 	return NULL;
   1440       1.1      matt }
   1441       1.1      matt 
   1442       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1443       1.1      matt void
   1444       1.1      matt pmap_pvo_check(const struct pvo_entry *pvo)
   1445       1.1      matt {
   1446       1.1      matt 	struct pvo_head *pvo_head;
   1447       1.1      matt 	struct pvo_entry *pvo0;
   1448       1.2      matt 	volatile struct pte *pt;
   1449       1.1      matt 	int failed = 0;
   1450       1.1      matt 
   1451      1.50        ad 	PMAP_LOCK();
   1452      1.50        ad 
   1453       1.1      matt 	if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
   1454       1.1      matt 		panic("pmap_pvo_check: pvo %p: invalid address", pvo);
   1455       1.1      matt 
   1456       1.1      matt 	if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
   1457       1.1      matt 		printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
   1458       1.1      matt 		    pvo, pvo->pvo_pmap);
   1459       1.1      matt 		failed = 1;
   1460       1.1      matt 	}
   1461       1.1      matt 
   1462       1.1      matt 	if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
   1463       1.1      matt 	    (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
   1464       1.1      matt 		printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
   1465       1.1      matt 		    pvo, TAILQ_NEXT(pvo, pvo_olink));
   1466       1.1      matt 		failed = 1;
   1467       1.1      matt 	}
   1468       1.1      matt 
   1469       1.1      matt 	if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
   1470       1.1      matt 	    (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
   1471       1.1      matt 		printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
   1472       1.1      matt 		    pvo, LIST_NEXT(pvo, pvo_vlink));
   1473       1.1      matt 		failed = 1;
   1474       1.1      matt 	}
   1475       1.1      matt 
   1476      1.39      matt 	if (PVO_MANAGED_P(pvo)) {
   1477       1.1      matt 		pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
   1478       1.1      matt 	} else {
   1479       1.1      matt 		if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
   1480       1.1      matt 			printf("pmap_pvo_check: pvo %p: non kernel address "
   1481       1.1      matt 			    "on kernel unmanaged list\n", pvo);
   1482       1.1      matt 			failed = 1;
   1483       1.1      matt 		}
   1484       1.1      matt 		pvo_head = &pmap_pvo_kunmanaged;
   1485       1.1      matt 	}
   1486       1.1      matt 	LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
   1487       1.1      matt 		if (pvo0 == pvo)
   1488       1.1      matt 			break;
   1489       1.1      matt 	}
   1490       1.1      matt 	if (pvo0 == NULL) {
   1491       1.1      matt 		printf("pmap_pvo_check: pvo %p: not present "
   1492       1.1      matt 		    "on its vlist head %p\n", pvo, pvo_head);
   1493       1.1      matt 		failed = 1;
   1494       1.1      matt 	}
   1495       1.1      matt 	if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
   1496       1.1      matt 		printf("pmap_pvo_check: pvo %p: not present "
   1497       1.1      matt 		    "on its olist head\n", pvo);
   1498       1.1      matt 		failed = 1;
   1499       1.1      matt 	}
   1500       1.1      matt 	pt = pmap_pvo_to_pte(pvo, -1);
   1501       1.1      matt 	if (pt == NULL) {
   1502       1.1      matt 		if (pvo->pvo_pte.pte_hi & PTE_VALID) {
   1503       1.1      matt 			printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
   1504       1.1      matt 			    "no PTE\n", pvo);
   1505       1.1      matt 			failed = 1;
   1506       1.1      matt 		}
   1507       1.1      matt 	} else {
   1508       1.1      matt 		if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
   1509       1.1      matt 		    (uintptr_t) pt >=
   1510       1.1      matt 		    (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
   1511       1.1      matt 			printf("pmap_pvo_check: pvo %p: pte %p not in "
   1512       1.1      matt 			    "pteg table\n", pvo, pt);
   1513       1.1      matt 			failed = 1;
   1514       1.1      matt 		}
   1515       1.1      matt 		if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
   1516       1.1      matt 			printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
   1517       1.1      matt 			    "no PTE\n", pvo);
   1518       1.1      matt 			failed = 1;
   1519       1.1      matt 		}
   1520       1.1      matt 		if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
   1521       1.1      matt 			printf("pmap_pvo_check: pvo %p: pte_hi differ: "
   1522      1.54   mlelstv 			    "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
   1523      1.54   mlelstv 			    pvo->pvo_pte.pte_hi,
   1524      1.54   mlelstv 			    pt->pte_hi);
   1525       1.1      matt 			failed = 1;
   1526       1.1      matt 		}
   1527       1.1      matt 		if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
   1528       1.1      matt 		    (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
   1529       1.1      matt 			printf("pmap_pvo_check: pvo %p: pte_lo differ: "
   1530      1.54   mlelstv 			    "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
   1531      1.54   mlelstv 			    (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)),
   1532      1.54   mlelstv 			    (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)));
   1533       1.1      matt 			failed = 1;
   1534       1.1      matt 		}
   1535       1.1      matt 		if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
   1536      1.53   garbled 			printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva ""
   1537      1.53   garbled 			    " doesn't not match PVO's VA %#" _PRIxva "\n",
   1538       1.1      matt 			    pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
   1539       1.1      matt 			failed = 1;
   1540       1.1      matt 		}
   1541       1.1      matt 		if (failed)
   1542       1.1      matt 			pmap_pte_print(pt);
   1543       1.1      matt 	}
   1544       1.1      matt 	if (failed)
   1545       1.1      matt 		panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
   1546       1.1      matt 		    pvo->pvo_pmap);
   1547      1.50        ad 
   1548      1.50        ad 	PMAP_UNLOCK();
   1549       1.1      matt }
   1550       1.1      matt #endif /* DEBUG || PMAPCHECK */
   1551       1.1      matt 
   1552       1.1      matt /*
   1553      1.25       chs  * Search the PVO table looking for a non-wired entry.
   1554      1.25       chs  * If we find one, remove it and return it.
   1555      1.25       chs  */
   1556      1.25       chs 
   1557      1.25       chs struct pvo_entry *
   1558      1.25       chs pmap_pvo_reclaim(struct pmap *pm)
   1559      1.25       chs {
   1560      1.25       chs 	struct pvo_tqhead *pvoh;
   1561      1.25       chs 	struct pvo_entry *pvo;
   1562      1.25       chs 	uint32_t idx, endidx;
   1563      1.25       chs 
   1564      1.25       chs 	endidx = pmap_pvo_reclaim_nextidx;
   1565      1.25       chs 	for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
   1566      1.25       chs 	     idx = (idx + 1) & pmap_pteg_mask) {
   1567      1.25       chs 		pvoh = &pmap_pvo_table[idx];
   1568      1.25       chs 		TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
   1569      1.39      matt 			if (!PVO_WIRED_P(pvo)) {
   1570      1.33       chs 				pmap_pvo_remove(pvo, -1, NULL);
   1571      1.25       chs 				pmap_pvo_reclaim_nextidx = idx;
   1572      1.26      matt 				PMAPCOUNT(pvos_reclaimed);
   1573      1.25       chs 				return pvo;
   1574      1.25       chs 			}
   1575      1.25       chs 		}
   1576      1.25       chs 	}
   1577      1.25       chs 	return NULL;
   1578      1.25       chs }
   1579      1.25       chs 
   1580      1.25       chs /*
   1581       1.1      matt  * This returns whether this is the first mapping of a page.
   1582       1.1      matt  */
   1583       1.1      matt int
   1584       1.1      matt pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
   1585       1.2      matt 	vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
   1586       1.1      matt {
   1587       1.1      matt 	struct pvo_entry *pvo;
   1588       1.1      matt 	struct pvo_tqhead *pvoh;
   1589       1.2      matt 	register_t msr;
   1590       1.1      matt 	int ptegidx;
   1591       1.1      matt 	int i;
   1592       1.1      matt 	int poolflags = PR_NOWAIT;
   1593       1.1      matt 
   1594      1.28       chs 	/*
   1595      1.28       chs 	 * Compute the PTE Group index.
   1596      1.28       chs 	 */
   1597      1.28       chs 	va &= ~ADDR_POFF;
   1598      1.28       chs 	ptegidx = va_to_pteg(pm, va);
   1599      1.28       chs 
   1600      1.28       chs 	msr = pmap_interrupts_off();
   1601      1.28       chs 
   1602       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1603       1.1      matt 	if (pmap_pvo_remove_depth > 0)
   1604       1.1      matt 		panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
   1605       1.1      matt 	if (++pmap_pvo_enter_depth > 1)
   1606       1.1      matt 		panic("pmap_pvo_enter: called recursively!");
   1607       1.1      matt #endif
   1608       1.1      matt 
   1609       1.1      matt 	/*
   1610       1.1      matt 	 * Remove any existing mapping for this page.  Reuse the
   1611       1.1      matt 	 * pvo entry if there a mapping.
   1612       1.1      matt 	 */
   1613       1.1      matt 	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   1614       1.1      matt 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
   1615       1.1      matt #ifdef DEBUG
   1616       1.1      matt 			if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
   1617       1.1      matt 			    ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
   1618       1.1      matt 			    ~(PTE_REF|PTE_CHG)) == 0 &&
   1619       1.1      matt 			   va < VM_MIN_KERNEL_ADDRESS) {
   1620      1.56       phx 				printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
   1621      1.54   mlelstv 				    pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
   1622      1.56       phx 				printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
   1623      1.54   mlelstv 				    pvo->pvo_pte.pte_hi,
   1624      1.54   mlelstv 				    pm->pm_sr[va >> ADDR_SR_SHFT]);
   1625       1.1      matt 				pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
   1626       1.1      matt #ifdef DDBX
   1627       1.1      matt 				Debugger();
   1628       1.1      matt #endif
   1629       1.1      matt 			}
   1630       1.1      matt #endif
   1631       1.1      matt 			PMAPCOUNT(mappings_replaced);
   1632      1.33       chs 			pmap_pvo_remove(pvo, -1, NULL);
   1633       1.1      matt 			break;
   1634       1.1      matt 		}
   1635       1.1      matt 	}
   1636       1.1      matt 
   1637       1.1      matt 	/*
   1638       1.1      matt 	 * If we aren't overwriting an mapping, try to allocate
   1639       1.1      matt 	 */
   1640      1.26      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1641      1.26      matt 	--pmap_pvo_enter_depth;
   1642      1.26      matt #endif
   1643       1.1      matt 	pmap_interrupts_restore(msr);
   1644      1.33       chs 	if (pvo) {
   1645      1.33       chs 		pmap_pvo_free(pvo);
   1646      1.33       chs 	}
   1647       1.1      matt 	pvo = pool_get(pl, poolflags);
   1648      1.25       chs 
   1649      1.25       chs #ifdef DEBUG
   1650      1.25       chs 	/*
   1651      1.25       chs 	 * Exercise pmap_pvo_reclaim() a little.
   1652      1.25       chs 	 */
   1653      1.25       chs 	if (pvo && (flags & PMAP_CANFAIL) != 0 &&
   1654      1.25       chs 	    pmap_pvo_reclaim_debugctr++ > 0x1000 &&
   1655      1.25       chs 	    (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
   1656      1.25       chs 		pool_put(pl, pvo);
   1657      1.25       chs 		pvo = NULL;
   1658      1.25       chs 	}
   1659      1.25       chs #endif
   1660      1.25       chs 
   1661       1.1      matt 	msr = pmap_interrupts_off();
   1662      1.26      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1663      1.26      matt 	++pmap_pvo_enter_depth;
   1664      1.26      matt #endif
   1665       1.1      matt 	if (pvo == NULL) {
   1666       1.1      matt 		pvo = pmap_pvo_reclaim(pm);
   1667       1.1      matt 		if (pvo == NULL) {
   1668       1.1      matt 			if ((flags & PMAP_CANFAIL) == 0)
   1669       1.1      matt 				panic("pmap_pvo_enter: failed");
   1670       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1671       1.1      matt 			pmap_pvo_enter_depth--;
   1672       1.1      matt #endif
   1673      1.26      matt 			PMAPCOUNT(pvos_failed);
   1674       1.1      matt 			pmap_interrupts_restore(msr);
   1675       1.1      matt 			return ENOMEM;
   1676       1.1      matt 		}
   1677       1.1      matt 	}
   1678      1.25       chs 
   1679       1.1      matt 	pvo->pvo_vaddr = va;
   1680       1.1      matt 	pvo->pvo_pmap = pm;
   1681       1.1      matt 	pvo->pvo_vaddr &= ~ADDR_POFF;
   1682       1.1      matt 	if (flags & VM_PROT_EXECUTE) {
   1683       1.1      matt 		PMAPCOUNT(exec_mappings);
   1684      1.14       chs 		pvo_set_exec(pvo);
   1685       1.1      matt 	}
   1686       1.1      matt 	if (flags & PMAP_WIRED)
   1687       1.1      matt 		pvo->pvo_vaddr |= PVO_WIRED;
   1688       1.1      matt 	if (pvo_head != &pmap_pvo_kunmanaged) {
   1689       1.1      matt 		pvo->pvo_vaddr |= PVO_MANAGED;
   1690       1.1      matt 		PMAPCOUNT(mappings);
   1691       1.1      matt 	} else {
   1692       1.1      matt 		PMAPCOUNT(kernel_mappings);
   1693       1.1      matt 	}
   1694       1.2      matt 	pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
   1695       1.1      matt 
   1696       1.1      matt 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
   1697      1.39      matt 	if (PVO_WIRED_P(pvo))
   1698       1.1      matt 		pvo->pvo_pmap->pm_stats.wired_count++;
   1699       1.1      matt 	pvo->pvo_pmap->pm_stats.resident_count++;
   1700       1.1      matt #if defined(DEBUG)
   1701      1.38   sanjayl /*	if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
   1702       1.1      matt 		DPRINTFN(PVOENTER,
   1703      1.53   garbled 		    ("pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
   1704       1.1      matt 		    pvo, pm, va, pa));
   1705       1.1      matt #endif
   1706       1.1      matt 
   1707       1.1      matt 	/*
   1708       1.1      matt 	 * We hope this succeeds but it isn't required.
   1709       1.1      matt 	 */
   1710       1.1      matt 	pvoh = &pmap_pvo_table[ptegidx];
   1711       1.1      matt 	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
   1712       1.1      matt 	if (i >= 0) {
   1713       1.1      matt 		PVO_PTEGIDX_SET(pvo, i);
   1714      1.12      matt 		PVO_WHERE(pvo, ENTER_INSERT);
   1715       1.1      matt 		PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
   1716       1.1      matt 		    ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
   1717       1.1      matt 		TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
   1718      1.38   sanjayl 
   1719       1.1      matt 	} else {
   1720       1.1      matt 		/*
   1721       1.1      matt 		 * Since we didn't have room for this entry (which makes it
   1722       1.1      matt 		 * and evicted entry), place it at the head of the list.
   1723       1.1      matt 		 */
   1724       1.1      matt 		TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
   1725       1.1      matt 		PMAPCOUNT(ptes_evicted);
   1726       1.1      matt 		pm->pm_evictions++;
   1727      1.12      matt 		/*
   1728      1.12      matt 		 * If this is a kernel page, make sure it's active.
   1729      1.12      matt 		 */
   1730      1.12      matt 		if (pm == pmap_kernel()) {
   1731      1.45   thorpej 			i = pmap_pte_spill(pm, va, false);
   1732      1.12      matt 			KASSERT(i);
   1733      1.12      matt 		}
   1734       1.1      matt 	}
   1735       1.1      matt 	PMAP_PVO_CHECK(pvo);		/* sanity check */
   1736       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1737       1.1      matt 	pmap_pvo_enter_depth--;
   1738       1.1      matt #endif
   1739       1.1      matt 	pmap_interrupts_restore(msr);
   1740       1.1      matt 	return 0;
   1741       1.1      matt }
   1742       1.1      matt 
   1743      1.53   garbled static void
   1744      1.33       chs pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
   1745       1.1      matt {
   1746       1.2      matt 	volatile struct pte *pt;
   1747       1.1      matt 	int ptegidx;
   1748       1.1      matt 
   1749       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1750       1.1      matt 	if (++pmap_pvo_remove_depth > 1)
   1751       1.1      matt 		panic("pmap_pvo_remove: called recursively!");
   1752       1.1      matt #endif
   1753       1.1      matt 
   1754       1.1      matt 	/*
   1755       1.1      matt 	 * If we haven't been supplied the ptegidx, calculate it.
   1756       1.1      matt 	 */
   1757       1.1      matt 	if (pteidx == -1) {
   1758       1.2      matt 		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
   1759       1.1      matt 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
   1760       1.1      matt 	} else {
   1761       1.1      matt 		ptegidx = pteidx >> 3;
   1762       1.1      matt 		if (pvo->pvo_pte.pte_hi & PTE_HID)
   1763       1.1      matt 			ptegidx ^= pmap_pteg_mask;
   1764       1.1      matt 	}
   1765       1.1      matt 	PMAP_PVO_CHECK(pvo);		/* sanity check */
   1766       1.1      matt 
   1767       1.1      matt 	/*
   1768       1.1      matt 	 * If there is an active pte entry, we need to deactivate it
   1769       1.1      matt 	 * (and save the ref & chg bits).
   1770       1.1      matt 	 */
   1771       1.1      matt 	pt = pmap_pvo_to_pte(pvo, pteidx);
   1772       1.1      matt 	if (pt != NULL) {
   1773       1.1      matt 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   1774      1.12      matt 		PVO_WHERE(pvo, REMOVE);
   1775       1.1      matt 		PVO_PTEGIDX_CLR(pvo);
   1776       1.1      matt 		PMAPCOUNT(ptes_removed);
   1777       1.1      matt 	} else {
   1778       1.1      matt 		KASSERT(pvo->pvo_pmap->pm_evictions > 0);
   1779       1.1      matt 		pvo->pvo_pmap->pm_evictions--;
   1780       1.1      matt 	}
   1781       1.1      matt 
   1782       1.1      matt 	/*
   1783      1.14       chs 	 * Account for executable mappings.
   1784      1.14       chs 	 */
   1785      1.39      matt 	if (PVO_EXECUTABLE_P(pvo))
   1786      1.14       chs 		pvo_clear_exec(pvo);
   1787      1.14       chs 
   1788      1.14       chs 	/*
   1789      1.14       chs 	 * Update our statistics.
   1790       1.1      matt 	 */
   1791       1.1      matt 	pvo->pvo_pmap->pm_stats.resident_count--;
   1792      1.39      matt 	if (PVO_WIRED_P(pvo))
   1793       1.1      matt 		pvo->pvo_pmap->pm_stats.wired_count--;
   1794       1.1      matt 
   1795       1.1      matt 	/*
   1796       1.1      matt 	 * Save the REF/CHG bits into their cache if the page is managed.
   1797       1.1      matt 	 */
   1798      1.39      matt 	if (PVO_MANAGED_P(pvo)) {
   1799       1.2      matt 		register_t ptelo = pvo->pvo_pte.pte_lo;
   1800       1.1      matt 		struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
   1801       1.1      matt 
   1802       1.1      matt 		if (pg != NULL) {
   1803      1.37      matt 			/*
   1804      1.37      matt 			 * If this page was changed and it is mapped exec,
   1805      1.37      matt 			 * invalidate it.
   1806      1.37      matt 			 */
   1807      1.37      matt 			if ((ptelo & PTE_CHG) &&
   1808      1.37      matt 			    (pmap_attr_fetch(pg) & PTE_EXEC)) {
   1809      1.37      matt 				struct pvo_head *pvoh = vm_page_to_pvoh(pg);
   1810      1.37      matt 				if (LIST_EMPTY(pvoh)) {
   1811      1.37      matt 					DPRINTFN(EXEC, ("[pmap_pvo_remove: "
   1812      1.53   garbled 					    "%#" _PRIxpa ": clear-exec]\n",
   1813      1.37      matt 					    VM_PAGE_TO_PHYS(pg)));
   1814      1.37      matt 					pmap_attr_clear(pg, PTE_EXEC);
   1815      1.37      matt 					PMAPCOUNT(exec_uncached_pvo_remove);
   1816      1.37      matt 				} else {
   1817      1.37      matt 					DPRINTFN(EXEC, ("[pmap_pvo_remove: "
   1818      1.53   garbled 					    "%#" _PRIxpa ": syncicache]\n",
   1819      1.37      matt 					    VM_PAGE_TO_PHYS(pg)));
   1820      1.37      matt 					pmap_syncicache(VM_PAGE_TO_PHYS(pg),
   1821      1.37      matt 					    PAGE_SIZE);
   1822      1.37      matt 					PMAPCOUNT(exec_synced_pvo_remove);
   1823      1.37      matt 				}
   1824      1.37      matt 			}
   1825      1.37      matt 
   1826       1.1      matt 			pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
   1827       1.1      matt 		}
   1828       1.1      matt 		PMAPCOUNT(unmappings);
   1829       1.1      matt 	} else {
   1830       1.1      matt 		PMAPCOUNT(kernel_unmappings);
   1831       1.1      matt 	}
   1832       1.1      matt 
   1833       1.1      matt 	/*
   1834       1.1      matt 	 * Remove the PVO from its lists and return it to the pool.
   1835       1.1      matt 	 */
   1836       1.1      matt 	LIST_REMOVE(pvo, pvo_vlink);
   1837       1.1      matt 	TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
   1838      1.33       chs 	if (pvol) {
   1839      1.33       chs 		LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
   1840      1.25       chs 	}
   1841       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1842       1.1      matt 	pmap_pvo_remove_depth--;
   1843       1.1      matt #endif
   1844       1.1      matt }
   1845       1.1      matt 
   1846      1.33       chs void
   1847      1.33       chs pmap_pvo_free(struct pvo_entry *pvo)
   1848      1.33       chs {
   1849      1.33       chs 
   1850      1.39      matt 	pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
   1851      1.33       chs }
   1852      1.33       chs 
   1853      1.33       chs void
   1854      1.33       chs pmap_pvo_free_list(struct pvo_head *pvol)
   1855      1.33       chs {
   1856      1.33       chs 	struct pvo_entry *pvo, *npvo;
   1857      1.33       chs 
   1858      1.33       chs 	for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
   1859      1.33       chs 		npvo = LIST_NEXT(pvo, pvo_vlink);
   1860      1.33       chs 		LIST_REMOVE(pvo, pvo_vlink);
   1861      1.33       chs 		pmap_pvo_free(pvo);
   1862      1.33       chs 	}
   1863      1.33       chs }
   1864      1.33       chs 
   1865       1.1      matt /*
   1866      1.14       chs  * Mark a mapping as executable.
   1867      1.14       chs  * If this is the first executable mapping in the segment,
   1868      1.14       chs  * clear the noexec flag.
   1869      1.14       chs  */
   1870      1.53   garbled static void
   1871      1.14       chs pvo_set_exec(struct pvo_entry *pvo)
   1872      1.14       chs {
   1873      1.14       chs 	struct pmap *pm = pvo->pvo_pmap;
   1874      1.14       chs 
   1875      1.39      matt 	if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
   1876      1.14       chs 		return;
   1877      1.14       chs 	}
   1878      1.14       chs 	pvo->pvo_vaddr |= PVO_EXECUTABLE;
   1879      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
   1880      1.18      matt 	{
   1881      1.18      matt 		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
   1882      1.18      matt 		if (pm->pm_exec[sr]++ == 0) {
   1883      1.18      matt 			pm->pm_sr[sr] &= ~SR_NOEXEC;
   1884      1.18      matt 		}
   1885      1.14       chs 	}
   1886      1.18      matt #endif
   1887      1.14       chs }
   1888      1.14       chs 
   1889      1.14       chs /*
   1890      1.14       chs  * Mark a mapping as non-executable.
   1891      1.14       chs  * If this was the last executable mapping in the segment,
   1892      1.14       chs  * set the noexec flag.
   1893      1.14       chs  */
   1894      1.53   garbled static void
   1895      1.14       chs pvo_clear_exec(struct pvo_entry *pvo)
   1896      1.14       chs {
   1897      1.14       chs 	struct pmap *pm = pvo->pvo_pmap;
   1898      1.14       chs 
   1899      1.39      matt 	if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
   1900      1.14       chs 		return;
   1901      1.14       chs 	}
   1902      1.14       chs 	pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
   1903      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
   1904      1.18      matt 	{
   1905      1.18      matt 		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
   1906      1.18      matt 		if (--pm->pm_exec[sr] == 0) {
   1907      1.18      matt 			pm->pm_sr[sr] |= SR_NOEXEC;
   1908      1.18      matt 		}
   1909      1.14       chs 	}
   1910      1.18      matt #endif
   1911      1.14       chs }
   1912      1.14       chs 
   1913      1.14       chs /*
   1914       1.1      matt  * Insert physical page at pa into the given pmap at virtual address va.
   1915       1.1      matt  */
   1916       1.1      matt int
   1917       1.1      matt pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
   1918       1.1      matt {
   1919       1.1      matt 	struct mem_region *mp;
   1920       1.1      matt 	struct pvo_head *pvo_head;
   1921       1.1      matt 	struct vm_page *pg;
   1922       1.1      matt 	struct pool *pl;
   1923       1.2      matt 	register_t pte_lo;
   1924       1.1      matt 	int error;
   1925       1.1      matt 	u_int pvo_flags;
   1926       1.1      matt 	u_int was_exec = 0;
   1927       1.1      matt 
   1928      1.50        ad 	PMAP_LOCK();
   1929      1.50        ad 
   1930       1.1      matt 	if (__predict_false(!pmap_initialized)) {
   1931       1.1      matt 		pvo_head = &pmap_pvo_kunmanaged;
   1932       1.1      matt 		pl = &pmap_upvo_pool;
   1933       1.1      matt 		pvo_flags = 0;
   1934       1.1      matt 		pg = NULL;
   1935       1.1      matt 		was_exec = PTE_EXEC;
   1936       1.1      matt 	} else {
   1937       1.1      matt 		pvo_head = pa_to_pvoh(pa, &pg);
   1938       1.1      matt 		pl = &pmap_mpvo_pool;
   1939       1.1      matt 		pvo_flags = PVO_MANAGED;
   1940       1.1      matt 	}
   1941       1.1      matt 
   1942       1.1      matt 	DPRINTFN(ENTER,
   1943      1.54   mlelstv 	    ("pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):",
   1944       1.1      matt 	    pm, va, pa, prot, flags));
   1945       1.1      matt 
   1946       1.1      matt 	/*
   1947       1.1      matt 	 * If this is a managed page, and it's the first reference to the
   1948       1.1      matt 	 * page clear the execness of the page.  Otherwise fetch the execness.
   1949       1.1      matt 	 */
   1950       1.1      matt 	if (pg != NULL)
   1951       1.1      matt 		was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
   1952       1.1      matt 
   1953       1.1      matt 	DPRINTFN(ENTER, (" was_exec=%d", was_exec));
   1954       1.1      matt 
   1955       1.1      matt 	/*
   1956       1.1      matt 	 * Assume the page is cache inhibited and access is guarded unless
   1957       1.1      matt 	 * it's in our available memory array.  If it is in the memory array,
   1958       1.1      matt 	 * asssume it's in memory coherent memory.
   1959       1.1      matt 	 */
   1960       1.1      matt 	pte_lo = PTE_IG;
   1961       1.1      matt 	if ((flags & PMAP_NC) == 0) {
   1962       1.1      matt 		for (mp = mem; mp->size; mp++) {
   1963       1.1      matt 			if (pa >= mp->start && pa < mp->start + mp->size) {
   1964       1.1      matt 				pte_lo = PTE_M;
   1965       1.1      matt 				break;
   1966       1.1      matt 			}
   1967       1.1      matt 		}
   1968       1.1      matt 	}
   1969       1.1      matt 
   1970       1.1      matt 	if (prot & VM_PROT_WRITE)
   1971       1.1      matt 		pte_lo |= PTE_BW;
   1972       1.1      matt 	else
   1973       1.1      matt 		pte_lo |= PTE_BR;
   1974       1.1      matt 
   1975       1.1      matt 	/*
   1976       1.1      matt 	 * If this was in response to a fault, "pre-fault" the PTE's
   1977       1.1      matt 	 * changed/referenced bit appropriately.
   1978       1.1      matt 	 */
   1979       1.1      matt 	if (flags & VM_PROT_WRITE)
   1980       1.1      matt 		pte_lo |= PTE_CHG;
   1981      1.30       chs 	if (flags & VM_PROT_ALL)
   1982       1.1      matt 		pte_lo |= PTE_REF;
   1983       1.1      matt 
   1984       1.1      matt 	/*
   1985       1.1      matt 	 * We need to know if this page can be executable
   1986       1.1      matt 	 */
   1987       1.1      matt 	flags |= (prot & VM_PROT_EXECUTE);
   1988       1.1      matt 
   1989       1.1      matt 	/*
   1990       1.1      matt 	 * Record mapping for later back-translation and pte spilling.
   1991       1.1      matt 	 * This will overwrite any existing mapping.
   1992       1.1      matt 	 */
   1993       1.1      matt 	error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
   1994       1.1      matt 
   1995       1.1      matt 	/*
   1996       1.1      matt 	 * Flush the real page from the instruction cache if this page is
   1997       1.1      matt 	 * mapped executable and cacheable and has not been flushed since
   1998       1.1      matt 	 * the last time it was modified.
   1999       1.1      matt 	 */
   2000       1.1      matt 	if (error == 0 &&
   2001       1.1      matt             (flags & VM_PROT_EXECUTE) &&
   2002       1.1      matt             (pte_lo & PTE_I) == 0 &&
   2003       1.1      matt 	    was_exec == 0) {
   2004       1.1      matt 		DPRINTFN(ENTER, (" syncicache"));
   2005       1.1      matt 		PMAPCOUNT(exec_synced);
   2006       1.6   thorpej 		pmap_syncicache(pa, PAGE_SIZE);
   2007       1.1      matt 		if (pg != NULL) {
   2008       1.1      matt 			pmap_attr_save(pg, PTE_EXEC);
   2009       1.1      matt 			PMAPCOUNT(exec_cached);
   2010       1.1      matt #if defined(DEBUG) || defined(PMAPDEBUG)
   2011       1.1      matt 			if (pmapdebug & PMAPDEBUG_ENTER)
   2012       1.1      matt 				printf(" marked-as-exec");
   2013       1.1      matt 			else if (pmapdebug & PMAPDEBUG_EXEC)
   2014      1.53   garbled 				printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n",
   2015      1.34      yamt 				    VM_PAGE_TO_PHYS(pg));
   2016       1.1      matt 
   2017       1.1      matt #endif
   2018       1.1      matt 		}
   2019       1.1      matt 	}
   2020       1.1      matt 
   2021       1.1      matt 	DPRINTFN(ENTER, (": error=%d\n", error));
   2022       1.1      matt 
   2023      1.50        ad 	PMAP_UNLOCK();
   2024      1.50        ad 
   2025       1.1      matt 	return error;
   2026       1.1      matt }
   2027       1.1      matt 
   2028       1.1      matt void
   2029       1.1      matt pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
   2030       1.1      matt {
   2031       1.1      matt 	struct mem_region *mp;
   2032       1.2      matt 	register_t pte_lo;
   2033       1.1      matt 	int error;
   2034       1.1      matt 
   2035      1.53   garbled #if defined (PMAP_OEA64_BRIDGE)
   2036       1.1      matt 	if (va < VM_MIN_KERNEL_ADDRESS)
   2037       1.1      matt 		panic("pmap_kenter_pa: attempt to enter "
   2038      1.53   garbled 		    "non-kernel address %#" _PRIxva "!", va);
   2039      1.38   sanjayl #endif
   2040       1.1      matt 
   2041       1.1      matt 	DPRINTFN(KENTER,
   2042      1.53   garbled 	    ("pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot));
   2043       1.1      matt 
   2044      1.50        ad 	PMAP_LOCK();
   2045      1.50        ad 
   2046       1.1      matt 	/*
   2047       1.1      matt 	 * Assume the page is cache inhibited and access is guarded unless
   2048       1.1      matt 	 * it's in our available memory array.  If it is in the memory array,
   2049       1.1      matt 	 * asssume it's in memory coherent memory.
   2050       1.1      matt 	 */
   2051       1.1      matt 	pte_lo = PTE_IG;
   2052       1.4      matt 	if ((prot & PMAP_NC) == 0) {
   2053       1.4      matt 		for (mp = mem; mp->size; mp++) {
   2054       1.4      matt 			if (pa >= mp->start && pa < mp->start + mp->size) {
   2055       1.4      matt 				pte_lo = PTE_M;
   2056       1.4      matt 				break;
   2057       1.4      matt 			}
   2058       1.1      matt 		}
   2059       1.1      matt 	}
   2060       1.1      matt 
   2061       1.1      matt 	if (prot & VM_PROT_WRITE)
   2062       1.1      matt 		pte_lo |= PTE_BW;
   2063       1.1      matt 	else
   2064       1.1      matt 		pte_lo |= PTE_BR;
   2065       1.1      matt 
   2066       1.1      matt 	/*
   2067       1.1      matt 	 * We don't care about REF/CHG on PVOs on the unmanaged list.
   2068       1.1      matt 	 */
   2069       1.1      matt 	error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
   2070       1.1      matt 	    &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
   2071       1.1      matt 
   2072       1.1      matt 	if (error != 0)
   2073      1.53   garbled 		panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d",
   2074       1.1      matt 		      va, pa, error);
   2075      1.50        ad 
   2076      1.50        ad 	PMAP_UNLOCK();
   2077       1.1      matt }
   2078       1.1      matt 
   2079       1.1      matt void
   2080       1.1      matt pmap_kremove(vaddr_t va, vsize_t len)
   2081       1.1      matt {
   2082       1.1      matt 	if (va < VM_MIN_KERNEL_ADDRESS)
   2083       1.1      matt 		panic("pmap_kremove: attempt to remove "
   2084      1.53   garbled 		    "non-kernel address %#" _PRIxva "!", va);
   2085       1.1      matt 
   2086      1.53   garbled 	DPRINTFN(KREMOVE,("pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len));
   2087       1.1      matt 	pmap_remove(pmap_kernel(), va, va + len);
   2088       1.1      matt }
   2089       1.1      matt 
   2090       1.1      matt /*
   2091       1.1      matt  * Remove the given range of mapping entries.
   2092       1.1      matt  */
   2093       1.1      matt void
   2094       1.1      matt pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
   2095       1.1      matt {
   2096      1.33       chs 	struct pvo_head pvol;
   2097       1.1      matt 	struct pvo_entry *pvo;
   2098       1.2      matt 	register_t msr;
   2099       1.1      matt 	int pteidx;
   2100       1.1      matt 
   2101      1.50        ad 	PMAP_LOCK();
   2102      1.33       chs 	LIST_INIT(&pvol);
   2103      1.14       chs 	msr = pmap_interrupts_off();
   2104       1.1      matt 	for (; va < endva; va += PAGE_SIZE) {
   2105       1.1      matt 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
   2106       1.1      matt 		if (pvo != NULL) {
   2107      1.33       chs 			pmap_pvo_remove(pvo, pteidx, &pvol);
   2108       1.1      matt 		}
   2109       1.1      matt 	}
   2110      1.14       chs 	pmap_interrupts_restore(msr);
   2111      1.33       chs 	pmap_pvo_free_list(&pvol);
   2112      1.50        ad 	PMAP_UNLOCK();
   2113       1.1      matt }
   2114       1.1      matt 
   2115       1.1      matt /*
   2116       1.1      matt  * Get the physical page address for the given pmap/virtual address.
   2117       1.1      matt  */
   2118      1.44   thorpej bool
   2119       1.1      matt pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
   2120       1.1      matt {
   2121       1.1      matt 	struct pvo_entry *pvo;
   2122       1.2      matt 	register_t msr;
   2123       1.7      matt 
   2124      1.50        ad 	PMAP_LOCK();
   2125      1.38   sanjayl 
   2126       1.7      matt 	/*
   2127       1.7      matt 	 * If this is a kernel pmap lookup, also check the battable
   2128       1.7      matt 	 * and if we get a hit, translate the VA to a PA using the
   2129      1.36   nathanw 	 * BAT entries.  Don't check for VM_MAX_KERNEL_ADDRESS is
   2130       1.7      matt 	 * that will wrap back to 0.
   2131       1.7      matt 	 */
   2132       1.7      matt 	if (pm == pmap_kernel() &&
   2133       1.7      matt 	    (va < VM_MIN_KERNEL_ADDRESS ||
   2134       1.7      matt 	     (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
   2135       1.8      matt 		KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
   2136      1.53   garbled #if defined (PMAP_OEA)
   2137      1.55   garbled #ifdef PPC_OEA601
   2138      1.55   garbled 		if ((MFPVR() >> 16) == MPC601) {
   2139      1.24    kleink 			register_t batu = battable[va >> 23].batu;
   2140      1.24    kleink 			register_t batl = battable[va >> 23].batl;
   2141      1.24    kleink 			register_t sr = iosrtable[va >> ADDR_SR_SHFT];
   2142      1.24    kleink 			if (BAT601_VALID_P(batl) &&
   2143      1.24    kleink 			    BAT601_VA_MATCH_P(batu, batl, va)) {
   2144      1.24    kleink 				register_t mask =
   2145      1.24    kleink 				    (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
   2146      1.29    briggs 				if (pap)
   2147      1.29    briggs 					*pap = (batl & mask) | (va & ~mask);
   2148      1.50        ad 				PMAP_UNLOCK();
   2149      1.45   thorpej 				return true;
   2150      1.24    kleink 			} else if (SR601_VALID_P(sr) &&
   2151      1.24    kleink 				   SR601_PA_MATCH_P(sr, va)) {
   2152      1.29    briggs 				if (pap)
   2153      1.29    briggs 					*pap = va;
   2154      1.50        ad 				PMAP_UNLOCK();
   2155      1.45   thorpej 				return true;
   2156      1.24    kleink 			}
   2157      1.55   garbled 		} else
   2158      1.55   garbled #endif /* PPC_OEA601 */
   2159      1.55   garbled 		{
   2160      1.55   garbled 			register_t batu = battable[va >> ADDR_SR_SHFT].batu;
   2161      1.55   garbled 			if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) {
   2162      1.55   garbled 				register_t batl =
   2163      1.55   garbled 				    battable[va >> ADDR_SR_SHFT].batl;
   2164      1.55   garbled 				register_t mask =
   2165      1.55   garbled 				    (~(batu & BAT_BL) << 15) & ~0x1ffffL;
   2166      1.55   garbled 				if (pap)
   2167      1.55   garbled 					*pap = (batl & mask) | (va & ~mask);
   2168      1.55   garbled 				PMAP_UNLOCK();
   2169      1.55   garbled 				return true;
   2170      1.55   garbled 			}
   2171       1.7      matt 		}
   2172      1.45   thorpej 		return false;
   2173      1.53   garbled #elif defined (PMAP_OEA64_BRIDGE)
   2174      1.52   garbled 	if (va >= SEGMENT_LENGTH)
   2175      1.52   garbled 		panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n",
   2176      1.52   garbled 		    __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
   2177      1.52   garbled 	else {
   2178      1.52   garbled 		if (pap)
   2179      1.52   garbled 			*pap = va;
   2180      1.52   garbled 			PMAP_UNLOCK();
   2181      1.52   garbled 			return true;
   2182      1.52   garbled 	}
   2183      1.53   garbled #elif defined (PMAP_OEA64)
   2184      1.38   sanjayl #error PPC_OEA64 not supported
   2185      1.38   sanjayl #endif /* PPC_OEA */
   2186       1.7      matt 	}
   2187       1.1      matt 
   2188       1.1      matt 	msr = pmap_interrupts_off();
   2189       1.1      matt 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
   2190       1.1      matt 	if (pvo != NULL) {
   2191       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2192      1.29    briggs 		if (pap)
   2193      1.29    briggs 			*pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
   2194      1.29    briggs 			    | (va & ADDR_POFF);
   2195       1.1      matt 	}
   2196       1.1      matt 	pmap_interrupts_restore(msr);
   2197      1.50        ad 	PMAP_UNLOCK();
   2198       1.1      matt 	return pvo != NULL;
   2199       1.1      matt }
   2200       1.1      matt 
   2201       1.1      matt /*
   2202       1.1      matt  * Lower the protection on the specified range of this pmap.
   2203       1.1      matt  */
   2204       1.1      matt void
   2205       1.1      matt pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
   2206       1.1      matt {
   2207       1.1      matt 	struct pvo_entry *pvo;
   2208       1.2      matt 	volatile struct pte *pt;
   2209       1.2      matt 	register_t msr;
   2210       1.1      matt 	int pteidx;
   2211       1.1      matt 
   2212       1.1      matt 	/*
   2213       1.1      matt 	 * Since this routine only downgrades protection, we should
   2214      1.14       chs 	 * always be called with at least one bit not set.
   2215       1.1      matt 	 */
   2216      1.14       chs 	KASSERT(prot != VM_PROT_ALL);
   2217       1.1      matt 
   2218       1.1      matt 	/*
   2219       1.1      matt 	 * If there is no protection, this is equivalent to
   2220       1.1      matt 	 * remove the pmap from the pmap.
   2221       1.1      matt 	 */
   2222       1.1      matt 	if ((prot & VM_PROT_READ) == 0) {
   2223       1.1      matt 		pmap_remove(pm, va, endva);
   2224       1.1      matt 		return;
   2225       1.1      matt 	}
   2226       1.1      matt 
   2227      1.50        ad 	PMAP_LOCK();
   2228      1.50        ad 
   2229       1.1      matt 	msr = pmap_interrupts_off();
   2230       1.6   thorpej 	for (; va < endva; va += PAGE_SIZE) {
   2231       1.1      matt 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
   2232       1.1      matt 		if (pvo == NULL)
   2233       1.1      matt 			continue;
   2234       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2235       1.1      matt 
   2236       1.1      matt 		/*
   2237       1.1      matt 		 * Revoke executable if asked to do so.
   2238       1.1      matt 		 */
   2239       1.1      matt 		if ((prot & VM_PROT_EXECUTE) == 0)
   2240      1.14       chs 			pvo_clear_exec(pvo);
   2241       1.1      matt 
   2242       1.1      matt #if 0
   2243       1.1      matt 		/*
   2244       1.1      matt 		 * If the page is already read-only, no change
   2245       1.1      matt 		 * needs to be made.
   2246       1.1      matt 		 */
   2247       1.1      matt 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
   2248       1.1      matt 			continue;
   2249       1.1      matt #endif
   2250       1.1      matt 		/*
   2251       1.1      matt 		 * Grab the PTE pointer before we diddle with
   2252       1.1      matt 		 * the cached PTE copy.
   2253       1.1      matt 		 */
   2254       1.1      matt 		pt = pmap_pvo_to_pte(pvo, pteidx);
   2255       1.1      matt 		/*
   2256       1.1      matt 		 * Change the protection of the page.
   2257       1.1      matt 		 */
   2258       1.1      matt 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
   2259       1.1      matt 		pvo->pvo_pte.pte_lo |= PTE_BR;
   2260       1.1      matt 
   2261       1.1      matt 		/*
   2262       1.1      matt 		 * If the PVO is in the page table, update
   2263       1.1      matt 		 * that pte at well.
   2264       1.1      matt 		 */
   2265       1.1      matt 		if (pt != NULL) {
   2266       1.1      matt 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   2267      1.12      matt 			PVO_WHERE(pvo, PMAP_PROTECT);
   2268       1.1      matt 			PMAPCOUNT(ptes_changed);
   2269       1.1      matt 		}
   2270       1.1      matt 
   2271       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2272       1.1      matt 	}
   2273       1.1      matt 	pmap_interrupts_restore(msr);
   2274      1.50        ad 	PMAP_UNLOCK();
   2275       1.1      matt }
   2276       1.1      matt 
   2277       1.1      matt void
   2278       1.1      matt pmap_unwire(pmap_t pm, vaddr_t va)
   2279       1.1      matt {
   2280       1.1      matt 	struct pvo_entry *pvo;
   2281       1.2      matt 	register_t msr;
   2282       1.1      matt 
   2283      1.50        ad 	PMAP_LOCK();
   2284       1.1      matt 	msr = pmap_interrupts_off();
   2285       1.1      matt 	pvo = pmap_pvo_find_va(pm, va, NULL);
   2286       1.1      matt 	if (pvo != NULL) {
   2287      1.39      matt 		if (PVO_WIRED_P(pvo)) {
   2288       1.1      matt 			pvo->pvo_vaddr &= ~PVO_WIRED;
   2289       1.1      matt 			pm->pm_stats.wired_count--;
   2290       1.1      matt 		}
   2291       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2292       1.1      matt 	}
   2293       1.1      matt 	pmap_interrupts_restore(msr);
   2294      1.50        ad 	PMAP_UNLOCK();
   2295       1.1      matt }
   2296       1.1      matt 
   2297       1.1      matt /*
   2298       1.1      matt  * Lower the protection on the specified physical page.
   2299       1.1      matt  */
   2300       1.1      matt void
   2301       1.1      matt pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   2302       1.1      matt {
   2303      1.33       chs 	struct pvo_head *pvo_head, pvol;
   2304       1.1      matt 	struct pvo_entry *pvo, *next_pvo;
   2305       1.2      matt 	volatile struct pte *pt;
   2306       1.2      matt 	register_t msr;
   2307       1.1      matt 
   2308      1.50        ad 	PMAP_LOCK();
   2309      1.50        ad 
   2310      1.14       chs 	KASSERT(prot != VM_PROT_ALL);
   2311      1.33       chs 	LIST_INIT(&pvol);
   2312       1.1      matt 	msr = pmap_interrupts_off();
   2313       1.1      matt 
   2314       1.1      matt 	/*
   2315       1.1      matt 	 * When UVM reuses a page, it does a pmap_page_protect with
   2316       1.1      matt 	 * VM_PROT_NONE.  At that point, we can clear the exec flag
   2317       1.1      matt 	 * since we know the page will have different contents.
   2318       1.1      matt 	 */
   2319       1.1      matt 	if ((prot & VM_PROT_READ) == 0) {
   2320      1.53   garbled 		DPRINTFN(EXEC, ("[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n",
   2321      1.34      yamt 		    VM_PAGE_TO_PHYS(pg)));
   2322       1.1      matt 		if (pmap_attr_fetch(pg) & PTE_EXEC) {
   2323       1.1      matt 			PMAPCOUNT(exec_uncached_page_protect);
   2324       1.1      matt 			pmap_attr_clear(pg, PTE_EXEC);
   2325       1.1      matt 		}
   2326       1.1      matt 	}
   2327       1.1      matt 
   2328       1.1      matt 	pvo_head = vm_page_to_pvoh(pg);
   2329       1.1      matt 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
   2330       1.1      matt 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
   2331       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2332       1.1      matt 
   2333       1.1      matt 		/*
   2334       1.1      matt 		 * Downgrading to no mapping at all, we just remove the entry.
   2335       1.1      matt 		 */
   2336       1.1      matt 		if ((prot & VM_PROT_READ) == 0) {
   2337      1.33       chs 			pmap_pvo_remove(pvo, -1, &pvol);
   2338       1.1      matt 			continue;
   2339       1.1      matt 		}
   2340       1.1      matt 
   2341       1.1      matt 		/*
   2342       1.1      matt 		 * If EXEC permission is being revoked, just clear the
   2343       1.1      matt 		 * flag in the PVO.
   2344       1.1      matt 		 */
   2345       1.1      matt 		if ((prot & VM_PROT_EXECUTE) == 0)
   2346      1.14       chs 			pvo_clear_exec(pvo);
   2347       1.1      matt 
   2348       1.1      matt 		/*
   2349       1.1      matt 		 * If this entry is already RO, don't diddle with the
   2350       1.1      matt 		 * page table.
   2351       1.1      matt 		 */
   2352       1.1      matt 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
   2353       1.1      matt 			PMAP_PVO_CHECK(pvo);
   2354       1.1      matt 			continue;
   2355       1.1      matt 		}
   2356       1.1      matt 
   2357       1.1      matt 		/*
   2358       1.1      matt 		 * Grab the PTE before the we diddle the bits so
   2359       1.1      matt 		 * pvo_to_pte can verify the pte contents are as
   2360       1.1      matt 		 * expected.
   2361       1.1      matt 		 */
   2362       1.1      matt 		pt = pmap_pvo_to_pte(pvo, -1);
   2363       1.1      matt 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
   2364       1.1      matt 		pvo->pvo_pte.pte_lo |= PTE_BR;
   2365       1.1      matt 		if (pt != NULL) {
   2366       1.1      matt 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   2367      1.12      matt 			PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
   2368       1.1      matt 			PMAPCOUNT(ptes_changed);
   2369       1.1      matt 		}
   2370       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2371       1.1      matt 	}
   2372       1.1      matt 	pmap_interrupts_restore(msr);
   2373      1.33       chs 	pmap_pvo_free_list(&pvol);
   2374      1.50        ad 
   2375      1.50        ad 	PMAP_UNLOCK();
   2376       1.1      matt }
   2377       1.1      matt 
   2378       1.1      matt /*
   2379       1.1      matt  * Activate the address space for the specified process.  If the process
   2380       1.1      matt  * is the current process, load the new MMU context.
   2381       1.1      matt  */
   2382       1.1      matt void
   2383       1.1      matt pmap_activate(struct lwp *l)
   2384       1.1      matt {
   2385       1.1      matt 	struct pcb *pcb = &l->l_addr->u_pcb;
   2386       1.1      matt 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
   2387       1.1      matt 
   2388       1.1      matt 	DPRINTFN(ACTIVATE,
   2389       1.1      matt 	    ("pmap_activate: lwp %p (curlwp %p)\n", l, curlwp));
   2390       1.1      matt 
   2391       1.1      matt 	/*
   2392       1.1      matt 	 * XXX Normally performed in cpu_fork().
   2393       1.1      matt 	 */
   2394      1.13      matt 	pcb->pcb_pm = pmap;
   2395      1.17      matt 
   2396      1.17      matt 	/*
   2397      1.17      matt 	* In theory, the SR registers need only be valid on return
   2398      1.17      matt 	* to user space wait to do them there.
   2399      1.17      matt 	*/
   2400      1.17      matt 	if (l == curlwp) {
   2401      1.17      matt 		/* Store pointer to new current pmap. */
   2402      1.17      matt 		curpm = pmap;
   2403      1.17      matt 	}
   2404       1.1      matt }
   2405       1.1      matt 
   2406       1.1      matt /*
   2407       1.1      matt  * Deactivate the specified process's address space.
   2408       1.1      matt  */
   2409       1.1      matt void
   2410       1.1      matt pmap_deactivate(struct lwp *l)
   2411       1.1      matt {
   2412       1.1      matt }
   2413       1.1      matt 
   2414      1.44   thorpej bool
   2415       1.1      matt pmap_query_bit(struct vm_page *pg, int ptebit)
   2416       1.1      matt {
   2417       1.1      matt 	struct pvo_entry *pvo;
   2418       1.2      matt 	volatile struct pte *pt;
   2419       1.2      matt 	register_t msr;
   2420       1.1      matt 
   2421      1.50        ad 	PMAP_LOCK();
   2422      1.50        ad 
   2423      1.50        ad 	if (pmap_attr_fetch(pg) & ptebit) {
   2424      1.50        ad 		PMAP_UNLOCK();
   2425      1.45   thorpej 		return true;
   2426      1.50        ad 	}
   2427      1.14       chs 
   2428       1.1      matt 	msr = pmap_interrupts_off();
   2429       1.1      matt 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
   2430       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2431       1.1      matt 		/*
   2432       1.1      matt 		 * See if we saved the bit off.  If so cache, it and return
   2433       1.1      matt 		 * success.
   2434       1.1      matt 		 */
   2435       1.1      matt 		if (pvo->pvo_pte.pte_lo & ptebit) {
   2436       1.1      matt 			pmap_attr_save(pg, ptebit);
   2437       1.1      matt 			PMAP_PVO_CHECK(pvo);		/* sanity check */
   2438       1.1      matt 			pmap_interrupts_restore(msr);
   2439      1.50        ad 			PMAP_UNLOCK();
   2440      1.45   thorpej 			return true;
   2441       1.1      matt 		}
   2442       1.1      matt 	}
   2443       1.1      matt 	/*
   2444       1.1      matt 	 * No luck, now go thru the hard part of looking at the ptes
   2445       1.1      matt 	 * themselves.  Sync so any pending REF/CHG bits are flushed
   2446       1.1      matt 	 * to the PTEs.
   2447       1.1      matt 	 */
   2448       1.1      matt 	SYNC();
   2449       1.1      matt 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
   2450       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2451       1.1      matt 		/*
   2452       1.1      matt 		 * See if this pvo have a valid PTE.  If so, fetch the
   2453       1.1      matt 		 * REF/CHG bits from the valid PTE.  If the appropriate
   2454       1.1      matt 		 * ptebit is set, cache, it and return success.
   2455       1.1      matt 		 */
   2456       1.1      matt 		pt = pmap_pvo_to_pte(pvo, -1);
   2457       1.1      matt 		if (pt != NULL) {
   2458       1.1      matt 			pmap_pte_synch(pt, &pvo->pvo_pte);
   2459       1.1      matt 			if (pvo->pvo_pte.pte_lo & ptebit) {
   2460       1.1      matt 				pmap_attr_save(pg, ptebit);
   2461       1.1      matt 				PMAP_PVO_CHECK(pvo);		/* sanity check */
   2462       1.1      matt 				pmap_interrupts_restore(msr);
   2463      1.50        ad 				PMAP_UNLOCK();
   2464      1.45   thorpej 				return true;
   2465       1.1      matt 			}
   2466       1.1      matt 		}
   2467       1.1      matt 	}
   2468       1.1      matt 	pmap_interrupts_restore(msr);
   2469      1.50        ad 	PMAP_UNLOCK();
   2470      1.45   thorpej 	return false;
   2471       1.1      matt }
   2472       1.1      matt 
   2473      1.44   thorpej bool
   2474       1.1      matt pmap_clear_bit(struct vm_page *pg, int ptebit)
   2475       1.1      matt {
   2476       1.1      matt 	struct pvo_head *pvoh = vm_page_to_pvoh(pg);
   2477       1.1      matt 	struct pvo_entry *pvo;
   2478       1.2      matt 	volatile struct pte *pt;
   2479       1.2      matt 	register_t msr;
   2480       1.1      matt 	int rv = 0;
   2481       1.1      matt 
   2482      1.50        ad 	PMAP_LOCK();
   2483       1.1      matt 	msr = pmap_interrupts_off();
   2484       1.1      matt 
   2485       1.1      matt 	/*
   2486       1.1      matt 	 * Fetch the cache value
   2487       1.1      matt 	 */
   2488       1.1      matt 	rv |= pmap_attr_fetch(pg);
   2489       1.1      matt 
   2490       1.1      matt 	/*
   2491       1.1      matt 	 * Clear the cached value.
   2492       1.1      matt 	 */
   2493       1.1      matt 	pmap_attr_clear(pg, ptebit);
   2494       1.1      matt 
   2495       1.1      matt 	/*
   2496       1.1      matt 	 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
   2497       1.1      matt 	 * can reset the right ones).  Note that since the pvo entries and
   2498       1.1      matt 	 * list heads are accessed via BAT0 and are never placed in the
   2499       1.1      matt 	 * page table, we don't have to worry about further accesses setting
   2500       1.1      matt 	 * the REF/CHG bits.
   2501       1.1      matt 	 */
   2502       1.1      matt 	SYNC();
   2503       1.1      matt 
   2504       1.1      matt 	/*
   2505       1.1      matt 	 * For each pvo entry, clear pvo's ptebit.  If this pvo have a
   2506       1.1      matt 	 * valid PTE.  If so, clear the ptebit from the valid PTE.
   2507       1.1      matt 	 */
   2508       1.1      matt 	LIST_FOREACH(pvo, pvoh, pvo_vlink) {
   2509       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2510       1.1      matt 		pt = pmap_pvo_to_pte(pvo, -1);
   2511       1.1      matt 		if (pt != NULL) {
   2512       1.1      matt 			/*
   2513       1.1      matt 			 * Only sync the PTE if the bit we are looking
   2514       1.1      matt 			 * for is not already set.
   2515       1.1      matt 			 */
   2516       1.1      matt 			if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
   2517       1.1      matt 				pmap_pte_synch(pt, &pvo->pvo_pte);
   2518       1.1      matt 			/*
   2519       1.1      matt 			 * If the bit we are looking for was already set,
   2520       1.1      matt 			 * clear that bit in the pte.
   2521       1.1      matt 			 */
   2522       1.1      matt 			if (pvo->pvo_pte.pte_lo & ptebit)
   2523       1.1      matt 				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
   2524       1.1      matt 		}
   2525       1.1      matt 		rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
   2526       1.1      matt 		pvo->pvo_pte.pte_lo &= ~ptebit;
   2527       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2528       1.1      matt 	}
   2529       1.1      matt 	pmap_interrupts_restore(msr);
   2530      1.14       chs 
   2531       1.1      matt 	/*
   2532       1.1      matt 	 * If we are clearing the modify bit and this page was marked EXEC
   2533       1.1      matt 	 * and the user of the page thinks the page was modified, then we
   2534       1.1      matt 	 * need to clean it from the icache if it's mapped or clear the EXEC
   2535       1.1      matt 	 * bit if it's not mapped.  The page itself might not have the CHG
   2536       1.1      matt 	 * bit set if the modification was done via DMA to the page.
   2537       1.1      matt 	 */
   2538       1.1      matt 	if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
   2539       1.1      matt 		if (LIST_EMPTY(pvoh)) {
   2540      1.53   garbled 			DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n",
   2541      1.34      yamt 			    VM_PAGE_TO_PHYS(pg)));
   2542       1.1      matt 			pmap_attr_clear(pg, PTE_EXEC);
   2543       1.1      matt 			PMAPCOUNT(exec_uncached_clear_modify);
   2544       1.1      matt 		} else {
   2545      1.53   garbled 			DPRINTFN(EXEC, ("[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n",
   2546      1.34      yamt 			    VM_PAGE_TO_PHYS(pg)));
   2547      1.34      yamt 			pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
   2548       1.1      matt 			PMAPCOUNT(exec_synced_clear_modify);
   2549       1.1      matt 		}
   2550       1.1      matt 	}
   2551      1.50        ad 	PMAP_UNLOCK();
   2552       1.1      matt 	return (rv & ptebit) != 0;
   2553       1.1      matt }
   2554       1.1      matt 
   2555       1.1      matt void
   2556       1.1      matt pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   2557       1.1      matt {
   2558       1.1      matt 	struct pvo_entry *pvo;
   2559       1.1      matt 	size_t offset = va & ADDR_POFF;
   2560       1.1      matt 	int s;
   2561       1.1      matt 
   2562      1.50        ad 	PMAP_LOCK();
   2563       1.1      matt 	s = splvm();
   2564       1.1      matt 	while (len > 0) {
   2565       1.6   thorpej 		size_t seglen = PAGE_SIZE - offset;
   2566       1.1      matt 		if (seglen > len)
   2567       1.1      matt 			seglen = len;
   2568       1.1      matt 		pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
   2569      1.39      matt 		if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
   2570       1.1      matt 			pmap_syncicache(
   2571       1.1      matt 			    (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
   2572       1.1      matt 			PMAP_PVO_CHECK(pvo);
   2573       1.1      matt 		}
   2574       1.1      matt 		va += seglen;
   2575       1.1      matt 		len -= seglen;
   2576       1.1      matt 		offset = 0;
   2577       1.1      matt 	}
   2578       1.1      matt 	splx(s);
   2579      1.50        ad 	PMAP_UNLOCK();
   2580       1.1      matt }
   2581       1.1      matt 
   2582       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
   2583       1.1      matt void
   2584       1.2      matt pmap_pte_print(volatile struct pte *pt)
   2585       1.1      matt {
   2586       1.1      matt 	printf("PTE %p: ", pt);
   2587      1.38   sanjayl 
   2588      1.53   garbled #if defined(PMAP_OEA)
   2589       1.1      matt 	/* High word: */
   2590      1.54   mlelstv 	printf("%#" _PRIxpte ": [", pt->pte_hi);
   2591      1.53   garbled #else
   2592      1.54   mlelstv 	printf("%#" _PRIxpte ": [", pt->pte_hi);
   2593      1.53   garbled #endif /* PMAP_OEA */
   2594      1.38   sanjayl 
   2595       1.1      matt 	printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
   2596       1.1      matt 	printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
   2597      1.38   sanjayl 
   2598      1.54   mlelstv 	printf("%#" _PRIxpte " %#" _PRIxpte "",
   2599      1.38   sanjayl 	    (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
   2600      1.38   sanjayl 	    pt->pte_hi & PTE_API);
   2601      1.53   garbled #if defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
   2602      1.54   mlelstv 	printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
   2603      1.38   sanjayl #else
   2604      1.54   mlelstv 	printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
   2605      1.53   garbled #endif /* PMAP_OEA */
   2606      1.38   sanjayl 
   2607       1.1      matt 	/* Low word: */
   2608      1.53   garbled #if defined (PMAP_OEA)
   2609      1.54   mlelstv 	printf(" %#" _PRIxpte ": [", pt->pte_lo);
   2610      1.54   mlelstv 	printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
   2611      1.53   garbled #else
   2612      1.54   mlelstv 	printf(" %#" _PRIxpte ": [", pt->pte_lo);
   2613      1.54   mlelstv 	printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
   2614      1.38   sanjayl #endif
   2615       1.1      matt 	printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
   2616       1.1      matt 	printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
   2617       1.1      matt 	printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
   2618       1.1      matt 	printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
   2619       1.1      matt 	printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
   2620       1.1      matt 	printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
   2621       1.1      matt 	switch (pt->pte_lo & PTE_PP) {
   2622       1.1      matt 	case PTE_BR: printf("br]\n"); break;
   2623       1.1      matt 	case PTE_BW: printf("bw]\n"); break;
   2624       1.1      matt 	case PTE_SO: printf("so]\n"); break;
   2625       1.1      matt 	case PTE_SW: printf("sw]\n"); break;
   2626       1.1      matt 	}
   2627       1.1      matt }
   2628       1.1      matt #endif
   2629       1.1      matt 
   2630       1.1      matt #if defined(DDB)
   2631       1.1      matt void
   2632       1.1      matt pmap_pteg_check(void)
   2633       1.1      matt {
   2634       1.2      matt 	volatile struct pte *pt;
   2635       1.1      matt 	int i;
   2636       1.1      matt 	int ptegidx;
   2637       1.1      matt 	u_int p_valid = 0;
   2638       1.1      matt 	u_int s_valid = 0;
   2639       1.1      matt 	u_int invalid = 0;
   2640      1.38   sanjayl 
   2641       1.1      matt 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2642       1.1      matt 		for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
   2643       1.1      matt 			if (pt->pte_hi & PTE_VALID) {
   2644       1.1      matt 				if (pt->pte_hi & PTE_HID)
   2645       1.1      matt 					s_valid++;
   2646       1.1      matt 				else
   2647      1.38   sanjayl 				{
   2648       1.1      matt 					p_valid++;
   2649      1.38   sanjayl 				}
   2650       1.1      matt 			} else
   2651       1.1      matt 				invalid++;
   2652       1.1      matt 		}
   2653       1.1      matt 	}
   2654       1.1      matt 	printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
   2655       1.1      matt 		p_valid, p_valid, s_valid, s_valid,
   2656       1.1      matt 		invalid, invalid);
   2657       1.1      matt }
   2658       1.1      matt 
   2659       1.1      matt void
   2660       1.1      matt pmap_print_mmuregs(void)
   2661       1.1      matt {
   2662       1.1      matt 	int i;
   2663       1.1      matt 	u_int cpuvers;
   2664      1.53   garbled #ifndef PMAP_OEA64
   2665       1.1      matt 	vaddr_t addr;
   2666       1.2      matt 	register_t soft_sr[16];
   2667      1.18      matt #endif
   2668      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
   2669       1.1      matt 	struct bat soft_ibat[4];
   2670       1.1      matt 	struct bat soft_dbat[4];
   2671      1.38   sanjayl #endif
   2672      1.53   garbled 	paddr_t sdr1;
   2673       1.1      matt 
   2674       1.1      matt 	cpuvers = MFPVR() >> 16;
   2675      1.35     perry 	__asm volatile ("mfsdr1 %0" : "=r"(sdr1));
   2676      1.53   garbled #ifndef PMAP_OEA64
   2677      1.16    kleink 	addr = 0;
   2678      1.27       chs 	for (i = 0; i < 16; i++) {
   2679       1.1      matt 		soft_sr[i] = MFSRIN(addr);
   2680       1.1      matt 		addr += (1 << ADDR_SR_SHFT);
   2681       1.1      matt 	}
   2682      1.18      matt #endif
   2683       1.1      matt 
   2684      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
   2685       1.1      matt 	/* read iBAT (601: uBAT) registers */
   2686      1.35     perry 	__asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
   2687      1.35     perry 	__asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
   2688      1.35     perry 	__asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
   2689      1.35     perry 	__asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
   2690      1.35     perry 	__asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
   2691      1.35     perry 	__asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
   2692      1.35     perry 	__asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
   2693      1.35     perry 	__asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
   2694       1.1      matt 
   2695       1.1      matt 
   2696       1.1      matt 	if (cpuvers != MPC601) {
   2697       1.1      matt 		/* read dBAT registers */
   2698      1.35     perry 		__asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
   2699      1.35     perry 		__asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
   2700      1.35     perry 		__asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
   2701      1.35     perry 		__asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
   2702      1.35     perry 		__asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
   2703      1.35     perry 		__asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
   2704      1.35     perry 		__asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
   2705      1.35     perry 		__asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
   2706       1.1      matt 	}
   2707      1.38   sanjayl #endif
   2708       1.1      matt 
   2709      1.54   mlelstv 	printf("SDR1:\t%#" _PRIxpa "\n", sdr1);
   2710      1.53   garbled #ifndef PMAP_OEA64
   2711       1.1      matt 	printf("SR[]:\t");
   2712      1.27       chs 	for (i = 0; i < 4; i++)
   2713      1.53   garbled 		printf("0x%08lx,   ", soft_sr[i]);
   2714       1.1      matt 	printf("\n\t");
   2715      1.27       chs 	for ( ; i < 8; i++)
   2716      1.53   garbled 		printf("0x%08lx,   ", soft_sr[i]);
   2717       1.1      matt 	printf("\n\t");
   2718      1.27       chs 	for ( ; i < 12; i++)
   2719      1.53   garbled 		printf("0x%08lx,   ", soft_sr[i]);
   2720       1.1      matt 	printf("\n\t");
   2721      1.27       chs 	for ( ; i < 16; i++)
   2722      1.53   garbled 		printf("0x%08lx,   ", soft_sr[i]);
   2723       1.1      matt 	printf("\n");
   2724      1.18      matt #endif
   2725       1.1      matt 
   2726      1.53   garbled #if defined(PMAP_OEA) || defined(PMAP_OEA_BRIDGE)
   2727       1.1      matt 	printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
   2728      1.27       chs 	for (i = 0; i < 4; i++) {
   2729       1.2      matt 		printf("0x%08lx 0x%08lx, ",
   2730       1.1      matt 			soft_ibat[i].batu, soft_ibat[i].batl);
   2731       1.1      matt 		if (i == 1)
   2732       1.1      matt 			printf("\n\t");
   2733       1.1      matt 	}
   2734       1.1      matt 	if (cpuvers != MPC601) {
   2735       1.1      matt 		printf("\ndBAT[]:\t");
   2736      1.27       chs 		for (i = 0; i < 4; i++) {
   2737       1.2      matt 			printf("0x%08lx 0x%08lx, ",
   2738       1.1      matt 				soft_dbat[i].batu, soft_dbat[i].batl);
   2739       1.1      matt 			if (i == 1)
   2740       1.1      matt 				printf("\n\t");
   2741       1.1      matt 		}
   2742       1.1      matt 	}
   2743       1.1      matt 	printf("\n");
   2744      1.53   garbled #endif /* PMAP_OEA... */
   2745       1.1      matt }
   2746       1.1      matt 
   2747       1.1      matt void
   2748       1.1      matt pmap_print_pte(pmap_t pm, vaddr_t va)
   2749       1.1      matt {
   2750       1.1      matt 	struct pvo_entry *pvo;
   2751       1.2      matt 	volatile struct pte *pt;
   2752       1.1      matt 	int pteidx;
   2753       1.1      matt 
   2754       1.1      matt 	pvo = pmap_pvo_find_va(pm, va, &pteidx);
   2755       1.1      matt 	if (pvo != NULL) {
   2756       1.1      matt 		pt = pmap_pvo_to_pte(pvo, pteidx);
   2757       1.1      matt 		if (pt != NULL) {
   2758      1.53   garbled 			printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n",
   2759      1.38   sanjayl 				va, pt,
   2760      1.38   sanjayl 				pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
   2761      1.38   sanjayl 				pt->pte_hi, pt->pte_lo);
   2762       1.1      matt 		} else {
   2763       1.1      matt 			printf("No valid PTE found\n");
   2764       1.1      matt 		}
   2765       1.1      matt 	} else {
   2766       1.1      matt 		printf("Address not in pmap\n");
   2767       1.1      matt 	}
   2768       1.1      matt }
   2769       1.1      matt 
   2770       1.1      matt void
   2771       1.1      matt pmap_pteg_dist(void)
   2772       1.1      matt {
   2773       1.1      matt 	struct pvo_entry *pvo;
   2774       1.1      matt 	int ptegidx;
   2775       1.1      matt 	int depth;
   2776       1.1      matt 	int max_depth = 0;
   2777       1.1      matt 	unsigned int depths[64];
   2778       1.1      matt 
   2779       1.1      matt 	memset(depths, 0, sizeof(depths));
   2780       1.1      matt 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2781       1.1      matt 		depth = 0;
   2782       1.1      matt 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   2783       1.1      matt 			depth++;
   2784       1.1      matt 		}
   2785       1.1      matt 		if (depth > max_depth)
   2786       1.1      matt 			max_depth = depth;
   2787       1.1      matt 		if (depth > 63)
   2788       1.1      matt 			depth = 63;
   2789       1.1      matt 		depths[depth]++;
   2790       1.1      matt 	}
   2791       1.1      matt 
   2792       1.1      matt 	for (depth = 0; depth < 64; depth++) {
   2793       1.1      matt 		printf("  [%2d]: %8u", depth, depths[depth]);
   2794       1.1      matt 		if ((depth & 3) == 3)
   2795       1.1      matt 			printf("\n");
   2796       1.1      matt 		if (depth == max_depth)
   2797       1.1      matt 			break;
   2798       1.1      matt 	}
   2799       1.1      matt 	if ((depth & 3) != 3)
   2800       1.1      matt 		printf("\n");
   2801       1.1      matt 	printf("Max depth found was %d\n", max_depth);
   2802       1.1      matt }
   2803       1.1      matt #endif /* DEBUG */
   2804       1.1      matt 
   2805       1.1      matt #if defined(PMAPCHECK) || defined(DEBUG)
   2806       1.1      matt void
   2807       1.1      matt pmap_pvo_verify(void)
   2808       1.1      matt {
   2809       1.1      matt 	int ptegidx;
   2810       1.1      matt 	int s;
   2811       1.1      matt 
   2812       1.1      matt 	s = splvm();
   2813       1.1      matt 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2814       1.1      matt 		struct pvo_entry *pvo;
   2815       1.1      matt 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   2816       1.1      matt 			if ((uintptr_t) pvo >= SEGMENT_LENGTH)
   2817       1.1      matt 				panic("pmap_pvo_verify: invalid pvo %p "
   2818       1.1      matt 				    "on list %#x", pvo, ptegidx);
   2819       1.1      matt 			pmap_pvo_check(pvo);
   2820       1.1      matt 		}
   2821       1.1      matt 	}
   2822       1.1      matt 	splx(s);
   2823       1.1      matt }
   2824       1.1      matt #endif /* PMAPCHECK */
   2825       1.1      matt 
   2826       1.1      matt 
   2827       1.1      matt void *
   2828       1.1      matt pmap_pool_ualloc(struct pool *pp, int flags)
   2829       1.1      matt {
   2830       1.1      matt 	struct pvo_page *pvop;
   2831       1.1      matt 
   2832      1.50        ad 	if (uvm.page_init_done != true) {
   2833      1.50        ad 		return (void *) uvm_pageboot_alloc(PAGE_SIZE);
   2834      1.50        ad 	}
   2835      1.50        ad 
   2836      1.50        ad 	PMAP_LOCK();
   2837       1.1      matt 	pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
   2838       1.1      matt 	if (pvop != NULL) {
   2839       1.1      matt 		pmap_upvop_free--;
   2840       1.1      matt 		SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
   2841      1.50        ad 		PMAP_UNLOCK();
   2842       1.1      matt 		return pvop;
   2843       1.1      matt 	}
   2844      1.50        ad 	PMAP_UNLOCK();
   2845       1.1      matt 	return pmap_pool_malloc(pp, flags);
   2846       1.1      matt }
   2847       1.1      matt 
   2848       1.1      matt void *
   2849       1.1      matt pmap_pool_malloc(struct pool *pp, int flags)
   2850       1.1      matt {
   2851       1.1      matt 	struct pvo_page *pvop;
   2852       1.1      matt 	struct vm_page *pg;
   2853       1.1      matt 
   2854      1.50        ad 	PMAP_LOCK();
   2855       1.1      matt 	pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
   2856       1.1      matt 	if (pvop != NULL) {
   2857       1.1      matt 		pmap_mpvop_free--;
   2858       1.1      matt 		SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
   2859      1.50        ad 		PMAP_UNLOCK();
   2860       1.1      matt 		return pvop;
   2861       1.1      matt 	}
   2862      1.50        ad 	PMAP_UNLOCK();
   2863       1.1      matt  again:
   2864       1.1      matt 	pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
   2865       1.1      matt 	    UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
   2866       1.1      matt 	if (__predict_false(pg == NULL)) {
   2867       1.1      matt 		if (flags & PR_WAITOK) {
   2868       1.1      matt 			uvm_wait("plpg");
   2869       1.1      matt 			goto again;
   2870       1.1      matt 		} else {
   2871       1.1      matt 			return (0);
   2872       1.1      matt 		}
   2873       1.1      matt 	}
   2874      1.53   garbled 	KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg));
   2875      1.53   garbled 	return (void *)(uintptr_t) VM_PAGE_TO_PHYS(pg);
   2876       1.1      matt }
   2877       1.1      matt 
   2878       1.1      matt void
   2879       1.1      matt pmap_pool_ufree(struct pool *pp, void *va)
   2880       1.1      matt {
   2881       1.1      matt 	struct pvo_page *pvop;
   2882       1.1      matt #if 0
   2883       1.1      matt 	if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
   2884       1.1      matt 		pmap_pool_mfree(va, size, tag);
   2885       1.1      matt 		return;
   2886       1.1      matt 	}
   2887       1.1      matt #endif
   2888      1.50        ad 	PMAP_LOCK();
   2889       1.1      matt 	pvop = va;
   2890       1.1      matt 	SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
   2891       1.1      matt 	pmap_upvop_free++;
   2892       1.1      matt 	if (pmap_upvop_free > pmap_upvop_maxfree)
   2893       1.1      matt 		pmap_upvop_maxfree = pmap_upvop_free;
   2894      1.50        ad 	PMAP_UNLOCK();
   2895       1.1      matt }
   2896       1.1      matt 
   2897       1.1      matt void
   2898       1.1      matt pmap_pool_mfree(struct pool *pp, void *va)
   2899       1.1      matt {
   2900       1.1      matt 	struct pvo_page *pvop;
   2901       1.1      matt 
   2902      1.50        ad 	PMAP_LOCK();
   2903       1.1      matt 	pvop = va;
   2904       1.1      matt 	SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
   2905       1.1      matt 	pmap_mpvop_free++;
   2906       1.1      matt 	if (pmap_mpvop_free > pmap_mpvop_maxfree)
   2907       1.1      matt 		pmap_mpvop_maxfree = pmap_mpvop_free;
   2908      1.50        ad 	PMAP_UNLOCK();
   2909       1.1      matt #if 0
   2910       1.1      matt 	uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
   2911       1.1      matt #endif
   2912       1.1      matt }
   2913       1.1      matt 
   2914       1.1      matt /*
   2915       1.1      matt  * This routine in bootstraping to steal to-be-managed memory (which will
   2916       1.1      matt  * then be unmanaged).  We use it to grab from the first 256MB for our
   2917       1.1      matt  * pmap needs and above 256MB for other stuff.
   2918       1.1      matt  */
   2919       1.1      matt vaddr_t
   2920      1.10   thorpej pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
   2921       1.1      matt {
   2922       1.1      matt 	vsize_t size;
   2923       1.1      matt 	vaddr_t va;
   2924       1.1      matt 	paddr_t pa = 0;
   2925       1.1      matt 	int npgs, bank;
   2926       1.1      matt 	struct vm_physseg *ps;
   2927       1.1      matt 
   2928      1.45   thorpej 	if (uvm.page_init_done == true)
   2929       1.1      matt 		panic("pmap_steal_memory: called _after_ bootstrap");
   2930       1.1      matt 
   2931      1.10   thorpej 	*vstartp = VM_MIN_KERNEL_ADDRESS;
   2932      1.10   thorpej 	*vendp = VM_MAX_KERNEL_ADDRESS;
   2933      1.10   thorpej 
   2934       1.1      matt 	size = round_page(vsize);
   2935       1.1      matt 	npgs = atop(size);
   2936       1.1      matt 
   2937       1.1      matt 	/*
   2938       1.1      matt 	 * PA 0 will never be among those given to UVM so we can use it
   2939       1.1      matt 	 * to indicate we couldn't steal any memory.
   2940       1.1      matt 	 */
   2941       1.1      matt 	for (ps = vm_physmem, bank = 0; bank < vm_nphysseg; bank++, ps++) {
   2942       1.1      matt 		if (ps->free_list == VM_FREELIST_FIRST256 &&
   2943       1.1      matt 		    ps->avail_end - ps->avail_start >= npgs) {
   2944       1.1      matt 			pa = ptoa(ps->avail_start);
   2945       1.1      matt 			break;
   2946       1.1      matt 		}
   2947       1.1      matt 	}
   2948       1.1      matt 
   2949       1.1      matt 	if (pa == 0)
   2950       1.1      matt 		panic("pmap_steal_memory: no approriate memory to steal!");
   2951       1.1      matt 
   2952       1.1      matt 	ps->avail_start += npgs;
   2953       1.1      matt 	ps->start += npgs;
   2954       1.1      matt 
   2955       1.1      matt 	/*
   2956       1.1      matt 	 * If we've used up all the pages in the segment, remove it and
   2957       1.1      matt 	 * compact the list.
   2958       1.1      matt 	 */
   2959       1.1      matt 	if (ps->avail_start == ps->end) {
   2960       1.1      matt 		/*
   2961       1.1      matt 		 * If this was the last one, then a very bad thing has occurred
   2962       1.1      matt 		 */
   2963       1.1      matt 		if (--vm_nphysseg == 0)
   2964       1.1      matt 			panic("pmap_steal_memory: out of memory!");
   2965       1.1      matt 
   2966       1.1      matt 		printf("pmap_steal_memory: consumed bank %d\n", bank);
   2967       1.1      matt 		for (; bank < vm_nphysseg; bank++, ps++) {
   2968       1.1      matt 			ps[0] = ps[1];
   2969       1.1      matt 		}
   2970       1.1      matt 	}
   2971       1.1      matt 
   2972       1.1      matt 	va = (vaddr_t) pa;
   2973      1.46  christos 	memset((void *) va, 0, size);
   2974       1.1      matt 	pmap_pages_stolen += npgs;
   2975       1.1      matt #ifdef DEBUG
   2976       1.1      matt 	if (pmapdebug && npgs > 1) {
   2977       1.1      matt 		u_int cnt = 0;
   2978       1.1      matt 		for (bank = 0, ps = vm_physmem; bank < vm_nphysseg; bank++, ps++)
   2979       1.1      matt 			cnt += ps->avail_end - ps->avail_start;
   2980       1.1      matt 		printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
   2981       1.1      matt 		    npgs, pmap_pages_stolen, cnt);
   2982       1.1      matt 	}
   2983       1.1      matt #endif
   2984       1.1      matt 
   2985       1.1      matt 	return va;
   2986       1.1      matt }
   2987       1.1      matt 
   2988       1.1      matt /*
   2989       1.1      matt  * Find a chuck of memory with right size and alignment.
   2990       1.1      matt  */
   2991      1.53   garbled paddr_t
   2992       1.1      matt pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
   2993       1.1      matt {
   2994       1.1      matt 	struct mem_region *mp;
   2995       1.1      matt 	paddr_t s, e;
   2996       1.1      matt 	int i, j;
   2997       1.1      matt 
   2998       1.1      matt 	size = round_page(size);
   2999       1.1      matt 
   3000       1.1      matt 	DPRINTFN(BOOT,
   3001      1.54   mlelstv 	    ("pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
   3002       1.1      matt 	    size, alignment, at_end));
   3003       1.1      matt 
   3004       1.6   thorpej 	if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
   3005      1.54   mlelstv 		panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
   3006       1.1      matt 		    alignment);
   3007       1.1      matt 
   3008       1.1      matt 	if (at_end) {
   3009       1.6   thorpej 		if (alignment != PAGE_SIZE)
   3010       1.1      matt 			panic("pmap_boot_find_memory: invalid ending "
   3011      1.53   garbled 			    "alignment %#" _PRIxpa, alignment);
   3012       1.1      matt 
   3013       1.1      matt 		for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
   3014       1.1      matt 			s = mp->start + mp->size - size;
   3015       1.1      matt 			if (s >= mp->start && mp->size >= size) {
   3016      1.54   mlelstv 				DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s));
   3017       1.1      matt 				DPRINTFN(BOOT,
   3018       1.1      matt 				    ("pmap_boot_find_memory: b-avail[%d] start "
   3019      1.54   mlelstv 				     "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
   3020       1.1      matt 				     mp->start, mp->size));
   3021       1.1      matt 				mp->size -= size;
   3022       1.1      matt 				DPRINTFN(BOOT,
   3023       1.1      matt 				    ("pmap_boot_find_memory: a-avail[%d] start "
   3024      1.54   mlelstv 				     "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
   3025       1.1      matt 				     mp->start, mp->size));
   3026      1.53   garbled 				return s;
   3027       1.1      matt 			}
   3028       1.1      matt 		}
   3029       1.1      matt 		panic("pmap_boot_find_memory: no available memory");
   3030       1.1      matt 	}
   3031       1.1      matt 
   3032       1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   3033       1.1      matt 		s = (mp->start + alignment - 1) & ~(alignment-1);
   3034       1.1      matt 		e = s + size;
   3035       1.1      matt 
   3036       1.1      matt 		/*
   3037       1.1      matt 		 * Is the calculated region entirely within the region?
   3038       1.1      matt 		 */
   3039       1.1      matt 		if (s < mp->start || e > mp->start + mp->size)
   3040       1.1      matt 			continue;
   3041       1.1      matt 
   3042      1.54   mlelstv 		DPRINTFN(BOOT,(": %#" _PRIxpa "\n", s));
   3043       1.1      matt 		if (s == mp->start) {
   3044       1.1      matt 			/*
   3045       1.1      matt 			 * If the block starts at the beginning of region,
   3046       1.1      matt 			 * adjust the size & start. (the region may now be
   3047       1.1      matt 			 * zero in length)
   3048       1.1      matt 			 */
   3049       1.1      matt 			DPRINTFN(BOOT,
   3050       1.1      matt 			    ("pmap_boot_find_memory: b-avail[%d] start "
   3051      1.54   mlelstv 			     "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
   3052       1.1      matt 			mp->start += size;
   3053       1.1      matt 			mp->size -= size;
   3054       1.1      matt 			DPRINTFN(BOOT,
   3055       1.1      matt 			    ("pmap_boot_find_memory: a-avail[%d] start "
   3056      1.54   mlelstv 			     "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
   3057       1.1      matt 		} else if (e == mp->start + mp->size) {
   3058       1.1      matt 			/*
   3059       1.1      matt 			 * If the block starts at the beginning of region,
   3060       1.1      matt 			 * adjust only the size.
   3061       1.1      matt 			 */
   3062       1.1      matt 			DPRINTFN(BOOT,
   3063       1.1      matt 			    ("pmap_boot_find_memory: b-avail[%d] start "
   3064      1.54   mlelstv 			     "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
   3065       1.1      matt 			mp->size -= size;
   3066       1.1      matt 			DPRINTFN(BOOT,
   3067       1.1      matt 			    ("pmap_boot_find_memory: a-avail[%d] start "
   3068      1.54   mlelstv 			     "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
   3069       1.1      matt 		} else {
   3070       1.1      matt 			/*
   3071       1.1      matt 			 * Block is in the middle of the region, so we
   3072       1.1      matt 			 * have to split it in two.
   3073       1.1      matt 			 */
   3074       1.1      matt 			for (j = avail_cnt; j > i + 1; j--) {
   3075       1.1      matt 				avail[j] = avail[j-1];
   3076       1.1      matt 			}
   3077       1.1      matt 			DPRINTFN(BOOT,
   3078       1.1      matt 			    ("pmap_boot_find_memory: b-avail[%d] start "
   3079      1.54   mlelstv 			     "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size));
   3080       1.1      matt 			mp[1].start = e;
   3081       1.1      matt 			mp[1].size = mp[0].start + mp[0].size - e;
   3082       1.1      matt 			mp[0].size = s - mp[0].start;
   3083       1.1      matt 			avail_cnt++;
   3084       1.1      matt 			for (; i < avail_cnt; i++) {
   3085       1.1      matt 				DPRINTFN(BOOT,
   3086       1.1      matt 				    ("pmap_boot_find_memory: a-avail[%d] "
   3087      1.54   mlelstv 				     "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
   3088       1.1      matt 				     avail[i].start, avail[i].size));
   3089       1.1      matt 			}
   3090       1.1      matt 		}
   3091      1.53   garbled 		KASSERT(s == (uintptr_t) s);
   3092      1.53   garbled 		return s;
   3093       1.1      matt 	}
   3094       1.1      matt 	panic("pmap_boot_find_memory: not enough memory for "
   3095      1.54   mlelstv 	    "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
   3096       1.1      matt }
   3097       1.1      matt 
   3098      1.38   sanjayl /* XXXSL: we dont have any BATs to do this, map in Segment 0 1:1 using page tables */
   3099      1.53   garbled #if defined (PMAP_OEA64_BRIDGE)
   3100      1.38   sanjayl int
   3101      1.38   sanjayl pmap_setup_segment0_map(int use_large_pages, ...)
   3102      1.38   sanjayl {
   3103      1.38   sanjayl     vaddr_t va;
   3104      1.38   sanjayl 
   3105      1.38   sanjayl     register_t pte_lo = 0x0;
   3106      1.38   sanjayl     int ptegidx = 0, i = 0;
   3107      1.38   sanjayl     struct pte pte;
   3108      1.38   sanjayl     va_list ap;
   3109      1.38   sanjayl 
   3110      1.38   sanjayl     /* Coherent + Supervisor RW, no user access */
   3111      1.38   sanjayl     pte_lo = PTE_M;
   3112      1.38   sanjayl 
   3113      1.38   sanjayl     /* XXXSL
   3114      1.38   sanjayl      * Map in 1st segment 1:1, we'll be careful not to spill kernel entries later,
   3115      1.38   sanjayl      * these have to take priority.
   3116      1.38   sanjayl      */
   3117      1.38   sanjayl     for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
   3118      1.38   sanjayl         ptegidx = va_to_pteg(pmap_kernel(), va);
   3119      1.38   sanjayl         pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
   3120      1.38   sanjayl         i = pmap_pte_insert(ptegidx, &pte);
   3121      1.38   sanjayl     }
   3122      1.38   sanjayl 
   3123      1.38   sanjayl     va_start(ap, use_large_pages);
   3124      1.38   sanjayl     while (1) {
   3125      1.38   sanjayl         paddr_t pa;
   3126      1.38   sanjayl         size_t size;
   3127      1.38   sanjayl 
   3128      1.38   sanjayl         va = va_arg(ap, vaddr_t);
   3129      1.38   sanjayl 
   3130      1.38   sanjayl         if (va == 0)
   3131      1.38   sanjayl             break;
   3132      1.38   sanjayl 
   3133      1.38   sanjayl         pa = va_arg(ap, paddr_t);
   3134      1.38   sanjayl         size = va_arg(ap, size_t);
   3135      1.38   sanjayl 
   3136      1.38   sanjayl         for (; va < (va + size); va += 0x1000, pa += 0x1000) {
   3137      1.38   sanjayl #if 0
   3138      1.54   mlelstv 	    printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__,  va, pa);
   3139      1.38   sanjayl #endif
   3140      1.38   sanjayl             ptegidx = va_to_pteg(pmap_kernel(), va);
   3141      1.38   sanjayl             pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
   3142      1.38   sanjayl             i = pmap_pte_insert(ptegidx, &pte);
   3143      1.38   sanjayl         }
   3144      1.38   sanjayl     }
   3145      1.38   sanjayl 
   3146      1.38   sanjayl     TLBSYNC();
   3147      1.38   sanjayl     SYNC();
   3148      1.38   sanjayl     return (0);
   3149      1.38   sanjayl }
   3150      1.53   garbled #endif /* PMAP_OEA64_BRIDGE */
   3151      1.38   sanjayl 
   3152       1.1      matt /*
   3153       1.1      matt  * This is not part of the defined PMAP interface and is specific to the
   3154       1.1      matt  * PowerPC architecture.  This is called during initppc, before the system
   3155       1.1      matt  * is really initialized.
   3156       1.1      matt  */
   3157       1.1      matt void
   3158       1.1      matt pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
   3159       1.1      matt {
   3160       1.1      matt 	struct mem_region *mp, tmp;
   3161       1.1      matt 	paddr_t s, e;
   3162       1.1      matt 	psize_t size;
   3163       1.1      matt 	int i, j;
   3164       1.1      matt 
   3165       1.1      matt 	/*
   3166       1.1      matt 	 * Get memory.
   3167       1.1      matt 	 */
   3168       1.1      matt 	mem_regions(&mem, &avail);
   3169       1.1      matt #if defined(DEBUG)
   3170       1.1      matt 	if (pmapdebug & PMAPDEBUG_BOOT) {
   3171       1.1      matt 		printf("pmap_bootstrap: memory configuration:\n");
   3172       1.1      matt 		for (mp = mem; mp->size; mp++) {
   3173      1.54   mlelstv 			printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3174       1.1      matt 				mp->start, mp->size);
   3175       1.1      matt 		}
   3176       1.1      matt 		for (mp = avail; mp->size; mp++) {
   3177      1.54   mlelstv 			printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3178       1.1      matt 				mp->start, mp->size);
   3179       1.1      matt 		}
   3180       1.1      matt 	}
   3181       1.1      matt #endif
   3182       1.1      matt 
   3183       1.1      matt 	/*
   3184       1.1      matt 	 * Find out how much physical memory we have and in how many chunks.
   3185       1.1      matt 	 */
   3186       1.1      matt 	for (mem_cnt = 0, mp = mem; mp->size; mp++) {
   3187       1.1      matt 		if (mp->start >= pmap_memlimit)
   3188       1.1      matt 			continue;
   3189       1.1      matt 		if (mp->start + mp->size > pmap_memlimit) {
   3190       1.1      matt 			size = pmap_memlimit - mp->start;
   3191       1.1      matt 			physmem += btoc(size);
   3192       1.1      matt 		} else {
   3193       1.1      matt 			physmem += btoc(mp->size);
   3194       1.1      matt 		}
   3195       1.1      matt 		mem_cnt++;
   3196       1.1      matt 	}
   3197       1.1      matt 
   3198       1.1      matt 	/*
   3199       1.1      matt 	 * Count the number of available entries.
   3200       1.1      matt 	 */
   3201       1.1      matt 	for (avail_cnt = 0, mp = avail; mp->size; mp++)
   3202       1.1      matt 		avail_cnt++;
   3203       1.1      matt 
   3204       1.1      matt 	/*
   3205       1.1      matt 	 * Page align all regions.
   3206       1.1      matt 	 */
   3207       1.1      matt 	kernelstart = trunc_page(kernelstart);
   3208       1.1      matt 	kernelend = round_page(kernelend);
   3209       1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   3210       1.1      matt 		s = round_page(mp->start);
   3211       1.1      matt 		mp->size -= (s - mp->start);
   3212       1.1      matt 		mp->size = trunc_page(mp->size);
   3213       1.1      matt 		mp->start = s;
   3214       1.1      matt 		e = mp->start + mp->size;
   3215       1.1      matt 
   3216       1.1      matt 		DPRINTFN(BOOT,
   3217      1.54   mlelstv 		    ("pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3218       1.1      matt 		    i, mp->start, mp->size));
   3219       1.1      matt 
   3220       1.1      matt 		/*
   3221       1.1      matt 		 * Don't allow the end to run beyond our artificial limit
   3222       1.1      matt 		 */
   3223       1.1      matt 		if (e > pmap_memlimit)
   3224       1.1      matt 			e = pmap_memlimit;
   3225       1.1      matt 
   3226       1.1      matt 		/*
   3227       1.1      matt 		 * Is this region empty or strange?  skip it.
   3228       1.1      matt 		 */
   3229       1.1      matt 		if (e <= s) {
   3230       1.1      matt 			mp->start = 0;
   3231       1.1      matt 			mp->size = 0;
   3232       1.1      matt 			continue;
   3233       1.1      matt 		}
   3234       1.1      matt 
   3235       1.1      matt 		/*
   3236       1.1      matt 		 * Does this overlap the beginning of kernel?
   3237       1.1      matt 		 *   Does extend past the end of the kernel?
   3238       1.1      matt 		 */
   3239       1.1      matt 		else if (s < kernelstart && e > kernelstart) {
   3240       1.1      matt 			if (e > kernelend) {
   3241       1.1      matt 				avail[avail_cnt].start = kernelend;
   3242       1.1      matt 				avail[avail_cnt].size = e - kernelend;
   3243       1.1      matt 				avail_cnt++;
   3244       1.1      matt 			}
   3245       1.1      matt 			mp->size = kernelstart - s;
   3246       1.1      matt 		}
   3247       1.1      matt 		/*
   3248       1.1      matt 		 * Check whether this region overlaps the end of the kernel.
   3249       1.1      matt 		 */
   3250       1.1      matt 		else if (s < kernelend && e > kernelend) {
   3251       1.1      matt 			mp->start = kernelend;
   3252       1.1      matt 			mp->size = e - kernelend;
   3253       1.1      matt 		}
   3254       1.1      matt 		/*
   3255       1.1      matt 		 * Look whether this regions is completely inside the kernel.
   3256       1.1      matt 		 * Nuke it if it does.
   3257       1.1      matt 		 */
   3258       1.1      matt 		else if (s >= kernelstart && e <= kernelend) {
   3259       1.1      matt 			mp->start = 0;
   3260       1.1      matt 			mp->size = 0;
   3261       1.1      matt 		}
   3262       1.1      matt 		/*
   3263       1.1      matt 		 * If the user imposed a memory limit, enforce it.
   3264       1.1      matt 		 */
   3265       1.1      matt 		else if (s >= pmap_memlimit) {
   3266       1.6   thorpej 			mp->start = -PAGE_SIZE;	/* let's know why */
   3267       1.1      matt 			mp->size = 0;
   3268       1.1      matt 		}
   3269       1.1      matt 		else {
   3270       1.1      matt 			mp->start = s;
   3271       1.1      matt 			mp->size = e - s;
   3272       1.1      matt 		}
   3273       1.1      matt 		DPRINTFN(BOOT,
   3274      1.54   mlelstv 		    ("pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3275       1.1      matt 		    i, mp->start, mp->size));
   3276       1.1      matt 	}
   3277       1.1      matt 
   3278       1.1      matt 	/*
   3279       1.1      matt 	 * Move (and uncount) all the null return to the end.
   3280       1.1      matt 	 */
   3281       1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   3282       1.1      matt 		if (mp->size == 0) {
   3283       1.1      matt 			tmp = avail[i];
   3284       1.1      matt 			avail[i] = avail[--avail_cnt];
   3285       1.1      matt 			avail[avail_cnt] = avail[i];
   3286       1.1      matt 		}
   3287       1.1      matt 	}
   3288       1.1      matt 
   3289       1.1      matt 	/*
   3290       1.1      matt 	 * (Bubble)sort them into asecnding order.
   3291       1.1      matt 	 */
   3292       1.1      matt 	for (i = 0; i < avail_cnt; i++) {
   3293       1.1      matt 		for (j = i + 1; j < avail_cnt; j++) {
   3294       1.1      matt 			if (avail[i].start > avail[j].start) {
   3295       1.1      matt 				tmp = avail[i];
   3296       1.1      matt 				avail[i] = avail[j];
   3297       1.1      matt 				avail[j] = tmp;
   3298       1.1      matt 			}
   3299       1.1      matt 		}
   3300       1.1      matt 	}
   3301       1.1      matt 
   3302       1.1      matt 	/*
   3303       1.1      matt 	 * Make sure they don't overlap.
   3304       1.1      matt 	 */
   3305       1.1      matt 	for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
   3306       1.1      matt 		if (mp[0].start + mp[0].size > mp[1].start) {
   3307       1.1      matt 			mp[0].size = mp[1].start - mp[0].start;
   3308       1.1      matt 		}
   3309       1.1      matt 		DPRINTFN(BOOT,
   3310      1.54   mlelstv 		    ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3311       1.1      matt 		    i, mp->start, mp->size));
   3312       1.1      matt 	}
   3313       1.1      matt 	DPRINTFN(BOOT,
   3314      1.54   mlelstv 	    ("pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3315       1.1      matt 	    i, mp->start, mp->size));
   3316       1.1      matt 
   3317       1.1      matt #ifdef	PTEGCOUNT
   3318       1.1      matt 	pmap_pteg_cnt = PTEGCOUNT;
   3319       1.1      matt #else /* PTEGCOUNT */
   3320      1.38   sanjayl 
   3321       1.1      matt 	pmap_pteg_cnt = 0x1000;
   3322       1.1      matt 
   3323       1.1      matt 	while (pmap_pteg_cnt < physmem)
   3324       1.1      matt 		pmap_pteg_cnt <<= 1;
   3325       1.1      matt 
   3326       1.1      matt 	pmap_pteg_cnt >>= 1;
   3327       1.1      matt #endif /* PTEGCOUNT */
   3328       1.1      matt 
   3329      1.38   sanjayl #ifdef DEBUG
   3330      1.38   sanjayl 	DPRINTFN(BOOT,
   3331      1.38   sanjayl 		("pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt));
   3332      1.38   sanjayl #endif
   3333      1.38   sanjayl 
   3334       1.1      matt 	/*
   3335       1.1      matt 	 * Find suitably aligned memory for PTEG hash table.
   3336       1.1      matt 	 */
   3337       1.2      matt 	size = pmap_pteg_cnt * sizeof(struct pteg);
   3338      1.53   garbled 	pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0);
   3339      1.38   sanjayl 
   3340      1.38   sanjayl #ifdef DEBUG
   3341      1.38   sanjayl 	DPRINTFN(BOOT,
   3342      1.38   sanjayl 		("PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n", pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table));
   3343      1.38   sanjayl #endif
   3344      1.38   sanjayl 
   3345      1.38   sanjayl 
   3346       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   3347       1.1      matt 	if ( (uintptr_t) pmap_pteg_table + size > SEGMENT_LENGTH)
   3348      1.54   mlelstv 		panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB",
   3349       1.1      matt 		    pmap_pteg_table, size);
   3350       1.1      matt #endif
   3351       1.1      matt 
   3352      1.32        he 	memset(__UNVOLATILE(pmap_pteg_table), 0,
   3353      1.32        he 		pmap_pteg_cnt * sizeof(struct pteg));
   3354       1.1      matt 	pmap_pteg_mask = pmap_pteg_cnt - 1;
   3355       1.1      matt 
   3356       1.1      matt 	/*
   3357       1.1      matt 	 * We cannot do pmap_steal_memory here since UVM hasn't been loaded
   3358       1.1      matt 	 * with pages.  So we just steal them before giving them to UVM.
   3359       1.1      matt 	 */
   3360       1.1      matt 	size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
   3361      1.53   garbled 	pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0);
   3362       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   3363       1.1      matt 	if ( (uintptr_t) pmap_pvo_table + size > SEGMENT_LENGTH)
   3364      1.54   mlelstv 		panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB",
   3365       1.1      matt 		    pmap_pvo_table, size);
   3366       1.1      matt #endif
   3367       1.1      matt 
   3368       1.1      matt 	for (i = 0; i < pmap_pteg_cnt; i++)
   3369       1.1      matt 		TAILQ_INIT(&pmap_pvo_table[i]);
   3370       1.1      matt 
   3371       1.1      matt #ifndef MSGBUFADDR
   3372       1.1      matt 	/*
   3373       1.1      matt 	 * Allocate msgbuf in high memory.
   3374       1.1      matt 	 */
   3375      1.53   garbled 	msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
   3376       1.1      matt #endif
   3377       1.1      matt 
   3378       1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
   3379       1.1      matt 		paddr_t pfstart = atop(mp->start);
   3380       1.1      matt 		paddr_t pfend = atop(mp->start + mp->size);
   3381       1.1      matt 		if (mp->size == 0)
   3382       1.1      matt 			continue;
   3383       1.1      matt 		if (mp->start + mp->size <= SEGMENT_LENGTH) {
   3384       1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3385       1.1      matt 				VM_FREELIST_FIRST256);
   3386       1.1      matt 		} else if (mp->start >= SEGMENT_LENGTH) {
   3387       1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3388       1.1      matt 				VM_FREELIST_DEFAULT);
   3389       1.1      matt 		} else {
   3390       1.1      matt 			pfend = atop(SEGMENT_LENGTH);
   3391       1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3392       1.1      matt 				VM_FREELIST_FIRST256);
   3393       1.1      matt 			pfstart = atop(SEGMENT_LENGTH);
   3394       1.1      matt 			pfend = atop(mp->start + mp->size);
   3395       1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3396       1.1      matt 				VM_FREELIST_DEFAULT);
   3397       1.1      matt 		}
   3398       1.1      matt 	}
   3399       1.1      matt 
   3400       1.1      matt 	/*
   3401       1.1      matt 	 * Make sure kernel vsid is allocated as well as VSID 0.
   3402       1.1      matt 	 */
   3403       1.1      matt 	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
   3404       1.1      matt 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
   3405      1.53   garbled 	pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
   3406      1.53   garbled 		|= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW);
   3407       1.1      matt 	pmap_vsid_bitmap[0] |= 1;
   3408       1.1      matt 
   3409       1.1      matt 	/*
   3410       1.1      matt 	 * Initialize kernel pmap and hardware.
   3411       1.1      matt 	 */
   3412      1.38   sanjayl 
   3413      1.53   garbled /* PMAP_OEA64_BRIDGE does support these instructions */
   3414      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
   3415       1.1      matt 	for (i = 0; i < 16; i++) {
   3416      1.38   sanjayl  		pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
   3417      1.35     perry 		__asm volatile ("mtsrin %0,%1"
   3418      1.38   sanjayl  			      :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT));
   3419       1.1      matt 	}
   3420       1.1      matt 
   3421       1.1      matt 	pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
   3422      1.35     perry 	__asm volatile ("mtsr %0,%1"
   3423       1.1      matt 		      :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
   3424       1.1      matt #ifdef KERNEL2_SR
   3425       1.1      matt 	pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
   3426      1.35     perry 	__asm volatile ("mtsr %0,%1"
   3427       1.1      matt 		      :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
   3428       1.1      matt #endif
   3429      1.53   garbled #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
   3430      1.53   garbled #if defined (PMAP_OEA)
   3431       1.1      matt 	for (i = 0; i < 16; i++) {
   3432       1.1      matt 		if (iosrtable[i] & SR601_T) {
   3433       1.1      matt 			pmap_kernel()->pm_sr[i] = iosrtable[i];
   3434      1.35     perry 			__asm volatile ("mtsrin %0,%1"
   3435       1.1      matt 			    :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
   3436       1.1      matt 		}
   3437       1.1      matt 	}
   3438      1.35     perry 	__asm volatile ("sync; mtsdr1 %0; isync"
   3439       1.2      matt 		      :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
   3440      1.53   garbled #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
   3441      1.38   sanjayl  	__asm __volatile ("sync; mtsdr1 %0; isync"
   3442      1.38   sanjayl  		      :: "r"((uintptr_t)pmap_pteg_table | (32 - cntlzw(pmap_pteg_mask >> 11))));
   3443      1.38   sanjayl #endif
   3444       1.1      matt 	tlbia();
   3445       1.1      matt 
   3446       1.1      matt #ifdef ALTIVEC
   3447       1.1      matt 	pmap_use_altivec = cpu_altivec;
   3448       1.1      matt #endif
   3449       1.1      matt 
   3450       1.1      matt #ifdef DEBUG
   3451       1.1      matt 	if (pmapdebug & PMAPDEBUG_BOOT) {
   3452       1.1      matt 		u_int cnt;
   3453       1.1      matt 		int bank;
   3454       1.1      matt 		char pbuf[9];
   3455       1.1      matt 		for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
   3456       1.1      matt 			cnt += vm_physmem[bank].avail_end - vm_physmem[bank].avail_start;
   3457      1.53   garbled 			printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
   3458       1.1      matt 			    bank,
   3459       1.1      matt 			    ptoa(vm_physmem[bank].avail_start),
   3460       1.1      matt 			    ptoa(vm_physmem[bank].avail_end),
   3461       1.1      matt 			    ptoa(vm_physmem[bank].avail_end - vm_physmem[bank].avail_start));
   3462       1.1      matt 		}
   3463       1.1      matt 		format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
   3464       1.1      matt 		printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
   3465       1.1      matt 		    pbuf, cnt);
   3466       1.1      matt 	}
   3467       1.1      matt #endif
   3468       1.1      matt 
   3469       1.1      matt 	pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
   3470       1.1      matt 	    sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
   3471      1.48        ad 	    &pmap_pool_uallocator, IPL_NONE);
   3472       1.1      matt 
   3473       1.1      matt 	pool_setlowat(&pmap_upvo_pool, 252);
   3474       1.1      matt 
   3475       1.1      matt 	pool_init(&pmap_pool, sizeof(struct pmap),
   3476      1.48        ad 	    sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator,
   3477      1.48        ad 	    IPL_NONE);
   3478      1.41      matt 
   3479      1.53   garbled #if defined(PMAP_NEED_MAPKERNEL) || 1
   3480      1.41      matt 	{
   3481      1.53   garbled 		struct pmap *pm = pmap_kernel();
   3482      1.53   garbled #if 0
   3483      1.41      matt 		extern int etext[], kernel_text[];
   3484      1.41      matt 		vaddr_t va, va_etext = (paddr_t) etext;
   3485      1.53   garbled #endif
   3486      1.53   garbled 		paddr_t pa, pa_end;
   3487      1.42      matt 		register_t sr;
   3488      1.53   garbled 		struct pte pt;
   3489      1.53   garbled 		unsigned int ptegidx;
   3490      1.53   garbled 		int bank;
   3491      1.42      matt 
   3492      1.53   garbled 		sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY;
   3493      1.53   garbled 		pm->pm_sr[0] = sr;
   3494      1.41      matt 
   3495      1.53   garbled 		for (bank = 0; bank < vm_nphysseg; bank++) {
   3496      1.53   garbled 			pa_end = ptoa(vm_physmem[bank].avail_end);
   3497      1.53   garbled 			pa = ptoa(vm_physmem[bank].avail_start);
   3498      1.53   garbled 			for (; pa < pa_end; pa += PAGE_SIZE) {
   3499      1.53   garbled 				ptegidx = va_to_pteg(pm, pa);
   3500      1.53   garbled 				pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW);
   3501      1.53   garbled 				pmap_pte_insert(ptegidx, &pt);
   3502      1.53   garbled 			}
   3503      1.53   garbled 		}
   3504      1.53   garbled 
   3505      1.53   garbled #if 0
   3506      1.41      matt 		va = (vaddr_t) kernel_text;
   3507      1.41      matt 
   3508      1.41      matt 		for (pa = kernelstart; va < va_etext;
   3509      1.53   garbled 		     pa += PAGE_SIZE, va += PAGE_SIZE) {
   3510      1.53   garbled 			ptegidx = va_to_pteg(pm, va);
   3511      1.53   garbled 			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
   3512      1.53   garbled 			pmap_pte_insert(ptegidx, &pt);
   3513      1.53   garbled 		}
   3514      1.41      matt 
   3515      1.41      matt 		for (; pa < kernelend;
   3516      1.53   garbled 		     pa += PAGE_SIZE, va += PAGE_SIZE) {
   3517      1.53   garbled 			ptegidx = va_to_pteg(pm, va);
   3518      1.53   garbled 			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
   3519      1.53   garbled 			pmap_pte_insert(ptegidx, &pt);
   3520      1.53   garbled 		}
   3521      1.53   garbled 
   3522      1.53   garbled 		for (va = 0, pa = 0; va < 0x3000;
   3523      1.53   garbled 		     pa += PAGE_SIZE, va += PAGE_SIZE) {
   3524      1.53   garbled 			ptegidx = va_to_pteg(pm, va);
   3525      1.53   garbled 			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
   3526      1.53   garbled 			pmap_pte_insert(ptegidx, &pt);
   3527      1.53   garbled 		}
   3528      1.53   garbled #endif
   3529      1.42      matt 
   3530      1.42      matt 		__asm volatile ("mtsrin %0,%1"
   3531      1.42      matt  			      :: "r"(sr), "r"(kernelstart));
   3532      1.41      matt 	}
   3533      1.41      matt #endif
   3534       1.1      matt }
   3535