/*	$NetBSD: pmap.c,v 1.86.2.3 2014/08/20 00:03:20 tls Exp $	*/
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt (at) 3am-software.com> of Allegro Networks, Inc.
 *
 * Support for PPC64 Bridge mode added by Sanjay Lal <sanjayl (at) kymasys.com>
 * of Kyma Systems LLC.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.86.2.3 2014/08/20 00:03:20 tls Exp $");

#define	PMAP_NOOPNAMES

#include "opt_ppcarch.h"
#include "opt_altivec.h"
#include "opt_multiprocessor.h"
#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/device.h>		/* for evcnt */
#include <sys/systm.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

#include <machine/powerpc.h>
#include <powerpc/bat.h>
#include <powerpc/pcb.h>
#include <powerpc/psl.h>
#include <powerpc/spr.h>
#include <powerpc/oea/spr.h>
#include <powerpc/oea/sr_601.h>

#ifdef ALTIVEC
extern int pmap_use_altivec;
#endif

#ifdef PMAP_MEMLIMIT
static paddr_t pmap_memlimit = PMAP_MEMLIMIT;
#else
static paddr_t pmap_memlimit = -PAGE_SIZE;		/* there is no limit */
#endif

extern struct pmap kernel_pmap_;
static unsigned int pmap_pages_stolen;
static u_long pmap_pte_valid;
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
static u_long pmap_pvo_enter_depth;
static u_long pmap_pvo_remove_depth;
#endif

#ifndef MSGBUFADDR
extern paddr_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;
static u_int mem_cnt, avail_cnt;

#if !defined(PMAP_OEA64) && !defined(PMAP_OEA64_BRIDGE)
# define	PMAP_OEA 1
#endif

#if defined(PMAP_OEA)
#define	_PRIxpte	"lx"
#else
#define	_PRIxpte	PRIx64
#endif
#define	_PRIxpa		"lx"
#define	_PRIxva		"lx"
#define	_PRIsr  	"lx"

#ifdef PMAP_NEEDS_FIXUP
#if defined(PMAP_OEA)
#define	PMAPNAME(name)	pmap32_##name
#elif defined(PMAP_OEA64)
#define	PMAPNAME(name)	pmap64_##name
#elif defined(PMAP_OEA64_BRIDGE)
#define	PMAPNAME(name)	pmap64bridge_##name
#else
#error unknown variant for pmap
#endif
#endif /* PMAP_NEEDS_FIXUP */

#ifdef PMAPNAME
#define	STATIC			static
#define pmap_pte_spill		PMAPNAME(pte_spill)
#define pmap_real_memory	PMAPNAME(real_memory)
#define pmap_init		PMAPNAME(init)
#define pmap_virtual_space	PMAPNAME(virtual_space)
#define pmap_create		PMAPNAME(create)
#define pmap_reference		PMAPNAME(reference)
#define pmap_destroy		PMAPNAME(destroy)
#define pmap_copy		PMAPNAME(copy)
#define pmap_update		PMAPNAME(update)
#define pmap_enter		PMAPNAME(enter)
#define pmap_remove		PMAPNAME(remove)
#define pmap_kenter_pa		PMAPNAME(kenter_pa)
#define pmap_kremove		PMAPNAME(kremove)
#define pmap_extract		PMAPNAME(extract)
#define pmap_protect		PMAPNAME(protect)
#define pmap_unwire		PMAPNAME(unwire)
#define pmap_page_protect	PMAPNAME(page_protect)
#define pmap_query_bit		PMAPNAME(query_bit)
#define pmap_clear_bit		PMAPNAME(clear_bit)

#define pmap_activate		PMAPNAME(activate)
#define pmap_deactivate		PMAPNAME(deactivate)

#define pmap_pinit		PMAPNAME(pinit)
#define pmap_procwr		PMAPNAME(procwr)

#define pmap_pool		PMAPNAME(pool)
#define pmap_upvo_pool		PMAPNAME(upvo_pool)
#define pmap_mpvo_pool		PMAPNAME(mpvo_pool)
#define pmap_pvo_table		PMAPNAME(pvo_table)
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
#define pmap_pte_print		PMAPNAME(pte_print)
#define pmap_pteg_check		PMAPNAME(pteg_check)
#define pmap_print_mmuregs	PMAPNAME(print_mmuregs)
#define pmap_print_pte		PMAPNAME(print_pte)
#define pmap_pteg_dist		PMAPNAME(pteg_dist)
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
#define	pmap_pvo_verify		PMAPNAME(pvo_verify)
#define pmapcheck		PMAPNAME(check)
#endif
#if defined(DEBUG) || defined(PMAPDEBUG)
#define pmapdebug		PMAPNAME(debug)
#endif
#define pmap_steal_memory	PMAPNAME(steal_memory)
#define pmap_bootstrap		PMAPNAME(bootstrap)
#else
#define	STATIC			/* nothing */
#endif /* PMAPNAME */

STATIC int pmap_pte_spill(struct pmap *, vaddr_t, bool);
STATIC void pmap_real_memory(paddr_t *, psize_t *);
STATIC void pmap_init(void);
STATIC void pmap_virtual_space(vaddr_t *, vaddr_t *);
STATIC pmap_t pmap_create(void);
STATIC void pmap_reference(pmap_t);
STATIC void pmap_destroy(pmap_t);
STATIC void pmap_copy(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
STATIC void pmap_update(pmap_t);
STATIC int pmap_enter(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_remove(pmap_t, vaddr_t, vaddr_t);
STATIC void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t, u_int);
STATIC void pmap_kremove(vaddr_t, vsize_t);
STATIC bool pmap_extract(pmap_t, vaddr_t, paddr_t *);

STATIC void pmap_protect(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
STATIC void pmap_unwire(pmap_t, vaddr_t);
STATIC void pmap_page_protect(struct vm_page *, vm_prot_t);
STATIC bool pmap_query_bit(struct vm_page *, int);
STATIC bool pmap_clear_bit(struct vm_page *, int);

STATIC void pmap_activate(struct lwp *);
STATIC void pmap_deactivate(struct lwp *);

STATIC void pmap_pinit(pmap_t pm);
STATIC void pmap_procwr(struct proc *, vaddr_t, size_t);

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
STATIC void pmap_pte_print(volatile struct pte *);
STATIC void pmap_pteg_check(void);
STATIC void pmap_print_mmuregs(void);
STATIC void pmap_print_pte(pmap_t, vaddr_t);
STATIC void pmap_pteg_dist(void);
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
STATIC void pmap_pvo_verify(void);
#endif
STATIC vaddr_t pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
STATIC void pmap_bootstrap(paddr_t, paddr_t);

#ifdef PMAPNAME
const struct pmap_ops PMAPNAME(ops) = {
	.pmapop_pte_spill = pmap_pte_spill,
	.pmapop_real_memory = pmap_real_memory,
	.pmapop_init = pmap_init,
	.pmapop_virtual_space = pmap_virtual_space,
	.pmapop_create = pmap_create,
	.pmapop_reference = pmap_reference,
	.pmapop_destroy = pmap_destroy,
	.pmapop_copy = pmap_copy,
	.pmapop_update = pmap_update,
	.pmapop_enter = pmap_enter,
	.pmapop_remove = pmap_remove,
	.pmapop_kenter_pa = pmap_kenter_pa,
	.pmapop_kremove = pmap_kremove,
	.pmapop_extract = pmap_extract,
	.pmapop_protect = pmap_protect,
	.pmapop_unwire = pmap_unwire,
	.pmapop_page_protect = pmap_page_protect,
	.pmapop_query_bit = pmap_query_bit,
	.pmapop_clear_bit = pmap_clear_bit,
	.pmapop_activate = pmap_activate,
	.pmapop_deactivate = pmap_deactivate,
	.pmapop_pinit = pmap_pinit,
	.pmapop_procwr = pmap_procwr,
#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
	.pmapop_pte_print = pmap_pte_print,
	.pmapop_pteg_check = pmap_pteg_check,
	.pmapop_print_mmuregs = pmap_print_mmuregs,
	.pmapop_print_pte = pmap_print_pte,
	.pmapop_pteg_dist = pmap_pteg_dist,
#else
	.pmapop_pte_print = NULL,
	.pmapop_pteg_check = NULL,
	.pmapop_print_mmuregs = NULL,
	.pmapop_print_pte = NULL,
	.pmapop_pteg_dist = NULL,
#endif
#if defined(DEBUG) || defined(PMAPCHECK)
	.pmapop_pvo_verify = pmap_pvo_verify,
#else
	.pmapop_pvo_verify = NULL,
#endif
	.pmapop_steal_memory = pmap_steal_memory,
	.pmapop_bootstrap = pmap_bootstrap,
};
#endif /* !PMAPNAME */

/*
 * The following structure is aligned to 32 bytes
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
	TAILQ_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
	struct pte pvo_pte;			/* Prebuilt PTE */
	pmap_t pvo_pmap;			/* ptr to owning pmap */
	vaddr_t pvo_vaddr;			/* VA of entry */
#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO e. for managed page */
#define	PVO_EXECUTABLE		0x0040		/* PVO e. for executable page */
#define	PVO_WIRED_P(pvo)	((pvo)->pvo_vaddr & PVO_WIRED)
#define	PVO_MANAGED_P(pvo)	((pvo)->pvo_vaddr & PVO_MANAGED)
#define	PVO_EXECUTABLE_P(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ENTER_INSERT	0		/* PVO has been removed */
#define	PVO_SPILL_UNSET		1		/* PVO has been evicted */
#define	PVO_SPILL_SET		2		/* PVO has been spilled */
#define	PVO_SPILL_INSERT	3		/* PVO has been inserted */
#define	PVO_PMAP_PAGE_PROTECT	4		/* PVO has changed */
#define	PVO_PMAP_PROTECT	5		/* PVO has changed */
#define	PVO_REMOVE		6		/* PVO has been removed */
#define	PVO_WHERE_MASK		15
#define	PVO_WHERE_SHFT		8
} __attribute__ ((aligned (32)));
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo,i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_WHERE(pvo,w)	\
	((pvo)->pvo_vaddr &= ~(PVO_WHERE_MASK << PVO_WHERE_SHFT), \
	 (pvo)->pvo_vaddr |= ((PVO_ ## w) << PVO_WHERE_SHFT))
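
/*
 * Because PVO_VADDR() masks pvo_vaddr with ~ADDR_POFF, the page-offset
 * bits of pvo_vaddr are free for bookkeeping: bits 0-2 hold the PTEG
 * slot index, bit 3 marks that index valid, bits 4-6 are the
 * WIRED/MANAGED/EXECUTABLE flags, and bits 8-11 record (via PVO_WHERE)
 * the last operation that touched the PVO, e.g. PVO_WHERE(pvo, REMOVE).
 */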

TAILQ_HEAD(pvo_tqhead, pvo_entry);
struct pvo_tqhead *pmap_pvo_table;	/* pvo entries by ptegroup index */
static struct pvo_head pmap_pvo_kunmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
static struct pvo_head pmap_pvo_unmanaged = LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

struct pool pmap_pool;		/* pool for pmap structures */
struct pool pmap_upvo_pool;	/* pool for pvo entries for unmanaged pages */
struct pool pmap_mpvo_pool;	/* pool for pvo entries for managed pages */

/*
 * We keep a cache of unmanaged pages to be used for pvo entries for
 * unmanaged pages.
 */
struct pvo_page {
	SIMPLEQ_ENTRY(pvo_page) pvop_link;
};
SIMPLEQ_HEAD(pvop_head, pvo_page);
static struct pvop_head pmap_upvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_upvop_head);
static struct pvop_head pmap_mpvop_head = SIMPLEQ_HEAD_INITIALIZER(pmap_mpvop_head);
static u_long pmap_upvop_free;
static u_long pmap_upvop_maxfree;
static u_long pmap_mpvop_free;
static u_long pmap_mpvop_maxfree;

static void *pmap_pool_ualloc(struct pool *, int);
static void *pmap_pool_malloc(struct pool *, int);

static void pmap_pool_ufree(struct pool *, void *);
static void pmap_pool_mfree(struct pool *, void *);

static struct pool_allocator pmap_pool_mallocator = {
	.pa_alloc = pmap_pool_malloc,
	.pa_free = pmap_pool_mfree,
	.pa_pagesz = 0,
};

static struct pool_allocator pmap_pool_uallocator = {
	.pa_alloc = pmap_pool_ualloc,
	.pa_free = pmap_pool_ufree,
	.pa_pagesz = 0,
};

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
void pmap_pte_print(volatile struct pte *);
void pmap_pteg_check(void);
void pmap_pteg_dist(void);
void pmap_print_pte(pmap_t, vaddr_t);
void pmap_print_mmuregs(void);
#endif

#if defined(DEBUG) || defined(PMAPCHECK)
#ifdef PMAPCHECK
int pmapcheck = 1;
#else
int pmapcheck = 0;
#endif
void pmap_pvo_verify(void);
static void pmap_pvo_check(const struct pvo_entry *);
#define	PMAP_PVO_CHECK(pvo)	 		\
	do {					\
		if (pmapcheck)			\
			pmap_pvo_check(pvo);	\
	} while (0)
#else
#define	PMAP_PVO_CHECK(pvo)	do { } while (/*CONSTCOND*/0)
#endif
static int pmap_pte_insert(int, struct pte *);
static int pmap_pvo_enter(pmap_t, struct pool *, struct pvo_head *,
	vaddr_t, paddr_t, register_t, int);
static void pmap_pvo_remove(struct pvo_entry *, int, struct pvo_head *);
static void pmap_pvo_free(struct pvo_entry *);
static void pmap_pvo_free_list(struct pvo_head *);
static struct pvo_entry *pmap_pvo_find_va(pmap_t, vaddr_t, int *);
static volatile struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);
static struct pvo_entry *pmap_pvo_reclaim(struct pmap *);
static void pvo_set_exec(struct pvo_entry *);
static void pvo_clear_exec(struct pvo_entry *);

static void tlbia(void);

static void pmap_release(pmap_t);
static paddr_t pmap_boot_find_memory(psize_t, psize_t, int);

static uint32_t pmap_pvo_reclaim_nextidx;
#ifdef DEBUG
static int pmap_pvo_reclaim_debugctr;
#endif

#define	VSID_NBPW	(sizeof(uint32_t) * 8)
static uint32_t pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static int pmap_initialized;

#if defined(DEBUG) || defined(PMAPDEBUG)
#define	PMAPDEBUG_BOOT		0x0001
#define	PMAPDEBUG_PTE		0x0002
#define	PMAPDEBUG_EXEC		0x0008
#define	PMAPDEBUG_PVOENTER	0x0010
#define	PMAPDEBUG_PVOREMOVE	0x0020
#define	PMAPDEBUG_ACTIVATE	0x0100
#define	PMAPDEBUG_CREATE	0x0200
#define	PMAPDEBUG_ENTER		0x1000
#define	PMAPDEBUG_KENTER	0x2000
#define	PMAPDEBUG_KREMOVE	0x4000
#define	PMAPDEBUG_REMOVE	0x8000

unsigned int pmapdebug = 0;

# define DPRINTF(x, ...)	printf(x, __VA_ARGS__)
# define DPRINTFN(n, x, ...)	do if (pmapdebug & PMAPDEBUG_ ## n) printf(x, __VA_ARGS__); while (0)
#else
# define DPRINTF(x, ...)	do { } while (0)
# define DPRINTFN(n, x, ...)	do { } while (0)
#endif
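
/*
 * Illustrative use of DPRINTFN (compiled away unless DEBUG or PMAPDEBUG
 * is defined, and silent unless the named bit is set in pmapdebug):
 *
 *	DPRINTFN(ENTER, "pmap_enter(%p, %#" _PRIxva ")\n", pm, va);
 */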


#ifdef PMAPCOUNTERS
/*
 * From pmap_subr.c
 */
extern struct evcnt pmap_evcnt_mappings;
extern struct evcnt pmap_evcnt_unmappings;

extern struct evcnt pmap_evcnt_kernel_mappings;
extern struct evcnt pmap_evcnt_kernel_unmappings;

extern struct evcnt pmap_evcnt_mappings_replaced;

extern struct evcnt pmap_evcnt_exec_mappings;
extern struct evcnt pmap_evcnt_exec_cached;

extern struct evcnt pmap_evcnt_exec_synced;
extern struct evcnt pmap_evcnt_exec_synced_clear_modify;
extern struct evcnt pmap_evcnt_exec_synced_pvo_remove;

extern struct evcnt pmap_evcnt_exec_uncached_page_protect;
extern struct evcnt pmap_evcnt_exec_uncached_clear_modify;
extern struct evcnt pmap_evcnt_exec_uncached_zero_page;
extern struct evcnt pmap_evcnt_exec_uncached_copy_page;
extern struct evcnt pmap_evcnt_exec_uncached_pvo_remove;

extern struct evcnt pmap_evcnt_updates;
extern struct evcnt pmap_evcnt_collects;
extern struct evcnt pmap_evcnt_copies;

extern struct evcnt pmap_evcnt_ptes_spilled;
extern struct evcnt pmap_evcnt_ptes_unspilled;
extern struct evcnt pmap_evcnt_ptes_evicted;

extern struct evcnt pmap_evcnt_ptes_primary[8];
extern struct evcnt pmap_evcnt_ptes_secondary[8];
extern struct evcnt pmap_evcnt_ptes_removed;
extern struct evcnt pmap_evcnt_ptes_changed;
extern struct evcnt pmap_evcnt_pvos_reclaimed;
extern struct evcnt pmap_evcnt_pvos_failed;

extern struct evcnt pmap_evcnt_zeroed_pages;
extern struct evcnt pmap_evcnt_copied_pages;
extern struct evcnt pmap_evcnt_idlezeroed_pages;

#define	PMAPCOUNT(ev)	((pmap_evcnt_ ## ev).ev_count++)
#define	PMAPCOUNT2(ev)	((ev).ev_count++)
#else
#define	PMAPCOUNT(ev)	((void) 0)
#define	PMAPCOUNT2(ev)	((void) 0)
#endif

#define	TLBIE(va)	__asm volatile("tlbie %0" :: "r"(va))

/* XXXSL: this needs to be moved to assembler */
#define	TLBIEL(va)	__asm __volatile("tlbie %0" :: "r"(va))

#ifdef MD_TLBSYNC
#define TLBSYNC()	MD_TLBSYNC()
#else
#define	TLBSYNC()	__asm volatile("tlbsync")
#endif
#define	SYNC()		__asm volatile("sync")
#define	EIEIO()		__asm volatile("eieio")
#define	DCBST(va)	__asm __volatile("dcbst 0,%0" :: "r"(va))
#define	MFMSR()		mfmsr()
#define	MTMSR(psl)	mtmsr(psl)
#define	MFPVR()		mfpvr()
#define	MFSRIN(va)	mfsrin(va)
#define	MFTB()		mfrtcltbl()

#if defined(DDB) && !defined(PMAP_OEA64)
static inline register_t
mfsrin(vaddr_t va)
{
	register_t sr;
	__asm volatile ("mfsrin %0,%1" : "=r"(sr) : "r"(va));
	return sr;
}
#endif	/* DDB && !PMAP_OEA64 */

#if defined (PMAP_OEA64_BRIDGE)
extern void mfmsr64 (register64_t *result);
#endif /* PMAP_OEA64_BRIDGE */

#define	PMAP_LOCK()		KERNEL_LOCK(1, NULL)
#define	PMAP_UNLOCK()		KERNEL_UNLOCK_ONE(NULL)

static inline register_t
pmap_interrupts_off(void)
{
	register_t msr = MFMSR();
	if (msr & PSL_EE)
		MTMSR(msr & ~PSL_EE);
	return msr;
}

static void
pmap_interrupts_restore(register_t msr)
{
	if (msr & PSL_EE)
		MTMSR(msr);
}
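
/*
 * Typical usage of the pair above (illustrative sketch): bracket a
 * page-table update so the tlbie/tlbsync sequence cannot be interrupted:
 *
 *	register_t msr = pmap_interrupts_off();
 *	... modify the PTE ...
 *	pmap_interrupts_restore(msr);
 */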

static inline u_int32_t
mfrtcltbl(void)
{
#ifdef PPC_OEA601
	if ((MFPVR() >> 16) == MPC601)
		return (mfrtcl() >> 7);
	else
#endif
		return (mftbl());
}

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */

void
tlbia(void)
{
	char *i;

	SYNC();
#if defined(PMAP_OEA)
	/*
	 * Why not use "tlbia"?  Because not all processors implement it.
	 *
	 * This needs to be a per-CPU callback to do the appropriate thing
	 * for the CPU. XXX
	 */
	for (i = 0; i < (char *)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
		SYNC();
	}
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	/* This is specifically for the 970, 970UM v1.6 pp. 140. */
	for (i = 0; i <= (char *)0xFF000; i += 0x00001000) {
		TLBIEL(i);
		EIEIO();
		SYNC();
	}
#endif
	TLBSYNC();
	SYNC();
}
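
/*
 * Sketch of why the PMAP_OEA loop bounds work: tlbie invalidates the TLB
 * congruence class selected by the effective address, so stepping a page
 * (0x1000) at a time through 0x00040000 bytes issues 64 tlbies, one per
 * class, assuming the TLB has at most 64 sets indexed by EA bits (true
 * of the classic OEA parts this path targets).
 */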

static inline register_t
va_to_vsid(const struct pmap *pm, vaddr_t addr)
{
#if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
	return (pm->pm_sr[addr >> ADDR_SR_SHFT] & SR_VSID) >> SR_VSID_SHFT;
#else /* PMAP_OEA64 */
#if 0
	const struct ste *ste;
	register_t hash;
	int i;

	hash = (addr >> ADDR_ESID_SHFT) & ADDR_ESID_HASH;

	/*
	 * Try the primary group first
	 */
	ste = pm->pm_stes[hash].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return ste;
	}

	/*
	 * Then the secondary group.
	 */
	ste = pm->pm_stes[hash ^ ADDR_ESID_HASH].stes;
	for (i = 0; i < 8; i++, ste++) {
		if ((ste->ste_hi & STE_V) &&
		   (addr & ~(ADDR_POFF|ADDR_PIDX)) == (ste->ste_hi & STE_ESID))
			return addr;
	}

	return NULL;
#else
	/*
	 * Rather than searching the STE groups for the VSID, we know
	 * how we generate that from the ESID and so do that.
	 */
	return VSID_MAKE(addr >> ADDR_SR_SHFT, pm->pm_vsid) >> SR_VSID_SHFT;
#endif
#endif /* PMAP_OEA */
}

static inline register_t
va_to_pteg(const struct pmap *pm, vaddr_t addr)
{
	register_t hash;

	hash = va_to_vsid(pm, addr) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & pmap_pteg_mask;
}
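
/*
 * This is the standard OEA primary hash: VSID XOR page index, masked to
 * the PTEG table size.  Worked example with illustrative numbers only:
 * if pmap_pteg_mask == 0x3ff, the VSID is 0x123 and addr == 0x0030f000,
 * the page index is (addr & ADDR_PIDX) >> ADDR_PIDX_SHFT == 0x30f, so
 * the PTEG index is (0x123 ^ 0x30f) & 0x3ff == 0x22c.
 */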

#if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
/*
 * Given a PTE in the page table, calculate the VADDR that hashes to it.
 * The only bit of magic is that the top 4 bits of the address don't
 * technically exist in the PTE.  But we know we reserved 4 bits of the
 * VSID for it so that's how we get it.
 */
static vaddr_t
pmap_pte_to_va(volatile const struct pte *pt)
{
	vaddr_t va;
	uintptr_t ptaddr = (uintptr_t) pt;

	if (pt->pte_hi & PTE_HID)
		ptaddr ^= (pmap_pteg_mask * sizeof(struct pteg));

	/* PPC Bits 10-19  PPC64 Bits 42-51 */
#if defined(PMAP_OEA)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x3ff;
#elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
	va = ((pt->pte_hi >> PTE_VSID_SHFT) ^ (ptaddr / sizeof(struct pteg))) & 0x7ff;
#endif
	va <<= ADDR_PIDX_SHFT;

	/* PPC Bits 4-9  PPC64 Bits 36-41 */
	va |= (pt->pte_hi & PTE_API) << ADDR_API_SHFT;

#if defined(PMAP_OEA64)
	/* PPC64 Bits 0-35 */
	/* va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT; */
#elif defined(PMAP_OEA) || defined(PMAP_OEA64_BRIDGE)
	/* PPC Bits 0-3 */
	va |= VSID_TO_SR(pt->pte_hi >> PTE_VSID_SHFT) << ADDR_SR_SHFT;
#endif

	return va;
}
#endif
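
/*
 * The inversion above relies on two identities: XORing ptaddr with
 * pmap_pteg_mask * sizeof(struct pteg) maps a secondary-hash PTE back to
 * its primary PTEG, and since ptegidx == vsid ^ pgidx (mod the mask),
 * pgidx == vsid ^ ptegidx, where ptegidx is recovered from the PTE's
 * address within the table.
 */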

static inline struct pvo_head *
pa_to_pvoh(paddr_t pa, struct vm_page **pg_p)
{
	struct vm_page *pg;
	struct vm_page_md *md;

	pg = PHYS_TO_VM_PAGE(pa);
	if (pg_p != NULL)
		*pg_p = pg;
	if (pg == NULL)
		return &pmap_pvo_unmanaged;
	md = VM_PAGE_TO_MD(pg);
	return &md->mdpg_pvoh;
}

static inline struct pvo_head *
vm_page_to_pvoh(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return &md->mdpg_pvoh;
}


static inline void
pmap_attr_clear(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs &= ~ptebit;
}

static inline int
pmap_attr_fetch(struct vm_page *pg)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	return md->mdpg_attrs;
}

static inline void
pmap_attr_save(struct vm_page *pg, int ptebit)
{
	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);

	md->mdpg_attrs |= ptebit;
}

static inline int
pmap_pte_compare(const volatile struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi
#if 0
	    && ((pt->pte_lo ^ pvo_pt->pte_lo) &
	        ~(PTE_REF|PTE_CHG)) == 0
#endif
	    )
		return 1;
	return 0;
}
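
/*
 * Comparing pte_hi alone is enough to match a mapping: it carries the
 * VSID, API and HID bits, which identify the translation; pte_lo only
 * adds the physical page and the REF/CHG/WIMG/PP bits, and REF/CHG may
 * legitimately differ between the cached copy and the hardware PTE
 * (hence the #if 0 block masking them out).
 */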

static inline void
pmap_pte_create(struct pte *pt, const struct pmap *pm, vaddr_t va, register_t pte_lo)
{
	/*
	 * Construct the PTE.  Default to IMB initially.  Valid bit
	 * only gets set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
#if defined(PMAP_OEA)
	pt->pte_hi = (va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = pte_lo;
#elif defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA64)
	pt->pte_hi = ((u_int64_t)va_to_vsid(pm, va) << PTE_VSID_SHFT)
	    | (((va & ADDR_PIDX) >> (ADDR_API_SHFT - PTE_API_SHFT)) & PTE_API);
	pt->pte_lo = (u_int64_t) pte_lo;
#endif /* PMAP_OEA */
}

static inline void
pmap_pte_synch(volatile struct pte *pt, struct pte *pvo_pt)
{
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF|PTE_CHG);
}

static inline void
pmap_pte_clear(volatile struct pte *pt, vaddr_t va, int ptebit)
{
	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
}
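
/*
 * The ordering above follows the architecture's recommended PTE-change
 * sequence (the "Section 7.6.3.x" references appear to be to the PowerPC
 * Programming Environments manual): update the PTE in memory, invalidate
 * the stale translation with tlbie, then fence with sync/tlbsync so no
 * processor still holds the old entry; the DCBST pushes the updated PTE
 * out of the data cache for other processors on MULTIPROCESSOR kernels.
 */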

static inline void
pmap_pte_set(volatile struct pte *pt, struct pte *pvo_pt)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if (pvo_pt->pte_hi & PTE_VALID)
		panic("pte_set: setting an already valid pte %p", pvo_pt);
#endif
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1
	 * Note that the REF/CHG bits are from pvo_pt and thus should
	 * have been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	TLBSYNC();
	SYNC();
#ifdef MULTIPROCESSOR
	DCBST(pt);
#endif
	pmap_pte_valid++;
}

static inline void
pmap_pte_unset(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
#if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
	if ((pvo_pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#1 %p/%p", pvo_pt, pt);
	if ((pt->pte_hi & PTE_VALID) == 0)
		panic("pte_unset: attempt to unset an inactive pte#2 %p/%p", pvo_pt, pt);
#endif

	pvo_pt->pte_hi &= ~PTE_VALID;
	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();
	/*
	 * Invalidate the pte ... (Section 7.6.3.3)
	 */
	pt->pte_hi &= ~PTE_VALID;
	SYNC();
	TLBIE(va);
	SYNC();
	EIEIO();
	TLBSYNC();
	SYNC();
	/*
	 * Save the ref & chg bits ...
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static inline void
pmap_pte_change(volatile struct pte *pt, struct pte *pvo_pt, vaddr_t va)
{
	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}

/*
 * Try to insert the PTE @ *pvo_pt into the pmap_pteg_table at ptegidx
 * (either primary or secondary location).
 *
 * Note: both the destination and source PTEs must not have PTE_VALID set.
 */

static int
pmap_pte_insert(int ptegidx, struct pte *pvo_pt)
{
	volatile struct pte *pt;
	int i;

#if defined(DEBUG)
	DPRINTFN(PTE, "pmap_pte_insert: idx %#x, pte %#" _PRIxpte " %#" _PRIxpte "\n",
		ptegidx, pvo_pt->pte_hi, pvo_pt->pte_lo);
#endif
	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return i;
		}
	}
	return -1;
}
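
/*
 * The secondary PTEG is always at the one's-complement index
 * (ptegidx ^ pmap_pteg_mask), and PTE_HID records which hash a slot was
 * filled from so eviction and pmap_pte_to_va() can undo the XOR.  With
 * 8 slots per PTEG, each mapping therefore has 16 candidate slots.
 */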

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * This runs in either real mode (if dealing with an exception spill)
 * or virtual mode when dealing with manually spilling one of the
 * kernel's pte entries.  In either case, interrupts are already
 * disabled.
 */

int
pmap_pte_spill(struct pmap *pm, vaddr_t addr, bool exec)
{
	struct pvo_entry *source_pvo, *victim_pvo, *next_pvo;
	struct pvo_entry *pvo;
	/* XXX: gcc -- vpvoh is always set at either *1* or *2* */
	struct pvo_tqhead *pvoh, *vpvoh = NULL;
	int ptegidx, i, j;
	volatile struct pteg *pteg;
	volatile struct pte *pt;

	PMAP_LOCK();

	ptegidx = va_to_pteg(pm, addr);

	/*
	 * Have to substitute some entry. Use the primary hash for this.
	 * Use low bits of timebase as random generator.  Make sure we are
	 * not picking a kernel pte for replacement.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	i = MFTB() & 7;
	for (j = 0; j < 8; j++) {
		pt = &pteg->pt[i];
		if ((pt->pte_hi & PTE_VALID) == 0)
			break;
		if (VSID_TO_HASH((pt->pte_hi & PTE_VSID) >> PTE_VSID_SHFT)
				< PHYSMAP_VSIDBITS)
			break;
		i = (i + 1) & 7;
	}
	KASSERT(j < 8);

	source_pvo = NULL;
	victim_pvo = NULL;
	pvoh = &pmap_pvo_table[ptegidx];
	TAILQ_FOREACH(pvo, pvoh, pvo_olink) {

		/*
		 * We need to find pvo entry for this address...
		 */
		PMAP_PVO_CHECK(pvo);		/* sanity check */

		/*
		 * If we haven't found the source and we come to a PVO with
		 * a valid PTE, then we know we can't find it because all
		 * evicted PVOs are always first in the list.
		 */
		if (source_pvo == NULL && (pvo->pvo_pte.pte_hi & PTE_VALID))
			break;
		if (source_pvo == NULL && pm == pvo->pvo_pmap &&
		    addr == PVO_VADDR(pvo)) {

			/*
			 * Now we have found the entry to be spilled into the
			 * pteg.  Attempt to insert it into the page table.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				PVO_WHERE(pvo, SPILL_INSERT);
				pvo->pvo_pmap->pm_evictions--;
				PMAPCOUNT(ptes_spilled);
				PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
				    ? pmap_evcnt_ptes_secondary
				    : pmap_evcnt_ptes_primary)[j]);

				/*
				 * Since we keep the evicted entries at the
				 * front of the PVO list, we need to move this
				 * (now resident) PVO after the evicted
				 * entries.
				 */
				next_pvo = TAILQ_NEXT(pvo, pvo_olink);

				/*
				 * If we don't have to move (either we were the
				 * last entry or the next entry was valid),
				 * don't change our position.  Otherwise
				 * move ourselves to the tail of the queue.
				 */
				if (next_pvo != NULL &&
				    !(next_pvo->pvo_pte.pte_hi & PTE_VALID)) {
					TAILQ_REMOVE(pvoh, pvo, pvo_olink);
					TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
				}
				PMAP_UNLOCK();
				return 1;
			}
			source_pvo = pvo;
    992      1.39      matt 			if (exec && !PVO_EXECUTABLE_P(source_pvo)) {
                            				PMAP_UNLOCK();
    993      1.14       chs 				return 0;
    994      1.14       chs 			}
    995       1.1      matt 			if (victim_pvo != NULL)
    996       1.1      matt 				break;
    997       1.1      matt 		}
    998       1.1      matt 
    999       1.1      matt 		/*
   1000       1.1      matt 		 * We also need the pvo entry of the victim we are replacing
   1001       1.1      matt 		 * so we can save the R & C bits of the PTE.
   1002       1.1      matt 		 */
   1003       1.1      matt 		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
   1004       1.1      matt 		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
   1005      1.15    dyoung 			vpvoh = pvoh;			/* *1* */
   1006       1.1      matt 			victim_pvo = pvo;
   1007       1.1      matt 			if (source_pvo != NULL)
   1008       1.1      matt 				break;
   1009       1.1      matt 		}
   1010       1.1      matt 	}
   1011       1.1      matt 
   1012       1.1      matt 	if (source_pvo == NULL) {
   1013       1.1      matt 		PMAPCOUNT(ptes_unspilled);
   1014      1.50        ad 		PMAP_UNLOCK();
   1015       1.1      matt 		return 0;
   1016       1.1      matt 	}
   1017       1.1      matt 
   1018       1.1      matt 	if (victim_pvo == NULL) {
   1019       1.1      matt 		if ((pt->pte_hi & PTE_HID) == 0)
   1020       1.1      matt 			panic("pmap_pte_spill: victim p-pte (%p) has "
   1021       1.1      matt 			    "no pvo entry!", pt);
   1022       1.1      matt 
   1023       1.1      matt 		/*
   1024       1.1      matt 		 * If this is a secondary PTE, we need to search
   1025       1.1      matt 		 * its primary pvo bucket for the matching PVO.
   1026       1.1      matt 		 */
   1027      1.15    dyoung 		vpvoh = &pmap_pvo_table[ptegidx ^ pmap_pteg_mask]; /* *2* */
   1028       1.1      matt 		TAILQ_FOREACH(pvo, vpvoh, pvo_olink) {
   1029       1.1      matt 			PMAP_PVO_CHECK(pvo);		/* sanity check */
   1030       1.1      matt 
   1031       1.1      matt 			/*
   1032       1.1      matt 			 * We also need the pvo entry of the victim we are
   1033       1.1      matt 			 * replacing so save the R & C bits of the PTE.
   1034       1.1      matt 			 * replacing so we can save the R & C bits of the PTE.
   1035       1.1      matt 			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
   1036       1.1      matt 				victim_pvo = pvo;
   1037       1.1      matt 				break;
   1038       1.1      matt 			}
   1039       1.1      matt 		}
   1040       1.1      matt 		if (victim_pvo == NULL)
   1041       1.1      matt 			panic("pmap_pte_spill: victim s-pte (%p) has "
   1042       1.1      matt 			    "no pvo entry!", pt);
   1043       1.1      matt 	}
   1044       1.1      matt 
   1045       1.1      matt 	/*
   1046      1.12      matt 	 * The victim should not be a kernel PVO/PTE entry.
   1047      1.12      matt 	 */
   1048      1.12      matt 	KASSERT(victim_pvo->pvo_pmap != pmap_kernel());
   1049      1.12      matt 	KASSERT(PVO_PTEGIDX_ISSET(victim_pvo));
   1050      1.12      matt 	KASSERT(PVO_PTEGIDX_GET(victim_pvo) == i);
   1051      1.12      matt 
   1052      1.12      matt 	/*
   1053       1.1      matt 	 * We are invalidating the TLB entry for the EA of the
   1054       1.1      matt 	 * PTE we are replacing even though it's valid; if we
   1055       1.1      matt 	 * don't, we lose any ref/chg bit changes contained in
   1056       1.1      matt 	 * the TLB entry.
   1057       1.1      matt 	 */
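                            	/*
                            	 * The source PVO is installed via the primary hash below,
                            	 * so make sure its cached PTE no longer carries the HID
                            	 * (secondary hash) bit.
                            	 */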
   1058       1.1      matt 	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;
   1059       1.1      matt 
   1060       1.1      matt 	/*
   1061       1.1      matt 	 * To enforce the PVO list ordering constraint that all
   1062       1.1      matt 	 * evicted entries should come before all valid entries,
   1063       1.1      matt 	 * move the source PVO to the tail of its list and the
   1064       1.1      matt 	 * victim PVO to the head of its list (which might not be
   1065       1.1      matt 	 * the same list, if the victim was using the secondary hash).
   1066       1.1      matt 	 */
   1067       1.1      matt 	TAILQ_REMOVE(pvoh, source_pvo, pvo_olink);
   1068       1.1      matt 	TAILQ_INSERT_TAIL(pvoh, source_pvo, pvo_olink);
   1069       1.1      matt 	TAILQ_REMOVE(vpvoh, victim_pvo, pvo_olink);
   1070       1.1      matt 	TAILQ_INSERT_HEAD(vpvoh, victim_pvo, pvo_olink);
   1071       1.1      matt 	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
   1072       1.1      matt 	pmap_pte_set(pt, &source_pvo->pvo_pte);
   1073       1.1      matt 	victim_pvo->pvo_pmap->pm_evictions++;
   1074       1.1      matt 	source_pvo->pvo_pmap->pm_evictions--;
   1075      1.12      matt 	PVO_WHERE(victim_pvo, SPILL_UNSET);
   1076      1.12      matt 	PVO_WHERE(source_pvo, SPILL_SET);
   1077       1.1      matt 
   1078       1.1      matt 	PVO_PTEGIDX_CLR(victim_pvo);
   1079       1.1      matt 	PVO_PTEGIDX_SET(source_pvo, i);
   1080       1.1      matt 	PMAPCOUNT2(pmap_evcnt_ptes_primary[i]);
   1081       1.1      matt 	PMAPCOUNT(ptes_spilled);
   1082       1.1      matt 	PMAPCOUNT(ptes_evicted);
   1083       1.1      matt 	PMAPCOUNT(ptes_removed);
   1084       1.1      matt 
   1085       1.1      matt 	PMAP_PVO_CHECK(victim_pvo);
   1086       1.1      matt 	PMAP_PVO_CHECK(source_pvo);
   1087      1.50        ad 
   1088      1.50        ad 	PMAP_UNLOCK();
   1089       1.1      matt 	return 1;
   1090       1.1      matt }
   1091       1.1      matt 
   1092       1.1      matt /*
   1093       1.1      matt  * Restrict given range to physical memory
   1094       1.1      matt  */
   1095       1.1      matt void
   1096       1.1      matt pmap_real_memory(paddr_t *start, psize_t *size)
   1097       1.1      matt {
   1098       1.1      matt 	struct mem_region *mp;
   1099       1.1      matt 
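                            	/*
                            	 * Clip the range to the first region it overlaps; if it
                            	 * overlaps no region at all, return a zero size.
                            	 */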
   1100       1.1      matt 	for (mp = mem; mp->size; mp++) {
   1101       1.1      matt 		if (*start + *size > mp->start
   1102       1.1      matt 		    && *start < mp->start + mp->size) {
   1103       1.1      matt 			if (*start < mp->start) {
   1104       1.1      matt 				*size -= mp->start - *start;
   1105       1.1      matt 				*start = mp->start;
   1106       1.1      matt 			}
   1107       1.1      matt 			if (*start + *size > mp->start + mp->size)
   1108       1.1      matt 				*size = mp->start + mp->size - *start;
   1109       1.1      matt 			return;
   1110       1.1      matt 		}
   1111       1.1      matt 	}
   1112       1.1      matt 	*size = 0;
   1113       1.1      matt }
   1114       1.1      matt 
   1115       1.1      matt /*
   1116       1.1      matt  * Initialize anything else for pmap handling.
   1117       1.1      matt  * Called during vm_init().
   1118       1.1      matt  */
   1119       1.1      matt void
   1120       1.1      matt pmap_init(void)
   1121       1.1      matt {
   1122       1.1      matt 	pool_init(&pmap_mpvo_pool, sizeof(struct pvo_entry),
   1123       1.1      matt 	    sizeof(struct pvo_entry), 0, 0, "pmap_mpvopl",
   1124      1.48        ad 	    &pmap_pool_mallocator, IPL_NONE);
   1125       1.1      matt 
   1126       1.1      matt 	pool_setlowat(&pmap_mpvo_pool, 1008);
   1127       1.1      matt 
   1128       1.1      matt 	pmap_initialized = 1;
   1129       1.1      matt 
   1130       1.1      matt }
   1131       1.1      matt 
   1132       1.1      matt /*
   1133      1.10   thorpej  * How much virtual space does the kernel get?
   1134      1.10   thorpej  */
   1135      1.10   thorpej void
   1136      1.10   thorpej pmap_virtual_space(vaddr_t *start, vaddr_t *end)
   1137      1.10   thorpej {
   1138      1.10   thorpej 	/*
   1139      1.10   thorpej 	 * For now, reserve one segment (minus some overhead) for kernel
   1140      1.10   thorpej 	 * virtual memory
   1141      1.10   thorpej 	 */
   1142      1.10   thorpej 	*start = VM_MIN_KERNEL_ADDRESS;
   1143      1.10   thorpej 	*end = VM_MAX_KERNEL_ADDRESS;
   1144      1.10   thorpej }
   1145      1.10   thorpej 
   1146      1.10   thorpej /*
   1147       1.1      matt  * Allocate, initialize, and return a new physical map.
   1148       1.1      matt  */
   1149       1.1      matt pmap_t
   1150       1.1      matt pmap_create(void)
   1151       1.1      matt {
   1152       1.1      matt 	pmap_t pm;
   1153      1.38   sanjayl 
   1154       1.1      matt 	pm = pool_get(&pmap_pool, PR_WAITOK);
   1155      1.84      matt 	KASSERT((vaddr_t)pm < VM_MIN_KERNEL_ADDRESS);
   1156      1.46  christos 	memset((void *)pm, 0, sizeof *pm);
   1157       1.1      matt 	pmap_pinit(pm);
   1158       1.1      matt 
   1159      1.85      matt 	DPRINTFN(CREATE, "pmap_create: pm %p:\n"
   1160      1.54   mlelstv 	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
   1161      1.54   mlelstv 	    "    %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n"
   1162      1.54   mlelstv 	    "\t%#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr
   1163      1.54   mlelstv 	    "    %#" _PRIsr " %#" _PRIsr " %#" _PRIsr " %#" _PRIsr "\n",
   1164      1.54   mlelstv 	    pm,
   1165      1.54   mlelstv 	    pm->pm_sr[0], pm->pm_sr[1],
   1166      1.54   mlelstv 	    pm->pm_sr[2], pm->pm_sr[3],
   1167      1.54   mlelstv 	    pm->pm_sr[4], pm->pm_sr[5],
   1168      1.54   mlelstv 	    pm->pm_sr[6], pm->pm_sr[7],
   1169      1.54   mlelstv 	    pm->pm_sr[8], pm->pm_sr[9],
   1170      1.54   mlelstv 	    pm->pm_sr[10], pm->pm_sr[11],
   1171      1.54   mlelstv 	    pm->pm_sr[12], pm->pm_sr[13],
   1172      1.85      matt 	    pm->pm_sr[14], pm->pm_sr[15]);
   1173       1.1      matt 	return pm;
   1174       1.1      matt }
   1175       1.1      matt 
   1176       1.1      matt /*
   1177       1.1      matt  * Initialize a preallocated and zeroed pmap structure.
   1178       1.1      matt  */
   1179       1.1      matt void
   1180       1.1      matt pmap_pinit(pmap_t pm)
   1181       1.1      matt {
   1182       1.2      matt 	register_t entropy = MFTB();
   1183       1.2      matt 	register_t mask;
   1184       1.2      matt 	int i;
   1185       1.1      matt 
   1186       1.1      matt 	/*
   1187       1.1      matt 	 * Allocate some segment registers for this pmap.
   1188       1.1      matt 	 */
   1189       1.1      matt 	pm->pm_refs = 1;
   1190      1.50        ad 	PMAP_LOCK();
   1191       1.2      matt 	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
   1192       1.2      matt 		static register_t pmap_vsidcontext;
   1193       1.2      matt 		register_t hash;
   1194       1.2      matt 		unsigned int n;
   1195       1.1      matt 
   1196       1.1      matt 		/* Create a new value by multiplying by a prime and adding in
   1197       1.1      matt 		 * entropy from the timebase register.  This is to make the
   1198       1.1      matt 		 * VSID more random so that the PT Hash function collides
   1199       1.1      matt 		 * less often. (note that the prime causes gcc to do shifts
   1200       1.1      matt 		 * instead of a multiply)
   1201       1.1      matt 		 */
   1202       1.1      matt 		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
   1203       1.1      matt 		hash = pmap_vsidcontext & (NPMAPS - 1);
   1204      1.23   aymeric 		if (hash == 0) {		/* 0 is special, avoid it */
   1205      1.23   aymeric 			entropy += 0xbadf00d;
   1206       1.1      matt 			continue;
   1207      1.23   aymeric 		}
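                            		/*
                            		 * Each word of the VSID bitmap tracks VSID_NBPW buckets:
                            		 * n selects the word, mask the bit within that word.
                            		 */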
   1208       1.1      matt 		n = hash >> 5;
   1209       1.2      matt 		mask = 1L << (hash & (VSID_NBPW-1));
   1210       1.2      matt 		hash = pmap_vsidcontext;
   1211       1.1      matt 		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
   1212       1.1      matt 			/* anything free in this bucket? */
   1213       1.2      matt 			if (~pmap_vsid_bitmap[n] == 0) {
   1214      1.23   aymeric 				entropy = hash ^ (hash >> 16);
   1215       1.1      matt 				continue;
   1216       1.1      matt 			}
   1217       1.1      matt 			i = ffs(~pmap_vsid_bitmap[n]) - 1;
   1218       1.2      matt 			mask = 1L << i;
   1219       1.2      matt 			hash &= ~(VSID_NBPW-1);
   1220       1.1      matt 			hash |= i;
   1221       1.1      matt 		}
   1222      1.18      matt 		hash &= PTE_VSID >> PTE_VSID_SHFT;
   1223       1.1      matt 		pmap_vsid_bitmap[n] |= mask;
   1224      1.18      matt 		pm->pm_vsid = hash;
   1225      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
   1226       1.1      matt 		for (i = 0; i < 16; i++)
   1227      1.14       chs 			pm->pm_sr[i] = VSID_MAKE(i, hash) | SR_PRKEY |
   1228      1.14       chs 			    SR_NOEXEC;
   1229      1.18      matt #endif
   1230      1.50        ad 		PMAP_UNLOCK();
   1231       1.1      matt 		return;
   1232       1.1      matt 	}
   1233      1.50        ad 	PMAP_UNLOCK();
   1234       1.1      matt 	panic("pmap_pinit: out of segments");
   1235       1.1      matt }
   1236       1.1      matt 
   1237       1.1      matt /*
   1238       1.1      matt  * Add a reference to the given pmap.
   1239       1.1      matt  */
   1240       1.1      matt void
   1241       1.1      matt pmap_reference(pmap_t pm)
   1242       1.1      matt {
   1243      1.50        ad 	atomic_inc_uint(&pm->pm_refs);
   1244       1.1      matt }
   1245       1.1      matt 
   1246       1.1      matt /*
   1247       1.1      matt  * Retire the given pmap from service.
   1248       1.1      matt  * Should only be called if the map contains no valid mappings.
   1249       1.1      matt  */
   1250       1.1      matt void
   1251       1.1      matt pmap_destroy(pmap_t pm)
   1252       1.1      matt {
   1253      1.50        ad 	if (atomic_dec_uint_nv(&pm->pm_refs) == 0) {
   1254       1.1      matt 		pmap_release(pm);
   1255       1.1      matt 		pool_put(&pmap_pool, pm);
   1256       1.1      matt 	}
   1257       1.1      matt }
   1258       1.1      matt 
   1259       1.1      matt /*
   1260       1.1      matt  * Release any resources held by the given physical map.
   1261       1.1      matt  * Called when a pmap initialized by pmap_pinit is being released.
   1262       1.1      matt  */
   1263       1.1      matt void
   1264       1.1      matt pmap_release(pmap_t pm)
   1265       1.1      matt {
   1266       1.1      matt 	int idx, mask;
   1267      1.39      matt 
   1268      1.39      matt 	KASSERT(pm->pm_stats.resident_count == 0);
   1269      1.39      matt 	KASSERT(pm->pm_stats.wired_count == 0);
   1270       1.1      matt 
   1271      1.50        ad 	PMAP_LOCK();
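                            	/*
                            	 * Return this pmap's VSID to the allocation bitmap; this
                            	 * undoes the allocation done in pmap_pinit().
                            	 */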
   1272       1.1      matt 	if (pm->pm_sr[0] == 0)
   1273       1.1      matt 		panic("pmap_release");
   1274      1.22   aymeric 	idx = pm->pm_vsid & (NPMAPS-1);
   1275       1.1      matt 	mask = 1 << (idx % VSID_NBPW);
   1276       1.1      matt 	idx /= VSID_NBPW;
   1277      1.22   aymeric 
   1278      1.22   aymeric 	KASSERT(pmap_vsid_bitmap[idx] & mask);
   1279       1.1      matt 	pmap_vsid_bitmap[idx] &= ~mask;
   1280      1.50        ad 	PMAP_UNLOCK();
   1281       1.1      matt }
   1282       1.1      matt 
   1283       1.1      matt /*
   1284       1.1      matt  * Copy the range specified by src_addr/len
   1285       1.1      matt  * from the source map to the range dst_addr/len
   1286       1.1      matt  * in the destination map.
   1287       1.1      matt  *
   1288       1.1      matt  * This routine is only advisory and need not do anything.
   1289       1.1      matt  */
   1290       1.1      matt void
   1291       1.1      matt pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
   1292       1.1      matt 	vsize_t len, vaddr_t src_addr)
   1293       1.1      matt {
   1294       1.1      matt 	PMAPCOUNT(copies);
   1295       1.1      matt }
   1296       1.1      matt 
   1297       1.1      matt /*
   1298       1.1      matt  * Require that all active physical maps contain no
   1299       1.1      matt  * incorrect entries NOW.
   1300       1.1      matt  */
   1301       1.1      matt void
   1302       1.1      matt pmap_update(struct pmap *pmap)
   1303       1.1      matt {
   1304       1.1      matt 	PMAPCOUNT(updates);
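                            	/* tlbsync waits for preceding tlbie invalidations to complete. */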
   1305       1.1      matt 	TLBSYNC();
   1306       1.1      matt }
   1307       1.1      matt 
   1308      1.35     perry static inline int
   1309       1.1      matt pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
   1310       1.1      matt {
   1311       1.1      matt 	int pteidx;
   1312       1.1      matt 	/*
   1313       1.1      matt 	 * We can find the actual pte entry without searching by
   1314       1.1      matt 	 * grabbing the PTEG index from 3 unused bits in pte_lo[11:9]
   1315       1.1      matt 	 * and by noticing the HID bit.
   1316       1.1      matt 	 */
   1317       1.1      matt 	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
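                            	/*
                            	 * Entries placed via the secondary hash live in the
                            	 * complemented PTEG; the "* 8" scales the group index to a
                            	 * per-PTE index, as each group holds eight PTEs.
                            	 */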
   1318       1.1      matt 	if (pvo->pvo_pte.pte_hi & PTE_HID)
   1319       1.1      matt 		pteidx ^= pmap_pteg_mask * 8;
   1320       1.1      matt 	return pteidx;
   1321       1.1      matt }
   1322       1.1      matt 
   1323       1.2      matt volatile struct pte *
   1324       1.1      matt pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
   1325       1.1      matt {
   1326       1.2      matt 	volatile struct pte *pt;
   1327       1.1      matt 
   1328       1.1      matt #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
   1329       1.1      matt 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0)
   1330       1.1      matt 		return NULL;
   1331       1.1      matt #endif
   1332       1.1      matt 
   1333       1.1      matt 	/*
   1334       1.1      matt 	 * If we haven't been supplied the ptegidx, calculate it.
   1335       1.1      matt 	 */
   1336       1.1      matt 	if (pteidx == -1) {
   1337       1.1      matt 		int ptegidx;
   1338       1.2      matt 		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
   1339       1.1      matt 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
   1340       1.1      matt 	}
   1341       1.1      matt 
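                            	/* Eight PTEs per PTEG: high bits pick the group, low 3 bits the slot. */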
   1342       1.1      matt 	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];
   1343       1.1      matt 
   1344       1.1      matt #if !defined(DIAGNOSTIC) && !defined(DEBUG) && !defined(PMAPCHECK)
   1345       1.1      matt 	return pt;
   1346       1.1      matt #else
   1347       1.1      matt 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
   1348       1.1      matt 		panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
   1349       1.1      matt 		    "pvo but no valid pte index", pvo);
   1350       1.1      matt 	}
   1351       1.1      matt 	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
   1352       1.1      matt 		panic("pmap_pvo_to_pte: pvo %p: has valid pte index in "
   1353       1.1      matt 		    "pvo but no valid pte", pvo);
   1354       1.1      matt 	}
   1355       1.1      matt 
   1356       1.1      matt 	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
   1357       1.1      matt 		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
   1358       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1359       1.1      matt 			pmap_pte_print(pt);
   1360       1.1      matt #endif
   1361       1.1      matt 			panic("pmap_pvo_to_pte: pvo %p: has valid pte in "
   1362       1.1      matt 			    "pmap_pteg_table %p but invalid in pvo",
   1363       1.1      matt 			    pvo, pt);
   1364       1.1      matt 		}
   1365       1.1      matt 		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) != 0) {
   1366       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1367       1.1      matt 			pmap_pte_print(pt);
   1368       1.1      matt #endif
   1369       1.1      matt 			panic("pmap_pvo_to_pte: pvo %p: pvo pte does "
   1370       1.1      matt 			    "not match pte %p in pmap_pteg_table",
   1371       1.1      matt 			    pvo, pt);
   1372       1.1      matt 		}
   1373       1.1      matt 		return pt;
   1374       1.1      matt 	}
   1375       1.1      matt 
   1376       1.1      matt 	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
   1377       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1378       1.1      matt 		pmap_pte_print(pt);
   1379       1.1      matt #endif
   1380      1.12      matt 		panic("pmap_pvo_to_pte: pvo %p: has no matching pte %p in "
   1381       1.1      matt 		    "pmap_pteg_table but valid in pvo", pvo, pt);
   1382       1.1      matt 	}
   1383       1.1      matt 	return NULL;
   1384       1.1      matt #endif	/* !(!DIAGNOSTIC && !DEBUG && !PMAPCHECK) */
   1385       1.1      matt }
   1386       1.1      matt 
   1387       1.1      matt struct pvo_entry *
   1388       1.1      matt pmap_pvo_find_va(pmap_t pm, vaddr_t va, int *pteidx_p)
   1389       1.1      matt {
   1390       1.1      matt 	struct pvo_entry *pvo;
   1391       1.1      matt 	int ptegidx;
   1392       1.1      matt 
   1393       1.1      matt 	va &= ~ADDR_POFF;
   1394       1.2      matt 	ptegidx = va_to_pteg(pm, va);
   1395       1.1      matt 
   1396       1.1      matt 	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   1397       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1398       1.1      matt 		if ((uintptr_t) pvo >= SEGMENT_LENGTH)
   1399       1.1      matt 			panic("pmap_pvo_find_va: invalid pvo %p on "
   1400       1.1      matt 			    "list %#x (%p)", pvo, ptegidx,
   1401       1.1      matt 			     &pmap_pvo_table[ptegidx]);
   1402       1.1      matt #endif
   1403       1.1      matt 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
   1404       1.1      matt 			if (pteidx_p)
   1405       1.1      matt 				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
   1406       1.1      matt 			return pvo;
   1407       1.1      matt 		}
   1408       1.1      matt 	}
   1409      1.38   sanjayl 	if ((pm == pmap_kernel()) && (va < SEGMENT_LENGTH))
   1410      1.54   mlelstv 		panic("%s: returning NULL for %s pmap, va: %#" _PRIxva "\n",
   1411      1.53   garbled 		    __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
   1412       1.1      matt 	return NULL;
   1413       1.1      matt }
   1414       1.1      matt 
   1415       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK)
   1416       1.1      matt void
   1417       1.1      matt pmap_pvo_check(const struct pvo_entry *pvo)
   1418       1.1      matt {
   1419       1.1      matt 	struct pvo_head *pvo_head;
   1420       1.1      matt 	struct pvo_entry *pvo0;
   1421       1.2      matt 	volatile struct pte *pt;
   1422       1.1      matt 	int failed = 0;
   1423       1.1      matt 
   1424      1.50        ad 	PMAP_LOCK();
   1425      1.50        ad 
   1426       1.1      matt 	if ((uintptr_t)(pvo+1) >= SEGMENT_LENGTH)
   1427       1.1      matt 		panic("pmap_pvo_check: pvo %p: invalid address", pvo);
   1428       1.1      matt 
   1429       1.1      matt 	if ((uintptr_t)(pvo->pvo_pmap+1) >= SEGMENT_LENGTH) {
   1430       1.1      matt 		printf("pmap_pvo_check: pvo %p: invalid pmap address %p\n",
   1431       1.1      matt 		    pvo, pvo->pvo_pmap);
   1432       1.1      matt 		failed = 1;
   1433       1.1      matt 	}
   1434       1.1      matt 
   1435       1.1      matt 	if ((uintptr_t)TAILQ_NEXT(pvo, pvo_olink) >= SEGMENT_LENGTH ||
   1436       1.1      matt 	    (((uintptr_t)TAILQ_NEXT(pvo, pvo_olink)) & 0x1f) != 0) {
   1437       1.1      matt 		printf("pmap_pvo_check: pvo %p: invalid ovlink address %p\n",
   1438       1.1      matt 		    pvo, TAILQ_NEXT(pvo, pvo_olink));
   1439       1.1      matt 		failed = 1;
   1440       1.1      matt 	}
   1441       1.1      matt 
   1442       1.1      matt 	if ((uintptr_t)LIST_NEXT(pvo, pvo_vlink) >= SEGMENT_LENGTH ||
   1443       1.1      matt 	    (((uintptr_t)LIST_NEXT(pvo, pvo_vlink)) & 0x1f) != 0) {
   1444       1.1      matt 		printf("pmap_pvo_check: pvo %p: invalid vlink address %p\n",
   1445       1.1      matt 		    pvo, LIST_NEXT(pvo, pvo_vlink));
   1446       1.1      matt 		failed = 1;
   1447       1.1      matt 	}
   1448       1.1      matt 
   1449      1.39      matt 	if (PVO_MANAGED_P(pvo)) {
   1450       1.1      matt 		pvo_head = pa_to_pvoh(pvo->pvo_pte.pte_lo & PTE_RPGN, NULL);
   1451       1.1      matt 	} else {
   1452       1.1      matt 		if (pvo->pvo_vaddr < VM_MIN_KERNEL_ADDRESS) {
   1453       1.1      matt 			printf("pmap_pvo_check: pvo %p: non kernel address "
   1454       1.1      matt 			    "on kernel unmanaged list\n", pvo);
   1455       1.1      matt 			failed = 1;
   1456       1.1      matt 		}
   1457       1.1      matt 		pvo_head = &pmap_pvo_kunmanaged;
   1458       1.1      matt 	}
   1459       1.1      matt 	LIST_FOREACH(pvo0, pvo_head, pvo_vlink) {
   1460       1.1      matt 		if (pvo0 == pvo)
   1461       1.1      matt 			break;
   1462       1.1      matt 	}
   1463       1.1      matt 	if (pvo0 == NULL) {
   1464       1.1      matt 		printf("pmap_pvo_check: pvo %p: not present "
   1465       1.1      matt 		    "on its vlist head %p\n", pvo, pvo_head);
   1466       1.1      matt 		failed = 1;
   1467       1.1      matt 	}
   1468       1.1      matt 	if (pvo != pmap_pvo_find_va(pvo->pvo_pmap, pvo->pvo_vaddr, NULL)) {
   1469       1.1      matt 		printf("pmap_pvo_check: pvo %p: not present "
   1470       1.1      matt 		    "on its olist head\n", pvo);
   1471       1.1      matt 		failed = 1;
   1472       1.1      matt 	}
   1473       1.1      matt 	pt = pmap_pvo_to_pte(pvo, -1);
   1474       1.1      matt 	if (pt == NULL) {
   1475       1.1      matt 		if (pvo->pvo_pte.pte_hi & PTE_VALID) {
   1476       1.1      matt 			printf("pmap_pvo_check: pvo %p: pte_hi VALID but "
   1477       1.1      matt 			    "no PTE\n", pvo);
   1478       1.1      matt 			failed = 1;
   1479       1.1      matt 		}
   1480       1.1      matt 	} else {
   1481       1.1      matt 		if ((uintptr_t) pt < (uintptr_t) &pmap_pteg_table[0] ||
   1482       1.1      matt 		    (uintptr_t) pt >=
   1483       1.1      matt 		    (uintptr_t) &pmap_pteg_table[pmap_pteg_cnt]) {
   1484       1.1      matt 			printf("pmap_pvo_check: pvo %p: pte %p not in "
   1485       1.1      matt 			    "pteg table\n", pvo, pt);
   1486       1.1      matt 			failed = 1;
   1487       1.1      matt 		}
   1488       1.1      matt 		if (((((uintptr_t) pt) >> 3) & 7) != PVO_PTEGIDX_GET(pvo)) {
   1489       1.1      matt 			printf("pmap_pvo_check: pvo %p: pte slot doesn't "
   1490       1.1      matt 			    "match PVO's PTEG index\n", pvo);
   1491       1.1      matt 			failed = 1;
   1492       1.1      matt 		}
   1493       1.1      matt 		if (pvo->pvo_pte.pte_hi != pt->pte_hi) {
   1494       1.1      matt 			printf("pmap_pvo_check: pvo %p: pte_hi differ: "
   1495      1.54   mlelstv 			    "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
   1496      1.54   mlelstv 			    pvo->pvo_pte.pte_hi,
   1497      1.54   mlelstv 			    pt->pte_hi);
   1498       1.1      matt 			failed = 1;
   1499       1.1      matt 		}
   1500       1.1      matt 		if (((pvo->pvo_pte.pte_lo ^ pt->pte_lo) &
   1501       1.1      matt 		    (PTE_PP|PTE_WIMG|PTE_RPGN)) != 0) {
   1502       1.1      matt 			printf("pmap_pvo_check: pvo %p: pte_lo differ: "
   1503      1.54   mlelstv 			    "%#" _PRIxpte "/%#" _PRIxpte "\n", pvo,
   1504      1.54   mlelstv 			    (pvo->pvo_pte.pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)),
   1505      1.54   mlelstv 			    (pt->pte_lo & (PTE_PP|PTE_WIMG|PTE_RPGN)));
   1506       1.1      matt 			failed = 1;
   1507       1.1      matt 		}
   1508       1.1      matt 		if ((pmap_pte_to_va(pt) ^ PVO_VADDR(pvo)) & 0x0fffffff) {
   1509      1.53   garbled 			printf("pmap_pvo_check: pvo %p: PTE %p derived VA %#" _PRIxva ""
   1510      1.53   garbled 			    " doesn't match PVO's VA %#" _PRIxva "\n",
   1511       1.1      matt 			    pvo, pt, pmap_pte_to_va(pt), PVO_VADDR(pvo));
   1512       1.1      matt 			failed = 1;
   1513       1.1      matt 		}
   1514       1.1      matt 		if (failed)
   1515       1.1      matt 			pmap_pte_print(pt);
   1516       1.1      matt 	}
   1517       1.1      matt 	if (failed)
   1518       1.1      matt 		panic("pmap_pvo_check: pvo %p, pm %p: bugcheck!", pvo,
   1519       1.1      matt 		    pvo->pvo_pmap);
   1520      1.50        ad 
   1521      1.50        ad 	PMAP_UNLOCK();
   1522       1.1      matt }
   1523       1.1      matt #endif /* DEBUG || PMAPCHECK */
   1524       1.1      matt 
   1525       1.1      matt /*
   1526      1.25       chs  * Search the PVO table looking for a non-wired entry.
   1527      1.25       chs  * If we find one, remove it and return it.
   1528      1.25       chs  */
   1529      1.25       chs 
   1530      1.25       chs struct pvo_entry *
   1531      1.25       chs pmap_pvo_reclaim(struct pmap *pm)
   1532      1.25       chs {
   1533      1.25       chs 	struct pvo_tqhead *pvoh;
   1534      1.25       chs 	struct pvo_entry *pvo;
   1535      1.25       chs 	uint32_t idx, endidx;
   1536      1.25       chs 
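                            	/*
                            	 * Scan the PVO table round-robin, resuming just past the
                            	 * bucket where the previous reclaim stopped, so successive
                            	 * reclaims don't keep victimizing the same buckets.
                            	 */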
   1537      1.25       chs 	endidx = pmap_pvo_reclaim_nextidx;
   1538      1.25       chs 	for (idx = (endidx + 1) & pmap_pteg_mask; idx != endidx;
   1539      1.25       chs 	     idx = (idx + 1) & pmap_pteg_mask) {
   1540      1.25       chs 		pvoh = &pmap_pvo_table[idx];
   1541      1.25       chs 		TAILQ_FOREACH(pvo, pvoh, pvo_olink) {
   1542      1.39      matt 			if (!PVO_WIRED_P(pvo)) {
   1543      1.33       chs 				pmap_pvo_remove(pvo, -1, NULL);
   1544      1.25       chs 				pmap_pvo_reclaim_nextidx = idx;
   1545      1.26      matt 				PMAPCOUNT(pvos_reclaimed);
   1546      1.25       chs 				return pvo;
   1547      1.25       chs 			}
   1548      1.25       chs 		}
   1549      1.25       chs 	}
   1550      1.25       chs 	return NULL;
   1551      1.25       chs }
   1552      1.25       chs 
   1553      1.25       chs /*
   1554       1.1      matt  * Enter a mapping into the PVO table; returns 0 on success or ENOMEM.
   1555       1.1      matt  */
   1556       1.1      matt int
   1557       1.1      matt pmap_pvo_enter(pmap_t pm, struct pool *pl, struct pvo_head *pvo_head,
   1558       1.2      matt 	vaddr_t va, paddr_t pa, register_t pte_lo, int flags)
   1559       1.1      matt {
   1560       1.1      matt 	struct pvo_entry *pvo;
   1561       1.1      matt 	struct pvo_tqhead *pvoh;
   1562       1.2      matt 	register_t msr;
   1563       1.1      matt 	int ptegidx;
   1564       1.1      matt 	int i;
   1565       1.1      matt 	int poolflags = PR_NOWAIT;
   1566       1.1      matt 
   1567      1.28       chs 	/*
   1568      1.28       chs 	 * Compute the PTE Group index.
   1569      1.28       chs 	 */
   1570      1.28       chs 	va &= ~ADDR_POFF;
   1571      1.28       chs 	ptegidx = va_to_pteg(pm, va);
   1572      1.28       chs 
   1573      1.28       chs 	msr = pmap_interrupts_off();
   1574      1.28       chs 
   1575       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1576       1.1      matt 	if (pmap_pvo_remove_depth > 0)
   1577       1.1      matt 		panic("pmap_pvo_enter: called while pmap_pvo_remove active!");
   1578       1.1      matt 	if (++pmap_pvo_enter_depth > 1)
   1579       1.1      matt 		panic("pmap_pvo_enter: called recursively!");
   1580       1.1      matt #endif
   1581       1.1      matt 
   1582       1.1      matt 	/*
   1583       1.1      matt 	 * Remove any existing mapping for this page.  Reuse the
   1584       1.1      matt 	 * pvo entry if there is a mapping.
   1585       1.1      matt 	 */
   1586       1.1      matt 	TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   1587       1.1      matt 		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
   1588       1.1      matt #ifdef DEBUG
   1589       1.1      matt 			if ((pmapdebug & PMAPDEBUG_PVOENTER) &&
   1590       1.1      matt 			    ((pvo->pvo_pte.pte_lo ^ (pa|pte_lo)) &
   1591       1.1      matt 			    ~(PTE_REF|PTE_CHG)) == 0 &&
   1592       1.1      matt 			   va < VM_MIN_KERNEL_ADDRESS) {
   1593      1.56       phx 				printf("pmap_pvo_enter: pvo %p: dup %#" _PRIxpte "/%#" _PRIxpa "\n",
   1594      1.54   mlelstv 				    pvo, pvo->pvo_pte.pte_lo, pte_lo|pa);
   1595      1.56       phx 				printf("pmap_pvo_enter: pte_hi=%#" _PRIxpte " sr=%#" _PRIsr "\n",
   1596      1.54   mlelstv 				    pvo->pvo_pte.pte_hi,
   1597      1.54   mlelstv 				    pm->pm_sr[va >> ADDR_SR_SHFT]);
   1598       1.1      matt 				pmap_pte_print(pmap_pvo_to_pte(pvo, -1));
   1599       1.1      matt #ifdef DDBX
   1600       1.1      matt 				Debugger();
   1601       1.1      matt #endif
   1602       1.1      matt 			}
   1603       1.1      matt #endif
   1604       1.1      matt 			PMAPCOUNT(mappings_replaced);
   1605      1.33       chs 			pmap_pvo_remove(pvo, -1, NULL);
   1606       1.1      matt 			break;
   1607       1.1      matt 		}
   1608       1.1      matt 	}
   1609       1.1      matt 
   1610       1.1      matt 	/*
   1611       1.1      matt 	 * If we aren't overwriting a mapping, try to allocate a new pvo entry.
   1612       1.1      matt 	 */
   1613      1.26      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1614      1.26      matt 	--pmap_pvo_enter_depth;
   1615      1.26      matt #endif
   1616       1.1      matt 	pmap_interrupts_restore(msr);
   1617      1.33       chs 	if (pvo) {
   1618      1.33       chs 		pmap_pvo_free(pvo);
   1619      1.33       chs 	}
   1620       1.1      matt 	pvo = pool_get(pl, poolflags);
   1621      1.84      matt 	KASSERT((vaddr_t)pvo < VM_MIN_KERNEL_ADDRESS);
   1622      1.25       chs 
   1623      1.25       chs #ifdef DEBUG
   1624      1.25       chs 	/*
   1625      1.25       chs 	 * Exercise pmap_pvo_reclaim() a little.
   1626      1.25       chs 	 */
   1627      1.25       chs 	if (pvo && (flags & PMAP_CANFAIL) != 0 &&
   1628      1.25       chs 	    pmap_pvo_reclaim_debugctr++ > 0x1000 &&
   1629      1.25       chs 	    (pmap_pvo_reclaim_debugctr & 0xff) == 0) {
   1630      1.25       chs 		pool_put(pl, pvo);
   1631      1.25       chs 		pvo = NULL;
   1632      1.25       chs 	}
   1633      1.25       chs #endif
   1634      1.25       chs 
   1635       1.1      matt 	msr = pmap_interrupts_off();
   1636      1.26      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1637      1.26      matt 	++pmap_pvo_enter_depth;
   1638      1.26      matt #endif
   1639       1.1      matt 	if (pvo == NULL) {
   1640       1.1      matt 		pvo = pmap_pvo_reclaim(pm);
   1641       1.1      matt 		if (pvo == NULL) {
   1642       1.1      matt 			if ((flags & PMAP_CANFAIL) == 0)
   1643       1.1      matt 				panic("pmap_pvo_enter: failed");
   1644       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1645       1.1      matt 			pmap_pvo_enter_depth--;
   1646       1.1      matt #endif
   1647      1.26      matt 			PMAPCOUNT(pvos_failed);
   1648       1.1      matt 			pmap_interrupts_restore(msr);
   1649       1.1      matt 			return ENOMEM;
   1650       1.1      matt 		}
   1651       1.1      matt 	}
   1652      1.25       chs 
   1653       1.1      matt 	pvo->pvo_vaddr = va;
   1654       1.1      matt 	pvo->pvo_pmap = pm;
   1655       1.1      matt 	pvo->pvo_vaddr &= ~ADDR_POFF;
   1656       1.1      matt 	if (flags & VM_PROT_EXECUTE) {
   1657       1.1      matt 		PMAPCOUNT(exec_mappings);
   1658      1.14       chs 		pvo_set_exec(pvo);
   1659       1.1      matt 	}
   1660       1.1      matt 	if (flags & PMAP_WIRED)
   1661       1.1      matt 		pvo->pvo_vaddr |= PVO_WIRED;
   1662       1.1      matt 	if (pvo_head != &pmap_pvo_kunmanaged) {
   1663       1.1      matt 		pvo->pvo_vaddr |= PVO_MANAGED;
   1664       1.1      matt 		PMAPCOUNT(mappings);
   1665       1.1      matt 	} else {
   1666       1.1      matt 		PMAPCOUNT(kernel_mappings);
   1667       1.1      matt 	}
   1668       1.2      matt 	pmap_pte_create(&pvo->pvo_pte, pm, va, pa | pte_lo);
   1669       1.1      matt 
   1670       1.1      matt 	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
   1671      1.39      matt 	if (PVO_WIRED_P(pvo))
   1672       1.1      matt 		pvo->pvo_pmap->pm_stats.wired_count++;
   1673       1.1      matt 	pvo->pvo_pmap->pm_stats.resident_count++;
   1674       1.1      matt #if defined(DEBUG)
   1675      1.38   sanjayl /*	if (pm != pmap_kernel() && va < VM_MIN_KERNEL_ADDRESS) */
   1676       1.1      matt 		DPRINTFN(PVOENTER,
   1677      1.85      matt 		    "pmap_pvo_enter: pvo %p: pm %p va %#" _PRIxva " pa %#" _PRIxpa "\n",
   1678      1.85      matt 		    pvo, pm, va, pa);
   1679       1.1      matt #endif
   1680       1.1      matt 
   1681       1.1      matt 	/*
   1682       1.1      matt 	 * We hope this succeeds but it isn't required.
   1683       1.1      matt 	 */
   1684       1.1      matt 	pvoh = &pmap_pvo_table[ptegidx];
   1685       1.1      matt 	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
   1686       1.1      matt 	if (i >= 0) {
   1687       1.1      matt 		PVO_PTEGIDX_SET(pvo, i);
   1688      1.12      matt 		PVO_WHERE(pvo, ENTER_INSERT);
   1689       1.1      matt 		PMAPCOUNT2(((pvo->pvo_pte.pte_hi & PTE_HID)
   1690       1.1      matt 		    ? pmap_evcnt_ptes_secondary : pmap_evcnt_ptes_primary)[i]);
   1691       1.1      matt 		TAILQ_INSERT_TAIL(pvoh, pvo, pvo_olink);
   1692      1.38   sanjayl 
   1693       1.1      matt 	} else {
   1694       1.1      matt 		/*
   1695       1.1      matt 		 * Since we didn't have room for this entry (which makes it
   1696       1.1      matt 		 * an evicted entry), place it at the head of the list.
   1697       1.1      matt 		 */
   1698       1.1      matt 		TAILQ_INSERT_HEAD(pvoh, pvo, pvo_olink);
   1699       1.1      matt 		PMAPCOUNT(ptes_evicted);
   1700       1.1      matt 		pm->pm_evictions++;
   1701      1.12      matt 		/*
   1702      1.12      matt 		 * If this is a kernel page, make sure it's active.
   1703      1.12      matt 		 */
   1704      1.12      matt 		if (pm == pmap_kernel()) {
   1705      1.45   thorpej 			i = pmap_pte_spill(pm, va, false);
   1706      1.12      matt 			KASSERT(i);
   1707      1.12      matt 		}
   1708       1.1      matt 	}
   1709       1.1      matt 	PMAP_PVO_CHECK(pvo);		/* sanity check */
   1710       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1711       1.1      matt 	pmap_pvo_enter_depth--;
   1712       1.1      matt #endif
   1713       1.1      matt 	pmap_interrupts_restore(msr);
   1714       1.1      matt 	return 0;
   1715       1.1      matt }
   1716       1.1      matt 
   1717      1.53   garbled static void
   1718      1.33       chs pmap_pvo_remove(struct pvo_entry *pvo, int pteidx, struct pvo_head *pvol)
   1719       1.1      matt {
   1720       1.2      matt 	volatile struct pte *pt;
   1721       1.1      matt 	int ptegidx;
   1722       1.1      matt 
   1723       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1724       1.1      matt 	if (++pmap_pvo_remove_depth > 1)
   1725       1.1      matt 		panic("pmap_pvo_remove: called recursively!");
   1726       1.1      matt #endif
   1727       1.1      matt 
   1728       1.1      matt 	/*
   1729       1.1      matt 	 * If we haven't been supplied the ptegidx, calculate it.
   1730       1.1      matt 	 */
   1731       1.1      matt 	if (pteidx == -1) {
   1732       1.2      matt 		ptegidx = va_to_pteg(pvo->pvo_pmap, pvo->pvo_vaddr);
   1733       1.1      matt 		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
   1734       1.1      matt 	} else {
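                            		/*
                            		 * Recover the PTEG index from the PTE index; an entry
                            		 * placed via the secondary hash (HID set) lives in the
                            		 * complemented PTEG.
                            		 */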
   1735       1.1      matt 		ptegidx = pteidx >> 3;
   1736       1.1      matt 		if (pvo->pvo_pte.pte_hi & PTE_HID)
   1737       1.1      matt 			ptegidx ^= pmap_pteg_mask;
   1738       1.1      matt 	}
   1739       1.1      matt 	PMAP_PVO_CHECK(pvo);		/* sanity check */
   1740       1.1      matt 
   1741       1.1      matt 	/*
   1742       1.1      matt 	 * If there is an active pte entry, we need to deactivate it
   1743       1.1      matt 	 * (and save the ref & chg bits).
   1744       1.1      matt 	 */
   1745       1.1      matt 	pt = pmap_pvo_to_pte(pvo, pteidx);
   1746       1.1      matt 	if (pt != NULL) {
   1747       1.1      matt 		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   1748      1.12      matt 		PVO_WHERE(pvo, REMOVE);
   1749       1.1      matt 		PVO_PTEGIDX_CLR(pvo);
   1750       1.1      matt 		PMAPCOUNT(ptes_removed);
   1751       1.1      matt 	} else {
   1752       1.1      matt 		KASSERT(pvo->pvo_pmap->pm_evictions > 0);
   1753       1.1      matt 		pvo->pvo_pmap->pm_evictions--;
   1754       1.1      matt 	}
   1755       1.1      matt 
   1756       1.1      matt 	/*
   1757      1.14       chs 	 * Account for executable mappings.
   1758      1.14       chs 	 */
   1759      1.39      matt 	if (PVO_EXECUTABLE_P(pvo))
   1760      1.14       chs 		pvo_clear_exec(pvo);
   1761      1.14       chs 
   1762      1.14       chs 	/*
   1763      1.14       chs 	 * Update our statistics.
   1764       1.1      matt 	 */
   1765       1.1      matt 	pvo->pvo_pmap->pm_stats.resident_count--;
   1766      1.39      matt 	if (PVO_WIRED_P(pvo))
   1767       1.1      matt 		pvo->pvo_pmap->pm_stats.wired_count--;
   1768       1.1      matt 
   1769       1.1      matt 	/*
   1770       1.1      matt 	 * Save the REF/CHG bits into their cache if the page is managed.
   1771       1.1      matt 	 */
   1772      1.39      matt 	if (PVO_MANAGED_P(pvo)) {
   1773       1.2      matt 		register_t ptelo = pvo->pvo_pte.pte_lo;
   1774       1.1      matt 		struct vm_page *pg = PHYS_TO_VM_PAGE(ptelo & PTE_RPGN);
   1775       1.1      matt 
   1776       1.1      matt 		if (pg != NULL) {
   1777      1.37      matt 			/*
   1778      1.37      matt 			 * If this page was changed and it is mapped exec,
   1779      1.37      matt 			 * invalidate it.
   1780      1.37      matt 			 */
   1781      1.37      matt 			if ((ptelo & PTE_CHG) &&
   1782      1.37      matt 			    (pmap_attr_fetch(pg) & PTE_EXEC)) {
   1783      1.37      matt 				struct pvo_head *pvoh = vm_page_to_pvoh(pg);
   1784      1.37      matt 				if (LIST_EMPTY(pvoh)) {
   1785      1.85      matt 					DPRINTFN(EXEC, "[pmap_pvo_remove: "
   1786      1.53   garbled 					    "%#" _PRIxpa ": clear-exec]\n",
   1787      1.85      matt 					    VM_PAGE_TO_PHYS(pg));
   1788      1.37      matt 					pmap_attr_clear(pg, PTE_EXEC);
   1789      1.37      matt 					PMAPCOUNT(exec_uncached_pvo_remove);
   1790      1.37      matt 				} else {
   1791      1.85      matt 					DPRINTFN(EXEC, "[pmap_pvo_remove: "
   1792      1.53   garbled 					    "%#" _PRIxpa ": syncicache]\n",
   1793      1.85      matt 					    VM_PAGE_TO_PHYS(pg));
   1794      1.37      matt 					pmap_syncicache(VM_PAGE_TO_PHYS(pg),
   1795      1.37      matt 					    PAGE_SIZE);
   1796      1.37      matt 					PMAPCOUNT(exec_synced_pvo_remove);
   1797      1.37      matt 				}
   1798      1.37      matt 			}
   1799      1.37      matt 
   1800       1.1      matt 			pmap_attr_save(pg, ptelo & (PTE_REF|PTE_CHG));
   1801       1.1      matt 		}
   1802       1.1      matt 		PMAPCOUNT(unmappings);
   1803       1.1      matt 	} else {
   1804       1.1      matt 		PMAPCOUNT(kernel_unmappings);
   1805       1.1      matt 	}
   1806       1.1      matt 
   1807       1.1      matt 	/*
   1808       1.1      matt 	 * Remove the PVO from its lists and return it to the pool.
   1809       1.1      matt 	 */
   1810       1.1      matt 	LIST_REMOVE(pvo, pvo_vlink);
   1811       1.1      matt 	TAILQ_REMOVE(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
   1812      1.33       chs 	if (pvol) {
   1813      1.33       chs 		LIST_INSERT_HEAD(pvol, pvo, pvo_vlink);
   1814      1.25       chs 	}
   1815       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
   1816       1.1      matt 	pmap_pvo_remove_depth--;
   1817       1.1      matt #endif
   1818       1.1      matt }
   1819       1.1      matt 
   1820      1.33       chs void
   1821      1.33       chs pmap_pvo_free(struct pvo_entry *pvo)
   1822      1.33       chs {
   1823      1.33       chs 
   1824      1.39      matt 	pool_put(PVO_MANAGED_P(pvo) ? &pmap_mpvo_pool : &pmap_upvo_pool, pvo);
   1825      1.33       chs }
   1826      1.33       chs 
   1827      1.33       chs void
   1828      1.33       chs pmap_pvo_free_list(struct pvo_head *pvol)
   1829      1.33       chs {
   1830      1.33       chs 	struct pvo_entry *pvo, *npvo;
   1831      1.33       chs 
   1832      1.33       chs 	for (pvo = LIST_FIRST(pvol); pvo != NULL; pvo = npvo) {
   1833      1.33       chs 		npvo = LIST_NEXT(pvo, pvo_vlink);
   1834      1.33       chs 		LIST_REMOVE(pvo, pvo_vlink);
   1835      1.33       chs 		pmap_pvo_free(pvo);
   1836      1.33       chs 	}
   1837      1.33       chs }
   1838      1.33       chs 
   1839       1.1      matt /*
   1840      1.14       chs  * Mark a mapping as executable.
   1841      1.14       chs  * If this is the first executable mapping in the segment,
   1842      1.14       chs  * clear the noexec flag.
   1843      1.14       chs  */
   1844      1.53   garbled static void
   1845      1.14       chs pvo_set_exec(struct pvo_entry *pvo)
   1846      1.14       chs {
   1847      1.14       chs 	struct pmap *pm = pvo->pvo_pmap;
   1848      1.14       chs 
   1849      1.39      matt 	if (pm == pmap_kernel() || PVO_EXECUTABLE_P(pvo)) {
   1850      1.14       chs 		return;
   1851      1.14       chs 	}
   1852      1.14       chs 	pvo->pvo_vaddr |= PVO_EXECUTABLE;
   1853      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
   1854      1.18      matt 	{
   1855      1.18      matt 		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
   1856      1.18      matt 		if (pm->pm_exec[sr]++ == 0) {
   1857      1.18      matt 			pm->pm_sr[sr] &= ~SR_NOEXEC;
   1858      1.18      matt 		}
   1859      1.14       chs 	}
   1860      1.18      matt #endif
   1861      1.14       chs }
   1862      1.14       chs 
   1863      1.14       chs /*
   1864      1.14       chs  * Mark a mapping as non-executable.
   1865      1.14       chs  * If this was the last executable mapping in the segment,
   1866      1.14       chs  * set the noexec flag.
   1867      1.14       chs  */
   1868      1.53   garbled static void
   1869      1.14       chs pvo_clear_exec(struct pvo_entry *pvo)
   1870      1.14       chs {
   1871      1.14       chs 	struct pmap *pm = pvo->pvo_pmap;
   1872      1.14       chs 
   1873      1.39      matt 	if (pm == pmap_kernel() || !PVO_EXECUTABLE_P(pvo)) {
   1874      1.14       chs 		return;
   1875      1.14       chs 	}
   1876      1.14       chs 	pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
   1877      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
   1878      1.18      matt 	{
   1879      1.18      matt 		int sr = PVO_VADDR(pvo) >> ADDR_SR_SHFT;
   1880      1.18      matt 		if (--pm->pm_exec[sr] == 0) {
   1881      1.18      matt 			pm->pm_sr[sr] |= SR_NOEXEC;
   1882      1.18      matt 		}
   1883      1.14       chs 	}
   1884      1.18      matt #endif
   1885      1.14       chs }
   1886      1.14       chs 
   1887      1.14       chs /*
   1888       1.1      matt  * Insert physical page at pa into the given pmap at virtual address va.
   1889       1.1      matt  */
   1890       1.1      matt int
   1891      1.65    cegger pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1892       1.1      matt {
   1893       1.1      matt 	struct mem_region *mp;
   1894       1.1      matt 	struct pvo_head *pvo_head;
   1895       1.1      matt 	struct vm_page *pg;
   1896       1.1      matt 	struct pool *pl;
   1897       1.2      matt 	register_t pte_lo;
   1898       1.1      matt 	int error;
   1899       1.1      matt 	u_int was_exec = 0;
   1900       1.1      matt 
   1901      1.50        ad 	PMAP_LOCK();
   1902      1.50        ad 
   1903       1.1      matt 	if (__predict_false(!pmap_initialized)) {
   1904       1.1      matt 		pvo_head = &pmap_pvo_kunmanaged;
   1905       1.1      matt 		pl = &pmap_upvo_pool;
   1906       1.1      matt 		pg = NULL;
   1907       1.1      matt 		was_exec = PTE_EXEC;
   1908       1.1      matt 	} else {
   1909       1.1      matt 		pvo_head = pa_to_pvoh(pa, &pg);
   1910       1.1      matt 		pl = &pmap_mpvo_pool;
   1911       1.1      matt 	}
   1912       1.1      matt 
   1913       1.1      matt 	DPRINTFN(ENTER,
   1914      1.85      matt 	    "pmap_enter(%p, %#" _PRIxva ", %#" _PRIxpa ", 0x%x, 0x%x):",
   1915      1.85      matt 	    pm, va, pa, prot, flags);
   1916       1.1      matt 
   1917       1.1      matt 	/*
   1918       1.1      matt 	 * If this is a managed page, and it's the first reference to the
   1919       1.1      matt 	 * page, clear the execness of the page.  Otherwise fetch the execness.
   1920       1.1      matt 	 */
   1921       1.1      matt 	if (pg != NULL)
   1922       1.1      matt 		was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
   1923       1.1      matt 
   1924      1.85      matt 	DPRINTFN(ENTER, " was_exec=%d", was_exec);
   1925       1.1      matt 
   1926       1.1      matt 	/*
   1927       1.1      matt 	 * Assume the page is cache inhibited and access is guarded unless
   1928       1.1      matt 	 * it's in our available memory array.  If it is in the memory array,
   1929       1.1      matt 	 * assume it's in memory-coherent memory.
   1930       1.1      matt 	 */
   1931      1.77  macallan 	if (flags & PMAP_MD_PREFETCHABLE) {
   1932      1.77  macallan 		pte_lo = 0;
   1933      1.77  macallan 	} else
   1934      1.77  macallan 		pte_lo = PTE_G;
   1935      1.77  macallan 
   1936      1.81      matt 	if ((flags & PMAP_NOCACHE) == 0) {
   1937       1.1      matt 		for (mp = mem; mp->size; mp++) {
   1938       1.1      matt 			if (pa >= mp->start && pa < mp->start + mp->size) {
   1939       1.1      matt 				pte_lo = PTE_M;
   1940       1.1      matt 				break;
   1941       1.1      matt 			}
   1942       1.1      matt 		}
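                            		/*
                            		 * On MP systems, MPC603e CPUs get PTE_M regardless of
                            		 * the region check above, so mappings stay coherent
                            		 * across CPUs.
                            		 */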
   1943  1.86.2.1       tls #ifdef MULTIPROCESSOR
   1944  1.86.2.1       tls 		if (((mfpvr() >> 16) & 0xffff) == MPC603e)
   1945  1.86.2.1       tls 			pte_lo = PTE_M;
   1946  1.86.2.1       tls #endif
   1947      1.77  macallan 	} else {
   1948      1.77  macallan 		pte_lo |= PTE_I;
   1949       1.1      matt 	}
   1950       1.1      matt 
   1951       1.1      matt 	if (prot & VM_PROT_WRITE)
   1952       1.1      matt 		pte_lo |= PTE_BW;
   1953       1.1      matt 	else
   1954       1.1      matt 		pte_lo |= PTE_BR;
   1955       1.1      matt 
   1956       1.1      matt 	/*
   1957       1.1      matt 	 * If this was in response to a fault, "pre-fault" the PTE's
   1958       1.1      matt 	 * changed/referenced bit appropriately.
   1959       1.1      matt 	 */
   1960       1.1      matt 	if (flags & VM_PROT_WRITE)
   1961       1.1      matt 		pte_lo |= PTE_CHG;
   1962      1.30       chs 	if (flags & VM_PROT_ALL)
   1963       1.1      matt 		pte_lo |= PTE_REF;
   1964       1.1      matt 
   1965       1.1      matt 	/*
   1966       1.1      matt 	 * We need to know if this page can be executable
   1967       1.1      matt 	 */
   1968       1.1      matt 	flags |= (prot & VM_PROT_EXECUTE);
   1969       1.1      matt 
   1970       1.1      matt 	/*
   1971       1.1      matt 	 * Record mapping for later back-translation and pte spilling.
   1972       1.1      matt 	 * This will overwrite any existing mapping.
   1973       1.1      matt 	 */
   1974       1.1      matt 	error = pmap_pvo_enter(pm, pl, pvo_head, va, pa, pte_lo, flags);
   1975       1.1      matt 
   1976       1.1      matt 	/*
   1977       1.1      matt 	 * Flush the real page from the instruction cache if this page is
   1978       1.1      matt 	 * mapped executable and cacheable and has not been flushed since
   1979       1.1      matt 	 * the last time it was modified.
   1980       1.1      matt 	 */
   1981       1.1      matt 	if (error == 0 &&
   1982       1.1      matt             (flags & VM_PROT_EXECUTE) &&
   1983       1.1      matt             (pte_lo & PTE_I) == 0 &&
   1984       1.1      matt 	    was_exec == 0) {
   1985      1.85      matt 		DPRINTFN(ENTER, " %s", "syncicache");
   1986       1.1      matt 		PMAPCOUNT(exec_synced);
   1987       1.6   thorpej 		pmap_syncicache(pa, PAGE_SIZE);
   1988       1.1      matt 		if (pg != NULL) {
   1989       1.1      matt 			pmap_attr_save(pg, PTE_EXEC);
   1990       1.1      matt 			PMAPCOUNT(exec_cached);
   1991       1.1      matt #if defined(DEBUG) || defined(PMAPDEBUG)
   1992       1.1      matt 			if (pmapdebug & PMAPDEBUG_ENTER)
   1993       1.1      matt 				printf(" marked-as-exec");
   1994       1.1      matt 			else if (pmapdebug & PMAPDEBUG_EXEC)
   1995      1.53   garbled 				printf("[pmap_enter: %#" _PRIxpa ": marked-as-exec]\n",
   1996      1.34      yamt 				    VM_PAGE_TO_PHYS(pg));
   1997       1.1      matt 
   1998       1.1      matt #endif
   1999       1.1      matt 		}
   2000       1.1      matt 	}
   2001       1.1      matt 
   2002      1.85      matt 	DPRINTFN(ENTER, ": error=%d\n", error);
   2003       1.1      matt 
   2004      1.50        ad 	PMAP_UNLOCK();
   2005      1.50        ad 
   2006       1.1      matt 	return error;
   2007       1.1      matt }
   2008       1.1      matt 
   2009       1.1      matt void
   2010      1.68    cegger pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   2011       1.1      matt {
   2012       1.1      matt 	struct mem_region *mp;
   2013       1.2      matt 	register_t pte_lo;
   2014       1.1      matt 	int error;
   2015       1.1      matt 
   2016      1.85      matt #if defined (PMAP_OEA64_BRIDGE) || defined (PMAP_OEA)
   2017       1.1      matt 	if (va < VM_MIN_KERNEL_ADDRESS)
   2018       1.1      matt 		panic("pmap_kenter_pa: attempt to enter "
   2019      1.53   garbled 		    "non-kernel address %#" _PRIxva "!", va);
   2020      1.38   sanjayl #endif
   2021       1.1      matt 
   2022       1.1      matt 	DPRINTFN(KENTER,
   2023      1.85      matt 	    "pmap_kenter_pa(%#" _PRIxva ",%#" _PRIxpa ",%#x)\n", va, pa, prot);
   2024       1.1      matt 
   2025      1.50        ad 	PMAP_LOCK();
   2026      1.50        ad 
   2027       1.1      matt 	/*
   2028       1.1      matt 	 * Assume the page is cache inhibited and access is guarded unless
   2029       1.1      matt 	 * it's in our available memory array.  If it is in the memory array,
   2030       1.1      matt 	 * assume it's in memory-coherent memory.
   2031       1.1      matt 	 */
   2032       1.1      matt 	pte_lo = PTE_IG;
   2033      1.81      matt 	if ((flags & PMAP_NOCACHE) == 0) {
   2034       1.4      matt 		for (mp = mem; mp->size; mp++) {
   2035       1.4      matt 			if (pa >= mp->start && pa < mp->start + mp->size) {
   2036       1.4      matt 				pte_lo = PTE_M;
   2037       1.4      matt 				break;
   2038       1.4      matt 			}
   2039       1.1      matt 		}
   2040  1.86.2.1       tls #ifdef MULTIPROCESSOR
   2041  1.86.2.1       tls 		if (((mfpvr() >> 16) & 0xffff) == MPC603e)
   2042  1.86.2.1       tls 			pte_lo = PTE_M;
   2043  1.86.2.1       tls #endif
   2044       1.1      matt 	}
   2045       1.1      matt 
   2046       1.1      matt 	if (prot & VM_PROT_WRITE)
   2047       1.1      matt 		pte_lo |= PTE_BW;
   2048       1.1      matt 	else
   2049       1.1      matt 		pte_lo |= PTE_BR;
   2050       1.1      matt 
   2051       1.1      matt 	/*
   2052       1.1      matt 	 * We don't care about REF/CHG on PVOs on the unmanaged list.
   2053       1.1      matt 	 */
   2054       1.1      matt 	error = pmap_pvo_enter(pmap_kernel(), &pmap_upvo_pool,
   2055       1.1      matt 	    &pmap_pvo_kunmanaged, va, pa, pte_lo, prot|PMAP_WIRED);
   2056       1.1      matt 
   2057       1.1      matt 	if (error != 0)
   2058      1.53   garbled 		panic("pmap_kenter_pa: failed to enter va %#" _PRIxva " pa %#" _PRIxpa ": %d",
   2059       1.1      matt 		      va, pa, error);
   2060      1.50        ad 
   2061      1.50        ad 	PMAP_UNLOCK();
   2062       1.1      matt }
   2063       1.1      matt 
   2064       1.1      matt void
   2065       1.1      matt pmap_kremove(vaddr_t va, vsize_t len)
   2066       1.1      matt {
   2067       1.1      matt 	if (va < VM_MIN_KERNEL_ADDRESS)
   2068       1.1      matt 		panic("pmap_kremove: attempt to remove "
   2069      1.53   garbled 		    "non-kernel address %#" _PRIxva "!", va);
   2070       1.1      matt 
   2071      1.85      matt 	DPRINTFN(KREMOVE, "pmap_kremove(%#" _PRIxva ",%#" _PRIxva ")\n", va, len);
   2072       1.1      matt 	pmap_remove(pmap_kernel(), va, va + len);
   2073       1.1      matt }
   2074       1.1      matt 
   2075       1.1      matt /*
   2076       1.1      matt  * Remove the given range of mapping entries.
   2077       1.1      matt  */
   2078       1.1      matt void
   2079       1.1      matt pmap_remove(pmap_t pm, vaddr_t va, vaddr_t endva)
   2080       1.1      matt {
   2081      1.33       chs 	struct pvo_head pvol;
   2082       1.1      matt 	struct pvo_entry *pvo;
   2083       1.2      matt 	register_t msr;
   2084       1.1      matt 	int pteidx;
   2085       1.1      matt 
   2086      1.50        ad 	PMAP_LOCK();
   2087      1.33       chs 	LIST_INIT(&pvol);
   2088      1.14       chs 	msr = pmap_interrupts_off();
   2089       1.1      matt 	for (; va < endva; va += PAGE_SIZE) {
   2090       1.1      matt 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
   2091       1.1      matt 		if (pvo != NULL) {
   2092      1.33       chs 			pmap_pvo_remove(pvo, pteidx, &pvol);
   2093       1.1      matt 		}
   2094       1.1      matt 	}
   2095      1.14       chs 	pmap_interrupts_restore(msr);
   2096      1.33       chs 	pmap_pvo_free_list(&pvol);
   2097      1.50        ad 	PMAP_UNLOCK();
   2098       1.1      matt }
   2099       1.1      matt 
   2100       1.1      matt /*
   2101       1.1      matt  * Get the physical page address for the given pmap/virtual address.
   2102       1.1      matt  */
   2103      1.44   thorpej bool
   2104       1.1      matt pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pap)
   2105       1.1      matt {
   2106       1.1      matt 	struct pvo_entry *pvo;
   2107       1.2      matt 	register_t msr;
   2108       1.7      matt 
   2109      1.50        ad 	PMAP_LOCK();
   2110      1.38   sanjayl 
   2111       1.7      matt 	/*
   2112       1.7      matt 	 * If this is a kernel pmap lookup, also check the battable
   2113       1.7      matt 	 * and if we get a hit, translate the VA to a PA using the
    2114      1.36   nathanw 	 * BAT entries.  Don't check VM_MAX_KERNEL_ADDRESS when
    2115       1.7      matt 	 * it would wrap back to 0 (i.e. when KERNEL2_SR is 15).
   2116       1.7      matt 	 */
   2117       1.7      matt 	if (pm == pmap_kernel() &&
   2118       1.7      matt 	    (va < VM_MIN_KERNEL_ADDRESS ||
   2119       1.7      matt 	     (KERNEL2_SR < 15 && VM_MAX_KERNEL_ADDRESS <= va))) {
   2120       1.8      matt 		KASSERT((va >> ADDR_SR_SHFT) != USER_SR);
   2121      1.53   garbled #if defined (PMAP_OEA)
   2122      1.55   garbled #ifdef PPC_OEA601
   2123      1.55   garbled 		if ((MFPVR() >> 16) == MPC601) {
   2124      1.24    kleink 			register_t batu = battable[va >> 23].batu;
   2125      1.24    kleink 			register_t batl = battable[va >> 23].batl;
   2126      1.24    kleink 			register_t sr = iosrtable[va >> ADDR_SR_SHFT];
   2127      1.24    kleink 			if (BAT601_VALID_P(batl) &&
   2128      1.24    kleink 			    BAT601_VA_MATCH_P(batu, batl, va)) {
   2129      1.24    kleink 				register_t mask =
   2130      1.24    kleink 				    (~(batl & BAT601_BSM) << 17) & ~0x1ffffL;
   2131      1.29    briggs 				if (pap)
   2132      1.29    briggs 					*pap = (batl & mask) | (va & ~mask);
   2133      1.50        ad 				PMAP_UNLOCK();
   2134      1.45   thorpej 				return true;
   2135      1.24    kleink 			} else if (SR601_VALID_P(sr) &&
   2136      1.24    kleink 				   SR601_PA_MATCH_P(sr, va)) {
   2137      1.29    briggs 				if (pap)
   2138      1.29    briggs 					*pap = va;
   2139      1.50        ad 				PMAP_UNLOCK();
   2140      1.45   thorpej 				return true;
   2141      1.24    kleink 			}
   2142      1.55   garbled 		} else
   2143      1.55   garbled #endif /* PPC_OEA601 */
   2144      1.55   garbled 		{
   2145      1.83      matt 			register_t batu = battable[BAT_VA2IDX(va)].batu;
   2146      1.55   garbled 			if (BAT_VALID_P(batu,0) && BAT_VA_MATCH_P(batu,va)) {
   2147      1.83      matt 				register_t batl = battable[BAT_VA2IDX(va)].batl;
   2148      1.55   garbled 				register_t mask =
   2149      1.83      matt 				    (~(batu & (BAT_XBL|BAT_BL)) << 15) & ~0x1ffffL;
   2150      1.55   garbled 				if (pap)
   2151      1.55   garbled 					*pap = (batl & mask) | (va & ~mask);
   2152      1.55   garbled 				PMAP_UNLOCK();
   2153      1.55   garbled 				return true;
   2154      1.55   garbled 			}
   2155       1.7      matt 		}
                             		PMAP_UNLOCK();
    2156      1.45   thorpej 		return false;
   2157      1.53   garbled #elif defined (PMAP_OEA64_BRIDGE)
    2158      1.52   garbled 		if (va >= SEGMENT_LENGTH)
    2159      1.52   garbled 			panic("%s: pm: %s va >= SEGMENT_LENGTH, va: 0x%08lx\n",
    2160      1.52   garbled 			    __func__, (pm == pmap_kernel() ? "kernel" : "user"), va);
    2161      1.52   garbled 		else {
    2162      1.52   garbled 			if (pap)
    2163      1.52   garbled 				*pap = va;
    2164      1.52   garbled 			PMAP_UNLOCK();
    2165      1.52   garbled 			return true;
    2166      1.52   garbled 		}
   2167      1.53   garbled #elif defined (PMAP_OEA64)
   2168      1.38   sanjayl #error PPC_OEA64 not supported
   2169      1.38   sanjayl #endif /* PPC_OEA */
   2170       1.7      matt 	}
   2171       1.1      matt 
   2172       1.1      matt 	msr = pmap_interrupts_off();
   2173       1.1      matt 	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
   2174       1.1      matt 	if (pvo != NULL) {
   2175       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2176      1.29    briggs 		if (pap)
   2177      1.29    briggs 			*pap = (pvo->pvo_pte.pte_lo & PTE_RPGN)
   2178      1.29    briggs 			    | (va & ADDR_POFF);
   2179       1.1      matt 	}
   2180       1.1      matt 	pmap_interrupts_restore(msr);
   2181      1.50        ad 	PMAP_UNLOCK();
   2182       1.1      matt 	return pvo != NULL;
   2183       1.1      matt }
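
                             /*
                              * Illustrative sketch: the usual calling pattern for pmap_extract().
                              * The caller supplies a paddr_t to fill in and must cope with a false
                              * return when no translation (PVO or BAT) exists for the address.
                              */
                             #if 0
                             	paddr_t pa;
                             
                             	if (!pmap_extract(pmap_kernel(), va, &pa))
                             		panic("no translation for va %#" _PRIxva, va);
                             #endif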
   2184       1.1      matt 
   2185       1.1      matt /*
   2186       1.1      matt  * Lower the protection on the specified range of this pmap.
   2187       1.1      matt  */
   2188       1.1      matt void
   2189       1.1      matt pmap_protect(pmap_t pm, vaddr_t va, vaddr_t endva, vm_prot_t prot)
   2190       1.1      matt {
   2191       1.1      matt 	struct pvo_entry *pvo;
   2192       1.2      matt 	volatile struct pte *pt;
   2193       1.2      matt 	register_t msr;
   2194       1.1      matt 	int pteidx;
   2195       1.1      matt 
   2196       1.1      matt 	/*
   2197       1.1      matt 	 * Since this routine only downgrades protection, we should
   2198      1.14       chs 	 * always be called with at least one bit not set.
   2199       1.1      matt 	 */
   2200      1.14       chs 	KASSERT(prot != VM_PROT_ALL);
   2201       1.1      matt 
   2202       1.1      matt 	/*
   2203       1.1      matt 	 * If there is no protection, this is equivalent to
    2204       1.1      matt 	 * removing the range from the pmap.
   2205       1.1      matt 	 */
   2206       1.1      matt 	if ((prot & VM_PROT_READ) == 0) {
   2207       1.1      matt 		pmap_remove(pm, va, endva);
   2208       1.1      matt 		return;
   2209       1.1      matt 	}
   2210       1.1      matt 
   2211      1.50        ad 	PMAP_LOCK();
   2212      1.50        ad 
   2213       1.1      matt 	msr = pmap_interrupts_off();
   2214       1.6   thorpej 	for (; va < endva; va += PAGE_SIZE) {
   2215       1.1      matt 		pvo = pmap_pvo_find_va(pm, va, &pteidx);
   2216       1.1      matt 		if (pvo == NULL)
   2217       1.1      matt 			continue;
   2218       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2219       1.1      matt 
   2220       1.1      matt 		/*
   2221       1.1      matt 		 * Revoke executable if asked to do so.
   2222       1.1      matt 		 */
   2223       1.1      matt 		if ((prot & VM_PROT_EXECUTE) == 0)
   2224      1.14       chs 			pvo_clear_exec(pvo);
   2225       1.1      matt 
   2226       1.1      matt #if 0
   2227       1.1      matt 		/*
   2228       1.1      matt 		 * If the page is already read-only, no change
   2229       1.1      matt 		 * needs to be made.
   2230       1.1      matt 		 */
   2231       1.1      matt 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR)
   2232       1.1      matt 			continue;
   2233       1.1      matt #endif
   2234       1.1      matt 		/*
   2235       1.1      matt 		 * Grab the PTE pointer before we diddle with
   2236       1.1      matt 		 * the cached PTE copy.
   2237       1.1      matt 		 */
   2238       1.1      matt 		pt = pmap_pvo_to_pte(pvo, pteidx);
   2239       1.1      matt 		/*
   2240       1.1      matt 		 * Change the protection of the page.
   2241       1.1      matt 		 */
   2242       1.1      matt 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
   2243       1.1      matt 		pvo->pvo_pte.pte_lo |= PTE_BR;
   2244       1.1      matt 
   2245       1.1      matt 		/*
   2246       1.1      matt 		 * If the PVO is in the page table, update
    2247       1.1      matt 	 * that PTE as well.
   2248       1.1      matt 		 */
   2249       1.1      matt 		if (pt != NULL) {
   2250       1.1      matt 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   2251      1.12      matt 			PVO_WHERE(pvo, PMAP_PROTECT);
   2252       1.1      matt 			PMAPCOUNT(ptes_changed);
   2253       1.1      matt 		}
   2254       1.1      matt 
   2255       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2256       1.1      matt 	}
   2257       1.1      matt 	pmap_interrupts_restore(msr);
   2258      1.50        ad 	PMAP_UNLOCK();
   2259       1.1      matt }
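
                             /*
                              * Illustrative sketch: pmap_protect() may only reduce permissions, so
                              * a caller revoking write access on a range does something like the
                              * call below; restoring write access must go through pmap_enter().
                              */
                             #if 0
                             	pmap_protect(pm, va, va + len, VM_PROT_READ | VM_PROT_EXECUTE);
                             #endif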
   2260       1.1      matt 
   2261       1.1      matt void
   2262       1.1      matt pmap_unwire(pmap_t pm, vaddr_t va)
   2263       1.1      matt {
   2264       1.1      matt 	struct pvo_entry *pvo;
   2265       1.2      matt 	register_t msr;
   2266       1.1      matt 
   2267      1.50        ad 	PMAP_LOCK();
   2268       1.1      matt 	msr = pmap_interrupts_off();
   2269       1.1      matt 	pvo = pmap_pvo_find_va(pm, va, NULL);
   2270       1.1      matt 	if (pvo != NULL) {
   2271      1.39      matt 		if (PVO_WIRED_P(pvo)) {
   2272       1.1      matt 			pvo->pvo_vaddr &= ~PVO_WIRED;
   2273       1.1      matt 			pm->pm_stats.wired_count--;
   2274       1.1      matt 		}
   2275       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2276       1.1      matt 	}
   2277       1.1      matt 	pmap_interrupts_restore(msr);
   2278      1.50        ad 	PMAP_UNLOCK();
   2279       1.1      matt }
   2280       1.1      matt 
   2281       1.1      matt /*
   2282       1.1      matt  * Lower the protection on the specified physical page.
   2283       1.1      matt  */
   2284       1.1      matt void
   2285       1.1      matt pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   2286       1.1      matt {
   2287      1.33       chs 	struct pvo_head *pvo_head, pvol;
   2288       1.1      matt 	struct pvo_entry *pvo, *next_pvo;
   2289       1.2      matt 	volatile struct pte *pt;
   2290       1.2      matt 	register_t msr;
   2291       1.1      matt 
   2292      1.50        ad 	PMAP_LOCK();
   2293      1.50        ad 
   2294      1.14       chs 	KASSERT(prot != VM_PROT_ALL);
   2295      1.33       chs 	LIST_INIT(&pvol);
   2296       1.1      matt 	msr = pmap_interrupts_off();
   2297       1.1      matt 
   2298       1.1      matt 	/*
   2299       1.1      matt 	 * When UVM reuses a page, it does a pmap_page_protect with
   2300       1.1      matt 	 * VM_PROT_NONE.  At that point, we can clear the exec flag
   2301       1.1      matt 	 * since we know the page will have different contents.
   2302       1.1      matt 	 */
   2303       1.1      matt 	if ((prot & VM_PROT_READ) == 0) {
   2304      1.85      matt 		DPRINTFN(EXEC, "[pmap_page_protect: %#" _PRIxpa ": clear-exec]\n",
   2305      1.85      matt 		    VM_PAGE_TO_PHYS(pg));
   2306       1.1      matt 		if (pmap_attr_fetch(pg) & PTE_EXEC) {
   2307       1.1      matt 			PMAPCOUNT(exec_uncached_page_protect);
   2308       1.1      matt 			pmap_attr_clear(pg, PTE_EXEC);
   2309       1.1      matt 		}
   2310       1.1      matt 	}
   2311       1.1      matt 
   2312       1.1      matt 	pvo_head = vm_page_to_pvoh(pg);
   2313       1.1      matt 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
   2314       1.1      matt 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
   2315       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2316       1.1      matt 
   2317       1.1      matt 		/*
   2318       1.1      matt 		 * Downgrading to no mapping at all, we just remove the entry.
   2319       1.1      matt 		 */
   2320       1.1      matt 		if ((prot & VM_PROT_READ) == 0) {
   2321      1.33       chs 			pmap_pvo_remove(pvo, -1, &pvol);
   2322       1.1      matt 			continue;
   2323       1.1      matt 		}
   2324       1.1      matt 
   2325       1.1      matt 		/*
   2326       1.1      matt 		 * If EXEC permission is being revoked, just clear the
   2327       1.1      matt 		 * flag in the PVO.
   2328       1.1      matt 		 */
   2329       1.1      matt 		if ((prot & VM_PROT_EXECUTE) == 0)
   2330      1.14       chs 			pvo_clear_exec(pvo);
   2331       1.1      matt 
   2332       1.1      matt 		/*
   2333       1.1      matt 		 * If this entry is already RO, don't diddle with the
   2334       1.1      matt 		 * page table.
   2335       1.1      matt 		 */
   2336       1.1      matt 		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
   2337       1.1      matt 			PMAP_PVO_CHECK(pvo);
   2338       1.1      matt 			continue;
   2339       1.1      matt 		}
   2340       1.1      matt 
   2341       1.1      matt 		/*
    2342       1.1      matt 	 * Grab the PTE before we diddle the bits so
   2343       1.1      matt 		 * pvo_to_pte can verify the pte contents are as
   2344       1.1      matt 		 * expected.
   2345       1.1      matt 		 */
   2346       1.1      matt 		pt = pmap_pvo_to_pte(pvo, -1);
   2347       1.1      matt 		pvo->pvo_pte.pte_lo &= ~PTE_PP;
   2348       1.1      matt 		pvo->pvo_pte.pte_lo |= PTE_BR;
   2349       1.1      matt 		if (pt != NULL) {
   2350       1.1      matt 			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
   2351      1.12      matt 			PVO_WHERE(pvo, PMAP_PAGE_PROTECT);
   2352       1.1      matt 			PMAPCOUNT(ptes_changed);
   2353       1.1      matt 		}
   2354       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2355       1.1      matt 	}
   2356       1.1      matt 	pmap_interrupts_restore(msr);
   2357      1.33       chs 	pmap_pvo_free_list(&pvol);
   2358      1.50        ad 
   2359      1.50        ad 	PMAP_UNLOCK();
   2360       1.1      matt }
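
                             /*
                              * Illustrative sketch: the two common invocations.  UVM strips every
                              * mapping of a page it is about to reuse by passing VM_PROT_NONE;
                              * passing VM_PROT_READ leaves the mappings in place but makes them
                              * read-only and non-executable.
                              */
                             #if 0
                             	pmap_page_protect(pg, VM_PROT_NONE);	/* remove all mappings */
                             	pmap_page_protect(pg, VM_PROT_READ);	/* downgrade to read-only */
                             #endif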
   2361       1.1      matt 
   2362       1.1      matt /*
   2363       1.1      matt  * Activate the address space for the specified process.  If the process
   2364       1.1      matt  * is the current process, load the new MMU context.
   2365       1.1      matt  */
   2366       1.1      matt void
   2367       1.1      matt pmap_activate(struct lwp *l)
   2368       1.1      matt {
   2369      1.69     rmind 	struct pcb *pcb = lwp_getpcb(l);
   2370       1.1      matt 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
   2371       1.1      matt 
   2372       1.1      matt 	DPRINTFN(ACTIVATE,
   2373      1.85      matt 	    "pmap_activate: lwp %p (curlwp %p)\n", l, curlwp);
   2374       1.1      matt 
   2375       1.1      matt 	/*
   2376      1.70     skrll 	 * XXX Normally performed in cpu_lwp_fork().
   2377       1.1      matt 	 */
   2378      1.13      matt 	pcb->pcb_pm = pmap;
   2379      1.17      matt 
   2380      1.17      matt 	/*
    2381      1.17      matt 	 * In theory, the SR registers need only be valid on return
    2382      1.17      matt 	 * to user space, so we could wait and load them there.
    2383      1.17      matt 	 */
   2384      1.17      matt 	if (l == curlwp) {
   2385      1.17      matt 		/* Store pointer to new current pmap. */
   2386      1.17      matt 		curpm = pmap;
   2387      1.17      matt 	}
   2388       1.1      matt }
   2389       1.1      matt 
   2390       1.1      matt /*
   2391       1.1      matt  * Deactivate the specified process's address space.
   2392       1.1      matt  */
   2393       1.1      matt void
   2394       1.1      matt pmap_deactivate(struct lwp *l)
   2395       1.1      matt {
   2396       1.1      matt }
   2397       1.1      matt 
   2398      1.44   thorpej bool
   2399       1.1      matt pmap_query_bit(struct vm_page *pg, int ptebit)
   2400       1.1      matt {
   2401       1.1      matt 	struct pvo_entry *pvo;
   2402       1.2      matt 	volatile struct pte *pt;
   2403       1.2      matt 	register_t msr;
   2404       1.1      matt 
   2405      1.50        ad 	PMAP_LOCK();
   2406      1.50        ad 
   2407      1.50        ad 	if (pmap_attr_fetch(pg) & ptebit) {
   2408      1.50        ad 		PMAP_UNLOCK();
   2409      1.45   thorpej 		return true;
   2410      1.50        ad 	}
   2411      1.14       chs 
   2412       1.1      matt 	msr = pmap_interrupts_off();
   2413       1.1      matt 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
   2414       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2415       1.1      matt 		/*
    2416       1.1      matt 		 * See if we saved the bit off.  If so, cache it and return
   2417       1.1      matt 		 * success.
   2418       1.1      matt 		 */
   2419       1.1      matt 		if (pvo->pvo_pte.pte_lo & ptebit) {
   2420       1.1      matt 			pmap_attr_save(pg, ptebit);
   2421       1.1      matt 			PMAP_PVO_CHECK(pvo);		/* sanity check */
   2422       1.1      matt 			pmap_interrupts_restore(msr);
   2423      1.50        ad 			PMAP_UNLOCK();
   2424      1.45   thorpej 			return true;
   2425       1.1      matt 		}
   2426       1.1      matt 	}
   2427       1.1      matt 	/*
   2428       1.1      matt 	 * No luck, now go thru the hard part of looking at the ptes
   2429       1.1      matt 	 * themselves.  Sync so any pending REF/CHG bits are flushed
   2430       1.1      matt 	 * to the PTEs.
   2431       1.1      matt 	 */
   2432       1.1      matt 	SYNC();
   2433       1.1      matt 	LIST_FOREACH(pvo, vm_page_to_pvoh(pg), pvo_vlink) {
   2434       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2435       1.1      matt 		/*
    2436       1.1      matt 		 * See if this pvo has a valid PTE.  If so, fetch the
    2437       1.1      matt 		 * REF/CHG bits from the valid PTE.  If the appropriate
    2438       1.1      matt 		 * ptebit is set, cache it and return success.
   2439       1.1      matt 		 */
   2440       1.1      matt 		pt = pmap_pvo_to_pte(pvo, -1);
   2441       1.1      matt 		if (pt != NULL) {
   2442       1.1      matt 			pmap_pte_synch(pt, &pvo->pvo_pte);
   2443       1.1      matt 			if (pvo->pvo_pte.pte_lo & ptebit) {
   2444       1.1      matt 				pmap_attr_save(pg, ptebit);
   2445       1.1      matt 				PMAP_PVO_CHECK(pvo);		/* sanity check */
   2446       1.1      matt 				pmap_interrupts_restore(msr);
   2447      1.50        ad 				PMAP_UNLOCK();
   2448      1.45   thorpej 				return true;
   2449       1.1      matt 			}
   2450       1.1      matt 		}
   2451       1.1      matt 	}
   2452       1.1      matt 	pmap_interrupts_restore(msr);
   2453      1.50        ad 	PMAP_UNLOCK();
   2454      1.45   thorpej 	return false;
   2455       1.1      matt }
   2456       1.1      matt 
   2457      1.44   thorpej bool
   2458       1.1      matt pmap_clear_bit(struct vm_page *pg, int ptebit)
   2459       1.1      matt {
   2460       1.1      matt 	struct pvo_head *pvoh = vm_page_to_pvoh(pg);
   2461       1.1      matt 	struct pvo_entry *pvo;
   2462       1.2      matt 	volatile struct pte *pt;
   2463       1.2      matt 	register_t msr;
   2464       1.1      matt 	int rv = 0;
   2465       1.1      matt 
   2466      1.50        ad 	PMAP_LOCK();
   2467       1.1      matt 	msr = pmap_interrupts_off();
   2468       1.1      matt 
   2469       1.1      matt 	/*
   2470       1.1      matt 	 * Fetch the cache value
   2471       1.1      matt 	 */
   2472       1.1      matt 	rv |= pmap_attr_fetch(pg);
   2473       1.1      matt 
   2474       1.1      matt 	/*
   2475       1.1      matt 	 * Clear the cached value.
   2476       1.1      matt 	 */
   2477       1.1      matt 	pmap_attr_clear(pg, ptebit);
   2478       1.1      matt 
   2479       1.1      matt 	/*
   2480       1.1      matt 	 * Sync so any pending REF/CHG bits are flushed to the PTEs (so we
   2481       1.1      matt 	 * can reset the right ones).  Note that since the pvo entries and
   2482       1.1      matt 	 * list heads are accessed via BAT0 and are never placed in the
   2483       1.1      matt 	 * page table, we don't have to worry about further accesses setting
   2484       1.1      matt 	 * the REF/CHG bits.
   2485       1.1      matt 	 */
   2486       1.1      matt 	SYNC();
   2487       1.1      matt 
   2488       1.1      matt 	/*
    2489       1.1      matt 	 * For each pvo entry, clear the ptebit in its cached copy.  If
    2490       1.1      matt 	 * the pvo has a valid PTE, clear the ptebit from that PTE as well.
   2491       1.1      matt 	 */
   2492       1.1      matt 	LIST_FOREACH(pvo, pvoh, pvo_vlink) {
   2493       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2494       1.1      matt 		pt = pmap_pvo_to_pte(pvo, -1);
   2495       1.1      matt 		if (pt != NULL) {
   2496       1.1      matt 			/*
   2497       1.1      matt 			 * Only sync the PTE if the bit we are looking
   2498       1.1      matt 			 * for is not already set.
   2499       1.1      matt 			 */
   2500       1.1      matt 			if ((pvo->pvo_pte.pte_lo & ptebit) == 0)
   2501       1.1      matt 				pmap_pte_synch(pt, &pvo->pvo_pte);
   2502       1.1      matt 			/*
   2503       1.1      matt 			 * If the bit we are looking for was already set,
   2504       1.1      matt 			 * clear that bit in the pte.
   2505       1.1      matt 			 */
   2506       1.1      matt 			if (pvo->pvo_pte.pte_lo & ptebit)
   2507       1.1      matt 				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
   2508       1.1      matt 		}
   2509       1.1      matt 		rv |= pvo->pvo_pte.pte_lo & (PTE_CHG|PTE_REF);
   2510       1.1      matt 		pvo->pvo_pte.pte_lo &= ~ptebit;
   2511       1.1      matt 		PMAP_PVO_CHECK(pvo);		/* sanity check */
   2512       1.1      matt 	}
   2513       1.1      matt 	pmap_interrupts_restore(msr);
   2514      1.14       chs 
   2515       1.1      matt 	/*
   2516       1.1      matt 	 * If we are clearing the modify bit and this page was marked EXEC
   2517       1.1      matt 	 * and the user of the page thinks the page was modified, then we
   2518       1.1      matt 	 * need to clean it from the icache if it's mapped or clear the EXEC
   2519       1.1      matt 	 * bit if it's not mapped.  The page itself might not have the CHG
   2520       1.1      matt 	 * bit set if the modification was done via DMA to the page.
   2521       1.1      matt 	 */
   2522       1.1      matt 	if ((ptebit & PTE_CHG) && (rv & PTE_EXEC)) {
   2523       1.1      matt 		if (LIST_EMPTY(pvoh)) {
   2524      1.85      matt 			DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": clear-exec]\n",
   2525      1.85      matt 			    VM_PAGE_TO_PHYS(pg));
   2526       1.1      matt 			pmap_attr_clear(pg, PTE_EXEC);
   2527       1.1      matt 			PMAPCOUNT(exec_uncached_clear_modify);
   2528       1.1      matt 		} else {
   2529      1.85      matt 			DPRINTFN(EXEC, "[pmap_clear_bit: %#" _PRIxpa ": syncicache]\n",
   2530      1.85      matt 			    VM_PAGE_TO_PHYS(pg));
   2531      1.34      yamt 			pmap_syncicache(VM_PAGE_TO_PHYS(pg), PAGE_SIZE);
   2532       1.1      matt 			PMAPCOUNT(exec_synced_clear_modify);
   2533       1.1      matt 		}
   2534       1.1      matt 	}
   2535      1.50        ad 	PMAP_UNLOCK();
   2536       1.1      matt 	return (rv & ptebit) != 0;
   2537       1.1      matt }
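
                             /*
                              * Illustrative sketch, assuming the usual NetBSD arrangement in which
                              * the clear-modify and is-referenced pmap hooks are thin wrappers
                              * around these two routines and the PTE_CHG/PTE_REF attribute bits:
                              */
                             #if 0
                             	bool modified, referenced;
                             
                             	modified = pmap_clear_bit(pg, PTE_CHG);		/* clear-modify */
                             	referenced = pmap_query_bit(pg, PTE_REF);	/* is-referenced */
                             #endif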
   2538       1.1      matt 
   2539       1.1      matt void
   2540       1.1      matt pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   2541       1.1      matt {
   2542       1.1      matt 	struct pvo_entry *pvo;
   2543       1.1      matt 	size_t offset = va & ADDR_POFF;
   2544       1.1      matt 	int s;
   2545       1.1      matt 
   2546      1.50        ad 	PMAP_LOCK();
   2547       1.1      matt 	s = splvm();
   2548       1.1      matt 	while (len > 0) {
   2549       1.6   thorpej 		size_t seglen = PAGE_SIZE - offset;
   2550       1.1      matt 		if (seglen > len)
   2551       1.1      matt 			seglen = len;
   2552       1.1      matt 		pvo = pmap_pvo_find_va(p->p_vmspace->vm_map.pmap, va, NULL);
   2553      1.39      matt 		if (pvo != NULL && PVO_EXECUTABLE_P(pvo)) {
   2554       1.1      matt 			pmap_syncicache(
   2555       1.1      matt 			    (pvo->pvo_pte.pte_lo & PTE_RPGN) | offset, seglen);
   2556       1.1      matt 			PMAP_PVO_CHECK(pvo);
   2557       1.1      matt 		}
   2558       1.1      matt 		va += seglen;
   2559       1.1      matt 		len -= seglen;
   2560       1.1      matt 		offset = 0;
   2561       1.1      matt 	}
   2562       1.1      matt 	splx(s);
   2563      1.50        ad 	PMAP_UNLOCK();
   2564       1.1      matt }
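
                             /*
                              * Illustrative sketch: pmap_procwr() is meant for paths that write
                              * instructions into another process (e.g. a debugger poking text via
                              * ptrace).  After copying the new code, the writer flushes it:
                              */
                             #if 0
                             	pmap_procwr(p, va, len);	/* make icache coherent with new text */
                             #endif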
   2565       1.1      matt 
   2566       1.1      matt #if defined(DEBUG) || defined(PMAPCHECK) || defined(DDB)
   2567       1.1      matt void
   2568       1.2      matt pmap_pte_print(volatile struct pte *pt)
   2569       1.1      matt {
   2570       1.1      matt 	printf("PTE %p: ", pt);
   2571      1.38   sanjayl 
    2573       1.1      matt 	/* High word: */
    2574      1.54   mlelstv 	printf("%#" _PRIxpte ": [", pt->pte_hi);
   2578      1.38   sanjayl 
   2579       1.1      matt 	printf("%c ", (pt->pte_hi & PTE_VALID) ? 'v' : 'i');
   2580       1.1      matt 	printf("%c ", (pt->pte_hi & PTE_HID) ? 'h' : '-');
   2581      1.38   sanjayl 
   2582      1.54   mlelstv 	printf("%#" _PRIxpte " %#" _PRIxpte "",
   2583      1.38   sanjayl 	    (pt->pte_hi &~ PTE_VALID)>>PTE_VSID_SHFT,
   2584      1.38   sanjayl 	    pt->pte_hi & PTE_API);
    2586      1.54   mlelstv 	printf(" (va %#" _PRIxva ")] ", pmap_pte_to_va(pt));
   2590      1.38   sanjayl 
   2591       1.1      matt 	/* Low word: */
    2593      1.54   mlelstv 	printf(" %#" _PRIxpte ": [", pt->pte_lo);
    2594      1.54   mlelstv 	printf("%#" _PRIxpte "... ", pt->pte_lo >> 12);
   2599       1.1      matt 	printf("%c ", (pt->pte_lo & PTE_REF) ? 'r' : 'u');
   2600       1.1      matt 	printf("%c ", (pt->pte_lo & PTE_CHG) ? 'c' : 'n');
   2601       1.1      matt 	printf("%c", (pt->pte_lo & PTE_W) ? 'w' : '.');
   2602       1.1      matt 	printf("%c", (pt->pte_lo & PTE_I) ? 'i' : '.');
   2603       1.1      matt 	printf("%c", (pt->pte_lo & PTE_M) ? 'm' : '.');
   2604       1.1      matt 	printf("%c ", (pt->pte_lo & PTE_G) ? 'g' : '.');
   2605       1.1      matt 	switch (pt->pte_lo & PTE_PP) {
   2606       1.1      matt 	case PTE_BR: printf("br]\n"); break;
   2607       1.1      matt 	case PTE_BW: printf("bw]\n"); break;
   2608       1.1      matt 	case PTE_SO: printf("so]\n"); break;
   2609       1.1      matt 	case PTE_SW: printf("sw]\n"); break;
   2610       1.1      matt 	}
   2611       1.1      matt }
   2612       1.1      matt #endif
   2613       1.1      matt 
   2614       1.1      matt #if defined(DDB)
   2615       1.1      matt void
   2616       1.1      matt pmap_pteg_check(void)
   2617       1.1      matt {
   2618       1.2      matt 	volatile struct pte *pt;
   2619       1.1      matt 	int i;
   2620       1.1      matt 	int ptegidx;
   2621       1.1      matt 	u_int p_valid = 0;
   2622       1.1      matt 	u_int s_valid = 0;
   2623       1.1      matt 	u_int invalid = 0;
   2624      1.38   sanjayl 
   2625       1.1      matt 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2626       1.1      matt 		for (pt = pmap_pteg_table[ptegidx].pt, i = 8; --i >= 0; pt++) {
   2627       1.1      matt 			if (pt->pte_hi & PTE_VALID) {
    2628       1.1      matt 				if (pt->pte_hi & PTE_HID)
    2629       1.1      matt 					s_valid++;
    2630       1.1      matt 				else
    2632       1.1      matt 					p_valid++;
   2634       1.1      matt 			} else
   2635       1.1      matt 				invalid++;
   2636       1.1      matt 		}
   2637       1.1      matt 	}
   2638       1.1      matt 	printf("pteg_check: v(p) %#x (%d), v(s) %#x (%d), i %#x (%d)\n",
   2639       1.1      matt 		p_valid, p_valid, s_valid, s_valid,
   2640       1.1      matt 		invalid, invalid);
   2641       1.1      matt }
   2642       1.1      matt 
   2643       1.1      matt void
   2644       1.1      matt pmap_print_mmuregs(void)
   2645       1.1      matt {
   2646       1.1      matt 	int i;
   2647  1.86.2.3       tls #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
   2648       1.1      matt 	u_int cpuvers;
   2649  1.86.2.3       tls #endif
   2650      1.53   garbled #ifndef PMAP_OEA64
   2651       1.1      matt 	vaddr_t addr;
   2652       1.2      matt 	register_t soft_sr[16];
   2653      1.18      matt #endif
   2654      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
   2655       1.1      matt 	struct bat soft_ibat[4];
   2656       1.1      matt 	struct bat soft_dbat[4];
   2657      1.38   sanjayl #endif
   2658      1.53   garbled 	paddr_t sdr1;
   2659       1.1      matt 
   2660  1.86.2.3       tls #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
   2661       1.1      matt 	cpuvers = MFPVR() >> 16;
   2662  1.86.2.3       tls #endif
   2663      1.35     perry 	__asm volatile ("mfsdr1 %0" : "=r"(sdr1));
   2664      1.53   garbled #ifndef PMAP_OEA64
   2665      1.16    kleink 	addr = 0;
   2666      1.27       chs 	for (i = 0; i < 16; i++) {
   2667       1.1      matt 		soft_sr[i] = MFSRIN(addr);
   2668       1.1      matt 		addr += (1 << ADDR_SR_SHFT);
   2669       1.1      matt 	}
   2670      1.18      matt #endif
   2671       1.1      matt 
   2672      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA_BRIDGE)
   2673       1.1      matt 	/* read iBAT (601: uBAT) registers */
   2674      1.35     perry 	__asm volatile ("mfibatu %0,0" : "=r"(soft_ibat[0].batu));
   2675      1.35     perry 	__asm volatile ("mfibatl %0,0" : "=r"(soft_ibat[0].batl));
   2676      1.35     perry 	__asm volatile ("mfibatu %0,1" : "=r"(soft_ibat[1].batu));
   2677      1.35     perry 	__asm volatile ("mfibatl %0,1" : "=r"(soft_ibat[1].batl));
   2678      1.35     perry 	__asm volatile ("mfibatu %0,2" : "=r"(soft_ibat[2].batu));
   2679      1.35     perry 	__asm volatile ("mfibatl %0,2" : "=r"(soft_ibat[2].batl));
   2680      1.35     perry 	__asm volatile ("mfibatu %0,3" : "=r"(soft_ibat[3].batu));
   2681      1.35     perry 	__asm volatile ("mfibatl %0,3" : "=r"(soft_ibat[3].batl));
   2682       1.1      matt 
   2683       1.1      matt 
   2684       1.1      matt 	if (cpuvers != MPC601) {
   2685       1.1      matt 		/* read dBAT registers */
   2686      1.35     perry 		__asm volatile ("mfdbatu %0,0" : "=r"(soft_dbat[0].batu));
   2687      1.35     perry 		__asm volatile ("mfdbatl %0,0" : "=r"(soft_dbat[0].batl));
   2688      1.35     perry 		__asm volatile ("mfdbatu %0,1" : "=r"(soft_dbat[1].batu));
   2689      1.35     perry 		__asm volatile ("mfdbatl %0,1" : "=r"(soft_dbat[1].batl));
   2690      1.35     perry 		__asm volatile ("mfdbatu %0,2" : "=r"(soft_dbat[2].batu));
   2691      1.35     perry 		__asm volatile ("mfdbatl %0,2" : "=r"(soft_dbat[2].batl));
   2692      1.35     perry 		__asm volatile ("mfdbatu %0,3" : "=r"(soft_dbat[3].batu));
   2693      1.35     perry 		__asm volatile ("mfdbatl %0,3" : "=r"(soft_dbat[3].batl));
   2694       1.1      matt 	}
   2695      1.38   sanjayl #endif
   2696       1.1      matt 
   2697      1.54   mlelstv 	printf("SDR1:\t%#" _PRIxpa "\n", sdr1);
   2698      1.53   garbled #ifndef PMAP_OEA64
   2699       1.1      matt 	printf("SR[]:\t");
   2700      1.27       chs 	for (i = 0; i < 4; i++)
   2701      1.53   garbled 		printf("0x%08lx,   ", soft_sr[i]);
   2702       1.1      matt 	printf("\n\t");
   2703      1.27       chs 	for ( ; i < 8; i++)
   2704      1.53   garbled 		printf("0x%08lx,   ", soft_sr[i]);
   2705       1.1      matt 	printf("\n\t");
   2706      1.27       chs 	for ( ; i < 12; i++)
   2707      1.53   garbled 		printf("0x%08lx,   ", soft_sr[i]);
   2708       1.1      matt 	printf("\n\t");
   2709      1.27       chs 	for ( ; i < 16; i++)
   2710      1.53   garbled 		printf("0x%08lx,   ", soft_sr[i]);
   2711       1.1      matt 	printf("\n");
   2712      1.18      matt #endif
   2713       1.1      matt 
   2714      1.53   garbled #if defined(PMAP_OEA) || defined(PMAP_OEA_BRIDGE)
   2715       1.1      matt 	printf("%cBAT[]:\t", cpuvers == MPC601 ? 'u' : 'i');
   2716      1.27       chs 	for (i = 0; i < 4; i++) {
   2717       1.2      matt 		printf("0x%08lx 0x%08lx, ",
   2718       1.1      matt 			soft_ibat[i].batu, soft_ibat[i].batl);
   2719       1.1      matt 		if (i == 1)
   2720       1.1      matt 			printf("\n\t");
   2721       1.1      matt 	}
   2722       1.1      matt 	if (cpuvers != MPC601) {
   2723       1.1      matt 		printf("\ndBAT[]:\t");
   2724      1.27       chs 		for (i = 0; i < 4; i++) {
   2725       1.2      matt 			printf("0x%08lx 0x%08lx, ",
   2726       1.1      matt 				soft_dbat[i].batu, soft_dbat[i].batl);
   2727       1.1      matt 			if (i == 1)
   2728       1.1      matt 				printf("\n\t");
   2729       1.1      matt 		}
   2730       1.1      matt 	}
   2731       1.1      matt 	printf("\n");
   2732      1.53   garbled #endif /* PMAP_OEA... */
   2733       1.1      matt }
   2734       1.1      matt 
   2735       1.1      matt void
   2736       1.1      matt pmap_print_pte(pmap_t pm, vaddr_t va)
   2737       1.1      matt {
   2738       1.1      matt 	struct pvo_entry *pvo;
   2739       1.2      matt 	volatile struct pte *pt;
   2740       1.1      matt 	int pteidx;
   2741       1.1      matt 
   2742       1.1      matt 	pvo = pmap_pvo_find_va(pm, va, &pteidx);
   2743       1.1      matt 	if (pvo != NULL) {
   2744       1.1      matt 		pt = pmap_pvo_to_pte(pvo, pteidx);
   2745       1.1      matt 		if (pt != NULL) {
   2746      1.53   garbled 			printf("VA %#" _PRIxva " -> %p -> %s %#" _PRIxpte ", %#" _PRIxpte "\n",
   2747      1.38   sanjayl 				va, pt,
   2748      1.38   sanjayl 				pt->pte_hi & PTE_HID ? "(sec)" : "(pri)",
   2749      1.38   sanjayl 				pt->pte_hi, pt->pte_lo);
   2750       1.1      matt 		} else {
   2751       1.1      matt 			printf("No valid PTE found\n");
   2752       1.1      matt 		}
   2753       1.1      matt 	} else {
   2754       1.1      matt 		printf("Address not in pmap\n");
   2755       1.1      matt 	}
   2756       1.1      matt }
   2757       1.1      matt 
   2758       1.1      matt void
   2759       1.1      matt pmap_pteg_dist(void)
   2760       1.1      matt {
   2761       1.1      matt 	struct pvo_entry *pvo;
   2762       1.1      matt 	int ptegidx;
   2763       1.1      matt 	int depth;
   2764       1.1      matt 	int max_depth = 0;
   2765       1.1      matt 	unsigned int depths[64];
   2766       1.1      matt 
   2767       1.1      matt 	memset(depths, 0, sizeof(depths));
   2768       1.1      matt 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2769       1.1      matt 		depth = 0;
   2770       1.1      matt 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   2771       1.1      matt 			depth++;
   2772       1.1      matt 		}
   2773       1.1      matt 		if (depth > max_depth)
   2774       1.1      matt 			max_depth = depth;
   2775       1.1      matt 		if (depth > 63)
   2776       1.1      matt 			depth = 63;
   2777       1.1      matt 		depths[depth]++;
   2778       1.1      matt 	}
   2779       1.1      matt 
   2780       1.1      matt 	for (depth = 0; depth < 64; depth++) {
   2781       1.1      matt 		printf("  [%2d]: %8u", depth, depths[depth]);
   2782       1.1      matt 		if ((depth & 3) == 3)
   2783       1.1      matt 			printf("\n");
   2784       1.1      matt 		if (depth == max_depth)
   2785       1.1      matt 			break;
   2786       1.1      matt 	}
   2787       1.1      matt 	if ((depth & 3) != 3)
   2788       1.1      matt 		printf("\n");
   2789       1.1      matt 	printf("Max depth found was %d\n", max_depth);
   2790       1.1      matt }
   2791       1.1      matt #endif /* DEBUG */
   2792       1.1      matt 
   2793       1.1      matt #if defined(PMAPCHECK) || defined(DEBUG)
   2794       1.1      matt void
   2795       1.1      matt pmap_pvo_verify(void)
   2796       1.1      matt {
   2797       1.1      matt 	int ptegidx;
   2798       1.1      matt 	int s;
   2799       1.1      matt 
   2800       1.1      matt 	s = splvm();
   2801       1.1      matt 	for (ptegidx = 0; ptegidx < pmap_pteg_cnt; ptegidx++) {
   2802       1.1      matt 		struct pvo_entry *pvo;
   2803       1.1      matt 		TAILQ_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
   2804       1.1      matt 			if ((uintptr_t) pvo >= SEGMENT_LENGTH)
   2805       1.1      matt 				panic("pmap_pvo_verify: invalid pvo %p "
   2806       1.1      matt 				    "on list %#x", pvo, ptegidx);
   2807       1.1      matt 			pmap_pvo_check(pvo);
   2808       1.1      matt 		}
   2809       1.1      matt 	}
   2810       1.1      matt 	splx(s);
   2811       1.1      matt }
   2812       1.1      matt #endif /* PMAPCHECK */
   2813       1.1      matt 
   2814       1.1      matt 
   2815       1.1      matt void *
   2816       1.1      matt pmap_pool_ualloc(struct pool *pp, int flags)
   2817       1.1      matt {
   2818       1.1      matt 	struct pvo_page *pvop;
   2819       1.1      matt 
   2820      1.50        ad 	if (uvm.page_init_done != true) {
   2821      1.50        ad 		return (void *) uvm_pageboot_alloc(PAGE_SIZE);
   2822      1.50        ad 	}
   2823      1.50        ad 
   2824      1.50        ad 	PMAP_LOCK();
   2825       1.1      matt 	pvop = SIMPLEQ_FIRST(&pmap_upvop_head);
   2826       1.1      matt 	if (pvop != NULL) {
   2827       1.1      matt 		pmap_upvop_free--;
   2828       1.1      matt 		SIMPLEQ_REMOVE_HEAD(&pmap_upvop_head, pvop_link);
   2829      1.50        ad 		PMAP_UNLOCK();
   2830       1.1      matt 		return pvop;
   2831       1.1      matt 	}
   2832      1.50        ad 	PMAP_UNLOCK();
   2833       1.1      matt 	return pmap_pool_malloc(pp, flags);
   2834       1.1      matt }
   2835       1.1      matt 
   2836       1.1      matt void *
   2837       1.1      matt pmap_pool_malloc(struct pool *pp, int flags)
   2838       1.1      matt {
   2839       1.1      matt 	struct pvo_page *pvop;
   2840       1.1      matt 	struct vm_page *pg;
   2841       1.1      matt 
   2842      1.50        ad 	PMAP_LOCK();
   2843       1.1      matt 	pvop = SIMPLEQ_FIRST(&pmap_mpvop_head);
   2844       1.1      matt 	if (pvop != NULL) {
   2845       1.1      matt 		pmap_mpvop_free--;
   2846       1.1      matt 		SIMPLEQ_REMOVE_HEAD(&pmap_mpvop_head, pvop_link);
   2847      1.50        ad 		PMAP_UNLOCK();
   2848       1.1      matt 		return pvop;
   2849       1.1      matt 	}
   2850      1.50        ad 	PMAP_UNLOCK();
   2851       1.1      matt  again:
   2852       1.1      matt 	pg = uvm_pagealloc_strat(NULL, 0, NULL, UVM_PGA_USERESERVE,
   2853       1.1      matt 	    UVM_PGA_STRAT_ONLY, VM_FREELIST_FIRST256);
   2854       1.1      matt 	if (__predict_false(pg == NULL)) {
   2855       1.1      matt 		if (flags & PR_WAITOK) {
   2856       1.1      matt 			uvm_wait("plpg");
   2857       1.1      matt 			goto again;
   2858       1.1      matt 		} else {
   2859       1.1      matt 			return (0);
   2860       1.1      matt 		}
   2861       1.1      matt 	}
   2862      1.53   garbled 	KDASSERT(VM_PAGE_TO_PHYS(pg) == (uintptr_t)VM_PAGE_TO_PHYS(pg));
   2863      1.53   garbled 	return (void *)(uintptr_t) VM_PAGE_TO_PHYS(pg);
   2864       1.1      matt }
   2865       1.1      matt 
   2866       1.1      matt void
   2867       1.1      matt pmap_pool_ufree(struct pool *pp, void *va)
   2868       1.1      matt {
   2869       1.1      matt 	struct pvo_page *pvop;
   2870       1.1      matt #if 0
   2871       1.1      matt 	if (PHYS_TO_VM_PAGE((paddr_t) va) != NULL) {
   2872       1.1      matt 		pmap_pool_mfree(va, size, tag);
   2873       1.1      matt 		return;
   2874       1.1      matt 	}
   2875       1.1      matt #endif
   2876      1.50        ad 	PMAP_LOCK();
   2877       1.1      matt 	pvop = va;
   2878       1.1      matt 	SIMPLEQ_INSERT_HEAD(&pmap_upvop_head, pvop, pvop_link);
   2879       1.1      matt 	pmap_upvop_free++;
   2880       1.1      matt 	if (pmap_upvop_free > pmap_upvop_maxfree)
   2881       1.1      matt 		pmap_upvop_maxfree = pmap_upvop_free;
   2882      1.50        ad 	PMAP_UNLOCK();
   2883       1.1      matt }
   2884       1.1      matt 
   2885       1.1      matt void
   2886       1.1      matt pmap_pool_mfree(struct pool *pp, void *va)
   2887       1.1      matt {
   2888       1.1      matt 	struct pvo_page *pvop;
   2889       1.1      matt 
   2890      1.50        ad 	PMAP_LOCK();
   2891       1.1      matt 	pvop = va;
   2892       1.1      matt 	SIMPLEQ_INSERT_HEAD(&pmap_mpvop_head, pvop, pvop_link);
   2893       1.1      matt 	pmap_mpvop_free++;
   2894       1.1      matt 	if (pmap_mpvop_free > pmap_mpvop_maxfree)
   2895       1.1      matt 		pmap_mpvop_maxfree = pmap_mpvop_free;
   2896      1.50        ad 	PMAP_UNLOCK();
   2897       1.1      matt #if 0
   2898       1.1      matt 	uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t) va));
   2899       1.1      matt #endif
   2900       1.1      matt }
   2901       1.1      matt 
   2902       1.1      matt /*
    2903       1.1      matt  * This routine is used during bootstrap to steal to-be-managed memory (which will
    2904       1.1      matt  * then be unmanaged).  We use it to grab memory from the first 256MB for our
   2905       1.1      matt  * pmap needs and above 256MB for other stuff.
   2906       1.1      matt  */
   2907       1.1      matt vaddr_t
   2908      1.10   thorpej pmap_steal_memory(vsize_t vsize, vaddr_t *vstartp, vaddr_t *vendp)
   2909       1.1      matt {
   2910       1.1      matt 	vsize_t size;
   2911       1.1      matt 	vaddr_t va;
   2912       1.1      matt 	paddr_t pa = 0;
   2913       1.1      matt 	int npgs, bank;
   2914       1.1      matt 	struct vm_physseg *ps;
   2915       1.1      matt 
   2916      1.45   thorpej 	if (uvm.page_init_done == true)
   2917       1.1      matt 		panic("pmap_steal_memory: called _after_ bootstrap");
   2918       1.1      matt 
   2919      1.10   thorpej 	*vstartp = VM_MIN_KERNEL_ADDRESS;
   2920      1.10   thorpej 	*vendp = VM_MAX_KERNEL_ADDRESS;
   2921      1.10   thorpej 
   2922       1.1      matt 	size = round_page(vsize);
   2923       1.1      matt 	npgs = atop(size);
   2924       1.1      matt 
   2925       1.1      matt 	/*
   2926       1.1      matt 	 * PA 0 will never be among those given to UVM so we can use it
   2927       1.1      matt 	 * to indicate we couldn't steal any memory.
   2928       1.1      matt 	 */
   2929      1.73  uebayasi 	for (bank = 0; bank < vm_nphysseg; bank++) {
   2930      1.73  uebayasi 		ps = VM_PHYSMEM_PTR(bank);
   2931       1.1      matt 		if (ps->free_list == VM_FREELIST_FIRST256 &&
   2932       1.1      matt 		    ps->avail_end - ps->avail_start >= npgs) {
   2933       1.1      matt 			pa = ptoa(ps->avail_start);
   2934       1.1      matt 			break;
   2935       1.1      matt 		}
   2936       1.1      matt 	}
   2937       1.1      matt 
   2938       1.1      matt 	if (pa == 0)
    2939       1.1      matt 		panic("pmap_steal_memory: no appropriate memory to steal!");
   2940       1.1      matt 
   2941       1.1      matt 	ps->avail_start += npgs;
   2942       1.1      matt 	ps->start += npgs;
   2943       1.1      matt 
   2944       1.1      matt 	/*
   2945       1.1      matt 	 * If we've used up all the pages in the segment, remove it and
   2946       1.1      matt 	 * compact the list.
   2947       1.1      matt 	 */
   2948       1.1      matt 	if (ps->avail_start == ps->end) {
   2949       1.1      matt 		/*
   2950       1.1      matt 		 * If this was the last one, then a very bad thing has occurred
   2951       1.1      matt 		 */
   2952       1.1      matt 		if (--vm_nphysseg == 0)
   2953       1.1      matt 			panic("pmap_steal_memory: out of memory!");
   2954       1.1      matt 
   2955       1.1      matt 		printf("pmap_steal_memory: consumed bank %d\n", bank);
   2956       1.1      matt 		for (; bank < vm_nphysseg; bank++, ps++) {
   2957       1.1      matt 			ps[0] = ps[1];
   2958       1.1      matt 		}
   2959       1.1      matt 	}
   2960       1.1      matt 
   2961       1.1      matt 	va = (vaddr_t) pa;
   2962      1.46  christos 	memset((void *) va, 0, size);
   2963       1.1      matt 	pmap_pages_stolen += npgs;
   2964       1.1      matt #ifdef DEBUG
   2965       1.1      matt 	if (pmapdebug && npgs > 1) {
   2966       1.1      matt 		u_int cnt = 0;
   2967      1.73  uebayasi 		for (bank = 0; bank < vm_nphysseg; bank++) {
   2968      1.73  uebayasi 			ps = VM_PHYSMEM_PTR(bank);
   2969       1.1      matt 			cnt += ps->avail_end - ps->avail_start;
   2970      1.73  uebayasi 		}
   2971       1.1      matt 		printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
   2972       1.1      matt 		    npgs, pmap_pages_stolen, cnt);
   2973       1.1      matt 	}
   2974       1.1      matt #endif
   2975       1.1      matt 
   2976       1.1      matt 	return va;
   2977       1.1      matt }
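
                             /*
                              * Illustrative sketch: uvm_pageboot_alloc() is the normal consumer of
                              * pmap_steal_memory().  A bootstrap-time allocation reduces to the
                              * call below; the returned VA is a 1:1 mapping of the stolen (and
                              * already zeroed) pages, which are never handed to UVM afterwards.
                              */
                             #if 0
                             	vaddr_t vstart, vend;
                             	vaddr_t va = pmap_steal_memory(size, &vstart, &vend);
                             #endif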
   2978       1.1      matt 
   2979       1.1      matt /*
    2980       1.1      matt  * Find a chunk of memory with the right size and alignment.
   2981       1.1      matt  */
   2982      1.53   garbled paddr_t
   2983       1.1      matt pmap_boot_find_memory(psize_t size, psize_t alignment, int at_end)
   2984       1.1      matt {
   2985       1.1      matt 	struct mem_region *mp;
   2986       1.1      matt 	paddr_t s, e;
   2987       1.1      matt 	int i, j;
   2988       1.1      matt 
   2989       1.1      matt 	size = round_page(size);
   2990       1.1      matt 
   2991       1.1      matt 	DPRINTFN(BOOT,
   2992      1.85      matt 	    "pmap_boot_find_memory: size=%#" _PRIxpa ", alignment=%#" _PRIxpa ", at_end=%d",
   2993      1.85      matt 	    size, alignment, at_end);
   2994       1.1      matt 
   2995       1.6   thorpej 	if (alignment < PAGE_SIZE || (alignment & (alignment-1)) != 0)
   2996      1.54   mlelstv 		panic("pmap_boot_find_memory: invalid alignment %#" _PRIxpa,
   2997       1.1      matt 		    alignment);
   2998       1.1      matt 
   2999       1.1      matt 	if (at_end) {
   3000       1.6   thorpej 		if (alignment != PAGE_SIZE)
   3001       1.1      matt 			panic("pmap_boot_find_memory: invalid ending "
   3002      1.53   garbled 			    "alignment %#" _PRIxpa, alignment);
   3003       1.1      matt 
   3004       1.1      matt 		for (mp = &avail[avail_cnt-1]; mp >= avail; mp--) {
   3005       1.1      matt 			s = mp->start + mp->size - size;
   3006       1.1      matt 			if (s >= mp->start && mp->size >= size) {
   3007      1.85      matt 				DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
   3008       1.1      matt 				DPRINTFN(BOOT,
   3009      1.85      matt 				    "pmap_boot_find_memory: b-avail[%d] start "
   3010      1.85      matt 				    "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
   3011      1.85      matt 				     mp->start, mp->size);
   3012       1.1      matt 				mp->size -= size;
   3013       1.1      matt 				DPRINTFN(BOOT,
   3014      1.85      matt 				    "pmap_boot_find_memory: a-avail[%d] start "
   3015      1.85      matt 				    "%#" _PRIxpa " size %#" _PRIxpa "\n", mp - avail,
   3016      1.85      matt 				     mp->start, mp->size);
   3017      1.53   garbled 				return s;
   3018       1.1      matt 			}
   3019       1.1      matt 		}
   3020       1.1      matt 		panic("pmap_boot_find_memory: no available memory");
   3021       1.1      matt 	}
   3022       1.1      matt 
   3023       1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   3024       1.1      matt 		s = (mp->start + alignment - 1) & ~(alignment-1);
   3025       1.1      matt 		e = s + size;
   3026       1.1      matt 
   3027       1.1      matt 		/*
    3028       1.1      matt 		 * Is the calculated block entirely within this region?
   3029       1.1      matt 		 */
   3030       1.1      matt 		if (s < mp->start || e > mp->start + mp->size)
   3031       1.1      matt 			continue;
   3032       1.1      matt 
   3033      1.85      matt 		DPRINTFN(BOOT, ": %#" _PRIxpa "\n", s);
   3034       1.1      matt 		if (s == mp->start) {
   3035       1.1      matt 			/*
    3036       1.1      matt 			 * If the block starts at the beginning of the region,
   3037       1.1      matt 			 * adjust the size & start. (the region may now be
   3038       1.1      matt 			 * zero in length)
   3039       1.1      matt 			 */
   3040       1.1      matt 			DPRINTFN(BOOT,
   3041      1.85      matt 			    "pmap_boot_find_memory: b-avail[%d] start "
   3042      1.85      matt 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
   3043       1.1      matt 			mp->start += size;
   3044       1.1      matt 			mp->size -= size;
   3045       1.1      matt 			DPRINTFN(BOOT,
   3046      1.85      matt 			    "pmap_boot_find_memory: a-avail[%d] start "
   3047      1.85      matt 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
   3048       1.1      matt 		} else if (e == mp->start + mp->size) {
   3049       1.1      matt 			/*
    3050       1.1      matt 			 * If the block ends at the end of the region,
   3051       1.1      matt 			 * adjust only the size.
   3052       1.1      matt 			 */
   3053       1.1      matt 			DPRINTFN(BOOT,
   3054      1.85      matt 			    "pmap_boot_find_memory: b-avail[%d] start "
   3055      1.85      matt 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
   3056       1.1      matt 			mp->size -= size;
   3057       1.1      matt 			DPRINTFN(BOOT,
   3058      1.85      matt 			    "pmap_boot_find_memory: a-avail[%d] start "
   3059      1.85      matt 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
   3060       1.1      matt 		} else {
   3061       1.1      matt 			/*
   3062       1.1      matt 			 * Block is in the middle of the region, so we
   3063       1.1      matt 			 * have to split it in two.
   3064       1.1      matt 			 */
   3065       1.1      matt 			for (j = avail_cnt; j > i + 1; j--) {
   3066       1.1      matt 				avail[j] = avail[j-1];
   3067       1.1      matt 			}
   3068       1.1      matt 			DPRINTFN(BOOT,
   3069      1.85      matt 			    "pmap_boot_find_memory: b-avail[%d] start "
   3070      1.85      matt 			    "%#" _PRIxpa " size %#" _PRIxpa "\n", i, mp->start, mp->size);
   3071       1.1      matt 			mp[1].start = e;
   3072       1.1      matt 			mp[1].size = mp[0].start + mp[0].size - e;
   3073       1.1      matt 			mp[0].size = s - mp[0].start;
   3074       1.1      matt 			avail_cnt++;
   3075       1.1      matt 			for (; i < avail_cnt; i++) {
   3076       1.1      matt 				DPRINTFN(BOOT,
   3077      1.85      matt 				    "pmap_boot_find_memory: a-avail[%d] "
   3078      1.85      matt 				    "start %#" _PRIxpa " size %#" _PRIxpa "\n", i,
   3079      1.85      matt 				     avail[i].start, avail[i].size);
   3080       1.1      matt 			}
   3081       1.1      matt 		}
   3082      1.53   garbled 		KASSERT(s == (uintptr_t) s);
   3083      1.53   garbled 		return s;
   3084       1.1      matt 	}
   3085       1.1      matt 	panic("pmap_boot_find_memory: not enough memory for "
   3086      1.54   mlelstv 	    "%#" _PRIxpa "/%#" _PRIxpa " allocation?", size, alignment);
   3087       1.1      matt }
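
                             /*
                              * Worked example of the middle-split case above (made-up numbers):
                              * stealing size 0x2000 with alignment 0x4000 from an avail[] region
                              * [0x3000, 0x9000) gives s = 0x4000 and e = 0x6000, so the region is
                              * split into [0x3000, 0x4000) and [0x6000, 0x9000) and avail_cnt
                              * grows by one.
                              */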
   3088       1.1      matt 
    3089      1.38   sanjayl /* XXXSL: we don't have any BATs to do this, so map Segment 0 1:1 using page tables */
   3090      1.53   garbled #if defined (PMAP_OEA64_BRIDGE)
   3091      1.38   sanjayl int
   3092      1.38   sanjayl pmap_setup_segment0_map(int use_large_pages, ...)
   3093      1.38   sanjayl {
   3094  1.86.2.2       tls     vaddr_t va, va_end;
   3095      1.38   sanjayl 
   3096      1.38   sanjayl     register_t pte_lo = 0x0;
   3097  1.86.2.3       tls     int ptegidx = 0;
   3098      1.38   sanjayl     struct pte pte;
   3099      1.38   sanjayl     va_list ap;
   3100      1.38   sanjayl 
   3101      1.38   sanjayl     /* Coherent + Supervisor RW, no user access */
   3102      1.38   sanjayl     pte_lo = PTE_M;
   3103      1.38   sanjayl 
   3104      1.38   sanjayl     /* XXXSL
   3105      1.38   sanjayl      * Map in 1st segment 1:1, we'll be careful not to spill kernel entries later,
   3106      1.38   sanjayl      * these have to take priority.
   3107      1.38   sanjayl      */
   3108      1.38   sanjayl     for (va = 0x0; va < SEGMENT_LENGTH; va += 0x1000) {
   3109      1.38   sanjayl         ptegidx = va_to_pteg(pmap_kernel(), va);
   3110      1.38   sanjayl         pmap_pte_create(&pte, pmap_kernel(), va, va | pte_lo);
   3111  1.86.2.3       tls         (void)pmap_pte_insert(ptegidx, &pte);
   3112      1.38   sanjayl     }
   3113      1.38   sanjayl 
   3114      1.38   sanjayl     va_start(ap, use_large_pages);
   3115      1.38   sanjayl     while (1) {
   3116      1.38   sanjayl         paddr_t pa;
   3117      1.38   sanjayl         size_t size;
   3118      1.38   sanjayl 
   3119      1.38   sanjayl         va = va_arg(ap, vaddr_t);
   3120      1.38   sanjayl 
   3121      1.38   sanjayl         if (va == 0)
   3122      1.38   sanjayl             break;
   3123      1.38   sanjayl 
   3124      1.38   sanjayl         pa = va_arg(ap, paddr_t);
   3125      1.38   sanjayl         size = va_arg(ap, size_t);
   3126      1.38   sanjayl 
   3127  1.86.2.2       tls         for (va_end = va + size; va < va_end; va += 0x1000, pa += 0x1000) {
   3128      1.38   sanjayl #if 0
   3129      1.54   mlelstv 	    printf("%s: Inserting: va: %#" _PRIxva ", pa: %#" _PRIxpa "\n", __func__,  va, pa);
   3130      1.38   sanjayl #endif
   3131      1.38   sanjayl             ptegidx = va_to_pteg(pmap_kernel(), va);
   3132      1.38   sanjayl             pmap_pte_create(&pte, pmap_kernel(), va, pa | pte_lo);
   3133  1.86.2.3       tls             (void)pmap_pte_insert(ptegidx, &pte);
   3134      1.38   sanjayl         }
   3135      1.38   sanjayl     }
   3136      1.38   sanjayl 
   3137      1.38   sanjayl     TLBSYNC();
   3138      1.38   sanjayl     SYNC();
   3139      1.38   sanjayl     return (0);
   3140      1.38   sanjayl }
   3141      1.53   garbled #endif /* PMAP_OEA64_BRIDGE */
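
                             /*
                              * Illustrative sketch: the variadic tail of pmap_setup_segment0_map()
                              * is (va, pa, size) triples terminated by a zero va, so a hypothetical
                              * caller wiring the kernel text 1:1 might look like:
                              */
                             #if 0
                             	pmap_setup_segment0_map(0,
                             	    (vaddr_t)kernelstart, (paddr_t)kernelstart, (size_t)kernelsize,
                             	    (vaddr_t)0);
                             #endif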
   3142      1.38   sanjayl 
   3143       1.1      matt /*
   3144       1.1      matt  * This is not part of the defined PMAP interface and is specific to the
   3145       1.1      matt  * PowerPC architecture.  This is called during initppc, before the system
   3146       1.1      matt  * is really initialized.
   3147       1.1      matt  */
   3148       1.1      matt void
   3149       1.1      matt pmap_bootstrap(paddr_t kernelstart, paddr_t kernelend)
   3150       1.1      matt {
   3151       1.1      matt 	struct mem_region *mp, tmp;
   3152       1.1      matt 	paddr_t s, e;
   3153       1.1      matt 	psize_t size;
   3154       1.1      matt 	int i, j;
   3155       1.1      matt 
   3156       1.1      matt 	/*
   3157       1.1      matt 	 * Get memory.
   3158       1.1      matt 	 */
   3159       1.1      matt 	mem_regions(&mem, &avail);
   3160       1.1      matt #if defined(DEBUG)
   3161       1.1      matt 	if (pmapdebug & PMAPDEBUG_BOOT) {
   3162       1.1      matt 		printf("pmap_bootstrap: memory configuration:\n");
   3163       1.1      matt 		for (mp = mem; mp->size; mp++) {
   3164      1.54   mlelstv 			printf("pmap_bootstrap: mem start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3165       1.1      matt 				mp->start, mp->size);
   3166       1.1      matt 		}
   3167       1.1      matt 		for (mp = avail; mp->size; mp++) {
   3168      1.54   mlelstv 			printf("pmap_bootstrap: avail start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3169       1.1      matt 				mp->start, mp->size);
   3170       1.1      matt 		}
   3171       1.1      matt 	}
   3172       1.1      matt #endif
   3173       1.1      matt 
   3174       1.1      matt 	/*
   3175       1.1      matt 	 * Find out how much physical memory we have and in how many chunks.
   3176       1.1      matt 	 */
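                             	/*
                             	 * physmem is accumulated in pages (btoc converts bytes to
                             	 * "clicks"); memory at or above pmap_memlimit does not count.
                             	 */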
   3177       1.1      matt 	for (mem_cnt = 0, mp = mem; mp->size; mp++) {
   3178       1.1      matt 		if (mp->start >= pmap_memlimit)
   3179       1.1      matt 			continue;
   3180       1.1      matt 		if (mp->start + mp->size > pmap_memlimit) {
   3181       1.1      matt 			size = pmap_memlimit - mp->start;
   3182       1.1      matt 			physmem += btoc(size);
   3183       1.1      matt 		} else {
   3184       1.1      matt 			physmem += btoc(mp->size);
   3185       1.1      matt 		}
   3186       1.1      matt 		mem_cnt++;
   3187       1.1      matt 	}
   3188       1.1      matt 
   3189       1.1      matt 	/*
   3190       1.1      matt 	 * Count the number of available entries.
   3191       1.1      matt 	 */
   3192       1.1      matt 	for (avail_cnt = 0, mp = avail; mp->size; mp++)
   3193       1.1      matt 		avail_cnt++;
   3194       1.1      matt 
   3195       1.1      matt 	/*
   3196       1.1      matt 	 * Page align all regions.
   3197       1.1      matt 	 */
   3198       1.1      matt 	kernelstart = trunc_page(kernelstart);
   3199       1.1      matt 	kernelend = round_page(kernelend);
   3200       1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   3201       1.1      matt 		s = round_page(mp->start);
   3202       1.1      matt 		mp->size -= (s - mp->start);
   3203       1.1      matt 		mp->size = trunc_page(mp->size);
   3204       1.1      matt 		mp->start = s;
   3205       1.1      matt 		e = mp->start + mp->size;
   3206       1.1      matt 
   3207       1.1      matt 		DPRINTFN(BOOT,
   3208      1.85      matt 		    "pmap_bootstrap: b-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3209      1.85      matt 		    i, mp->start, mp->size);
   3210       1.1      matt 
   3211       1.1      matt 		/*
   3212       1.1      matt 		 * Don't allow the end to run beyond our artificial limit
   3213       1.1      matt 		 */
   3214       1.1      matt 		if (e > pmap_memlimit)
   3215       1.1      matt 			e = pmap_memlimit;
   3216       1.1      matt 
   3217       1.1      matt 		/*
    3218       1.1      matt 		 * Is this region empty or strange?  Skip it.
   3219       1.1      matt 		 */
   3220       1.1      matt 		if (e <= s) {
   3221       1.1      matt 			mp->start = 0;
   3222       1.1      matt 			mp->size = 0;
   3223       1.1      matt 			continue;
   3224       1.1      matt 		}
   3225       1.1      matt 
   3226       1.1      matt 		/*
    3227       1.1      matt 		 * Does this overlap the beginning of the kernel?
    3228       1.1      matt 		 * Does it also extend past the end of the kernel?
   3229       1.1      matt 		 */
   3230       1.1      matt 		else if (s < kernelstart && e > kernelstart) {
   3231       1.1      matt 			if (e > kernelend) {
   3232       1.1      matt 				avail[avail_cnt].start = kernelend;
   3233       1.1      matt 				avail[avail_cnt].size = e - kernelend;
   3234       1.1      matt 				avail_cnt++;
   3235       1.1      matt 			}
   3236       1.1      matt 			mp->size = kernelstart - s;
   3237       1.1      matt 		}
   3238       1.1      matt 		/*
   3239       1.1      matt 		 * Check whether this region overlaps the end of the kernel.
   3240       1.1      matt 		 */
   3241       1.1      matt 		else if (s < kernelend && e > kernelend) {
   3242       1.1      matt 			mp->start = kernelend;
   3243       1.1      matt 			mp->size = e - kernelend;
   3244       1.1      matt 		}
   3245       1.1      matt 		/*
    3246       1.1      matt 		 * Is this region completely inside the kernel?
    3247       1.1      matt 		 * Nuke it if so.
   3248       1.1      matt 		 */
   3249       1.1      matt 		else if (s >= kernelstart && e <= kernelend) {
   3250       1.1      matt 			mp->start = 0;
   3251       1.1      matt 			mp->size = 0;
   3252       1.1      matt 		}
   3253       1.1      matt 		/*
   3254       1.1      matt 		 * If the user imposed a memory limit, enforce it.
   3255       1.1      matt 		 */
   3256       1.1      matt 		else if (s >= pmap_memlimit) {
    3257       1.6   thorpej 			mp->start = -PAGE_SIZE;	/* distinctive value, so we can tell why */
   3258       1.1      matt 			mp->size = 0;
   3259       1.1      matt 		}
   3260       1.1      matt 		else {
   3261       1.1      matt 			mp->start = s;
   3262       1.1      matt 			mp->size = e - s;
   3263       1.1      matt 		}
   3264       1.1      matt 		DPRINTFN(BOOT,
   3265      1.85      matt 		    "pmap_bootstrap: a-avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3266      1.85      matt 		    i, mp->start, mp->size);
   3267       1.1      matt 	}
   3268       1.1      matt 
   3269       1.1      matt 	/*
    3270       1.1      matt 	 * Move (and uncount) all the null regions to the end.
   3271       1.1      matt 	 */
   3272       1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; i++, mp++) {
   3273       1.1      matt 		if (mp->size == 0) {
   3274       1.1      matt 			tmp = avail[i];
   3275       1.1      matt 			avail[i] = avail[--avail_cnt];
    3276       1.1      matt 			avail[avail_cnt] = tmp;
                             			i--, mp--;	/* re-examine the entry we swapped in */
   3277       1.1      matt 		}
   3278       1.1      matt 	}
   3279       1.1      matt 
   3280       1.1      matt 	/*
    3281      1.61     skrll 	 * Sort them into ascending order (simple exchange sort).
   3282       1.1      matt 	 */
   3283       1.1      matt 	for (i = 0; i < avail_cnt; i++) {
   3284       1.1      matt 		for (j = i + 1; j < avail_cnt; j++) {
   3285       1.1      matt 			if (avail[i].start > avail[j].start) {
   3286       1.1      matt 				tmp = avail[i];
   3287       1.1      matt 				avail[i] = avail[j];
   3288       1.1      matt 				avail[j] = tmp;
   3289       1.1      matt 			}
   3290       1.1      matt 		}
   3291       1.1      matt 	}
   3292       1.1      matt 
   3293       1.1      matt 	/*
   3294       1.1      matt 	 * Make sure they don't overlap.
   3295       1.1      matt 	 */
   3296       1.1      matt 	for (mp = avail, i = 0; i < avail_cnt - 1; i++, mp++) {
   3297       1.1      matt 		if (mp[0].start + mp[0].size > mp[1].start) {
   3298       1.1      matt 			mp[0].size = mp[1].start - mp[0].start;
   3299       1.1      matt 		}
   3300       1.1      matt 		DPRINTFN(BOOT,
   3301      1.85      matt 		    "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3302      1.85      matt 		    i, mp->start, mp->size);
   3303       1.1      matt 	}
   3304       1.1      matt 	DPRINTFN(BOOT,
   3305      1.85      matt 	    "pmap_bootstrap: avail[%d] start %#" _PRIxpa " size %#" _PRIxpa "\n",
   3306      1.85      matt 	    i, mp->start, mp->size);
   3307       1.1      matt 
   3308       1.1      matt #ifdef	PTEGCOUNT
   3309       1.1      matt 	pmap_pteg_cnt = PTEGCOUNT;
   3310       1.1      matt #else /* PTEGCOUNT */
   3311      1.38   sanjayl 
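                             	/*
                             	 * Default sizing: at least one PTEG for every two pages of
                             	 * RAM, rounded to a power of two (the hash table size must
                             	 * be a power of two; the minimum here is 0x800 PTEGs).
                             	 */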
   3312       1.1      matt 	pmap_pteg_cnt = 0x1000;
   3313       1.1      matt 
   3314       1.1      matt 	while (pmap_pteg_cnt < physmem)
   3315       1.1      matt 		pmap_pteg_cnt <<= 1;
   3316       1.1      matt 
   3317       1.1      matt 	pmap_pteg_cnt >>= 1;
   3318       1.1      matt #endif /* PTEGCOUNT */
   3319       1.1      matt 
   3320      1.38   sanjayl #ifdef DEBUG
   3321      1.85      matt 	DPRINTFN(BOOT, "pmap_pteg_cnt: 0x%x\n", pmap_pteg_cnt);
   3322      1.38   sanjayl #endif
   3323      1.38   sanjayl 
   3324       1.1      matt 	/*
   3325       1.1      matt 	 * Find suitably aligned memory for PTEG hash table.
   3326       1.1      matt 	 */
   3327       1.2      matt 	size = pmap_pteg_cnt * sizeof(struct pteg);
   3328      1.53   garbled 	pmap_pteg_table = (void *)(uintptr_t) pmap_boot_find_memory(size, size, 0);
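                             	/*
                             	 * Natural alignment (align == size) is required: SDR1 forms
                             	 * PTEG addresses by OR-ing the hashed offset into the table
                             	 * origin, so the origin's low bits must be zero.
                             	 */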
   3329      1.38   sanjayl 
   3330      1.38   sanjayl #ifdef DEBUG
   3331      1.38   sanjayl 	DPRINTFN(BOOT,
    3332      1.85      matt 	    "PTEG cnt: 0x%x HTAB size: 0x%08x bytes, address: %p\n",
                             	    pmap_pteg_cnt, (unsigned int)size, pmap_pteg_table);
   3333      1.38   sanjayl #endif
   3334      1.38   sanjayl 
   3335      1.38   sanjayl 
   3336       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
    3337       1.1      matt 	if ((uintptr_t)pmap_pteg_table + size > SEGMENT_LENGTH)
   3338      1.54   mlelstv 		panic("pmap_bootstrap: pmap_pteg_table end (%p + %#" _PRIxpa ") > 256MB",
   3339       1.1      matt 		    pmap_pteg_table, size);
   3340       1.1      matt #endif
   3341       1.1      matt 
   3342      1.32        he 	memset(__UNVOLATILE(pmap_pteg_table), 0,
   3343      1.32        he 		pmap_pteg_cnt * sizeof(struct pteg));
   3344       1.1      matt 	pmap_pteg_mask = pmap_pteg_cnt - 1;
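                             	/* The PTEG count is a power of two, so hashes can be reduced
                             	 * with a simple AND of this mask. */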
   3345       1.1      matt 
   3346       1.1      matt 	/*
   3347       1.1      matt 	 * We cannot do pmap_steal_memory here since UVM hasn't been loaded
   3348       1.1      matt 	 * with pages.  So we just steal them before giving them to UVM.
   3349       1.1      matt 	 */
   3350       1.1      matt 	size = sizeof(pmap_pvo_table[0]) * pmap_pteg_cnt;
   3351      1.53   garbled 	pmap_pvo_table = (void *)(uintptr_t) pmap_boot_find_memory(size, PAGE_SIZE, 0);
   3352       1.1      matt #if defined(DIAGNOSTIC) || defined(DEBUG) || defined(PMAPCHECK)
    3353       1.1      matt 	if ((uintptr_t)pmap_pvo_table + size > SEGMENT_LENGTH)
   3354      1.54   mlelstv 		panic("pmap_bootstrap: pmap_pvo_table end (%p + %#" _PRIxpa ") > 256MB",
   3355       1.1      matt 		    pmap_pvo_table, size);
   3356       1.1      matt #endif
   3357       1.1      matt 
   3358       1.1      matt 	for (i = 0; i < pmap_pteg_cnt; i++)
   3359       1.1      matt 		TAILQ_INIT(&pmap_pvo_table[i]);
   3360       1.1      matt 
   3361       1.1      matt #ifndef MSGBUFADDR
   3362       1.1      matt 	/*
   3363       1.1      matt 	 * Allocate msgbuf in high memory.
   3364       1.1      matt 	 */
   3365      1.53   garbled 	msgbuf_paddr = pmap_boot_find_memory(MSGBUFSIZE, PAGE_SIZE, 1);
   3366       1.1      matt #endif
   3367       1.1      matt 
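                             	/*
                             	 * Hand the usable regions to UVM.  Memory below SEGMENT_LENGTH
                             	 * (256MB) goes on VM_FREELIST_FIRST256; a region straddling
                             	 * that boundary is split so each part lands on the proper
                             	 * freelist.
                             	 */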
   3368       1.1      matt 	for (mp = avail, i = 0; i < avail_cnt; mp++, i++) {
   3369       1.1      matt 		paddr_t pfstart = atop(mp->start);
   3370       1.1      matt 		paddr_t pfend = atop(mp->start + mp->size);
   3371       1.1      matt 		if (mp->size == 0)
   3372       1.1      matt 			continue;
   3373       1.1      matt 		if (mp->start + mp->size <= SEGMENT_LENGTH) {
   3374       1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3375       1.1      matt 				VM_FREELIST_FIRST256);
   3376       1.1      matt 		} else if (mp->start >= SEGMENT_LENGTH) {
   3377       1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3378       1.1      matt 				VM_FREELIST_DEFAULT);
   3379       1.1      matt 		} else {
   3380       1.1      matt 			pfend = atop(SEGMENT_LENGTH);
   3381       1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3382       1.1      matt 				VM_FREELIST_FIRST256);
   3383       1.1      matt 			pfstart = atop(SEGMENT_LENGTH);
   3384       1.1      matt 			pfend = atop(mp->start + mp->size);
   3385       1.1      matt 			uvm_page_physload(pfstart, pfend, pfstart, pfend,
   3386       1.1      matt 				VM_FREELIST_DEFAULT);
   3387       1.1      matt 		}
   3388       1.1      matt 	}
   3389       1.1      matt 
   3390       1.1      matt 	/*
    3391       1.1      matt 	 * Make sure the kernel VSID is allocated, as well as VSID 0.
   3392       1.1      matt 	 */
   3393       1.1      matt 	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
   3394       1.1      matt 		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
   3395      1.53   garbled 	pmap_vsid_bitmap[(PHYSMAP_VSIDBITS & (NPMAPS-1)) / VSID_NBPW]
   3396      1.53   garbled 		|= 1 << (PHYSMAP_VSIDBITS % VSID_NBPW);
   3397       1.1      matt 	pmap_vsid_bitmap[0] |= 1;
   3398       1.1      matt 
   3399       1.1      matt 	/*
   3400       1.1      matt 	 * Initialize kernel pmap and hardware.
   3401       1.1      matt 	 */
   3402      1.38   sanjayl 
    3403      1.53   garbled /* PMAP_OEA64_BRIDGE also supports these instructions */
   3404      1.53   garbled #if defined (PMAP_OEA) || defined (PMAP_OEA64_BRIDGE)
   3405       1.1      matt 	for (i = 0; i < 16; i++) {
   3406  1.86.2.3       tls #if defined(PPC_OEA601)
    3407  1.86.2.3       tls 	    /* XXX wedges on segment register 0xf, so set it later */
   3408  1.86.2.3       tls 	    if ((iosrtable[i] & SR601_T) && ((MFPVR() >> 16) == MPC601))
   3409  1.86.2.3       tls 		    continue;
   3410  1.86.2.3       tls #endif
    3411      1.38   sanjayl 		pmap_kernel()->pm_sr[i] = KERNELN_SEGMENT(i)|SR_PRKEY;
    3412      1.35     perry 		__asm volatile ("mtsrin %0,%1"
    3413      1.38   sanjayl 			      :: "r"(KERNELN_SEGMENT(i)|SR_PRKEY), "r"(i << ADDR_SR_SHFT));
   3414       1.1      matt 	}
   3415       1.1      matt 
   3416       1.1      matt 	pmap_kernel()->pm_sr[KERNEL_SR] = KERNEL_SEGMENT|SR_SUKEY|SR_PRKEY;
   3417      1.35     perry 	__asm volatile ("mtsr %0,%1"
   3418       1.1      matt 		      :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
   3419       1.1      matt #ifdef KERNEL2_SR
   3420       1.1      matt 	pmap_kernel()->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT|SR_SUKEY|SR_PRKEY;
   3421      1.35     perry 	__asm volatile ("mtsr %0,%1"
   3422       1.1      matt 		      :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
   3423       1.1      matt #endif
   3424      1.53   garbled #endif /* PMAP_OEA || PMAP_OEA64_BRIDGE */
   3425      1.53   garbled #if defined (PMAP_OEA)
   3426       1.1      matt 	for (i = 0; i < 16; i++) {
   3427       1.1      matt 		if (iosrtable[i] & SR601_T) {
   3428       1.1      matt 			pmap_kernel()->pm_sr[i] = iosrtable[i];
   3429      1.35     perry 			__asm volatile ("mtsrin %0,%1"
   3430       1.1      matt 			    :: "r"(iosrtable[i]), "r"(i << ADDR_SR_SHFT));
   3431       1.1      matt 		}
   3432       1.1      matt 	}
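                             	/*
                             	 * 32-bit SDR1 is the table origin OR-ed with HTABMASK.  A
                             	 * PTEG is 64 bytes, so for a table of pmap_pteg_cnt * 64
                             	 * bytes the mask is pmap_pteg_mask >> 10.
                             	 */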
   3433      1.35     perry 	__asm volatile ("sync; mtsdr1 %0; isync"
   3434       1.2      matt 		      :: "r"((uintptr_t)pmap_pteg_table | (pmap_pteg_mask >> 10)));
   3435      1.53   garbled #elif defined (PMAP_OEA64) || defined (PMAP_OEA64_BRIDGE)
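                             	/*
                             	 * 64-bit SDR1 takes HTABSIZE = log2(table size) - 18 in its
                             	 * low bits; with 128-byte PTEGs, 32 - clz(pmap_pteg_mask >> 11)
                             	 * computes the same value, log2(pmap_pteg_cnt) - 11.
                             	 */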
    3436      1.38   sanjayl 	__asm volatile ("sync; mtsdr1 %0; isync"
    3437      1.80      matt 		      :: "r"((uintptr_t)pmap_pteg_table | (32 - __builtin_clz(pmap_pteg_mask >> 11))));
   3438      1.38   sanjayl #endif
   3439       1.1      matt 	tlbia();
   3440       1.1      matt 
   3441       1.1      matt #ifdef ALTIVEC
   3442       1.1      matt 	pmap_use_altivec = cpu_altivec;
   3443       1.1      matt #endif
   3444       1.1      matt 
   3445       1.1      matt #ifdef DEBUG
   3446       1.1      matt 	if (pmapdebug & PMAPDEBUG_BOOT) {
   3447       1.1      matt 		u_int cnt;
   3448       1.1      matt 		int bank;
   3449       1.1      matt 		char pbuf[9];
   3450       1.1      matt 		for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
   3451      1.73  uebayasi 			cnt += VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start;
   3452      1.53   garbled 			printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
   3453       1.1      matt 			    bank,
   3454      1.73  uebayasi 			    ptoa(VM_PHYSMEM_PTR(bank)->avail_start),
   3455      1.73  uebayasi 			    ptoa(VM_PHYSMEM_PTR(bank)->avail_end),
   3456      1.73  uebayasi 			    ptoa(VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start));
   3457       1.1      matt 		}
   3458       1.1      matt 		format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
   3459       1.1      matt 		printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
   3460       1.1      matt 		    pbuf, cnt);
   3461       1.1      matt 	}
   3462       1.1      matt #endif
   3463       1.1      matt 
   3464       1.1      matt 	pool_init(&pmap_upvo_pool, sizeof(struct pvo_entry),
   3465       1.1      matt 	    sizeof(struct pvo_entry), 0, 0, "pmap_upvopl",
   3466      1.60       chs 	    &pmap_pool_uallocator, IPL_VM);
   3467       1.1      matt 
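                             	/*
                             	 * Keep a reserve of PVO entries: growing the pool can itself
                             	 * require new mappings, and hence new PVO entries.
                             	 */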
   3468       1.1      matt 	pool_setlowat(&pmap_upvo_pool, 252);
   3469       1.1      matt 
   3470       1.1      matt 	pool_init(&pmap_pool, sizeof(struct pmap),
   3471      1.48        ad 	    sizeof(void *), 0, 0, "pmap_pl", &pmap_pool_uallocator,
   3472      1.48        ad 	    IPL_NONE);
   3473      1.41      matt 
   3474  1.86.2.2       tls #if defined(PMAP_NEED_MAPKERNEL)
   3475      1.41      matt 	{
   3476      1.53   garbled 		struct pmap *pm = pmap_kernel();
   3477      1.58   garbled #if defined(PMAP_NEED_FULL_MAPKERNEL)
   3478      1.41      matt 		extern int etext[], kernel_text[];
   3479      1.41      matt 		vaddr_t va, va_etext = (paddr_t) etext;
   3480      1.53   garbled #endif
   3481      1.53   garbled 		paddr_t pa, pa_end;
   3482      1.42      matt 		register_t sr;
   3483      1.53   garbled 		struct pte pt;
   3484      1.53   garbled 		unsigned int ptegidx;
   3485      1.53   garbled 		int bank;
   3486      1.42      matt 
   3487      1.53   garbled 		sr = PHYSMAPN_SEGMENT(0) | SR_SUKEY|SR_PRKEY;
   3488      1.53   garbled 		pm->pm_sr[0] = sr;
   3489      1.41      matt 
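                             		/*
                             		 * Pre-enter writable 1:1 (VA == PA) mappings for all
                             		 * managed physical pages, under the PHYSMAP segment
                             		 * installed above.
                             		 */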
   3490      1.53   garbled 		for (bank = 0; bank < vm_nphysseg; bank++) {
   3491      1.73  uebayasi 			pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
   3492      1.73  uebayasi 			pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
   3493      1.53   garbled 			for (; pa < pa_end; pa += PAGE_SIZE) {
   3494      1.53   garbled 				ptegidx = va_to_pteg(pm, pa);
   3495      1.53   garbled 				pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW);
   3496      1.53   garbled 				pmap_pte_insert(ptegidx, &pt);
   3497      1.53   garbled 			}
   3498      1.53   garbled 		}
   3499      1.53   garbled 
   3500      1.58   garbled #if defined(PMAP_NEED_FULL_MAPKERNEL)
   3501      1.41      matt 		va = (vaddr_t) kernel_text;
   3502      1.41      matt 
   3503      1.41      matt 		for (pa = kernelstart; va < va_etext;
   3504      1.53   garbled 		     pa += PAGE_SIZE, va += PAGE_SIZE) {
   3505      1.53   garbled 			ptegidx = va_to_pteg(pm, va);
   3506      1.53   garbled 			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
   3507      1.53   garbled 			pmap_pte_insert(ptegidx, &pt);
   3508      1.53   garbled 		}
   3509      1.41      matt 
   3510      1.41      matt 		for (; pa < kernelend;
   3511      1.53   garbled 		     pa += PAGE_SIZE, va += PAGE_SIZE) {
   3512      1.53   garbled 			ptegidx = va_to_pteg(pm, va);
   3513      1.53   garbled 			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
   3514      1.53   garbled 			pmap_pte_insert(ptegidx, &pt);
   3515      1.53   garbled 		}
   3516      1.53   garbled 
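                             		/*
                             		 * Map memory below the kernel 1:1 as well; the first
                             		 * 0x3000 bytes hold the exception vectors and stay
                             		 * read-only.
                             		 */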
   3517      1.58   garbled 		for (va = 0, pa = 0; va < kernelstart;
   3518      1.53   garbled 		     pa += PAGE_SIZE, va += PAGE_SIZE) {
   3519      1.53   garbled 			ptegidx = va_to_pteg(pm, va);
   3520      1.58   garbled 			if (va < 0x3000)
   3521      1.58   garbled 				pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BR);
   3522      1.58   garbled 			else
   3523      1.58   garbled 				pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
   3524      1.58   garbled 			pmap_pte_insert(ptegidx, &pt);
   3525      1.58   garbled 		}
   3526      1.58   garbled 		for (va = kernelend, pa = kernelend; va < SEGMENT_LENGTH;
   3527      1.58   garbled 		    pa += PAGE_SIZE, va += PAGE_SIZE) {
   3528      1.58   garbled 			ptegidx = va_to_pteg(pm, va);
   3529      1.53   garbled 			pmap_pte_create(&pt, pm, va, pa | PTE_M|PTE_BW);
   3530      1.53   garbled 			pmap_pte_insert(ptegidx, &pt);
   3531      1.53   garbled 		}
   3532      1.53   garbled #endif
   3533      1.42      matt 
   3534      1.42      matt 		__asm volatile ("mtsrin %0,%1"
   3535      1.42      matt  			      :: "r"(sr), "r"(kernelstart));
   3536      1.41      matt 	}
   3537      1.41      matt #endif
   3538  1.86.2.3       tls 
   3539  1.86.2.3       tls #if defined(PMAPDEBUG)
    3540  1.86.2.3       tls 	if (pmapdebug)
   3541  1.86.2.3       tls 	    pmap_print_mmuregs();
   3542  1.86.2.3       tls #endif
   3543       1.1      matt }
   3544