      1 /*	$NetBSD: pmap_motorola.c,v 1.96 2025/11/06 18:42:05 thorpej Exp $        */
      2 
      3 /*-
      4  * Copyright (c) 1999 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1991, 1993
     34  *	The Regents of the University of California.  All rights reserved.
     35  *
     36  * This code is derived from software contributed to Berkeley by
     37  * the Systems Programming Group of the University of Utah Computer
     38  * Science Department.
     39  *
     40  * Redistribution and use in source and binary forms, with or without
     41  * modification, are permitted provided that the following conditions
     42  * are met:
     43  * 1. Redistributions of source code must retain the above copyright
     44  *    notice, this list of conditions and the following disclaimer.
     45  * 2. Redistributions in binary form must reproduce the above copyright
     46  *    notice, this list of conditions and the following disclaimer in the
     47  *    documentation and/or other materials provided with the distribution.
     48  * 3. Neither the name of the University nor the names of its contributors
     49  *    may be used to endorse or promote products derived from this software
     50  *    without specific prior written permission.
     51  *
     52  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     53  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     55  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     56  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     57  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     58  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     59  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     60  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     61  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     62  * SUCH DAMAGE.
     63  *
     64  *	@(#)pmap.c	8.6 (Berkeley) 5/27/94
     65  */
     66 
     67 /*
     68  * Motorola m68k-family physical map management code.
     69  *
     70  * Supports:
     71  *	68020 with 68851 MMU
     72  *	68020 with HP MMU
     73  *	68030 with on-chip MMU
     74  *	68040 with on-chip MMU
     75  *	68060 with on-chip MMU
     76  *
     77  * Notes:
     78  *	Don't even pay lip service to multiprocessor support.
     79  *
     80  *	We assume TLB entries don't have process tags (except for the
     81  *	supervisor/user distinction) so we only invalidate TLB entries
     82  *	when changing mappings for the current (or kernel) pmap.  This is
     83  *	technically not true for the 68851 but we flush the TLB on every
     84  *	context switch, so it effectively winds up that way.
     85  *
     86  *	Bitwise and/or operations are significantly faster than bitfield
     87  *	references so we use them when accessing STE/PTEs in the pmap_pte_*
     88  *	macros.  Note also that the two are not always equivalent; e.g.:
     89  *		(*pte & PG_PROT) [4] != pte->pg_prot [1]
     90  *	and a couple of routines that deal with protection and wiring take
     91  *	some shortcuts that assume the and/or definitions.
     92  */
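
/*
 * Illustrative sketch, not compiled: the whole-word and/or style used by
 * the pmap_pte_*() macros below, e.g. forcing a PTE read-only with a
 * single or-immediate instead of a bitfield store.  The example_* name
 * is hypothetical.
 */
#if 0
static inline void
example_pte_make_ro(pt_entry_t *pte)
{

	/* Same effect as pmap_pte_set_prot(pte, 1) below. */
	*pte |= PG_PROT;
}
#endif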
     93 
     94 /*
     95  *	Manages physical address maps.
     96  *
     97  *	In addition to hardware address maps, this
     98  *	module is called upon to provide software-use-only
     99  *	maps which may or may not be stored in the same
    100  *	form as hardware maps.  These pseudo-maps are
    101  *	used to store intermediate results from copy
    102  *	operations to and from address spaces.
    103  *
    104  *	Since the information managed by this module is
    105  *	also stored by the logical address mapping module,
    106  *	this module may throw away valid virtual-to-physical
    107  *	mappings at almost any time.  However, invalidations
    108  *	of virtual-to-physical mappings must be done as
    109  *	requested.
    110  *
    111  *	In order to cope with hardware architectures which
    112  *	make virtual-to-physical map invalidates expensive,
    113  *	this module may delay invalidate or reduce-protection
    114  *	operations until such time as they are actually
    115  *	necessary.  This module is given full information as
    116  *	to which processors are currently using which maps,
    117  *	and to when physical maps must be made correct.
    118  */
    119 
    120 #include "opt_m68k_arch.h"
    121 
    122 #include <sys/cdefs.h>
    123 __KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.96 2025/11/06 18:42:05 thorpej Exp $");
    124 
    125 #include <sys/param.h>
    126 #include <sys/systm.h>
    127 #include <sys/proc.h>
    128 #include <sys/pool.h>
    129 #include <sys/cpu.h>
    130 #include <sys/atomic.h>
    131 
    132 #include <machine/pte.h>
    133 #include <machine/pcb.h>
    134 
    135 #include <uvm/uvm.h>
    136 #include <uvm/uvm_physseg.h>
    137 
    138 #include <m68k/cacheops.h>
    139 
    140 #if !defined(M68K_MMU_MOTOROLA) && !defined(M68K_MMU_HP)
    141 #error Hit the road, Jack...
    142 #endif
    143 
    144 #ifdef DEBUG
    145 #define PDB_FOLLOW	0x0001
    146 #define PDB_INIT	0x0002
    147 #define PDB_ENTER	0x0004
    148 #define PDB_REMOVE	0x0008
    149 #define PDB_CREATE	0x0010
    150 #define PDB_PTPAGE	0x0020
    151 #define PDB_CACHE	0x0040
    152 #define PDB_BITS	0x0080
    153 #define PDB_COLLECT	0x0100
    154 #define PDB_PROTECT	0x0200
    155 #define PDB_SEGTAB	0x0400
    156 #define PDB_MULTIMAP	0x0800
    157 #define PDB_PARANOIA	0x2000
    158 #define PDB_WIRING	0x4000
    159 #define PDB_PVDUMP	0x8000
    160 
    161 int debugmap = 0;
    162 int pmapdebug = PDB_PARANOIA;
    163 
    164 #define	PMAP_DPRINTF(l, x)	if (pmapdebug & (l)) printf x
    165 #else /* ! DEBUG */
    166 #define	PMAP_DPRINTF(l, x)	/* nothing */
    167 #endif /* DEBUG */
    168 
    169 /*
    170  * Get STEs and PTEs for user/kernel address space
    171  */
    172 #if defined(M68040) || defined(M68060)
    173 #define	pmap_ste1(m, v)	\
    174 	(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
    175 /* XXX assumes physically contiguous ST pages (if more than one) */
    176 #define pmap_ste2(m, v) \
    177 	(&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
    178 			- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
    179 #if defined(M68020) || defined(M68030)
    180 #define	pmap_ste(m, v)	\
    181 	(&((m)->pm_stab[(vaddr_t)(v) \
    182 			>> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
    183 #define pmap_ste_v(m, v) \
    184 	(mmutype == MMU_68040 \
    185 	 ? ((*pmap_ste1(m, v) & SG_V) && \
    186 	    (*pmap_ste2(m, v) & SG_V)) \
    187 	 : (*pmap_ste(m, v) & SG_V))
    188 #else
    189 #define	pmap_ste(m, v)	\
    190 	(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
    191 #define pmap_ste_v(m, v) \
    192 	((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
    193 #endif
    194 #else
    195 #define	pmap_ste(m, v)	 (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
    196 #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
    197 #endif
    198 
    199 #define pmap_pte(m, v)	(&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
    200 #define pmap_pte_pa(pte)	(*(pte) & PG_FRAME)
    201 #define pmap_pte_w(pte)		(*(pte) & PG_W)
    202 #define pmap_pte_ci(pte)	(*(pte) & PG_CI)
    203 #define pmap_pte_m(pte)		(*(pte) & PG_M)
    204 #define pmap_pte_u(pte)		(*(pte) & PG_U)
    205 #define pmap_pte_prot(pte)	(*(pte) & PG_PROT)
    206 #define pmap_pte_v(pte)		(*(pte) & PG_V)
    207 
    208 #define pmap_pte_set_w(pte, v) \
    209 	if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
    210 #define pmap_pte_set_prot(pte, v) \
    211 	if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
    212 #define pmap_pte_w_chg(pte, nw)		((nw) ^ pmap_pte_w(pte))
    213 #define pmap_pte_prot_chg(pte, np)	((np) ^ pmap_pte_prot(pte))
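
/*
 * Illustrative sketch, not compiled: how the macros above combine to
 * look up a mapping; pmap_extract() later in this file performs
 * essentially this walk.  The example_* name is hypothetical.
 */
#if 0
static inline bool
example_lookup(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	pt_entry_t *pte;

	if (!pmap_ste_v(pmap, va))	/* segment table entry valid? */
		return false;
	pte = pmap_pte(pmap, va);	/* index the page table */
	if (!pmap_pte_v(pte))		/* page table entry valid? */
		return false;
	*pap = pmap_pte_pa(pte) | (va & ~PG_FRAME);
	return true;
}
#endif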
    214 
    215 /*
    216  * Given a map and a machine independent protection code,
    217  * convert to an m68k protection code.
    218  */
    219 #define pte_prot(m, p)	(protection_codes[p])
    220 static u_int protection_codes[8];
    221 
    222 /*
    223  * Kernel page table page management.
    224  */
    225 struct kpt_page {
    226 	struct kpt_page *kpt_next;	/* link on either used or free list */
    227 	vaddr_t		kpt_va;		/* always valid kernel VA */
    228 	paddr_t		kpt_pa;		/* PA of this page (for speed) */
    229 };
    230 struct kpt_page *kpt_free_list, *kpt_used_list;
    231 struct kpt_page *kpt_pages;
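
/*
 * Illustrative sketch, not compiled: roughly how a kernel PT page moves
 * from the free list to the used list when one is needed (the real work
 * is done in pmap_enter_ptpage() at splvm).  The example_* name is
 * hypothetical.
 */
#if 0
static struct kpt_page *
example_kpt_alloc(void)
{
	struct kpt_page *kpt = kpt_free_list;

	if (kpt != NULL) {
		kpt_free_list = kpt->kpt_next;	/* pop from the free list */
		kpt->kpt_next = kpt_used_list;	/* push onto the used list */
		kpt_used_list = kpt;
	}
	return kpt;
}
#endif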
    232 
    233 /*
    234  * Kernel segment/page table and page table map.
    235  * The page table map gives us a level of indirection we need to dynamically
    236  * expand the page table.  It is essentially a copy of the segment table
    237  * with PTEs instead of STEs.  All are initialized in locore at boot time.
    238  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
    239  *	Segtabzero is an empty segment table which all processes share until they
    240  * reference something.
    241  */
    242 paddr_t		Sysseg_pa;
    243 st_entry_t	*Sysseg;
    244 pt_entry_t	*Sysmap, *Sysptmap;
    245 st_entry_t	*Segtabzero, *Segtabzeropa;
    246 vsize_t		Sysptsize = VM_KERNEL_PT_PAGES;
    247 
    248 static struct pmap kernel_pmap_store;
    249 struct pmap	*const kernel_pmap_ptr = &kernel_pmap_store;
    250 struct vm_map	*st_map, *pt_map;
    251 struct vm_map st_map_store, pt_map_store;
    252 
    253 vaddr_t		lwp0uarea;	/* lwp0 u-area VA, initialized in bootstrap */
    254 
    255 paddr_t		avail_start;	/* PA of first available physical page */
    256 paddr_t		avail_end;	/* PA of last available physical page */
    257 vaddr_t		virtual_avail;  /* VA of first avail page (after kernel bss)*/
    258 vaddr_t		virtual_end;	/* VA of last avail page (end of kernel AS) */
    259 int		page_cnt;	/* number of pages managed by VM system */
    260 
    261 bool		pmap_initialized = false;	/* Has pmap_init completed? */
    262 
    263 vaddr_t		m68k_uptbase = M68K_PTBASE;
    264 
    265 struct pv_header {
    266 	struct pv_entry		pvh_first;	/* first PV entry */
    267 	uint32_t		pvh_attrs;	/* attributes:
    268 						   bits 0-7: PTE bits
    269 						   bits 8-15: flags */
    270 };
    271 
    272 #define	PVH_CI		0x10	/* all entries are cache-inhibited */
    273 #define	PVH_PTPAGE	0x20	/* entry maps a page table page */
    274 
    275 struct pv_header *pv_table;
    276 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
    277 int		pv_nfree;
    278 
    279 #ifdef CACHE_HAVE_VAC
    280 static u_int	pmap_aliasmask;	/* separation at which VA aliasing ok */
    281 #endif
    282 #if defined(M68040) || defined(M68060)
    283 u_int		protostfree;	/* prototype (default) free ST map */
    284 #endif
    285 
    286 pt_entry_t	*caddr1_pte;	/* PTE for CADDR1 */
    287 pt_entry_t	*caddr2_pte;	/* PTE for CADDR2 */
    288 
    289 struct pool	pmap_pmap_pool;	/* memory pool for pmap structures */
    290 struct pool	pmap_pv_pool;	/* memory pool for pv entries */
    291 
    292 #define pmap_alloc_pv()		pool_get(&pmap_pv_pool, PR_NOWAIT)
    293 #define pmap_free_pv(pv)	pool_put(&pmap_pv_pool, (pv))
    294 
    295 #define	PAGE_IS_MANAGED(pa)	(pmap_initialized && uvm_pageismanaged(pa))
    296 
    297 static inline struct pv_header *
    298 pa_to_pvh(paddr_t pa)
    299 {
    300 	uvm_physseg_t bank = 0;	/* XXX gcc4 -Wuninitialized */
    301 	psize_t pg = 0;
    302 
    303 	bank = uvm_physseg_find(atop((pa)), &pg);
    304 	return &uvm_physseg_get_pmseg(bank)->pvheader[pg];
    305 }
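
/*
 * Illustrative sketch, not compiled: walking the PV chain for a managed
 * page.  An empty chain is marked by a NULL pv_pmap in the header entry;
 * real callers hold splvm() while the chain is examined.  The example_*
 * name is hypothetical.
 */
#if 0
static inline int
example_count_mappings(paddr_t pa)
{
	struct pv_header *pvh = pa_to_pvh(pa);
	struct pv_entry *pv = &pvh->pvh_first;
	int n = 0;

	if (pv->pv_pmap == NULL)
		return 0;
	for (; pv != NULL; pv = pv->pv_next)
		n++;
	return n;
}
#endif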
    306 
    307 /*
    308  * Internal routines
    309  */
    310 void	pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int,
    311 			    struct pv_entry **);
    312 bool	pmap_testbit(paddr_t, int);
    313 bool	pmap_changebit(paddr_t, pt_entry_t, pt_entry_t);
    314 int	pmap_enter_ptpage(pmap_t, vaddr_t, bool);
    315 void	pmap_ptpage_addref(vaddr_t);
    316 int	pmap_ptpage_delref(vaddr_t);
    317 void	pmap_pinit(pmap_t);
    318 void	pmap_release(pmap_t);
    319 
    320 #ifdef DEBUG
    321 void pmap_pvdump(paddr_t);
    322 void pmap_check_wiring(const char *, vaddr_t);
    323 #endif
    324 
    325 /* pmap_remove_mapping flags */
    326 #define	PRM_TFLUSH	0x01
    327 #define	PRM_CFLUSH	0x02
    328 #define	PRM_KEEPPTPAGE	0x04
    329 
    330 #define	active_pmap(pm) \
    331 	((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
    332 
    333 #define	active_user_pmap(pm) \
    334 	(curproc && \
    335 	 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
    336 
    337 static void (*pmap_load_urp_func)(paddr_t);
    338 
    339 /*
    340  * pmap_load_urp:
    341  *
    342  *	Load the user root table into the MMU.
    343  */
    344 static inline void
    345 pmap_load_urp(paddr_t urp)
    346 {
    347 	(*pmap_load_urp_func)(urp);
    348 }
    349 
    350 #ifdef CACHE_HAVE_VAC
    351 /*
    352  * pmap_init_vac:
    353  *
    354  *	Set up virtually-addressed cache information.  Only relevant
    355  *	for the HP MMU.
    356  */
    357 void
    358 pmap_init_vac(size_t vacsize)
    359 {
    360 	KASSERT(pmap_aliasmask == 0);
    361 	KASSERT(powerof2(vacsize));
    362 	pmap_aliasmask = vacsize - 1;
    363 }
    364 #endif /* CACHE_HAVE_VAC */
    365 
    366 /*
    367  * pmap_bootstrap2:		[ INTERFACE ]
    368  *
    369  *	Phase 2 of pmap bootstrap.  (Phase 1 is system-specific.)
    370  *
    371  *	Initialize lwp0 uarea, curlwp, and curpcb after MMU is turned on,
    372  *	using lwp0uarea variable saved during pmap_bootstrap().
    373  */
    374 void *
    375 pmap_bootstrap2(void)
    376 {
    377 
    378 	uvmexp.pagesize = NBPG;
    379 	uvm_md_init();
    380 
    381 	/*
    382 	 * Initialize protection array.
    383 	 * XXX: Could this have port specific values? Can't this be static?
    384 	 */
    385 	protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE]     = 0;
    386 	protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE]     = PG_RO;
    387 	protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE]  = PG_RO;
    388 	protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE]  = PG_RO;
    389 	protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE]    = PG_RW;
    390 	protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
    391 	protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE]    = PG_RW;
    392 	protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
    393 
    394 	/*
    395 	 * Initialize pmap_kernel().
    396 	 */
    397 	pmap_kernel()->pm_stpa = (st_entry_t *)Sysseg_pa;
    398 	pmap_kernel()->pm_stab = Sysseg;
    399 	pmap_kernel()->pm_ptab = Sysmap;
    400 #if defined(M68040) || defined(M68060)
    401 	if (mmutype == MMU_68040)
    402 		pmap_kernel()->pm_stfree = protostfree;
    403 #endif
    404 	pmap_kernel()->pm_count = 1;
    405 
    406 	/*
    407 	 * Initialize lwp0 uarea, curlwp, and curpcb.
    408 	 */
    409 	memset((void *)lwp0uarea, 0, USPACE);
    410 	uvm_lwp_setuarea(&lwp0, lwp0uarea);
    411 	curlwp = &lwp0;
    412 	curpcb = lwp_getpcb(&lwp0);
    413 
    414 	return (void *)lwp0uarea;
    415 }
    416 
    417 /*
    418  * pmap_virtual_space:		[ INTERFACE ]
    419  *
    420  *	Report the range of available kernel virtual address
    421  *	space to the VM system during bootstrap.
    422  *
    423  *	This is only an interface function if we do not use
    424  *	pmap_steal_memory()!
    425  *
    426  *	Note: no locking is necessary in this function.
    427  */
    428 void
    429 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
    430 {
    431 
    432 	*vstartp = virtual_avail;
    433 	*vendp = virtual_end;
    434 }
    435 
    436 /*
    437  * pmap_init:			[ INTERFACE ]
    438  *
    439  *	Initialize the pmap module.  Called by vm_init(), to initialize any
    440  *	structures that the pmap system needs to map virtual memory.
    441  *
    442  *	Note: no locking is necessary in this function.
    443  */
    444 void
    445 pmap_init(void)
    446 {
    447 	vaddr_t		addr, addr2;
    448 	vsize_t		s;
    449 	struct pv_header *pvh;
    450 	int		rv;
    451 	int		npages;
    452 	uvm_physseg_t	bank;
    453 
    454 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
    455 
    456 	/*
    457 	 * Before we do anything else, initialize the PTE pointers
    458 	 * used by pmap_zero_page() and pmap_copy_page().
    459 	 */
    460 	caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
    461 	caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
    462 
    463 	PMAP_DPRINTF(PDB_INIT,
    464 	    ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
    465 	    Sysseg, Sysmap, Sysptmap));
    466 	PMAP_DPRINTF(PDB_INIT,
    467 	    ("  pstart %lx, pend %lx, vstart %lx, vend %lx\n",
    468 	    avail_start, avail_end, virtual_avail, virtual_end));
    469 
    470 	/*
    471 	 * Allocate memory for random pmap data structures.  Includes the
    472 	 * initial segment table, pv_head_table and pmap_attributes.
    473 	 */
    474 	for (page_cnt = 0, bank = uvm_physseg_get_first();
    475 	     uvm_physseg_valid_p(bank);
    476 	     bank = uvm_physseg_get_next(bank))
    477 		page_cnt += uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
    478 	s = M68K_STSIZE;					/* Segtabzero */
    479 	s += page_cnt * sizeof(struct pv_header);	/* pv table */
    480 	s = round_page(s);
    481 	addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
    482 	if (addr == 0)
    483 		panic("pmap_init: can't allocate data structures");
    484 
    485 	Segtabzero = (st_entry_t *)addr;
    486 	(void)pmap_extract(pmap_kernel(), addr,
    487 	    (paddr_t *)(void *)&Segtabzeropa);
    488 	addr += M68K_STSIZE;
    489 
    490 	pv_table = (struct pv_header *) addr;
    491 	addr += page_cnt * sizeof(struct pv_header);
    492 
    493 	PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
    494 	    "tbl %p\n",
    495 	    s, page_cnt, Segtabzero, Segtabzeropa,
    496 	    pv_table));
    497 
    498 	/*
    499 	 * Now that the pv and attribute tables have been allocated,
    500 	 * assign them to the memory segments.
    501 	 */
    502 	pvh = pv_table;
    503 	for (bank = uvm_physseg_get_first();
    504 	     uvm_physseg_valid_p(bank);
    505 	     bank = uvm_physseg_get_next(bank)) {
    506 		npages = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
    507 		uvm_physseg_get_pmseg(bank)->pvheader = pvh;
    508 		pvh += npages;
    509 	}
    510 
    511 	/*
    512 	 * Allocate physical memory for kernel PT pages and their management.
    513 	 * We need 1 PT page per possible task plus some slop.
    514 	 */
    515 	npages = uimin(atop(M68K_MAX_KPTSIZE), maxproc+16);
    516 	s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
    517 
    518 	/*
    519 	 * Verify that space will be allocated in region for which
    520 	 * we already have kernel PT pages.
    521 	 */
    522 	addr = 0;
    523 	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
    524 	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
    525 	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
    526 	if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
    527 		panic("pmap_init: kernel PT too small");
    528 	uvm_unmap(kernel_map, addr, addr + s);
    529 
    530 	/*
    531 	 * Now allocate the space and link the pages together to
    532 	 * form the KPT free list.
    533 	 */
    534 	addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
    535 	if (addr == 0)
    536 		panic("pmap_init: cannot allocate KPT free list");
    537 	s = ptoa(npages);
    538 	addr2 = addr + s;
    539 	kpt_pages = &((struct kpt_page *)addr2)[npages];
    540 	kpt_free_list = NULL;
    541 	do {
    542 		addr2 -= PAGE_SIZE;
    543 		(--kpt_pages)->kpt_next = kpt_free_list;
    544 		kpt_free_list = kpt_pages;
    545 		kpt_pages->kpt_va = addr2;
    546 		(void) pmap_extract(pmap_kernel(), addr2,
    547 		    (paddr_t *)&kpt_pages->kpt_pa);
    548 	} while (addr != addr2);
    549 
    550 	PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
    551 	    atop(s), addr, addr + s));
    552 
    553 	/*
    554 	 * Allocate the segment table map and the page table map.
    555 	 */
    556 	s = maxproc * M68K_STSIZE;
    557 	st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, false,
    558 	    &st_map_store);
    559 
    560 	addr = m68k_uptbase;
    561 	if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
    562 		s = M68K_PTMAXSIZE;
    563 		/*
    564 		 * XXX We don't want to hang when we run out of
    565 		 * page tables, so we lower maxproc so that fork()
    566 		 * will fail instead.  Note that root could still raise
    567 		 * this value via sysctl(3).
    568 		 */
    569 		maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
    570 	} else
    571 		s = (maxproc * M68K_MAX_PTSIZE);
    572 	pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
    573 	    true, &pt_map_store);
    574 
    575 #if defined(M68040) || defined(M68060)
    576 	if (mmutype == MMU_68040) {
    577 		protostfree = ~l2tobm(0);
    578 		for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
    579 			protostfree &= ~l2tobm(rv);
    580 	}
    581 #endif
    582 
    583 	/*
    584 	 * Initialize the pmap pools.
    585 	 */
    586 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
    587 	    &pool_allocator_nointr, IPL_NONE);
    588 
    589 	/*
    590 	 * Initialize the pv_entry pools.
    591 	 */
    592 	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
    593 	    &pool_allocator_meta, IPL_NONE);
    594 
    595 	/*
    596 	 * Now that this is done, mark the pages shared with the
    597 	 * hardware page table search as non-CCB (actually, as CI).
    598 	 *
    599 	 * XXX Hm. Given that this is in the kernel map, can't we just
    600 	 * use the va's?
    601 	 */
    602 #ifdef M68060
    603 #if defined(M68020) || defined(M68030) || defined(M68040)
    604 	if (cputype == CPU_68060)
    605 #endif
    606 	{
    607 		struct kpt_page *kptp = kpt_free_list;
    608 		paddr_t paddr;
    609 
    610 		while (kptp) {
    611 			pmap_changebit(kptp->kpt_pa, PG_CI,
    612 				       (pt_entry_t)~PG_CCB);
    613 			kptp = kptp->kpt_next;
    614 		}
    615 
    616 		paddr = (paddr_t)Segtabzeropa;
    617 		while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
    618 			pmap_changebit(paddr, PG_CI,
    619 				       (pt_entry_t)~PG_CCB);
    620 			paddr += PAGE_SIZE;
    621 		}
    622 
    623 		DCIS();
    624 	}
    625 #endif
    626 
    627 	/*
    628 	 * Set up the routine that loads the MMU root table pointer.
    629 	 */
    630 	switch (cputype) {
    631 #if defined(M68020)
    632 	case CPU_68020:
    633 #ifdef M68K_MMU_MOTOROLA
    634 		if (mmutype == MMU_68851) {
    635 			protorp[0] = MMU51_CRP_BITS;
    636 			pmap_load_urp_func = mmu_load_urp51;
    637 		}
    638 #endif
    639 #ifdef M68K_MMU_HP
    640 		if (mmutype == MMU_HP) {
    641 			pmap_load_urp_func = mmu_load_urp20hp;
    642 		}
    643 #endif
    644 		break;
    645 #endif /* M68020 */
    646 #if defined(M68030)
    647 	case CPU_68030:
    648 		protorp[0] = MMU51_CRP_BITS;
    649 		pmap_load_urp_func = mmu_load_urp51;
    650 		break;
    651 #endif /* M68030 */
    652 #if defined(M68040)
    653 	case CPU_68040:
    654 		pmap_load_urp_func = mmu_load_urp40;
    655 		break;
    656 #endif /* M68040 */
    657 #if defined(M68060)
    658 	case CPU_68060:
    659 		pmap_load_urp_func = mmu_load_urp60;
    660 		break;
    661 #endif /* M68060 */
    662 	default:
    663 		break;
    664 	}
    665 	if (pmap_load_urp_func == NULL) {
    666 		panic("pmap_init: No mmu_load_*() for cpu=%d mmu=%d",
    667 		    cputype, mmutype);
    668 	}
    669 
    670 	/*
    671 	 * Now it is safe to enable pv_table recording.
    672 	 */
    673 	pmap_initialized = true;
    674 }
    675 
    676 /*
    677  * pmap_create:			[ INTERFACE ]
    678  *
    679  *	Create and return a physical map.
    680  *
    681  *	Note: no locking is necessary in this function.
    682  */
    683 pmap_t
    684 pmap_create(void)
    685 {
    686 	struct pmap *pmap;
    687 
    688 	PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
    689 	    ("pmap_create()\n"));
    690 
    691 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
    692 	memset(pmap, 0, sizeof(*pmap));
    693 	pmap_pinit(pmap);
    694 	return pmap;
    695 }
    696 
    697 /*
    698  * pmap_pinit:
    699  *
    700  *	Initialize a preallocated and zeroed pmap structure.
    701  *
    702  *	Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
    703  */
    704 void
    705 pmap_pinit(struct pmap *pmap)
    706 {
    707 
    708 	PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
    709 	    ("pmap_pinit(%p)\n", pmap));
    710 
    711 	/*
    712 	 * No need to allocate page table space yet but we do need a
    713 	 * valid segment table.  Initially, we point everyone at the
    714 	 * "null" segment table.  On the first pmap_enter, a real
    715 	 * segment table will be allocated.
    716 	 */
    717 	pmap->pm_stab = Segtabzero;
    718 	pmap->pm_stpa = Segtabzeropa;
    719 #if defined(M68040) || defined(M68060)
    720 #if defined(M68020) || defined(M68030)
    721 	if (mmutype == MMU_68040)
    722 #endif
    723 		pmap->pm_stfree = protostfree;
    724 #endif
    725 	pmap->pm_count = 1;
    726 }
    727 
    728 /*
    729  * pmap_destroy:		[ INTERFACE ]
    730  *
    731  *	Drop the reference count on the specified pmap, releasing
    732  *	all resources if the reference count drops to zero.
    733  */
    734 void
    735 pmap_destroy(pmap_t pmap)
    736 {
    737 	int count;
    738 
    739 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
    740 
    741 	count = atomic_dec_uint_nv(&pmap->pm_count);
    742 	if (count == 0) {
    743 		pmap_release(pmap);
    744 		pool_put(&pmap_pmap_pool, pmap);
    745 	}
    746 }
    747 
    748 /*
    749  * pmap_release:
    750  *
    751  *	Release the resources held by a pmap.
    752  *
    753  *	Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
    754  */
    755 void
    756 pmap_release(pmap_t pmap)
    757 {
    758 
    759 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
    760 
    761 #ifdef notdef /* DIAGNOSTIC */
    762 	/* count would be 0 from pmap_destroy... */
    763 	if (pmap->pm_count != 1)
    764 		panic("pmap_release count");
    765 #endif
    766 
    767 	if (pmap->pm_ptab) {
    768 		pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
    769 		    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
    770 		uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
    771 		    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
    772 		uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
    773 		    M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
    774 	}
    775 	KASSERT(pmap->pm_stab == Segtabzero);
    776 }
    777 
    778 /*
    779  * pmap_reference:		[ INTERFACE ]
    780  *
    781  *	Add a reference to the specified pmap.
    782  */
    783 void
    784 pmap_reference(pmap_t pmap)
    785 {
    786 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
    787 
    788 	atomic_inc_uint(&pmap->pm_count);
    789 }
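
/*
 * Illustrative sketch, not compiled: the reference-counting lifecycle
 * implemented by pmap_create(), pmap_reference() and pmap_destroy()
 * above.  The example_* name is hypothetical.
 */
#if 0
static void
example_pmap_lifecycle(void)
{
	pmap_t pm = pmap_create();	/* pm_count == 1 */

	pmap_reference(pm);		/* pm_count == 2, e.g. a shared vmspace */
	pmap_destroy(pm);		/* pm_count == 1, nothing released yet */
	pmap_destroy(pm);		/* pm_count == 0, resources released */
}
#endif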
    790 
    791 /*
    792  * pmap_activate:		[ INTERFACE ]
    793  *
    794  *	Activate the pmap used by the specified process.  This includes
    795  *	reloading the MMU context if it is the current process, and marking
    796  *	the pmap in use by the processor.
    797  *
    798  *	Note: we may only use spin locks here, since we are called
    799  *	by a critical section in cpu_switch()!
    800  */
    801 void
    802 pmap_activate(struct lwp *l)
    803 {
    804 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
    805 
    806 	PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
    807 	    ("pmap_activate(%p)\n", l));
    808 
    809 	KASSERT(l == curlwp);
    810 
    811 	/*
    812 	 * Because the kernel has a separate root pointer, we don't
    813 	 * need to activate the kernel pmap.
    814 	 */
    815 	if (pmap != pmap_kernel()) {
    816 		pmap_load_urp((paddr_t)pmap->pm_stpa);
    817 	}
    818 }
    819 
    820 /*
    821  * pmap_deactivate:		[ INTERFACE ]
    822  *
    823  *	Mark that the pmap used by the specified process is no longer
    824  *	in use by the processor.
    825  *
    826  *	The comment above pmap_activate() wrt. locking applies here,
    827  *	as well.
    828  */
    829 void
    830 pmap_deactivate(struct lwp *l)
    831 {
    832 
    833 	/* No action necessary in this pmap implementation. */
    834 }
    835 
    836 /*
    837  * pmap_remove:			[ INTERFACE ]
    838  *
    839  *	Remove the given range of addresses from the specified map.
    840  *
    841  *	It is assumed that the start and end are properly
    842  *	rounded to the page size.
    843  */
    844 void
    845 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
    846 {
    847 	vaddr_t nssva;
    848 	pt_entry_t *pte;
    849 	int flags;
    850 #ifdef CACHE_HAVE_VAC
    851 	bool firstpage = true, needcflush = false;
    852 #endif
    853 
    854 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
    855 	    ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
    856 
    857 	flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
    858 	while (sva < eva) {
    859 		nssva = m68k_trunc_seg(sva) + NBSEG;
    860 		if (nssva == 0 || nssva > eva)
    861 			nssva = eva;
    862 
    863 		/*
    864 		 * Invalidate every valid mapping within this segment.
    865 		 */
    866 
    867 		pte = pmap_pte(pmap, sva);
    868 		while (sva < nssva) {
    869 
    870 			/*
    871 			 * If this segment is unallocated,
    872 			 * skip to the next segment boundary.
    873 			 */
    874 
    875 			if (!pmap_ste_v(pmap, sva)) {
    876 				sva = nssva;
    877 				break;
    878 			}
    879 
    880 			if (pmap_pte_v(pte)) {
    881 #ifdef CACHE_HAVE_VAC
    882 				if (pmap_aliasmask) {
    883 
    884 					/*
    885 					 * Purge kernel side of VAC to ensure
    886 					 * we get the correct state of any
    887 					 * hardware maintained bits.
    888 					 */
    889 
    890 					if (firstpage) {
    891 						DCIS();
    892 					}
    893 
    894 					/*
    895 					 * Remember if we may need to
    896 					 * flush the VAC due to a non-CI
    897 					 * mapping.
    898 					 */
    899 
    900 					if (!needcflush && !pmap_pte_ci(pte))
    901 						needcflush = true;
    902 
    903 				}
    904 				firstpage = false;
    905 #endif
    906 				pmap_remove_mapping(pmap, sva, pte, flags, NULL);
    907 			}
    908 			pte++;
    909 			sva += PAGE_SIZE;
    910 		}
    911 	}
    912 
    913 #ifdef CACHE_HAVE_VAC
    914 
    915 	/*
    916 	 * Didn't do anything, no need for cache flushes
    917 	 */
    918 
    919 	if (firstpage)
    920 		return;
    921 
    922 	/*
    923 	 * In a couple of cases, we don't need to worry about flushing
    924 	 * the VAC:
    925 	 *	1. if this is a kernel mapping,
    926 	 *	   we have already done it
    927 	 *	2. if it is a user mapping not for the current process,
    928 	 *	   it won't be there
    929 	 */
    930 
    931 	if (pmap_aliasmask && !active_user_pmap(pmap))
    932 		needcflush = false;
    933 	if (needcflush) {
    934 		if (pmap == pmap_kernel()) {
    935 			DCIS();
    936 		} else {
    937 			DCIU();
    938 		}
    939 	}
    940 #endif
    941 }
    942 
    943 /*
    944  * pmap_page_protect:		[ INTERFACE ]
    945  *
    946  *	Lower the permission for all mappings to a given page to
    947  *	the permissions specified.
    948  */
    949 void
    950 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
    951 {
    952 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
    953 	struct pv_header *pvh;
    954 	struct pv_entry *pv;
    955 	pt_entry_t *pte;
    956 	int s;
    957 
    958 #ifdef DEBUG
    959 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
    960 	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
    961 		printf("pmap_page_protect(%p, %x)\n", pg, prot);
    962 #endif
    963 
    964 	switch (prot) {
    965 	case VM_PROT_READ|VM_PROT_WRITE:
    966 	case VM_PROT_ALL:
    967 		return;
    968 
    969 	/* copy_on_write */
    970 	case VM_PROT_READ:
    971 	case VM_PROT_READ|VM_PROT_EXECUTE:
    972 		pmap_changebit(pa, PG_RO, ~0);
    973 		return;
    974 
    975 	/* remove_all */
    976 	default:
    977 		break;
    978 	}
    979 
    980 	pvh = pa_to_pvh(pa);
    981 	pv = &pvh->pvh_first;
    982 	s = splvm();
    983 	while (pv->pv_pmap != NULL) {
    984 
    985 		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
    986 #ifdef DEBUG
    987 		if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
    988 		    pmap_pte_pa(pte) != pa)
    989 			panic("pmap_page_protect: bad mapping");
    990 #endif
    991 		pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
    992 		    pte, PRM_TFLUSH|PRM_CFLUSH, NULL);
    993 	}
    994 	splx(s);
    995 }
    996 
    997 /*
    998  * pmap_protect:		[ INTERFACE ]
    999  *
   1000  *	Set the physical protection on the specified range of this map
   1001  *	as requested.
   1002  */
   1003 void
   1004 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   1005 {
   1006 	vaddr_t nssva;
   1007 	pt_entry_t *pte;
   1008 	bool firstpage __unused, needtflush;
   1009 	int isro;
   1010 
   1011 	PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
   1012 	    ("pmap_protect(%p, %lx, %lx, %x)\n",
   1013 	    pmap, sva, eva, prot));
   1014 
   1015 #ifdef PMAPSTATS
   1016 	protect_stats.calls++;
   1017 #endif
   1018 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
   1019 		pmap_remove(pmap, sva, eva);
   1020 		return;
   1021 	}
   1022 	isro = pte_prot(pmap, prot);
   1023 	needtflush = active_pmap(pmap);
   1024 	firstpage = true;
   1025 	while (sva < eva) {
   1026 		nssva = m68k_trunc_seg(sva) + NBSEG;
   1027 		if (nssva == 0 || nssva > eva)
   1028 			nssva = eva;
   1029 
   1030 		/*
   1031 		 * If VA belongs to an unallocated segment,
   1032 		 * skip to the next segment boundary.
   1033 		 */
   1034 
   1035 		if (!pmap_ste_v(pmap, sva)) {
   1036 			sva = nssva;
   1037 			continue;
   1038 		}
   1039 
   1040 		/*
   1041 		 * Change protection on mapping if it is valid and doesn't
   1042 		 * already have the correct protection.
   1043 		 */
   1044 
   1045 		pte = pmap_pte(pmap, sva);
   1046 		while (sva < nssva) {
   1047 			if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
   1048 #ifdef CACHE_HAVE_VAC
   1049 
   1050 				/*
   1051 				 * Purge kernel side of VAC to ensure we
   1052 				 * get the correct state of any hardware
   1053 				 * maintained bits.
   1054 				 *
   1055 				 * XXX do we need to clear the VAC in
   1056 				 * general to reflect the new protection?
   1057 				 */
   1058 
   1059 				if (firstpage && pmap_aliasmask)
   1060 					DCIS();
   1061 #endif
   1062 
   1063 #if defined(M68040) || defined(M68060)
   1064 
   1065 				/*
   1066 				 * Clear caches if making RO (see section
   1067 				 * "7.3 Cache Coherency" in the manual).
   1068 				 */
   1069 
   1070 #if defined(M68020) || defined(M68030)
   1071 				if (isro && mmutype == MMU_68040)
   1072 #else
   1073 				if (isro)
   1074 #endif
   1075 				{
   1076 					paddr_t pa = pmap_pte_pa(pte);
   1077 
   1078 					DCFP(pa);
   1079 					ICPP(pa);
   1080 				}
   1081 #endif
   1082 				pmap_pte_set_prot(pte, isro);
   1083 				if (needtflush)
   1084 					TBIS(sva);
   1085 				firstpage = false;
   1086 			}
   1087 			pte++;
   1088 			sva += PAGE_SIZE;
   1089 		}
   1090 	}
   1091 }
   1092 
   1093 /*
   1094  * pmap_enter:			[ INTERFACE ]
   1095  *
   1096  *	Insert the given physical page (pa) at
   1097  *	the specified virtual address (va) in the
   1098  *	target physical map with the protection requested.
   1099  *
   1100  *	If specified, the page will be wired down, meaning
   1101  *	that the related pte cannot be reclaimed.
   1102  *
   1103  *	Note: This is the only routine which MAY NOT lazy-evaluate
   1104  *	or lose information.  That is, this routine must actually
   1105  *	insert this page into the given map NOW.
   1106  */
   1107 int
   1108 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1109 {
   1110 	pt_entry_t *pte;
   1111 	struct pv_entry *opv = NULL;
   1112 	int npte;
   1113 	paddr_t opa;
   1114 	bool cacheable = true;
   1115 	bool checkpv = true;
   1116 	bool wired = (flags & PMAP_WIRED) != 0;
   1117 	bool can_fail = (flags & PMAP_CANFAIL) != 0;
   1118 
   1119 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
   1120 	    ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
   1121 	    pmap, va, pa, prot, wired));
   1122 
   1123 #ifdef DIAGNOSTIC
   1124 	/*
   1125 	 * pmap_enter() should never be used for CADDR1 and CADDR2.
   1126 	 */
   1127 	if (pmap == pmap_kernel() &&
   1128 	    (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
   1129 		panic("pmap_enter: used for CADDR1 or CADDR2");
   1130 #endif
   1131 
   1132 	/*
   1133 	 * For user mapping, allocate kernel VM resources if necessary.
   1134 	 */
   1135 	if (pmap->pm_ptab == NULL) {
   1136 		pmap->pm_ptab = (pt_entry_t *)
   1137 		    uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
   1138 		    UVM_KMF_VAONLY |
   1139 		    (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
   1140 		if (pmap->pm_ptab == NULL)
   1141 			return ENOMEM;
   1142 	}
   1143 
   1144 	/*
   1145 	 * Segment table entry not valid, we need a new PT page
   1146 	 */
   1147 	if (!pmap_ste_v(pmap, va)) {
   1148 		int err = pmap_enter_ptpage(pmap, va, can_fail);
   1149 		if (err)
   1150 			return err;
   1151 	}
   1152 
   1153 	pa = m68k_trunc_page(pa);
   1154 	pte = pmap_pte(pmap, va);
   1155 	opa = pmap_pte_pa(pte);
   1156 
   1157 	PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
   1158 
   1159 	/*
   1160 	 * Mapping has not changed, must be protection or wiring change.
   1161 	 */
   1162 	if (opa == pa) {
   1163 		/*
   1164 		 * Wiring change, just update stats.
   1165 		 * We don't worry about wiring PT pages as they remain
   1166 		 * resident as long as there are valid mappings in them.
   1167 		 * Hence, if a user page is wired, the PT page will be also.
   1168 		 */
   1169 		if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
   1170 			PMAP_DPRINTF(PDB_ENTER,
   1171 			    ("enter: wiring change -> %x\n", wired));
   1172 			if (wired)
   1173 				pmap->pm_stats.wired_count++;
   1174 			else
   1175 				pmap->pm_stats.wired_count--;
   1176 		}
   1177 		/*
   1178 		 * Retain cache inhibition status
   1179 		 */
   1180 		checkpv = false;
   1181 		if (pmap_pte_ci(pte))
   1182 			cacheable = false;
   1183 		goto validate;
   1184 	}
   1185 
   1186 	/*
   1187 	 * Mapping has changed, invalidate old range and fall through to
   1188 	 * handle validating new mapping.
   1189 	 */
   1190 	if (opa) {
   1191 		PMAP_DPRINTF(PDB_ENTER,
   1192 		    ("enter: removing old mapping %lx\n", va));
   1193 		pmap_remove_mapping(pmap, va, pte,
   1194 		    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE, &opv);
   1195 	}
   1196 
   1197 	/*
   1198 	 * If this is a new user mapping, increment the wiring count
   1199 	 * on this PT page.  PT pages are wired down as long as there
   1200 	 * is a valid mapping in the page.
   1201 	 */
   1202 	if (pmap != pmap_kernel())
   1203 		pmap_ptpage_addref(trunc_page((vaddr_t)pte));
   1204 
   1205 	/*
   1206 	 * Enter on the PV list if part of our managed memory
   1207 	 * Note that we raise IPL while manipulating pv_table
   1208 	 * since pmap_enter can be called at interrupt time.
   1209 	 */
   1210 	if (PAGE_IS_MANAGED(pa)) {
   1211 		struct pv_header *pvh;
   1212 		struct pv_entry *pv, *npv;
   1213 		int s;
   1214 
   1215 		pvh = pa_to_pvh(pa);
   1216 		pv = &pvh->pvh_first;
   1217 		s = splvm();
   1218 
   1219 		PMAP_DPRINTF(PDB_ENTER,
   1220 		    ("enter: pv at %p: %lx/%p/%p\n",
   1221 		    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
   1222 		/*
   1223 		 * No entries yet, use header as the first entry
   1224 		 */
   1225 		if (pv->pv_pmap == NULL) {
   1226 			pv->pv_va = va;
   1227 			pv->pv_pmap = pmap;
   1228 			pv->pv_next = NULL;
   1229 			pv->pv_ptste = NULL;
   1230 			pv->pv_ptpmap = NULL;
   1231 			pvh->pvh_attrs = 0;
   1232 		}
   1233 		/*
   1234 		 * There is at least one other VA mapping this page.
   1235 		 * Place this entry after the header.
   1236 		 */
   1237 		else {
   1238 #ifdef DEBUG
   1239 			for (npv = pv; npv; npv = npv->pv_next)
   1240 				if (pmap == npv->pv_pmap && va == npv->pv_va)
   1241 					panic("pmap_enter: already in pv_tab");
   1242 #endif
   1243 			if (opv != NULL) {
   1244 				npv = opv;
   1245 				opv = NULL;
   1246 			} else {
   1247 				npv = pmap_alloc_pv();
   1248 			}
   1249 			KASSERT(npv != NULL);
   1250 			npv->pv_va = va;
   1251 			npv->pv_pmap = pmap;
   1252 			npv->pv_next = pv->pv_next;
   1253 			npv->pv_ptste = NULL;
   1254 			npv->pv_ptpmap = NULL;
   1255 			pv->pv_next = npv;
   1256 
   1257 #ifdef CACHE_HAVE_VAC
   1258 
   1259 			/*
   1260 			 * Since there is another logical mapping for the
   1261 			 * same page we may need to cache-inhibit the
   1262 			 * descriptors on those CPUs with external VACs.
   1263 			 * We don't need to CI if:
   1264 			 *
   1265 			 * - No two mappings belong to the same user pmaps.
   1266 			 *   Since the cache is flushed on context switches
   1267 			 *   there is no problem between user processes.
   1268 			 *
   1269 			 * - Mappings within a single pmap are a certain
   1270 			 *   magic distance apart.  VAs at these appropriate
   1271 			 *   boundaries map to the same cache entries or
   1272 			 *   otherwise don't conflict.
   1273 			 *
   1274 			 * To keep it simple, we only check for these special
   1275 			 * cases if there are only two mappings, otherwise we
   1276 			 * punt and always CI.
   1277 			 *
   1278 			 * Note that there are no aliasing problems with the
   1279 			 * on-chip data-cache when the WA bit is set.
   1280 			 */
   1281 
   1282 			if (pmap_aliasmask) {
   1283 				if (pvh->pvh_attrs & PVH_CI) {
   1284 					PMAP_DPRINTF(PDB_CACHE,
   1285 					    ("enter: pa %lx already CI'ed\n",
   1286 					    pa));
   1287 					checkpv = cacheable = false;
   1288 				} else if (npv->pv_next ||
   1289 					   ((pmap == pv->pv_pmap ||
   1290 					     pmap == pmap_kernel() ||
   1291 					     pv->pv_pmap == pmap_kernel()) &&
   1292 					    ((pv->pv_va & pmap_aliasmask) !=
   1293 					     (va & pmap_aliasmask)))) {
   1294 					PMAP_DPRINTF(PDB_CACHE,
   1295 					    ("enter: pa %lx CI'ing all\n",
   1296 					    pa));
   1297 					cacheable = false;
   1298 					pvh->pvh_attrs |= PVH_CI;
   1299 				}
   1300 			}
   1301 #endif
   1302 		}
   1303 
   1304 		/*
   1305 		 * Speed pmap_is_referenced() or pmap_is_modified() based
   1306 		 * on the hint provided in access_type.
   1307 		 */
   1308 #ifdef DIAGNOSTIC
   1309 		if ((flags & VM_PROT_ALL) & ~prot)
   1310 			panic("pmap_enter: access_type exceeds prot");
   1311 #endif
   1312 		if (flags & VM_PROT_WRITE)
   1313 			pvh->pvh_attrs |= (PG_U|PG_M);
   1314 		else if (flags & VM_PROT_ALL)
   1315 			pvh->pvh_attrs |= PG_U;
   1316 
   1317 		splx(s);
   1318 	}
   1319 	/*
   1320 	 * Assumption: if it is not part of our managed memory
   1321  *	then it must be device memory which may be volatile.
   1322 	 */
   1323 	else if (pmap_initialized) {
   1324 		checkpv = cacheable = false;
   1325 	}
   1326 
   1327 	/*
   1328 	 * Increment counters
   1329 	 */
   1330 	pmap->pm_stats.resident_count++;
   1331 	if (wired)
   1332 		pmap->pm_stats.wired_count++;
   1333 
   1334 validate:
   1335 #ifdef CACHE_HAVE_VAC
   1336 	/*
   1337 	 * Purge kernel side of VAC to ensure we get correct state
   1338 	 * of HW bits so we don't clobber them.
   1339 	 */
   1340 	if (pmap_aliasmask)
   1341 		DCIS();
   1342 #endif
   1343 
   1344 	/*
   1345 	 * Build the new PTE.
   1346 	 */
   1347 
   1348 	npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
   1349 	if (wired)
   1350 		npte |= PG_W;
   1351 	if (!checkpv && !cacheable)
   1352 #if defined(M68040) || defined(M68060)
   1353 #if defined(M68020) || defined(M68030)
   1354 		npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
   1355 #else
   1356 		npte |= PG_CIN;
   1357 #endif
   1358 #else
   1359 		npte |= PG_CI;
   1360 #endif
   1361 #if defined(M68040) || defined(M68060)
   1362 #if defined(M68020) || defined(M68030)
   1363 	else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
   1364 #else
   1365 	else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
   1366 #endif
   1367 		npte |= PG_CCB;
   1368 #endif
   1369 
   1370 	PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
   1371 
   1372 	/*
   1373 	 * Remember if this was a wiring-only change.
   1374 	 * If so, we need not flush the TLB and caches.
   1375 	 */
   1376 
   1377 	wired = ((*pte ^ npte) == PG_W);
   1378 #if defined(M68040) || defined(M68060)
   1379 #if defined(M68020) || defined(M68030)
   1380 	if (mmutype == MMU_68040 && !wired)
   1381 #else
   1382 	if (!wired)
   1383 #endif
   1384 	{
   1385 		DCFP(pa);
   1386 		ICPP(pa);
   1387 	}
   1388 #endif
   1389 	*pte = npte;
   1390 	if (!wired && active_pmap(pmap))
   1391 		TBIS(va);
   1392 #ifdef CACHE_HAVE_VAC
   1393 	/*
   1394 	 * The following is executed if we are entering a second
   1395 	 * (or greater) mapping for a physical page and the mappings
   1396 	 * may create an aliasing problem.  In this case we must
   1397 	 * cache inhibit the descriptors involved and flush any
   1398 	 * external VAC.
   1399 	 */
   1400 	if (checkpv && !cacheable) {
   1401 		pmap_changebit(pa, PG_CI, ~0);
   1402 		DCIA();
   1403 #ifdef DEBUG
   1404 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
   1405 		    (PDB_CACHE|PDB_PVDUMP))
   1406 			pmap_pvdump(pa);
   1407 #endif
   1408 	}
   1409 #endif
   1410 #ifdef DEBUG
   1411 	if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
   1412 		pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
   1413 #endif
   1414 
   1415 	if (opv != NULL)
   1416 		pmap_free_pv(opv);
   1417 
   1418 	return 0;
   1419 }
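
/*
 * Illustrative sketch, not compiled: a typical wired, read/write kernel
 * mapping entered through pmap_enter() above, followed by the standard
 * MI pmap_update() call.  The example_* name is hypothetical.
 */
#if 0
static void
example_enter_wired_kernel_page(vaddr_t va, paddr_t pa)
{

	(void)pmap_enter(pmap_kernel(), va, pa,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
	pmap_update(pmap_kernel());
}
#endif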
   1420 
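/*
 * pmap_kenter_pa:		[ INTERFACE ]
 *
 *	Enter an unmanaged, wired mapping for pa at va in the kernel pmap.
 *	No PV entry is recorded, so the mapping is not visible to
 *	pmap_page_protect() and friends, and it must be removed with
 *	pmap_kremove().
 */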
   1421 void
   1422 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1423 {
   1424 	pmap_t pmap = pmap_kernel();
   1425 	pt_entry_t *pte;
   1426 	int s, npte;
   1427 
   1428 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
   1429 	    ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
   1430 
   1431 	/*
   1432 	 * Segment table entry not valid, we need a new PT page
   1433 	 */
   1434 
   1435 	if (!pmap_ste_v(pmap, va)) {
   1436 		s = splvm();
   1437 		pmap_enter_ptpage(pmap, va, false);
   1438 		splx(s);
   1439 	}
   1440 
   1441 	pa = m68k_trunc_page(pa);
   1442 	pte = pmap_pte(pmap, va);
   1443 
   1444 	PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
   1445 	KASSERT(!pmap_pte_v(pte));
   1446 
   1447 	/*
   1448 	 * Increment counters
   1449 	 */
   1450 
   1451 	pmap->pm_stats.resident_count++;
   1452 	pmap->pm_stats.wired_count++;
   1453 
   1454 	/*
   1455 	 * Build the new PTE.
   1456 	 */
   1457 
   1458 	npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
   1459 #if defined(M68040) || defined(M68060)
   1460 #if defined(M68020) || defined(M68030)
   1461 	if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
   1462 #else
   1463 	if ((npte & PG_PROT) == PG_RW)
   1464 #endif
   1465 		npte |= PG_CCB;
   1466 
   1467 	if (mmutype == MMU_68040) {
   1468 		DCFP(pa);
   1469 		ICPP(pa);
   1470 	}
   1471 #endif
   1472 
   1473 	*pte = npte;
   1474 	TBIS(va);
   1475 }
   1476 
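/*
 * pmap_kremove:		[ INTERFACE ]
 *
 *	Remove mappings entered with pmap_kenter_pa(), starting at va and
 *	continuing for size bytes, from the kernel pmap.
 */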
   1477 void
   1478 pmap_kremove(vaddr_t va, vsize_t size)
   1479 {
   1480 	pmap_t pmap = pmap_kernel();
   1481 	pt_entry_t *pte;
   1482 	vaddr_t nssva;
   1483 	vaddr_t eva = va + size;
   1484 #ifdef CACHE_HAVE_VAC
   1485 	bool firstpage, needcflush;
   1486 #endif
   1487 
   1488 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
   1489 	    ("pmap_kremove(%lx, %lx)\n", va, size));
   1490 
   1491 #ifdef CACHE_HAVE_VAC
   1492 	firstpage = true;
   1493 	needcflush = false;
   1494 #endif
   1495 	while (va < eva) {
   1496 		nssva = m68k_trunc_seg(va) + NBSEG;
   1497 		if (nssva == 0 || nssva > eva)
   1498 			nssva = eva;
   1499 
   1500 		/*
   1501 		 * If VA belongs to an unallocated segment,
   1502 		 * skip to the next segment boundary.
   1503 		 */
   1504 
   1505 		if (!pmap_ste_v(pmap, va)) {
   1506 			va = nssva;
   1507 			continue;
   1508 		}
   1509 
   1510 		/*
   1511 		 * Invalidate every valid mapping within this segment.
   1512 		 */
   1513 
   1514 		pte = pmap_pte(pmap, va);
   1515 		while (va < nssva) {
   1516 			if (!pmap_pte_v(pte)) {
   1517 				pte++;
   1518 				va += PAGE_SIZE;
   1519 				continue;
   1520 			}
   1521 #ifdef CACHE_HAVE_VAC
   1522 			if (pmap_aliasmask) {
   1523 
   1524 				/*
   1525 				 * Purge kernel side of VAC to ensure
   1526 				 * we get the correct state of any
   1527 				 * hardware maintained bits.
   1528 				 */
   1529 
   1530 				if (firstpage) {
   1531 					DCIS();
   1532 					firstpage = false;
   1533 				}
   1534 
   1535 				/*
   1536 				 * Remember if we may need to
   1537 				 * flush the VAC.
   1538 				 */
   1539 
   1540 				needcflush = true;
   1541 			}
   1542 #endif
   1543 			pmap->pm_stats.wired_count--;
   1544 			pmap->pm_stats.resident_count--;
   1545 			*pte = PG_NV;
   1546 			TBIS(va);
   1547 			pte++;
   1548 			va += PAGE_SIZE;
   1549 		}
   1550 	}
   1551 
   1552 #ifdef CACHE_HAVE_VAC
   1553 
   1554 	/*
   1555 	 * In a couple of cases, we don't need to worry about flushing
   1556 	 * the VAC:
   1557 	 *	1. if this is a kernel mapping,
   1558 	 *	   we have already done it
   1559 	 *	2. if it is a user mapping not for the current process,
   1560 	 *	   it won't be there
   1561 	 */
   1562 
   1563 	if (pmap_aliasmask && !active_user_pmap(pmap))
   1564 		needcflush = false;
   1565 	if (needcflush) {
   1566 		if (pmap == pmap_kernel()) {
   1567 			DCIS();
   1568 		} else {
   1569 			DCIU();
   1570 		}
   1571 	}
   1572 #endif
   1573 }
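
/*
 * Illustrative sketch, not compiled: pmap_kenter_pa()/pmap_kremove()
 * used as a short-lived kernel mapping window for a single page.  The
 * example_* name is hypothetical.
 */
#if 0
static void
example_kernel_window(vaddr_t va, paddr_t pa)
{

	/* Unmanaged, wired mapping: no PV entry is recorded. */
	pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());

	/* ... access the page through va ... */

	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
}
#endif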
   1574 
   1575 /*
   1576  * pmap_unwire:			[ INTERFACE ]
   1577  *
   1578  *	Clear the wired attribute for a map/virtual-address pair.
   1579  *
   1580  *	The mapping must already exist in the pmap.
   1581  */
   1582 void
   1583 pmap_unwire(pmap_t pmap, vaddr_t va)
   1584 {
   1585 	pt_entry_t *pte;
   1586 
   1587 	PMAP_DPRINTF(PDB_FOLLOW,
   1588 	    ("pmap_unwire(%p, %lx)\n", pmap, va));
   1589 
   1590 	pte = pmap_pte(pmap, va);
   1591 
   1592 	/*
   1593 	 * If wiring actually changed (always?) clear the wire bit and
   1594 	 * update the wire count.  Note that wiring is not a hardware
   1595 	 * characteristic so there is no need to invalidate the TLB.
   1596 	 */
   1597 
   1598 	if (pmap_pte_w_chg(pte, 0)) {
   1599 		pmap_pte_set_w(pte, false);
   1600 		pmap->pm_stats.wired_count--;
   1601 	}
   1602 }
   1603 
   1604 /*
   1605  * pmap_extract:		[ INTERFACE ]
   1606  *
   1607  *	Extract the physical address associated with the given
   1608  *	pmap/virtual address pair.
   1609  */
   1610 bool
   1611 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
   1612 {
   1613 	paddr_t pa;
   1614 	u_int pte;
   1615 
   1616 	PMAP_DPRINTF(PDB_FOLLOW,
   1617 	    ("pmap_extract(%p, %lx) -> ", pmap, va));
   1618 
   1619 	if (pmap_ste_v(pmap, va)) {
   1620 		pte = *(u_int *)pmap_pte(pmap, va);
   1621 		if (pte) {
   1622 			pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
   1623 			if (pap != NULL)
   1624 				*pap = pa;
   1625 #ifdef DEBUG
   1626 			if (pmapdebug & PDB_FOLLOW)
   1627 				printf("%lx\n", pa);
   1628 #endif
   1629 			return true;
   1630 		}
   1631 	}
   1632 #ifdef DEBUG
   1633 	if (pmapdebug & PDB_FOLLOW)
   1634 		printf("failed\n");
   1635 #endif
   1636 	return false;
   1637 }
   1638 
   1639 /*
   1640  * vtophys:		[ INTERFACE-ish ]
   1641  *
   1642  *	Kernel virtual to physical.  Use with caution.
   1643  */
   1644 paddr_t
   1645 vtophys(vaddr_t va)
   1646 {
   1647 	paddr_t pa;
   1648 
   1649 	if (pmap_extract(pmap_kernel(), va, &pa))
   1650 		return pa;
   1651 	KASSERT(0);
   1652 	return (paddr_t) -1;
   1653 }
   1654 
   1655 /*
   1656  * pmap_copy:		[ INTERFACE ]
   1657  *
   1658  *	Copy the mapping range specified by src_addr/len
   1659  *	from the source map to the range dst_addr/len
   1660  *	in the destination map.
   1661  *
   1662  *	This routine is only advisory and need not do anything.
   1663  */
   1664 void
   1665 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
   1666     vaddr_t src_addr)
   1667 {
   1668 
   1669 	PMAP_DPRINTF(PDB_FOLLOW,
   1670 	    ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
   1671 	    dst_pmap, src_pmap, dst_addr, len, src_addr));
   1672 }
   1673 
   1674 /*
   1675  * pmap_collect1():
   1676  *
   1677  *	Garbage-collect KPT pages.  Helper for the above (bogus)
   1678  *	Garbage-collect KPT pages.  Helper for the (bogus)
   1679  *	pmap_collect() below.
   1680  *	Note: THIS SHOULD GO AWAY, AND BE REPLACED WITH A BETTER
   1681  *	WAY OF HANDLING PT PAGES!
   1682  */
   1683 static inline void
   1684 pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa)
   1685 {
   1686 	paddr_t pa;
   1687 	struct pv_header *pvh;
   1688 	struct pv_entry *pv;
   1689 	pt_entry_t *pte;
   1690 	paddr_t kpa;
   1691 #ifdef DEBUG
   1692 	st_entry_t *ste;
   1693 	int opmapdebug = 0;
   1694 #endif
   1695 
   1696 	for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
   1697 		struct kpt_page *kpt, **pkpt;
   1698 
   1699 		/*
   1700 		 * Locate physical pages which are being used as kernel
   1701 		 * page table pages.
   1702 		 */
   1703 
   1704 		pvh = pa_to_pvh(pa);
   1705 		pv = &pvh->pvh_first;
   1706 		if (pv->pv_pmap != pmap_kernel() ||
   1707 		    !(pvh->pvh_attrs & PVH_PTPAGE))
   1708 			continue;
   1709 		do {
   1710 			if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
   1711 				break;
   1712 		} while ((pv = pv->pv_next));
   1713 		if (pv == NULL)
   1714 			continue;
   1715 #ifdef DEBUG
   1716 		if (pv->pv_va < (vaddr_t)Sysmap ||
   1717 		    pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
   1718 			printf("collect: kernel PT VA out of range\n");
   1719 			pmap_pvdump(pa);
   1720 			continue;
   1721 		}
   1722 #endif
   1723 		pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
   1724 		while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
   1725 			;
   1726 		if (pte >= (pt_entry_t *)pv->pv_va)
   1727 			continue;
   1728 
   1729 #ifdef DEBUG
   1730 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
   1731 			printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
   1732 			    pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
   1733 			opmapdebug = pmapdebug;
   1734 			pmapdebug |= PDB_PTPAGE;
   1735 		}
   1736 
   1737 		ste = pv->pv_ptste;
   1738 #endif
   1739 		/*
   1740 		 * If all entries were invalid we can remove the page.
    1741 		 * We call pmap_remove_mapping() to take care of invalidating
    1742 		 * the ST and Sysptmap entries.
   1743 		 */
   1744 
   1745 		if (!pmap_extract(pmap, pv->pv_va, &kpa)) {
   1746 			printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
   1747 			    pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
   1748 			panic("pmap_collect: mapping not found");
   1749 		}
   1750 		pmap_remove_mapping(pmap, pv->pv_va, NULL,
   1751 		    PRM_TFLUSH|PRM_CFLUSH, NULL);
   1752 
   1753 		/*
   1754 		 * Use the physical address to locate the original
    1755 		 * (pmap_init-assigned) virtual address for the page and put
   1756 		 * that page back on the free list.
   1757 		 */
   1758 
   1759 		for (pkpt = &kpt_used_list, kpt = *pkpt;
   1760 		     kpt != NULL;
   1761 		     pkpt = &kpt->kpt_next, kpt = *pkpt)
   1762 			if (kpt->kpt_pa == kpa)
   1763 				break;
   1764 #ifdef DEBUG
   1765 		if (kpt == NULL)
   1766 			panic("pmap_collect: lost a KPT page");
   1767 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
   1768 			printf("collect: %lx (%lx) to free list\n",
   1769 			    kpt->kpt_va, kpa);
   1770 #endif
   1771 		*pkpt = kpt->kpt_next;
   1772 		kpt->kpt_next = kpt_free_list;
   1773 		kpt_free_list = kpt;
   1774 #ifdef DEBUG
   1775 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
   1776 			pmapdebug = opmapdebug;
   1777 
   1778 		if (*ste != SG_NV)
   1779 			printf("collect: kernel STE at %p still valid (%x)\n",
   1780 			    ste, *ste);
   1781 		ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
   1782 		if (*ste != SG_NV)
   1783 			printf("collect: kernel PTmap at %p still valid (%x)\n",
   1784 			    ste, *ste);
   1785 #endif
   1786 	}
   1787 }
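
         /*
          * Editor's note -- an illustrative sketch, not part of the original
          * source: the kpt_used_list walk above uses the "pointer to the
          * previous link" idiom so that the matching element can be unlinked
          * without special-casing the list head.  The same pattern in
          * isolation, with hypothetical names:
          */
         #if 0	/* example only */
         struct node { struct node *next; int key; };

         static void
         unlink_node(struct node **headp, int key)
         {
         	struct node **pnp, *np;

         	/* pnp always points at the link that currently points at np */
         	for (pnp = headp, np = *pnp; np != NULL;
         	     pnp = &np->next, np = *pnp)
         		if (np->key == key)
         			break;
         	if (np != NULL)
         		*pnp = np->next;	/* unlink; head needs no special case */
         }
         #endif	/* example only */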
   1788 
   1789 /*
   1790  * pmap_collect:
   1791  *
   1792  *	Helper for pmap_enter_ptpage().
   1793  *
   1794  *	Garbage collects the physical map system for pages which are no
   1795  *	longer used.  Success need not be guaranteed -- that is, there
   1796  *	may well be pages which are not referenced, but others may be
   1797  *	collected.
   1798  */
   1799 static void
   1800 pmap_collect(void)
   1801 {
   1802 	int s;
   1803 	uvm_physseg_t bank;
   1804 
   1805 	/*
   1806 	 * XXX This is very bogus.  We should handle kernel PT
   1807 	 * XXX pages much differently.
   1808 	 */
   1809 
   1810 	s = splvm();
   1811 	for (bank = uvm_physseg_get_first();
   1812 	     uvm_physseg_valid_p(bank);
   1813 	     bank = uvm_physseg_get_next(bank)) {
   1814 		pmap_collect1(pmap_kernel(), ptoa(uvm_physseg_get_start(bank)),
   1815 		    ptoa(uvm_physseg_get_end(bank)));
   1816 	}
   1817 	splx(s);
   1818 }
   1819 
   1820 /*
   1821  * pmap_zero_page:		[ INTERFACE ]
   1822  *
   1823  *	Zero the specified (machine independent) page by mapping the page
   1824  *	into virtual memory and using memset to clear its contents, one
    1825  *	into virtual memory and using zeropage() to clear its contents, one
   1826  *
   1827  *	Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
   1828  *	      (Actually, we go to splvm(), and since we don't
   1829  *	      support multiple processors, this is sufficient.)
   1830  */
   1831 void
   1832 pmap_zero_page(paddr_t phys)
   1833 {
   1834 	int npte;
    1835 	pt_entry_t npte;
   1836 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
   1837 
   1838 	npte = phys | PG_V;
   1839 #ifdef CACHE_HAVE_VAC
   1840 	if (pmap_aliasmask) {
   1841 
   1842 		/*
   1843 		 * Cache-inhibit the mapping on VAC machines, as we would
   1844 		 * be wasting the cache load.
   1845 		 */
   1846 
   1847 		npte |= PG_CI;
   1848 	}
   1849 #endif
   1850 
   1851 #if defined(M68040) || defined(M68060)
   1852 #if defined(M68020) || defined(M68030)
   1853 	if (mmutype == MMU_68040)
   1854 #endif
   1855 	{
   1856 		/*
   1857 		 * Set copyback caching on the page; this is required
   1858 		 * for cache consistency (since regular mappings are
   1859 		 * copyback as well).
   1860 		 */
   1861 
   1862 		npte |= PG_CCB;
   1863 	}
   1864 #endif
   1865 
   1866 	*caddr1_pte = npte;
   1867 	TBIS((vaddr_t)CADDR1);
   1868 
   1869 	zeropage(CADDR1);
   1870 
   1871 #ifdef DEBUG
   1872 	*caddr1_pte = PG_NV;
   1873 	TBIS((vaddr_t)CADDR1);
   1874 #endif
   1875 }
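
         /*
          * Editor's note (not part of the original comments): CADDR1 is a
          * dedicated kernel VA whose PTE (caddr1_pte) is rewritten on every
          * call, so the TBIS() above is what evicts any stale TLB entry for
          * that VA before zeropage() touches it; the DEBUG-only invalidation
          * afterwards just makes any accidental later use of CADDR1 fault.
          */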
   1876 
   1877 /*
   1878  * pmap_copy_page:		[ INTERFACE ]
   1879  *
   1880  *	Copy the specified (machine independent) page by mapping the page
   1881  *	into virtual memory and using memcpy to copy the page, one machine
    1882  *	into virtual memory and using copypage() to copy the page, one machine
   1883  *
   1884  *	Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
   1885  *	      (Actually, we go to splvm(), and since we don't
   1886  *	      support multiple processors, this is sufficient.)
   1887  */
   1888 void
   1889 pmap_copy_page(paddr_t src, paddr_t dst)
   1890 {
   1891 	pt_entry_t npte1, npte2;
   1892 
   1893 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
   1894 
   1895 	npte1 = src | PG_RO | PG_V;
   1896 	npte2 = dst | PG_V;
   1897 #ifdef CACHE_HAVE_VAC
   1898 	if (pmap_aliasmask) {
   1899 
   1900 		/*
   1901 		 * Cache-inhibit the mapping on VAC machines, as we would
   1902 		 * be wasting the cache load.
   1903 		 */
   1904 
   1905 		npte1 |= PG_CI;
   1906 		npte2 |= PG_CI;
   1907 	}
   1908 #endif
   1909 
   1910 #if defined(M68040) || defined(M68060)
   1911 #if defined(M68020) || defined(M68030)
   1912 	if (mmutype == MMU_68040)
   1913 #endif
   1914 	{
   1915 		/*
   1916 		 * Set copyback caching on the pages; this is required
   1917 		 * for cache consistency (since regular mappings are
   1918 		 * copyback as well).
   1919 		 */
   1920 
   1921 		npte1 |= PG_CCB;
   1922 		npte2 |= PG_CCB;
   1923 	}
   1924 #endif
   1925 
   1926 	*caddr1_pte = npte1;
   1927 	TBIS((vaddr_t)CADDR1);
   1928 
   1929 	*caddr2_pte = npte2;
   1930 	TBIS((vaddr_t)CADDR2);
   1931 
   1932 	copypage(CADDR1, CADDR2);
   1933 
   1934 #ifdef DEBUG
   1935 	*caddr1_pte = PG_NV;
   1936 	TBIS((vaddr_t)CADDR1);
   1937 
   1938 	*caddr2_pte = PG_NV;
   1939 	TBIS((vaddr_t)CADDR2);
   1940 #endif
   1941 }
   1942 
   1943 /*
   1944  * pmap_clear_modify:		[ INTERFACE ]
   1945  *
   1946  *	Clear the modify bits on the specified physical page.
   1947  */
   1948 bool
   1949 pmap_clear_modify(struct vm_page *pg)
   1950 {
   1951 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1952 
   1953 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
   1954 
   1955 	return pmap_changebit(pa, 0, (pt_entry_t)~PG_M);
   1956 }
   1957 
   1958 /*
   1959  * pmap_clear_reference:	[ INTERFACE ]
   1960  *
   1961  *	Clear the reference bit on the specified physical page.
   1962  */
   1963 bool
   1964 pmap_clear_reference(struct vm_page *pg)
   1965 {
   1966 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1967 
   1968 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
   1969 
   1970 	return pmap_changebit(pa, 0, (pt_entry_t)~PG_U);
   1971 }
   1972 
   1973 /*
   1974  * pmap_is_referenced:		[ INTERFACE ]
   1975  *
   1976  *	Return whether or not the specified physical page is referenced
   1977  *	by any physical maps.
   1978  */
   1979 bool
   1980 pmap_is_referenced(struct vm_page *pg)
   1981 {
   1982 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1983 
   1984 	return pmap_testbit(pa, PG_U);
   1985 }
   1986 
   1987 /*
   1988  * pmap_is_modified:		[ INTERFACE ]
   1989  *
   1990  *	Return whether or not the specified physical page is modified
   1991  *	by any physical maps.
   1992  */
   1993 bool
   1994 pmap_is_modified(struct vm_page *pg)
   1995 {
   1996 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1997 
   1998 	return pmap_testbit(pa, PG_M);
   1999 }
   2000 
   2001 /*
   2002  * pmap_phys_address:		[ INTERFACE ]
   2003  *
   2004  *	Return the physical address corresponding to the specified
   2005  *	cookie.  Used by the device pager to decode a device driver's
   2006  *	mmap entry point return value.
   2007  *
   2008  *	Note: no locking is necessary in this function.
   2009  */
   2010 paddr_t
   2011 pmap_phys_address(paddr_t ppn)
   2012 {
   2013 	return m68k_ptob(ppn);
   2014 }
   2015 
   2016 #ifdef CACHE_HAVE_VAC
   2017 /*
   2018  * pmap_prefer:			[ INTERFACE ]
   2019  *
   2020  *	Find the first virtual address >= *vap that does not
   2021  *	cause a virtually-addressed cache alias problem.
   2022  */
   2023 void
   2024 pmap_prefer(vaddr_t foff, vaddr_t *vap)
   2025 {
   2026 	vaddr_t va;
   2027 	vsize_t d;
   2028 
   2029 #ifdef M68K_MMU_MOTOROLA
   2030 	if (pmap_aliasmask)
   2031 #endif
   2032 	{
   2033 		va = *vap;
   2034 		d = foff - va;
   2035 		d &= pmap_aliasmask;
   2036 		*vap = va + d;
   2037 	}
   2038 }
   2039 #endif /* CACHE_HAVE_VAC */
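
         /*
          * Editor's worked example for pmap_prefer() above (the cache size is
          * hypothetical): with a 16KB virtually-addressed cache,
          * pmap_aliasmask == 0x3fff.  For foff == 0x5000 and *vap == 0x10800:
          *
          *	d    = (0x5000 - 0x10800) & 0x3fff = 0x0800
          *	*vap = 0x10800 + 0x0800            = 0x11000
          *
          * The returned address is >= the original hint and falls in the same
          * alias class as foff (both are 0x1000 modulo 0x4000), so mapping the
          * object there cannot create a VAC alias.
          */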
   2040 
   2041 /*
   2042  * Miscellaneous support routines follow
   2043  */
   2044 
   2045 /*
   2046  * pmap_remove_mapping:
   2047  *
   2048  *	Invalidate a single page denoted by pmap/va.
   2049  *
   2050  *	If (pte != NULL), it is the already computed PTE for the page.
   2051  *
   2052  *	If (flags & PRM_TFLUSH), we must invalidate any TLB information.
   2053  *
   2054  *	If (flags & PRM_CFLUSH), we must flush/invalidate any cache
   2055  *	information.
   2056  *
   2057  *	If (flags & PRM_KEEPPTPAGE), we don't free the page table page
   2058  *	if the reference drops to zero.
   2059  */
   2060 /* static */
   2061 void
   2062 pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags,
   2063     struct pv_entry **opvp)
   2064 {
   2065 	paddr_t pa;
   2066 	struct pv_header *pvh;
   2067 	struct pv_entry *pv, *npv, *opv = NULL;
   2068 	struct pmap *ptpmap;
   2069 	st_entry_t *ste;
   2070 	int s, bits;
   2071 #ifdef DEBUG
   2072 	pt_entry_t opte;
   2073 #endif
   2074 
   2075 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
   2076 	    ("pmap_remove_mapping(%p, %lx, %p, %x, %p)\n",
   2077 	    pmap, va, pte, flags, opvp));
   2078 
   2079 	/*
   2080 	 * PTE not provided, compute it from pmap and va.
   2081 	 */
   2082 
   2083 	if (pte == NULL) {
   2084 		pte = pmap_pte(pmap, va);
   2085 		if (*pte == PG_NV)
   2086 			return;
   2087 	}
   2088 
   2089 #ifdef CACHE_HAVE_VAC
   2090 	if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
   2091 
   2092 		/*
   2093 		 * Purge kernel side of VAC to ensure we get the correct
   2094 		 * state of any hardware maintained bits.
   2095 		 */
   2096 
   2097 		DCIS();
   2098 
   2099 		/*
   2100 		 * If this is a non-CI user mapping for the current process,
   2101 		 * flush the VAC.  Note that the kernel side was flushed
   2102 		 * above so we don't worry about non-CI kernel mappings.
   2103 		 */
   2104 
   2105 		if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
   2106 			DCIU();
   2107 		}
   2108 	}
   2109 #endif
   2110 
   2111 	pa = pmap_pte_pa(pte);
   2112 #ifdef DEBUG
   2113 	opte = *pte;
   2114 #endif
   2115 
   2116 	/*
   2117 	 * Update statistics
   2118 	 */
   2119 
   2120 	if (pmap_pte_w(pte))
   2121 		pmap->pm_stats.wired_count--;
   2122 	pmap->pm_stats.resident_count--;
   2123 
   2124 #if defined(M68040) || defined(M68060)
   2125 #if defined(M68020) || defined(M68030)
   2126 	if (mmutype == MMU_68040)
   2127 #endif
   2128 	if ((flags & PRM_CFLUSH)) {
   2129 		DCFP(pa);
   2130 		ICPP(pa);
   2131 	}
   2132 #endif
   2133 
   2134 	/*
   2135 	 * Invalidate the PTE after saving the reference modify info.
   2136 	 */
   2137 
   2138 	PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
   2139 	bits = *pte & (PG_U|PG_M);
   2140 	*pte = PG_NV;
   2141 	if ((flags & PRM_TFLUSH) && active_pmap(pmap))
   2142 		TBIS(va);
   2143 
   2144 	/*
   2145 	 * For user mappings decrement the wiring count on
   2146 	 * the PT page.
   2147 	 */
   2148 
   2149 	if (pmap != pmap_kernel()) {
   2150 		vaddr_t ptpva = trunc_page((vaddr_t)pte);
   2151 		int refs = pmap_ptpage_delref(ptpva);
   2152 #ifdef DEBUG
   2153 		if (pmapdebug & PDB_WIRING)
   2154 			pmap_check_wiring("remove", ptpva);
   2155 #endif
   2156 
   2157 		/*
   2158 		 * If reference count drops to 0, and we're not instructed
   2159 		 * to keep it around, free the PT page.
   2160 		 */
   2161 
   2162 		if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
   2163 #ifdef DIAGNOSTIC
   2164 			struct pv_header *ptppvh;
   2165 			struct pv_entry *ptppv;
   2166 #endif
   2167 			paddr_t ptppa;
   2168 
   2169 			ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
   2170 #ifdef DIAGNOSTIC
   2171 			if (PAGE_IS_MANAGED(ptppa) == 0)
   2172 				panic("pmap_remove_mapping: unmanaged PT page");
   2173 			ptppvh = pa_to_pvh(ptppa);
   2174 			ptppv = &ptppvh->pvh_first;
   2175 			if (ptppv->pv_ptste == NULL)
   2176 				panic("pmap_remove_mapping: ptste == NULL");
   2177 			if (ptppv->pv_pmap != pmap_kernel() ||
   2178 			    ptppv->pv_va != ptpva ||
   2179 			    ptppv->pv_next != NULL)
   2180 				panic("pmap_remove_mapping: "
   2181 				    "bad PT page pmap %p, va 0x%lx, next %p",
   2182 				    ptppv->pv_pmap, ptppv->pv_va,
   2183 				    ptppv->pv_next);
   2184 #endif
   2185 			pmap_remove_mapping(pmap_kernel(), ptpva,
   2186 			    NULL, PRM_TFLUSH|PRM_CFLUSH, NULL);
   2187 			rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
   2188 			uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
   2189 			rw_exit(uvm_kernel_object->vmobjlock);
   2190 			PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
   2191 			    ("remove: PT page 0x%lx (0x%lx) freed\n",
   2192 			    ptpva, ptppa));
   2193 		}
   2194 	}
   2195 
   2196 	/*
   2197 	 * If this isn't a managed page, we are all done.
   2198 	 */
   2199 
   2200 	if (PAGE_IS_MANAGED(pa) == 0)
   2201 		return;
   2202 
   2203 	/*
   2204 	 * Otherwise remove it from the PV table
   2205 	 * (raise IPL since we may be called at interrupt time).
   2206 	 */
   2207 
   2208 	pvh = pa_to_pvh(pa);
   2209 	pv = &pvh->pvh_first;
   2210 	ste = NULL;
   2211 	s = splvm();
   2212 
   2213 	/*
   2214 	 * If it is the first entry on the list, it is actually
   2215 	 * in the header and we must copy the following entry up
   2216 	 * to the header.  Otherwise we must search the list for
   2217 	 * the entry.  In either case we free the now unused entry.
   2218 	 */
   2219 
   2220 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
   2221 		ste = pv->pv_ptste;
   2222 		ptpmap = pv->pv_ptpmap;
   2223 		npv = pv->pv_next;
   2224 		if (npv) {
   2225 			*pv = *npv;
   2226 			opv = npv;
   2227 		} else
   2228 			pv->pv_pmap = NULL;
   2229 	} else {
   2230 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
   2231 			if (pmap == npv->pv_pmap && va == npv->pv_va)
   2232 				break;
   2233 			pv = npv;
   2234 		}
   2235 #ifdef DEBUG
   2236 		if (npv == NULL)
   2237 			panic("pmap_remove: PA not in pv_tab");
   2238 #endif
   2239 		ste = npv->pv_ptste;
   2240 		ptpmap = npv->pv_ptpmap;
   2241 		pv->pv_next = npv->pv_next;
   2242 		opv = npv;
   2243 		pvh = pa_to_pvh(pa);
   2244 		pv = &pvh->pvh_first;
   2245 	}
   2246 
   2247 #ifdef CACHE_HAVE_VAC
   2248 
   2249 	/*
   2250 	 * If only one mapping left we no longer need to cache inhibit
   2251 	 */
   2252 
   2253 	if (pmap_aliasmask &&
   2254 	    pv->pv_pmap && pv->pv_next == NULL && (pvh->pvh_attrs & PVH_CI)) {
   2255 		PMAP_DPRINTF(PDB_CACHE,
   2256 		    ("remove: clearing CI for pa %lx\n", pa));
   2257 		pvh->pvh_attrs &= ~PVH_CI;
   2258 		pmap_changebit(pa, 0, (pt_entry_t)~PG_CI);
   2259 #ifdef DEBUG
   2260 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
   2261 		    (PDB_CACHE|PDB_PVDUMP))
   2262 			pmap_pvdump(pa);
   2263 #endif
   2264 	}
   2265 #endif
   2266 
   2267 	/*
   2268 	 * If this was a PT page we must also remove the
   2269 	 * mapping from the associated segment table.
   2270 	 */
   2271 
   2272 	if (ste) {
   2273 		PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
   2274 		    ("remove: ste was %x@%p pte was %x@%p\n",
   2275 		    *ste, ste, opte, pmap_pte(pmap, va)));
   2276 #if defined(M68040) || defined(M68060)
   2277 #if defined(M68020) || defined(M68030)
   2278 		if (mmutype == MMU_68040)
   2279 #endif
   2280 		{
   2281 			st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
   2282 
   2283 			while (ste < este)
   2284 				*ste++ = SG_NV;
   2285 #ifdef DEBUG
   2286 			ste -= NPTEPG/SG4_LEV3SIZE;
   2287 #endif
   2288 		}
   2289 #if defined(M68020) || defined(M68030)
   2290 		else
   2291 #endif
   2292 #endif
   2293 #if defined(M68020) || defined(M68030)
   2294 		*ste = SG_NV;
   2295 #endif
   2296 
   2297 		/*
   2298 		 * If it was a user PT page, we decrement the
   2299 		 * reference count on the segment table as well,
   2300 		 * freeing it if it is now empty.
   2301 		 */
   2302 
   2303 		if (ptpmap != pmap_kernel()) {
   2304 			PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
   2305 			    ("remove: stab %p, refcnt %d\n",
   2306 			    ptpmap->pm_stab, ptpmap->pm_sref - 1));
   2307 #ifdef DEBUG
   2308 			if ((pmapdebug & PDB_PARANOIA) &&
   2309 			    ptpmap->pm_stab !=
   2310 			     (st_entry_t *)trunc_page((vaddr_t)ste))
   2311 				panic("remove: bogus ste");
   2312 #endif
   2313 			if (--(ptpmap->pm_sref) == 0) {
   2314 				PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
   2315 				    ("remove: free stab %p\n",
   2316 				    ptpmap->pm_stab));
   2317 				uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab,
   2318 				    M68K_STSIZE, UVM_KMF_WIRED);
   2319 				ptpmap->pm_stab = Segtabzero;
   2320 				ptpmap->pm_stpa = Segtabzeropa;
   2321 #if defined(M68040) || defined(M68060)
   2322 #if defined(M68020) || defined(M68030)
   2323 				if (mmutype == MMU_68040)
   2324 #endif
   2325 					ptpmap->pm_stfree = protostfree;
   2326 #endif
   2327 				/*
   2328 				 * Segment table has changed; reload the
   2329 				 * MMU if it's the active user pmap.
   2330 				 */
   2331 				if (active_user_pmap(ptpmap)) {
   2332 					pmap_load_urp((paddr_t)ptpmap->pm_stpa);
   2333 				}
   2334 			}
   2335 		}
   2336 		pvh->pvh_attrs &= ~PVH_PTPAGE;
   2337 		ptpmap->pm_ptpages--;
   2338 	}
   2339 
   2340 	/*
   2341 	 * Update saved attributes for managed page
   2342 	 */
   2343 
   2344 	pvh->pvh_attrs |= bits;
   2345 	splx(s);
   2346 
   2347 	if (opvp != NULL)
   2348 		*opvp = opv;
   2349 	else if (opv != NULL)
   2350 		pmap_free_pv(opv);
   2351 }
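
         /*
          * Editor's note (illustrative, not part of the original source): the
          * PV-list handling above is shaped by the fact that the first
          * pv_entry for a page is embedded in its pv_header rather than being
          * separately allocated.  Removing the head is therefore a structure
          * copy:
          *
          *	before:    [header: A] -> B -> C
          *	remove A:  copy *B into the header, then treat B as free
          *	after:     [header: B] -> C
          *
          * which is why it is the *second* entry (npv) that is handed back
          * through opvp or passed to pmap_free_pv() above.
          */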
   2352 
   2353 /*
   2354  * pmap_testbit:
   2355  *
   2356  *	Test the modified/referenced bits of a physical page.
   2357  */
   2358 /* static */
   2359 bool
   2360 pmap_testbit(paddr_t pa, int bit)
   2361 {
   2362 	struct pv_header *pvh;
   2363 	struct pv_entry *pv;
   2364 	pt_entry_t *pte;
   2365 	int s;
   2366 
   2367 	pvh = pa_to_pvh(pa);
   2368 	pv = &pvh->pvh_first;
   2369 	s = splvm();
   2370 
   2371 	/*
   2372 	 * Check saved info first
   2373 	 */
   2374 
   2375 	if (pvh->pvh_attrs & bit) {
   2376 		splx(s);
   2377 		return true;
   2378 	}
   2379 
   2380 #ifdef CACHE_HAVE_VAC
   2381 
   2382 	/*
   2383 	 * Flush VAC to get correct state of any hardware maintained bits.
   2384 	 */
   2385 
   2386 	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
   2387 		DCIS();
   2388 #endif
   2389 
   2390 	/*
   2391 	 * Not found.  Check current mappings, returning immediately if
   2392 	 * found.  Cache a hit to speed future lookups.
   2393 	 */
   2394 
   2395 	if (pv->pv_pmap != NULL) {
   2396 		for (; pv; pv = pv->pv_next) {
   2397 			pte = pmap_pte(pv->pv_pmap, pv->pv_va);
   2398 			if (*pte & bit) {
   2399 				pvh->pvh_attrs |= bit;
   2400 				splx(s);
   2401 				return true;
   2402 			}
   2403 		}
   2404 	}
   2405 	splx(s);
   2406 	return false;
   2407 }
   2408 
   2409 /*
   2410  * pmap_changebit:
   2411  *
   2412  *	Change the modified/referenced bits, or other PTE bits,
   2413  *	for a physical page.
   2414  */
   2415 /* static */
   2416 bool
   2417 pmap_changebit(paddr_t pa, pt_entry_t set, pt_entry_t mask)
   2418 {
   2419 	struct pv_header *pvh;
   2420 	struct pv_entry *pv;
   2421 	pt_entry_t *pte, npte;
   2422 	vaddr_t va;
   2423 	int s;
   2424 #if defined(CACHE_HAVE_VAC) || defined(M68040) || defined(M68060)
   2425 	bool firstpage = true;
   2426 #endif
   2427 	bool r;
   2428 
   2429 	PMAP_DPRINTF(PDB_BITS,
   2430 	    ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
   2431 
   2432 	pvh = pa_to_pvh(pa);
   2433 	pv = &pvh->pvh_first;
   2434 	s = splvm();
   2435 
   2436 	/*
   2437 	 * Clear saved attributes (modify, reference)
   2438 	 */
   2439 
   2440 	r = (pvh->pvh_attrs & ~mask) != 0;
   2441 	pvh->pvh_attrs &= mask;
   2442 
   2443 	/*
   2444 	 * Loop over all current mappings setting/clearing as appropriate
   2445 	 * If setting RO do we need to clear the VAC?
   2446 	 */
   2447 
   2448 	if (pv->pv_pmap != NULL) {
   2449 #ifdef DEBUG
   2450 		int toflush = 0;
   2451 #endif
   2452 		for (; pv; pv = pv->pv_next) {
   2453 #ifdef DEBUG
   2454 			toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
   2455 #endif
   2456 			va = pv->pv_va;
   2457 			pte = pmap_pte(pv->pv_pmap, va);
   2458 #ifdef CACHE_HAVE_VAC
   2459 
   2460 			/*
   2461 			 * Flush VAC to ensure we get correct state of HW bits
   2462 			 * so we don't clobber them.
   2463 			 */
   2464 
   2465 			if (firstpage && pmap_aliasmask) {
   2466 				firstpage = false;
   2467 				DCIS();
   2468 			}
   2469 #endif
   2470 			npte = (*pte | set) & mask;
   2471 			if (*pte != npte) {
   2472 				r = true;
   2473 #if defined(M68040) || defined(M68060)
   2474 				/*
   2475 				 * If we are changing caching status or
   2476 				 * protection make sure the caches are
   2477 				 * flushed (but only once).
   2478 				 */
   2479 				if (firstpage &&
   2480 #if defined(M68020) || defined(M68030)
   2481 				    (mmutype == MMU_68040) &&
   2482 #endif
   2483 				    ((set == PG_RO) ||
   2484 				     (set & PG_CMASK) ||
   2485 				     (mask & PG_CMASK) == 0)) {
   2486 					firstpage = false;
   2487 					DCFP(pa);
   2488 					ICPP(pa);
   2489 				}
   2490 #endif
   2491 				*pte = npte;
   2492 				if (active_pmap(pv->pv_pmap))
   2493 					TBIS(va);
   2494 			}
   2495 		}
   2496 	}
   2497 	splx(s);
   2498 	return r;
   2499 }
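
         /*
          * Editor's note (illustrative): each PTE above is rewritten as
          *
          *	npte = (*pte | set) & mask;
          *
          * so callers pass the bits to turn on in `set' and the complement of
          * the bits to turn off in `mask', e.g. (from callers in this file):
          *
          *	pmap_changebit(pa, 0, ~PG_M)		clear the modify bit
          *	pmap_changebit(pa, PG_CI, ~PG_CCB)	cache-inhibit the page
          *
          * The return value is true if any PTE or saved attribute actually
          * changed.
          */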
   2500 
   2501 /*
   2502  * pmap_enter_ptpage:
   2503  *
   2504  *	Allocate and map a PT page for the specified pmap/va pair.
   2505  */
   2506 /* static */
   2507 int
   2508 pmap_enter_ptpage(pmap_t pmap, vaddr_t va, bool can_fail)
   2509 {
   2510 	paddr_t ptpa;
   2511 	struct vm_page *pg;
   2512 	struct pv_header *pvh;
   2513 	struct pv_entry *pv;
   2514 	st_entry_t *ste;
   2515 	int s;
   2516 
   2517 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
   2518 	    ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
   2519 
   2520 	/*
   2521 	 * Allocate a segment table if necessary.  Note that it is allocated
   2522 	 * from a private map and not pt_map.  This keeps user page tables
   2523 	 * aligned on segment boundaries in the kernel address space.
   2524 	 * The segment table is wired down.  It will be freed whenever the
   2525 	 * reference count drops to zero.
   2526 	 */
   2527 	if (pmap->pm_stab == Segtabzero) {
   2528 		pmap->pm_stab = (st_entry_t *)
   2529 		    uvm_km_alloc(st_map, M68K_STSIZE, 0,
   2530 		    UVM_KMF_WIRED | UVM_KMF_ZERO |
   2531 		    (can_fail ? UVM_KMF_NOWAIT : 0));
   2532 		if (pmap->pm_stab == NULL) {
   2533 			pmap->pm_stab = Segtabzero;
   2534 			return ENOMEM;
   2535 		}
   2536 		(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
   2537 		    (paddr_t *)&pmap->pm_stpa);
   2538 #if defined(M68040) || defined(M68060)
   2539 #if defined(M68020) || defined(M68030)
   2540 		if (mmutype == MMU_68040)
   2541 #endif
   2542 		{
   2543 			pt_entry_t	*pte;
   2544 
   2545 			pte = pmap_pte(pmap_kernel(), pmap->pm_stab);
   2546 			*pte = (*pte & ~PG_CMASK) | PG_CI;
   2547 			pmap->pm_stfree = protostfree;
   2548 		}
   2549 #endif
   2550 		/*
   2551 		 * Segment table has changed; reload the
   2552 		 * MMU if it's the active user pmap.
   2553 		 */
   2554 		if (active_user_pmap(pmap)) {
   2555 			pmap_load_urp((paddr_t)pmap->pm_stpa);
   2556 		}
   2557 
   2558 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2559 		    ("enter: pmap %p stab %p(%p)\n",
   2560 		    pmap, pmap->pm_stab, pmap->pm_stpa));
   2561 	}
   2562 
   2563 	ste = pmap_ste(pmap, va);
   2564 #if defined(M68040) || defined(M68060)
   2565 	/*
   2566 	 * Allocate level 2 descriptor block if necessary
   2567 	 */
   2568 #if defined(M68020) || defined(M68030)
   2569 	if (mmutype == MMU_68040)
   2570 #endif
   2571 	{
   2572 		if (*ste == SG_NV) {
   2573 			int ix;
   2574 			void *addr;
   2575 
   2576 			ix = bmtol2(pmap->pm_stfree);
   2577 			if (ix == -1)
   2578 				panic("enter: out of address space"); /* XXX */
   2579 			pmap->pm_stfree &= ~l2tobm(ix);
   2580 			addr = (void *)&pmap->pm_stab[ix*SG4_LEV2SIZE];
   2581 			memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
   2582 			addr = (void *)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
   2583 			*ste = (u_int)addr | SG_RW | SG_U | SG_V;
   2584 
   2585 			PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2586 			    ("enter: alloc ste2 %d(%p)\n", ix, addr));
   2587 		}
   2588 		ste = pmap_ste2(pmap, va);
   2589 		/*
   2590 		 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
   2591 		 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
   2592 		 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
   2593 		 * PT page--the unit of allocation.  We set `ste' to point
   2594 		 * to the first entry of that chunk which is validated in its
   2595 		 * entirety below.
   2596 		 */
   2597 		ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
   2598 
   2599 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2600 		    ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
   2601 	}
   2602 #endif
   2603 	va = trunc_page((vaddr_t)pmap_pte(pmap, va));
   2604 
   2605 	/*
   2606 	 * In the kernel we allocate a page from the kernel PT page
   2607 	 * free list and map it into the kernel page table map (via
   2608 	 * pmap_enter).
   2609 	 */
   2610 	if (pmap == pmap_kernel()) {
   2611 		struct kpt_page *kpt;
   2612 
   2613 		s = splvm();
   2614 		if ((kpt = kpt_free_list) == NULL) {
   2615 			/*
   2616 			 * No PT pages available.
   2617 			 * Try once to free up unused ones.
   2618 			 */
   2619 			PMAP_DPRINTF(PDB_COLLECT,
   2620 			    ("enter: no KPT pages, collecting...\n"));
   2621 			pmap_collect();
   2622 			if ((kpt = kpt_free_list) == NULL)
   2623 				panic("pmap_enter_ptpage: can't get KPT page");
   2624 		}
   2625 		kpt_free_list = kpt->kpt_next;
   2626 		kpt->kpt_next = kpt_used_list;
   2627 		kpt_used_list = kpt;
   2628 		ptpa = kpt->kpt_pa;
   2629 		memset((void *)kpt->kpt_va, 0, PAGE_SIZE);
   2630 		pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
   2631 		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
   2632 		pmap_update(pmap);
   2633 #ifdef DEBUG
   2634 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
   2635 			int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
   2636 
   2637 			printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
   2638 			    ix, Sysptmap[ix], kpt->kpt_va);
   2639 		}
   2640 #endif
   2641 		splx(s);
   2642 	} else {
   2643 
   2644 		/*
   2645 		 * For user processes we just allocate a page from the
   2646 		 * VM system.  Note that we set the page "wired" count to 1,
   2647 		 * which is what we use to check if the page can be freed.
   2648 		 * See pmap_remove_mapping().
   2649 		 *
   2650 		 * Count the segment table reference first so that we won't
   2651 		 * lose the segment table when low on memory.
   2652 		 */
   2653 
   2654 		pmap->pm_sref++;
   2655 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
   2656 		    ("enter: about to alloc UPT pg at %lx\n", va));
   2657 		rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
   2658 		while ((pg = uvm_pagealloc(uvm_kernel_object,
   2659 					   va - vm_map_min(kernel_map),
   2660 					   NULL, UVM_PGA_ZERO)) == NULL) {
   2661 			rw_exit(uvm_kernel_object->vmobjlock);
   2662 			if (can_fail) {
   2663 				pmap->pm_sref--;
   2664 				return ENOMEM;
   2665 			}
   2666 			uvm_wait("ptpage");
   2667 			rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
   2668 		}
   2669 		rw_exit(uvm_kernel_object->vmobjlock);
   2670 		pg->flags &= ~(PG_BUSY|PG_FAKE);
   2671 		UVM_PAGE_OWN(pg, NULL);
   2672 		ptpa = VM_PAGE_TO_PHYS(pg);
   2673 		pmap_enter(pmap_kernel(), va, ptpa,
   2674 		    VM_PROT_READ | VM_PROT_WRITE,
   2675 		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
   2676 		pmap_update(pmap_kernel());
   2677 	}
   2678 #if defined(M68040) || defined(M68060)
   2679 	/*
   2680 	 * Turn off copyback caching of page table pages,
   2681 	 * could get ugly otherwise.
   2682 	 */
   2683 #if defined(M68020) || defined(M68030)
   2684 	if (mmutype == MMU_68040)
   2685 #endif
   2686 	{
   2687 #ifdef DEBUG
   2688 		pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
   2689 		if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
   2690 			printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
   2691 			    pmap == pmap_kernel() ? "Kernel" : "User",
   2692 			    va, ptpa, pte, *pte);
   2693 #endif
   2694 		if (pmap_changebit(ptpa, PG_CI, (pt_entry_t)~PG_CCB))
   2695 			DCIS();
   2696 	}
   2697 #endif
   2698 	/*
   2699 	 * Locate the PV entry in the kernel for this PT page and
   2700 	 * record the STE address.  This is so that we can invalidate
   2701 	 * the STE when we remove the mapping for the page.
   2702 	 */
   2703 	pvh = pa_to_pvh(ptpa);
   2704 	s = splvm();
   2705 	if (pvh) {
   2706 		pv = &pvh->pvh_first;
   2707 		pvh->pvh_attrs |= PVH_PTPAGE;
   2708 		do {
   2709 			if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
   2710 				break;
   2711 		} while ((pv = pv->pv_next));
   2712 	} else {
   2713 		pv = NULL;
   2714 	}
   2715 #ifdef DEBUG
   2716 	if (pv == NULL)
   2717 		panic("pmap_enter_ptpage: PT page not entered");
   2718 #endif
   2719 	pv->pv_ptste = ste;
   2720 	pv->pv_ptpmap = pmap;
   2721 
   2722 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
   2723 	    ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
   2724 
   2725 	/*
   2726 	 * Map the new PT page into the segment table.
   2727 	 * Also increment the reference count on the segment table if this
   2728 	 * was a user page table page.  Note that we don't use vm_map_pageable
    2729 	 * to keep the count like we do for PT pages; this is mostly because
   2730 	 * it would be difficult to identify ST pages in pmap_pageable to
   2731 	 * release them.  We also avoid the overhead of vm_map_pageable.
   2732 	 */
   2733 #if defined(M68040) || defined(M68060)
   2734 #if defined(M68020) || defined(M68030)
   2735 	if (mmutype == MMU_68040)
   2736 #endif
   2737 	{
   2738 		st_entry_t *este;
   2739 
   2740 		for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
   2741 			*ste = ptpa | SG_U | SG_RW | SG_V;
   2742 			ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
   2743 		}
   2744 	}
   2745 #if defined(M68020) || defined(M68030)
   2746 	else
   2747 		*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
   2748 #endif
   2749 #else
   2750 	*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
   2751 #endif
   2752 	if (pmap != pmap_kernel()) {
   2753 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2754 		    ("enter: stab %p refcnt %d\n",
   2755 		    pmap->pm_stab, pmap->pm_sref));
   2756 	}
   2757 	/*
   2758 	 * Flush stale TLB info.
   2759 	 */
   2760 	if (pmap == pmap_kernel())
   2761 		TBIAS();
   2762 	else
   2763 		TBIAU();
   2764 	pmap->pm_ptpages++;
   2765 	splx(s);
   2766 
   2767 	return 0;
   2768 }
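
         /*
          * Editor's worked example for the 68040/68060 path above (not part
          * of the original source; assumes PAGE_SIZE == 4096 and 4-byte
          * descriptors for concreteness):
          *
          *	NPTEPG                = 4096 / 4 = 1024 PTEs per PT page
          *	SG4_LEV3SIZE          = 64 descriptors per level 3 table
          *	NPTEPG / SG4_LEV3SIZE = 16 level 3 tables per PT page
          *
          * so each PT page is carved into 16 level-3 tables of 64 entries
          * (256 bytes) apiece, and the loop above installs 16 consecutive
          * level-2 descriptors, advancing ptpa by
          * SG4_LEV3SIZE * sizeof(st_entry_t) == 256 bytes per iteration.
          */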
   2769 
   2770 /*
   2771  * pmap_ptpage_addref:
   2772  *
   2773  *	Add a reference to the specified PT page.
   2774  */
   2775 void
   2776 pmap_ptpage_addref(vaddr_t ptpva)
   2777 {
   2778 	struct vm_page *pg;
   2779 
   2780 	rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
   2781 	pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
   2782 	pg->wire_count++;
   2783 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2784 	    ("ptpage addref: pg %p now %d\n",
   2785 	     pg, pg->wire_count));
   2786 	rw_exit(uvm_kernel_object->vmobjlock);
   2787 }
   2788 
   2789 /*
   2790  * pmap_ptpage_delref:
   2791  *
   2792  *	Delete a reference to the specified PT page.
   2793  */
   2794 int
   2795 pmap_ptpage_delref(vaddr_t ptpva)
   2796 {
   2797 	struct vm_page *pg;
   2798 	int rv;
   2799 
   2800 	rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
   2801 	pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
   2802 	rv = --pg->wire_count;
   2803 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2804 	    ("ptpage delref: pg %p now %d\n",
   2805 	     pg, pg->wire_count));
   2806 	rw_exit(uvm_kernel_object->vmobjlock);
   2807 	return rv;
   2808 }
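
         /*
          * Editor's note (not part of the original comments): the wire count
          * manipulated by the two routines above serves as a count of valid
          * mappings in a PT page.  pmap_remove_mapping() drops one reference
          * per removed user mapping and frees the PT page once
          * pmap_ptpage_delref() returns 0 (unless PRM_KEEPPTPAGE is set);
          * under DEBUG, pmap_check_wiring() cross-checks the count against
          * the number of valid PTEs actually present in the page.
          */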
   2809 
    2810 /*
    2811  * pmap_procwr:		[ INTERFACE ]
    2812  *
    2813  *	Synchronize caches corresponding to the range [va, va + len)
    2814  *	in process p.
    2815  */
    2816 void
    2817 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   2818 {
   2819 
   2820 	(void)cachectl1(0x80000004, va, len, p);
   2821 }
   2822 
   2823 void
   2824 _pmap_set_page_cacheable(pmap_t pmap, vaddr_t va)
   2825 {
   2826 
   2827 	if (!pmap_ste_v(pmap, va))
   2828 		return;
   2829 
   2830 #if defined(M68040) || defined(M68060)
   2831 #if defined(M68020) || defined(M68030)
   2832 	if (mmutype == MMU_68040) {
   2833 #endif
   2834 	if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB,
   2835 			   (pt_entry_t)~PG_CI))
   2836 		DCIS();
   2837 
   2838 #if defined(M68020) || defined(M68030)
   2839 	} else
   2840 		pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0,
   2841 			       (pt_entry_t)~PG_CI);
   2842 #endif
   2843 #else
   2844 	pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0,
   2845 		       (pt_entry_t)~PG_CI);
   2846 #endif
   2847 }
   2848 
   2849 void
   2850 _pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va)
   2851 {
   2852 
   2853 	if (!pmap_ste_v(pmap, va))
   2854 		return;
   2855 
   2856 #if defined(M68040) || defined(M68060)
   2857 #if defined(M68020) || defined(M68030)
   2858 	if (mmutype == MMU_68040) {
   2859 #endif
   2860 	if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI,
   2861 			   (pt_entry_t)~PG_CCB))
   2862 		DCIS();
   2863 #if defined(M68020) || defined(M68030)
   2864 	} else
   2865 		pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
   2866 #endif
   2867 #else
   2868 	pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
   2869 #endif
   2870 }
   2871 
   2872 int
   2873 _pmap_page_is_cacheable(pmap_t pmap, vaddr_t va)
   2874 {
   2875 
   2876 	if (!pmap_ste_v(pmap, va))
   2877 		return 0;
   2878 
   2879 	return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0;
   2880 }
   2881 
   2882 #ifdef DEBUG
   2883 /*
   2884  * pmap_pvdump:
   2885  *
   2886  *	Dump the contents of the PV list for the specified physical page.
   2887  */
   2888 void
   2889 pmap_pvdump(paddr_t pa)
   2890 {
   2891 	struct pv_header *pvh;
   2892 	struct pv_entry *pv;
   2893 
   2894 	printf("pa %lx", pa);
   2895 	pvh = pa_to_pvh(pa);
   2896 	for (pv = &pvh->pvh_first; pv; pv = pv->pv_next)
   2897 		printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p",
   2898 		    pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap);
   2899 	printf("\n");
   2900 }
   2901 
   2902 /*
   2903  * pmap_check_wiring:
   2904  *
   2905  *	Count the number of valid mappings in the specified PT page,
   2906  *	and ensure that it is consistent with the number of wirings
   2907  *	to that page that the VM system has.
   2908  */
   2909 void
   2910 pmap_check_wiring(const char *str, vaddr_t va)
   2911 {
   2912 	pt_entry_t *pte;
   2913 	paddr_t pa;
   2914 	struct vm_page *pg;
   2915 	int count;
   2916 
   2917 	if (!pmap_ste_v(pmap_kernel(), va) ||
   2918 	    !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
   2919 		return;
   2920 
   2921 	pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
   2922 	pg = PHYS_TO_VM_PAGE(pa);
   2923 	if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) {
   2924 		panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count);
   2925 	}
   2926 
   2927 	count = 0;
   2928 	for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
   2929 	     pte++)
   2930 		if (*pte)
   2931 			count++;
   2932 	if (pg->wire_count != count)
   2933 		panic("*%s*: 0x%lx: w%d/a%d",
   2934 		       str, va, pg->wire_count, count);
   2935 }
   2936 #endif /* DEBUG */
   2937 
   2938 /*
    2939  * XXX XXX XXX These are legacy remnants and should go away XXX XXX XXX
   2940  * (Cribbed from vm_machdep.c because they're tied to this pmap impl.)
   2941  */
   2942 
   2943 /*
   2944  * Map `size' bytes of physical memory starting at `paddr' into
   2945  * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
   2946  * are specified by `prot'.
   2947  */
   2948 void
   2949 physaccess(void *vaddr, void *paddr, int size, int prot)
   2950 {
   2951 	pt_entry_t *pte;
   2952 	u_int page;
   2953 
   2954 	pte = kvtopte(vaddr);
   2955 	page = (u_int)paddr & PG_FRAME;
   2956 	for (size = btoc(size); size; size--) {
   2957 		*pte++ = PG_V | prot | page;
   2958 		page += PAGE_SIZE;
   2959 	}
   2960 	TBIAS();
   2961 }
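
         /*
          * Editor's sketch (illustrative; the names are hypothetical):
          * machine-dependent code typically uses physaccess() to get a
          * cache-inhibited kernel window onto device registers, along the
          * lines of
          *
          *	physaccess(regva, (void *)regpa, PAGE_SIZE, PG_CI);
          *
          * where `regva' is kernel VA reserved for the purpose and `regpa'
          * is the physical address of the register block.  PG_CI alone
          * yields a read/write cache-inhibited mapping (OR in PG_RO for a
          * read-only one); physunaccess() below tears the window down again.
          */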
   2962 
   2963 void
   2964 physunaccess(void *vaddr, int size)
   2965 {
    2966 	pt_entry_t *pte;
    2967 
    2968 	pte = kvtopte(vaddr);
    2969 	for (size = btoc(size); size; size--)
    2970 		*pte++ = PG_NV;
    2971 	TBIAS();
   2972 }
   2973 
   2974 /*
   2975  * Convert kernel VA to physical address
   2976  */
   2977 int
   2978 kvtop(void *addr)
   2979 {
   2980 	return (int)vtophys((vaddr_t)addr);
   2981 }
   2982