      1 /*	$NetBSD: pmap_motorola.c,v 1.97 2025/11/06 20:28:41 thorpej Exp $        */
      2 
      3 /*-
      4  * Copyright (c) 1999 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1991, 1993
     34  *	The Regents of the University of California.  All rights reserved.
     35  *
     36  * This code is derived from software contributed to Berkeley by
     37  * the Systems Programming Group of the University of Utah Computer
     38  * Science Department.
     39  *
     40  * Redistribution and use in source and binary forms, with or without
     41  * modification, are permitted provided that the following conditions
     42  * are met:
     43  * 1. Redistributions of source code must retain the above copyright
     44  *    notice, this list of conditions and the following disclaimer.
     45  * 2. Redistributions in binary form must reproduce the above copyright
     46  *    notice, this list of conditions and the following disclaimer in the
     47  *    documentation and/or other materials provided with the distribution.
     48  * 3. Neither the name of the University nor the names of its contributors
     49  *    may be used to endorse or promote products derived from this software
     50  *    without specific prior written permission.
     51  *
     52  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     53  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     54  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     55  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     56  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     57  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     58  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     59  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     60  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     61  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     62  * SUCH DAMAGE.
     63  *
     64  *	@(#)pmap.c	8.6 (Berkeley) 5/27/94
     65  */
     66 
     67 /*
     68  * Motorola m68k-family physical map management code.
     69  *
     70  * Supports:
     71  *	68020 with 68851 MMU
     72  *	68020 with HP MMU
     73  *	68030 with on-chip MMU
     74  *	68040 with on-chip MMU
     75  *	68060 with on-chip MMU
     76  *
     77  * Notes:
     78  *	Don't even pay lip service to multiprocessor support.
     79  *
     80  *	We assume TLB entries don't have process tags (except for the
     81  *	supervisor/user distinction) so we only invalidate TLB entries
     82  *	when changing mappings for the current (or kernel) pmap.  This is
     83  *	technically not true for the 68851 but we flush the TLB on every
     84  *	context switch, so it effectively winds up that way.
     85  *
     86  *	Bitwise and/or operations are significantly faster than bitfield
     87  *	references so we use them when accessing STE/PTEs in the pmap_pte_*
     88  *	macros.  Note also that the two are not always equivalent; e.g.:
     89  *		(*pte & PG_PROT) [4] != pte->pg_prot [1]
     90  *	and a couple of routines that deal with protection and wiring take
     91  *	some shortcuts that assume the and/or definitions.
     92  */
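
        /*
         * For illustration, a minimal sketch of the access style described
         * above (assuming a pmap/va pair whose segment table entry is valid):
         *
         *	pt_entry_t pte = *pmap_pte(pmap, va);
         *
         *	if (pte & PG_PROT)
         *		...	the mapping is write-protected
         *
         * i.e. a single and/compare, rather than going through a C bitfield
         * such as pte->pg_prot, which compiles to bitfield extract
         * instructions.
         */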
     93 
     94 /*
     95  *	Manages physical address maps.
     96  *
     97  *	In addition to hardware address maps, this
     98  *	module is called upon to provide software-use-only
     99  *	maps which may or may not be stored in the same
    100  *	form as hardware maps.  These pseudo-maps are
    101  *	used to store intermediate results from copy
    102  *	operations to and from address spaces.
    103  *
    104  *	Since the information managed by this module is
    105  *	also stored by the logical address mapping module,
    106  *	this module may throw away valid virtual-to-physical
    107  *	mappings at almost any time.  However, invalidations
    108  *	of virtual-to-physical mappings must be done as
    109  *	requested.
    110  *
    111  *	In order to cope with hardware architectures which
    112  *	make virtual-to-physical map invalidates expensive,
    113  *	this module may delay invalidate or reduced-protection
    114  *	operations until such time as they are actually
    115  *	necessary.  This module is given full information as
    116  *	to which processors are currently using which maps,
    117  *	and as to when physical maps must be made correct.
    118  */
    119 
    120 #include "opt_m68k_arch.h"
    121 
    122 #include <sys/cdefs.h>
    123 __KERNEL_RCSID(0, "$NetBSD: pmap_motorola.c,v 1.97 2025/11/06 20:28:41 thorpej Exp $");
    124 
    125 #include <sys/param.h>
    126 #include <sys/systm.h>
    127 #include <sys/proc.h>
    128 #include <sys/pool.h>
    129 #include <sys/cpu.h>
    130 #include <sys/atomic.h>
    131 
    132 #include <machine/pcb.h>
    133 
    134 #include <uvm/uvm.h>
    135 #include <uvm/uvm_physseg.h>
    136 
    137 #include <m68k/cacheops.h>
    138 
    139 #if !defined(M68K_MMU_MOTOROLA) && !defined(M68K_MMU_HP)
    140 #error Hit the road, Jack...
    141 #endif
    142 
    143 #ifdef DEBUG
    144 #define PDB_FOLLOW	0x0001
    145 #define PDB_INIT	0x0002
    146 #define PDB_ENTER	0x0004
    147 #define PDB_REMOVE	0x0008
    148 #define PDB_CREATE	0x0010
    149 #define PDB_PTPAGE	0x0020
    150 #define PDB_CACHE	0x0040
    151 #define PDB_BITS	0x0080
    152 #define PDB_COLLECT	0x0100
    153 #define PDB_PROTECT	0x0200
    154 #define PDB_SEGTAB	0x0400
    155 #define PDB_MULTIMAP	0x0800
    156 #define PDB_PARANOIA	0x2000
    157 #define PDB_WIRING	0x4000
    158 #define PDB_PVDUMP	0x8000
    159 
    160 int debugmap = 0;
    161 int pmapdebug = PDB_PARANOIA;
    162 
    163 #define	PMAP_DPRINTF(l, x)	if (pmapdebug & (l)) printf x
    164 #else /* ! DEBUG */
    165 #define	PMAP_DPRINTF(l, x)	/* nothing */
    166 #endif /* DEBUG */
    167 
    168 /*
    169  * Get STEs and PTEs for user/kernel address space
    170  */
    171 #if defined(M68040) || defined(M68060)
    172 #define	pmap_ste1(m, v)	\
    173 	(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
    174 /* XXX assumes physically contiguous ST pages (if more than one) */
    175 #define pmap_ste2(m, v) \
    176 	(&((m)->pm_stab[(st_entry_t *)(*(u_int *)pmap_ste1(m, v) & SG4_ADDR1) \
    177 			- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
    178 #if defined(M68020) || defined(M68030)
    179 #define	pmap_ste(m, v)	\
    180 	(&((m)->pm_stab[(vaddr_t)(v) \
    181 			>> (mmutype == MMU_68040 ? SG4_SHIFT1 : SG_ISHIFT)]))
    182 #define pmap_ste_v(m, v) \
    183 	(mmutype == MMU_68040 \
    184 	 ? ((*pmap_ste1(m, v) & SG_V) && \
    185 	    (*pmap_ste2(m, v) & SG_V)) \
    186 	 : (*pmap_ste(m, v) & SG_V))
    187 #else
    188 #define	pmap_ste(m, v)	\
    189 	(&((m)->pm_stab[(vaddr_t)(v) >> SG4_SHIFT1]))
    190 #define pmap_ste_v(m, v) \
    191 	((*pmap_ste1(m, v) & SG_V) && (*pmap_ste2(m, v) & SG_V))
    192 #endif
    193 #else
    194 #define	pmap_ste(m, v)	 (&((m)->pm_stab[(vaddr_t)(v) >> SG_ISHIFT]))
    195 #define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
    196 #endif
    197 
    198 #define pmap_pte(m, v)	(&((m)->pm_ptab[(vaddr_t)(v) >> PG_SHIFT]))
    199 #define pmap_pte_pa(pte)	(*(pte) & PG_FRAME)
    200 #define pmap_pte_w(pte)		(*(pte) & PG_W)
    201 #define pmap_pte_ci(pte)	(*(pte) & PG_CI)
    202 #define pmap_pte_m(pte)		(*(pte) & PG_M)
    203 #define pmap_pte_u(pte)		(*(pte) & PG_U)
    204 #define pmap_pte_prot(pte)	(*(pte) & PG_PROT)
    205 #define pmap_pte_v(pte)		(*(pte) & PG_V)
    206 
    207 #define pmap_pte_set_w(pte, v) \
    208 	if (v) *(pte) |= PG_W; else *(pte) &= ~PG_W
    209 #define pmap_pte_set_prot(pte, v) \
    210 	if (v) *(pte) |= PG_PROT; else *(pte) &= ~PG_PROT
    211 #define pmap_pte_w_chg(pte, nw)		((nw) ^ pmap_pte_w(pte))
    212 #define pmap_pte_prot_chg(pte, np)	((np) ^ pmap_pte_prot(pte))
    213 
    214 /*
    215  * Given a map and a machine independent protection code,
    216  * convert to an m68k protection code.
    217  */
    218 #define pte_prot(m, p)	(protection_codes[p])
    219 static u_int protection_codes[8];
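
        /*
         * For example, once pmap_bootstrap2() below has filled in the table,
         * pte_prot(pmap_kernel(), VM_PROT_READ) yields PG_RO and
         * pte_prot(pmap_kernel(), VM_PROT_READ|VM_PROT_WRITE) yields PG_RW;
         * the map argument is unused by the macro.
         */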
    220 
    221 /*
    222  * Kernel page table page management.
    223  */
    224 struct kpt_page {
    225 	struct kpt_page *kpt_next;	/* link on either used or free list */
    226 	vaddr_t		kpt_va;		/* always valid kernel VA */
    227 	paddr_t		kpt_pa;		/* PA of this page (for speed) */
    228 };
    229 struct kpt_page *kpt_free_list, *kpt_used_list;
    230 struct kpt_page *kpt_pages;
    231 
    232 /*
    233  * Kernel segment/page table and page table map.
    234  * The page table map gives us a level of indirection we need to dynamically
    235  * expand the page table.  It is essentially a copy of the segment table
    236  * with PTEs instead of STEs.  All are initialized in locore at boot time.
    237  * Sysmap will initially contain VM_KERNEL_PT_PAGES pages of PTEs.
    238  * Segtabzero is an empty segment table which all processes share until they
    239  * reference something.
    240  */
    241 paddr_t		Sysseg_pa;
    242 st_entry_t	*Sysseg;
    243 pt_entry_t	*Sysmap, *Sysptmap;
    244 st_entry_t	*Segtabzero, *Segtabzeropa;
    245 vsize_t		Sysptsize = VM_KERNEL_PT_PAGES;
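
        /*
         * A minimal sketch of how these fit together: the kernel PTE for a
         * kernel virtual address va is Sysmap[va >> PG_SHIFT], which is what
         * pmap_pte(pmap_kernel(), va) expands to once pmap_bootstrap2() sets
         * pm_ptab to Sysmap; Sysptmap in turn holds the PTEs that map the
         * pages of Sysmap itself.
         */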
    246 
    247 static struct pmap kernel_pmap_store;
    248 struct pmap	*const kernel_pmap_ptr = &kernel_pmap_store;
    249 struct vm_map	*st_map, *pt_map;
    250 struct vm_map st_map_store, pt_map_store;
    251 
    252 vaddr_t		lwp0uarea;	/* lwp0 u-area VA, initialized in bootstrap */
    253 
    254 paddr_t		avail_start;	/* PA of first available physical page */
    255 paddr_t		avail_end;	/* PA of last available physical page */
    256 vaddr_t		virtual_avail;  /* VA of first avail page (after kernel bss)*/
    257 vaddr_t		virtual_end;	/* VA of last avail page (end of kernel AS) */
    258 int		page_cnt;	/* number of pages managed by VM system */
    259 
    260 bool		pmap_initialized = false;	/* Has pmap_init completed? */
    261 
    262 vaddr_t		m68k_uptbase = M68K_PTBASE;
    263 
    264 struct pv_header {
    265 	struct pv_entry		pvh_first;	/* first PV entry */
    266 	uint32_t		pvh_attrs;	/* attributes:
    267 						   bits 0-7: PTE bits
    268 						   bits 8-15: flags */
    269 };
    270 
    271 #define	PVH_CI		0x10	/* all entries are cache-inhibited */
    272 #define	PVH_PTPAGE	0x20	/* entry maps a page table page */
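
        /*
         * Sketch of how pvh_attrs is used elsewhere in this file: the PTE
         * bits accumulate referenced/modified state, e.g.
         *
         *	pvh->pvh_attrs |= (PG_U|PG_M);
         *
         * while the flag bits record per-page state, e.g.
         *
         *	if (pvh->pvh_attrs & PVH_CI)
         *		...	all mappings of the page are cache-inhibited
         */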
    273 
    274 struct pv_header *pv_table;
    275 TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
    276 int		pv_nfree;
    277 
    278 #ifdef CACHE_HAVE_VAC
    279 static u_int	pmap_aliasmask;	/* separation at which VA aliasing ok */
    280 #endif
    281 #if defined(M68040) || defined(M68060)
    282 u_int		protostfree;	/* prototype (default) free ST map */
    283 #endif
    284 
    285 pt_entry_t	*caddr1_pte;	/* PTE for CADDR1 */
    286 pt_entry_t	*caddr2_pte;	/* PTE for CADDR2 */
    287 
    288 struct pool	pmap_pmap_pool;	/* memory pool for pmap structures */
    289 struct pool	pmap_pv_pool;	/* memory pool for pv entries */
    290 
    291 #define pmap_alloc_pv()		pool_get(&pmap_pv_pool, PR_NOWAIT)
    292 #define pmap_free_pv(pv)	pool_put(&pmap_pv_pool, (pv))
    293 
    294 #define	PAGE_IS_MANAGED(pa)	(pmap_initialized && uvm_pageismanaged(pa))
    295 
    296 static inline struct pv_header *
    297 pa_to_pvh(paddr_t pa)
    298 {
    299 	uvm_physseg_t bank = 0;	/* XXX gcc4 -Wuninitialized */
    300 	psize_t pg = 0;
    301 
    302 	bank = uvm_physseg_find(atop((pa)), &pg);
    303 	return &uvm_physseg_get_pmseg(bank)->pvheader[pg];
    304 }
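
        /*
         * Typical use, as in the routines below (only managed pages have a
         * pv_header to return):
         *
         *	if (PAGE_IS_MANAGED(pa)) {
         *		struct pv_header *pvh = pa_to_pvh(pa);
         *		struct pv_entry *pv = &pvh->pvh_first;
         *		...	walk pv->pv_next ...
         *	}
         */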
    305 
    306 /*
    307  * Internal routines
    308  */
    309 void	pmap_remove_mapping(pmap_t, vaddr_t, pt_entry_t *, int,
    310 			    struct pv_entry **);
    311 bool	pmap_testbit(paddr_t, int);
    312 bool	pmap_changebit(paddr_t, pt_entry_t, pt_entry_t);
    313 int	pmap_enter_ptpage(pmap_t, vaddr_t, bool);
    314 void	pmap_ptpage_addref(vaddr_t);
    315 int	pmap_ptpage_delref(vaddr_t);
    316 void	pmap_pinit(pmap_t);
    317 void	pmap_release(pmap_t);
    318 
    319 #ifdef DEBUG
    320 void pmap_pvdump(paddr_t);
    321 void pmap_check_wiring(const char *, vaddr_t);
    322 #endif
    323 
    324 /* pmap_remove_mapping flags */
    325 #define	PRM_TFLUSH	0x01
    326 #define	PRM_CFLUSH	0x02
    327 #define	PRM_KEEPPTPAGE	0x04
    328 
    329 #define	active_pmap(pm) \
    330 	((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
    331 
    332 #define	active_user_pmap(pm) \
    333 	(curproc && \
    334 	 (pm) != pmap_kernel() && (pm) == curproc->p_vmspace->vm_map.pmap)
    335 
    336 static void (*pmap_load_urp_func)(paddr_t);
    337 
    338 /*
    339  * pmap_load_urp:
    340  *
    341  *	Load the user root table into the MMU.
    342  */
    343 static inline void
    344 pmap_load_urp(paddr_t urp)
    345 {
    346 	(*pmap_load_urp_func)(urp);
    347 }
    348 
    349 #ifdef CACHE_HAVE_VAC
    350 /*
    351  * pmap_init_vac:
    352  *
    353  *	Set up virtually-addressed cache information.  Only relevant
    354  *	for the HP MMU.
    355  */
    356 void
    357 pmap_init_vac(size_t vacsize)
    358 {
    359 	KASSERT(pmap_aliasmask == 0);
    360 	KASSERT(powerof2(vacsize));
    361 	pmap_aliasmask = vacsize - 1;
    362 }
    363 #endif /* CACHE_HAVE_VAC */
    364 
    365 /*
    366  * pmap_bootstrap2:		[ INTERFACE ]
    367  *
    368  *	Phase 2 of pmap bootstrap.  (Phase 1 is system-specific.)
    369  *
    370  *	Initialize lwp0 uarea, curlwp, and curpcb after MMU is turned on,
    371  *	using lwp0uarea variable saved during pmap_bootstrap().
    372  */
    373 void *
    374 pmap_bootstrap2(void)
    375 {
    376 
    377 	uvmexp.pagesize = NBPG;
    378 	uvm_md_init();
    379 
    380 	/*
    381 	 * Initialize protection array.
    382 	 * XXX: Could this have port specific values? Can't this be static?
    383 	 */
    384 	protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE]     = 0;
    385 	protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE]     = PG_RO;
    386 	protection_codes[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE]  = PG_RO;
    387 	protection_codes[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE]  = PG_RO;
    388 	protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE]    = PG_RW;
    389 	protection_codes[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
    390 	protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE]    = PG_RW;
    391 	protection_codes[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
    392 
    393 	/*
    394 	 * Initialize pmap_kernel().
    395 	 */
    396 	pmap_kernel()->pm_stpa = (st_entry_t *)Sysseg_pa;
    397 	pmap_kernel()->pm_stab = Sysseg;
    398 	pmap_kernel()->pm_ptab = Sysmap;
    399 #if defined(M68040) || defined(M68060)
    400 	if (mmutype == MMU_68040)
    401 		pmap_kernel()->pm_stfree = protostfree;
    402 #endif
    403 	pmap_kernel()->pm_count = 1;
    404 
    405 	/*
    406 	 * Initialize lwp0 uarea, curlwp, and curpcb.
    407 	 */
    408 	memset((void *)lwp0uarea, 0, USPACE);
    409 	uvm_lwp_setuarea(&lwp0, lwp0uarea);
    410 	curlwp = &lwp0;
    411 	curpcb = lwp_getpcb(&lwp0);
    412 
    413 	return (void *)lwp0uarea;
    414 }
    415 
    416 /*
    417  * pmap_virtual_space:		[ INTERFACE ]
    418  *
    419  *	Report the range of available kernel virtual address
    420  *	space to the VM system during bootstrap.
    421  *
    422  *	This is only an interface function if we do not use
    423  *	pmap_steal_memory()!
    424  *
    425  *	Note: no locking is necessary in this function.
    426  */
    427 void
    428 pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp)
    429 {
    430 
    431 	*vstartp = virtual_avail;
    432 	*vendp = virtual_end;
    433 }
    434 
    435 /*
    436  * pmap_init:			[ INTERFACE ]
    437  *
    438  *	Initialize the pmap module.  Called by vm_init(), to initialize any
    439  *	structures that the pmap system needs to map virtual memory.
    440  *
    441  *	Note: no locking is necessary in this function.
    442  */
    443 void
    444 pmap_init(void)
    445 {
    446 	vaddr_t		addr, addr2;
    447 	vsize_t		s;
    448 	struct pv_header *pvh;
    449 	int		rv;
    450 	int		npages;
    451 	uvm_physseg_t	bank;
    452 
    453 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
    454 
    455 	/*
    456 	 * Before we do anything else, initialize the PTE pointers
    457 	 * used by pmap_zero_page() and pmap_copy_page().
    458 	 */
    459 	caddr1_pte = pmap_pte(pmap_kernel(), CADDR1);
    460 	caddr2_pte = pmap_pte(pmap_kernel(), CADDR2);
    461 
    462 	PMAP_DPRINTF(PDB_INIT,
    463 	    ("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
    464 	    Sysseg, Sysmap, Sysptmap));
    465 	PMAP_DPRINTF(PDB_INIT,
    466 	    ("  pstart %lx, pend %lx, vstart %lx, vend %lx\n",
    467 	    avail_start, avail_end, virtual_avail, virtual_end));
    468 
    469 	/*
    470 	 * Allocate memory for random pmap data structures.  Includes the
    471 	 * initial segment table (Segtabzero) and the pv_header table.
    472 	 */
    473 	for (page_cnt = 0, bank = uvm_physseg_get_first();
    474 	     uvm_physseg_valid_p(bank);
    475 	     bank = uvm_physseg_get_next(bank))
    476 		page_cnt += uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
    477 	s = M68K_STSIZE;					/* Segtabzero */
    478 	s += page_cnt * sizeof(struct pv_header);	/* pv table */
    479 	s = round_page(s);
    480 	addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
    481 	if (addr == 0)
    482 		panic("pmap_init: can't allocate data structures");
    483 
    484 	Segtabzero = (st_entry_t *)addr;
    485 	(void)pmap_extract(pmap_kernel(), addr,
    486 	    (paddr_t *)(void *)&Segtabzeropa);
    487 	addr += M68K_STSIZE;
    488 
    489 	pv_table = (struct pv_header *) addr;
    490 	addr += page_cnt * sizeof(struct pv_header);
    491 
    492 	PMAP_DPRINTF(PDB_INIT, ("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
    493 	    "tbl %p\n",
    494 	    s, page_cnt, Segtabzero, Segtabzeropa,
    495 	    pv_table));
    496 
    497 	/*
    498 	 * Now that the pv and attribute tables have been allocated,
    499 	 * assign them to the memory segments.
    500 	 */
    501 	pvh = pv_table;
    502 	for (bank = uvm_physseg_get_first();
    503 	     uvm_physseg_valid_p(bank);
    504 	     bank = uvm_physseg_get_next(bank)) {
    505 		npages = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
    506 		uvm_physseg_get_pmseg(bank)->pvheader = pvh;
    507 		pvh += npages;
    508 	}
    509 
    510 	/*
    511 	 * Allocate physical memory for kernel PT pages and their management.
    512 	 * We need 1 PT page per possible task plus some slop.
    513 	 */
    514 	npages = uimin(atop(M68K_MAX_KPTSIZE), maxproc+16);
    515 	s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
    516 
    517 	/*
    518 	 * Verify that space will be allocated in region for which
    519 	 * we already have kernel PT pages.
    520 	 */
    521 	addr = 0;
    522 	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
    523 	    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
    524 	    UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
    525 	if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
    526 		panic("pmap_init: kernel PT too small");
    527 	uvm_unmap(kernel_map, addr, addr + s);
    528 
    529 	/*
    530 	 * Now allocate the space and link the pages together to
    531 	 * form the KPT free list.
    532 	 */
    533 	addr = uvm_km_alloc(kernel_map, s, 0, UVM_KMF_WIRED | UVM_KMF_ZERO);
    534 	if (addr == 0)
    535 		panic("pmap_init: cannot allocate KPT free list");
    536 	s = ptoa(npages);
    537 	addr2 = addr + s;
    538 	kpt_pages = &((struct kpt_page *)addr2)[npages];
    539 	kpt_free_list = NULL;
    540 	do {
    541 		addr2 -= PAGE_SIZE;
    542 		(--kpt_pages)->kpt_next = kpt_free_list;
    543 		kpt_free_list = kpt_pages;
    544 		kpt_pages->kpt_va = addr2;
    545 		(void) pmap_extract(pmap_kernel(), addr2,
    546 		    (paddr_t *)&kpt_pages->kpt_pa);
    547 	} while (addr != addr2);
    548 
    549 	PMAP_DPRINTF(PDB_INIT, ("pmap_init: KPT: %ld pages from %lx to %lx\n",
    550 	    atop(s), addr, addr + s));
    551 
    552 	/*
    553 	 * Allocate the segment table map and the page table map.
    554 	 */
    555 	s = maxproc * M68K_STSIZE;
    556 	st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0, false,
    557 	    &st_map_store);
    558 
    559 	addr = m68k_uptbase;
    560 	if ((M68K_PTMAXSIZE / M68K_MAX_PTSIZE) < maxproc) {
    561 		s = M68K_PTMAXSIZE;
    562 		/*
    563 		 * XXX We don't want to hang when we run out of
    564 		 * page tables, so we lower maxproc so that fork()
    565 		 * will fail instead.  Note that root could still raise
    566 		 * this value via sysctl(3).
    567 		 */
    568 		maxproc = (M68K_PTMAXSIZE / M68K_MAX_PTSIZE);
    569 	} else
    570 		s = (maxproc * M68K_MAX_PTSIZE);
    571 	pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, 0,
    572 	    true, &pt_map_store);
    573 
    574 #if defined(M68040) || defined(M68060)
    575 	if (mmutype == MMU_68040) {
    576 		protostfree = ~l2tobm(0);
    577 		for (rv = MAXUL2SIZE; rv < sizeof(protostfree)*NBBY; rv++)
    578 			protostfree &= ~l2tobm(rv);
    579 	}
    580 #endif
    581 
    582 	/*
    583 	 * Initialize the pmap pools.
    584 	 */
    585 	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
    586 	    &pool_allocator_nointr, IPL_NONE);
    587 
    588 	/*
    589 	 * Initialize the pv_entry pools.
    590 	 */
    591 	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
    592 	    &pool_allocator_meta, IPL_NONE);
    593 
    594 	/*
    595 	 * Now that this is done, mark the pages shared with the
    596 	 * hardware page table search as non-CCB (actually, as CI).
    597 	 *
    598 	 * XXX Hm. Given that this is in the kernel map, can't we just
    599 	 * use the va's?
    600 	 */
    601 #ifdef M68060
    602 #if defined(M68020) || defined(M68030) || defined(M68040)
    603 	if (cputype == CPU_68060)
    604 #endif
    605 	{
    606 		struct kpt_page *kptp = kpt_free_list;
    607 		paddr_t paddr;
    608 
    609 		while (kptp) {
    610 			pmap_changebit(kptp->kpt_pa, PG_CI,
    611 				       (pt_entry_t)~PG_CCB);
    612 			kptp = kptp->kpt_next;
    613 		}
    614 
    615 		paddr = (paddr_t)Segtabzeropa;
    616 		while (paddr < (paddr_t)Segtabzeropa + M68K_STSIZE) {
    617 			pmap_changebit(paddr, PG_CI,
    618 				       (pt_entry_t)~PG_CCB);
    619 			paddr += PAGE_SIZE;
    620 		}
    621 
    622 		DCIS();
    623 	}
    624 #endif
    625 
    626 	/*
    627 	 * Set up the routine that loads the MMU root table pointer.
    628 	 */
    629 	switch (cputype) {
    630 #if defined(M68020)
    631 	case CPU_68020:
    632 #ifdef M68K_MMU_MOTOROLA
    633 		if (mmutype == MMU_68851) {
    634 			protorp[0] = MMU51_CRP_BITS;
    635 			pmap_load_urp_func = mmu_load_urp51;
    636 		}
    637 #endif
    638 #ifdef M68K_MMU_HP
    639 		if (mmutype == MMU_HP) {
    640 			pmap_load_urp_func = mmu_load_urp20hp;
    641 		}
    642 #endif
    643 		break;
    644 #endif /* M68020 */
    645 #if defined(M68030)
    646 	case CPU_68030:
    647 		protorp[0] = MMU51_CRP_BITS;
    648 		pmap_load_urp_func = mmu_load_urp51;
    649 		break;
    650 #endif /* M68030 */
    651 #if defined(M68040)
    652 	case CPU_68040:
    653 		pmap_load_urp_func = mmu_load_urp40;
    654 		break;
    655 #endif /* M68040 */
    656 #if defined(M68060)
    657 	case CPU_68060:
    658 		pmap_load_urp_func = mmu_load_urp60;
    659 		break;
    660 #endif /* M68060 */
    661 	default:
    662 		break;
    663 	}
    664 	if (pmap_load_urp_func == NULL) {
    665 		panic("pmap_init: No mmu_load_*() for cpu=%d mmu=%d",
    666 		    cputype, mmutype);
    667 	}
    668 
    669 	/*
    670 	 * Now it is safe to enable pv_table recording.
    671 	 */
    672 	pmap_initialized = true;
    673 }
    674 
    675 /*
    676  * pmap_create:			[ INTERFACE ]
    677  *
    678  *	Create and return a physical map.
    679  *
    680  *	Note: no locking is necessary in this function.
    681  */
    682 pmap_t
    683 pmap_create(void)
    684 {
    685 	struct pmap *pmap;
    686 
    687 	PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
    688 	    ("pmap_create()\n"));
    689 
    690 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
    691 	memset(pmap, 0, sizeof(*pmap));
    692 	pmap_pinit(pmap);
    693 	return pmap;
    694 }
    695 
    696 /*
    697  * pmap_pinit:
    698  *
    699  *	Initialize a preallocated and zeroed pmap structure.
    700  *
    701  *	Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_create()!
    702  */
    703 void
    704 pmap_pinit(struct pmap *pmap)
    705 {
    706 
    707 	PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
    708 	    ("pmap_pinit(%p)\n", pmap));
    709 
    710 	/*
    711 	 * No need to allocate page table space yet but we do need a
    712 	 * valid segment table.  Initially, we point everyone at the
    713 	 * "null" segment table.  On the first pmap_enter, a real
    714 	 * segment table will be allocated.
    715 	 */
    716 	pmap->pm_stab = Segtabzero;
    717 	pmap->pm_stpa = Segtabzeropa;
    718 #if defined(M68040) || defined(M68060)
    719 #if defined(M68020) || defined(M68030)
    720 	if (mmutype == MMU_68040)
    721 #endif
    722 		pmap->pm_stfree = protostfree;
    723 #endif
    724 	pmap->pm_count = 1;
    725 }
    726 
    727 /*
    728  * pmap_destroy:		[ INTERFACE ]
    729  *
    730  *	Drop the reference count on the specified pmap, releasing
    731  *	all resources if the reference count drops to zero.
    732  */
    733 void
    734 pmap_destroy(pmap_t pmap)
    735 {
    736 	int count;
    737 
    738 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_destroy(%p)\n", pmap));
    739 
    740 	count = atomic_dec_uint_nv(&pmap->pm_count);
    741 	if (count == 0) {
    742 		pmap_release(pmap);
    743 		pool_put(&pmap_pmap_pool, pmap);
    744 	}
    745 }
    746 
    747 /*
    748  * pmap_release:
    749  *
    750  *	Release the resources held by a pmap.
    751  *
    752  *	Note: THIS FUNCTION SHOULD BE MOVED INTO pmap_destroy().
    753  */
    754 void
    755 pmap_release(pmap_t pmap)
    756 {
    757 
    758 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_release(%p)\n", pmap));
    759 
    760 #ifdef notdef /* DIAGNOSTIC */
    761 	/* count would be 0 from pmap_destroy... */
    762 	if (pmap->pm_count != 1)
    763 		panic("pmap_release count");
    764 #endif
    765 
    766 	if (pmap->pm_ptab) {
    767 		pmap_remove(pmap_kernel(), (vaddr_t)pmap->pm_ptab,
    768 		    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
    769 		uvm_km_pgremove((vaddr_t)pmap->pm_ptab,
    770 		    (vaddr_t)pmap->pm_ptab + M68K_MAX_PTSIZE);
    771 		uvm_km_free(pt_map, (vaddr_t)pmap->pm_ptab,
    772 		    M68K_MAX_PTSIZE, UVM_KMF_VAONLY);
    773 	}
    774 	KASSERT(pmap->pm_stab == Segtabzero);
    775 }
    776 
    777 /*
    778  * pmap_reference:		[ INTERFACE ]
    779  *
    780  *	Add a reference to the specified pmap.
    781  */
    782 void
    783 pmap_reference(pmap_t pmap)
    784 {
    785 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
    786 
    787 	atomic_inc_uint(&pmap->pm_count);
    788 }
    789 
    790 /*
    791  * pmap_activate:		[ INTERFACE ]
    792  *
    793  *	Activate the pmap used by the specified process.  This includes
    794  *	reloading the MMU context if it is the current process, and
    795  *	marking the pmap in use by the processor.
    796  *
    797  *	Note: we may only use spin locks here, since we are called
    798  *	by a critical section in cpu_switch()!
    799  */
    800 void
    801 pmap_activate(struct lwp *l)
    802 {
    803 	pmap_t pmap = l->l_proc->p_vmspace->vm_map.pmap;
    804 
    805 	PMAP_DPRINTF(PDB_FOLLOW|PDB_SEGTAB,
    806 	    ("pmap_activate(%p)\n", l));
    807 
    808 	KASSERT(l == curlwp);
    809 
    810 	/*
    811 	 * Because the kernel has a separate root pointer, we don't
    812 	 * need to activate the kernel pmap.
    813 	 */
    814 	if (pmap != pmap_kernel()) {
    815 		pmap_load_urp((paddr_t)pmap->pm_stpa);
    816 	}
    817 }
    818 
    819 /*
    820  * pmap_deactivate:		[ INTERFACE ]
    821  *
    822  *	Mark that the pmap used by the specified process is no longer
    823  *	in use by the processor.
    824  *
    825  *	The comment above pmap_activate() wrt. locking applies here,
    826  *	as well.
    827  */
    828 void
    829 pmap_deactivate(struct lwp *l)
    830 {
    831 
    832 	/* No action necessary in this pmap implementation. */
    833 }
    834 
    835 /*
    836  * pmap_remove:			[ INTERFACE ]
    837  *
    838  *	Remove the given range of addresses from the specified map.
    839  *
    840  *	It is assumed that the start and end are properly
    841  *	rounded to the page size.
    842  */
    843 void
    844 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
    845 {
    846 	vaddr_t nssva;
    847 	pt_entry_t *pte;
    848 	int flags;
    849 #ifdef CACHE_HAVE_VAC
    850 	bool firstpage = true, needcflush = false;
    851 #endif
    852 
    853 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
    854 	    ("pmap_remove(%p, %lx, %lx)\n", pmap, sva, eva));
    855 
    856 	flags = active_pmap(pmap) ? PRM_TFLUSH : 0;
    857 	while (sva < eva) {
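        		/*
        		 * nssva is the VA of the next segment boundary; the
        		 * "nssva == 0" test below catches wrap-around at the
        		 * top of the address space.
        		 */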
    858 		nssva = m68k_trunc_seg(sva) + NBSEG;
    859 		if (nssva == 0 || nssva > eva)
    860 			nssva = eva;
    861 
    862 		/*
    863 		 * Invalidate every valid mapping within this segment.
    864 		 */
    865 
    866 		pte = pmap_pte(pmap, sva);
    867 		while (sva < nssva) {
    868 
    869 			/*
    870 			 * If this segment is unallocated,
    871 			 * skip to the next segment boundary.
    872 			 */
    873 
    874 			if (!pmap_ste_v(pmap, sva)) {
    875 				sva = nssva;
    876 				break;
    877 			}
    878 
    879 			if (pmap_pte_v(pte)) {
    880 #ifdef CACHE_HAVE_VAC
    881 				if (pmap_aliasmask) {
    882 
    883 					/*
    884 					 * Purge kernel side of VAC to ensure
    885 					 * we get the correct state of any
    886 					 * hardware maintained bits.
    887 					 */
    888 
    889 					if (firstpage) {
    890 						DCIS();
    891 					}
    892 
    893 					/*
    894 					 * Remember if we may need to
    895 					 * flush the VAC due to a non-CI
    896 					 * mapping.
    897 					 */
    898 
    899 					if (!needcflush && !pmap_pte_ci(pte))
    900 						needcflush = true;
    901 
    902 				}
    903 				firstpage = false;
    904 #endif
    905 				pmap_remove_mapping(pmap, sva, pte, flags, NULL);
    906 			}
    907 			pte++;
    908 			sva += PAGE_SIZE;
    909 		}
    910 	}
    911 
    912 #ifdef CACHE_HAVE_VAC
    913 
    914 	/*
    915 	 * Didn't do anything, no need for cache flushes
    916 	 */
    917 
    918 	if (firstpage)
    919 		return;
    920 
    921 	/*
    922 	 * In a couple of cases, we don't need to worry about flushing
    923 	 * the VAC:
    924 	 *	1. if this is a kernel mapping,
    925 	 *	   we have already done it
    926 	 *	2. if it is a user mapping not for the current process,
    927 	 *	   it won't be there
    928 	 */
    929 
    930 	if (pmap_aliasmask && !active_user_pmap(pmap))
    931 		needcflush = false;
    932 	if (needcflush) {
    933 		if (pmap == pmap_kernel()) {
    934 			DCIS();
    935 		} else {
    936 			DCIU();
    937 		}
    938 	}
    939 #endif
    940 }
    941 
    942 /*
    943  * pmap_page_protect:		[ INTERFACE ]
    944  *
    945  *	Lower the permission for all mappings to a given page to
    946  *	the permissions specified.
    947  */
    948 void
    949 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
    950 {
    951 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
    952 	struct pv_header *pvh;
    953 	struct pv_entry *pv;
    954 	pt_entry_t *pte;
    955 	int s;
    956 
    957 #ifdef DEBUG
    958 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
    959 	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
    960 		printf("pmap_page_protect(%p, %x)\n", pg, prot);
    961 #endif
    962 
    963 	switch (prot) {
    964 	case VM_PROT_READ|VM_PROT_WRITE:
    965 	case VM_PROT_ALL:
    966 		return;
    967 
    968 	/* copy_on_write */
    969 	case VM_PROT_READ:
    970 	case VM_PROT_READ|VM_PROT_EXECUTE:
    971 		pmap_changebit(pa, PG_RO, ~0);
    972 		return;
    973 
    974 	/* remove_all */
    975 	default:
    976 		break;
    977 	}
    978 
    979 	pvh = pa_to_pvh(pa);
    980 	pv = &pvh->pvh_first;
    981 	s = splvm();
    982 	while (pv->pv_pmap != NULL) {
    983 
    984 		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
    985 #ifdef DEBUG
    986 		if (!pmap_ste_v(pv->pv_pmap, pv->pv_va) ||
    987 		    pmap_pte_pa(pte) != pa)
    988 			panic("pmap_page_protect: bad mapping");
    989 #endif
    990 		pmap_remove_mapping(pv->pv_pmap, pv->pv_va,
    991 		    pte, PRM_TFLUSH|PRM_CFLUSH, NULL);
    992 	}
    993 	splx(s);
    994 }
    995 
    996 /*
    997  * pmap_protect:		[ INTERFACE ]
    998  *
    999  *	Set the physical protection on the specified range of this map
   1000  *	as requested.
   1001  */
   1002 void
   1003 pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
   1004 {
   1005 	vaddr_t nssva;
   1006 	pt_entry_t *pte;
   1007 	bool firstpage __unused, needtflush;
   1008 	int isro;
   1009 
   1010 	PMAP_DPRINTF(PDB_FOLLOW|PDB_PROTECT,
   1011 	    ("pmap_protect(%p, %lx, %lx, %x)\n",
   1012 	    pmap, sva, eva, prot));
   1013 
   1014 #ifdef PMAPSTATS
   1015 	protect_stats.calls++;
   1016 #endif
   1017 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
   1018 		pmap_remove(pmap, sva, eva);
   1019 		return;
   1020 	}
   1021 	isro = pte_prot(pmap, prot);
   1022 	needtflush = active_pmap(pmap);
   1023 	firstpage = true;
   1024 	while (sva < eva) {
   1025 		nssva = m68k_trunc_seg(sva) + NBSEG;
   1026 		if (nssva == 0 || nssva > eva)
   1027 			nssva = eva;
   1028 
   1029 		/*
   1030 		 * If VA belongs to an unallocated segment,
   1031 		 * skip to the next segment boundary.
   1032 		 */
   1033 
   1034 		if (!pmap_ste_v(pmap, sva)) {
   1035 			sva = nssva;
   1036 			continue;
   1037 		}
   1038 
   1039 		/*
   1040 		 * Change protection on mapping if it is valid and doesn't
   1041 		 * already have the correct protection.
   1042 		 */
   1043 
   1044 		pte = pmap_pte(pmap, sva);
   1045 		while (sva < nssva) {
   1046 			if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) {
   1047 #ifdef CACHE_HAVE_VAC
   1048 
   1049 				/*
   1050 				 * Purge kernel side of VAC to ensure we
   1051 				 * get the correct state of any hardware
   1052 				 * maintained bits.
   1053 				 *
   1054 				 * XXX do we need to clear the VAC in
   1055 				 * general to reflect the new protection?
   1056 				 */
   1057 
   1058 				if (firstpage && pmap_aliasmask)
   1059 					DCIS();
   1060 #endif
   1061 
   1062 #if defined(M68040) || defined(M68060)
   1063 
   1064 				/*
   1065 				 * Clear caches if making RO (see section
   1066 				 * "7.3 Cache Coherency" in the manual).
   1067 				 */
   1068 
   1069 #if defined(M68020) || defined(M68030)
   1070 				if (isro && mmutype == MMU_68040)
   1071 #else
   1072 				if (isro)
   1073 #endif
   1074 				{
   1075 					paddr_t pa = pmap_pte_pa(pte);
   1076 
   1077 					DCFP(pa);
   1078 					ICPP(pa);
   1079 				}
   1080 #endif
   1081 				pmap_pte_set_prot(pte, isro);
   1082 				if (needtflush)
   1083 					TBIS(sva);
   1084 				firstpage = false;
   1085 			}
   1086 			pte++;
   1087 			sva += PAGE_SIZE;
   1088 		}
   1089 	}
   1090 }
   1091 
   1092 /*
   1093  * pmap_enter:			[ INTERFACE ]
   1094  *
   1095  *	Insert the given physical page (pa) at
   1096  *	the specified virtual address (va) in the
   1097  *	target physical map with the protection requested.
   1098  *
   1099  *	If specified, the page will be wired down, meaning
   1100  *	that the related pte cannot be reclaimed.
   1101  *
   1102  *	Note: This is the only routine which MAY NOT lazy-evaluate
   1103  *	or lose information.  That is, this routine must actually
   1104  *	insert this page into the given map NOW.
   1105  */
   1106 int
   1107 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1108 {
   1109 	pt_entry_t *pte;
   1110 	struct pv_entry *opv = NULL;
   1111 	int npte;
   1112 	paddr_t opa;
   1113 	bool cacheable = true;
   1114 	bool checkpv = true;
   1115 	bool wired = (flags & PMAP_WIRED) != 0;
   1116 	bool can_fail = (flags & PMAP_CANFAIL) != 0;
   1117 
   1118 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
   1119 	    ("pmap_enter(%p, %lx, %lx, %x, %x)\n",
   1120 	    pmap, va, pa, prot, wired));
   1121 
   1122 #ifdef DIAGNOSTIC
   1123 	/*
   1124 	 * pmap_enter() should never be used for CADDR1 and CADDR2.
   1125 	 */
   1126 	if (pmap == pmap_kernel() &&
   1127 	    (va == (vaddr_t)CADDR1 || va == (vaddr_t)CADDR2))
   1128 		panic("pmap_enter: used for CADDR1 or CADDR2");
   1129 #endif
   1130 
   1131 	/*
   1132 	 * For user mapping, allocate kernel VM resources if necessary.
   1133 	 */
   1134 	if (pmap->pm_ptab == NULL) {
   1135 		pmap->pm_ptab = (pt_entry_t *)
   1136 		    uvm_km_alloc(pt_map, M68K_MAX_PTSIZE, 0,
   1137 		    UVM_KMF_VAONLY |
   1138 		    (can_fail ? UVM_KMF_NOWAIT : UVM_KMF_WAITVA));
   1139 		if (pmap->pm_ptab == NULL)
   1140 			return ENOMEM;
   1141 	}
   1142 
   1143 	/*
   1144 	 * Segment table entry not valid, we need a new PT page
   1145 	 */
   1146 	if (!pmap_ste_v(pmap, va)) {
   1147 		int err = pmap_enter_ptpage(pmap, va, can_fail);
   1148 		if (err)
   1149 			return err;
   1150 	}
   1151 
   1152 	pa = m68k_trunc_page(pa);
   1153 	pte = pmap_pte(pmap, va);
   1154 	opa = pmap_pte_pa(pte);
   1155 
   1156 	PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
   1157 
   1158 	/*
   1159 	 * Mapping has not changed, must be protection or wiring change.
   1160 	 */
   1161 	if (opa == pa) {
   1162 		/*
   1163 		 * Wiring change, just update stats.
   1164 		 * We don't worry about wiring PT pages as they remain
   1165 		 * resident as long as there are valid mappings in them.
   1166 		 * Hence, if a user page is wired, the PT page will be also.
   1167 		 */
   1168 		if (pmap_pte_w_chg(pte, wired ? PG_W : 0)) {
   1169 			PMAP_DPRINTF(PDB_ENTER,
   1170 			    ("enter: wiring change -> %x\n", wired));
   1171 			if (wired)
   1172 				pmap->pm_stats.wired_count++;
   1173 			else
   1174 				pmap->pm_stats.wired_count--;
   1175 		}
   1176 		/*
   1177 		 * Retain cache inhibition status
   1178 		 */
   1179 		checkpv = false;
   1180 		if (pmap_pte_ci(pte))
   1181 			cacheable = false;
   1182 		goto validate;
   1183 	}
   1184 
   1185 	/*
   1186 	 * Mapping has changed, invalidate old range and fall through to
   1187 	 * handle validating new mapping.
   1188 	 */
   1189 	if (opa) {
   1190 		PMAP_DPRINTF(PDB_ENTER,
   1191 		    ("enter: removing old mapping %lx\n", va));
   1192 		pmap_remove_mapping(pmap, va, pte,
   1193 		    PRM_TFLUSH|PRM_CFLUSH|PRM_KEEPPTPAGE, &opv);
   1194 	}
   1195 
   1196 	/*
   1197 	 * If this is a new user mapping, increment the wiring count
   1198 	 * on this PT page.  PT pages are wired down as long as there
   1199 	 * is a valid mapping in the page.
   1200 	 */
   1201 	if (pmap != pmap_kernel())
   1202 		pmap_ptpage_addref(trunc_page((vaddr_t)pte));
   1203 
   1204 	/*
   1205 	 * Enter on the PV list if part of our managed memory
   1206 	 * Note that we raise IPL while manipulating pv_table
   1207 	 * since pmap_enter can be called at interrupt time.
   1208 	 */
   1209 	if (PAGE_IS_MANAGED(pa)) {
   1210 		struct pv_header *pvh;
   1211 		struct pv_entry *pv, *npv;
   1212 		int s;
   1213 
   1214 		pvh = pa_to_pvh(pa);
   1215 		pv = &pvh->pvh_first;
   1216 		s = splvm();
   1217 
   1218 		PMAP_DPRINTF(PDB_ENTER,
   1219 		    ("enter: pv at %p: %lx/%p/%p\n",
   1220 		    pv, pv->pv_va, pv->pv_pmap, pv->pv_next));
   1221 		/*
   1222 		 * No entries yet, use header as the first entry
   1223 		 */
   1224 		if (pv->pv_pmap == NULL) {
   1225 			pv->pv_va = va;
   1226 			pv->pv_pmap = pmap;
   1227 			pv->pv_next = NULL;
   1228 			pv->pv_ptste = NULL;
   1229 			pv->pv_ptpmap = NULL;
   1230 			pvh->pvh_attrs = 0;
   1231 		}
   1232 		/*
   1233 		 * There is at least one other VA mapping this page.
   1234 		 * Place this entry after the header.
   1235 		 */
   1236 		else {
   1237 #ifdef DEBUG
   1238 			for (npv = pv; npv; npv = npv->pv_next)
   1239 				if (pmap == npv->pv_pmap && va == npv->pv_va)
   1240 					panic("pmap_enter: already in pv_tab");
   1241 #endif
   1242 			if (opv != NULL) {
   1243 				npv = opv;
   1244 				opv = NULL;
   1245 			} else {
   1246 				npv = pmap_alloc_pv();
   1247 			}
   1248 			KASSERT(npv != NULL);
   1249 			npv->pv_va = va;
   1250 			npv->pv_pmap = pmap;
   1251 			npv->pv_next = pv->pv_next;
   1252 			npv->pv_ptste = NULL;
   1253 			npv->pv_ptpmap = NULL;
   1254 			pv->pv_next = npv;
   1255 
   1256 #ifdef CACHE_HAVE_VAC
   1257 
   1258 			/*
   1259 			 * Since there is another logical mapping for the
   1260 			 * same page we may need to cache-inhibit the
   1261 			 * descriptors on those CPUs with external VACs.
   1262 			 * We don't need to CI if:
   1263 			 *
   1264 			 * - No two mappings belong to the same user pmaps.
   1265 			 *   Since the cache is flushed on context switches
   1266 			 *   there is no problem between user processes.
   1267 			 *
   1268 			 * - Mappings within a single pmap are a certain
   1269 			 *   magic distance apart.  VAs at these appropriate
   1270 			 *   boundaries map to the same cache entries or
   1271 			 *   otherwise don't conflict.
   1272 			 *
   1273 			 * To keep it simple, we only check for these special
   1274 			 * cases if there are only two mappings, otherwise we
   1275 			 * punt and always CI.
   1276 			 *
   1277 			 * Note that there are no aliasing problems with the
   1278 			 * on-chip data-cache when the WA bit is set.
   1279 			 */
   1280 
   1281 			if (pmap_aliasmask) {
   1282 				if (pvh->pvh_attrs & PVH_CI) {
   1283 					PMAP_DPRINTF(PDB_CACHE,
   1284 					    ("enter: pa %lx already CI'ed\n",
   1285 					    pa));
   1286 					checkpv = cacheable = false;
   1287 				} else if (npv->pv_next ||
   1288 					   ((pmap == pv->pv_pmap ||
   1289 					     pmap == pmap_kernel() ||
   1290 					     pv->pv_pmap == pmap_kernel()) &&
   1291 					    ((pv->pv_va & pmap_aliasmask) !=
   1292 					     (va & pmap_aliasmask)))) {
   1293 					PMAP_DPRINTF(PDB_CACHE,
   1294 					    ("enter: pa %lx CI'ing all\n",
   1295 					    pa));
   1296 					cacheable = false;
   1297 					pvh->pvh_attrs |= PVH_CI;
   1298 				}
   1299 			}
   1300 #endif
   1301 		}
   1302 
   1303 		/*
   1304 		 * Speed pmap_is_referenced() or pmap_is_modified() based
   1305 		 * on the hint provided in access_type.
   1306 		 */
   1307 #ifdef DIAGNOSTIC
   1308 		if ((flags & VM_PROT_ALL) & ~prot)
   1309 			panic("pmap_enter: access_type exceeds prot");
   1310 #endif
   1311 		if (flags & VM_PROT_WRITE)
   1312 			pvh->pvh_attrs |= (PG_U|PG_M);
   1313 		else if (flags & VM_PROT_ALL)
   1314 			pvh->pvh_attrs |= PG_U;
   1315 
   1316 		splx(s);
   1317 	}
   1318 	/*
   1319 	 * Assumption: if it is not part of our managed memory
   1320 	 * then it must be device memory which may be volatile.
   1321 	 */
   1322 	else if (pmap_initialized) {
   1323 		checkpv = cacheable = false;
   1324 	}
   1325 
   1326 	/*
   1327 	 * Increment counters
   1328 	 */
   1329 	pmap->pm_stats.resident_count++;
   1330 	if (wired)
   1331 		pmap->pm_stats.wired_count++;
   1332 
   1333 validate:
   1334 #ifdef CACHE_HAVE_VAC
   1335 	/*
   1336 	 * Purge kernel side of VAC to ensure we get correct state
   1337 	 * of HW bits so we don't clobber them.
   1338 	 */
   1339 	if (pmap_aliasmask)
   1340 		DCIS();
   1341 #endif
   1342 
   1343 	/*
   1344 	 * Build the new PTE.
   1345 	 */
   1346 
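        	/*
        	 * Cache control: a mapping that must not be cached gets PG_CIN
        	 * on the 040/060 (PG_CI otherwise); a writable, cacheable page
        	 * on the 040/060 is marked copyback (PG_CCB) instead.
        	 */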
   1347 	npte = pa | pte_prot(pmap, prot) | (*pte & (PG_M|PG_U)) | PG_V;
   1348 	if (wired)
   1349 		npte |= PG_W;
   1350 	if (!checkpv && !cacheable)
   1351 #if defined(M68040) || defined(M68060)
   1352 #if defined(M68020) || defined(M68030)
   1353 		npte |= (mmutype == MMU_68040 ? PG_CIN : PG_CI);
   1354 #else
   1355 		npte |= PG_CIN;
   1356 #endif
   1357 #else
   1358 		npte |= PG_CI;
   1359 #endif
   1360 #if defined(M68040) || defined(M68060)
   1361 #if defined(M68020) || defined(M68030)
   1362 	else if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
   1363 #else
   1364 	else if ((npte & (PG_PROT|PG_CI)) == PG_RW)
   1365 #endif
   1366 		npte |= PG_CCB;
   1367 #endif
   1368 
   1369 	PMAP_DPRINTF(PDB_ENTER, ("enter: new pte value %x\n", npte));
   1370 
   1371 	/*
   1372 	 * Remember if this was a wiring-only change.
   1373 	 * If so, we need not flush the TLB and caches.
   1374 	 */
   1375 
   1376 	wired = ((*pte ^ npte) == PG_W);
   1377 #if defined(M68040) || defined(M68060)
   1378 #if defined(M68020) || defined(M68030)
   1379 	if (mmutype == MMU_68040 && !wired)
   1380 #else
   1381 	if (!wired)
   1382 #endif
   1383 	{
   1384 		DCFP(pa);
   1385 		ICPP(pa);
   1386 	}
   1387 #endif
   1388 	*pte = npte;
   1389 	if (!wired && active_pmap(pmap))
   1390 		TBIS(va);
   1391 #ifdef CACHE_HAVE_VAC
   1392 	/*
   1393 	 * The following is executed if we are entering a second
   1394 	 * (or greater) mapping for a physical page and the mappings
   1395 	 * may create an aliasing problem.  In this case we must
   1396 	 * cache inhibit the descriptors involved and flush any
   1397 	 * external VAC.
   1398 	 */
   1399 	if (checkpv && !cacheable) {
   1400 		pmap_changebit(pa, PG_CI, ~0);
   1401 		DCIA();
   1402 #ifdef DEBUG
   1403 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
   1404 		    (PDB_CACHE|PDB_PVDUMP))
   1405 			pmap_pvdump(pa);
   1406 #endif
   1407 	}
   1408 #endif
   1409 #ifdef DEBUG
   1410 	if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
   1411 		pmap_check_wiring("enter", trunc_page((vaddr_t)pte));
   1412 #endif
   1413 
   1414 	if (opv != NULL)
   1415 		pmap_free_pv(opv);
   1416 
   1417 	return 0;
   1418 }
   1419 
   1420 void
   1421 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags)
   1422 {
   1423 	pmap_t pmap = pmap_kernel();
   1424 	pt_entry_t *pte;
   1425 	int s, npte;
   1426 
   1427 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER,
   1428 	    ("pmap_kenter_pa(%lx, %lx, %x)\n", va, pa, prot));
   1429 
   1430 	/*
   1431 	 * Segment table entry not valid, we need a new PT page
   1432 	 */
   1433 
   1434 	if (!pmap_ste_v(pmap, va)) {
   1435 		s = splvm();
   1436 		pmap_enter_ptpage(pmap, va, false);
   1437 		splx(s);
   1438 	}
   1439 
   1440 	pa = m68k_trunc_page(pa);
   1441 	pte = pmap_pte(pmap, va);
   1442 
   1443 	PMAP_DPRINTF(PDB_ENTER, ("enter: pte %p, *pte %x\n", pte, *pte));
   1444 	KASSERT(!pmap_pte_v(pte));
   1445 
   1446 	/*
   1447 	 * Increment counters
   1448 	 */
   1449 
   1450 	pmap->pm_stats.resident_count++;
   1451 	pmap->pm_stats.wired_count++;
   1452 
   1453 	/*
   1454 	 * Build the new PTE.
   1455 	 */
   1456 
   1457 	npte = pa | pte_prot(pmap, prot) | PG_V | PG_W;
   1458 #if defined(M68040) || defined(M68060)
   1459 #if defined(M68020) || defined(M68030)
   1460 	if (mmutype == MMU_68040 && (npte & PG_PROT) == PG_RW)
   1461 #else
   1462 	if ((npte & PG_PROT) == PG_RW)
   1463 #endif
   1464 		npte |= PG_CCB;
   1465 
   1466 	if (mmutype == MMU_68040) {
   1467 		DCFP(pa);
   1468 		ICPP(pa);
   1469 	}
   1470 #endif
   1471 
   1472 	*pte = npte;
   1473 	TBIS(va);
   1474 }
   1475 
   1476 void
   1477 pmap_kremove(vaddr_t va, vsize_t size)
   1478 {
   1479 	pmap_t pmap = pmap_kernel();
   1480 	pt_entry_t *pte;
   1481 	vaddr_t nssva;
   1482 	vaddr_t eva = va + size;
   1483 #ifdef CACHE_HAVE_VAC
   1484 	bool firstpage, needcflush;
   1485 #endif
   1486 
   1487 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
   1488 	    ("pmap_kremove(%lx, %lx)\n", va, size));
   1489 
   1490 #ifdef CACHE_HAVE_VAC
   1491 	firstpage = true;
   1492 	needcflush = false;
   1493 #endif
   1494 	while (va < eva) {
   1495 		nssva = m68k_trunc_seg(va) + NBSEG;
   1496 		if (nssva == 0 || nssva > eva)
   1497 			nssva = eva;
   1498 
   1499 		/*
   1500 		 * If VA belongs to an unallocated segment,
   1501 		 * skip to the next segment boundary.
   1502 		 */
   1503 
   1504 		if (!pmap_ste_v(pmap, va)) {
   1505 			va = nssva;
   1506 			continue;
   1507 		}
   1508 
   1509 		/*
   1510 		 * Invalidate every valid mapping within this segment.
   1511 		 */
   1512 
   1513 		pte = pmap_pte(pmap, va);
   1514 		while (va < nssva) {
   1515 			if (!pmap_pte_v(pte)) {
   1516 				pte++;
   1517 				va += PAGE_SIZE;
   1518 				continue;
   1519 			}
   1520 #ifdef CACHE_HAVE_VAC
   1521 			if (pmap_aliasmask) {
   1522 
   1523 				/*
   1524 				 * Purge kernel side of VAC to ensure
   1525 				 * we get the correct state of any
   1526 				 * hardware maintained bits.
   1527 				 */
   1528 
   1529 				if (firstpage) {
   1530 					DCIS();
   1531 					firstpage = false;
   1532 				}
   1533 
   1534 				/*
   1535 				 * Remember if we may need to
   1536 				 * flush the VAC.
   1537 				 */
   1538 
   1539 				needcflush = true;
   1540 			}
   1541 #endif
   1542 			pmap->pm_stats.wired_count--;
   1543 			pmap->pm_stats.resident_count--;
   1544 			*pte = PG_NV;
   1545 			TBIS(va);
   1546 			pte++;
   1547 			va += PAGE_SIZE;
   1548 		}
   1549 	}
   1550 
   1551 #ifdef CACHE_HAVE_VAC
   1552 
   1553 	/*
   1554 	 * In a couple of cases, we don't need to worry about flushing
   1555 	 * the VAC:
   1556 	 *	1. if this is a kernel mapping,
   1557 	 *	   we have already done it
   1558 	 *	2. if it is a user mapping not for the current process,
   1559 	 *	   it won't be there
   1560 	 */
   1561 
   1562 	if (pmap_aliasmask && !active_user_pmap(pmap))
   1563 		needcflush = false;
   1564 	if (needcflush) {
   1565 		if (pmap == pmap_kernel()) {
   1566 			DCIS();
   1567 		} else {
   1568 			DCIU();
   1569 		}
   1570 	}
   1571 #endif
   1572 }
   1573 
   1574 /*
   1575  * pmap_unwire:			[ INTERFACE ]
   1576  *
   1577  *	Clear the wired attribute for a map/virtual-address pair.
   1578  *
   1579  *	The mapping must already exist in the pmap.
   1580  */
   1581 void
   1582 pmap_unwire(pmap_t pmap, vaddr_t va)
   1583 {
   1584 	pt_entry_t *pte;
   1585 
   1586 	PMAP_DPRINTF(PDB_FOLLOW,
   1587 	    ("pmap_unwire(%p, %lx)\n", pmap, va));
   1588 
   1589 	pte = pmap_pte(pmap, va);
   1590 
   1591 	/*
   1592 	 * If wiring actually changed (always?) clear the wire bit and
   1593 	 * update the wire count.  Note that wiring is not a hardware
   1594 	 * characteristic so there is no need to invalidate the TLB.
   1595 	 */
   1596 
   1597 	if (pmap_pte_w_chg(pte, 0)) {
   1598 		pmap_pte_set_w(pte, false);
   1599 		pmap->pm_stats.wired_count--;
   1600 	}
   1601 }
   1602 
   1603 /*
   1604  * pmap_extract:		[ INTERFACE ]
   1605  *
   1606  *	Extract the physical address associated with the given
   1607  *	pmap/virtual address pair.
   1608  */
   1609 bool
   1610 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
   1611 {
   1612 	paddr_t pa;
   1613 	u_int pte;
   1614 
   1615 	PMAP_DPRINTF(PDB_FOLLOW,
   1616 	    ("pmap_extract(%p, %lx) -> ", pmap, va));
   1617 
   1618 	if (pmap_ste_v(pmap, va)) {
   1619 		pte = *(u_int *)pmap_pte(pmap, va);
   1620 		if (pte) {
   1621 			pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
   1622 			if (pap != NULL)
   1623 				*pap = pa;
   1624 #ifdef DEBUG
   1625 			if (pmapdebug & PDB_FOLLOW)
   1626 				printf("%lx\n", pa);
   1627 #endif
   1628 			return true;
   1629 		}
   1630 	}
   1631 #ifdef DEBUG
   1632 	if (pmapdebug & PDB_FOLLOW)
   1633 		printf("failed\n");
   1634 #endif
   1635 	return false;
   1636 }
   1637 
   1638 /*
   1639  * vtophys:		[ INTERFACE-ish ]
   1640  *
   1641  *	Kernel virtual to physical.  Use with caution.
   1642  */
   1643 paddr_t
   1644 vtophys(vaddr_t va)
   1645 {
   1646 	paddr_t pa;
   1647 
   1648 	if (pmap_extract(pmap_kernel(), va, &pa))
   1649 		return pa;
   1650 	KASSERT(0);
   1651 	return (paddr_t) -1;
   1652 }
   1653 
   1654 /*
   1655  * pmap_copy:		[ INTERFACE ]
   1656  *
   1657  *	Copy the mapping range specified by src_addr/len
   1658  *	from the source map to the range dst_addr/len
   1659  *	in the destination map.
   1660  *
   1661  *	This routine is only advisory and need not do anything.
   1662  */
   1663 void
   1664 pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr, vsize_t len,
   1665     vaddr_t src_addr)
   1666 {
   1667 
   1668 	PMAP_DPRINTF(PDB_FOLLOW,
   1669 	    ("pmap_copy(%p, %p, %lx, %lx, %lx)\n",
   1670 	    dst_pmap, src_pmap, dst_addr, len, src_addr));
   1671 }
   1672 
   1673 /*
   1674  * pmap_collect1():
   1675  *
   1676  *	Garbage-collect KPT pages.  Helper for the above (bogus)
   1677  *	pmap_collect().
   1678  *	Garbage-collect KPT pages.  Helper for the (bogus)
   1679  *	pmap_collect(), below.
   1680  *	WAY OF HANDLING PT PAGES!
   1681  */
   1682 static inline void
   1683 pmap_collect1(pmap_t pmap, paddr_t startpa, paddr_t endpa)
   1684 {
   1685 	paddr_t pa;
   1686 	struct pv_header *pvh;
   1687 	struct pv_entry *pv;
   1688 	pt_entry_t *pte;
   1689 	paddr_t kpa;
   1690 #ifdef DEBUG
   1691 	st_entry_t *ste;
   1692 	int opmapdebug = 0;
   1693 #endif
   1694 
   1695 	for (pa = startpa; pa < endpa; pa += PAGE_SIZE) {
   1696 		struct kpt_page *kpt, **pkpt;
   1697 
   1698 		/*
   1699 		 * Locate physical pages which are being used as kernel
   1700 		 * page table pages.
   1701 		 */
   1702 
   1703 		pvh = pa_to_pvh(pa);
   1704 		pv = &pvh->pvh_first;
   1705 		if (pv->pv_pmap != pmap_kernel() ||
   1706 		    !(pvh->pvh_attrs & PVH_PTPAGE))
   1707 			continue;
   1708 		do {
   1709 			if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
   1710 				break;
   1711 		} while ((pv = pv->pv_next));
   1712 		if (pv == NULL)
   1713 			continue;
   1714 #ifdef DEBUG
   1715 		if (pv->pv_va < (vaddr_t)Sysmap ||
   1716 		    pv->pv_va >= (vaddr_t)Sysmap + M68K_MAX_PTSIZE) {
   1717 			printf("collect: kernel PT VA out of range\n");
   1718 			pmap_pvdump(pa);
   1719 			continue;
   1720 		}
   1721 #endif
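        		/*
        		 * Scan the KPT page backwards for a valid PTE; if the
        		 * scan stops before falling off the start of the page,
        		 * the page still holds a valid mapping and cannot be
        		 * reclaimed.
        		 */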
   1722 		pte = (pt_entry_t *)(pv->pv_va + PAGE_SIZE);
   1723 		while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
   1724 			;
   1725 		if (pte >= (pt_entry_t *)pv->pv_va)
   1726 			continue;
   1727 
   1728 #ifdef DEBUG
   1729 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
   1730 			printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
   1731 			    pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
   1732 			opmapdebug = pmapdebug;
   1733 			pmapdebug |= PDB_PTPAGE;
   1734 		}
   1735 
   1736 		ste = pv->pv_ptste;
   1737 #endif
   1738 		/*
   1739 		 * If all entries were invalid we can remove the page.
   1740 		 * We call pmap_remove_entry to take care of invalidating
   1741 		 * ST and Sysptmap entries.
   1742 		 */
   1743 
   1744 		if (!pmap_extract(pmap, pv->pv_va, &kpa)) {
   1745 			printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
   1746 			    pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
   1747 			panic("pmap_collect: mapping not found");
   1748 		}
   1749 		pmap_remove_mapping(pmap, pv->pv_va, NULL,
   1750 		    PRM_TFLUSH|PRM_CFLUSH, NULL);
   1751 
   1752 		/*
   1753 		 * Use the physical address to locate the original
   1754 		 * (kmem_alloc assigned) address for the page and put
   1755 		 * that page back on the free list.
   1756 		 */
   1757 
   1758 		for (pkpt = &kpt_used_list, kpt = *pkpt;
   1759 		     kpt != NULL;
   1760 		     pkpt = &kpt->kpt_next, kpt = *pkpt)
   1761 			if (kpt->kpt_pa == kpa)
   1762 				break;
   1763 #ifdef DEBUG
   1764 		if (kpt == NULL)
   1765 			panic("pmap_collect: lost a KPT page");
   1766 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
   1767 			printf("collect: %lx (%lx) to free list\n",
   1768 			    kpt->kpt_va, kpa);
   1769 #endif
   1770 		*pkpt = kpt->kpt_next;
   1771 		kpt->kpt_next = kpt_free_list;
   1772 		kpt_free_list = kpt;
   1773 #ifdef DEBUG
   1774 		if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
   1775 			pmapdebug = opmapdebug;
   1776 
   1777 		if (*ste != SG_NV)
   1778 			printf("collect: kernel STE at %p still valid (%x)\n",
   1779 			    ste, *ste);
   1780 		ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
   1781 		if (*ste != SG_NV)
   1782 			printf("collect: kernel PTmap at %p still valid (%x)\n",
   1783 			    ste, *ste);
   1784 #endif
   1785 	}
   1786 }
   1787 
   1788 /*
   1789  * pmap_collect:
   1790  *
   1791  *	Helper for pmap_enter_ptpage().
   1792  *
   1793  *	Garbage collects the physical map system for pages which are no
   1794  *	longer used.  Success need not be guaranteed -- that is, some
   1795  *	pages which are no longer used may be missed while others are
   1796  *	collected.
   1797  */
   1798 static void
   1799 pmap_collect(void)
   1800 {
   1801 	int s;
   1802 	uvm_physseg_t bank;
   1803 
   1804 	/*
   1805 	 * XXX This is very bogus.  We should handle kernel PT
   1806 	 * XXX pages much differently.
   1807 	 */
   1808 
   1809 	s = splvm();
   1810 	for (bank = uvm_physseg_get_first();
   1811 	     uvm_physseg_valid_p(bank);
   1812 	     bank = uvm_physseg_get_next(bank)) {
   1813 		pmap_collect1(pmap_kernel(), ptoa(uvm_physseg_get_start(bank)),
   1814 		    ptoa(uvm_physseg_get_end(bank)));
   1815 	}
   1816 	splx(s);
   1817 }
   1818 
   1819 /*
   1820  * pmap_zero_page:		[ INTERFACE ]
   1821  *
   1822  *	Zero the specified (machine independent) page by mapping the page
   1823  *	into virtual memory and using zeropage() to clear its contents,
   1824  *	one machine dependent page at a time.
   1825  *
   1826  *	Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
   1827  *	      (Actually, we go to splvm(), and since we don't
   1828  *	      support multiple processors, this is sufficient.)
   1829  */
   1830 void
   1831 pmap_zero_page(paddr_t phys)
   1832 {
   1833 	int npte;
   1834 
   1835 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_zero_page(%lx)\n", phys));
   1836 
   1837 	npte = phys | PG_V;
   1838 #ifdef CACHE_HAVE_VAC
   1839 	if (pmap_aliasmask) {
   1840 
   1841 		/*
   1842 		 * Cache-inhibit the mapping on VAC machines, as we would
   1843 		 * be wasting the cache load.
   1844 		 */
   1845 
   1846 		npte |= PG_CI;
   1847 	}
   1848 #endif
   1849 
   1850 #if defined(M68040) || defined(M68060)
   1851 #if defined(M68020) || defined(M68030)
   1852 	if (mmutype == MMU_68040)
   1853 #endif
   1854 	{
   1855 		/*
   1856 		 * Set copyback caching on the page; this is required
   1857 		 * for cache consistency (since regular mappings are
   1858 		 * copyback as well).
   1859 		 */
   1860 
   1861 		npte |= PG_CCB;
   1862 	}
   1863 #endif
   1864 
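        	/*
        	 * Map the page at the CADDR1 temporary VA and flush any
        	 * stale TLB entry for it.
        	 */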
   1865 	*caddr1_pte = npte;
   1866 	TBIS((vaddr_t)CADDR1);
   1867 
   1868 	zeropage(CADDR1);
   1869 
   1870 #ifdef DEBUG
   1871 	*caddr1_pte = PG_NV;
   1872 	TBIS((vaddr_t)CADDR1);
   1873 #endif
   1874 }
   1875 
   1876 /*
   1877  * pmap_copy_page:		[ INTERFACE ]
   1878  *
   1879  *	Copy the specified (machine independent) page by mapping the pages
   1880  *	into virtual memory and using copypage() to copy them, one machine
   1881  *	dependent page at a time.
   1882  *
   1883  *	Note: WE DO NOT CURRENTLY LOCK THE TEMPORARY ADDRESSES!
   1884  *	      (Actually, we go to splvm(), and since we don't
   1885  *	      support multiple processors, this is sufficient.)
   1886  */
   1887 void
   1888 pmap_copy_page(paddr_t src, paddr_t dst)
   1889 {
   1890 	pt_entry_t npte1, npte2;
   1891 
   1892 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_copy_page(%lx, %lx)\n", src, dst));
   1893 
   1894 	npte1 = src | PG_RO | PG_V;
   1895 	npte2 = dst | PG_V;
   1896 #ifdef CACHE_HAVE_VAC
   1897 	if (pmap_aliasmask) {
   1898 
   1899 		/*
   1900 		 * Cache-inhibit the mapping on VAC machines, as we would
   1901 		 * be wasting the cache load.
   1902 		 */
   1903 
   1904 		npte1 |= PG_CI;
   1905 		npte2 |= PG_CI;
   1906 	}
   1907 #endif
   1908 
   1909 #if defined(M68040) || defined(M68060)
   1910 #if defined(M68020) || defined(M68030)
   1911 	if (mmutype == MMU_68040)
   1912 #endif
   1913 	{
   1914 		/*
   1915 		 * Set copyback caching on the pages; this is required
   1916 		 * for cache consistency (since regular mappings are
   1917 		 * copyback as well).
   1918 		 */
   1919 
   1920 		npte1 |= PG_CCB;
   1921 		npte2 |= PG_CCB;
   1922 	}
   1923 #endif
   1924 
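        	/*
        	 * Map the source read-only at CADDR1 and the destination
        	 * read/write at CADDR2, flushing any stale TLB entries.
        	 */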
   1925 	*caddr1_pte = npte1;
   1926 	TBIS((vaddr_t)CADDR1);
   1927 
   1928 	*caddr2_pte = npte2;
   1929 	TBIS((vaddr_t)CADDR2);
   1930 
   1931 	copypage(CADDR1, CADDR2);
   1932 
   1933 #ifdef DEBUG
   1934 	*caddr1_pte = PG_NV;
   1935 	TBIS((vaddr_t)CADDR1);
   1936 
   1937 	*caddr2_pte = PG_NV;
   1938 	TBIS((vaddr_t)CADDR2);
   1939 #endif
   1940 }
   1941 
   1942 /*
   1943  * pmap_clear_modify:		[ INTERFACE ]
   1944  *
   1945  *	Clear the modify bits on the specified physical page.
   1946  */
   1947 bool
   1948 pmap_clear_modify(struct vm_page *pg)
   1949 {
   1950 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1951 
   1952 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
   1953 
   1954 	return pmap_changebit(pa, 0, (pt_entry_t)~PG_M);
   1955 }
   1956 
   1957 /*
   1958  * pmap_clear_reference:	[ INTERFACE ]
   1959  *
   1960  *	Clear the reference bit on the specified physical page.
   1961  */
   1962 bool
   1963 pmap_clear_reference(struct vm_page *pg)
   1964 {
   1965 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1966 
   1967 	PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_reference(%p)\n", pg));
   1968 
   1969 	return pmap_changebit(pa, 0, (pt_entry_t)~PG_U);
   1970 }
   1971 
   1972 /*
   1973  * pmap_is_referenced:		[ INTERFACE ]
   1974  *
   1975  *	Return whether or not the specified physical page is referenced
   1976  *	by any physical maps.
   1977  */
   1978 bool
   1979 pmap_is_referenced(struct vm_page *pg)
   1980 {
   1981 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1982 
   1983 	return pmap_testbit(pa, PG_U);
   1984 }
   1985 
   1986 /*
   1987  * pmap_is_modified:		[ INTERFACE ]
   1988  *
   1989  *	Return whether or not the specified physical page is modified
   1990  *	by any physical maps.
   1991  */
   1992 bool
   1993 pmap_is_modified(struct vm_page *pg)
   1994 {
   1995 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   1996 
   1997 	return pmap_testbit(pa, PG_M);
   1998 }
   1999 
   2000 /*
   2001  * pmap_phys_address:		[ INTERFACE ]
   2002  *
   2003  *	Return the physical address corresponding to the specified
   2004  *	cookie.  Used by the device pager to decode a device driver's
   2005  *	mmap entry point return value.
   2006  *
   2007  *	Note: no locking is necessary in this function.
   2008  */
   2009 paddr_t
   2010 pmap_phys_address(paddr_t ppn)
   2011 {
   2012 	return m68k_ptob(ppn);
   2013 }
   2014 
   2015 #ifdef CACHE_HAVE_VAC
   2016 /*
   2017  * pmap_prefer:			[ INTERFACE ]
   2018  *
   2019  *	Find the first virtual address >= *vap that does not
   2020  *	cause a virtually-addressed cache alias problem.
   2021  */
   2022 void
   2023 pmap_prefer(vaddr_t foff, vaddr_t *vap)
   2024 {
   2025 	vaddr_t va;
   2026 	vsize_t d;
   2027 
   2028 #ifdef M68K_MMU_MOTOROLA
   2029 	if (pmap_aliasmask)
   2030 #endif
   2031 	{
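        		/*
        		 * Advance *vap just enough that its alias bits (those
        		 * covered by pmap_aliasmask) match foff's; mappings with
        		 * matching alias bits index the same cache lines and so
        		 * avoid VAC alias problems.
        		 */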
   2032 		va = *vap;
   2033 		d = foff - va;
   2034 		d &= pmap_aliasmask;
   2035 		*vap = va + d;
   2036 	}
   2037 }
   2038 #endif /* CACHE_HAVE_VAC */
   2039 
   2040 /*
   2041  * Miscellaneous support routines follow
   2042  */
   2043 
   2044 /*
   2045  * pmap_remove_mapping:
   2046  *
   2047  *	Invalidate a single page denoted by pmap/va.
   2048  *
   2049  *	If (pte != NULL), it is the already computed PTE for the page.
   2050  *
   2051  *	If (flags & PRM_TFLUSH), we must invalidate any TLB information.
   2052  *
   2053  *	If (flags & PRM_CFLUSH), we must flush/invalidate any cache
   2054  *	information.
   2055  *
   2056  *	If (flags & PRM_KEEPPTPAGE), we don't free the page table page
   2057  *	if the reference drops to zero.
   2058  */
   2059 /* static */
   2060 void
   2061 pmap_remove_mapping(pmap_t pmap, vaddr_t va, pt_entry_t *pte, int flags,
   2062     struct pv_entry **opvp)
   2063 {
   2064 	paddr_t pa;
   2065 	struct pv_header *pvh;
   2066 	struct pv_entry *pv, *npv, *opv = NULL;
   2067 	struct pmap *ptpmap;
   2068 	st_entry_t *ste;
   2069 	int s, bits;
   2070 #ifdef DEBUG
   2071 	pt_entry_t opte;
   2072 #endif
   2073 
   2074 	PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
   2075 	    ("pmap_remove_mapping(%p, %lx, %p, %x, %p)\n",
   2076 	    pmap, va, pte, flags, opvp));
   2077 
   2078 	/*
   2079 	 * PTE not provided, compute it from pmap and va.
   2080 	 */
   2081 
   2082 	if (pte == NULL) {
   2083 		pte = pmap_pte(pmap, va);
   2084 		if (*pte == PG_NV)
   2085 			return;
   2086 	}
   2087 
   2088 #ifdef CACHE_HAVE_VAC
   2089 	if (pmap_aliasmask && (flags & PRM_CFLUSH)) {
   2090 
   2091 		/*
   2092 		 * Purge kernel side of VAC to ensure we get the correct
   2093 		 * state of any hardware maintained bits.
   2094 		 */
   2095 
   2096 		DCIS();
   2097 
   2098 		/*
   2099 		 * If this is a non-CI user mapping for the current process,
   2100 		 * flush the VAC.  Note that the kernel side was flushed
   2101 		 * above so we don't worry about non-CI kernel mappings.
   2102 		 */
   2103 
   2104 		if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) {
   2105 			DCIU();
   2106 		}
   2107 	}
   2108 #endif
   2109 
   2110 	pa = pmap_pte_pa(pte);
   2111 #ifdef DEBUG
   2112 	opte = *pte;
   2113 #endif
   2114 
   2115 	/*
   2116 	 * Update statistics
   2117 	 */
   2118 
   2119 	if (pmap_pte_w(pte))
   2120 		pmap->pm_stats.wired_count--;
   2121 	pmap->pm_stats.resident_count--;
   2122 
   2123 #if defined(M68040) || defined(M68060)
   2124 #if defined(M68020) || defined(M68030)
   2125 	if (mmutype == MMU_68040)
   2126 #endif
   2127 	if ((flags & PRM_CFLUSH)) {
   2128 		DCFP(pa);
   2129 		ICPP(pa);
   2130 	}
   2131 #endif
   2132 
   2133 	/*
   2134 	 * Invalidate the PTE after saving the reference modify info.
   2135 	 */
   2136 
   2137 	PMAP_DPRINTF(PDB_REMOVE, ("remove: invalidating pte at %p\n", pte));
   2138 	bits = *pte & (PG_U|PG_M);
   2139 	*pte = PG_NV;
   2140 	if ((flags & PRM_TFLUSH) && active_pmap(pmap))
   2141 		TBIS(va);
   2142 
   2143 	/*
   2144 	 * For user mappings decrement the wiring count on
   2145 	 * the PT page.
   2146 	 */
   2147 
   2148 	if (pmap != pmap_kernel()) {
   2149 		vaddr_t ptpva = trunc_page((vaddr_t)pte);
   2150 		int refs = pmap_ptpage_delref(ptpva);
   2151 #ifdef DEBUG
   2152 		if (pmapdebug & PDB_WIRING)
   2153 			pmap_check_wiring("remove", ptpva);
   2154 #endif
   2155 
   2156 		/*
   2157 		 * If reference count drops to 0, and we're not instructed
   2158 		 * to keep it around, free the PT page.
   2159 		 */
   2160 
   2161 		if (refs == 0 && (flags & PRM_KEEPPTPAGE) == 0) {
   2162 #ifdef DIAGNOSTIC
   2163 			struct pv_header *ptppvh;
   2164 			struct pv_entry *ptppv;
   2165 #endif
   2166 			paddr_t ptppa;
   2167 
   2168 			ptppa = pmap_pte_pa(pmap_pte(pmap_kernel(), ptpva));
   2169 #ifdef DIAGNOSTIC
   2170 			if (PAGE_IS_MANAGED(ptppa) == 0)
   2171 				panic("pmap_remove_mapping: unmanaged PT page");
   2172 			ptppvh = pa_to_pvh(ptppa);
   2173 			ptppv = &ptppvh->pvh_first;
   2174 			if (ptppv->pv_ptste == NULL)
   2175 				panic("pmap_remove_mapping: ptste == NULL");
   2176 			if (ptppv->pv_pmap != pmap_kernel() ||
   2177 			    ptppv->pv_va != ptpva ||
   2178 			    ptppv->pv_next != NULL)
   2179 				panic("pmap_remove_mapping: "
   2180 				    "bad PT page pmap %p, va 0x%lx, next %p",
   2181 				    ptppv->pv_pmap, ptppv->pv_va,
   2182 				    ptppv->pv_next);
   2183 #endif
   2184 			pmap_remove_mapping(pmap_kernel(), ptpva,
   2185 			    NULL, PRM_TFLUSH|PRM_CFLUSH, NULL);
   2186 			rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
   2187 			uvm_pagefree(PHYS_TO_VM_PAGE(ptppa));
   2188 			rw_exit(uvm_kernel_object->vmobjlock);
   2189 			PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
   2190 			    ("remove: PT page 0x%lx (0x%lx) freed\n",
   2191 			    ptpva, ptppa));
   2192 		}
   2193 	}
   2194 
   2195 	/*
   2196 	 * If this isn't a managed page, we are all done.
   2197 	 */
   2198 
   2199 	if (PAGE_IS_MANAGED(pa) == 0)
   2200 		return;
   2201 
   2202 	/*
   2203 	 * Otherwise remove it from the PV table
   2204 	 * (raise IPL since we may be called at interrupt time).
   2205 	 */
   2206 
   2207 	pvh = pa_to_pvh(pa);
   2208 	pv = &pvh->pvh_first;
   2209 	ste = NULL;
   2210 	s = splvm();
   2211 
   2212 	/*
   2213 	 * If it is the first entry on the list, it is actually
   2214 	 * in the header and we must copy the following entry up
   2215 	 * to the header.  Otherwise we must search the list for
   2216 	 * the entry.  In either case we free the now unused entry.
   2217 	 */
   2218 
   2219 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
   2220 		ste = pv->pv_ptste;
   2221 		ptpmap = pv->pv_ptpmap;
   2222 		npv = pv->pv_next;
   2223 		if (npv) {
   2224 			*pv = *npv;
   2225 			opv = npv;
   2226 		} else
   2227 			pv->pv_pmap = NULL;
   2228 	} else {
   2229 		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
   2230 			if (pmap == npv->pv_pmap && va == npv->pv_va)
   2231 				break;
   2232 			pv = npv;
   2233 		}
   2234 #ifdef DEBUG
   2235 		if (npv == NULL)
   2236 			panic("pmap_remove: PA not in pv_tab");
   2237 #endif
   2238 		ste = npv->pv_ptste;
   2239 		ptpmap = npv->pv_ptpmap;
   2240 		pv->pv_next = npv->pv_next;
   2241 		opv = npv;
   2242 		pvh = pa_to_pvh(pa);
   2243 		pv = &pvh->pvh_first;
   2244 	}
   2245 
   2246 #ifdef CACHE_HAVE_VAC
   2247 
   2248 	/*
   2249 	 * If only one mapping left we no longer need to cache inhibit
   2250 	 */
   2251 
   2252 	if (pmap_aliasmask &&
   2253 	    pv->pv_pmap && pv->pv_next == NULL && (pvh->pvh_attrs & PVH_CI)) {
   2254 		PMAP_DPRINTF(PDB_CACHE,
   2255 		    ("remove: clearing CI for pa %lx\n", pa));
   2256 		pvh->pvh_attrs &= ~PVH_CI;
   2257 		pmap_changebit(pa, 0, (pt_entry_t)~PG_CI);
   2258 #ifdef DEBUG
   2259 		if ((pmapdebug & (PDB_CACHE|PDB_PVDUMP)) ==
   2260 		    (PDB_CACHE|PDB_PVDUMP))
   2261 			pmap_pvdump(pa);
   2262 #endif
   2263 	}
   2264 #endif
   2265 
   2266 	/*
   2267 	 * If this was a PT page we must also remove the
   2268 	 * mapping from the associated segment table.
   2269 	 */
   2270 
   2271 	if (ste) {
   2272 		PMAP_DPRINTF(PDB_REMOVE|PDB_PTPAGE,
   2273 		    ("remove: ste was %x@%p pte was %x@%p\n",
   2274 		    *ste, ste, opte, pmap_pte(pmap, va)));
   2275 #if defined(M68040) || defined(M68060)
   2276 #if defined(M68020) || defined(M68030)
   2277 		if (mmutype == MMU_68040)
   2278 #endif
   2279 		{
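        			/*
        			 * On the 68040 a PT page is mapped by a chunk of
        			 * NPTEPG/SG4_LEV3SIZE level 3 descriptors;
        			 * invalidate them all.
        			 */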
   2280 			st_entry_t *este = &ste[NPTEPG/SG4_LEV3SIZE];
   2281 
   2282 			while (ste < este)
   2283 				*ste++ = SG_NV;
   2284 #ifdef DEBUG
   2285 			ste -= NPTEPG/SG4_LEV3SIZE;
   2286 #endif
   2287 		}
   2288 #if defined(M68020) || defined(M68030)
   2289 		else
   2290 #endif
   2291 #endif
   2292 #if defined(M68020) || defined(M68030)
   2293 		*ste = SG_NV;
   2294 #endif
   2295 
   2296 		/*
   2297 		 * If it was a user PT page, we decrement the
   2298 		 * reference count on the segment table as well,
   2299 		 * freeing it if it is now empty.
   2300 		 */
   2301 
   2302 		if (ptpmap != pmap_kernel()) {
   2303 			PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
   2304 			    ("remove: stab %p, refcnt %d\n",
   2305 			    ptpmap->pm_stab, ptpmap->pm_sref - 1));
   2306 #ifdef DEBUG
   2307 			if ((pmapdebug & PDB_PARANOIA) &&
   2308 			    ptpmap->pm_stab !=
   2309 			     (st_entry_t *)trunc_page((vaddr_t)ste))
   2310 				panic("remove: bogus ste");
   2311 #endif
   2312 			if (--(ptpmap->pm_sref) == 0) {
   2313 				PMAP_DPRINTF(PDB_REMOVE|PDB_SEGTAB,
   2314 				    ("remove: free stab %p\n",
   2315 				    ptpmap->pm_stab));
   2316 				uvm_km_free(st_map, (vaddr_t)ptpmap->pm_stab,
   2317 				    M68K_STSIZE, UVM_KMF_WIRED);
   2318 				ptpmap->pm_stab = Segtabzero;
   2319 				ptpmap->pm_stpa = Segtabzeropa;
   2320 #if defined(M68040) || defined(M68060)
   2321 #if defined(M68020) || defined(M68030)
   2322 				if (mmutype == MMU_68040)
   2323 #endif
   2324 					ptpmap->pm_stfree = protostfree;
   2325 #endif
   2326 				/*
   2327 				 * Segment table has changed; reload the
   2328 				 * MMU if it's the active user pmap.
   2329 				 */
   2330 				if (active_user_pmap(ptpmap)) {
   2331 					pmap_load_urp((paddr_t)ptpmap->pm_stpa);
   2332 				}
   2333 			}
   2334 		}
   2335 		pvh->pvh_attrs &= ~PVH_PTPAGE;
   2336 		ptpmap->pm_ptpages--;
   2337 	}
   2338 
   2339 	/*
   2340 	 * Update saved attributes for managed page
   2341 	 */
   2342 
   2343 	pvh->pvh_attrs |= bits;
   2344 	splx(s);
   2345 
   2346 	if (opvp != NULL)
   2347 		*opvp = opv;
   2348 	else if (opv != NULL)
   2349 		pmap_free_pv(opv);
   2350 }
   2351 
   2352 /*
   2353  * pmap_testbit:
   2354  *
   2355  *	Test the modified/referenced bits of a physical page.
   2356  */
   2357 /* static */
   2358 bool
   2359 pmap_testbit(paddr_t pa, int bit)
   2360 {
   2361 	struct pv_header *pvh;
   2362 	struct pv_entry *pv;
   2363 	pt_entry_t *pte;
   2364 	int s;
   2365 
   2366 	pvh = pa_to_pvh(pa);
   2367 	pv = &pvh->pvh_first;
   2368 	s = splvm();
   2369 
   2370 	/*
   2371 	 * Check saved info first
   2372 	 */
   2373 
   2374 	if (pvh->pvh_attrs & bit) {
   2375 		splx(s);
   2376 		return true;
   2377 	}
   2378 
   2379 #ifdef CACHE_HAVE_VAC
   2380 
   2381 	/*
   2382 	 * Flush VAC to get correct state of any hardware maintained bits.
   2383 	 */
   2384 
   2385 	if (pmap_aliasmask && (bit & (PG_U|PG_M)))
   2386 		DCIS();
   2387 #endif
   2388 
   2389 	/*
   2390 	 * Not found.  Check current mappings, returning immediately if
   2391 	 * found.  Cache a hit to speed future lookups.
   2392 	 */
   2393 
   2394 	if (pv->pv_pmap != NULL) {
   2395 		for (; pv; pv = pv->pv_next) {
   2396 			pte = pmap_pte(pv->pv_pmap, pv->pv_va);
   2397 			if (*pte & bit) {
   2398 				pvh->pvh_attrs |= bit;
   2399 				splx(s);
   2400 				return true;
   2401 			}
   2402 		}
   2403 	}
   2404 	splx(s);
   2405 	return false;
   2406 }
   2407 
   2408 /*
   2409  * pmap_changebit:
   2410  *
   2411  *	Change the modified/referenced bits, or other PTE bits,
   2412  *	for a physical page.
   2413  */
   2414 /* static */
   2415 bool
   2416 pmap_changebit(paddr_t pa, pt_entry_t set, pt_entry_t mask)
   2417 {
   2418 	struct pv_header *pvh;
   2419 	struct pv_entry *pv;
   2420 	pt_entry_t *pte, npte;
   2421 	vaddr_t va;
   2422 	int s;
   2423 #if defined(CACHE_HAVE_VAC) || defined(M68040) || defined(M68060)
   2424 	bool firstpage = true;
   2425 #endif
   2426 	bool r;
   2427 
   2428 	PMAP_DPRINTF(PDB_BITS,
   2429 	    ("pmap_changebit(%lx, %x, %x)\n", pa, set, mask));
   2430 
   2431 	pvh = pa_to_pvh(pa);
   2432 	pv = &pvh->pvh_first;
   2433 	s = splvm();
   2434 
   2435 	/*
   2436 	 * Clear saved attributes (modify, reference)
   2437 	 */
   2438 
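        	/* Note whether any of the bits being cleared were set. */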
   2439 	r = (pvh->pvh_attrs & ~mask) != 0;
   2440 	pvh->pvh_attrs &= mask;
   2441 
   2442 	/*
   2443 	 * Loop over all current mappings setting/clearing as appropriate
   2444 	 * If setting RO do we need to clear the VAC?
   2445 	 */
   2446 
   2447 	if (pv->pv_pmap != NULL) {
   2448 #ifdef DEBUG
   2449 		int toflush = 0;
   2450 #endif
   2451 		for (; pv; pv = pv->pv_next) {
   2452 #ifdef DEBUG
   2453 			toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
   2454 #endif
   2455 			va = pv->pv_va;
   2456 			pte = pmap_pte(pv->pv_pmap, va);
   2457 #ifdef CACHE_HAVE_VAC
   2458 
   2459 			/*
   2460 			 * Flush VAC to ensure we get correct state of HW bits
   2461 			 * so we don't clobber them.
   2462 			 */
   2463 
   2464 			if (firstpage && pmap_aliasmask) {
   2465 				firstpage = false;
   2466 				DCIS();
   2467 			}
   2468 #endif
   2469 			npte = (*pte | set) & mask;
   2470 			if (*pte != npte) {
   2471 				r = true;
   2472 #if defined(M68040) || defined(M68060)
   2473 				/*
   2474 				 * If we are changing caching status or
   2475 				 * protection make sure the caches are
   2476 				 * flushed (but only once).
   2477 				 */
   2478 				if (firstpage &&
   2479 #if defined(M68020) || defined(M68030)
   2480 				    (mmutype == MMU_68040) &&
   2481 #endif
   2482 				    ((set == PG_RO) ||
   2483 				     (set & PG_CMASK) ||
   2484 				     (mask & PG_CMASK) == 0)) {
   2485 					firstpage = false;
   2486 					DCFP(pa);
   2487 					ICPP(pa);
   2488 				}
   2489 #endif
   2490 				*pte = npte;
   2491 				if (active_pmap(pv->pv_pmap))
   2492 					TBIS(va);
   2493 			}
   2494 		}
   2495 	}
   2496 	splx(s);
   2497 	return r;
   2498 }
   2499 
   2500 /*
   2501  * pmap_enter_ptpage:
   2502  *
   2503  *	Allocate and map a PT page for the specified pmap/va pair.
   2504  */
   2505 /* static */
   2506 int
   2507 pmap_enter_ptpage(pmap_t pmap, vaddr_t va, bool can_fail)
   2508 {
   2509 	paddr_t ptpa;
   2510 	struct vm_page *pg;
   2511 	struct pv_header *pvh;
   2512 	struct pv_entry *pv;
   2513 	st_entry_t *ste;
   2514 	int s;
   2515 
   2516 	PMAP_DPRINTF(PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE,
   2517 	    ("pmap_enter_ptpage: pmap %p, va %lx\n", pmap, va));
   2518 
   2519 	/*
   2520 	 * Allocate a segment table if necessary.  Note that it is allocated
   2521 	 * from a private map and not pt_map.  This keeps user page tables
   2522 	 * aligned on segment boundaries in the kernel address space.
   2523 	 * The segment table is wired down.  It will be freed whenever the
   2524 	 * reference count drops to zero.
   2525 	 */
   2526 	if (pmap->pm_stab == Segtabzero) {
   2527 		pmap->pm_stab = (st_entry_t *)
   2528 		    uvm_km_alloc(st_map, M68K_STSIZE, 0,
   2529 		    UVM_KMF_WIRED | UVM_KMF_ZERO |
   2530 		    (can_fail ? UVM_KMF_NOWAIT : 0));
   2531 		if (pmap->pm_stab == NULL) {
   2532 			pmap->pm_stab = Segtabzero;
   2533 			return ENOMEM;
   2534 		}
   2535 		(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
   2536 		    (paddr_t *)&pmap->pm_stpa);
   2537 #if defined(M68040) || defined(M68060)
   2538 #if defined(M68020) || defined(M68030)
   2539 		if (mmutype == MMU_68040)
   2540 #endif
   2541 		{
   2542 			pt_entry_t	*pte;
   2543 
   2544 			pte = pmap_pte(pmap_kernel(), pmap->pm_stab);
   2545 			*pte = (*pte & ~PG_CMASK) | PG_CI;
   2546 			pmap->pm_stfree = protostfree;
   2547 		}
   2548 #endif
   2549 		/*
   2550 		 * Segment table has changed; reload the
   2551 		 * MMU if it's the active user pmap.
   2552 		 */
   2553 		if (active_user_pmap(pmap)) {
   2554 			pmap_load_urp((paddr_t)pmap->pm_stpa);
   2555 		}
   2556 
   2557 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2558 		    ("enter: pmap %p stab %p(%p)\n",
   2559 		    pmap, pmap->pm_stab, pmap->pm_stpa));
   2560 	}
   2561 
   2562 	ste = pmap_ste(pmap, va);
   2563 #if defined(M68040) || defined(M68060)
   2564 	/*
   2565 	 * Allocate level 2 descriptor block if necessary
   2566 	 */
   2567 #if defined(M68020) || defined(M68030)
   2568 	if (mmutype == MMU_68040)
   2569 #endif
   2570 	{
   2571 		if (*ste == SG_NV) {
   2572 			int ix;
   2573 			void *addr;
   2574 
   2575 			ix = bmtol2(pmap->pm_stfree);
   2576 			if (ix == -1)
   2577 				panic("enter: out of address space"); /* XXX */
   2578 			pmap->pm_stfree &= ~l2tobm(ix);
   2579 			addr = (void *)&pmap->pm_stab[ix*SG4_LEV2SIZE];
   2580 			memset(addr, 0, SG4_LEV2SIZE*sizeof(st_entry_t));
   2581 			addr = (void *)&pmap->pm_stpa[ix*SG4_LEV2SIZE];
   2582 			*ste = (u_int)addr | SG_RW | SG_U | SG_V;
   2583 
   2584 			PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2585 			    ("enter: alloc ste2 %d(%p)\n", ix, addr));
   2586 		}
   2587 		ste = pmap_ste2(pmap, va);
   2588 		/*
   2589 		 * Since a level 2 descriptor maps a block of SG4_LEV3SIZE
   2590 		 * level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
   2591 		 * (16) such descriptors (PAGE_SIZE/SG4_LEV3SIZE bytes) to map a
   2592 		 * PT page--the unit of allocation.  We set `ste' to point
   2593 		 * to the first entry of that chunk which is validated in its
   2594 		 * entirety below.
   2595 		 */
   2596 		ste = (st_entry_t *)((int)ste & ~(PAGE_SIZE/SG4_LEV3SIZE-1));
   2597 
   2598 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2599 		    ("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
   2600 	}
   2601 #endif
   2602 	va = trunc_page((vaddr_t)pmap_pte(pmap, va));
   2603 
   2604 	/*
   2605 	 * In the kernel we allocate a page from the kernel PT page
   2606 	 * free list and map it into the kernel page table map (via
   2607 	 * pmap_enter).
   2608 	 */
   2609 	if (pmap == pmap_kernel()) {
   2610 		struct kpt_page *kpt;
   2611 
   2612 		s = splvm();
   2613 		if ((kpt = kpt_free_list) == NULL) {
   2614 			/*
   2615 			 * No PT pages available.
   2616 			 * Try once to free up unused ones.
   2617 			 */
   2618 			PMAP_DPRINTF(PDB_COLLECT,
   2619 			    ("enter: no KPT pages, collecting...\n"));
   2620 			pmap_collect();
   2621 			if ((kpt = kpt_free_list) == NULL)
   2622 				panic("pmap_enter_ptpage: can't get KPT page");
   2623 		}
   2624 		kpt_free_list = kpt->kpt_next;
   2625 		kpt->kpt_next = kpt_used_list;
   2626 		kpt_used_list = kpt;
   2627 		ptpa = kpt->kpt_pa;
   2628 		memset((void *)kpt->kpt_va, 0, PAGE_SIZE);
   2629 		pmap_enter(pmap, va, ptpa, VM_PROT_READ | VM_PROT_WRITE,
   2630 		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
   2631 		pmap_update(pmap);
   2632 #ifdef DEBUG
   2633 		if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) {
   2634 			int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
   2635 
   2636 			printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
   2637 			    ix, Sysptmap[ix], kpt->kpt_va);
   2638 		}
   2639 #endif
   2640 		splx(s);
   2641 	} else {
   2642 
   2643 		/*
   2644 		 * For user processes we just allocate a page from the
   2645 		 * VM system.  Note that we set the page "wired" count to 1,
   2646 		 * which is what we use to check if the page can be freed.
   2647 		 * See pmap_remove_mapping().
   2648 		 *
   2649 		 * Count the segment table reference first so that we won't
   2650 		 * lose the segment table when low on memory.
   2651 		 */
   2652 
   2653 		pmap->pm_sref++;
   2654 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
   2655 		    ("enter: about to alloc UPT pg at %lx\n", va));
   2656 		rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
   2657 		while ((pg = uvm_pagealloc(uvm_kernel_object,
   2658 					   va - vm_map_min(kernel_map),
   2659 					   NULL, UVM_PGA_ZERO)) == NULL) {
   2660 			rw_exit(uvm_kernel_object->vmobjlock);
   2661 			if (can_fail) {
   2662 				pmap->pm_sref--;
   2663 				return ENOMEM;
   2664 			}
   2665 			uvm_wait("ptpage");
   2666 			rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
   2667 		}
   2668 		rw_exit(uvm_kernel_object->vmobjlock);
   2669 		pg->flags &= ~(PG_BUSY|PG_FAKE);
   2670 		UVM_PAGE_OWN(pg, NULL);
   2671 		ptpa = VM_PAGE_TO_PHYS(pg);
   2672 		pmap_enter(pmap_kernel(), va, ptpa,
   2673 		    VM_PROT_READ | VM_PROT_WRITE,
   2674 		    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
   2675 		pmap_update(pmap_kernel());
   2676 	}
   2677 #if defined(M68040) || defined(M68060)
   2678 	/*
   2679 	 * Turn off copyback caching of page table pages; things
   2680 	 * could get ugly otherwise.
   2681 	 */
   2682 #if defined(M68020) || defined(M68030)
   2683 	if (mmutype == MMU_68040)
   2684 #endif
   2685 	{
   2686 #ifdef DEBUG
   2687 		pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
   2688 		if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
   2689 			printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
   2690 			    pmap == pmap_kernel() ? "Kernel" : "User",
   2691 			    va, ptpa, pte, *pte);
   2692 #endif
   2693 		if (pmap_changebit(ptpa, PG_CI, (pt_entry_t)~PG_CCB))
   2694 			DCIS();
   2695 	}
   2696 #endif
   2697 	/*
   2698 	 * Locate the PV entry in the kernel for this PT page and
   2699 	 * record the STE address.  This is so that we can invalidate
   2700 	 * the STE when we remove the mapping for the page.
   2701 	 */
   2702 	pvh = pa_to_pvh(ptpa);
   2703 	s = splvm();
   2704 	if (pvh) {
   2705 		pv = &pvh->pvh_first;
   2706 		pvh->pvh_attrs |= PVH_PTPAGE;
   2707 		do {
   2708 			if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
   2709 				break;
   2710 		} while ((pv = pv->pv_next));
   2711 	} else {
   2712 		pv = NULL;
   2713 	}
   2714 #ifdef DEBUG
   2715 	if (pv == NULL)
   2716 		panic("pmap_enter_ptpage: PT page not entered");
   2717 #endif
   2718 	pv->pv_ptste = ste;
   2719 	pv->pv_ptpmap = pmap;
   2720 
   2721 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
   2722 	    ("enter: new PT page at PA %lx, ste at %p\n", ptpa, ste));
   2723 
   2724 	/*
   2725 	 * Map the new PT page into the segment table.
   2726 	 * Also increment the reference count on the segment table if this
   2727 	 * was a user page table page.  Note that we don't use vm_map_pageable
   2728  *	to keep the count like we do for PT pages; this is mostly because
   2729 	 * it would be difficult to identify ST pages in pmap_pageable to
   2730 	 * release them.  We also avoid the overhead of vm_map_pageable.
   2731 	 */
   2732 #if defined(M68040) || defined(M68060)
   2733 #if defined(M68020) || defined(M68030)
   2734 	if (mmutype == MMU_68040)
   2735 #endif
   2736 	{
   2737 		st_entry_t *este;
   2738 
   2739 		for (este = &ste[NPTEPG/SG4_LEV3SIZE]; ste < este; ste++) {
   2740 			*ste = ptpa | SG_U | SG_RW | SG_V;
   2741 			ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
   2742 		}
   2743 	}
   2744 #if defined(M68020) || defined(M68030)
   2745 	else
   2746 		*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
   2747 #endif
   2748 #else
   2749 	*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
   2750 #endif
   2751 	if (pmap != pmap_kernel()) {
   2752 		PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2753 		    ("enter: stab %p refcnt %d\n",
   2754 		    pmap->pm_stab, pmap->pm_sref));
   2755 	}
   2756 	/*
   2757 	 * Flush stale TLB info.
   2758 	 */
   2759 	if (pmap == pmap_kernel())
   2760 		TBIAS();
   2761 	else
   2762 		TBIAU();
   2763 	pmap->pm_ptpages++;
   2764 	splx(s);
   2765 
   2766 	return 0;
   2767 }
   2768 
   2769 /*
   2770  * pmap_ptpage_addref:
   2771  *
   2772  *	Add a reference to the specified PT page.
   2773  */
   2774 void
   2775 pmap_ptpage_addref(vaddr_t ptpva)
   2776 {
   2777 	struct vm_page *pg;
   2778 
   2779 	rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
   2780 	pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
   2781 	pg->wire_count++;
   2782 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2783 	    ("ptpage addref: pg %p now %d\n",
   2784 	     pg, pg->wire_count));
   2785 	rw_exit(uvm_kernel_object->vmobjlock);
   2786 }
   2787 
   2788 /*
   2789  * pmap_ptpage_delref:
   2790  *
   2791  *	Delete a reference to the specified PT page.
   2792  */
   2793 int
   2794 pmap_ptpage_delref(vaddr_t ptpva)
   2795 {
   2796 	struct vm_page *pg;
   2797 	int rv;
   2798 
   2799 	rw_enter(uvm_kernel_object->vmobjlock, RW_WRITER);
   2800 	pg = uvm_pagelookup(uvm_kernel_object, ptpva - vm_map_min(kernel_map));
   2801 	rv = --pg->wire_count;
   2802 	PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
   2803 	    ("ptpage delref: pg %p now %d\n",
   2804 	     pg, pg->wire_count));
   2805 	rw_exit(uvm_kernel_object->vmobjlock);
   2806 	return rv;
   2807 }
   2808 
   2809 /*
   2810  *	Routine:        pmap_procwr
   2811  *
   2812  *	Function:
   2813  *		Synchronize caches corresponding to [addr, addr + len) in p.
   2814  */
   2815 void
   2816 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   2817 {
   2818 
   2819 	(void)cachectl1(0x80000004, va, len, p);
   2820 }
   2821 
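        /*
         * _pmap_set_page_cacheable:
         *
         *	Make the page mapped at va in the given pmap cacheable
         *	again (copyback on the 68040/68060, since regular mappings
         *	are copyback there; otherwise just clear cache inhibition).
         */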
   2822 void
   2823 _pmap_set_page_cacheable(pmap_t pmap, vaddr_t va)
   2824 {
   2825 
   2826 	if (!pmap_ste_v(pmap, va))
   2827 		return;
   2828 
   2829 #if defined(M68040) || defined(M68060)
   2830 #if defined(M68020) || defined(M68030)
   2831 	if (mmutype == MMU_68040) {
   2832 #endif
   2833 	if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CCB,
   2834 			   (pt_entry_t)~PG_CI))
   2835 		DCIS();
   2836 
   2837 #if defined(M68020) || defined(M68030)
   2838 	} else
   2839 		pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0,
   2840 			       (pt_entry_t)~PG_CI);
   2841 #endif
   2842 #else
   2843 	pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), 0,
   2844 		       (pt_entry_t)~PG_CI);
   2845 #endif
   2846 }
   2847 
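        /*
         * _pmap_set_page_cacheinhibit:
         *
         *	Mark the page mapped at va in the given pmap cache-inhibited.
         */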
   2848 void
   2849 _pmap_set_page_cacheinhibit(pmap_t pmap, vaddr_t va)
   2850 {
   2851 
   2852 	if (!pmap_ste_v(pmap, va))
   2853 		return;
   2854 
   2855 #if defined(M68040) || defined(M68060)
   2856 #if defined(M68020) || defined(M68030)
   2857 	if (mmutype == MMU_68040) {
   2858 #endif
   2859 	if (pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI,
   2860 			   (pt_entry_t)~PG_CCB))
   2861 		DCIS();
   2862 #if defined(M68020) || defined(M68030)
   2863 	} else
   2864 		pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
   2865 #endif
   2866 #else
   2867 	pmap_changebit(pmap_pte_pa(pmap_pte(pmap, va)), PG_CI, ~0);
   2868 #endif
   2869 }
   2870 
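        /*
         * _pmap_page_is_cacheable:
         *
         *	Return 1 if the page mapped at va in the given pmap is
         *	cacheable, 0 if it is cache-inhibited or not mapped.
         */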
   2871 int
   2872 _pmap_page_is_cacheable(pmap_t pmap, vaddr_t va)
   2873 {
   2874 
   2875 	if (!pmap_ste_v(pmap, va))
   2876 		return 0;
   2877 
   2878 	return (pmap_pte_ci(pmap_pte(pmap, va)) == 0) ? 1 : 0;
   2879 }
   2880 
   2881 #ifdef DEBUG
   2882 /*
   2883  * pmap_pvdump:
   2884  *
   2885  *	Dump the contents of the PV list for the specified physical page.
   2886  */
   2887 void
   2888 pmap_pvdump(paddr_t pa)
   2889 {
   2890 	struct pv_header *pvh;
   2891 	struct pv_entry *pv;
   2892 
   2893 	printf("pa %lx", pa);
   2894 	pvh = pa_to_pvh(pa);
   2895 	for (pv = &pvh->pvh_first; pv; pv = pv->pv_next)
   2896 		printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p",
   2897 		    pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap);
   2898 	printf("\n");
   2899 }
   2900 
   2901 /*
   2902  * pmap_check_wiring:
   2903  *
   2904  *	Count the number of valid mappings in the specified PT page,
   2905  *	and ensure that it is consistent with the number of wirings
   2906  *	to that page that the VM system has.
   2907  */
   2908 void
   2909 pmap_check_wiring(const char *str, vaddr_t va)
   2910 {
   2911 	pt_entry_t *pte;
   2912 	paddr_t pa;
   2913 	struct vm_page *pg;
   2914 	int count;
   2915 
   2916 	if (!pmap_ste_v(pmap_kernel(), va) ||
   2917 	    !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
   2918 		return;
   2919 
   2920 	pa = pmap_pte_pa(pmap_pte(pmap_kernel(), va));
   2921 	pg = PHYS_TO_VM_PAGE(pa);
   2922 	if (pg->wire_count > PAGE_SIZE / sizeof(pt_entry_t)) {
   2923 		panic("*%s*: 0x%lx: wire count %d", str, va, pg->wire_count);
   2924 	}
   2925 
   2926 	count = 0;
   2927 	for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + PAGE_SIZE);
   2928 	     pte++)
   2929 		if (*pte)
   2930 			count++;
   2931 	if (pg->wire_count != count)
   2932 		panic("*%s*: 0x%lx: w%d/a%d",
   2933 		       str, va, pg->wire_count, count);
   2934 }
   2935 #endif /* DEBUG */
   2936 
   2937 /*
   2938  * XXX XXX XXX These are legacy remnants and should go away XXX XXX XXX
   2939  * (Cribbed from vm_machdep.c because they're tied to this pmap impl.)
   2940  */
   2941 
   2942 /*
   2943  * Map `size' bytes of physical memory starting at `paddr' into
   2944  * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
   2945  * are specified by `prot'.
   2946  */
   2947 void
   2948 physaccess(void *vaddr, void *paddr, int size, int prot)
   2949 {
   2950 	pt_entry_t *pte;
   2951 	u_int page;
   2952 
   2953 	pte = kvtopte(vaddr);
   2954 	page = (u_int)paddr & PG_FRAME;
   2955 	for (size = btoc(size); size; size--) {
   2956 		*pte++ = PG_V | prot | page;
   2957 		page += PAGE_SIZE;
   2958 	}
   2959 	TBIAS();
   2960 }
   2961 
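        /*
         * Unmap the `size' bytes of kernel VA space starting at `vaddr'
         * that were previously mapped with physaccess().
         */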
   2962 void
   2963 physunaccess(void *vaddr, int size)
   2964 {
   2965 	pt_entry_t *pte;
   2966 
   2967 	pte = kvtopte(vaddr);
   2968 	for (size = btoc(size); size; size--)
   2969 		*pte++ = PG_NV;
   2970 	TBIAS();
   2971 }
   2972 
   2973 /*
   2974  * Convert kernel VA to physical address
   2975  */
   2976 int
   2977 kvtop(void *addr)
   2978 {
   2979 	return (int)vtophys((vaddr_t)addr);
   2980 }
   2981