pmap.c revision 1.24
      1 /*	$NetBSD: pmap.c,v 1.24 1997/05/30 07:02:15 jeremy Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jeremy Cooper.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * XXX These comments aren't quite accurate.  Need to change.
     41  * The sun3x uses the MC68851 Memory Management Unit, which is built
     42  * into the CPU.  The 68851 maps virtual to physical addresses using
     43  * a multi-level table lookup, which is stored in the very memory that
     44  * it maps.  The number of levels of lookup is configurable from one
     45  * to four.  In this implementation, we use three, named 'A' through 'C'.
     46  *
     47  * The MMU translates virtual addresses into physical addresses by
      48  * traversing these tables in a process called a 'table walk'.  The most
     49  * significant 7 bits of the Virtual Address ('VA') being translated are
     50  * used as an index into the level A table, whose base in physical memory
     51  * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
     52  * address found at that index in the A table is used as the base
     53  * address for the next table, the B table.  The next six bits of the VA are
     54  * used as an index into the B table, which in turn gives the base address
     55  * of the third and final C table.
     56  *
      57  * The next six bits of the VA are used as an index into the C table to
      58  * locate a Page Table Entry (PTE).  The PTE holds the physical base
      59  * address of a page; the remaining 13 bits of the VA are added to it as
      60  * an offset, producing the mapped physical address.  (A sketch of this
          * bit layout follows this comment block.)
     61  *
     62  * To map the entire memory space in this manner would require 2114296 bytes
     63  * of page tables per process - quite expensive.  Instead we will
     64  * allocate a fixed but considerably smaller space for the page tables at
     65  * the time the VM system is initialized.  When the pmap code is asked by
     66  * the kernel to map a VA to a PA, it allocates tables as needed from this
     67  * pool.  When there are no more tables in the pool, tables are stolen
     68  * from the oldest mapped entries in the tree.  This is only possible
     69  * because all memory mappings are stored in the kernel memory map
     70  * structures, independent of the pmap structures.  A VA which references
     71  * one of these invalidated maps will cause a page fault.  The kernel
     72  * will determine that the page fault was caused by a task using a valid
     73  * VA, but for some reason (which does not concern it), that address was
     74  * not mapped.  It will ask the pmap code to re-map the entry and then
     75  * it will resume executing the faulting task.
     76  *
     77  * In this manner the most efficient use of the page table space is
     78  * achieved.  Tasks which do not execute often will have their tables
     79  * stolen and reused by tasks which execute more frequently.  The best
     80  * size for the page table pool will probably be determined by
     81  * experimentation.
     82  *
     83  * You read all of the comments so far.  Good for you.
     84  * Now go play!
     85  */
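
         /*
          * An illustrative sketch (not referenced by any code below) of how
          * a VA splits into the 7/6/6/13 bit fields described above.  The
          * EX_* names are invented for this example only; the real code uses
          * the MMU_TIA()/MMU_TIB()/MMU_TIC() macros from the pte headers.
          * The shift and mask constants simply restate that bit layout.
          */
         #if 0
         #define EX_A_IDX(va)	(((va) >> 25) & 0x7f)	/* A index: top 7 bits  */
         #define EX_B_IDX(va)	(((va) >> 19) & 0x3f)	/* B index: next 6 bits */
         #define EX_C_IDX(va)	(((va) >> 13) & 0x3f)	/* C index: next 6 bits */
         #define EX_PG_OFF(va)	((va) & 0x1fff)		/* offset: low 13 bits  */
         #endif	/* 0 */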
     86 
     87 /*** A Note About the 68851 Address Translation Cache
     88  * The MC68851 has a 64 entry cache, called the Address Translation Cache
     89  * or 'ATC'.  This cache stores the most recently used page descriptors
     90  * accessed by the MMU when it does translations.  Using a marker called a
     91  * 'task alias' the MMU can store the descriptors from 8 different table
     92  * spaces concurrently.  The task alias is associated with the base
     93  * address of the level A table of that address space.  When an address
     94  * space is currently active (the CRP currently points to its A table)
     95  * the only cached descriptors that will be obeyed are ones which have a
     96  * matching task alias of the current space associated with them.
     97  *
     98  * Since the cache is always consulted before any table lookups are done,
     99  * it is important that it accurately reflect the state of the MMU tables.
    100  * Whenever a change has been made to a table that has been loaded into
    101  * the MMU, the code must be sure to flush any cached entries that are
    102  * affected by the change.  These instances are documented in the code at
    103  * various points.
    104  */
    105 /*** A Note About the Note About the 68851 Address Translation Cache
    106  * 4 months into this code I discovered that the sun3x does not have
     107  * an MC68851 chip.  Instead, it has a version of this MMU that is part
     108  * of the 68030 CPU.
     109  * Although it behaves very similarly to the 68851, it only has one task
    110  * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
    111  * of the previous note does not apply to the sun3x pmap.
    112  */
    113 
    114 #include <sys/param.h>
    115 #include <sys/systm.h>
    116 #include <sys/proc.h>
    117 #include <sys/malloc.h>
    118 #include <sys/user.h>
    119 #include <sys/queue.h>
    120 #include <sys/kcore.h>
    121 
    122 #include <vm/vm.h>
    123 #include <vm/vm_kern.h>
    124 #include <vm/vm_page.h>
    125 
    126 #include <machine/cpu.h>
    127 #include <machine/kcore.h>
    128 #include <machine/pmap.h>
    129 #include <machine/pte.h>
    130 #include <machine/machdep.h>
    131 #include <machine/mon.h>
    132 
    133 #include "pmap_pvt.h"
    134 
    135 /* XXX - What headers declare these? */
    136 extern struct pcb *curpcb;
    137 extern int physmem;
    138 
    139 extern void copypage __P((const void*, void*));
    140 extern void zeropage __P((void*));
    141 
    142 /* Defined in locore.s */
    143 extern char kernel_text[];
    144 
    145 /* Defined by the linker */
    146 extern char etext[], edata[], end[];
    147 extern char *esym;	/* DDB */
    148 
    149 /*************************** DEBUGGING DEFINITIONS ***********************
    150  * Macros, preprocessor defines and variables used in debugging can make *
    151  * code hard to read.  Anything used exclusively for debugging purposes  *
    152  * is defined here to avoid having such mess scattered around the file.  *
    153  *************************************************************************/
    154 #ifdef	PMAP_DEBUG
    155 /*
    156  * To aid the debugging process, macros should be expanded into smaller steps
    157  * that accomplish the same goal, yet provide convenient places for placing
    158  * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
    159  * 'INLINE' keyword is defined to an empty string.  This way, any function
    160  * defined to be a 'static INLINE' will become 'outlined' and compiled as
    161  * a separate function, which is much easier to debug.
    162  */
    163 #define	INLINE	/* nothing */
    164 
    165 /*
    166  * It is sometimes convenient to watch the activity of a particular table
    167  * in the system.  The following variables are used for that purpose.
    168  */
    169 a_tmgr_t *pmap_watch_atbl = 0;
    170 b_tmgr_t *pmap_watch_btbl = 0;
    171 c_tmgr_t *pmap_watch_ctbl = 0;
    172 
    173 int pmap_debug = 0;
     174 #define DPRINT(args) do { if (pmap_debug) printf args; } while (0)
    175 
    176 #else	/********** Stuff below is defined if NOT debugging **************/
    177 
    178 #define	INLINE	inline
    179 #define DPRINT(args)  /* nada */
    180 
    181 #endif	/* PMAP_DEBUG */
    182 /*********************** END OF DEBUGGING DEFINITIONS ********************/
    183 
    184 /*** Management Structure - Memory Layout
    185  * For every MMU table in the sun3x pmap system there must be a way to
    186  * manage it; we must know which process is using it, what other tables
    187  * depend on it, and whether or not it contains any locked pages.  This
     188  * is solved by the creation of 'table management' or 'tmgr'
     189  * structures, one for each MMU table in the system.
    190  *
    191  *                        MAP OF MEMORY USED BY THE PMAP SYSTEM
    192  *
    193  *      towards lower memory
    194  * kernAbase -> +-------------------------------------------------------+
    195  *              | Kernel     MMU A level table                          |
    196  * kernBbase -> +-------------------------------------------------------+
    197  *              | Kernel     MMU B level tables                         |
    198  * kernCbase -> +-------------------------------------------------------+
    199  *              |                                                       |
    200  *              | Kernel     MMU C level tables                         |
    201  *              |                                                       |
    202  * mmuCbase  -> +-------------------------------------------------------+
    203  *              | User       MMU C level tables                         |
     204  * mmuBbase  -> +-------------------------------------------------------+
     205  *              | User       MMU B level tables                         |
     206  * mmuAbase  -> +-------------------------------------------------------+
     207  *              |                                                       |
     208  *              | User       MMU A level tables                         |
     209  *              |                                                       |
    210  * tmgrAbase -> +-------------------------------------------------------+
    211  *              |  TMGR A level table structures                        |
    212  * tmgrBbase -> +-------------------------------------------------------+
    213  *              |  TMGR B level table structures                        |
    214  * tmgrCbase -> +-------------------------------------------------------+
    215  *              |  TMGR C level table structures                        |
     216  * pvebase   -> +-------------------------------------------------------+
     217  *              |  Physical to Virtual mapping table (list elements)    |
     218  * pvbase    -> +-------------------------------------------------------+
     219  *              |  Physical to Virtual mapping table (list heads)       |
    220  *              |                                                       |
    221  *              +-------------------------------------------------------+
    222  *      towards higher memory
    223  *
    224  * For every A table in the MMU A area, there will be a corresponding
    225  * a_tmgr structure in the TMGR A area.  The same will be true for
    226  * the B and C tables.  This arrangement will make it easy to find the
     227  * controlling tmgr structure for any table in the system by use of
    228  * (relatively) simple macros.
    229  */
    230 
    231 /*
    232  * Global variables for storing the base addresses for the areas
    233  * labeled above.
    234  */
    235 static vm_offset_t  	kernAphys;
    236 static mmu_long_dte_t	*kernAbase;
    237 static mmu_short_dte_t	*kernBbase;
    238 static mmu_short_pte_t	*kernCbase;
    239 static mmu_short_pte_t	*mmuCbase;
    240 static mmu_short_dte_t	*mmuBbase;
    241 static mmu_long_dte_t	*mmuAbase;
    242 static a_tmgr_t		*Atmgrbase;
    243 static b_tmgr_t		*Btmgrbase;
    244 static c_tmgr_t		*Ctmgrbase;
    245 static pv_t 		*pvbase;
    246 static pv_elem_t	*pvebase;
    247 struct pmap 		kernel_pmap;
    248 
    249 /*
    250  * This holds the CRP currently loaded into the MMU.
    251  */
    252 struct mmu_rootptr kernel_crp;
    253 
    254 /*
    255  * Just all around global variables.
    256  */
    257 static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
    258 static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
    259 static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
    260 
    261 
    262 /*
    263  * Flags used to mark the safety/availability of certain operations or
    264  * resources.
    265  */
    266 static boolean_t pv_initialized = FALSE, /* PV system has been initialized. */
    267        bootstrap_alloc_enabled = FALSE; /*Safe to use pmap_bootstrap_alloc().*/
    268 int tmp_vpages_inuse;	/* Temporary virtual pages are in use */
    269 
    270 /*
    271  * XXX:  For now, retain the traditional variables that were
    272  * used in the old pmap/vm interface (without NONCONTIG).
    273  */
    274 /* Kernel virtual address space available: */
    275 vm_offset_t	virtual_avail, virtual_end;
    276 /* Physical address space available: */
    277 vm_offset_t	avail_start, avail_end;
    278 
     279 /* This keeps track of the end of the contiguously mapped range. */
    280 vm_offset_t virtual_contig_end;
    281 
    282 /* Physical address used by pmap_next_page() */
    283 vm_offset_t avail_next;
    284 
    285 /* These are used by pmap_copy_page(), etc. */
    286 vm_offset_t tmp_vpages[2];
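
         /*
          * An illustrative sketch (hypothetical; NOT the actual
          * pmap_copy_page()) of one way the two reserved pages could be used
          * to copy between physical pages, using only calls visible in this
          * file.  A real routine must also invalidate the temporary mappings
          * when done and honor the tmp_vpages_inuse flag.
          */
         #if 0
         void
         example_copy_page(srcpa, dstpa)
         	vm_offset_t srcpa, dstpa;
         {
         	/* Window the two physical pages at the reserved VAs. */
         	pmap_enter_kernel(tmp_vpages[0], srcpa, VM_PROT_READ);
         	pmap_enter_kernel(tmp_vpages[1], dstpa, VM_PROT_READ|VM_PROT_WRITE);
         	copypage((const void *)tmp_vpages[0], (void *)tmp_vpages[1]);
         }
         #endif	/* 0 */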
    287 
    288 /*
    289  * The 3/80 is the only member of the sun3x family that has non-contiguous
    290  * physical memory.  Memory is divided into 4 banks which are physically
    291  * locatable on the system board.  Although the size of these banks varies
    292  * with the size of memory they contain, their base addresses are
     293  * permanently fixed.  The following structure, which describes these
    294  * banks, is initialized by pmap_bootstrap() after it reads from a similar
    295  * structure provided by the ROM Monitor.
    296  *
    297  * For the other machines in the sun3x architecture which do have contiguous
    298  * RAM, this list will have only one entry, which will describe the entire
    299  * range of available memory.
    300  */
    301 struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
    302 u_int total_phys_mem;
    303 
    304 /*************************************************************************/
    305 
    306 /*
    307  * XXX - Should "tune" these based on statistics.
    308  *
    309  * My first guess about the relative numbers of these needed is
    310  * based on the fact that a "typical" process will have several
    311  * pages mapped at low virtual addresses (text, data, bss), then
    312  * some mapped shared libraries, and then some stack pages mapped
    313  * near the high end of the VA space.  Each process can use only
    314  * one A table, and most will use only two B tables (maybe three)
    315  * and probably about four C tables.  Therefore, the first guess
    316  * at the relative numbers of these needed is 1:2:4 -gwr
    317  *
    318  * The number of C tables needed is closely related to the amount
    319  * of physical memory available plus a certain amount attributable
    320  * to the use of double mappings.  With a few simulation statistics
    321  * we can find a reasonably good estimation of this unknown value.
    322  * Armed with that and the above ratios, we have a good idea of what
    323  * is needed at each level. -j
    324  *
     325  * Note: It is not the physical memory size, but the total mapped
    326  * virtual space required by the combined working sets of all the
    327  * currently _runnable_ processes.  (Sleeping ones don't count.)
    328  * The amount of physical memory should be irrelevant. -gwr
    329  */
    330 #ifdef	FIXED_NTABLES
    331 #define NUM_A_TABLES	16
    332 #define NUM_B_TABLES	32
    333 #define NUM_C_TABLES	64
    334 #else
    335 unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
    336 #endif	/* FIXED_NTABLES */
    337 
    338 /*
    339  * This determines our total virtual mapping capacity.
    340  * Yes, it is a FIXED value so we can pre-allocate.
    341  */
    342 #define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)
    343 
    344 /*
    345  * The size of the Kernel Virtual Address Space (KVAS)
    346  * for purposes of MMU table allocation is -KERNBASE
    347  * (length from KERNBASE to 0xFFFFffff)
    348  */
    349 #define	KVAS_SIZE		(-KERNBASE)
    350 
    351 /* Numbers of kernel MMU tables to support KVAS_SIZE. */
    352 #define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
    353 #define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
    354 #define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)
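
         /*
          * A worked example, illustrative only: assuming a KERNBASE of
          * 0xF8000000, and MMU_TI*_SHIFT values that match the 7/6/6/13 bit
          * split described at the top of this file (25, 19 and 13):
          *	KVAS_SIZE     = 0x100000000 - 0xF8000000 = 0x08000000 (128MB)
          *	KERN_B_TABLES = 0x08000000 >> 25 =     4
          *	KERN_C_TABLES = 0x08000000 >> 19 =   256
          *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384
          * The actual values come from the machine headers.
          */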
    355 
     356 /*************************** MISCELLANEOUS MACROS ************************/
    357 #define PMAP_LOCK()	;	/* Nothing, for now */
    358 #define PMAP_UNLOCK()	;	/* same. */
    359 #define	NULL 0
    360 
    361 static INLINE void *      mmu_ptov __P((vm_offset_t pa));
    362 static INLINE vm_offset_t mmu_vtop __P((void * va));
    363 
    364 #if	0
    365 static INLINE a_tmgr_t * mmuA2tmgr __P((mmu_long_dte_t *));
    366 #endif
    367 static INLINE b_tmgr_t * mmuB2tmgr __P((mmu_short_dte_t *));
    368 static INLINE c_tmgr_t * mmuC2tmgr __P((mmu_short_pte_t *));
    369 
    370 static INLINE pv_t *pa2pv __P((vm_offset_t pa));
    371 static INLINE int   pteidx __P((mmu_short_pte_t *));
    372 static INLINE pmap_t current_pmap __P((void));
    373 
    374 /*
    375  * We can always convert between virtual and physical addresses
    376  * for anything in the range [KERNBASE ... avail_start] because
    377  * that range is GUARANTEED to be mapped linearly.
    378  * We rely heavily upon this feature!
    379  */
    380 static INLINE void *
    381 mmu_ptov(pa)
    382 	vm_offset_t pa;
    383 {
    384 	register vm_offset_t va;
    385 
    386 	va = (pa + KERNBASE);
    387 #ifdef	PMAP_DEBUG
    388 	if ((va < KERNBASE) || (va >= virtual_contig_end))
    389 		panic("mmu_ptov");
    390 #endif
    391 	return ((void*)va);
    392 }
    393 static INLINE vm_offset_t
    394 mmu_vtop(vva)
    395 	void *vva;
    396 {
    397 	register vm_offset_t va;
    398 
    399 	va = (vm_offset_t)vva;
    400 #ifdef	PMAP_DEBUG
    401 	if ((va < KERNBASE) || (va >= virtual_contig_end))
     402 		panic("mmu_vtop");
    403 #endif
    404 	return (va - KERNBASE);
    405 }
    406 
    407 /*
    408  * These macros map MMU tables to their corresponding manager structures.
    409  * They are needed quite often because many of the pointers in the pmap
    410  * system reference MMU tables and not the structures that control them.
    411  * There needs to be a way to find one when given the other and these
    412  * macros do so by taking advantage of the memory layout described above.
    413  * Here's a quick step through the first macro, mmuA2tmgr():
    414  *
    415  * 1) find the offset of the given MMU A table from the base of its table
    416  *    pool (table - mmuAbase).
     417  * 2) convert this offset (a DTE count, by pointer arithmetic) into a
     418  *    table index by dividing it by one table's length, MMU_A_TBL_SIZE.
    419  * 3) use this index to select the corresponding 'A' table manager
    420  *    structure from the 'A' table manager pool (Atmgrbase[index]).
    421  */
    422 /*  This function is not currently used. */
    423 #if	0
    424 static INLINE a_tmgr_t *
    425 mmuA2tmgr(mmuAtbl)
    426 	mmu_long_dte_t *mmuAtbl;
    427 {
    428 	register int idx;
    429 
    430 	/* Which table is this in? */
    431 	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
    432 #ifdef	PMAP_DEBUG
    433 	if ((idx < 0) || (idx >= NUM_A_TABLES))
    434 		panic("mmuA2tmgr");
    435 #endif
    436 	return (&Atmgrbase[idx]);
    437 }
    438 #endif	/* 0 */
    439 
    440 static INLINE b_tmgr_t *
    441 mmuB2tmgr(mmuBtbl)
    442 	mmu_short_dte_t *mmuBtbl;
    443 {
    444 	register int idx;
    445 
    446 	/* Which table is this in? */
    447 	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
    448 #ifdef	PMAP_DEBUG
    449 	if ((idx < 0) || (idx >= NUM_B_TABLES))
    450 		panic("mmuB2tmgr");
    451 #endif
    452 	return (&Btmgrbase[idx]);
    453 }
    454 
    455 /* mmuC2tmgr			INTERNAL
    456  **
    457  * Given a pte known to belong to a C table, return the address of
    458  * that table's management structure.
    459  */
    460 static INLINE c_tmgr_t *
    461 mmuC2tmgr(mmuCtbl)
    462 	mmu_short_pte_t *mmuCtbl;
    463 {
    464 	register int idx;
    465 
    466 	/* Which table is this in? */
    467 	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
    468 #ifdef	PMAP_DEBUG
    469 	if ((idx < 0) || (idx >= NUM_C_TABLES))
    470 		panic("mmuC2tmgr");
    471 #endif
    472 	return (&Ctmgrbase[idx]);
    473 }
    474 
    475 /* This is now a function call below.
    476  * #define pa2pv(pa) \
    477  *	(&pvbase[(unsigned long)\
    478  *		_btop(pa)\
    479  *	])
    480  */
    481 
    482 /* pa2pv			INTERNAL
    483  **
    484  * Return the pv_list_head element which manages the given physical
    485  * address.
    486  */
    487 static INLINE pv_t *
    488 pa2pv(pa)
    489 	vm_offset_t pa;
    490 {
    491 	register struct pmap_physmem_struct *bank;
    492 	register int idx;
    493 
    494 	bank = &avail_mem[0];
    495 	while (pa >= bank->pmem_end)
    496 		bank = bank->pmem_next;
    497 
    498 	pa -= bank->pmem_start;
    499 	idx = bank->pmem_pvbase + _btop(pa);
    500 #ifdef	PMAP_DEBUG
    501 	if ((idx < 0) || (idx >= physmem))
    502 		panic("pa2pv");
    503 #endif
    504 	return &pvbase[idx];
    505 }
    506 
    507 /* pteidx			INTERNAL
    508  **
    509  * Return the index of the given PTE within the entire fixed table of
    510  * PTEs.
    511  */
    512 static INLINE int
    513 pteidx(pte)
    514 	mmu_short_pte_t *pte;
    515 {
    516 	return (pte - kernCbase);
    517 }
    518 
    519 /*
    520  * This just offers a place to put some debugging checks,
    521  * and reduces the number of places "curproc" appears...
    522  */
    523 static INLINE pmap_t
    524 current_pmap()
    525 {
    526 	struct proc *p;
    527 	struct vmspace *vm;
    528 	vm_map_t	map;
    529 	pmap_t	pmap;
    530 
    531 	p = curproc;	/* XXX */
    532 	if (p == NULL)
    533 		pmap = &kernel_pmap;
    534 	else {
    535 		vm = p->p_vmspace;
    536 		map = &vm->vm_map;
    537 		pmap = vm_map_pmap(map);
    538 	}
    539 
    540 	return (pmap);
    541 }
    542 
    543 
    544 /*************************** FUNCTION DEFINITIONS ************************
    545  * These appear here merely for the compiler to enforce type checking on *
    546  * all function calls.                                                   *
    547  *************************************************************************/
    548 
    549 /** External functions
    550  ** - functions used within this module but written elsewhere.
     551  **   all of these functions are in locore.s
    552  ** XXX - These functions were later replaced with their more cryptic
    553  **       hp300 counterparts.  They may be removed now.
    554  **/
    555 #if	0	/* deprecated mmu */
    556 void   mmu_seturp __P((vm_offset_t));
    557 void   mmu_flush __P((int, vm_offset_t));
    558 void   mmu_flusha __P((void));
    559 #endif	/* 0 */
    560 
    561 /** Internal functions
    562  ** - all functions used only within this module are defined in
    563  **   pmap_pvt.h
    564  **/
    565 
    566 /** Interface functions
    567  ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
    568  **   defined.
    569  **/
    570 #ifdef INCLUDED_IN_PMAP_H
    571 void   pmap_bootstrap __P((void));
    572 void  *pmap_bootstrap_alloc __P((int));
    573 void   pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
    574 pmap_t pmap_create __P((vm_size_t));
    575 void   pmap_destroy __P((pmap_t));
    576 void   pmap_reference __P((pmap_t));
    577 boolean_t   pmap_is_referenced __P((vm_offset_t));
    578 boolean_t   pmap_is_modified __P((vm_offset_t));
    579 void   pmap_clear_modify __P((vm_offset_t));
    580 vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
    581 void   pmap_activate __P((pmap_t));
    582 int    pmap_page_index __P((vm_offset_t));
    583 u_int  pmap_free_pages __P((void));
    584 #endif /* INCLUDED_IN_PMAP_H */
    585 
    586 /********************************** CODE ********************************
    587  * Functions that are called from other parts of the kernel are labeled *
    588  * as 'INTERFACE' functions.  Functions that are only called from       *
    589  * within the pmap module are labeled as 'INTERNAL' functions.          *
    590  * Functions that are internal, but are not (currently) used at all are *
    591  * labeled 'INTERNAL_X'.                                                *
    592  ************************************************************************/
    593 
    594 /* pmap_bootstrap			INTERNAL
    595  **
    596  * Initializes the pmap system.  Called at boot time from _vm_init()
    597  * in _startup.c.
    598  *
    599  * Reminder: having a pmap_bootstrap_alloc() and also having the VM
    600  *           system implement pmap_steal_memory() is redundant.
    601  *           Don't release this code without removing one or the other!
    602  */
    603 void
    604 pmap_bootstrap(nextva)
    605 	vm_offset_t nextva;
    606 {
    607 	struct physmemory *membank;
    608 	struct pmap_physmem_struct *pmap_membank;
    609 	vm_offset_t va, pa, eva;
    610 	int b, c, i, j;	/* running table counts */
    611 	int size;
    612 
    613 	/*
    614 	 * This function is called by __bootstrap after it has
    615 	 * determined the type of machine and made the appropriate
    616 	 * patches to the ROM vectors (XXX- I don't quite know what I meant
    617 	 * by that.)  It allocates and sets up enough of the pmap system
    618 	 * to manage the kernel's address space.
    619 	 */
    620 
    621 	/*
    622 	 * Determine the range of kernel virtual and physical
    623 	 * space available. Note that we ABSOLUTELY DEPEND on
    624 	 * the fact that the first bank of memory (4MB) is
    625 	 * mapped linearly to KERNBASE (which we guaranteed in
    626 	 * the first instructions of locore.s).
    627 	 * That is plenty for our bootstrap work.
    628 	 */
    629 	virtual_avail = _round_page(nextva);
    630 	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
    631 	virtual_end = VM_MAX_KERNEL_ADDRESS;
    632 	/* Don't need avail_start til later. */
    633 
    634 	/* We may now call pmap_bootstrap_alloc(). */
    635 	bootstrap_alloc_enabled = TRUE;
    636 
    637 	/*
     638 	 * This is a somewhat unrolled loop to deal with
     639 	 * copying the PROM's 'physmem' banks into the pmap's
    640 	 * banks.  The following is always assumed:
    641 	 * 1. There is always at least one bank of memory.
    642 	 * 2. There is always a last bank of memory, and its
    643 	 *    pmem_next member must be set to NULL.
    644 	 * XXX - Use: do { ... } while (membank->next) instead?
    645 	 * XXX - Why copy this stuff at all? -gwr
    646 	 *     - It is needed in pa2pv().
    647 	 */
    648 	membank = romVectorPtr->v_physmemory;
    649 	pmap_membank = avail_mem;
    650 	total_phys_mem = 0;
    651 
    652 	while (membank->next) {
    653 		pmap_membank->pmem_start = membank->address;
    654 		pmap_membank->pmem_end = membank->address + membank->size;
    655 		total_phys_mem += membank->size;
    656 		/* This silly syntax arises because pmap_membank
    657 		 * is really a pre-allocated array, but it is put into
    658 		 * use as a linked list.
    659 		 */
    660 		pmap_membank->pmem_next = pmap_membank + 1;
    661 		pmap_membank = pmap_membank->pmem_next;
    662 		membank = membank->next;
    663 	}
    664 
    665 	/*
    666 	 * XXX The last bank of memory should be reduced to exclude the
    667 	 * physical pages needed by the PROM monitor from being used
    668 	 * in the VM system.  XXX - See below - Fix!
    669 	 */
    670 	pmap_membank->pmem_start = membank->address;
    671 	pmap_membank->pmem_end = membank->address + membank->size;
    672 	pmap_membank->pmem_next = NULL;
    673 
    674 #if 0	/* XXX - Need to integrate this! */
    675 	/*
    676 	 * The last few pages of physical memory are "owned" by
    677 	 * the PROM.  The total amount of memory we are allowed
    678 	 * to use is given by the romvec pointer. -gwr
    679 	 *
    680 	 * We should dedicate different variables for 'useable'
    681 	 * and 'physically available'.  Most users are used to the
    682 	 * kernel reporting the amount of memory 'physically available'
    683 	 * as opposed to 'useable by the kernel' at boot time. -j
    684 	 */
    685 	total_phys_mem = *romVectorPtr->memoryAvail;
    686 #endif	/* XXX */
    687 
    688 	total_phys_mem += membank->size;	/* XXX see above */
    689 	physmem = btoc(total_phys_mem);
    690 
    691 	/*
    692 	 * Avail_end is set to the first byte of physical memory
    693 	 * after the end of the last bank.  We use this only to
    694 	 * determine if a physical address is "managed" memory.
    695 	 *
    696 	 * XXX - The setting of avail_end is a temporary ROM saving hack.
    697 	 */
    698 	avail_end = pmap_membank->pmem_end -
    699 		(total_phys_mem - *romVectorPtr->memoryAvail);
    700 	avail_end = _trunc_page(avail_end);
    701 
    702 	/*
    703 	 * First allocate enough kernel MMU tables to map all
    704 	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
    705 	 * Note: All must be aligned on 256 byte boundaries.
    706 	 * Start with the level-A table (one of those).
    707 	 */
    708 	size = sizeof(mmu_long_dte_t)  * MMU_A_TBL_SIZE;
    709 	kernAbase = pmap_bootstrap_alloc(size);
    710 	bzero(kernAbase, size);
    711 
    712 	/* Now the level-B kernel tables... */
    713 	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
    714 	kernBbase = pmap_bootstrap_alloc(size);
    715 	bzero(kernBbase, size);
    716 
    717 	/* Now the level-C kernel tables... */
    718 	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
    719 	kernCbase = pmap_bootstrap_alloc(size);
    720 	bzero(kernCbase, size);
    721 	/*
    722 	 * Note: In order for the PV system to work correctly, the kernel
     723 	 * and user-level C tables must be allocated contiguously (pteidx()
         	 * and the pvebase array index PTEs relative to kernCbase).
    724 	 * Nothing should be allocated between here and the allocation of
    725 	 * mmuCbase below.  XXX: Should do this as one allocation, and
    726 	 * then compute a pointer for mmuCbase instead of this...
    727 	 *
    728 	 * Allocate user MMU tables.
     729 	 * These must be contiguous with the preceding.
    730 	 */
    731 
    732 #ifndef	FIXED_NTABLES
    733 	/*
    734 	 * The number of user-level C tables that should be allocated is
    735 	 * related to the size of physical memory.  In general, there should
    736 	 * be enough tables to map four times the amount of available RAM.
    737 	 * The extra amount is needed because some table space is wasted by
    738 	 * fragmentation.
    739 	 */
    740 	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
    741 	NUM_B_TABLES = NUM_C_TABLES / 2;
    742 	NUM_A_TABLES = NUM_B_TABLES / 2;
    743 #endif	/* !FIXED_NTABLES */
    744 
    745 	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE	* NUM_C_TABLES;
    746 	mmuCbase = pmap_bootstrap_alloc(size);
    747 
    748 	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE	* NUM_B_TABLES;
    749 	mmuBbase = pmap_bootstrap_alloc(size);
    750 
    751 	size = sizeof(mmu_long_dte_t)  * MMU_A_TBL_SIZE * NUM_A_TABLES;
    752 	mmuAbase = pmap_bootstrap_alloc(size);
    753 
    754 	/*
    755 	 * Fill in the never-changing part of the kernel tables.
    756 	 * For simplicity, the kernel's mappings will be editable as a
    757 	 * flat array of page table entries at kernCbase.  The
    758 	 * higher level 'A' and 'B' tables must be initialized to point
    759 	 * to this lower one.
    760 	 */
    761 	b = c = 0;
    762 
    763 	/*
    764 	 * Invalidate all mappings below KERNBASE in the A table.
    765 	 * This area has already been zeroed out, but it is good
    766 	 * practice to explicitly show that we are interpreting
    767 	 * it as a list of A table descriptors.
    768 	 */
    769 	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
    770 		kernAbase[i].addr.raw = 0;
    771 	}
    772 
    773 	/*
    774 	 * Set up the kernel A and B tables so that they will reference the
    775 	 * correct spots in the contiguous table of PTEs allocated for the
    776 	 * kernel's virtual memory space.
    777 	 */
    778 	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
    779 		kernAbase[i].attr.raw =
    780 			MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
    781 		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);
    782 
    783 		for (j=0; j < MMU_B_TBL_SIZE; j++) {
    784 			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
    785 				| MMU_DT_SHORT;
    786 			c += MMU_C_TBL_SIZE;
    787 		}
    788 		b += MMU_B_TBL_SIZE;
    789 	}
    790 
    791 	/* XXX - Doing kernel_pmap a little further down. */
    792 
    793 	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
    794 	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
    795 	pmap_alloc_pv();	/* Allocate physical->virtual map.  */
    796 
    797 	/*
    798 	 * We are now done with pmap_bootstrap_alloc().  Round up
    799 	 * `virtual_avail' to the nearest page, and set the flag
    800 	 * to prevent use of pmap_bootstrap_alloc() hereafter.
    801 	 */
    802 	pmap_bootstrap_aalign(NBPG);
    803 	bootstrap_alloc_enabled = FALSE;
    804 
    805 	/*
    806 	 * Now that we are done with pmap_bootstrap_alloc(), we
    807 	 * must save the virtual and physical addresses of the
    808 	 * end of the linearly mapped range, which are stored in
    809 	 * virtual_contig_end and avail_start, respectively.
    810 	 * These variables will never change after this point.
    811 	 */
    812 	virtual_contig_end = virtual_avail;
    813 	avail_start = virtual_avail - KERNBASE;
    814 
    815 	/*
    816 	 * `avail_next' is a running pointer used by pmap_next_page() to
    817 	 * keep track of the next available physical page to be handed
    818 	 * to the VM system during its initialization, in which it
    819 	 * asks for physical pages, one at a time.
    820 	 */
    821 	avail_next = avail_start;
    822 
    823 	/*
    824 	 * Now allocate some virtual addresses, but not the physical pages
    825 	 * behind them.  Note that virtual_avail is already page-aligned.
    826 	 *
    827 	 * tmp_vpages[] is an array of two virtual pages used for temporary
    828 	 * kernel mappings in the pmap module to facilitate various physical
     829 	 * address-oriented operations.
    830 	 */
    831 	tmp_vpages[0] = virtual_avail;
    832 	virtual_avail += NBPG;
    833 	tmp_vpages[1] = virtual_avail;
    834 	virtual_avail += NBPG;
    835 
    836 	/** Initialize the PV system **/
    837 	pmap_init_pv();
    838 
    839 	/*
    840 	 * Fill in the kernel_pmap structure and kernel_crp.
    841 	 */
    842 	kernAphys = mmu_vtop(kernAbase);
    843 	kernel_pmap.pm_a_tmgr = NULL;
    844 	kernel_pmap.pm_a_phys = kernAphys;
    845 	kernel_pmap.pm_refcount = 1; /* always in use */
    846 
    847 	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
    848 	kernel_crp.rp_addr = kernAphys;
    849 
    850 	/*
    851 	 * Now pmap_enter_kernel() may be used safely and will be
    852 	 * the main interface used hereafter to modify the kernel's
    853 	 * virtual address space.  Note that since we are still running
    854 	 * under the PROM's address table, none of these table modifications
    855 	 * actually take effect until pmap_takeover_mmu() is called.
    856 	 *
    857 	 * Note: Our tables do NOT have the PROM linear mappings!
    858 	 * Only the mappings created here exist in our tables, so
    859 	 * remember to map anything we expect to use.
    860 	 */
    861 	va = (vm_offset_t) KERNBASE;
    862 	pa = 0;
    863 
    864 	/*
    865 	 * The first page of the kernel virtual address space is the msgbuf
    866 	 * page.  The page attributes (data, non-cached) are set here, while
    867 	 * the address is assigned to this global pointer in cpu_startup().
    868 	 * XXX - Make it non-cached?
    869 	 */
    870 	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
    871 	va += NBPG; pa += NBPG;
    872 
    873 	/* Next page is used as the temporary stack. */
    874 	pmap_enter_kernel(va, pa, VM_PROT_ALL);
    875 	va += NBPG; pa += NBPG;
    876 
    877 	/*
    878 	 * Map all of the kernel's text segment as read-only and cacheable.
    879 	 * (Cacheable is implied by default).  Unfortunately, the last bytes
    880 	 * of kernel text and the first bytes of kernel data will often be
    881 	 * sharing the same page.  Therefore, the last page of kernel text
     882 	 * has to be mapped as read/write, to accommodate the data.
    883 	 */
    884 	eva = _trunc_page((vm_offset_t)etext);
    885 	for (; va < eva; va += NBPG, pa += NBPG)
    886 		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);
    887 
    888 	/*
    889 	 * Map all of the kernel's data as read/write and cacheable.
    890 	 * This includes: data, BSS, symbols, and everything in the
    891 	 * contiguous memory used by pmap_bootstrap_alloc()
    892 	 */
    893 	for (; pa < avail_start; va += NBPG, pa += NBPG)
    894 		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);
    895 
    896 	/*
    897 	 * At this point we are almost ready to take over the MMU.  But first
    898 	 * we must save the PROM's address space in our map, as we call its
    899 	 * routines and make references to its data later in the kernel.
    900 	 */
    901 	pmap_bootstrap_copyprom();
    902 	pmap_takeover_mmu();
    903 	pmap_bootstrap_setprom();
    904 
    905 	/* Notify the VM system of our page size. */
    906 	PAGE_SIZE = NBPG;
    907 	vm_set_page_size();
    908 }
    909 
    910 
    911 /* pmap_alloc_usermmu			INTERNAL
    912  **
    913  * Called from pmap_bootstrap() to allocate MMU tables that will
    914  * eventually be used for user mappings.
    915  */
    916 void
    917 pmap_alloc_usermmu()
    918 {
    919 	/* XXX: Moved into caller. */
    920 }
    921 
    922 /* pmap_alloc_pv			INTERNAL
    923  **
    924  * Called from pmap_bootstrap() to allocate the physical
    925  * to virtual mapping list.  Each physical page of memory
    926  * in the system has a corresponding element in this list.
    927  */
    928 void
    929 pmap_alloc_pv()
    930 {
    931 	int	i;
    932 	unsigned int	total_mem;
    933 
    934 	/*
    935 	 * Allocate a pv_head structure for every page of physical
    936 	 * memory that will be managed by the system.  Since memory on
    937 	 * the 3/80 is non-contiguous, we cannot arrive at a total page
    938 	 * count by subtraction of the lowest available address from the
    939 	 * highest, but rather we have to step through each memory
    940 	 * bank and add the number of pages in each to the total.
    941 	 *
    942 	 * At this time we also initialize the offset of each bank's
    943 	 * starting pv_head within the pv_head list so that the physical
    944 	 * memory state routines (pmap_is_referenced(),
     945 	 * pmap_is_modified(), et al.) can quickly find corresponding
    946 	 * pv_heads in spite of the non-contiguity.
    947 	 */
    948 	total_mem = 0;
    949 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
    950 		avail_mem[i].pmem_pvbase = _btop(total_mem);
    951 		total_mem += avail_mem[i].pmem_end -
    952 			avail_mem[i].pmem_start;
    953 		if (avail_mem[i].pmem_next == NULL)
    954 			break;
    955 	}
    956 #ifdef	PMAP_DEBUG
    957 	if (total_mem != total_phys_mem)
    958 		panic("pmap_alloc_pv did not arrive at correct page count");
    959 #endif
    960 
    961 	pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
    962 		_btop(total_phys_mem));
    963 }
    964 
    965 /* pmap_alloc_usertmgr			INTERNAL
    966  **
    967  * Called from pmap_bootstrap() to allocate the structures which
    968  * facilitate management of user MMU tables.  Each user MMU table
    969  * in the system has one such structure associated with it.
    970  */
    971 void
    972 pmap_alloc_usertmgr()
    973 {
    974 	/* Allocate user MMU table managers */
    975 	/* It would be a lot simpler to just make these BSS, but */
    976 	/* we may want to change their size at boot time... -j */
    977 	Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
    978 		* NUM_A_TABLES);
    979 	Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
    980 		* NUM_B_TABLES);
    981 	Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
    982 		* NUM_C_TABLES);
    983 
    984 	/*
    985 	 * Allocate PV list elements for the physical to virtual
    986 	 * mapping system.
    987 	 */
    988 	pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
    989 		sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
    990 }
    991 
    992 /* pmap_bootstrap_copyprom()			INTERNAL
    993  **
    994  * Copy the PROM mappings into our own tables.  Note, we
    995  * can use physical addresses until __bootstrap returns.
    996  */
    997 void
    998 pmap_bootstrap_copyprom()
    999 {
   1000 	MachMonRomVector *romp;
   1001 	int *mon_ctbl;
   1002 	mmu_short_pte_t *kpte;
   1003 	int i, len;
   1004 
   1005 	romp = romVectorPtr;
   1006 
   1007 	/*
   1008 	 * Copy the mappings in MON_KDB_START...MONEND
   1009 	 * Note: mon_ctbl[0] maps MON_KDB_START
   1010 	 */
   1011 	mon_ctbl = *romp->monptaddr;
   1012 	i = _btop(MON_KDB_START - KERNBASE);
   1013 	kpte = &kernCbase[i];
   1014 	len = _btop(MONEND - MON_KDB_START);
   1015 
   1016 	for (i = 0; i < len; i++) {
   1017 		kpte[i].attr.raw = mon_ctbl[i];
   1018 	}
   1019 
   1020 	/*
   1021 	 * Copy the mappings at MON_DVMA_BASE (to the end).
   1022 	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
   1023 	 * XXX - This does not appear to be necessary, but
   1024 	 * I'm not sure yet if it is or not. -gwr
   1025 	 */
   1026 	mon_ctbl = *romp->shadowpteaddr;
   1027 	i = _btop(MON_DVMA_BASE - KERNBASE);
   1028 	kpte = &kernCbase[i];
   1029 	len = _btop(MON_DVMA_SIZE);
   1030 
   1031 	for (i = 0; i < len; i++) {
   1032 		kpte[i].attr.raw = mon_ctbl[i];
   1033 	}
   1034 }
   1035 
   1036 /* pmap_takeover_mmu			INTERNAL
   1037  **
   1038  * Called from pmap_bootstrap() after it has copied enough of the
   1039  * PROM mappings into the kernel map so that we can use our own
   1040  * MMU table.
   1041  */
   1042 void
   1043 pmap_takeover_mmu()
   1044 {
   1045 
   1046 	loadcrp(&kernel_crp);
   1047 }
   1048 
   1049 /* pmap_bootstrap_setprom()			INTERNAL
   1050  **
   1051  * Set the PROM mappings so it can see kernel space.
   1052  * Note that physical addresses are used here, which
   1053  * we can get away with because this runs with the
   1054  * low 1GB set for transparent translation.
   1055  */
   1056 void
   1057 pmap_bootstrap_setprom()
   1058 {
   1059 	mmu_long_dte_t *mon_dte;
   1060 	extern struct mmu_rootptr mon_crp;
   1061 	int i;
   1062 
   1063 	mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
   1064 	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
   1065 		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
   1066 		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
   1067 	}
   1068 }
   1069 
   1070 
   1071 /* pmap_init			INTERFACE
   1072  **
   1073  * Called at the end of vm_init() to set up the pmap system to go
   1074  * into full time operation.  All initialization of kernel_pmap
   1075  * should be already done by now, so this should just do things
   1076  * needed for user-level pmaps to work.
   1077  */
   1078 void
   1079 pmap_init()
   1080 {
   1081 	/** Initialize the manager pools **/
   1082 	TAILQ_INIT(&a_pool);
   1083 	TAILQ_INIT(&b_pool);
   1084 	TAILQ_INIT(&c_pool);
   1085 
   1086 	/**************************************************************
   1087 	 * Initialize all tmgr structures and MMU tables they manage. *
   1088 	 **************************************************************/
   1089 	/** Initialize A tables **/
   1090 	pmap_init_a_tables();
   1091 	/** Initialize B tables **/
   1092 	pmap_init_b_tables();
   1093 	/** Initialize C tables **/
   1094 	pmap_init_c_tables();
   1095 }
   1096 
   1097 /* pmap_init_a_tables()			INTERNAL
   1098  **
   1099  * Initializes all A managers, their MMU A tables, and inserts
   1100  * them into the A manager pool for use by the system.
   1101  */
   1102 void
   1103 pmap_init_a_tables()
   1104 {
   1105 	int i;
   1106 	a_tmgr_t *a_tbl;
   1107 
   1108 	for (i=0; i < NUM_A_TABLES; i++) {
   1109 		/* Select the next available A manager from the pool */
   1110 		a_tbl = &Atmgrbase[i];
   1111 
   1112 		/*
   1113 		 * Clear its parent entry.  Set its wired and valid
   1114 		 * entry count to zero.
   1115 		 */
   1116 		a_tbl->at_parent = NULL;
   1117 		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;
   1118 
   1119 		/* Assign it the next available MMU A table from the pool */
   1120 		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];
   1121 
   1122 		/*
   1123 		 * Initialize the MMU A table with the table in the `proc0',
   1124 		 * or kernel, mapping.  This ensures that every process has
   1125 		 * the kernel mapped in the top part of its address space.
   1126 		 */
   1127 		bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
   1128 			sizeof(mmu_long_dte_t));
   1129 
   1130 		/*
   1131 		 * Finally, insert the manager into the A pool,
   1132 		 * making it ready to be used by the system.
   1133 		 */
   1134 		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
    1135 	}
   1136 }
   1137 
   1138 /* pmap_init_b_tables()			INTERNAL
   1139  **
   1140  * Initializes all B table managers, their MMU B tables, and
   1141  * inserts them into the B manager pool for use by the system.
   1142  */
   1143 void
   1144 pmap_init_b_tables()
   1145 {
   1146 	int i,j;
   1147 	b_tmgr_t *b_tbl;
   1148 
   1149 	for (i=0; i < NUM_B_TABLES; i++) {
   1150 		/* Select the next available B manager from the pool */
   1151 		b_tbl = &Btmgrbase[i];
   1152 
   1153 		b_tbl->bt_parent = NULL;	/* clear its parent,  */
   1154 		b_tbl->bt_pidx = 0;		/* parent index,      */
   1155 		b_tbl->bt_wcnt = 0;		/* wired entry count, */
   1156 		b_tbl->bt_ecnt = 0;		/* valid entry count. */
   1157 
   1158 		/* Assign it the next available MMU B table from the pool */
   1159 		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];
   1160 
   1161 		/* Invalidate every descriptor in the table */
   1162 		for (j=0; j < MMU_B_TBL_SIZE; j++)
   1163 			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;
   1164 
   1165 		/* Insert the manager into the B pool */
   1166 		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
   1167 	}
   1168 }
   1169 
   1170 /* pmap_init_c_tables()			INTERNAL
   1171  **
   1172  * Initializes all C table managers, their MMU C tables, and
   1173  * inserts them into the C manager pool for use by the system.
   1174  */
   1175 void
   1176 pmap_init_c_tables()
   1177 {
   1178 	int i,j;
   1179 	c_tmgr_t *c_tbl;
   1180 
   1181 	for (i=0; i < NUM_C_TABLES; i++) {
   1182 		/* Select the next available C manager from the pool */
   1183 		c_tbl = &Ctmgrbase[i];
   1184 
   1185 		c_tbl->ct_parent = NULL;	/* clear its parent,  */
   1186 		c_tbl->ct_pidx = 0;		/* parent index,      */
   1187 		c_tbl->ct_wcnt = 0;		/* wired entry count, */
   1188 		c_tbl->ct_ecnt = 0;		/* valid entry count. */
   1189 
   1190 		/* Assign it the next available MMU C table from the pool */
   1191 		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];
   1192 
   1193 		for (j=0; j < MMU_C_TBL_SIZE; j++)
   1194 			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;
   1195 
   1196 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
   1197 	}
   1198 }
   1199 
   1200 /* pmap_init_pv()			INTERNAL
   1201  **
   1202  * Initializes the Physical to Virtual mapping system.
   1203  */
   1204 void
   1205 pmap_init_pv()
   1206 {
   1207 	int	i;
   1208 
   1209 	/* Initialize every PV head. */
   1210 	for (i = 0; i < _btop(total_phys_mem); i++) {
   1211 		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
   1212 		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
   1213 	}
   1214 
   1215 	pv_initialized = TRUE;
   1216 }
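
         /*
          * An illustrative sketch (hypothetical) of how the PV heads
          * initialized above chain into the pvebase element array.  The
          * element link field is called `pve_next' here only for
          * illustration; the real field name is defined in pmap_pvt.h.
          */
         #if 0
         void
         example_walk_pv_list(pa)
         	vm_offset_t pa;
         {
         	pv_t *pv;
         	int idx;
         
         	pv = pa2pv(pa);
         	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
         		/* pvebase[idx] describes one virtual mapping of `pa'. */
         	}
         }
         #endif	/* 0 */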
   1217 
   1218 /* get_a_table			INTERNAL
   1219  **
   1220  * Retrieve and return a level A table for use in a user map.
   1221  */
   1222 a_tmgr_t *
   1223 get_a_table()
   1224 {
   1225 	a_tmgr_t *tbl;
   1226 	pmap_t pmap;
   1227 
   1228 	/* Get the top A table in the pool */
   1229 	tbl = a_pool.tqh_first;
   1230 	if (tbl == NULL) {
   1231 		/*
    1232 		 * XXX - Instead of panicking here and in other get_x_table
   1233 		 * functions, we do have the option of sleeping on the head of
   1234 		 * the table pool.  Any function which updates the table pool
   1235 		 * would then issue a wakeup() on the head, thus waking up any
   1236 		 * processes waiting for a table.
   1237 		 *
   1238 		 * Actually, the place to sleep would be when some process
   1239 		 * asks for a "wired" mapping that would run us short of
   1240 		 * mapping resources.  This design DEPENDS on always having
   1241 		 * some mapping resources in the pool for stealing, so we
   1242 		 * must make sure we NEVER let the pool become empty. -gwr
   1243 		 */
   1244 		panic("get_a_table: out of A tables.");
   1245 	}
   1246 
   1247 	TAILQ_REMOVE(&a_pool, tbl, at_link);
   1248 	/*
   1249 	 * If the table has a non-null parent pointer then it is in use.
   1250 	 * Forcibly abduct it from its parent and clear its entries.
   1251 	 * No re-entrancy worries here.  This table would not be in the
   1252 	 * table pool unless it was available for use.
   1253 	 *
   1254 	 * Note that the second argument to free_a_table() is FALSE.  This
   1255 	 * indicates that the table should not be relinked into the A table
   1256 	 * pool.  That is a job for the function that called us.
   1257 	 */
   1258 	if (tbl->at_parent) {
   1259 		pmap = tbl->at_parent;
   1260 		free_a_table(tbl, FALSE);
   1261 		pmap->pm_a_tmgr = NULL;
   1262 		pmap->pm_a_phys = kernAphys;
   1263 	}
   1264 #ifdef  NON_REENTRANT
   1265 	/*
   1266 	 * If the table isn't to be wired down, re-insert it at the
   1267 	 * end of the pool.
   1268 	 */
   1269 	if (!wired)
   1270 		/*
   1271 		 * Quandary - XXX
   1272 		 * Would it be better to let the calling function insert this
   1273 		 * table into the queue?  By inserting it here, we are allowing
   1274 		 * it to be stolen immediately.  The calling function is
   1275 		 * probably not expecting to use a table that it is not
   1276 		 * assured full control of.
    1277 		 * Answer - In the interest of re-entrancy, it is best to let
   1278 		 * the calling function determine when a table is available
   1279 		 * for use.  Therefore this code block is not used.
   1280 		 */
   1281 		TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
   1282 #endif	/* NON_REENTRANT */
   1283 	return tbl;
   1284 }
   1285 
   1286 /* get_b_table			INTERNAL
   1287  **
   1288  * Return a level B table for use.
   1289  */
   1290 b_tmgr_t *
   1291 get_b_table()
   1292 {
   1293 	b_tmgr_t *tbl;
   1294 
   1295 	/* See 'get_a_table' for comments. */
   1296 	tbl = b_pool.tqh_first;
   1297 	if (tbl == NULL)
   1298 		panic("get_b_table: out of B tables.");
   1299 	TAILQ_REMOVE(&b_pool, tbl, bt_link);
   1300 	if (tbl->bt_parent) {
   1301 		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
   1302 		tbl->bt_parent->at_ecnt--;
   1303 		free_b_table(tbl, FALSE);
   1304 	}
   1305 #ifdef	NON_REENTRANT
   1306 	if (!wired)
    1307 		/* XXX see quandary in get_a_table */
   1308 		/* XXX start lock */
   1309 		TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
   1310 		/* XXX end lock */
   1311 #endif	/* NON_REENTRANT */
   1312 	return tbl;
   1313 }
   1314 
   1315 /* get_c_table			INTERNAL
   1316  **
   1317  * Return a level C table for use.
   1318  */
   1319 c_tmgr_t *
   1320 get_c_table()
   1321 {
   1322 	c_tmgr_t *tbl;
   1323 
   1324 	/* See 'get_a_table' for comments */
   1325 	tbl = c_pool.tqh_first;
   1326 	if (tbl == NULL)
   1327 		panic("get_c_table: out of C tables.");
   1328 	TAILQ_REMOVE(&c_pool, tbl, ct_link);
   1329 	if (tbl->ct_parent) {
   1330 		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
   1331 		tbl->ct_parent->bt_ecnt--;
   1332 		free_c_table(tbl, FALSE);
   1333 	}
   1334 #ifdef	NON_REENTRANT
   1335 	if (!wired)
   1336 		/* XXX See quandary in get_a_table */
   1337 		/* XXX start lock */
    1338 		TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
   1339 		/* XXX end lock */
   1340 #endif	/* NON_REENTRANT */
   1341 
   1342 	return tbl;
   1343 }
   1344 
   1345 /*
   1346  * The following 'free_table' and 'steal_table' functions are called to
   1347  * detach tables from their current obligations (parents and children) and
   1348  * prepare them for reuse in another mapping.
   1349  *
   1350  * Free_table is used when the calling function will handle the fate
   1351  * of the parent table, such as returning it to the free pool when it has
   1352  * no valid entries.  Functions that do not want to handle this should
   1353  * call steal_table, in which the parent table's descriptors and entry
   1354  * count are automatically modified when this table is removed.
   1355  */
   1356 
   1357 /* free_a_table			INTERNAL
   1358  **
   1359  * Unmaps the given A table and all child tables from their current
   1360  * mappings.  Returns the number of pages that were invalidated.
   1361  * If 'relink' is true, the function will return the table to the head
   1362  * of the available table pool.
   1363  *
   1364  * Cache note: The MC68851 will automatically flush all
   1365  * descriptors derived from a given A table from its
   1366  * Automatic Translation Cache (ATC) if we issue a
   1367  * 'PFLUSHR' instruction with the base address of the
    1368  * table.  This function should do so, and does.
   1369  * Note note: We are using an MC68030 - there is no
   1370  * PFLUSHR.
   1371  */
   1372 int
   1373 free_a_table(a_tbl, relink)
   1374 	a_tmgr_t *a_tbl;
   1375 	boolean_t relink;
   1376 {
   1377 	int i, removed_cnt;
   1378 	mmu_long_dte_t	*dte;
   1379 	mmu_short_dte_t *dtbl;
   1380 	b_tmgr_t	*tmgr;
   1381 
   1382 	/*
   1383 	 * Flush the ATC cache of all cached descriptors derived
   1384 	 * from this table.
   1385 	 * Sun3x does not use 68851's cached table feature
   1386 	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
   1387 	 */
   1388 
   1389 	/*
   1390 	 * Remove any pending cache flushes that were designated
   1391 	 * for the pmap this A table belongs to.
   1392 	 * a_tbl->parent->atc_flushq[0] = 0;
   1393 	 * Not implemented in sun3x.
   1394 	 */
   1395 
   1396 	/*
   1397 	 * All A tables in the system should retain a map for the
   1398 	 * kernel. If the table contains any valid descriptors
   1399 	 * (other than those for the kernel area), invalidate them all,
   1400 	 * stopping short of the kernel's entries.
   1401 	 */
   1402 	removed_cnt = 0;
   1403 	if (a_tbl->at_ecnt) {
   1404 		dte = a_tbl->at_dtbl;
   1405 		for (i=0; i < MMU_TIA(KERNBASE); i++) {
   1406 			/*
   1407 			 * If a table entry points to a valid B table, free
   1408 			 * it and its children.
   1409 			 */
   1410 			if (MMU_VALID_DT(dte[i])) {
   1411 				/*
   1412 				 * The following block does several things,
   1413 				 * from innermost expression to the
   1414 				 * outermost:
    1415 				 * 1) It extracts the base
   1416 				 *    address of the B table pointed
   1417 				 *    to in the A table entry dte[i].
   1418 				 * 2) It converts this base address into
   1419 				 *    the virtual address it can be
   1420 				 *    accessed with. (all MMU tables point
   1421 				 *    to physical addresses.)
   1422 				 * 3) It finds the corresponding manager
   1423 				 *    structure which manages this MMU table.
   1424 				 * 4) It frees the manager structure.
   1425 				 *    (This frees the MMU table and all
   1426 				 *    child tables. See 'free_b_table' for
   1427 				 *    details.)
   1428 				 */
   1429 				dtbl = mmu_ptov(dte[i].addr.raw);
   1430 				tmgr = mmuB2tmgr(dtbl);
   1431 				removed_cnt += free_b_table(tmgr, TRUE);
   1432 				dte[i].attr.raw = MMU_DT_INVALID;
   1433 			}
   1434 		}
   1435 		a_tbl->at_ecnt = 0;
   1436 	}
   1437 	if (relink) {
   1438 		a_tbl->at_parent = NULL;
   1439 		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
   1440 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
   1441 	}
   1442 	return removed_cnt;
   1443 }
   1444 
   1445 /* free_b_table			INTERNAL
   1446  **
   1447  * Unmaps the given B table and all its children from their current
   1448  * mappings.  Returns the number of pages that were invalidated.
   1449  * (For comments, see 'free_a_table()').
   1450  */
   1451 int
   1452 free_b_table(b_tbl, relink)
   1453 	b_tmgr_t *b_tbl;
   1454 	boolean_t relink;
   1455 {
   1456 	int i, removed_cnt;
   1457 	mmu_short_dte_t *dte;
   1458 	mmu_short_pte_t	*dtbl;
   1459 	c_tmgr_t	*tmgr;
   1460 
   1461 	removed_cnt = 0;
   1462 	if (b_tbl->bt_ecnt) {
   1463 		dte = b_tbl->bt_dtbl;
   1464 		for (i=0; i < MMU_B_TBL_SIZE; i++) {
   1465 			if (MMU_VALID_DT(dte[i])) {
   1466 				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
   1467 				tmgr = mmuC2tmgr(dtbl);
   1468 				removed_cnt += free_c_table(tmgr, TRUE);
   1469 				dte[i].attr.raw = MMU_DT_INVALID;
   1470 			}
   1471 		}
   1472 		b_tbl->bt_ecnt = 0;
   1473 	}
   1474 
   1475 	if (relink) {
   1476 		b_tbl->bt_parent = NULL;
   1477 		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   1478 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
   1479 	}
   1480 	return removed_cnt;
   1481 }
   1482 
   1483 /* free_c_table			INTERNAL
   1484  **
   1485  * Unmaps the given C table from use and returns it to the pool for
   1486  * re-use.  Returns the number of pages that were invalidated.
   1487  *
   1488  * This function preserves any physical page modification information
   1489  * contained in the page descriptors within the C table by calling
   1490  * 'pmap_remove_pte().'
   1491  */
   1492 int
   1493 free_c_table(c_tbl, relink)
   1494 	c_tmgr_t *c_tbl;
   1495 	boolean_t relink;
   1496 {
   1497 	int i, removed_cnt;
   1498 
   1499 	removed_cnt = 0;
   1500 	if (c_tbl->ct_ecnt) {
   1501 		for (i=0; i < MMU_C_TBL_SIZE; i++) {
   1502 			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
   1503 				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
   1504 				removed_cnt++;
   1505 			}
   1506 		}
   1507 		c_tbl->ct_ecnt = 0;
   1508 	}
   1509 
   1510 	if (relink) {
   1511 		c_tbl->ct_parent = NULL;
   1512 		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   1513 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
   1514 	}
   1515 	return removed_cnt;
   1516 }
   1517 
   1518 #if 0
   1519 /* free_c_table_novalid			INTERNAL
   1520  **
   1521  * Frees the given C table manager without checking to see whether
   1522  * or not it contains any valid page descriptors as it is assumed
   1523  * that it does not.
   1524  */
   1525 void
   1526 free_c_table_novalid(c_tbl)
   1527 	c_tmgr_t *c_tbl;
   1528 {
   1529 	TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   1530 	TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
   1531 	c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
   1532 	c_tbl->ct_parent->bt_ecnt--;
   1533 	/*
   1534 	 * XXX - Should call equiv. of 'free_b_table_novalid' here if
   1535 	 * we just removed the last entry of the parent B table.
   1536 	 * But I want to insure that this will not endanger pmap_enter()
   1537 	 * with sudden removal of tables it is working with.
   1538 	 *
   1539 	 * We should probably add another field to each table, indicating
   1540 	 * whether or not it is 'locked', ie. in the process of being
   1541 	 * modified.
   1542 	 */
   1543 	c_tbl->ct_parent = NULL;
   1544 }
   1545 #endif
   1546 
   1547 /* pmap_remove_pte			INTERNAL
   1548  **
   1549  * Unmap the given pte and preserve any page modification
    1550  * information by transferring it to the pv head of the
   1551  * physical page it maps to.  This function does not update
   1552  * any reference counts because it is assumed that the calling
   1553  * function will do so.
   1554  */
   1555 void
   1556 pmap_remove_pte(pte)
   1557 	mmu_short_pte_t *pte;
   1558 {
   1559 	u_short     pv_idx, targ_idx;
   1560 	int         s;
   1561 	vm_offset_t pa;
   1562 	pv_t       *pv;
   1563 
   1564 	pa = MMU_PTE_PA(*pte);
   1565 	if (is_managed(pa)) {
   1566 		pv = pa2pv(pa);
   1567 		targ_idx = pteidx(pte);	/* Index of PTE being removed    */
   1568 
   1569 		/*
   1570 		 * If the PTE being removed is the first (or only) PTE in
   1571 		 * the list of PTEs currently mapped to this page, remove the
   1572 		 * PTE by changing the index found on the PV head.  Otherwise
   1573 		 * a linear search through the list will have to be executed
   1574 		 * in order to find the PVE which points to the PTE being
   1575 		 * removed, so that it may be modified to point to its new
   1576 		 * neighbor.
   1577 		 */
   1578 		s = splimp();
   1579 		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
   1580 		if (pv_idx == targ_idx) {
   1581 			pv->pv_idx = pvebase[targ_idx].pve_next;
   1582 		} else {
   1583 			/*
   1584 			 * Find the PV element which points to the target
   1585 			 * element.
   1586 			 */
   1587 			while (pvebase[pv_idx].pve_next != targ_idx) {
   1588 				pv_idx = pvebase[pv_idx].pve_next;
   1589 #ifdef	DIAGNOSTIC
   1590 				if (pv_idx == PVE_EOL)
   1591 					panic("pmap_remove_pte: pv list end!");
   1592 #endif
   1593 			}
   1594 
   1595 			/*
   1596 			 * At this point, pv_idx is the index of the PV
   1597 			 * element just before the target element in the list.
   1598 			 * Unlink the target.
   1599 			 */
   1600 			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
   1601 		}
   1602 		/*
   1603 		 * Save the mod/ref bits of the pte by simply
   1604 		 * ORing the entire pte onto the pv_flags member
   1605 		 * of the pv structure.
   1606 		 * There is no need to use a separate bit pattern
   1607 		 * for usage information on the pv head than that
   1608 		 * which is used on the MMU ptes.
   1609 		 */
   1610 		pv->pv_flags |= (u_short) pte->attr.raw;
   1611 		splx(s);
   1612 	}
   1613 
   1614 	pte->attr.raw = MMU_DT_INVALID;
   1615 }
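
         /*
          * Worked example of the unlink above (a sketch): suppose a page's
          * list is pv->pv_idx == 7, pvebase[7].pve_next == 3 and
          * pvebase[3].pve_next == PVE_EOL.  Removing the PTE with index 3
          * starts the search at 7, finds pvebase[7].pve_next == 3, and sets
          * pvebase[7].pve_next = PVE_EOL.  Removing index 7 instead takes
          * the short path: pv->pv_idx = pvebase[7].pve_next = 3.
          */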
   1616 
   1617 /* pmap_stroll			INTERNAL
   1618  **
   1619  * Retrieve the addresses of all table managers involved in the mapping of
    1620  * the given virtual address.  If the table walk completed successfully,
    1621  * return TRUE.  If it was only partially successful, return FALSE.
   1622  * The table walk performed by this function is important to many other
   1623  * functions in this module.
   1624  *
   1625  * Note: This function ought to be easier to read.
   1626  */
   1627 boolean_t
   1628 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
   1629 	pmap_t pmap;
   1630 	vm_offset_t va;
   1631 	a_tmgr_t **a_tbl;
   1632 	b_tmgr_t **b_tbl;
   1633 	c_tmgr_t **c_tbl;
   1634 	mmu_short_pte_t **pte;
   1635 	int *a_idx, *b_idx, *pte_idx;
   1636 {
   1637 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
   1638 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
   1639 
   1640 	if (pmap == pmap_kernel())
   1641 		return FALSE;
   1642 
   1643 	/* Does the given pmap have its own A table? */
   1644 	*a_tbl = pmap->pm_a_tmgr;
   1645 	if (*a_tbl == NULL)
   1646 		return FALSE; /* No.  Return unknown. */
   1647 	/* Does the A table have a valid B table
   1648 	 * under the corresponding table entry?
   1649 	 */
   1650 	*a_idx = MMU_TIA(va);
   1651 	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
   1652 	if (!MMU_VALID_DT(*a_dte))
   1653 		return FALSE; /* No. Return unknown. */
   1654 	/* Yes. Extract B table from the A table. */
   1655 	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
   1656 	/* Does the B table have a valid C table
   1657 	 * under the corresponding table entry?
   1658 	 */
   1659 	*b_idx = MMU_TIB(va);
   1660 	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
   1661 	if (!MMU_VALID_DT(*b_dte))
   1662 		return FALSE; /* No. Return unknown. */
   1663 	/* Yes. Extract C table from the B table. */
   1664 	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
   1665 	*pte_idx = MMU_TIC(va);
   1666 	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
   1667 
   1668 	return	TRUE;
   1669 }
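
         /*
          * Typical usage of pmap_stroll() (a sketch; cf. the real call in
          * pmap_change_wiring() below):
          *
          *	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
          *	    &a_idx, &b_idx, &pte_idx) == FALSE)
          *		return;	// no complete mapping exists for 'va'
          *	// 'pte' now points at the page descriptor for 'va'.
          */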
   1670 
   1671 /* pmap_enter			INTERFACE
   1672  **
   1673  * Called by the kernel to map a virtual address
   1674  * to a physical address in the given process map.
   1675  *
   1676  * Note: this function should apply an exclusive lock
   1677  * on the pmap system for its duration.  (it certainly
   1678  * would save my hair!!)
   1679  * This function ought to be easier to read.
   1680  */
   1681 void
   1682 pmap_enter(pmap, va, pa, prot, wired)
   1683 	pmap_t	pmap;
   1684 	vm_offset_t va;
   1685 	vm_offset_t pa;
   1686 	vm_prot_t prot;
   1687 	boolean_t wired;
   1688 {
   1689 	boolean_t insert, managed; /* Marks the need for PV insertion.*/
   1690 	u_short nidx;            /* PV list index                     */
   1691 	int s;                   /* Used for splimp()/splx()          */
   1692 	int flags;               /* Mapping flags. eg. Cache inhibit  */
   1693 	u_int a_idx, b_idx, pte_idx; /* table indices                 */
   1694 	a_tmgr_t *a_tbl;         /* A: long descriptor table manager  */
   1695 	b_tmgr_t *b_tbl;         /* B: short descriptor table manager */
   1696 	c_tmgr_t *c_tbl;         /* C: short page table manager       */
   1697 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
   1698 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
   1699 	mmu_short_pte_t *c_pte;  /* C: short page descriptor table    */
   1700 	pv_t      *pv;           /* pv list head                      */
   1701 	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end   */
   1702 
   1703 	if (pmap == NULL)
   1704 		return;
   1705 	if (pmap == pmap_kernel()) {
   1706 		pmap_enter_kernel(va, pa, prot);
   1707 		return;
   1708 	}
   1709 
   1710 	flags  = (pa & ~MMU_PAGE_MASK);
   1711 	pa    &= MMU_PAGE_MASK;
   1712 
   1713 	/*
   1714 	 * Determine if the physical address being mapped is on-board RAM.
   1715 	 * Any other area of the address space is likely to belong to a
    1716 	 * device and hence it would be disastrous to cache its contents.
   1717 	 */
   1718 	if ((managed = is_managed(pa)) == FALSE)
   1719 		flags |= PMAP_NC;
   1720 
   1721 	/*
   1722 	 * For user mappings we walk along the MMU tables of the given
   1723 	 * pmap, reaching a PTE which describes the virtual page being
   1724 	 * mapped or changed.  If any level of the walk ends in an invalid
   1725 	 * entry, a table must be allocated and the entry must be updated
   1726 	 * to point to it.
   1727 	 * There is a bit of confusion as to whether this code must be
   1728 	 * re-entrant.  For now we will assume it is.  To support
   1729 	 * re-entrancy we must unlink tables from the table pool before
   1730 	 * we assume we may use them.  Tables are re-linked into the pool
   1731 	 * when we are finished with them at the end of the function.
   1732 	 * But I don't feel like doing that until we have proof that this
   1733 	 * needs to be re-entrant.
   1734 	 * 'llevel' records which tables need to be relinked.
   1735 	 */
   1736 	llevel = NONE;
   1737 
   1738 	/*
   1739 	 * Step 1 - Retrieve the A table from the pmap.  If it has no
   1740 	 * A table, allocate a new one from the available pool.
   1741 	 */
   1742 
   1743 	a_tbl = pmap->pm_a_tmgr;
   1744 	if (a_tbl == NULL) {
   1745 		/*
   1746 		 * This pmap does not currently have an A table.  Allocate
   1747 		 * a new one.
   1748 		 */
   1749 		a_tbl = get_a_table();
   1750 		a_tbl->at_parent = pmap;
   1751 
   1752 		/*
   1753 		 * Assign this new A table to the pmap, and calculate its
   1754 		 * physical address so that loadcrp() can be used to make
   1755 		 * the table active.
   1756 		 */
   1757 		pmap->pm_a_tmgr = a_tbl;
   1758 		pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
   1759 
   1760 		/*
   1761 		 * If the process receiving a new A table is the current
   1762 		 * process, we are responsible for setting the MMU so that
   1763 		 * it becomes the current address space.  This only adds
   1764 		 * new mappings, so no need to flush anything.
   1765 		 */
   1766 		if (pmap == current_pmap()) {
   1767 			kernel_crp.rp_addr = pmap->pm_a_phys;
   1768 			loadcrp(&kernel_crp);
   1769 		}
   1770 
   1771 		if (!wired)
   1772 			llevel = NEWA;
   1773 	} else {
   1774 		/*
   1775 		 * Use the A table already allocated for this pmap.
   1776 		 * Unlink it from the A table pool if necessary.
   1777 		 */
   1778 		if (wired && !a_tbl->at_wcnt)
   1779 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
   1780 	}
   1781 
   1782 	/*
   1783 	 * Step 2 - Walk into the B table.  If there is no valid B table,
   1784 	 * allocate one.
   1785 	 */
   1786 
   1787 	a_idx = MMU_TIA(va);            /* Calculate the TIA of the VA. */
   1788 	a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
   1789 	if (MMU_VALID_DT(*a_dte)) {     /* Is the descriptor valid? */
   1790 		/* The descriptor is valid.  Use the B table it points to. */
   1791 		/*************************************
   1792 		 *               a_idx               *
   1793 		 *                 v                 *
   1794 		 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
   1795 		 *          | | | | | | | | | | | |  *
   1796 		 *          +-+-+-+-+-+-+-+-+-+-+-+- *
   1797 		 *                 |                 *
   1798 		 *                 \- b_tbl -> +-+-  *
   1799 		 *                             | |   *
   1800 		 *                             +-+-  *
   1801 		 *************************************/
   1802 		b_dte = mmu_ptov(a_dte->addr.raw);
   1803 		b_tbl = mmuB2tmgr(b_dte);
   1804 
   1805 		/*
   1806 		 * If the requested mapping must be wired, but this table
   1807 		 * being used to map it is not, the table must be removed
   1808 		 * from the available pool and its wired entry count
   1809 		 * incremented.
   1810 		 */
   1811 		if (wired && !b_tbl->bt_wcnt) {
   1812 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   1813 			a_tbl->at_wcnt++;
   1814 		}
   1815 	} else {
   1816 		/* The descriptor is invalid.  Allocate a new B table. */
   1817 		b_tbl = get_b_table();
   1818 
   1819 		/* Point the parent A table descriptor to this new B table. */
   1820 		a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
   1821 		a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
   1822 		a_tbl->at_ecnt++; /* Update parent's valid entry count */
   1823 
   1824 		/* Create the necessary back references to the parent table */
   1825 		b_tbl->bt_parent = a_tbl;
   1826 		b_tbl->bt_pidx = a_idx;
   1827 
   1828 		/*
   1829 		 * If this table is to be wired, make sure the parent A table
   1830 		 * wired count is updated to reflect that it has another wired
   1831 		 * entry.
   1832 		 */
   1833 		if (wired)
   1834 			a_tbl->at_wcnt++;
   1835 		else if (llevel == NONE)
   1836 			llevel = NEWB;
   1837 	}
   1838 
   1839 	/*
   1840 	 * Step 3 - Walk into the C table, if there is no valid C table,
   1841 	 * allocate one.
   1842 	 */
   1843 
   1844 	b_idx = MMU_TIB(va);            /* Calculate the TIB of the VA */
   1845 	b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
   1846 	if (MMU_VALID_DT(*b_dte)) {     /* Is the descriptor valid? */
   1847 		/* The descriptor is valid.  Use the C table it points to. */
   1848 		/**************************************
   1849 		 *               c_idx                *
   1850 		 * |                v                 *
   1851 		 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
   1852 		 *             | | | | | | | | | | |  *
   1853 		 *             +-+-+-+-+-+-+-+-+-+-+- *
   1854 		 *                  |                 *
   1855 		 *                  \- c_tbl -> +-+-- *
   1856 		 *                              | | | *
   1857 		 *                              +-+-- *
   1858 		 **************************************/
   1859 		c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
   1860 		c_tbl = mmuC2tmgr(c_pte);
   1861 
   1862 		/* If mapping is wired and table is not */
   1863 		if (wired && !c_tbl->ct_wcnt) {
   1864 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   1865 			b_tbl->bt_wcnt++;
   1866 		}
   1867 	} else {
   1868 		/* The descriptor is invalid.  Allocate a new C table. */
   1869 		c_tbl = get_c_table();
   1870 
   1871 		/* Point the parent B table descriptor to this new C table. */
   1872 		b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
   1873 		b_dte->attr.raw |= MMU_DT_SHORT;
   1874 		b_tbl->bt_ecnt++; /* Update parent's valid entry count */
   1875 
   1876 		/* Create the necessary back references to the parent table */
   1877 		c_tbl->ct_parent = b_tbl;
   1878 		c_tbl->ct_pidx = b_idx;
   1879 
   1880 		/*
   1881 		 * If this table is to be wired, make sure the parent B table
   1882 		 * wired count is updated to reflect that it has another wired
   1883 		 * entry.
   1884 		 */
   1885 		if (wired)
   1886 			b_tbl->bt_wcnt++;
   1887 		else if (llevel == NONE)
   1888 			llevel = NEWC;
   1889 	}
   1890 
   1891 	/*
   1892 	 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
   1893 	 * slot of the C table, describing the PA to which the VA is mapped.
   1894 	 */
   1895 
   1896 	pte_idx = MMU_TIC(va);
   1897 	c_pte = &c_tbl->ct_dtbl[pte_idx];
   1898 	if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
   1899 		/*
   1900 		 * The PTE is currently valid.  This particular call
   1901 		 * is just a synonym for one (or more) of the following
   1902 		 * operations:
   1903 		 *     change protection of a page
   1904 		 *     change wiring status of a page
   1905 		 *     remove the mapping of a page
   1906 		 *
   1907 		 * XXX - Semi critical: This code should unwire the PTE
   1908 		 * and, possibly, associated parent tables if this is a
   1909 		 * change wiring operation.  Currently it does not.
   1910 		 *
   1911 		 * This may be ok if pmap_change_wiring() is the only
   1912 		 * interface used to UNWIRE a page.
   1913 		 */
   1914 
   1915 		/* First check if this is a wiring operation. */
   1916 		if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
   1917 			/*
   1918 			 * The PTE is already wired.  To prevent it from being
   1919 			 * counted as a new wiring operation, reset the 'wired'
   1920 			 * variable.
   1921 			 */
   1922 			wired = FALSE;
   1923 		}
   1924 
   1925 		/* Is the new address the same as the old? */
   1926 		if (MMU_PTE_PA(*c_pte) == pa) {
   1927 			/*
   1928 			 * Yes, mark that it does not need to be reinserted
   1929 			 * into the PV list.
   1930 			 */
   1931 			insert = FALSE;
   1932 
   1933 			/*
   1934 			 * Clear all but the modified, referenced and wired
   1935 			 * bits on the PTE.
   1936 			 */
   1937 			c_pte->attr.raw &= (MMU_SHORT_PTE_M
   1938 				| MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
   1939 		} else {
   1940 			/* No, remove the old entry */
   1941 			pmap_remove_pte(c_pte);
   1942 			insert = TRUE;
   1943 		}
   1944 
   1945 		/*
   1946 		 * TLB flush is only necessary if modifying current map.
   1947 		 * However, in pmap_enter(), the pmap almost always IS
   1948 		 * the current pmap, so don't even bother to check.
   1949 		 */
   1950 		TBIS(va);
   1951 	} else {
   1952 		/*
   1953 		 * The PTE is invalid.  Increment the valid entry count in
   1954 		 * the C table manager to reflect the addition of a new entry.
   1955 		 */
   1956 		c_tbl->ct_ecnt++;
   1957 
   1958 		/* XXX - temporarily make sure the PTE is cleared. */
   1959 		c_pte->attr.raw = 0;
   1960 
   1961 		/* It will also need to be inserted into the PV list. */
   1962 		insert = TRUE;
   1963 	}
   1964 
   1965 	/*
   1966 	 * If page is changing from unwired to wired status, set an unused bit
   1967 	 * within the PTE to indicate that it is wired.  Also increment the
   1968 	 * wired entry count in the C table manager.
   1969 	 */
   1970 	if (wired) {
   1971 		c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
   1972 		c_tbl->ct_wcnt++;
   1973 	}
   1974 
   1975 	/*
   1976 	 * Map the page, being careful to preserve modify/reference/wired
   1977 	 * bits.  At this point it is assumed that the PTE either has no bits
   1978 	 * set, or if there are set bits, they are only modified, reference or
   1979 	 * wired bits.  If not, the following statement will cause erratic
   1980 	 * behavior.
   1981 	 */
   1982 #ifdef	PMAP_DEBUG
   1983 	if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
   1984 		MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
   1985 		printf("pmap_enter: junk left in PTE at %p\n", c_pte);
   1986 		Debugger();
   1987 	}
   1988 #endif
   1989 	c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
   1990 
   1991 	/*
   1992 	 * If the mapping should be read-only, set the write protect
   1993 	 * bit in the PTE.
   1994 	 */
   1995 	if (!(prot & VM_PROT_WRITE))
   1996 		c_pte->attr.raw |= MMU_SHORT_PTE_WP;
   1997 
   1998 	/*
    1999 	 * If the mapping should be cache-inhibited (indicated by flag bits
    2000 	 * found in the low-order bits of the physical address), mark the
    2001 	 * PTE as a cache-inhibited page.
   2002 	 */
   2003 	if (flags & PMAP_NC)
   2004 		c_pte->attr.raw |= MMU_SHORT_PTE_CI;
   2005 
   2006 	/*
   2007 	 * If the physical address being mapped is managed by the PV
   2008 	 * system then link the pte into the list of pages mapped to that
   2009 	 * address.
   2010 	 */
   2011 	if (insert && managed) {
   2012 		pv = pa2pv(pa);
   2013 		nidx = pteidx(c_pte);
   2014 
   2015 		s = splimp();
   2016 		pvebase[nidx].pve_next = pv->pv_idx;
   2017 		pv->pv_idx = nidx;
   2018 		splx(s);
   2019 	}
   2020 
   2021 	/* Move any allocated tables back into the active pool. */
   2022 
   2023 	switch (llevel) {
   2024 		case NEWA:
   2025 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
   2026 			/* FALLTHROUGH */
   2027 		case NEWB:
   2028 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
   2029 			/* FALLTHROUGH */
   2030 		case NEWC:
   2031 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
   2032 			/* FALLTHROUGH */
   2033 		default:
   2034 			break;
   2035 	}
   2036 }
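
         /*
          * Usage note (a sketch): callers may encode attribute flags in the
          * low-order bits of 'pa', e.g. PMAP_NC to request a non-cached
          * mapping of a device page:
          *
          *	pmap_enter(pmap, va, pa | PMAP_NC,
          *	    VM_PROT_READ|VM_PROT_WRITE, TRUE);
          *
          * pmap_enter() separates the flag bits from the page frame address
          * with MMU_PAGE_MASK before using either.
          */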
   2037 
   2038 /* pmap_enter_kernel			INTERNAL
   2039  **
   2040  * Map the given virtual address to the given physical address within the
   2041  * kernel address space.  This function exists because the kernel map does
   2042  * not do dynamic table allocation.  It consists of a contiguous array of ptes
   2043  * and can be edited directly without the need to walk through any tables.
   2044  *
   2045  * XXX: "Danger, Will Robinson!"
   2046  * Note that the kernel should never take a fault on any page
   2047  * between [ KERNBASE .. virtual_avail ] and this is checked in
   2048  * trap.c for kernel-mode MMU faults.  This means that mappings
    2049  * created in that range must be implicitly wired. -gwr
   2050  */
   2051 void
   2052 pmap_enter_kernel(va, pa, prot)
   2053 	vm_offset_t va;
   2054 	vm_offset_t pa;
   2055 	vm_prot_t   prot;
   2056 {
   2057 	boolean_t       was_valid, insert;
   2058 	u_short         pte_idx, pv_idx;
   2059 	int             s, flags;
   2060 	mmu_short_pte_t *pte;
   2061 	pv_t            *pv;
   2062 	vm_offset_t     old_pa;
   2063 
   2064 	flags  = (pa & ~MMU_PAGE_MASK);
   2065 	pa    &= MMU_PAGE_MASK;
   2066 
   2067 	/*
   2068 	 * Calculate the index of the PTE being modified.
   2069 	 */
   2070 	pte_idx = (u_long) _btop(va - KERNBASE);
   2071 
   2072 	/* This array is traditionally named "Sysmap" */
   2073 	pte = &kernCbase[pte_idx];
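         	/*
         	 * Worked example (a sketch, assuming this port's 8KB page
         	 * size): for va == KERNBASE + 0x4000, _btop() yields
         	 * pte_idx == 2, so this mapping is described by the third
         	 * PTE in kernCbase[].
         	 */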
   2074 
   2075 	s = splimp();
   2076 	if (MMU_VALID_DT(*pte)) {
   2077 		was_valid = TRUE;
   2078 		/*
   2079 		 * If the PTE is already mapped to an address and it differs
   2080 		 * from the address requested, unlink it from the PV list.
   2081 		 */
   2082 		old_pa = MMU_PTE_PA(*pte);
   2083 		if (pa != old_pa) {
   2084 		    if (is_managed(old_pa)) {
   2085 		        /* XXX - Make this into a function call? */
   2086 		        pv = pa2pv(old_pa);
   2087 		        pv_idx = pv->pv_idx;
   2088 		        if (pv_idx == pte_idx) {
   2089 		            pv->pv_idx = pvebase[pte_idx].pve_next;
   2090 		        } else {
   2091 		            while (pvebase[pv_idx].pve_next != pte_idx)
   2092 		                pv_idx = pvebase[pv_idx].pve_next;
   2093 		            pvebase[pv_idx].pve_next =
   2094 		                pvebase[pte_idx].pve_next;
   2095 		        }
   2096 		        /* Save modified/reference bits */
   2097 		        pv->pv_flags |= (u_short) pte->attr.raw;
   2098 		    }
   2099 		    if (is_managed(pa))
   2100 		        insert = TRUE;
   2101 		    else
   2102 		        insert = FALSE;
   2103 		    /*
   2104 		     * Clear out any old bits in the PTE.
   2105 		     */
   2106 		    pte->attr.raw = MMU_DT_INVALID;
   2107 		} else {
   2108 		    /*
   2109 		     * Old PA and new PA are the same.  No need to relink
   2110 		     * the mapping within the PV list.
   2111 		     */
   2112 		     insert = FALSE;
   2113 
   2114 		    /*
   2115 		     * Save any mod/ref bits on the PTE.
   2116 		     */
   2117 		    pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
   2118 		}
   2119 	} else {
   2120 		pte->attr.raw = MMU_DT_INVALID;
   2121 		was_valid = FALSE;
   2122 		if (is_managed(pa))
   2123 			insert = TRUE;
   2124 		else
   2125 			insert = FALSE;
   2126 	}
   2127 
   2128 	/*
   2129 	 * Map the page.  Being careful to preserve modified/referenced bits
   2130 	 * on the PTE.
   2131 	 */
   2132 	pte->attr.raw |= (pa | MMU_DT_PAGE);
   2133 
   2134 	if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
   2135 		pte->attr.raw |= MMU_SHORT_PTE_WP;
   2136 	if (flags & PMAP_NC)
   2137 		pte->attr.raw |= MMU_SHORT_PTE_CI;
   2138 	if (was_valid)
   2139 		TBIS(va);
   2140 
   2141 	/*
   2142 	 * Insert the PTE into the PV system, if need be.
   2143 	 */
   2144 	if (insert) {
   2145 		pv = pa2pv(pa);
   2146 		pvebase[pte_idx].pve_next = pv->pv_idx;
   2147 		pv->pv_idx = pte_idx;
   2148 	}
   2149 	splx(s);
   2150 
   2151 }
   2152 
   2153 /* pmap_protect			INTERFACE
   2154  **
   2155  * Apply the given protection to the given virtual address range within
   2156  * the given map.
   2157  *
   2158  * It is ok for the protection applied to be stronger than what is
   2159  * specified.  We use this to our advantage when the given map has no
   2160  * mapping for the virtual address.  By skipping a page when this
   2161  * is discovered, we are effectively applying a protection of VM_PROT_NONE,
   2162  * and therefore do not need to map the page just to apply a protection
   2163  * code.  Only pmap_enter() needs to create new mappings if they do not exist.
   2164  *
    2165  * XXX - This function could be sped up by using pmap_stroll() for initial
    2166  *       setup, and then manually walking the tables in the loop below.
   2167  */
   2168 void
   2169 pmap_protect(pmap, startva, endva, prot)
   2170 	pmap_t pmap;
   2171 	vm_offset_t startva, endva;
   2172 	vm_prot_t prot;
   2173 {
   2174 	boolean_t iscurpmap;
   2175 	int a_idx, b_idx, c_idx;
   2176 	a_tmgr_t *a_tbl;
   2177 	b_tmgr_t *b_tbl;
   2178 	c_tmgr_t *c_tbl;
   2179 	mmu_short_pte_t *pte;
   2180 
   2181 	if (pmap == NULL)
   2182 		return;
   2183 	if (pmap == pmap_kernel()) {
   2184 		pmap_protect_kernel(startva, endva, prot);
   2185 		return;
   2186 	}
   2187 
   2188 	/*
   2189 	 * In this particular pmap implementation, there are only three
   2190 	 * types of memory protection: 'all' (read/write/execute),
   2191 	 * 'read-only' (read/execute) and 'none' (no mapping.)
   2192 	 * It is not possible for us to treat 'executable' as a separate
   2193 	 * protection type.  Therefore, protection requests that seek to
   2194 	 * remove execute permission while retaining read or write, and those
   2195 	 * that make little sense (write-only for example) are ignored.
   2196 	 */
   2197 	switch (prot) {
   2198 		case VM_PROT_NONE:
   2199 			/*
   2200 			 * A request to apply the protection code of
   2201 			 * 'VM_PROT_NONE' is a synonym for pmap_remove().
   2202 			 */
   2203 			pmap_remove(pmap, startva, endva);
   2204 			return;
   2205 		case	VM_PROT_EXECUTE:
   2206 		case	VM_PROT_READ:
   2207 		case	VM_PROT_READ|VM_PROT_EXECUTE:
   2208 			/* continue */
   2209 			break;
   2210 		case	VM_PROT_WRITE:
   2211 		case	VM_PROT_WRITE|VM_PROT_READ:
   2212 		case	VM_PROT_WRITE|VM_PROT_EXECUTE:
   2213 		case	VM_PROT_ALL:
   2214 			/* None of these should happen in a sane system. */
   2215 			return;
   2216 	}
   2217 
   2218 	/*
   2219 	 * If the pmap has no A table, it has no mappings and therefore
   2220 	 * there is nothing to protect.
   2221 	 */
   2222 	if ((a_tbl = pmap->pm_a_tmgr) == NULL)
   2223 		return;
   2224 
   2225 	a_idx = MMU_TIA(startva);
   2226 	b_idx = MMU_TIB(startva);
   2227 	c_idx = MMU_TIC(startva);
    2228 	b_tbl = NULL; c_tbl = NULL;
   2229 
   2230 	iscurpmap = (pmap == current_pmap());
   2231 	while (startva < endva) {
   2232 		if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
   2233 		  if (b_tbl == NULL) {
   2234 		    b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
   2235 		    b_tbl = mmu_ptov((vm_offset_t) b_tbl);
   2236 		    b_tbl = mmuB2tmgr((mmu_short_dte_t *) b_tbl);
   2237 		  }
   2238 		  if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
   2239 		    if (c_tbl == NULL) {
   2240 		      c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
   2241 		      c_tbl = mmu_ptov((vm_offset_t) c_tbl);
   2242 		      c_tbl = mmuC2tmgr((mmu_short_pte_t *) c_tbl);
   2243 		    }
   2244 		    if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
   2245 		      pte = &c_tbl->ct_dtbl[c_idx];
   2246 		      /* make the mapping read-only */
   2247 		      pte->attr.raw |= MMU_SHORT_PTE_WP;
   2248 		      /*
   2249 		       * If we just modified the current address space,
   2250 		       * flush any translations for the modified page from
   2251 		       * the translation cache and any data from it in the
   2252 		       * data cache.
   2253 		       */
   2254 		      if (iscurpmap)
   2255 		          TBIS(startva);
   2256 		    }
   2257 		    startva += NBPG;
   2258 
   2259 		    if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
   2260 		      c_tbl = NULL;
   2261 		      c_idx = 0;
   2262 		      if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
   2263 		        b_tbl = NULL;
   2264 		        b_idx = 0;
   2265 		      }
   2266 		    }
   2267 		  } else { /* C table wasn't valid */
   2268 		    c_tbl = NULL;
   2269 		    c_idx = 0;
   2270 		    startva += MMU_TIB_RANGE;
   2271 		    if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
   2272 		      b_tbl = NULL;
   2273 		      b_idx = 0;
   2274 		    }
   2275 		  } /* C table */
   2276 		} else { /* B table wasn't valid */
   2277 		  b_tbl = NULL;
   2278 		  b_idx = 0;
   2279 		  startva += MMU_TIA_RANGE;
   2280 		  a_idx++;
   2281 		} /* B table */
   2282 	}
   2283 }
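
         /*
          * A note on the skipping above (a sketch; range sizes assume the
          * 7/6/6-bit A/B/C index split described at the top of this file,
          * with 8KB pages): an invalid B descriptor lets the loop advance
          * startva by MMU_TIB_RANGE (512KB, one C table's worth of address
          * space) in a single step, and an invalid A descriptor skips
          * MMU_TIA_RANGE (32MB) at a time, rather than testing every page.
          */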
   2284 
   2285 /* pmap_protect_kernel			INTERNAL
   2286  **
   2287  * Apply the given protection code to a kernel address range.
   2288  */
   2289 void
   2290 pmap_protect_kernel(startva, endva, prot)
   2291 	vm_offset_t startva, endva;
   2292 	vm_prot_t prot;
   2293 {
   2294 	vm_offset_t va;
   2295 	mmu_short_pte_t *pte;
   2296 
   2297 	pte = &kernCbase[(unsigned long) _btop(startva - KERNBASE)];
   2298 	for (va = startva; va < endva; va += NBPG, pte++) {
   2299 		if (MMU_VALID_DT(*pte)) {
   2300 		    switch (prot) {
   2301 		        case VM_PROT_ALL:
   2302 		            break;
   2303 		        case VM_PROT_EXECUTE:
   2304 		        case VM_PROT_READ:
   2305 		        case VM_PROT_READ|VM_PROT_EXECUTE:
   2306 		            pte->attr.raw |= MMU_SHORT_PTE_WP;
   2307 		            break;
   2308 		        case VM_PROT_NONE:
   2309 		            /* this is an alias for 'pmap_remove_kernel' */
   2310 		            pmap_remove_pte(pte);
   2311 		            break;
   2312 		        default:
   2313 		            break;
   2314 		    }
   2315 		    /*
   2316 		     * since this is the kernel, immediately flush any cached
   2317 		     * descriptors for this address.
   2318 		     */
   2319 		    TBIS(va);
   2320 		}
   2321 	}
   2322 }
   2323 
   2324 /* pmap_change_wiring			INTERFACE
   2325  **
   2326  * Changes the wiring of the specified page.
   2327  *
   2328  * This function is called from vm_fault.c to unwire
   2329  * a mapping.  It really should be called 'pmap_unwire'
   2330  * because it is never asked to do anything but remove
   2331  * wirings.
   2332  */
   2333 void
   2334 pmap_change_wiring(pmap, va, wire)
   2335 	pmap_t pmap;
   2336 	vm_offset_t va;
   2337 	boolean_t wire;
   2338 {
   2339 	int a_idx, b_idx, c_idx;
   2340 	a_tmgr_t *a_tbl;
   2341 	b_tmgr_t *b_tbl;
   2342 	c_tmgr_t *c_tbl;
   2343 	mmu_short_pte_t *pte;
   2344 
   2345 	/* Kernel mappings always remain wired. */
   2346 	if (pmap == pmap_kernel())
   2347 		return;
   2348 
   2349 #ifdef	PMAP_DEBUG
   2350 	if (wire == TRUE)
   2351 		panic("pmap_change_wiring: wire requested.");
   2352 #endif
   2353 
   2354 	/*
   2355 	 * Walk through the tables.  If the walk terminates without
   2356 	 * a valid PTE then the address wasn't wired in the first place.
   2357 	 * Return immediately.
   2358 	 */
   2359 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
   2360 		&b_idx, &c_idx) == FALSE)
   2361 		return;
   2362 
   2363 
   2364 	/* Is the PTE wired?  If not, return. */
   2365 	if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
   2366 		return;
   2367 
   2368 	/* Remove the wiring bit. */
   2369 	pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
   2370 
   2371 	/*
   2372 	 * Decrement the wired entry count in the C table.
   2373 	 * If it reaches zero the following things happen:
   2374 	 * 1. The table no longer has any wired entries and is considered
   2375 	 *    unwired.
   2376 	 * 2. It is placed on the available queue.
   2377 	 * 3. The parent table's wired entry count is decremented.
   2378 	 * 4. If it reaches zero, this process repeats at step 1 and
   2379 	 *    stops at after reaching the A table.
   2380 	 */
   2381 	if (--c_tbl->ct_wcnt == 0) {
   2382 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
   2383 		if (--b_tbl->bt_wcnt == 0) {
   2384 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
   2385 			if (--a_tbl->at_wcnt == 0) {
   2386 				TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
   2387 			}
   2388 		}
   2389 	}
   2390 }
   2391 
   2392 /* pmap_pageable			INTERFACE
   2393  **
   2394  * Make the specified range of addresses within the given pmap,
    2395  * 'pageable' or 'not-pageable'.  A non-pageable (wired) page must
    2396  * not cause any faults when referenced.  A pageable page may.
   2397  *
   2398  * This routine is only advisory.  The VM system will call pmap_enter()
   2399  * to wire or unwire pages that are going to be made pageable before calling
   2400  * this function.  By the time this routine is called, everything that needs
   2401  * to be done has already been done.
   2402  */
   2403 void
   2404 pmap_pageable(pmap, start, end, pageable)
   2405 	pmap_t pmap;
   2406 	vm_offset_t start, end;
   2407 	boolean_t pageable;
   2408 {
   2409 	/* not implemented. */
   2410 }
   2411 
   2412 /* pmap_copy				INTERFACE
   2413  **
   2414  * Copy the mappings of a range of addresses in one pmap, into
   2415  * the destination address of another.
   2416  *
   2417  * This routine is advisory.  Should we one day decide that MMU tables
   2418  * may be shared by more than one pmap, this function should be used to
   2419  * link them together.  Until that day however, we do nothing.
   2420  */
   2421 void
   2422 pmap_copy(pmap_a, pmap_b, dst, len, src)
   2423 	pmap_t pmap_a, pmap_b;
   2424 	vm_offset_t dst;
   2425 	vm_size_t   len;
   2426 	vm_offset_t src;
   2427 {
   2428 	/* not implemented. */
   2429 }
   2430 
   2431 /* pmap_copy_page			INTERFACE
   2432  **
   2433  * Copy the contents of one physical page into another.
   2434  *
   2435  * This function makes use of two virtual pages allocated in pmap_bootstrap()
   2436  * to map the two specified physical pages into the kernel address space.
   2437  *
   2438  * Note: We could use the transparent translation registers to make the
   2439  * mappings.  If we do so, be sure to disable interrupts before using them.
   2440  */
   2441 void
   2442 pmap_copy_page(srcpa, dstpa)
   2443 	vm_offset_t srcpa, dstpa;
   2444 {
   2445 	vm_offset_t srcva, dstva;
   2446 	int s;
   2447 
   2448 	srcva = tmp_vpages[0];
   2449 	dstva = tmp_vpages[1];
   2450 
   2451 	s = splimp();
   2452 	if (tmp_vpages_inuse++)
   2453 		panic("pmap_copy_page: temporary vpages are in use.");
   2454 
    2455 	/* Map pages as non-cacheable to avoid cache pollution? */
   2456 	pmap_enter_kernel(srcva, srcpa, VM_PROT_READ);
   2457 	pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
   2458 
   2459 	/* Hand-optimized version of bcopy(src, dst, NBPG) */
   2460 	copypage((char *) srcva, (char *) dstva);
   2461 
   2462 	pmap_remove_kernel(srcva, srcva + NBPG);
   2463 	pmap_remove_kernel(dstva, dstva + NBPG);
   2464 
   2465 	--tmp_vpages_inuse;
   2466 	splx(s);
   2467 }
   2468 
   2469 /* pmap_zero_page			INTERFACE
   2470  **
   2471  * Zero the contents of the specified physical page.
   2472  *
    2473  * Uses one of the virtual pages allocated in pmap_bootstrap()
   2474  * to map the specified page into the kernel address space.
   2475  */
   2476 void
   2477 pmap_zero_page(dstpa)
   2478 	vm_offset_t dstpa;
   2479 {
   2480 	vm_offset_t dstva;
   2481 	int s;
   2482 
   2483 	dstva = tmp_vpages[1];
   2484 	s = splimp();
   2485 	if (tmp_vpages_inuse)
   2486 		panic("pmap_zero_page: temporary vpages are in use.");
   2487 	tmp_vpages_inuse++;
   2488 
   2489 	/* The comments in pmap_copy_page() above apply here also. */
   2490 	pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
   2491 
   2492 	/* Hand-optimized version of bzero(ptr, NBPG) */
   2493 	zeropage((char *) dstva);
   2494 
   2495 #if 0
   2496 	/* XXX - See comment above about the PV problem. */
   2497 	pmap_remove_kernel(dstva, dstva + NBPG);
   2498 #endif
   2499 
   2500 	--tmp_vpages_inuse;
   2501 	splx(s);
   2502 }
   2503 
   2504 /* pmap_collect			INTERFACE
   2505  **
   2506  * Called from the VM system when we are about to swap out
   2507  * the process using this pmap.  This should give up any
   2508  * resources held here, including all its MMU tables.
   2509  */
   2510 void
   2511 pmap_collect(pmap)
   2512 	pmap_t pmap;
   2513 {
   2514 	/* XXX - todo... */
   2515 }
   2516 
   2517 /* pmap_create			INTERFACE
   2518  **
   2519  * Create and return a pmap structure.
   2520  */
   2521 pmap_t
   2522 pmap_create(size)
   2523 	vm_size_t size;
   2524 {
   2525 	pmap_t	pmap;
   2526 
   2527 	if (size)
   2528 		return NULL;
   2529 
   2530 	pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
   2531 	pmap_pinit(pmap);
   2532 
   2533 	return pmap;
   2534 }
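
         /*
          * Note on the 'size' check above: a non-zero size requests a
          * software-only map, for which no hardware page tables are needed,
          * so returning NULL is the expected answer (see also the comment
          * about software maps in pmap_remove() below).
          */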
   2535 
   2536 /* pmap_pinit			INTERNAL
   2537  **
   2538  * Initialize a pmap structure.
   2539  */
   2540 void
   2541 pmap_pinit(pmap)
   2542 	pmap_t pmap;
   2543 {
   2544 	bzero(pmap, sizeof(struct pmap));
   2545 	pmap->pm_a_tmgr = NULL;
   2546 	pmap->pm_a_phys = kernAphys;
   2547 }
   2548 
   2549 /* pmap_release				INTERFACE
   2550  **
   2551  * Release any resources held by the given pmap.
   2552  *
   2553  * This is the reverse analog to pmap_pinit.  It does not
   2554  * necessarily mean for the pmap structure to be deallocated,
   2555  * as in pmap_destroy.
   2556  */
   2557 void
   2558 pmap_release(pmap)
   2559 	pmap_t pmap;
   2560 {
   2561 	/*
   2562 	 * As long as the pmap contains no mappings,
   2563 	 * which always should be the case whenever
   2564 	 * this function is called, there really should
   2565 	 * be nothing to do.
   2566 	 */
   2567 #ifdef	PMAP_DEBUG
   2568 	if (pmap == NULL)
   2569 		return;
   2570 	if (pmap == pmap_kernel())
   2571 		panic("pmap_release: kernel pmap");
   2572 #endif
   2573 	/*
   2574 	 * XXX - If this pmap has an A table, give it back.
   2575 	 * The pmap SHOULD be empty by now, and pmap_remove
   2576 	 * should have already given back the A table...
   2577 	 * However, I see:  pmap->pm_a_tmgr->at_ecnt == 1
   2578 	 * at this point, which means some mapping was not
   2579 	 * removed when it should have been. -gwr
   2580 	 */
   2581 	if (pmap->pm_a_tmgr != NULL) {
   2582 		/* First make sure we are not using it! */
   2583 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
   2584 			kernel_crp.rp_addr = kernAphys;
   2585 			loadcrp(&kernel_crp);
   2586 		}
   2587 #ifdef	PMAP_DEBUG /* XXX - todo! */
   2588 		/* XXX - Now complain... */
   2589 		printf("pmap_release: still have table\n");
   2590 		Debugger();
   2591 #endif
   2592 		free_a_table(pmap->pm_a_tmgr, TRUE);
   2593 		pmap->pm_a_tmgr = NULL;
   2594 		pmap->pm_a_phys = kernAphys;
   2595 	}
   2596 }
   2597 
   2598 /* pmap_reference			INTERFACE
   2599  **
   2600  * Increment the reference count of a pmap.
   2601  */
   2602 void
   2603 pmap_reference(pmap)
   2604 	pmap_t pmap;
   2605 {
   2606 	if (pmap == NULL)
   2607 		return;
   2608 
   2609 	/* pmap_lock(pmap); */
   2610 	pmap->pm_refcount++;
   2611 	/* pmap_unlock(pmap); */
   2612 }
   2613 
   2614 /* pmap_dereference			INTERNAL
   2615  **
   2616  * Decrease the reference count on the given pmap
   2617  * by one and return the current count.
   2618  */
   2619 int
   2620 pmap_dereference(pmap)
   2621 	pmap_t pmap;
   2622 {
   2623 	int rtn;
   2624 
   2625 	if (pmap == NULL)
   2626 		return 0;
   2627 
   2628 	/* pmap_lock(pmap); */
   2629 	rtn = --pmap->pm_refcount;
   2630 	/* pmap_unlock(pmap); */
   2631 
   2632 	return rtn;
   2633 }
   2634 
   2635 /* pmap_destroy			INTERFACE
   2636  **
   2637  * Decrement a pmap's reference count and delete
   2638  * the pmap if it becomes zero.  Will be called
   2639  * only after all mappings have been removed.
   2640  */
   2641 void
   2642 pmap_destroy(pmap)
   2643 	pmap_t pmap;
   2644 {
   2645 	if (pmap == NULL)
   2646 		return;
   2647 	if (pmap == &kernel_pmap)
   2648 		panic("pmap_destroy: kernel_pmap!");
   2649 	if (pmap_dereference(pmap) == 0) {
   2650 		pmap_release(pmap);
   2651 		free(pmap, M_VMPMAP);
   2652 	}
   2653 }
   2654 
   2655 /* pmap_is_referenced			INTERFACE
   2656  **
   2657  * Determine if the given physical page has been
    2658  * referenced (read from or written to.)
   2659  */
   2660 boolean_t
   2661 pmap_is_referenced(pa)
   2662 	vm_offset_t pa;
   2663 {
   2664 	pv_t      *pv;
   2665 	int       idx, s;
   2666 
   2667 	if (!pv_initialized)
   2668 		return FALSE;
    2669 	/* XXX - this may be unnecessary. */
   2670 	if (!is_managed(pa))
   2671 		return FALSE;
   2672 
   2673 	pv = pa2pv(pa);
   2674 	/*
   2675 	 * Check the flags on the pv head.  If they are set,
   2676 	 * return immediately.  Otherwise a search must be done.
   2677 	 */
   2678 	if (pv->pv_flags & PV_FLAGS_USED)
   2679 		return TRUE;
   2680 	else {
   2681 		s = splimp();
   2682 		/*
   2683 		 * Search through all pv elements pointing
   2684 		 * to this page and query their reference bits
   2685 		 */
   2686 		for (idx = pv->pv_idx; idx != PVE_EOL; idx =
   2687 			pvebase[idx].pve_next)
   2688 			if (MMU_PTE_USED(kernCbase[idx])) {
   2689 				splx(s);
   2690 				return TRUE;
   2691 			}
   2692 		splx(s);
   2693 	}
   2694 
   2695 	return FALSE;
   2696 }
   2697 
   2698 /* pmap_is_modified			INTERFACE
   2699  **
   2700  * Determine if the given physical page has been
   2701  * modified (written to.)
   2702  */
   2703 boolean_t
   2704 pmap_is_modified(pa)
   2705 	vm_offset_t pa;
   2706 {
   2707 	pv_t      *pv;
   2708 	int       idx, s;
   2709 
   2710 	if (!pv_initialized)
   2711 		return FALSE;
    2712 	/* XXX - this may be unnecessary. */
   2713 	if (!is_managed(pa))
   2714 		return FALSE;
   2715 
   2716 	/* see comments in pmap_is_referenced() */
   2717 	pv = pa2pv(pa);
   2718 	if (pv->pv_flags & PV_FLAGS_MDFY) {
   2719 		return TRUE;
   2720 	} else {
   2721 		s = splimp();
   2722 		for (idx = pv->pv_idx; idx != PVE_EOL; idx =
   2723 			pvebase[idx].pve_next)
   2724 			if (MMU_PTE_MODIFIED(kernCbase[idx])) {
   2725 				splx(s);
   2726 				return TRUE;
   2727 			}
   2728 		splx(s);
   2729 	}
   2730 
   2731 	return FALSE;
   2732 }
   2733 
   2734 /* pmap_page_protect			INTERFACE
   2735  **
   2736  * Applies the given protection to all mappings to the given
   2737  * physical page.
   2738  */
   2739 void
   2740 pmap_page_protect(pa, prot)
   2741 	vm_offset_t pa;
   2742 	vm_prot_t prot;
   2743 {
   2744 	pv_t      *pv;
   2745 	int       idx, s;
   2746 	vm_offset_t va;
   2747 	struct mmu_short_pte_struct *pte;
   2748 	c_tmgr_t  *c_tbl;
   2749 	pmap_t    pmap, curpmap;
   2750 
   2751 	if (!is_managed(pa))
   2752 		return;
   2753 
   2754 	curpmap = current_pmap();
   2755 	pv = pa2pv(pa);
   2756 	s = splimp();
   2757 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
   2758 		pte = &kernCbase[idx];
   2759 		switch (prot) {
   2760 			case VM_PROT_ALL:
   2761 				/* do nothing */
   2762 				break;
   2763 			case VM_PROT_EXECUTE:
   2764 			case VM_PROT_READ:
   2765 			case VM_PROT_READ|VM_PROT_EXECUTE:
   2766 				pte->attr.raw |= MMU_SHORT_PTE_WP;
   2767 
   2768 				/*
   2769 				 * Determine the virtual address mapped by
   2770 				 * the PTE and flush ATC entries if necessary.
   2771 				 */
   2772 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
   2773 				if (pmap == curpmap || pmap == pmap_kernel())
   2774 					TBIS(va);
   2775 				break;
   2776 			case VM_PROT_NONE:
   2777 				/* Save the mod/ref bits. */
   2778 				pv->pv_flags |= pte->attr.raw;
   2779 				/* Invalidate the PTE. */
   2780 				pte->attr.raw = MMU_DT_INVALID;
   2781 
   2782 				/*
   2783 				 * Update table counts.  And flush ATC entries
   2784 				 * if necessary.
   2785 				 */
   2786 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
   2787 
   2788 				/*
   2789 				 * If the PTE belongs to the kernel map,
   2790 				 * be sure to flush the page it maps.
   2791 				 */
   2792 				if (pmap == pmap_kernel()) {
   2793 					TBIS(va);
   2794 				} else {
   2795 					/*
   2796 					 * The PTE belongs to a user map.
   2797 					 * update the entry count in the C
   2798 					 * table to which it belongs and flush
   2799 					 * the ATC if the mapping belongs to
   2800 					 * the current pmap.
   2801 					 */
   2802 					c_tbl->ct_ecnt--;
   2803 					if (pmap == curpmap)
   2804 						TBIS(va);
   2805 				}
   2806 				break;
   2807 			default:
   2808 				break;
   2809 		}
   2810 	}
   2811 
   2812 	/*
   2813 	 * If the protection code indicates that all mappings to the page
   2814 	 * be removed, truncate the PV list to zero entries.
   2815 	 */
   2816 	if (prot == VM_PROT_NONE)
   2817 		pv->pv_idx = PVE_EOL;
   2818 	splx(s);
   2819 }
   2820 
   2821 /* pmap_get_pteinfo		INTERNAL
   2822  **
   2823  * Called internally to find the pmap and virtual address within that
   2824  * map to which the pte at the given index maps.  Also includes the PTE's C
   2825  * table manager.
   2826  *
   2827  * Returns the pmap in the argument provided, and the virtual address
   2828  * by return value.
   2829  */
   2830 vm_offset_t
   2831 pmap_get_pteinfo(idx, pmap, tbl)
   2832 	u_int idx;
   2833 	pmap_t *pmap;
   2834 	c_tmgr_t **tbl;
   2835 {
   2836 	a_tmgr_t    *a_tbl;
   2837 	b_tmgr_t    *b_tbl;
   2838 	c_tmgr_t    *c_tbl;
   2839 	vm_offset_t     va = 0;
   2840 
   2841 	/*
   2842 	 * Determine if the PTE is a kernel PTE or a user PTE.
   2843 	 */
   2844 	if (idx >= NUM_KERN_PTES) {
   2845 		/*
   2846 		 * The PTE belongs to a user mapping.
   2847 		 * Find the virtual address by decoding table indices.
    2848 		 * Each successive decode reveals the address bits in
    2849 		 * least- to most-significant order.
   2850 		 *
   2851 		 * 31                              0
   2852 		 * +-------------------------------+
   2853 		 * |AAAAAAABBBBBBCCCCCC............|
   2854 		 * +-------------------------------+
   2855 		 */
   2856 		/* XXX: c_tbl = mmuC2tmgr(pte); */
   2857 		/* XXX: Would like an inline for this to validate idx... */
   2858 		c_tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
   2859 		b_tbl = c_tbl->ct_parent;
   2860 		a_tbl = b_tbl->bt_parent;
   2861 		*pmap = a_tbl->at_parent;
   2862 		*tbl = c_tbl;
   2863 
   2864 		/* Start with the 'C' bits, then add B and A... */
   2865 		va |= ((idx % MMU_C_TBL_SIZE) << MMU_TIC_SHIFT);
   2866 		va |= (c_tbl->ct_pidx << MMU_TIB_SHIFT);
   2867 		va |= (b_tbl->bt_pidx << MMU_TIA_SHIFT);
   2868 	} else {
   2869 		/*
   2870 		 * The PTE belongs to the kernel map.
   2871 		 */
   2872 		*pmap = pmap_kernel();
   2873 
   2874 		va = _ptob(idx);
   2875 		va += KERNBASE;
   2876 	}
   2877 
   2878 	return va;
   2879 }
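
         /*
          * Worked example (a sketch; shift values assume the 7/6/6-bit
          * A/B/C index split with 8KB pages, i.e. MMU_TIA_SHIFT == 25,
          * MMU_TIB_SHIFT == 19 and MMU_TIC_SHIFT == 13): for
          * b_tbl->bt_pidx == 1, c_tbl->ct_pidx == 2 and a C index of 3,
          * the decoding above reconstructs
          *
          *	va = (3 << 13) | (2 << 19) | (1 << 25) == 0x02106000
          */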
   2880 
   2881 /* pmap_clear_modify			INTERFACE
   2882  **
   2883  * Clear the modification bit on the page at the specified
   2884  * physical address.
   2885  *
   2886  */
   2887 void
   2888 pmap_clear_modify(pa)
   2889 	vm_offset_t pa;
   2890 {
   2891 	if (!is_managed(pa))
   2892 		return;
   2893 	pmap_clear_pv(pa, PV_FLAGS_MDFY);
   2894 }
   2895 
   2896 /* pmap_clear_reference			INTERFACE
   2897  **
   2898  * Clear the referenced bit on the page at the specified
   2899  * physical address.
   2900  */
   2901 void
   2902 pmap_clear_reference(pa)
   2903 	vm_offset_t pa;
   2904 {
   2905 	if (!is_managed(pa))
   2906 		return;
   2907 	pmap_clear_pv(pa, PV_FLAGS_USED);
   2908 }
   2909 
   2910 /* pmap_clear_pv			INTERNAL
   2911  **
   2912  * Clears the specified flag from the specified physical address.
   2913  * (Used by pmap_clear_modify() and pmap_clear_reference().)
   2914  *
   2915  * Flag is one of:
   2916  *   PV_FLAGS_MDFY - Page modified bit.
   2917  *   PV_FLAGS_USED - Page used (referenced) bit.
   2918  *
   2919  * This routine must not only clear the flag on the pv list
   2920  * head.  It must also clear the bit on every pte in the pv
   2921  * list associated with the address.
   2922  */
   2923 void
   2924 pmap_clear_pv(pa, flag)
   2925 	vm_offset_t pa;
   2926 	int flag;
   2927 {
   2928 	pv_t      *pv;
   2929 	int       idx, s;
   2930 	vm_offset_t     va;
   2931 	pmap_t          pmap;
   2932 	mmu_short_pte_t *pte;
   2933 	c_tmgr_t        *c_tbl;
   2934 
   2935 	pv = pa2pv(pa);
   2936 
   2937 	s = splimp();
   2938 	pv->pv_flags &= ~(flag);
   2939 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
   2940 		pte = &kernCbase[idx];
   2941 		pte->attr.raw &= ~(flag);
   2942 		/*
   2943 		 * The MC68030 MMU will not set the modified or
   2944 		 * referenced bits on any MMU tables for which it has
    2945 		 * a cached descriptor with its modify bit set.  To ensure
   2946 		 * that it will modify these bits on the PTE during the next
   2947 		 * time it is written to or read from, we must flush it from
   2948 		 * the ATC.
   2949 		 *
   2950 		 * Ordinarily it is only necessary to flush the descriptor
   2951 		 * if it is used in the current address space.  But since I
   2952 		 * am not sure that there will always be a notion of
   2953 		 * 'the current address space' when this function is called,
   2954 		 * I will skip the test and always flush the address.  It
   2955 		 * does no harm.
   2956 		 */
   2957 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
   2958 		TBIS(va);
   2959 	}
   2960 	splx(s);
   2961 }
   2962 
   2963 /* pmap_extract			INTERFACE
   2964  **
   2965  * Return the physical address mapped by the virtual address
   2966  * in the specified pmap or 0 if it is not known.
   2967  *
   2968  * Note: this function should also apply an exclusive lock
   2969  * on the pmap system during its duration.
   2970  */
   2971 vm_offset_t
   2972 pmap_extract(pmap, va)
   2973 	pmap_t      pmap;
   2974 	vm_offset_t va;
   2975 {
   2976 	int a_idx, b_idx, pte_idx;
   2977 	a_tmgr_t	*a_tbl;
   2978 	b_tmgr_t	*b_tbl;
   2979 	c_tmgr_t	*c_tbl;
   2980 	mmu_short_pte_t	*c_pte;
   2981 
   2982 	if (pmap == pmap_kernel())
   2983 		return pmap_extract_kernel(va);
   2984 	if (pmap == NULL)
   2985 		return 0;
   2986 
   2987 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
   2988 		&c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
   2989 		return 0;
   2990 
   2991 	if (!MMU_VALID_DT(*c_pte))
   2992 		return 0;
   2993 
   2994 	return (MMU_PTE_PA(*c_pte));
   2995 }
   2996 
   2997 /* pmap_extract_kernel		INTERNAL
   2998  **
   2999  * Extract a translation from the kernel address space.
   3000  */
   3001 vm_offset_t
   3002 pmap_extract_kernel(va)
   3003 	vm_offset_t va;
   3004 {
   3005 	mmu_short_pte_t *pte;
   3006 
   3007 	pte = &kernCbase[(u_int) _btop(va - KERNBASE)];
   3008 	return MMU_PTE_PA(*pte);
   3009 }
   3010 
   3011 /* pmap_remove_kernel		INTERNAL
   3012  **
   3013  * Remove the mapping of a range of virtual addresses from the kernel map.
   3014  * The arguments are already page-aligned.
   3015  */
   3016 void
   3017 pmap_remove_kernel(sva, eva)
   3018 	vm_offset_t sva;
   3019 	vm_offset_t eva;
   3020 {
   3021 	int idx, eidx;
   3022 
   3023 #ifdef	PMAP_DEBUG
   3024 	if ((sva & PGOFSET) || (eva & PGOFSET))
   3025 		panic("pmap_remove_kernel: alignment");
   3026 #endif
   3027 
   3028 	idx  = _btop(sva - KERNBASE);
   3029 	eidx = _btop(eva - KERNBASE);
   3030 
   3031 	while (idx < eidx) {
   3032 		pmap_remove_pte(&kernCbase[idx++]);
   3033 		TBIS(sva);
   3034 		sva += NBPG;
   3035 	}
   3036 }
   3037 
   3038 /* pmap_remove			INTERFACE
   3039  **
   3040  * Remove the mapping of a range of virtual addresses from the given pmap.
   3041  *
   3042  * If the range contains any wired entries, this function will probably create
   3043  * disaster.
   3044  */
   3045 void
   3046 pmap_remove(pmap, start, end)
   3047 	pmap_t pmap;
   3048 	vm_offset_t start;
   3049 	vm_offset_t end;
   3050 {
   3051 
   3052 	if (pmap == pmap_kernel()) {
   3053 		pmap_remove_kernel(start, end);
   3054 		return;
   3055 	}
   3056 
   3057 	/*
	 * XXX - Temporary(?) test to prevent a panic caused by
	 * vm_alloc_with_pager() handing us a software map (i.e. NULL)
	 * to remove because it couldn't get backing store.
	 * (I guess.)
   3062 	 */
   3063 	if (pmap == NULL)
   3064 		return;
   3065 
   3066 	/*
   3067 	 * If the pmap doesn't have an A table of its own, it has no mappings
   3068 	 * that can be removed.
   3069 	 */
   3070 	if (pmap->pm_a_tmgr == NULL)
   3071 		return;
   3072 
   3073 	/*
   3074 	 * Remove the specified range from the pmap.  If the function
   3075 	 * returns true, the operation removed all the valid mappings
   3076 	 * in the pmap and freed its A table.  If this happened to the
   3077 	 * currently loaded pmap, the MMU root pointer must be reloaded
   3078 	 * with the default 'kernel' map.
   3079 	 */
   3080 	if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
   3081 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
   3082 			kernel_crp.rp_addr = kernAphys;
   3083 			loadcrp(&kernel_crp);
   3084 			/* will do TLB flush below */
   3085 		}
   3086 		pmap->pm_a_tmgr = NULL;
   3087 		pmap->pm_a_phys = kernAphys;
   3088 	}
   3089 
   3090 	/*
   3091 	 * If we just modified the current address space,
   3092 	 * make sure to flush the MMU cache.
   3093 	 *
	 * XXX - this could be an unnecessarily large flush.
	 * XXX - Could decide, based on the size of the VA range
	 * to be removed, whether to flush "by pages" or "all";
	 * see the sketch following this function.
   3097 	 */
   3098 	if (pmap == current_pmap())
   3099 		TBIAU();
   3100 }
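
/*
 * A sketch (not compiled in) of the page-vs-all flush decision noted
 * in the XXX comment above.  The threshold of 16 pages is a made-up
 * tuning constant; TBIS() and TBIAU() are the flush primitives already
 * used by this pmap.
 *
 *	if ((end - start) <= 16 * NBPG) {
 *		vm_offset_t va;
 *
 *		for (va = start; va < end; va += NBPG)
 *			TBIS(va);
 *	} else
 *		TBIAU();
 */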
   3101 
   3102 /* pmap_remove_a			INTERNAL
   3103  **
 * This is the first in a set of three functions that remove a range
   3105  * of memory in the most efficient manner by removing the highest possible
   3106  * tables from the memory space.  This particular function attempts to remove
   3107  * as many B tables as it can, delegating the remaining fragmented ranges to
   3108  * pmap_remove_b().
   3109  *
   3110  * If the removal operation results in an empty A table, the function returns
   3111  * TRUE.
   3112  *
   3113  * It's ugly but will do for now.
   3114  */
   3115 boolean_t
   3116 pmap_remove_a(a_tbl, start, end)
   3117 	a_tmgr_t *a_tbl;
   3118 	vm_offset_t start;
   3119 	vm_offset_t end;
   3120 {
   3121 	boolean_t empty;
   3122 	int idx;
   3123 	vm_offset_t nstart, nend;
   3124 	b_tmgr_t *b_tbl;
   3125 	mmu_long_dte_t  *a_dte;
   3126 	mmu_short_dte_t *b_dte;
   3127 
   3128 	/*
	 * The following code works with what I call a 'granularity
	 * reduction algorithm'.  A range of addresses will always have
	 * the following properties, which are classified according to
	 * how the range relates to the size of the current granularity
	 * - an A table entry:
	 *
	 *            1 2       3 4
	 * -+---+---+---+---+---+---+---+-
	 * -+---+---+---+---+---+---+---+-
	 *
	 * A range will always start on a granularity boundary, illustrated
	 * by '+' signs in the table above, or it will start at some point
	 * in between granularity boundaries, as illustrated by point 1.
	 * The first step in removing a range of addresses is to remove the
	 * range between 1 and 2, the nearest granularity boundary.  This
	 * job is handled by the section of code governed by the
	 * 'if (start < nstart)' statement.
	 *
	 * A range will always encompass zero or more integral granules,
	 * illustrated by points 2 and 3.  Integral granules are easy to
	 * remove.  The removal of these granules is the second step, and
	 * is handled by the code block 'if (nstart < nend)'.
	 *
	 * Lastly, a range will always end on a granularity boundary,
	 * illustrated by point 3, or it will fall just beyond one, as
	 * illustrated by point 4.  The last step involves removing this
	 * trailing fragment and is handled by the code block
	 * 'if (nend < end)'.
   3156 	 */
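	/*
	 * Worked example (illustrative only): each A table entry spans
	 * MMU_TIA_RANGE bytes; assume for this example that
	 * MMU_TIA_RANGE == 0x2000000 (32MB).  For start = 0x1800000 and
	 * end = 0x6400000:
	 *
	 *	nstart = MMU_ROUND_UP_A(start) = 0x2000000
	 *	nend   = MMU_ROUND_A(end)      = 0x6000000
	 *
	 * The head fragment [0x1800000, 0x2000000) and the tail fragment
	 * [0x6000000, 0x6400000) are delegated to pmap_remove_b(), while
	 * the two whole granules in [0x2000000, 0x6000000) are removed by
	 * freeing their B tables outright.
	 */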
   3157 	nstart = MMU_ROUND_UP_A(start);
   3158 	nend = MMU_ROUND_A(end);
   3159 
   3160 	if (start < nstart) {
   3161 		/*
   3162 		 * This block is executed if the range starts between
   3163 		 * a granularity boundary.
   3164 		 *
   3165 		 * First find the DTE which is responsible for mapping
   3166 		 * the start of the range.
   3167 		 */
   3168 		idx = MMU_TIA(start);
   3169 		a_dte = &a_tbl->at_dtbl[idx];
   3170 
   3171 		/*
   3172 		 * If the DTE is valid then delegate the removal of the sub
   3173 		 * range to pmap_remove_b(), which can remove addresses at
   3174 		 * a finer granularity.
   3175 		 */
   3176 		if (MMU_VALID_DT(*a_dte)) {
   3177 			b_dte = mmu_ptov(a_dte->addr.raw);
   3178 			b_tbl = mmuB2tmgr(b_dte);
   3179 
   3180 			/*
			 * The sub range to be removed starts at the start
			 * of the full range we were asked to remove, and ends
			 * at the lesser of:
			 * 1. The end of the full range, -or-
			 * 2. The start of the range, rounded up to the
			 *    nearest granularity boundary (nstart).
   3187 			 */
   3188 			if (end < nstart)
   3189 				empty = pmap_remove_b(b_tbl, start, end);
   3190 			else
   3191 				empty = pmap_remove_b(b_tbl, start, nstart);
   3192 
   3193 			/*
   3194 			 * If the removal resulted in an empty B table,
   3195 			 * invalidate the DTE that points to it and decrement
   3196 			 * the valid entry count of the A table.
   3197 			 */
   3198 			if (empty) {
   3199 				a_dte->attr.raw = MMU_DT_INVALID;
   3200 				a_tbl->at_ecnt--;
   3201 			}
   3202 		}
   3203 		/*
		 * If the DTE is invalid, the address range is already
		 * nonexistent and can simply be skipped.
   3206 		 */
   3207 	}
   3208 	if (nstart < nend) {
   3209 		/*
		 * This block is executed if the range spans a whole number
		 * of granules (A table entries).
   3212 		 *
   3213 		 * First find the DTE which is responsible for mapping
   3214 		 * the start of the first granule involved.
   3215 		 */
   3216 		idx = MMU_TIA(nstart);
   3217 		a_dte = &a_tbl->at_dtbl[idx];
   3218 
   3219 		/*
   3220 		 * Remove entire sub-granules (B tables) one at a time,
   3221 		 * until reaching the end of the range.
   3222 		 */
   3223 		for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
   3224 			if (MMU_VALID_DT(*a_dte)) {
   3225 				/*
   3226 				 * Find the B table manager for the
   3227 				 * entry and free it.
   3228 				 */
   3229 				b_dte = mmu_ptov(a_dte->addr.raw);
   3230 				b_tbl = mmuB2tmgr(b_dte);
   3231 				free_b_table(b_tbl, TRUE);
   3232 
   3233 				/*
   3234 				 * Invalidate the DTE that points to the
   3235 				 * B table and decrement the valid entry
   3236 				 * count of the A table.
   3237 				 */
   3238 				a_dte->attr.raw = MMU_DT_INVALID;
   3239 				a_tbl->at_ecnt--;
   3240 			}
   3241 	}
   3242 	if (nend < end) {
   3243 		/*
   3244 		 * This block is executed if the range ends beyond a
   3245 		 * granularity boundary.
   3246 		 *
   3247 		 * First find the DTE which is responsible for mapping
   3248 		 * the start of the nearest (rounded down) granularity
   3249 		 * boundary.
   3250 		 */
   3251 		idx = MMU_TIA(nend);
   3252 		a_dte = &a_tbl->at_dtbl[idx];
   3253 
   3254 		/*
   3255 		 * If the DTE is valid then delegate the removal of the sub
   3256 		 * range to pmap_remove_b(), which can remove addresses at
   3257 		 * a finer granularity.
   3258 		 */
   3259 		if (MMU_VALID_DT(*a_dte)) {
   3260 			/*
   3261 			 * Find the B table manager for the entry
   3262 			 * and hand it to pmap_remove_b() along with
   3263 			 * the sub range.
   3264 			 */
   3265 			b_dte = mmu_ptov(a_dte->addr.raw);
   3266 			b_tbl = mmuB2tmgr(b_dte);
   3267 
   3268 			empty = pmap_remove_b(b_tbl, nend, end);
   3269 
   3270 			/*
   3271 			 * If the removal resulted in an empty B table,
   3272 			 * invalidate the DTE that points to it and decrement
   3273 			 * the valid entry count of the A table.
   3274 			 */
   3275 			if (empty) {
   3276 				a_dte->attr.raw = MMU_DT_INVALID;
   3277 				a_tbl->at_ecnt--;
   3278 			}
   3279 		}
   3280 	}
   3281 
   3282 	/*
   3283 	 * If there are no more entries in the A table, release it
   3284 	 * back to the available pool and return TRUE.
   3285 	 */
   3286 	if (a_tbl->at_ecnt == 0) {
   3287 		a_tbl->at_parent = NULL;
   3288 		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
   3289 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
   3290 		empty = TRUE;
   3291 	} else {
   3292 		empty = FALSE;
   3293 	}
   3294 
   3295 	return empty;
   3296 }
   3297 
   3298 /* pmap_remove_b			INTERNAL
   3299  **
   3300  * Remove a range of addresses from an address space, trying to remove entire
   3301  * C tables if possible.
   3302  *
   3303  * If the operation results in an empty B table, the function returns TRUE.
   3304  */
   3305 boolean_t
   3306 pmap_remove_b(b_tbl, start, end)
   3307 	b_tmgr_t *b_tbl;
   3308 	vm_offset_t start;
   3309 	vm_offset_t end;
   3310 {
   3311 	boolean_t empty;
   3312 	int idx;
   3313 	vm_offset_t nstart, nend, rstart;
   3314 	c_tmgr_t *c_tbl;
   3315 	mmu_short_dte_t  *b_dte;
   3316 	mmu_short_pte_t  *c_dte;
   3317 
   3319 	nstart = MMU_ROUND_UP_B(start);
   3320 	nend = MMU_ROUND_B(end);
   3321 
   3322 	if (start < nstart) {
   3323 		idx = MMU_TIB(start);
   3324 		b_dte = &b_tbl->bt_dtbl[idx];
   3325 		if (MMU_VALID_DT(*b_dte)) {
   3326 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
   3327 			c_tbl = mmuC2tmgr(c_dte);
   3328 			if (end < nstart)
   3329 				empty = pmap_remove_c(c_tbl, start, end);
   3330 			else
   3331 				empty = pmap_remove_c(c_tbl, start, nstart);
   3332 			if (empty) {
   3333 				b_dte->attr.raw = MMU_DT_INVALID;
   3334 				b_tbl->bt_ecnt--;
   3335 			}
   3336 		}
   3337 	}
   3338 	if (nstart < nend) {
   3339 		idx = MMU_TIB(nstart);
   3340 		b_dte = &b_tbl->bt_dtbl[idx];
   3341 		rstart = nstart;
   3342 		while (rstart < nend) {
   3343 			if (MMU_VALID_DT(*b_dte)) {
   3344 				c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
   3345 				c_tbl = mmuC2tmgr(c_dte);
   3346 				free_c_table(c_tbl, TRUE);
   3347 				b_dte->attr.raw = MMU_DT_INVALID;
   3348 				b_tbl->bt_ecnt--;
   3349 			}
   3350 			b_dte++;
   3351 			rstart += MMU_TIB_RANGE;
   3352 		}
   3353 	}
   3354 	if (nend < end) {
   3355 		idx = MMU_TIB(nend);
   3356 		b_dte = &b_tbl->bt_dtbl[idx];
   3357 		if (MMU_VALID_DT(*b_dte)) {
   3358 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
   3359 			c_tbl = mmuC2tmgr(c_dte);
   3360 			empty = pmap_remove_c(c_tbl, nend, end);
   3361 			if (empty) {
   3362 				b_dte->attr.raw = MMU_DT_INVALID;
   3363 				b_tbl->bt_ecnt--;
   3364 			}
   3365 		}
   3366 	}
   3367 
   3368 	if (b_tbl->bt_ecnt == 0) {
   3369 		b_tbl->bt_parent = NULL;
   3370 		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   3371 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
   3372 		empty = TRUE;
   3373 	} else {
   3374 		empty = FALSE;
   3375 	}
   3376 
   3377 	return empty;
   3378 }
   3379 
   3380 /* pmap_remove_c			INTERNAL
   3381  **
   3382  * Remove a range of addresses from the given C table.
   3383  */
   3384 boolean_t
   3385 pmap_remove_c(c_tbl, start, end)
   3386 	c_tmgr_t *c_tbl;
   3387 	vm_offset_t start;
   3388 	vm_offset_t end;
   3389 {
   3390 	boolean_t empty;
   3391 	int idx;
   3392 	mmu_short_pte_t *c_pte;
   3393 
   3394 	idx = MMU_TIC(start);
   3395 	c_pte = &c_tbl->ct_dtbl[idx];
	for (; start < end; start += MMU_PAGE_SIZE, c_pte++) {
   3397 		if (MMU_VALID_DT(*c_pte)) {
   3398 			pmap_remove_pte(c_pte);
   3399 			c_tbl->ct_ecnt--;
   3400 		}
   3401 	}
   3402 
   3403 	if (c_tbl->ct_ecnt == 0) {
   3404 		c_tbl->ct_parent = NULL;
   3405 		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   3406 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
   3407 		empty = TRUE;
   3408 	} else {
   3409 		empty = FALSE;
   3410 	}
   3411 
   3412 	return empty;
   3413 }
   3414 
   3415 /* is_managed				INTERNAL
   3416  **
   3417  * Determine if the given physical address is managed by the PV system.
   3418  * Note that this logic assumes that no one will ask for the status of
   3419  * addresses which lie in-between the memory banks on the 3/80.  If they
   3420  * do so, it will falsely report that it is managed.
   3421  *
   3422  * Note: A "managed" address is one that was reported to the VM system as
   3423  * a "usable page" during system startup.  As such, the VM system expects the
 * pmap module to keep accurate track of the usage of those pages.
   3425  * Any page not given to the VM system at startup does not exist (as far as
   3426  * the VM system is concerned) and is therefore "unmanaged."  Examples are
   3427  * those pages which belong to the ROM monitor and the memory allocated before
   3428  * the VM system was started.
   3429  */
   3430 boolean_t
   3431 is_managed(pa)
   3432 	vm_offset_t pa;
   3433 {
   3434 	if (pa >= avail_start && pa < avail_end)
   3435 		return TRUE;
   3436 	else
   3437 		return FALSE;
   3438 }
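
/*
 * For example (illustrative only): a page the VM system was given at
 * startup, such as the one at avail_start, is managed; the pages owned
 * by the ROM monitor and the boot-time allocations below avail_start
 * are not.
 */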
   3439 
   3440 /* pmap_bootstrap_alloc			INTERNAL
   3441  **
   3442  * Used internally for memory allocation at startup when malloc is not
   3443  * available.  This code will fail once it crosses the first memory
   3444  * bank boundary on the 3/80.  Hopefully by then however, the VM system
   3445  * will be in charge of allocation.
   3446  */
   3447 void *
   3448 pmap_bootstrap_alloc(size)
   3449 	int size;
   3450 {
   3451 	void *rtn;
   3452 
   3453 #ifdef	PMAP_DEBUG
   3454 	if (bootstrap_alloc_enabled == FALSE) {
   3455 		mon_printf("pmap_bootstrap_alloc: disabled\n");
   3456 		sunmon_abort();
   3457 	}
   3458 #endif
   3459 
   3460 	rtn = (void *) virtual_avail;
   3461 	virtual_avail += size;
   3462 
   3463 #ifdef	PMAP_DEBUG
   3464 	if (virtual_avail > virtual_contig_end) {
   3465 		mon_printf("pmap_bootstrap_alloc: out of mem\n");
   3466 		sunmon_abort();
   3467 	}
   3468 #endif
   3469 
   3470 	return rtn;
   3471 }
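
/*
 * A hypothetical usage sketch (not taken from this file): early
 * bootstrap code might carve out a page-aligned table like this.
 *
 *	mmu_long_dte_t *tbl;
 *
 *	pmap_bootstrap_aalign(NBPG);
 *	tbl = (mmu_long_dte_t *) pmap_bootstrap_alloc(NBPG);
 */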
   3472 
/* pmap_bootstrap_aalign		INTERNAL
 **
 * Used to ensure that the next call to pmap_bootstrap_alloc() will
   3476  * return a chunk of memory aligned to the specified size.
   3477  *
   3478  * Note: This function will only support alignment sizes that are powers
   3479  * of two.
   3480  */
   3481 void
   3482 pmap_bootstrap_aalign(size)
   3483 	int size;
   3484 {
   3485 	int off;
   3486 
   3487 	off = virtual_avail & (size - 1);
   3488 	if (off) {
   3489 		(void) pmap_bootstrap_alloc(size - off);
   3490 	}
   3491 }
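
/*
 * Worked example for pmap_bootstrap_aalign() (illustrative only): if
 * virtual_avail == 0x12345 and size == 0x1000, then off == 0x345 and
 * the 0x1000 - 0x345 == 0xcbb bytes up to the boundary are thrown
 * away, leaving virtual_avail == 0x13000, which is 0x1000-aligned.
 */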
   3492 
   3493 /* pmap_pa_exists
   3494  **
   3495  * Used by the /dev/mem driver to see if a given PA is memory
   3496  * that can be mapped.  (The PA is not in a hole.)
   3497  */
   3498 int
   3499 pmap_pa_exists(pa)
   3500 	vm_offset_t pa;
   3501 {
   3502 	register int i;
   3503 
   3504 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
   3505 		if ((pa >= avail_mem[i].pmem_start) &&
   3506 			(pa <  avail_mem[i].pmem_end))
   3507 			return (1);
   3508 		if (avail_mem[i].pmem_next == NULL)
   3509 			break;
   3510 	}
   3511 	return (0);
   3512 }
   3513 
   3514 /* pmap_activate			INTERFACE
   3515  **
   3516  * This is called by locore.s:cpu_switch when we are switching to a
   3517  * new process.  This should load the MMU context for the new proc.
   3518  *
   3519  * Note: Only used when locore.s is compiled with PMAP_DEBUG.
   3520  */
   3521 void
   3522 pmap_activate(pmap)
	pmap_t	pmap;
   3524 {
   3525 	u_long rootpa;
   3526 
   3527 	/* Only do reload/flush if we have to. */
   3528 	rootpa = pmap->pm_a_phys;
   3529 	if (kernel_crp.rp_addr != rootpa) {
   3530 		DPRINT(("pmap_activate(%p)\n", pmap));
   3531 		kernel_crp.rp_addr = rootpa;
   3532 		loadcrp(&kernel_crp);
   3533 		TBIAU();
   3534 	}
   3535 }
   3536 
   3537 
   3538 /* pmap_update
   3539  **
   3540  * Apply any delayed changes scheduled for all pmaps immediately.
   3541  *
   3542  * No delayed operations are currently done in this pmap.
   3543  */
   3544 void
   3545 pmap_update()
   3546 {
   3547 	/* not implemented. */
   3548 }
   3549 
   3550 /*
   3551  * Fill in the cpu_kcore header for dumpsys()
   3552  * (See machdep.c)
   3553  */
   3554 void
   3555 pmap_set_kcore_hdr(chdr_p)
   3556 	cpu_kcore_hdr_t *chdr_p;
   3557 {
   3558 	struct sun3x_kcore_hdr *sh = &chdr_p->un._sun3x;
   3559 	u_long spa, len;
   3560 	int i;
   3561 	extern char machine[];
   3562 
   3563 	/*
   3564 	 * Fill in dispatch information.
   3565 	 */
   3566 	strcpy(chdr_p->name, machine);
   3567 	chdr_p->page_size = NBPG;
   3568 	chdr_p->kernbase = KERNBASE;
   3569 
   3570 	sh->contig_end = virtual_contig_end;
   3571 	sh->kernCbase = (u_long) kernCbase;
   3572 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
   3573 		spa = avail_mem[i].pmem_start;
   3574 		spa = _trunc_page(spa);
   3575 		len = avail_mem[i].pmem_end - spa;
   3576 		len = _round_page(len);
   3577 		sh->ram_segs[i].start = spa;
   3578 		sh->ram_segs[i].size  = len;
   3579 	}
   3580 }
   3581 
   3582 
   3583 /* pmap_virtual_space			INTERFACE
   3584  **
   3585  * Return the current available range of virtual addresses in the
 * arguments provided.  Only really called once.
   3587  */
   3588 void
   3589 pmap_virtual_space(vstart, vend)
   3590 	vm_offset_t *vstart, *vend;
   3591 {
   3592 	*vstart = virtual_avail;
   3593 	*vend = virtual_end;
   3594 }
   3595 
   3596 /* pmap_free_pages			INTERFACE
   3597  **
   3598  * Return the number of physical pages still available.
   3599  *
   3600  * This is probably going to be a mess, but it's only called
   3601  * once and it's the only function left that I have to implement!
   3602  */
   3603 u_int
   3604 pmap_free_pages()
   3605 {
   3606 	int i;
   3607 	u_int left;
   3608 	vm_offset_t avail;
   3609 
   3610 	avail = avail_next;
   3611 	left = 0;
   3612 	i = 0;
   3613 	while (avail >= avail_mem[i].pmem_end) {
   3614 		if (avail_mem[i].pmem_next == NULL)
   3615 			return 0;
   3616 		i++;
   3617 	}
   3618 	while (i < SUN3X_NPHYS_RAM_SEGS) {
   3619 		if (avail < avail_mem[i].pmem_start) {
   3620 			/* Avail is inside a hole, march it
   3621 			 * up to the next bank.
   3622 			 */
   3623 			avail = avail_mem[i].pmem_start;
   3624 		}
   3625 		left += _btop(avail_mem[i].pmem_end - avail);
   3626 		if (avail_mem[i].pmem_next == NULL)
   3627 			break;
   3628 		i++;
   3629 	}
   3630 
   3631 	return left;
   3632 }
   3633 
   3634 /* pmap_page_index			INTERFACE
   3635  **
 * Return the index of the given physical page in a list of usable
   3637  * physical pages in the system.  Holes in physical memory may be counted
   3638  * if so desired.  As long as pmap_free_pages() and pmap_page_index()
   3639  * agree as to whether holes in memory do or do not count as valid pages,
 * it really doesn't matter.  However, not counting holes as valid pages
 * saves a little memory, and saves more the larger the holes are.
   3643  *
   3644  * We will not count holes as valid pages.  We can generate page indices
   3645  * that conform to this by using the memory bank structures initialized
   3646  * in pmap_alloc_pv().
   3647  */
   3648 int
   3649 pmap_page_index(pa)
   3650 	vm_offset_t pa;
   3651 {
   3652 	struct pmap_physmem_struct *bank = avail_mem;
   3653 
   3654 	/* Search for the memory bank with this page. */
   3655 	/* XXX - What if it is not physical memory? */
	while (pa >= bank->pmem_end)	/* pmem_end is exclusive */
   3657 		bank = bank->pmem_next;
   3658 	pa -= bank->pmem_start;
   3659 
   3660 	return (bank->pmem_pvbase + _btop(pa));
   3661 }
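
/*
 * Worked example (illustrative only, with a made-up bank layout): given
 * two banks [0x0, 0x600000) and [0x800000, 0xe00000), pmem_pvbase for
 * the second bank would be _btop(0x600000).  A page at pa == 0x800000
 * then gets index _btop(0x600000) + 0; the hole between the banks
 * contributes no indices.
 */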
   3662 
   3663 /* pmap_next_page			INTERFACE
   3664  **
   3665  * Place the physical address of the next available page in the
   3666  * argument given.  Returns FALSE if there are no more pages left.
   3667  *
   3668  * This function must jump over any holes in physical memory.
   3669  * Once this function is used, any use of pmap_bootstrap_alloc()
   3670  * is a sin.  Sinners will be punished with erratic behavior.
   3671  */
   3672 boolean_t
   3673 pmap_next_page(pa)
   3674 	vm_offset_t *pa;
   3675 {
   3676 	static struct pmap_physmem_struct *curbank = avail_mem;
   3677 
   3678 	/* XXX - temporary ROM saving hack. */
   3679 	if (avail_next >= avail_end)
   3680 		return FALSE;
   3681 
	if (avail_next >= curbank->pmem_end) {
		if (curbank->pmem_next == NULL)
			return FALSE;
		else {
			curbank = curbank->pmem_next;
			avail_next = curbank->pmem_start;
		}
	}
   3689 
   3690 	*pa = avail_next;
   3691 	avail_next += NBPG;
   3692 	return TRUE;
   3693 }
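
/*
 * A sketch (hypothetical, not from this file) of how VM startup code
 * might consume pmap_next_page() to enumerate every usable page:
 *
 *	vm_offset_t pa;
 *
 *	while (pmap_next_page(&pa)) {
 *		... hand the page at 'pa' to the VM system ...
 *	}
 */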
   3694 
   3695 /* pmap_count			INTERFACE
   3696  **
   3697  * Return the number of resident (valid) pages in the given pmap.
   3698  *
   3699  * Note:  If this function is handed the kernel map, it will report
   3700  * that it has no mappings.  Hopefully the VM system won't ask for kernel
   3701  * map statistics.
   3702  */
   3703 segsz_t
   3704 pmap_count(pmap, type)
   3705 	pmap_t pmap;
   3706 	int    type;
   3707 {
   3708 	u_int     count;
   3709 	int       a_idx, b_idx;
   3710 	a_tmgr_t *a_tbl;
   3711 	b_tmgr_t *b_tbl;
   3712 	c_tmgr_t *c_tbl;
   3713 
   3714 	/*
	 * If the pmap does not have its own A table manager, it has no
	 * valid entries.
   3717 	 */
   3718 	if (pmap->pm_a_tmgr == NULL)
   3719 		return 0;
   3720 
   3721 	a_tbl = pmap->pm_a_tmgr;
   3722 
   3723 	count = 0;
   3724 	for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
   3725 	    if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
   3726 	        b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
   3727 	        for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
   3728 	            if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
   3729 	                c_tbl = mmuC2tmgr(
   3730 	                    mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
   3731 	                if (type == 0)
   3732 	                    /*
   3733 	                     * A resident entry count has been requested.
   3734 	                     */
   3735 	                    count += c_tbl->ct_ecnt;
   3736 	                else
   3737 	                    /*
   3738 	                     * A wired entry count has been requested.
   3739 	                     */
   3740 	                    count += c_tbl->ct_wcnt;
   3741 	            }
   3742 	        }
   3743 	    }
   3744 	}
   3745 
   3746 	return count;
   3747 }
   3748 
   3749 /************************ SUN3 COMPATIBILITY ROUTINES ********************
   3750  * The following routines are only used by DDB for tricky kernel text    *
 * operations in db_memrw.c.  They are provided for sun3                 *
   3752  * compatibility.                                                        *
   3753  *************************************************************************/
   3754 /* get_pte			INTERNAL
   3755  **
 * Return the page descriptor that describes the kernel mapping
   3757  * of the given virtual address.
   3758  */
   3759 extern u_long ptest_addr __P((u_long));	/* XXX: locore.s */
   3760 u_long
   3761 get_pte(va)
   3762 	vm_offset_t va;
   3763 {
   3764 	u_long pte_pa;
   3765 	mmu_short_pte_t *pte;
   3766 
   3767 	/* Get the physical address of the PTE */
   3768 	pte_pa = ptest_addr(va & ~PGOFSET);
   3769 
   3770 	/* Convert to a virtual address... */
   3771 	pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
   3772 
   3773 	/* Make sure it is in our level-C tables... */
   3774 	if ((pte < kernCbase) ||
   3775 		(pte >= &mmuCbase[NUM_USER_PTES]))
   3776 		return 0;
   3777 
   3778 	/* ... and just return its contents. */
   3779 	return (pte->attr.raw);
   3780 }
   3781 
   3782 
   3783 /* set_pte			INTERNAL
   3784  **
   3785  * Set the page descriptor that describes the kernel mapping
   3786  * of the given virtual address.
   3787  */
   3788 void
   3789 set_pte(va, pte)
   3790 	vm_offset_t va;
   3791 	vm_offset_t pte;
   3792 {
   3793 	u_long idx;
   3794 
   3795 	if (va < KERNBASE)
   3796 		return;
   3797 
   3798 	idx = (unsigned long) _btop(va - KERNBASE);
   3799 	kernCbase[idx].attr.raw = pte;
   3800 }
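
/*
 * A sketch (hypothetical, not the actual db_memrw.c code) of how DDB
 * might use the two routines above to patch write-protected kernel
 * text.  'MMU_SHORT_PTE_WP' is assumed here to name the PTE
 * write-protect bit.
 *
 *	u_long oldpte;
 *
 *	oldpte = get_pte(va);
 *	set_pte(va, oldpte & ~MMU_SHORT_PTE_WP);
 *	... store the new instruction at va ...
 *	set_pte(va, oldpte);
 */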
   3801 
   3802 #ifdef	PMAP_DEBUG
   3803 /************************** DEBUGGING ROUTINES **************************
   3804  * The following routines are meant to be an aid to debugging the pmap  *
   3805  * system.  They are callable from the DDB command line and should be   *
   3806  * prepared to be handed unstable or incomplete states of the system.   *
   3807  ************************************************************************/
   3808 
   3809 /* pv_list
   3810  **
   3811  * List all pages found on the pv list for the given physical page.
   3812  * To avoid endless loops, the listing will stop at the end of the list
   3813  * or after 'n' entries - whichever comes first.
   3814  */
   3815 void
   3816 pv_list(pa, n)
   3817 	vm_offset_t pa;
   3818 	int n;
   3819 {
   3820 	int  idx;
   3821 	vm_offset_t va;
   3822 	pv_t *pv;
   3823 	c_tmgr_t *c_tbl;
   3824 	pmap_t pmap;
   3825 
   3826 	pv = pa2pv(pa);
   3827 	idx = pv->pv_idx;
   3828 
	for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
   3830 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl 0x%x\n",
   3832 			idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
   3833 	}
   3834 }
   3835 #endif	/* PMAP_DEBUG */
   3836 
   3837 #ifdef NOT_YET
   3838 /* and maybe not ever */
   3839 /************************** LOW-LEVEL ROUTINES **************************
 * These routines will eventually be rewritten into assembly and placed *
   3841  * in locore.s.  They are here now as stubs so that the pmap module can *
   3842  * be linked as a standalone user program for testing.                  *
   3843  ************************************************************************/
   3844 /* flush_atc_crp			INTERNAL
   3845  **
   3846  * Flush all page descriptors derived from the given CPU Root Pointer
   3847  * (CRP), or 'A' table as it is known here, from the 68851's automatic
   3848  * cache.
   3849  */
   3850 void
flush_atc_crp(a_tbl)
	mmu_long_dte_t *a_tbl;	/* XXX: type assumed; was left undeclared */
{
   3853 	mmu_long_rp_t rp;
   3854 
   3855 	/* Create a temporary root table pointer that points to the
   3856 	 * given A table.
   3857 	 */
   3858 	rp.attr.raw = ~MMU_LONG_RP_LU;
   3859 	rp.addr.raw = (unsigned int) a_tbl;
   3860 
   3861 	mmu_pflushr(&rp);
   3862 	/* mmu_pflushr:
   3863 	 * 	movel   sp(4)@,a0
   3864 	 * 	pflushr a0@
   3865 	 *	rts
   3866 	 */
   3867 }
   3868 #endif /* NOT_YET */
   3869