/*	$NetBSD: pmap.c,v 1.1 1997/01/14 20:57:08 gwr Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE contains the physical base
 * address of a page, to which the remaining 13 bits of the VA are added
 * as an offset, producing the mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */
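
/* As a concrete illustration of the 7/6/6/13 bit split described above
 * (a sketch only; the real code uses the MMU_TIA(), MMU_TIB() and
 * MMU_TIC() macros used throughout this file), the three table indices
 * and the page offset of a VA would be extracted as:
 *
 *	a_idx  = (va >> 25) & 0x7f;	7 bits:  A table index
 *	b_idx  = (va >> 19) & 0x3f;	6 bits:  B table index
 *	c_idx  = (va >> 13) & 0x3f;	6 bits:  C table index
 *	offset = va & 0x1fff;		13 bits: offset within the page
 */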

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * an MC68851 chip. Instead, it has a version of this MMU that is part of
 * the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache.  So sadly (or happily), the previous note
 * does not apply to the sun3x pmap.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/queue.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/mon.h>

#include "machdep.h"
#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*
 * I think it might be cleaner to have one of these in each of
 * the a_tmgr_t structures, but it's late at night... -gwr
 *
 */
struct rootptr {
	u_long limit; /* and type */
	u_long paddr;
};
struct rootptr proc0crp;

/* This is set by locore.s with the monitor's root ptr. */
extern struct rootptr mon_crp;

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr'
 * structures, one for each MMU table in the system.
 *
 *                        MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *      towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *              | Kernel     MMU A level table                          |
 * kernBbase -> +-------------------------------------------------------+
 *              | Kernel     MMU B level tables                         |
 * kernCbase -> +-------------------------------------------------------+
 *              |                                                       |
 *              | Kernel     MMU C level tables                         |
 *              |                                                       |
 * mmuAbase  -> +-------------------------------------------------------+
 *              |                                                       |
 *              | User       MMU A level tables                         |
 *              |                                                       |
 * mmuBbase  -> +-------------------------------------------------------+
 *              | User       MMU B level tables                         |
 * mmuCbase  -> +-------------------------------------------------------+
 *              | User       MMU C level tables                         |
 * tmgrAbase -> +-------------------------------------------------------+
 *              |  TMGR A level table structures                        |
 * tmgrBbase -> +-------------------------------------------------------+
 *              |  TMGR B level table structures                        |
 * tmgrCbase -> +-------------------------------------------------------+
 *              |  TMGR C level table structures                        |
 * pvbase    -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list heads)       |
 * pvebase   -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list elements)    |
 *              |                                                       |
 *              +-------------------------------------------------------+
 *      towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */
/* Global variables for storing the base addresses for the areas
 * labeled above.
 */
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_long_dte_t	*mmuAbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_short_pte_t	*mmuCbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t		*pvbase;
static pv_elem_t	*pvebase;

/* Just all-around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
       struct pmap	kernel_pmap;
static a_tmgr_t		*proc0Atmgr;
       a_tmgr_t		*curatbl;
static boolean_t	pv_initialized = 0;
static vm_offset_t	last_mapped = 0;
       int		tmp_vpages_inuse = 0;

/*
 * XXX:  For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vm_offset_t	virtual_avail, virtual_end;
/* Physical address space available: */
vm_offset_t	avail_start, avail_end;

vm_offset_t tmp_vpages[2];


/* The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_80_MEM_BANKS];
u_int total_phys_mem;

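/* A sketch of walking this list under the conventions above (the loop
 * in pmap_alloc_pv() below does essentially this):
 *
 *	struct pmap_physmem_struct *bank;
 *	u_int total = 0;
 *
 *	for (bank = avail_mem; bank != NULL; bank = bank->pmem_next)
 *		total += bank->pmem_end - bank->pmem_start;
 */
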
/* These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index.  The pointer subtraction
 *    yields a count of entries, so dividing by MMU_A_TBL_SIZE (the
 *    number of entries in one MMU 'A' table) gives the index.
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
#define mmuA2tmgr(table) \
	(&Atmgrbase[\
		((mmu_long_dte_t *)(table) - mmuAbase)\
		/ MMU_A_TBL_SIZE\
	])
#define mmuB2tmgr(table) \
	(&Btmgrbase[\
		((mmu_short_dte_t *)(table) - mmuBbase)\
		/ MMU_B_TBL_SIZE\
	])
#define mmuC2tmgr(table) \
	(&Ctmgrbase[\
		((mmu_short_pte_t *)(table) - mmuCbase)\
		/ MMU_C_TBL_SIZE\
	])
#define pte2pve(pte) \
	(&pvebase[\
		((mmu_short_pte_t *)(pte) - mmuCbase)\
	])
/* I don't think this is actually used.
 * #define pte2pv(pte) \
 *	(pa2pv(\
 *		(pte)->attr.raw & MMU_SHORT_PTE_BASEADDR\
 *	))
 */
/* This is now a function call
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		sun3x_btop(pa)\
 *	])
 */
#define pve2pte(pve) \
	(&mmuCbase[(unsigned long)\
		(((pv_elem_t *)(pve)) - pvebase)\
	])
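
/* Example use of the macros above (a sketch): given a pointer 'pte'
 * into the user C table pool, the controlling manager structure and
 * the corresponding pv list element would be found with:
 *
 *	c_tmgr_t  *c_tbl = mmuC2tmgr(pte);
 *	pv_elem_t *pve   = pte2pve(pte);
 */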

/************************** TEMPORARY STATEMENTS **************************
 * These statements will disappear once this code is integrated into the *
 * system.  They are here only to make the code `stand alone'.           *
 *************************************************************************/
#define mmu_ptov(pa) ((unsigned long) KERNBASE + (unsigned long) (pa))
#define mmu_vtop(va) ((unsigned long) (va) - (unsigned long) KERNBASE)
#define NULL 0

#define NUM_A_TABLES	20
#define NUM_B_TABLES	60
#define NUM_C_TABLES	60

/************************** MISCELLANEOUS MACROS **************************/
#define PMAP_LOCK()	;	/* Nothing, for now */
#define PMAP_UNLOCK()	;	/* same. */
/************************** FUNCTION DECLARATIONS *************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************
 */

/** External functions
 ** - functions used within this module but written elsewhere.
 **   All of these functions are in locore.s
 **/
void   mmu_seturp __P((vm_offset_t));
void   mmu_flush __P((int, vm_offset_t));
void   mmu_flusha __P((void));

/** Internal functions
 ** - all functions used only within this module are defined in
 **   pmap_pvt.h
 **/

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **/
#ifdef INCLUDED_IN_PMAP_H
void   pmap_bootstrap __P((void));
void  *pmap_bootstrap_alloc __P((int));
void   pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
pmap_t pmap_create __P((vm_size_t));
void   pmap_destroy __P((pmap_t));
void   pmap_reference __P((pmap_t));
boolean_t   pmap_is_referenced __P((vm_offset_t));
boolean_t   pmap_is_modified __P((vm_offset_t));
void   pmap_clear_modify __P((vm_offset_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
void   pmap_activate __P((pmap_t, struct pcb *));
int    pmap_page_index __P((vm_offset_t));
u_int  pmap_free_pages __P((void));
#endif /* INCLUDED_IN_PMAP_H */

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap			INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from sun3x_vm_init()
 * in _startup.c.
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 *           system implement pmap_steal_memory() is redundant.
 *           Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(nextva)
	vm_offset_t nextva;
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vm_offset_t va, pa, eva;
	int b, c, i, j;	/* running table counts */
	int size;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.)  It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/* XXX - Attention: moved stuff. */

	/*
	 * Determine the range of kernel virtual space available.
	 */
	virtual_avail = sun3x_round_page(nextva);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Determine the range of physical memory available and
	 * relay this information to the pmap via the avail_mem[]
	 * array of physical memory segment structures.
	 *
	 * Avail_end is set to the first byte of physical memory
	 * outside the last bank.
	 */
	avail_start = virtual_avail - KERNBASE;

	/*
	 * This is a somewhat unrolled loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks.  The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 * XXX - Use: do { ... } while (membank->next) instead?
	 * XXX - Why copy this stuff at all? -gwr
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	while (membank->next) {
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		/* This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
		membank = membank->next;
	}

	/*
	 * XXX The last bank of memory should be reduced to exclude the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.  XXX - See below - Fix!
	 */
	pmap_membank->pmem_start = membank->address;
	pmap_membank->pmem_end = membank->address + membank->size;
	pmap_membank->pmem_next = NULL;

#if 0	/* XXX - Need to integrate this! */
	/*
	 * The last few pages of physical memory are "owned" by
	 * the PROM.  The total amount of memory we are allowed
	 * to use is given by the romvec pointer. -gwr
	 *
	 * We should dedicate different variables for 'usable'
	 * and 'physically available'.  Most users are used to the
	 * kernel reporting the amount of memory 'physically available'
	 * as opposed to 'usable by the kernel' at boot time. -j
	 */
	total_phys_mem = *romVectorPtr->memoryAvail;
#endif	/* XXX */

	total_phys_mem += membank->size;	/* XXX see above */
	physmem = btoc(total_phys_mem);
	avail_end = pmap_membank->pmem_end;
	avail_end = sun3x_trunc_page(avail_end);

	/* XXX - End moved stuff. */

	/*
	 * The first step is to allocate MMU tables.
	 * Note: All must be aligned on 256 byte boundaries.
	 *
	 * Start with the top level, or 'A' table.
	 */
	kernAbase = (mmu_long_dte_t *) virtual_avail;
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	bzero(kernAbase, size);
	avail_start += size;
	virtual_avail += size;

	/* Allocate enough B tables to map from KERNBASE to
	 * the end of VM.
	 */
	kernBbase = (mmu_short_dte_t *) virtual_avail;
	size = sizeof(mmu_short_dte_t) *
		(MMU_A_TBL_SIZE - MMU_TIA(KERNBASE)) * MMU_B_TBL_SIZE;
	bzero(kernBbase, size);
	avail_start += size;
	virtual_avail += size;

	/* Allocate enough C tables. */
	kernCbase = (mmu_short_pte_t *) virtual_avail;
	size = sizeof (mmu_short_pte_t) *
		(MMU_A_TBL_SIZE - MMU_TIA(KERNBASE))
		* MMU_B_TBL_SIZE * MMU_C_TBL_SIZE;
	bzero(kernCbase, size);
	avail_start += size;
	virtual_avail += size;

	/* For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase.  The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/* Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/* Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
			MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = (unsigned long) mmu_vtop(&kernBbase[b]);

		for (j=0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw =
				(unsigned long) mmu_vtop(&kernCbase[c])
				| MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used by _startup.c and other various
	 * modules to modify kernel mappings.
	 *
	 * Note: Our tables will NOT have the default linear mappings!
	 */
	va = (vm_offset_t) KERNBASE;
	pa = mmu_vtop(KERNBASE);

	/*
	 * The first page is the msgbuf page (data, non-cached).
	 * Just fixup the mapping here; setup is in cpu_startup().
	 * XXX - Make it non-cached?
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += NBPG; pa += NBPG;

	/* The temporary stack page. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += NBPG; pa += NBPG;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default).  Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page.  Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = sun3x_trunc_page((vm_offset_t)etext);
	for (; va < eva; pa += NBPG, va += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/* Map all of the kernel's data (including BSS) segment as read/write
	 * and cacheable.
	 */
	for (; va < (vm_offset_t) esym; pa += NBPG, va += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/* Map all of the data we have allocated since the start of this
	 * function.
	 */
	for (; va < virtual_avail; va += NBPG, pa += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/* Set 'last_mapped' to the address of the last physical page
	 * that was mapped in the kernel.  This variable is used by
	 * pmap_bootstrap_alloc() to determine when it needs to map
	 * a new page.
	 *
	 * XXX - This can be a lot simpler.  We already know that the
	 * first 4MB of memory (at least) is mapped PA=VA-KERNBASE,
	 * so we should never need to create any new mappings. -gwr
	 *
	 * True, but it only remains so as long as we are using the
	 * ROM's CRP.  Unless, of course, we copy these mappings into
	 * our table. -j
	 */
	last_mapped = sun3x_trunc_page(pa - (NBPG - 1));

	/* It is now safe to use pmap_bootstrap_alloc(). */

	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.  */
	pmap_alloc_etc();	/* Allocate miscellaneous things.   */

	/* Notify the VM system of our page size. */
	PAGE_SIZE = NBPG;
	vm_set_page_size();

	/* XXX - Attention: moved stuff. */

	/*
	 * XXX - Make sure avail_start is within the low 4M range
	 * that the Sun PROM guarantees will be mapped in?
	 * Make sure it is below avail_end as well?
	 */

	/*
	 * Now steal some virtual addresses, but
	 * not the physical pages behind them.
	 */

	/*
	 * vpages array:  just some virtual addresses for
	 * temporary mappings in the pmap module (two pages)
	 */
	pmap_bootstrap_aalign(NBPG);
	tmp_vpages[0] = virtual_avail;
	virtual_avail += NBPG;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += NBPG;

	/* XXX - End moved stuff. */

	/* It should be noted that none of these mappings take
	 * effect until the MMU's root pointer is changed from
	 * the PROM map to our own.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu()
{
	/* Allocate user MMU tables.
	 * These must be aligned on 256 byte boundaries.
	 */
	pmap_bootstrap_aalign(256);
	mmuAbase = (mmu_long_dte_t *)
		pmap_bootstrap_alloc(sizeof(mmu_long_dte_t)
		* MMU_A_TBL_SIZE
		* NUM_A_TABLES);
	mmuBbase = (mmu_short_dte_t *)
		pmap_bootstrap_alloc(sizeof(mmu_short_dte_t)
		* MMU_B_TBL_SIZE
		* NUM_B_TABLES);
	mmuCbase = (mmu_short_pte_t *)
		pmap_bootstrap_alloc(sizeof(mmu_short_pte_t)
		* MMU_C_TBL_SIZE
		* NUM_C_TABLES);
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv()
{
	int	i;
	unsigned int	total_mem;

	/* Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system.  Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */

	total_mem = 0;
	for (i = 0; i < SUN3X_80_MEM_BANKS; i++) {
		avail_mem[i].pmem_pvbase = sun3x_btop(total_mem);
		total_mem += avail_mem[i].pmem_end -
			avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
#ifdef	PMAP_DEBUG
	if (total_mem != total_phys_mem)
		panic("pmap_alloc_pv did not arrive at correct page count");
#endif

	pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
		sun3x_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr()
{
	/* Allocate user MMU table managers */
	/* XXX - It would be a lot simpler to just make these BSS. -gwr */
	Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
		* NUM_A_TABLES);
	Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
		* NUM_B_TABLES);
	Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
		* NUM_C_TABLES);

	/* Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
		sizeof(struct pv_elem_struct)
		* MMU_C_TBL_SIZE
		* NUM_C_TABLES );
}

/* pmap_alloc_etc			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate any remaining pieces
 * that didn't fit neatly into any of the other pmap_alloc
 * functions.
 */
void
pmap_alloc_etc()
{
	/* Allocate an A table manager for the kernel_pmap */
	proc0Atmgr = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom()
{
#if 0	/* XXX - Just history... */
	/*
	 * XXX - This method makes DVMA difficult, because
	 * the PROM only provides PTEs for 1M of DVMA space.
	 * That's OK for boot programs, but not a VM system.
	 */
	int a_idx;
	mmu_long_dte_t *mon_atbl;

	/*
	 * Copy the last entry (PROM monitor and DVMA mappings)
	 * so our level-A table will use the PROM level-B table.
	 */
	mon_atbl = (mmu_long_dte_t *) mon_crp.paddr;
	a_idx = MMU_TIA(VM_MAX_KERNEL_ADDRESS);
	kernAbase[a_idx].attr.raw = mon_atbl[a_idx].attr.raw;
	kernAbase[a_idx].addr.raw = mon_atbl[a_idx].addr.raw;

#endif
#if 0	/* XXX - More history... */
	/*
	 * XXX - This method is OK with our DVMA implementation,
	 * but causes pmap_extract() to be ignorant of the PROM
	 * mappings.  Maybe that's OK.  If not, we should just
	 * copy the PTEs (level-C) from the PROM. -gwr
	 */
	mmu_long_dte_t *mon_atbl;
	mmu_short_dte_t *mon_btbl;
	mmu_short_dte_t *our_btbl;
	int a_idx, b_idx;

	/*
	 * Copy parts of the level-B table from the PROM for
	 * mappings that the PROM cares about.
	 */
	mon_atbl = (mmu_long_dte_t *) mon_crp.paddr;
	a_idx = MMU_TIA(MON_KDB_START);
	mon_btbl = (mmu_short_dte_t *) mon_atbl[a_idx].addr.raw;

	/* Temporary use of b_idx to find our level-B table. */
	b_idx = MMU_B_TBL_SIZE * (a_idx - MMU_TIA(KERNBASE));
	our_btbl = &kernBbase[b_idx];

	/*
	 * Preserve both the kadb and monitor mappings (2MB).
	 * We could have started at MONSTART, but this costs
	 * us nothing, and might be useful someday...
	 */
	for (b_idx = MMU_TIB(MON_KDB_START);
		 b_idx < MMU_TIB(MONEND); b_idx++)
		our_btbl[b_idx].attr.raw = mon_btbl[b_idx].attr.raw;

	/*
	 * Preserve the monitor's DVMA map for now (1MB).
	 * Later, we might want to kill this mapping so we
	 * can have all of the DVMA space (16MB).
	 */
	for (b_idx = MMU_TIB(MON_DVMA_BASE);
		 b_idx < MMU_B_TBL_SIZE; b_idx++)
		our_btbl[b_idx].attr.raw = mon_btbl[b_idx].attr.raw;
#endif
	MachMonRomVector *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in MON_KDB_START...MONEND
	 * Note: mon_ctbl[0] maps MON_KDB_START
	 */
	mon_ctbl = *romp->monptaddr;
	i = sun3x_btop(MON_KDB_START - KERNBASE);
	kpte = &kernCbase[i];
	len = sun3x_btop(MONEND - MON_KDB_START);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * XXX - This does not appear to be necessary, but
	 * I'm not sure yet if it is or not. -gwr
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = sun3x_btop(MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = sun3x_btop(MON_DVMA_SIZE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu()
{
	vm_offset_t tbladdr;

	tbladdr = mmu_vtop((vm_offset_t) kernAbase);
	mon_printf("pmap_takeover_mmu: tbladdr=0x%x\n", tbladdr);

	/* Initialize the CPU Root Pointer (CRP) for proc0. */
	/* XXX: I'd prefer per-process CRP storage. -gwr */
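	/*
	 * In the '030 root-pointer format, 0x80000003 encodes L/U=1
	 * with a limit of zero (i.e. no index limiting) in the upper
	 * word, and DT=3 (valid, 8-byte descriptors) in the low bits.
	 */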
	proc0crp.limit = 0x80000003;	/* limit and type */
	proc0crp.paddr = tbladdr;	/* phys. addr. */
	curpcb->pcb_mmuctx = (int) &proc0crp;

	mon_printf("pmap_takeover_mmu: loadcrp...\n");
	loadcrp((vm_offset_t) &proc0crp);
	mon_printf("pmap_takeover_mmu: survived!\n");
}

/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full-time operation.
 */
void
pmap_init()
{
	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/** Initialize the PV system **/
	pmap_init_pv();

	/** Zero out the kernel's pmap **/
	bzero(&kernel_pmap, sizeof(struct pmap));

	/* Initialize the A table manager that is used in pmaps which
	 * do not have an A table of their own.  This table uses the
	 * kernel, or 'proc0', level A MMU table, which contains no valid
	 * user space mappings.  Any user process that attempts to execute
	 * using this A table will fault, at which point the VM system will
	 * call pmap_enter, which will then allocate it an A table of its own
	 * from the pool.
	 */
	proc0Atmgr->at_dtbl = kernAbase;
	proc0Atmgr->at_parent = &kernel_pmap;
	kernel_pmap.pm_a_tbl = proc0Atmgr;

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables()
{
	int i;
	a_tmgr_t *a_tbl;

	for (i=0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/* Clear its parent entry.  Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/* Initialize the MMU A table with a copy of the `proc0',
		 * or kernel, A table.  This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
			sizeof(mmu_long_dte_t));

		/* Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables()
{
	int i,j;
	b_tmgr_t *b_tbl;

	for (i=0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,  */
		b_tbl->bt_pidx = 0;		/* parent index,      */
		b_tbl->bt_wcnt = 0;		/* wired entry count, */
		b_tbl->bt_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j=0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables()
{
	int i,j;
	c_tmgr_t *c_tbl;

	for (i=0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,  */
		c_tbl->ct_pidx = 0;		/* parent index,      */
		c_tbl->ct_wcnt = 0;		/* wired entry count, */
		c_tbl->ct_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j=0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv()
{
	bzero(pvbase, sizeof(pv_t) * sun3x_btop(total_phys_mem));
	pv_initialized = TRUE;
}

/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table()
{
	a_tmgr_t *tbl;

	/* Get the top A table in the pool */
	tbl = a_pool.tqh_first;
	if (tbl == NULL)
		panic("get_a_table: out of A tables.");
	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/* If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here.  This table would not be in the
	 * table pool unless it was available for use.
	 */
	if (tbl->at_parent) {
		tbl->at_parent->pm_stats.resident_count -= free_a_table(tbl);
		tbl->at_parent->pm_a_tbl = proc0Atmgr;
	}
#ifdef  NON_REENTRANT
	/* If the table isn't to be wired down, re-insert it at the
	 * end of the pool.
	 */
	if (!wired)
		/* Quandary - XXX
		 * Would it be better to let the calling function insert this
		 * table into the queue?  By inserting it here, we are allowing
		 * it to be stolen immediately.  The calling function is
		 * probably not expecting to use a table that it is not
		 * assured full control of.
		 * Answer - In the interest of re-entrancy, it is best to let
		 * the calling function determine when a table is available
		 * for use.  Therefore this code block is not used.
		 */
		TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
#endif	/* NON_REENTRANT */
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table()
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = b_pool.tqh_first;
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		tbl->bt_parent->at_parent->pm_stats.resident_count -=
		    free_b_table(tbl);
	}
#ifdef	NON_REENTRANT
	if (!wired)
		/* XXX see quandary in get_a_table */
		/* XXX start lock */
		TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
		/* XXX end lock */
#endif	/* NON_REENTRANT */
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table()
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = c_pool.tqh_first;
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		tbl->ct_parent->bt_parent->at_parent->pm_stats.resident_count
		    -= free_c_table(tbl);
	}
#ifdef	NON_REENTRANT
	if (!wired)
		/* XXX See quandary in get_a_table */
		/* XXX start lock */
		TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
		/* XXX end lock */
#endif	/* NON_REENTRANT */

	return tbl;
}

/* The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */
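
/* For example (a sketch of the division of labor): a caller of
 * free_b_table() that manages the parent itself does, in effect,
 *
 *	a_tbl->at_dtbl[idx].attr.raw = MMU_DT_INVALID;
 *	a_tbl->at_ecnt--;
 *	cnt = free_b_table(b_tbl);
 *
 * which is the pattern visible in get_b_table() above, whereas a
 * steal_table style function would perform the first two steps itself.
 */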

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tbl)
	a_tmgr_t *a_tbl;
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*tmgr;

	/* Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * XXX - Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/* Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * XXX - Not implemented in sun3x.
	 */

	/* All A tables in the system should retain a map for the
	 * kernel. If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i=0; i < MMU_TIA(KERNBASE); i++)
			/* If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/* The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base address
				 *    of the B table pointed
				 *    to in the A table entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = (mmu_short_dte_t *) MMU_DTE_PA(dte[i]);
				dtbl = (mmu_short_dte_t *) mmu_ptov(dtbl);
				tmgr = mmuB2tmgr(dtbl);
				removed_cnt += free_b_table(tmgr);
			}
	}
	a_tbl->at_ecnt = 0;
	return removed_cnt;
}

/* free_b_table			INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tbl)
	b_tmgr_t *b_tbl;
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*tmgr;

	removed_cnt = 0;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i=0; i < MMU_B_TBL_SIZE; i++)
			if (MMU_VALID_DT(dte[i])) {
				dtbl = (mmu_short_pte_t *) MMU_DTE_PA(dte[i]);
				dtbl = (mmu_short_pte_t *) mmu_ptov(dtbl);
				tmgr = mmuC2tmgr(dtbl);
				removed_cnt += free_c_table(tmgr);
			}
	}

	b_tbl->bt_ecnt = 0;
	return removed_cnt;
}

/* free_c_table			INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tbl)
	c_tmgr_t *c_tbl;
{
	int i, removed_cnt;

	removed_cnt = 0;
	if (c_tbl->ct_ecnt)
		for (i=0; i < MMU_C_TBL_SIZE; i++)
			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
				removed_cnt++;
			}
	c_tbl->ct_ecnt = 0;
	return removed_cnt;
}

/* free_c_table_novalid			INTERNAL
 **
 * Frees the given C table manager without checking to see whether
 * or not it contains any valid page descriptors as it is assumed
 * that it does not.
 */
void
free_c_table_novalid(c_tbl)
	c_tmgr_t *c_tbl;
{
	TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
	TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
}

/* pmap_remove_pte			INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to.  This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.  If the calling function does not have the
 * ability to do so, the function pmap_dereference_pte() exists
 * for this purpose.
 */
void
pmap_remove_pte(pte)
	mmu_short_pte_t *pte;
{
	vm_offset_t pa;
	pv_t       *pv;
	pv_elem_t  *pve;

	pa = MMU_PTE_PA(*pte);
	if (is_managed(pa)) {
		pv = pa2pv(pa);
		/* Save the mod/ref bits of the pte by simply
		 * ORing the entire pte onto the pv_flags member
		 * of the pv structure.
		 * There is no need to use a different bit pattern
		 * for usage information on the pv head from the
		 * one used on the MMU ptes.
		 */
		pv->pv_flags |= pte->attr.raw;

		pve = pte2pve(pte);
		if (pve == pv->pv_head.lh_first)
			pv->pv_head.lh_first = pve->pve_link.le_next;
		LIST_REMOVE(pve, pve_link);
	}

	pte->attr.raw = MMU_DT_INVALID;
}

/* pmap_dereference_pte			INTERNAL
 **
 * Update the necessary reference counts in any tables and pmaps to
 * reflect the removal of the given pte.  Only called when the pte's
 * associated pmap is unknown.  This only occurs in the PV call
 * 'pmap_page_protect()' with a protection of VM_PROT_NONE, which means
 * that all references to a given physical page must be removed.
 */
void
pmap_dereference_pte(pte)
	mmu_short_pte_t *pte;
{
	c_tmgr_t *c_tbl;

	c_tbl = pmap_find_c_tmgr(pte);
	c_tbl->ct_parent->bt_parent->at_parent->pm_stats.resident_count--;
	if (--c_tbl->ct_ecnt == 0)
		free_c_table_novalid(c_tbl);
}

/* pmap_stroll			INTERNAL
 **
 * Retrieve the addresses of all table managers involved in the mapping of
 * the given virtual address.  If the table walk completed successfully,
 * return TRUE.  If it was only partially successful, return FALSE.
 * The table walk performed by this function is important to many other
 * functions in this module.
 */
boolean_t
pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
	pmap_t pmap;
	vm_offset_t va;
	a_tmgr_t **a_tbl;
	b_tmgr_t **b_tbl;
	c_tmgr_t **c_tbl;
	mmu_short_pte_t **pte;
	int *a_idx, *b_idx, *pte_idx;
{
	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */

	if (pmap == pmap_kernel())
		return FALSE;

	/* Does the given pmap have an A table? */
	*a_tbl = pmap->pm_a_tbl;
	if (*a_tbl == NULL)
		return FALSE; /* No.  Return unknown. */
	/* Does the A table have a valid B table
	 * under the corresponding table entry?
	 */
	*a_idx = MMU_TIA(va);
	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
	if (!MMU_VALID_DT(*a_dte))
		return FALSE; /* No. Return unknown. */
	/* Yes. Extract B table from the A table. */
	*b_tbl = pmap_find_b_tmgr(
		  (mmu_short_dte_t *) mmu_ptov(
		    MMU_DTE_PA(*a_dte)
		  )
		);
	/* Does the B table have a valid C table
	 * under the corresponding table entry?
	 */
	*b_idx = MMU_TIB(va);
	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
	if (!MMU_VALID_DT(*b_dte))
		return FALSE; /* No. Return unknown. */
	/* Yes. Extract C table from the B table. */
	*c_tbl = pmap_find_c_tmgr(
		  (mmu_short_pte_t *) mmu_ptov(
		    MMU_DTE_PA(*b_dte)
		  )
		);
	*pte_idx = MMU_TIC(va);
	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);

	return	TRUE;
}
   1414 
   1415 /* pmap_enter			INTERFACE
   1416  **
   1417  * Called by the kernel to map a virtual address
   1418  * to a physical address in the given process map.
   1419  *
   1420  * Note: this function should apply an exclusive lock
   1421  * on the pmap system for its duration.  (it certainly
   1422  * would save my hair!!)
   1423  */
   1424 void
   1425 pmap_enter(pmap, va, pa, prot, wired)
   1426 	pmap_t	pmap;
   1427 	vm_offset_t va;
   1428 	vm_offset_t pa;
   1429 	vm_prot_t prot;
   1430 	boolean_t wired;
   1431 {
	u_int a_idx, b_idx, pte_idx; /* table indices               */
   1433 	a_tmgr_t *a_tbl;         /* A: long descriptor table manager  */
   1434 	b_tmgr_t *b_tbl;         /* B: short descriptor table manager */
   1435 	c_tmgr_t *c_tbl;         /* C: short page table manager       */
   1436 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
   1437 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
   1438 	mmu_short_pte_t *c_pte;  /* C: short page descriptor table    */
   1439 	pv_t      *pv;           /* pv list head                      */
   1440 	pv_elem_t *pve;          /* pv element                        */
   1441 	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end   */
   1442 
   1443 	if (pmap == NULL)
   1444 		return;
   1445 	if (pmap == pmap_kernel()) {
   1446 		pmap_enter_kernel(va, pa, prot);
   1447 		return;
   1448 	}
   1449 
   1450 	/* For user mappings we walk along the MMU tables of the given
   1451 	 * pmap, reaching a PTE which describes the virtual page being
   1452 	 * mapped or changed.  If any level of the walk ends in an invalid
   1453 	 * entry, a table must be allocated and the entry must be updated
   1454 	 * to point to it.
   1455 	 * There is a bit of confusion as to whether this code must be
   1456 	 * re-entrant.  For now we will assume it is.  To support
   1457 	 * re-entrancy we must unlink tables from the table pool before
   1458 	 * we assume we may use them.  Tables are re-linked into the pool
   1459 	 * when we are finished with them at the end of the function.
   1460 	 * But I don't feel like doing that until we have proof that this
   1461 	 * needs to be re-entrant.
   1462 	 * 'llevel' records which tables need to be relinked.
   1463 	 */
   1464 	llevel = NONE;
   1465 
   1466 	/* Step 1 - Retrieve the A table from the pmap.  If it is the default
   1467 	 * A table (commonly known as the 'proc0' A table), allocate a new one.
   1468 	 */
   1469 
   1470 	a_tbl = pmap->pm_a_tbl;
   1471 	if (a_tbl == proc0Atmgr) {
   1472 		pmap->pm_a_tbl = a_tbl = get_a_table();
   1473 		if (!wired)
   1474 			llevel = NEWA;
   1475 	} else {
   1476 		/* Use the A table already allocated for this pmap.
   1477 		 * Unlink it from the A table pool if necessary.
   1478 		 */
   1479 		if (wired && !a_tbl->at_wcnt)
   1480 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
   1481 	}
   1482 
   1483 	/* Step 2 - Walk into the B table.  If there is no valid B table,
   1484 	 * allocate one.
   1485 	 */
   1486 
   1487 	a_idx = MMU_TIA(va);            /* Calculate the TIA of the VA. */
   1488 	a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
   1489 	if (MMU_VALID_DT(*a_dte)) {     /* Is the descriptor valid? */
   1490 		/* Yes, it points to a valid B table.  Use it. */
   1491 		/*************************************
   1492 		 *               a_idx               *
   1493 		 *                 v                 *
   1494 		 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
   1495 		 *          | | | | | | | | | | | |  *
   1496 		 *          +-+-+-+-+-+-+-+-+-+-+-+- *
   1497 		 *                 |                 *
   1498 		 *                 \- b_tbl -> +-+-  *
   1499 		 *                             | |   *
   1500 		 *                             +-+-  *
   1501 		 *************************************/
   1502 		b_dte = (mmu_short_dte_t *) mmu_ptov(a_dte->addr.raw);
   1503 		b_tbl = mmuB2tmgr(b_dte);
   1504 		if (wired && !b_tbl->bt_wcnt) {
   1505 			/* If mapping is wired and table is not */
   1506 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   1507 			a_tbl->at_wcnt++; /* Update parent table's wired
   1508 			                   * entry count. */
   1509 		}
   1510 	} else {
   1511 		b_tbl = get_b_table(); /* No, need to allocate a new B table */
   1512 		/* Point the parent A table descriptor to this new B table. */
   1513 		a_dte->addr.raw = (unsigned long) mmu_vtop(b_tbl->bt_dtbl);
   1514 		a_dte->attr.attr_struct.dt = MMU_DT_SHORT;
   1515 		/* Create the necessary back references to the parent table */
   1516 		b_tbl->bt_parent = a_tbl;
   1517 		b_tbl->bt_pidx = a_idx;
   1518 		/* If this table is to be wired, make sure the parent A table
   1519 		 * wired count is updated to reflect that it has another wired
   1520 		 * entry.
   1521 		 */
   1522 		a_tbl->at_ecnt++; /* Update parent's valid entry count */
   1523 		if (wired)
   1524 			a_tbl->at_wcnt++;
   1525 		else if (llevel == NONE)
   1526 			llevel = NEWB;
   1527 	}
   1528 
   1529 	/* Step 3 - Walk into the C table, if there is no valid C table,
   1530 	 * allocate one.
   1531 	 */
   1532 
   1533 	b_idx = MMU_TIB(va);            /* Calculate the TIB of the VA */
   1534 	b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
   1535 	if (MMU_VALID_DT(*b_dte)) {     /* Is the descriptor valid? */
   1536 		/* Yes, it points to a valid C table.  Use it. */
   1537 		/**************************************
   1538 		 *               c_idx                *
   1539 		 * |                v                 *
   1540 		 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
   1541 		 *             | | | | | | | | | | |  *
   1542 		 *             +-+-+-+-+-+-+-+-+-+-+- *
   1543 		 *                  |                 *
   1544 		 *                  \- c_tbl -> +-+-- *
   1545 		 *                              | | | *
   1546 		 *                              +-+-- *
   1547 		 **************************************/
		c_pte = (mmu_short_pte_t *) MMU_DTE_PA(*b_dte);
   1549 		c_pte = (mmu_short_pte_t *) mmu_ptov(c_pte);
   1550 		c_tbl = mmuC2tmgr(c_pte);
   1551 		if (wired && !c_tbl->ct_wcnt) {
   1552 			/* If mapping is wired and table is not */
   1553 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   1554 			b_tbl->bt_wcnt++;
   1555 		}
   1556 	} else {
   1557 		c_tbl = get_c_table(); /* No, need to allocate a new C table */
   1558 		/* Point the parent B table descriptor to this new C table. */
   1559 		b_dte->attr.raw = (unsigned long) mmu_vtop(c_tbl->ct_dtbl);
   1560 		b_dte->attr.attr_struct.dt = MMU_DT_SHORT;
   1561 		/* Create the necessary back references to the parent table */
   1562 		c_tbl->ct_parent = b_tbl;
   1563 		c_tbl->ct_pidx = b_idx;
   1564 		/* If this table is to be wired, make sure the parent B table
   1565 		 * wired count is updated to reflect that it has another wired
   1566 		 * entry.
   1567 		 */
   1568 		b_tbl->bt_ecnt++; /* Update parent's valid entry count */
   1569 		if (wired)
   1570 			b_tbl->bt_wcnt++;
   1571 		else if (llevel == NONE)
   1572 			llevel = NEWC;
   1573 	}
   1574 
   1575 	/* Step 4 - Deposit a page descriptor (PTE) into the appropriate
   1576 	 * slot of the C table, describing the PA to which the VA is mapped.
   1577 	 */
   1578 
   1579 	pte_idx = MMU_TIC(va);
   1580 	c_pte = &c_tbl->ct_dtbl[pte_idx];
   1581 	if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
   1582 		/* If the PTE is currently valid, then this function call
   1583 		 * is just a synonym for one (or more) of the following
   1584 		 * operations:
   1585 		 *     change protections on a page
   1586 		 *     change wiring status of a page
   1587 		 *     remove the mapping of a page
   1588 		 */
   1589 		/* Is the new address the same as the old? */
   1590 		if (MMU_PTE_PA(*c_pte) == pa) {
   1591 			/* Yes, do nothing. */
   1592 		} else {
   1593 			/* No, remove the old entry */
   1594 			pmap_remove_pte(c_pte);
   1595 		}
   1596 	} else {
   1597 		/* No, update the valid entry count in the C table */
   1598 		c_tbl->ct_ecnt++;
   1599 		/* and in pmap */
   1600 		pmap->pm_stats.resident_count++;
   1601         }
   1602 	/* Map the page. */
   1603 	c_pte->attr.raw = ((unsigned long) pa | MMU_DT_PAGE);
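	/* XXX - Note that 'prot' is not consulted here: user mappings
	 * are entered writable, and any write protection must be
	 * applied afterwards (e.g. by pmap_protect()).
	 */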
   1604 
   1605 	if (wired) /* Does the entry need to be wired? */ {
   1606 		c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
   1607 	}
   1608 
	/* If the physical address being mapped is managed by the PV
	 * system then link the pte into the list of pages mapped to that
	 * address.
	 */
	if (is_managed(pa)) {
		pv = pa2pv(pa);
		pve = pte2pve(c_pte);
		LIST_INSERT_HEAD(&pv->pv_head, pve, pve_link);
	}
   1618 
   1619 	/* Move any allocated tables back into the active pool. */
   1620 
   1621 	switch (llevel) {
   1622 		case NEWA:
   1623 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
   1624 			/* FALLTHROUGH */
   1625 		case NEWB:
   1626 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
   1627 			/* FALLTHROUGH */
   1628 		case NEWC:
   1629 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
   1630 			/* FALLTHROUGH */
   1631 		default:
   1632 			break;
   1633 	}
   1634 }
   1635 
   1636 /* pmap_enter_kernel			INTERNAL
   1637  **
   1638  * Map the given virtual address to the given physical address within the
   1639  * kernel address space.  This function exists because the kernel map does
   1640  * not do dynamic table allocation.  It consists of a contiguous array of ptes
   1641  * and can be edited directly without the need to walk through any tables.
   1642  *
   1643  * XXX: "Danger, Will Robinson!"
   1644  * Note that the kernel should never take a fault on any page
   1645  * between [ KERNBASE .. virtual_avail ] and this is checked in
   1646  * trap.c for kernel-mode MMU faults.  This means that mappings
 * created in that range must be implicitly wired. -gwr
   1648  */
   1649 void
   1650 pmap_enter_kernel(va, pa, prot)
   1651 	vm_offset_t va;
   1652 	vm_offset_t pa;
   1653 	vm_prot_t   prot;
   1654 {
   1655 	boolean_t was_valid = FALSE;
   1656 	mmu_short_pte_t *pte;
   1657 
   1658 	/* XXX - This array is traditionally named "Sysmap" */
   1659 	pte = &kernCbase[(unsigned long) sun3x_btop(va - KERNBASE)];
   1660 	if (MMU_VALID_DT(*pte))
   1661 		was_valid = TRUE;
   1662 
   1663 	pte->attr.raw = (pa | MMU_DT_PAGE);
   1664 
   1665 	if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
   1666 		pte->attr.raw |= MMU_SHORT_PTE_WP;
   1667 	if (pa & PMAP_NC)
   1668 		pte->attr.raw |= MMU_SHORT_PTE_CI;
   1669 	if (was_valid) {
   1670 		/* mmu_flusha(FC_SUPERD, va); */
   1671 		/* mmu_flusha(); */
   1672 		TBIA();
   1673 	}
   1674 
   1675 }
   1676 
   1677 /* pmap_protect			INTERFACE
   1678  **
   1679  * Apply the given protection to the given virtual address within
   1680  * the given map.
   1681  *
   1682  * It is ok for the protection applied to be stronger than what is
   1683  * specified.  We use this to our advantage when the given map has no
   1684  * mapping for the virtual address.  By returning immediately when this
   1685  * is discovered, we are effectively applying a protection of VM_PROT_NONE,
   1686  * and therefore do not need to map the page just to apply a protection
   1687  * code.  Only pmap_enter() needs to create new mappings if they do not exist.
   1688  */
   1689 void
   1690 pmap_protect(pmap, va, pa, prot)
   1691 	pmap_t pmap;
   1692 	vm_offset_t va, pa;
   1693 	vm_prot_t prot;
   1694 {
   1695 	int a_idx, b_idx, c_idx;
   1696 	a_tmgr_t *a_tbl;
   1697 	b_tmgr_t *b_tbl;
   1698 	c_tmgr_t *c_tbl;
   1699 	mmu_short_pte_t *pte;
   1700 
   1701 	if (pmap == NULL)
   1702 		return;
   1703 	if (pmap == pmap_kernel()) {
   1704 		pmap_protect_kernel(va, pa, prot);
   1705 		return;
   1706 	}
   1707 
   1708 	/* Retrieve the mapping from the given pmap.  If it does
   1709 	 * not exist then we need not do anything more.
   1710 	 */
   1711 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
   1712 		&a_idx, &b_idx, &c_idx) == FALSE) {
   1713 		return;
   1714 	}
   1715 
   1716 	switch (prot) {
   1717 		case VM_PROT_ALL:
   1718 			/* this should never happen in a sane system */
   1719 			break;
   1720 		case VM_PROT_READ:
   1721 		case VM_PROT_READ|VM_PROT_EXECUTE:
   1722 			/* make the mapping read-only */
   1723 			pte->attr.raw |= MMU_SHORT_PTE_WP;
   1724 			break;
   1725 		case VM_PROT_NONE:
   1726 			/* this is an alias for 'pmap_remove' */
   1727 			pmap_dereference_pte(pte);
   1728 			break;
   1729 		default:
   1730 			break;
   1731 	}
   1732 }
   1733 
   1734 /* pmap_protect_kernel			INTERNAL
   1735  **
   1736  * Apply the given protection code to a kernel address mapping.
   1737  */
   1738 void
   1739 pmap_protect_kernel(va, pa, prot)
   1740 	vm_offset_t va, pa;
   1741 	vm_prot_t prot;
   1742 {
   1743 	mmu_short_pte_t *pte;
   1744 
   1745 	pte = &kernCbase[(unsigned long) sun3x_btop(va - KERNBASE)];
   1746 	if (MMU_VALID_DT(*pte)) {
   1747 		switch (prot) {
   1748 			case VM_PROT_ALL:
   1749 				break;
   1750 			case VM_PROT_READ:
   1751 			case VM_PROT_READ|VM_PROT_EXECUTE:
   1752 				pte->attr.raw |= MMU_SHORT_PTE_WP;
   1753 				break;
   1754 			case VM_PROT_NONE:
   1755 				/* this is an alias for 'pmap_remove_kernel' */
   1756 				pte->attr.raw = MMU_DT_INVALID;
   1757 				break;
   1758 			default:
   1759 				break;
   1760 		}
   1761 	}
   1762 	/* since this is the kernel, immediately flush any cached
   1763 	 * descriptors for this address.
   1764 	 */
   1765 	/* mmu_flush(FC_SUPERD, va); */
   1766 	TBIS(va);
   1767 }
   1768 
   1769 /* pmap_change_wiring			INTERFACE
   1770  **
   1771  * Changes the wiring of the specified page.
   1772  *
   1773  * This function is called from vm_fault.c to unwire
   1774  * a mapping.  It really should be called 'pmap_unwire'
   1775  * because it is never asked to do anything but remove
   1776  * wirings.
   1777  */
   1778 void
   1779 pmap_change_wiring(pmap, va, wire)
   1780 	pmap_t pmap;
   1781 	vm_offset_t va;
   1782 	boolean_t wire;
   1783 {
   1784 	int a_idx, b_idx, c_idx;
   1785 	a_tmgr_t *a_tbl;
   1786 	b_tmgr_t *b_tbl;
   1787 	c_tmgr_t *c_tbl;
   1788 	mmu_short_pte_t *pte;
   1789 
   1790 	/* Kernel mappings always remain wired. */
   1791 	if (pmap == pmap_kernel())
   1792 		return;
   1793 
   1794 #ifdef	PMAP_DEBUG
   1795 	if (wire == TRUE)
   1796 		panic("pmap_change_wiring: wire requested.");
   1797 #endif
   1798 
   1799 	/* Walk through the tables.  If the walk terminates without
   1800 	 * a valid PTE then the address wasn't wired in the first place.
   1801 	 * Return immediately.
   1802 	 */
   1803 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
   1804 		&b_idx, &c_idx) == FALSE)
   1805 		return;
   1806 
   1807 
   1808 	/* Is the PTE wired?  If not, return. */
   1809 	if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
   1810 		return;
   1811 
   1812 	/* Remove the wiring bit. */
   1813 	pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
   1814 
   1815 	/* Decrement the wired entry count in the C table.
   1816 	 * If it reaches zero the following things happen:
   1817 	 * 1. The table no longer has any wired entries and is considered
   1818 	 *    unwired.
   1819 	 * 2. It is placed on the available queue.
   1820 	 * 3. The parent table's wired entry count is decremented.
   1821 	 * 4. If it reaches zero, this process repeats at step 1 and
	 *    stops after reaching the A table.
   1823 	 */
	if (--c_tbl->ct_wcnt == 0) {
		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
		if (--b_tbl->bt_wcnt == 0) {
			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
			if (--a_tbl->at_wcnt == 0) {
   1829 				TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
   1830 			}
   1831 		}
   1832 	}
   1833 
   1834 	pmap->pm_stats.wired_count--;
   1835 }
   1836 
   1837 /* pmap_pageable			INTERFACE
   1838  **
   1839  * Make the specified range of addresses within the given pmap,
 * 'pageable' or 'not-pageable'.  A non-pageable page must not cause
 * any faults when referenced.  A pageable page may.
   1842  *
   1843  * This routine is only advisory.  The VM system will call pmap_enter()
   1844  * to wire or unwire pages that are going to be made pageable before calling
   1845  * this function.  By the time this routine is called, everything that needs
   1846  * to be done has already been done.
   1847  */
   1848 void
   1849 pmap_pageable(pmap, start, end, pageable)
   1850 	pmap_t pmap;
   1851 	vm_offset_t start, end;
   1852 	boolean_t pageable;
   1853 {
   1854 	/* not implemented. */
   1855 }
   1856 
   1857 /* pmap_copy				INTERFACE
   1858  **
   1859  * Copy the mappings of a range of addresses in one pmap, into
   1860  * the destination address of another.
   1861  *
   1862  * This routine is advisory.  Should we one day decide that MMU tables
   1863  * may be shared by more than one pmap, this function should be used to
   1864  * link them together.  Until that day however, we do nothing.
   1865  */
   1866 void
   1867 pmap_copy(pmap_a, pmap_b, dst, len, src)
   1868 	pmap_t pmap_a, pmap_b;
   1869 	vm_offset_t dst;
   1870 	vm_size_t   len;
   1871 	vm_offset_t src;
   1872 {
   1873 	/* not implemented. */
   1874 }
   1875 
   1876 /* pmap_copy_page			INTERFACE
   1877  **
   1878  * Copy the contents of one physical page into another.
   1879  *
   1880  * This function makes use of two virtual pages allocated in sun3x_vm_init()
   1881  * (found in _startup.c) to map the two specified physical pages into the
   1882  * kernel address space.  It then uses bcopy() to copy one into the other.
   1883  */
   1884 void
   1885 pmap_copy_page(src, dst)
   1886 	vm_offset_t src, dst;
   1887 {
   1888 	PMAP_LOCK();
   1889 	if (tmp_vpages_inuse)
   1890 		panic("pmap_copy_page: temporary vpages are in use.");
   1891 	tmp_vpages_inuse++;
   1892 
   1893 	pmap_enter_kernel(tmp_vpages[0], src, VM_PROT_READ);
   1894 	pmap_enter_kernel(tmp_vpages[1], dst, VM_PROT_READ|VM_PROT_WRITE);
	bcopy((char *) tmp_vpages[0], (char *) tmp_vpages[1], NBPG);
   1896 	/* xxx - there's no real need to unmap the mappings is there? */
   1897 
   1898 	tmp_vpages_inuse--;
   1899 	PMAP_UNLOCK();
   1900 }
   1901 
   1902 /* pmap_zero_page			INTERFACE
   1903  **
   1904  * Zero the contents of the specified physical page.
   1905  *
   1906  * Uses one of the virtual pages allocated in sun3x_vm_init() (_startup.c)
   1907  * to map the specified page into the kernel address space.  Then uses
   1908  * bzero() to zero out the page.
   1909  */
   1910 void
   1911 pmap_zero_page(pa)
   1912 	vm_offset_t pa;
   1913 {
   1914 	PMAP_LOCK();
   1915 	if (tmp_vpages_inuse)
   1916 		panic("pmap_zero_page: temporary vpages are in use.");
   1917 	tmp_vpages_inuse++;
   1918 
   1919 	pmap_enter_kernel(tmp_vpages[0], pa, VM_PROT_READ|VM_PROT_WRITE);
   1920 	bzero((char *) tmp_vpages[0], NBPG);
   1921 	/* xxx - there's no real need to unmap the mapping is there? */
   1922 
   1923 	tmp_vpages_inuse--;
   1924 	PMAP_UNLOCK();
   1925 }
   1926 
   1927 /* pmap_collect			INTERFACE
   1928  **
   1929  * Called from the VM system to collect unused pages in the given
   1930  * pmap.
   1931  *
 * No one implements it, so I'm not even sure how it is supposed to
 * 'collect' anything anyway.  There's nothing to do but do what
 * everyone else does.
   1935  */
   1936 void
   1937 pmap_collect(pmap)
   1938 	pmap_t pmap;
   1939 {
   1940 	/* not implemented. */
   1941 }
   1942 
   1943 /* pmap_create			INTERFACE
   1944  **
   1945  * Create and return a pmap structure.
   1946  */
   1947 pmap_t
   1948 pmap_create(size)
   1949 	vm_size_t size;
   1950 {
   1951 	pmap_t	pmap;
   1952 
   1953 	if (size)
   1954 		return NULL;
   1955 
   1956 	pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
   1957 	pmap_pinit(pmap);
   1958 
   1959 	return pmap;
   1960 }
   1961 
   1962 /* pmap_pinit			INTERNAL
   1963  **
   1964  * Initialize a pmap structure.
   1965  */
   1966 void
   1967 pmap_pinit(pmap)
   1968 	pmap_t pmap;
   1969 {
   1970 	bzero(pmap, sizeof(struct pmap));
   1971 	pmap->pm_a_tbl = proc0Atmgr;
   1972 }
   1973 
   1974 /* pmap_release				INTERFACE
   1975  **
   1976  * Release any resources held by the given pmap.
   1977  *
 * This is the reverse analog to pmap_pinit.  It does not
 * necessarily mean that the pmap structure will be deallocated,
 * as it is in pmap_destroy.
   1981  */
   1982 void
   1983 pmap_release(pmap)
   1984 	pmap_t pmap;
   1985 {
   1986 	/* As long as the pmap contains no mappings,
   1987 	 * which always should be the case whenever
   1988 	 * this function is called, there really should
   1989 	 * be nothing to do.
   1990 	 */
   1991 #ifdef	PMAP_DEBUG
   1992 	if (pmap == NULL)
   1993 		return;
   1994 	if (pmap == pmap_kernel())
   1995 		panic("pmap_release: kernel pmap release requested.");
   1996 	if (pmap->pm_a_tbl != proc0Atmgr)
   1997 		panic("pmap_release: pmap not empty.");
   1998 #endif
   1999 }
   2000 
   2001 /* pmap_reference			INTERFACE
   2002  **
   2003  * Increment the reference count of a pmap.
   2004  */
   2005 void
   2006 pmap_reference(pmap)
   2007 	pmap_t pmap;
   2008 {
   2009 	if (pmap == NULL)
   2010 		return;
   2011 
   2012 	/* pmap_lock(pmap); */
   2013 	pmap->pm_refcount++;
   2014 	/* pmap_unlock(pmap); */
   2015 }
   2016 
   2017 /* pmap_dereference			INTERNAL
   2018  **
   2019  * Decrease the reference count on the given pmap
   2020  * by one and return the current count.
   2021  */
   2022 int
   2023 pmap_dereference(pmap)
   2024 	pmap_t pmap;
   2025 {
   2026 	int rtn;
   2027 
   2028 	if (pmap == NULL)
   2029 		return 0;
   2030 
   2031 	/* pmap_lock(pmap); */
   2032 	rtn = --pmap->pm_refcount;
   2033 	/* pmap_unlock(pmap); */
   2034 
   2035 	return rtn;
   2036 }
   2037 
   2038 /* pmap_destroy			INTERFACE
   2039  **
   2040  * Decrement a pmap's reference count and delete
   2041  * the pmap if it becomes zero.  Will be called
   2042  * only after all mappings have been removed.
   2043  */
   2044 void
   2045 pmap_destroy(pmap)
   2046 	pmap_t pmap;
   2047 {
   2048 	if (pmap == NULL)
   2049 		return;
   2050 	if (pmap == &kernel_pmap)
   2051 		panic("pmap_destroy: kernel_pmap!");
   2052 	if (pmap_dereference(pmap) == 0) {
   2053 		pmap_release(pmap);
   2054 		free(pmap, M_VMPMAP);
   2055 	}
   2056 }
   2057 
   2058 /* pmap_is_referenced			INTERFACE
   2059  **
   2060  * Determine if the given physical page has been
   2061  * referenced (read from [or written to.])
   2062  */
   2063 boolean_t
   2064 pmap_is_referenced(pa)
   2065 	vm_offset_t pa;
   2066 {
   2067 	pv_t      *pv;
   2068 	pv_elem_t *pve;
   2069 	struct mmu_short_pte_struct *pte;
   2070 
   2071 	if (!pv_initialized)
   2072 		return FALSE;
   2073 	if (!is_managed(pa))
   2074 		return FALSE;
   2075 
   2076 	pv = pa2pv(pa);
   2077 	/* Check the flags on the pv head.  If they are set,
   2078 	 * return immediately.  Otherwise a search must be done.
	 */
   2080 	if (pv->pv_flags & PV_FLAGS_USED)
   2081 		return TRUE;
   2082 	else
   2083 		/* Search through all pv elements pointing
   2084 		 * to this page and query their reference bits
   2085 		 */
   2086 		for (pve = pv->pv_head.lh_first;
		     pve != NULL;
		     pve = pve->pve_link.le_next) {
   2089 			pte = pve2pte(pve);
   2090 			if (MMU_PTE_USED(*pte))
   2091 				return TRUE;
   2092 		}
   2093 
   2094 	return FALSE;
   2095 }
   2096 
   2097 /* pmap_is_modified			INTERFACE
   2098  **
   2099  * Determine if the given physical page has been
   2100  * modified (written to.)
   2101  */
   2102 boolean_t
   2103 pmap_is_modified(pa)
   2104 	vm_offset_t pa;
   2105 {
   2106 	pv_t      *pv;
   2107 	pv_elem_t *pve;
   2108 
   2109 	if (!pv_initialized)
   2110 		return FALSE;
   2111 	if (!is_managed(pa))
   2112 		return FALSE;
   2113 
   2114 	/* see comments in pmap_is_referenced() */
   2115 	pv = pa2pv(pa);
   2116 	if (pv->pv_flags & PV_FLAGS_MDFY)
   2117 		return TRUE;
   2118 	else
   2119 		for (pve = pv->pv_head.lh_first; pve != NULL;
		     pve = pve->pve_link.le_next) {
   2121 			struct mmu_short_pte_struct *pte;
   2122 			pte = pve2pte(pve);
   2123 			if (MMU_PTE_MODIFIED(*pte))
   2124 				return TRUE;
   2125 		}
   2126 	return FALSE;
   2127 }
   2128 
   2129 /* pmap_page_protect			INTERFACE
   2130  **
   2131  * Applies the given protection to all mappings to the given
   2132  * physical page.
   2133  */
   2134 void
   2135 pmap_page_protect(pa, prot)
   2136 	vm_offset_t pa;
   2137 	vm_prot_t prot;
   2138 {
   2139 	pv_t      *pv;
   2140 	pv_elem_t *pve;
   2141 	struct mmu_short_pte_struct *pte;
   2142 
   2143 	if (!is_managed(pa))
   2144 		return;
   2145 
   2146 	pv = pa2pv(pa);
   2147 	for (pve = pv->pv_head.lh_first; pve != NULL;
   2148 		pve = pve->pve_link.le_next) {
   2149 		pte = pve2pte(pve);
   2150 		switch (prot) {
   2151 			case VM_PROT_ALL:
   2152 				/* do nothing */
   2153 				break;
   2154 			case VM_PROT_READ:
   2155 			case VM_PROT_READ|VM_PROT_EXECUTE:
   2156 				pte->attr.raw |= MMU_SHORT_PTE_WP;
   2157 				break;
   2158 			case VM_PROT_NONE:
   2159 				pmap_dereference_pte(pte);
   2160 				break;
   2161 			default:
   2162 				break;
   2163 		}
   2164 	}
   2165 }
   2166 
   2167 /* pmap_who_owns_pte			INTERNAL
   2168  **
   2169  * Called internally to find which pmap the given pte is
   2170  * a member of.
   2171  */
   2172 pmap_t
   2173 pmap_who_owns_pte(pte)
   2174 	mmu_short_pte_t *pte;
   2175 {
   2176 	c_tmgr_t *c_tbl;
   2177 
   2178 	c_tbl = pmap_find_c_tmgr(pte);
   2179 
   2180 	return c_tbl->ct_parent->bt_parent->at_parent;
   2181 }
   2182 
   2183 /* pmap_find_va			INTERNAL_X
   2184  **
   2185  * Called internally to find the virtual address that the
   2186  * given pte maps.
   2187  *
   2188  * Note: I don't know if this function will ever be used, but I've
   2189  * implemented it just in case.
   2190  */
   2191 vm_offset_t
   2192 pmap_find_va(pte)
   2193 	mmu_short_pte_t *pte;
   2194 {
   2195 	a_tmgr_t    *a_tbl;
   2196 	b_tmgr_t    *b_tbl;
   2197 	c_tmgr_t    *c_tbl;
   2198 	vm_offset_t     va = 0;
   2199 
	/* Find the virtual address by decoding table indices.
	 * Each successive decode reveals the address bits in
	 * least-to-most significant order.
	 *
	 * 31                               0
	 * +--------------------------------+
	 * |AAAAAAABBBBBBCCCCCCxxxxxxxxxxxxx|
	 * +--------------------------------+
	 *
	 * Start with the 'C' bits.
	 */
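	/* For example, assuming 8KB pages (so that MMU_TIC_SHIFT is 13,
	 * MMU_TIB_SHIFT is 19 and MMU_TIA_SHIFT is 25), the address is
	 * reassembled as:
	 *
	 *	va = (tia << 25) | (tib << 19) | (tic << 13)
	 */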
   2211 	va |= (pmap_find_tic(pte) << MMU_TIC_SHIFT);
   2212 	c_tbl = pmap_find_c_tmgr(pte);
   2213 	b_tbl = c_tbl->ct_parent;
   2214 
   2215 	/* Add the 'B' bits. */
   2216 	va |= (c_tbl->ct_pidx << MMU_TIB_SHIFT);
   2217 	a_tbl = b_tbl->bt_parent;
   2218 
   2219 	/* Add the 'A' bits. */
   2220 	va |= (b_tbl->bt_pidx << MMU_TIA_SHIFT);
   2221 
   2222 	return va;
   2223 }
   2224 
   2225 /**** These functions should be removed.  Structures have changed, making ****
 **** them unnecessary.                                                   ****/
   2227 
   2228 /* pmap_find_tic			INTERNAL
   2229  **
   2230  * Given the address of a pte, find the TIC (level 'C' table index) for
   2231  * the pte within its C table.
   2232  */
   2233 char
   2234 pmap_find_tic(pte)
   2235 	mmu_short_pte_t *pte;
   2236 {
	return ((pte - mmuCbase) % MMU_C_TBL_SIZE);
   2238 }
   2239 
   2240 /* pmap_find_tib			INTERNAL
   2241  **
   2242  * Given the address of dte known to belong to a B table, find the TIB
   2243  * (level 'B' table index) for the dte within its table.
   2244  */
   2245 char
   2246 pmap_find_tib(dte)
   2247 	mmu_short_dte_t *dte;
   2248 {
	return ((dte - mmuBbase) % MMU_B_TBL_SIZE);
   2250 }
   2251 
   2252 /* pmap_find_tia			INTERNAL
   2253  **
   2254  * Given the address of a dte known to belong to an A table, find the
 * TIA (level 'A' table index) for the dte within its table.
   2256  */
   2257 char
   2258 pmap_find_tia(dte)
   2259 	mmu_long_dte_t *dte;
   2260 {
	return ((dte - mmuAbase) % MMU_A_TBL_SIZE);
   2262 }
   2263 
   2264 /**** This one should stay ****/
   2265 
   2266 /* pmap_find_c_tmgr			INTERNAL
   2267  **
   2268  * Given a pte known to belong to a C table, return the address of that
   2269  * table's management structure.
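 *
 * The manager structures are allocated in an array parallel to the
 * tables themselves, so the pte's table number within the contiguous
 * table area indexes Ctmgrbase directly.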
   2270  */
   2271 c_tmgr_t *
   2272 pmap_find_c_tmgr(pte)
   2273 	mmu_short_pte_t *pte;
   2274 {
	return &Ctmgrbase[
		((pte - mmuCbase) / MMU_C_TBL_SIZE)
		];
   2278 }
   2279 
   2280 /* pmap_find_b_tmgr			INTERNAL
   2281  **
   2282  * Given a dte known to belong to a B table, return the address of that
   2283  * table's management structure.
   2284  */
   2285 b_tmgr_t *
   2286 pmap_find_b_tmgr(dte)
   2287 	mmu_short_dte_t *dte;
   2288 {
	return &Btmgrbase[
		((dte - mmuBbase) / MMU_B_TBL_SIZE)
		];
   2292 }
   2293 
   2294 /* pmap_find_a_tmgr			INTERNAL
   2295  **
   2296  * Given a dte known to belong to an A table, return the address of that
   2297  * table's management structure.
   2298  */
   2299 a_tmgr_t *
   2300 pmap_find_a_tmgr(dte)
   2301 	mmu_long_dte_t *dte;
   2302 {
	return &Atmgrbase[
		((dte - mmuAbase) / MMU_A_TBL_SIZE)
		];
   2306 }
   2307 
   2308 /**** End of functions that should be removed.                          ****
   2309  ****                                                                   ****/
   2310 
   2311 /* pmap_clear_modify			INTERFACE
   2312  **
   2313  * Clear the modification bit on the page at the specified
   2314  * physical address.
   2315  *
   2316  */
   2317 void
   2318 pmap_clear_modify(pa)
   2319 	vm_offset_t pa;
   2320 {
   2321 	pmap_clear_pv(pa, PV_FLAGS_MDFY);
   2322 }
   2323 
   2324 /* pmap_clear_reference			INTERFACE
   2325  **
   2326  * Clear the referenced bit on the page at the specified
   2327  * physical address.
   2328  */
   2329 void
   2330 pmap_clear_reference(pa)
   2331 	vm_offset_t pa;
   2332 {
   2333 	pmap_clear_pv(pa, PV_FLAGS_USED);
   2334 }
   2335 
   2336 /* pmap_clear_pv			INTERNAL
   2337  **
   2338  * Clears the specified flag from the specified physical address.
   2339  * (Used by pmap_clear_modify() and pmap_clear_reference().)
   2340  *
   2341  * Flag is one of:
   2342  *   PV_FLAGS_MDFY - Page modified bit.
   2343  *   PV_FLAGS_USED - Page used (referenced) bit.
   2344  *
   2345  * This routine must not only clear the flag on the pv list
   2346  * head.  It must also clear the bit on every pte in the pv
   2347  * list associated with the address.
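 *
 * Both copies must be cleared because pmap_is_modified() and
 * pmap_is_referenced() check the cached flags on the pv head first
 * and fall back to scanning the individual ptes.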
   2348  */
   2349 void
   2350 pmap_clear_pv(pa, flag)
   2351 	vm_offset_t pa;
   2352 	int flag;
   2353 {
   2354 	pv_t      *pv;
   2355 	pv_elem_t *pve;
   2356 	mmu_short_pte_t *pte;
   2357 
   2358 	pv = pa2pv(pa);
   2359 	pv->pv_flags &= ~(flag);
   2360 	for (pve = pv->pv_head.lh_first; pve != NULL;
	     pve = pve->pve_link.le_next) {
   2362 		pte = pve2pte(pve);
   2363 		pte->attr.raw &= ~(flag);
   2364 	}
   2365 }
   2366 
   2367 /* pmap_extract			INTERFACE
   2368  **
   2369  * Return the physical address mapped by the virtual address
   2370  * in the specified pmap or 0 if it is not known.
   2371  *
   2372  * Note: this function should also apply an exclusive lock
   2373  * on the pmap system during its duration.
   2374  */
   2375 vm_offset_t
   2376 pmap_extract(pmap, va)
   2377 	pmap_t      pmap;
   2378 	vm_offset_t va;
   2379 {
   2380 	int a_idx, b_idx, pte_idx;
   2381 	a_tmgr_t	*a_tbl;
   2382 	b_tmgr_t	*b_tbl;
   2383 	c_tmgr_t	*c_tbl;
   2384 	mmu_short_pte_t	*c_pte;
   2385 
   2386 	if (pmap == pmap_kernel())
   2387 		return pmap_extract_kernel(va);
   2388 	if (pmap == NULL)
   2389 		return 0;
   2390 
   2391 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
		&c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
   2393 		return 0;
   2394 
   2395 	if (MMU_VALID_DT(*c_pte))
   2396 		return MMU_PTE_PA(*c_pte);
   2397 	else
   2398 		return 0;
   2399 }
   2400 
   2401 /* pmap_extract_kernel		INTERNAL
   2402  **
 * Extract a translation from the kernel address space.
   2404  */
   2405 vm_offset_t
   2406 pmap_extract_kernel(va)
   2407 	vm_offset_t va;
   2408 {
   2409 	mmu_short_pte_t *pte;
   2410 
   2411 	pte = &kernCbase[(unsigned long) sun3x_btop(va - KERNBASE)];
   2412 	return MMU_PTE_PA(*pte);
   2413 }
   2414 
   2415 /* pmap_remove_kernel		INTERNAL
   2416  **
   2417  * Remove the mapping of a range of virtual addresses from the kernel map.
   2418  */
   2419 void
   2420 pmap_remove_kernel(start, end)
   2421 	vm_offset_t start;
   2422 	vm_offset_t end;
   2423 {
   2424 	start -= KERNBASE;
   2425 	end   -= KERNBASE;
   2426 	start = sun3x_round_page(start); /* round down */
   2427 	start = sun3x_btop(start);
   2428 	end   += MMU_PAGE_SIZE - 1;    /* next round operation will be up */
   2429 	end   = sun3x_round_page(end); /* round */
   2430 	end   = sun3x_btop(end);
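	/* For example (assuming 8KB pages): removing KERNBASE+0x2345
	 * through KERNBASE+0x4001 rounds to start page 1 and end page 3,
	 * so the loop below invalidates the ptes for pages 1 and 2.
	 */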
   2431 
   2432 	while (start < end)
   2433 		kernCbase[start++].attr.raw = MMU_DT_INVALID;
   2434 }
   2435 
   2436 /* pmap_remove			INTERFACE
   2437  **
   2438  * Remove the mapping of a range of virtual addresses from the given pmap.
   2439  */
   2440 void
   2441 pmap_remove(pmap, start, end)
   2442 	pmap_t pmap;
   2443 	vm_offset_t start;
   2444 	vm_offset_t end;
   2445 {
   2446 	if (pmap == pmap_kernel()) {
   2447 		pmap_remove_kernel(start, end);
   2448 		return;
   2449 	}
   2450 	pmap_remove_a(pmap->pm_a_tbl, start, end);
   2451 
   2452 	/* If we just modified the current address space,
   2453 	 * make sure to flush the MMU cache.
   2454 	 */
   2455 	if (curatbl == pmap->pm_a_tbl) {
   2456 		/* mmu_flusha(); */
   2457 		TBIA();
   2458 	}
   2459 }
   2460 
   2461 /* pmap_remove_a			INTERNAL
   2462  **
 * This is function number one in a set of three that remove a range
 * of memory in the most efficient manner by removing the highest-level
 * tables possible.  This particular function attempts to remove
   2466  * as many B tables as it can, delegating the remaining fragmented ranges to
   2467  * pmap_remove_b().
   2468  *
   2469  * It's ugly but will do for now.
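 *
 * The range is handled in three parts: a leading fragment before the
 * first A-table boundary (delegated to pmap_remove_b()), a run of
 * whole A-table entries whose B tables can be freed outright, and a
 * trailing fragment after the last boundary (also delegated):
 *
 *	start   nstart                             nend      end
 *	  |--------|----------- ... ----------------|---------|
 *	  fragment        whole A-table entries       fragment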
   2470  */
   2471 void
   2472 pmap_remove_a(a_tbl, start, end)
   2473 	a_tmgr_t *a_tbl;
   2474 	vm_offset_t start;
   2475 	vm_offset_t end;
   2476 {
   2477 	int idx;
   2478 	vm_offset_t nstart, nend, rstart;
   2479 	b_tmgr_t *b_tbl;
   2480 	mmu_long_dte_t  *a_dte;
   2481 	mmu_short_dte_t *b_dte;
   2482 
   2483 
   2484 	if (a_tbl == proc0Atmgr) /* If the pmap has no A table, return */
   2485 		return;
   2486 
   2487 	nstart = MMU_ROUND_UP_A(start);
   2488 	nend = MMU_ROUND_A(end);
   2489 
   2490 	if (start < nstart) {
   2491 		idx = MMU_TIA(start);
   2492 		a_dte = &a_tbl->at_dtbl[idx];
   2493 		if (MMU_VALID_DT(*a_dte)) {
   2494 			b_dte = (mmu_short_dte_t *) MMU_DTE_PA(*a_dte);
   2495 			b_dte = (mmu_short_dte_t *) mmu_ptov(b_dte);
   2496 			b_tbl = mmuB2tmgr(b_dte);
   2497 			if (end < nstart) {
   2498 				pmap_remove_b(b_tbl, start, end);
   2499 				return;
   2500 			} else {
   2501 				pmap_remove_b(b_tbl, start, nstart);
   2502 			}
   2503 		} else if (end < nstart) {
   2504 			return;
   2505 		}
   2506 	}
   2507 	if (nstart < nend) {
   2508 		idx = MMU_TIA(nstart);
   2509 		a_dte = &a_tbl->at_dtbl[idx];
   2510 		rstart = nstart;
   2511 		while (rstart < nend) {
   2512 			if (MMU_VALID_DT(*a_dte)) {
   2513 				b_dte = (mmu_short_dte_t *) MMU_DTE_PA(*a_dte);
   2514 				b_dte = (mmu_short_dte_t *) mmu_ptov(b_dte);
   2515 				b_tbl = mmuB2tmgr(b_dte);
   2516 				a_dte->attr.raw = MMU_DT_INVALID;
   2517 				a_tbl->at_ecnt--;
   2518 				free_b_table(b_tbl);
   2519 				TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   2520 				TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
   2521 			}
   2522 			a_dte++;
   2523 			rstart += MMU_TIA_RANGE;
   2524 		}
   2525 	}
   2526 	if (nend < end) {
   2527 		idx = MMU_TIA(nend);
   2528 		a_dte = &a_tbl->at_dtbl[idx];
   2529 		if (MMU_VALID_DT(*a_dte)) {
   2530 			b_dte = (mmu_short_dte_t *) MMU_DTE_PA(*a_dte);
   2531 			b_dte = (mmu_short_dte_t *) mmu_ptov(b_dte);
   2532 			b_tbl = mmuB2tmgr(b_dte);
   2533 			pmap_remove_b(b_tbl, nend, end);
   2534 		}
   2535 	}
   2536 }
   2537 
   2538 /* pmap_remove_b			INTERNAL
   2539  **
   2540  * Remove a range of addresses from an address space, trying to remove entire
   2541  * C tables if possible.
   2542  */
   2543 void
   2544 pmap_remove_b(b_tbl, start, end)
   2545 	b_tmgr_t *b_tbl;
   2546 	vm_offset_t start;
   2547 	vm_offset_t end;
   2548 {
   2549 	int idx;
   2550 	vm_offset_t nstart, nend, rstart;
   2551 	c_tmgr_t *c_tbl;
   2552 	mmu_short_dte_t  *b_dte;
   2553 	mmu_short_pte_t  *c_dte;
   2554 
   2555 
   2556 	nstart = MMU_ROUND_UP_B(start);
   2557 	nend = MMU_ROUND_B(end);
   2558 
   2559 	if (start < nstart) {
   2560 		idx = MMU_TIB(start);
   2561 		b_dte = &b_tbl->bt_dtbl[idx];
   2562 		if (MMU_VALID_DT(*b_dte)) {
   2563 			c_dte = (mmu_short_pte_t *) MMU_DTE_PA(*b_dte);
   2564 			c_dte = (mmu_short_pte_t *) mmu_ptov(c_dte);
   2565 			c_tbl = mmuC2tmgr(c_dte);
   2566 			if (end < nstart) {
   2567 				pmap_remove_c(c_tbl, start, end);
   2568 				return;
   2569 			} else {
   2570 				pmap_remove_c(c_tbl, start, nstart);
   2571 			}
   2572 		} else if (end < nstart) {
   2573 			return;
   2574 		}
   2575 	}
   2576 	if (nstart < nend) {
   2577 		idx = MMU_TIB(nstart);
   2578 		b_dte = &b_tbl->bt_dtbl[idx];
   2579 		rstart = nstart;
   2580 		while (rstart < nend) {
   2581 			if (MMU_VALID_DT(*b_dte)) {
   2582 				c_dte = (mmu_short_pte_t *) MMU_DTE_PA(*b_dte);
   2583 				c_dte = (mmu_short_pte_t *) mmu_ptov(c_dte);
   2584 				c_tbl = mmuC2tmgr(c_dte);
   2585 				b_dte->attr.raw = MMU_DT_INVALID;
   2586 				b_tbl->bt_ecnt--;
   2587 				free_c_table(c_tbl);
   2588 				TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   2589 				TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
   2590 			}
   2591 			b_dte++;
   2592 			rstart += MMU_TIB_RANGE;
   2593 		}
   2594 	}
   2595 	if (nend < end) {
   2596 		idx = MMU_TIB(nend);
   2597 		b_dte = &b_tbl->bt_dtbl[idx];
   2598 		if (MMU_VALID_DT(*b_dte)) {
   2599 			c_dte = (mmu_short_pte_t *) MMU_DTE_PA(*b_dte);
   2600 			c_dte = (mmu_short_pte_t *) mmu_ptov(c_dte);
   2601 			c_tbl = mmuC2tmgr(c_dte);
   2602 			pmap_remove_c(c_tbl, nend, end);
   2603 		}
   2604 	}
   2605 }
   2606 
   2607 /* pmap_remove_c			INTERNAL
   2608  **
   2609  * Remove a range of addresses from the given C table.
   2610  */
   2611 void
   2612 pmap_remove_c(c_tbl, start, end)
   2613 	c_tmgr_t *c_tbl;
   2614 	vm_offset_t start;
   2615 	vm_offset_t end;
   2616 {
   2617 	int idx;
   2618 	mmu_short_pte_t *c_pte;
   2619 
   2620 	idx = MMU_TIC(start);
   2621 	c_pte = &c_tbl->ct_dtbl[idx];
   2622 	while (start < end) {
		if (MMU_VALID_DT(*c_pte)) {
			pmap_remove_pte(c_pte);
			c_tbl->ct_ecnt--;
		}
   2626 		start += MMU_PAGE_SIZE;
   2627 		c_pte++;
   2628 	}
   2629 }
   2630 
   2631 /* is_managed				INTERNAL
   2632  **
   2633  * Determine if the given physical address is managed by the PV system.
   2634  * Note that this logic assumes that no one will ask for the status of
   2635  * addresses which lie in-between the memory banks on the 3/80.  If they
   2636  * do so, it will falsely report that it is managed.
   2637  */
   2638 boolean_t
   2639 is_managed(pa)
   2640 	vm_offset_t pa;
   2641 {
   2642 	if (pa >= avail_start && pa < avail_end)
   2643 		return TRUE;
   2644 	else
   2645 		return FALSE;
   2646 }
   2647 
   2648 /* pa2pv			INTERNAL
   2649  **
   2650  * Return the pv_list_head element which manages the given physical
   2651  * address.
   2652  */
   2653 pv_t *
   2654 pa2pv(pa)
   2655 	vm_offset_t pa;
   2656 {
   2657 	struct pmap_physmem_struct *bank = &avail_mem[0];
   2658 
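	/* Walk the bank list to the bank containing pa; pmem_pvbase
	 * gives the index of the bank's first page within pvbase.
	 */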
   2659 	while (pa >= bank->pmem_end)
   2660 		bank = bank->pmem_next;
   2661 
   2662 	pa -= bank->pmem_start;
   2663 	return &pvbase[bank->pmem_pvbase + sun3x_btop(pa)];
   2664 }
   2665 
   2666 /* pmap_bootstrap_alloc			INTERNAL
   2667  **
   2668  * Used internally for memory allocation at startup when malloc is not
   2669  * available.  This code will fail once it crosses the first memory
   2670  * bank boundary on the 3/80.  Hopefully by then however, the VM system
   2671  * will be in charge of allocation.
   2672  */
   2673 void *
   2674 pmap_bootstrap_alloc(size)
   2675 	int size;
   2676 {
   2677 	void *rtn;
   2678 
   2679 	rtn = (void *) virtual_avail;
   2680 
   2681 	/* While the size is greater than a page, map single pages,
   2682 	 * decreasing size until it is less than a page.
   2683 	 */
   2684 	while (size > NBPG) {
   2685 		(void) pmap_bootstrap_alloc(NBPG);
   2686 
   2687 		/* If the above code is ok, let's keep it.
   2688 		 * It looks cooler than:
   2689 		 * virtual_avail += NBPG;
   2690 		 * avail_start += NBPG;
   2691 		 * last_mapped = sun3x_trunc_page(avail_start);
		 * pmap_enter_kernel(last_mapped + KERNBASE, last_mapped,
   2693 		 *    VM_PROT_READ|VM_PROT_WRITE);
   2694 		 */
   2695 
   2696 		 size -= NBPG;
   2697 	}
   2698 	avail_start += size;
   2699 	virtual_avail += size;
   2700 
   2701 	/* did the allocation cross a page boundary? */
   2702 	if (last_mapped != sun3x_trunc_page(avail_start)) {
   2703 		last_mapped = sun3x_trunc_page(avail_start);
   2704 		pmap_enter_kernel(last_mapped + KERNBASE, last_mapped,
   2705 		    VM_PROT_READ|VM_PROT_WRITE);
   2706 	}
   2707 
   2708 	return rtn;
   2709 }
   2710 
/* pmap_bootstrap_aalign			INTERNAL
 **
 * Used to ensure that the next call to pmap_bootstrap_alloc() will return
 * a chunk of memory aligned to the specified size.
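 *
 * For example, if avail_start is 0x1234 and size is 0x100, this
 * allocates 0x100 - 0x34 = 0xcc filler bytes so that the next
 * allocation begins at 0x1300.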
   2715  */
   2716 void
   2717 pmap_bootstrap_aalign(size)
   2718 	int size;
   2719 {
   2720 	if (((unsigned int) avail_start % size) != 0) {
   2721 		(void) pmap_bootstrap_alloc(size -
   2722 		    ((unsigned int) (avail_start % size)));
   2723 	}
   2724 }
   2725 
   2726 #if 0
   2727 /* pmap_activate			INTERFACE
   2728  **
   2729  * Make the virtual to physical mappings contained in the given
   2730  * pmap the current map used by the system.
   2731  */
   2732 void
   2733 pmap_activate(pmap, pcbp)
   2734 pmap_t	pmap;
   2735 struct  pcb *pcbp;
   2736 {
   2737 	vm_offset_t	pa;
   2738 	/* Save the A table being loaded in 'curatbl'.
   2739 	 * pmap_remove() uses this variable to determine if a given A
   2740 	 * table is currently being used as the system map.  If so, it
   2741 	 * will issue an MMU cache flush whenever mappings are removed.
   2742 	 */
   2743 	curatbl = pmap->pm_a_tbl;
   2744 	/* call the locore routine to set the user root pointer table */
   2745 	pa = mmu_vtop(pmap->pm_a_tbl->at_dtbl);
   2746 	mmu_seturp(pa);
   2747 }
   2748 #endif
   2749 
   2750 /* pmap_pa_exists
   2751  **
   2752  * Used by the /dev/mem driver to see if a given PA is memory
   2753  * that can be mapped.  (The PA is not in a hole.)
   2754  */
   2755 int
   2756 pmap_pa_exists(pa)
   2757 	vm_offset_t pa;
   2758 {
   2759 	/* XXX - NOTYET */
   2760 	return (0);
   2761 }
   2762 
   2763 
   2764 /* pmap_update
   2765  **
   2766  * Apply any delayed changes scheduled for all pmaps immediately.
   2767  *
   2768  * No delayed operations are currently done in this pmap.
   2769  */
   2770 void
   2771 pmap_update()
   2772 {
   2773 	/* not implemented. */
   2774 }
   2775 
   2776 /* pmap_virtual_space			INTERFACE
   2777  **
   2778  * Return the current available range of virtual addresses in the
 * arguments provided.  Only really called once.
   2780  */
   2781 void
   2782 pmap_virtual_space(vstart, vend)
   2783 	vm_offset_t *vstart, *vend;
   2784 {
   2785 	*vstart = virtual_avail;
   2786 	*vend = virtual_end;
   2787 }
   2788 
   2789 /* pmap_free_pages			INTERFACE
   2790  **
   2791  * Return the number of physical pages still available.
   2792  *
   2793  * This is probably going to be a mess, but it's only called
   2794  * once and it's the only function left that I have to implement!
   2795  */
   2796 u_int
   2797 pmap_free_pages()
   2798 {
   2799 	int i;
   2800 	u_int left;
   2801 	vm_offset_t avail;
   2802 
   2803 	avail = sun3x_round_up_page(avail_start);
   2804 
   2805 	left = 0;
   2806 	i = 0;
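	/* Skip any banks that lie entirely below avail; their pages
	 * have already been handed out by the bootstrap allocator.
	 */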
   2807 	while (avail >= avail_mem[i].pmem_end) {
   2808 		if (avail_mem[i].pmem_next == NULL)
   2809 			return 0;
   2810 		i++;
   2811 	}
   2812 	while (i < SUN3X_80_MEM_BANKS) {
   2813 		if (avail < avail_mem[i].pmem_start) {
   2814 			/* Avail is inside a hole, march it
   2815 			 * up to the next bank.
   2816 			 */
   2817 			avail = avail_mem[i].pmem_start;
   2818 		}
   2819 		left += sun3x_btop(avail_mem[i].pmem_end - avail);
   2820 		if (avail_mem[i].pmem_next == NULL)
   2821 			break;
   2822 		i++;
   2823 	}
   2824 
   2825 	return left;
   2826 }
   2827 
   2828 /* pmap_page_index			INTERFACE
   2829  **
 * Return the index of the given physical page in a list of usable
   2831  * physical pages in the system.  Holes in physical memory may be counted
   2832  * if so desired.  As long as pmap_free_pages() and pmap_page_index()
   2833  * agree as to whether holes in memory do or do not count as valid pages,
   2834  * it really doesn't matter.  However, if you like to save a little
   2835  * memory, don't count holes as valid pages.  This is even more true when
   2836  * the holes are large.
   2837  *
   2838  * We will not count holes as valid pages.  We can generate page indexes
   2839  * that conform to this by using the memory bank structures initialized
   2840  * in pmap_alloc_pv().
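 *
 * For example, a PA one page into the second memory bank maps to
 * index avail_mem[1].pmem_pvbase + 1, pmem_pvbase being the running
 * count of usable pages in all preceding banks.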
   2841  */
   2842 int
   2843 pmap_page_index(pa)
   2844 	vm_offset_t pa;
   2845 {
   2846 	struct pmap_physmem_struct *bank = avail_mem;
   2847 
	while (pa >= bank->pmem_end)
   2849 		bank = bank->pmem_next;
   2850 	pa -= bank->pmem_start;
   2851 
   2852 	return (bank->pmem_pvbase + sun3x_btop(pa));
   2853 }
   2854 
   2855 /* pmap_next_page			INTERFACE
   2856  **
   2857  * Place the physical address of the next available page in the
   2858  * argument given.  Returns FALSE if there are no more pages left.
   2859  *
   2860  * This function must jump over any holes in physical memory.
   2861  * Once this function is used, any use of pmap_bootstrap_alloc()
   2862  * is a sin.  Sinners will be punished with erratic behavior.
   2863  */
   2864 boolean_t
   2865 pmap_next_page(pa)
   2866 	vm_offset_t *pa;
   2867 {
   2868 	static boolean_t initialized = FALSE;
   2869 	static struct pmap_physmem_struct *curbank = avail_mem;
   2870 
   2871 	if (!initialized) {
   2872 		pmap_bootstrap_aalign(NBPG);
   2873 		initialized = TRUE;
   2874 	}
   2875 
	if (avail_start >= curbank->pmem_end) {
		if (curbank->pmem_next == NULL)
			return FALSE;
		else {
			curbank = curbank->pmem_next;
			avail_start = curbank->pmem_start;
		}
	}
   2883 
   2884 	*pa = avail_start;
   2885 	avail_start += NBPG;
   2886 	return TRUE;
   2887 }
   2888 
   2889 /************************ SUN3 COMPATIBILITY ROUTINES ********************
 * The following routines are only used by DDB for tricky kernel         *
   2891  * text operations in db_memrw.c.  They are provided for sun3            *
   2892  * compatibility.                                                        *
   2893  *************************************************************************/
   2894 /* get_pte			INTERNAL
   2895  **
 * Return the page descriptor that describes the kernel mapping
   2897  * of the given virtual address.
   2898  *
   2899  * XXX - It might be nice if this worked outside of the MMU
   2900  * structures we manage.  (Could do it with ptest). -gwr
   2901  */
   2902 vm_offset_t
   2903 get_pte(va)
   2904 	vm_offset_t va;
   2905 {
   2906 	u_long idx;
   2907 
   2908 	idx = (unsigned long) sun3x_btop(mmu_vtop(va));
   2909 	return (kernCbase[idx].attr.raw);
   2910 }
   2911 
   2912 /* set_pte			INTERNAL
   2913  **
   2914  * Set the page descriptor that describes the kernel mapping
   2915  * of the given virtual address.
   2916  */
   2917 void
   2918 set_pte(va, pte)
   2919 	vm_offset_t va;
   2920 	vm_offset_t pte;
   2921 {
   2922 	u_long idx;
   2923 
   2924 	idx = (unsigned long) sun3x_btop(mmu_vtop(va));
   2925 	kernCbase[idx].attr.raw = pte;
   2926 }
   2927 
   2928 #ifdef NOT_YET
   2929 /* and maybe not ever */
   2930 /************************** LOW-LEVEL ROUTINES **************************
 * These routines will eventually be rewritten into assembly and placed *
   2932  * in locore.s.  They are here now as stubs so that the pmap module can *
   2933  * be linked as a standalone user program for testing.                  *
   2934  ************************************************************************/
   2935 /* flush_atc_crp			INTERNAL
   2936  **
   2937  * Flush all page descriptors derived from the given CPU Root Pointer
   2938  * (CRP), or 'A' table as it is known here, from the 68851's automatic
   2939  * cache.
   2940  */
   2941 void
   2942 flush_atc_crp(a_tbl)
   2943 {
   2944 	mmu_long_rp_t rp;
   2945 
   2946 	/* Create a temporary root table pointer that points to the
   2947 	 * given A table.
   2948 	 */
   2949 	rp.attr.raw = ~MMU_LONG_RP_LU;
   2950 	rp.addr.raw = (unsigned int) a_tbl;
   2951 
   2952 	mmu_pflushr(&rp);
   2953 	/* mmu_pflushr:
   2954 	 * 	movel   sp(4)@,a0
   2955 	 * 	pflushr a0@
   2956 	 *	rts
   2957 	 */
   2958 }
   2959 #endif /* NOT_YET */
   2960