      1 /*	$NetBSD: pmap.c,v 1.7 1997/02/12 23:09:32 gwr Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1996 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jeremy Cooper.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * XXX These comments aren't quite accurate.  Need to change.
      41  * The sun3x uses an MC68851-compatible Memory Management Unit that is
      42  * built into the 68030 CPU (see the note about the note, below).  It
      43  * maps virtual to physical addresses using
     43  * a multi-level table lookup, which is stored in the very memory that
     44  * it maps.  The number of levels of lookup is configurable from one
     45  * to four.  In this implementation, we use three, named 'A' through 'C'.
     46  *
     47  * The MMU translates virtual addresses into physical addresses by
      48  * traversing these tables in a process called a 'table walk'.  The most
     49  * significant 7 bits of the Virtual Address ('VA') being translated are
     50  * used as an index into the level A table, whose base in physical memory
     51  * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
     52  * address found at that index in the A table is used as the base
     53  * address for the next table, the B table.  The next six bits of the VA are
     54  * used as an index into the B table, which in turn gives the base address
     55  * of the third and final C table.
     56  *
     57  * The next six bits of the VA are used as an index into the C table to
      58  * locate a Page Table Entry (PTE).  The PTE contains the physical base
      59  * address of a page, to which the remaining 13 bits of the VA are added
      60  * as an offset, producing the mapped physical address.
     61  *
      62  * To map the entire memory space in this manner (one A table, 128 B
      63  * tables and 8192 C tables) would require roughly two megabytes of
      64  * page tables per process - quite expensive.  Instead we will
     64  * allocate a fixed but considerably smaller space for the page tables at
     65  * the time the VM system is initialized.  When the pmap code is asked by
     66  * the kernel to map a VA to a PA, it allocates tables as needed from this
     67  * pool.  When there are no more tables in the pool, tables are stolen
     68  * from the oldest mapped entries in the tree.  This is only possible
     69  * because all memory mappings are stored in the kernel memory map
     70  * structures, independent of the pmap structures.  A VA which references
     71  * one of these invalidated maps will cause a page fault.  The kernel
     72  * will determine that the page fault was caused by a task using a valid
     73  * VA, but for some reason (which does not concern it), that address was
     74  * not mapped.  It will ask the pmap code to re-map the entry and then
     75  * it will resume executing the faulting task.
     76  *
     77  * In this manner the most efficient use of the page table space is
     78  * achieved.  Tasks which do not execute often will have their tables
     79  * stolen and reused by tasks which execute more frequently.  The best
     80  * size for the page table pool will probably be determined by
     81  * experimentation.
     82  *
     83  * You read all of the comments so far.  Good for you.
     84  * Now go play!
     85  */
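
/*
 * A sketch of the va-to-table-index arithmetic described above (for
 * illustration only; the kernel's real index macros, e.g. MMU_TIA()
 * which is used later in this file, live in the MMU header files):
 *
 *	A index = (va >> 25) & 0x7f;	(top 7 bits)
 *	B index = (va >> 19) & 0x3f;	(next 6 bits)
 *	C index = (va >> 13) & 0x3f;	(next 6 bits)
 *	offset  =  va & 0x1fff;		(low 13 bits, within the page)
 *
 * For example, the 32-bit VA 0xF8002000 yields A index 124, B index 0,
 * C index 1 and offset 0.
 */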
     86 
     87 /*** A Note About the 68851 Address Translation Cache
     88  * The MC68851 has a 64 entry cache, called the Address Translation Cache
     89  * or 'ATC'.  This cache stores the most recently used page descriptors
     90  * accessed by the MMU when it does translations.  Using a marker called a
     91  * 'task alias' the MMU can store the descriptors from 8 different table
     92  * spaces concurrently.  The task alias is associated with the base
     93  * address of the level A table of that address space.  When an address
     94  * space is currently active (the CRP currently points to its A table)
     95  * the only cached descriptors that will be obeyed are ones which have a
     96  * matching task alias of the current space associated with them.
     97  *
     98  * Since the cache is always consulted before any table lookups are done,
     99  * it is important that it accurately reflect the state of the MMU tables.
    100  * Whenever a change has been made to a table that has been loaded into
    101  * the MMU, the code must be sure to flush any cached entries that are
    102  * affected by the change.  These instances are documented in the code at
    103  * various points.
    104  */
    105 /*** A Note About the Note About the 68851 Address Translation Cache
    106  * 4 months into this code I discovered that the sun3x does not have
     107  * a MC68851 chip. Instead, it has a version of this MMU that is part of
     108  * the 68030 CPU.
     109  * Although it behaves very similarly to the 68851, it only has 1 task
    110  * alias and a 22 entry cache.  So sadly (or happily), the previous note
    111  * does not apply to the sun3x pmap.
    112  */
    113 
    114 #include <sys/param.h>
    115 #include <sys/systm.h>
    116 #include <sys/proc.h>
    117 #include <sys/malloc.h>
    118 #include <sys/user.h>
    119 #include <sys/queue.h>
    120 
    121 #include <vm/vm.h>
    122 #include <vm/vm_kern.h>
    123 #include <vm/vm_page.h>
    124 
    125 #include <machine/cpu.h>
    126 #include <machine/pmap.h>
    127 #include <machine/pte.h>
    128 #include <machine/machdep.h>
    129 #include <machine/mon.h>
    130 
    131 #define	DEBUG	/* XXX: Yes, for now. */
    132 
    133 #include "pmap_pvt.h"
    134 
    135 /* XXX - What headers declare these? */
    136 extern struct pcb *curpcb;
    137 extern int physmem;
    138 
    139 extern void copypage __P((const void*, void*));
    140 extern void zeropage __P((void*));
    141 
    142 /* Defined in locore.s */
    143 extern char kernel_text[];
    144 
    145 /* Defined by the linker */
    146 extern char etext[], edata[], end[];
    147 extern char *esym;	/* DDB */
    148 
    149 /*************************** DEBUGGING DEFINITIONS ***********************
    150  * Macros, preprocessor defines and variables used in debugging can make *
    151  * code hard to read.  Anything used exclusively for debugging purposes  *
    152  * is defined here to avoid having such mess scattered around the file.  *
    153  *************************************************************************/
    154 #ifdef	DEBUG
    155 /*
    156  * To aid the debugging process, macros should be expanded into smaller steps
    157  * that accomplish the same goal, yet provide convenient places for placing
    158  * breakpoints.  When this code is compiled with DEBUG mode defined, the
    159  * 'INLINE' keyword is defined to an empty string.  This way, any function
    160  * defined to be a 'static INLINE' will become 'outlined' and compiled as
    161  * a separate function, which is much easier to debug.
    162  */
    163 #define	INLINE	/* nothing */
    164 
    165 /*
    166  * It is sometimes convenient to watch the activity of a particular table
    167  * in the system.  The following variables are used for that purpose.
    168  */
    169 a_tmgr_t *pmap_watch_atbl = 0;
    170 b_tmgr_t *pmap_watch_btbl = 0;
    171 c_tmgr_t *pmap_watch_ctbl = 0;
    172 
    173 int pmap_debug = 0;
     174 #define DPRINT(args) do { if (pmap_debug) printf args; } while (0)
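
/*
 * Usage note: the double parentheses let a variable-length argument
 * list pass through the macro as a single argument, e.g.:
 *
 *	DPRINT(("pmap_enter: va=0x%lx pa=0x%lx\n", va, pa));
 */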
    175 
    176 #else	/********** Stuff below is defined if NOT debugging **************/
    177 
    178 #define	INLINE	inline
     179 #define	DPRINT(args)	/* nada */
    180 
    181 #endif
    182 /*********************** END OF DEBUGGING DEFINITIONS ********************/
    183 
    184 /*** Management Structure - Memory Layout
    185  * For every MMU table in the sun3x pmap system there must be a way to
    186  * manage it; we must know which process is using it, what other tables
    187  * depend on it, and whether or not it contains any locked pages.  This
     188  * is solved by the creation of 'table management' or 'tmgr'
     189  * structures, one for each MMU table in the system.
    190  *
    191  *                        MAP OF MEMORY USED BY THE PMAP SYSTEM
    192  *
    193  *      towards lower memory
    194  * kernAbase -> +-------------------------------------------------------+
    195  *              | Kernel     MMU A level table                          |
    196  * kernBbase -> +-------------------------------------------------------+
    197  *              | Kernel     MMU B level tables                         |
    198  * kernCbase -> +-------------------------------------------------------+
    199  *              |                                                       |
    200  *              | Kernel     MMU C level tables                         |
    201  *              |                                                       |
    202  * mmuCbase  -> +-------------------------------------------------------+
    203  *              | User       MMU C level tables                         |
    204  * mmuAbase  -> +-------------------------------------------------------+
    205  *              |                                                       |
    206  *              | User       MMU A level tables                         |
    207  *              |                                                       |
    208  * mmuBbase  -> +-------------------------------------------------------+
    209  *              | User       MMU B level tables                         |
    210  * tmgrAbase -> +-------------------------------------------------------+
    211  *              |  TMGR A level table structures                        |
    212  * tmgrBbase -> +-------------------------------------------------------+
    213  *              |  TMGR B level table structures                        |
    214  * tmgrCbase -> +-------------------------------------------------------+
    215  *              |  TMGR C level table structures                        |
    216  * pvbase    -> +-------------------------------------------------------+
    217  *              |  Physical to Virtual mapping table (list heads)       |
    218  * pvebase   -> +-------------------------------------------------------+
    219  *              |  Physical to Virtual mapping table (list elements)    |
    220  *              |                                                       |
    221  *              +-------------------------------------------------------+
    222  *      towards higher memory
    223  *
    224  * For every A table in the MMU A area, there will be a corresponding
    225  * a_tmgr structure in the TMGR A area.  The same will be true for
    226  * the B and C tables.  This arrangement will make it easy to find the
     227  * controlling tmgr structure for any table in the system by use of
    228  * (relatively) simple macros.
    229  */
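
/*
 * A sketch of those "(relatively) simple macros" (the real, bounds-checked
 * versions appear further below as the mmuX2tmgr() functions):
 *
 *	tmgr = &Xtmgrbase[(tbl - mmuXbase) / MMU_X_TBL_SIZE]
 *
 * where X is one of A, B or C.  The pointer subtraction yields a count
 * of descriptors, so dividing by the per-table descriptor count gives
 * the table index.
 */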
    230 
    231 /*
    232  * This holds the CRP currently loaded into the MMU.
    233  */
    234 struct mmu_rootptr kernel_crp;
    235 
    236 /* Global variables for storing the base addresses for the areas
    237  * labeled above.
    238  */
    239 static vm_offset_t  	kernAphys;
    240 static mmu_long_dte_t	*kernAbase;
    241 static mmu_short_dte_t	*kernBbase;
    242 static mmu_short_pte_t	*kernCbase;
    243 static mmu_long_dte_t	*mmuAbase;
    244 static mmu_short_dte_t	*mmuBbase;
    245 static mmu_short_pte_t	*mmuCbase;
    246 static a_tmgr_t		*Atmgrbase;
    247 static b_tmgr_t		*Btmgrbase;
    248 static c_tmgr_t		*Ctmgrbase;
    249 static pv_t		*pvbase;
    250 static pv_elem_t	*pvebase;
    251 struct pmap		kernel_pmap;
    252 
    253 /* Just all around global variables.
    254  */
    255 static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
    256 static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
    257 static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
    258 
    259 
    260 /*
    261  * Flags used to mark the safety/availability of certain operations or
    262  * resources.
    263  */
    264 static boolean_t
    265     pv_initialized = FALSE,          /* PV system has been initialized. */
    266     tmp_vpages_inuse = FALSE,        /*
    267                                       * Temp. virtual pages are in use.
     268                                       * (see pmap_copy_page, et al.)
    269                                       */
    270     bootstrap_alloc_enabled = FALSE; /* Safe to use pmap_bootstrap_alloc(). */
    271 
    272 /*
    273  * XXX:  For now, retain the traditional variables that were
    274  * used in the old pmap/vm interface (without NONCONTIG).
    275  */
    276 /* Kernel virtual address space available: */
    277 vm_offset_t	virtual_avail, virtual_end;
    278 /* Physical address space available: */
    279 vm_offset_t	avail_start, avail_end;
    280 
     281 /* This keeps track of the end of the contiguously mapped range. */
    282 vm_offset_t virtual_contig_end;
    283 
    284 /* Physical address used by pmap_next_page() */
    285 vm_offset_t avail_next;
    286 
    287 /* These are used by pmap_copy_page(), etc. */
    288 vm_offset_t tmp_vpages[2];
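
/*
 * A hedged sketch of how the tmp_vpages might be used by
 * pmap_copy_page() (the real function lives elsewhere in this module;
 * the locals and the exact protection bits here are assumptions):
 */
#if 0
	tmp_vpages_inuse = TRUE;			/* claim the pages   */
	pmap_enter_kernel(tmp_vpages[0], src_pa, VM_PROT_READ);
	pmap_enter_kernel(tmp_vpages[1], dst_pa, VM_PROT_READ|VM_PROT_WRITE);
	copypage((void *)tmp_vpages[0], (void *)tmp_vpages[1]);
	tmp_vpages_inuse = FALSE;			/* release the pages */
#endif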
    289 
    290 /*
    291  * The 3/80 is the only member of the sun3x family that has non-contiguous
    292  * physical memory.  Memory is divided into 4 banks which are physically
    293  * locatable on the system board.  Although the size of these banks varies
    294  * with the size of memory they contain, their base addresses are
     295  * permanently fixed.  The following structure, which describes these
    296  * banks, is initialized by pmap_bootstrap() after it reads from a similar
    297  * structure provided by the ROM Monitor.
    298  *
    299  * For the other machines in the sun3x architecture which do have contiguous
    300  * RAM, this list will have only one entry, which will describe the entire
    301  * range of available memory.
    302  */
    303 struct pmap_physmem_struct avail_mem[SUN3X_80_MEM_BANKS];
    304 u_int total_phys_mem;
    305 
    306 /*************************************************************************/
    307 
    308 /*
    309  * XXX - Should "tune" these based on statistics.
    310  *
    311  * My first guess about the relative numbers of these needed is
    312  * based on the fact that a "typical" process will have several
    313  * pages mapped at low virtual addresses (text, data, bss), then
    314  * some mapped shared libraries, and then some stack pages mapped
    315  * near the high end of the VA space.  Each process can use only
    316  * one A table, and most will use only two B tables (maybe three)
    317  * and probably about four C tables.  Therefore, the first guess
    318  * at the relative numbers of these needed is 1:2:4 -gwr
    319  *
    320  * The number of C tables needed is closely related to the amount
    321  * of physical memory available plus a certain amount attributable
    322  * to the use of double mappings.  With a few simulation statistics
    323  * we can find a reasonably good estimation of this unknown value.
    324  * Armed with that and the above ratios, we have a good idea of what
    325  * is needed at each level. -j
    326  *
     327  * Note: It is not the physical memory size, but the total mapped
    328  * virtual space required by the combined working sets of all the
    329  * currently _runnable_ processes.  (Sleeping ones don't count.)
    330  * The amount of physical memory should be irrelevant. -gwr
    331  */
    332 #define NUM_A_TABLES	16
    333 #define NUM_B_TABLES	32
    334 #define NUM_C_TABLES	64
    335 
    336 /*
    337  * This determines our total virtual mapping capacity.
    338  * Yes, it is a FIXED value so we can pre-allocate.
    339  */
    340 #define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)
    341 #define	NUM_KERN_PTES	\
     342 	(sun3x_btop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS))
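
/*
 * Worked example (assuming MMU_C_TBL_SIZE == 64, the 6-bit C index,
 * and the sun3x's 8KB page size): NUM_USER_PTES is 64 * 64 = 4096
 * PTEs, so at most 32MB of user virtual space can be mapped at any
 * one time before tables must be stolen and recycled.
 */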
    343 
     344 /*************************** MISCELLANEOUS MACROS ************************/
    345 #define PMAP_LOCK()	;	/* Nothing, for now */
    346 #define PMAP_UNLOCK()	;	/* same. */
    347 #define	NULL 0
    348 
    349 static INLINE void *      mmu_ptov __P((vm_offset_t pa));
    350 static INLINE vm_offset_t mmu_vtop __P((void * va));
    351 
    352 #if	0
    353 static INLINE a_tmgr_t * mmuA2tmgr __P((mmu_long_dte_t *));
    354 #endif
    355 static INLINE b_tmgr_t * mmuB2tmgr __P((mmu_short_dte_t *));
    356 static INLINE c_tmgr_t * mmuC2tmgr __P((mmu_short_pte_t *));
    357 
    358 static INLINE pv_t *pa2pv __P((vm_offset_t pa));
    359 static INLINE int   pteidx __P((mmu_short_pte_t *));
    360 static INLINE pmap_t current_pmap __P((void));
    361 
    362 /*
    363  * We can always convert between virtual and physical addresses
    364  * for anything in the range [KERNBASE ... avail_start] because
    365  * that range is GUARANTEED to be mapped linearly.
    366  * We rely heavily upon this feature!
    367  */
    368 static INLINE void *
    369 mmu_ptov(pa)
    370 	vm_offset_t pa;
    371 {
    372 	register vm_offset_t va;
    373 
    374 	va = (pa + KERNBASE);
    375 #ifdef	DEBUG
    376 	if ((va < KERNBASE) || (va >= virtual_contig_end))
    377 		panic("mmu_ptov");
    378 #endif
    379 	return ((void*)va);
    380 }
    381 static INLINE vm_offset_t
    382 mmu_vtop(vva)
    383 	void *vva;
    384 {
    385 	register vm_offset_t va;
    386 
    387 	va = (vm_offset_t)vva;
    388 #ifdef	DEBUG
    389 	if ((va < KERNBASE) || (va >= virtual_contig_end))
     390 		panic("mmu_vtop");
    391 #endif
    392 	return (va - KERNBASE);
    393 }
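
/*
 * Worked example (assuming KERNBASE == 0xF8000000, as on the sun3x):
 * mmu_ptov(0x2000) returns (void *)0xF8002000, and mmu_vtop() of that
 * address returns 0x2000 again.  Under DEBUG, both panic if the
 * address falls outside the linearly mapped range.
 */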
    394 
    395 /*
    396  * These macros map MMU tables to their corresponding manager structures.
    397  * They are needed quite often because many of the pointers in the pmap
    398  * system reference MMU tables and not the structures that control them.
    399  * There needs to be a way to find one when given the other and these
    400  * macros do so by taking advantage of the memory layout described above.
    401  * Here's a quick step through the first macro, mmuA2tmgr():
    402  *
    403  * 1) find the offset of the given MMU A table from the base of its table
    404  *    pool (table - mmuAbase).
     405  * 2) convert this offset into a table index by dividing it by the
     406  *    number of descriptors in one MMU 'A' table (MMU_A_TBL_SIZE).
     407  *    (The pointer subtraction in step 1 already divides out
     408  *    sizeof(mmu_long_dte_t), so no byte-size factor is needed.)
    407  * 3) use this index to select the corresponding 'A' table manager
    408  *    structure from the 'A' table manager pool (Atmgrbase[index]).
    409  */
    410 /*  This function is not currently used. */
    411 #if	0
    412 static INLINE a_tmgr_t *
    413 mmuA2tmgr(mmuAtbl)
    414 	mmu_long_dte_t *mmuAtbl;
    415 {
    416 	register int idx;
    417 
    418 	/* Which table is this in? */
    419 	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
    420 #ifdef	DEBUG
    421 	if ((idx < 0) || (idx >= NUM_A_TABLES))
    422 		panic("mmuA2tmgr");
    423 #endif
    424 	return (&Atmgrbase[idx]);
    425 }
    426 #endif	/* 0 */
    427 
    428 static INLINE b_tmgr_t *
    429 mmuB2tmgr(mmuBtbl)
    430 	mmu_short_dte_t *mmuBtbl;
    431 {
    432 	register int idx;
    433 
    434 	/* Which table is this in? */
    435 	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
    436 #ifdef	DEBUG
    437 	if ((idx < 0) || (idx >= NUM_B_TABLES))
    438 		panic("mmuB2tmgr");
    439 #endif
    440 	return (&Btmgrbase[idx]);
    441 }
    442 
    443 /* mmuC2tmgr			INTERNAL
    444  **
    445  * Given a pte known to belong to a C table, return the address of
    446  * that table's management structure.
    447  */
    448 static INLINE c_tmgr_t *
    449 mmuC2tmgr(mmuCtbl)
    450 	mmu_short_pte_t *mmuCtbl;
    451 {
    452 	register int idx;
    453 
    454 	/* Which table is this in? */
    455 	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
    456 #ifdef	DEBUG
    457 	if ((idx < 0) || (idx >= NUM_C_TABLES))
    458 		panic("mmuC2tmgr");
    459 #endif
    460 	return (&Ctmgrbase[idx]);
    461 }
    462 
    463 /* I don't think this is actually used.
    464  * #define pte2pv(pte) \
    465  *	(pa2pv(\
    466  *		(pte)->attr.raw & MMU_SHORT_PTE_BASEADDR\
    467  *	))
    468  */
    469 /* This is now a function call
    470  * #define pa2pv(pa) \
    471  *	(&pvbase[(unsigned long)\
    472  *		sun3x_btop(pa)\
    473  *	])
    474  */
    475 
    476 /* pa2pv			INTERNAL
    477  **
    478  * Return the pv_list_head element which manages the given physical
    479  * address.
    480  */
    481 static INLINE pv_t *
    482 pa2pv(pa)
    483 	vm_offset_t pa;
    484 {
    485 	register struct pmap_physmem_struct *bank;
    486 	register int idx;
    487 
    488 	bank = &avail_mem[0];
    489 	while (pa >= bank->pmem_end)
    490 		bank = bank->pmem_next;
    491 
    492 	pa -= bank->pmem_start;
    493 	idx = bank->pmem_pvbase + sun3x_btop(pa);
    494 #ifdef	DEBUG
    495 	if ((idx < 0) || (idx >= physmem))
    496 		panic("pa2pv");
    497 #endif
    498 	return &pvbase[idx];
    499 }
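
/*
 * Worked example (hypothetical bank layout, for illustration only):
 * if bank 0 covers [0, 4MB) and bank 1 covers [16MB, 20MB), then
 * bank 1's pmem_pvbase is sun3x_btop(4MB) == 512 (at 8KB pages), and
 * pa2pv(16MB + 8KB) returns &pvbase[513], even though the two banks
 * are not physically contiguous.
 */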
    500 
    501 /* pteidx			INTERNAL
    502  **
    503  * Return the index of the given PTE within the entire fixed table of
    504  * PTEs.
    505  */
    506 static INLINE int
    507 pteidx(pte)
    508 	mmu_short_pte_t *pte;
    509 {
    510 	return (pte - kernCbase);
    511 }
    512 
    513 /*
    514  * This just offers a place to put some debugging checks.
    515  */
    516 static INLINE pmap_t
    517 current_pmap()
    518 {
    519 	struct proc *p;
    520 	struct vmspace *vm;
    521 	vm_map_t	map;
    522 	pmap_t	pmap;
    523 
    524 	p = curproc;	/* XXX */
    525 	vm = p->p_vmspace;
    526 	map = &vm->vm_map;
    527 	pmap = vm_map_pmap(map);
    528 
    529 	return (pmap);
    530 }
    531 
    532 
    533 /*************************** FUNCTION DEFINITIONS ************************
    534  * These appear here merely for the compiler to enforce type checking on *
    535  * all function calls.                                                   *
    536  *************************************************************************/
    537 
    538 /** External functions
    539  ** - functions used within this module but written elsewhere.
    540  **   both of these functions are in locore.s
    541  ** XXX - These functions were later replaced with their more cryptic
    542  **       hp300 counterparts.  They may be removed now.
    543  **/
    544 #if	0	/* deprecated mmu */
    545 void   mmu_seturp __P((vm_offset_t));
    546 void   mmu_flush __P((int, vm_offset_t));
    547 void   mmu_flusha __P((void));
    548 #endif	/* 0 */
    549 
    550 /** Internal functions
    551  ** - all functions used only within this module are defined in
    552  **   pmap_pvt.h
    553  **/
    554 
    555 /** Interface functions
    556  ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
    557  **   defined.
    558  **/
    559 #ifdef INCLUDED_IN_PMAP_H
    560 void   pmap_bootstrap __P((void));
    561 void  *pmap_bootstrap_alloc __P((int));
    562 void   pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
    563 pmap_t pmap_create __P((vm_size_t));
    564 void   pmap_destroy __P((pmap_t));
    565 void   pmap_reference __P((pmap_t));
    566 boolean_t   pmap_is_referenced __P((vm_offset_t));
    567 boolean_t   pmap_is_modified __P((vm_offset_t));
    568 void   pmap_clear_modify __P((vm_offset_t));
    569 vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
    570 void   pmap_activate __P((pmap_t));
    571 int    pmap_page_index __P((vm_offset_t));
    572 u_int  pmap_free_pages __P((void));
    573 #endif /* INCLUDED_IN_PMAP_H */
    574 
    575 /********************************** CODE ********************************
    576  * Functions that are called from other parts of the kernel are labeled *
    577  * as 'INTERFACE' functions.  Functions that are only called from       *
    578  * within the pmap module are labeled as 'INTERNAL' functions.          *
    579  * Functions that are internal, but are not (currently) used at all are *
    580  * labeled 'INTERNAL_X'.                                                *
    581  ************************************************************************/
    582 
    583 /* pmap_bootstrap			INTERNAL
    584  **
    585  * Initializes the pmap system.  Called at boot time from sun3x_vm_init()
    586  * in _startup.c.
    587  *
    588  * Reminder: having a pmap_bootstrap_alloc() and also having the VM
    589  *           system implement pmap_steal_memory() is redundant.
    590  *           Don't release this code without removing one or the other!
    591  */
    592 void
    593 pmap_bootstrap(nextva)
    594 	vm_offset_t nextva;
    595 {
    596 	struct physmemory *membank;
    597 	struct pmap_physmem_struct *pmap_membank;
    598 	vm_offset_t va, pa, eva;
    599 	int b, c, i, j;	/* running table counts */
    600 	int size;
    601 
    602 	/*
    603 	 * This function is called by __bootstrap after it has
    604 	 * determined the type of machine and made the appropriate
    605 	 * patches to the ROM vectors (XXX- I don't quite know what I meant
    606 	 * by that.)  It allocates and sets up enough of the pmap system
    607 	 * to manage the kernel's address space.
    608 	 */
    609 
    610 	/*
    611 	 * Determine the range of kernel virtual and physical
    612 	 * space available. Note that we ABSOLUTELY DEPEND on
    613 	 * the fact that the first bank of memory (4MB) is
    614 	 * mapped linearly to KERNBASE (which we guaranteed in
    615 	 * the first instructions of locore.s).
    616 	 * That is plenty for our bootstrap work.
    617 	 */
    618 	virtual_avail = sun3x_round_page(nextva);
    619 	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
    620 	virtual_end = VM_MAX_KERNEL_ADDRESS;
    621 	/* Don't need avail_start til later. */
    622 
    623 	/* We may now call pmap_bootstrap_alloc(). */
    624 	bootstrap_alloc_enabled = TRUE;
    625 
    626 	/*
    627 	 * This is a somewhat unwrapped loop to deal with
     628 	 * copying the PROM's 'physmem' banks into the pmap's
    629 	 * banks.  The following is always assumed:
    630 	 * 1. There is always at least one bank of memory.
    631 	 * 2. There is always a last bank of memory, and its
    632 	 *    pmem_next member must be set to NULL.
    633 	 * XXX - Use: do { ... } while (membank->next) instead?
    634 	 * XXX - Why copy this stuff at all? -gwr
    635 	 */
    636 	membank = romVectorPtr->v_physmemory;
    637 	pmap_membank = avail_mem;
    638 	total_phys_mem = 0;
    639 
    640 	while (membank->next) {
    641 		pmap_membank->pmem_start = membank->address;
    642 		pmap_membank->pmem_end = membank->address + membank->size;
    643 		total_phys_mem += membank->size;
    644 		/* This silly syntax arises because pmap_membank
    645 		 * is really a pre-allocated array, but it is put into
    646 		 * use as a linked list.
    647 		 */
    648 		pmap_membank->pmem_next = pmap_membank + 1;
    649 		pmap_membank = pmap_membank->pmem_next;
    650 		membank = membank->next;
    651 	}
    652 
    653 	/*
    654 	 * XXX The last bank of memory should be reduced to exclude the
    655 	 * physical pages needed by the PROM monitor from being used
    656 	 * in the VM system.  XXX - See below - Fix!
    657 	 */
    658 	pmap_membank->pmem_start = membank->address;
    659 	pmap_membank->pmem_end = membank->address + membank->size;
    660 	pmap_membank->pmem_next = NULL;
    661 
    662 #if 0	/* XXX - Need to integrate this! */
    663 	/*
    664 	 * The last few pages of physical memory are "owned" by
    665 	 * the PROM.  The total amount of memory we are allowed
    666 	 * to use is given by the romvec pointer. -gwr
    667 	 *
    668 	 * We should dedicate different variables for 'useable'
    669 	 * and 'physically available'.  Most users are used to the
    670 	 * kernel reporting the amount of memory 'physically available'
    671 	 * as opposed to 'useable by the kernel' at boot time. -j
    672 	 */
    673 	total_phys_mem = *romVectorPtr->memoryAvail;
    674 #endif	/* XXX */
    675 
    676 	total_phys_mem += membank->size;	/* XXX see above */
    677 	physmem = btoc(total_phys_mem);
    678 
    679 	/*
    680 	 * Avail_end is set to the first byte of physical memory
    681 	 * after the end of the last bank.  We use this only to
    682 	 * determine if a physical address is "managed" memory.
    683 	 */
    684 	avail_end = pmap_membank->pmem_end;
    685 	avail_end = sun3x_trunc_page(avail_end);
    686 
    687 	/*
    688 	 * The first step is to allocate MMU tables.
    689 	 * Note: All must be aligned on 256 byte boundaries.
    690 	 *
    691 	 * Start with the top level, or 'A' table.
    692 	 */
    693 	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
    694 	kernAbase = pmap_bootstrap_alloc(size);
    695 	bzero(kernAbase, size);
    696 
    697 	/*
    698 	 * Allocate enough B tables to map from KERNBASE to
    699 	 * the end of VM.
    700 	 */
    701 	size = sizeof(mmu_short_dte_t) *
    702 		(MMU_A_TBL_SIZE - MMU_TIA(KERNBASE)) * MMU_B_TBL_SIZE;
    703 	kernBbase = pmap_bootstrap_alloc(size);
    704 	bzero(kernBbase, size);
    705 
    706 	/*
    707 	 * Allocate enough C tables.
    708 	 * Note: In order for the PV system to work correctly, the kernel
    709 	 * and user-level C tables must be allocated contiguously.
    710 	 * Nothing should be allocated between here and the allocation of
    711 	 * mmuCbase below.  XXX: Should do this as one allocation, and
    712 	 * then compute a pointer for mmuCbase instead of this...
    713 	 */
    714 	size = sizeof (mmu_short_pte_t) *
    715 		(MMU_A_TBL_SIZE - MMU_TIA(KERNBASE))
    716 		* MMU_B_TBL_SIZE * MMU_C_TBL_SIZE;
    717 	kernCbase = pmap_bootstrap_alloc(size);
    718 	bzero(kernCbase, size);
    719 
    720 	/*
    721 	 * Allocate user MMU tables.
    722 	 * These must be aligned on 256 byte boundaries.
    723 	 *
    724 	 * As noted in the comment preceding the allocation of the kernel
     725 	 * C tables in pmap_bootstrap(), user-level C tables must be
     726 	 * flush with (up against) the kernel-level C tables.
    727 	 */
    728 	mmuCbase = (mmu_short_pte_t *)
    729 		pmap_bootstrap_alloc(sizeof(mmu_short_pte_t)
    730 		* MMU_C_TBL_SIZE
    731 		* NUM_C_TABLES);
    732 	mmuAbase = (mmu_long_dte_t *)
    733 		pmap_bootstrap_alloc(sizeof(mmu_long_dte_t)
    734 		* MMU_A_TBL_SIZE
    735 		* NUM_A_TABLES);
    736 	mmuBbase = (mmu_short_dte_t *)
    737 		pmap_bootstrap_alloc(sizeof(mmu_short_dte_t)
    738 		* MMU_B_TBL_SIZE
    739 		* NUM_B_TABLES);
    740 
    741 	/*
    742 	 * Fill in the never-changing part of the kernel tables.
    743 	 * For simplicity, the kernel's mappings will be editable as a
    744 	 * flat array of page table entries at kernCbase.  The
    745 	 * higher level 'A' and 'B' tables must be initialized to point
    746 	 * to this lower one.
    747 	 */
    748 	b = c = 0;
    749 
    750 	/*
    751 	 * Invalidate all mappings below KERNBASE in the A table.
    752 	 * This area has already been zeroed out, but it is good
    753 	 * practice to explicitly show that we are interpreting
    754 	 * it as a list of A table descriptors.
    755 	 */
    756 	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
    757 		kernAbase[i].addr.raw = 0;
    758 	}
    759 
    760 	/*
    761 	 * Set up the kernel A and B tables so that they will reference the
    762 	 * correct spots in the contiguous table of PTEs allocated for the
    763 	 * kernel's virtual memory space.
    764 	 */
    765 	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
    766 		kernAbase[i].attr.raw =
    767 			MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
    768 		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);
    769 
    770 		for (j=0; j < MMU_B_TBL_SIZE; j++) {
    771 			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
    772 				| MMU_DT_SHORT;
    773 			c += MMU_C_TBL_SIZE;
    774 		}
    775 		b += MMU_B_TBL_SIZE;
    776 	}
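
	/*
	 * Worked example of the loop above (assuming KERNBASE is
	 * 0xF8000000, as on the sun3x): MMU_TIA(KERNBASE) == 124, so
	 * A entries 124-127 are initialized, each pointing at a run of
	 * 64 B descriptors, for 256 C tables in all -- 16384 PTEs, or
	 * exactly the 128MB of kernel VA from KERNBASE to the top of
	 * the address space at 8KB per page.
	 */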
    777 
    778 	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
    779 	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
    780 	pmap_alloc_pv();	/* Allocate physical->virtual map.  */
    781 
    782 	/*
    783 	 * We are now done with pmap_bootstrap_alloc().  Round up
    784 	 * `virtual_avail' to the nearest page, and set the flag
    785 	 * to prevent use of pmap_bootstrap_alloc() hereafter.
    786 	 */
    787 	pmap_bootstrap_aalign(NBPG);
    788 	bootstrap_alloc_enabled = FALSE;
    789 
    790 	/*
    791 	 * Now that we are done with pmap_bootstrap_alloc(), we
    792 	 * must save the virtual and physical addresses of the
    793 	 * end of the linearly mapped range, which are stored in
    794 	 * virtual_contig_end and avail_start, respectively.
    795 	 * These variables will never change after this point.
    796 	 */
    797 	virtual_contig_end = virtual_avail;
    798 	avail_start = virtual_avail - KERNBASE;
    799 
    800 	/*
    801 	 * `avail_next' is a running pointer used by pmap_next_page() to
    802 	 * keep track of the next available physical page to be handed
    803 	 * to the VM system during its initialization, in which it
    804 	 * asks for physical pages, one at a time.
    805 	 */
    806 	avail_next = avail_start;
    807 
    808 	/*
    809 	 * Now allocate some virtual addresses, but not the physical pages
    810 	 * behind them.  Note that virtual_avail is already page-aligned.
    811 	 *
    812 	 * tmp_vpages[] is an array of two virtual pages used for temporary
    813 	 * kernel mappings in the pmap module to facilitate various physical
     814 	 * address-oriented operations.
    815 	 */
    816 	tmp_vpages[0] = virtual_avail;
    817 	virtual_avail += NBPG;
    818 	tmp_vpages[1] = virtual_avail;
    819 	virtual_avail += NBPG;
    820 
    821 	/** Initialize the PV system **/
    822 	pmap_init_pv();
    823 
    824 	/*
    825 	 * Fill in the kernel_pmap structure and kernel_crp.
    826 	 */
    827 	kernAphys = mmu_vtop(kernAbase);
    828 	kernel_pmap.pm_a_tmgr = NULL;
    829 	kernel_pmap.pm_a_phys = kernAphys;
    830 	kernel_pmap.pm_refcount = 1; /* always in use */
    831 
    832 	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
    833 	kernel_crp.rp_addr = kernAphys;
    834 
    835 	/*
    836 	 * Now pmap_enter_kernel() may be used safely and will be
    837 	 * the main interface used hereafter to modify the kernel's
    838 	 * virtual address space.  Note that since we are still running
    839 	 * under the PROM's address table, none of these table modifications
    840 	 * actually take effect until pmap_takeover_mmu() is called.
    841 	 *
    842 	 * Note: Our tables do NOT have the PROM linear mappings!
    843 	 * Only the mappings created here exist in our tables, so
    844 	 * remember to map anything we expect to use.
    845 	 */
    846 	va = (vm_offset_t) KERNBASE;
    847 	pa = 0;
    848 
    849 	/*
    850 	 * The first page of the kernel virtual address space is the msgbuf
    851 	 * page.  The page attributes (data, non-cached) are set here, while
    852 	 * the address is assigned to this global pointer in cpu_startup().
     853 	 * (It is mapped non-cached via PMAP_NC below.)
    854 	 */
    855 	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
    856 	va += NBPG; pa += NBPG;
    857 
    858 	/* Next page is used as the temporary stack. */
    859 	pmap_enter_kernel(va, pa, VM_PROT_ALL);
    860 	va += NBPG; pa += NBPG;
    861 
    862 	/*
    863 	 * Map all of the kernel's text segment as read-only and cacheable.
    864 	 * (Cacheable is implied by default).  Unfortunately, the last bytes
    865 	 * of kernel text and the first bytes of kernel data will often be
    866 	 * sharing the same page.  Therefore, the last page of kernel text
     867 	 * has to be mapped as read/write, to accommodate the data.
    868 	 */
    869 	eva = sun3x_trunc_page((vm_offset_t)etext);
    870 	for (; va < eva; va += NBPG, pa += NBPG)
    871 		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);
    872 
    873 	/*
    874 	 * Map all of the kernel's data as read/write and cacheable.
    875 	 * This includes: data, BSS, symbols, and everything in the
    876 	 * contiguous memory used by pmap_bootstrap_alloc()
    877 	 */
    878 	for (; pa < avail_start; va += NBPG, pa += NBPG)
    879 		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);
    880 
    881 	/*
    882 	 * At this point we are almost ready to take over the MMU.  But first
    883 	 * we must save the PROM's address space in our map, as we call its
    884 	 * routines and make references to its data later in the kernel.
    885 	 */
    886 	pmap_bootstrap_copyprom();
    887 	pmap_takeover_mmu();
    888 
    889 	/*
    890 	 * XXX - Todo:  Fill in the PROM's level-A table for the VA range
    891 	 * KERNBASE ... 0xFE000000 so that the PROM monitor can see our
    892 	 * mappings.  This should make bouncing in/out of PROM easier.
    893 	 */
    894 
    895 	/* Notify the VM system of our page size. */
    896 	PAGE_SIZE = NBPG;
    897 	vm_set_page_size();
    898 }
    899 
    900 
    901 /* pmap_alloc_usermmu			INTERNAL
    902  **
    903  * Called from pmap_bootstrap() to allocate MMU tables that will
    904  * eventually be used for user mappings.
    905  */
    906 void
    907 pmap_alloc_usermmu()
    908 {
    909 	/* XXX: Moved into caller. */
    910 }
    911 
    912 /* pmap_alloc_pv			INTERNAL
    913  **
    914  * Called from pmap_bootstrap() to allocate the physical
    915  * to virtual mapping list.  Each physical page of memory
    916  * in the system has a corresponding element in this list.
    917  */
    918 void
    919 pmap_alloc_pv()
    920 {
    921 	int	i;
    922 	unsigned int	total_mem;
    923 
    924 	/*
    925 	 * Allocate a pv_head structure for every page of physical
    926 	 * memory that will be managed by the system.  Since memory on
    927 	 * the 3/80 is non-contiguous, we cannot arrive at a total page
    928 	 * count by subtraction of the lowest available address from the
    929 	 * highest, but rather we have to step through each memory
    930 	 * bank and add the number of pages in each to the total.
    931 	 *
    932 	 * At this time we also initialize the offset of each bank's
    933 	 * starting pv_head within the pv_head list so that the physical
    934 	 * memory state routines (pmap_is_referenced(),
     935 	 * pmap_is_modified(), et al.) can quickly find corresponding
    936 	 * pv_heads in spite of the non-contiguity.
    937 	 */
    938 	total_mem = 0;
    939 	for (i = 0; i < SUN3X_80_MEM_BANKS; i++) {
    940 		avail_mem[i].pmem_pvbase = sun3x_btop(total_mem);
    941 		total_mem += avail_mem[i].pmem_end -
    942 			avail_mem[i].pmem_start;
    943 		if (avail_mem[i].pmem_next == NULL)
    944 			break;
    945 	}
    946 #ifdef	PMAP_DEBUG
    947 	if (total_mem != total_phys_mem)
    948 		panic("pmap_alloc_pv did not arrive at correct page count");
    949 #endif
    950 
    951 	pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
    952 		sun3x_btop(total_phys_mem));
    953 }
    954 
    955 /* pmap_alloc_usertmgr			INTERNAL
    956  **
    957  * Called from pmap_bootstrap() to allocate the structures which
    958  * facilitate management of user MMU tables.  Each user MMU table
    959  * in the system has one such structure associated with it.
    960  */
    961 void
    962 pmap_alloc_usertmgr()
    963 {
    964 	/* Allocate user MMU table managers */
    965 	/* It would be a lot simpler to just make these BSS, but */
    966 	/* we may want to change their size at boot time... -j */
    967 	Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
    968 		* NUM_A_TABLES);
    969 	Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
    970 		* NUM_B_TABLES);
    971 	Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
    972 		* NUM_C_TABLES);
    973 
    974 	/*
    975 	 * Allocate PV list elements for the physical to virtual
    976 	 * mapping system.
    977 	 */
    978 	pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
    979 		sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
    980 }
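
/*
 * Sizing note on the allocation above: there is one pv_elem for every
 * PTE in the system (user and kernel alike), since a given PTE maps at
 * most one physical page and so can appear on at most one
 * physical->virtual list at a time.
 */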
    981 
    982 /* pmap_bootstrap_copyprom()			INTERNAL
    983  **
    984  * Copy the PROM mappings into our own tables.  Note, we
    985  * can use physical addresses until __bootstrap returns.
    986  */
    987 void
    988 pmap_bootstrap_copyprom()
    989 {
    990 	MachMonRomVector *romp;
    991 	int *mon_ctbl;
    992 	mmu_short_pte_t *kpte;
    993 	int i, len;
    994 
    995 	romp = romVectorPtr;
    996 
    997 	/*
    998 	 * Copy the mappings in MON_KDB_START...MONEND
    999 	 * Note: mon_ctbl[0] maps MON_KDB_START
   1000 	 */
   1001 	mon_ctbl = *romp->monptaddr;
   1002 	i = sun3x_btop(MON_KDB_START - KERNBASE);
   1003 	kpte = &kernCbase[i];
   1004 	len = sun3x_btop(MONEND - MON_KDB_START);
   1005 
   1006 	for (i = 0; i < len; i++) {
   1007 		kpte[i].attr.raw = mon_ctbl[i];
   1008 	}
   1009 
   1010 	/*
   1011 	 * Copy the mappings at MON_DVMA_BASE (to the end).
   1012 	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
   1013 	 * XXX - This does not appear to be necessary, but
   1014 	 * I'm not sure yet if it is or not. -gwr
   1015 	 */
   1016 	mon_ctbl = *romp->shadowpteaddr;
   1017 	i = sun3x_btop(MON_DVMA_BASE - KERNBASE);
   1018 	kpte = &kernCbase[i];
   1019 	len = sun3x_btop(MON_DVMA_SIZE);
   1020 
   1021 	for (i = 0; i < len; i++) {
   1022 		kpte[i].attr.raw = mon_ctbl[i];
   1023 	}
   1024 }
   1025 
   1026 /* pmap_takeover_mmu			INTERNAL
   1027  **
   1028  * Called from pmap_bootstrap() after it has copied enough of the
   1029  * PROM mappings into the kernel map so that we can use our own
   1030  * MMU table.
   1031  */
   1032 void
   1033 pmap_takeover_mmu()
   1034 {
   1035 	struct mmu_rootptr *crp;
   1036 
   1037 	crp = &kernel_crp;
   1038 	loadcrp(crp);
   1039 }
   1040 
   1041 /* pmap_init			INTERFACE
   1042  **
   1043  * Called at the end of vm_init() to set up the pmap system to go
   1044  * into full time operation.  All initialization of kernel_pmap
   1045  * should be already done by now, so this should just do things
   1046  * needed for user-level pmaps to work.
   1047  */
   1048 void
   1049 pmap_init()
   1050 {
   1051 	/** Initialize the manager pools **/
   1052 	TAILQ_INIT(&a_pool);
   1053 	TAILQ_INIT(&b_pool);
   1054 	TAILQ_INIT(&c_pool);
   1055 
   1056 	/**************************************************************
   1057 	 * Initialize all tmgr structures and MMU tables they manage. *
   1058 	 **************************************************************/
   1059 	/** Initialize A tables **/
   1060 	pmap_init_a_tables();
   1061 	/** Initialize B tables **/
   1062 	pmap_init_b_tables();
   1063 	/** Initialize C tables **/
   1064 	pmap_init_c_tables();
   1065 }
   1066 
   1067 /* pmap_init_a_tables()			INTERNAL
   1068  **
   1069  * Initializes all A managers, their MMU A tables, and inserts
   1070  * them into the A manager pool for use by the system.
   1071  */
   1072 void
   1073 pmap_init_a_tables()
   1074 {
   1075 	int i;
   1076 	a_tmgr_t *a_tbl;
   1077 
   1078 	for (i=0; i < NUM_A_TABLES; i++) {
   1079 		/* Select the next available A manager from the pool */
   1080 		a_tbl = &Atmgrbase[i];
   1081 
   1082 		/*
   1083 		 * Clear its parent entry.  Set its wired and valid
   1084 		 * entry count to zero.
   1085 		 */
   1086 		a_tbl->at_parent = NULL;
   1087 		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;
   1088 
   1089 		/* Assign it the next available MMU A table from the pool */
   1090 		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];
   1091 
   1092 		/*
   1093 		 * Initialize the MMU A table with the table in the `proc0',
   1094 		 * or kernel, mapping.  This ensures that every process has
   1095 		 * the kernel mapped in the top part of its address space.
   1096 		 */
   1097 		bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
   1098 			sizeof(mmu_long_dte_t));
   1099 
   1100 		/*
   1101 		 * Finally, insert the manager into the A pool,
   1102 		 * making it ready to be used by the system.
   1103 		 */
   1104 		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
    1105 	}
   1106 }
   1107 
   1108 /* pmap_init_b_tables()			INTERNAL
   1109  **
   1110  * Initializes all B table managers, their MMU B tables, and
   1111  * inserts them into the B manager pool for use by the system.
   1112  */
   1113 void
   1114 pmap_init_b_tables()
   1115 {
   1116 	int i,j;
   1117 	b_tmgr_t *b_tbl;
   1118 
   1119 	for (i=0; i < NUM_B_TABLES; i++) {
   1120 		/* Select the next available B manager from the pool */
   1121 		b_tbl = &Btmgrbase[i];
   1122 
   1123 		b_tbl->bt_parent = NULL;	/* clear its parent,  */
   1124 		b_tbl->bt_pidx = 0;		/* parent index,      */
   1125 		b_tbl->bt_wcnt = 0;		/* wired entry count, */
   1126 		b_tbl->bt_ecnt = 0;		/* valid entry count. */
   1127 
   1128 		/* Assign it the next available MMU B table from the pool */
   1129 		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];
   1130 
   1131 		/* Invalidate every descriptor in the table */
   1132 		for (j=0; j < MMU_B_TBL_SIZE; j++)
   1133 			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;
   1134 
   1135 		/* Insert the manager into the B pool */
   1136 		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
   1137 	}
   1138 }
   1139 
   1140 /* pmap_init_c_tables()			INTERNAL
   1141  **
   1142  * Initializes all C table managers, their MMU C tables, and
   1143  * inserts them into the C manager pool for use by the system.
   1144  */
   1145 void
   1146 pmap_init_c_tables()
   1147 {
   1148 	int i,j;
   1149 	c_tmgr_t *c_tbl;
   1150 
   1151 	for (i=0; i < NUM_C_TABLES; i++) {
   1152 		/* Select the next available C manager from the pool */
   1153 		c_tbl = &Ctmgrbase[i];
   1154 
   1155 		c_tbl->ct_parent = NULL;	/* clear its parent,  */
   1156 		c_tbl->ct_pidx = 0;		/* parent index,      */
   1157 		c_tbl->ct_wcnt = 0;		/* wired entry count, */
   1158 		c_tbl->ct_ecnt = 0;		/* valid entry count. */
   1159 
   1160 		/* Assign it the next available MMU C table from the pool */
   1161 		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];
   1162 
   1163 		for (j=0; j < MMU_C_TBL_SIZE; j++)
   1164 			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;
   1165 
   1166 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
   1167 	}
   1168 }
   1169 
   1170 /* pmap_init_pv()			INTERNAL
   1171  **
   1172  * Initializes the Physical to Virtual mapping system.
   1173  */
   1174 void
   1175 pmap_init_pv()
   1176 {
   1177 	int	i;
   1178 
   1179 	/* Initialize every PV head. */
   1180 	for (i = 0; i < sun3x_btop(total_phys_mem); i++) {
   1181 		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
   1182 		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
   1183 	}
   1184 
   1185 	pv_initialized = TRUE;
   1186 }
   1187 
   1188 /* get_a_table			INTERNAL
   1189  **
   1190  * Retrieve and return a level A table for use in a user map.
   1191  */
   1192 a_tmgr_t *
   1193 get_a_table()
   1194 {
   1195 	a_tmgr_t *tbl;
   1196 	pmap_t pmap;
   1197 
   1198 	/* Get the top A table in the pool */
   1199 	tbl = a_pool.tqh_first;
   1200 	if (tbl == NULL) {
   1201 		/*
    1202 		 * XXX - Instead of panicking here and in other get_x_table
   1203 		 * functions, we do have the option of sleeping on the head of
   1204 		 * the table pool.  Any function which updates the table pool
   1205 		 * would then issue a wakeup() on the head, thus waking up any
   1206 		 * processes waiting for a table.
   1207 		 *
   1208 		 * Actually, the place to sleep would be when some process
   1209 		 * asks for a "wired" mapping that would run us short of
   1210 		 * mapping resources.  This design DEPENDS on always having
   1211 		 * some mapping resources in the pool for stealing, so we
   1212 		 * must make sure we NEVER let the pool become empty. -gwr
   1213 		 */
   1214 		panic("get_a_table: out of A tables.");
   1215 	}
   1216 
   1217 	TAILQ_REMOVE(&a_pool, tbl, at_link);
   1218 	/*
   1219 	 * If the table has a non-null parent pointer then it is in use.
   1220 	 * Forcibly abduct it from its parent and clear its entries.
   1221 	 * No re-entrancy worries here.  This table would not be in the
   1222 	 * table pool unless it was available for use.
   1223 	 *
   1224 	 * Note that the second argument to free_a_table() is FALSE.  This
   1225 	 * indicates that the table should not be relinked into the A table
   1226 	 * pool.  That is a job for the function that called us.
   1227 	 */
   1228 	if (tbl->at_parent) {
   1229 		/* XXX - Use pmap_remove_a() to do this job? */
   1230 		pmap = tbl->at_parent;
   1231 		pmap->pm_stats.resident_count -= free_a_table(tbl, FALSE);
   1232 		pmap->pm_a_tmgr = NULL;
   1233 		pmap->pm_a_phys = kernAphys;
   1234 	}
   1235 #ifdef  NON_REENTRANT
   1236 	/*
   1237 	 * If the table isn't to be wired down, re-insert it at the
   1238 	 * end of the pool.
   1239 	 */
   1240 	if (!wired)
   1241 		/*
   1242 		 * Quandary - XXX
   1243 		 * Would it be better to let the calling function insert this
   1244 		 * table into the queue?  By inserting it here, we are allowing
   1245 		 * it to be stolen immediately.  The calling function is
   1246 		 * probably not expecting to use a table that it is not
   1247 		 * assured full control of.
    1248 		 * Answer - In the interest of re-entrancy, it is best to let
   1249 		 * the calling function determine when a table is available
   1250 		 * for use.  Therefore this code block is not used.
   1251 		 */
   1252 		TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
   1253 #endif	/* NON_REENTRANT */
   1254 	return tbl;
   1255 }
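
/*
 * A hedged sketch of the caller's side of the contract described above
 * (not taken from the real allocation path; the pmap_enter() internals
 * live elsewhere in this module):
 */
#if 0
	if (pmap->pm_a_tmgr == NULL) {
		a_tmgr_t *a_tbl = get_a_table();

		a_tbl->at_parent = pmap;	/* claim the table...     */
		pmap->pm_a_tmgr = a_tbl;	/* ...and link it in so   */
		pmap->pm_a_phys =		/* it can be loaded into  */
		    mmu_vtop(a_tbl->at_dtbl);	/* the CRP.               */
	}
#endif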
   1256 
   1257 /* get_b_table			INTERNAL
   1258  **
   1259  * Return a level B table for use.
   1260  */
   1261 b_tmgr_t *
   1262 get_b_table()
   1263 {
   1264 	b_tmgr_t *tbl;
   1265 
   1266 	/* See 'get_a_table' for comments. */
   1267 	tbl = b_pool.tqh_first;
   1268 	if (tbl == NULL)
   1269 		panic("get_b_table: out of B tables.");
   1270 	TAILQ_REMOVE(&b_pool, tbl, bt_link);
   1271 	if (tbl->bt_parent) {
   1272 		/* XXX - Use pmap_remove_b() to do this job? */
   1273 		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
   1274 		tbl->bt_parent->at_ecnt--;
   1275 		tbl->bt_parent->at_parent->pm_stats.resident_count -=
   1276 			free_b_table(tbl, FALSE);
   1277 	}
   1278 #ifdef	NON_REENTRANT
   1279 	if (!wired)
    1280 		/* XXX see quandary in get_a_table */
   1281 		/* XXX start lock */
   1282 		TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
   1283 		/* XXX end lock */
   1284 #endif	/* NON_REENTRANT */
   1285 	return tbl;
   1286 }
   1287 
   1288 /* get_c_table			INTERNAL
   1289  **
   1290  * Return a level C table for use.
   1291  */
   1292 c_tmgr_t *
   1293 get_c_table()
   1294 {
   1295 	c_tmgr_t *tbl;
   1296 
   1297 	/* See 'get_a_table' for comments */
   1298 	tbl = c_pool.tqh_first;
   1299 	if (tbl == NULL)
   1300 		panic("get_c_table: out of C tables.");
   1301 	TAILQ_REMOVE(&c_pool, tbl, ct_link);
   1302 	if (tbl->ct_parent) {
   1303 		/* XXX - Use pmap_remove_c() to do this job? */
   1304 		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
   1305 		tbl->ct_parent->bt_ecnt--;
   1306 		tbl->ct_parent->bt_parent->at_parent->pm_stats.resident_count
   1307 		    -= free_c_table(tbl, FALSE);
   1308 	}
   1309 #ifdef	NON_REENTRANT
   1310 	if (!wired)
   1311 		/* XXX See quandary in get_a_table */
   1312 		/* XXX start lock */
    1313 		TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
   1314 		/* XXX end lock */
   1315 #endif	/* NON_REENTRANT */
   1316 
   1317 	return tbl;
   1318 }
   1319 
   1320 /*
   1321  * The following 'free_table' and 'steal_table' functions are called to
   1322  * detach tables from their current obligations (parents and children) and
   1323  * prepare them for reuse in another mapping.
   1324  *
   1325  * Free_table is used when the calling function will handle the fate
   1326  * of the parent table, such as returning it to the free pool when it has
   1327  * no valid entries.  Functions that do not want to handle this should
   1328  * call steal_table, in which the parent table's descriptors and entry
   1329  * count are automatically modified when this table is removed.
   1330  */
   1331 
   1332 /* free_a_table			INTERNAL
   1333  **
   1334  * Unmaps the given A table and all child tables from their current
   1335  * mappings.  Returns the number of pages that were invalidated.
   1336  * If 'relink' is true, the function will return the table to the head
   1337  * of the available table pool.
   1338  *
   1339  * Cache note: The MC68851 will automatically flush all
   1340  * descriptors derived from a given A table from its
   1341  * Automatic Translation Cache (ATC) if we issue a
   1342  * 'PFLUSHR' instruction with the base address of the
    1343  * table.  This function should do so, and does.
   1344  * Note note: We are using an MC68030 - there is no
   1345  * PFLUSHR.
   1346  */
   1347 int
   1348 free_a_table(a_tbl, relink)
   1349 	a_tmgr_t *a_tbl;
   1350 	boolean_t relink;
   1351 {
   1352 	int i, removed_cnt;
   1353 	mmu_long_dte_t	*dte;
   1354 	mmu_short_dte_t *dtbl;
   1355 	b_tmgr_t	*tmgr;
   1356 
   1357 	/*
   1358 	 * Flush the ATC cache of all cached descriptors derived
   1359 	 * from this table.
   1360 	 * XXX - Sun3x does not use 68851's cached table feature
   1361 	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
   1362 	 */
   1363 
   1364 	/*
   1365 	 * Remove any pending cache flushes that were designated
   1366 	 * for the pmap this A table belongs to.
   1367 	 * a_tbl->parent->atc_flushq[0] = 0;
   1368 	 * XXX - Not implemented in sun3x.
   1369 	 */
   1370 
   1371 	/*
   1372 	 * All A tables in the system should retain a map for the
   1373 	 * kernel. If the table contains any valid descriptors
   1374 	 * (other than those for the kernel area), invalidate them all,
   1375 	 * stopping short of the kernel's entries.
   1376 	 */
   1377 	removed_cnt = 0;
   1378 	if (a_tbl->at_ecnt) {
   1379 		dte = a_tbl->at_dtbl;
   1380 		for (i=0; i < MMU_TIA(KERNBASE); i++)
   1381 			/*
   1382 			 * If a table entry points to a valid B table, free
   1383 			 * it and its children.
   1384 			 */
   1385 			if (MMU_VALID_DT(dte[i])) {
   1386 				/*
   1387 				 * The following block does several things,
   1388 				 * from innermost expression to the
   1389 				 * outermost:
1390 				 * 1) It extracts the base
   1391 				 *    address of the B table pointed
   1392 				 *    to in the A table entry dte[i].
   1393 				 * 2) It converts this base address into
   1394 				 *    the virtual address it can be
   1395 				 *    accessed with. (all MMU tables point
   1396 				 *    to physical addresses.)
   1397 				 * 3) It finds the corresponding manager
   1398 				 *    structure which manages this MMU table.
   1399 				 * 4) It frees the manager structure.
   1400 				 *    (This frees the MMU table and all
   1401 				 *    child tables. See 'free_b_table' for
   1402 				 *    details.)
   1403 				 */
   1404 				dtbl = mmu_ptov(dte[i].addr.raw);
   1405 				tmgr = mmuB2tmgr(dtbl);
   1406 				removed_cnt += free_b_table(tmgr, TRUE);
   1407 			}
   1408 	}
   1409 	a_tbl->at_ecnt = 0;
   1410 	if (relink) {
   1411 		a_tbl->at_parent = NULL;
   1412 		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
   1413 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
   1414 	}
   1415 	return removed_cnt;
   1416 }
   1417 
   1418 /* free_b_table			INTERNAL
   1419  **
   1420  * Unmaps the given B table and all its children from their current
   1421  * mappings.  Returns the number of pages that were invalidated.
   1422  * (For comments, see 'free_a_table()').
   1423  */
   1424 int
   1425 free_b_table(b_tbl, relink)
   1426 	b_tmgr_t *b_tbl;
   1427 	boolean_t relink;
   1428 {
   1429 	int i, removed_cnt;
   1430 	mmu_short_dte_t *dte;
   1431 	mmu_short_pte_t	*dtbl;
   1432 	c_tmgr_t	*tmgr;
   1433 
   1434 #ifdef	DEBUG
   1435 	/* XXX - One time debug shot */
   1436 	if (b_tbl == pmap_watch_btbl)
   1437 		Debugger();
   1438 	/* XXX - End */
   1439 #endif
   1440 
   1441 	removed_cnt = 0;
   1442 	if (b_tbl->bt_ecnt) {
   1443 		dte = b_tbl->bt_dtbl;
   1444 		for (i=0; i < MMU_B_TBL_SIZE; i++)
   1445 			if (MMU_VALID_DT(dte[i])) {
   1446 				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
   1447 				tmgr = mmuC2tmgr(dtbl);
   1448 				removed_cnt += free_c_table(tmgr, TRUE);
   1449 			}
   1450 	}
   1451 
   1452 	b_tbl->bt_ecnt = 0;
   1453 	if (relink) {
   1454 		b_tbl->bt_parent = NULL;
   1455 		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   1456 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
   1457 	}
   1458 	return removed_cnt;
   1459 }
   1460 
   1461 /* free_c_table			INTERNAL
   1462  **
   1463  * Unmaps the given C table from use and returns it to the pool for
   1464  * re-use.  Returns the number of pages that were invalidated.
   1465  *
   1466  * This function preserves any physical page modification information
   1467  * contained in the page descriptors within the C table by calling
   1468  * 'pmap_remove_pte().'
   1469  */
   1470 int
   1471 free_c_table(c_tbl, relink)
   1472 	c_tmgr_t *c_tbl;
   1473 	boolean_t relink;
   1474 {
   1475 	int i, removed_cnt;
   1476 
   1477 	removed_cnt = 0;
   1478 	if (c_tbl->ct_ecnt)
   1479 		for (i=0; i < MMU_C_TBL_SIZE; i++)
   1480 			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
   1481 				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
   1482 				removed_cnt++;
   1483 			}
   1484 	c_tbl->ct_ecnt = 0;
   1485 	if (relink) {
   1486 		c_tbl->ct_parent = NULL;
   1487 		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   1488 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
   1489 	}
   1490 	return removed_cnt;
   1491 }
   1492 
   1493 /* free_c_table_novalid			INTERNAL
   1494  **
   1495  * Frees the given C table manager without checking to see whether
   1496  * or not it contains any valid page descriptors as it is assumed
   1497  * that it does not.
   1498  */
   1499 void
   1500 free_c_table_novalid(c_tbl)
   1501 	c_tmgr_t *c_tbl;
   1502 {
   1503 	TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   1504 	TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
   1505 	c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
   1506 	c_tbl->ct_parent->bt_ecnt--;
   1507 	/*
   1508 	 * XXX - Should call equiv. of 'free_b_table_novalid' here if
   1509 	 * we just removed the last entry of the parent B table.
1510 	 * But I want to ensure that this will not endanger pmap_enter()
   1511 	 * with sudden removal of tables it is working with.
   1512 	 *
   1513 	 * We should probably add another field to each table, indicating
   1514 	 * whether or not it is 'locked', ie. in the process of being
   1515 	 * modified.
   1516 	 */
   1517 	c_tbl->ct_parent = NULL;
   1518 }
   1519 
   1520 /* pmap_remove_pte			INTERNAL
   1521  **
   1522  * Unmap the given pte and preserve any page modification
1523  * information by transferring it to the pv head of the
   1524  * physical page it maps to.  This function does not update
   1525  * any reference counts because it is assumed that the calling
   1526  * function will do so.  If the calling function does not have the
   1527  * ability to do so, the function pmap_dereference_pte() exists
   1528  * for this purpose.
   1529  */
   1530 void
   1531 pmap_remove_pte(pte)
   1532 	mmu_short_pte_t *pte;
   1533 {
   1534 	u_short     pv_idx, targ_idx;
   1535 	int         s;
   1536 	vm_offset_t pa;
   1537 	pv_t       *pv;
   1538 
   1539 	pa = MMU_PTE_PA(*pte);
   1540 	if (is_managed(pa)) {
   1541 		pv = pa2pv(pa);
   1542 		targ_idx = pteidx(pte);	/* Index of PTE being removed    */
   1543 
   1544 		/*
   1545 		 * If the PTE being removed is the first (or only) PTE in
   1546 		 * the list of PTEs currently mapped to this page, remove the
   1547 		 * PTE by changing the index found on the PV head.  Otherwise
   1548 		 * a linear search through the list will have to be executed
   1549 		 * in order to find the PVE which points to the PTE being
   1550 		 * removed, so that it may be modified to point to its new
   1551 		 * neighbor.
   1552 		 */
   1553 		s = splimp();
   1554 		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
   1555 		if (pv_idx == targ_idx) {
   1556 			pv->pv_idx = pvebase[targ_idx].pve_next;
   1557 		} else {
   1558 			/*
   1559 			 * Find the PV element which points to the target
   1560 			 * element.
   1561 			 */
   1562 			while (pvebase[pv_idx].pve_next != targ_idx) {
   1563 				pv_idx = pvebase[pv_idx].pve_next;
   1564 #ifdef	DIAGNOSTIC
   1565 				if (pv_idx == PVE_EOL)
   1566 					panic("pmap_remove_pte: pv list end!");
   1567 #endif
   1568 			}
   1569 
   1570 			/*
   1571 			 * At this point, pv_idx is the index of the PV
   1572 			 * element just before the target element in the list.
   1573 			 * Unlink the target.
   1574 			 */
   1575 			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
   1576 		}
   1577 		/*
   1578 		 * Save the mod/ref bits of the pte by simply
   1579 		 * ORing the entire pte onto the pv_flags member
   1580 		 * of the pv structure.
   1581 		 * There is no need to use a separate bit pattern
   1582 		 * for usage information on the pv head than that
   1583 		 * which is used on the MMU ptes.
   1584 		 */
   1585 		pv->pv_flags |= (u_short) pte->attr.raw;
   1586 		splx(s);
   1587 	}
   1588 
   1589 	pte->attr.raw = MMU_DT_INVALID;
   1590 }
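/*
 * Illustration of the PV index chains manipulated above: suppose PTEs
 * 5, 9 and 12 (hypothetical indices) all map the same physical page.
 * Then:
 *
 *	pv->pv_idx == 5
 *	pvebase[5].pve_next == 9
 *	pvebase[9].pve_next == 12
 *	pvebase[12].pve_next == PVE_EOL
 *
 * Removing PTE 9 sets pvebase[5].pve_next = 12; removing PTE 5 instead
 * just sets pv->pv_idx = 9.  These are the two unlink cases handled by
 * the if/else above.
 */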
   1591 
   1592 /* pmap_dereference_pte			INTERNAL
   1593  **
   1594  * Update the necessary reference counts in any tables and pmaps to
   1595  * reflect the removal of the given pte.  Only called when no knowledge of
   1596  * the pte's associated pmap is unknown.  This only occurs in the PV call
   1597  * 'pmap_page_protect()' with a protection of VM_PROT_NONE, which means
   1598  * that all references to a given physical page must be removed.
   1599  */
   1600 void
   1601 pmap_dereference_pte(pte)
   1602 	mmu_short_pte_t *pte;
   1603 {
   1604 	vm_offset_t va;
   1605 	c_tmgr_t *c_tbl;
   1606 	pmap_t pmap;
   1607 
   1608 	va = pmap_get_pteinfo(pte, &pmap, &c_tbl);
   1609 	/*
   1610 	 * Flush the translation cache of the page mapped by the PTE, should
   1611 	 * it prove to be in the current pmap.  Kernel mappings appear in
1612 	 * all address spaces, so they should always be flushed.
   1613 	 */
   1614 	if (pmap == pmap_kernel() || pmap == current_pmap())
   1615 		TBIS(va);
   1616 
   1617 	/*
   1618 	 * If the mapping belongs to a user map, update the necessary
   1619 	 * reference counts in the table manager.  XXX - It would be
   1620 	 * much easier to keep the resident count in the c_tmgr_t -gwr
   1621 	 */
   1622 	if (pmap != pmap_kernel()) {
   1623 		/*
   1624 		 * Most of the situations in which pmap_dereference_pte() is
   1625 		 * called are usually temporary removals of a mapping.  Often
   1626 		 * the mapping is reinserted shortly afterwards. If the parent
   1627 		 * C table's valid entry count reaches zero as a result of
   1628 		 * removing this mapping, we could return it to the free pool,
   1629 		 * but we leave it alone because it is likely to be used as
   1630 		 * stated above.
   1631 		 */
   1632 		c_tbl->ct_ecnt--;
   1633 		pmap->pm_stats.resident_count--;
   1634 	}
   1635 }
   1636 
   1637 /* pmap_stroll			INTERNAL
   1638  **
   1639  * Retrieve the addresses of all table managers involved in the mapping of
1640  * the given virtual address.  If the table walk completed successfully,
1641  * return TRUE.  If it was only partially successful, return FALSE.
   1642  * The table walk performed by this function is important to many other
   1643  * functions in this module.
   1644  *
   1645  * Note: This function ought to be easier to read.
   1646  */
   1647 boolean_t
   1648 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
   1649 	pmap_t pmap;
   1650 	vm_offset_t va;
   1651 	a_tmgr_t **a_tbl;
   1652 	b_tmgr_t **b_tbl;
   1653 	c_tmgr_t **c_tbl;
   1654 	mmu_short_pte_t **pte;
   1655 	int *a_idx, *b_idx, *pte_idx;
   1656 {
   1657 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
   1658 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
   1659 
   1660 	if (pmap == pmap_kernel())
   1661 		return FALSE;
   1662 
   1663 	/* Does the given pmap have its own A table? */
   1664 	*a_tbl = pmap->pm_a_tmgr;
   1665 	if (*a_tbl == NULL)
   1666 		return FALSE; /* No.  Return unknown. */
   1667 	/* Does the A table have a valid B table
   1668 	 * under the corresponding table entry?
   1669 	 */
   1670 	*a_idx = MMU_TIA(va);
   1671 	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
   1672 	if (!MMU_VALID_DT(*a_dte))
   1673 		return FALSE; /* No. Return unknown. */
   1674 	/* Yes. Extract B table from the A table. */
   1675 	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
   1676 	/* Does the B table have a valid C table
   1677 	 * under the corresponding table entry?
   1678 	 */
   1679 	*b_idx = MMU_TIB(va);
   1680 	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
   1681 	if (!MMU_VALID_DT(*b_dte))
   1682 		return FALSE; /* No. Return unknown. */
   1683 	/* Yes. Extract C table from the B table. */
   1684 	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
   1685 	*pte_idx = MMU_TIC(va);
   1686 	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
   1687 
   1688 	return	TRUE;
   1689 }
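/*
 * A sketch of typical use (see pmap_protect() below for a real caller):
 *
 *	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
 *	    &a_idx, &b_idx, &c_idx))
 *		pte->attr.raw |= MMU_SHORT_PTE_WP;
 *
 * On a FALSE return, only the pointers filled in before the walk
 * stopped are meaningful.
 */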
   1690 
   1691 /* pmap_enter			INTERFACE
   1692  **
   1693  * Called by the kernel to map a virtual address
   1694  * to a physical address in the given process map.
   1695  *
   1696  * Note: this function should apply an exclusive lock
   1697  * on the pmap system for its duration.  (it certainly
   1698  * would save my hair!!)
   1699  * This function ought to be easier to read.
   1700  */
   1701 void
   1702 pmap_enter(pmap, va, pa, prot, wired)
   1703 	pmap_t	pmap;
   1704 	vm_offset_t va;
   1705 	vm_offset_t pa;
   1706 	vm_prot_t prot;
   1707 	boolean_t wired;
   1708 {
   1709 	boolean_t insert, managed; /* Marks the need for PV insertion.*/
   1710 	u_short nidx;            /* PV list index                     */
   1711 	int s;                   /* Used for splimp()/splx()          */
   1712 	int flags;               /* Mapping flags. eg. Cache inhibit  */
1713 	u_int a_idx, b_idx, pte_idx; /* table indices                 */
   1714 	a_tmgr_t *a_tbl;         /* A: long descriptor table manager  */
   1715 	b_tmgr_t *b_tbl;         /* B: short descriptor table manager */
   1716 	c_tmgr_t *c_tbl;         /* C: short page table manager       */
   1717 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
   1718 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
   1719 	mmu_short_pte_t *c_pte;  /* C: short page descriptor table    */
   1720 	pv_t      *pv;           /* pv list head                      */
   1721 	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end   */
   1722 
   1723 	if (pmap == NULL)
   1724 		return;
   1725 	if (pmap == pmap_kernel()) {
   1726 		pmap_enter_kernel(va, pa, prot);
   1727 		return;
   1728 	}
   1729 
   1730 	flags  = (pa & ~MMU_PAGE_MASK);
   1731 	pa    &= MMU_PAGE_MASK;
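	/*
	 * Illustration: callers may hand us mapping flags in the otherwise
	 * unused low-order bits of 'pa', e.g. (a hypothetical call)
	 * pmap_enter(pmap, va, pa | PMAP_NC, prot, wired) requests a
	 * cache-inhibited mapping.  The flags are split from the page
	 * address here and applied further below.
	 */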
   1732 
   1733 	/*
   1734 	 * Determine if the physical address being mapped is managed.
   1735 	 * If it isn't, the mapping should be cache inhibited.  (This is
   1736 	 * applied later in the function.)   XXX - Why non-cached? -gwr
   1737 	 */
   1738 	if ((managed = is_managed(pa)) == FALSE)
   1739 		flags |= PMAP_NC;
   1740 
   1741 	/*
   1742 	 * For user mappings we walk along the MMU tables of the given
   1743 	 * pmap, reaching a PTE which describes the virtual page being
   1744 	 * mapped or changed.  If any level of the walk ends in an invalid
   1745 	 * entry, a table must be allocated and the entry must be updated
   1746 	 * to point to it.
   1747 	 * There is a bit of confusion as to whether this code must be
   1748 	 * re-entrant.  For now we will assume it is.  To support
   1749 	 * re-entrancy we must unlink tables from the table pool before
   1750 	 * we assume we may use them.  Tables are re-linked into the pool
   1751 	 * when we are finished with them at the end of the function.
   1752 	 * But I don't feel like doing that until we have proof that this
   1753 	 * needs to be re-entrant.
   1754 	 * 'llevel' records which tables need to be relinked.
   1755 	 */
   1756 	llevel = NONE;
   1757 
   1758 	/*
   1759 	 * Step 1 - Retrieve the A table from the pmap.  If it has no
   1760 	 * A table, allocate a new one from the available pool.
   1761 	 */
   1762 
   1763 	a_tbl = pmap->pm_a_tmgr;
   1764 	if (a_tbl == NULL) {
   1765 		/*
   1766 		 * This pmap does not currently have an A table.  Allocate
   1767 		 * a new one.
   1768 		 */
   1769 		a_tbl = get_a_table();
   1770 		a_tbl->at_parent = pmap;
   1771 
   1772 		/*
   1773 		 * Assign this new A table to the pmap, and calculate its
   1774 		 * physical address so that loadcrp() can be used to make
   1775 		 * the table active.
   1776 		 */
   1777 		pmap->pm_a_tmgr = a_tbl;
   1778 		pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
   1779 
   1780 		/*
   1781 		 * If the process receiving a new A table is the current
   1782 		 * process, we are responsible for setting the MMU so that
   1783 		 * it becomes the current address space.
   1784 		 */
   1785 		if (pmap == current_pmap())
   1786 			pmap_activate(pmap);
   1787 
   1788 		if (!wired)
   1789 			llevel = NEWA;
   1790 	} else {
   1791 		/*
   1792 		 * Use the A table already allocated for this pmap.
   1793 		 * Unlink it from the A table pool if necessary.
   1794 		 */
   1795 		if (wired && !a_tbl->at_wcnt)
   1796 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
   1797 	}
   1798 
   1799 	/*
   1800 	 * Step 2 - Walk into the B table.  If there is no valid B table,
   1801 	 * allocate one.
   1802 	 */
   1803 
   1804 	a_idx = MMU_TIA(va);            /* Calculate the TIA of the VA. */
   1805 	a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
   1806 	if (MMU_VALID_DT(*a_dte)) {     /* Is the descriptor valid? */
   1807 		/* The descriptor is valid.  Use the B table it points to. */
   1808 		/*************************************
   1809 		 *               a_idx               *
   1810 		 *                 v                 *
   1811 		 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
   1812 		 *          | | | | | | | | | | | |  *
   1813 		 *          +-+-+-+-+-+-+-+-+-+-+-+- *
   1814 		 *                 |                 *
   1815 		 *                 \- b_tbl -> +-+-  *
   1816 		 *                             | |   *
   1817 		 *                             +-+-  *
   1818 		 *************************************/
   1819 		b_dte = mmu_ptov(a_dte->addr.raw);
   1820 		b_tbl = mmuB2tmgr(b_dte);
   1821 
   1822 		/*
   1823 		 * If the requested mapping must be wired, but this table
   1824 		 * being used to map it is not, the table must be removed
   1825 		 * from the available pool and its wired entry count
   1826 		 * incremented.
   1827 		 */
   1828 		if (wired && !b_tbl->bt_wcnt) {
   1829 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   1830 			a_tbl->at_wcnt++;
   1831 		}
   1832 	} else {
   1833 		/* The descriptor is invalid.  Allocate a new B table. */
   1834 		b_tbl = get_b_table();
   1835 
   1836 		/* Point the parent A table descriptor to this new B table. */
   1837 		a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
   1838 		a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
   1839 		a_tbl->at_ecnt++; /* Update parent's valid entry count */
   1840 
   1841 		/* Create the necessary back references to the parent table */
   1842 		b_tbl->bt_parent = a_tbl;
   1843 		b_tbl->bt_pidx = a_idx;
   1844 
   1845 		/*
   1846 		 * If this table is to be wired, make sure the parent A table
   1847 		 * wired count is updated to reflect that it has another wired
   1848 		 * entry.
   1849 		 */
   1850 		if (wired)
   1851 			a_tbl->at_wcnt++;
   1852 		else if (llevel == NONE)
   1853 			llevel = NEWB;
   1854 	}
   1855 
   1856 	/*
   1857 	 * Step 3 - Walk into the C table, if there is no valid C table,
   1858 	 * allocate one.
   1859 	 */
   1860 
   1861 	b_idx = MMU_TIB(va);            /* Calculate the TIB of the VA */
   1862 	b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
   1863 	if (MMU_VALID_DT(*b_dte)) {     /* Is the descriptor valid? */
   1864 		/* The descriptor is valid.  Use the C table it points to. */
   1865 		/**************************************
   1866 		 *               c_idx                *
   1867 		 * |                v                 *
   1868 		 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
   1869 		 *             | | | | | | | | | | |  *
   1870 		 *             +-+-+-+-+-+-+-+-+-+-+- *
   1871 		 *                  |                 *
   1872 		 *                  \- c_tbl -> +-+-- *
   1873 		 *                              | | | *
   1874 		 *                              +-+-- *
   1875 		 **************************************/
1876 		c_pte = mmu_ptov(MMU_DTE_PA(*b_dte));
   1877 		c_tbl = mmuC2tmgr(c_pte);
   1878 
   1879 		/* If mapping is wired and table is not */
   1880 		if (wired && !c_tbl->ct_wcnt) {
   1881 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   1882 			b_tbl->bt_wcnt++;
   1883 		}
   1884 	} else {
   1885 		/* The descriptor is invalid.  Allocate a new C table. */
   1886 		c_tbl = get_c_table();
   1887 
   1888 		/* Point the parent B table descriptor to this new C table. */
   1889 		b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
   1890 		b_dte->attr.raw |= MMU_DT_SHORT;
   1891 		b_tbl->bt_ecnt++; /* Update parent's valid entry count */
   1892 
   1893 		/* Create the necessary back references to the parent table */
   1894 		c_tbl->ct_parent = b_tbl;
   1895 		c_tbl->ct_pidx = b_idx;
   1896 
   1897 		/*
   1898 		 * If this table is to be wired, make sure the parent B table
   1899 		 * wired count is updated to reflect that it has another wired
   1900 		 * entry.
   1901 		 */
   1902 		if (wired)
   1903 			b_tbl->bt_wcnt++;
   1904 		else if (llevel == NONE)
   1905 			llevel = NEWC;
   1906 	}
   1907 
   1908 	/*
   1909 	 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
   1910 	 * slot of the C table, describing the PA to which the VA is mapped.
   1911 	 */
   1912 
   1913 	pte_idx = MMU_TIC(va);
   1914 	c_pte = &c_tbl->ct_dtbl[pte_idx];
   1915 	if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
   1916 		/*
   1917 		 * The PTE is currently valid.  This particular call
   1918 		 * is just a synonym for one (or more) of the following
   1919 		 * operations:
   1920 		 *     change protection of a page
   1921 		 *     change wiring status of a page
   1922 		 *     remove the mapping of a page
   1923 		 *
   1924 		 * XXX - Semi critical: This code should unwire the PTE
   1925 		 * and, possibly, associated parent tables if this is a
   1926 		 * change wiring operation.  Currently it does not.
   1927 		 *
   1928 		 * This may be ok if pmap_change_wiring() is the only
   1929 		 * interface used to UNWIRE a page.
   1930 		 */
   1931 
   1932 		/* First check if this is a wiring operation. */
   1933 		if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
   1934 			/*
   1935 			 * The PTE is already wired.  To prevent it from being
   1936 			 * counted as a new wiring operation, reset the 'wired'
   1937 			 * variable.
   1938 			 */
   1939 			wired = FALSE;
   1940 		}
   1941 
   1942 		/* Is the new address the same as the old? */
   1943 		if (MMU_PTE_PA(*c_pte) == pa) {
   1944 			/*
   1945 			 * Yes, mark that it does not need to be reinserted
   1946 			 * into the PV list.
   1947 			 */
   1948 			insert = FALSE;
   1949 
   1950 			/*
   1951 			 * Clear all but the modified, referenced and wired
   1952 			 * bits on the PTE.
   1953 			 */
   1954 			c_pte->attr.raw &= (MMU_SHORT_PTE_M
   1955 				| MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
   1956 		} else {
   1957 			/* No, remove the old entry */
   1958 			pmap_remove_pte(c_pte);
   1959 			insert = TRUE;
   1960 		}
   1961 		TBIS(va);  /* XXX - Only necessary if modifying current map. */
   1962 	} else {
   1963 		/*
   1964 		 * The PTE is invalid.  Increment the valid entry count in
   1965 		 * the C table manager to reflect a new entry.
   1966 		 */
   1967 		c_tbl->ct_ecnt++;
   1968 		/* and in pmap */
   1969 		pmap->pm_stats.resident_count++;
   1970 
   1971 		/* It will also need to be inserted into the PV list. */
   1972 		insert = TRUE;
   1973 	}
   1974 
   1975 	/*
   1976 	 * If page is changing from unwired to wired status, set an unused bit
   1977 	 * within the PTE to indicate that it is wired.  Also increment the
   1978 	 * wired entry count in the C table manager.
   1979 	 */
   1980 	if (wired) {
   1981 		c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
   1982 		c_tbl->ct_wcnt++;
   1983 	}
   1984 
   1985 	/*
   1986 	 * Map the page, being careful to preserve modify/reference/wired
   1987 	 * bits.  At this point it is assumed that the PTE either has no bits
   1988 	 * set, or if there are set bits, they are only modified, reference or
   1989 	 * wired bits.  If not, the following statement will cause erratic
   1990 	 * behavior.
   1991 	 */
   1992 #ifdef	DEBUG
   1993 	if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
   1994 		MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
   1995 		printf("pmap_enter: junk left in PTE at %p\n", c_pte);
   1996 		Debugger();
   1997 	}
   1998 #endif
   1999 	c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
   2000 
   2001 	/*
   2002 	 * If the mapping should be read-only, set the write protect
   2003 	 * bit in the PTE.
   2004 	 */
   2005 	if (!(prot & VM_PROT_WRITE))
   2006 		c_pte->attr.raw |= MMU_SHORT_PTE_WP;
   2007 
   2008 	/*
2009 	 * If the mapping should be cache inhibited (indicated by flag
2010 	 * bits passed in the low-order bits of the physical address),
2011 	 * mark the PTE as a cache-inhibited page.
   2012 	 */
   2013 	if (flags & PMAP_NC)
   2014 		c_pte->attr.raw |= MMU_SHORT_PTE_CI;
   2015 
   2016 	/*
   2017 	 * If the physical address being mapped is managed by the PV
   2018 	 * system then link the pte into the list of pages mapped to that
   2019 	 * address.
   2020 	 */
   2021 	if (insert && managed) {
   2022 		pv = pa2pv(pa);
   2023 		nidx = pteidx(c_pte);
   2024 
   2025 		s = splimp();
   2026 		pvebase[nidx].pve_next = pv->pv_idx;
   2027 		pv->pv_idx = nidx;
   2028 		splx(s);
   2029 	}
   2030 
   2031 	/* Move any allocated tables back into the active pool. */
   2032 
   2033 	switch (llevel) {
   2034 		case NEWA:
   2035 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
   2036 			/* FALLTHROUGH */
   2037 		case NEWB:
   2038 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
   2039 			/* FALLTHROUGH */
   2040 		case NEWC:
   2041 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
   2042 			/* FALLTHROUGH */
   2043 		default:
   2044 			break;
   2045 	}
   2046 }
   2047 
   2048 /* pmap_enter_kernel			INTERNAL
   2049  **
   2050  * Map the given virtual address to the given physical address within the
   2051  * kernel address space.  This function exists because the kernel map does
   2052  * not do dynamic table allocation.  It consists of a contiguous array of ptes
   2053  * and can be edited directly without the need to walk through any tables.
   2054  *
   2055  * XXX: "Danger, Will Robinson!"
   2056  * Note that the kernel should never take a fault on any page
   2057  * between [ KERNBASE .. virtual_avail ] and this is checked in
   2058  * trap.c for kernel-mode MMU faults.  This means that mappings
2059  * created in that range must be implicitly wired. -gwr
   2060  */
   2061 void
   2062 pmap_enter_kernel(va, pa, prot)
   2063 	vm_offset_t va;
   2064 	vm_offset_t pa;
   2065 	vm_prot_t   prot;
   2066 {
   2067 	boolean_t       was_valid, insert;
   2068 	u_short         pte_idx, pv_idx;
   2069 	int             s, flags;
   2070 	mmu_short_pte_t *pte;
   2071 	pv_t            *pv;
   2072 	vm_offset_t     old_pa;
   2073 
   2074 	flags  = (pa & ~MMU_PAGE_MASK);
   2075 	pa    &= MMU_PAGE_MASK;
   2076 
   2077 	/*
   2078 	 * Calculate the index of the PTE being modified.
   2079 	 */
   2080 	pte_idx = (u_long) sun3x_btop(va - KERNBASE);
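	/*
	 * For example, assuming the 8KB MMU page size used by this port:
	 * va == KERNBASE + 0x4000 yields pte_idx == 2.
	 */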
   2081 
   2082 	/* XXX - This array is traditionally named "Sysmap" */
   2083 	pte = &kernCbase[pte_idx];
   2084 
   2085 	s = splimp();
   2086 	if (MMU_VALID_DT(*pte)) {
   2087 		was_valid = TRUE;
   2088 		/*
   2089 		 * If the PTE is already mapped to an address and it differs
   2090 		 * from the address requested, unlink it from the PV list.
   2091 		 *
2092 		 * This only applies to mappings between VM_MIN_KERNEL_ADDRESS
   2093 		 * and VM_MAX_KERNEL_ADDRESS.  All others are not requests
   2094 		 * from the VM system and should not be part of the PV system.
   2095 		 */
   2096 		if (va < VM_MAX_KERNEL_ADDRESS) {
   2097 		    old_pa = MMU_PTE_PA(*pte);
   2098 		    if (pa != old_pa) {
   2099 		        if (is_managed(old_pa)) {
   2100 		            /* XXX - Make this into a function call? */
   2101 		            pv = pa2pv(old_pa);
   2102 		            pv_idx = pv->pv_idx;
   2103 		            if (pv_idx == pte_idx) {
   2104 		                pv->pv_idx = pvebase[pte_idx].pve_next;
   2105 		            } else {
   2106 		                while (pvebase[pv_idx].pve_next != pte_idx)
   2107 		                    pv_idx = pvebase[pv_idx].pve_next;
   2108 		                pvebase[pv_idx].pve_next =
   2109 		                    pvebase[pte_idx].pve_next;
   2110 		            }
   2111 		            /* Save modified/reference bits */
   2112 		            pv->pv_flags |= (u_short) pte->attr.raw;
   2113 		        }
   2114 		        if (is_managed(pa))
   2115 		            insert = TRUE;
   2116 		        else
   2117 		            insert = FALSE;
   2118 		    } else {
   2119 		        /*
   2120 		         * Old PA and new PA are the same.  No need to relink
   2121 		         * the mapping within the PV list.
   2122 		         */
   2123 		        insert = FALSE;
   2124 		    }
   2125 		} else {
   2126 		    /*
   2127 		     * If the VA lies beyond VM_MAX_KERNEL_ADDRESS, it is not
   2128 		     * a request by the VM system and hence does not need to
   2129 		     * be linked into the PV system.
   2130 		     */
   2131 		    insert = FALSE;
   2132 		}
   2133 	} else {
   2134 		was_valid = FALSE;
2135 		if (va < VM_MAX_KERNEL_ADDRESS) {
2136 			if (is_managed(pa))
2137 				insert = TRUE;
2138 			else
2139 				insert = FALSE;
2140 		} else
2141 			insert = FALSE;
   2142 	}
   2143 
   2144 	/*
   2145 	 * Save any mod/ref bits on the PTE.
   2146 	 */
   2147 	pte->attr.raw &= (MMU_SHORT_PTE_USED | MMU_SHORT_PTE_M);
   2148 
   2149 	/*
   2150 	 * Map the page.
   2151 	 */
   2152 	pte->attr.raw |= (pa | MMU_DT_PAGE);
   2153 
   2154 	if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
   2155 		pte->attr.raw |= MMU_SHORT_PTE_WP;
   2156 	if (flags & PMAP_NC)
   2157 		pte->attr.raw |= MMU_SHORT_PTE_CI;
   2158 	if (was_valid) {
   2159 		TBIS(va);
   2160 	}
   2161 
   2162 	/*
   2163 	 * Insert the PTE into the PV system, if need be.
   2164 	 */
   2165 	if (insert) {
   2166 		pv = pa2pv(pa);
   2167 		pvebase[pte_idx].pve_next = pv->pv_idx;
   2168 		pv->pv_idx = pte_idx;
   2169 	}
   2170 	splx(s);
   2171 
   2172 }
   2173 
   2174 /* pmap_protect			INTERFACE
   2175  **
   2176  * Apply the given protection to the given virtual address range within
   2177  * the given map.
   2178  *
   2179  * It is ok for the protection applied to be stronger than what is
   2180  * specified.  We use this to our advantage when the given map has no
   2181  * mapping for the virtual address.  By skipping a page when this
   2182  * is discovered, we are effectively applying a protection of VM_PROT_NONE,
   2183  * and therefore do not need to map the page just to apply a protection
   2184  * code.  Only pmap_enter() needs to create new mappings if they do not exist.
   2185  *
2186  * XXX - This function could be sped up by using pmap_stroll() for initial
2187  *       setup, and then walking the tables manually in the for() loop.
   2188  */
   2189 void
   2190 pmap_protect(pmap, startva, endva, prot)
   2191 	pmap_t pmap;
   2192 	vm_offset_t startva, endva;
   2193 	vm_prot_t prot;
   2194 {
   2195 	boolean_t iscurpmap;
   2196 	int a_idx, b_idx, c_idx;
   2197 	vm_offset_t va;
   2198 	a_tmgr_t *a_tbl;
   2199 	b_tmgr_t *b_tbl;
   2200 	c_tmgr_t *c_tbl;
   2201 	mmu_short_pte_t *pte;
   2202 
   2203 	if (pmap == NULL)
   2204 		return;
   2205 	if (pmap == pmap_kernel()) {
   2206 		pmap_protect_kernel(startva, endva, prot);
   2207 		return;
   2208 	}
   2209 
   2210 	iscurpmap = (pmap == current_pmap());
   2211 	for (va = startva; va < endva; va += NBPG) {
   2212 		/*
   2213 		 * Retrieve the mapping for the given page from the given pmap.
   2214 		 * If it does not exist then we need not do anything more for
   2215 		 * the current page.
   2216 		 */
   2217 		if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
   2218 		    &a_idx, &b_idx, &c_idx) == FALSE) {
   2219 		    continue;
   2220 		}
   2221 
   2222 		switch (prot) {
   2223 		case VM_PROT_ALL:
   2224 		    /* this should never happen in a sane system */
   2225 		    break;
   2226 		case VM_PROT_EXECUTE:
   2227 		case VM_PROT_READ:
   2228 		case VM_PROT_READ|VM_PROT_EXECUTE:
   2229 		    /* make the mapping read-only */
   2230 		    pte->attr.raw |= MMU_SHORT_PTE_WP;
   2231 		    break;
   2232 		case VM_PROT_NONE:
   2233 		    /* this is an alias for 'pmap_remove' */
   2234 		    pmap_remove_pte(pte);
   2235 		    c_tbl->ct_ecnt--;
   2236 		    pmap->pm_stats.resident_count--;
   2237 		    break;
   2238 		default:
   2239 		    break;
   2240 		}
   2241 		/*
   2242 		 * If we just modified the current address space,
   2243 		 * flush any translations for the modified page from
   2244 		 * the translation cache and any data from it in the
   2245 		 * data cache.
   2246 		 */
   2247 		if (iscurpmap)
   2248 		    TBIS(va);
   2249 	}
   2250 }
   2251 
   2252 /* pmap_protect_kernel			INTERNAL
   2253  **
   2254  * Apply the given protection code to a kernel address range.
   2255  */
   2256 void
   2257 pmap_protect_kernel(startva, endva, prot)
   2258 	vm_offset_t startva, endva;
   2259 	vm_prot_t prot;
   2260 {
   2261 	vm_offset_t va;
   2262 	mmu_short_pte_t *pte;
   2263 
   2264 	pte = &kernCbase[(unsigned long) sun3x_btop(startva - KERNBASE)];
   2265 	for (va = startva; va < endva; va += NBPG, pte++) {
   2266 		if (MMU_VALID_DT(*pte)) {
   2267 		    switch (prot) {
   2268 		        case VM_PROT_ALL:
   2269 		            break;
   2270 		        case VM_PROT_EXECUTE:
   2271 		        case VM_PROT_READ:
   2272 		        case VM_PROT_READ|VM_PROT_EXECUTE:
   2273 		            pte->attr.raw |= MMU_SHORT_PTE_WP;
   2274 		            break;
   2275 		        case VM_PROT_NONE:
   2276 		            /* this is an alias for 'pmap_remove_kernel' */
   2277 		            pmap_remove_pte(pte);
   2278 		            break;
   2279 		        default:
   2280 		            break;
   2281 		    }
   2282 		    /*
   2283 		     * since this is the kernel, immediately flush any cached
   2284 		     * descriptors for this address.
   2285 		     */
   2286 		    TBIS(va);
   2287 		}
   2288 	}
   2289 }
   2290 
   2291 /* pmap_change_wiring			INTERFACE
   2292  **
   2293  * Changes the wiring of the specified page.
   2294  *
   2295  * This function is called from vm_fault.c to unwire
   2296  * a mapping.  It really should be called 'pmap_unwire'
   2297  * because it is never asked to do anything but remove
   2298  * wirings.
   2299  */
   2300 void
   2301 pmap_change_wiring(pmap, va, wire)
   2302 	pmap_t pmap;
   2303 	vm_offset_t va;
   2304 	boolean_t wire;
   2305 {
   2306 	int a_idx, b_idx, c_idx;
   2307 	a_tmgr_t *a_tbl;
   2308 	b_tmgr_t *b_tbl;
   2309 	c_tmgr_t *c_tbl;
   2310 	mmu_short_pte_t *pte;
   2311 
   2312 	/* Kernel mappings always remain wired. */
   2313 	if (pmap == pmap_kernel())
   2314 		return;
   2315 
   2316 #ifdef	PMAP_DEBUG
   2317 	if (wire == TRUE)
   2318 		panic("pmap_change_wiring: wire requested.");
   2319 #endif
   2320 
   2321 	/*
   2322 	 * Walk through the tables.  If the walk terminates without
   2323 	 * a valid PTE then the address wasn't wired in the first place.
   2324 	 * Return immediately.
   2325 	 */
   2326 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
   2327 		&b_idx, &c_idx) == FALSE)
   2328 		return;
   2329 
   2330 
   2331 	/* Is the PTE wired?  If not, return. */
   2332 	if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
   2333 		return;
   2334 
   2335 	/* Remove the wiring bit. */
   2336 	pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
   2337 
   2338 	/*
   2339 	 * Decrement the wired entry count in the C table.
   2340 	 * If it reaches zero the following things happen:
   2341 	 * 1. The table no longer has any wired entries and is considered
   2342 	 *    unwired.
   2343 	 * 2. It is placed on the available queue.
   2344 	 * 3. The parent table's wired entry count is decremented.
   2345 	 * 4. If it reaches zero, this process repeats at step 1 and
2346 	 *    stops after reaching the A table.
   2347 	 */
   2348 	if (--c_tbl->ct_wcnt == 0) {
   2349 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
   2350 		if (--b_tbl->bt_wcnt == 0) {
   2351 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
   2352 			if (--a_tbl->at_wcnt == 0) {
   2353 				TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
   2354 			}
   2355 		}
   2356 	}
   2357 
   2358 	pmap->pm_stats.wired_count--;
   2359 }
   2360 
   2361 /* pmap_pageable			INTERFACE
   2362  **
   2363  * Make the specified range of addresses within the given pmap,
2364  * 'pageable' or 'not-pageable'.  A non-pageable page must not cause
2365  * any faults when referenced; a pageable page may.
   2366  *
   2367  * This routine is only advisory.  The VM system will call pmap_enter()
   2368  * to wire or unwire pages that are going to be made pageable before calling
   2369  * this function.  By the time this routine is called, everything that needs
   2370  * to be done has already been done.
   2371  */
   2372 void
   2373 pmap_pageable(pmap, start, end, pageable)
   2374 	pmap_t pmap;
   2375 	vm_offset_t start, end;
   2376 	boolean_t pageable;
   2377 {
   2378 	/* not implemented. */
   2379 }
   2380 
   2381 /* pmap_copy				INTERFACE
   2382  **
   2383  * Copy the mappings of a range of addresses in one pmap, into
   2384  * the destination address of another.
   2385  *
   2386  * This routine is advisory.  Should we one day decide that MMU tables
   2387  * may be shared by more than one pmap, this function should be used to
   2388  * link them together.  Until that day however, we do nothing.
   2389  */
   2390 void
   2391 pmap_copy(pmap_a, pmap_b, dst, len, src)
   2392 	pmap_t pmap_a, pmap_b;
   2393 	vm_offset_t dst;
   2394 	vm_size_t   len;
   2395 	vm_offset_t src;
   2396 {
   2397 	/* not implemented. */
   2398 }
   2399 
   2400 /* pmap_copy_page			INTERFACE
   2401  **
   2402  * Copy the contents of one physical page into another.
   2403  *
   2404  * This function makes use of two virtual pages allocated in pmap_bootstrap()
   2405  * to map the two specified physical pages into the kernel address space.  It
   2406  * then uses bcopy() to copy one into the other.
   2407  *
   2408  * Note: We could use the transparent translation registers to make the
   2409  * mappings.  If we do so, be sure to disable interrupts before using them.
   2410  */
   2411 void
   2412 pmap_copy_page(src, dst)
   2413 	vm_offset_t src, dst;
   2414 {
   2415 	PMAP_LOCK();
   2416 	if (tmp_vpages_inuse)
   2417 		panic("pmap_copy_page: temporary vpages are in use.");
   2418 	tmp_vpages_inuse++;
   2419 
2420 	/* XXX - Use non-cached mappings to avoid cache pollution? */
   2421 	pmap_enter_kernel(tmp_vpages[0], src, VM_PROT_READ);
   2422 	pmap_enter_kernel(tmp_vpages[1], dst, VM_PROT_READ|VM_PROT_WRITE);
   2423 	copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
   2424 
   2425 	/* XXX - there's no real need to unmap the mappings is there? */
   2426 	/* XXX - Only paranoia. -gwr */
   2427 
   2428 	tmp_vpages_inuse--;
   2429 	PMAP_UNLOCK();
   2430 }
   2431 
   2432 /* pmap_zero_page			INTERFACE
   2433  **
   2434  * Zero the contents of the specified physical page.
   2435  *
2436  * Uses one of the virtual pages allocated in pmap_bootstrap()
   2437  * to map the specified page into the kernel address space.  Then uses
   2438  * bzero() to zero out the page.
   2439  */
   2440 void
   2441 pmap_zero_page(pa)
   2442 	vm_offset_t pa;
   2443 {
   2444 	PMAP_LOCK();
   2445 	if (tmp_vpages_inuse)
   2446 		panic("pmap_zero_page: temporary vpages are in use.");
   2447 	tmp_vpages_inuse++;
   2448 
   2449 	pmap_enter_kernel(tmp_vpages[0], pa, VM_PROT_READ|VM_PROT_WRITE);
   2450 	zeropage((char *) tmp_vpages[0]);
   2451 	/* xxx - there's no real need to unmap the mapping is there? */
   2452 
   2453 	tmp_vpages_inuse--;
   2454 	PMAP_UNLOCK();
   2455 }
   2456 
   2457 /* pmap_collect			INTERFACE
   2458  **
   2459  * Called from the VM system when we are about to swap out
   2460  * the process using this pmap.  This should give up any
   2461  * resources held here, including all its MMU tables.
   2462  */
   2463 void
   2464 pmap_collect(pmap)
   2465 	pmap_t pmap;
   2466 {
   2467 	/* XXX - todo... */
   2468 }
   2469 
   2470 /* pmap_create			INTERFACE
   2471  **
   2472  * Create and return a pmap structure.
   2473  */
   2474 pmap_t
   2475 pmap_create(size)
   2476 	vm_size_t size;
   2477 {
   2478 	pmap_t	pmap;
   2479 
   2480 	if (size)
   2481 		return NULL;
   2482 
   2483 	pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
   2484 	pmap_pinit(pmap);
   2485 
   2486 	return pmap;
   2487 }
   2488 
   2489 /* pmap_pinit			INTERNAL
   2490  **
   2491  * Initialize a pmap structure.
   2492  */
   2493 void
   2494 pmap_pinit(pmap)
   2495 	pmap_t pmap;
   2496 {
   2497 	bzero(pmap, sizeof(struct pmap));
   2498 	pmap->pm_a_tmgr = NULL;
   2499 	pmap->pm_a_phys = kernAphys;
   2500 }
   2501 
   2502 /* pmap_release				INTERFACE
   2503  **
   2504  * Release any resources held by the given pmap.
   2505  *
   2506  * This is the reverse analog to pmap_pinit.  It does not
   2507  * necessarily mean for the pmap structure to be deallocated,
   2508  * as in pmap_destroy.
   2509  */
   2510 void
   2511 pmap_release(pmap)
   2512 	pmap_t pmap;
   2513 {
   2514 	/*
   2515 	 * As long as the pmap contains no mappings,
   2516 	 * which always should be the case whenever
   2517 	 * this function is called, there really should
   2518 	 * be nothing to do.
   2519 	 *
   2520 	 * XXX - This function is being called while there are
   2521 	 * still valid mappings, so I guess the above must not
   2522 	 * be true.
   2523 	 * XXX - Unless the mappings persist due to a bug here...
   2524 	 *     + That's what was happening.  The map had no mappings,
   2525 	 *       but it still had an A table.  pmap_remove() was not
   2526 	 *       releasing tables when they were empty.
   2527 	 */
   2528 #ifdef	PMAP_DEBUG
   2529 	if (pmap == NULL)
   2530 		return;
   2531 	if (pmap == pmap_kernel())
   2532 		panic("pmap_release: kernel pmap release requested.");
   2533 #endif
   2534 	if (pmap->pm_a_tmgr != NULL) {
   2535 		free_a_table(pmap->pm_a_tmgr, TRUE);
   2536 		pmap->pm_a_tmgr = NULL;
   2537 		pmap->pm_a_phys = kernAphys;
   2538 		pmap->pm_stats.resident_count = 0;
   2539 	}
   2540 }
   2541 
   2542 /* pmap_reference			INTERFACE
   2543  **
   2544  * Increment the reference count of a pmap.
   2545  */
   2546 void
   2547 pmap_reference(pmap)
   2548 	pmap_t pmap;
   2549 {
   2550 	if (pmap == NULL)
   2551 		return;
   2552 
   2553 	/* pmap_lock(pmap); */
   2554 	pmap->pm_refcount++;
   2555 	/* pmap_unlock(pmap); */
   2556 }
   2557 
   2558 /* pmap_dereference			INTERNAL
   2559  **
   2560  * Decrease the reference count on the given pmap
   2561  * by one and return the current count.
   2562  */
   2563 int
   2564 pmap_dereference(pmap)
   2565 	pmap_t pmap;
   2566 {
   2567 	int rtn;
   2568 
   2569 	if (pmap == NULL)
   2570 		return 0;
   2571 
   2572 	/* pmap_lock(pmap); */
   2573 	rtn = --pmap->pm_refcount;
   2574 	/* pmap_unlock(pmap); */
   2575 
   2576 	return rtn;
   2577 }
   2578 
   2579 /* pmap_destroy			INTERFACE
   2580  **
   2581  * Decrement a pmap's reference count and delete
   2582  * the pmap if it becomes zero.  Will be called
   2583  * only after all mappings have been removed.
   2584  */
   2585 void
   2586 pmap_destroy(pmap)
   2587 	pmap_t pmap;
   2588 {
   2589 	if (pmap == NULL)
   2590 		return;
   2591 	if (pmap == &kernel_pmap)
   2592 		panic("pmap_destroy: kernel_pmap!");
   2593 	if (pmap_dereference(pmap) == 0) {
   2594 		pmap_release(pmap);
   2595 		free(pmap, M_VMPMAP);
   2596 	}
   2597 }
   2598 
   2599 /* pmap_is_referenced			INTERFACE
   2600  **
   2601  * Determine if the given physical page has been
   2602  * referenced (read from [or written to.])
2603  * referenced (read from or written to).
   2604 boolean_t
   2605 pmap_is_referenced(pa)
   2606 	vm_offset_t pa;
   2607 {
   2608 	pv_t      *pv;
   2609 	int       idx, s;
   2610 
   2611 	if (!pv_initialized)
   2612 		return FALSE;
2613 	/* XXX - this may be unnecessary. */
   2614 	if (!is_managed(pa))
   2615 		return FALSE;
   2616 
   2617 	pv = pa2pv(pa);
   2618 	/*
   2619 	 * Check the flags on the pv head.  If they are set,
   2620 	 * return immediately.  Otherwise a search must be done.
   2621 	 */
   2622 	if (pv->pv_flags & PV_FLAGS_USED)
   2623 		return TRUE;
   2624 	else {
   2625 		s = splimp();
   2626 		/*
   2627 		 * Search through all pv elements pointing
   2628 		 * to this page and query their reference bits
   2629 		 */
   2630 		for (idx = pv->pv_idx; idx != PVE_EOL; idx =
   2631 			pvebase[idx].pve_next)
   2632 			if (MMU_PTE_USED(kernCbase[idx])) {
   2633 				splx(s);
   2634 				return TRUE;
   2635 			}
   2636 		splx(s);
   2637 	}
   2638 
   2639 	return FALSE;
   2640 }
   2641 
   2642 /* pmap_is_modified			INTERFACE
   2643  **
   2644  * Determine if the given physical page has been
   2645  * modified (written to.)
   2646  */
   2647 boolean_t
   2648 pmap_is_modified(pa)
   2649 	vm_offset_t pa;
   2650 {
   2651 	pv_t      *pv;
   2652 	int       idx, s;
   2653 
   2654 	if (!pv_initialized)
   2655 		return FALSE;
2656 	/* XXX - this may be unnecessary. */
   2657 	if (!is_managed(pa))
   2658 		return FALSE;
   2659 
   2660 	/* see comments in pmap_is_referenced() */
   2661 	pv = pa2pv(pa);
   2662 	if (pv->pv_flags & PV_FLAGS_MDFY) {
   2663 		return TRUE;
   2664 	} else {
   2665 		s = splimp();
   2666 		for (idx = pv->pv_idx; idx != PVE_EOL; idx =
   2667 			pvebase[idx].pve_next)
   2668 			if (MMU_PTE_MODIFIED(kernCbase[idx])) {
   2669 				splx(s);
   2670 				return TRUE;
   2671 			}
   2672 		splx(s);
   2673 	}
   2674 
   2675 	return FALSE;
   2676 }
   2677 
   2678 /* pmap_page_protect			INTERFACE
   2679  **
   2680  * Applies the given protection to all mappings to the given
   2681  * physical page.
   2682  */
   2683 void
   2684 pmap_page_protect(pa, prot)
   2685 	vm_offset_t pa;
   2686 	vm_prot_t prot;
   2687 {
   2688 	pv_t      *pv;
   2689 	int       idx, s;
   2690 	struct mmu_short_pte_struct *pte;
   2691 
   2692 	if (!is_managed(pa))
   2693 		return;
   2694 
   2695 	pv = pa2pv(pa);
   2696 	s = splimp();
   2697 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
   2698 		pte = &kernCbase[idx];
   2699 		switch (prot) {
   2700 			case VM_PROT_ALL:
   2701 				/* do nothing */
   2702 				break;
   2703 			case VM_PROT_EXECUTE:
   2704 			case VM_PROT_READ:
   2705 			case VM_PROT_READ|VM_PROT_EXECUTE:
   2706 				/* XXX - Must flush cached PTEs here! */
   2707 				pte->attr.raw |= MMU_SHORT_PTE_WP;
   2708 				break;
   2709 			case VM_PROT_NONE:
   2710 				/* Save the mod/ref bits. */
   2711 				pv->pv_flags |= pte->attr.raw;
   2712 				/* Invalidate the PTE. */
   2713 				pte->attr.raw = MMU_DT_INVALID;
   2714 				/* Update table counts. */
   2715 				pmap_dereference_pte(pte);
   2716 				break;
   2717 			default:
   2718 				break;
   2719 		}
   2720 	}
   2721 	if (prot == VM_PROT_NONE)
   2722 		pv->pv_idx = PVE_EOL;
   2723 	splx(s);
   2724 }
   2725 
   2726 /* pmap_get_pteinfo		INTERNAL
   2727  **
   2728  * Called internally to find the pmap and virtual address within that
   2729  * map to which the given pte maps.  Also includes the PTE's C table
   2730  * manager.
   2731  *
   2732  * Returns the pmap in the argument provided, and the virtual address
   2733  * by return value.
   2734  */
   2735 vm_offset_t
   2736 pmap_get_pteinfo(pte, pmap, tbl)
   2737 	mmu_short_pte_t *pte;
   2738 	pmap_t *pmap;
   2739 	c_tmgr_t **tbl;
   2740 {
   2741 	a_tmgr_t    *a_tbl;
   2742 	b_tmgr_t    *b_tbl;
   2743 	c_tmgr_t    *c_tbl;
   2744 	vm_offset_t     va = 0;
   2745 
   2746 	/*
   2747 	 * Determine if the PTE is a kernel PTE or a user PTE.
   2748 	 */
   2749 	if (pte >= mmuCbase) {
   2750 		/*
   2751 		 * The PTE belongs to a user mapping.
   2752 		 * Find the virtual address by decoding table indexes.
   2753 		 * Each successive decode will reveal the address from
   2754 		 * least to most significant bit fashion.
   2755 		 *
2756 		 * 31                               0
2757 		 * +--------------------------------+
2758 		 * |AAAAAAABBBBBBCCCCCCxxxxxxxxxxxxx|
2759 		 * +--------------------------------+
   2760 		 *
   2761 		 * Start with the 'C' bits.
   2762 		 */
   2763 		c_tbl = mmuC2tmgr(pte);
   2764 		va |= (pmap_find_tic(pte) << MMU_TIC_SHIFT);
   2765 		b_tbl = c_tbl->ct_parent;
   2766 
   2767 		/* Add the 'B' bits. */
   2768 		va |= (c_tbl->ct_pidx << MMU_TIB_SHIFT);
   2769 		a_tbl = b_tbl->bt_parent;
   2770 
   2771 		/* Add the 'A' bits. */
   2772 		va |= (b_tbl->bt_pidx << MMU_TIA_SHIFT);
   2773 
   2774 		*pmap = a_tbl->at_parent;
   2775 		*tbl = c_tbl;
   2776 	} else {
   2777 		/*
   2778 		 * The PTE belongs to the kernel map.
   2779 		 */
   2780 		va = sun3x_ptob(pteidx(pte));
   2781 		va += KERNBASE;
   2782 
   2783 		*pmap = pmap_kernel();
   2784 	}
   2785 
   2786 	return va;
   2787 }
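/*
 * A worked example of the decoding above (hypothetical indices): a PTE
 * with TIC 3, in a C table at index 2 of its B table (ct_pidx == 2),
 * which sits at index 1 of the A table (bt_pidx == 1), yields
 *
 *	va = (3 << MMU_TIC_SHIFT) | (2 << MMU_TIB_SHIFT)
 *	   | (1 << MMU_TIA_SHIFT);
 */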
   2788 
   2789 /* pmap_find_tic			INTERNAL
   2790  **
   2791  * Given the address of a pte, find the TIC (level 'C' table index) for
   2792  * the pte within its C table.
   2793  */
   2794 char
   2795 pmap_find_tic(pte)
   2796 	mmu_short_pte_t *pte;
   2797 {
   2798 	return ((pte - mmuCbase) % MMU_C_TBL_SIZE);
   2799 }
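/*
 * This works because all C tables are carved from one contiguous array
 * of PTEs based at mmuCbase; a PTE's offset within that array, modulo
 * the C table size, is therefore its index within its own table.
 */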
   2800 
   2801 
   2802 /* pmap_clear_modify			INTERFACE
   2803  **
   2804  * Clear the modification bit on the page at the specified
   2805  * physical address.
   2806  *
   2807  */
   2808 void
   2809 pmap_clear_modify(pa)
   2810 	vm_offset_t pa;
   2811 {
   2812 	pmap_clear_pv(pa, PV_FLAGS_MDFY);
   2813 }
   2814 
   2815 /* pmap_clear_reference			INTERFACE
   2816  **
   2817  * Clear the referenced bit on the page at the specified
   2818  * physical address.
   2819  */
   2820 void
   2821 pmap_clear_reference(pa)
   2822 	vm_offset_t pa;
   2823 {
   2824 	pmap_clear_pv(pa, PV_FLAGS_USED);
   2825 }
   2826 
   2827 /* pmap_clear_pv			INTERNAL
   2828  **
   2829  * Clears the specified flag from the specified physical address.
   2830  * (Used by pmap_clear_modify() and pmap_clear_reference().)
   2831  *
   2832  * Flag is one of:
   2833  *   PV_FLAGS_MDFY - Page modified bit.
   2834  *   PV_FLAGS_USED - Page used (referenced) bit.
   2835  *
   2836  * This routine must not only clear the flag on the pv list
   2837  * head.  It must also clear the bit on every pte in the pv
   2838  * list associated with the address.
   2839  */
   2840 void
   2841 pmap_clear_pv(pa, flag)
   2842 	vm_offset_t pa;
   2843 	int flag;
   2844 {
   2845 	pv_t      *pv;
   2846 	int       idx, s;
   2847 	vm_offset_t     va;
   2848 	pmap_t          pmap;
   2849 	mmu_short_pte_t *pte;
   2850 	c_tmgr_t        *c_tbl;
   2851 
   2852 	pv = pa2pv(pa);
   2853 
   2854 	s = splimp();
   2855 	pv->pv_flags &= ~(flag);
   2856 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
   2857 		pte = &kernCbase[idx];
   2858 		pte->attr.raw &= ~(flag);
   2859 		/*
   2860 		 * The MC68030 MMU will not set the modified or
   2861 		 * referenced bits on any MMU tables for which it has
   2862 		 * a cached descriptor with its modify bit set.  To insure
   2863 		 * that it will modify these bits on the PTE during the next
   2864 		 * time it is written to or read from, we must flush it from
   2865 		 * the ATC.
   2866 		 *
   2867 		 * Ordinarily it is only necessary to flush the descriptor
   2868 		 * if it is used in the current address space.  But since I
   2869 		 * am not sure that there will always be a notion of
   2870 		 * 'the current address space' when this function is called,
   2871 		 * I will skip the test and always flush the address.  It
   2872 		 * does no harm.
   2873 		 */
   2874 		va = pmap_get_pteinfo(pte, &pmap, &c_tbl);
   2875 		TBIS(va);
   2876 	}
   2877 	splx(s);
   2878 }
   2879 
   2880 /* pmap_extract			INTERFACE
   2881  **
   2882  * Return the physical address mapped by the virtual address
   2883  * in the specified pmap or 0 if it is not known.  It is OK
   2884  * to leave low bits in the returned value, because callers
   2885  * will only pass our return value to pmap_page_index(), so
   2886  * we can arrange to ignore the low bits there.
   2887  *
   2888  * Leaving the low bits there allows pmap_extract to be used
   2889  * as an external interface to get a PTE for things like the
   2890  * machine-specific ddb command: "ddb> mach pgmap".
   2891  * XXX - Maybe we should have pmap_extract_raw()...
   2892  *
   2893  * Note: this function should also apply an exclusive lock
   2894  * on the pmap system during its duration.
   2895  */
   2896 vm_offset_t
   2897 pmap_extract(pmap, va)
   2898 	pmap_t      pmap;
   2899 	vm_offset_t va;
   2900 {
   2901 	int a_idx, b_idx, pte_idx;
   2902 	a_tmgr_t	*a_tbl;
   2903 	b_tmgr_t	*b_tbl;
   2904 	c_tmgr_t	*c_tbl;
   2905 	mmu_short_pte_t	*c_pte;
   2906 
   2907 	if (pmap == pmap_kernel())
   2908 		return pmap_extract_kernel(va);
   2909 	if (pmap == NULL)
   2910 		return 0;
   2911 
   2912 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
   2913 		&c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
   2914 		return 0;
   2915 
   2916 	if (!MMU_VALID_DT(*c_pte))
   2917 		return 0;
   2918 
   2919 	/* XXX: was MMU_PTE_PA(*c_pte); */
   2920 	return c_pte->attr.raw; /* XXX */
   2921 }
   2922 
   2923 /* pmap_extract_kernel		INTERNAL
   2924  **
2925  * Extract a translation from the kernel address space.
   2926  */
   2927 vm_offset_t
   2928 pmap_extract_kernel(va)
   2929 	vm_offset_t va;
   2930 {
   2931 	mmu_short_pte_t *pte;
   2932 
   2933 	pte = &kernCbase[(unsigned long) sun3x_btop(va - KERNBASE)];
   2934 	return MMU_PTE_PA(*pte);
   2935 }
   2936 
   2937 /* pmap_remove_kernel		INTERNAL
   2938  **
   2939  * Remove the mapping of a range of virtual addresses from the kernel map.
   2940  */
   2941 void
   2942 pmap_remove_kernel(start, end)
   2943 	vm_offset_t start;
   2944 	vm_offset_t end;
   2945 {
   2946 	start -= KERNBASE;
   2947 	end   -= KERNBASE;
   2948 	start = sun3x_round_page(start); /* round down */
   2949 	start = sun3x_btop(start);
   2950 	end   += MMU_PAGE_SIZE - 1;    /* next round operation will be up */
   2951 	end   = sun3x_round_page(end); /* round */
   2952 	end   = sun3x_btop(end);
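	/*
	 * For example, assuming the 8KB MMU page size used by this port:
	 * start - KERNBASE == 0x2345 and end - KERNBASE == 0x6001 become
	 * PTE indices 1 and 4, so the loop below invalidates kernCbase[1]
	 * through kernCbase[3], covering offsets 0x2000 through 0x7FFF.
	 */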
   2953 
   2954 	while (start < end)
   2955 		pmap_remove_pte(&kernCbase[start++]);
2956 	/* Always flush the ATC when manipulating the kernel address space. */
   2957 	TBIA();
   2958 }
   2959 
   2960 /* pmap_remove			INTERFACE
   2961  **
   2962  * Remove the mapping of a range of virtual addresses from the given pmap.
   2963  *
   2964  * If the range contains any wired entries, this function will probably create
   2965  * disaster.
   2966  */
   2967 void
   2968 pmap_remove(pmap, start, end)
   2969 	pmap_t pmap;
   2970 	vm_offset_t start;
   2971 	vm_offset_t end;
   2972 {
   2973 
   2974 	if (pmap == pmap_kernel()) {
   2975 		pmap_remove_kernel(start, end);
   2976 		return;
   2977 	}
   2978 
   2979 	/*
   2980 	 * XXX - Temporary(?) statement to prevent panic caused
2981 	 * by vm_alloc_with_pager() handing us a software map (i.e. NULL)
   2982 	 * to remove because it couldn't get backing store.
   2983 	 * (I guess.)
   2984 	 */
   2985 	if (pmap == NULL)
   2986 		return;
   2987 
   2988 	/*
   2989 	 * If the pmap doesn't have an A table of its own, it has no mappings
   2990 	 * that can be removed.
   2991 	 */
   2992 	if (pmap->pm_a_tmgr == NULL)
   2993 		return;
   2994 
   2995 	/*
   2996 	 * Remove the specified range from the pmap.  If the function
   2997 	 * returns true, the operation removed all the valid mappings
   2998 	 * in the pmap and freed its A table.  If this happened to the
   2999 	 * currently loaded pmap, the MMU root pointer must be reloaded
   3000 	 * with the default 'kernel' map.
   3001 	 */
   3002 	if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
   3003 		pmap->pm_a_tmgr = NULL;
   3004 		pmap->pm_a_phys = kernAphys;
   3005 		if (pmap == current_pmap())
   3006 			pmap_activate(pmap);
   3007 	} else {
   3008 		/*
   3009 		 * If we just modified the current address space,
   3010 		 * make sure to flush the MMU cache.
   3011 		 *
3012 		 * XXX - this could be an unnecessarily large flush.
3013 		 * XXX - Could decide, based on the size of the VA range
3014 		 * to be removed, whether to flush "by pages" or "all".
   3015 		 */
   3016 		if (pmap == current_pmap())
   3017 			TBIA();
   3018 	}
   3019 }
   3020 
   3021 /* pmap_remove_a			INTERNAL
   3022  **
   3023  * This is function number one in a set of three that removes a range
   3024  * of memory in the most efficient manner by removing the highest possible
   3025  * tables from the memory space.  This particular function attempts to remove
   3026  * as many B tables as it can, delegating the remaining fragmented ranges to
   3027  * pmap_remove_b().
   3028  *
   3029  * If the removal operation results in an empty A table, the function returns
   3030  * TRUE.
   3031  *
   3032  * It's ugly but will do for now.
   3033  */
   3034 boolean_t
   3035 pmap_remove_a(a_tbl, start, end)
   3036 	a_tmgr_t *a_tbl;
   3037 	vm_offset_t start;
   3038 	vm_offset_t end;
   3039 {
   3040 	boolean_t empty;
   3041 	int idx;
   3042 	vm_offset_t nstart, nend;
   3043 	b_tmgr_t *b_tbl;
   3044 	mmu_long_dte_t  *a_dte;
   3045 	mmu_short_dte_t *b_dte;
   3046 	pmap_t pmap;
   3047 
   3048 	/*
   3049 	 * The following code works with what I call a 'granularity
    3050 	 * reduction algorithm'.  A range of addresses will always have
    3051 	 * the following properties, which are classified according to
    3052 	 * how the range relates to the size of the current granularity
    3053 	 * (here, an A table entry):
   3054 	 *
   3055 	 *            1 2       3 4
   3056 	 * -+---+---+---+---+---+---+---+-
   3058 	 *
   3059 	 * A range will always start on a granularity boundary, illustrated
    3060 	 * by '+' signs in the diagram above, or it will start at some point
    3061 	 * in between granularity boundaries, as illustrated by point 1.
   3062 	 * The first step in removing a range of addresses is to remove the
   3063 	 * range between 1 and 2, the nearest granularity boundary.  This
   3064 	 * job is handled by the section of code governed by the
   3065 	 * 'if (start < nstart)' statement.
   3066 	 *
    3067 	 * A range will always encompass zero or more integral granules,
   3068 	 * illustrated by points 2 and 3.  Integral granules are easy to
   3069 	 * remove.  The removal of these granules is the second step, and
   3070 	 * is handled by the code block 'if (nstart < nend)'.
   3071 	 *
   3072 	 * Lastly, a range will always end on a granularity boundary,
    3073 	 * illustrated by point 3, or it will fall just beyond one, as
    3074 	 * shown by point 4.  The last step involves removing this range
    3075 	 * and is handled by the code block 'if (nend < end)'.
   3076 	 */
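	/*
	 * Worked example (added for illustration; assumes the 7-bit
	 * A-table index of a 32-bit VA described earlier, i.e. one 32MB
	 * granule per A entry): removing [0x00300000, 0x05100000) yields
	 * nstart = 0x02000000 and nend = 0x04000000.  Step one hands
	 * [0x00300000, 0x02000000) to pmap_remove_b(), step two frees
	 * the whole B table covering [0x02000000, 0x04000000), and step
	 * three hands [0x04000000, 0x05100000) to pmap_remove_b().
	 */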
   3077 	pmap = a_tbl->at_parent;
   3078 	nstart = MMU_ROUND_UP_A(start);
   3079 	nend = MMU_ROUND_A(end);
   3080 
   3081 	if (start < nstart) {
   3082 		/*
    3083 		 * This block is executed if the range starts in between
    3084 		 * granularity boundaries.
   3085 		 *
   3086 		 * First find the DTE which is responsible for mapping
   3087 		 * the start of the range.
   3088 		 */
   3089 		idx = MMU_TIA(start);
   3090 		a_dte = &a_tbl->at_dtbl[idx];
   3091 
   3092 		/*
   3093 		 * If the DTE is valid then delegate the removal of the sub
   3094 		 * range to pmap_remove_b(), which can remove addresses at
   3095 		 * a finer granularity.
   3096 		 */
   3097 		if (MMU_VALID_DT(*a_dte)) {
   3098 			b_dte = mmu_ptov(a_dte->addr.raw);
   3099 			b_tbl = mmuB2tmgr(b_dte);
   3100 
   3101 			/*
   3102 			 * The sub range to be removed starts at the start
   3103 			 * of the full range we were asked to remove, and ends
    3104 			 * at the lesser of:
    3105 			 * 1. The end of the full range, -or-
    3106 			 * 2. The start of the full range, rounded up to the
    3107 			 *    nearest granularity boundary (nstart).
   3108 			 */
   3109 			if (end < nstart)
   3110 				empty = pmap_remove_b(pmap, b_tbl, start, end);
   3111 			else
   3112 				empty = pmap_remove_b(pmap,b_tbl,start,nstart);
   3113 
   3114 			/*
   3115 			 * If the removal resulted in an empty B table,
   3116 			 * invalidate the DTE that points to it and decrement
   3117 			 * the valid entry count of the A table.
   3118 			 */
   3119 			if (empty) {
   3120 				a_dte->attr.raw = MMU_DT_INVALID;
   3121 				a_tbl->at_ecnt--;
   3122 			}
   3123 		}
   3124 		/*
   3125 		 * If the DTE is invalid, the address range is already non-
    3126 		 * existent and can simply be skipped.
   3127 		 */
   3128 	}
   3129 	if (nstart < nend) {
   3130 		/*
    3131 		 * This block is executed if the range spans a whole number
    3132 		 * of granules (A table entries).
   3133 		 *
   3134 		 * First find the DTE which is responsible for mapping
   3135 		 * the start of the first granule involved.
   3136 		 */
   3137 		idx = MMU_TIA(nstart);
   3138 		a_dte = &a_tbl->at_dtbl[idx];
   3139 
   3140 		/*
   3141 		 * Remove entire sub-granules (B tables) one at a time,
   3142 		 * until reaching the end of the range.
   3143 		 */
   3144 		for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
   3145 			if (MMU_VALID_DT(*a_dte)) {
   3146 				/*
   3147 				 * Find the B table manager for the
   3148 				 * entry and free it.
   3149 				 */
   3150 				b_dte = mmu_ptov(a_dte->addr.raw);
   3151 				b_tbl = mmuB2tmgr(b_dte);
   3152 				free_b_table(b_tbl, TRUE);
   3153 
   3154 				/*
   3155 				 * Invalidate the DTE that points to the
   3156 				 * B table and decrement the valid entry
   3157 				 * count of the A table.
   3158 				 */
   3159 				a_dte->attr.raw = MMU_DT_INVALID;
   3160 				a_tbl->at_ecnt--;
   3161 			}
   3162 	}
   3163 	if (nend < end) {
   3164 		/*
   3165 		 * This block is executed if the range ends beyond a
   3166 		 * granularity boundary.
   3167 		 *
   3168 		 * First find the DTE which is responsible for mapping
   3169 		 * the start of the nearest (rounded down) granularity
   3170 		 * boundary.
   3171 		 */
   3172 		idx = MMU_TIA(nend);
   3173 		a_dte = &a_tbl->at_dtbl[idx];
   3174 
   3175 		/*
   3176 		 * If the DTE is valid then delegate the removal of the sub
   3177 		 * range to pmap_remove_b(), which can remove addresses at
   3178 		 * a finer granularity.
   3179 		 */
   3180 		if (MMU_VALID_DT(*a_dte)) {
   3181 			/*
   3182 			 * Find the B table manager for the entry
   3183 			 * and hand it to pmap_remove_b() along with
   3184 			 * the sub range.
   3185 			 */
   3186 			b_dte = mmu_ptov(a_dte->addr.raw);
   3187 			b_tbl = mmuB2tmgr(b_dte);
   3188 
   3189 			empty = pmap_remove_b(pmap, b_tbl, nend, end);
   3190 
   3191 			/*
   3192 			 * If the removal resulted in an empty B table,
   3193 			 * invalidate the DTE that points to it and decrement
   3194 			 * the valid entry count of the A table.
   3195 			 */
   3196 			if (empty) {
   3197 				a_dte->attr.raw = MMU_DT_INVALID;
   3198 				a_tbl->at_ecnt--;
   3199 			}
   3200 		}
   3201 	}
   3202 
   3203 	/*
   3204 	 * If there are no more entries in the A table, release it
   3205 	 * back to the available pool and return TRUE.
   3206 	 */
   3207 	if (a_tbl->at_ecnt == 0) {
   3208 		a_tbl->at_parent = NULL;
   3209 		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
   3210 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
   3211 		empty = TRUE;
   3212 	} else {
   3213 		empty = FALSE;
   3214 	}
   3215 
   3216 	return empty;
   3217 }
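
/*
 * The same three-phase shape, sketched generically (illustrative only;
 * every name below is a hypothetical stand-in).  'GRAN' is the granule
 * size at the current level: MMU_TIA_RANGE here, MMU_TIB_RANGE in
 * pmap_remove_b().
 */
#if 0
	nstart = ROUND_UP(start, GRAN);
	nend = ROUND_DOWN(end, GRAN);
	if (start < nstart)			/* leading fragment */
		remove_at_finer_level(start, MIN(end, nstart));
	for (va = nstart; va < nend; va += GRAN)
		free_whole_granule(va);		/* whole granules */
	if (nend < end)				/* trailing fragment */
		remove_at_finer_level(nend, end);
#endif	/* 0 */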
   3218 
   3219 /* pmap_remove_b			INTERNAL
   3220  **
   3221  * Remove a range of addresses from an address space, trying to remove entire
   3222  * C tables if possible.
   3223  *
   3224  * If the operation results in an empty B table, the function returns TRUE.
   3225  */
   3226 boolean_t
   3227 pmap_remove_b(pmap, b_tbl, start, end)
   3228 	pmap_t pmap;
   3229 	b_tmgr_t *b_tbl;
   3230 	vm_offset_t start;
   3231 	vm_offset_t end;
   3232 {
   3233 	boolean_t empty;
   3234 	int idx;
   3235 	vm_offset_t nstart, nend, rstart;
   3236 	c_tmgr_t *c_tbl;
   3237 	mmu_short_dte_t  *b_dte;
   3238 	mmu_short_pte_t  *c_dte;
   3239 
   3240 
   3241 	nstart = MMU_ROUND_UP_B(start);
   3242 	nend = MMU_ROUND_B(end);
   3243 
   3244 	if (start < nstart) {
   3245 		idx = MMU_TIB(start);
   3246 		b_dte = &b_tbl->bt_dtbl[idx];
   3247 		if (MMU_VALID_DT(*b_dte)) {
   3248 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
   3249 			c_tbl = mmuC2tmgr(c_dte);
   3250 			if (end < nstart)
   3251 				empty = pmap_remove_c(pmap, c_tbl, start, end);
   3252 			else
   3253 				empty = pmap_remove_c(pmap,c_tbl,start,nstart);
   3254 			if (empty) {
   3255 				b_dte->attr.raw = MMU_DT_INVALID;
   3256 				b_tbl->bt_ecnt--;
   3257 			}
   3258 		}
   3259 	}
   3260 	if (nstart < nend) {
   3261 		idx = MMU_TIB(nstart);
   3262 		b_dte = &b_tbl->bt_dtbl[idx];
   3263 		rstart = nstart;
   3264 		while (rstart < nend) {
   3265 			if (MMU_VALID_DT(*b_dte)) {
   3266 				c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
   3267 				c_tbl = mmuC2tmgr(c_dte);
   3268 				free_c_table(c_tbl, TRUE);
   3269 				b_dte->attr.raw = MMU_DT_INVALID;
   3270 				b_tbl->bt_ecnt--;
   3271 			}
   3272 			b_dte++;
   3273 			rstart += MMU_TIB_RANGE;
   3274 		}
   3275 	}
   3276 	if (nend < end) {
   3277 		idx = MMU_TIB(nend);
   3278 		b_dte = &b_tbl->bt_dtbl[idx];
   3279 		if (MMU_VALID_DT(*b_dte)) {
   3280 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
   3281 			c_tbl = mmuC2tmgr(c_dte);
   3282 			empty = pmap_remove_c(pmap, c_tbl, nend, end);
   3283 			if (empty) {
   3284 				b_dte->attr.raw = MMU_DT_INVALID;
   3285 				b_tbl->bt_ecnt--;
   3286 			}
   3287 		}
   3288 	}
   3289 
   3290 	if (b_tbl->bt_ecnt == 0) {
   3291 		b_tbl->bt_parent = NULL;
   3292 		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   3293 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
   3294 		empty = TRUE;
   3295 	} else {
   3296 		empty = FALSE;
   3297 	}
   3298 
   3299 	return empty;
   3300 }
   3301 
   3302 /* pmap_remove_c			INTERNAL
   3303  **
   3304  * Remove a range of addresses from the given C table.
   3305  */
   3306 boolean_t
   3307 pmap_remove_c(pmap, c_tbl, start, end)
   3308 	pmap_t pmap;
   3309 	c_tmgr_t *c_tbl;
   3310 	vm_offset_t start;
   3311 	vm_offset_t end;
   3312 {
   3313 	boolean_t empty;
   3314 	int idx;
   3315 	mmu_short_pte_t *c_pte;
   3316 
   3317 	idx = MMU_TIC(start);
   3318 	c_pte = &c_tbl->ct_dtbl[idx];
   3319 	while (start < end) {
   3320 		if (MMU_VALID_DT(*c_pte)) {
   3321 			pmap_remove_pte(c_pte);
   3322 			c_tbl->ct_ecnt--;
   3323 			pmap->pm_stats.resident_count--;
   3324 		}
   3325 		start += MMU_PAGE_SIZE;
   3326 		c_pte++;
   3327 	}
   3328 
   3329 	if (c_tbl->ct_ecnt == 0) {
   3330 		c_tbl->ct_parent = NULL;
   3331 		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
    3332 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
    3333 		empty = TRUE;
    3334 	} else {
    3335 		empty = FALSE;
    3336 	}
    3337 
    3338 	return empty;
   3339 }
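
/*
 * Illustrative note (assuming the 8KB MMU_PAGE_SIZE used here): the
 * loop above visits (end - start) / MMU_PAGE_SIZE PTEs, so removing a
 * 64KB range touches 8 of them.
 */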
   3340 
   3341 /* is_managed				INTERNAL
   3342  **
   3343  * Determine if the given physical address is managed by the PV system.
   3344  * Note that this logic assumes that no one will ask for the status of
   3345  * addresses which lie in-between the memory banks on the 3/80.  If they
   3346  * do so, it will falsely report that it is managed.
   3347  *
   3348  * Note: "managed" means that mappings to this physical address are
   3349  * recorded in the "physical to virtual" (PV) system.  Normally,
   3350  * physical memory used permanently by the kernel is not managed
   3351  * (i.e. memory below avail_start).
   3352  */
   3353 boolean_t
   3354 is_managed(pa)
   3355 	vm_offset_t pa;
   3356 {
   3357 	if (pa >= avail_start && pa < avail_end)
   3358 		return TRUE;
   3359 	else
   3360 		return FALSE;
   3361 }
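
/*
 * Illustrative sketch (not part of the original source): a hole-aware
 * variant that walks the memory bank list instead.  It is not needed
 * so long as callers never ask about addresses inside the inter-bank
 * holes; 'is_managed_strict' is a hypothetical name.
 */
#if 0
boolean_t
is_managed_strict(pa)
	vm_offset_t pa;
{
	struct pmap_physmem_struct *bank;

	for (bank = avail_mem; bank != NULL; bank = bank->pmem_next)
		if (pa >= bank->pmem_start && pa < bank->pmem_end)
			return (pa >= avail_start && pa < avail_end);
	return FALSE;
}
#endif	/* 0 */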
   3362 
   3363 /* pmap_bootstrap_alloc			INTERNAL
   3364  **
   3365  * Used internally for memory allocation at startup when malloc is not
   3366  * available.  This code will fail once it crosses the first memory
   3367  * bank boundary on the 3/80.  Hopefully by then however, the VM system
   3368  * will be in charge of allocation.
   3369  */
   3370 void *
   3371 pmap_bootstrap_alloc(size)
   3372 	int size;
   3373 {
   3374 	void *rtn;
   3375 
   3376 #ifdef	DEBUG
   3377 	if (bootstrap_alloc_enabled == FALSE) {
   3378 		mon_printf("pmap_bootstrap_alloc: disabled\n");
   3379 		sunmon_abort();
   3380 	}
   3381 #endif
   3382 
   3383 	rtn = (void *) virtual_avail;
   3384 	virtual_avail += size;
   3385 
   3386 #ifdef	DEBUG
   3387 	if (virtual_avail > virtual_contig_end) {
   3388 		mon_printf("pmap_bootstrap_alloc: out of mem\n");
   3389 		sunmon_abort();
   3390 	}
   3391 #endif
   3392 
   3393 	return rtn;
   3394 }
   3395 
    3396 /* pmap_bootstrap_aalign		INTERNAL
    3397  **
    3398  * Used to ensure that the next call to pmap_bootstrap_alloc() will
    3399  * return a chunk of memory aligned to the specified power-of-two size.
   3400  */
   3401 void
   3402 pmap_bootstrap_aalign(size)
   3403 	int size;
   3404 {
   3405 	int off;
   3406 
   3407 	off = virtual_avail & (size - 1);
   3408 	if (off) {
   3409 		(void) pmap_bootstrap_alloc(size - off);
   3410 	}
   3411 }
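
/*
 * Usage sketch (illustrative only; 'tbl' is a hypothetical pointer).
 * The mask arithmetic above is why the size must be a power of two.
 */
#if 0
	pmap_bootstrap_aalign(NBPG);		/* align to a page boundary */
	tbl = pmap_bootstrap_alloc(NBPG);	/* now page-aligned */
#endif	/* 0 */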
   3412 
   3413 /* pmap_activate			INTERFACE
   3414  **
   3415  * This is called by locore.s:cpu_switch when we are switching to a
   3416  * new process.  This should load the MMU context for the new proc.
   3417  * XXX - Later, this should be done directly in locore.s
   3418  */
   3419 void
   3420 pmap_activate(pmap)
    3421 	pmap_t	pmap;
   3422 {
   3423 	u_long rootpa;
   3424 
   3425 	/* Only do reload/flush if we have to. */
   3426 	rootpa = pmap->pm_a_phys;
   3427 	if (kernel_crp.rp_addr != rootpa) {
   3428 		DPRINT(("pmap_activate(%p)\n", pmap));
   3429 		kernel_crp.rp_addr = rootpa;
   3430 		loadcrp(&kernel_crp);
   3431 		ICIA();
   3432 	}
   3433 }
   3434 
   3435 /* pmap_pa_exists
   3436  **
   3437  * Used by the /dev/mem driver to see if a given PA is memory
   3438  * that can be mapped.  (The PA is not in a hole.)
   3439  */
   3440 int
   3441 pmap_pa_exists(pa)
   3442 	vm_offset_t pa;
   3443 {
   3444 	/* XXX - NOTYET */
   3445 	return (0);
   3446 }
   3447 
   3448 /*
   3449  * Count resident pages in this pmap.
   3450  * See: kern_sysctl.c:pmap_resident_count
   3451  */
   3452 segsz_t
   3453 pmap_resident_pages(pmap)
   3454 	pmap_t pmap;
   3455 {
   3456 	int pages;
   3457 
   3458 	if (pmap->pm_a_tmgr == NULL)
   3459 		return (0);
   3460 
   3461 	/* XXX - Todo: sum up ct->ct_ecnt on all C tables. */
   3462 	pages = pmap->pm_stats.resident_count;
   3463 
   3464 	return (pages);
   3465 }
   3466 
   3467 /*
   3468  * Count wired pages in this pmap.
   3469  * See vm_mmap.c:pmap_wired_count
   3470  */
   3471 segsz_t
   3472 pmap_wired_pages(pmap)
   3473 	pmap_t pmap;
   3474 {
   3475 	int pages;
   3476 
   3477 	if (pmap->pm_a_tmgr == NULL)
   3478 		return (0);
   3479 
   3480 	/* XXX - Todo: sum up ct->ct_wcnt on all C tables. */
   3481 	pages = pmap->pm_stats.wired_count;
   3482 
   3483 	return (pages);
   3484 }
   3485 
   3486 /* pmap_update
   3487  **
   3488  * Apply any delayed changes scheduled for all pmaps immediately.
   3489  *
   3490  * No delayed operations are currently done in this pmap.
   3491  */
   3492 void
   3493 pmap_update()
   3494 {
   3495 	/* not implemented. */
   3496 }
   3497 
   3498 /* pmap_virtual_space			INTERFACE
   3499  **
   3500  * Return the current available range of virtual addresses in the
    3501  * arguments provided.  Only really called once.
   3502  */
   3503 void
   3504 pmap_virtual_space(vstart, vend)
   3505 	vm_offset_t *vstart, *vend;
   3506 {
   3507 	*vstart = virtual_avail;
   3508 	*vend = virtual_end;
   3509 }
   3510 
   3511 /* pmap_free_pages			INTERFACE
   3512  **
   3513  * Return the number of physical pages still available.
   3514  *
   3515  * This is probably going to be a mess, but it's only called
   3516  * once and it's the only function left that I have to implement!
   3517  */
   3518 u_int
   3519 pmap_free_pages()
   3520 {
   3521 	int i;
   3522 	u_int left;
   3523 	vm_offset_t avail;
   3524 
   3525 	avail = avail_next;
   3526 	left = 0;
   3527 	i = 0;
   3528 	while (avail >= avail_mem[i].pmem_end) {
   3529 		if (avail_mem[i].pmem_next == NULL)
   3530 			return 0;
   3531 		i++;
   3532 	}
   3533 	while (i < SUN3X_80_MEM_BANKS) {
   3534 		if (avail < avail_mem[i].pmem_start) {
   3535 			/* Avail is inside a hole, march it
   3536 			 * up to the next bank.
   3537 			 */
   3538 			avail = avail_mem[i].pmem_start;
   3539 		}
   3540 		left += sun3x_btop(avail_mem[i].pmem_end - avail);
   3541 		if (avail_mem[i].pmem_next == NULL)
   3542 			break;
   3543 		i++;
   3544 	}
   3545 
   3546 	return left;
   3547 }
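
/*
 * Worked example (added for illustration, assuming 8KB pages): with
 * avail_next at 3MB, bank 0 spanning [0, 4MB) and bank 1 spanning
 * [6MB, 8MB), the loops above count sun3x_btop(4MB - 3MB) +
 * sun3x_btop(8MB - 6MB) = 128 + 256 = 384 free pages, skipping the
 * hole between the banks.
 */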
   3548 
   3549 /* pmap_page_index			INTERFACE
   3550  **
    3551  * Return the index of the given physical page in a list of usable
   3552  * physical pages in the system.  Holes in physical memory may be counted
   3553  * if so desired.  As long as pmap_free_pages() and pmap_page_index()
   3554  * agree as to whether holes in memory do or do not count as valid pages,
   3555  * it really doesn't matter.  However, if you like to save a little
   3556  * memory, don't count holes as valid pages.  This is even more true when
   3557  * the holes are large.
   3558  *
   3559  * We will not count holes as valid pages.  We can generate page indexes
   3560  * that conform to this by using the memory bank structures initialized
   3561  * in pmap_alloc_pv().
   3562  */
   3563 int
   3564 pmap_page_index(pa)
   3565 	vm_offset_t pa;
   3566 {
   3567 	struct pmap_physmem_struct *bank = avail_mem;
   3568 
   3569 	/* XXX - See comment above pmap_extract(). */
   3570 	pa &= ~PGOFSET;
   3571 
   3572 	/* Search for the memory bank with this page. */
   3573 	/* XXX - What if it is not physical memory? */
   3574 	while (pa > bank->pmem_end)
   3575 		bank = bank->pmem_next;
   3576 	pa -= bank->pmem_start;
   3577 
   3578 	return (bank->pmem_pvbase + sun3x_btop(pa));
   3579 }
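
/*
 * Worked example (added for illustration, assuming 8KB pages): with
 * bank 0 spanning [0, 4MB) at pmem_pvbase 0 and bank 1 spanning
 * [6MB, 8MB) at pmem_pvbase 512, pa = 6MB + 16KB falls in bank 1 and
 * yields index 512 + sun3x_btop(16KB) = 514.  The 2MB hole between
 * the banks consumes no indexes.
 */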
   3580 
   3581 /* pmap_next_page			INTERFACE
   3582  **
   3583  * Place the physical address of the next available page in the
   3584  * argument given.  Returns FALSE if there are no more pages left.
   3585  *
   3586  * This function must jump over any holes in physical memory.
   3587  * Once this function is used, any use of pmap_bootstrap_alloc()
   3588  * is a sin.  Sinners will be punished with erratic behavior.
   3589  */
   3590 boolean_t
   3591 pmap_next_page(pa)
   3592 	vm_offset_t *pa;
   3593 {
   3594 	static struct pmap_physmem_struct *curbank = avail_mem;
   3595 
    3596 	if (avail_next >= curbank->pmem_end) {
    3597 		if (curbank->pmem_next == NULL)
    3598 			return FALSE;
    3599 		curbank = curbank->pmem_next;
    3600 		avail_next = curbank->pmem_start;
    3601 	}
   3603 
   3604 	*pa = avail_next;
   3605 	avail_next += NBPG;
   3606 	return TRUE;
   3607 }
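
/*
 * Usage sketch (illustrative only; the real caller is the VM system's
 * startup code):
 */
#if 0
	vm_offset_t pa;

	while (pmap_next_page(&pa)) {
		/* hand the page at 'pa' to the VM system */
	}
#endif	/* 0 */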
   3608 
   3609 /************************ SUN3 COMPATIBILITY ROUTINES ********************
   3610  * The following routines are only used by DDB for tricky kernel text    *
    3611  * operations in db_memrw.c.  They are provided for sun3                 *
   3612  * compatibility.                                                        *
   3613  *************************************************************************/
   3614 /* get_pte			INTERNAL
   3615  **
    3616  * Return the page descriptor that describes the kernel mapping
   3617  * of the given virtual address.
   3618  *
   3619  * XXX - It might be nice if this worked outside of the MMU
   3620  * structures we manage.  (Could do it with ptest). -gwr
   3621  */
   3622 vm_offset_t
   3623 get_pte(va)
   3624 	vm_offset_t va;
   3625 {
   3626 	u_long idx;
   3627 
   3628 	if (va < KERNBASE)
   3629 		return 0;
   3630 
   3631 	idx = (u_long) sun3x_btop(va - KERNBASE);
   3632 	return (kernCbase[idx].attr.raw);
   3633 }
   3634 
   3635 /* set_pte			INTERNAL
   3636  **
   3637  * Set the page descriptor that describes the kernel mapping
   3638  * of the given virtual address.
   3639  */
   3640 void
   3641 set_pte(va, pte)
   3642 	vm_offset_t va;
   3643 	vm_offset_t pte;
   3644 {
   3645 	u_long idx;
   3646 
   3647 	if (va < KERNBASE)
   3648 		return;
   3649 
   3650 	idx = (unsigned long) sun3x_btop(va - KERNBASE);
   3651 	kernCbase[idx].attr.raw = pte;
   3652 }
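
/*
 * Usage sketch (illustrative only): how DDB-style code might briefly
 * lift write protection on a kernel text page.  'va' is a hypothetical
 * address, and MMU_SHORT_PTE_WP is assumed to name the write-protect
 * bit of a short-format PTE.
 */
#if 0
	vm_offset_t oldpte;

	oldpte = get_pte(va);
	set_pte(va, oldpte & ~MMU_SHORT_PTE_WP);	/* allow writes */
	/* ... patch the text page ... */
	set_pte(va, oldpte);				/* restore */
#endif	/* 0 */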
   3653 
   3654 #ifdef	DEBUG
   3655 /************************** DEBUGGING ROUTINES **************************
   3656  * The following routines are meant to be an aid to debugging the pmap  *
   3657  * system.  They are callable from the DDB command line and should be   *
   3658  * prepared to be handed unstable or incomplete states of the system.   *
   3659  ************************************************************************/
   3660 
   3661 /* pv_list
   3662  **
   3663  * List all pages found on the pv list for the given physical page.
   3664  * To avoid endless loops the listing will stop at the end of the list
   3665  * or after 'n' entries - whichever comes first.
   3666  */
   3667 void
   3668 pv_list(pa, n)
   3669 	vm_offset_t pa;
   3670 	int n;
   3671 {
   3672 	int  idx;
   3673 	vm_offset_t va;
   3674 	pv_t *pv;
   3675 	c_tmgr_t *c_tbl;
   3676 	pmap_t pmap;
   3677 
   3678 	pv = pa2pv(pa);
   3679 	idx = pv->pv_idx;
   3680 
    3681 	for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
   3682 		va = pmap_get_pteinfo(&kernCbase[idx], &pmap, &c_tbl);
   3683 		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
   3684 			idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
   3685 	}
   3686 }
   3687 #endif	/* DEBUG */
   3688 
   3689 #ifdef NOT_YET
   3690 /* and maybe not ever */
   3691 /************************** LOW-LEVEL ROUTINES **************************
    3692  * These routines will eventually be rewritten into assembly and placed *
   3693  * in locore.s.  They are here now as stubs so that the pmap module can *
   3694  * be linked as a standalone user program for testing.                  *
   3695  ************************************************************************/
   3696 /* flush_atc_crp			INTERNAL
   3697  **
   3698  * Flush all page descriptors derived from the given CPU Root Pointer
    3699  * (CRP), or 'A' table as it is known here, from the 68851's address
    3700  * translation cache (ATC).
   3701  */
   3702 void
    3703 flush_atc_crp(a_tbl)
	mmu_long_dte_t *a_tbl;	/* XXX assumed type; was an implicit int */
    3704 {
   3705 	mmu_long_rp_t rp;
   3706 
   3707 	/* Create a temporary root table pointer that points to the
   3708 	 * given A table.
   3709 	 */
   3710 	rp.attr.raw = ~MMU_LONG_RP_LU;
   3711 	rp.addr.raw = (unsigned int) a_tbl;
   3712 
   3713 	mmu_pflushr(&rp);
   3714 	/* mmu_pflushr:
   3715 	 * 	movel   sp(4)@,a0
   3716 	 * 	pflushr a0@
   3717 	 *	rts
   3718 	 */
   3719 }
   3720 #endif /* NOT_YET */
   3721