/*	$NetBSD: pmap.c,v 1.97.10.1 2007/11/06 23:23:08 matt Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE contains the physical base
 * address of a page, to which the remaining 13 bits of the VA are added,
 * producing the mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */

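/*
 * Added illustration (not from the original source): how a 32-bit VA
 * splits under the 7/6/6/13 arrangement described above.  The macro
 * names below are hypothetical; the real code uses MMU_TIA(), MMU_TIB(),
 * MMU_TIC() and the MMU_TIx_SHIFT constants from <machine/pte.h>.
 */
#ifdef PMAP_EXAMPLE	/* illustrative sketch only, never compiled in */
#define	EX_A_IDX(va)	(((va) >> 25) & 0x7f)	/* 7 bits: A table index */
#define	EX_B_IDX(va)	(((va) >> 19) & 0x3f)	/* 6 bits: B table index */
#define	EX_C_IDX(va)	(((va) >> 13) & 0x3f)	/* 6 bits: C table (PTE) index */
#define	EX_PG_OFF(va)	((va) & 0x1fff)		/* 13 bits: offset in 8K page */
#endif	/* PMAP_EXAMPLE */
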
/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * an MC68851 chip.  Instead, it has a version of this MMU that is built
 * into the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.97.10.1 2007/11/06 23:23:08 matt Exp $");

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <m68k/cacheops.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define DPRINT(args) if (pmap_debug) printf args

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define DPRINT(args)  /* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr' structures,
 * one for each MMU table in the system.
 *
 *                        MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *      towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *              | Kernel     MMU A level table                          |
 * kernBbase -> +-------------------------------------------------------+
 *              | Kernel     MMU B level tables                         |
 * kernCbase -> +-------------------------------------------------------+
 *              |                                                       |
 *              | Kernel     MMU C level tables                         |
 *              |                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *              | User       MMU C level tables                         |
 * mmuBbase  -> +-------------------------------------------------------+
 *              | User       MMU B level tables                         |
 * mmuAbase  -> +-------------------------------------------------------+
 *              |                                                       |
 *              | User       MMU A level tables                         |
 *              |                                                       |
 * tmgrAbase -> +-------------------------------------------------------+
 *              |  TMGR A level table structures                        |
 * tmgrBbase -> +-------------------------------------------------------+
 *              |  TMGR B level table structures                        |
 * tmgrCbase -> +-------------------------------------------------------+
 *              |  TMGR C level table structures                        |
 * pvebase   -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list elements)    |
 * pvbase    -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list heads)       |
 *              |                                                       |
 *              +-------------------------------------------------------+
 *      towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vaddr_t		kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_short_pte_t	*mmuCbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_long_dte_t	*mmuAbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t		*pvbase;
static pv_elem_t	*pvebase;
struct pmap		kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
/* Safe to use pmap_bootstrap_alloc(). */
static bool bootstrap_alloc_enabled = false;
/* Temporary virtual pages are in use */
int tmp_vpages_inuse;

/*
 * XXX:  For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vaddr_t	virtual_avail, virtual_end;
/* Physical address space available: */
paddr_t	avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vaddr_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
paddr_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vaddr_t tmp_vpages[2];

/* memory pool for pmap structures */
struct pool	pmap_pmap_pool;

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimate of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not the physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef	FIXED_NTABLES
#define NUM_A_TABLES	16
#define NUM_B_TABLES	32
#define NUM_C_TABLES	64
#else
unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif	/* FIXED_NTABLES */

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define	KVAS_SIZE		(-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
#define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
#define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)

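/*
 * Added worked example (not from the original source): with the 7/6/6/13
 * bit split described at the top of this file, the shifts would be
 * MMU_TIC_SHIFT = 13, MMU_TIB_SHIFT = 19 and MMU_TIA_SHIFT = 25.
 * Assuming, say, KERNBASE = 0xF8000000, KVAS_SIZE is 0x08000000 (128MB)
 * and the above would evaluate to:
 *	KERN_B_TABLES = 0x08000000 >> 25 =     4
 *	KERN_C_TABLES = 0x08000000 >> 19 =   256
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384	(16384 * 8KB = 128MB)
 */
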
/*************************** MISCELLANEOUS MACROS *************************/
#define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
#define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
#define pmap_add_ref(pmap) ++pmap->pm_refcount
#define pmap_del_ref(pmap) --pmap->pm_refcount
#define pmap_refcount(pmap) pmap->pm_refcount

void *pmap_bootstrap_alloc(int);

static INLINE void *mmu_ptov(paddr_t);
static INLINE paddr_t mmu_vtop(void *);

#if	0
static INLINE a_tmgr_t *mmuA2tmgr(mmu_long_dte_t *);
#endif /* 0 */
static INLINE b_tmgr_t *mmuB2tmgr(mmu_short_dte_t *);
static INLINE c_tmgr_t *mmuC2tmgr(mmu_short_pte_t *);

static INLINE pv_t *pa2pv(paddr_t);
static INLINE int   pteidx(mmu_short_pte_t *);
static INLINE pmap_t current_pmap(void);

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(paddr_t pa)
{
	vaddr_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return (void *)va;
}

static INLINE paddr_t
mmu_vtop(void *vva)
{
	vaddr_t va;

	va = (vaddr_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return va - KERNBASE;
}

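/*
 * Added usage sketch (not from the original source, never compiled in):
 * the linear mapping lets the bootstrap code hand the MMU physical table
 * addresses while the kernel keeps editing the same tables through
 * virtual pointers.
 */
#ifdef PMAP_EXAMPLE	/* illustrative sketch only */
static void
mmu_xlate_example(void)
{
	paddr_t pa;

	/* The MMU wants the physical address of the kernel A table... */
	pa = mmu_vtop(kernAbase);
	/* ...while the kernel edits it via the linearly-mapped VA. */
	KASSERT((mmu_long_dte_t *)mmu_ptov(pa) == kernAbase);
}
#endif	/* PMAP_EXAMPLE */
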
/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).  Since this is pointer subtraction, the
 *    result is already in units of table entries.
 * 2) convert this offset into a table index by dividing it by the
 *    number of entries in one MMU 'A' table (MMU_A_TBL_SIZE).
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/*  This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmu_long_dte_t *mmuAtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return &Atmgrbase[idx];
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmu_short_dte_t *mmuBtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return &Btmgrbase[idx];
}

/* mmuC2tmgr			INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmu_short_pte_t *mmuCtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return &Ctmgrbase[idx];
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		m68k_btop(pa)\
 *	])
 */

/* pa2pv			INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(paddr_t pa)
{
	struct pmap_physmem_struct *bank;
	int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + m68k_btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}

/* pteidx			INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(mmu_short_pte_t *pte)
{

	return pte - kernCbase;
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curlwp" appears...
 */
static INLINE pmap_t
current_pmap(void)
{
	struct vmspace *vm;
	struct vm_map *map;
	pmap_t	pmap;

	vm = curproc->p_vmspace;
	map = &vm->vm_map;
	pmap = vm_map_pmap(map);

	return pmap;
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/*
 * Internal functions
 */
a_tmgr_t *get_a_table(void);
b_tmgr_t *get_b_table(void);
c_tmgr_t *get_c_table(void);
int free_a_table(a_tmgr_t *, bool);
int free_b_table(b_tmgr_t *, bool);
int free_c_table(c_tmgr_t *, bool);

void pmap_bootstrap_aalign(int);
void pmap_alloc_usermmu(void);
void pmap_alloc_usertmgr(void);
void pmap_alloc_pv(void);
void pmap_init_a_tables(void);
void pmap_init_b_tables(void);
void pmap_init_c_tables(void);
void pmap_init_pv(void);
void pmap_clear_pv(paddr_t, int);
static INLINE bool is_managed(paddr_t);

bool pmap_remove_a(a_tmgr_t *, vaddr_t, vaddr_t);
bool pmap_remove_b(b_tmgr_t *, vaddr_t, vaddr_t);
bool pmap_remove_c(c_tmgr_t *, vaddr_t, vaddr_t);
void pmap_remove_pte(mmu_short_pte_t *);

void pmap_enter_kernel(vaddr_t, paddr_t, vm_prot_t);
static INLINE void pmap_remove_kernel(vaddr_t, vaddr_t);
static INLINE void pmap_protect_kernel(vaddr_t, vaddr_t, vm_prot_t);
static INLINE bool pmap_extract_kernel(vaddr_t, paddr_t *);
vaddr_t pmap_get_pteinfo(u_int, pmap_t *, c_tmgr_t **);
static INLINE int pmap_dereference(pmap_t);

bool pmap_stroll(pmap_t, vaddr_t, a_tmgr_t **, b_tmgr_t **, c_tmgr_t **,
    mmu_short_pte_t **, int *, int *, int *);
void pmap_bootstrap_copyprom(void);
void pmap_takeover_mmu(void);
void pmap_bootstrap_setprom(void);
static void pmap_page_upload(void);

#ifdef PMAP_DEBUG
/* Debugging function definitions */
void  pv_list(paddr_t, int);
#endif /* PMAP_DEBUG */

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **   The new UVM doesn't require them so now INTERNAL.
 **/
static INLINE void pmap_pinit(pmap_t);
static INLINE void pmap_release(pmap_t);

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap			INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from
 * locore2.c:_vm_init()
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 *           system implement pmap_steal_memory() is redundant.
 *           Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(vaddr_t nextva)
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vaddr_t va, eva;
	paddr_t pa;
	int b, c, i, j;	/* running table counts */
	int size, resvmem;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.)  It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/*
	 * Determine the range of kernel virtual and physical
	 * space available. Note that we ABSOLUTELY DEPEND on
	 * the fact that the first bank of memory (4MB) is
	 * mapped linearly to KERNBASE (which we guaranteed in
	 * the first instructions of locore.s).
	 * That is plenty for our bootstrap work.
	 */
	virtual_avail = m68k_round_page(nextva);
	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Don't need avail_start til later. */

	/* We may now call pmap_bootstrap_alloc(). */
	bootstrap_alloc_enabled = true;

	/*
	 * This is a somewhat unwrapped loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks.  The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	for (;;) { /* break on !membank */
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		membank = membank->next;
		if (!membank)
			break;
		/*
		 * This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
	}
	/* This is the last element. */
	pmap_membank->pmem_next = NULL;

	/*
	 * Note: total_phys_mem, physmem represent
	 * actual physical memory, including that
	 * reserved for the PROM monitor.
	 */
	physmem = btoc(total_phys_mem);

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank.  We use this only to
	 * determine if a physical address is "managed" memory.
	 * This address range should be reduced to prevent the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.
	 */
	resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
	resvmem = m68k_round_page(resvmem);
	avail_end = pmap_membank->pmem_end - resvmem;

	/*
	 * First allocate enough kernel MMU tables to map all
	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
	 * Note: All must be aligned on 256 byte boundaries.
	 * Start with the level-A table (one of those).
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	memset(kernAbase, 0, size);

	/* Now the level-B kernel tables... */
	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
	kernBbase = pmap_bootstrap_alloc(size);
	memset(kernBbase, 0, size);

	/* Now the level-C kernel tables... */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(size);
	memset(kernCbase, 0, size);
	/*
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below.  XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 *
	 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
	 */

#ifndef	FIXED_NTABLES
	/*
	 * The number of user-level C tables that should be allocated is
	 * related to the size of physical memory.  In general, there should
	 * be enough tables to map four times the amount of available RAM.
	 * The extra amount is needed because some table space is wasted by
	 * fragmentation.
	 */
	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
	NUM_B_TABLES = NUM_C_TABLES / 2;
	NUM_A_TABLES = NUM_B_TABLES / 2;
#endif	/* !FIXED_NTABLES */

	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
	mmuCbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
	mmuBbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
	mmuAbase = pmap_bootstrap_alloc(size);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase.  The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
		    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j = 0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw =
			    mmu_vtop(&kernCbase[c]) | MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}

	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.  */

	/*
	 * We are now done with pmap_bootstrap_alloc().  Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(PAGE_SIZE);
	bootstrap_alloc_enabled = false;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them.  Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += PAGE_SIZE;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += PAGE_SIZE;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */
	simple_lock_init(&kernel_pmap.pm_lock);

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space.  Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vaddr_t)KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page.  The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * It is non-cached, mostly due to paranoia.
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += PAGE_SIZE;
	pa += PAGE_SIZE;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += PAGE_SIZE;
	pa += PAGE_SIZE;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default).  Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page.  Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = m68k_trunc_page((vaddr_t)etext);
	for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU.  But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
	pmap_bootstrap_setprom();

	/* Notify the VM system of our page size. */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	pmap_page_upload();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu(void)
{

	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv(void)
{
	int	i;
	unsigned int	total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system.  Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
		total_mem += avail_mem[i].pmem_end - avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
	pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) *
	    m68k_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr(void)
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase =
	    (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t) * NUM_A_TABLES);
	Btmgrbase =
	    (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t) * NUM_B_TABLES);
	Ctmgrbase =
	    (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t) * NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *)pmap_bootstrap_alloc(sizeof(pv_elem_t) *
	    (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom(void)
{
	struct sunromvec *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
	 */
	mon_ctbl = *romp->monptaddr;
	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * Actually, we only want the last page, which the
	 * PROM has set up for use by the "ie" driver.
	 * (The i82586 needs its SCP there.)
	 * If we copy all the mappings, pmap_enter_kernel
	 * may complain about finding valid PTEs that are
	 * not recorded in our PV lists...
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
	for (i = (len - 1); i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu(void)
{

	loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()			INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom(void)
{
	mmu_long_dte_t *mon_dte;
	extern struct mmu_rootptr mon_crp;
	int i;

	mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr;
	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
	}
}


/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init(void)
{

	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();

	/** Initialize the pmap pools **/
	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables(void)
{
	int i;
	a_tmgr_t *a_tbl;

	for (i = 0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry.  Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the table in the `proc0',
		 * or kernel, mapping.  This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		memcpy(a_tbl->at_dtbl, kernAbase,
		    MMU_A_TBL_SIZE * sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables(void)
{
	int i, j;
	b_tmgr_t *b_tbl;

	for (i = 0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,  */
		b_tbl->bt_pidx = 0;		/* parent index,      */
		b_tbl->bt_wcnt = 0;		/* wired entry count, */
		b_tbl->bt_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j = 0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables(void)
{
	int i, j;
	c_tmgr_t *c_tbl;

	for (i = 0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,  */
		c_tbl->ct_pidx = 0;		/* parent index,      */
		c_tbl->ct_wcnt = 0;		/* wired entry count, */
		c_tbl->ct_ecnt = 0;		/* valid entry count, */
		c_tbl->ct_pmap = NULL;		/* parent pmap,       */
		c_tbl->ct_va = 0;		/* base of managed range */

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j = 0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv(void)
{
	int i;

	/* Initialize every PV head. */
	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
	}
}

/* is_managed				INTERNAL
 **
 * Determine if the given physical address is managed by the PV system.
 * Note that this logic assumes that no one will ask for the status of
 * addresses which lie in-between the memory banks on the 3/80.  If they
 * do so, it will falsely report that it is managed.
 *
 * Note: A "managed" address is one that was reported to the VM system as
 * a "usable page" during system startup.  As such, the VM system expects the
 * pmap module to keep an accurate track of the usage of those pages.
 * Any page not given to the VM system at startup does not exist (as far as
 * the VM system is concerned) and is therefore "unmanaged."  Examples are
 * those pages which belong to the ROM monitor and the memory allocated before
 * the VM system was started.
 */
static INLINE bool
is_managed(paddr_t pa)
{
	if (pa >= avail_start && pa < avail_end)
		return true;
	else
		return false;
}

/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table(void)
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = TAILQ_FIRST(&a_pool);
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool.  Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources.  This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here.  This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is false.  This
	 * indicates that the table should not be relinked into the A table
	 * pool.  That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		KASSERT(tbl->at_wcnt == 0);
		pmap = tbl->at_parent;
		free_a_table(tbl, false);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table(void)
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = TAILQ_FIRST(&b_pool);
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		KASSERT(tbl->bt_wcnt == 0);
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, false);
	}
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table(void)
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = TAILQ_FIRST(&c_pool);
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		KASSERT(tbl->ct_wcnt == 0);
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, false);
	}
	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Address Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
   1373 int
   1374 free_a_table(a_tmgr_t *a_tbl, bool relink)
   1375 {
   1376 	int i, removed_cnt;
   1377 	mmu_long_dte_t	*dte;
   1378 	mmu_short_dte_t *dtbl;
   1379 	b_tmgr_t	*b_tbl;
   1380 	uint8_t at_wired, bt_wired;
   1381 
   1382 	/*
   1383 	 * Flush the ATC cache of all cached descriptors derived
   1384 	 * from this table.
   1385 	 * Sun3x does not use 68851's cached table feature
   1386 	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
   1387 	 */
   1388 
   1389 	/*
   1390 	 * Remove any pending cache flushes that were designated
   1391 	 * for the pmap this A table belongs to.
   1392 	 * a_tbl->parent->atc_flushq[0] = 0;
   1393 	 * Not implemented in sun3x.
   1394 	 */
   1395 
   1396 	/*
   1397 	 * All A tables in the system should retain a map for the
   1398 	 * kernel. If the table contains any valid descriptors
   1399 	 * (other than those for the kernel area), invalidate them all,
   1400 	 * stopping short of the kernel's entries.
   1401 	 */
   1402 	removed_cnt = 0;
   1403 	at_wired = a_tbl->at_wcnt;
   1404 	if (a_tbl->at_ecnt) {
   1405 		dte = a_tbl->at_dtbl;
   1406 		for (i = 0; i < MMU_TIA(KERNBASE); i++) {
   1407 			/*
   1408 			 * If a table entry points to a valid B table, free
   1409 			 * it and its children.
   1410 			 */
   1411 			if (MMU_VALID_DT(dte[i])) {
   1412 				/*
   1413 				 * The following block does several things,
   1414 				 * from innermost expression to the
   1415 				 * outermost:
    1416 				 * 1) It extracts the base address of
    1417 				 *    the B table pointed to in the A
    1418 				 *    table entry dte[i].
   1419 				 * 2) It converts this base address into
   1420 				 *    the virtual address it can be
   1421 				 *    accessed with. (all MMU tables point
   1422 				 *    to physical addresses.)
   1423 				 * 3) It finds the corresponding manager
   1424 				 *    structure which manages this MMU table.
   1425 				 * 4) It frees the manager structure.
   1426 				 *    (This frees the MMU table and all
   1427 				 *    child tables. See 'free_b_table' for
   1428 				 *    details.)
   1429 				 */
   1430 				dtbl = mmu_ptov(dte[i].addr.raw);
   1431 				b_tbl = mmuB2tmgr(dtbl);
   1432 				bt_wired = b_tbl->bt_wcnt;
   1433 				removed_cnt += free_b_table(b_tbl, true);
   1434 				if (bt_wired)
   1435 					a_tbl->at_wcnt--;
   1436 				dte[i].attr.raw = MMU_DT_INVALID;
   1437 			}
   1438 		}
   1439 		a_tbl->at_ecnt = 0;
   1440 	}
   1441 	KASSERT(a_tbl->at_wcnt == 0);
   1442 
   1443 	if (relink) {
   1444 		a_tbl->at_parent = NULL;
   1445 		if (!at_wired)
   1446 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
   1447 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
   1448 	}
   1449 	return removed_cnt;
   1450 }
   1451 
   1452 /* free_b_table			INTERNAL
   1453  **
   1454  * Unmaps the given B table and all its children from their current
   1455  * mappings.  Returns the number of pages that were invalidated.
   1456  * (For comments, see 'free_a_table()').
   1457  */
   1458 int
   1459 free_b_table(b_tmgr_t *b_tbl, bool relink)
   1460 {
   1461 	int i, removed_cnt;
   1462 	mmu_short_dte_t *dte;
   1463 	mmu_short_pte_t	*dtbl;
   1464 	c_tmgr_t	*c_tbl;
   1465 	uint8_t bt_wired, ct_wired;
   1466 
   1467 	removed_cnt = 0;
   1468 	bt_wired = b_tbl->bt_wcnt;
   1469 	if (b_tbl->bt_ecnt) {
   1470 		dte = b_tbl->bt_dtbl;
   1471 		for (i = 0; i < MMU_B_TBL_SIZE; i++) {
   1472 			if (MMU_VALID_DT(dte[i])) {
   1473 				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
   1474 				c_tbl = mmuC2tmgr(dtbl);
   1475 				ct_wired = c_tbl->ct_wcnt;
   1476 				removed_cnt += free_c_table(c_tbl, true);
   1477 				if (ct_wired)
   1478 					b_tbl->bt_wcnt--;
   1479 				dte[i].attr.raw = MMU_DT_INVALID;
   1480 			}
   1481 		}
   1482 		b_tbl->bt_ecnt = 0;
   1483 	}
   1484 	KASSERT(b_tbl->bt_wcnt == 0);
   1485 
   1486 	if (relink) {
   1487 		b_tbl->bt_parent = NULL;
   1488 		if (!bt_wired)
   1489 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   1490 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
   1491 	}
   1492 	return removed_cnt;
   1493 }
   1494 
   1495 /* free_c_table			INTERNAL
   1496  **
    1497  * Unmaps the given C table and, if 'relink' is true, returns it to
    1498  * the pool for re-use.  Returns the number of pages invalidated.
   1499  *
   1500  * This function preserves any physical page modification information
   1501  * contained in the page descriptors within the C table by calling
   1502  * 'pmap_remove_pte().'
   1503  */
   1504 int
   1505 free_c_table(c_tmgr_t *c_tbl, bool relink)
   1506 {
   1507 	mmu_short_pte_t *c_pte;
   1508 	int i, removed_cnt;
   1509 	uint8_t ct_wired;
   1510 
   1511 	removed_cnt = 0;
   1512 	ct_wired = c_tbl->ct_wcnt;
   1513 	if (c_tbl->ct_ecnt) {
   1514 		for (i = 0; i < MMU_C_TBL_SIZE; i++) {
   1515 			c_pte = &c_tbl->ct_dtbl[i];
   1516 			if (MMU_VALID_DT(*c_pte)) {
   1517 				if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
   1518 					c_tbl->ct_wcnt--;
   1519 				pmap_remove_pte(c_pte);
   1520 				removed_cnt++;
   1521 			}
   1522 		}
   1523 		c_tbl->ct_ecnt = 0;
   1524 	}
   1525 	KASSERT(c_tbl->ct_wcnt == 0);
   1526 
   1527 	if (relink) {
   1528 		c_tbl->ct_parent = NULL;
   1529 		if (!ct_wired)
   1530 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   1531 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
   1532 	}
   1533 	return removed_cnt;
   1534 }
   1535 
   1536 
   1537 /* pmap_remove_pte			INTERNAL
   1538  **
   1539  * Unmap the given pte and preserve any page modification
    1540  * information by transferring it to the pv head of the
   1541  * physical page it maps to.  This function does not update
   1542  * any reference counts because it is assumed that the calling
   1543  * function will do so.
   1544  */
   1545 void
   1546 pmap_remove_pte(mmu_short_pte_t *pte)
   1547 {
   1548 	u_short     pv_idx, targ_idx;
   1549 	paddr_t     pa;
   1550 	pv_t       *pv;
   1551 
   1552 	pa = MMU_PTE_PA(*pte);
   1553 	if (is_managed(pa)) {
   1554 		pv = pa2pv(pa);
   1555 		targ_idx = pteidx(pte);	/* Index of PTE being removed    */
   1556 
   1557 		/*
   1558 		 * If the PTE being removed is the first (or only) PTE in
   1559 		 * the list of PTEs currently mapped to this page, remove the
   1560 		 * PTE by changing the index found on the PV head.  Otherwise
   1561 		 * a linear search through the list will have to be executed
   1562 		 * in order to find the PVE which points to the PTE being
   1563 		 * removed, so that it may be modified to point to its new
   1564 		 * neighbor.
   1565 		 */
   1566 
   1567 		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
   1568 		if (pv_idx == targ_idx) {
   1569 			pv->pv_idx = pvebase[targ_idx].pve_next;
   1570 		} else {
   1571 
   1572 			/*
   1573 			 * Find the PV element pointing to the target
   1574 			 * element.  Note: may have pv_idx==PVE_EOL
   1575 			 */
   1576 
   1577 			for (;;) {
   1578 				if (pv_idx == PVE_EOL) {
   1579 					goto pv_not_found;
   1580 				}
   1581 				if (pvebase[pv_idx].pve_next == targ_idx)
   1582 					break;
   1583 				pv_idx = pvebase[pv_idx].pve_next;
   1584 			}
   1585 
   1586 			/*
   1587 			 * At this point, pv_idx is the index of the PV
   1588 			 * element just before the target element in the list.
   1589 			 * Unlink the target.
   1590 			 */
   1591 
   1592 			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
   1593 		}
   1594 
   1595 		/*
   1596 		 * Save the mod/ref bits of the pte by simply
   1597 		 * ORing the entire pte onto the pv_flags member
   1598 		 * of the pv structure.
    1599 		 * There is no need for the usage information on the
    1600 		 * pv head to use a bit pattern different from the one
    1601 		 * used in the MMU ptes themselves.
   1602 		 */
   1603 
   1604  pv_not_found:
   1605 		pv->pv_flags |= (u_short) pte->attr.raw;
   1606 	}
   1607 	pte->attr.raw = MMU_DT_INVALID;
   1608 }
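         /*
          * Illustrative sketch of the PV structure manipulated above,
          * with assumed index values.  Each managed page's pv head names
          * the first PTE mapping it; pvebase[] holds the links:
          *
          *	pv->pv_idx = 7;			list head: PTE 7
          *	pvebase[7].pve_next = 42;	PTE 7 -> PTE 42
          *	pvebase[42].pve_next = PVE_EOL;	end of list
          *
          * Removing PTE 42 sets pvebase[7].pve_next = PVE_EOL, while
          * removing PTE 7 instead sets pv->pv_idx = 42.
          */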
   1609 
   1610 /* pmap_stroll			INTERNAL
   1611  **
   1612  * Retrieve the addresses of all table managers involved in the mapping of
   1613  * the given virtual address.  If the table walk completed successfully,
   1614  * return true.  If it was only partially successful, return false.
   1615  * The table walk performed by this function is important to many other
   1616  * functions in this module.
   1617  *
   1618  * Note: This function ought to be easier to read.
   1619  */
   1620 bool
   1621 pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl,
   1622     c_tmgr_t **c_tbl, mmu_short_pte_t **pte, int *a_idx, int *b_idx,
   1623     int *pte_idx)
   1624 {
   1625 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
   1626 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
   1627 
   1628 	if (pmap == pmap_kernel())
   1629 		return false;
   1630 
   1631 	/* Does the given pmap have its own A table? */
   1632 	*a_tbl = pmap->pm_a_tmgr;
   1633 	if (*a_tbl == NULL)
   1634 		return false; /* No.  Return unknown. */
    1635 	/* Does the A table have a valid B table under the entry for va? */
   1638 	*a_idx = MMU_TIA(va);
   1639 	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
   1640 	if (!MMU_VALID_DT(*a_dte))
   1641 		return false; /* No. Return unknown. */
   1642 	/* Yes. Extract B table from the A table. */
   1643 	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
   1644 	/*
   1645 	 * Does the B table have a valid C table
   1646 	 * under the corresponding table entry?
   1647 	 */
   1648 	*b_idx = MMU_TIB(va);
   1649 	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
   1650 	if (!MMU_VALID_DT(*b_dte))
   1651 		return false; /* No. Return unknown. */
   1652 	/* Yes. Extract C table from the B table. */
   1653 	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
   1654 	*pte_idx = MMU_TIC(va);
   1655 	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
   1656 
   1657 	return true;
   1658 }
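         /*
          * Typical use of pmap_stroll() (a sketch only; see pmap_unwire()
          * and pmap_extract() below for real callers):
          *
          *	a_tmgr_t *a; b_tmgr_t *b; c_tmgr_t *c;
          *	mmu_short_pte_t *pte;
          *	int ai, bi, ci;
          *
          *	if (pmap_stroll(pmap, va, &a, &b, &c, &pte, &ai, &bi, &ci))
          *		... *pte is the descriptor that maps va ...
          */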
   1659 
   1660 /* pmap_enter			INTERFACE
   1661  **
   1662  * Called by the kernel to map a virtual address
   1663  * to a physical address in the given process map.
   1664  *
   1665  * Note: this function should apply an exclusive lock
   1666  * on the pmap system for its duration.  (it certainly
   1667  * would save my hair!!)
   1668  * This function ought to be easier to read.
   1669  */
   1670 int
   1671 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
   1672 {
    1673 	bool insert, managed; /* Marks the need for PV insertion. */
   1674 	u_short nidx;            /* PV list index                     */
   1675 	int mapflags;            /* Flags for the mapping (see NOTE1) */
   1676 	u_int a_idx, b_idx, pte_idx; /* table indices                 */
   1677 	a_tmgr_t *a_tbl;         /* A: long descriptor table manager  */
   1678 	b_tmgr_t *b_tbl;         /* B: short descriptor table manager */
   1679 	c_tmgr_t *c_tbl;         /* C: short page table manager       */
   1680 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
   1681 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
   1682 	mmu_short_pte_t *c_pte;  /* C: short page descriptor table    */
   1683 	pv_t      *pv;           /* pv list head                      */
   1684 	bool wired;         /* is the mapping to be wired?       */
   1685 	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end   */
   1686 
   1687 	if (pmap == pmap_kernel()) {
   1688 		pmap_enter_kernel(va, pa, prot);
   1689 		return 0;
   1690 	}
   1691 
   1692 	/*
   1693 	 * Determine if the mapping should be wired.
   1694 	 */
   1695 	wired = ((flags & PMAP_WIRED) != 0);
   1696 
   1697 	/*
   1698 	 * NOTE1:
   1699 	 *
   1700 	 * On November 13, 1999, someone changed the pmap_enter() API such
   1701 	 * that it now accepts a 'flags' argument.  This new argument
   1702 	 * contains bit-flags for the architecture-independent (UVM) system to
   1703 	 * use in signalling certain mapping requirements to the architecture-
   1704 	 * dependent (pmap) system.  The argument it replaces, 'wired', is now
   1705 	 * one of the flags within it.
   1706 	 *
   1707 	 * In addition to flags signaled by the architecture-independent
   1708 	 * system, parts of the architecture-dependent section of the sun3x
   1709 	 * kernel pass their own flags in the lower, unused bits of the
   1710 	 * physical address supplied to this function.  These flags are
   1711 	 * extracted and stored in the temporary variable 'mapflags'.
   1712 	 *
   1713 	 * Extract sun3x specific flags from the physical address.
   1714 	 */
   1715 	mapflags = (pa & ~MMU_PAGE_MASK);
   1716 	pa &= MMU_PAGE_MASK;
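         	/*
         	 * For example (illustrative values, assuming 8KB pages):
         	 * an incoming pa of (0x03804000 | PMAP_NC) splits into
         	 * mapflags = PMAP_NC and pa = 0x03804000.
         	 */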
   1717 
   1718 	/*
   1719 	 * Determine if the physical address being mapped is on-board RAM.
   1720 	 * Any other area of the address space is likely to belong to a
    1721 	 * device and hence it would be disastrous to cache its contents.
   1722 	 */
   1723 	if ((managed = is_managed(pa)) == false)
   1724 		mapflags |= PMAP_NC;
   1725 
   1726 	/*
   1727 	 * For user mappings we walk along the MMU tables of the given
   1728 	 * pmap, reaching a PTE which describes the virtual page being
   1729 	 * mapped or changed.  If any level of the walk ends in an invalid
   1730 	 * entry, a table must be allocated and the entry must be updated
   1731 	 * to point to it.
   1732 	 * There is a bit of confusion as to whether this code must be
   1733 	 * re-entrant.  For now we will assume it is.  To support
   1734 	 * re-entrancy we must unlink tables from the table pool before
   1735 	 * we assume we may use them.  Tables are re-linked into the pool
   1736 	 * when we are finished with them at the end of the function.
   1737 	 * But I don't feel like doing that until we have proof that this
   1738 	 * needs to be re-entrant.
   1739 	 * 'llevel' records which tables need to be relinked.
   1740 	 */
   1741 	llevel = NONE;
   1742 
   1743 	/*
   1744 	 * Step 1 - Retrieve the A table from the pmap.  If it has no
   1745 	 * A table, allocate a new one from the available pool.
   1746 	 */
   1747 
   1748 	a_tbl = pmap->pm_a_tmgr;
   1749 	if (a_tbl == NULL) {
   1750 		/*
   1751 		 * This pmap does not currently have an A table.  Allocate
   1752 		 * a new one.
   1753 		 */
   1754 		a_tbl = get_a_table();
   1755 		a_tbl->at_parent = pmap;
   1756 
   1757 		/*
   1758 		 * Assign this new A table to the pmap, and calculate its
   1759 		 * physical address so that loadcrp() can be used to make
   1760 		 * the table active.
   1761 		 */
   1762 		pmap->pm_a_tmgr = a_tbl;
   1763 		pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
   1764 
   1765 		/*
   1766 		 * If the process receiving a new A table is the current
   1767 		 * process, we are responsible for setting the MMU so that
   1768 		 * it becomes the current address space.  This only adds
   1769 		 * new mappings, so no need to flush anything.
   1770 		 */
   1771 		if (pmap == current_pmap()) {
   1772 			kernel_crp.rp_addr = pmap->pm_a_phys;
   1773 			loadcrp(&kernel_crp);
   1774 		}
   1775 
   1776 		if (!wired)
   1777 			llevel = NEWA;
   1778 	} else {
   1779 		/*
   1780 		 * Use the A table already allocated for this pmap.
   1781 		 * Unlink it from the A table pool if necessary.
   1782 		 */
   1783 		if (wired && !a_tbl->at_wcnt)
   1784 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
   1785 	}
   1786 
   1787 	/*
   1788 	 * Step 2 - Walk into the B table.  If there is no valid B table,
   1789 	 * allocate one.
   1790 	 */
   1791 
   1792 	a_idx = MMU_TIA(va);            /* Calculate the TIA of the VA. */
   1793 	a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
   1794 	if (MMU_VALID_DT(*a_dte)) {     /* Is the descriptor valid? */
   1795 		/* The descriptor is valid.  Use the B table it points to. */
   1796 		/*************************************
   1797 		 *               a_idx               *
   1798 		 *                 v                 *
   1799 		 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
   1800 		 *          | | | | | | | | | | | |  *
   1801 		 *          +-+-+-+-+-+-+-+-+-+-+-+- *
   1802 		 *                 |                 *
   1803 		 *                 \- b_tbl -> +-+-  *
   1804 		 *                             | |   *
   1805 		 *                             +-+-  *
   1806 		 *************************************/
   1807 		b_dte = mmu_ptov(a_dte->addr.raw);
   1808 		b_tbl = mmuB2tmgr(b_dte);
   1809 
   1810 		/*
   1811 		 * If the requested mapping must be wired, but this table
   1812 		 * being used to map it is not, the table must be removed
   1813 		 * from the available pool and its wired entry count
   1814 		 * incremented.
   1815 		 */
   1816 		if (wired && !b_tbl->bt_wcnt) {
   1817 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   1818 			a_tbl->at_wcnt++;
   1819 		}
   1820 	} else {
   1821 		/* The descriptor is invalid.  Allocate a new B table. */
   1822 		b_tbl = get_b_table();
   1823 
   1824 		/* Point the parent A table descriptor to this new B table. */
   1825 		a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
   1826 		a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
   1827 		a_tbl->at_ecnt++; /* Update parent's valid entry count */
   1828 
   1829 		/* Create the necessary back references to the parent table */
   1830 		b_tbl->bt_parent = a_tbl;
   1831 		b_tbl->bt_pidx = a_idx;
   1832 
   1833 		/*
   1834 		 * If this table is to be wired, make sure the parent A table
   1835 		 * wired count is updated to reflect that it has another wired
   1836 		 * entry.
   1837 		 */
   1838 		if (wired)
   1839 			a_tbl->at_wcnt++;
   1840 		else if (llevel == NONE)
   1841 			llevel = NEWB;
   1842 	}
   1843 
   1844 	/*
   1845 	 * Step 3 - Walk into the C table, if there is no valid C table,
   1846 	 * allocate one.
   1847 	 */
   1848 
   1849 	b_idx = MMU_TIB(va);            /* Calculate the TIB of the VA */
   1850 	b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
   1851 	if (MMU_VALID_DT(*b_dte)) {     /* Is the descriptor valid? */
   1852 		/* The descriptor is valid.  Use the C table it points to. */
   1853 		/**************************************
   1854 		 *               c_idx                *
   1855 		 * |                v                 *
   1856 		 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
   1857 		 *             | | | | | | | | | | |  *
   1858 		 *             +-+-+-+-+-+-+-+-+-+-+- *
   1859 		 *                  |                 *
   1860 		 *                  \- c_tbl -> +-+-- *
   1861 		 *                              | | | *
   1862 		 *                              +-+-- *
   1863 		 **************************************/
   1864 		c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
   1865 		c_tbl = mmuC2tmgr(c_pte);
   1866 
    1867 		/* If the mapping is to be wired but this table is not, wire it. */
   1868 		if (wired && !c_tbl->ct_wcnt) {
   1869 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   1870 			b_tbl->bt_wcnt++;
   1871 		}
   1872 	} else {
   1873 		/* The descriptor is invalid.  Allocate a new C table. */
   1874 		c_tbl = get_c_table();
   1875 
   1876 		/* Point the parent B table descriptor to this new C table. */
   1877 		b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
   1878 		b_dte->attr.raw |= MMU_DT_SHORT;
   1879 		b_tbl->bt_ecnt++; /* Update parent's valid entry count */
   1880 
   1881 		/* Create the necessary back references to the parent table */
   1882 		c_tbl->ct_parent = b_tbl;
   1883 		c_tbl->ct_pidx = b_idx;
   1884 		/*
   1885 		 * Store the pmap and base virtual managed address for faster
   1886 		 * retrieval in the PV functions.
   1887 		 */
   1888 		c_tbl->ct_pmap = pmap;
   1889 		c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
   1890 
   1891 		/*
   1892 		 * If this table is to be wired, make sure the parent B table
   1893 		 * wired count is updated to reflect that it has another wired
   1894 		 * entry.
   1895 		 */
   1896 		if (wired)
   1897 			b_tbl->bt_wcnt++;
   1898 		else if (llevel == NONE)
   1899 			llevel = NEWC;
   1900 	}
   1901 
   1902 	/*
   1903 	 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
   1904 	 * slot of the C table, describing the PA to which the VA is mapped.
   1905 	 */
   1906 
   1907 	pte_idx = MMU_TIC(va);
   1908 	c_pte = &c_tbl->ct_dtbl[pte_idx];
   1909 	if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
   1910 		/*
   1911 		 * The PTE is currently valid.  This particular call
   1912 		 * is just a synonym for one (or more) of the following
   1913 		 * operations:
   1914 		 *     change protection of a page
   1915 		 *     change wiring status of a page
   1916 		 *     remove the mapping of a page
   1917 		 */
   1918 
   1919 		/* First check if this is a wiring operation. */
   1920 		if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) {
   1921 			/*
   1922 			 * The existing mapping is wired, so adjust wired
   1923 			 * entry count here. If new mapping is still wired,
   1924 			 * wired entry count will be incremented again later.
   1925 			 */
   1926 			c_tbl->ct_wcnt--;
   1927 			if (!wired) {
   1928 				/*
   1929 				 * The mapping of this PTE is being changed
   1930 				 * from wired to unwired.
   1931 				 * Adjust wired entry counts in each table and
   1932 				 * set llevel flag to put unwired tables back
   1933 				 * into the active pool.
   1934 				 */
   1935 				if (c_tbl->ct_wcnt == 0) {
   1936 					llevel = NEWC;
   1937 					if (--b_tbl->bt_wcnt == 0) {
   1938 						llevel = NEWB;
   1939 						if (--a_tbl->at_wcnt == 0) {
   1940 							llevel = NEWA;
   1941 						}
   1942 					}
   1943 				}
   1944 			}
   1945 		}
   1946 
   1947 		/* Is the new address the same as the old? */
   1948 		if (MMU_PTE_PA(*c_pte) == pa) {
   1949 			/*
   1950 			 * Yes, mark that it does not need to be reinserted
   1951 			 * into the PV list.
   1952 			 */
   1953 			insert = false;
   1954 
   1955 			/*
   1956 			 * Clear all but the modified, referenced and wired
   1957 			 * bits on the PTE.
   1958 			 */
   1959 			c_pte->attr.raw &= (MMU_SHORT_PTE_M
   1960 			    | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
   1961 		} else {
   1962 			/* No, remove the old entry */
   1963 			pmap_remove_pte(c_pte);
   1964 			insert = true;
   1965 		}
   1966 
   1967 		/*
   1968 		 * TLB flush is only necessary if modifying current map.
   1969 		 * However, in pmap_enter(), the pmap almost always IS
   1970 		 * the current pmap, so don't even bother to check.
   1971 		 */
   1972 		TBIS(va);
   1973 	} else {
   1974 		/*
   1975 		 * The PTE is invalid.  Increment the valid entry count in
   1976 		 * the C table manager to reflect the addition of a new entry.
   1977 		 */
   1978 		c_tbl->ct_ecnt++;
   1979 
   1980 		/* XXX - temporarily make sure the PTE is cleared. */
   1981 		c_pte->attr.raw = 0;
   1982 
   1983 		/* It will also need to be inserted into the PV list. */
   1984 		insert = true;
   1985 	}
   1986 
   1987 	/*
   1988 	 * If page is changing from unwired to wired status, set an unused bit
   1989 	 * within the PTE to indicate that it is wired.  Also increment the
   1990 	 * wired entry count in the C table manager.
   1991 	 */
   1992 	if (wired) {
   1993 		c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
   1994 		c_tbl->ct_wcnt++;
   1995 	}
   1996 
   1997 	/*
   1998 	 * Map the page, being careful to preserve modify/reference/wired
   1999 	 * bits.  At this point it is assumed that the PTE either has no bits
    2000 	 * set, or if there are set bits, they are only modified, referenced or
   2001 	 * wired bits.  If not, the following statement will cause erratic
   2002 	 * behavior.
   2003 	 */
   2004 #ifdef	PMAP_DEBUG
   2005 	if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
   2006 		MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
   2007 		printf("pmap_enter: junk left in PTE at %p\n", c_pte);
   2008 		Debugger();
   2009 	}
   2010 #endif
   2011 	c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
   2012 
   2013 	/*
   2014 	 * If the mapping should be read-only, set the write protect
   2015 	 * bit in the PTE.
   2016 	 */
   2017 	if (!(prot & VM_PROT_WRITE))
   2018 		c_pte->attr.raw |= MMU_SHORT_PTE_WP;
   2019 
   2020 	/*
   2021 	 * Mark the PTE as used and/or modified as specified by the flags arg.
   2022 	 */
   2023 	if (flags & VM_PROT_ALL) {
   2024 		c_pte->attr.raw |= MMU_SHORT_PTE_USED;
   2025 		if (flags & VM_PROT_WRITE) {
   2026 			c_pte->attr.raw |= MMU_SHORT_PTE_M;
   2027 		}
   2028 	}
   2029 
   2030 	/*
    2031 	 * If the mapping should be cache inhibited (indicated by the flag
    2032 	 * bits found in the low-order bits of the physical address),
    2033 	 * mark the PTE as a cache inhibited page.
   2034 	 */
   2035 	if (mapflags & PMAP_NC)
   2036 		c_pte->attr.raw |= MMU_SHORT_PTE_CI;
   2037 
   2038 	/*
   2039 	 * If the physical address being mapped is managed by the PV
   2040 	 * system then link the pte into the list of pages mapped to that
   2041 	 * address.
   2042 	 */
   2043 	if (insert && managed) {
   2044 		pv = pa2pv(pa);
   2045 		nidx = pteidx(c_pte);
   2046 
   2047 		pvebase[nidx].pve_next = pv->pv_idx;
   2048 		pv->pv_idx = nidx;
   2049 	}
   2050 
   2051 	/* Move any allocated or unwired tables back into the active pool. */
   2052 
   2053 	switch (llevel) {
   2054 		case NEWA:
   2055 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
   2056 			/* FALLTHROUGH */
   2057 		case NEWB:
   2058 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
   2059 			/* FALLTHROUGH */
   2060 		case NEWC:
   2061 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
   2062 			/* FALLTHROUGH */
   2063 		default:
   2064 			break;
   2065 	}
   2066 
   2067 	return 0;
   2068 }
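         /*
          * Example invocation (a sketch only; in practice pmap_enter()
          * is called by the machine-independent UVM fault and wiring
          * code):
          *
          *	error = pmap_enter(pmap, va, pa,
          *	    VM_PROT_READ | VM_PROT_WRITE,
          *	    VM_PROT_READ | PMAP_WIRED);
          *
          * Such a 'flags' argument requests a wired mapping and reports
          * a read access, which presets the USED bit in the new PTE.
          */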
   2069 
   2070 /* pmap_enter_kernel			INTERNAL
   2071  **
   2072  * Map the given virtual address to the given physical address within the
   2073  * kernel address space.  This function exists because the kernel map does
   2074  * not do dynamic table allocation.  It consists of a contiguous array of ptes
   2075  * and can be edited directly without the need to walk through any tables.
   2076  *
   2077  * XXX: "Danger, Will Robinson!"
   2078  * Note that the kernel should never take a fault on any page
   2079  * between [ KERNBASE .. virtual_avail ] and this is checked in
   2080  * trap.c for kernel-mode MMU faults.  This means that mappings
    2081  * created in that range must be implicitly wired. -gwr
   2082  */
   2083 void
   2084 pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot)
   2085 {
   2086 	bool       was_valid, insert;
   2087 	u_short         pte_idx;
   2088 	int             flags;
   2089 	mmu_short_pte_t *pte;
   2090 	pv_t            *pv;
   2091 	paddr_t     old_pa;
   2092 
   2093 	flags = (pa & ~MMU_PAGE_MASK);
   2094 	pa &= MMU_PAGE_MASK;
   2095 
   2096 	if (is_managed(pa))
   2097 		insert = true;
   2098 	else
   2099 		insert = false;
   2100 
   2101 	/*
   2102 	 * Calculate the index of the PTE being modified.
   2103 	 */
   2104 	pte_idx = (u_long)m68k_btop(va - KERNBASE);
   2105 
   2106 	/* This array is traditionally named "Sysmap" */
   2107 	pte = &kernCbase[pte_idx];
   2108 
   2109 	if (MMU_VALID_DT(*pte)) {
   2110 		was_valid = true;
   2111 		/*
   2112 		 * If the PTE already maps a different
    2113 		 * physical address, unmap and pv_unlink.
   2114 		 */
   2115 		old_pa = MMU_PTE_PA(*pte);
   2116 		if (pa != old_pa)
   2117 			pmap_remove_pte(pte);
   2118 		else {
   2119 		    /*
   2120 		     * Old PA and new PA are the same.  No need to
   2121 		     * relink the mapping within the PV list.
   2122 		     */
   2123 		     insert = false;
   2124 
   2125 		    /*
   2126 		     * Save any mod/ref bits on the PTE.
   2127 		     */
   2128 		    pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
   2129 		}
   2130 	} else {
   2131 		pte->attr.raw = MMU_DT_INVALID;
   2132 		was_valid = false;
   2133 	}
   2134 
   2135 	/*
   2136 	 * Map the page.  Being careful to preserve modified/referenced bits
   2137 	 * on the PTE.
   2138 	 */
   2139 	pte->attr.raw |= (pa | MMU_DT_PAGE);
   2140 
   2141 	if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
   2142 		pte->attr.raw |= MMU_SHORT_PTE_WP;
   2143 	if (flags & PMAP_NC)
   2144 		pte->attr.raw |= MMU_SHORT_PTE_CI;
   2145 	if (was_valid)
   2146 		TBIS(va);
   2147 
   2148 	/*
   2149 	 * Insert the PTE into the PV system, if need be.
   2150 	 */
   2151 	if (insert) {
   2152 		pv = pa2pv(pa);
   2153 		pvebase[pte_idx].pve_next = pv->pv_idx;
   2154 		pv->pv_idx = pte_idx;
   2155 	}
   2156 }
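         /*
          * Index arithmetic example (illustrative, assuming 8KB pages):
          * for va = KERNBASE + 0x6000, m68k_btop(va - KERNBASE) = 3, so
          * the mapping is entered at kernCbase[3].
          */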
   2157 
   2158 void
   2159 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
   2160 {
   2161 	mmu_short_pte_t	*pte;
   2162 
   2163 	/* This array is traditionally named "Sysmap" */
   2164 	pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE)];
   2165 
   2166 	KASSERT(!MMU_VALID_DT(*pte));
   2167 	pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK);
   2168 	if (!(prot & VM_PROT_WRITE))
   2169 		pte->attr.raw |= MMU_SHORT_PTE_WP;
   2170 }
   2171 
   2172 void
   2173 pmap_kremove(vaddr_t va, vsize_t len)
   2174 {
   2175 	int idx, eidx;
   2176 
   2177 #ifdef	PMAP_DEBUG
   2178 	if ((va & PGOFSET) || (len & PGOFSET))
   2179 		panic("pmap_kremove: alignment");
   2180 #endif
   2181 
   2182 	idx  = m68k_btop(va - KERNBASE);
   2183 	eidx = m68k_btop(va + len - KERNBASE);
   2184 
   2185 	while (idx < eidx) {
   2186 		kernCbase[idx++].attr.raw = MMU_DT_INVALID;
   2187 		TBIS(va);
   2188 		va += PAGE_SIZE;
   2189 	}
   2190 }
   2191 
   2192 /* pmap_map			INTERNAL
   2193  **
   2194  * Map a contiguous range of physical memory into a contiguous range of
   2195  * the kernel virtual address space.
   2196  *
   2197  * Used for device mappings and early mapping of the kernel text/data/bss.
   2198  * Returns the first virtual address beyond the end of the range.
   2199  */
   2200 vaddr_t
   2201 pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
   2202 {
   2203 	int sz;
   2204 
   2205 	sz = endpa - pa;
   2206 	do {
   2207 		pmap_enter_kernel(va, pa, prot);
   2208 		va += PAGE_SIZE;
   2209 		pa += PAGE_SIZE;
   2210 		sz -= PAGE_SIZE;
   2211 	} while (sz > 0);
   2212 	pmap_update(pmap_kernel());
   2213 	return va;
   2214 }
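         /*
          * Example (a sketch, with a hypothetical device range): mapping
          * a page of device registers during bootstrap and advancing the
          * free virtual address:
          *
          *	va = pmap_map(va, dev_pa, dev_pa + PAGE_SIZE,
          *	    VM_PROT_READ | VM_PROT_WRITE);
          */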
   2215 
   2216 /* pmap_protect_kernel			INTERNAL
   2217  **
   2218  * Apply the given protection code to a kernel address range.
   2219  */
   2220 static INLINE void
   2221 pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot)
   2222 {
   2223 	vaddr_t va;
   2224 	mmu_short_pte_t *pte;
   2225 
   2226 	pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
   2227 	for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
   2228 		if (MMU_VALID_DT(*pte)) {
   2229 		    switch (prot) {
   2230 		        case VM_PROT_ALL:
   2231 		            break;
   2232 		        case VM_PROT_EXECUTE:
   2233 		        case VM_PROT_READ:
   2234 		        case VM_PROT_READ|VM_PROT_EXECUTE:
   2235 		            pte->attr.raw |= MMU_SHORT_PTE_WP;
   2236 		            break;
   2237 		        case VM_PROT_NONE:
   2238 		            /* this is an alias for 'pmap_remove_kernel' */
   2239 		            pmap_remove_pte(pte);
   2240 		            break;
   2241 		        default:
   2242 		            break;
   2243 		    }
   2244 		    /*
   2245 		     * since this is the kernel, immediately flush any cached
   2246 		     * descriptors for this address.
   2247 		     */
   2248 		    TBIS(va);
   2249 		}
   2250 	}
   2251 }
   2252 
   2253 /* pmap_protect			INTERFACE
   2254  **
   2255  * Apply the given protection to the given virtual address range within
   2256  * the given map.
   2257  *
   2258  * It is ok for the protection applied to be stronger than what is
   2259  * specified.  We use this to our advantage when the given map has no
   2260  * mapping for the virtual address.  By skipping a page when this
   2261  * is discovered, we are effectively applying a protection of VM_PROT_NONE,
   2262  * and therefore do not need to map the page just to apply a protection
   2263  * code.  Only pmap_enter() needs to create new mappings if they do not exist.
   2264  *
    2265  * XXX - This function could be sped up by using pmap_stroll() for initial
   2266  *       setup, and then manual scrolling in the for() loop.
   2267  */
   2268 void
   2269 pmap_protect(pmap_t pmap, vaddr_t startva, vaddr_t endva, vm_prot_t prot)
   2270 {
   2271 	bool iscurpmap;
   2272 	int a_idx, b_idx, c_idx;
   2273 	a_tmgr_t *a_tbl;
   2274 	b_tmgr_t *b_tbl;
   2275 	c_tmgr_t *c_tbl;
   2276 	mmu_short_pte_t *pte;
   2277 
   2278 	if (pmap == pmap_kernel()) {
   2279 		pmap_protect_kernel(startva, endva, prot);
   2280 		return;
   2281 	}
   2282 
   2283 	/*
   2284 	 * In this particular pmap implementation, there are only three
   2285 	 * types of memory protection: 'all' (read/write/execute),
   2286 	 * 'read-only' (read/execute) and 'none' (no mapping.)
   2287 	 * It is not possible for us to treat 'executable' as a separate
   2288 	 * protection type.  Therefore, protection requests that seek to
   2289 	 * remove execute permission while retaining read or write, and those
   2290 	 * that make little sense (write-only for example) are ignored.
   2291 	 */
   2292 	switch (prot) {
   2293 		case VM_PROT_NONE:
   2294 			/*
   2295 			 * A request to apply the protection code of
   2296 			 * 'VM_PROT_NONE' is a synonym for pmap_remove().
   2297 			 */
   2298 			pmap_remove(pmap, startva, endva);
   2299 			return;
   2300 		case	VM_PROT_EXECUTE:
   2301 		case	VM_PROT_READ:
   2302 		case	VM_PROT_READ|VM_PROT_EXECUTE:
   2303 			/* continue */
   2304 			break;
   2305 		case	VM_PROT_WRITE:
   2306 		case	VM_PROT_WRITE|VM_PROT_READ:
   2307 		case	VM_PROT_WRITE|VM_PROT_EXECUTE:
   2308 		case	VM_PROT_ALL:
   2309 			/* None of these should happen in a sane system. */
   2310 			return;
   2311 	}
   2312 
   2313 	/*
   2314 	 * If the pmap has no A table, it has no mappings and therefore
   2315 	 * there is nothing to protect.
   2316 	 */
   2317 	if ((a_tbl = pmap->pm_a_tmgr) == NULL)
   2318 		return;
   2319 
   2320 	a_idx = MMU_TIA(startva);
   2321 	b_idx = MMU_TIB(startva);
   2322 	c_idx = MMU_TIC(startva);
   2323 	b_tbl = NULL;
   2324 	c_tbl = NULL;
   2325 
   2326 	iscurpmap = (pmap == current_pmap());
   2327 	while (startva < endva) {
   2328 		if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
   2329 		  if (b_tbl == NULL) {
   2330 		    b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
   2331 		    b_tbl = mmu_ptov((vaddr_t)b_tbl);
   2332 		    b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl);
   2333 		  }
   2334 		  if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
   2335 		    if (c_tbl == NULL) {
   2336 		      c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
   2337 		      c_tbl = mmu_ptov((vaddr_t)c_tbl);
   2338 		      c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl);
   2339 		    }
   2340 		    if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
   2341 		      pte = &c_tbl->ct_dtbl[c_idx];
   2342 		      /* make the mapping read-only */
   2343 		      pte->attr.raw |= MMU_SHORT_PTE_WP;
   2344 		      /*
   2345 		       * If we just modified the current address space,
   2346 		       * flush any translations for the modified page from
   2347 		       * the translation cache and any data from it in the
   2348 		       * data cache.
   2349 		       */
   2350 		      if (iscurpmap)
   2351 		          TBIS(startva);
   2352 		    }
   2353 		    startva += PAGE_SIZE;
   2354 
   2355 		    if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
   2356 		      c_tbl = NULL;
   2357 		      c_idx = 0;
   2358 		      if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
   2359 		        b_tbl = NULL;
   2360 		        b_idx = 0;
   2361 		      }
   2362 		    }
   2363 		  } else { /* C table wasn't valid */
   2364 		    c_tbl = NULL;
   2365 		    c_idx = 0;
   2366 		    startva += MMU_TIB_RANGE;
   2367 		    if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
   2368 		      b_tbl = NULL;
   2369 		      b_idx = 0;
   2370 		    }
   2371 		  } /* C table */
   2372 		} else { /* B table wasn't valid */
   2373 		  b_tbl = NULL;
   2374 		  b_idx = 0;
   2375 		  startva += MMU_TIA_RANGE;
   2376 		  a_idx++;
   2377 		} /* B table */
   2378 	}
   2379 }
   2380 
   2381 /* pmap_unwire				INTERFACE
   2382  **
   2383  * Clear the wired attribute of the specified page.
   2384  *
   2385  * This function is called from vm_fault.c to unwire
   2386  * a mapping.
   2387  */
   2388 void
   2389 pmap_unwire(pmap_t pmap, vaddr_t va)
   2390 {
   2391 	int a_idx, b_idx, c_idx;
   2392 	a_tmgr_t *a_tbl;
   2393 	b_tmgr_t *b_tbl;
   2394 	c_tmgr_t *c_tbl;
   2395 	mmu_short_pte_t *pte;
   2396 
   2397 	/* Kernel mappings always remain wired. */
   2398 	if (pmap == pmap_kernel())
   2399 		return;
   2400 
   2401 	/*
   2402 	 * Walk through the tables.  If the walk terminates without
   2403 	 * a valid PTE then the address wasn't wired in the first place.
   2404 	 * Return immediately.
   2405 	 */
   2406 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
   2407 		&b_idx, &c_idx) == false)
   2408 		return;
   2409 
   2411 	/* Is the PTE wired?  If not, return. */
   2412 	if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
   2413 		return;
   2414 
   2415 	/* Remove the wiring bit. */
   2416 	pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
   2417 
   2418 	/*
   2419 	 * Decrement the wired entry count in the C table.
   2420 	 * If it reaches zero the following things happen:
   2421 	 * 1. The table no longer has any wired entries and is considered
   2422 	 *    unwired.
   2423 	 * 2. It is placed on the available queue.
   2424 	 * 3. The parent table's wired entry count is decremented.
   2425 	 * 4. If it reaches zero, this process repeats at step 1 and
    2426 	 *    stops after reaching the A table.
   2427 	 */
   2428 	if (--c_tbl->ct_wcnt == 0) {
   2429 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
   2430 		if (--b_tbl->bt_wcnt == 0) {
   2431 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
   2432 			if (--a_tbl->at_wcnt == 0) {
   2433 				TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
   2434 			}
   2435 		}
   2436 	}
   2437 }
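         /*
          * Example of the cascade (illustrative counts): if the C table
          * had ct_wcnt == 1, unwiring this page drops it to 0, so the C
          * table rejoins c_pool, the B table's bt_wcnt is decremented in
          * turn, and so on up to the A table.
          */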
   2438 
   2439 /* pmap_copy				INTERFACE
   2440  **
   2441  * Copy the mappings of a range of addresses in one pmap, into
   2442  * the destination address of another.
   2443  *
   2444  * This routine is advisory.  Should we one day decide that MMU tables
   2445  * may be shared by more than one pmap, this function should be used to
   2446  * link them together.  Until that day however, we do nothing.
   2447  */
   2448 void
   2449 pmap_copy(pmap_t pmap_a, pmap_t pmap_b, vaddr_t dst, vsize_t len, vaddr_t src)
   2450 {
   2451 
   2452 	/* not implemented. */
   2453 }
   2454 
   2455 /* pmap_copy_page			INTERFACE
   2456  **
   2457  * Copy the contents of one physical page into another.
   2458  *
   2459  * This function makes use of two virtual pages allocated in pmap_bootstrap()
   2460  * to map the two specified physical pages into the kernel address space.
   2461  *
   2462  * Note: We could use the transparent translation registers to make the
   2463  * mappings.  If we do so, be sure to disable interrupts before using them.
   2464  */
   2465 void
   2466 pmap_copy_page(paddr_t srcpa, paddr_t dstpa)
   2467 {
   2468 	vaddr_t srcva, dstva;
   2469 	int s;
   2470 
   2471 	srcva = tmp_vpages[0];
   2472 	dstva = tmp_vpages[1];
   2473 
   2474 	s = splvm();
   2475 #ifdef DIAGNOSTIC
   2476 	if (tmp_vpages_inuse++)
   2477 		panic("pmap_copy_page: temporary vpages are in use.");
   2478 #endif
   2479 
    2480 	/* Map pages as non-cacheable to avoid cache pollution? */
   2481 	pmap_kenter_pa(srcva, srcpa, VM_PROT_READ);
   2482 	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE);
   2483 
   2484 	/* Hand-optimized version of bcopy(src, dst, PAGE_SIZE) */
   2485 	copypage((char *)srcva, (char *)dstva);
   2486 
   2487 	pmap_kremove(srcva, PAGE_SIZE);
   2488 	pmap_kremove(dstva, PAGE_SIZE);
   2489 
   2490 #ifdef DIAGNOSTIC
   2491 	--tmp_vpages_inuse;
   2492 #endif
   2493 	splx(s);
   2494 }
   2495 
   2496 /* pmap_zero_page			INTERFACE
   2497  **
   2498  * Zero the contents of the specified physical page.
   2499  *
    2500  * Uses one of the virtual pages allocated in pmap_bootstrap()
   2501  * to map the specified page into the kernel address space.
   2502  */
   2503 void
   2504 pmap_zero_page(paddr_t dstpa)
   2505 {
   2506 	vaddr_t dstva;
   2507 	int s;
   2508 
   2509 	dstva = tmp_vpages[1];
   2510 	s = splvm();
   2511 #ifdef DIAGNOSTIC
   2512 	if (tmp_vpages_inuse++)
   2513 		panic("pmap_zero_page: temporary vpages are in use.");
   2514 #endif
   2515 
   2516 	/* The comments in pmap_copy_page() above apply here also. */
   2517 	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE);
   2518 
   2519 	/* Hand-optimized version of bzero(ptr, PAGE_SIZE) */
   2520 	zeropage((char *)dstva);
   2521 
   2522 	pmap_kremove(dstva, PAGE_SIZE);
   2523 #ifdef DIAGNOSTIC
   2524 	--tmp_vpages_inuse;
   2525 #endif
   2526 	splx(s);
   2527 }
   2528 
   2529 /* pmap_collect			INTERFACE
   2530  **
   2531  * Called from the VM system when we are about to swap out
   2532  * the process using this pmap.  This should give up any
   2533  * resources held here, including all its MMU tables.
   2534  */
   2535 void
   2536 pmap_collect(pmap_t pmap)
   2537 {
   2538 
   2539 	/* XXX - todo... */
   2540 }
   2541 
   2542 /* pmap_pinit			INTERNAL
   2543  **
   2544  * Initialize a pmap structure.
   2545  */
   2546 static INLINE void
   2547 pmap_pinit(pmap_t pmap)
   2548 {
   2549 
   2550 	memset(pmap, 0, sizeof(struct pmap));
   2551 	pmap->pm_a_tmgr = NULL;
   2552 	pmap->pm_a_phys = kernAphys;
   2553 	pmap->pm_refcount = 1;
   2554 	simple_lock_init(&pmap->pm_lock);
   2555 }
   2556 
   2557 /* pmap_create			INTERFACE
   2558  **
   2559  * Create and return a pmap structure.
   2560  */
   2561 pmap_t
   2562 pmap_create(void)
   2563 {
   2564 	pmap_t	pmap;
   2565 
   2566 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
   2567 	pmap_pinit(pmap);
   2568 	return pmap;
   2569 }
   2570 
   2571 /* pmap_release				INTERNAL
   2572  **
   2573  * Release any resources held by the given pmap.
   2574  *
    2575  * This is the reverse analog to pmap_pinit.  Unlike pmap_destroy,
    2576  * it does not necessarily imply that the pmap structure is to be
    2577  * deallocated.
   2578  */
   2579 static INLINE void
   2580 pmap_release(pmap_t pmap)
   2581 {
   2582 
   2583 	/*
   2584 	 * As long as the pmap contains no mappings,
   2585 	 * which always should be the case whenever
   2586 	 * this function is called, there really should
   2587 	 * be nothing to do.
   2588 	 */
   2589 #ifdef	PMAP_DEBUG
   2590 	if (pmap == pmap_kernel())
   2591 		panic("pmap_release: kernel pmap");
   2592 #endif
   2593 	/*
   2594 	 * XXX - If this pmap has an A table, give it back.
   2595 	 * The pmap SHOULD be empty by now, and pmap_remove
   2596 	 * should have already given back the A table...
   2597 	 * However, I see:  pmap->pm_a_tmgr->at_ecnt == 1
   2598 	 * at this point, which means some mapping was not
   2599 	 * removed when it should have been. -gwr
   2600 	 */
   2601 	if (pmap->pm_a_tmgr != NULL) {
   2602 		/* First make sure we are not using it! */
   2603 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
   2604 			kernel_crp.rp_addr = kernAphys;
   2605 			loadcrp(&kernel_crp);
   2606 		}
   2607 #ifdef	PMAP_DEBUG /* XXX - todo! */
   2608 		/* XXX - Now complain... */
   2609 		printf("pmap_release: still have table\n");
   2610 		Debugger();
   2611 #endif
   2612 		free_a_table(pmap->pm_a_tmgr, true);
   2613 		pmap->pm_a_tmgr = NULL;
   2614 		pmap->pm_a_phys = kernAphys;
   2615 	}
   2616 }
   2617 
   2618 /* pmap_reference			INTERFACE
   2619  **
   2620  * Increment the reference count of a pmap.
   2621  */
   2622 void
   2623 pmap_reference(pmap_t pmap)
   2624 {
   2625 	pmap_lock(pmap);
   2626 	pmap_add_ref(pmap);
   2627 	pmap_unlock(pmap);
   2628 }
   2629 
   2630 /* pmap_dereference			INTERNAL
   2631  **
   2632  * Decrease the reference count on the given pmap
   2633  * by one and return the current count.
   2634  */
   2635 static INLINE int
   2636 pmap_dereference(pmap_t pmap)
   2637 {
   2638 	int rtn;
   2639 
   2640 	pmap_lock(pmap);
   2641 	rtn = pmap_del_ref(pmap);
   2642 	pmap_unlock(pmap);
   2643 
   2644 	return rtn;
   2645 }
   2646 
   2647 /* pmap_destroy			INTERFACE
   2648  **
   2649  * Decrement a pmap's reference count and delete
   2650  * the pmap if it becomes zero.  Will be called
   2651  * only after all mappings have been removed.
   2652  */
   2653 void
   2654 pmap_destroy(pmap_t pmap)
   2655 {
   2656 
   2657 	if (pmap_dereference(pmap) == 0) {
   2658 		pmap_release(pmap);
   2659 		pool_put(&pmap_pmap_pool, pmap);
   2660 	}
   2661 }
   2662 
   2663 /* pmap_is_referenced			INTERFACE
   2664  **
   2665  * Determine if the given physical page has been
   2666  * referenced (read from [or written to.])
   2667  */
   2668 bool
   2669 pmap_is_referenced(struct vm_page *pg)
   2670 {
   2671 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
   2672 	pv_t      *pv;
   2673 	int       idx;
   2674 
   2675 	/*
   2676 	 * Check the flags on the pv head.  If they are set,
   2677 	 * return immediately.  Otherwise a search must be done.
   2678 	 */
   2679 
   2680 	pv = pa2pv(pa);
   2681 	if (pv->pv_flags & PV_FLAGS_USED)
   2682 		return true;
   2683 
   2684 	/*
   2685 	 * Search through all pv elements pointing
   2686 	 * to this page and query their reference bits
   2687 	 */
   2688 
   2689 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
   2690 		if (MMU_PTE_USED(kernCbase[idx])) {
   2691 			return true;
   2692 		}
   2693 	}
   2694 	return false;
   2695 }
   2696 
   2697 /* pmap_is_modified			INTERFACE
   2698  **
   2699  * Determine if the given physical page has been
   2700  * modified (written to.)
   2701  */
   2702 bool
   2703 pmap_is_modified(struct vm_page *pg)
   2704 {
   2705 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
   2706 	pv_t      *pv;
   2707 	int       idx;
   2708 
   2709 	/* see comments in pmap_is_referenced() */
   2710 	pv = pa2pv(pa);
   2711 	if (pv->pv_flags & PV_FLAGS_MDFY)
   2712 		return true;
   2713 
   2714 	for (idx = pv->pv_idx;
   2715 		 idx != PVE_EOL;
   2716 		 idx = pvebase[idx].pve_next) {
   2717 
   2718 		if (MMU_PTE_MODIFIED(kernCbase[idx])) {
   2719 			return true;
   2720 		}
   2721 	}
   2722 
   2723 	return false;
   2724 }
   2725 
   2726 /* pmap_page_protect			INTERFACE
   2727  **
   2728  * Applies the given protection to all mappings to the given
   2729  * physical page.
   2730  */
   2731 void
   2732 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
   2733 {
   2734 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
   2735 	pv_t      *pv;
   2736 	int       idx;
   2737 	vaddr_t va;
   2738 	struct mmu_short_pte_struct *pte;
   2739 	c_tmgr_t  *c_tbl;
   2740 	pmap_t    pmap, curpmap;
   2741 
   2742 	curpmap = current_pmap();
   2743 	pv = pa2pv(pa);
   2744 
   2745 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
   2746 		pte = &kernCbase[idx];
   2747 		switch (prot) {
   2748 			case VM_PROT_ALL:
   2749 				/* do nothing */
   2750 				break;
   2751 			case VM_PROT_EXECUTE:
   2752 			case VM_PROT_READ:
   2753 			case VM_PROT_READ|VM_PROT_EXECUTE:
   2754 				/*
   2755 				 * Determine the virtual address mapped by
   2756 				 * the PTE and flush ATC entries if necessary.
   2757 				 */
   2758 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
   2759 				pte->attr.raw |= MMU_SHORT_PTE_WP;
   2760 				if (pmap == curpmap || pmap == pmap_kernel())
   2761 					TBIS(va);
   2762 				break;
   2763 			case VM_PROT_NONE:
   2764 				/* Save the mod/ref bits. */
   2765 				pv->pv_flags |= pte->attr.raw;
   2766 				/* Invalidate the PTE. */
   2767 				pte->attr.raw = MMU_DT_INVALID;
   2768 
   2769 				/*
   2770 				 * Update table counts.  And flush ATC entries
   2771 				 * if necessary.
   2772 				 */
   2773 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
   2774 
   2775 				/*
   2776 				 * If the PTE belongs to the kernel map,
   2777 				 * be sure to flush the page it maps.
   2778 				 */
   2779 				if (pmap == pmap_kernel()) {
   2780 					TBIS(va);
   2781 				} else {
   2782 					/*
   2783 					 * The PTE belongs to a user map.
    2784 					 * Update the entry count in the C
   2785 					 * table to which it belongs and flush
   2786 					 * the ATC if the mapping belongs to
   2787 					 * the current pmap.
   2788 					 */
   2789 					c_tbl->ct_ecnt--;
   2790 					if (pmap == curpmap)
   2791 						TBIS(va);
   2792 				}
   2793 				break;
   2794 			default:
   2795 				break;
   2796 		}
   2797 	}
   2798 
   2799 	/*
   2800 	 * If the protection code indicates that all mappings to the page
   2801 	 * be removed, truncate the PV list to zero entries.
   2802 	 */
   2803 	if (prot == VM_PROT_NONE)
   2804 		pv->pv_idx = PVE_EOL;
   2805 }
   2806 
   2807 /* pmap_get_pteinfo		INTERNAL
   2808  **
   2809  * Called internally to find the pmap and virtual address within that
   2810  * map to which the pte at the given index maps.  Also includes the PTE's C
    2811  * map to which the pte at the given index maps.  Also returns the
    2812  * PTE's C table manager.
    2813  *
    2814  * Returns the pmap through the argument provided, and the virtual
    2815  * address as the return value.
   2816 vaddr_t
   2817 pmap_get_pteinfo(u_int idx, pmap_t *pmap, c_tmgr_t **tbl)
   2818 {
   2819 	vaddr_t     va = 0;
   2820 
   2821 	/*
   2822 	 * Determine if the PTE is a kernel PTE or a user PTE.
   2823 	 */
   2824 	if (idx >= NUM_KERN_PTES) {
   2825 		/*
   2826 		 * The PTE belongs to a user mapping.
   2827 		 */
   2828 		/* XXX: Would like an inline for this to validate idx... */
   2829 		*tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
   2830 
   2831 		*pmap = (*tbl)->ct_pmap;
   2832 		/*
   2833 		 * To find the va to which the PTE maps, we first take
   2834 		 * the table's base virtual address mapping which is stored
   2835 		 * in ct_va.  We then increment this address by a page for
   2836 		 * every slot skipped until we reach the PTE.
   2837 		 */
   2838 		va = (*tbl)->ct_va;
   2839 		va += m68k_ptob(idx % MMU_C_TBL_SIZE);
   2840 	} else {
   2841 		/*
   2842 		 * The PTE belongs to the kernel map.
   2843 		 */
   2844 		*pmap = pmap_kernel();
   2845 
   2846 		va = m68k_ptob(idx);
   2847 		va += KERNBASE;
   2848 	}
   2849 
   2850 	return va;
   2851 }
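         /*
          * Worked example (illustrative, assuming MMU_C_TBL_SIZE is 64
          * and that NUM_KERN_PTES is a multiple of it, which the modulo
          * above relies on): a user PTE 130 slots past NUM_KERN_PTES
          * belongs to Ctmgrbase[130 / 64] = Ctmgrbase[2], at slot
          * 130 % 64 = 2, so the returned va is ct_va + m68k_ptob(2).
          */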
   2852 
   2853 /* pmap_clear_modify			INTERFACE
   2854  **
   2855  * Clear the modification bit on the page at the specified
   2856  * physical address.
    2858  */
   2859 bool
   2860 pmap_clear_modify(struct vm_page *pg)
   2861 {
   2862 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   2863 	bool rv;
   2864 
   2865 	rv = pmap_is_modified(pg);
   2866 	pmap_clear_pv(pa, PV_FLAGS_MDFY);
   2867 	return rv;
   2868 }
   2869 
   2870 /* pmap_clear_reference			INTERFACE
   2871  **
   2872  * Clear the referenced bit on the page at the specified
   2873  * physical address.
   2874  */
   2875 bool
   2876 pmap_clear_reference(struct vm_page *pg)
   2877 {
   2878 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
   2879 	bool rv;
   2880 
   2881 	rv = pmap_is_referenced(pg);
   2882 	pmap_clear_pv(pa, PV_FLAGS_USED);
   2883 	return rv;
   2884 }
   2885 
   2886 /* pmap_clear_pv			INTERNAL
   2887  **
   2888  * Clears the specified flag from the specified physical address.
   2889  * (Used by pmap_clear_modify() and pmap_clear_reference().)
   2890  *
   2891  * Flag is one of:
   2892  *   PV_FLAGS_MDFY - Page modified bit.
   2893  *   PV_FLAGS_USED - Page used (referenced) bit.
   2894  *
   2895  * This routine must not only clear the flag on the pv list
   2896  * head.  It must also clear the bit on every pte in the pv
   2897  * list associated with the address.
   2898  */
   2899 void
   2900 pmap_clear_pv(paddr_t pa, int flag)
   2901 {
   2902 	pv_t      *pv;
   2903 	int       idx;
   2904 	vaddr_t   va;
   2905 	pmap_t          pmap;
   2906 	mmu_short_pte_t *pte;
   2907 	c_tmgr_t        *c_tbl;
   2908 
   2909 	pv = pa2pv(pa);
   2910 	pv->pv_flags &= ~(flag);
   2911 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
   2912 		pte = &kernCbase[idx];
   2913 		pte->attr.raw &= ~(flag);
   2914 
   2915 		/*
   2916 		 * The MC68030 MMU will not set the modified or
   2917 		 * referenced bits on any MMU tables for which it has
    2918 		 * a cached descriptor with its modify bit set.  To ensure
   2919 		 * that it will modify these bits on the PTE during the next
   2920 		 * time it is written to or read from, we must flush it from
   2921 		 * the ATC.
   2922 		 *
   2923 		 * Ordinarily it is only necessary to flush the descriptor
   2924 		 * if it is used in the current address space.  But since I
   2925 		 * am not sure that there will always be a notion of
   2926 		 * 'the current address space' when this function is called,
   2927 		 * I will skip the test and always flush the address.  It
   2928 		 * does no harm.
   2929 		 */
   2930 
   2931 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
   2932 		TBIS(va);
   2933 	}
   2934 }
   2935 
   2936 /* pmap_extract_kernel		INTERNAL
   2937  **
   2938  * Extract a translation from the kernel address space.
   2939  */
   2940 static INLINE bool
   2941 pmap_extract_kernel(vaddr_t va, paddr_t *pap)
   2942 {
   2943 	mmu_short_pte_t *pte;
   2944 
   2945 	pte = &kernCbase[(u_int)m68k_btop(va - KERNBASE)];
   2946 	if (!MMU_VALID_DT(*pte))
   2947 		return false;
   2948 	if (pap != NULL)
   2949 		*pap = MMU_PTE_PA(*pte);
   2950 	return true;
   2951 }
   2952 
   2953 /* pmap_extract			INTERFACE
   2954  **
   2955  * Return the physical address mapped by the virtual address
   2956  * in the specified pmap.
   2957  *
   2958  * Note: this function should also apply an exclusive lock
   2959  * on the pmap system during its duration.
   2960  */
   2961 bool
   2962 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
   2963 {
   2964 	int a_idx, b_idx, pte_idx;
   2965 	a_tmgr_t	*a_tbl;
   2966 	b_tmgr_t	*b_tbl;
   2967 	c_tmgr_t	*c_tbl;
   2968 	mmu_short_pte_t	*c_pte;
   2969 
   2970 	if (pmap == pmap_kernel())
   2971 		return pmap_extract_kernel(va, pap);
   2972 
   2973 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
   2974 		&c_pte, &a_idx, &b_idx, &pte_idx) == false)
   2975 		return false;
   2976 
   2977 	if (!MMU_VALID_DT(*c_pte))
   2978 		return false;
   2979 
   2980 	if (pap != NULL)
   2981 		*pap = MMU_PTE_PA(*c_pte);
   2982 	return true;
   2983 }
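         /*
          * Typical use (a sketch):
          *
          *	paddr_t pa;
          *
          *	if (pmap_extract(pmap, va, &pa) == false)
          *		... va has no translation in pmap ...
          *
          * Passing a NULL 'pap' merely asks whether a mapping exists.
          */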
   2984 
   2985 /* pmap_remove_kernel		INTERNAL
   2986  **
   2987  * Remove the mapping of a range of virtual addresses from the kernel map.
   2988  * The arguments are already page-aligned.
   2989  */
   2990 static INLINE void
   2991 pmap_remove_kernel(vaddr_t sva, vaddr_t eva)
   2992 {
   2993 	int idx, eidx;
   2994 
   2995 #ifdef	PMAP_DEBUG
   2996 	if ((sva & PGOFSET) || (eva & PGOFSET))
   2997 		panic("pmap_remove_kernel: alignment");
   2998 #endif
   2999 
   3000 	idx  = m68k_btop(sva - KERNBASE);
   3001 	eidx = m68k_btop(eva - KERNBASE);
   3002 
   3003 	while (idx < eidx) {
   3004 		pmap_remove_pte(&kernCbase[idx++]);
   3005 		TBIS(sva);
   3006 		sva += PAGE_SIZE;
   3007 	}
   3008 }
   3009 
   3010 /* pmap_remove			INTERFACE
   3011  **
   3012  * Remove the mapping of a range of virtual addresses from the given pmap.
    3014  */
   3015 void
   3016 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
   3017 {
   3018 
   3019 	if (pmap == pmap_kernel()) {
   3020 		pmap_remove_kernel(sva, eva);
   3021 		return;
   3022 	}
   3023 
   3024 	/*
   3025 	 * If the pmap doesn't have an A table of its own, it has no mappings
   3026 	 * that can be removed.
   3027 	 */
   3028 	if (pmap->pm_a_tmgr == NULL)
   3029 		return;
   3030 
   3031 	/*
   3032 	 * Remove the specified range from the pmap.  If the function
   3033 	 * returns true, the operation removed all the valid mappings
   3034 	 * in the pmap and freed its A table.  If this happened to the
   3035 	 * currently loaded pmap, the MMU root pointer must be reloaded
   3036 	 * with the default 'kernel' map.
   3037 	 */
   3038 	if (pmap_remove_a(pmap->pm_a_tmgr, sva, eva)) {
   3039 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
   3040 			kernel_crp.rp_addr = kernAphys;
   3041 			loadcrp(&kernel_crp);
   3042 			/* will do TLB flush below */
   3043 		}
   3044 		pmap->pm_a_tmgr = NULL;
   3045 		pmap->pm_a_phys = kernAphys;
   3046 	}
   3047 
   3048 	/*
   3049 	 * If we just modified the current address space,
   3050 	 * make sure to flush the MMU cache.
   3051 	 *
    3052 	 * XXX - this could be an unnecessarily large flush.
   3053 	 * XXX - Could decide, based on the size of the VA range
   3054 	 * to be removed, whether to flush "by pages" or "all".
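         	 * XXX - e.g. (an untested sketch): for small ranges, issue
         	 * TBIS() per removed page instead of the blanket TBIAU() below.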
   3055 	 */
   3056 	if (pmap == current_pmap())
   3057 		TBIAU();
   3058 }
   3059 
   3060 /* pmap_remove_a			INTERNAL
   3061  **
    3062  * This is the first in a set of three functions that remove a range
    3063  * of memory in the most efficient manner by invalidating the highest-
    3064  * level tables possible.  This particular function attempts to remove
   3065  * as many B tables as it can, delegating the remaining fragmented ranges to
   3066  * pmap_remove_b().
   3067  *
   3068  * If the removal operation results in an empty A table, the function returns
   3069  * true.
   3070  *
   3071  * It's ugly but will do for now.
   3072  */
   3073 bool
   3074 pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva)
   3075 {
   3076 	bool empty;
   3077 	int idx;
   3078 	vaddr_t nstart, nend;
   3079 	b_tmgr_t *b_tbl;
   3080 	mmu_long_dte_t  *a_dte;
   3081 	mmu_short_dte_t *b_dte;
   3082 	uint8_t at_wired, bt_wired;
   3083 
   3084 	/*
   3085 	 * The following code works with what I call a 'granularity
    3086 	 * reduction algorithm'.  A range of addresses will always have
   3087 	 * the following properties, which are classified according to
   3088 	 * how the range relates to the size of the current granularity
   3089 	 * - an A table entry:
   3090 	 *
   3091 	 *            1 2       3 4
   3092 	 * -+---+---+---+---+---+---+---+-
   3093 	 * -+---+---+---+---+---+---+---+-
   3094 	 *
   3095 	 * A range will always start on a granularity boundary, illustrated
    3096 	 * by '+' signs in the diagram above, or it will start at some point
    3097 	 * in between granularity boundaries, as illustrated by point 1.
   3098 	 * The first step in removing a range of addresses is to remove the
   3099 	 * range between 1 and 2, the nearest granularity boundary.  This
   3100 	 * job is handled by the section of code governed by the
    3101 	 * 'if (sva < nstart)' statement.
   3102 	 *
    3103 	 * A range will always encompass zero or more integral granules,
   3104 	 * illustrated by points 2 and 3.  Integral granules are easy to
   3105 	 * remove.  The removal of these granules is the second step, and
   3106 	 * is handled by the code block 'if (nstart < nend)'.
   3107 	 *
   3108 	 * Lastly, a range will always end on a granularity boundary,
    3109 	 * illustrated by point 3, or it will fall just beyond one, as
    3110 	 * illustrated by point 4.  The last step removes this range and is
    3111 	 * handled by the code block 'if (nend < eva)'.
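         	 *
         	 * As a concrete sketch (assuming the 32MB A-table granularity
         	 * that follows from the 7-bit A-table index): removing the range
         	 * sva = 0x01800000 .. eva = 0x09000000 yields
         	 * nstart = 0x02000000 and nend = 0x08000000.  Step one hands
         	 * [sva, nstart) to pmap_remove_b(), step two frees the three
         	 * whole B tables covering [nstart, nend), and step three hands
         	 * [nend, eva) to pmap_remove_b().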
   3112 	 */
   3113 	nstart = MMU_ROUND_UP_A(sva);
   3114 	nend = MMU_ROUND_A(eva);
   3115 
   3116 	at_wired = a_tbl->at_wcnt;
   3117 
   3118 	if (sva < nstart) {
   3119 		/*
    3120 		 * This block is executed if the range starts in between
    3121 		 * granularity boundaries.
   3122 		 *
   3123 		 * First find the DTE which is responsible for mapping
   3124 		 * the start of the range.
   3125 		 */
   3126 		idx = MMU_TIA(sva);
   3127 		a_dte = &a_tbl->at_dtbl[idx];
   3128 
   3129 		/*
   3130 		 * If the DTE is valid then delegate the removal of the sub
   3131 		 * range to pmap_remove_b(), which can remove addresses at
   3132 		 * a finer granularity.
   3133 		 */
   3134 		if (MMU_VALID_DT(*a_dte)) {
   3135 			b_dte = mmu_ptov(a_dte->addr.raw);
   3136 			b_tbl = mmuB2tmgr(b_dte);
   3137 			bt_wired = b_tbl->bt_wcnt;
   3138 
   3139 			/*
   3140 			 * The sub range to be removed starts at the start
   3141 			 * of the full range we were asked to remove, and ends
    3142 			 * at the lesser of:
    3143 			 * 1. The end of the full range, -or-
    3144 			 * 2. The start of the full range, rounded up to the
    3145 			 *    nearest granularity boundary.
   3146 			 */
   3147 			if (eva < nstart)
   3148 				empty = pmap_remove_b(b_tbl, sva, eva);
   3149 			else
   3150 				empty = pmap_remove_b(b_tbl, sva, nstart);
   3151 
   3152 			/*
   3153 			 * If the child table no longer has wired entries,
   3154 			 * decrement wired entry count.
   3155 			 */
   3156 			if (bt_wired && b_tbl->bt_wcnt == 0)
   3157 				a_tbl->at_wcnt--;
   3158 
   3159 			/*
   3160 			 * If the removal resulted in an empty B table,
   3161 			 * invalidate the DTE that points to it and decrement
   3162 			 * the valid entry count of the A table.
   3163 			 */
   3164 			if (empty) {
   3165 				a_dte->attr.raw = MMU_DT_INVALID;
   3166 				a_tbl->at_ecnt--;
   3167 			}
   3168 		}
   3169 		/*
   3170 		 * If the DTE is invalid, the address range is already non-
   3171 		 * existent and can simply be skipped.
   3172 		 */
   3173 	}
   3174 	if (nstart < nend) {
   3175 		/*
    3176 		 * This block is executed if the range spans a whole
    3177 		 * number of granules (A table entries).
   3178 		 *
   3179 		 * First find the DTE which is responsible for mapping
   3180 		 * the start of the first granule involved.
   3181 		 */
   3182 		idx = MMU_TIA(nstart);
   3183 		a_dte = &a_tbl->at_dtbl[idx];
   3184 
   3185 		/*
   3186 		 * Remove entire sub-granules (B tables) one at a time,
   3187 		 * until reaching the end of the range.
   3188 		 */
   3189 		for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
   3190 			if (MMU_VALID_DT(*a_dte)) {
   3191 				/*
   3192 				 * Find the B table manager for the
   3193 				 * entry and free it.
   3194 				 */
   3195 				b_dte = mmu_ptov(a_dte->addr.raw);
   3196 				b_tbl = mmuB2tmgr(b_dte);
   3197 				bt_wired = b_tbl->bt_wcnt;
   3198 
   3199 				free_b_table(b_tbl, true);
   3200 
   3201 				/*
    3202 				 * All child entries have been removed.
    3203 				 * If any of them were wired, decrement
    3204 				 * the wired entry count of the A table.
   3205 				 */
   3206 				if (bt_wired)
   3207 					a_tbl->at_wcnt--;
   3208 
   3209 				/*
   3210 				 * Invalidate the DTE that points to the
   3211 				 * B table and decrement the valid entry
   3212 				 * count of the A table.
   3213 				 */
   3214 				a_dte->attr.raw = MMU_DT_INVALID;
   3215 				a_tbl->at_ecnt--;
   3216 			}
   3217 	}
   3218 	if (nend < eva) {
   3219 		/*
   3220 		 * This block is executed if the range ends beyond a
   3221 		 * granularity boundary.
   3222 		 *
   3223 		 * First find the DTE which is responsible for mapping
   3224 		 * the start of the nearest (rounded down) granularity
   3225 		 * boundary.
   3226 		 */
   3227 		idx = MMU_TIA(nend);
   3228 		a_dte = &a_tbl->at_dtbl[idx];
   3229 
   3230 		/*
   3231 		 * If the DTE is valid then delegate the removal of the sub
   3232 		 * range to pmap_remove_b(), which can remove addresses at
   3233 		 * a finer granularity.
   3234 		 */
   3235 		if (MMU_VALID_DT(*a_dte)) {
   3236 			/*
   3237 			 * Find the B table manager for the entry
   3238 			 * and hand it to pmap_remove_b() along with
   3239 			 * the sub range.
   3240 			 */
   3241 			b_dte = mmu_ptov(a_dte->addr.raw);
   3242 			b_tbl = mmuB2tmgr(b_dte);
   3243 			bt_wired = b_tbl->bt_wcnt;
   3244 
   3245 			empty = pmap_remove_b(b_tbl, nend, eva);
   3246 
   3247 			/*
   3248 			 * If the child table no longer has wired entries,
   3249 			 * decrement wired entry count.
   3250 			 */
   3251 			if (bt_wired && b_tbl->bt_wcnt == 0)
   3252 				a_tbl->at_wcnt--;
   3253 			/*
   3254 			 * If the removal resulted in an empty B table,
   3255 			 * invalidate the DTE that points to it and decrement
   3256 			 * the valid entry count of the A table.
   3257 			 */
   3258 			if (empty) {
   3259 				a_dte->attr.raw = MMU_DT_INVALID;
   3260 				a_tbl->at_ecnt--;
   3261 			}
   3262 		}
   3263 	}
   3264 
   3265 	/*
   3266 	 * If there are no more entries in the A table, release it
   3267 	 * back to the available pool and return true.
   3268 	 */
   3269 	if (a_tbl->at_ecnt == 0) {
   3270 		KASSERT(a_tbl->at_wcnt == 0);
   3271 		a_tbl->at_parent = NULL;
   3272 		if (!at_wired)
   3273 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
   3274 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
   3275 		empty = true;
   3276 	} else {
   3277 		/*
   3278 		 * If the table doesn't have wired entries any longer
   3279 		 * but still has unwired entries, put it back into
   3280 		 * the available queue.
   3281 		 */
   3282 		if (at_wired && a_tbl->at_wcnt == 0)
   3283 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
   3284 		empty = false;
   3285 	}
   3286 
   3287 	return empty;
   3288 }
   3289 
   3290 /* pmap_remove_b			INTERNAL
   3291  **
   3292  * Remove a range of addresses from an address space, trying to remove entire
   3293  * C tables if possible.
   3294  *
   3295  * If the operation results in an empty B table, the function returns true.
   3296  */
   3297 bool
   3298 pmap_remove_b(b_tmgr_t *b_tbl, vaddr_t sva, vaddr_t eva)
   3299 {
   3300 	bool empty;
   3301 	int idx;
   3302 	vaddr_t nstart, nend, rstart;
   3303 	c_tmgr_t *c_tbl;
   3304 	mmu_short_dte_t  *b_dte;
   3305 	mmu_short_pte_t  *c_dte;
   3306 	uint8_t bt_wired, ct_wired;
   3307 
   3308 	nstart = MMU_ROUND_UP_B(sva);
   3309 	nend = MMU_ROUND_B(eva);
   3310 
   3311 	bt_wired = b_tbl->bt_wcnt;
   3312 
   3313 	if (sva < nstart) {
   3314 		idx = MMU_TIB(sva);
   3315 		b_dte = &b_tbl->bt_dtbl[idx];
   3316 		if (MMU_VALID_DT(*b_dte)) {
   3317 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
   3318 			c_tbl = mmuC2tmgr(c_dte);
   3319 			ct_wired = c_tbl->ct_wcnt;
   3320 
   3321 			if (eva < nstart)
   3322 				empty = pmap_remove_c(c_tbl, sva, eva);
   3323 			else
   3324 				empty = pmap_remove_c(c_tbl, sva, nstart);
   3325 
   3326 			/*
   3327 			 * If the child table no longer has wired entries,
   3328 			 * decrement wired entry count.
   3329 			 */
   3330 			if (ct_wired && c_tbl->ct_wcnt == 0)
   3331 				b_tbl->bt_wcnt--;
   3332 
   3333 			if (empty) {
   3334 				b_dte->attr.raw = MMU_DT_INVALID;
   3335 				b_tbl->bt_ecnt--;
   3336 			}
   3337 		}
   3338 	}
   3339 	if (nstart < nend) {
   3340 		idx = MMU_TIB(nstart);
   3341 		b_dte = &b_tbl->bt_dtbl[idx];
   3342 		rstart = nstart;
   3343 		while (rstart < nend) {
   3344 			if (MMU_VALID_DT(*b_dte)) {
   3345 				c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
   3346 				c_tbl = mmuC2tmgr(c_dte);
   3347 				ct_wired = c_tbl->ct_wcnt;
   3348 
   3349 				free_c_table(c_tbl, true);
   3350 
   3351 				/*
    3352 				 * All child entries have been removed.
    3353 				 * If any of them were wired, decrement
    3354 				 * the wired entry count of the B table.
   3355 				 */
   3356 				if (ct_wired)
   3357 					b_tbl->bt_wcnt--;
   3358 
   3359 				b_dte->attr.raw = MMU_DT_INVALID;
   3360 				b_tbl->bt_ecnt--;
   3361 			}
   3362 			b_dte++;
   3363 			rstart += MMU_TIB_RANGE;
   3364 		}
   3365 	}
   3366 	if (nend < eva) {
   3367 		idx = MMU_TIB(nend);
   3368 		b_dte = &b_tbl->bt_dtbl[idx];
   3369 		if (MMU_VALID_DT(*b_dte)) {
   3370 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
   3371 			c_tbl = mmuC2tmgr(c_dte);
   3372 			ct_wired = c_tbl->ct_wcnt;
   3373 			empty = pmap_remove_c(c_tbl, nend, eva);
   3374 
   3375 			/*
   3376 			 * If the child table no longer has wired entries,
   3377 			 * decrement wired entry count.
   3378 			 */
   3379 			if (ct_wired && c_tbl->ct_wcnt == 0)
   3380 				b_tbl->bt_wcnt--;
   3381 
   3382 			if (empty) {
   3383 				b_dte->attr.raw = MMU_DT_INVALID;
   3384 				b_tbl->bt_ecnt--;
   3385 			}
   3386 		}
   3387 	}
   3388 
   3389 	if (b_tbl->bt_ecnt == 0) {
   3390 		KASSERT(b_tbl->bt_wcnt == 0);
   3391 		b_tbl->bt_parent = NULL;
   3392 		if (!bt_wired)
   3393 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
   3394 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
   3395 		empty = true;
   3396 	} else {
   3397 		/*
   3398 		 * If the table doesn't have wired entries any longer
   3399 		 * but still has unwired entries, put it back into
   3400 		 * the available queue.
   3401 		 */
   3402 		if (bt_wired && b_tbl->bt_wcnt == 0)
   3403 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
   3404 
   3405 		empty = false;
   3406 	}
   3407 
   3408 	return empty;
   3409 }
   3410 
   3411 /* pmap_remove_c			INTERNAL
   3412  **
   3413  * Remove a range of addresses from the given C table.
   3414  */
   3415 bool
   3416 pmap_remove_c(c_tmgr_t *c_tbl, vaddr_t sva, vaddr_t eva)
   3417 {
   3418 	bool empty;
   3419 	int idx;
   3420 	mmu_short_pte_t *c_pte;
   3421 	uint8_t ct_wired;
   3422 
   3423 	ct_wired = c_tbl->ct_wcnt;
   3424 
   3425 	idx = MMU_TIC(sva);
   3426 	c_pte = &c_tbl->ct_dtbl[idx];
   3427 	for (; sva < eva; sva += MMU_PAGE_SIZE, c_pte++) {
   3428 		if (MMU_VALID_DT(*c_pte)) {
   3429 			if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
   3430 				c_tbl->ct_wcnt--;
   3431 			pmap_remove_pte(c_pte);
   3432 			c_tbl->ct_ecnt--;
   3433 		}
   3434 	}
   3435 
   3436 	if (c_tbl->ct_ecnt == 0) {
   3437 		KASSERT(c_tbl->ct_wcnt == 0);
   3438 		c_tbl->ct_parent = NULL;
   3439 		if (!ct_wired)
   3440 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
   3441 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
   3442 		empty = true;
   3443 	} else {
   3444 		/*
   3445 		 * If the table doesn't have wired entries any longer
   3446 		 * but still has unwired entries, put it back into
   3447 		 * the available queue.
   3448 		 */
   3449 		if (ct_wired && c_tbl->ct_wcnt == 0)
   3450 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
   3451 		empty = false;
   3452 	}
   3453 
   3454 	return empty;
   3455 }
   3456 
   3457 /* pmap_bootstrap_alloc			INTERNAL
   3458  **
   3459  * Used internally for memory allocation at startup when malloc is not
   3460  * available.  This code will fail once it crosses the first memory
    3461  * bank boundary on the 3/80.  Hopefully by then, however, the VM system
   3462  * will be in charge of allocation.
   3463  */
   3464 void *
   3465 pmap_bootstrap_alloc(int size)
   3466 {
   3467 	void *rtn;
   3468 
   3469 #ifdef	PMAP_DEBUG
   3470 	if (bootstrap_alloc_enabled == false) {
   3471 		mon_printf("pmap_bootstrap_alloc: disabled\n");
   3472 		sunmon_abort();
   3473 	}
   3474 #endif
   3475 
   3476 	rtn = (void *) virtual_avail;
   3477 	virtual_avail += size;
   3478 
   3479 #ifdef	PMAP_DEBUG
   3480 	if (virtual_avail > virtual_contig_end) {
   3481 		mon_printf("pmap_bootstrap_alloc: out of mem\n");
   3482 		sunmon_abort();
   3483 	}
   3484 #endif
   3485 
   3486 	return rtn;
   3487 }
   3488 
    3489 /* pmap_bootstrap_aalign		INTERNAL
    3490  **
    3491  * Used to ensure that the next call to pmap_bootstrap_alloc() will
    3492  * return a chunk of memory aligned to the specified size.
    3493  *
    3494  * Note: This function only supports alignment sizes that are powers
    3495  * of two.
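          *
          * A usage sketch (hypothetical values): to carve out an 8KB-aligned
          * chunk during bootstrap,
          *
          *	pmap_bootstrap_aalign(8192);
          *	tbl = pmap_bootstrap_alloc(8192);
          *
          * If virtual_avail were 0x0F003100, the alignment call would consume
          * 0x0F00 bytes of padding, leaving virtual_avail at 0x0F004000.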
   3496  */
   3497 void
   3498 pmap_bootstrap_aalign(int size)
   3499 {
   3500 	int off;
   3501 
   3502 	off = virtual_avail & (size - 1);
   3503 	if (off) {
   3504 		(void)pmap_bootstrap_alloc(size - off);
   3505 	}
   3506 }
   3507 
   3508 /* pmap_pa_exists
   3509  **
   3510  * Used by the /dev/mem driver to see if a given PA is memory
   3511  * that can be mapped.  (The PA is not in a hole.)
   3512  */
   3513 int
   3514 pmap_pa_exists(paddr_t pa)
   3515 {
   3516 	int i;
   3517 
   3518 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
   3519 		if ((pa >= avail_mem[i].pmem_start) &&
   3520 			(pa <  avail_mem[i].pmem_end))
   3521 			return 1;
   3522 		if (avail_mem[i].pmem_next == NULL)
   3523 			break;
   3524 	}
   3525 	return 0;
   3526 }
   3527 
   3528 /* Called only from locore.s and pmap.c */
   3529 void	_pmap_switch(pmap_t pmap);
   3530 
   3531 /*
   3532  * _pmap_switch			INTERNAL
   3533  *
   3534  * This is called by locore.s:cpu_switch() when it is
   3535  * switching to a new process.  Load new translations.
   3536  * Note: done in-line by locore.s unless PMAP_DEBUG
   3537  *
   3538  * Note that we do NOT allocate a context here, but
   3539  * share the "kernel only" context until we really
   3540  * need our own context for user-space mappings in
   3541  * pmap_enter_user().  [ s/context/mmu A table/ ]
   3542  */
   3543 void
   3544 _pmap_switch(pmap_t pmap)
   3545 {
   3546 	u_long rootpa;
   3547 
   3548 	/*
   3549 	 * Only do reload/flush if we have to.
   3550 	 * Note that if the old and new process
   3551 	 * were BOTH using the "null" context,
   3552 	 * then this will NOT flush the TLB.
   3553 	 */
   3554 	rootpa = pmap->pm_a_phys;
   3555 	if (kernel_crp.rp_addr != rootpa) {
   3556 		DPRINT(("pmap_activate(%p)\n", pmap));
   3557 		kernel_crp.rp_addr = rootpa;
   3558 		loadcrp(&kernel_crp);
   3559 		TBIAU();
   3560 	}
   3561 }
   3562 
   3563 /*
   3564  * Exported version of pmap_activate().  This is called from the
   3565  * machine-independent VM code when a process is given a new pmap.
    3566  * If (l->l_proc == curproc), do what cpu_switch() would do; otherwise
    3567  * just take this as notification that the process has a new pmap.
   3568  */
   3569 void
   3570 pmap_activate(struct lwp *l)
   3571 {
   3572 
   3573 	if (l->l_proc == curproc) {
   3574 		_pmap_switch(l->l_proc->p_vmspace->vm_map.pmap);
   3575 	}
   3576 }
   3577 
   3578 /*
   3579  * pmap_deactivate			INTERFACE
   3580  **
   3581  * This is called to deactivate the specified process's address space.
   3582  */
   3583 void
   3584 pmap_deactivate(struct lwp *l)
   3585 {
   3586 
   3587 	/* Nothing to do. */
   3588 }
   3589 
   3590 /*
   3591  * Fill in the sun3x-specific part of the kernel core header
   3592  * for dumpsys().  (See machdep.c for the rest.)
   3593  */
   3594 void
   3595 pmap_kcore_hdr(struct sun3x_kcore_hdr *sh)
   3596 {
   3597 	u_long spa, len;
   3598 	int i;
   3599 
   3600 	sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
   3601 	sh->pg_valid = MMU_DT_PAGE;
   3602 	sh->contig_end = virtual_contig_end;
   3603 	sh->kernCbase = (u_long)kernCbase;
   3604 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
   3605 		spa = avail_mem[i].pmem_start;
   3606 		spa = m68k_trunc_page(spa);
   3607 		len = avail_mem[i].pmem_end - spa;
   3608 		len = m68k_round_page(len);
   3609 		sh->ram_segs[i].start = spa;
   3610 		sh->ram_segs[i].size  = len;
   3611 	}
   3612 }
   3613 
   3614 
   3615 /* pmap_virtual_space			INTERFACE
   3616  **
   3617  * Return the current available range of virtual addresses in the
    3618  * arguments provided.  Only really called once.
   3619  */
   3620 void
   3621 pmap_virtual_space(vaddr_t *vstart, vaddr_t *vend)
   3622 {
   3623 
   3624 	*vstart = virtual_avail;
   3625 	*vend = virtual_end;
   3626 }
   3627 
   3628 /*
   3629  * Provide memory to the VM system.
   3630  *
   3631  * Assume avail_start is always in the
   3632  * first segment as pmap_bootstrap does.
   3633  */
   3634 static void
   3635 pmap_page_upload(void)
   3636 {
   3637 	paddr_t	a, b;	/* memory range */
   3638 	int i;
   3639 
   3640 	/* Supply the memory in segments. */
   3641 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
   3642 		a = atop(avail_mem[i].pmem_start);
   3643 		b = atop(avail_mem[i].pmem_end);
   3644 		if (i == 0)
   3645 			a = atop(avail_start);
   3646 		if (avail_mem[i].pmem_end > avail_end)
   3647 			b = atop(avail_end);
   3648 
   3649 		uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
   3650 
   3651 		if (avail_mem[i].pmem_next == NULL)
   3652 			break;
   3653 	}
   3654 }
   3655 
   3656 /* pmap_count			INTERFACE
   3657  **
   3658  * Return the number of resident (valid) pages in the given pmap.
   3659  *
   3660  * Note:  If this function is handed the kernel map, it will report
   3661  * that it has no mappings.  Hopefully the VM system won't ask for kernel
   3662  * map statistics.
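          *
          * A 'type' of zero requests the resident page count; any nonzero
          * 'type' requests the wired page count.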
   3663  */
   3664 segsz_t
   3665 pmap_count(pmap_t pmap, int type)
   3666 {
   3667 	u_int     count;
   3668 	int       a_idx, b_idx;
   3669 	a_tmgr_t *a_tbl;
   3670 	b_tmgr_t *b_tbl;
   3671 	c_tmgr_t *c_tbl;
   3672 
   3673 	/*
   3674 	 * If the pmap does not have its own A table manager, it has no
    3675 	 * valid entries.
   3676 	 */
   3677 	if (pmap->pm_a_tmgr == NULL)
   3678 		return 0;
   3679 
   3680 	a_tbl = pmap->pm_a_tmgr;
   3681 
   3682 	count = 0;
   3683 	for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
   3684 	    if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
   3685 	        b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
   3686 	        for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
   3687 	            if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
   3688 	                c_tbl = mmuC2tmgr(
   3689 	                    mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
   3690 	                if (type == 0)
   3691 	                    /*
   3692 	                     * A resident entry count has been requested.
   3693 	                     */
   3694 	                    count += c_tbl->ct_ecnt;
   3695 	                else
   3696 	                    /*
   3697 	                     * A wired entry count has been requested.
   3698 	                     */
   3699 	                    count += c_tbl->ct_wcnt;
   3700 	            }
   3701 	        }
   3702 	    }
   3703 	}
   3704 
   3705 	return count;
   3706 }
   3707 
   3708 /************************ SUN3 COMPATIBILITY ROUTINES ********************
   3709  * The following routines are only used by DDB for tricky kernel text    *
    3710  * operations in db_memrw.c.  They are provided for sun3                 *
   3711  * compatibility.                                                        *
   3712  *************************************************************************/
   3713 /* get_pte			INTERNAL
   3714  **
    3715  * Return the page descriptor that describes the kernel mapping
   3716  * of the given virtual address.
   3717  */
   3718 extern u_long ptest_addr(u_long);	/* XXX: locore.s */
   3719 u_int
   3720 get_pte(vaddr_t va)
   3721 {
   3722 	u_long pte_pa;
   3723 	mmu_short_pte_t *pte;
   3724 
   3725 	/* Get the physical address of the PTE */
   3726 	pte_pa = ptest_addr(va & ~PGOFSET);
   3727 
   3728 	/* Convert to a virtual address... */
   3729 	pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
   3730 
   3731 	/* Make sure it is in our level-C tables... */
   3732 	if ((pte < kernCbase) ||
   3733 		(pte >= &mmuCbase[NUM_USER_PTES]))
   3734 		return 0;
   3735 
   3736 	/* ... and just return its contents. */
   3737 	return (pte->attr.raw);
   3738 }
   3739 
   3740 
   3741 /* set_pte			INTERNAL
   3742  **
   3743  * Set the page descriptor that describes the kernel mapping
   3744  * of the given virtual address.
   3745  */
   3746 void
   3747 set_pte(vaddr_t va, u_int pte)
   3748 {
   3749 	u_long idx;
   3750 
   3751 	if (va < KERNBASE)
   3752 		return;
   3753 
   3754 	idx = (unsigned long) m68k_btop(va - KERNBASE);
   3755 	kernCbase[idx].attr.raw = pte;
   3756 	TBIS(va);
   3757 }
   3758 
   3759 /*
   3760  *	Routine:        pmap_procwr
   3761  *
   3762  *	Function:
   3763  *		Synchronize caches corresponding to [addr, addr+len) in p.
   3764  */
   3765 void
   3766 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
   3767 {
   3768 
   3769 	(void)cachectl1(0x80000004, va, len, p);
   3770 }
   3771 
   3772 
   3773 #ifdef	PMAP_DEBUG
   3774 /************************** DEBUGGING ROUTINES **************************
   3775  * The following routines are meant to be an aid to debugging the pmap  *
   3776  * system.  They are callable from the DDB command line and should be   *
   3777  * prepared to be handed unstable or incomplete states of the system.   *
   3778  ************************************************************************/
   3779 
   3780 /* pv_list
   3781  **
   3782  * List all pages found on the pv list for the given physical page.
   3783  * To avoid endless loops, the listing will stop at the end of the list
   3784  * or after 'n' entries - whichever comes first.
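          *
          * A usage sketch from the DDB prompt (hypothetical address):
          *
          *	call pv_list(0x3f00000, 8)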
   3785  */
   3786 void
   3787 pv_list(paddr_t pa, int n)
   3788 {
   3789 	int  idx;
   3790 	vaddr_t va;
   3791 	pv_t *pv;
   3792 	c_tmgr_t *c_tbl;
   3793 	pmap_t pmap;
   3794 
   3795 	pv = pa2pv(pa);
   3796 	idx = pv->pv_idx;
   3797 	for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
   3798 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
   3799 		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
   3800 			idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
   3801 	}
   3802 }
   3803 #endif	/* PMAP_DEBUG */
   3804 
   3805 #ifdef NOT_YET
   3806 /* and maybe not ever */
   3807 /************************** LOW-LEVEL ROUTINES **************************
   3808  * These routines will eventually be re-written into assembly and placed*
   3809  * in locore.s.  They are here now as stubs so that the pmap module can *
   3810  * be linked as a standalone user program for testing.                  *
   3811  ************************************************************************/
   3812 /* flush_atc_crp			INTERNAL
   3813  **
   3814  * Flush all page descriptors derived from the given CPU Root Pointer
   3815  * (CRP), or 'A' table as it is known here, from the 68851's automatic
   3816  * cache.
   3817  */
   3818 void
   3819 flush_atc_crp(int a_tbl)
   3820 {
   3821 	mmu_long_rp_t rp;
   3822 
   3823 	/* Create a temporary root table pointer that points to the
   3824 	 * given A table.
   3825 	 */
   3826 	rp.attr.raw = ~MMU_LONG_RP_LU;
   3827 	rp.addr.raw = (unsigned int) a_tbl;
   3828 
   3829 	mmu_pflushr(&rp);
   3830 	/* mmu_pflushr:
   3831 	 * 	movel   sp(4)@,a0
   3832 	 * 	pflushr a0@
   3833 	 *	rts
   3834 	 */
   3835 }
   3836 #endif /* NOT_YET */
   3837