/*	$NetBSD: pmap.c,v 1.8 1997/02/14 03:56:50 gwr Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE is a physical address in memory
 * to which the remaining 13 bits of the VA are added, producing the
 * mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */
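
/*
 * To make the 7/6/6/13 bit split described above concrete, here is an
 * illustrative (not compiled) decomposition of a VA into its four
 * fields.  These macro names are hypothetical; the real index macros
 * used in this file are MMU_TIA(), MMU_TIB() and MMU_TIC() from the
 * pte header.
 */
#if 0
#define	TBL_A_IDX(va)	(((va) >> 25) & 0x7f)	/* bits 31..25: A index   */
#define	TBL_B_IDX(va)	(((va) >> 19) & 0x3f)	/* bits 24..19: B index   */
#define	TBL_C_IDX(va)	(((va) >> 13) & 0x3f)	/* bits 18..13: C index   */
#define	PG_OFFSET(va)	((va) & 0x1fff)		/* bits 12..0: page offset */
#endif	/* 0 */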

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * a MC68851 chip.  Instead, it has a version of this MMU that is part of
 * the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */
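
/*
 * Illustrative sketch (not compiled) of the flush discipline described
 * above: after editing a descriptor in a live table, purge the stale
 * ATC entry for that VA.  TBIS() is the locore routine used for exactly
 * this purpose later in this file.
 */
#if 0
	pte->attr.raw = MMU_DT_INVALID;	/* edit the live table...	     */
	TBIS(va);			/* ...then flush the stale ATC entry */
#endif	/* 0 */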

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/queue.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/cpu.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/machdep.h>
#include <machine/mon.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

extern void copypage __P((const void*, void*));
extern void zeropage __P((void*));

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define	DPRINT(args) if (pmap_debug) printf args

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define	DPRINT(args)	/* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr'
 * structures.  One for each MMU table in the system.
 *
 *                  MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *      towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *              | Kernel     MMU A level table                          |
 * kernBbase -> +-------------------------------------------------------+
 *              | Kernel     MMU B level tables                         |
 * kernCbase -> +-------------------------------------------------------+
 *              |                                                       |
 *              | Kernel     MMU C level tables                         |
 *              |                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *              | User       MMU C level tables                         |
 * mmuAbase  -> +-------------------------------------------------------+
 *              |                                                       |
 *              | User       MMU A level tables                         |
 *              |                                                       |
 * mmuBbase  -> +-------------------------------------------------------+
 *              | User       MMU B level tables                         |
 * tmgrAbase -> +-------------------------------------------------------+
 *              |  TMGR A level table structures                        |
 * tmgrBbase -> +-------------------------------------------------------+
 *              |  TMGR B level table structures                        |
 * tmgrCbase -> +-------------------------------------------------------+
 *              |  TMGR C level table structures                        |
 * pvbase    -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list heads)       |
 * pvebase   -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list elements)    |
 *              |                                                       |
 *              +-------------------------------------------------------+
 *      towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vm_offset_t	kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_long_dte_t	*mmuAbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_short_pte_t	*mmuCbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t		*pvbase;
static pv_elem_t	*pvebase;
struct pmap		kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;

/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
static boolean_t
	pv_initialized = FALSE,		/* PV system has been initialized. */
	tmp_vpages_inuse = FALSE,	/*
					 * Temp. virtual pages are in use.
					 * (see pmap_copy_page, et. al.)
					 */
	bootstrap_alloc_enabled = FALSE; /* Safe to use pmap_bootstrap_alloc(). */

/*
 * XXX:  For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vm_offset_t	virtual_avail, virtual_end;
/* Physical address space available: */
vm_offset_t	avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vm_offset_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
vm_offset_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vm_offset_t tmp_vpages[2];

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_80_MEM_BANKS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#define	NUM_A_TABLES	16
#define	NUM_B_TABLES	32
#define	NUM_C_TABLES	64

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define	NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)
#define	NUM_KERN_PTES \
	(sun3x_btop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS))
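
/*
 * A back-of-the-envelope check of the user capacity above, assuming
 * MMU_C_TBL_SIZE is 64 (the 6-bit C index) and 8KB pages (the 13-bit
 * offset), per the table-walk comment at the top of this file:
 * 64 C tables * 64 PTEs * 8KB = 32MB of user virtual space mappable
 * at any one time.
 */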

/*************************** MISCELLANEOUS MACROS *************************/
#define PMAP_LOCK()	;	/* Nothing, for now */
#define PMAP_UNLOCK()	;	/* same. */
#define	NULL	0

static INLINE void *      mmu_ptov __P((vm_offset_t pa));
static INLINE vm_offset_t mmu_vtop __P((void * va));

#if	0
static INLINE a_tmgr_t * mmuA2tmgr __P((mmu_long_dte_t *));
#endif
static INLINE b_tmgr_t * mmuB2tmgr __P((mmu_short_dte_t *));
static INLINE c_tmgr_t * mmuC2tmgr __P((mmu_short_pte_t *));

static INLINE pv_t   *pa2pv __P((vm_offset_t pa));
static INLINE int     pteidx __P((mmu_short_pte_t *));
static INLINE pmap_t  current_pmap __P((void));

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(pa)
	vm_offset_t pa;
{
	register vm_offset_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return ((void*)va);
}

static INLINE vm_offset_t
mmu_vtop(vva)
	void *vva;
{
	register vm_offset_t va;

	va = (vm_offset_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return (va - KERNBASE);
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/* This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmuAtbl)
	mmu_long_dte_t *mmuAtbl;
{
	register int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return (&Atmgrbase[idx]);
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmuBtbl)
	mmu_short_dte_t *mmuBtbl;
{
	register int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return (&Btmgrbase[idx]);
}

/* mmuC2tmgr			INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmuCtbl)
	mmu_short_pte_t *mmuCtbl;
{
	register int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return (&Ctmgrbase[idx]);
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		sun3x_btop(pa)\
 *	])
 */

/* pa2pv			INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(pa)
	vm_offset_t pa;
{
	register struct pmap_physmem_struct *bank;
	register int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + sun3x_btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}
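
/*
 * Worked example of the bank arithmetic above, using hypothetical
 * numbers: if the second bank starts at pa 0x01000000 and its
 * pmem_pvbase is 512, then pa 0x01004000 falls in that bank and yields
 * idx = 512 + sun3x_btop(0x4000) = 514 with 8KB pages.
 */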

/* pteidx			INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(pte)
	mmu_short_pte_t *pte;
{
	return (pte - kernCbase);
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curproc" appears...
 */
static INLINE pmap_t
current_pmap()
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	pmap_t	pmap;

	p = curproc;	/* XXX */
	vm = p->p_vmspace;
	map = &vm->vm_map;
	pmap = vm_map_pmap(map);

	return (pmap);
}

/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/** External functions
 ** - functions used within this module but written elsewhere.
 **   both of these functions are in locore.s
 ** XXX - These functions were later replaced with their more cryptic
 **   hp300 counterparts.  They may be removed now.
 **/
#if 0	/* deprecated mmu */
void   mmu_seturp __P((vm_offset_t));
void   mmu_flush __P((int, vm_offset_t));
void   mmu_flusha __P((void));
#endif	/* 0 */

/** Internal functions
 ** - all functions used only within this module are defined in
 **   pmap_pvt.h
 **/

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **/
#ifdef	INCLUDED_IN_PMAP_H
void   pmap_bootstrap __P((void));
void  *pmap_bootstrap_alloc __P((int));
void   pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
pmap_t pmap_create __P((vm_size_t));
void   pmap_destroy __P((pmap_t));
void   pmap_reference __P((pmap_t));
boolean_t pmap_is_referenced __P((vm_offset_t));
boolean_t pmap_is_modified __P((vm_offset_t));
void   pmap_clear_modify __P((vm_offset_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
void   pmap_activate __P((pmap_t));
int    pmap_page_index __P((vm_offset_t));
u_int  pmap_free_pages __P((void));
#endif	/* INCLUDED_IN_PMAP_H */

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap			INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from sun3x_vm_init()
 * in _startup.c.
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 *   system implement pmap_steal_memory() is redundant.
 *   Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(nextva)
	vm_offset_t nextva;
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vm_offset_t va, pa, eva;
	int b, c, i, j;	/* running table counts */
	int size;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.)  It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/*
	 * Determine the range of kernel virtual and physical
	 * space available.  Note that we ABSOLUTELY DEPEND on
	 * the fact that the first bank of memory (4MB) is
	 * mapped linearly to KERNBASE (which we guaranteed in
	 * the first instructions of locore.s).
	 * That is plenty for our bootstrap work.
	 */
	virtual_avail = sun3x_round_page(nextva);
	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Don't need avail_start til later. */

	/* We may now call pmap_bootstrap_alloc(). */
	bootstrap_alloc_enabled = TRUE;

	/*
	 * This is a somewhat unrolled loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks.  The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 * XXX - Use:  do { ... } while (membank->next) instead?
	 * XXX - Why copy this stuff at all?  -gwr
	 *     - It is needed in pa2pv().
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	while (membank->next) {
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		/* This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
		membank = membank->next;
	}

	/*
	 * XXX The last bank of memory should be reduced to exclude the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.  XXX - See below - Fix!
	 */
	pmap_membank->pmem_start = membank->address;
	pmap_membank->pmem_end = membank->address + membank->size;
	pmap_membank->pmem_next = NULL;
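
	/*
	 * The do/while form suggested in the XXX above would fold the
	 * last-bank copy into the loop.  A sketch (not compiled), kept
	 * here purely for illustration; note it also subsumes the later
	 * `total_phys_mem += membank->size' fixup:
	 */
#if 0
	do {
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		if (membank->next == NULL)
			break;
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
		membank = membank->next;
	} while (1);
	pmap_membank->pmem_next = NULL;	/* terminate the last bank */
#endif	/* 0 */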

#if 0	/* XXX - Need to integrate this! */
	/*
	 * The last few pages of physical memory are "owned" by
	 * the PROM.  The total amount of memory we are allowed
	 * to use is given by the romvec pointer. -gwr
	 *
	 * We should dedicate different variables for 'useable'
	 * and 'physically available'.  Most users are used to the
	 * kernel reporting the amount of memory 'physically available'
	 * as opposed to 'useable by the kernel' at boot time. -j
	 */
	total_phys_mem = *romVectorPtr->memoryAvail;
#endif	/* XXX */

	total_phys_mem += membank->size;	/* XXX see above */
	physmem = btoc(total_phys_mem);

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank.  We use this only to
	 * determine if a physical address is "managed" memory.
	 *
	 * XXX - The setting of avail_end is a temporary ROM saving hack.
	 */
	avail_end = pmap_membank->pmem_end -
		(total_phys_mem - *romVectorPtr->memoryAvail);
	avail_end = sun3x_trunc_page(avail_end);

	/*
	 * The first step is to allocate MMU tables.
	 * Note: All must be aligned on 256 byte boundaries.
	 *
	 * Start with the top level, or 'A' table.
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	bzero(kernAbase, size);

	/*
	 * Allocate enough B tables to map from KERNBASE to
	 * the end of VM.
	 */
	size = sizeof(mmu_short_dte_t) *
		(MMU_A_TBL_SIZE - MMU_TIA(KERNBASE)) * MMU_B_TBL_SIZE;
	kernBbase = pmap_bootstrap_alloc(size);
	bzero(kernBbase, size);

	/*
	 * Allocate enough C tables.
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below.  XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 */
	size = sizeof (mmu_short_pte_t) *
		(MMU_A_TBL_SIZE - MMU_TIA(KERNBASE))
		* MMU_B_TBL_SIZE * MMU_C_TBL_SIZE;
	kernCbase = pmap_bootstrap_alloc(size);
	bzero(kernCbase, size);
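
	/*
	 * A sketch of the single-allocation alternative suggested in the
	 * XXX above (not compiled; `ksize' and `usize' are hypothetical
	 * locals): allocate the kernel and user C tables together, then
	 * derive mmuCbase from kernCbase.
	 */
#if 0
	ksize = sizeof(mmu_short_pte_t) * (MMU_A_TBL_SIZE - MMU_TIA(KERNBASE))
		* MMU_B_TBL_SIZE * MMU_C_TBL_SIZE;
	usize = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(ksize + usize);
	bzero(kernCbase, ksize + usize);
	mmuCbase = (mmu_short_pte_t *)((char *)kernCbase + ksize);
#endif	/* 0 */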

	/*
	 * Allocate user MMU tables.
	 * These must be aligned on 256 byte boundaries.
	 *
	 * As noted in the comment preceding the allocation of the kernel
	 * C tables above, user-level C tables must be flush with (up
	 * against) the kernel-level C tables.
	 */
	mmuCbase = (mmu_short_pte_t *)
		pmap_bootstrap_alloc(sizeof(mmu_short_pte_t)
		* MMU_C_TBL_SIZE
		* NUM_C_TABLES);
	mmuAbase = (mmu_long_dte_t *)
		pmap_bootstrap_alloc(sizeof(mmu_long_dte_t)
		* MMU_A_TBL_SIZE
		* NUM_A_TABLES);
	mmuBbase = (mmu_short_dte_t *)
		pmap_bootstrap_alloc(sizeof(mmu_short_dte_t)
		* MMU_B_TBL_SIZE
		* NUM_B_TABLES);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase.  The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
			MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j=0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
				| MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}

	/* XXX - Doing kernel_pmap a little further down. */

	pmap_alloc_usermmu();	/* Allocate user MMU tables.		*/
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.	*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.	*/

	/*
	 * We are now done with pmap_bootstrap_alloc().  Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(NBPG);
	bootstrap_alloc_enabled = FALSE;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them.  Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += NBPG;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += NBPG;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space.  Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vm_offset_t) KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page.  The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * XXX - Make it non-cached?
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += NBPG; pa += NBPG;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += NBPG; pa += NBPG;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default).  Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page.  Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = sun3x_trunc_page((vm_offset_t)etext);
	for (; va < eva; va += NBPG, pa += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += NBPG, pa += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU.  But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();

	/*
	 * XXX - Todo: Fill in the PROM's level-A table for the VA range
	 * KERNBASE ... 0xFE000000 so that the PROM monitor can see our
	 * mappings.  This should make bouncing in/out of PROM easier.
	 * XXX - Add (i.e.) pmap_setup_prommap();
	 */

	/* Notify the VM system of our page size. */
	PAGE_SIZE = NBPG;
	vm_set_page_size();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu()
{
	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv()
{
	int	i;
	unsigned int	total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system.  Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < SUN3X_80_MEM_BANKS; i++) {
		avail_mem[i].pmem_pvbase = sun3x_btop(total_mem);
		total_mem += avail_mem[i].pmem_end -
			avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
#ifdef	PMAP_DEBUG
	if (total_mem != total_phys_mem)
		panic("pmap_alloc_pv did not arrive at correct page count");
#endif

	pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
		sun3x_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr()
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
		* NUM_A_TABLES);
	Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
		* NUM_B_TABLES);
	Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
		* NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
		sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom()
{
	MachMonRomVector *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in MON_KDB_START...MONEND
	 * Note: mon_ctbl[0] maps MON_KDB_START
	 */
	mon_ctbl = *romp->monptaddr;
	i = sun3x_btop(MON_KDB_START - KERNBASE);
	kpte = &kernCbase[i];
	len = sun3x_btop(MONEND - MON_KDB_START);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * XXX - This does not appear to be necessary, but
	 * I'm not sure yet if it is or not. -gwr
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = sun3x_btop(MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = sun3x_btop(MON_DVMA_SIZE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu()
{
	struct mmu_rootptr *crp;

	crp = &kernel_crp;
	loadcrp(crp);
}

/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init()
{
	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables()
{
	int i;
	a_tmgr_t *a_tbl;

	for (i=0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry.  Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the kernel (`proc0')
		 * mapping.  This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
			sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables()
{
	int i,j;
	b_tmgr_t *b_tbl;

	for (i=0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,	*/
		b_tbl->bt_pidx = 0;		/* parent index,	*/
		b_tbl->bt_wcnt = 0;		/* wired entry count,	*/
		b_tbl->bt_ecnt = 0;		/* valid entry count.	*/

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j=0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables()
{
	int i,j;
	c_tmgr_t *c_tbl;

	for (i=0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,	*/
		c_tbl->ct_pidx = 0;		/* parent index,	*/
		c_tbl->ct_wcnt = 0;		/* wired entry count,	*/
		c_tbl->ct_ecnt = 0;		/* valid entry count.	*/

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j=0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv()
{
	int	i;

	/* Initialize every PV head. */
	for (i = 0; i < sun3x_btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags	*/
	}

	pv_initialized = TRUE;
}

/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table()
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = a_pool.tqh_first;
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool.  Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources.  This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here.  This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is FALSE.  This
	 * indicates that the table should not be relinked into the A table
	 * pool.  That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		pmap = tbl->at_parent;
		free_a_table(tbl, FALSE);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
#ifdef	NON_REENTRANT
	/*
	 * If the table isn't to be wired down, re-insert it at the
	 * end of the pool.
	 */
	if (!wired)
		/*
		 * Quandary - XXX
		 * Would it be better to let the calling function insert this
		 * table into the queue?  By inserting it here, we are allowing
		 * it to be stolen immediately.  The calling function is
		 * probably not expecting to use a table that it is not
		 * assured full control of.
		 * Answer - In the interest of re-entrancy, it is best to let
		 * the calling function determine when a table is available
		 * for use.  Therefore this code block is not used.
		 */
		TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
#endif	/* NON_REENTRANT */
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table()
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = b_pool.tqh_first;
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, FALSE);
	}
#ifdef	NON_REENTRANT
	if (!wired)
		/* XXX see quandary in get_a_table */
		/* XXX start lock */
		TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
		/* XXX end lock */
#endif	/* NON_REENTRANT */
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table()
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = c_pool.tqh_first;
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, FALSE);
	}
#ifdef	NON_REENTRANT
	if (!wired)
		/* XXX See quandary in get_a_table */
		/* XXX start lock */
		TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
		/* XXX end lock */
#endif	/* NON_REENTRANT */

	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tbl, relink)
	a_tmgr_t *a_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*tmgr;

	/*
	 * Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * XXX - Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/*
	 * Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * XXX - Not implemented in sun3x.
	 */

	/*
	 * All A tables in the system should retain a map for the
	 * kernel.  If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i=0; i < MMU_TIA(KERNBASE); i++) {
			/*
			 * If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/*
				 * The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base address of the
				 *    B table pointed to in the A table
				 *    entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = mmu_ptov(dte[i].addr.raw);
				tmgr = mmuB2tmgr(dtbl);
				removed_cnt += free_b_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		a_tbl->at_ecnt = 0;
	}
	if (relink) {
		a_tbl->at_parent = NULL;
		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
	}
	return removed_cnt;
}

/* free_b_table			INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tbl, relink)
	b_tmgr_t *b_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*tmgr;

	removed_cnt = 0;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i=0; i < MMU_B_TBL_SIZE; i++) {
			if (MMU_VALID_DT(dte[i])) {
				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
				tmgr = mmuC2tmgr(dtbl);
				removed_cnt += free_c_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		b_tbl->bt_ecnt = 0;
	}

	if (relink) {
		b_tbl->bt_parent = NULL;
		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
	}
	return removed_cnt;
}

/* free_c_table			INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tbl, relink)
	c_tmgr_t *c_tbl;
	boolean_t relink;
{
	int i, removed_cnt;

	removed_cnt = 0;
	if (c_tbl->ct_ecnt) {
		for (i=0; i < MMU_C_TBL_SIZE; i++) {
			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
				removed_cnt++;
			}
		}
		c_tbl->ct_ecnt = 0;
	}

	if (relink) {
		c_tbl->ct_parent = NULL;
		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	}
	return removed_cnt;
}

#if	0
/* free_c_table_novalid			INTERNAL
 **
 * Frees the given C table manager without checking to see whether
 * or not it contains any valid page descriptors as it is assumed
 * that it does not.
 */
void
free_c_table_novalid(c_tbl)
	c_tmgr_t *c_tbl;
{
	TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
	TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
	c_tbl->ct_parent->bt_ecnt--;
	/*
	 * XXX - Should call equiv. of 'free_b_table_novalid' here if
	 * we just removed the last entry of the parent B table.
	 * But I want to ensure that this will not endanger pmap_enter()
	 * with sudden removal of tables it is working with.
	 *
	 * We should probably add another field to each table, indicating
	 * whether or not it is 'locked', ie. in the process of being
	 * modified.
	 */
	c_tbl->ct_parent = NULL;
}
#endif	/* 0 */

/* pmap_remove_pte			INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to.  This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.
 */
void
pmap_remove_pte(pte)
	mmu_short_pte_t *pte;
{
	u_short     pv_idx, targ_idx;
	int         s;
	vm_offset_t pa;
	pv_t       *pv;

	pa = MMU_PTE_PA(*pte);
	if (is_managed(pa)) {
		pv = pa2pv(pa);
		targ_idx = pteidx(pte);	/* Index of PTE being removed	*/

		/*
		 * If the PTE being removed is the first (or only) PTE in
		 * the list of PTEs currently mapped to this page, remove the
		 * PTE by changing the index found on the PV head.  Otherwise
		 * a linear search through the list will have to be executed
		 * in order to find the PVE which points to the PTE being
		 * removed, so that it may be modified to point to its new
		 * neighbor.
		 */
		s = splimp();
		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
		if (pv_idx == targ_idx) {
			pv->pv_idx = pvebase[targ_idx].pve_next;
		} else {
			/*
			 * Find the PV element which points to the target
			 * element.
			 */
			while (pvebase[pv_idx].pve_next != targ_idx) {
				pv_idx = pvebase[pv_idx].pve_next;
#ifdef	DIAGNOSTIC
				if (pv_idx == PVE_EOL)
					panic("pmap_remove_pte: pv list end!");
#endif
			}

			/*
			 * At this point, pv_idx is the index of the PV
			 * element just before the target element in the list.
			 * Unlink the target.
			 */
			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
		}
		/*
		 * Save the mod/ref bits of the pte by simply
		 * ORing the entire pte onto the pv_flags member
		 * of the pv structure.
		 * There is no need to use a separate bit pattern
		 * for usage information on the pv head than that
		 * which is used on the MMU ptes.
		 */
		pv->pv_flags |= (u_short) pte->attr.raw;
		splx(s);
	}

	pte->attr.raw = MMU_DT_INVALID;
}

#if 0	/* XXX - I am eliminating this function. -j */
/* pmap_dereference_pte			INTERNAL
 **
 * Update the necessary reference counts in any tables and pmaps to
 * reflect the removal of the given pte.  Only called when the pte's
 * associated pmap is unknown.  This only occurs in the PV call
 * 'pmap_page_protect()' with a protection of VM_PROT_NONE, which means
 * that all references to a given physical page must be removed.
 */
void
pmap_dereference_pte(pte)
	mmu_short_pte_t *pte;
{
	vm_offset_t va;
	c_tmgr_t   *c_tbl;
	pmap_t      pmap;

	va = pmap_get_pteinfo(pte, &pmap, &c_tbl);
	/*
	 * Flush the translation cache of the page mapped by the PTE, should
	 * it prove to be in the current pmap.  Kernel mappings appear in
	 * all address spaces, so they always should be flushed.
	 */
	if (pmap == pmap_kernel() || pmap == current_pmap())
		TBIS(va);

	/*
	 * If the mapping belongs to a user map, update the necessary
	 * reference counts in the table manager.  XXX - It would be
	 * much easier to keep the resident count in the c_tmgr_t -gwr
	 */
	if (pmap != pmap_kernel()) {
		/*
		 * Most of the situations in which pmap_dereference_pte() is
		 * called are usually temporary removals of a mapping.  Often
		 * the mapping is reinserted shortly afterwards.  If the parent
		 * C table's valid entry count reaches zero as a result of
		 * removing this mapping, we could return it to the free pool,
		 * but we leave it alone because it is likely to be used as
		 * stated above.
		 */
		c_tbl->ct_ecnt--;
		pmap->pm_stats.resident_count--;
	}
}
#endif	/* 0 - function elimination */

1636 /* pmap_stroll INTERNAL
1637 **
1638 * Retrieve the addresses of all table managers involved in the mapping of
1639 * the given virtual address. If the table walk completed sucessfully,
1640 * return TRUE. If it was only partially sucessful, return FALSE.
1641 * The table walk performed by this function is important to many other
1642 * functions in this module.
1643 *
1644 * Note: This function ought to be easier to read.
1645 */
1646 boolean_t
1647 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
1648 pmap_t pmap;
1649 vm_offset_t va;
1650 a_tmgr_t **a_tbl;
1651 b_tmgr_t **b_tbl;
1652 c_tmgr_t **c_tbl;
1653 mmu_short_pte_t **pte;
1654 int *a_idx, *b_idx, *pte_idx;
1655 {
1656 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1657 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1658
1659 if (pmap == pmap_kernel())
1660 return FALSE;
1661
1662 /* Does the given pmap have its own A table? */
1663 *a_tbl = pmap->pm_a_tmgr;
1664 if (*a_tbl == NULL)
1665 return FALSE; /* No. Return unknown. */
1666 /* Does the A table have a valid B table
1667 * under the corresponding table entry?
1668 */
1669 *a_idx = MMU_TIA(va);
1670 a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1671 if (!MMU_VALID_DT(*a_dte))
1672 return FALSE; /* No. Return unknown. */
1673 /* Yes. Extract B table from the A table. */
1674 *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1675 /* Does the B table have a valid C table
1676 * under the corresponding table entry?
1677 */
1678 *b_idx = MMU_TIB(va);
1679 b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1680 if (!MMU_VALID_DT(*b_dte))
1681 return FALSE; /* No. Return unknown. */
1682 /* Yes. Extract C table from the B table. */
1683 *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1684 *pte_idx = MMU_TIC(va);
1685 *pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1686
1687 return TRUE;
1688 }
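/*
 * A sketch of typical pmap_stroll() usage (mirroring the calls made
 * in pmap_protect() and pmap_extract() below): walk to the PTE that
 * maps 'va' and operate on it only if every level of the walk was
 * valid.
 *
 *	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
 *	    &a_idx, &b_idx, &pte_idx) == FALSE)
 *		return;			-- walk was incomplete --
 *	if (MMU_VALID_DT(*pte))
 *		-- operate on the PTE and/or its table managers --
 */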
1689
1690 /* pmap_enter INTERFACE
1691 **
1692 * Called by the kernel to map a virtual address
1693 * to a physical address in the given process map.
1694 *
1695 * Note: this function should apply an exclusive lock
1696 * on the pmap system for its duration. (it certainly
1697 * would save my hair!!)
1698 * This function ought to be easier to read.
1699 */
1700 void
1701 pmap_enter(pmap, va, pa, prot, wired)
1702 pmap_t pmap;
1703 vm_offset_t va;
1704 vm_offset_t pa;
1705 vm_prot_t prot;
1706 boolean_t wired;
1707 {
1708 boolean_t insert, managed; /* Marks the need for PV insertion.*/
1709 u_short nidx; /* PV list index */
1710 int s; /* Used for splimp()/splx() */
1711 int flags; /* Mapping flags. eg. Cache inhibit */
1712 u_int a_idx, b_idx, pte_idx; /* table indices */
1713 a_tmgr_t *a_tbl; /* A: long descriptor table manager */
1714 b_tmgr_t *b_tbl; /* B: short descriptor table manager */
1715 c_tmgr_t *c_tbl; /* C: short page table manager */
1716 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1717 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1718 mmu_short_pte_t *c_pte; /* C: short page descriptor table */
1719 pv_t *pv; /* pv list head */
1720 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */
1721
1722 if (pmap == NULL)
1723 return;
1724 if (pmap == pmap_kernel()) {
1725 pmap_enter_kernel(va, pa, prot);
1726 return;
1727 }
1728
1729 flags = (pa & ~MMU_PAGE_MASK);
1730 pa &= MMU_PAGE_MASK;
1731
1732 /*
1733 * Determine if the physical address being mapped is managed.
1734 * If it isn't, the mapping should be cache inhibited. (This is
1735 * applied later in the function.) XXX - Why non-cached? -gwr
1736 */
1737 if ((managed = is_managed(pa)) == FALSE)
1738 flags |= PMAP_NC;
1739
1740 /*
1741 * For user mappings we walk along the MMU tables of the given
1742 * pmap, reaching a PTE which describes the virtual page being
1743 * mapped or changed. If any level of the walk ends in an invalid
1744 * entry, a table must be allocated and the entry must be updated
1745 * to point to it.
1746 * There is a bit of confusion as to whether this code must be
1747 * re-entrant. For now we will assume it is. To support
1748 * re-entrancy we must unlink tables from the table pool before
1749 * we assume we may use them. Tables are re-linked into the pool
1750 * when we are finished with them at the end of the function.
1751 * But I don't feel like doing that until we have proof that this
1752 * needs to be re-entrant.
1753 * 'llevel' records which tables need to be relinked.
1754 */
1755 llevel = NONE;
1756
1757 /*
1758 * Step 1 - Retrieve the A table from the pmap. If it has no
1759 * A table, allocate a new one from the available pool.
1760 */
1761
1762 a_tbl = pmap->pm_a_tmgr;
1763 if (a_tbl == NULL) {
1764 /*
1765 * This pmap does not currently have an A table. Allocate
1766 * a new one.
1767 */
1768 a_tbl = get_a_table();
1769 a_tbl->at_parent = pmap;
1770
1771 /*
1772 * Assign this new A table to the pmap, and calculate its
1773 * physical address so that loadcrp() can be used to make
1774 * the table active.
1775 */
1776 pmap->pm_a_tmgr = a_tbl;
1777 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1778
1779 /*
1780 * If the process receiving a new A table is the current
1781 * process, we are responsible for setting the MMU so that
1782 * it becomes the current address space.
1783 */
1784 if (pmap == current_pmap())
1785 pmap_activate(pmap);
1786
1787 if (!wired)
1788 llevel = NEWA;
1789 } else {
1790 /*
1791 * Use the A table already allocated for this pmap.
1792 * Unlink it from the A table pool if necessary.
1793 */
1794 if (wired && !a_tbl->at_wcnt)
1795 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1796 }
1797
1798 /*
1799 * Step 2 - Walk into the B table. If there is no valid B table,
1800 * allocate one.
1801 */
1802
1803 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1804 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1805 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1806 /* The descriptor is valid. Use the B table it points to. */
1807 /*************************************
1808 * a_idx *
1809 * v *
1810 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1811 * | | | | | | | | | | | | *
1812 * +-+-+-+-+-+-+-+-+-+-+-+- *
1813 * | *
1814 * \- b_tbl -> +-+- *
1815 * | | *
1816 * +-+- *
1817 *************************************/
1818 b_dte = mmu_ptov(a_dte->addr.raw);
1819 b_tbl = mmuB2tmgr(b_dte);
1820
1821 /*
1822 * If the requested mapping must be wired, but this table
1823 * being used to map it is not, the table must be removed
1824 * from the available pool and its wired entry count
1825 * incremented.
1826 */
1827 if (wired && !b_tbl->bt_wcnt) {
1828 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1829 a_tbl->at_wcnt++;
1830 }
1831 } else {
1832 /* The descriptor is invalid. Allocate a new B table. */
1833 b_tbl = get_b_table();
1834
1835 /* Point the parent A table descriptor to this new B table. */
1836 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1837 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1838 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1839
1840 /* Create the necessary back references to the parent table */
1841 b_tbl->bt_parent = a_tbl;
1842 b_tbl->bt_pidx = a_idx;
1843
1844 /*
1845 * If this table is to be wired, make sure the parent A table
1846 * wired count is updated to reflect that it has another wired
1847 * entry.
1848 */
1849 if (wired)
1850 a_tbl->at_wcnt++;
1851 else if (llevel == NONE)
1852 llevel = NEWB;
1853 }
1854
1855 /*
1856 * Step 3 - Walk into the C table, if there is no valid C table,
1857 * allocate one.
1858 */
1859
1860 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1861 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1862 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1863 /* The descriptor is valid. Use the C table it points to. */
1864 /**************************************
1865 * c_idx *
1866 * | v *
1867 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1868 * | | | | | | | | | | | *
1869 * +-+-+-+-+-+-+-+-+-+-+- *
1870 * | *
1871 * \- c_tbl -> +-+-- *
1872 * | | | *
1873 * +-+-- *
1874 **************************************/
1875 c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1876 c_tbl = mmuC2tmgr(c_pte);
1877
1878 /* If mapping is wired and table is not */
1879 if (wired && !c_tbl->ct_wcnt) {
1880 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1881 b_tbl->bt_wcnt++;
1882 }
1883 } else {
1884 /* The descriptor is invalid. Allocate a new C table. */
1885 c_tbl = get_c_table();
1886
1887 /* Point the parent B table descriptor to this new C table. */
1888 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1889 b_dte->attr.raw |= MMU_DT_SHORT;
1890 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1891
1892 /* Create the necessary back references to the parent table */
1893 c_tbl->ct_parent = b_tbl;
1894 c_tbl->ct_pidx = b_idx;
1895
1896 /*
1897 * If this table is to be wired, make sure the parent B table
1898 * wired count is updated to reflect that it has another wired
1899 * entry.
1900 */
1901 if (wired)
1902 b_tbl->bt_wcnt++;
1903 else if (llevel == NONE)
1904 llevel = NEWC;
1905 }
1906
1907 /*
1908 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1909 * slot of the C table, describing the PA to which the VA is mapped.
1910 */
1911
1912 pte_idx = MMU_TIC(va);
1913 c_pte = &c_tbl->ct_dtbl[pte_idx];
1914 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1915 /*
1916 * The PTE is currently valid. This particular call
1917 * is just a synonym for one (or more) of the following
1918 * operations:
1919 * change protection of a page
1920 * change wiring status of a page
1921 * remove the mapping of a page
1922 *
1923 * XXX - Semi critical: This code should unwire the PTE
1924 * and, possibly, associated parent tables if this is a
1925 * change wiring operation. Currently it does not.
1926 *
1927 * This may be ok if pmap_change_wiring() is the only
1928 * interface used to UNWIRE a page.
1929 */
1930
1931 /* First check if this is a wiring operation. */
1932 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1933 /*
1934 * The PTE is already wired. To prevent it from being
1935 * counted as a new wiring operation, reset the 'wired'
1936 * variable.
1937 */
1938 wired = FALSE;
1939 }
1940
1941 /* Is the new address the same as the old? */
1942 if (MMU_PTE_PA(*c_pte) == pa) {
1943 /*
1944 * Yes, mark that it does not need to be reinserted
1945 * into the PV list.
1946 */
1947 insert = FALSE;
1948
1949 /*
1950 * Clear all but the modified, referenced and wired
1951 * bits on the PTE.
1952 */
1953 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1954 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1955 } else {
1956 /* No, remove the old entry */
1957 pmap_remove_pte(c_pte);
1958 insert = TRUE;
1959 }
1960
1961 /*
1962 * TLB flush is only necessary if modifying current map.
1963 * However, in pmap_enter(), the pmap almost always IS
1964 * the current pmap, so don't even bother to check.
1965 */
1966 TBIS(va);
1967 } else {
1968 /*
1969 * The PTE is invalid. Increment the valid entry count in
1970 * the C table manager to reflect the addition of a new entry.
1971 */
1972 c_tbl->ct_ecnt++;
1973
1974 /* XXX - temporarily make sure the PTE is cleared. */
1975 c_pte->attr.raw = 0;
1976
1977 /* It will also need to be inserted into the PV list. */
1978 insert = TRUE;
1979 }
1980
1981 /*
1982 * If the page is changing from unwired to wired status, set an unused bit
1983 * within the PTE to indicate that it is wired. Also increment the
1984 * wired entry count in the C table manager.
1985 */
1986 if (wired) {
1987 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1988 c_tbl->ct_wcnt++;
1989 }
1990
1991 /*
1992 * Map the page, being careful to preserve modify/reference/wired
1993 * bits. At this point it is assumed that the PTE either has no bits
1994 * set, or if there are set bits, they are only the modified, referenced or
1995 * wired bits. If not, the following statement will cause erratic
1996 * behavior.
1997 */
1998 #ifdef PMAP_DEBUG
1999 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
2000 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
2001 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
2002 Debugger();
2003 }
2004 #endif
2005 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2006
2007 /*
2008 * If the mapping should be read-only, set the write protect
2009 * bit in the PTE.
2010 */
2011 if (!(prot & VM_PROT_WRITE))
2012 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2013
2014 /*
2015 * If the mapping should be cache inhibited (indicated by the flag
2016 * bits found in the low-order bits of the physical address),
2017 * mark the PTE as a cache-inhibited page.
2018 */
2019 if (flags & PMAP_NC)
2020 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2021
2022 /*
2023 * If the physical address being mapped is managed by the PV
2024 * system then link the pte into the list of pages mapped to that
2025 * address.
2026 */
2027 if (insert && managed) {
2028 pv = pa2pv(pa);
2029 nidx = pteidx(c_pte);
2030
2031 s = splimp();
2032 pvebase[nidx].pve_next = pv->pv_idx;
2033 pv->pv_idx = nidx;
2034 splx(s);
2035 }
2036
2037 /* Move any allocated tables back into the active pool. */
2038
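/*
 * Note that the cases below fall through deliberately: 'llevel'
 * records only the highest level of table newly allocated, and a
 * new A table implies that new B and C tables were allocated
 * beneath it as well.
 */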
2039 switch (llevel) {
2040 case NEWA:
2041 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2042 /* FALLTHROUGH */
2043 case NEWB:
2044 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2045 /* FALLTHROUGH */
2046 case NEWC:
2047 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2048 /* FALLTHROUGH */
2049 default:
2050 break;
2051 }
2052 }
2053
2054 /* pmap_enter_kernel INTERNAL
2055 **
2056 * Map the given virtual address to the given physical address within the
2057 * kernel address space. This function exists because the kernel map does
2058 * not do dynamic table allocation. It consists of a contiguous array of ptes
2059 * and can be edited directly without the need to walk through any tables.
2060 *
2061 * XXX: "Danger, Will Robinson!"
2062 * Note that the kernel should never take a fault on any page
2063 * between [ KERNBASE .. virtual_avail ] and this is checked in
2064 * trap.c for kernel-mode MMU faults. This means that mappings
2065 * created in that range must be implicitly wired. -gwr
2066 */
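/*
 * Because the kernel map is a flat PTE array, locating a mapping is
 * simple arithmetic rather than a table walk. A sketch (assuming
 * 8K pages, so that sun3x_btop(0x4000) == 2):
 *
 *	va = KERNBASE + 0x4000  -->  pte = &kernCbase[2],
 *
 * i.e. the third kernel PTE.
 */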
2067 void
2068 pmap_enter_kernel(va, pa, prot)
2069 vm_offset_t va;
2070 vm_offset_t pa;
2071 vm_prot_t prot;
2072 {
2073 boolean_t was_valid, insert;
2074 u_short pte_idx, pv_idx;
2075 int s, flags;
2076 mmu_short_pte_t *pte;
2077 pv_t *pv;
2078 vm_offset_t old_pa;
2079
2080 flags = (pa & ~MMU_PAGE_MASK);
2081 pa &= MMU_PAGE_MASK;
2082
2083 /*
2084 * Calculate the index of the PTE being modified.
2085 */
2086 pte_idx = (u_long) sun3x_btop(va - KERNBASE);
2087
2088 /* XXX - This array is traditionally named "Sysmap" */
2089 pte = &kernCbase[pte_idx];
2090
2091 s = splimp();
2092 if (MMU_VALID_DT(*pte)) {
2093 was_valid = TRUE;
2094 /*
2095 * If the PTE is already mapped to an address and it differs
2096 * from the address requested, unlink it from the PV list.
2097 *
2098 * This only applies to mappings within virtual_avail
2099 * and VM_MAX_KERNEL_ADDRESS. All others are not requests
2100 * from the VM system and should not be part of the PV system.
2101 */
2102 if ((va >= virtual_avail) && (va < VM_MAX_KERNEL_ADDRESS)) {
2103 old_pa = MMU_PTE_PA(*pte);
2104 if (pa != old_pa) {
2105 if (is_managed(old_pa)) {
2106 /* XXX - Make this into a function call? */
2107 pv = pa2pv(old_pa);
2108 pv_idx = pv->pv_idx;
2109 if (pv_idx == pte_idx) {
2110 pv->pv_idx = pvebase[pte_idx].pve_next;
2111 } else {
2112 while (pvebase[pv_idx].pve_next != pte_idx)
2113 pv_idx = pvebase[pv_idx].pve_next;
2114 pvebase[pv_idx].pve_next =
2115 pvebase[pte_idx].pve_next;
2116 }
2117 /* Save modified/reference bits */
2118 pv->pv_flags |= (u_short) pte->attr.raw;
2119 }
2120 if (is_managed(pa))
2121 insert = TRUE;
2122 else
2123 insert = FALSE;
2124 /*
2125 * Clear out any old bits in the PTE.
2126 */
2127 pte->attr.raw = MMU_DT_INVALID;
2128 } else {
2129 /*
2130 * Old PA and new PA are the same. No need to relink
2131 * the mapping within the PV list.
2132 */
2133 insert = FALSE;
2134
2135 /*
2136 * Save any mod/ref bits on the PTE.
2137 */
2138 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2139 }
2140 } else {
2141 /*
2142 * If the VA lies below virtual_avail or beyond
2143 * VM_MAX_KERNEL_ADDRESS, it is not a request by the VM
2144 * system and hence does not need to be linked into the PV
2145 * system.
2146 */
2147 insert = FALSE;
2148 pte->attr.raw = MMU_DT_INVALID;
2149 }
2150 } else {
2151 pte->attr.raw = MMU_DT_INVALID;
2152 was_valid = FALSE;
2153 if ((va >= virtual_avail) && (va < VM_MAX_KERNEL_ADDRESS)) {
2154 if (is_managed(pa))
2155 insert = TRUE;
2156 else
2157 insert = FALSE;
2158 } else
2159 insert = FALSE;
2160 }
2161
2162 /*
2163 * Map the page. Being careful to preserve modified/referenced bits
2164 * on the PTE.
2165 */
2166 pte->attr.raw |= (pa | MMU_DT_PAGE);
2167
2168 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2169 pte->attr.raw |= MMU_SHORT_PTE_WP;
2170 if (flags & PMAP_NC)
2171 pte->attr.raw |= MMU_SHORT_PTE_CI;
2172 if (was_valid)
2173 TBIS(va);
2174
2175 /*
2176 * Insert the PTE into the PV system, if need be.
2177 */
2178 if (insert) {
2179 pv = pa2pv(pa);
2180 pvebase[pte_idx].pve_next = pv->pv_idx;
2181 pv->pv_idx = pte_idx;
2182 }
2183 splx(s);
2184
2185 }
2186
2187 /* pmap_protect INTERFACE
2188 **
2189 * Apply the given protection to the given virtual address range within
2190 * the given map.
2191 *
2192 * It is ok for the protection applied to be stronger than what is
2193 * specified. We use this to our advantage when the given map has no
2194 * mapping for the virtual address. By skipping a page when this
2195 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2196 * and therefore do not need to map the page just to apply a protection
2197 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2198 *
2199 * XXX - This function could be sped up by using pmap_stroll() for initial
2200 * setup, and then advancing through the tables manually in the for() loop.
2201 */
2202 void
2203 pmap_protect(pmap, startva, endva, prot)
2204 pmap_t pmap;
2205 vm_offset_t startva, endva;
2206 vm_prot_t prot;
2207 {
2208 boolean_t iscurpmap;
2209 int a_idx, b_idx, c_idx;
2210 vm_offset_t va;
2211 a_tmgr_t *a_tbl;
2212 b_tmgr_t *b_tbl;
2213 c_tmgr_t *c_tbl;
2214 mmu_short_pte_t *pte;
2215
2216 if (pmap == NULL)
2217 return;
2218 if (pmap == pmap_kernel()) {
2219 pmap_protect_kernel(startva, endva, prot);
2220 return;
2221 }
2222
2223 iscurpmap = (pmap == current_pmap());
2224 for (va = startva; va < endva; va += NBPG) {
2225 /*
2226 * Retrieve the mapping for the given page from the given pmap.
2227 * If it does not exist then we need not do anything more for
2228 * the current page.
2229 */
2230 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
2231 &a_idx, &b_idx, &c_idx) == FALSE) {
2232 continue;
2233 }
2234
2235 switch (prot) {
2236 case VM_PROT_ALL:
2237 /* this should never happen in a sane system */
2238 break;
2239 case VM_PROT_EXECUTE:
2240 case VM_PROT_READ:
2241 case VM_PROT_READ|VM_PROT_EXECUTE:
2242 /* make the mapping read-only */
2243 pte->attr.raw |= MMU_SHORT_PTE_WP;
2244 break;
2245 case VM_PROT_NONE:
2246 /* this is an alias for 'pmap_remove' */
2247 pmap_remove_pte(pte);
2248 c_tbl->ct_ecnt--;
2249 break;
2250 default:
2251 break;
2252 }
2253 /*
2254 * If we just modified the current address space,
2255 * flush any translations for the modified page from
2256 * the translation cache and any data from it in the
2257 * data cache.
2258 */
2259 if (iscurpmap)
2260 TBIS(va);
2261 }
2262 }
2263
2264 /* pmap_protect_kernel INTERNAL
2265 **
2266 * Apply the given protection code to a kernel address range.
2267 */
2268 void
2269 pmap_protect_kernel(startva, endva, prot)
2270 vm_offset_t startva, endva;
2271 vm_prot_t prot;
2272 {
2273 vm_offset_t va;
2274 mmu_short_pte_t *pte;
2275
2276 pte = &kernCbase[(unsigned long) sun3x_btop(startva - KERNBASE)];
2277 for (va = startva; va < endva; va += NBPG, pte++) {
2278 if (MMU_VALID_DT(*pte)) {
2279 switch (prot) {
2280 case VM_PROT_ALL:
2281 break;
2282 case VM_PROT_EXECUTE:
2283 case VM_PROT_READ:
2284 case VM_PROT_READ|VM_PROT_EXECUTE:
2285 pte->attr.raw |= MMU_SHORT_PTE_WP;
2286 break;
2287 case VM_PROT_NONE:
2288 /* this is an alias for 'pmap_remove_kernel' */
2289 pmap_remove_pte(pte);
2290 break;
2291 default:
2292 break;
2293 }
2294 /*
2295 * since this is the kernel, immediately flush any cached
2296 * descriptors for this address.
2297 */
2298 TBIS(va);
2299 }
2300 }
2301 }
2302
2303 /* pmap_change_wiring INTERFACE
2304 **
2305 * Changes the wiring of the specified page.
2306 *
2307 * This function is called from vm_fault.c to unwire
2308 * a mapping. It really should be called 'pmap_unwire'
2309 * because it is never asked to do anything but remove
2310 * wirings.
2311 */
2312 void
2313 pmap_change_wiring(pmap, va, wire)
2314 pmap_t pmap;
2315 vm_offset_t va;
2316 boolean_t wire;
2317 {
2318 int a_idx, b_idx, c_idx;
2319 a_tmgr_t *a_tbl;
2320 b_tmgr_t *b_tbl;
2321 c_tmgr_t *c_tbl;
2322 mmu_short_pte_t *pte;
2323
2324 /* Kernel mappings always remain wired. */
2325 if (pmap == pmap_kernel())
2326 return;
2327
2328 #ifdef PMAP_DEBUG
2329 if (wire == TRUE)
2330 panic("pmap_change_wiring: wire requested.");
2331 #endif
2332
2333 /*
2334 * Walk through the tables. If the walk terminates without
2335 * a valid PTE then the address wasn't wired in the first place.
2336 * Return immediately.
2337 */
2338 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2339 &b_idx, &c_idx) == FALSE)
2340 return;
2341
2342
2343 /* Is the PTE wired? If not, return. */
2344 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2345 return;
2346
2347 /* Remove the wiring bit. */
2348 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2349
2350 /*
2351 * Decrement the wired entry count in the C table.
2352 * If it reaches zero the following things happen:
2353 * 1. The table no longer has any wired entries and is considered
2354 * unwired.
2355 * 2. It is placed on the available queue.
2356 * 3. The parent table's wired entry count is decremented.
2357 * 4. If it reaches zero, this process repeats at step 1,
2358 * stopping after the A table has been processed.
2359 */
2360 if (--c_tbl->ct_wcnt == 0) {
2361 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2362 if (--b_tbl->bt_wcnt == 0) {
2363 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2364 if (--a_tbl->at_wcnt == 0) {
2365 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2366 }
2367 }
2368 }
2369 }
2370
2371 /* pmap_pageable INTERFACE
2372 **
2373 * Make the specified range of addresses within the given pmap
2374 * 'pageable' or 'not-pageable'. A non-pageable page must not cause
2375 * any faults when referenced; a pageable page may.
2376 *
2377 * This routine is only advisory. The VM system will call pmap_enter()
2378 * to wire or unwire pages that are going to be made pageable before calling
2379 * this function. By the time this routine is called, everything that needs
2380 * to be done has already been done.
2381 */
2382 void
2383 pmap_pageable(pmap, start, end, pageable)
2384 pmap_t pmap;
2385 vm_offset_t start, end;
2386 boolean_t pageable;
2387 {
2388 /* not implemented. */
2389 }
2390
2391 /* pmap_copy INTERFACE
2392 **
2393 * Copy the mappings of a range of addresses in one pmap, into
2394 * the destination address of another.
2395 *
2396 * This routine is advisory. Should we one day decide that MMU tables
2397 * may be shared by more than one pmap, this function should be used to
2398 * link them together. Until that day however, we do nothing.
2399 */
2400 void
2401 pmap_copy(pmap_a, pmap_b, dst, len, src)
2402 pmap_t pmap_a, pmap_b;
2403 vm_offset_t dst;
2404 vm_size_t len;
2405 vm_offset_t src;
2406 {
2407 /* not implemented. */
2408 }
2409
2410 /* pmap_copy_page INTERFACE
2411 **
2412 * Copy the contents of one physical page into another.
2413 *
2414 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2415 * to map the two specified physical pages into the kernel address space. It
2416 * then uses copypage() to copy one into the other.
2417 *
2418 * Note: We could use the transparent translation registers to make the
2419 * mappings. If we do so, be sure to disable interrupts before using them.
2420 */
2421 void
2422 pmap_copy_page(src, dst)
2423 vm_offset_t src, dst;
2424 {
2425 PMAP_LOCK();
2426 if (tmp_vpages_inuse)
2427 panic("pmap_copy_page: temporary vpages are in use.");
2428 tmp_vpages_inuse++;
2429
2430 /* XXX - Use non-cached mappings to avoid cache pollution? */
2431 pmap_enter_kernel(tmp_vpages[0], src, VM_PROT_READ);
2432 pmap_enter_kernel(tmp_vpages[1], dst, VM_PROT_READ|VM_PROT_WRITE);
2433 copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
2434
2435 tmp_vpages_inuse--;
2436 PMAP_UNLOCK();
2437 }
2438
2439 /* pmap_zero_page INTERFACE
2440 **
2441 * Zero the contents of the specified physical page.
2442 *
2443 * Uses one of the virtual pages allocated in pmap_bootstrap()
2444 * to map the specified page into the kernel address space. Then uses
2445 * zeropage() to zero out the page.
2446 */
2447 void
2448 pmap_zero_page(pa)
2449 vm_offset_t pa;
2450 {
2451 PMAP_LOCK();
2452 if (tmp_vpages_inuse)
2453 panic("pmap_zero_page: temporary vpages are in use.");
2454 tmp_vpages_inuse++;
2455
2456 pmap_enter_kernel(tmp_vpages[0], pa, VM_PROT_READ|VM_PROT_WRITE);
2457 zeropage((char *) tmp_vpages[0]);
2458
2459 tmp_vpages_inuse--;
2460 PMAP_UNLOCK();
2461 }
2462
2463 /* pmap_collect INTERFACE
2464 **
2465 * Called from the VM system when we are about to swap out
2466 * the process using this pmap. This should give up any
2467 * resources held here, including all its MMU tables.
2468 */
2469 void
2470 pmap_collect(pmap)
2471 pmap_t pmap;
2472 {
2473 /* XXX - todo... */
2474 }
2475
2476 /* pmap_create INTERFACE
2477 **
2478 * Create and return a pmap structure.
2479 */
2480 pmap_t
2481 pmap_create(size)
2482 vm_size_t size;
2483 {
2484 pmap_t pmap;
2485
2486 if (size)
2487 return NULL;
2488
2489 pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
2490 pmap_pinit(pmap);
2491
2492 return pmap;
2493 }
2494
2495 /* pmap_pinit INTERNAL
2496 **
2497 * Initialize a pmap structure.
2498 */
2499 void
2500 pmap_pinit(pmap)
2501 pmap_t pmap;
2502 {
2503 bzero(pmap, sizeof(struct pmap));
2504 pmap->pm_a_tmgr = NULL;
2505 pmap->pm_a_phys = kernAphys;
2506 }
2507
2508 /* pmap_release INTERFACE
2509 **
2510 * Release any resources held by the given pmap.
2511 *
2512 * This is the reverse analog of pmap_pinit. It does not
2513 * necessarily imply that the pmap structure must be deallocated,
2514 * as it is in pmap_destroy.
2515 */
2516 void
2517 pmap_release(pmap)
2518 pmap_t pmap;
2519 {
2520 /*
2521 * As long as the pmap contains no mappings,
2522 * which always should be the case whenever
2523 * this function is called, there really should
2524 * be nothing to do.
2525 *
2526 * XXX - This function is being called while there are
2527 * still valid mappings, so I guess the above must not
2528 * be true.
2529 * XXX - Unless the mappings persist due to a bug here...
2530 * + That's what was happening. The map had no mappings,
2531 * but it still had an A table. pmap_remove() was not
2532 * releasing tables when they were empty.
2533 */
2534 #ifdef PMAP_DEBUG
2535 if (pmap == NULL)
2536 return;
2537 if (pmap == pmap_kernel())
2538 panic("pmap_release: kernel pmap release requested.");
2539 #endif
2540 if (pmap->pm_a_tmgr != NULL) {
2541 free_a_table(pmap->pm_a_tmgr, TRUE);
2542 pmap->pm_a_tmgr = NULL;
2543 pmap->pm_a_phys = kernAphys;
2544 }
2545 }
2546
2547 /* pmap_reference INTERFACE
2548 **
2549 * Increment the reference count of a pmap.
2550 */
2551 void
2552 pmap_reference(pmap)
2553 pmap_t pmap;
2554 {
2555 if (pmap == NULL)
2556 return;
2557
2558 /* pmap_lock(pmap); */
2559 pmap->pm_refcount++;
2560 /* pmap_unlock(pmap); */
2561 }
2562
2563 /* pmap_dereference INTERNAL
2564 **
2565 * Decrease the reference count on the given pmap
2566 * by one and return the current count.
2567 */
2568 int
2569 pmap_dereference(pmap)
2570 pmap_t pmap;
2571 {
2572 int rtn;
2573
2574 if (pmap == NULL)
2575 return 0;
2576
2577 /* pmap_lock(pmap); */
2578 rtn = --pmap->pm_refcount;
2579 /* pmap_unlock(pmap); */
2580
2581 return rtn;
2582 }
2583
2584 /* pmap_destroy INTERFACE
2585 **
2586 * Decrement a pmap's reference count and delete
2587 * the pmap if it becomes zero. Will be called
2588 * only after all mappings have been removed.
2589 */
2590 void
2591 pmap_destroy(pmap)
2592 pmap_t pmap;
2593 {
2594 if (pmap == NULL)
2595 return;
2596 if (pmap == &kernel_pmap)
2597 panic("pmap_destroy: kernel_pmap!");
2598 if (pmap_dereference(pmap) == 0) {
2599 pmap_release(pmap);
2600 free(pmap, M_VMPMAP);
2601 }
2602 }
2603
2604 /* pmap_is_referenced INTERFACE
2605 **
2606 * Determine if the given physical page has been
2607 * referenced (read from or written to.)
2608 */
2609 boolean_t
2610 pmap_is_referenced(pa)
2611 vm_offset_t pa;
2612 {
2613 pv_t *pv;
2614 int idx, s;
2615
2616 if (!pv_initialized)
2617 return FALSE;
2618 /* XXX - this may be unnecessary. */
2619 if (!is_managed(pa))
2620 return FALSE;
2621
2622 pv = pa2pv(pa);
2623 /*
2624 * Check the flags on the pv head. If they are set,
2625 * return immediately. Otherwise a search must be done.
2626 */
2627 if (pv->pv_flags & PV_FLAGS_USED)
2628 return TRUE;
2629 else {
2630 s = splimp();
2631 /*
2632 * Search through all pv elements pointing
2633 * to this page and query their reference bits
2634 */
2635 for (idx = pv->pv_idx; idx != PVE_EOL; idx =
2636 pvebase[idx].pve_next)
2637 if (MMU_PTE_USED(kernCbase[idx])) {
2638 splx(s);
2639 return TRUE;
2640 }
2641 splx(s);
2642 }
2643
2644 return FALSE;
2645 }
2646
2647 /* pmap_is_modified INTERFACE
2648 **
2649 * Determine if the given physical page has been
2650 * modified (written to.)
2651 */
2652 boolean_t
2653 pmap_is_modified(pa)
2654 vm_offset_t pa;
2655 {
2656 pv_t *pv;
2657 int idx, s;
2658
2659 if (!pv_initialized)
2660 return FALSE;
2661 /* XXX - this may be unnecessary. */
2662 if (!is_managed(pa))
2663 return FALSE;
2664
2665 /* see comments in pmap_is_referenced() */
2666 pv = pa2pv(pa);
2667 if (pv->pv_flags & PV_FLAGS_MDFY) {
2668 return TRUE;
2669 } else {
2670 s = splimp();
2671 for (idx = pv->pv_idx; idx != PVE_EOL; idx =
2672 pvebase[idx].pve_next)
2673 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2674 splx(s);
2675 return TRUE;
2676 }
2677 splx(s);
2678 }
2679
2680 return FALSE;
2681 }
2682
2683 /* pmap_page_protect INTERFACE
2684 **
2685 * Applies the given protection to all mappings to the given
2686 * physical page.
2687 */
2688 void
2689 pmap_page_protect(pa, prot)
2690 vm_offset_t pa;
2691 vm_prot_t prot;
2692 {
2693 pv_t *pv;
2694 int idx, s;
2695 vm_offset_t va;
2696 struct mmu_short_pte_struct *pte;
2697 c_tmgr_t *c_tbl;
2698 pmap_t pmap, curpmap;
2699
2700 if (!is_managed(pa))
2701 return;
2702
2703 curpmap = current_pmap();
2704 pv = pa2pv(pa);
2705 s = splimp();
2706 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2707 pte = &kernCbase[idx];
2708 switch (prot) {
2709 case VM_PROT_ALL:
2710 /* do nothing */
2711 break;
2712 case VM_PROT_EXECUTE:
2713 case VM_PROT_READ:
2714 case VM_PROT_READ|VM_PROT_EXECUTE:
2715 pte->attr.raw |= MMU_SHORT_PTE_WP;
2716
2717 /*
2718 * Determine the virtual address mapped by
2719 * the PTE and flush ATC entries if necessary.
2720 */
2721 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2722 if (pmap == curpmap || pmap == pmap_kernel())
2723 TBIS(va);
2724 break;
2725 case VM_PROT_NONE:
2726 /* Save the mod/ref bits. */
2727 pv->pv_flags |= pte->attr.raw;
2728 /* Invalidate the PTE. */
2729 pte->attr.raw = MMU_DT_INVALID;
2730
2731 /*
2732 * Update table counts. And flush ATC entries
2733 * if necessary.
2734 */
2735 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2736
2737 /*
2738 * If the PTE belongs to the kernel map,
2739 * be sure to flush the page it maps.
2740 */
2741 if (pmap == pmap_kernel()) {
2742 TBIS(va);
2743 } else {
2744 /*
2745 * The PTE belongs to a user map.
2746 * Update the entry count in the C
2747 * table to which it belongs and flush
2748 * the ATC if the mapping belongs to
2749 * the current pmap.
2750 */
2751 c_tbl->ct_ecnt--;
2752 if (pmap == curpmap)
2753 TBIS(va);
2754 }
2755 break;
2756 default:
2757 break;
2758 }
2759 }
2760
2761 /*
2762 * If the protection code indicates that all mappings to the page
2763 * must be removed, truncate the PV list to zero entries.
2764 */
2765 if (prot == VM_PROT_NONE)
2766 pv->pv_idx = PVE_EOL;
2767 splx(s);
2768 }
2769
2770 /* pmap_get_pteinfo INTERNAL
2771 **
2772 * Called internally to find the pmap and virtual address within that
2773 * map to which the pte at the given index maps, along with the PTE's C
2774 * table manager.
2775 *
2776 * Returns the pmap and C table manager in the arguments provided, and
2777 * the virtual address by return value.
2778 */
2779 vm_offset_t
2780 pmap_get_pteinfo(idx, pmap, tbl)
2781 u_int idx;
2782 pmap_t *pmap;
2783 c_tmgr_t **tbl;
2784 {
2785 a_tmgr_t *a_tbl;
2786 b_tmgr_t *b_tbl;
2787 c_tmgr_t *c_tbl;
2788 vm_offset_t va = 0;
2789
2790 /*
2791 * Determine if the PTE is a kernel PTE or a user PTE.
2792 */
2793 if (idx >= NUM_KERN_PTES) {
2794 /*
2795 * The PTE belongs to a user mapping.
2796 * Find the virtual address by decoding table indices.
2797 * Each successive decode reveals the address in
2798 * least- to most-significant-bit order.
2799 *
2800 * 31 0
2801 * +-------------------------------+
2802 * |AAAAAAABBBBBBCCCCCC............|
2803 * +-------------------------------+
2804 */
2805 /* XXX: c_tbl = mmuC2tmgr(pte); */
2806 /* XXX: Would like an inline for this to validate idx... */
2807 c_tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2808 b_tbl = c_tbl->ct_parent;
2809 a_tbl = b_tbl->bt_parent;
2810 *pmap = a_tbl->at_parent;
2811 *tbl = c_tbl;
2812
2813 /* Start with the 'C' bits, then add B and A... */
2814 va |= ((idx % MMU_C_TBL_SIZE) << MMU_TIC_SHIFT);
2815 va |= (c_tbl->ct_pidx << MMU_TIB_SHIFT);
2816 va |= (b_tbl->bt_pidx << MMU_TIA_SHIFT);
2817 } else {
2818 /*
2819 * The PTE belongs to the kernel map.
2820 */
2821 *pmap = pmap_kernel();
2822
2823 va = sun3x_ptob(idx);
2824 va += KERNBASE;
2825 }
2826
2827 return va;
2828 }
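/*
 * The reconstruction in pmap_get_pteinfo() is the inverse of the
 * MMU_TIA/TIB/TIC decomposition used throughout this file. A sketch
 * of the invariant (the page offset is lost, so only page-aligned
 * addresses can be recovered):
 *
 *	va == (MMU_TIA(va) << MMU_TIA_SHIFT)
 *	    | (MMU_TIB(va) << MMU_TIB_SHIFT)
 *	    | (MMU_TIC(va) << MMU_TIC_SHIFT)
 *	    | (va & (MMU_PAGE_SIZE - 1))
 *
 * pmap_get_pteinfo() recovers the three indices from the table
 * managers' back references instead of from a VA, then shifts them
 * back into position.
 */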
2829
2830 #if 0 /* XXX - I am eliminating this function. */
2831 /* pmap_find_tic INTERNAL
2832 **
2833 * Given the address of a pte, find the TIC (level 'C' table index) for
2834 * the pte within its C table.
2835 */
2836 char
2837 pmap_find_tic(pte)
2838 mmu_short_pte_t *pte;
2839 {
2840 return ((pte - mmuCbase) % MMU_C_TBL_SIZE);
2841 }
2842 #endif /* 0 */
2843
2844
2845 /* pmap_clear_modify INTERFACE
2846 **
2847 * Clear the modification bit on the page at the specified
2848 * physical address.
2849 */
2851 void
2852 pmap_clear_modify(pa)
2853 vm_offset_t pa;
2854 {
2855 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2856 }
2857
2858 /* pmap_clear_reference INTERFACE
2859 **
2860 * Clear the referenced bit on the page at the specified
2861 * physical address.
2862 */
2863 void
2864 pmap_clear_reference(pa)
2865 vm_offset_t pa;
2866 {
2867 pmap_clear_pv(pa, PV_FLAGS_USED);
2868 }
2869
2870 /* pmap_clear_pv INTERNAL
2871 **
2872 * Clears the specified flag from the specified physical address.
2873 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2874 *
2875 * Flag is one of:
2876 * PV_FLAGS_MDFY - Page modified bit.
2877 * PV_FLAGS_USED - Page used (referenced) bit.
2878 *
2879 * This routine must not only clear the flag on the pv list
2880 * head. It must also clear the bit on every pte in the pv
2881 * list associated with the address.
2882 */
2883 void
2884 pmap_clear_pv(pa, flag)
2885 vm_offset_t pa;
2886 int flag;
2887 {
2888 pv_t *pv;
2889 int idx, s;
2890 vm_offset_t va;
2891 pmap_t pmap;
2892 mmu_short_pte_t *pte;
2893 c_tmgr_t *c_tbl;
2894
2895 pv = pa2pv(pa);
2896
2897 s = splimp();
2898 pv->pv_flags &= ~(flag);
2899 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2900 pte = &kernCbase[idx];
2901 pte->attr.raw &= ~(flag);
2902 /*
2903 * The MC68030 MMU will not set the modified or
2904 * referenced bits on any MMU tables for which it has
2905 * a cached descriptor with its modify bit set. To ensure
2906 * that it will modify these bits on the PTE the next
2907 * time it is written to or read from, we must flush it from
2908 * the ATC.
2909 *
2910 * Ordinarily it is only necessary to flush the descriptor
2911 * if it is used in the current address space. But since I
2912 * am not sure that there will always be a notion of
2913 * 'the current address space' when this function is called,
2914 * I will skip the test and always flush the address. It
2915 * does no harm.
2916 */
2917 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2918 TBIS(va);
2919 }
2920 splx(s);
2921 }
2922
2923 /* pmap_extract INTERFACE
2924 **
2925 * Return the physical address mapped by the virtual address
2926 * in the specified pmap or 0 if it is not known.
2927 *
2928 * Note: this function should also apply an exclusive lock
2929 * on the pmap system for its duration.
2930 */
2931 vm_offset_t
2932 pmap_extract(pmap, va)
2933 pmap_t pmap;
2934 vm_offset_t va;
2935 {
2936 int a_idx, b_idx, pte_idx;
2937 a_tmgr_t *a_tbl;
2938 b_tmgr_t *b_tbl;
2939 c_tmgr_t *c_tbl;
2940 mmu_short_pte_t *c_pte;
2941
2942 if (pmap == pmap_kernel())
2943 return pmap_extract_kernel(va);
2944 if (pmap == NULL)
2945 return 0;
2946
2947 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2948 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
2949 return 0;
2950
2951 if (!MMU_VALID_DT(*c_pte))
2952 return 0;
2953
2954 return (MMU_PTE_PA(*c_pte));
2955 }
2956
2957 /* pmap_extract_kernel INTERNAL
2958 **
2959 * Extract a translation from the kernel address space.
2960 */
2961 vm_offset_t
2962 pmap_extract_kernel(va)
2963 vm_offset_t va;
2964 {
2965 mmu_short_pte_t *pte;
2966
2967 pte = &kernCbase[(u_int) sun3x_btop(va - KERNBASE)];
2968 return MMU_PTE_PA(*pte);
2969 }
2970
2971 /* pmap_remove_kernel INTERNAL
2972 **
2973 * Remove the mapping of a range of virtual addresses from the kernel map.
2974 */
2975 void
2976 pmap_remove_kernel(start, end)
2977 vm_offset_t start;
2978 vm_offset_t end;
2979 {
2980 start -= KERNBASE;
2981 end -= KERNBASE;
2982 start = sun3x_round_page(start); /* round down */
2983 start = sun3x_btop(start);
2984 end += MMU_PAGE_SIZE - 1; /* next round operation will be up */
2985 end = sun3x_round_page(end); /* round */
2986 end = sun3x_btop(end);
2987
2988 while (start < end)
2989 pmap_remove_pte(&kernCbase[start++]);
2990 /* Always flush the ATC when manipulating the kernel address space. */
2991 TBIAS();
2992 }
2993
2994 /* pmap_remove INTERFACE
2995 **
2996 * Remove the mapping of a range of virtual addresses from the given pmap.
2997 *
2998 * If the range contains any wired entries, this function will probably create
2999 * disaster.
3000 */
3001 void
3002 pmap_remove(pmap, start, end)
3003 pmap_t pmap;
3004 vm_offset_t start;
3005 vm_offset_t end;
3006 {
3007
3008 if (pmap == pmap_kernel()) {
3009 pmap_remove_kernel(start, end);
3010 return;
3011 }
3012
3013 /*
3014 * XXX - Temporary(?) statement to prevent panic caused
3015 * by vm_alloc_with_pager() handing us a software map (i.e. NULL)
3016 * to remove because it couldn't get backing store.
3017 * (I guess.)
3018 */
3019 if (pmap == NULL)
3020 return;
3021
3022 /*
3023 * If the pmap doesn't have an A table of its own, it has no mappings
3024 * that can be removed.
3025 */
3026 if (pmap->pm_a_tmgr == NULL)
3027 return;
3028
3029 /*
3030 * Remove the specified range from the pmap. If the function
3031 * returns true, the operation removed all the valid mappings
3032 * in the pmap and freed its A table. If this happened to the
3033 * currently loaded pmap, the MMU root pointer must be reloaded
3034 * with the default 'kernel' map.
3035 */
3036 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
3037 pmap->pm_a_tmgr = NULL;
3038 pmap->pm_a_phys = kernAphys;
3039 if (pmap == current_pmap())
3040 pmap_activate(pmap);
3041 } else {
3042 /*
3043 * If we just modified the current address space,
3044 * make sure to flush the MMU cache.
3045 *
3046 * XXX - this could be an unnecessarily large flush.
3047 * XXX - Could decide, based on the size of the VA range
3048 * to be removed, whether to flush "by pages" or "all".
3049 */
3050 if (pmap == current_pmap())
3051 TBIAU();
3052 }
3053 }
3054
3055 /* pmap_remove_a INTERNAL
3056 **
3057 * This is function number one in a set of three that removes a range
3058 * of memory in the most efficient manner by removing the highest possible
3059 * tables from the memory space. This particular function attempts to remove
3060 * as many B tables as it can, delegating the remaining fragmented ranges to
3061 * pmap_remove_b().
3062 *
3063 * If the removal operation results in an empty A table, the function returns
3064 * TRUE.
3065 *
3066 * It's ugly but will do for now.
3067 */
3068 boolean_t
3069 pmap_remove_a(a_tbl, start, end)
3070 a_tmgr_t *a_tbl;
3071 vm_offset_t start;
3072 vm_offset_t end;
3073 {
3074 boolean_t empty;
3075 int idx;
3076 vm_offset_t nstart, nend;
3077 b_tmgr_t *b_tbl;
3078 mmu_long_dte_t *a_dte;
3079 mmu_short_dte_t *b_dte;
3080
3081 /*
3082 * The following code works with what I call a 'granularity
3083 * reduction algorithm'. A range of addresses will always have
3084 * the following properties, which are classified according to
3085 * how the range relates to the size of the current granularity
3086 * - an A table entry:
3087 *
3088 * 1 2 3 4
3089 * -+---+---+---+---+---+---+---+-
3090 * -+---+---+---+---+---+---+---+-
3091 *
3092 * A range will always start on a granularity boundary, illustrated
3093 * by '+' signs in the table above, or it will start at some point
3094 * in between granularity boundaries, as illustrated by point 1.
3095 * The first step in removing a range of addresses is to remove the
3096 * range between 1 and 2, the nearest granularity boundary. This
3097 * job is handled by the section of code governed by the
3098 * 'if (start < nstart)' statement.
3099 *
3100 * A range will always encompass zero or more integral granules,
3101 * illustrated by points 2 and 3. Integral granules are easy to
3102 * remove. The removal of these granules is the second step, and
3103 * is handled by the code block 'if (nstart < nend)'.
3104 *
3105 * Lastly, a range will always end on a granularity boundary,
3106 * as illustrated by point 3, or it will fall just beyond one, as at
3107 * point 4. The last step involves removing this range and is handled by
3108 * the code block 'if (nend < end)'.
3109 */
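/*
 * A worked sketch (addresses invented; assuming one A table entry
 * spans MMU_TIA_RANGE = 0x2000000 bytes): removing the range
 * [0x1800000, 0x6800000) yields nstart = 0x2000000 and
 * nend = 0x6000000, so that
 *
 *	[0x1800000, 0x2000000) is delegated to pmap_remove_b(),
 *	[0x2000000, 0x6000000) is removed as two whole B tables, and
 *	[0x6000000, 0x6800000) is delegated to pmap_remove_b().
 */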
3110 nstart = MMU_ROUND_UP_A(start);
3111 nend = MMU_ROUND_A(end);
3112
3113 if (start < nstart) {
3114 /*
3115 * This block is executed if the range starts between
3116 * a granularity boundary.
3117 *
3118 * First find the DTE which is responsible for mapping
3119 * the start of the range.
3120 */
3121 idx = MMU_TIA(start);
3122 a_dte = &a_tbl->at_dtbl[idx];
3123
3124 /*
3125 * If the DTE is valid then delegate the removal of the sub
3126 * range to pmap_remove_b(), which can remove addresses at
3127 * a finer granularity.
3128 */
3129 if (MMU_VALID_DT(*a_dte)) {
3130 b_dte = mmu_ptov(a_dte->addr.raw);
3131 b_tbl = mmuB2tmgr(b_dte);
3132
3133 /*
3134 * The sub range to be removed starts at the start
3135 * of the full range we were asked to remove, and ends
3136 * at the lesser of:
3137 * 1. The end of the full range, -or-
3138 * 2. The start of the range rounded up to the
3139 * nearest granularity boundary (nstart).
3140 */
3141 if (end < nstart)
3142 empty = pmap_remove_b(b_tbl, start, end);
3143 else
3144 empty = pmap_remove_b(b_tbl, start, nstart);
3145
3146 /*
3147 * If the removal resulted in an empty B table,
3148 * invalidate the DTE that points to it and decrement
3149 * the valid entry count of the A table.
3150 */
3151 if (empty) {
3152 a_dte->attr.raw = MMU_DT_INVALID;
3153 a_tbl->at_ecnt--;
3154 }
3155 }
3156 /*
3157 * If the DTE is invalid, the address range is already
3158 * nonexistent and can simply be skipped.
3159 */
3160 }
3161 if (nstart < nend) {
3162 /*
3163 * This block is executed if the range spans a whole number
3164 * multiple of granules (A table entries.)
3165 *
3166 * First find the DTE which is responsible for mapping
3167 * the start of the first granule involved.
3168 */
3169 idx = MMU_TIA(nstart);
3170 a_dte = &a_tbl->at_dtbl[idx];
3171
3172 /*
3173 * Remove entire sub-granules (B tables) one at a time,
3174 * until reaching the end of the range.
3175 */
3176 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3177 if (MMU_VALID_DT(*a_dte)) {
3178 /*
3179 * Find the B table manager for the
3180 * entry and free it.
3181 */
3182 b_dte = mmu_ptov(a_dte->addr.raw);
3183 b_tbl = mmuB2tmgr(b_dte);
3184 free_b_table(b_tbl, TRUE);
3185
3186 /*
3187 * Invalidate the DTE that points to the
3188 * B table and decrement the valid entry
3189 * count of the A table.
3190 */
3191 a_dte->attr.raw = MMU_DT_INVALID;
3192 a_tbl->at_ecnt--;
3193 }
3194 }
3195 if (nend < end) {
3196 /*
3197 * This block is executed if the range ends beyond a
3198 * granularity boundary.
3199 *
3200 * First find the DTE which is responsible for mapping
3201 * the start of the nearest (rounded down) granularity
3202 * boundary.
3203 */
3204 idx = MMU_TIA(nend);
3205 a_dte = &a_tbl->at_dtbl[idx];
3206
3207 /*
3208 * If the DTE is valid then delegate the removal of the sub
3209 * range to pmap_remove_b(), which can remove addresses at
3210 * a finer granularity.
3211 */
3212 if (MMU_VALID_DT(*a_dte)) {
3213 /*
3214 * Find the B table manager for the entry
3215 * and hand it to pmap_remove_b() along with
3216 * the sub range.
3217 */
3218 b_dte = mmu_ptov(a_dte->addr.raw);
3219 b_tbl = mmuB2tmgr(b_dte);
3220
3221 empty = pmap_remove_b(b_tbl, nend, end);
3222
3223 /*
3224 * If the removal resulted in an empty B table,
3225 * invalidate the DTE that points to it and decrement
3226 * the valid entry count of the A table.
3227 */
3228 if (empty) {
3229 a_dte->attr.raw = MMU_DT_INVALID;
3230 a_tbl->at_ecnt--;
3231 }
3232 }
3233 }
3234
3235 /*
3236 * If there are no more entries in the A table, release it
3237 * back to the available pool and return TRUE.
3238 */
3239 if (a_tbl->at_ecnt == 0) {
3240 a_tbl->at_parent = NULL;
3241 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3242 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3243 empty = TRUE;
3244 } else {
3245 empty = FALSE;
3246 }
3247
3248 return empty;
3249 }
3250
3251 /* pmap_remove_b INTERNAL
3252 **
3253 * Remove a range of addresses from an address space, trying to remove entire
3254 * C tables if possible.
3255 *
3256 * If the operation results in an empty B table, the function returns TRUE.
3257 */
3258 boolean_t
3259 pmap_remove_b(b_tbl, start, end)
3260 b_tmgr_t *b_tbl;
3261 vm_offset_t start;
3262 vm_offset_t end;
3263 {
3264 boolean_t empty;
3265 int idx;
3266 vm_offset_t nstart, nend, rstart;
3267 c_tmgr_t *c_tbl;
3268 mmu_short_dte_t *b_dte;
3269 mmu_short_pte_t *c_dte;
3270
3271
3272 nstart = MMU_ROUND_UP_B(start);
3273 nend = MMU_ROUND_B(end);
3274
3275 if (start < nstart) {
3276 idx = MMU_TIB(start);
3277 b_dte = &b_tbl->bt_dtbl[idx];
3278 if (MMU_VALID_DT(*b_dte)) {
3279 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3280 c_tbl = mmuC2tmgr(c_dte);
3281 if (end < nstart)
3282 empty = pmap_remove_c(c_tbl, start, end);
3283 else
3284 empty = pmap_remove_c(c_tbl, start, nstart);
3285 if (empty) {
3286 b_dte->attr.raw = MMU_DT_INVALID;
3287 b_tbl->bt_ecnt--;
3288 }
3289 }
3290 }
3291 if (nstart < nend) {
3292 idx = MMU_TIB(nstart);
3293 b_dte = &b_tbl->bt_dtbl[idx];
3294 rstart = nstart;
3295 while (rstart < nend) {
3296 if (MMU_VALID_DT(*b_dte)) {
3297 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3298 c_tbl = mmuC2tmgr(c_dte);
3299 free_c_table(c_tbl, TRUE);
3300 b_dte->attr.raw = MMU_DT_INVALID;
3301 b_tbl->bt_ecnt--;
3302 }
3303 b_dte++;
3304 rstart += MMU_TIB_RANGE;
3305 }
3306 }
3307 if (nend < end) {
3308 idx = MMU_TIB(nend);
3309 b_dte = &b_tbl->bt_dtbl[idx];
3310 if (MMU_VALID_DT(*b_dte)) {
3311 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3312 c_tbl = mmuC2tmgr(c_dte);
3313 empty = pmap_remove_c(c_tbl, nend, end);
3314 if (empty) {
3315 b_dte->attr.raw = MMU_DT_INVALID;
3316 b_tbl->bt_ecnt--;
3317 }
3318 }
3319 }
3320
3321 if (b_tbl->bt_ecnt == 0) {
3322 b_tbl->bt_parent = NULL;
3323 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3324 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3325 empty = TRUE;
3326 } else {
3327 empty = FALSE;
3328 }
3329
3330 return empty;
3331 }
3332
3333 /* pmap_remove_c INTERNAL
3334 **
3335 * Remove a range of addresses from the given C table.
3336 */
3337 boolean_t
3338 pmap_remove_c(c_tbl, start, end)
3339 c_tmgr_t *c_tbl;
3340 vm_offset_t start;
3341 vm_offset_t end;
3342 {
3343 boolean_t empty;
3344 int idx;
3345 mmu_short_pte_t *c_pte;
3346
3347 idx = MMU_TIC(start);
3348 c_pte = &c_tbl->ct_dtbl[idx];
3349 for (;start < end; start += MMU_PAGE_SIZE, c_pte++) {
3350 if (MMU_VALID_DT(*c_pte)) {
3351 pmap_remove_pte(c_pte);
3352 c_tbl->ct_ecnt--;
3353 }
3354 }
3355
3356 if (c_tbl->ct_ecnt == 0) {
3357 c_tbl->ct_parent = NULL;
3358 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3359 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3360 empty = TRUE;
3361 } else {
3362 empty = FALSE;
3363 }
3364
3365 return empty;
3366 }
3367
3368 /* is_managed INTERNAL
3369 **
3370 * Determine if the given physical address is managed by the PV system.
3371 * Note that this logic assumes that no one will ask for the status of
3372 * addresses which lie in-between the memory banks on the 3/80. If they
3373 * do so, it will falsely report that it is managed.
3374 *
3375 * Note: A "managed" address is one that was reported to the VM system as
3376 * a "usable page" during system startup. As such, the VM system expects the
3377 * pmap module to keep accurate track of the usage of those pages.
3378 * Any page not given to the VM system at startup does not exist (as far as
3379 * the VM system is concerned) and is therefore "unmanaged." Examples are
3380 * those pages which belong to the ROM monitor and the memory allocated before
3381 * the VM system was started.
3382 */
3383 boolean_t
3384 is_managed(pa)
3385 vm_offset_t pa;
3386 {
3387 if (pa >= avail_start && pa < avail_end)
3388 return TRUE;
3389 else
3390 return FALSE;
3391 }
3392
3393 /* pmap_bootstrap_alloc INTERNAL
3394 **
3395 * Used internally for memory allocation at startup when malloc is not
3396 * available. This code will fail once it crosses the first memory
3397 * bank boundary on the 3/80. Hopefully by then however, the VM system
3398 * will be in charge of allocation.
3399 */
3400 void *
3401 pmap_bootstrap_alloc(size)
3402 int size;
3403 {
3404 void *rtn;
3405
3406 #ifdef PMAP_DEBUG
3407 if (bootstrap_alloc_enabled == FALSE) {
3408 mon_printf("pmap_bootstrap_alloc: disabled\n");
3409 sunmon_abort();
3410 }
3411 #endif
3412
3413 rtn = (void *) virtual_avail;
3414 virtual_avail += size;
3415
3416 #ifdef PMAP_DEBUG
3417 if (virtual_avail > virtual_contig_end) {
3418 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3419 sunmon_abort();
3420 }
3421 #endif
3422
3423 return rtn;
3424 }
3425
3426 /* pmap_bootstrap_aalign INTERNAL
3427 **
3428 * Used to ensure that the next call to pmap_bootstrap_alloc() will
3429 * return a chunk of memory aligned to the specified size.
3430 *
3431 * Note: This function will only support alignment sizes that are powers
3432 * of two.
3433 */
3434 void
3435 pmap_bootstrap_aalign(size)
3436 int size;
3437 {
3438 int off;
3439
3440 off = virtual_avail & (size - 1);
3441 if (off) {
3442 (void) pmap_bootstrap_alloc(size - off);
3443 }
3444 }
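/*
 * A sketch of typical use: force the next bootstrap allocation to
 * start on a page boundary before carving out a page-sized object.
 *
 *	pmap_bootstrap_aalign(MMU_PAGE_SIZE);
 *	tbl = pmap_bootstrap_alloc(MMU_PAGE_SIZE);
 */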
3445
3446 /* pmap_pa_exists
3447 **
3448 * Used by the /dev/mem driver to see if a given PA is memory
3449 * that can be mapped. (The PA is not in a hole.)
3450 */
3451 int
3452 pmap_pa_exists(pa)
3453 vm_offset_t pa;
3454 {
3455 /* XXX - NOTYET */
3456 return (0);
3457 }
3458
3459 /* pmap_activate INTERFACE
3460 **
3461 * This is called by locore.s:cpu_switch when we are switching to a
3462 * new process. This should load the MMU context for the new proc.
3463 * XXX - Later, this should be done directly in locore.s
3464 */
3465 void
3466 pmap_activate(pmap)
3467 pmap_t pmap;
3468 {
3469 u_long rootpa;
3470
3471 /* Only do reload/flush if we have to. */
3472 rootpa = pmap->pm_a_phys;
3473 if (kernel_crp.rp_addr != rootpa) {
3474 DPRINT(("pmap_activate(%p)\n", pmap));
3475 kernel_crp.rp_addr = rootpa;
3476 loadcrp(&kernel_crp);
3477 TBIAU();
3478 }
3479 }
3480
3481
3482 /* pmap_update
3483 **
3484 * Apply any delayed changes scheduled for all pmaps immediately.
3485 *
3486 * No delayed operations are currently done in this pmap.
3487 */
3488 void
3489 pmap_update()
3490 {
3491 /* not implemented. */
3492 }
3493
3494 /* pmap_virtual_space INTERFACE
3495 **
3496 * Return the current available range of virtual addresses in the
3497 * arguments provided. Only really called once.
3498 */
3499 void
3500 pmap_virtual_space(vstart, vend)
3501 vm_offset_t *vstart, *vend;
3502 {
3503 *vstart = virtual_avail;
3504 *vend = virtual_end;
3505 }
3506
3507 /* pmap_free_pages INTERFACE
3508 **
3509 * Return the number of physical pages still available.
3510 *
3511 * This is probably going to be a mess, but it's only called
3512 * once and it's the only function left that I have to implement!
3513 */
3514 u_int
3515 pmap_free_pages()
3516 {
3517 int i;
3518 u_int left;
3519 vm_offset_t avail;
3520
3521 avail = avail_next;
3522 left = 0;
3523 i = 0;
3524 while (avail >= avail_mem[i].pmem_end) {
3525 if (avail_mem[i].pmem_next == NULL)
3526 return 0;
3527 i++;
3528 }
3529 while (i < SUN3X_80_MEM_BANKS) {
3530 if (avail < avail_mem[i].pmem_start) {
3531 /* Avail is inside a hole, march it
3532 * up to the next bank.
3533 */
3534 avail = avail_mem[i].pmem_start;
3535 }
3536 left += sun3x_btop(avail_mem[i].pmem_end - avail);
3537 if (avail_mem[i].pmem_next == NULL)
3538 break;
3539 i++;
3540 }
3541
3542 return left;
3543 }
3544
3545 /* pmap_page_index INTERFACE
3546 **
3547 * Return the index of the given physical page in a list of usable
3548 * physical pages in the system. Holes in physical memory may be counted
3549 * if so desired. As long as pmap_free_pages() and pmap_page_index()
3550 * agree as to whether holes in memory do or do not count as valid pages,
3551 * it really doesn't matter. However, if you like to save a little
3552 * memory, don't count holes as valid pages. This is even more true when
3553 * the holes are large.
3554 *
3555 * We will not count holes as valid pages. We can generate page indices
3556 * that conform to this by using the memory bank structures initialized
3557 * in pmap_alloc_pv().
3558 */
3559 int
3560 pmap_page_index(pa)
3561 vm_offset_t pa;
3562 {
3563 struct pmap_physmem_struct *bank = avail_mem;
3564
3565 /* Search for the memory bank with this page. */
3566 /* XXX - What if it is not physical memory? */
3567 while (pa > bank->pmem_end)
3568 bank = bank->pmem_next;
3569 pa -= bank->pmem_start;
3570
3571 return (bank->pmem_pvbase + sun3x_btop(pa));
3572 }
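/*
 * A worked sketch (bank layout invented): if bank 0 covers
 * [0x0, 0x400000) with pmem_pvbase = 0 and bank 1 covers
 * [0x500000, 0x900000) with pmem_pvbase = sun3x_btop(0x400000),
 * then pa = 0x500000 maps to index sun3x_btop(0x400000) + 0;
 * the hole [0x400000, 0x500000) consumes no indices.
 */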
3573
3574 /* pmap_next_page INTERFACE
3575 **
3576 * Place the physical address of the next available page in the
3577 * argument given. Returns FALSE if there are no more pages left.
3578 *
3579 * This function must jump over any holes in physical memory.
3580 * Once this function is used, any use of pmap_bootstrap_alloc()
3581 * is a sin. Sinners will be punished with erratic behavior.
3582 */
3583 boolean_t
3584 pmap_next_page(pa)
3585 vm_offset_t *pa;
3586 {
3587 static struct pmap_physmem_struct *curbank = avail_mem;
3588
3589 /* XXX - temporary ROM saving hack. */
3590 if (avail_next >= avail_end)
3591 return FALSE;
3592
3593 if (avail_next >= curbank->pmem_end) {
3594 if (curbank->pmem_next == NULL)
3595 return FALSE;
3596 curbank = curbank->pmem_next;
3597 avail_next = curbank->pmem_start;
3598 }
3600
3601 *pa = avail_next;
3602 avail_next += NBPG;
3603 return TRUE;
3604 }
3605
3606 /* pmap_count INTERFACE
3607 **
3608 * Return the number of resident (valid) pages in the given pmap.
3609 *
3610 * Note: If this function is handed the kernel map, it will report
3611 * that it has no mappings. Hopefully the VM system won't ask for kernel
3612 * map statistics.
3613 */
3614 segsz_t
3615 pmap_count(pmap, type)
3616 pmap_t pmap;
3617 int type;
3618 {
3619 u_int count;
3620 int a_idx, b_idx;
3621 a_tmgr_t *a_tbl;
3622 b_tmgr_t *b_tbl;
3623 c_tmgr_t *c_tbl;
3624
3625 /*
3626 * If the pmap does not have its own A table manager, it has no
3627 * valid entries.
3628 */
3629 if (pmap->pm_a_tmgr == NULL)
3630 return 0;
3631
3632 a_tbl = pmap->pm_a_tmgr;
3633
3634 count = 0;
3635 for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3636 if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3637 b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3638 for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3639 if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3640 c_tbl = mmuC2tmgr(
3641 mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3642 if (type == 0)
3643 /*
3644 * A resident entry count has been requested.
3645 */
3646 count += c_tbl->ct_ecnt;
3647 else
3648 /*
3649 * A wired entry count has been requested.
3650 */
3651 count += c_tbl->ct_wcnt;
3652 }
3653 }
3654 }
3655 }
3656
3657 return count;
3658 }
3659
3660 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3661 * The following routines are only used by DDB for tricky kernel *
3662 * text operations in db_memrw.c. They are provided for sun3 *
3663 * compatibility. *
3664 *************************************************************************/
3665 /* get_pte INTERNAL
3666 **
3667 * Return the page descriptor that describes the kernel mapping
3668 * of the given virtual address.
3669 *
3670 * XXX - It might be nice if this worked outside of the MMU
3671 * structures we manage. (Could do it with ptest). -gwr
3672 */
3673 vm_offset_t
3674 get_pte(va)
3675 vm_offset_t va;
3676 {
3677 u_long idx;
3678
3679 if (va < KERNBASE)
3680 return 0;
3681
3682 idx = (u_long) sun3x_btop(va - KERNBASE);
3683 return (kernCbase[idx].attr.raw);
3684 }
3685
3686 /* set_pte INTERNAL
3687 **
3688 * Set the page descriptor that describes the kernel mapping
3689 * of the given virtual address.
3690 */
3691 void
3692 set_pte(va, pte)
3693 vm_offset_t va;
3694 vm_offset_t pte;
3695 {
3696 u_long idx;
3697
3698 if (va < KERNBASE)
3699 return;
3700
3701 idx = (unsigned long) sun3x_btop(va - KERNBASE);
3702 kernCbase[idx].attr.raw = pte;
3703 }
3704
3705 #ifdef PMAP_DEBUG
3706 /************************** DEBUGGING ROUTINES **************************
3707 * The following routines are meant to be an aid to debugging the pmap *
3708 * system. They are callable from the DDB command line and should be *
3709 * prepared to be handed unstable or incomplete states of the system. *
3710 ************************************************************************/
3711
3712 /* pv_list
3713 **
3714 * List all pages found on the pv list for the given physical page.
3715 * To avoid endless loops, the listing will stop at the end of the list
3716 * or after 'n' entries - whichever comes first.
3717 */
3718 void
3719 pv_list(pa, n)
3720 vm_offset_t pa;
3721 int n;
3722 {
3723 int idx;
3724 vm_offset_t va;
3725 pv_t *pv;
3726 c_tmgr_t *c_tbl;
3727 pmap_t pmap;
3728
3729 pv = pa2pv(pa);
3730 idx = pv->pv_idx;
3731
3732 for (;idx != PVE_EOL && n > 0; idx=pvebase[idx].pve_next, n--) {
3733 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3734 printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
3735 idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3736 }
3737 }
3738 #endif /* PMAP_DEBUG */
3739
3740 #ifdef NOT_YET
3741 /* and maybe not ever */
3742 /************************** LOW-LEVEL ROUTINES **************************
3743 * These routines will eventually be re-written into assembly and placed *
3744 * in locore.s. They are here now as stubs so that the pmap module can *
3745 * be linked as a standalone user program for testing. *
3746 ************************************************************************/
3747 /* flush_atc_crp INTERNAL
3748 **
3749 * Flush all page descriptors derived from the given CPU Root Pointer
3750 * (CRP), or 'A' table as it is known here, from the 68851's automatic
3751 * cache.
3752 */
3753 void
3754 flush_atc_crp(a_tbl)
3755 {
3756 mmu_long_rp_t rp;
3757
3758 /* Create a temporary root table pointer that points to the
3759 * given A table.
3760 */
3761 rp.attr.raw = ~MMU_LONG_RP_LU;
3762 rp.addr.raw = (unsigned int) a_tbl;
3763
3764 mmu_pflushr(&rp);
3765 /* mmu_pflushr:
3766 * movel sp(4)@,a0
3767 * pflushr a0@
3768 * rts
3769 */
3770 }
3771 #endif /* NOT_YET */
3772