/*	$NetBSD: pmap.c,v 1.18 1997/03/26 23:27:12 gwr Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE contains the base physical
 * address of a page, to which the remaining 13 bits of the VA are added
 * as an offset, producing the mapped physical address.
 *
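 * As a concrete illustration of the walk (a sketch only; the real index
 * extraction is done by the MMU_TIA/MMU_TIB/MMU_TIC macros from the
 * machine headers, and the 7/6/6/13 bit split is taken from the
 * description above):
 *
 *	a_idx  = (va >> 25) & 0x7f;	bits 31-25: A table index
 *	b_idx  = (va >> 19) & 0x3f;	bits 24-19: B table index
 *	c_idx  = (va >> 13) & 0x3f;	bits 18-13: C table index
 *	offset =  va        & 0x1fff;	bits 12-0:  offset into the page
 *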
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * a MC68851 chip.  Instead, it has a version of this MMU that is part
 * of the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/queue.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/machdep.h>
#include <machine/mon.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

extern void copypage __P((const void*, void*));
extern void zeropage __P((void*));

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
/* Wrapped in do/while (0) so the macro is safe inside if/else bodies. */
#define	DPRINT(args)	do { if (pmap_debug) printf args; } while (0)

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define	DPRINT(args)	/* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management' or 'tmgr'
 * structures.  One for each MMU table in the system.
 *
 *		MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *      towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *              | Kernel     MMU A level table                          |
 * kernBbase -> +-------------------------------------------------------+
 *              | Kernel     MMU B level tables                         |
 * kernCbase -> +-------------------------------------------------------+
 *              |                                                       |
 *              | Kernel     MMU C level tables                         |
 *              |                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *              | User       MMU C level tables                         |
 * mmuBbase  -> +-------------------------------------------------------+
 *              | User       MMU B level tables                         |
 * mmuAbase  -> +-------------------------------------------------------+
 *              |                                                       |
 *              | User       MMU A level tables                         |
 *              |                                                       |
 * tmgrAbase -> +-------------------------------------------------------+
 *              |  TMGR A level table structures                        |
 * tmgrBbase -> +-------------------------------------------------------+
 *              |  TMGR B level table structures                        |
 * tmgrCbase -> +-------------------------------------------------------+
 *              |  TMGR C level table structures                        |
 * pvbase    -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list heads)       |
 * pvebase   -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list elements)    |
 *              |                                                       |
 *              +-------------------------------------------------------+
 *      towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vm_offset_t	kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_short_pte_t	*mmuCbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_long_dte_t	*mmuAbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t		*pvbase;
static pv_elem_t	*pvebase;
struct pmap		kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
static boolean_t
	pv_initialized = FALSE,		/* PV system has been initialized. */
	tmp_vpages_inuse = FALSE,	/*
					 * Temp. virtual pages are in use.
					 * (see pmap_copy_page, et al.)
					 */
	bootstrap_alloc_enabled = FALSE; /* Safe to use pmap_bootstrap_alloc(). */

/*
 * XXX:  For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vm_offset_t	virtual_avail, virtual_end;
/* Physical address space available: */
vm_offset_t	avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vm_offset_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
vm_offset_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vm_offset_t tmp_vpages[2];

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * located on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#define	NUM_A_TABLES	16
#define	NUM_B_TABLES	32
#define	NUM_C_TABLES	64

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define	NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)
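
/*
 * Worked example (a sketch; assumes MMU_C_TBL_SIZE == 64 entries and
 * 8KB pages, per the 6-bit C index and 13-bit page offset described at
 * the top of this file):
 *
 *	NUM_USER_PTES = 64 C tables * 64 PTEs = 4096 PTEs
 *	4096 PTEs * 8KB/page = 32MB of user space mappable at once
 */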

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define	KVAS_SIZE		(-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define	KERN_B_TABLES		(KVAS_SIZE >> MMU_TIA_SHIFT)
#define	KERN_C_TABLES		(KVAS_SIZE >> MMU_TIB_SHIFT)
#define	NUM_KERN_PTES		(KVAS_SIZE >> MMU_TIC_SHIFT)
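
/*
 * Worked example (a sketch only -- KERNBASE and the MMU_TI*_SHIFT
 * values live in the machine headers; the numbers below assume
 * KERNBASE == 0xF8000000 and the 7/6/6/13 bit split described at the
 * top of this file):
 *
 *	KVAS_SIZE     = -0xF8000000      = 0x08000000	(128MB)
 *	KERN_B_TABLES = 0x08000000 >> 25 = 4		(one B table per 32MB)
 *	KERN_C_TABLES = 0x08000000 >> 19 = 256		(one C table per 512KB)
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384	(one PTE per 8KB page)
 */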

/*************************** MISCELLANEOUS MACROS *************************/
#define	PMAP_LOCK()	;	/* Nothing, for now */
#define	PMAP_UNLOCK()	;	/* same. */
#define	NULL	0

static INLINE void *	  mmu_ptov __P((vm_offset_t pa));
static INLINE vm_offset_t mmu_vtop __P((void * va));

#if	0
static INLINE a_tmgr_t *  mmuA2tmgr __P((mmu_long_dte_t *));
#endif
static INLINE b_tmgr_t *  mmuB2tmgr __P((mmu_short_dte_t *));
static INLINE c_tmgr_t *  mmuC2tmgr __P((mmu_short_pte_t *));

static INLINE pv_t *pa2pv __P((vm_offset_t pa));
static INLINE int   pteidx __P((mmu_short_pte_t *));
static INLINE pmap_t current_pmap __P((void));

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(pa)
	vm_offset_t pa;
{
	register vm_offset_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return ((void*)va);
}
static INLINE vm_offset_t
mmu_vtop(vva)
	void *vva;
{
	register vm_offset_t va;

	va = (vm_offset_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return (va - KERNBASE);
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/* This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmuAtbl)
	mmu_long_dte_t *mmuAtbl;
{
	register int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return (&Atmgrbase[idx]);
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmuBtbl)
	mmu_short_dte_t *mmuBtbl;
{
	register int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return (&Btmgrbase[idx]);
}

/* mmuC2tmgr			INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmuCtbl)
	mmu_short_pte_t *mmuCtbl;
{
	register int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return (&Ctmgrbase[idx]);
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		_btop(pa)\
 *	])
 */

/* pa2pv			INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(pa)
	vm_offset_t pa;
{
	register struct pmap_physmem_struct *bank;
	register int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + _btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}

/* pteidx			INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(pte)
	mmu_short_pte_t *pte;
{
	return (pte - kernCbase);
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curproc" appears...
 */
static INLINE pmap_t
current_pmap()
{
	struct proc *p;
	struct vmspace *vm;
	vm_map_t map;
	pmap_t	pmap;

	p = curproc;	/* XXX */
	if (p == NULL)
		pmap = &kernel_pmap;
	else {
		vm = p->p_vmspace;
		map = &vm->vm_map;
		pmap = vm_map_pmap(map);
	}

	return (pmap);
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/** External functions
 ** - functions used within this module but written elsewhere.
 ** both of these functions are in locore.s
 ** XXX - These functions were later replaced with their more cryptic
 ** hp300 counterparts.  They may be removed now.
 **/
#if	0	/* deprecated mmu */
void mmu_seturp __P((vm_offset_t));
void mmu_flush __P((int, vm_offset_t));
void mmu_flusha __P((void));
#endif	/* 0 */

/** Internal functions
 ** - all functions used only within this module are defined in
 **   pmap_pvt.h
 **/

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **/
#ifdef	INCLUDED_IN_PMAP_H
void pmap_bootstrap __P((void));
void *pmap_bootstrap_alloc __P((int));
void pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t));
pmap_t pmap_create __P((vm_size_t));
void pmap_destroy __P((pmap_t));
void pmap_reference __P((pmap_t));
boolean_t pmap_is_referenced __P((vm_offset_t));
boolean_t pmap_is_modified __P((vm_offset_t));
void pmap_clear_modify __P((vm_offset_t));
vm_offset_t pmap_extract __P((pmap_t, vm_offset_t));
void pmap_activate __P((pmap_t));
int pmap_page_index __P((vm_offset_t));
u_int pmap_free_pages __P((void));
#endif	/* INCLUDED_IN_PMAP_H */

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap			INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from _vm_init()
 * in _startup.c.
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 *           system implement pmap_steal_memory() is redundant.
 *           Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(nextva)
	vm_offset_t nextva;
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vm_offset_t va, pa, eva;
	int b, c, i, j;	/* running table counts */
	int size;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.)  It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/*
	 * Determine the range of kernel virtual and physical
	 * space available.  Note that we ABSOLUTELY DEPEND on
	 * the fact that the first bank of memory (4MB) is
	 * mapped linearly to KERNBASE (which we guaranteed in
	 * the first instructions of locore.s).
	 * That is plenty for our bootstrap work.
	 */
	virtual_avail = _round_page(nextva);
	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Don't need avail_start til later. */

	/* We may now call pmap_bootstrap_alloc(). */
	bootstrap_alloc_enabled = TRUE;

	/*
	 * This is a somewhat unwrapped loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks.  The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 * XXX - Use: do { ... } while (membank->next) instead?
	 * XXX - Why copy this stuff at all? -gwr
	 *     - It is needed in pa2pv().
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	while (membank->next) {
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		/* This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
		membank = membank->next;
	}
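
	/*
	 * A sketch of the do { ... } while () form suggested in the XXX
	 * above, which would fold the special-cased final bank (handled
	 * just below) into the loop itself.  Illustration only, not
	 * compiled in; the trailing fixups below that still use
	 * 'membank' would need adjusting as well:
	 *
	 *	do {
	 *		pmap_membank->pmem_start = membank->address;
	 *		pmap_membank->pmem_end = membank->address +
	 *		    membank->size;
	 *		total_phys_mem += membank->size;
	 *		pmap_membank->pmem_next = pmap_membank + 1;
	 *		pmap_membank++;
	 *		membank = membank->next;
	 *	} while (membank != NULL);
	 *	pmap_membank[-1].pmem_next = NULL;
	 */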

	/*
	 * XXX The last bank of memory should be reduced to exclude the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system. XXX - See below - Fix!
	 */
	pmap_membank->pmem_start = membank->address;
	pmap_membank->pmem_end = membank->address + membank->size;
	pmap_membank->pmem_next = NULL;

#if	0	/* XXX - Need to integrate this! */
	/*
	 * The last few pages of physical memory are "owned" by
	 * the PROM.  The total amount of memory we are allowed
	 * to use is given by the romvec pointer. -gwr
	 *
	 * We should dedicate different variables for 'useable'
	 * and 'physically available'.  Most users are used to the
	 * kernel reporting the amount of memory 'physically available'
	 * as opposed to 'useable by the kernel' at boot time. -j
	 */
	total_phys_mem = *romVectorPtr->memoryAvail;
#endif	/* XXX */

	total_phys_mem += membank->size;	/* XXX see above */
	physmem = btoc(total_phys_mem);

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank.  We use this only to
	 * determine if a physical address is "managed" memory.
	 *
	 * XXX - The setting of avail_end is a temporary ROM saving hack.
	 */
	avail_end = pmap_membank->pmem_end -
		(total_phys_mem - *romVectorPtr->memoryAvail);
	avail_end = _trunc_page(avail_end);

	/*
	 * First allocate enough kernel MMU tables to map all
	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
	 * Note: All must be aligned on 256 byte boundaries.
	 * Start with the level-A table (one of those).
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	bzero(kernAbase, size);

	/* Now the level-B kernel tables... */
	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
	kernBbase = pmap_bootstrap_alloc(size);
	bzero(kernBbase, size);

	/* Now the level-C kernel tables... */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(size);
	bzero(kernCbase, size);
	/*
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below.  XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 *
	 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
	 */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * NUM_C_TABLES;
	mmuCbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * NUM_B_TABLES;
	mmuBbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
	mmuAbase = pmap_bootstrap_alloc(size);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase.  The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
			MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j=0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
				| MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}

	/* XXX - Doing kernel_pmap a little further down. */

	pmap_alloc_usermmu();	/* Allocate user MMU tables.		*/
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.	*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.	*/

	/*
	 * We are now done with pmap_bootstrap_alloc().  Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(NBPG);
	bootstrap_alloc_enabled = FALSE;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them.  Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += NBPG;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += NBPG;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space.  Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vm_offset_t) KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page.  The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * XXX - Make it non-cached?
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += NBPG; pa += NBPG;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += NBPG; pa += NBPG;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default).  Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page.  Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = _trunc_page((vm_offset_t)etext);
	for (; va < eva; va += NBPG, pa += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += NBPG, pa += NBPG)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU.  But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
	pmap_bootstrap_setprom();

	/* Notify the VM system of our page size. */
	PAGE_SIZE = NBPG;
	vm_set_page_size();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu()
{
	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv()
{
	int i;
	unsigned int total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system.  Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < NPHYS_RAM_SEGS; i++) {
		avail_mem[i].pmem_pvbase = _btop(total_mem);
		total_mem += avail_mem[i].pmem_end -
			avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
#ifdef	PMAP_DEBUG
	if (total_mem != total_phys_mem)
		panic("pmap_alloc_pv did not arrive at correct page count");
#endif

	pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
		_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr()
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
		* NUM_A_TABLES);
	Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
		* NUM_B_TABLES);
	Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
		* NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
		sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom()
{
	MachMonRomVector *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in MON_KDB_START...MONEND
	 * Note: mon_ctbl[0] maps MON_KDB_START
	 */
	mon_ctbl = *romp->monptaddr;
	i = _btop(MON_KDB_START - KERNBASE);
	kpte = &kernCbase[i];
	len = _btop(MONEND - MON_KDB_START);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * XXX - This does not appear to be necessary, but
	 * I'm not sure yet if it is or not. -gwr
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = _btop(MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = _btop(MON_DVMA_SIZE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu()
{

	loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()			INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom()
{
	mmu_long_dte_t *mon_dte;
	extern struct mmu_rootptr mon_crp;
	int i;

	mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
	}
}


/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init()
{
	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables()
{
	int i;
	a_tmgr_t *a_tbl;

	for (i=0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry.  Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the table in the `proc0',
		 * or kernel, mapping.  This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
			sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables()
{
	int i,j;
	b_tmgr_t *b_tbl;

	for (i=0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,	*/
		b_tbl->bt_pidx = 0;		/* parent index,	*/
		b_tbl->bt_wcnt = 0;		/* wired entry count,	*/
		b_tbl->bt_ecnt = 0;		/* valid entry count.	*/

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j=0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables()
{
	int i,j;
	c_tmgr_t *c_tbl;

	for (i=0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,	*/
		c_tbl->ct_pidx = 0;		/* parent index,	*/
		c_tbl->ct_wcnt = 0;		/* wired entry count,	*/
		c_tbl->ct_ecnt = 0;		/* valid entry count.	*/

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j=0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv()
{
	int i;

	/* Initialize every PV head. */
	for (i = 0; i < _btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags	*/
	}

	pv_initialized = TRUE;
}

/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table()
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = a_pool.tqh_first;
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool.  Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources.  This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}
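
	/*
	 * A sketch of the sleep-based alternative described in the XXX
	 * above (illustration only, not compiled in).  It assumes every
	 * path that returns a table to a_pool does a wakeup(&a_pool):
	 *
	 *	while ((tbl = a_pool.tqh_first) == NULL)
	 *		tsleep(&a_pool, PVM, "get_a_table", 0);
	 */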

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here.  This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is FALSE.  This
	 * indicates that the table should not be relinked into the A table
	 * pool.  That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		pmap = tbl->at_parent;
		free_a_table(tbl, FALSE);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
#ifdef	NON_REENTRANT
	/*
	 * If the table isn't to be wired down, re-insert it at the
	 * end of the pool.
	 */
	if (!wired)
		/*
		 * Quandary - XXX
		 * Would it be better to let the calling function insert this
		 * table into the queue?  By inserting it here, we are allowing
		 * it to be stolen immediately.  The calling function is
		 * probably not expecting to use a table that it is not
		 * assured full control of.
		 * Answer - In the interest of re-entrancy, it is best to let
		 * the calling function determine when a table is available
		 * for use.  Therefore this code block is not used.
		 */
		TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
#endif	/* NON_REENTRANT */
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table()
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = b_pool.tqh_first;
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, FALSE);
	}
#ifdef	NON_REENTRANT
	if (!wired)
		/* XXX see quandary in get_a_table */
		/* XXX start lock */
		TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
		/* XXX end lock */
#endif	/* NON_REENTRANT */
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table()
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = c_pool.tqh_first;
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, FALSE);
	}
#ifdef	NON_REENTRANT
	if (!wired)
		/* XXX See quandary in get_a_table */
		/* XXX start lock */
		TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
		/* XXX end lock */
#endif	/* NON_REENTRANT */

	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tbl, relink)
	a_tmgr_t *a_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*tmgr;

	/*
	 * Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * XXX - Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/*
	 * Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * XXX - Not implemented in sun3x.
	 */

	/*
	 * All A tables in the system should retain a map for the
	 * kernel.  If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i=0; i < MMU_TIA(KERNBASE); i++) {
			/*
			 * If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/*
				 * The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base address of the
				 *    B table pointed to in the A table
				 *    entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = mmu_ptov(dte[i].addr.raw);
				tmgr = mmuB2tmgr(dtbl);
				removed_cnt += free_b_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		a_tbl->at_ecnt = 0;
	}
	if (relink) {
		a_tbl->at_parent = NULL;
		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
	}
	return removed_cnt;
}

/* free_b_table			INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tbl, relink)
	b_tmgr_t *b_tbl;
	boolean_t relink;
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*tmgr;

	removed_cnt = 0;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i=0; i < MMU_B_TBL_SIZE; i++) {
			if (MMU_VALID_DT(dte[i])) {
				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
				tmgr = mmuC2tmgr(dtbl);
				removed_cnt += free_c_table(tmgr, TRUE);
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		b_tbl->bt_ecnt = 0;
	}

	if (relink) {
		b_tbl->bt_parent = NULL;
		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
	}
	return removed_cnt;
}

/* free_c_table			INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tbl, relink)
	c_tmgr_t *c_tbl;
	boolean_t relink;
{
	int i, removed_cnt;

	removed_cnt = 0;
	if (c_tbl->ct_ecnt) {
		for (i=0; i < MMU_C_TBL_SIZE; i++) {
			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
				removed_cnt++;
			}
		}
		c_tbl->ct_ecnt = 0;
	}

	if (relink) {
		c_tbl->ct_parent = NULL;
		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	}
	return removed_cnt;
}

#if	0
/* free_c_table_novalid			INTERNAL
 **
 * Frees the given C table manager without checking to see whether
 * or not it contains any valid page descriptors as it is assumed
 * that it does not.
 */
void
free_c_table_novalid(c_tbl)
	c_tmgr_t *c_tbl;
{
	TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
	TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
	c_tbl->ct_parent->bt_ecnt--;
	/*
	 * XXX - Should call equiv. of 'free_b_table_novalid' here if
	 * we just removed the last entry of the parent B table.
	 * But I want to ensure that this will not endanger pmap_enter()
	 * with sudden removal of tables it is working with.
	 *
	 * We should probably add another field to each table, indicating
	 * whether or not it is 'locked', ie. in the process of being
	 * modified.
	 */
	c_tbl->ct_parent = NULL;
}
#endif

/* pmap_remove_pte			INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
 * physical page it maps to.  This function does not update
 * any reference counts because it is assumed that the calling
 * function will do so.
 */
void
pmap_remove_pte(pte)
	mmu_short_pte_t *pte;
{
	u_short     pv_idx, targ_idx;
	int         s;
	vm_offset_t pa;
	pv_t       *pv;

	pa = MMU_PTE_PA(*pte);
	if (is_managed(pa)) {
		pv = pa2pv(pa);
		targ_idx = pteidx(pte);	/* Index of PTE being removed */

		/*
		 * If the PTE being removed is the first (or only) PTE in
		 * the list of PTEs currently mapped to this page, remove the
		 * PTE by changing the index found on the PV head.  Otherwise
		 * a linear search through the list will have to be executed
		 * in order to find the PVE which points to the PTE being
		 * removed, so that it may be modified to point to its new
		 * neighbor.
		 */
		s = splimp();
		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
		if (pv_idx == targ_idx) {
			pv->pv_idx = pvebase[targ_idx].pve_next;
		} else {
			/*
			 * Find the PV element which points to the target
			 * element.
			 */
			while (pvebase[pv_idx].pve_next != targ_idx) {
				pv_idx = pvebase[pv_idx].pve_next;
#ifdef	DIAGNOSTIC
				if (pv_idx == PVE_EOL)
					panic("pmap_remove_pte: pv list end!");
#endif
			}

			/*
			 * At this point, pv_idx is the index of the PV
			 * element just before the target element in the list.
			 * Unlink the target.
			 */
			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
		}
		/*
		 * Save the mod/ref bits of the pte by simply
		 * ORing the entire pte onto the pv_flags member
		 * of the pv structure.
		 * There is no need to use a separate bit pattern
		 * for usage information on the pv head than that
		 * which is used on the MMU ptes.
		 */
		pv->pv_flags |= (u_short) pte->attr.raw;
		splx(s);
	}

	pte->attr.raw = MMU_DT_INVALID;
}

1602 #if 0 /* XXX - I am eliminating this function. -j */
1603 /* pmap_dereference_pte INTERNAL
1604 **
1605 * Update the necessary reference counts in any tables and pmaps to
1606 * reflect the removal of the given pte. Only called when no knowledge of
1607 * the pte's associated pmap is unknown. This only occurs in the PV call
1608 * 'pmap_page_protect()' with a protection of VM_PROT_NONE, which means
1609 * that all references to a given physical page must be removed.
1610 */
1611 void
1612 pmap_dereference_pte(pte)
1613 mmu_short_pte_t *pte;
1614 {
1615 vm_offset_t va;
1616 c_tmgr_t *c_tbl;
1617 pmap_t pmap;
1618
1619 va = pmap_get_pteinfo(pte, &pmap, &c_tbl);
1620 /*
1621 * Flush the translation cache of the page mapped by the PTE, should
1622 * it prove to be in the current pmap. Kernel mappings appear in
1623 * all address spaces, so they always should be flushed
1624 */
1625 if (pmap == pmap_kernel() || pmap == current_pmap())
1626 TBIS(va);
1627
1628 /*
1629 * If the mapping belongs to a user map, update the necessary
1630 * reference counts in the table manager. XXX - It would be
1631 * much easier to keep the resident count in the c_tmgr_t -gwr
1632 */
1633 if (pmap != pmap_kernel()) {
1634 /*
1635 * Most of the situations in which pmap_dereference_pte() is
1636 * called are usually temporary removals of a mapping. Often
1637 * the mapping is reinserted shortly afterwards. If the parent
1638 * C table's valid entry count reaches zero as a result of
1639 * removing this mapping, we could return it to the free pool,
1640 * but we leave it alone because it is likely to be used as
1641 * stated above.
1642 */
1643 c_tbl->ct_ecnt--;
1644 pmap->pm_stats.resident_count--;
1645 }
1646 }
1647 #endif /* 0 - function elimination */
1648
1649 /* pmap_stroll INTERNAL
1650 **
1651 * Retrieve the addresses of all table managers involved in the mapping of
1652  * the given virtual address. If the table walk completed successfully,
1653  * return TRUE. If it was only partially successful, return FALSE.
1654 * The table walk performed by this function is important to many other
1655 * functions in this module.
1656 *
1657 * Note: This function ought to be easier to read.
1658 */
1659 boolean_t
1660 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
1661 pmap_t pmap;
1662 vm_offset_t va;
1663 a_tmgr_t **a_tbl;
1664 b_tmgr_t **b_tbl;
1665 c_tmgr_t **c_tbl;
1666 mmu_short_pte_t **pte;
1667 int *a_idx, *b_idx, *pte_idx;
1668 {
1669 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1670 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1671
1672 if (pmap == pmap_kernel())
1673 return FALSE;
1674
1675 /* Does the given pmap have its own A table? */
1676 *a_tbl = pmap->pm_a_tmgr;
1677 if (*a_tbl == NULL)
1678 return FALSE; /* No. Return unknown. */
1679 /* Does the A table have a valid B table
1680 * under the corresponding table entry?
1681 */
1682 *a_idx = MMU_TIA(va);
1683 a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1684 if (!MMU_VALID_DT(*a_dte))
1685 return FALSE; /* No. Return unknown. */
1686 /* Yes. Extract B table from the A table. */
1687 *b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1688 /* Does the B table have a valid C table
1689 * under the corresponding table entry?
1690 */
1691 *b_idx = MMU_TIB(va);
1692 b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1693 if (!MMU_VALID_DT(*b_dte))
1694 return FALSE; /* No. Return unknown. */
1695 /* Yes. Extract C table from the B table. */
1696 *c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1697 *pte_idx = MMU_TIC(va);
1698 *pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1699
1700 return TRUE;
1701 }
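
/*
 * Usage sketch (illustrative, not from the original file): callers
 * check the return value before touching any of the returned
 * pointers, e.g.
 *
 *	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
 *	    &a_idx, &b_idx, &pte_idx) == FALSE)
 *		return;		(walk incomplete; no such mapping)
 *	if (MMU_VALID_DT(*pte))
 *		pa = MMU_PTE_PA(*pte);
 *
 * which is essentially what pmap_extract() and pmap_change_wiring()
 * below do with it.
 */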
1702
1703 /* pmap_enter INTERFACE
1704 **
1705 * Called by the kernel to map a virtual address
1706 * to a physical address in the given process map.
1707 *
1708 * Note: this function should apply an exclusive lock
1709 * on the pmap system for its duration. (it certainly
1710 * would save my hair!!)
1711 * This function ought to be easier to read.
1712 */
1713 void
1714 pmap_enter(pmap, va, pa, prot, wired)
1715 pmap_t pmap;
1716 vm_offset_t va;
1717 vm_offset_t pa;
1718 vm_prot_t prot;
1719 boolean_t wired;
1720 {
1721 boolean_t insert, managed; /* Marks the need for PV insertion.*/
1722 u_short nidx; /* PV list index */
1723 int s; /* Used for splimp()/splx() */
1724 int flags; /* Mapping flags. eg. Cache inhibit */
1725 u_int a_idx, b_idx, pte_idx; /* table indices */
1726 a_tmgr_t *a_tbl; /* A: long descriptor table manager */
1727 b_tmgr_t *b_tbl; /* B: short descriptor table manager */
1728 c_tmgr_t *c_tbl; /* C: short page table manager */
1729 mmu_long_dte_t *a_dte; /* A: long descriptor table */
1730 mmu_short_dte_t *b_dte; /* B: short descriptor table */
1731 mmu_short_pte_t *c_pte; /* C: short page descriptor table */
1732 pv_t *pv; /* pv list head */
1733 enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end */
1734
1735 if (pmap == NULL)
1736 return;
1737 if (pmap == pmap_kernel()) {
1738 pmap_enter_kernel(va, pa, prot);
1739 return;
1740 }
1741
1742 flags = (pa & ~MMU_PAGE_MASK);
1743 pa &= MMU_PAGE_MASK;
1744
1745 /*
1746 * Determine if the physical address being mapped is managed.
1747 * If it isn't, the mapping should be cache inhibited. (This is
1748 * applied later in the function.) XXX - Why non-cached? -gwr
1749 */
1750 if ((managed = is_managed(pa)) == FALSE)
1751 flags |= PMAP_NC;
1752
1753 /*
1754 * For user mappings we walk along the MMU tables of the given
1755 * pmap, reaching a PTE which describes the virtual page being
1756 * mapped or changed. If any level of the walk ends in an invalid
1757 * entry, a table must be allocated and the entry must be updated
1758 * to point to it.
1759 * There is a bit of confusion as to whether this code must be
1760 * re-entrant. For now we will assume it is. To support
1761 * re-entrancy we must unlink tables from the table pool before
1762 * we assume we may use them. Tables are re-linked into the pool
1763 * when we are finished with them at the end of the function.
1764 * But I don't feel like doing that until we have proof that this
1765 * needs to be re-entrant.
1766 * 'llevel' records which tables need to be relinked.
1767 */
1768 llevel = NONE;
1769
1770 /*
1771 * Step 1 - Retrieve the A table from the pmap. If it has no
1772 * A table, allocate a new one from the available pool.
1773 */
1774
1775 a_tbl = pmap->pm_a_tmgr;
1776 if (a_tbl == NULL) {
1777 /*
1778 * This pmap does not currently have an A table. Allocate
1779 * a new one.
1780 */
1781 a_tbl = get_a_table();
1782 a_tbl->at_parent = pmap;
1783
1784 /*
1785 * Assign this new A table to the pmap, and calculate its
1786 * physical address so that loadcrp() can be used to make
1787 * the table active.
1788 */
1789 pmap->pm_a_tmgr = a_tbl;
1790 pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1791
1792 /*
1793 * If the process receiving a new A table is the current
1794 * process, we are responsible for setting the MMU so that
1795 * it becomes the current address space. This only adds
1796 * new mappings, so no need to flush anything.
1797 */
1798 if (pmap == current_pmap()) {
1799 kernel_crp.rp_addr = pmap->pm_a_phys;
1800 loadcrp(&kernel_crp);
1801 }
1802
1803 if (!wired)
1804 llevel = NEWA;
1805 } else {
1806 /*
1807 * Use the A table already allocated for this pmap.
1808 * Unlink it from the A table pool if necessary.
1809 */
1810 if (wired && !a_tbl->at_wcnt)
1811 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1812 }
1813
1814 /*
1815 * Step 2 - Walk into the B table. If there is no valid B table,
1816 * allocate one.
1817 */
1818
1819 a_idx = MMU_TIA(va); /* Calculate the TIA of the VA. */
1820 a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1821 if (MMU_VALID_DT(*a_dte)) { /* Is the descriptor valid? */
1822 /* The descriptor is valid. Use the B table it points to. */
1823 /*************************************
1824 * a_idx *
1825 * v *
1826 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1827 * | | | | | | | | | | | | *
1828 * +-+-+-+-+-+-+-+-+-+-+-+- *
1829 * | *
1830 * \- b_tbl -> +-+- *
1831 * | | *
1832 * +-+- *
1833 *************************************/
1834 b_dte = mmu_ptov(a_dte->addr.raw);
1835 b_tbl = mmuB2tmgr(b_dte);
1836
1837 /*
1838 * If the requested mapping must be wired, but this table
1839 * being used to map it is not, the table must be removed
1840 * from the available pool and its wired entry count
1841 * incremented.
1842 */
1843 if (wired && !b_tbl->bt_wcnt) {
1844 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1845 a_tbl->at_wcnt++;
1846 }
1847 } else {
1848 /* The descriptor is invalid. Allocate a new B table. */
1849 b_tbl = get_b_table();
1850
1851 /* Point the parent A table descriptor to this new B table. */
1852 a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1853 a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1854 a_tbl->at_ecnt++; /* Update parent's valid entry count */
1855
1856 /* Create the necessary back references to the parent table */
1857 b_tbl->bt_parent = a_tbl;
1858 b_tbl->bt_pidx = a_idx;
1859
1860 /*
1861 * If this table is to be wired, make sure the parent A table
1862 * wired count is updated to reflect that it has another wired
1863 * entry.
1864 */
1865 if (wired)
1866 a_tbl->at_wcnt++;
1867 else if (llevel == NONE)
1868 llevel = NEWB;
1869 }
1870
1871 /*
1872 * Step 3 - Walk into the C table, if there is no valid C table,
1873 * allocate one.
1874 */
1875
1876 b_idx = MMU_TIB(va); /* Calculate the TIB of the VA */
1877 b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1878 if (MMU_VALID_DT(*b_dte)) { /* Is the descriptor valid? */
1879 /* The descriptor is valid. Use the C table it points to. */
1880 /**************************************
1881 * c_idx *
1882 * | v *
1883 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1884 * | | | | | | | | | | | *
1885 * +-+-+-+-+-+-+-+-+-+-+- *
1886 * | *
1887 * \- c_tbl -> +-+-- *
1888 * | | | *
1889 * +-+-- *
1890 **************************************/
1891 		c_pte = mmu_ptov(MMU_DTE_PA(*b_dte));
1892 c_tbl = mmuC2tmgr(c_pte);
1893
1894 /* If mapping is wired and table is not */
1895 if (wired && !c_tbl->ct_wcnt) {
1896 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1897 b_tbl->bt_wcnt++;
1898 }
1899 } else {
1900 /* The descriptor is invalid. Allocate a new C table. */
1901 c_tbl = get_c_table();
1902
1903 /* Point the parent B table descriptor to this new C table. */
1904 b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1905 b_dte->attr.raw |= MMU_DT_SHORT;
1906 b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1907
1908 /* Create the necessary back references to the parent table */
1909 c_tbl->ct_parent = b_tbl;
1910 c_tbl->ct_pidx = b_idx;
1911
1912 /*
1913 * If this table is to be wired, make sure the parent B table
1914 * wired count is updated to reflect that it has another wired
1915 * entry.
1916 */
1917 if (wired)
1918 b_tbl->bt_wcnt++;
1919 else if (llevel == NONE)
1920 llevel = NEWC;
1921 }
1922
1923 /*
1924 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1925 * slot of the C table, describing the PA to which the VA is mapped.
1926 */
1927
1928 pte_idx = MMU_TIC(va);
1929 c_pte = &c_tbl->ct_dtbl[pte_idx];
1930 if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1931 /*
1932 * The PTE is currently valid. This particular call
1933 * is just a synonym for one (or more) of the following
1934 * operations:
1935 * change protection of a page
1936 * change wiring status of a page
1937 * remove the mapping of a page
1938 *
1939 * XXX - Semi critical: This code should unwire the PTE
1940 * and, possibly, associated parent tables if this is a
1941 * change wiring operation. Currently it does not.
1942 *
1943 * This may be ok if pmap_change_wiring() is the only
1944 * interface used to UNWIRE a page.
1945 */
1946
1947 /* First check if this is a wiring operation. */
1948 if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1949 /*
1950 * The PTE is already wired. To prevent it from being
1951 * counted as a new wiring operation, reset the 'wired'
1952 * variable.
1953 */
1954 wired = FALSE;
1955 }
1956
1957 /* Is the new address the same as the old? */
1958 if (MMU_PTE_PA(*c_pte) == pa) {
1959 /*
1960 * Yes, mark that it does not need to be reinserted
1961 * into the PV list.
1962 */
1963 insert = FALSE;
1964
1965 /*
1966 * Clear all but the modified, referenced and wired
1967 * bits on the PTE.
1968 */
1969 c_pte->attr.raw &= (MMU_SHORT_PTE_M
1970 | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1971 } else {
1972 /* No, remove the old entry */
1973 pmap_remove_pte(c_pte);
1974 insert = TRUE;
1975 }
1976
1977 /*
1978 * TLB flush is only necessary if modifying current map.
1979 * However, in pmap_enter(), the pmap almost always IS
1980 * the current pmap, so don't even bother to check.
1981 */
1982 TBIS(va);
1983 } else {
1984 /*
1985 * The PTE is invalid. Increment the valid entry count in
1986 * the C table manager to reflect the addition of a new entry.
1987 */
1988 c_tbl->ct_ecnt++;
1989
1990 /* XXX - temporarily make sure the PTE is cleared. */
1991 c_pte->attr.raw = 0;
1992
1993 /* It will also need to be inserted into the PV list. */
1994 insert = TRUE;
1995 }
1996
1997 /*
1998 * If page is changing from unwired to wired status, set an unused bit
1999 * within the PTE to indicate that it is wired. Also increment the
2000 * wired entry count in the C table manager.
2001 */
2002 if (wired) {
2003 c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
2004 c_tbl->ct_wcnt++;
2005 }
2006
2007 /*
2008 * Map the page, being careful to preserve modify/reference/wired
2009 * bits. At this point it is assumed that the PTE either has no bits
2010 * set, or if there are set bits, they are only modified, reference or
2011 * wired bits. If not, the following statement will cause erratic
2012 * behavior.
2013 */
2014 #ifdef PMAP_DEBUG
2015 if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
2016 MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
2017 printf("pmap_enter: junk left in PTE at %p\n", c_pte);
2018 Debugger();
2019 }
2020 #endif
2021 c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2022
2023 /*
2024 * If the mapping should be read-only, set the write protect
2025 * bit in the PTE.
2026 */
2027 if (!(prot & VM_PROT_WRITE))
2028 c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2029
2030 /*
2031 	 * If the mapping should be cache inhibited (indicated by flag
2032 	 * bits in the low-order, non-page-frame bits of the physical
2033 	 * address), mark the PTE as a cache-inhibited page.
2034 */
2035 if (flags & PMAP_NC)
2036 c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2037
2038 /*
2039 * If the physical address being mapped is managed by the PV
2040 * system then link the pte into the list of pages mapped to that
2041 * address.
2042 */
2043 if (insert && managed) {
2044 pv = pa2pv(pa);
2045 nidx = pteidx(c_pte);
2046
2047 s = splimp();
2048 pvebase[nidx].pve_next = pv->pv_idx;
2049 pv->pv_idx = nidx;
2050 splx(s);
2051 }
2052
2053 /* Move any allocated tables back into the active pool. */
2054
2055 switch (llevel) {
2056 case NEWA:
2057 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2058 /* FALLTHROUGH */
2059 case NEWB:
2060 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2061 /* FALLTHROUGH */
2062 case NEWC:
2063 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2064 /* FALLTHROUGH */
2065 default:
2066 break;
2067 }
2068 }
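
/*
 * Usage sketch (illustrative, not from the original file): the VM
 * system typically enters a mapping resolved by a fault with
 * something like
 *
 *	pmap_enter(vm_map_pmap(map), va, VM_PAGE_TO_PHYS(pg),
 *	    prot, wired);
 *
 * where any special attributes such as PMAP_NC ride in on the
 * low-order, non-page-frame bits of the physical address, as
 * decoded at the top of this function.
 */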
2069
2070 /* pmap_enter_kernel INTERNAL
2071 **
2072 * Map the given virtual address to the given physical address within the
2073 * kernel address space. This function exists because the kernel map does
2074 * not do dynamic table allocation. It consists of a contiguous array of ptes
2075 * and can be edited directly without the need to walk through any tables.
2076 *
2077 * XXX: "Danger, Will Robinson!"
2078 * Note that the kernel should never take a fault on any page
2079 * between [ KERNBASE .. virtual_avail ] and this is checked in
2080 * trap.c for kernel-mode MMU faults. This means that mappings
2081  * created in that range must be implicitly wired. -gwr
2082 */
2083 void
2084 pmap_enter_kernel(va, pa, prot)
2085 vm_offset_t va;
2086 vm_offset_t pa;
2087 vm_prot_t prot;
2088 {
2089 boolean_t was_valid, insert;
2090 u_short pte_idx, pv_idx;
2091 int s, flags;
2092 mmu_short_pte_t *pte;
2093 pv_t *pv;
2094 vm_offset_t old_pa;
2095
2096 flags = (pa & ~MMU_PAGE_MASK);
2097 pa &= MMU_PAGE_MASK;
2098
2099 /*
2100 * Calculate the index of the PTE being modified.
2101 */
2102 pte_idx = (u_long) _btop(va - KERNBASE);
2103
2104 /* XXX - This array is traditionally named "Sysmap" */
2105 pte = &kernCbase[pte_idx];
2106
2107 s = splimp();
2108 if (MMU_VALID_DT(*pte)) {
2109 was_valid = TRUE;
2110 /*
2111 * If the PTE is already mapped to an address and it differs
2112 * from the address requested, unlink it from the PV list.
2113 *
2114 * This only applies to mappings within virtual_avail
2115 * and VM_MAX_KERNEL_ADDRESS. All others are not requests
2116 * from the VM system and should not be part of the PV system.
2117 */
2118 if ((va >= virtual_avail) && (va < VM_MAX_KERNEL_ADDRESS)) {
2119 old_pa = MMU_PTE_PA(*pte);
2120 if (pa != old_pa) {
2121 if (is_managed(old_pa)) {
2122 /* XXX - Make this into a function call? */
2123 pv = pa2pv(old_pa);
2124 pv_idx = pv->pv_idx;
2125 if (pv_idx == pte_idx) {
2126 pv->pv_idx = pvebase[pte_idx].pve_next;
2127 } else {
2128 while (pvebase[pv_idx].pve_next != pte_idx)
2129 pv_idx = pvebase[pv_idx].pve_next;
2130 pvebase[pv_idx].pve_next =
2131 pvebase[pte_idx].pve_next;
2132 }
2133 /* Save modified/reference bits */
2134 pv->pv_flags |= (u_short) pte->attr.raw;
2135 }
2136 if (is_managed(pa))
2137 insert = TRUE;
2138 else
2139 insert = FALSE;
2140 /*
2141 * Clear out any old bits in the PTE.
2142 */
2143 pte->attr.raw = MMU_DT_INVALID;
2144 } else {
2145 /*
2146 * Old PA and new PA are the same. No need to relink
2147 * the mapping within the PV list.
2148 */
2149 insert = FALSE;
2150
2151 /*
2152 * Save any mod/ref bits on the PTE.
2153 */
2154 pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2155 }
2156 } else {
2157 /*
2158 * If the VA lies below virtual_avail or beyond
2159 * VM_MAX_KERNEL_ADDRESS, it is not a request by the VM
2160 * system and hence does not need to be linked into the PV
2161 * system.
2162 */
2163 insert = FALSE;
2164 pte->attr.raw = MMU_DT_INVALID;
2165 }
2166 } else {
2167 pte->attr.raw = MMU_DT_INVALID;
2168 was_valid = FALSE;
2169 if ((va >= virtual_avail) && (va < VM_MAX_KERNEL_ADDRESS)) {
2170 if (is_managed(pa))
2171 insert = TRUE;
2172 else
2173 insert = FALSE;
2174 } else
2175 insert = FALSE;
2176 }
2177
2178 /*
2179 	 * Map the page, being careful to preserve modified/referenced bits
2180 * on the PTE.
2181 */
2182 pte->attr.raw |= (pa | MMU_DT_PAGE);
2183
2184 if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2185 pte->attr.raw |= MMU_SHORT_PTE_WP;
2186 if (flags & PMAP_NC)
2187 pte->attr.raw |= MMU_SHORT_PTE_CI;
2188 if (was_valid)
2189 TBIS(va);
2190
2191 /*
2192 * Insert the PTE into the PV system, if need be.
2193 */
2194 if (insert) {
2195 pv = pa2pv(pa);
2196 pvebase[pte_idx].pve_next = pv->pv_idx;
2197 pv->pv_idx = pte_idx;
2198 }
2199 splx(s);
2200
2201 }
2202
2203 /* pmap_protect INTERFACE
2204 **
2205 * Apply the given protection to the given virtual address range within
2206 * the given map.
2207 *
2208 * It is ok for the protection applied to be stronger than what is
2209 * specified. We use this to our advantage when the given map has no
2210 * mapping for the virtual address. By skipping a page when this
2211 * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2212 * and therefore do not need to map the page just to apply a protection
2213 * code. Only pmap_enter() needs to create new mappings if they do not exist.
2214 *
2215  * XXX - This function could be sped up by using pmap_stroll() for initial
2216  * setup, and then manual scrolling in the for() loop.
2217 */
2218 void
2219 pmap_protect(pmap, startva, endva, prot)
2220 pmap_t pmap;
2221 vm_offset_t startva, endva;
2222 vm_prot_t prot;
2223 {
2224 boolean_t iscurpmap;
2225 int a_idx, b_idx, c_idx;
2226 a_tmgr_t *a_tbl;
2227 b_tmgr_t *b_tbl;
2228 c_tmgr_t *c_tbl;
2229 mmu_short_pte_t *pte;
2230
2231 if (pmap == NULL)
2232 return;
2233 if (pmap == pmap_kernel()) {
2234 pmap_protect_kernel(startva, endva, prot);
2235 return;
2236 }
2237
2238 /*
2239 * In this particular pmap implementation, there are only three
2240 * types of memory protection: 'all' (read/write/execute),
2241 * 'read-only' (read/execute) and 'none' (no mapping.)
2242 * It is not possible for us to treat 'executable' as a separate
2243 * protection type. Therefore, protection requests that seek to
2244 * remove execute permission while retaining read or write, and those
2245 * that make little sense (write-only for example) are ignored.
2246 */
2247 switch (prot) {
2248 case VM_PROT_NONE:
2249 /*
2250 * A request to apply the protection code of
2251 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2252 */
2253 pmap_remove(pmap, startva, endva);
2254 return;
2255 case VM_PROT_EXECUTE:
2256 case VM_PROT_READ:
2257 case VM_PROT_READ|VM_PROT_EXECUTE:
2258 /* continue */
2259 break;
2260 case VM_PROT_WRITE:
2261 case VM_PROT_WRITE|VM_PROT_READ:
2262 case VM_PROT_WRITE|VM_PROT_EXECUTE:
2263 case VM_PROT_ALL:
2264 /* None of these should happen in a sane system. */
2265 return;
2266 }
2267
2268 /*
2269 * If the pmap has no A table, it has no mappings and therefore
2270 * there is nothing to protect.
2271 */
2272 if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2273 return;
2274
2275 a_idx = MMU_TIA(startva);
2276 b_idx = MMU_TIB(startva);
2277 c_idx = MMU_TIC(startva);
2278 	b_tbl = NULL; c_tbl = NULL;
2279
2280 iscurpmap = (pmap == current_pmap());
2281 while (startva < endva) {
2282 if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2283 if (b_tbl == NULL) {
2284 b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2285 b_tbl = mmu_ptov((vm_offset_t) b_tbl);
2286 b_tbl = mmuB2tmgr((mmu_short_dte_t *) b_tbl);
2287 }
2288 if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2289 if (c_tbl == NULL) {
2290 c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2291 c_tbl = mmu_ptov((vm_offset_t) c_tbl);
2292 c_tbl = mmuC2tmgr((mmu_short_pte_t *) c_tbl);
2293 }
2294 if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2295 pte = &c_tbl->ct_dtbl[c_idx];
2296 /* make the mapping read-only */
2297 pte->attr.raw |= MMU_SHORT_PTE_WP;
2298 /*
2299 * If we just modified the current address space,
2300 * flush any translations for the modified page from
2301 * the translation cache and any data from it in the
2302 * data cache.
2303 */
2304 if (iscurpmap)
2305 TBIS(startva);
2306 }
2307 startva += NBPG;
2308
2309 if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2310 c_tbl = NULL;
2311 c_idx = 0;
2312 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2313 b_tbl = NULL;
2314 b_idx = 0;
2315 }
2316 }
2317 } else { /* C table wasn't valid */
2318 c_tbl = NULL;
2319 c_idx = 0;
2320 startva += MMU_TIB_RANGE;
2321 if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2322 b_tbl = NULL;
2323 b_idx = 0;
2324 }
2325 } /* C table */
2326 } else { /* B table wasn't valid */
2327 b_tbl = NULL;
2328 b_idx = 0;
2329 startva += MMU_TIA_RANGE;
2330 a_idx++;
2331 } /* B table */
2332 }
2333 }
2334
2335 /* pmap_protect_kernel INTERNAL
2336 **
2337 * Apply the given protection code to a kernel address range.
2338 */
2339 void
2340 pmap_protect_kernel(startva, endva, prot)
2341 vm_offset_t startva, endva;
2342 vm_prot_t prot;
2343 {
2344 vm_offset_t va;
2345 mmu_short_pte_t *pte;
2346
2347 pte = &kernCbase[(unsigned long) _btop(startva - KERNBASE)];
2348 for (va = startva; va < endva; va += NBPG, pte++) {
2349 if (MMU_VALID_DT(*pte)) {
2350 switch (prot) {
2351 case VM_PROT_ALL:
2352 break;
2353 case VM_PROT_EXECUTE:
2354 case VM_PROT_READ:
2355 case VM_PROT_READ|VM_PROT_EXECUTE:
2356 pte->attr.raw |= MMU_SHORT_PTE_WP;
2357 break;
2358 case VM_PROT_NONE:
2359 /* this is an alias for 'pmap_remove_kernel' */
2360 pmap_remove_pte(pte);
2361 break;
2362 default:
2363 break;
2364 }
2365 /*
2366 * since this is the kernel, immediately flush any cached
2367 * descriptors for this address.
2368 */
2369 TBIS(va);
2370 }
2371 }
2372 }
2373
2374 /* pmap_change_wiring INTERFACE
2375 **
2376 * Changes the wiring of the specified page.
2377 *
2378 * This function is called from vm_fault.c to unwire
2379 * a mapping. It really should be called 'pmap_unwire'
2380 * because it is never asked to do anything but remove
2381 * wirings.
2382 */
2383 void
2384 pmap_change_wiring(pmap, va, wire)
2385 pmap_t pmap;
2386 vm_offset_t va;
2387 boolean_t wire;
2388 {
2389 int a_idx, b_idx, c_idx;
2390 a_tmgr_t *a_tbl;
2391 b_tmgr_t *b_tbl;
2392 c_tmgr_t *c_tbl;
2393 mmu_short_pte_t *pte;
2394
2395 /* Kernel mappings always remain wired. */
2396 if (pmap == pmap_kernel())
2397 return;
2398
2399 #ifdef PMAP_DEBUG
2400 if (wire == TRUE)
2401 panic("pmap_change_wiring: wire requested.");
2402 #endif
2403
2404 /*
2405 * Walk through the tables. If the walk terminates without
2406 * a valid PTE then the address wasn't wired in the first place.
2407 * Return immediately.
2408 */
2409 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2410 &b_idx, &c_idx) == FALSE)
2411 return;
2412
2413
2414 /* Is the PTE wired? If not, return. */
2415 if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2416 return;
2417
2418 /* Remove the wiring bit. */
2419 pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2420
2421 /*
2422 * Decrement the wired entry count in the C table.
2423 * If it reaches zero the following things happen:
2424 * 1. The table no longer has any wired entries and is considered
2425 * unwired.
2426 * 2. It is placed on the available queue.
2427 * 3. The parent table's wired entry count is decremented.
2428 * 4. If it reaches zero, this process repeats at step 1 and
2429 	 * stops after reaching the A table.
2430 */
2431 if (--c_tbl->ct_wcnt == 0) {
2432 TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2433 if (--b_tbl->bt_wcnt == 0) {
2434 TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2435 if (--a_tbl->at_wcnt == 0) {
2436 TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2437 }
2438 }
2439 }
2440 }
2441
2442 /* pmap_pageable INTERFACE
2443 **
2444  * Make the specified range of addresses within the given pmap
2445  * 'pageable' or 'not-pageable'. A non-pageable page must not cause
2446  * any faults when referenced. A pageable page may.
2447 *
2448 * This routine is only advisory. The VM system will call pmap_enter()
2449 * to wire or unwire pages that are going to be made pageable before calling
2450 * this function. By the time this routine is called, everything that needs
2451 * to be done has already been done.
2452 */
2453 void
2454 pmap_pageable(pmap, start, end, pageable)
2455 pmap_t pmap;
2456 vm_offset_t start, end;
2457 boolean_t pageable;
2458 {
2459 /* not implemented. */
2460 }
2461
2462 /* pmap_copy INTERFACE
2463 **
2464 * Copy the mappings of a range of addresses in one pmap, into
2465 * the destination address of another.
2466 *
2467 * This routine is advisory. Should we one day decide that MMU tables
2468 * may be shared by more than one pmap, this function should be used to
2469 * link them together. Until that day however, we do nothing.
2470 */
2471 void
2472 pmap_copy(pmap_a, pmap_b, dst, len, src)
2473 pmap_t pmap_a, pmap_b;
2474 vm_offset_t dst;
2475 vm_size_t len;
2476 vm_offset_t src;
2477 {
2478 /* not implemented. */
2479 }
2480
2481 /* pmap_copy_page INTERFACE
2482 **
2483 * Copy the contents of one physical page into another.
2484 *
2485 * This function makes use of two virtual pages allocated in pmap_bootstrap()
2486 * to map the two specified physical pages into the kernel address space. It
2487  * then uses copypage() to copy one into the other.
2488 *
2489 * Note: We could use the transparent translation registers to make the
2490 * mappings. If we do so, be sure to disable interrupts before using them.
2491 */
2492 void
2493 pmap_copy_page(src, dst)
2494 vm_offset_t src, dst;
2495 {
2496 PMAP_LOCK();
2497 if (tmp_vpages_inuse)
2498 panic("pmap_copy_page: temporary vpages are in use.");
2499 tmp_vpages_inuse++;
2500
2501 	/* XXX - Use non-cached mappings to avoid cache pollution? */
2502 pmap_enter_kernel(tmp_vpages[0], src, VM_PROT_READ);
2503 pmap_enter_kernel(tmp_vpages[1], dst, VM_PROT_READ|VM_PROT_WRITE);
2504 copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
2505
2506 tmp_vpages_inuse--;
2507 PMAP_UNLOCK();
2508 }
2509
2510 /* pmap_zero_page INTERFACE
2511 **
2512 * Zero the contents of the specified physical page.
2513 *
2514  * Uses one of the virtual pages allocated in pmap_bootstrap()
2515  * to map the specified page into the kernel address space. Then uses
2516  * zeropage() to zero out the page.
2517 */
2518 void
2519 pmap_zero_page(pa)
2520 vm_offset_t pa;
2521 {
2522 PMAP_LOCK();
2523 if (tmp_vpages_inuse)
2524 panic("pmap_zero_page: temporary vpages are in use.");
2525 tmp_vpages_inuse++;
2526
2527 pmap_enter_kernel(tmp_vpages[0], pa, VM_PROT_READ|VM_PROT_WRITE);
2528 zeropage((char *) tmp_vpages[0]);
2529
2530 tmp_vpages_inuse--;
2531 PMAP_UNLOCK();
2532 }
2533
2534 /* pmap_collect INTERFACE
2535 **
2536 * Called from the VM system when we are about to swap out
2537 * the process using this pmap. This should give up any
2538 * resources held here, including all its MMU tables.
2539 */
2540 void
2541 pmap_collect(pmap)
2542 pmap_t pmap;
2543 {
2544 /* XXX - todo... */
2545 }
2546
2547 /* pmap_create INTERFACE
2548 **
2549 * Create and return a pmap structure.
2550 */
2551 pmap_t
2552 pmap_create(size)
2553 vm_size_t size;
2554 {
2555 pmap_t pmap;
2556
2557 if (size)
2558 return NULL;
2559
2560 pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
2561 pmap_pinit(pmap);
2562
2563 return pmap;
2564 }
2565
2566 /* pmap_pinit INTERNAL
2567 **
2568 * Initialize a pmap structure.
2569 */
2570 void
2571 pmap_pinit(pmap)
2572 pmap_t pmap;
2573 {
2574 bzero(pmap, sizeof(struct pmap));
2575 pmap->pm_a_tmgr = NULL;
2576 pmap->pm_a_phys = kernAphys;
2577 }
2578
2579 /* pmap_release INTERFACE
2580 **
2581 * Release any resources held by the given pmap.
2582 *
2583  * This is the reverse analog to pmap_pinit. Unlike pmap_destroy,
2584  * it does not imply that the pmap structure itself is to be
2585  * deallocated.
2586 */
2587 void
2588 pmap_release(pmap)
2589 pmap_t pmap;
2590 {
2591 /*
2592 * As long as the pmap contains no mappings,
2593 * which always should be the case whenever
2594 * this function is called, there really should
2595 * be nothing to do.
2596 *
2597 * XXX - This function is being called while there are
2598 * still valid mappings, so I guess the above must not
2599 * be true.
2600 * XXX - Unless the mappings persist due to a bug here...
2601 * + That's what was happening. The map had no mappings,
2602 * but it still had an A table. pmap_remove() was not
2603 * releasing tables when they were empty.
2604 */
2605 #ifdef PMAP_DEBUG
2606 if (pmap == NULL)
2607 return;
2608 if (pmap == pmap_kernel())
2609 panic("pmap_release: kernel pmap");
2610 #endif
2611 /*
2612 * XXX - If this pmap has an A table, give it back.
2613 * The pmap SHOULD be empty by now, and pmap_remove
2614 * should have already given back the A table...
2615 * However, I see: pmap->pm_a_tmgr->at_ecnt == 1
2616 * at this point, which means some mapping was not
2617 * removed when it should have been. -gwr
2618 */
2619 if (pmap->pm_a_tmgr != NULL) {
2620 /* First make sure we are not using it! */
2621 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2622 kernel_crp.rp_addr = kernAphys;
2623 loadcrp(&kernel_crp);
2624 }
2625 #ifdef PMAP_DEBUG /* XXX - todo! */
2626 /* XXX - Now complain... */
2627 printf("pmap_release: still have table\n");
2628 Debugger();
2629 #endif
2630 free_a_table(pmap->pm_a_tmgr, TRUE);
2631 pmap->pm_a_tmgr = NULL;
2632 pmap->pm_a_phys = kernAphys;
2633 }
2634 }
2635
2636 /* pmap_reference INTERFACE
2637 **
2638 * Increment the reference count of a pmap.
2639 */
2640 void
2641 pmap_reference(pmap)
2642 pmap_t pmap;
2643 {
2644 if (pmap == NULL)
2645 return;
2646
2647 /* pmap_lock(pmap); */
2648 pmap->pm_refcount++;
2649 /* pmap_unlock(pmap); */
2650 }
2651
2652 /* pmap_dereference INTERNAL
2653 **
2654 * Decrease the reference count on the given pmap
2655 * by one and return the current count.
2656 */
2657 int
2658 pmap_dereference(pmap)
2659 pmap_t pmap;
2660 {
2661 int rtn;
2662
2663 if (pmap == NULL)
2664 return 0;
2665
2666 /* pmap_lock(pmap); */
2667 rtn = --pmap->pm_refcount;
2668 /* pmap_unlock(pmap); */
2669
2670 return rtn;
2671 }
2672
2673 /* pmap_destroy INTERFACE
2674 **
2675 * Decrement a pmap's reference count and delete
2676 * the pmap if it becomes zero. Will be called
2677 * only after all mappings have been removed.
2678 */
2679 void
2680 pmap_destroy(pmap)
2681 pmap_t pmap;
2682 {
2683 if (pmap == NULL)
2684 return;
2685 if (pmap == &kernel_pmap)
2686 panic("pmap_destroy: kernel_pmap!");
2687 if (pmap_dereference(pmap) == 0) {
2688 pmap_release(pmap);
2689 free(pmap, M_VMPMAP);
2690 }
2691 }
2692
2693 /* pmap_is_referenced INTERFACE
2694 **
2695 * Determine if the given physical page has been
2696 * referenced (read from [or written to.])
2697 */
2698 boolean_t
2699 pmap_is_referenced(pa)
2700 vm_offset_t pa;
2701 {
2702 pv_t *pv;
2703 int idx, s;
2704
2705 if (!pv_initialized)
2706 return FALSE;
2707 	/* XXX - this may be unnecessary. */
2708 if (!is_managed(pa))
2709 return FALSE;
2710
2711 pv = pa2pv(pa);
2712 /*
2713 * Check the flags on the pv head. If they are set,
2714 * return immediately. Otherwise a search must be done.
2715 */
2716 if (pv->pv_flags & PV_FLAGS_USED)
2717 return TRUE;
2718 else {
2719 s = splimp();
2720 /*
2721 * Search through all pv elements pointing
2722 * to this page and query their reference bits
2723 */
2724 for (idx = pv->pv_idx; idx != PVE_EOL; idx =
2725 pvebase[idx].pve_next)
2726 if (MMU_PTE_USED(kernCbase[idx])) {
2727 splx(s);
2728 return TRUE;
2729 }
2730 splx(s);
2731 }
2732
2733 return FALSE;
2734 }
2735
2736 /* pmap_is_modified INTERFACE
2737 **
2738 * Determine if the given physical page has been
2739 * modified (written to.)
2740 */
2741 boolean_t
2742 pmap_is_modified(pa)
2743 vm_offset_t pa;
2744 {
2745 pv_t *pv;
2746 int idx, s;
2747
2748 if (!pv_initialized)
2749 return FALSE;
2750 	/* XXX - this may be unnecessary. */
2751 if (!is_managed(pa))
2752 return FALSE;
2753
2754 /* see comments in pmap_is_referenced() */
2755 pv = pa2pv(pa);
2756 if (pv->pv_flags & PV_FLAGS_MDFY) {
2757 return TRUE;
2758 } else {
2759 s = splimp();
2760 for (idx = pv->pv_idx; idx != PVE_EOL; idx =
2761 pvebase[idx].pve_next)
2762 if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2763 splx(s);
2764 return TRUE;
2765 }
2766 splx(s);
2767 }
2768
2769 return FALSE;
2770 }
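
/*
 * Illustrative sketch, not in the original file: the two predicates
 * above share one scan pattern which could be factored into a common
 * helper.  The head-flag and PTE-bit names below are the existing
 * ones; the helper itself (and testing attr.raw directly rather than
 * through the MMU_PTE_USED/MMU_PTE_MODIFIED macros) is an assumption
 * for illustration only.
 */
#ifdef notdef
static boolean_t
pmap_test_pv(pa, headflag, ptebits)
	vm_offset_t pa;
	u_short headflag;	/* PV_FLAGS_USED or PV_FLAGS_MDFY */
	u_int ptebits;		/* matching MMU_SHORT_PTE_* bit(s) */
{
	pv_t *pv;
	int idx, s;

	if (!pv_initialized || !is_managed(pa))
		return FALSE;
	pv = pa2pv(pa);
	if (pv->pv_flags & headflag)
		return TRUE;	/* already recorded on the pv head */
	s = splimp();
	/* Scan every PTE currently mapped to this page. */
	for (idx = pv->pv_idx; idx != PVE_EOL;
	    idx = pvebase[idx].pve_next)
		if (kernCbase[idx].attr.raw & ptebits) {
			splx(s);
			return TRUE;
		}
	splx(s);
	return FALSE;
}
#endif /* notdef */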
2771
2772 /* pmap_page_protect INTERFACE
2773 **
2774 * Applies the given protection to all mappings to the given
2775 * physical page.
2776 */
2777 void
2778 pmap_page_protect(pa, prot)
2779 vm_offset_t pa;
2780 vm_prot_t prot;
2781 {
2782 pv_t *pv;
2783 int idx, s;
2784 vm_offset_t va;
2785 struct mmu_short_pte_struct *pte;
2786 c_tmgr_t *c_tbl;
2787 pmap_t pmap, curpmap;
2788
2789 if (!is_managed(pa))
2790 return;
2791
2792 curpmap = current_pmap();
2793 pv = pa2pv(pa);
2794 s = splimp();
2795 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2796 pte = &kernCbase[idx];
2797 switch (prot) {
2798 case VM_PROT_ALL:
2799 /* do nothing */
2800 break;
2801 case VM_PROT_EXECUTE:
2802 case VM_PROT_READ:
2803 case VM_PROT_READ|VM_PROT_EXECUTE:
2804 pte->attr.raw |= MMU_SHORT_PTE_WP;
2805
2806 /*
2807 * Determine the virtual address mapped by
2808 * the PTE and flush ATC entries if necessary.
2809 */
2810 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2811 if (pmap == curpmap || pmap == pmap_kernel())
2812 TBIS(va);
2813 break;
2814 case VM_PROT_NONE:
2815 /* Save the mod/ref bits. */
2816 pv->pv_flags |= pte->attr.raw;
2817 /* Invalidate the PTE. */
2818 pte->attr.raw = MMU_DT_INVALID;
2819
2820 /*
2821 			 * Update table counts and flush ATC entries
2822 * if necessary.
2823 */
2824 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2825
2826 /*
2827 * If the PTE belongs to the kernel map,
2828 * be sure to flush the page it maps.
2829 */
2830 if (pmap == pmap_kernel()) {
2831 TBIS(va);
2832 } else {
2833 /*
2834 * The PTE belongs to a user map.
2835 				 * Update the entry count in the C
2836 * table to which it belongs and flush
2837 * the ATC if the mapping belongs to
2838 * the current pmap.
2839 */
2840 c_tbl->ct_ecnt--;
2841 if (pmap == curpmap)
2842 TBIS(va);
2843 }
2844 break;
2845 default:
2846 break;
2847 }
2848 }
2849
2850 /*
2851 * If the protection code indicates that all mappings to the page
2852 * be removed, truncate the PV list to zero entries.
2853 */
2854 if (prot == VM_PROT_NONE)
2855 pv->pv_idx = PVE_EOL;
2856 splx(s);
2857 }
2858
2859 /* pmap_get_pteinfo INTERNAL
2860 **
2861 * Called internally to find the pmap and virtual address within that
2862 * map to which the pte at the given index maps. Also includes the PTE's C
2863 * table manager.
2864 *
2865  * Returns the pmap and the C table manager through the pointer
2866  * arguments provided, and the virtual address as the return value.
2867 */
2868 vm_offset_t
2869 pmap_get_pteinfo(idx, pmap, tbl)
2870 u_int idx;
2871 pmap_t *pmap;
2872 c_tmgr_t **tbl;
2873 {
2874 a_tmgr_t *a_tbl;
2875 b_tmgr_t *b_tbl;
2876 c_tmgr_t *c_tbl;
2877 vm_offset_t va = 0;
2878
2879 /*
2880 * Determine if the PTE is a kernel PTE or a user PTE.
2881 */
2882 if (idx >= NUM_KERN_PTES) {
2883 /*
2884 * The PTE belongs to a user mapping.
2885 * Find the virtual address by decoding table indices.
2886 * Each successive decode will reveal the address from
2887 		 * least-significant to most-significant bits.
2888 *
2889 * 31 0
2890 * +-------------------------------+
2891 * |AAAAAAABBBBBBCCCCCC............|
2892 * +-------------------------------+
2893 */
2894 /* XXX: c_tbl = mmuC2tmgr(pte); */
2895 /* XXX: Would like an inline for this to validate idx... */
2896 c_tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2897 b_tbl = c_tbl->ct_parent;
2898 a_tbl = b_tbl->bt_parent;
2899 *pmap = a_tbl->at_parent;
2900 *tbl = c_tbl;
2901
2902 /* Start with the 'C' bits, then add B and A... */
2903 va |= ((idx % MMU_C_TBL_SIZE) << MMU_TIC_SHIFT);
2904 va |= (c_tbl->ct_pidx << MMU_TIB_SHIFT);
2905 va |= (b_tbl->bt_pidx << MMU_TIA_SHIFT);
2906 } else {
2907 /*
2908 * The PTE belongs to the kernel map.
2909 */
2910 *pmap = pmap_kernel();
2911
2912 va = _ptob(idx);
2913 va += KERNBASE;
2914 }
2915
2916 return va;
2917 }
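
/*
 * Worked example (illustrative, not from the original file), assuming
 * the 7/6/6-bit A/B/C index split and 13-bit page offset described at
 * the top of this file (i.e. MMU_TIA_SHIFT == 25, MMU_TIB_SHIFT == 19,
 * MMU_TIC_SHIFT == 13): a user PTE with b_tbl->bt_pidx == 2,
 * c_tbl->ct_pidx == 5 and (idx % MMU_C_TBL_SIZE) == 9 reconstructs
 *
 *	va = (2 << 25) | (5 << 19) | (9 << 13)
 *	   = 0x04000000 | 0x00280000 | 0x00012000 = 0x04292000
 *
 * which is the base of the 8K page that the PTE maps.
 */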
2918
2919 #if 0 /* XXX - I am eliminating this function. */
2920 /* pmap_find_tic INTERNAL
2921 **
2922 * Given the address of a pte, find the TIC (level 'C' table index) for
2923 * the pte within its C table.
2924 */
2925 char
2926 pmap_find_tic(pte)
2927 mmu_short_pte_t *pte;
2928 {
2929 return ((pte - mmuCbase) % MMU_C_TBL_SIZE);
2930 }
2931 #endif /* 0 */
2932
2933
2934 /* pmap_clear_modify INTERFACE
2935 **
2936 * Clear the modification bit on the page at the specified
2937 * physical address.
2938 *
2939 */
2940 void
2941 pmap_clear_modify(pa)
2942 vm_offset_t pa;
2943 {
2944 pmap_clear_pv(pa, PV_FLAGS_MDFY);
2945 }
2946
2947 /* pmap_clear_reference INTERFACE
2948 **
2949 * Clear the referenced bit on the page at the specified
2950 * physical address.
2951 */
2952 void
2953 pmap_clear_reference(pa)
2954 vm_offset_t pa;
2955 {
2956 pmap_clear_pv(pa, PV_FLAGS_USED);
2957 }
2958
2959 /* pmap_clear_pv INTERNAL
2960 **
2961 * Clears the specified flag from the specified physical address.
2962 * (Used by pmap_clear_modify() and pmap_clear_reference().)
2963 *
2964 * Flag is one of:
2965 * PV_FLAGS_MDFY - Page modified bit.
2966 * PV_FLAGS_USED - Page used (referenced) bit.
2967 *
2968 * This routine must not only clear the flag on the pv list
2969 * head. It must also clear the bit on every pte in the pv
2970 * list associated with the address.
2971 */
2972 void
2973 pmap_clear_pv(pa, flag)
2974 vm_offset_t pa;
2975 int flag;
2976 {
2977 pv_t *pv;
2978 int idx, s;
2979 vm_offset_t va;
2980 pmap_t pmap;
2981 mmu_short_pte_t *pte;
2982 c_tmgr_t *c_tbl;
2983
2984 pv = pa2pv(pa);
2985
2986 s = splimp();
2987 pv->pv_flags &= ~(flag);
2988 for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2989 pte = &kernCbase[idx];
2990 pte->attr.raw &= ~(flag);
2991 /*
2992 * The MC68030 MMU will not set the modified or
2993 * referenced bits on any MMU tables for which it has
2994 		 * a cached descriptor with its modify bit set. To ensure
2995 * that it will modify these bits on the PTE during the next
2996 * time it is written to or read from, we must flush it from
2997 * the ATC.
2998 *
2999 * Ordinarily it is only necessary to flush the descriptor
3000 * if it is used in the current address space. But since I
3001 * am not sure that there will always be a notion of
3002 * 'the current address space' when this function is called,
3003 * I will skip the test and always flush the address. It
3004 * does no harm.
3005 */
3006 va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3007 TBIS(va);
3008 }
3009 splx(s);
3010 }
3011
3012 /* pmap_extract INTERFACE
3013 **
3014 * Return the physical address mapped by the virtual address
3015 * in the specified pmap or 0 if it is not known.
3016 *
3017 * Note: this function should also apply an exclusive lock
3018 * on the pmap system during its duration.
3019 */
3020 vm_offset_t
3021 pmap_extract(pmap, va)
3022 pmap_t pmap;
3023 vm_offset_t va;
3024 {
3025 int a_idx, b_idx, pte_idx;
3026 a_tmgr_t *a_tbl;
3027 b_tmgr_t *b_tbl;
3028 c_tmgr_t *c_tbl;
3029 mmu_short_pte_t *c_pte;
3030
3031 if (pmap == pmap_kernel())
3032 return pmap_extract_kernel(va);
3033 if (pmap == NULL)
3034 return 0;
3035
3036 if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
3037 &c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
3038 return 0;
3039
3040 if (!MMU_VALID_DT(*c_pte))
3041 return 0;
3042
3043 return (MMU_PTE_PA(*c_pte));
3044 }
3045
3046 /* pmap_extract_kernel INTERNAL
3047 **
3048 * Extract a translation from the kernel address space.
3049 */
3050 vm_offset_t
3051 pmap_extract_kernel(va)
3052 vm_offset_t va;
3053 {
3054 mmu_short_pte_t *pte;
3055
3056 pte = &kernCbase[(u_int) _btop(va - KERNBASE)];
3057 return MMU_PTE_PA(*pte);
3058 }
3059
3060 /* pmap_remove_kernel INTERNAL
3061 **
3062 * Remove the mapping of a range of virtual addresses from the kernel map.
3063 * The arguments are already page-aligned.
3064 */
3065 void
3066 pmap_remove_kernel(sva, eva)
3067 vm_offset_t sva;
3068 vm_offset_t eva;
3069 {
3070 int idx, eidx;
3071
3072 #ifdef PMAP_DEBUG
3073 if ((sva & PGOFSET) || (eva & PGOFSET))
3074 panic("pmap_remove_kernel: alignment");
3075 #endif
3076
3077 idx = _btop(sva - KERNBASE);
3078 eidx = _btop(eva - KERNBASE);
3079
3080 while (idx < eidx)
3081 pmap_remove_pte(&kernCbase[idx++]);
3082 	/* Always flush the ATC when manipulating the kernel address space. */
3083 TBIAS();
3084 }
3085
3086 /* pmap_remove INTERFACE
3087 **
3088 * Remove the mapping of a range of virtual addresses from the given pmap.
3089 *
3090 * If the range contains any wired entries, this function will probably create
3091 * disaster.
3092 */
3093 void
3094 pmap_remove(pmap, start, end)
3095 pmap_t pmap;
3096 vm_offset_t start;
3097 vm_offset_t end;
3098 {
3099
3100 if (pmap == pmap_kernel()) {
3101 pmap_remove_kernel(start, end);
3102 return;
3103 }
3104
3105 /*
3106 * XXX - Temporary(?) statement to prevent panic caused
3107 * by vm_alloc_with_pager() handing us a software map (ie NULL)
3108 * to remove because it couldn't get backing store.
3109 * (I guess.)
3110 */
3111 if (pmap == NULL)
3112 return;
3113
3114 /*
3115 * If the pmap doesn't have an A table of its own, it has no mappings
3116 * that can be removed.
3117 */
3118 if (pmap->pm_a_tmgr == NULL)
3119 return;
3120
3121 /*
3122 * Remove the specified range from the pmap. If the function
3123 * returns true, the operation removed all the valid mappings
3124 * in the pmap and freed its A table. If this happened to the
3125 * currently loaded pmap, the MMU root pointer must be reloaded
3126 * with the default 'kernel' map.
3127 */
3128 if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
3129 if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3130 kernel_crp.rp_addr = kernAphys;
3131 loadcrp(&kernel_crp);
3132 /* will do TLB flush below */
3133 }
3134 pmap->pm_a_tmgr = NULL;
3135 pmap->pm_a_phys = kernAphys;
3136 }
3137
3138 /*
3139 * If we just modified the current address space,
3140 * make sure to flush the MMU cache.
3141 *
3142 	 * XXX - this could be an unnecessarily large flush.
3143 * XXX - Could decide, based on the size of the VA range
3144 * to be removed, whether to flush "by pages" or "all".
3145 */
3146 if (pmap == current_pmap())
3147 TBIAU();
3148 }
3149
3150 /* pmap_remove_a INTERNAL
3151 **
3152 * This is function number one in a set of three that removes a range
3153 * of memory in the most efficient manner by removing the highest possible
3154 * tables from the memory space. This particular function attempts to remove
3155 * as many B tables as it can, delegating the remaining fragmented ranges to
3156 * pmap_remove_b().
3157 *
3158 * If the removal operation results in an empty A table, the function returns
3159 * TRUE.
3160 *
3161 * It's ugly but will do for now.
3162 */
3163 boolean_t
3164 pmap_remove_a(a_tbl, start, end)
3165 a_tmgr_t *a_tbl;
3166 vm_offset_t start;
3167 vm_offset_t end;
3168 {
3169 boolean_t empty;
3170 int idx;
3171 vm_offset_t nstart, nend;
3172 b_tmgr_t *b_tbl;
3173 mmu_long_dte_t *a_dte;
3174 mmu_short_dte_t *b_dte;
3175
3176 /*
3177 * The following code works with what I call a 'granularity
3178 	 * reduction algorithm'. A range of addresses will always have
3179 * the following properties, which are classified according to
3180 * how the range relates to the size of the current granularity
3181 * - an A table entry:
3182 *
3183 * 1 2 3 4
3184 * -+---+---+---+---+---+---+---+-
3185 * -+---+---+---+---+---+---+---+-
3186 *
3187 * A range will always start on a granularity boundary, illustrated
3188 * by '+' signs in the table above, or it will start at some point
3189 	 * in between a granularity boundary, as illustrated by point 1.
3190 * The first step in removing a range of addresses is to remove the
3191 * range between 1 and 2, the nearest granularity boundary. This
3192 * job is handled by the section of code governed by the
3193 * 'if (start < nstart)' statement.
3194 *
3195 	 * A range will always encompass zero or more integral granules,
3196 * illustrated by points 2 and 3. Integral granules are easy to
3197 * remove. The removal of these granules is the second step, and
3198 * is handled by the code block 'if (nstart < nend)'.
3199 *
3200 * Lastly, a range will always end on a granularity boundary,
3201 	 * illustrated by point 3, or it will fall just beyond one, as at point
3202 * 4. The last step involves removing this range and is handled by
3203 * the code block 'if (nend < end)'.
3204 */
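
	/*
	 * Worked example (illustrative, not from the original file),
	 * assuming the 7-bit A index of the head comment makes
	 * MMU_TIA_RANGE 32MB (1 << 25): removing [0x01800000,
	 * 0x07400000) gives nstart = 0x02000000 and nend = 0x06000000,
	 * so the fragment [0x01800000, 0x02000000) goes to
	 * pmap_remove_b(), the two whole granules in [0x02000000,
	 * 0x06000000) have their B tables freed outright, and the tail
	 * [0x06000000, 0x07400000) goes to pmap_remove_b() again.
	 */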
3205 nstart = MMU_ROUND_UP_A(start);
3206 nend = MMU_ROUND_A(end);
3207
3208 if (start < nstart) {
3209 /*
3210 * This block is executed if the range starts between
3211 * a granularity boundary.
3212 *
3213 * First find the DTE which is responsible for mapping
3214 * the start of the range.
3215 */
3216 idx = MMU_TIA(start);
3217 a_dte = &a_tbl->at_dtbl[idx];
3218
3219 /*
3220 * If the DTE is valid then delegate the removal of the sub
3221 * range to pmap_remove_b(), which can remove addresses at
3222 * a finer granularity.
3223 */
3224 if (MMU_VALID_DT(*a_dte)) {
3225 b_dte = mmu_ptov(a_dte->addr.raw);
3226 b_tbl = mmuB2tmgr(b_dte);
3227
3228 /*
3229 			 * The sub range to be removed starts at the start
3230 			 * of the full range we were asked to remove, and ends
3231 			 * at the lesser of:
3232 			 * 1. The end of the full range, -or-
3233 			 * 2. The start of the full range, rounded up to the
3234 			 * nearest granularity boundary.
3235 */
3236 if (end < nstart)
3237 empty = pmap_remove_b(b_tbl, start, end);
3238 else
3239 empty = pmap_remove_b(b_tbl, start, nstart);
3240
3241 /*
3242 * If the removal resulted in an empty B table,
3243 * invalidate the DTE that points to it and decrement
3244 * the valid entry count of the A table.
3245 */
3246 if (empty) {
3247 a_dte->attr.raw = MMU_DT_INVALID;
3248 a_tbl->at_ecnt--;
3249 }
3250 }
3251 /*
3252 * If the DTE is invalid, the address range is already non-
3253 		 * existent and can simply be skipped.
3254 */
3255 }
3256 if (nstart < nend) {
3257 /*
3258 * This block is executed if the range spans a whole number
3259 * multiple of granules (A table entries.)
3260 *
3261 * First find the DTE which is responsible for mapping
3262 * the start of the first granule involved.
3263 */
3264 idx = MMU_TIA(nstart);
3265 a_dte = &a_tbl->at_dtbl[idx];
3266
3267 /*
3268 * Remove entire sub-granules (B tables) one at a time,
3269 * until reaching the end of the range.
3270 */
3271 for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3272 if (MMU_VALID_DT(*a_dte)) {
3273 /*
3274 * Find the B table manager for the
3275 * entry and free it.
3276 */
3277 b_dte = mmu_ptov(a_dte->addr.raw);
3278 b_tbl = mmuB2tmgr(b_dte);
3279 free_b_table(b_tbl, TRUE);
3280
3281 /*
3282 * Invalidate the DTE that points to the
3283 * B table and decrement the valid entry
3284 * count of the A table.
3285 */
3286 a_dte->attr.raw = MMU_DT_INVALID;
3287 a_tbl->at_ecnt--;
3288 }
3289 }
3290 if (nend < end) {
3291 /*
3292 * This block is executed if the range ends beyond a
3293 * granularity boundary.
3294 *
3295 * First find the DTE which is responsible for mapping
3296 * the start of the nearest (rounded down) granularity
3297 * boundary.
3298 */
3299 idx = MMU_TIA(nend);
3300 a_dte = &a_tbl->at_dtbl[idx];
3301
3302 /*
3303 * If the DTE is valid then delegate the removal of the sub
3304 * range to pmap_remove_b(), which can remove addresses at
3305 * a finer granularity.
3306 */
3307 if (MMU_VALID_DT(*a_dte)) {
3308 /*
3309 * Find the B table manager for the entry
3310 * and hand it to pmap_remove_b() along with
3311 * the sub range.
3312 */
3313 b_dte = mmu_ptov(a_dte->addr.raw);
3314 b_tbl = mmuB2tmgr(b_dte);
3315
3316 empty = pmap_remove_b(b_tbl, nend, end);
3317
3318 /*
3319 * If the removal resulted in an empty B table,
3320 * invalidate the DTE that points to it and decrement
3321 * the valid entry count of the A table.
3322 */
3323 if (empty) {
3324 a_dte->attr.raw = MMU_DT_INVALID;
3325 a_tbl->at_ecnt--;
3326 }
3327 }
3328 }
3329
3330 /*
3331 * If there are no more entries in the A table, release it
3332 * back to the available pool and return TRUE.
3333 */
3334 if (a_tbl->at_ecnt == 0) {
3335 a_tbl->at_parent = NULL;
3336 TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3337 TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3338 empty = TRUE;
3339 } else {
3340 empty = FALSE;
3341 }
3342
3343 return empty;
3344 }
3345
3346 /* pmap_remove_b INTERNAL
3347 **
3348 * Remove a range of addresses from an address space, trying to remove entire
3349 * C tables if possible.
3350 *
3351 * If the operation results in an empty B table, the function returns TRUE.
3352 */
3353 boolean_t
3354 pmap_remove_b(b_tbl, start, end)
3355 b_tmgr_t *b_tbl;
3356 vm_offset_t start;
3357 vm_offset_t end;
3358 {
3359 boolean_t empty;
3360 int idx;
3361 vm_offset_t nstart, nend, rstart;
3362 c_tmgr_t *c_tbl;
3363 mmu_short_dte_t *b_dte;
3364 mmu_short_pte_t *c_dte;
3365
3366
3367 nstart = MMU_ROUND_UP_B(start);
3368 nend = MMU_ROUND_B(end);
3369
3370 if (start < nstart) {
3371 idx = MMU_TIB(start);
3372 b_dte = &b_tbl->bt_dtbl[idx];
3373 if (MMU_VALID_DT(*b_dte)) {
3374 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3375 c_tbl = mmuC2tmgr(c_dte);
3376 if (end < nstart)
3377 empty = pmap_remove_c(c_tbl, start, end);
3378 else
3379 empty = pmap_remove_c(c_tbl, start, nstart);
3380 if (empty) {
3381 b_dte->attr.raw = MMU_DT_INVALID;
3382 b_tbl->bt_ecnt--;
3383 }
3384 }
3385 }
3386 if (nstart < nend) {
3387 idx = MMU_TIB(nstart);
3388 b_dte = &b_tbl->bt_dtbl[idx];
3389 rstart = nstart;
3390 while (rstart < nend) {
3391 if (MMU_VALID_DT(*b_dte)) {
3392 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3393 c_tbl = mmuC2tmgr(c_dte);
3394 free_c_table(c_tbl, TRUE);
3395 b_dte->attr.raw = MMU_DT_INVALID;
3396 b_tbl->bt_ecnt--;
3397 }
3398 b_dte++;
3399 rstart += MMU_TIB_RANGE;
3400 }
3401 }
3402 if (nend < end) {
3403 idx = MMU_TIB(nend);
3404 b_dte = &b_tbl->bt_dtbl[idx];
3405 if (MMU_VALID_DT(*b_dte)) {
3406 c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3407 c_tbl = mmuC2tmgr(c_dte);
3408 empty = pmap_remove_c(c_tbl, nend, end);
3409 if (empty) {
3410 b_dte->attr.raw = MMU_DT_INVALID;
3411 b_tbl->bt_ecnt--;
3412 }
3413 }
3414 }
3415
3416 if (b_tbl->bt_ecnt == 0) {
3417 b_tbl->bt_parent = NULL;
3418 TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3419 TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3420 empty = TRUE;
3421 } else {
3422 empty = FALSE;
3423 }
3424
3425 return empty;
3426 }
3427
3428 /* pmap_remove_c INTERNAL
3429 **
3430 * Remove a range of addresses from the given C table.
3431 */
3432 boolean_t
3433 pmap_remove_c(c_tbl, start, end)
3434 c_tmgr_t *c_tbl;
3435 vm_offset_t start;
3436 vm_offset_t end;
3437 {
3438 boolean_t empty;
3439 int idx;
3440 mmu_short_pte_t *c_pte;
3441
3442 idx = MMU_TIC(start);
3443 c_pte = &c_tbl->ct_dtbl[idx];
3444 	for (; start < end; start += MMU_PAGE_SIZE, c_pte++) {
3445 if (MMU_VALID_DT(*c_pte)) {
3446 pmap_remove_pte(c_pte);
3447 c_tbl->ct_ecnt--;
3448 }
3449 }
3450
3451 if (c_tbl->ct_ecnt == 0) {
3452 c_tbl->ct_parent = NULL;
3453 TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3454 TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3455 empty = TRUE;
3456 } else {
3457 empty = FALSE;
3458 }
3459
3460 return empty;
3461 }
3462
3463 /* is_managed INTERNAL
3464 **
3465 * Determine if the given physical address is managed by the PV system.
3466 * Note that this logic assumes that no one will ask for the status of
3467  * addresses which lie in between the memory banks on the 3/80. If they
3468 * do so, it will falsely report that it is managed.
3469 *
3470 * Note: A "managed" address is one that was reported to the VM system as
3471 * a "usable page" during system startup. As such, the VM system expects the
3472  * pmap module to keep accurate track of the usage of those pages.
3473 * Any page not given to the VM system at startup does not exist (as far as
3474 * the VM system is concerned) and is therefore "unmanaged." Examples are
3475 * those pages which belong to the ROM monitor and the memory allocated before
3476 * the VM system was started.
3477 */
3478 boolean_t
3479 is_managed(pa)
3480 vm_offset_t pa;
3481 {
3482 if (pa >= avail_start && pa < avail_end)
3483 return TRUE;
3484 else
3485 return FALSE;
3486 }
3487
3488 /* pmap_bootstrap_alloc INTERNAL
3489 **
3490 * Used internally for memory allocation at startup when malloc is not
3491 * available. This code will fail once it crosses the first memory
3492 * bank boundary on the 3/80. Hopefully by then however, the VM system
3493 * will be in charge of allocation.
3494 */
3495 void *
3496 pmap_bootstrap_alloc(size)
3497 int size;
3498 {
3499 void *rtn;
3500
3501 #ifdef PMAP_DEBUG
3502 if (bootstrap_alloc_enabled == FALSE) {
3503 mon_printf("pmap_bootstrap_alloc: disabled\n");
3504 sunmon_abort();
3505 }
3506 #endif
3507
3508 rtn = (void *) virtual_avail;
3509 virtual_avail += size;
3510
3511 #ifdef PMAP_DEBUG
3512 if (virtual_avail > virtual_contig_end) {
3513 mon_printf("pmap_bootstrap_alloc: out of mem\n");
3514 sunmon_abort();
3515 }
3516 #endif
3517
3518 return rtn;
3519 }
3520
3521 /* pmap_bootstrap_aalign INTERNAL
3522 **
3523  * Used to ensure that the next call to pmap_bootstrap_alloc() will
3524 * return a chunk of memory aligned to the specified size.
3525 *
3526 * Note: This function will only support alignment sizes that are powers
3527 * of two.
3528 */
3529 void
3530 pmap_bootstrap_aalign(size)
3531 int size;
3532 {
3533 int off;
3534
3535 off = virtual_avail & (size - 1);
3536 if (off) {
3537 (void) pmap_bootstrap_alloc(size - off);
3538 }
3539 }
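
/*
 * Worked example (illustrative, not from the original file): with
 * size == 0x2000 and virtual_avail == 0x3a500, off = 0x3a500 & 0x1fff
 * = 0x500, so 0x2000 - 0x500 = 0x1b00 bytes are thrown away and the
 * next pmap_bootstrap_alloc() returns 0x3c000, which is
 * 0x2000-aligned.  This only works because 'size' is a power of two,
 * as the note above requires.
 */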
3540
3541 /* pmap_pa_exists
3542 **
3543 * Used by the /dev/mem driver to see if a given PA is memory
3544 * that can be mapped. (The PA is not in a hole.)
3545 */
3546 int
3547 pmap_pa_exists(pa)
3548 vm_offset_t pa;
3549 {
3550 /* XXX - NOTYET */
3551 return (0);
3552 }
3553
3554 /* pmap_activate INTERFACE
3555 **
3556 * This is called by locore.s:cpu_switch when we are switching to a
3557 * new process. This should load the MMU context for the new proc.
3558 * XXX - Later, this should be done directly in locore.s
3559 */
3560 void
3561 pmap_activate(pmap)
3562 pmap_t pmap;
3563 {
3564 u_long rootpa;
3565
3566 /* Only do reload/flush if we have to. */
3567 rootpa = pmap->pm_a_phys;
3568 if (kernel_crp.rp_addr != rootpa) {
3569 DPRINT(("pmap_activate(%p)\n", pmap));
3570 kernel_crp.rp_addr = rootpa;
3571 loadcrp(&kernel_crp);
3572 TBIAU();
3573 }
3574 }
3575
3576
3577 /* pmap_update
3578 **
3579 * Apply any delayed changes scheduled for all pmaps immediately.
3580 *
3581 * No delayed operations are currently done in this pmap.
3582 */
3583 void
3584 pmap_update()
3585 {
3586 /* not implemented. */
3587 }
3588
3589 /*
3590 * Fill in the cpu_kcore header for dumpsys()
3591 * (See machdep.c)
3592 */
3593 void
3594 pmap_set_kcore_hdr(chdr_p)
3595 cpu_kcore_hdr_t *chdr_p;
3596 {
3597 u_long spa, len;
3598 int i;
3599
3600 chdr_p->ckh_contig_end = virtual_contig_end;
3601 chdr_p->ckh_kernCbase = (u_long) kernCbase;
3602 for (i = 0; i < NPHYS_RAM_SEGS; i++) {
3603 spa = avail_mem[i].pmem_start;
3604 spa = _trunc_page(spa);
3605 len = avail_mem[i].pmem_end - spa;
3606 len = _round_page(len);
3607 chdr_p->ram_segs[i].start = spa;
3608 chdr_p->ram_segs[i].size = len;
3609 }
3610 }
3611
3612
3613 /* pmap_virtual_space INTERFACE
3614 **
3615 * Return the current available range of virtual addresses in the
3616  * arguments provided. Only really called once.
3617 */
3618 void
3619 pmap_virtual_space(vstart, vend)
3620 vm_offset_t *vstart, *vend;
3621 {
3622 *vstart = virtual_avail;
3623 *vend = virtual_end;
3624 }

/* pmap_free_pages			INTERFACE
 **
 * Return the number of physical pages still available.
 *
 * This is probably going to be a mess, but it's only called
 * once and it's the only function left that I have to implement!
 */
u_int
pmap_free_pages()
{
	int i;
	u_int left;
	vm_offset_t avail;

	avail = avail_next;
	left = 0;
	i = 0;
	while (avail >= avail_mem[i].pmem_end) {
		if (avail_mem[i].pmem_next == NULL)
			return 0;
		i++;
	}
	while (i < NPHYS_RAM_SEGS) {
		if (avail < avail_mem[i].pmem_start) {
			/*
			 * Avail is inside a hole; march it
			 * up to the next bank.
			 */
			avail = avail_mem[i].pmem_start;
		}
		left += _btop(avail_mem[i].pmem_end - avail);
		if (avail_mem[i].pmem_next == NULL)
			break;
		i++;
	}

	return left;
}
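
/* A standalone sketch of the hole-skipping walk above, over two
 * hypothetical RAM banks with a hole between them.  The bank layout
 * and the PMAP_EXAMPLE guard are stand-ins for illustration; they are
 * not part of the kernel build.
 */
#ifdef PMAP_EXAMPLE
#include <stdio.h>

struct bank {
	unsigned long start, end;	/* physical range of the bank */
	struct bank *next;		/* NULL terminates the list */
};

int
main()
{
	static struct bank b1 = { 0x01000000, 0x01800000, NULL };
	static struct bank b0 = { 0x00000000, 0x00400000, &b1 };
	struct bank *b;
	unsigned long avail = 0x00200000;	/* hypothetical avail_next */
	unsigned long left = 0;

	for (b = &b0; b != NULL; b = b->next) {
		if (avail < b->start)
			avail = b->start;	/* jump over the hole */
		if (avail < b->end)
			left += (b->end - avail) >> 13;	/* 8K pages */
	}
	/* 0x200000 left in bank 0 plus 0x800000 in bank 1: 1280 pages. */
	printf("%lu pages free\n", left);
	return (0);
}
#endif	/* PMAP_EXAMPLE */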

/* pmap_page_index			INTERFACE
 **
 * Return the index of the given physical page in a list of usable
 * physical pages in the system.  Holes in physical memory may be counted
 * if so desired.  As long as pmap_free_pages() and pmap_page_index()
 * agree as to whether holes in memory do or do not count as valid pages,
 * it really doesn't matter.  However, if you would like to save a little
 * memory, don't count holes as valid pages.  This is even more true when
 * the holes are large.
 *
 * We will not count holes as valid pages.  We can generate page indices
 * that conform to this by using the memory bank structures initialized
 * in pmap_alloc_pv().
 */
int
pmap_page_index(pa)
	vm_offset_t pa;
{
	struct pmap_physmem_struct *bank = avail_mem;

	/* Search for the memory bank with this page. */
	/* XXX - What if it is not physical memory? */
	while (pa > bank->pmem_end)
		bank = bank->pmem_next;
	pa -= bank->pmem_start;

	return (bank->pmem_pvbase + _btop(pa));
}
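
/* Worked example of the computation above (hypothetical values, 8K
 * pages): for a bank with pmem_start = 0x01000000 and pmem_pvbase =
 * 512, the page at pa = 0x01004000 lies two pages into the bank, so
 * its index is 512 + _btop(0x4000) = 514.
 */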

/* pmap_next_page			INTERFACE
 **
 * Place the physical address of the next available page in the
 * argument given.  Returns FALSE if there are no more pages left.
 *
 * This function must jump over any holes in physical memory.
 * Once this function is used, any use of pmap_bootstrap_alloc()
 * is a sin.  Sinners will be punished with erratic behavior.
 */
boolean_t
pmap_next_page(pa)
	vm_offset_t *pa;
{
	static struct pmap_physmem_struct *curbank = avail_mem;

	/* XXX - temporary ROM saving hack. */
	if (avail_next >= avail_end)
		return FALSE;

	if (avail_next >= curbank->pmem_end) {
		if (curbank->pmem_next == NULL)
			return FALSE;
		else {
			curbank = curbank->pmem_next;
			avail_next = curbank->pmem_start;
		}
	}

	*pa = avail_next;
	avail_next += NBPG;
	return TRUE;
}
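
/* Usage sketch (hypothetical banks 0x0-0x400000 and 0x01000000-
 * 0x01800000): once avail_next reaches 0x400000, the next call
 * advances curbank and hands out the page at 0x01000000, so callers
 * never receive an address inside the hole.
 */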

/* pmap_count			INTERFACE
 **
 * Return the number of resident (valid) pages in the given pmap.
 *
 * Note: If this function is handed the kernel map, it will report
 * that it has no mappings.  Hopefully the VM system won't ask for kernel
 * map statistics.
 */
segsz_t
pmap_count(pmap, type)
	pmap_t pmap;
	int type;
{
	u_int count;
	int a_idx, b_idx;
	a_tmgr_t *a_tbl;
	b_tmgr_t *b_tbl;
	c_tmgr_t *c_tbl;

	/*
	 * If the pmap does not have its own A table manager, it has no
	 * valid entries.
	 */
	if (pmap->pm_a_tmgr == NULL)
		return 0;

	a_tbl = pmap->pm_a_tmgr;

	count = 0;
	for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
		if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
			b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
			for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
				if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
					c_tbl = mmuC2tmgr(
					    mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
					if (type == 0)
						/*
						 * A resident entry count has been requested.
						 */
						count += c_tbl->ct_ecnt;
					else
						/*
						 * A wired entry count has been requested.
						 */
						count += c_tbl->ct_wcnt;
				}
			}
		}
	}

	return count;
}
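
/* The walk above, reduced to its shape: descend from every valid A
 * descriptor to its B table, from every valid B descriptor to its C
 * table manager, and sum the leaf counters.  A standalone sketch with
 * the tables shrunk to arrays of pointers (all names, sizes and the
 * PMAP_EXAMPLE guard are hypothetical stand-ins):
 */
#ifdef PMAP_EXAMPLE
#define A_SZ	4	/* hypothetical; the real A table has 128 entries */
#define B_SZ	4	/* hypothetical; the real B table has 64 entries */

struct c_counts { int ecnt, wcnt; };	/* stand-in for c_tmgr_t counters */

int
count_pages(a_tbl, wired)
	struct c_counts *a_tbl[A_SZ][B_SZ];
	int wired;
{
	struct c_counts *c;
	int a, b, total;

	total = 0;
	for (a = 0; a < A_SZ; a++)		/* walk level A */
		for (b = 0; b < B_SZ; b++) {	/* walk level B */
			c = a_tbl[a][b];	/* level-C counters */
			if (c == NULL)		/* invalid descriptor */
				continue;
			total += wired ? c->wcnt : c->ecnt;
		}
	return (total);
}
#endif	/* PMAP_EXAMPLE */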

/************************ SUN3 COMPATIBILITY ROUTINES ********************
 * The following routines are only used by DDB for tricky kernel text   *
 * operations in db_memrw.c.  They are provided for sun3 compatibility. *
 *************************************************************************/
/* get_pte			INTERNAL
 **
 * Return the page descriptor that describes the kernel mapping
 * of the given virtual address.
 */
extern u_long ptest_addr __P((u_long));	/* XXX: locore.s */
u_long
get_pte(va)
	vm_offset_t va;
{
	u_long pte_pa;
	mmu_short_pte_t *pte;

	/* Get the physical address of the PTE */
	pte_pa = ptest_addr(va & ~PGOFSET);

	/* Convert to a virtual address... */
	pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);

	/* Make sure it is in our level-C tables... */
	if ((pte < kernCbase) ||
	    (pte >= &mmuCbase[NUM_USER_PTES]))
		return 0;

	/* ... and just return its contents. */
	return (pte->attr.raw);
}


/* set_pte			INTERNAL
 **
 * Set the page descriptor that describes the kernel mapping
 * of the given virtual address.
 */
void
set_pte(va, pte)
	vm_offset_t va;
	vm_offset_t pte;
{
	u_long idx;

	if (va < KERNBASE)
		return;

	idx = (unsigned long) _btop(va - KERNBASE);
	kernCbase[idx].attr.raw = pte;
}
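
/* Worked example of the index arithmetic above (8K pages): for
 * va = KERNBASE + 0x6000, _btop(0x6000) = 3, so the new descriptor
 * lands in kernCbase[3].  get_pte() performs the inverse lookup by
 * asking the MMU directly through ptest_addr().
 */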

#ifdef PMAP_DEBUG
/************************** DEBUGGING ROUTINES **************************
 * The following routines are meant to be an aid to debugging the pmap  *
 * system.  They are callable from the DDB command line and should be   *
 * prepared to be handed unstable or incomplete states of the system.   *
 ************************************************************************/

/* pv_list
 **
 * List all pages found on the pv list for the given physical page.
 * To avoid endless loops, the listing will stop at the end of the list
 * or after 'n' entries - whichever comes first.
 */
void
pv_list(pa, n)
	vm_offset_t pa;
	int n;
{
	int idx;
	vm_offset_t va;
	pv_t *pv;
	c_tmgr_t *c_tbl;
	pmap_t pmap;

	pv = pa2pv(pa);
	idx = pv->pv_idx;

	for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl 0x%x\n",
		    idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
	}
}
#endif /* PMAP_DEBUG */

#ifdef NOT_YET
/* and maybe not ever */
/************************** LOW-LEVEL ROUTINES **************************
 * These routines will eventually be re-written into assembly and placed*
 * in locore.s.  They are here now as stubs so that the pmap module can *
 * be linked as a standalone user program for testing.                  *
 ************************************************************************/
/* flush_atc_crp			INTERNAL
 **
 * Flush all page descriptors derived from the given CPU Root Pointer
 * (CRP), or 'A' table as it is known here, from the 68851's automatic
 * cache.
 */
void
flush_atc_crp(a_tbl)
	mmu_long_dte_t *a_tbl;	/* A table whose descriptors are flushed */
{
	mmu_long_rp_t rp;

	/*
	 * Create a temporary root table pointer that points to the
	 * given A table.
	 */
	rp.attr.raw = ~MMU_LONG_RP_LU;
	rp.addr.raw = (unsigned int) a_tbl;

	mmu_pflushr(&rp);
	/* mmu_pflushr:
	 *	movel	sp(4)@,a0
	 *	pflushr	a0@
	 *	rts
	 */
}
#endif /* NOT_YET */